diff options
author | Takashi Iwai <tiwai@suse.de> | 2015-02-05 15:29:33 -0500 |
---|---|---|
committer | Takashi Iwai <tiwai@suse.de> | 2015-02-05 15:29:33 -0500 |
commit | 08c191de948d6cd9dc613db0683e2467ce44a18c (patch) | |
tree | 2b37672c43991efc3cc4e0ccd88ffc3051fbd3b7 /arch | |
parent | 4161b4505f1690358ac0a9ee59845a7887336b21 (diff) | |
parent | c503ca52a57cfe24f8937ca145a96ba94f6bb112 (diff) |
Merge tag 'asoc-fix-v3.19-rc7' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v3.19
A few last minute fixes for v3.19, all driver specific. None of them
stand out particularly - it's all the standard people who are affected
will care stuff.
The Samsung fix is a DT only fix for the audio controller, it's being
merged via the ASoC tree due to process mess-ups (the submitter sent it
at the end of a tangentially related series rather than separately to the
ARM folks) in order to make sure that it gets to people sooner.
Diffstat (limited to 'arch')
74 files changed, 600 insertions, 313 deletions
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c index 98838a05ba6d..9d0ac091a52a 100644 --- a/arch/alpha/mm/fault.c +++ b/arch/alpha/mm/fault.c | |||
@@ -156,6 +156,8 @@ retry: | |||
156 | if (unlikely(fault & VM_FAULT_ERROR)) { | 156 | if (unlikely(fault & VM_FAULT_ERROR)) { |
157 | if (fault & VM_FAULT_OOM) | 157 | if (fault & VM_FAULT_OOM) |
158 | goto out_of_memory; | 158 | goto out_of_memory; |
159 | else if (fault & VM_FAULT_SIGSEGV) | ||
160 | goto bad_area; | ||
159 | else if (fault & VM_FAULT_SIGBUS) | 161 | else if (fault & VM_FAULT_SIGBUS) |
160 | goto do_sigbus; | 162 | goto do_sigbus; |
161 | BUG(); | 163 | BUG(); |
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c index 6f7e3a68803a..563cb27e37f5 100644 --- a/arch/arc/mm/fault.c +++ b/arch/arc/mm/fault.c | |||
@@ -161,6 +161,8 @@ good_area: | |||
161 | 161 | ||
162 | if (fault & VM_FAULT_OOM) | 162 | if (fault & VM_FAULT_OOM) |
163 | goto out_of_memory; | 163 | goto out_of_memory; |
164 | else if (fault & VM_FAULT_SIGSEGV) | ||
165 | goto bad_area; | ||
164 | else if (fault & VM_FAULT_SIGBUS) | 166 | else if (fault & VM_FAULT_SIGBUS) |
165 | goto do_sigbus; | 167 | goto do_sigbus; |
166 | 168 | ||
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi index b8168f1f8139..24ff27049ce0 100644 --- a/arch/arm/boot/dts/exynos4.dtsi +++ b/arch/arm/boot/dts/exynos4.dtsi | |||
@@ -368,7 +368,7 @@ | |||
368 | }; | 368 | }; |
369 | 369 | ||
370 | i2s1: i2s@13960000 { | 370 | i2s1: i2s@13960000 { |
371 | compatible = "samsung,s5pv210-i2s"; | 371 | compatible = "samsung,s3c6410-i2s"; |
372 | reg = <0x13960000 0x100>; | 372 | reg = <0x13960000 0x100>; |
373 | clocks = <&clock CLK_I2S1>; | 373 | clocks = <&clock CLK_I2S1>; |
374 | clock-names = "iis"; | 374 | clock-names = "iis"; |
@@ -378,7 +378,7 @@ | |||
378 | }; | 378 | }; |
379 | 379 | ||
380 | i2s2: i2s@13970000 { | 380 | i2s2: i2s@13970000 { |
381 | compatible = "samsung,s5pv210-i2s"; | 381 | compatible = "samsung,s3c6410-i2s"; |
382 | reg = <0x13970000 0x100>; | 382 | reg = <0x13970000 0x100>; |
383 | clocks = <&clock CLK_I2S2>; | 383 | clocks = <&clock CLK_I2S2>; |
384 | clock-names = "iis"; | 384 | clock-names = "iis"; |
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts index 8c1febd7e3f2..c108bb451337 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dts +++ b/arch/arm/boot/dts/imx6sx-sdb.dts | |||
@@ -166,12 +166,12 @@ | |||
166 | #address-cells = <1>; | 166 | #address-cells = <1>; |
167 | #size-cells = <0>; | 167 | #size-cells = <0>; |
168 | 168 | ||
169 | ethphy1: ethernet-phy@0 { | 169 | ethphy1: ethernet-phy@1 { |
170 | reg = <0>; | 170 | reg = <1>; |
171 | }; | 171 | }; |
172 | 172 | ||
173 | ethphy2: ethernet-phy@1 { | 173 | ethphy2: ethernet-phy@2 { |
174 | reg = <1>; | 174 | reg = <2>; |
175 | }; | 175 | }; |
176 | }; | 176 | }; |
177 | }; | 177 | }; |
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 7b4099fcf817..d5c4669224b1 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi | |||
@@ -17,14 +17,6 @@ | |||
17 | 17 | ||
18 | aliases { | 18 | aliases { |
19 | ethernet0 = &emac; | 19 | ethernet0 = &emac; |
20 | serial0 = &uart0; | ||
21 | serial1 = &uart1; | ||
22 | serial2 = &uart2; | ||
23 | serial3 = &uart3; | ||
24 | serial4 = &uart4; | ||
25 | serial5 = &uart5; | ||
26 | serial6 = &uart6; | ||
27 | serial7 = &uart7; | ||
28 | }; | 20 | }; |
29 | 21 | ||
30 | chosen { | 22 | chosen { |
@@ -39,6 +31,14 @@ | |||
39 | <&ahb_gates 44>; | 31 | <&ahb_gates 44>; |
40 | status = "disabled"; | 32 | status = "disabled"; |
41 | }; | 33 | }; |
34 | |||
35 | framebuffer@1 { | ||
36 | compatible = "allwinner,simple-framebuffer", "simple-framebuffer"; | ||
37 | allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi"; | ||
38 | clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>, | ||
39 | <&ahb_gates 44>, <&ahb_gates 46>; | ||
40 | status = "disabled"; | ||
41 | }; | ||
42 | }; | 42 | }; |
43 | 43 | ||
44 | cpus { | 44 | cpus { |
@@ -438,8 +438,8 @@ | |||
438 | reg-names = "phy_ctrl", "pmu1", "pmu2"; | 438 | reg-names = "phy_ctrl", "pmu1", "pmu2"; |
439 | clocks = <&usb_clk 8>; | 439 | clocks = <&usb_clk 8>; |
440 | clock-names = "usb_phy"; | 440 | clock-names = "usb_phy"; |
441 | resets = <&usb_clk 1>, <&usb_clk 2>; | 441 | resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>; |
442 | reset-names = "usb1_reset", "usb2_reset"; | 442 | reset-names = "usb0_reset", "usb1_reset", "usb2_reset"; |
443 | status = "disabled"; | 443 | status = "disabled"; |
444 | }; | 444 | }; |
445 | 445 | ||
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts index fe3c559ca6a8..bfa742817690 100644 --- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts +++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts | |||
@@ -55,6 +55,12 @@ | |||
55 | model = "Olimex A10s-Olinuxino Micro"; | 55 | model = "Olimex A10s-Olinuxino Micro"; |
56 | compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s"; | 56 | compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s"; |
57 | 57 | ||
58 | aliases { | ||
59 | serial0 = &uart0; | ||
60 | serial1 = &uart2; | ||
61 | serial2 = &uart3; | ||
62 | }; | ||
63 | |||
58 | soc@01c00000 { | 64 | soc@01c00000 { |
59 | emac: ethernet@01c0b000 { | 65 | emac: ethernet@01c0b000 { |
60 | pinctrl-names = "default"; | 66 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 1b76667f3182..2e7d8263799d 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi | |||
@@ -18,10 +18,6 @@ | |||
18 | 18 | ||
19 | aliases { | 19 | aliases { |
20 | ethernet0 = &emac; | 20 | ethernet0 = &emac; |
21 | serial0 = &uart0; | ||
22 | serial1 = &uart1; | ||
23 | serial2 = &uart2; | ||
24 | serial3 = &uart3; | ||
25 | }; | 21 | }; |
26 | 22 | ||
27 | chosen { | 23 | chosen { |
@@ -390,8 +386,8 @@ | |||
390 | reg-names = "phy_ctrl", "pmu1"; | 386 | reg-names = "phy_ctrl", "pmu1"; |
391 | clocks = <&usb_clk 8>; | 387 | clocks = <&usb_clk 8>; |
392 | clock-names = "usb_phy"; | 388 | clock-names = "usb_phy"; |
393 | resets = <&usb_clk 1>; | 389 | resets = <&usb_clk 0>, <&usb_clk 1>; |
394 | reset-names = "usb1_reset"; | 390 | reset-names = "usb0_reset", "usb1_reset"; |
395 | status = "disabled"; | 391 | status = "disabled"; |
396 | }; | 392 | }; |
397 | 393 | ||
diff --git a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts index eeed1f236ee8..c7be3abd9fcc 100644 --- a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts +++ b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts | |||
@@ -53,6 +53,10 @@ | |||
53 | model = "HSG H702"; | 53 | model = "HSG H702"; |
54 | compatible = "hsg,h702", "allwinner,sun5i-a13"; | 54 | compatible = "hsg,h702", "allwinner,sun5i-a13"; |
55 | 55 | ||
56 | aliases { | ||
57 | serial0 = &uart1; | ||
58 | }; | ||
59 | |||
56 | soc@01c00000 { | 60 | soc@01c00000 { |
57 | mmc0: mmc@01c0f000 { | 61 | mmc0: mmc@01c0f000 { |
58 | pinctrl-names = "default"; | 62 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts index 916ee8bb826f..3decefb3c37a 100644 --- a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts +++ b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts | |||
@@ -54,6 +54,10 @@ | |||
54 | model = "Olimex A13-Olinuxino Micro"; | 54 | model = "Olimex A13-Olinuxino Micro"; |
55 | compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13"; | 55 | compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13"; |
56 | 56 | ||
57 | aliases { | ||
58 | serial0 = &uart1; | ||
59 | }; | ||
60 | |||
57 | soc@01c00000 { | 61 | soc@01c00000 { |
58 | mmc0: mmc@01c0f000 { | 62 | mmc0: mmc@01c0f000 { |
59 | pinctrl-names = "default"; | 63 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts index e31d291d14cb..b421f7fa197b 100644 --- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts +++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts | |||
@@ -55,6 +55,10 @@ | |||
55 | model = "Olimex A13-Olinuxino"; | 55 | model = "Olimex A13-Olinuxino"; |
56 | compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13"; | 56 | compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13"; |
57 | 57 | ||
58 | aliases { | ||
59 | serial0 = &uart1; | ||
60 | }; | ||
61 | |||
58 | soc@01c00000 { | 62 | soc@01c00000 { |
59 | mmc0: mmc@01c0f000 { | 63 | mmc0: mmc@01c0f000 { |
60 | pinctrl-names = "default"; | 64 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index c35217ea1f64..c556688f8b8b 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi | |||
@@ -16,11 +16,6 @@ | |||
16 | / { | 16 | / { |
17 | interrupt-parent = <&intc>; | 17 | interrupt-parent = <&intc>; |
18 | 18 | ||
19 | aliases { | ||
20 | serial0 = &uart1; | ||
21 | serial1 = &uart3; | ||
22 | }; | ||
23 | |||
24 | cpus { | 19 | cpus { |
25 | #address-cells = <1>; | 20 | #address-cells = <1>; |
26 | #size-cells = <0>; | 21 | #size-cells = <0>; |
@@ -349,8 +344,8 @@ | |||
349 | reg-names = "phy_ctrl", "pmu1"; | 344 | reg-names = "phy_ctrl", "pmu1"; |
350 | clocks = <&usb_clk 8>; | 345 | clocks = <&usb_clk 8>; |
351 | clock-names = "usb_phy"; | 346 | clock-names = "usb_phy"; |
352 | resets = <&usb_clk 1>; | 347 | resets = <&usb_clk 0>, <&usb_clk 1>; |
353 | reset-names = "usb1_reset"; | 348 | reset-names = "usb0_reset", "usb1_reset"; |
354 | status = "disabled"; | 349 | status = "disabled"; |
355 | }; | 350 | }; |
356 | 351 | ||
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi index f47156b6572b..1e7e7bcf8307 100644 --- a/arch/arm/boot/dts/sun6i-a31.dtsi +++ b/arch/arm/boot/dts/sun6i-a31.dtsi | |||
@@ -53,12 +53,6 @@ | |||
53 | interrupt-parent = <&gic>; | 53 | interrupt-parent = <&gic>; |
54 | 54 | ||
55 | aliases { | 55 | aliases { |
56 | serial0 = &uart0; | ||
57 | serial1 = &uart1; | ||
58 | serial2 = &uart2; | ||
59 | serial3 = &uart3; | ||
60 | serial4 = &uart4; | ||
61 | serial5 = &uart5; | ||
62 | ethernet0 = &gmac; | 56 | ethernet0 = &gmac; |
63 | }; | 57 | }; |
64 | 58 | ||
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts index 1cf1214cc068..bd7b15add697 100644 --- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts +++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts | |||
@@ -55,6 +55,12 @@ | |||
55 | model = "LeMaker Banana Pi"; | 55 | model = "LeMaker Banana Pi"; |
56 | compatible = "lemaker,bananapi", "allwinner,sun7i-a20"; | 56 | compatible = "lemaker,bananapi", "allwinner,sun7i-a20"; |
57 | 57 | ||
58 | aliases { | ||
59 | serial0 = &uart0; | ||
60 | serial1 = &uart3; | ||
61 | serial2 = &uart7; | ||
62 | }; | ||
63 | |||
58 | soc@01c00000 { | 64 | soc@01c00000 { |
59 | spi0: spi@01c05000 { | 65 | spi0: spi@01c05000 { |
60 | pinctrl-names = "default"; | 66 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts index 0e4bfa3b2b85..0bcefcbbb756 100644 --- a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts +++ b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts | |||
@@ -19,6 +19,14 @@ | |||
19 | model = "Merrii A20 Hummingbird"; | 19 | model = "Merrii A20 Hummingbird"; |
20 | compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20"; | 20 | compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20"; |
21 | 21 | ||
22 | aliases { | ||
23 | serial0 = &uart0; | ||
24 | serial1 = &uart2; | ||
25 | serial2 = &uart3; | ||
26 | serial3 = &uart4; | ||
27 | serial4 = &uart5; | ||
28 | }; | ||
29 | |||
22 | soc@01c00000 { | 30 | soc@01c00000 { |
23 | mmc0: mmc@01c0f000 { | 31 | mmc0: mmc@01c0f000 { |
24 | pinctrl-names = "default"; | 32 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts index 9d669cdf031d..66cc77707198 100644 --- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts +++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts | |||
@@ -20,6 +20,9 @@ | |||
20 | compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20"; | 20 | compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20"; |
21 | 21 | ||
22 | aliases { | 22 | aliases { |
23 | serial0 = &uart0; | ||
24 | serial1 = &uart6; | ||
25 | serial2 = &uart7; | ||
23 | spi0 = &spi1; | 26 | spi0 = &spi1; |
24 | spi1 = &spi2; | 27 | spi1 = &spi2; |
25 | }; | 28 | }; |
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index e21ce5992d56..89749ce34a84 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi | |||
@@ -54,14 +54,6 @@ | |||
54 | 54 | ||
55 | aliases { | 55 | aliases { |
56 | ethernet0 = &gmac; | 56 | ethernet0 = &gmac; |
57 | serial0 = &uart0; | ||
58 | serial1 = &uart1; | ||
59 | serial2 = &uart2; | ||
60 | serial3 = &uart3; | ||
61 | serial4 = &uart4; | ||
62 | serial5 = &uart5; | ||
63 | serial6 = &uart6; | ||
64 | serial7 = &uart7; | ||
65 | }; | 57 | }; |
66 | 58 | ||
67 | chosen { | 59 | chosen { |
diff --git a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts index 7f2117ce6985..32ad80804dbb 100644 --- a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts +++ b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts | |||
@@ -55,6 +55,10 @@ | |||
55 | model = "Ippo Q8H Dual Core Tablet (v5)"; | 55 | model = "Ippo Q8H Dual Core Tablet (v5)"; |
56 | compatible = "ippo,q8h-v5", "allwinner,sun8i-a23"; | 56 | compatible = "ippo,q8h-v5", "allwinner,sun8i-a23"; |
57 | 57 | ||
58 | aliases { | ||
59 | serial0 = &r_uart; | ||
60 | }; | ||
61 | |||
58 | chosen { | 62 | chosen { |
59 | bootargs = "earlyprintk console=ttyS0,115200"; | 63 | bootargs = "earlyprintk console=ttyS0,115200"; |
60 | }; | 64 | }; |
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi index 0746cd1024d7..86584fcf5e32 100644 --- a/arch/arm/boot/dts/sun8i-a23.dtsi +++ b/arch/arm/boot/dts/sun8i-a23.dtsi | |||
@@ -52,15 +52,6 @@ | |||
52 | / { | 52 | / { |
53 | interrupt-parent = <&gic>; | 53 | interrupt-parent = <&gic>; |
54 | 54 | ||
55 | aliases { | ||
56 | serial0 = &uart0; | ||
57 | serial1 = &uart1; | ||
58 | serial2 = &uart2; | ||
59 | serial3 = &uart3; | ||
60 | serial4 = &uart4; | ||
61 | serial5 = &r_uart; | ||
62 | }; | ||
63 | |||
64 | cpus { | 55 | cpus { |
65 | #address-cells = <1>; | 56 | #address-cells = <1>; |
66 | #size-cells = <0>; | 57 | #size-cells = <0>; |
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts index 506948f582ee..11ec71072e81 100644 --- a/arch/arm/boot/dts/sun9i-a80-optimus.dts +++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts | |||
@@ -54,6 +54,11 @@ | |||
54 | model = "Merrii A80 Optimus Board"; | 54 | model = "Merrii A80 Optimus Board"; |
55 | compatible = "merrii,a80-optimus", "allwinner,sun9i-a80"; | 55 | compatible = "merrii,a80-optimus", "allwinner,sun9i-a80"; |
56 | 56 | ||
57 | aliases { | ||
58 | serial0 = &uart0; | ||
59 | serial1 = &uart4; | ||
60 | }; | ||
61 | |||
57 | chosen { | 62 | chosen { |
58 | bootargs = "earlyprintk console=ttyS0,115200"; | 63 | bootargs = "earlyprintk console=ttyS0,115200"; |
59 | }; | 64 | }; |
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi index 494714f67b57..9ef4438206a9 100644 --- a/arch/arm/boot/dts/sun9i-a80.dtsi +++ b/arch/arm/boot/dts/sun9i-a80.dtsi | |||
@@ -52,16 +52,6 @@ | |||
52 | / { | 52 | / { |
53 | interrupt-parent = <&gic>; | 53 | interrupt-parent = <&gic>; |
54 | 54 | ||
55 | aliases { | ||
56 | serial0 = &uart0; | ||
57 | serial1 = &uart1; | ||
58 | serial2 = &uart2; | ||
59 | serial3 = &uart3; | ||
60 | serial4 = &uart4; | ||
61 | serial5 = &uart5; | ||
62 | serial6 = &r_uart; | ||
63 | }; | ||
64 | |||
65 | cpus { | 55 | cpus { |
66 | #address-cells = <1>; | 56 | #address-cells = <1>; |
67 | #size-cells = <0>; | 57 | #size-cells = <0>; |
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h index 66ce17655bb9..7b0152321b20 100644 --- a/arch/arm/include/asm/kvm_emulate.h +++ b/arch/arm/include/asm/kvm_emulate.h | |||
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) | |||
38 | vcpu->arch.hcr = HCR_GUEST_MASK; | 38 | vcpu->arch.hcr = HCR_GUEST_MASK; |
39 | } | 39 | } |
40 | 40 | ||
41 | static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu) | ||
42 | { | ||
43 | return vcpu->arch.hcr; | ||
44 | } | ||
45 | |||
46 | static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr) | ||
47 | { | ||
48 | vcpu->arch.hcr = hcr; | ||
49 | } | ||
50 | |||
41 | static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) | 51 | static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) |
42 | { | 52 | { |
43 | return 1; | 53 | return 1; |
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index 254e0650e48b..04b4ea0b550a 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h | |||
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch { | |||
125 | * Anything that is not used directly from assembly code goes | 125 | * Anything that is not used directly from assembly code goes |
126 | * here. | 126 | * here. |
127 | */ | 127 | */ |
128 | /* dcache set/way operation pending */ | ||
129 | int last_pcpu; | ||
130 | cpumask_t require_dcache_flush; | ||
131 | 128 | ||
132 | /* Don't run the guest on this vcpu */ | 129 | /* Don't run the guest on this vcpu */ |
133 | bool pause; | 130 | bool pause; |
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h index 63e0ecc04901..1bca8f8af442 100644 --- a/arch/arm/include/asm/kvm_mmu.h +++ b/arch/arm/include/asm/kvm_mmu.h | |||
@@ -44,6 +44,7 @@ | |||
44 | 44 | ||
45 | #ifndef __ASSEMBLY__ | 45 | #ifndef __ASSEMBLY__ |
46 | 46 | ||
47 | #include <linux/highmem.h> | ||
47 | #include <asm/cacheflush.h> | 48 | #include <asm/cacheflush.h> |
48 | #include <asm/pgalloc.h> | 49 | #include <asm/pgalloc.h> |
49 | 50 | ||
@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) | |||
161 | return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; | 162 | return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101; |
162 | } | 163 | } |
163 | 164 | ||
164 | static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, | 165 | static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, |
165 | unsigned long size, | 166 | unsigned long size, |
166 | bool ipa_uncached) | 167 | bool ipa_uncached) |
167 | { | 168 | { |
168 | if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) | ||
169 | kvm_flush_dcache_to_poc((void *)hva, size); | ||
170 | |||
171 | /* | 169 | /* |
172 | * If we are going to insert an instruction page and the icache is | 170 | * If we are going to insert an instruction page and the icache is |
173 | * either VIPT or PIPT, there is a potential problem where the host | 171 | * either VIPT or PIPT, there is a potential problem where the host |
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, | |||
179 | * | 177 | * |
180 | * VIVT caches are tagged using both the ASID and the VMID and doesn't | 178 | * VIVT caches are tagged using both the ASID and the VMID and doesn't |
181 | * need any kind of flushing (DDI 0406C.b - Page B3-1392). | 179 | * need any kind of flushing (DDI 0406C.b - Page B3-1392). |
180 | * | ||
181 | * We need to do this through a kernel mapping (using the | ||
182 | * user-space mapping has proved to be the wrong | ||
183 | * solution). For that, we need to kmap one page at a time, | ||
184 | * and iterate over the range. | ||
182 | */ | 185 | */ |
183 | if (icache_is_pipt()) { | 186 | |
184 | __cpuc_coherent_user_range(hva, hva + size); | 187 | bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached; |
185 | } else if (!icache_is_vivt_asid_tagged()) { | 188 | |
189 | VM_BUG_ON(size & PAGE_MASK); | ||
190 | |||
191 | if (!need_flush && !icache_is_pipt()) | ||
192 | goto vipt_cache; | ||
193 | |||
194 | while (size) { | ||
195 | void *va = kmap_atomic_pfn(pfn); | ||
196 | |||
197 | if (need_flush) | ||
198 | kvm_flush_dcache_to_poc(va, PAGE_SIZE); | ||
199 | |||
200 | if (icache_is_pipt()) | ||
201 | __cpuc_coherent_user_range((unsigned long)va, | ||
202 | (unsigned long)va + PAGE_SIZE); | ||
203 | |||
204 | size -= PAGE_SIZE; | ||
205 | pfn++; | ||
206 | |||
207 | kunmap_atomic(va); | ||
208 | } | ||
209 | |||
210 | vipt_cache: | ||
211 | if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) { | ||
186 | /* any kind of VIPT cache */ | 212 | /* any kind of VIPT cache */ |
187 | __flush_icache_all(); | 213 | __flush_icache_all(); |
188 | } | 214 | } |
189 | } | 215 | } |
190 | 216 | ||
217 | static inline void __kvm_flush_dcache_pte(pte_t pte) | ||
218 | { | ||
219 | void *va = kmap_atomic(pte_page(pte)); | ||
220 | |||
221 | kvm_flush_dcache_to_poc(va, PAGE_SIZE); | ||
222 | |||
223 | kunmap_atomic(va); | ||
224 | } | ||
225 | |||
226 | static inline void __kvm_flush_dcache_pmd(pmd_t pmd) | ||
227 | { | ||
228 | unsigned long size = PMD_SIZE; | ||
229 | pfn_t pfn = pmd_pfn(pmd); | ||
230 | |||
231 | while (size) { | ||
232 | void *va = kmap_atomic_pfn(pfn); | ||
233 | |||
234 | kvm_flush_dcache_to_poc(va, PAGE_SIZE); | ||
235 | |||
236 | pfn++; | ||
237 | size -= PAGE_SIZE; | ||
238 | |||
239 | kunmap_atomic(va); | ||
240 | } | ||
241 | } | ||
242 | |||
243 | static inline void __kvm_flush_dcache_pud(pud_t pud) | ||
244 | { | ||
245 | } | ||
246 | |||
191 | #define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x)) | 247 | #define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x)) |
192 | 248 | ||
193 | void stage2_flush_vm(struct kvm *kvm); | 249 | void kvm_set_way_flush(struct kvm_vcpu *vcpu); |
250 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); | ||
194 | 251 | ||
195 | #endif /* !__ASSEMBLY__ */ | 252 | #endif /* !__ASSEMBLY__ */ |
196 | 253 | ||
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 2d6d91001062..0b0d58a905c4 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
281 | vcpu->cpu = cpu; | 281 | vcpu->cpu = cpu; |
282 | vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); | 282 | vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); |
283 | 283 | ||
284 | /* | ||
285 | * Check whether this vcpu requires the cache to be flushed on | ||
286 | * this physical CPU. This is a consequence of doing dcache | ||
287 | * operations by set/way on this vcpu. We do it here to be in | ||
288 | * a non-preemptible section. | ||
289 | */ | ||
290 | if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush)) | ||
291 | flush_cache_all(); /* We'd really want v7_flush_dcache_all() */ | ||
292 | |||
293 | kvm_arm_set_running_vcpu(vcpu); | 284 | kvm_arm_set_running_vcpu(vcpu); |
294 | } | 285 | } |
295 | 286 | ||
@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
541 | ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); | 532 | ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); |
542 | 533 | ||
543 | vcpu->mode = OUTSIDE_GUEST_MODE; | 534 | vcpu->mode = OUTSIDE_GUEST_MODE; |
544 | vcpu->arch.last_pcpu = smp_processor_id(); | ||
545 | kvm_guest_exit(); | 535 | kvm_guest_exit(); |
546 | trace_kvm_exit(*vcpu_pc(vcpu)); | 536 | trace_kvm_exit(*vcpu_pc(vcpu)); |
547 | /* | 537 | /* |
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c index 7928dbdf2102..f3d88dc388bc 100644 --- a/arch/arm/kvm/coproc.c +++ b/arch/arm/kvm/coproc.c | |||
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu, | |||
189 | return true; | 189 | return true; |
190 | } | 190 | } |
191 | 191 | ||
192 | /* See note at ARM ARM B1.14.4 */ | 192 | /* |
193 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). | ||
194 | */ | ||
193 | static bool access_dcsw(struct kvm_vcpu *vcpu, | 195 | static bool access_dcsw(struct kvm_vcpu *vcpu, |
194 | const struct coproc_params *p, | 196 | const struct coproc_params *p, |
195 | const struct coproc_reg *r) | 197 | const struct coproc_reg *r) |
196 | { | 198 | { |
197 | unsigned long val; | ||
198 | int cpu; | ||
199 | |||
200 | if (!p->is_write) | 199 | if (!p->is_write) |
201 | return read_from_write_only(vcpu, p); | 200 | return read_from_write_only(vcpu, p); |
202 | 201 | ||
203 | cpu = get_cpu(); | 202 | kvm_set_way_flush(vcpu); |
204 | |||
205 | cpumask_setall(&vcpu->arch.require_dcache_flush); | ||
206 | cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); | ||
207 | |||
208 | /* If we were already preempted, take the long way around */ | ||
209 | if (cpu != vcpu->arch.last_pcpu) { | ||
210 | flush_cache_all(); | ||
211 | goto done; | ||
212 | } | ||
213 | |||
214 | val = *vcpu_reg(vcpu, p->Rt1); | ||
215 | |||
216 | switch (p->CRm) { | ||
217 | case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ | ||
218 | case 14: /* DCCISW */ | ||
219 | asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val)); | ||
220 | break; | ||
221 | |||
222 | case 10: /* DCCSW */ | ||
223 | asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val)); | ||
224 | break; | ||
225 | } | ||
226 | |||
227 | done: | ||
228 | put_cpu(); | ||
229 | |||
230 | return true; | 203 | return true; |
231 | } | 204 | } |
232 | 205 | ||
233 | /* | 206 | /* |
234 | * Generic accessor for VM registers. Only called as long as HCR_TVM | 207 | * Generic accessor for VM registers. Only called as long as HCR_TVM |
235 | * is set. | 208 | * is set. If the guest enables the MMU, we stop trapping the VM |
209 | * sys_regs and leave it in complete control of the caches. | ||
210 | * | ||
211 | * Used by the cpu-specific code. | ||
236 | */ | 212 | */ |
237 | static bool access_vm_reg(struct kvm_vcpu *vcpu, | 213 | bool access_vm_reg(struct kvm_vcpu *vcpu, |
238 | const struct coproc_params *p, | 214 | const struct coproc_params *p, |
239 | const struct coproc_reg *r) | 215 | const struct coproc_reg *r) |
240 | { | 216 | { |
217 | bool was_enabled = vcpu_has_cache_enabled(vcpu); | ||
218 | |||
241 | BUG_ON(!p->is_write); | 219 | BUG_ON(!p->is_write); |
242 | 220 | ||
243 | vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1); | 221 | vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1); |
244 | if (p->is_64bit) | 222 | if (p->is_64bit) |
245 | vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2); | 223 | vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2); |
246 | 224 | ||
247 | return true; | 225 | kvm_toggle_cache(vcpu, was_enabled); |
248 | } | ||
249 | |||
250 | /* | ||
251 | * SCTLR accessor. Only called as long as HCR_TVM is set. If the | ||
252 | * guest enables the MMU, we stop trapping the VM sys_regs and leave | ||
253 | * it in complete control of the caches. | ||
254 | * | ||
255 | * Used by the cpu-specific code. | ||
256 | */ | ||
257 | bool access_sctlr(struct kvm_vcpu *vcpu, | ||
258 | const struct coproc_params *p, | ||
259 | const struct coproc_reg *r) | ||
260 | { | ||
261 | access_vm_reg(vcpu, p, r); | ||
262 | |||
263 | if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ | ||
264 | vcpu->arch.hcr &= ~HCR_TVM; | ||
265 | stage2_flush_vm(vcpu->kvm); | ||
266 | } | ||
267 | |||
268 | return true; | 226 | return true; |
269 | } | 227 | } |
270 | 228 | ||
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h index 1a44bbe39643..88d24a3a9778 100644 --- a/arch/arm/kvm/coproc.h +++ b/arch/arm/kvm/coproc.h | |||
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1, | |||
153 | #define is64 .is_64 = true | 153 | #define is64 .is_64 = true |
154 | #define is32 .is_64 = false | 154 | #define is32 .is_64 = false |
155 | 155 | ||
156 | bool access_sctlr(struct kvm_vcpu *vcpu, | 156 | bool access_vm_reg(struct kvm_vcpu *vcpu, |
157 | const struct coproc_params *p, | 157 | const struct coproc_params *p, |
158 | const struct coproc_reg *r); | 158 | const struct coproc_reg *r); |
159 | 159 | ||
160 | #endif /* __ARM_KVM_COPROC_LOCAL_H__ */ | 160 | #endif /* __ARM_KVM_COPROC_LOCAL_H__ */ |
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c index e6f4ae48bda9..a7136757d373 100644 --- a/arch/arm/kvm/coproc_a15.c +++ b/arch/arm/kvm/coproc_a15.c | |||
@@ -34,7 +34,7 @@ | |||
34 | static const struct coproc_reg a15_regs[] = { | 34 | static const struct coproc_reg a15_regs[] = { |
35 | /* SCTLR: swapped by interrupt.S. */ | 35 | /* SCTLR: swapped by interrupt.S. */ |
36 | { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, | 36 | { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, |
37 | access_sctlr, reset_val, c1_SCTLR, 0x00C50078 }, | 37 | access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 }, |
38 | }; | 38 | }; |
39 | 39 | ||
40 | static struct kvm_coproc_target_table a15_target_table = { | 40 | static struct kvm_coproc_target_table a15_target_table = { |
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c index 17fc7cd479d3..b19e46d1b2c0 100644 --- a/arch/arm/kvm/coproc_a7.c +++ b/arch/arm/kvm/coproc_a7.c | |||
@@ -37,7 +37,7 @@ | |||
37 | static const struct coproc_reg a7_regs[] = { | 37 | static const struct coproc_reg a7_regs[] = { |
38 | /* SCTLR: swapped by interrupt.S. */ | 38 | /* SCTLR: swapped by interrupt.S. */ |
39 | { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, | 39 | { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, |
40 | access_sctlr, reset_val, c1_SCTLR, 0x00C50878 }, | 40 | access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 }, |
41 | }; | 41 | }; |
42 | 42 | ||
43 | static struct kvm_coproc_target_table a7_target_table = { | 43 | static struct kvm_coproc_target_table a7_target_table = { |
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c index 1dc9778a00af..136662547ca6 100644 --- a/arch/arm/kvm/mmu.c +++ b/arch/arm/kvm/mmu.c | |||
@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) | |||
58 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); | 58 | kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa); |
59 | } | 59 | } |
60 | 60 | ||
61 | /* | ||
62 | * D-Cache management functions. They take the page table entries by | ||
63 | * value, as they are flushing the cache using the kernel mapping (or | ||
64 | * kmap on 32bit). | ||
65 | */ | ||
66 | static void kvm_flush_dcache_pte(pte_t pte) | ||
67 | { | ||
68 | __kvm_flush_dcache_pte(pte); | ||
69 | } | ||
70 | |||
71 | static void kvm_flush_dcache_pmd(pmd_t pmd) | ||
72 | { | ||
73 | __kvm_flush_dcache_pmd(pmd); | ||
74 | } | ||
75 | |||
76 | static void kvm_flush_dcache_pud(pud_t pud) | ||
77 | { | ||
78 | __kvm_flush_dcache_pud(pud); | ||
79 | } | ||
80 | |||
61 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, | 81 | static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, |
62 | int min, int max) | 82 | int min, int max) |
63 | { | 83 | { |
@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr) | |||
119 | put_page(virt_to_page(pmd)); | 139 | put_page(virt_to_page(pmd)); |
120 | } | 140 | } |
121 | 141 | ||
142 | /* | ||
143 | * Unmapping vs dcache management: | ||
144 | * | ||
145 | * If a guest maps certain memory pages as uncached, all writes will | ||
146 | * bypass the data cache and go directly to RAM. However, the CPUs | ||
147 | * can still speculate reads (not writes) and fill cache lines with | ||
148 | * data. | ||
149 | * | ||
150 | * Those cache lines will be *clean* cache lines though, so a | ||
151 | * clean+invalidate operation is equivalent to an invalidate | ||
152 | * operation, because no cache lines are marked dirty. | ||
153 | * | ||
154 | * Those clean cache lines could be filled prior to an uncached write | ||
155 | * by the guest, and the cache coherent IO subsystem would therefore | ||
156 | * end up writing old data to disk. | ||
157 | * | ||
158 | * This is why right after unmapping a page/section and invalidating | ||
159 | * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure | ||
160 | * the IO subsystem will never hit in the cache. | ||
161 | */ | ||
122 | static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, | 162 | static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, |
123 | phys_addr_t addr, phys_addr_t end) | 163 | phys_addr_t addr, phys_addr_t end) |
124 | { | 164 | { |
@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd, | |||
128 | start_pte = pte = pte_offset_kernel(pmd, addr); | 168 | start_pte = pte = pte_offset_kernel(pmd, addr); |
129 | do { | 169 | do { |
130 | if (!pte_none(*pte)) { | 170 | if (!pte_none(*pte)) { |
171 | pte_t old_pte = *pte; | ||
172 | |||
131 | kvm_set_pte(pte, __pte(0)); | 173 | kvm_set_pte(pte, __pte(0)); |
132 | put_page(virt_to_page(pte)); | ||
133 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 174 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
175 | |||
176 | /* No need to invalidate the cache for device mappings */ | ||
177 | if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) | ||
178 | kvm_flush_dcache_pte(old_pte); | ||
179 | |||
180 | put_page(virt_to_page(pte)); | ||
134 | } | 181 | } |
135 | } while (pte++, addr += PAGE_SIZE, addr != end); | 182 | } while (pte++, addr += PAGE_SIZE, addr != end); |
136 | 183 | ||
@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud, | |||
149 | next = kvm_pmd_addr_end(addr, end); | 196 | next = kvm_pmd_addr_end(addr, end); |
150 | if (!pmd_none(*pmd)) { | 197 | if (!pmd_none(*pmd)) { |
151 | if (kvm_pmd_huge(*pmd)) { | 198 | if (kvm_pmd_huge(*pmd)) { |
199 | pmd_t old_pmd = *pmd; | ||
200 | |||
152 | pmd_clear(pmd); | 201 | pmd_clear(pmd); |
153 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 202 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
203 | |||
204 | kvm_flush_dcache_pmd(old_pmd); | ||
205 | |||
154 | put_page(virt_to_page(pmd)); | 206 | put_page(virt_to_page(pmd)); |
155 | } else { | 207 | } else { |
156 | unmap_ptes(kvm, pmd, addr, next); | 208 | unmap_ptes(kvm, pmd, addr, next); |
@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd, | |||
173 | next = kvm_pud_addr_end(addr, end); | 225 | next = kvm_pud_addr_end(addr, end); |
174 | if (!pud_none(*pud)) { | 226 | if (!pud_none(*pud)) { |
175 | if (pud_huge(*pud)) { | 227 | if (pud_huge(*pud)) { |
228 | pud_t old_pud = *pud; | ||
229 | |||
176 | pud_clear(pud); | 230 | pud_clear(pud); |
177 | kvm_tlb_flush_vmid_ipa(kvm, addr); | 231 | kvm_tlb_flush_vmid_ipa(kvm, addr); |
232 | |||
233 | kvm_flush_dcache_pud(old_pud); | ||
234 | |||
178 | put_page(virt_to_page(pud)); | 235 | put_page(virt_to_page(pud)); |
179 | } else { | 236 | } else { |
180 | unmap_pmds(kvm, pud, addr, next); | 237 | unmap_pmds(kvm, pud, addr, next); |
@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd, | |||
209 | 266 | ||
210 | pte = pte_offset_kernel(pmd, addr); | 267 | pte = pte_offset_kernel(pmd, addr); |
211 | do { | 268 | do { |
212 | if (!pte_none(*pte)) { | 269 | if (!pte_none(*pte) && |
213 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | 270 | (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE) |
214 | kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE); | 271 | kvm_flush_dcache_pte(*pte); |
215 | } | ||
216 | } while (pte++, addr += PAGE_SIZE, addr != end); | 272 | } while (pte++, addr += PAGE_SIZE, addr != end); |
217 | } | 273 | } |
218 | 274 | ||
@@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud, | |||
226 | do { | 282 | do { |
227 | next = kvm_pmd_addr_end(addr, end); | 283 | next = kvm_pmd_addr_end(addr, end); |
228 | if (!pmd_none(*pmd)) { | 284 | if (!pmd_none(*pmd)) { |
229 | if (kvm_pmd_huge(*pmd)) { | 285 | if (kvm_pmd_huge(*pmd)) |
230 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | 286 | kvm_flush_dcache_pmd(*pmd); |
231 | kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE); | 287 | else |
232 | } else { | ||
233 | stage2_flush_ptes(kvm, pmd, addr, next); | 288 | stage2_flush_ptes(kvm, pmd, addr, next); |
234 | } | ||
235 | } | 289 | } |
236 | } while (pmd++, addr = next, addr != end); | 290 | } while (pmd++, addr = next, addr != end); |
237 | } | 291 | } |
@@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd, | |||
246 | do { | 300 | do { |
247 | next = kvm_pud_addr_end(addr, end); | 301 | next = kvm_pud_addr_end(addr, end); |
248 | if (!pud_none(*pud)) { | 302 | if (!pud_none(*pud)) { |
249 | if (pud_huge(*pud)) { | 303 | if (pud_huge(*pud)) |
250 | hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT); | 304 | kvm_flush_dcache_pud(*pud); |
251 | kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE); | 305 | else |
252 | } else { | ||
253 | stage2_flush_pmds(kvm, pud, addr, next); | 306 | stage2_flush_pmds(kvm, pud, addr, next); |
254 | } | ||
255 | } | 307 | } |
256 | } while (pud++, addr = next, addr != end); | 308 | } while (pud++, addr = next, addr != end); |
257 | } | 309 | } |
@@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm, | |||
278 | * Go through the stage 2 page tables and invalidate any cache lines | 330 | * Go through the stage 2 page tables and invalidate any cache lines |
279 | * backing memory already mapped to the VM. | 331 | * backing memory already mapped to the VM. |
280 | */ | 332 | */ |
281 | void stage2_flush_vm(struct kvm *kvm) | 333 | static void stage2_flush_vm(struct kvm *kvm) |
282 | { | 334 | { |
283 | struct kvm_memslots *slots; | 335 | struct kvm_memslots *slots; |
284 | struct kvm_memory_slot *memslot; | 336 | struct kvm_memory_slot *memslot; |
@@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn) | |||
905 | return !pfn_valid(pfn); | 957 | return !pfn_valid(pfn); |
906 | } | 958 | } |
907 | 959 | ||
960 | static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, | ||
961 | unsigned long size, bool uncached) | ||
962 | { | ||
963 | __coherent_cache_guest_page(vcpu, pfn, size, uncached); | ||
964 | } | ||
965 | |||
908 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | 966 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
909 | struct kvm_memory_slot *memslot, unsigned long hva, | 967 | struct kvm_memory_slot *memslot, unsigned long hva, |
910 | unsigned long fault_status) | 968 | unsigned long fault_status) |
@@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
994 | kvm_set_s2pmd_writable(&new_pmd); | 1052 | kvm_set_s2pmd_writable(&new_pmd); |
995 | kvm_set_pfn_dirty(pfn); | 1053 | kvm_set_pfn_dirty(pfn); |
996 | } | 1054 | } |
997 | coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE, | 1055 | coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached); |
998 | fault_ipa_uncached); | ||
999 | ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); | 1056 | ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd); |
1000 | } else { | 1057 | } else { |
1001 | pte_t new_pte = pfn_pte(pfn, mem_type); | 1058 | pte_t new_pte = pfn_pte(pfn, mem_type); |
@@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, | |||
1003 | kvm_set_s2pte_writable(&new_pte); | 1060 | kvm_set_s2pte_writable(&new_pte); |
1004 | kvm_set_pfn_dirty(pfn); | 1061 | kvm_set_pfn_dirty(pfn); |
1005 | } | 1062 | } |
1006 | coherent_cache_guest_page(vcpu, hva, PAGE_SIZE, | 1063 | coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached); |
1007 | fault_ipa_uncached); | ||
1008 | ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, | 1064 | ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, |
1009 | pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE)); | 1065 | pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE)); |
1010 | } | 1066 | } |
@@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | |||
1411 | unmap_stage2_range(kvm, gpa, size); | 1467 | unmap_stage2_range(kvm, gpa, size); |
1412 | spin_unlock(&kvm->mmu_lock); | 1468 | spin_unlock(&kvm->mmu_lock); |
1413 | } | 1469 | } |
1470 | |||
1471 | /* | ||
1472 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). | ||
1473 | * | ||
1474 | * Main problems: | ||
1475 | * - S/W ops are local to a CPU (not broadcast) | ||
1476 | * - We have line migration behind our back (speculation) | ||
1477 | * - System caches don't support S/W at all (damn!) | ||
1478 | * | ||
1479 | * In the face of the above, the best we can do is to try and convert | ||
1480 | * S/W ops to VA ops. Because the guest is not allowed to infer the | ||
1481 | * S/W to PA mapping, it can only use S/W to nuke the whole cache, | ||
1482 | * which is a rather good thing for us. | ||
1483 | * | ||
1484 | * Also, it is only used when turning caches on/off ("The expected | ||
1485 | * usage of the cache maintenance instructions that operate by set/way | ||
1486 | * is associated with the cache maintenance instructions associated | ||
1487 | * with the powerdown and powerup of caches, if this is required by | ||
1488 | * the implementation."). | ||
1489 | * | ||
1490 | * We use the following policy: | ||
1491 | * | ||
1492 | * - If we trap a S/W operation, we enable VM trapping to detect | ||
1493 | * caches being turned on/off, and do a full clean. | ||
1494 | * | ||
1495 | * - We flush the caches on both caches being turned on and off. | ||
1496 | * | ||
1497 | * - Once the caches are enabled, we stop trapping VM ops. | ||
1498 | */ | ||
1499 | void kvm_set_way_flush(struct kvm_vcpu *vcpu) | ||
1500 | { | ||
1501 | unsigned long hcr = vcpu_get_hcr(vcpu); | ||
1502 | |||
1503 | /* | ||
1504 | * If this is the first time we do a S/W operation | ||
1505 | * (i.e. HCR_TVM not set) flush the whole memory, and set the | ||
1506 | * VM trapping. | ||
1507 | * | ||
1508 | * Otherwise, rely on the VM trapping to wait for the MMU + | ||
1509 | * Caches to be turned off. At that point, we'll be able to | ||
1510 | * clean the caches again. | ||
1511 | */ | ||
1512 | if (!(hcr & HCR_TVM)) { | ||
1513 | trace_kvm_set_way_flush(*vcpu_pc(vcpu), | ||
1514 | vcpu_has_cache_enabled(vcpu)); | ||
1515 | stage2_flush_vm(vcpu->kvm); | ||
1516 | vcpu_set_hcr(vcpu, hcr | HCR_TVM); | ||
1517 | } | ||
1518 | } | ||
1519 | |||
1520 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled) | ||
1521 | { | ||
1522 | bool now_enabled = vcpu_has_cache_enabled(vcpu); | ||
1523 | |||
1524 | /* | ||
1525 | * If switching the MMU+caches on, need to invalidate the caches. | ||
1526 | * If switching it off, need to clean the caches. | ||
1527 | * Clean + invalidate does the trick always. | ||
1528 | */ | ||
1529 | if (now_enabled != was_enabled) | ||
1530 | stage2_flush_vm(vcpu->kvm); | ||
1531 | |||
1532 | /* Caches are now on, stop trapping VM ops (until a S/W op) */ | ||
1533 | if (now_enabled) | ||
1534 | vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM); | ||
1535 | |||
1536 | trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled); | ||
1537 | } | ||
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h index b1d640f78623..b6a6e7102201 100644 --- a/arch/arm/kvm/trace.h +++ b/arch/arm/kvm/trace.h | |||
@@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc, | |||
223 | __entry->vcpu_pc, __entry->r0, __entry->imm) | 223 | __entry->vcpu_pc, __entry->r0, __entry->imm) |
224 | ); | 224 | ); |
225 | 225 | ||
226 | TRACE_EVENT(kvm_set_way_flush, | ||
227 | TP_PROTO(unsigned long vcpu_pc, bool cache), | ||
228 | TP_ARGS(vcpu_pc, cache), | ||
229 | |||
230 | TP_STRUCT__entry( | ||
231 | __field( unsigned long, vcpu_pc ) | ||
232 | __field( bool, cache ) | ||
233 | ), | ||
234 | |||
235 | TP_fast_assign( | ||
236 | __entry->vcpu_pc = vcpu_pc; | ||
237 | __entry->cache = cache; | ||
238 | ), | ||
239 | |||
240 | TP_printk("S/W flush at 0x%016lx (cache %s)", | ||
241 | __entry->vcpu_pc, __entry->cache ? "on" : "off") | ||
242 | ); | ||
243 | |||
244 | TRACE_EVENT(kvm_toggle_cache, | ||
245 | TP_PROTO(unsigned long vcpu_pc, bool was, bool now), | ||
246 | TP_ARGS(vcpu_pc, was, now), | ||
247 | |||
248 | TP_STRUCT__entry( | ||
249 | __field( unsigned long, vcpu_pc ) | ||
250 | __field( bool, was ) | ||
251 | __field( bool, now ) | ||
252 | ), | ||
253 | |||
254 | TP_fast_assign( | ||
255 | __entry->vcpu_pc = vcpu_pc; | ||
256 | __entry->was = was; | ||
257 | __entry->now = now; | ||
258 | ), | ||
259 | |||
260 | TP_printk("VM op at 0x%016lx (cache was %s, now %s)", | ||
261 | __entry->vcpu_pc, __entry->was ? "on" : "off", | ||
262 | __entry->now ? "on" : "off") | ||
263 | ); | ||
264 | |||
226 | #endif /* _TRACE_KVM_H */ | 265 | #endif /* _TRACE_KVM_H */ |
227 | 266 | ||
228 | #undef TRACE_INCLUDE_PATH | 267 | #undef TRACE_INCLUDE_PATH |
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index caa21e9b8cd9..ccef8806bb58 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c | |||
@@ -190,6 +190,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np) | |||
190 | arch_ioremap_caller = armada_pcie_wa_ioremap_caller; | 190 | arch_ioremap_caller = armada_pcie_wa_ioremap_caller; |
191 | 191 | ||
192 | /* | 192 | /* |
193 | * We should switch the PL310 to I/O coherency mode only if | ||
194 | * I/O coherency is actually enabled. | ||
195 | */ | ||
196 | if (!coherency_available()) | ||
197 | return; | ||
198 | |||
199 | /* | ||
193 | * Add the PL310 property "arm,io-coherent". This makes sure the | 200 | * Add the PL310 property "arm,io-coherent". This makes sure the |
194 | * outer sync operation is not used, which allows to | 201 | * outer sync operation is not used, which allows to |
195 | * workaround the system erratum that causes deadlocks when | 202 | * workaround the system erratum that causes deadlocks when |
diff --git a/arch/arm/mach-shmobile/board-ape6evm.c b/arch/arm/mach-shmobile/board-ape6evm.c index 66f67816a844..444f22d370f0 100644 --- a/arch/arm/mach-shmobile/board-ape6evm.c +++ b/arch/arm/mach-shmobile/board-ape6evm.c | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <linux/gpio_keys.h> | 18 | #include <linux/gpio_keys.h> |
19 | #include <linux/input.h> | 19 | #include <linux/input.h> |
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/irqchip.h> | ||
22 | #include <linux/irqchip/arm-gic.h> | ||
21 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
22 | #include <linux/mfd/tmio.h> | 24 | #include <linux/mfd/tmio.h> |
23 | #include <linux/mmc/host.h> | 25 | #include <linux/mmc/host.h> |
@@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void) | |||
273 | sizeof(ape6evm_leds_pdata)); | 275 | sizeof(ape6evm_leds_pdata)); |
274 | } | 276 | } |
275 | 277 | ||
278 | static void __init ape6evm_legacy_init_time(void) | ||
279 | { | ||
280 | /* Do not invoke DT-based timers via clocksource_of_init() */ | ||
281 | } | ||
282 | |||
283 | static void __init ape6evm_legacy_init_irq(void) | ||
284 | { | ||
285 | void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000); | ||
286 | void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000); | ||
287 | |||
288 | gic_init(0, 29, gic_dist_base, gic_cpu_base); | ||
289 | |||
290 | /* Do not invoke DT-based interrupt code via irqchip_init() */ | ||
291 | } | ||
292 | |||
293 | |||
276 | static const char *ape6evm_boards_compat_dt[] __initdata = { | 294 | static const char *ape6evm_boards_compat_dt[] __initdata = { |
277 | "renesas,ape6evm", | 295 | "renesas,ape6evm", |
278 | NULL, | 296 | NULL, |
@@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = { | |||
280 | 298 | ||
281 | DT_MACHINE_START(APE6EVM_DT, "ape6evm") | 299 | DT_MACHINE_START(APE6EVM_DT, "ape6evm") |
282 | .init_early = shmobile_init_delay, | 300 | .init_early = shmobile_init_delay, |
301 | .init_irq = ape6evm_legacy_init_irq, | ||
283 | .init_machine = ape6evm_add_standard_devices, | 302 | .init_machine = ape6evm_add_standard_devices, |
284 | .init_late = shmobile_init_late, | 303 | .init_late = shmobile_init_late, |
285 | .dt_compat = ape6evm_boards_compat_dt, | 304 | .dt_compat = ape6evm_boards_compat_dt, |
305 | .init_time = ape6evm_legacy_init_time, | ||
286 | MACHINE_END | 306 | MACHINE_END |
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c index f8197eb6e566..65b128dd4072 100644 --- a/arch/arm/mach-shmobile/board-lager.c +++ b/arch/arm/mach-shmobile/board-lager.c | |||
@@ -21,6 +21,8 @@ | |||
21 | #include <linux/input.h> | 21 | #include <linux/input.h> |
22 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
23 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
24 | #include <linux/irqchip.h> | ||
25 | #include <linux/irqchip/arm-gic.h> | ||
24 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
25 | #include <linux/leds.h> | 27 | #include <linux/leds.h> |
26 | #include <linux/mfd/tmio.h> | 28 | #include <linux/mfd/tmio.h> |
@@ -811,6 +813,16 @@ static void __init lager_init(void) | |||
811 | lager_ksz8041_fixup); | 813 | lager_ksz8041_fixup); |
812 | } | 814 | } |
813 | 815 | ||
816 | static void __init lager_legacy_init_irq(void) | ||
817 | { | ||
818 | void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000); | ||
819 | void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000); | ||
820 | |||
821 | gic_init(0, 29, gic_dist_base, gic_cpu_base); | ||
822 | |||
823 | /* Do not invoke DT-based interrupt code via irqchip_init() */ | ||
824 | } | ||
825 | |||
814 | static const char * const lager_boards_compat_dt[] __initconst = { | 826 | static const char * const lager_boards_compat_dt[] __initconst = { |
815 | "renesas,lager", | 827 | "renesas,lager", |
816 | NULL, | 828 | NULL, |
@@ -819,6 +831,7 @@ static const char * const lager_boards_compat_dt[] __initconst = { | |||
819 | DT_MACHINE_START(LAGER_DT, "lager") | 831 | DT_MACHINE_START(LAGER_DT, "lager") |
820 | .smp = smp_ops(r8a7790_smp_ops), | 832 | .smp = smp_ops(r8a7790_smp_ops), |
821 | .init_early = shmobile_init_delay, | 833 | .init_early = shmobile_init_delay, |
834 | .init_irq = lager_legacy_init_irq, | ||
822 | .init_time = rcar_gen2_timer_init, | 835 | .init_time = rcar_gen2_timer_init, |
823 | .init_machine = lager_init, | 836 | .init_machine = lager_init, |
824 | .init_late = shmobile_init_late, | 837 | .init_late = shmobile_init_late, |
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c index 3dd6edd9bd1d..cc9470dfb1ce 100644 --- a/arch/arm/mach-shmobile/setup-rcar-gen2.c +++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c | |||
@@ -133,7 +133,9 @@ void __init rcar_gen2_timer_init(void) | |||
133 | #ifdef CONFIG_COMMON_CLK | 133 | #ifdef CONFIG_COMMON_CLK |
134 | rcar_gen2_clocks_init(mode); | 134 | rcar_gen2_clocks_init(mode); |
135 | #endif | 135 | #endif |
136 | #ifdef CONFIG_ARCH_SHMOBILE_MULTI | ||
136 | clocksource_of_init(); | 137 | clocksource_of_init(); |
138 | #endif | ||
137 | } | 139 | } |
138 | 140 | ||
139 | struct memory_reserve_config { | 141 | struct memory_reserve_config { |
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c index f1d027aa7a81..0edf2a6d2bbe 100644 --- a/arch/arm/mach-shmobile/timer.c +++ b/arch/arm/mach-shmobile/timer.c | |||
@@ -70,6 +70,18 @@ void __init shmobile_init_delay(void) | |||
70 | if (!max_freq) | 70 | if (!max_freq) |
71 | return; | 71 | return; |
72 | 72 | ||
73 | #ifdef CONFIG_ARCH_SHMOBILE_LEGACY | ||
74 | /* Non-multiplatform r8a73a4 SoC cannot use arch timer due | ||
75 | * to GIC being initialized from C and arch timer via DT */ | ||
76 | if (of_machine_is_compatible("renesas,r8a73a4")) | ||
77 | has_arch_timer = false; | ||
78 | |||
79 | /* Non-multiplatform r8a7790 SoC cannot use arch timer due | ||
80 | * to GIC being initialized from C and arch timer via DT */ | ||
81 | if (of_machine_is_compatible("renesas,r8a7790")) | ||
82 | has_arch_timer = false; | ||
83 | #endif | ||
84 | |||
73 | if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { | 85 | if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) { |
74 | if (is_a7_a8_a9) | 86 | if (is_a7_a8_a9) |
75 | shmobile_setup_delay_hz(max_freq, 1, 3); | 87 | shmobile_setup_delay_hz(max_freq, 1, 3); |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 7864797609b3..a673c7f7e208 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -1940,13 +1940,32 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping) | |||
1940 | } | 1940 | } |
1941 | EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); | 1941 | EXPORT_SYMBOL_GPL(arm_iommu_release_mapping); |
1942 | 1942 | ||
1943 | static int __arm_iommu_attach_device(struct device *dev, | ||
1944 | struct dma_iommu_mapping *mapping) | ||
1945 | { | ||
1946 | int err; | ||
1947 | |||
1948 | err = iommu_attach_device(mapping->domain, dev); | ||
1949 | if (err) | ||
1950 | return err; | ||
1951 | |||
1952 | kref_get(&mapping->kref); | ||
1953 | dev->archdata.mapping = mapping; | ||
1954 | |||
1955 | pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); | ||
1956 | return 0; | ||
1957 | } | ||
1958 | |||
1943 | /** | 1959 | /** |
1944 | * arm_iommu_attach_device | 1960 | * arm_iommu_attach_device |
1945 | * @dev: valid struct device pointer | 1961 | * @dev: valid struct device pointer |
1946 | * @mapping: io address space mapping structure (returned from | 1962 | * @mapping: io address space mapping structure (returned from |
1947 | * arm_iommu_create_mapping) | 1963 | * arm_iommu_create_mapping) |
1948 | * | 1964 | * |
1949 | * Attaches specified io address space mapping to the provided device, | 1965 | * Attaches specified io address space mapping to the provided device. |
1966 | * This replaces the dma operations (dma_map_ops pointer) with the | ||
1967 | * IOMMU aware version. | ||
1968 | * | ||
1950 | * More than one client might be attached to the same io address space | 1969 | * More than one client might be attached to the same io address space |
1951 | * mapping. | 1970 | * mapping. |
1952 | */ | 1971 | */ |
@@ -1955,25 +1974,16 @@ int arm_iommu_attach_device(struct device *dev, | |||
1955 | { | 1974 | { |
1956 | int err; | 1975 | int err; |
1957 | 1976 | ||
1958 | err = iommu_attach_device(mapping->domain, dev); | 1977 | err = __arm_iommu_attach_device(dev, mapping); |
1959 | if (err) | 1978 | if (err) |
1960 | return err; | 1979 | return err; |
1961 | 1980 | ||
1962 | kref_get(&mapping->kref); | 1981 | set_dma_ops(dev, &iommu_ops); |
1963 | dev->archdata.mapping = mapping; | ||
1964 | |||
1965 | pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); | ||
1966 | return 0; | 1982 | return 0; |
1967 | } | 1983 | } |
1968 | EXPORT_SYMBOL_GPL(arm_iommu_attach_device); | 1984 | EXPORT_SYMBOL_GPL(arm_iommu_attach_device); |
1969 | 1985 | ||
1970 | /** | 1986 | static void __arm_iommu_detach_device(struct device *dev) |
1971 | * arm_iommu_detach_device | ||
1972 | * @dev: valid struct device pointer | ||
1973 | * | ||
1974 | * Detaches the provided device from a previously attached map. | ||
1975 | */ | ||
1976 | void arm_iommu_detach_device(struct device *dev) | ||
1977 | { | 1987 | { |
1978 | struct dma_iommu_mapping *mapping; | 1988 | struct dma_iommu_mapping *mapping; |
1979 | 1989 | ||
@@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev) | |||
1989 | 1999 | ||
1990 | pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); | 2000 | pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); |
1991 | } | 2001 | } |
2002 | |||
2003 | /** | ||
2004 | * arm_iommu_detach_device | ||
2005 | * @dev: valid struct device pointer | ||
2006 | * | ||
2007 | * Detaches the provided device from a previously attached map. | ||
2008 | * This voids the dma operations (dma_map_ops pointer) | ||
2009 | */ | ||
2010 | void arm_iommu_detach_device(struct device *dev) | ||
2011 | { | ||
2012 | __arm_iommu_detach_device(dev); | ||
2013 | set_dma_ops(dev, NULL); | ||
2014 | } | ||
1992 | EXPORT_SYMBOL_GPL(arm_iommu_detach_device); | 2015 | EXPORT_SYMBOL_GPL(arm_iommu_detach_device); |
1993 | 2016 | ||
1994 | static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) | 2017 | static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent) |
@@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size, | |||
2011 | return false; | 2034 | return false; |
2012 | } | 2035 | } |
2013 | 2036 | ||
2014 | if (arm_iommu_attach_device(dev, mapping)) { | 2037 | if (__arm_iommu_attach_device(dev, mapping)) { |
2015 | pr_warn("Failed to attached device %s to IOMMU_mapping\n", | 2038 | pr_warn("Failed to attached device %s to IOMMU_mapping\n", |
2016 | dev_name(dev)); | 2039 | dev_name(dev)); |
2017 | arm_iommu_release_mapping(mapping); | 2040 | arm_iommu_release_mapping(mapping); |
@@ -2025,7 +2048,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) | |||
2025 | { | 2048 | { |
2026 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; | 2049 | struct dma_iommu_mapping *mapping = dev->archdata.mapping; |
2027 | 2050 | ||
2028 | arm_iommu_detach_device(dev); | 2051 | __arm_iommu_detach_device(dev); |
2029 | arm_iommu_release_mapping(mapping); | 2052 | arm_iommu_release_mapping(mapping); |
2030 | } | 2053 | } |
2031 | 2054 | ||
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 865a7e28ea2d..3cb4c856b10d 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h | |||
@@ -45,6 +45,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) | |||
45 | vcpu->arch.hcr_el2 &= ~HCR_RW; | 45 | vcpu->arch.hcr_el2 &= ~HCR_RW; |
46 | } | 46 | } |
47 | 47 | ||
48 | static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu) | ||
49 | { | ||
50 | return vcpu->arch.hcr_el2; | ||
51 | } | ||
52 | |||
53 | static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr) | ||
54 | { | ||
55 | vcpu->arch.hcr_el2 = hcr; | ||
56 | } | ||
57 | |||
48 | static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) | 58 | static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) |
49 | { | 59 | { |
50 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; | 60 | return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc; |
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 0b7dfdb931df..acd101a9014d 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h | |||
@@ -116,9 +116,6 @@ struct kvm_vcpu_arch { | |||
116 | * Anything that is not used directly from assembly code goes | 116 | * Anything that is not used directly from assembly code goes |
117 | * here. | 117 | * here. |
118 | */ | 118 | */ |
119 | /* dcache set/way operation pending */ | ||
120 | int last_pcpu; | ||
121 | cpumask_t require_dcache_flush; | ||
122 | 119 | ||
123 | /* Don't run the guest */ | 120 | /* Don't run the guest */ |
124 | bool pause; | 121 | bool pause; |
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h index 14a74f136272..adcf49547301 100644 --- a/arch/arm64/include/asm/kvm_mmu.h +++ b/arch/arm64/include/asm/kvm_mmu.h | |||
@@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu) | |||
243 | return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; | 243 | return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101; |
244 | } | 244 | } |
245 | 245 | ||
246 | static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva, | 246 | static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn, |
247 | unsigned long size, | 247 | unsigned long size, |
248 | bool ipa_uncached) | 248 | bool ipa_uncached) |
249 | { | 249 | { |
250 | void *va = page_address(pfn_to_page(pfn)); | ||
251 | |||
250 | if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) | 252 | if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached) |
251 | kvm_flush_dcache_to_poc((void *)hva, size); | 253 | kvm_flush_dcache_to_poc(va, size); |
252 | 254 | ||
253 | if (!icache_is_aliasing()) { /* PIPT */ | 255 | if (!icache_is_aliasing()) { /* PIPT */ |
254 | flush_icache_range(hva, hva + size); | 256 | flush_icache_range((unsigned long)va, |
257 | (unsigned long)va + size); | ||
255 | } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ | 258 | } else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */ |
256 | /* any kind of VIPT cache */ | 259 | /* any kind of VIPT cache */ |
257 | __flush_icache_all(); | 260 | __flush_icache_all(); |
258 | } | 261 | } |
259 | } | 262 | } |
260 | 263 | ||
264 | static inline void __kvm_flush_dcache_pte(pte_t pte) | ||
265 | { | ||
266 | struct page *page = pte_page(pte); | ||
267 | kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE); | ||
268 | } | ||
269 | |||
270 | static inline void __kvm_flush_dcache_pmd(pmd_t pmd) | ||
271 | { | ||
272 | struct page *page = pmd_page(pmd); | ||
273 | kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE); | ||
274 | } | ||
275 | |||
276 | static inline void __kvm_flush_dcache_pud(pud_t pud) | ||
277 | { | ||
278 | struct page *page = pud_page(pud); | ||
279 | kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE); | ||
280 | } | ||
281 | |||
261 | #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) | 282 | #define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x)) |
262 | 283 | ||
263 | void stage2_flush_vm(struct kvm *kvm); | 284 | void kvm_set_way_flush(struct kvm_vcpu *vcpu); |
285 | void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled); | ||
264 | 286 | ||
265 | #endif /* __ASSEMBLY__ */ | 287 | #endif /* __ASSEMBLY__ */ |
266 | #endif /* __ARM64_KVM_MMU_H__ */ | 288 | #endif /* __ARM64_KVM_MMU_H__ */ |
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 3d7c2df89946..f31e8bb2bc5b 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c | |||
@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr) | |||
69 | return ccsidr; | 69 | return ccsidr; |
70 | } | 70 | } |
71 | 71 | ||
72 | static void do_dc_cisw(u32 val) | 72 | /* |
73 | { | 73 | * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized). |
74 | asm volatile("dc cisw, %x0" : : "r" (val)); | 74 | */ |
75 | dsb(ish); | ||
76 | } | ||
77 | |||
78 | static void do_dc_csw(u32 val) | ||
79 | { | ||
80 | asm volatile("dc csw, %x0" : : "r" (val)); | ||
81 | dsb(ish); | ||
82 | } | ||
83 | |||
84 | /* See note at ARM ARM B1.14.4 */ | ||
85 | static bool access_dcsw(struct kvm_vcpu *vcpu, | 75 | static bool access_dcsw(struct kvm_vcpu *vcpu, |
86 | const struct sys_reg_params *p, | 76 | const struct sys_reg_params *p, |
87 | const struct sys_reg_desc *r) | 77 | const struct sys_reg_desc *r) |
88 | { | 78 | { |
89 | unsigned long val; | ||
90 | int cpu; | ||
91 | |||
92 | if (!p->is_write) | 79 | if (!p->is_write) |
93 | return read_from_write_only(vcpu, p); | 80 | return read_from_write_only(vcpu, p); |
94 | 81 | ||
95 | cpu = get_cpu(); | 82 | kvm_set_way_flush(vcpu); |
96 | |||
97 | cpumask_setall(&vcpu->arch.require_dcache_flush); | ||
98 | cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); | ||
99 | |||
100 | /* If we were already preempted, take the long way around */ | ||
101 | if (cpu != vcpu->arch.last_pcpu) { | ||
102 | flush_cache_all(); | ||
103 | goto done; | ||
104 | } | ||
105 | |||
106 | val = *vcpu_reg(vcpu, p->Rt); | ||
107 | |||
108 | switch (p->CRm) { | ||
109 | case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ | ||
110 | case 14: /* DCCISW */ | ||
111 | do_dc_cisw(val); | ||
112 | break; | ||
113 | |||
114 | case 10: /* DCCSW */ | ||
115 | do_dc_csw(val); | ||
116 | break; | ||
117 | } | ||
118 | |||
119 | done: | ||
120 | put_cpu(); | ||
121 | |||
122 | return true; | 83 | return true; |
123 | } | 84 | } |
124 | 85 | ||
125 | /* | 86 | /* |
126 | * Generic accessor for VM registers. Only called as long as HCR_TVM | 87 | * Generic accessor for VM registers. Only called as long as HCR_TVM |
127 | * is set. | 88 | * is set. If the guest enables the MMU, we stop trapping the VM |
89 | * sys_regs and leave it in complete control of the caches. | ||
128 | */ | 90 | */ |
129 | static bool access_vm_reg(struct kvm_vcpu *vcpu, | 91 | static bool access_vm_reg(struct kvm_vcpu *vcpu, |
130 | const struct sys_reg_params *p, | 92 | const struct sys_reg_params *p, |
131 | const struct sys_reg_desc *r) | 93 | const struct sys_reg_desc *r) |
132 | { | 94 | { |
133 | unsigned long val; | 95 | unsigned long val; |
96 | bool was_enabled = vcpu_has_cache_enabled(vcpu); | ||
134 | 97 | ||
135 | BUG_ON(!p->is_write); | 98 | BUG_ON(!p->is_write); |
136 | 99 | ||
@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu, | |||
143 | vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; | 106 | vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL; |
144 | } | 107 | } |
145 | 108 | ||
146 | return true; | 109 | kvm_toggle_cache(vcpu, was_enabled); |
147 | } | ||
148 | |||
149 | /* | ||
150 | * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the | ||
151 | * guest enables the MMU, we stop trapping the VM sys_regs and leave | ||
152 | * it in complete control of the caches. | ||
153 | */ | ||
154 | static bool access_sctlr(struct kvm_vcpu *vcpu, | ||
155 | const struct sys_reg_params *p, | ||
156 | const struct sys_reg_desc *r) | ||
157 | { | ||
158 | access_vm_reg(vcpu, p, r); | ||
159 | |||
160 | if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */ | ||
161 | vcpu->arch.hcr_el2 &= ~HCR_TVM; | ||
162 | stage2_flush_vm(vcpu->kvm); | ||
163 | } | ||
164 | |||
165 | return true; | 110 | return true; |
166 | } | 111 | } |
167 | 112 | ||
@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = { | |||
377 | NULL, reset_mpidr, MPIDR_EL1 }, | 322 | NULL, reset_mpidr, MPIDR_EL1 }, |
378 | /* SCTLR_EL1 */ | 323 | /* SCTLR_EL1 */ |
379 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), | 324 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000), |
380 | access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 }, | 325 | access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 }, |
381 | /* CPACR_EL1 */ | 326 | /* CPACR_EL1 */ |
382 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), | 327 | { Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010), |
383 | NULL, reset_val, CPACR_EL1, 0 }, | 328 | NULL, reset_val, CPACR_EL1, 0 }, |
@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = { | |||
657 | * register). | 602 | * register). |
658 | */ | 603 | */ |
659 | static const struct sys_reg_desc cp15_regs[] = { | 604 | static const struct sys_reg_desc cp15_regs[] = { |
660 | { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR }, | 605 | { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR }, |
661 | { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, | 606 | { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 }, |
662 | { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, | 607 | { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 }, |
663 | { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, | 608 | { Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR }, |
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c index 0eca93327195..d223a8b57c1e 100644 --- a/arch/avr32/mm/fault.c +++ b/arch/avr32/mm/fault.c | |||
@@ -142,6 +142,8 @@ good_area: | |||
142 | if (unlikely(fault & VM_FAULT_ERROR)) { | 142 | if (unlikely(fault & VM_FAULT_ERROR)) { |
143 | if (fault & VM_FAULT_OOM) | 143 | if (fault & VM_FAULT_OOM) |
144 | goto out_of_memory; | 144 | goto out_of_memory; |
145 | else if (fault & VM_FAULT_SIGSEGV) | ||
146 | goto bad_area; | ||
145 | else if (fault & VM_FAULT_SIGBUS) | 147 | else if (fault & VM_FAULT_SIGBUS) |
146 | goto do_sigbus; | 148 | goto do_sigbus; |
147 | BUG(); | 149 | BUG(); |
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c index 1790f22e71a2..2686a7aa8ec8 100644 --- a/arch/cris/mm/fault.c +++ b/arch/cris/mm/fault.c | |||
@@ -176,6 +176,8 @@ retry: | |||
176 | if (unlikely(fault & VM_FAULT_ERROR)) { | 176 | if (unlikely(fault & VM_FAULT_ERROR)) { |
177 | if (fault & VM_FAULT_OOM) | 177 | if (fault & VM_FAULT_OOM) |
178 | goto out_of_memory; | 178 | goto out_of_memory; |
179 | else if (fault & VM_FAULT_SIGSEGV) | ||
180 | goto bad_area; | ||
179 | else if (fault & VM_FAULT_SIGBUS) | 181 | else if (fault & VM_FAULT_SIGBUS) |
180 | goto do_sigbus; | 182 | goto do_sigbus; |
181 | BUG(); | 183 | BUG(); |
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c index 9a66372fc7c7..ec4917ddf678 100644 --- a/arch/frv/mm/fault.c +++ b/arch/frv/mm/fault.c | |||
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear | |||
168 | if (unlikely(fault & VM_FAULT_ERROR)) { | 168 | if (unlikely(fault & VM_FAULT_ERROR)) { |
169 | if (fault & VM_FAULT_OOM) | 169 | if (fault & VM_FAULT_OOM) |
170 | goto out_of_memory; | 170 | goto out_of_memory; |
171 | else if (fault & VM_FAULT_SIGSEGV) | ||
172 | goto bad_area; | ||
171 | else if (fault & VM_FAULT_SIGBUS) | 173 | else if (fault & VM_FAULT_SIGBUS) |
172 | goto do_sigbus; | 174 | goto do_sigbus; |
173 | BUG(); | 175 | BUG(); |
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index 7225dad87094..ba5ba7accd0d 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c | |||
@@ -172,6 +172,8 @@ retry: | |||
172 | */ | 172 | */ |
173 | if (fault & VM_FAULT_OOM) { | 173 | if (fault & VM_FAULT_OOM) { |
174 | goto out_of_memory; | 174 | goto out_of_memory; |
175 | } else if (fault & VM_FAULT_SIGSEGV) { | ||
176 | goto bad_area; | ||
175 | } else if (fault & VM_FAULT_SIGBUS) { | 177 | } else if (fault & VM_FAULT_SIGBUS) { |
176 | signal = SIGBUS; | 178 | signal = SIGBUS; |
177 | goto bad_area; | 179 | goto bad_area; |
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c index e9c6a8014bd6..e3d4d4890104 100644 --- a/arch/m32r/mm/fault.c +++ b/arch/m32r/mm/fault.c | |||
@@ -200,6 +200,8 @@ good_area: | |||
200 | if (unlikely(fault & VM_FAULT_ERROR)) { | 200 | if (unlikely(fault & VM_FAULT_ERROR)) { |
201 | if (fault & VM_FAULT_OOM) | 201 | if (fault & VM_FAULT_OOM) |
202 | goto out_of_memory; | 202 | goto out_of_memory; |
203 | else if (fault & VM_FAULT_SIGSEGV) | ||
204 | goto bad_area; | ||
203 | else if (fault & VM_FAULT_SIGBUS) | 205 | else if (fault & VM_FAULT_SIGBUS) |
204 | goto do_sigbus; | 206 | goto do_sigbus; |
205 | BUG(); | 207 | BUG(); |
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index 2bd7487440c4..b2f04aee46ec 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c | |||
@@ -145,6 +145,8 @@ good_area: | |||
145 | if (unlikely(fault & VM_FAULT_ERROR)) { | 145 | if (unlikely(fault & VM_FAULT_ERROR)) { |
146 | if (fault & VM_FAULT_OOM) | 146 | if (fault & VM_FAULT_OOM) |
147 | goto out_of_memory; | 147 | goto out_of_memory; |
148 | else if (fault & VM_FAULT_SIGSEGV) | ||
149 | goto map_err; | ||
148 | else if (fault & VM_FAULT_SIGBUS) | 150 | else if (fault & VM_FAULT_SIGBUS) |
149 | goto bus_err; | 151 | goto bus_err; |
150 | BUG(); | 152 | BUG(); |
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c index 332680e5ebf2..2de5dc695a87 100644 --- a/arch/metag/mm/fault.c +++ b/arch/metag/mm/fault.c | |||
@@ -141,6 +141,8 @@ good_area: | |||
141 | if (unlikely(fault & VM_FAULT_ERROR)) { | 141 | if (unlikely(fault & VM_FAULT_ERROR)) { |
142 | if (fault & VM_FAULT_OOM) | 142 | if (fault & VM_FAULT_OOM) |
143 | goto out_of_memory; | 143 | goto out_of_memory; |
144 | else if (fault & VM_FAULT_SIGSEGV) | ||
145 | goto bad_area; | ||
144 | else if (fault & VM_FAULT_SIGBUS) | 146 | else if (fault & VM_FAULT_SIGBUS) |
145 | goto do_sigbus; | 147 | goto do_sigbus; |
146 | BUG(); | 148 | BUG(); |
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c index fa4cf52aa7a6..d46a5ebb7570 100644 --- a/arch/microblaze/mm/fault.c +++ b/arch/microblaze/mm/fault.c | |||
@@ -224,6 +224,8 @@ good_area: | |||
224 | if (unlikely(fault & VM_FAULT_ERROR)) { | 224 | if (unlikely(fault & VM_FAULT_ERROR)) { |
225 | if (fault & VM_FAULT_OOM) | 225 | if (fault & VM_FAULT_OOM) |
226 | goto out_of_memory; | 226 | goto out_of_memory; |
227 | else if (fault & VM_FAULT_SIGSEGV) | ||
228 | goto bad_area; | ||
227 | else if (fault & VM_FAULT_SIGBUS) | 229 | else if (fault & VM_FAULT_SIGBUS) |
228 | goto do_sigbus; | 230 | goto do_sigbus; |
229 | BUG(); | 231 | BUG(); |
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c index becc42bb1849..70ab5d664332 100644 --- a/arch/mips/mm/fault.c +++ b/arch/mips/mm/fault.c | |||
@@ -158,6 +158,8 @@ good_area: | |||
158 | if (unlikely(fault & VM_FAULT_ERROR)) { | 158 | if (unlikely(fault & VM_FAULT_ERROR)) { |
159 | if (fault & VM_FAULT_OOM) | 159 | if (fault & VM_FAULT_OOM) |
160 | goto out_of_memory; | 160 | goto out_of_memory; |
161 | else if (fault & VM_FAULT_SIGSEGV) | ||
162 | goto bad_area; | ||
161 | else if (fault & VM_FAULT_SIGBUS) | 163 | else if (fault & VM_FAULT_SIGBUS) |
162 | goto do_sigbus; | 164 | goto do_sigbus; |
163 | BUG(); | 165 | BUG(); |
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index 3516cbdf1ee9..0c2cc5d39c8e 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c | |||
@@ -262,6 +262,8 @@ good_area: | |||
262 | if (unlikely(fault & VM_FAULT_ERROR)) { | 262 | if (unlikely(fault & VM_FAULT_ERROR)) { |
263 | if (fault & VM_FAULT_OOM) | 263 | if (fault & VM_FAULT_OOM) |
264 | goto out_of_memory; | 264 | goto out_of_memory; |
265 | else if (fault & VM_FAULT_SIGSEGV) | ||
266 | goto bad_area; | ||
265 | else if (fault & VM_FAULT_SIGBUS) | 267 | else if (fault & VM_FAULT_SIGBUS) |
266 | goto do_sigbus; | 268 | goto do_sigbus; |
267 | BUG(); | 269 | BUG(); |
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c index 15a0bb5fc06d..34429d5a0ccd 100644 --- a/arch/nios2/mm/fault.c +++ b/arch/nios2/mm/fault.c | |||
@@ -135,6 +135,8 @@ survive: | |||
135 | if (unlikely(fault & VM_FAULT_ERROR)) { | 135 | if (unlikely(fault & VM_FAULT_ERROR)) { |
136 | if (fault & VM_FAULT_OOM) | 136 | if (fault & VM_FAULT_OOM) |
137 | goto out_of_memory; | 137 | goto out_of_memory; |
138 | else if (fault & VM_FAULT_SIGSEGV) | ||
139 | goto bad_area; | ||
138 | else if (fault & VM_FAULT_SIGBUS) | 140 | else if (fault & VM_FAULT_SIGBUS) |
139 | goto do_sigbus; | 141 | goto do_sigbus; |
140 | BUG(); | 142 | BUG(); |
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c index 0703acf7d327..230ac20ae794 100644 --- a/arch/openrisc/mm/fault.c +++ b/arch/openrisc/mm/fault.c | |||
@@ -171,6 +171,8 @@ good_area: | |||
171 | if (unlikely(fault & VM_FAULT_ERROR)) { | 171 | if (unlikely(fault & VM_FAULT_ERROR)) { |
172 | if (fault & VM_FAULT_OOM) | 172 | if (fault & VM_FAULT_OOM) |
173 | goto out_of_memory; | 173 | goto out_of_memory; |
174 | else if (fault & VM_FAULT_SIGSEGV) | ||
175 | goto bad_area; | ||
174 | else if (fault & VM_FAULT_SIGBUS) | 176 | else if (fault & VM_FAULT_SIGBUS) |
175 | goto do_sigbus; | 177 | goto do_sigbus; |
176 | BUG(); | 178 | BUG(); |
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 3ca9c1131cfe..e5120e653240 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c | |||
@@ -256,6 +256,8 @@ good_area: | |||
256 | */ | 256 | */ |
257 | if (fault & VM_FAULT_OOM) | 257 | if (fault & VM_FAULT_OOM) |
258 | goto out_of_memory; | 258 | goto out_of_memory; |
259 | else if (fault & VM_FAULT_SIGSEGV) | ||
260 | goto bad_area; | ||
259 | else if (fault & VM_FAULT_SIGBUS) | 261 | else if (fault & VM_FAULT_SIGBUS) |
260 | goto bad_area; | 262 | goto bad_area; |
261 | BUG(); | 263 | BUG(); |
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c index 5a236f082c78..1b5305d4bdab 100644 --- a/arch/powerpc/mm/copro_fault.c +++ b/arch/powerpc/mm/copro_fault.c | |||
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea, | |||
76 | if (*flt & VM_FAULT_OOM) { | 76 | if (*flt & VM_FAULT_OOM) { |
77 | ret = -ENOMEM; | 77 | ret = -ENOMEM; |
78 | goto out_unlock; | 78 | goto out_unlock; |
79 | } else if (*flt & VM_FAULT_SIGBUS) { | 79 | } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) { |
80 | ret = -EFAULT; | 80 | ret = -EFAULT; |
81 | goto out_unlock; | 81 | goto out_unlock; |
82 | } | 82 | } |
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index eb79907f34fa..6154b0a2b063 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -437,6 +437,8 @@ good_area: | |||
437 | */ | 437 | */ |
438 | fault = handle_mm_fault(mm, vma, address, flags); | 438 | fault = handle_mm_fault(mm, vma, address, flags); |
439 | if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { | 439 | if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) { |
440 | if (fault & VM_FAULT_SIGSEGV) | ||
441 | goto bad_area; | ||
440 | rc = mm_fault_error(regs, address, fault); | 442 | rc = mm_fault_error(regs, address, fault); |
441 | if (rc >= MM_FAULT_RETURN) | 443 | if (rc >= MM_FAULT_RETURN) |
442 | goto bail; | 444 | goto bail; |
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index b700a329c31d..d2de7d5d7574 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c | |||
@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void) | |||
304 | * all cpus at boot. Get these reg values of current cpu and use the | 304 | * all cpus at boot. Get these reg values of current cpu and use the |
305 | * same accross all cpus. | 305 | * same accross all cpus. |
306 | */ | 306 | */ |
307 | uint64_t lpcr_val = mfspr(SPRN_LPCR); | 307 | uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1; |
308 | uint64_t hid0_val = mfspr(SPRN_HID0); | 308 | uint64_t hid0_val = mfspr(SPRN_HID0); |
309 | uint64_t hid1_val = mfspr(SPRN_HID1); | 309 | uint64_t hid1_val = mfspr(SPRN_HID1); |
310 | uint64_t hid4_val = mfspr(SPRN_HID4); | 310 | uint64_t hid4_val = mfspr(SPRN_HID4); |
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index 5b150f0c5df9..13c6e200b24e 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c | |||
@@ -337,6 +337,7 @@ static inline void disable_surveillance(void) | |||
337 | args.token = rtas_token("set-indicator"); | 337 | args.token = rtas_token("set-indicator"); |
338 | if (args.token == RTAS_UNKNOWN_SERVICE) | 338 | if (args.token == RTAS_UNKNOWN_SERVICE) |
339 | return; | 339 | return; |
340 | args.token = cpu_to_be32(args.token); | ||
340 | args.nargs = cpu_to_be32(3); | 341 | args.nargs = cpu_to_be32(3); |
341 | args.nret = cpu_to_be32(1); | 342 | args.nret = cpu_to_be32(1); |
342 | args.rets = &args.args[3]; | 343 | args.rets = &args.args[3]; |
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index 811937bb90be..9065d5aa3932 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault) | |||
374 | do_no_context(regs); | 374 | do_no_context(regs); |
375 | else | 375 | else |
376 | pagefault_out_of_memory(); | 376 | pagefault_out_of_memory(); |
377 | } else if (fault & VM_FAULT_SIGSEGV) { | ||
378 | /* Kernel mode? Handle exceptions or die */ | ||
379 | if (!user_mode(regs)) | ||
380 | do_no_context(regs); | ||
381 | else | ||
382 | do_sigsegv(regs, SEGV_MAPERR); | ||
377 | } else if (fault & VM_FAULT_SIGBUS) { | 383 | } else if (fault & VM_FAULT_SIGBUS) { |
378 | /* Kernel mode? Handle exceptions or die */ | 384 | /* Kernel mode? Handle exceptions or die */ |
379 | if (!user_mode(regs)) | 385 | if (!user_mode(regs)) |
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c index 52238983527d..6860beb2a280 100644 --- a/arch/score/mm/fault.c +++ b/arch/score/mm/fault.c | |||
@@ -114,6 +114,8 @@ good_area: | |||
114 | if (unlikely(fault & VM_FAULT_ERROR)) { | 114 | if (unlikely(fault & VM_FAULT_ERROR)) { |
115 | if (fault & VM_FAULT_OOM) | 115 | if (fault & VM_FAULT_OOM) |
116 | goto out_of_memory; | 116 | goto out_of_memory; |
117 | else if (fault & VM_FAULT_SIGSEGV) | ||
118 | goto bad_area; | ||
117 | else if (fault & VM_FAULT_SIGBUS) | 119 | else if (fault & VM_FAULT_SIGBUS) |
118 | goto do_sigbus; | 120 | goto do_sigbus; |
119 | BUG(); | 121 | BUG(); |
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c index 541dc6101508..a58fec9b55e0 100644 --- a/arch/sh/mm/fault.c +++ b/arch/sh/mm/fault.c | |||
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, | |||
353 | } else { | 353 | } else { |
354 | if (fault & VM_FAULT_SIGBUS) | 354 | if (fault & VM_FAULT_SIGBUS) |
355 | do_sigbus(regs, error_code, address); | 355 | do_sigbus(regs, error_code, address); |
356 | else if (fault & VM_FAULT_SIGSEGV) | ||
357 | bad_area(regs, error_code, address); | ||
356 | else | 358 | else |
357 | BUG(); | 359 | BUG(); |
358 | } | 360 | } |
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c index 908e8c17c902..70d817154fe8 100644 --- a/arch/sparc/mm/fault_32.c +++ b/arch/sparc/mm/fault_32.c | |||
@@ -249,6 +249,8 @@ good_area: | |||
249 | if (unlikely(fault & VM_FAULT_ERROR)) { | 249 | if (unlikely(fault & VM_FAULT_ERROR)) { |
250 | if (fault & VM_FAULT_OOM) | 250 | if (fault & VM_FAULT_OOM) |
251 | goto out_of_memory; | 251 | goto out_of_memory; |
252 | else if (fault & VM_FAULT_SIGSEGV) | ||
253 | goto bad_area; | ||
252 | else if (fault & VM_FAULT_SIGBUS) | 254 | else if (fault & VM_FAULT_SIGBUS) |
253 | goto do_sigbus; | 255 | goto do_sigbus; |
254 | BUG(); | 256 | BUG(); |
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c index 18fcd7167095..479823249429 100644 --- a/arch/sparc/mm/fault_64.c +++ b/arch/sparc/mm/fault_64.c | |||
@@ -446,6 +446,8 @@ good_area: | |||
446 | if (unlikely(fault & VM_FAULT_ERROR)) { | 446 | if (unlikely(fault & VM_FAULT_ERROR)) { |
447 | if (fault & VM_FAULT_OOM) | 447 | if (fault & VM_FAULT_OOM) |
448 | goto out_of_memory; | 448 | goto out_of_memory; |
449 | else if (fault & VM_FAULT_SIGSEGV) | ||
450 | goto bad_area; | ||
449 | else if (fault & VM_FAULT_SIGBUS) | 451 | else if (fault & VM_FAULT_SIGBUS) |
450 | goto do_sigbus; | 452 | goto do_sigbus; |
451 | BUG(); | 453 | BUG(); |
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c index 565e25a98334..0f61a73534e6 100644 --- a/arch/tile/mm/fault.c +++ b/arch/tile/mm/fault.c | |||
@@ -442,6 +442,8 @@ good_area: | |||
442 | if (unlikely(fault & VM_FAULT_ERROR)) { | 442 | if (unlikely(fault & VM_FAULT_ERROR)) { |
443 | if (fault & VM_FAULT_OOM) | 443 | if (fault & VM_FAULT_OOM) |
444 | goto out_of_memory; | 444 | goto out_of_memory; |
445 | else if (fault & VM_FAULT_SIGSEGV) | ||
446 | goto bad_area; | ||
445 | else if (fault & VM_FAULT_SIGBUS) | 447 | else if (fault & VM_FAULT_SIGBUS) |
446 | goto do_sigbus; | 448 | goto do_sigbus; |
447 | BUG(); | 449 | BUG(); |
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c index 5678c3571e7c..209617302df8 100644 --- a/arch/um/kernel/trap.c +++ b/arch/um/kernel/trap.c | |||
@@ -80,6 +80,8 @@ good_area: | |||
80 | if (unlikely(fault & VM_FAULT_ERROR)) { | 80 | if (unlikely(fault & VM_FAULT_ERROR)) { |
81 | if (fault & VM_FAULT_OOM) { | 81 | if (fault & VM_FAULT_OOM) { |
82 | goto out_of_memory; | 82 | goto out_of_memory; |
83 | } else if (fault & VM_FAULT_SIGSEGV) { | ||
84 | goto out; | ||
83 | } else if (fault & VM_FAULT_SIGBUS) { | 85 | } else if (fault & VM_FAULT_SIGBUS) { |
84 | err = -EACCES; | 86 | err = -EACCES; |
85 | goto out; | 87 | goto out; |
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index d999398928bc..ad754b4411f7 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -90,7 +90,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo | |||
90 | suffix-$(CONFIG_KERNEL_LZ4) := lz4 | 90 | suffix-$(CONFIG_KERNEL_LZ4) := lz4 |
91 | 91 | ||
92 | RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \ | 92 | RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \ |
93 | perl $(srctree)/arch/x86/tools/calc_run_size.pl) | 93 | $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh) |
94 | quiet_cmd_mkpiggy = MKPIGGY $@ | 94 | quiet_cmd_mkpiggy = MKPIGGY $@ |
95 | cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false ) | 95 | cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false ) |
96 | 96 | ||
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 944bf019b74f..498b6d967138 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void) | |||
2431 | break; | 2431 | break; |
2432 | 2432 | ||
2433 | case 55: /* 22nm Atom "Silvermont" */ | 2433 | case 55: /* 22nm Atom "Silvermont" */ |
2434 | case 76: /* 14nm Atom "Airmont" */ | ||
2434 | case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */ | 2435 | case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */ |
2435 | memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, | 2436 | memcpy(hw_cache_event_ids, slm_hw_cache_event_ids, |
2436 | sizeof(hw_cache_event_ids)); | 2437 | sizeof(hw_cache_event_ids)); |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c index 6e434f8e5fc8..c4bb8b8e5017 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c | |||
@@ -142,7 +142,7 @@ static inline u64 rapl_scale(u64 v) | |||
142 | * or use ldexp(count, -32). | 142 | * or use ldexp(count, -32). |
143 | * Watts = Joules/Time delta | 143 | * Watts = Joules/Time delta |
144 | */ | 144 | */ |
145 | return v << (32 - __this_cpu_read(rapl_pmu->hw_unit)); | 145 | return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit); |
146 | } | 146 | } |
147 | 147 | ||
148 | static u64 rapl_event_update(struct perf_event *event) | 148 | static u64 rapl_event_update(struct perf_event *event) |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 10b8d3eaaf15..c635b8b49e93 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id | |||
840 | box->phys_id = phys_id; | 840 | box->phys_id = phys_id; |
841 | box->pci_dev = pdev; | 841 | box->pci_dev = pdev; |
842 | box->pmu = pmu; | 842 | box->pmu = pmu; |
843 | uncore_box_init(box); | ||
844 | pci_set_drvdata(pdev, box); | 843 | pci_set_drvdata(pdev, box); |
845 | 844 | ||
846 | raw_spin_lock(&uncore_box_lock); | 845 | raw_spin_lock(&uncore_box_lock); |
@@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu) | |||
1004 | pmu = &type->pmus[j]; | 1003 | pmu = &type->pmus[j]; |
1005 | box = *per_cpu_ptr(pmu->box, cpu); | 1004 | box = *per_cpu_ptr(pmu->box, cpu); |
1006 | /* called by uncore_cpu_init? */ | 1005 | /* called by uncore_cpu_init? */ |
1007 | if (box && box->phys_id >= 0) { | 1006 | if (box && box->phys_id >= 0) |
1008 | uncore_box_init(box); | ||
1009 | continue; | 1007 | continue; |
1010 | } | ||
1011 | 1008 | ||
1012 | for_each_online_cpu(k) { | 1009 | for_each_online_cpu(k) { |
1013 | exist = *per_cpu_ptr(pmu->box, k); | 1010 | exist = *per_cpu_ptr(pmu->box, k); |
@@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu) | |||
1023 | } | 1020 | } |
1024 | } | 1021 | } |
1025 | 1022 | ||
1026 | if (box) { | 1023 | if (box) |
1027 | box->phys_id = phys_id; | 1024 | box->phys_id = phys_id; |
1028 | uncore_box_init(box); | ||
1029 | } | ||
1030 | } | 1025 | } |
1031 | } | 1026 | } |
1032 | return 0; | 1027 | return 0; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 863d9b02563e..6c8c1e7e69d8 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h | |||
@@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box) | |||
257 | return box->pmu->type->num_counters; | 257 | return box->pmu->type->num_counters; |
258 | } | 258 | } |
259 | 259 | ||
260 | static inline void uncore_box_init(struct intel_uncore_box *box) | ||
261 | { | ||
262 | if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { | ||
263 | if (box->pmu->type->ops->init_box) | ||
264 | box->pmu->type->ops->init_box(box); | ||
265 | } | ||
266 | } | ||
267 | |||
260 | static inline void uncore_disable_box(struct intel_uncore_box *box) | 268 | static inline void uncore_disable_box(struct intel_uncore_box *box) |
261 | { | 269 | { |
262 | if (box->pmu->type->ops->disable_box) | 270 | if (box->pmu->type->ops->disable_box) |
@@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box) | |||
265 | 273 | ||
266 | static inline void uncore_enable_box(struct intel_uncore_box *box) | 274 | static inline void uncore_enable_box(struct intel_uncore_box *box) |
267 | { | 275 | { |
276 | uncore_box_init(box); | ||
277 | |||
268 | if (box->pmu->type->ops->enable_box) | 278 | if (box->pmu->type->ops->enable_box) |
269 | box->pmu->type->ops->enable_box(box); | 279 | box->pmu->type->ops->enable_box(box); |
270 | } | 280 | } |
@@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box, | |||
287 | return box->pmu->type->ops->read_counter(box, event); | 297 | return box->pmu->type->ops->read_counter(box, event); |
288 | } | 298 | } |
289 | 299 | ||
290 | static inline void uncore_box_init(struct intel_uncore_box *box) | ||
291 | { | ||
292 | if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) { | ||
293 | if (box->pmu->type->ops->init_box) | ||
294 | box->pmu->type->ops->init_box(box); | ||
295 | } | ||
296 | } | ||
297 | |||
298 | static inline bool uncore_box_is_fake(struct intel_uncore_box *box) | 300 | static inline bool uncore_box_is_fake(struct intel_uncore_box *box) |
299 | { | 301 | { |
300 | return (box->phys_id < 0); | 302 | return (box->phys_id < 0); |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 4f0c0b954686..d52dcf0776ea 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm) | |||
192 | u16 cid, lid; | 192 | u16 cid, lid; |
193 | u32 ldr, aid; | 193 | u32 ldr, aid; |
194 | 194 | ||
195 | if (!kvm_apic_present(vcpu)) | ||
196 | continue; | ||
197 | |||
195 | aid = kvm_apic_id(apic); | 198 | aid = kvm_apic_id(apic); |
196 | ldr = kvm_apic_get_reg(apic, APIC_LDR); | 199 | ldr = kvm_apic_get_reg(apic, APIC_LDR); |
197 | cid = apic_cluster_id(new, ldr); | 200 | cid = apic_cluster_id(new, ldr); |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 38dcec403b46..e3ff27a5b634 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, | |||
898 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| | 898 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| |
899 | VM_FAULT_HWPOISON_LARGE)) | 899 | VM_FAULT_HWPOISON_LARGE)) |
900 | do_sigbus(regs, error_code, address, fault); | 900 | do_sigbus(regs, error_code, address, fault); |
901 | else if (fault & VM_FAULT_SIGSEGV) | ||
902 | bad_area_nosemaphore(regs, error_code, address); | ||
901 | else | 903 | else |
902 | BUG(); | 904 | BUG(); |
903 | } | 905 | } |
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl deleted file mode 100644 index 23210baade2d..000000000000 --- a/arch/x86/tools/calc_run_size.pl +++ /dev/null | |||
@@ -1,39 +0,0 @@ | |||
1 | #!/usr/bin/perl | ||
2 | # | ||
3 | # Calculate the amount of space needed to run the kernel, including room for | ||
4 | # the .bss and .brk sections. | ||
5 | # | ||
6 | # Usage: | ||
7 | # objdump -h a.out | perl calc_run_size.pl | ||
8 | use strict; | ||
9 | |||
10 | my $mem_size = 0; | ||
11 | my $file_offset = 0; | ||
12 | |||
13 | my $sections=" *[0-9]+ \.(?:bss|brk) +"; | ||
14 | while (<>) { | ||
15 | if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) { | ||
16 | my $size = hex($1); | ||
17 | my $offset = hex($2); | ||
18 | $mem_size += $size; | ||
19 | if ($file_offset == 0) { | ||
20 | $file_offset = $offset; | ||
21 | } elsif ($file_offset != $offset) { | ||
22 | # BFD linker shows the same file offset in ELF. | ||
23 | # Gold linker shows them as consecutive. | ||
24 | next if ($file_offset + $mem_size == $offset + $size); | ||
25 | |||
26 | printf STDERR "file_offset: 0x%lx\n", $file_offset; | ||
27 | printf STDERR "mem_size: 0x%lx\n", $mem_size; | ||
28 | printf STDERR "offset: 0x%lx\n", $offset; | ||
29 | printf STDERR "size: 0x%lx\n", $size; | ||
30 | |||
31 | die ".bss and .brk are non-contiguous\n"; | ||
32 | } | ||
33 | } | ||
34 | } | ||
35 | |||
36 | if ($file_offset == 0) { | ||
37 | die "Never found .bss or .brk file offset\n"; | ||
38 | } | ||
39 | printf("%d\n", $mem_size + $file_offset); | ||
diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh new file mode 100644 index 000000000000..1a4c17bb3910 --- /dev/null +++ b/arch/x86/tools/calc_run_size.sh | |||
@@ -0,0 +1,42 @@ | |||
1 | #!/bin/sh | ||
2 | # | ||
3 | # Calculate the amount of space needed to run the kernel, including room for | ||
4 | # the .bss and .brk sections. | ||
5 | # | ||
6 | # Usage: | ||
7 | # objdump -h a.out | sh calc_run_size.sh | ||
8 | |||
9 | NUM='\([0-9a-fA-F]*[ \t]*\)' | ||
10 | OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p') | ||
11 | if [ -z "$OUT" ] ; then | ||
12 | echo "Never found .bss or .brk file offset" >&2 | ||
13 | exit 1 | ||
14 | fi | ||
15 | |||
16 | OUT=$(echo ${OUT# }) | ||
17 | sizeA=$(printf "%d" 0x${OUT%% *}) | ||
18 | OUT=${OUT#* } | ||
19 | offsetA=$(printf "%d" 0x${OUT%% *}) | ||
20 | OUT=${OUT#* } | ||
21 | sizeB=$(printf "%d" 0x${OUT%% *}) | ||
22 | OUT=${OUT#* } | ||
23 | offsetB=$(printf "%d" 0x${OUT%% *}) | ||
24 | |||
25 | run_size=$(( $offsetA + $sizeA + $sizeB )) | ||
26 | |||
27 | # BFD linker shows the same file offset in ELF. | ||
28 | if [ "$offsetA" -ne "$offsetB" ] ; then | ||
29 | # Gold linker shows them as consecutive. | ||
30 | endB=$(( $offsetB + $sizeB )) | ||
31 | if [ "$endB" != "$run_size" ] ; then | ||
32 | printf "sizeA: 0x%x\n" $sizeA >&2 | ||
33 | printf "offsetA: 0x%x\n" $offsetA >&2 | ||
34 | printf "sizeB: 0x%x\n" $sizeB >&2 | ||
35 | printf "offsetB: 0x%x\n" $offsetB >&2 | ||
36 | echo ".bss and .brk are non-contiguous" >&2 | ||
37 | exit 1 | ||
38 | fi | ||
39 | fi | ||
40 | |||
41 | printf "%d\n" $run_size | ||
42 | exit 0 | ||
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c index b57c4f91f487..9e3571a6535c 100644 --- a/arch/xtensa/mm/fault.c +++ b/arch/xtensa/mm/fault.c | |||
@@ -117,6 +117,8 @@ good_area: | |||
117 | if (unlikely(fault & VM_FAULT_ERROR)) { | 117 | if (unlikely(fault & VM_FAULT_ERROR)) { |
118 | if (fault & VM_FAULT_OOM) | 118 | if (fault & VM_FAULT_OOM) |
119 | goto out_of_memory; | 119 | goto out_of_memory; |
120 | else if (fault & VM_FAULT_SIGSEGV) | ||
121 | goto bad_area; | ||
120 | else if (fault & VM_FAULT_SIGBUS) | 122 | else if (fault & VM_FAULT_SIGBUS) |
121 | goto do_sigbus; | 123 | goto do_sigbus; |
122 | BUG(); | 124 | BUG(); |