diff options
| author | Ingo Molnar <mingo@kernel.org> | 2018-12-03 04:47:53 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2018-12-03 04:47:53 -0500 |
| commit | df60673198ae678f68af54873b8904ba93fe13a0 (patch) | |
| tree | 6e9a3393d0be7b68a69c2bbc58f4325ceb6fd853 /arch | |
| parent | 89f579ce99f7e028e81885d3965f973c0f787611 (diff) | |
| parent | 2595646791c319cadfdbf271563aac97d0843dc7 (diff) | |
Merge tag 'v4.20-rc5' into x86/cleanups, to sync up the tree
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
190 files changed, 2245 insertions, 1307 deletions
diff --git a/arch/alpha/include/asm/termios.h b/arch/alpha/include/asm/termios.h index 6a8c53dec57e..b7c77bb1bfd2 100644 --- a/arch/alpha/include/asm/termios.h +++ b/arch/alpha/include/asm/termios.h | |||
| @@ -73,9 +73,15 @@ | |||
| 73 | }) | 73 | }) |
| 74 | 74 | ||
| 75 | #define user_termios_to_kernel_termios(k, u) \ | 75 | #define user_termios_to_kernel_termios(k, u) \ |
| 76 | copy_from_user(k, u, sizeof(struct termios)) | 76 | copy_from_user(k, u, sizeof(struct termios2)) |
| 77 | 77 | ||
| 78 | #define kernel_termios_to_user_termios(u, k) \ | 78 | #define kernel_termios_to_user_termios(u, k) \ |
| 79 | copy_to_user(u, k, sizeof(struct termios2)) | ||
| 80 | |||
| 81 | #define user_termios_to_kernel_termios_1(k, u) \ | ||
| 82 | copy_from_user(k, u, sizeof(struct termios)) | ||
| 83 | |||
| 84 | #define kernel_termios_to_user_termios_1(u, k) \ | ||
| 79 | copy_to_user(u, k, sizeof(struct termios)) | 85 | copy_to_user(u, k, sizeof(struct termios)) |
| 80 | 86 | ||
| 81 | #endif /* _ALPHA_TERMIOS_H */ | 87 | #endif /* _ALPHA_TERMIOS_H */ |
diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h index 1e9121c9b3c7..971311605288 100644 --- a/arch/alpha/include/uapi/asm/ioctls.h +++ b/arch/alpha/include/uapi/asm/ioctls.h | |||
| @@ -32,6 +32,11 @@ | |||
| 32 | #define TCXONC _IO('t', 30) | 32 | #define TCXONC _IO('t', 30) |
| 33 | #define TCFLSH _IO('t', 31) | 33 | #define TCFLSH _IO('t', 31) |
| 34 | 34 | ||
| 35 | #define TCGETS2 _IOR('T', 42, struct termios2) | ||
| 36 | #define TCSETS2 _IOW('T', 43, struct termios2) | ||
| 37 | #define TCSETSW2 _IOW('T', 44, struct termios2) | ||
| 38 | #define TCSETSF2 _IOW('T', 45, struct termios2) | ||
| 39 | |||
| 35 | #define TIOCSWINSZ _IOW('t', 103, struct winsize) | 40 | #define TIOCSWINSZ _IOW('t', 103, struct winsize) |
| 36 | #define TIOCGWINSZ _IOR('t', 104, struct winsize) | 41 | #define TIOCGWINSZ _IOR('t', 104, struct winsize) |
| 37 | #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ | 42 | #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ |
diff --git a/arch/alpha/include/uapi/asm/termbits.h b/arch/alpha/include/uapi/asm/termbits.h index de6c8360fbe3..4575ba34a0ea 100644 --- a/arch/alpha/include/uapi/asm/termbits.h +++ b/arch/alpha/include/uapi/asm/termbits.h | |||
| @@ -26,6 +26,19 @@ struct termios { | |||
| 26 | speed_t c_ospeed; /* output speed */ | 26 | speed_t c_ospeed; /* output speed */ |
| 27 | }; | 27 | }; |
| 28 | 28 | ||
| 29 | /* Alpha has identical termios and termios2 */ | ||
| 30 | |||
| 31 | struct termios2 { | ||
| 32 | tcflag_t c_iflag; /* input mode flags */ | ||
| 33 | tcflag_t c_oflag; /* output mode flags */ | ||
| 34 | tcflag_t c_cflag; /* control mode flags */ | ||
| 35 | tcflag_t c_lflag; /* local mode flags */ | ||
| 36 | cc_t c_cc[NCCS]; /* control characters */ | ||
| 37 | cc_t c_line; /* line discipline (== c_cc[19]) */ | ||
| 38 | speed_t c_ispeed; /* input speed */ | ||
| 39 | speed_t c_ospeed; /* output speed */ | ||
| 40 | }; | ||
| 41 | |||
| 29 | /* Alpha has matching termios and ktermios */ | 42 | /* Alpha has matching termios and ktermios */ |
| 30 | 43 | ||
| 31 | struct ktermios { | 44 | struct ktermios { |
| @@ -152,6 +165,7 @@ struct ktermios { | |||
| 152 | #define B3000000 00034 | 165 | #define B3000000 00034 |
| 153 | #define B3500000 00035 | 166 | #define B3500000 00035 |
| 154 | #define B4000000 00036 | 167 | #define B4000000 00036 |
| 168 | #define BOTHER 00037 | ||
| 155 | 169 | ||
| 156 | #define CSIZE 00001400 | 170 | #define CSIZE 00001400 |
| 157 | #define CS5 00000000 | 171 | #define CS5 00000000 |
| @@ -169,6 +183,9 @@ struct ktermios { | |||
| 169 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ | 183 | #define CMSPAR 010000000000 /* mark or space (stick) parity */ |
| 170 | #define CRTSCTS 020000000000 /* flow control */ | 184 | #define CRTSCTS 020000000000 /* flow control */ |
| 171 | 185 | ||
| 186 | #define CIBAUD 07600000 | ||
| 187 | #define IBSHIFT 16 | ||
| 188 | |||
| 172 | /* c_lflag bits */ | 189 | /* c_lflag bits */ |
| 173 | #define ISIG 0x00000080 | 190 | #define ISIG 0x00000080 |
| 174 | #define ICANON 0x00000100 | 191 | #define ICANON 0x00000100 |
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts index d4d33cd7adad..1e2bb68231ad 100644 --- a/arch/arm/boot/dts/am3517-evm.dts +++ b/arch/arm/boot/dts/am3517-evm.dts | |||
| @@ -228,7 +228,7 @@ | |||
| 228 | vmmc-supply = <&vmmc_fixed>; | 228 | vmmc-supply = <&vmmc_fixed>; |
| 229 | bus-width = <4>; | 229 | bus-width = <4>; |
| 230 | wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */ | 230 | wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */ |
| 231 | cd-gpios = <&gpio4 31 GPIO_ACTIVE_HIGH>; /* gpio_127 */ | 231 | cd-gpios = <&gpio4 31 GPIO_ACTIVE_LOW>; /* gpio_127 */ |
| 232 | }; | 232 | }; |
| 233 | 233 | ||
| 234 | &mmc3 { | 234 | &mmc3 { |
diff --git a/arch/arm/boot/dts/am3517-som.dtsi b/arch/arm/boot/dts/am3517-som.dtsi index dae6e458e59f..b1c988eed87c 100644 --- a/arch/arm/boot/dts/am3517-som.dtsi +++ b/arch/arm/boot/dts/am3517-som.dtsi | |||
| @@ -163,7 +163,7 @@ | |||
| 163 | compatible = "ti,wl1271"; | 163 | compatible = "ti,wl1271"; |
| 164 | reg = <2>; | 164 | reg = <2>; |
| 165 | interrupt-parent = <&gpio6>; | 165 | interrupt-parent = <&gpio6>; |
| 166 | interrupts = <10 IRQ_TYPE_LEVEL_HIGH>; /* gpio_170 */ | 166 | interrupts = <10 IRQ_TYPE_EDGE_RISING>; /* gpio_170 */ |
| 167 | ref-clock-frequency = <26000000>; | 167 | ref-clock-frequency = <26000000>; |
| 168 | tcxo-clock-frequency = <26000000>; | 168 | tcxo-clock-frequency = <26000000>; |
| 169 | }; | 169 | }; |
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts index e45a15ceb94b..69d753cac89a 100644 --- a/arch/arm/boot/dts/imx51-zii-rdu1.dts +++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts | |||
| @@ -492,12 +492,6 @@ | |||
| 492 | pinctrl-0 = <&pinctrl_i2c2>; | 492 | pinctrl-0 = <&pinctrl_i2c2>; |
| 493 | status = "okay"; | 493 | status = "okay"; |
| 494 | 494 | ||
| 495 | eeprom@50 { | ||
| 496 | compatible = "atmel,24c04"; | ||
| 497 | pagesize = <16>; | ||
| 498 | reg = <0x50>; | ||
| 499 | }; | ||
| 500 | |||
| 501 | hpa1: amp@60 { | 495 | hpa1: amp@60 { |
| 502 | compatible = "ti,tpa6130a2"; | 496 | compatible = "ti,tpa6130a2"; |
| 503 | reg = <0x60>; | 497 | reg = <0x60>; |
diff --git a/arch/arm/boot/dts/imx53-ppd.dts b/arch/arm/boot/dts/imx53-ppd.dts index b560ff88459b..5ff9a179c83c 100644 --- a/arch/arm/boot/dts/imx53-ppd.dts +++ b/arch/arm/boot/dts/imx53-ppd.dts | |||
| @@ -55,7 +55,7 @@ | |||
| 55 | }; | 55 | }; |
| 56 | 56 | ||
| 57 | chosen { | 57 | chosen { |
| 58 | stdout-path = "&uart1:115200n8"; | 58 | stdout-path = "serial0:115200n8"; |
| 59 | }; | 59 | }; |
| 60 | 60 | ||
| 61 | memory@70000000 { | 61 | memory@70000000 { |
diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi index ed9a980bce85..beefa1b2049d 100644 --- a/arch/arm/boot/dts/imx6sll.dtsi +++ b/arch/arm/boot/dts/imx6sll.dtsi | |||
| @@ -740,7 +740,7 @@ | |||
| 740 | i2c1: i2c@21a0000 { | 740 | i2c1: i2c@21a0000 { |
| 741 | #address-cells = <1>; | 741 | #address-cells = <1>; |
| 742 | #size-cells = <0>; | 742 | #size-cells = <0>; |
| 743 | compatible = "fs,imx6sll-i2c", "fsl,imx21-i2c"; | 743 | compatible = "fsl,imx6sll-i2c", "fsl,imx21-i2c"; |
| 744 | reg = <0x021a0000 0x4000>; | 744 | reg = <0x021a0000 0x4000>; |
| 745 | interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>; | 745 | interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>; |
| 746 | clocks = <&clks IMX6SLL_CLK_I2C1>; | 746 | clocks = <&clks IMX6SLL_CLK_I2C1>; |
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi index 53b3408b5fab..7d7d679945d2 100644 --- a/arch/arm/boot/dts/imx6sx-sdb.dtsi +++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi | |||
| @@ -117,7 +117,9 @@ | |||
| 117 | regulator-name = "enet_3v3"; | 117 | regulator-name = "enet_3v3"; |
| 118 | regulator-min-microvolt = <3300000>; | 118 | regulator-min-microvolt = <3300000>; |
| 119 | regulator-max-microvolt = <3300000>; | 119 | regulator-max-microvolt = <3300000>; |
| 120 | gpios = <&gpio2 6 GPIO_ACTIVE_LOW>; | 120 | gpio = <&gpio2 6 GPIO_ACTIVE_LOW>; |
| 121 | regulator-boot-on; | ||
| 122 | regulator-always-on; | ||
| 121 | }; | 123 | }; |
| 122 | 124 | ||
| 123 | reg_pcie_gpio: regulator-pcie-gpio { | 125 | reg_pcie_gpio: regulator-pcie-gpio { |
| @@ -180,6 +182,7 @@ | |||
| 180 | phy-supply = <&reg_enet_3v3>; | 182 | phy-supply = <&reg_enet_3v3>; |
| 181 | phy-mode = "rgmii"; | 183 | phy-mode = "rgmii"; |
| 182 | phy-handle = <&ethphy1>; | 184 | phy-handle = <&ethphy1>; |
| 185 | phy-reset-gpios = <&gpio2 7 GPIO_ACTIVE_LOW>; | ||
| 183 | status = "okay"; | 186 | status = "okay"; |
| 184 | 187 | ||
| 185 | mdio { | 188 | mdio { |
| @@ -373,6 +376,8 @@ | |||
| 373 | MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081 | 376 | MX6SX_PAD_RGMII1_RD3__ENET1_RX_DATA_3 0x3081 |
| 374 | MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081 | 377 | MX6SX_PAD_RGMII1_RX_CTL__ENET1_RX_EN 0x3081 |
| 375 | MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91 | 378 | MX6SX_PAD_ENET2_RX_CLK__ENET2_REF_CLK_25M 0x91 |
| 379 | /* phy reset */ | ||
| 380 | MX6SX_PAD_ENET2_CRS__GPIO2_IO_7 0x10b0 | ||
| 376 | >; | 381 | >; |
| 377 | }; | 382 | }; |
| 378 | 383 | ||
diff --git a/arch/arm/boot/dts/logicpd-som-lv.dtsi b/arch/arm/boot/dts/logicpd-som-lv.dtsi index ac343330d0c8..98b682a8080c 100644 --- a/arch/arm/boot/dts/logicpd-som-lv.dtsi +++ b/arch/arm/boot/dts/logicpd-som-lv.dtsi | |||
| @@ -129,7 +129,7 @@ | |||
| 129 | }; | 129 | }; |
| 130 | 130 | ||
| 131 | &mmc3 { | 131 | &mmc3 { |
| 132 | interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; | 132 | interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>; |
| 133 | pinctrl-0 = <&mmc3_pins &wl127x_gpio>; | 133 | pinctrl-0 = <&mmc3_pins &wl127x_gpio>; |
| 134 | pinctrl-names = "default"; | 134 | pinctrl-names = "default"; |
| 135 | vmmc-supply = <&wl12xx_vmmc>; | 135 | vmmc-supply = <&wl12xx_vmmc>; |
diff --git a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts index 9d5d53fbe9c0..c39cf2ca54da 100644 --- a/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts +++ b/arch/arm/boot/dts/logicpd-torpedo-37xx-devkit.dts | |||
| @@ -35,7 +35,7 @@ | |||
| 35 | * jumpering combinations for the long run. | 35 | * jumpering combinations for the long run. |
| 36 | */ | 36 | */ |
| 37 | &mmc3 { | 37 | &mmc3 { |
| 38 | interrupts-extended = <&intc 94 &omap3_pmx_core2 0x46>; | 38 | interrupts-extended = <&intc 94 &omap3_pmx_core 0x136>; |
| 39 | pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>; | 39 | pinctrl-0 = <&mmc3_pins &mmc3_core2_pins>; |
| 40 | pinctrl-names = "default"; | 40 | pinctrl-names = "default"; |
| 41 | vmmc-supply = <&wl12xx_vmmc>; | 41 | vmmc-supply = <&wl12xx_vmmc>; |
diff --git a/arch/arm/boot/dts/rk3288-veyron.dtsi b/arch/arm/boot/dts/rk3288-veyron.dtsi index 2075120cfc4d..d8bf939a3aff 100644 --- a/arch/arm/boot/dts/rk3288-veyron.dtsi +++ b/arch/arm/boot/dts/rk3288-veyron.dtsi | |||
| @@ -10,7 +10,11 @@ | |||
| 10 | #include "rk3288.dtsi" | 10 | #include "rk3288.dtsi" |
| 11 | 11 | ||
| 12 | / { | 12 | / { |
| 13 | memory@0 { | 13 | /* |
| 14 | * The default coreboot on veyron devices ignores memory@0 nodes | ||
| 15 | * and would instead create another memory node. | ||
| 16 | */ | ||
| 17 | memory { | ||
| 14 | device_type = "memory"; | 18 | device_type = "memory"; |
| 15 | reg = <0x0 0x0 0x0 0x80000000>; | 19 | reg = <0x0 0x0 0x0 0x80000000>; |
| 16 | }; | 20 | }; |
diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 843052f14f1c..dd0dda6ed44b 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi | |||
| @@ -314,7 +314,7 @@ | |||
| 314 | 0x1 0x0 0x60000000 0x10000000 | 314 | 0x1 0x0 0x60000000 0x10000000 |
| 315 | 0x2 0x0 0x70000000 0x10000000 | 315 | 0x2 0x0 0x70000000 0x10000000 |
| 316 | 0x3 0x0 0x80000000 0x10000000>; | 316 | 0x3 0x0 0x80000000 0x10000000>; |
| 317 | clocks = <&mck>; | 317 | clocks = <&h32ck>; |
| 318 | status = "disabled"; | 318 | status = "disabled"; |
| 319 | 319 | ||
| 320 | nand_controller: nand-controller { | 320 | nand_controller: nand-controller { |
diff --git a/arch/arm/boot/dts/vf610m4-colibri.dts b/arch/arm/boot/dts/vf610m4-colibri.dts index 41ec66a96990..ca6249558760 100644 --- a/arch/arm/boot/dts/vf610m4-colibri.dts +++ b/arch/arm/boot/dts/vf610m4-colibri.dts | |||
| @@ -50,8 +50,8 @@ | |||
| 50 | compatible = "fsl,vf610m4"; | 50 | compatible = "fsl,vf610m4"; |
| 51 | 51 | ||
| 52 | chosen { | 52 | chosen { |
| 53 | bootargs = "console=ttyLP2,115200 clk_ignore_unused init=/linuxrc rw"; | 53 | bootargs = "clk_ignore_unused init=/linuxrc rw"; |
| 54 | stdout-path = "&uart2"; | 54 | stdout-path = "serial2:115200"; |
| 55 | }; | 55 | }; |
| 56 | 56 | ||
| 57 | memory@8c000000 { | 57 | memory@8c000000 { |
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig index 1c7616815a86..63af6234c1b6 100644 --- a/arch/arm/configs/multi_v7_defconfig +++ b/arch/arm/configs/multi_v7_defconfig | |||
| @@ -1,7 +1,6 @@ | |||
| 1 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
| 2 | CONFIG_NO_HZ=y | 2 | CONFIG_NO_HZ=y |
| 3 | CONFIG_HIGH_RES_TIMERS=y | 3 | CONFIG_HIGH_RES_TIMERS=y |
| 4 | CONFIG_PREEMPT=y | ||
| 5 | CONFIG_CGROUPS=y | 4 | CONFIG_CGROUPS=y |
| 6 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
| 7 | CONFIG_EMBEDDED=y | 6 | CONFIG_EMBEDDED=y |
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 0d289240b6ca..775cac3c02bb 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h | |||
| @@ -111,6 +111,7 @@ | |||
| 111 | #include <linux/kernel.h> | 111 | #include <linux/kernel.h> |
| 112 | 112 | ||
| 113 | extern unsigned int processor_id; | 113 | extern unsigned int processor_id; |
| 114 | struct proc_info_list *lookup_processor(u32 midr); | ||
| 114 | 115 | ||
| 115 | #ifdef CONFIG_CPU_CP15 | 116 | #ifdef CONFIG_CPU_CP15 |
| 116 | #define read_cpuid(reg) \ | 117 | #define read_cpuid(reg) \ |
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h index 92fd2c8a9af0..12659ce5c1f3 100644 --- a/arch/arm/include/asm/pgtable-2level.h +++ b/arch/arm/include/asm/pgtable-2level.h | |||
| @@ -10,7 +10,7 @@ | |||
| 10 | #ifndef _ASM_PGTABLE_2LEVEL_H | 10 | #ifndef _ASM_PGTABLE_2LEVEL_H |
| 11 | #define _ASM_PGTABLE_2LEVEL_H | 11 | #define _ASM_PGTABLE_2LEVEL_H |
| 12 | 12 | ||
| 13 | #define __PAGETABLE_PMD_FOLDED | 13 | #define __PAGETABLE_PMD_FOLDED 1 |
| 14 | 14 | ||
| 15 | /* | 15 | /* |
| 16 | * Hardware-wise, we have a two level page table structure, where the first | 16 | * Hardware-wise, we have a two level page table structure, where the first |
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index e25f4392e1b2..e1b6f280ab08 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h | |||
| @@ -23,7 +23,7 @@ struct mm_struct; | |||
| 23 | /* | 23 | /* |
| 24 | * Don't change this structure - ASM code relies on it. | 24 | * Don't change this structure - ASM code relies on it. |
| 25 | */ | 25 | */ |
| 26 | extern struct processor { | 26 | struct processor { |
| 27 | /* MISC | 27 | /* MISC |
| 28 | * get data abort address/flags | 28 | * get data abort address/flags |
| 29 | */ | 29 | */ |
| @@ -79,9 +79,13 @@ extern struct processor { | |||
| 79 | unsigned int suspend_size; | 79 | unsigned int suspend_size; |
| 80 | void (*do_suspend)(void *); | 80 | void (*do_suspend)(void *); |
| 81 | void (*do_resume)(void *); | 81 | void (*do_resume)(void *); |
| 82 | } processor; | 82 | }; |
| 83 | 83 | ||
| 84 | #ifndef MULTI_CPU | 84 | #ifndef MULTI_CPU |
| 85 | static inline void init_proc_vtable(const struct processor *p) | ||
| 86 | { | ||
| 87 | } | ||
| 88 | |||
| 85 | extern void cpu_proc_init(void); | 89 | extern void cpu_proc_init(void); |
| 86 | extern void cpu_proc_fin(void); | 90 | extern void cpu_proc_fin(void); |
| 87 | extern int cpu_do_idle(void); | 91 | extern int cpu_do_idle(void); |
| @@ -98,17 +102,50 @@ extern void cpu_reset(unsigned long addr, bool hvc) __attribute__((noreturn)); | |||
| 98 | extern void cpu_do_suspend(void *); | 102 | extern void cpu_do_suspend(void *); |
| 99 | extern void cpu_do_resume(void *); | 103 | extern void cpu_do_resume(void *); |
| 100 | #else | 104 | #else |
| 101 | #define cpu_proc_init processor._proc_init | ||
| 102 | #define cpu_proc_fin processor._proc_fin | ||
| 103 | #define cpu_reset processor.reset | ||
| 104 | #define cpu_do_idle processor._do_idle | ||
| 105 | #define cpu_dcache_clean_area processor.dcache_clean_area | ||
| 106 | #define cpu_set_pte_ext processor.set_pte_ext | ||
| 107 | #define cpu_do_switch_mm processor.switch_mm | ||
| 108 | 105 | ||
| 109 | /* These three are private to arch/arm/kernel/suspend.c */ | 106 | extern struct processor processor; |
| 110 | #define cpu_do_suspend processor.do_suspend | 107 | #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) |
| 111 | #define cpu_do_resume processor.do_resume | 108 | #include <linux/smp.h> |
| 109 | /* | ||
| 110 | * This can't be a per-cpu variable because we need to access it before | ||
| 111 | * per-cpu has been initialised. We have a couple of functions that are | ||
| 112 | * called in a pre-emptible context, and so can't use smp_processor_id() | ||
| 113 | * there, hence PROC_TABLE(). We insist in init_proc_vtable() that the | ||
| 114 | * function pointers for these are identical across all CPUs. | ||
| 115 | */ | ||
| 116 | extern struct processor *cpu_vtable[]; | ||
| 117 | #define PROC_VTABLE(f) cpu_vtable[smp_processor_id()]->f | ||
| 118 | #define PROC_TABLE(f) cpu_vtable[0]->f | ||
| 119 | static inline void init_proc_vtable(const struct processor *p) | ||
| 120 | { | ||
| 121 | unsigned int cpu = smp_processor_id(); | ||
| 122 | *cpu_vtable[cpu] = *p; | ||
| 123 | WARN_ON_ONCE(cpu_vtable[cpu]->dcache_clean_area != | ||
| 124 | cpu_vtable[0]->dcache_clean_area); | ||
| 125 | WARN_ON_ONCE(cpu_vtable[cpu]->set_pte_ext != | ||
| 126 | cpu_vtable[0]->set_pte_ext); | ||
| 127 | } | ||
| 128 | #else | ||
| 129 | #define PROC_VTABLE(f) processor.f | ||
| 130 | #define PROC_TABLE(f) processor.f | ||
| 131 | static inline void init_proc_vtable(const struct processor *p) | ||
| 132 | { | ||
| 133 | processor = *p; | ||
| 134 | } | ||
| 135 | #endif | ||
| 136 | |||
| 137 | #define cpu_proc_init PROC_VTABLE(_proc_init) | ||
| 138 | #define cpu_check_bugs PROC_VTABLE(check_bugs) | ||
| 139 | #define cpu_proc_fin PROC_VTABLE(_proc_fin) | ||
| 140 | #define cpu_reset PROC_VTABLE(reset) | ||
| 141 | #define cpu_do_idle PROC_VTABLE(_do_idle) | ||
| 142 | #define cpu_dcache_clean_area PROC_TABLE(dcache_clean_area) | ||
| 143 | #define cpu_set_pte_ext PROC_TABLE(set_pte_ext) | ||
| 144 | #define cpu_do_switch_mm PROC_VTABLE(switch_mm) | ||
| 145 | |||
| 146 | /* These two are private to arch/arm/kernel/suspend.c */ | ||
| 147 | #define cpu_do_suspend PROC_VTABLE(do_suspend) | ||
| 148 | #define cpu_do_resume PROC_VTABLE(do_resume) | ||
| 112 | #endif | 149 | #endif |
| 113 | 150 | ||
| 114 | extern void cpu_resume(void); | 151 | extern void cpu_resume(void); |
diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c index 7be511310191..d41d3598e5e5 100644 --- a/arch/arm/kernel/bugs.c +++ b/arch/arm/kernel/bugs.c | |||
| @@ -6,8 +6,8 @@ | |||
| 6 | void check_other_bugs(void) | 6 | void check_other_bugs(void) |
| 7 | { | 7 | { |
| 8 | #ifdef MULTI_CPU | 8 | #ifdef MULTI_CPU |
| 9 | if (processor.check_bugs) | 9 | if (cpu_check_bugs) |
| 10 | processor.check_bugs(); | 10 | cpu_check_bugs(); |
| 11 | #endif | 11 | #endif |
| 12 | } | 12 | } |
| 13 | 13 | ||
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c index 0142fcfcc3d3..bda949fd84e8 100644 --- a/arch/arm/kernel/ftrace.c +++ b/arch/arm/kernel/ftrace.c | |||
| @@ -183,9 +183,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
| 183 | unsigned long frame_pointer) | 183 | unsigned long frame_pointer) |
| 184 | { | 184 | { |
| 185 | unsigned long return_hooker = (unsigned long) &return_to_handler; | 185 | unsigned long return_hooker = (unsigned long) &return_to_handler; |
| 186 | struct ftrace_graph_ent trace; | ||
| 187 | unsigned long old; | 186 | unsigned long old; |
| 188 | int err; | ||
| 189 | 187 | ||
| 190 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 188 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
| 191 | return; | 189 | return; |
| @@ -193,21 +191,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
| 193 | old = *parent; | 191 | old = *parent; |
| 194 | *parent = return_hooker; | 192 | *parent = return_hooker; |
| 195 | 193 | ||
| 196 | trace.func = self_addr; | 194 | if (function_graph_enter(old, self_addr, frame_pointer, NULL)) |
| 197 | trace.depth = current->curr_ret_stack + 1; | ||
| 198 | |||
| 199 | /* Only trace if the calling function expects to */ | ||
| 200 | if (!ftrace_graph_entry(&trace)) { | ||
| 201 | *parent = old; | 195 | *parent = old; |
| 202 | return; | ||
| 203 | } | ||
| 204 | |||
| 205 | err = ftrace_push_return_trace(old, self_addr, &trace.depth, | ||
| 206 | frame_pointer, NULL); | ||
| 207 | if (err == -EBUSY) { | ||
| 208 | *parent = old; | ||
| 209 | return; | ||
| 210 | } | ||
| 211 | } | 196 | } |
| 212 | 197 | ||
| 213 | #ifdef CONFIG_DYNAMIC_FTRACE | 198 | #ifdef CONFIG_DYNAMIC_FTRACE |
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 6e0375e7db05..997b02302c31 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S | |||
| @@ -145,6 +145,9 @@ __mmap_switched_data: | |||
| 145 | #endif | 145 | #endif |
| 146 | .size __mmap_switched_data, . - __mmap_switched_data | 146 | .size __mmap_switched_data, . - __mmap_switched_data |
| 147 | 147 | ||
| 148 | __FINIT | ||
| 149 | .text | ||
| 150 | |||
| 148 | /* | 151 | /* |
| 149 | * This provides a C-API version of __lookup_processor_type | 152 | * This provides a C-API version of __lookup_processor_type |
| 150 | */ | 153 | */ |
| @@ -156,9 +159,6 @@ ENTRY(lookup_processor_type) | |||
| 156 | ldmfd sp!, {r4 - r6, r9, pc} | 159 | ldmfd sp!, {r4 - r6, r9, pc} |
| 157 | ENDPROC(lookup_processor_type) | 160 | ENDPROC(lookup_processor_type) |
| 158 | 161 | ||
| 159 | __FINIT | ||
| 160 | .text | ||
| 161 | |||
| 162 | /* | 162 | /* |
| 163 | * Read processor ID register (CP#15, CR0), and look up in the linker-built | 163 | * Read processor ID register (CP#15, CR0), and look up in the linker-built |
| 164 | * supported processor list. Note that we can't use the absolute addresses | 164 | * supported processor list. Note that we can't use the absolute addresses |
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index ac7e08886863..375b13f7e780 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
| @@ -114,6 +114,11 @@ EXPORT_SYMBOL(elf_hwcap2); | |||
| 114 | 114 | ||
| 115 | #ifdef MULTI_CPU | 115 | #ifdef MULTI_CPU |
| 116 | struct processor processor __ro_after_init; | 116 | struct processor processor __ro_after_init; |
| 117 | #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) | ||
| 118 | struct processor *cpu_vtable[NR_CPUS] = { | ||
| 119 | [0] = &processor, | ||
| 120 | }; | ||
| 121 | #endif | ||
| 117 | #endif | 122 | #endif |
| 118 | #ifdef MULTI_TLB | 123 | #ifdef MULTI_TLB |
| 119 | struct cpu_tlb_fns cpu_tlb __ro_after_init; | 124 | struct cpu_tlb_fns cpu_tlb __ro_after_init; |
| @@ -666,28 +671,33 @@ static void __init smp_build_mpidr_hash(void) | |||
| 666 | } | 671 | } |
| 667 | #endif | 672 | #endif |
| 668 | 673 | ||
| 669 | static void __init setup_processor(void) | 674 | /* |
| 675 | * locate processor in the list of supported processor types. The linker | ||
| 676 | * builds this table for us from the entries in arch/arm/mm/proc-*.S | ||
| 677 | */ | ||
| 678 | struct proc_info_list *lookup_processor(u32 midr) | ||
| 670 | { | 679 | { |
| 671 | struct proc_info_list *list; | 680 | struct proc_info_list *list = lookup_processor_type(midr); |
| 672 | 681 | ||
| 673 | /* | ||
| 674 | * locate processor in the list of supported processor | ||
| 675 | * types. The linker builds this table for us from the | ||
| 676 | * entries in arch/arm/mm/proc-*.S | ||
| 677 | */ | ||
| 678 | list = lookup_processor_type(read_cpuid_id()); | ||
| 679 | if (!list) { | 682 | if (!list) { |
| 680 | pr_err("CPU configuration botched (ID %08x), unable to continue.\n", | 683 | pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n", |
| 681 | read_cpuid_id()); | 684 | smp_processor_id(), midr); |
| 682 | while (1); | 685 | while (1) |
| 686 | /* can't use cpu_relax() here as it may require MMU setup */; | ||
| 683 | } | 687 | } |
| 684 | 688 | ||
| 689 | return list; | ||
| 690 | } | ||
| 691 | |||
| 692 | static void __init setup_processor(void) | ||
| 693 | { | ||
| 694 | unsigned int midr = read_cpuid_id(); | ||
| 695 | struct proc_info_list *list = lookup_processor(midr); | ||
| 696 | |||
| 685 | cpu_name = list->cpu_name; | 697 | cpu_name = list->cpu_name; |
| 686 | __cpu_architecture = __get_cpu_architecture(); | 698 | __cpu_architecture = __get_cpu_architecture(); |
| 687 | 699 | ||
| 688 | #ifdef MULTI_CPU | 700 | init_proc_vtable(list->proc); |
| 689 | processor = *list->proc; | ||
| 690 | #endif | ||
| 691 | #ifdef MULTI_TLB | 701 | #ifdef MULTI_TLB |
| 692 | cpu_tlb = *list->tlb; | 702 | cpu_tlb = *list->tlb; |
| 693 | #endif | 703 | #endif |
| @@ -699,7 +709,7 @@ static void __init setup_processor(void) | |||
| 699 | #endif | 709 | #endif |
| 700 | 710 | ||
| 701 | pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", | 711 | pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n", |
| 702 | cpu_name, read_cpuid_id(), read_cpuid_id() & 15, | 712 | list->cpu_name, midr, midr & 15, |
| 703 | proc_arch[cpu_architecture()], get_cr()); | 713 | proc_arch[cpu_architecture()], get_cr()); |
| 704 | 714 | ||
| 705 | snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", | 715 | snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c", |
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 0978282d5fc2..12a6172263c0 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
| @@ -42,6 +42,7 @@ | |||
| 42 | #include <asm/mmu_context.h> | 42 | #include <asm/mmu_context.h> |
| 43 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
| 44 | #include <asm/pgalloc.h> | 44 | #include <asm/pgalloc.h> |
| 45 | #include <asm/procinfo.h> | ||
| 45 | #include <asm/processor.h> | 46 | #include <asm/processor.h> |
| 46 | #include <asm/sections.h> | 47 | #include <asm/sections.h> |
| 47 | #include <asm/tlbflush.h> | 48 | #include <asm/tlbflush.h> |
| @@ -102,6 +103,30 @@ static unsigned long get_arch_pgd(pgd_t *pgd) | |||
| 102 | #endif | 103 | #endif |
| 103 | } | 104 | } |
| 104 | 105 | ||
| 106 | #if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) | ||
| 107 | static int secondary_biglittle_prepare(unsigned int cpu) | ||
| 108 | { | ||
| 109 | if (!cpu_vtable[cpu]) | ||
| 110 | cpu_vtable[cpu] = kzalloc(sizeof(*cpu_vtable[cpu]), GFP_KERNEL); | ||
| 111 | |||
| 112 | return cpu_vtable[cpu] ? 0 : -ENOMEM; | ||
| 113 | } | ||
| 114 | |||
| 115 | static void secondary_biglittle_init(void) | ||
| 116 | { | ||
| 117 | init_proc_vtable(lookup_processor(read_cpuid_id())->proc); | ||
| 118 | } | ||
| 119 | #else | ||
| 120 | static int secondary_biglittle_prepare(unsigned int cpu) | ||
| 121 | { | ||
| 122 | return 0; | ||
| 123 | } | ||
| 124 | |||
| 125 | static void secondary_biglittle_init(void) | ||
| 126 | { | ||
| 127 | } | ||
| 128 | #endif | ||
| 129 | |||
| 105 | int __cpu_up(unsigned int cpu, struct task_struct *idle) | 130 | int __cpu_up(unsigned int cpu, struct task_struct *idle) |
| 106 | { | 131 | { |
| 107 | int ret; | 132 | int ret; |
| @@ -109,6 +134,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) | |||
| 109 | if (!smp_ops.smp_boot_secondary) | 134 | if (!smp_ops.smp_boot_secondary) |
| 110 | return -ENOSYS; | 135 | return -ENOSYS; |
| 111 | 136 | ||
| 137 | ret = secondary_biglittle_prepare(cpu); | ||
| 138 | if (ret) | ||
| 139 | return ret; | ||
| 140 | |||
| 112 | /* | 141 | /* |
| 113 | * We need to tell the secondary core where to find | 142 | * We need to tell the secondary core where to find |
| 114 | * its stack and the page tables. | 143 | * its stack and the page tables. |
| @@ -359,6 +388,8 @@ asmlinkage void secondary_start_kernel(void) | |||
| 359 | struct mm_struct *mm = &init_mm; | 388 | struct mm_struct *mm = &init_mm; |
| 360 | unsigned int cpu; | 389 | unsigned int cpu; |
| 361 | 390 | ||
| 391 | secondary_biglittle_init(); | ||
| 392 | |||
| 362 | /* | 393 | /* |
| 363 | * The identity mapping is uncached (strongly ordered), so | 394 | * The identity mapping is uncached (strongly ordered), so |
| 364 | * switch away from it before attempting any exclusive accesses. | 395 | * switch away from it before attempting any exclusive accesses. |
diff --git a/arch/arm/mach-davinci/da830.c b/arch/arm/mach-davinci/da830.c index 0bc5bd2665df..2cc9fe4c3a91 100644 --- a/arch/arm/mach-davinci/da830.c +++ b/arch/arm/mach-davinci/da830.c | |||
| @@ -759,7 +759,9 @@ static struct davinci_id da830_ids[] = { | |||
| 759 | }; | 759 | }; |
| 760 | 760 | ||
| 761 | static struct davinci_gpio_platform_data da830_gpio_platform_data = { | 761 | static struct davinci_gpio_platform_data da830_gpio_platform_data = { |
| 762 | .ngpio = 128, | 762 | .no_auto_base = true, |
| 763 | .base = 0, | ||
| 764 | .ngpio = 128, | ||
| 763 | }; | 765 | }; |
| 764 | 766 | ||
| 765 | int __init da830_register_gpio(void) | 767 | int __init da830_register_gpio(void) |
diff --git a/arch/arm/mach-davinci/da850.c b/arch/arm/mach-davinci/da850.c index 4528bbf0c861..e7b78df2bfef 100644 --- a/arch/arm/mach-davinci/da850.c +++ b/arch/arm/mach-davinci/da850.c | |||
| @@ -719,7 +719,9 @@ int __init da850_register_vpif_capture(struct vpif_capture_config | |||
| 719 | } | 719 | } |
| 720 | 720 | ||
| 721 | static struct davinci_gpio_platform_data da850_gpio_platform_data = { | 721 | static struct davinci_gpio_platform_data da850_gpio_platform_data = { |
| 722 | .ngpio = 144, | 722 | .no_auto_base = true, |
| 723 | .base = 0, | ||
| 724 | .ngpio = 144, | ||
| 723 | }; | 725 | }; |
| 724 | 726 | ||
| 725 | int __init da850_register_gpio(void) | 727 | int __init da850_register_gpio(void) |
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 1fd3619f6a09..cf78da5ab054 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c | |||
| @@ -701,6 +701,46 @@ static struct resource da8xx_gpio_resources[] = { | |||
| 701 | }, | 701 | }, |
| 702 | { /* interrupt */ | 702 | { /* interrupt */ |
| 703 | .start = IRQ_DA8XX_GPIO0, | 703 | .start = IRQ_DA8XX_GPIO0, |
| 704 | .end = IRQ_DA8XX_GPIO0, | ||
| 705 | .flags = IORESOURCE_IRQ, | ||
| 706 | }, | ||
| 707 | { | ||
| 708 | .start = IRQ_DA8XX_GPIO1, | ||
| 709 | .end = IRQ_DA8XX_GPIO1, | ||
| 710 | .flags = IORESOURCE_IRQ, | ||
| 711 | }, | ||
| 712 | { | ||
| 713 | .start = IRQ_DA8XX_GPIO2, | ||
| 714 | .end = IRQ_DA8XX_GPIO2, | ||
| 715 | .flags = IORESOURCE_IRQ, | ||
| 716 | }, | ||
| 717 | { | ||
| 718 | .start = IRQ_DA8XX_GPIO3, | ||
| 719 | .end = IRQ_DA8XX_GPIO3, | ||
| 720 | .flags = IORESOURCE_IRQ, | ||
| 721 | }, | ||
| 722 | { | ||
| 723 | .start = IRQ_DA8XX_GPIO4, | ||
| 724 | .end = IRQ_DA8XX_GPIO4, | ||
| 725 | .flags = IORESOURCE_IRQ, | ||
| 726 | }, | ||
| 727 | { | ||
| 728 | .start = IRQ_DA8XX_GPIO5, | ||
| 729 | .end = IRQ_DA8XX_GPIO5, | ||
| 730 | .flags = IORESOURCE_IRQ, | ||
| 731 | }, | ||
| 732 | { | ||
| 733 | .start = IRQ_DA8XX_GPIO6, | ||
| 734 | .end = IRQ_DA8XX_GPIO6, | ||
| 735 | .flags = IORESOURCE_IRQ, | ||
| 736 | }, | ||
| 737 | { | ||
| 738 | .start = IRQ_DA8XX_GPIO7, | ||
| 739 | .end = IRQ_DA8XX_GPIO7, | ||
| 740 | .flags = IORESOURCE_IRQ, | ||
| 741 | }, | ||
| 742 | { | ||
| 743 | .start = IRQ_DA8XX_GPIO8, | ||
| 704 | .end = IRQ_DA8XX_GPIO8, | 744 | .end = IRQ_DA8XX_GPIO8, |
| 705 | .flags = IORESOURCE_IRQ, | 745 | .flags = IORESOURCE_IRQ, |
| 706 | }, | 746 | }, |
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c index 9f7d38d12c88..4c6e0bef4509 100644 --- a/arch/arm/mach-davinci/dm355.c +++ b/arch/arm/mach-davinci/dm355.c | |||
| @@ -548,12 +548,44 @@ static struct resource dm355_gpio_resources[] = { | |||
| 548 | }, | 548 | }, |
| 549 | { /* interrupt */ | 549 | { /* interrupt */ |
| 550 | .start = IRQ_DM355_GPIOBNK0, | 550 | .start = IRQ_DM355_GPIOBNK0, |
| 551 | .end = IRQ_DM355_GPIOBNK0, | ||
| 552 | .flags = IORESOURCE_IRQ, | ||
| 553 | }, | ||
| 554 | { | ||
| 555 | .start = IRQ_DM355_GPIOBNK1, | ||
| 556 | .end = IRQ_DM355_GPIOBNK1, | ||
| 557 | .flags = IORESOURCE_IRQ, | ||
| 558 | }, | ||
| 559 | { | ||
| 560 | .start = IRQ_DM355_GPIOBNK2, | ||
| 561 | .end = IRQ_DM355_GPIOBNK2, | ||
| 562 | .flags = IORESOURCE_IRQ, | ||
| 563 | }, | ||
| 564 | { | ||
| 565 | .start = IRQ_DM355_GPIOBNK3, | ||
| 566 | .end = IRQ_DM355_GPIOBNK3, | ||
| 567 | .flags = IORESOURCE_IRQ, | ||
| 568 | }, | ||
| 569 | { | ||
| 570 | .start = IRQ_DM355_GPIOBNK4, | ||
| 571 | .end = IRQ_DM355_GPIOBNK4, | ||
| 572 | .flags = IORESOURCE_IRQ, | ||
| 573 | }, | ||
| 574 | { | ||
| 575 | .start = IRQ_DM355_GPIOBNK5, | ||
| 576 | .end = IRQ_DM355_GPIOBNK5, | ||
| 577 | .flags = IORESOURCE_IRQ, | ||
| 578 | }, | ||
| 579 | { | ||
| 580 | .start = IRQ_DM355_GPIOBNK6, | ||
| 551 | .end = IRQ_DM355_GPIOBNK6, | 581 | .end = IRQ_DM355_GPIOBNK6, |
| 552 | .flags = IORESOURCE_IRQ, | 582 | .flags = IORESOURCE_IRQ, |
| 553 | }, | 583 | }, |
| 554 | }; | 584 | }; |
| 555 | 585 | ||
| 556 | static struct davinci_gpio_platform_data dm355_gpio_platform_data = { | 586 | static struct davinci_gpio_platform_data dm355_gpio_platform_data = { |
| 587 | .no_auto_base = true, | ||
| 588 | .base = 0, | ||
| 557 | .ngpio = 104, | 589 | .ngpio = 104, |
| 558 | }; | 590 | }; |
| 559 | 591 | ||
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c index abcf2a5ed89b..01fb2b0c82de 100644 --- a/arch/arm/mach-davinci/dm365.c +++ b/arch/arm/mach-davinci/dm365.c | |||
| @@ -267,12 +267,49 @@ static struct resource dm365_gpio_resources[] = { | |||
| 267 | }, | 267 | }, |
| 268 | { /* interrupt */ | 268 | { /* interrupt */ |
| 269 | .start = IRQ_DM365_GPIO0, | 269 | .start = IRQ_DM365_GPIO0, |
| 270 | .end = IRQ_DM365_GPIO0, | ||
| 271 | .flags = IORESOURCE_IRQ, | ||
| 272 | }, | ||
| 273 | { | ||
| 274 | .start = IRQ_DM365_GPIO1, | ||
| 275 | .end = IRQ_DM365_GPIO1, | ||
| 276 | .flags = IORESOURCE_IRQ, | ||
| 277 | }, | ||
| 278 | { | ||
| 279 | .start = IRQ_DM365_GPIO2, | ||
| 280 | .end = IRQ_DM365_GPIO2, | ||
| 281 | .flags = IORESOURCE_IRQ, | ||
| 282 | }, | ||
| 283 | { | ||
| 284 | .start = IRQ_DM365_GPIO3, | ||
| 285 | .end = IRQ_DM365_GPIO3, | ||
| 286 | .flags = IORESOURCE_IRQ, | ||
| 287 | }, | ||
| 288 | { | ||
| 289 | .start = IRQ_DM365_GPIO4, | ||
| 290 | .end = IRQ_DM365_GPIO4, | ||
| 291 | .flags = IORESOURCE_IRQ, | ||
| 292 | }, | ||
| 293 | { | ||
| 294 | .start = IRQ_DM365_GPIO5, | ||
| 295 | .end = IRQ_DM365_GPIO5, | ||
| 296 | .flags = IORESOURCE_IRQ, | ||
| 297 | }, | ||
| 298 | { | ||
| 299 | .start = IRQ_DM365_GPIO6, | ||
| 300 | .end = IRQ_DM365_GPIO6, | ||
| 301 | .flags = IORESOURCE_IRQ, | ||
| 302 | }, | ||
| 303 | { | ||
| 304 | .start = IRQ_DM365_GPIO7, | ||
| 270 | .end = IRQ_DM365_GPIO7, | 305 | .end = IRQ_DM365_GPIO7, |
| 271 | .flags = IORESOURCE_IRQ, | 306 | .flags = IORESOURCE_IRQ, |
| 272 | }, | 307 | }, |
| 273 | }; | 308 | }; |
| 274 | 309 | ||
| 275 | static struct davinci_gpio_platform_data dm365_gpio_platform_data = { | 310 | static struct davinci_gpio_platform_data dm365_gpio_platform_data = { |
| 311 | .no_auto_base = true, | ||
| 312 | .base = 0, | ||
| 276 | .ngpio = 104, | 313 | .ngpio = 104, |
| 277 | .gpio_unbanked = 8, | 314 | .gpio_unbanked = 8, |
| 278 | }; | 315 | }; |
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c index 0720da7809a6..38f92b7d413e 100644 --- a/arch/arm/mach-davinci/dm644x.c +++ b/arch/arm/mach-davinci/dm644x.c | |||
| @@ -492,12 +492,34 @@ static struct resource dm644_gpio_resources[] = { | |||
| 492 | }, | 492 | }, |
| 493 | { /* interrupt */ | 493 | { /* interrupt */ |
| 494 | .start = IRQ_GPIOBNK0, | 494 | .start = IRQ_GPIOBNK0, |
| 495 | .end = IRQ_GPIOBNK0, | ||
| 496 | .flags = IORESOURCE_IRQ, | ||
| 497 | }, | ||
| 498 | { | ||
| 499 | .start = IRQ_GPIOBNK1, | ||
| 500 | .end = IRQ_GPIOBNK1, | ||
| 501 | .flags = IORESOURCE_IRQ, | ||
| 502 | }, | ||
| 503 | { | ||
| 504 | .start = IRQ_GPIOBNK2, | ||
| 505 | .end = IRQ_GPIOBNK2, | ||
| 506 | .flags = IORESOURCE_IRQ, | ||
| 507 | }, | ||
| 508 | { | ||
| 509 | .start = IRQ_GPIOBNK3, | ||
| 510 | .end = IRQ_GPIOBNK3, | ||
| 511 | .flags = IORESOURCE_IRQ, | ||
| 512 | }, | ||
| 513 | { | ||
| 514 | .start = IRQ_GPIOBNK4, | ||
| 495 | .end = IRQ_GPIOBNK4, | 515 | .end = IRQ_GPIOBNK4, |
| 496 | .flags = IORESOURCE_IRQ, | 516 | .flags = IORESOURCE_IRQ, |
| 497 | }, | 517 | }, |
| 498 | }; | 518 | }; |
| 499 | 519 | ||
| 500 | static struct davinci_gpio_platform_data dm644_gpio_platform_data = { | 520 | static struct davinci_gpio_platform_data dm644_gpio_platform_data = { |
| 521 | .no_auto_base = true, | ||
| 522 | .base = 0, | ||
| 501 | .ngpio = 71, | 523 | .ngpio = 71, |
| 502 | }; | 524 | }; |
| 503 | 525 | ||
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c index 6bd2ed069d0d..7dc54b2a610f 100644 --- a/arch/arm/mach-davinci/dm646x.c +++ b/arch/arm/mach-davinci/dm646x.c | |||
| @@ -442,12 +442,24 @@ static struct resource dm646x_gpio_resources[] = { | |||
| 442 | }, | 442 | }, |
| 443 | { /* interrupt */ | 443 | { /* interrupt */ |
| 444 | .start = IRQ_DM646X_GPIOBNK0, | 444 | .start = IRQ_DM646X_GPIOBNK0, |
| 445 | .end = IRQ_DM646X_GPIOBNK0, | ||
| 446 | .flags = IORESOURCE_IRQ, | ||
| 447 | }, | ||
| 448 | { | ||
| 449 | .start = IRQ_DM646X_GPIOBNK1, | ||
| 450 | .end = IRQ_DM646X_GPIOBNK1, | ||
| 451 | .flags = IORESOURCE_IRQ, | ||
| 452 | }, | ||
| 453 | { | ||
| 454 | .start = IRQ_DM646X_GPIOBNK2, | ||
| 445 | .end = IRQ_DM646X_GPIOBNK2, | 455 | .end = IRQ_DM646X_GPIOBNK2, |
| 446 | .flags = IORESOURCE_IRQ, | 456 | .flags = IORESOURCE_IRQ, |
| 447 | }, | 457 | }, |
| 448 | }; | 458 | }; |
| 449 | 459 | ||
| 450 | static struct davinci_gpio_platform_data dm646x_gpio_platform_data = { | 460 | static struct davinci_gpio_platform_data dm646x_gpio_platform_data = { |
| 461 | .no_auto_base = true, | ||
| 462 | .base = 0, | ||
| 451 | .ngpio = 43, | 463 | .ngpio = 43, |
| 452 | }; | 464 | }; |
| 453 | 465 | ||
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c index 3d191fd52910..17886744dbe6 100644 --- a/arch/arm/mach-omap1/board-ams-delta.c +++ b/arch/arm/mach-omap1/board-ams-delta.c | |||
| @@ -750,6 +750,9 @@ static void modem_pm(struct uart_port *port, unsigned int state, unsigned old) | |||
| 750 | struct modem_private_data *priv = port->private_data; | 750 | struct modem_private_data *priv = port->private_data; |
| 751 | int ret; | 751 | int ret; |
| 752 | 752 | ||
| 753 | if (!priv) | ||
| 754 | return; | ||
| 755 | |||
| 753 | if (IS_ERR(priv->regulator)) | 756 | if (IS_ERR(priv->regulator)) |
| 754 | return; | 757 | return; |
| 755 | 758 | ||
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 9500b6e27380..f86b72d1d59e 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c | |||
| @@ -209,11 +209,61 @@ static int __init omapdss_init_fbdev(void) | |||
| 209 | 209 | ||
| 210 | return 0; | 210 | return 0; |
| 211 | } | 211 | } |
| 212 | #else | 212 | |
| 213 | static inline int omapdss_init_fbdev(void) | 213 | static const char * const omapdss_compat_names[] __initconst = { |
| 214 | "ti,omap2-dss", | ||
| 215 | "ti,omap3-dss", | ||
| 216 | "ti,omap4-dss", | ||
| 217 | "ti,omap5-dss", | ||
| 218 | "ti,dra7-dss", | ||
| 219 | }; | ||
| 220 | |||
| 221 | static struct device_node * __init omapdss_find_dss_of_node(void) | ||
| 214 | { | 222 | { |
| 215 | return 0; | 223 | struct device_node *node; |
| 224 | int i; | ||
| 225 | |||
| 226 | for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) { | ||
| 227 | node = of_find_compatible_node(NULL, NULL, | ||
| 228 | omapdss_compat_names[i]); | ||
| 229 | if (node) | ||
| 230 | return node; | ||
| 231 | } | ||
| 232 | |||
| 233 | return NULL; | ||
| 216 | } | 234 | } |
| 235 | |||
| 236 | static int __init omapdss_init_of(void) | ||
| 237 | { | ||
| 238 | int r; | ||
| 239 | struct device_node *node; | ||
| 240 | struct platform_device *pdev; | ||
| 241 | |||
| 242 | /* only create dss helper devices if dss is enabled in the .dts */ | ||
| 243 | |||
| 244 | node = omapdss_find_dss_of_node(); | ||
| 245 | if (!node) | ||
| 246 | return 0; | ||
| 247 | |||
| 248 | if (!of_device_is_available(node)) | ||
| 249 | return 0; | ||
| 250 | |||
| 251 | pdev = of_find_device_by_node(node); | ||
| 252 | |||
| 253 | if (!pdev) { | ||
| 254 | pr_err("Unable to find DSS platform device\n"); | ||
| 255 | return -ENODEV; | ||
| 256 | } | ||
| 257 | |||
| 258 | r = of_platform_populate(node, NULL, NULL, &pdev->dev); | ||
| 259 | if (r) { | ||
| 260 | pr_err("Unable to populate DSS submodule devices\n"); | ||
| 261 | return r; | ||
| 262 | } | ||
| 263 | |||
| 264 | return omapdss_init_fbdev(); | ||
| 265 | } | ||
| 266 | omap_device_initcall(omapdss_init_of); | ||
| 217 | #endif /* CONFIG_FB_OMAP2 */ | 267 | #endif /* CONFIG_FB_OMAP2 */ |
| 218 | 268 | ||
| 219 | static void dispc_disable_outputs(void) | 269 | static void dispc_disable_outputs(void) |
| @@ -361,58 +411,3 @@ int omap_dss_reset(struct omap_hwmod *oh) | |||
| 361 | 411 | ||
| 362 | return r; | 412 | return r; |
| 363 | } | 413 | } |
| 364 | |||
| 365 | static const char * const omapdss_compat_names[] __initconst = { | ||
| 366 | "ti,omap2-dss", | ||
| 367 | "ti,omap3-dss", | ||
| 368 | "ti,omap4-dss", | ||
| 369 | "ti,omap5-dss", | ||
| 370 | "ti,dra7-dss", | ||
| 371 | }; | ||
| 372 | |||
| 373 | static struct device_node * __init omapdss_find_dss_of_node(void) | ||
| 374 | { | ||
| 375 | struct device_node *node; | ||
| 376 | int i; | ||
| 377 | |||
| 378 | for (i = 0; i < ARRAY_SIZE(omapdss_compat_names); ++i) { | ||
| 379 | node = of_find_compatible_node(NULL, NULL, | ||
| 380 | omapdss_compat_names[i]); | ||
| 381 | if (node) | ||
| 382 | return node; | ||
| 383 | } | ||
| 384 | |||
| 385 | return NULL; | ||
| 386 | } | ||
| 387 | |||
| 388 | static int __init omapdss_init_of(void) | ||
| 389 | { | ||
| 390 | int r; | ||
| 391 | struct device_node *node; | ||
| 392 | struct platform_device *pdev; | ||
| 393 | |||
| 394 | /* only create dss helper devices if dss is enabled in the .dts */ | ||
| 395 | |||
| 396 | node = omapdss_find_dss_of_node(); | ||
| 397 | if (!node) | ||
| 398 | return 0; | ||
| 399 | |||
| 400 | if (!of_device_is_available(node)) | ||
| 401 | return 0; | ||
| 402 | |||
| 403 | pdev = of_find_device_by_node(node); | ||
| 404 | |||
| 405 | if (!pdev) { | ||
| 406 | pr_err("Unable to find DSS platform device\n"); | ||
| 407 | return -ENODEV; | ||
| 408 | } | ||
| 409 | |||
| 410 | r = of_platform_populate(node, NULL, NULL, &pdev->dev); | ||
| 411 | if (r) { | ||
| 412 | pr_err("Unable to populate DSS submodule devices\n"); | ||
| 413 | return r; | ||
| 414 | } | ||
| 415 | |||
| 416 | return omapdss_init_fbdev(); | ||
| 417 | } | ||
| 418 | omap_device_initcall(omapdss_init_of); | ||
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c index 7b95729e8359..38a1be6c3694 100644 --- a/arch/arm/mach-omap2/prm44xx.c +++ b/arch/arm/mach-omap2/prm44xx.c | |||
| @@ -351,7 +351,7 @@ static void omap44xx_prm_reconfigure_io_chain(void) | |||
| 351 | * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and | 351 | * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and |
| 352 | * omap44xx_prm_reconfigure_io_chain() must be called. No return value. | 352 | * omap44xx_prm_reconfigure_io_chain() must be called. No return value. |
| 353 | */ | 353 | */ |
| 354 | static void __init omap44xx_prm_enable_io_wakeup(void) | 354 | static void omap44xx_prm_enable_io_wakeup(void) |
| 355 | { | 355 | { |
| 356 | s32 inst = omap4_prmst_get_prm_dev_inst(); | 356 | s32 inst = omap4_prmst_get_prm_dev_inst(); |
| 357 | 357 | ||
diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c index 5544b82a2e7a..9a07916af8dd 100644 --- a/arch/arm/mm/proc-v7-bugs.c +++ b/arch/arm/mm/proc-v7-bugs.c | |||
| @@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void) | |||
| 52 | case ARM_CPU_PART_CORTEX_A17: | 52 | case ARM_CPU_PART_CORTEX_A17: |
| 53 | case ARM_CPU_PART_CORTEX_A73: | 53 | case ARM_CPU_PART_CORTEX_A73: |
| 54 | case ARM_CPU_PART_CORTEX_A75: | 54 | case ARM_CPU_PART_CORTEX_A75: |
| 55 | if (processor.switch_mm != cpu_v7_bpiall_switch_mm) | ||
| 56 | goto bl_error; | ||
| 57 | per_cpu(harden_branch_predictor_fn, cpu) = | 55 | per_cpu(harden_branch_predictor_fn, cpu) = |
| 58 | harden_branch_predictor_bpiall; | 56 | harden_branch_predictor_bpiall; |
| 59 | spectre_v2_method = "BPIALL"; | 57 | spectre_v2_method = "BPIALL"; |
| @@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void) | |||
| 61 | 59 | ||
| 62 | case ARM_CPU_PART_CORTEX_A15: | 60 | case ARM_CPU_PART_CORTEX_A15: |
| 63 | case ARM_CPU_PART_BRAHMA_B15: | 61 | case ARM_CPU_PART_BRAHMA_B15: |
| 64 | if (processor.switch_mm != cpu_v7_iciallu_switch_mm) | ||
| 65 | goto bl_error; | ||
| 66 | per_cpu(harden_branch_predictor_fn, cpu) = | 62 | per_cpu(harden_branch_predictor_fn, cpu) = |
| 67 | harden_branch_predictor_iciallu; | 63 | harden_branch_predictor_iciallu; |
| 68 | spectre_v2_method = "ICIALLU"; | 64 | spectre_v2_method = "ICIALLU"; |
| @@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void) | |||
| 88 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); | 84 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
| 89 | if ((int)res.a0 != 0) | 85 | if ((int)res.a0 != 0) |
| 90 | break; | 86 | break; |
| 91 | if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu) | ||
| 92 | goto bl_error; | ||
| 93 | per_cpu(harden_branch_predictor_fn, cpu) = | 87 | per_cpu(harden_branch_predictor_fn, cpu) = |
| 94 | call_hvc_arch_workaround_1; | 88 | call_hvc_arch_workaround_1; |
| 95 | processor.switch_mm = cpu_v7_hvc_switch_mm; | 89 | cpu_do_switch_mm = cpu_v7_hvc_switch_mm; |
| 96 | spectre_v2_method = "hypervisor"; | 90 | spectre_v2_method = "hypervisor"; |
| 97 | break; | 91 | break; |
| 98 | 92 | ||
| @@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void) | |||
| 101 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); | 95 | ARM_SMCCC_ARCH_WORKAROUND_1, &res); |
| 102 | if ((int)res.a0 != 0) | 96 | if ((int)res.a0 != 0) |
| 103 | break; | 97 | break; |
| 104 | if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu) | ||
| 105 | goto bl_error; | ||
| 106 | per_cpu(harden_branch_predictor_fn, cpu) = | 98 | per_cpu(harden_branch_predictor_fn, cpu) = |
| 107 | call_smc_arch_workaround_1; | 99 | call_smc_arch_workaround_1; |
| 108 | processor.switch_mm = cpu_v7_smc_switch_mm; | 100 | cpu_do_switch_mm = cpu_v7_smc_switch_mm; |
| 109 | spectre_v2_method = "firmware"; | 101 | spectre_v2_method = "firmware"; |
| 110 | break; | 102 | break; |
| 111 | 103 | ||
| @@ -119,11 +111,6 @@ static void cpu_v7_spectre_init(void) | |||
| 119 | if (spectre_v2_method) | 111 | if (spectre_v2_method) |
| 120 | pr_info("CPU%u: Spectre v2: using %s workaround\n", | 112 | pr_info("CPU%u: Spectre v2: using %s workaround\n", |
| 121 | smp_processor_id(), spectre_v2_method); | 113 | smp_processor_id(), spectre_v2_method); |
| 122 | return; | ||
| 123 | |||
| 124 | bl_error: | ||
| 125 | pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n", | ||
| 126 | cpu); | ||
| 127 | } | 114 | } |
| 128 | #else | 115 | #else |
| 129 | static void cpu_v7_spectre_init(void) | 116 | static void cpu_v7_spectre_init(void) |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 6fe52819e014..339eb17c9808 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
| @@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm) | |||
| 112 | hvc #0 | 112 | hvc #0 |
| 113 | ldmfd sp!, {r0 - r3} | 113 | ldmfd sp!, {r0 - r3} |
| 114 | b cpu_v7_switch_mm | 114 | b cpu_v7_switch_mm |
| 115 | ENDPROC(cpu_v7_smc_switch_mm) | 115 | ENDPROC(cpu_v7_hvc_switch_mm) |
| 116 | #endif | 116 | #endif |
| 117 | ENTRY(cpu_v7_iciallu_switch_mm) | 117 | ENTRY(cpu_v7_iciallu_switch_mm) |
| 118 | mov r3, #0 | 118 | mov r3, #0 |
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index aff6e6eadc70..ee7b07938dd5 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c | |||
| @@ -573,7 +573,7 @@ int vfp_preserve_user_clear_hwstate(struct user_vfp *ufp, | |||
| 573 | */ | 573 | */ |
| 574 | ufp_exc->fpexc = hwstate->fpexc; | 574 | ufp_exc->fpexc = hwstate->fpexc; |
| 575 | ufp_exc->fpinst = hwstate->fpinst; | 575 | ufp_exc->fpinst = hwstate->fpinst; |
| 576 | ufp_exc->fpinst2 = ufp_exc->fpinst2; | 576 | ufp_exc->fpinst2 = hwstate->fpinst2; |
| 577 | 577 | ||
| 578 | /* Ensure that VFP is disabled. */ | 578 | /* Ensure that VFP is disabled. */ |
| 579 | vfp_flush_hwstate(thread); | 579 | vfp_flush_hwstate(thread); |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 787d7850e064..ea2ab0330e3a 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
| @@ -497,6 +497,24 @@ config ARM64_ERRATUM_1188873 | |||
| 497 | 497 | ||
| 498 | If unsure, say Y. | 498 | If unsure, say Y. |
| 499 | 499 | ||
| 500 | config ARM64_ERRATUM_1286807 | ||
| 501 | bool "Cortex-A76: Modification of the translation table for a virtual address might lead to read-after-read ordering violation" | ||
| 502 | default y | ||
| 503 | select ARM64_WORKAROUND_REPEAT_TLBI | ||
| 504 | help | ||
| 505 | This option adds workaround for ARM Cortex-A76 erratum 1286807 | ||
| 506 | |||
| 507 | On the affected Cortex-A76 cores (r0p0 to r3p0), if a virtual | ||
| 508 | address for a cacheable mapping of a location is being | ||
| 509 | accessed by a core while another core is remapping the virtual | ||
| 510 | address to a new physical page using the recommended | ||
| 511 | break-before-make sequence, then under very rare circumstances | ||
| 512 | TLBI+DSB completes before a read using the translation being | ||
| 513 | invalidated has been observed by other observers. The | ||
| 514 | workaround repeats the TLBI+DSB operation. | ||
| 515 | |||
| 516 | If unsure, say Y. | ||
| 517 | |||
| 500 | config CAVIUM_ERRATUM_22375 | 518 | config CAVIUM_ERRATUM_22375 |
| 501 | bool "Cavium erratum 22375, 24313" | 519 | bool "Cavium erratum 22375, 24313" |
| 502 | default y | 520 | default y |
| @@ -566,9 +584,16 @@ config QCOM_FALKOR_ERRATUM_1003 | |||
| 566 | is unchanged. Work around the erratum by invalidating the walk cache | 584 | is unchanged. Work around the erratum by invalidating the walk cache |
| 567 | entries for the trampoline before entering the kernel proper. | 585 | entries for the trampoline before entering the kernel proper. |
| 568 | 586 | ||
| 587 | config ARM64_WORKAROUND_REPEAT_TLBI | ||
| 588 | bool | ||
| 589 | help | ||
| 590 | Enable the repeat TLBI workaround for Falkor erratum 1009 and | ||
| 591 | Cortex-A76 erratum 1286807. | ||
| 592 | |||
| 569 | config QCOM_FALKOR_ERRATUM_1009 | 593 | config QCOM_FALKOR_ERRATUM_1009 |
| 570 | bool "Falkor E1009: Prematurely complete a DSB after a TLBI" | 594 | bool "Falkor E1009: Prematurely complete a DSB after a TLBI" |
| 571 | default y | 595 | default y |
| 596 | select ARM64_WORKAROUND_REPEAT_TLBI | ||
| 572 | help | 597 | help |
| 573 | On Falkor v1, the CPU may prematurely complete a DSB following a | 598 | On Falkor v1, the CPU may prematurely complete a DSB following a |
| 574 | TLBI xxIS invalidate maintenance operation. Repeat the TLBI operation | 599 | TLBI xxIS invalidate maintenance operation. Repeat the TLBI operation |
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi index 8253a1a9e985..fef7351e9f67 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi | |||
| @@ -139,6 +139,7 @@ | |||
| 139 | clock-names = "stmmaceth"; | 139 | clock-names = "stmmaceth"; |
| 140 | tx-fifo-depth = <16384>; | 140 | tx-fifo-depth = <16384>; |
| 141 | rx-fifo-depth = <16384>; | 141 | rx-fifo-depth = <16384>; |
| 142 | snps,multicast-filter-bins = <256>; | ||
| 142 | status = "disabled"; | 143 | status = "disabled"; |
| 143 | }; | 144 | }; |
| 144 | 145 | ||
| @@ -154,6 +155,7 @@ | |||
| 154 | clock-names = "stmmaceth"; | 155 | clock-names = "stmmaceth"; |
| 155 | tx-fifo-depth = <16384>; | 156 | tx-fifo-depth = <16384>; |
| 156 | rx-fifo-depth = <16384>; | 157 | rx-fifo-depth = <16384>; |
| 158 | snps,multicast-filter-bins = <256>; | ||
| 157 | status = "disabled"; | 159 | status = "disabled"; |
| 158 | }; | 160 | }; |
| 159 | 161 | ||
| @@ -169,6 +171,7 @@ | |||
| 169 | clock-names = "stmmaceth"; | 171 | clock-names = "stmmaceth"; |
| 170 | tx-fifo-depth = <16384>; | 172 | tx-fifo-depth = <16384>; |
| 171 | rx-fifo-depth = <16384>; | 173 | rx-fifo-depth = <16384>; |
| 174 | snps,multicast-filter-bins = <256>; | ||
| 172 | status = "disabled"; | 175 | status = "disabled"; |
| 173 | }; | 176 | }; |
| 174 | 177 | ||
diff --git a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi index b4276da1fb0d..11fd1fe8bdb5 100644 --- a/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi +++ b/arch/arm64/boot/dts/qcom/msm8998-mtp.dtsi | |||
| @@ -241,3 +241,7 @@ | |||
| 241 | }; | 241 | }; |
| 242 | }; | 242 | }; |
| 243 | }; | 243 | }; |
| 244 | |||
| 245 | &tlmm { | ||
| 246 | gpio-reserved-ranges = <0 4>, <81 4>; | ||
| 247 | }; | ||
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts index eedfaf8922e2..d667eee4e6d0 100644 --- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dts +++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dts | |||
| @@ -352,6 +352,10 @@ | |||
| 352 | status = "okay"; | 352 | status = "okay"; |
| 353 | }; | 353 | }; |
| 354 | 354 | ||
| 355 | &tlmm { | ||
| 356 | gpio-reserved-ranges = <0 4>, <81 4>; | ||
| 357 | }; | ||
| 358 | |||
| 355 | &uart9 { | 359 | &uart9 { |
| 356 | status = "okay"; | 360 | status = "okay"; |
| 357 | }; | 361 | }; |
diff --git a/arch/arm64/boot/dts/renesas/r8a7795.dtsi b/arch/arm64/boot/dts/renesas/r8a7795.dtsi index b5f2273caca4..a79c8d369e0b 100644 --- a/arch/arm64/boot/dts/renesas/r8a7795.dtsi +++ b/arch/arm64/boot/dts/renesas/r8a7795.dtsi | |||
| @@ -652,7 +652,7 @@ | |||
| 652 | clock-names = "fck", "brg_int", "scif_clk"; | 652 | clock-names = "fck", "brg_int", "scif_clk"; |
| 653 | dmas = <&dmac1 0x35>, <&dmac1 0x34>, | 653 | dmas = <&dmac1 0x35>, <&dmac1 0x34>, |
| 654 | <&dmac2 0x35>, <&dmac2 0x34>; | 654 | <&dmac2 0x35>, <&dmac2 0x34>; |
| 655 | dma-names = "tx", "rx"; | 655 | dma-names = "tx", "rx", "tx", "rx"; |
| 656 | power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; | 656 | power-domains = <&sysc R8A7795_PD_ALWAYS_ON>; |
| 657 | resets = <&cpg 518>; | 657 | resets = <&cpg 518>; |
| 658 | status = "disabled"; | 658 | status = "disabled"; |
diff --git a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts index fe2e2c051cc9..5a7012be0d6a 100644 --- a/arch/arm64/boot/dts/renesas/r8a77980-condor.dts +++ b/arch/arm64/boot/dts/renesas/r8a77980-condor.dts | |||
| @@ -15,7 +15,7 @@ | |||
| 15 | 15 | ||
| 16 | aliases { | 16 | aliases { |
| 17 | serial0 = &scif0; | 17 | serial0 = &scif0; |
| 18 | ethernet0 = &avb; | 18 | ethernet0 = &gether; |
| 19 | }; | 19 | }; |
| 20 | 20 | ||
| 21 | chosen { | 21 | chosen { |
| @@ -97,23 +97,6 @@ | |||
| 97 | }; | 97 | }; |
| 98 | }; | 98 | }; |
| 99 | 99 | ||
| 100 | &avb { | ||
| 101 | pinctrl-0 = <&avb_pins>; | ||
| 102 | pinctrl-names = "default"; | ||
| 103 | |||
| 104 | phy-mode = "rgmii-id"; | ||
| 105 | phy-handle = <&phy0>; | ||
| 106 | renesas,no-ether-link; | ||
| 107 | status = "okay"; | ||
| 108 | |||
| 109 | phy0: ethernet-phy@0 { | ||
| 110 | rxc-skew-ps = <1500>; | ||
| 111 | reg = <0>; | ||
| 112 | interrupt-parent = <&gpio1>; | ||
| 113 | interrupts = <17 IRQ_TYPE_LEVEL_LOW>; | ||
| 114 | }; | ||
| 115 | }; | ||
| 116 | |||
| 117 | &canfd { | 100 | &canfd { |
| 118 | pinctrl-0 = <&canfd0_pins>; | 101 | pinctrl-0 = <&canfd0_pins>; |
| 119 | pinctrl-names = "default"; | 102 | pinctrl-names = "default"; |
| @@ -139,6 +122,23 @@ | |||
| 139 | clock-frequency = <32768>; | 122 | clock-frequency = <32768>; |
| 140 | }; | 123 | }; |
| 141 | 124 | ||
| 125 | &gether { | ||
| 126 | pinctrl-0 = <&gether_pins>; | ||
| 127 | pinctrl-names = "default"; | ||
| 128 | |||
| 129 | phy-mode = "rgmii-id"; | ||
| 130 | phy-handle = <&phy0>; | ||
| 131 | renesas,no-ether-link; | ||
| 132 | status = "okay"; | ||
| 133 | |||
| 134 | phy0: ethernet-phy@0 { | ||
| 135 | rxc-skew-ps = <1500>; | ||
| 136 | reg = <0>; | ||
| 137 | interrupt-parent = <&gpio4>; | ||
| 138 | interrupts = <23 IRQ_TYPE_LEVEL_LOW>; | ||
| 139 | }; | ||
| 140 | }; | ||
| 141 | |||
| 142 | &i2c0 { | 142 | &i2c0 { |
| 143 | pinctrl-0 = <&i2c0_pins>; | 143 | pinctrl-0 = <&i2c0_pins>; |
| 144 | pinctrl-names = "default"; | 144 | pinctrl-names = "default"; |
| @@ -236,16 +236,17 @@ | |||
| 236 | }; | 236 | }; |
| 237 | 237 | ||
| 238 | &pfc { | 238 | &pfc { |
| 239 | avb_pins: avb { | ||
| 240 | groups = "avb_mdio", "avb_rgmii"; | ||
| 241 | function = "avb"; | ||
| 242 | }; | ||
| 243 | |||
| 244 | canfd0_pins: canfd0 { | 239 | canfd0_pins: canfd0 { |
| 245 | groups = "canfd0_data_a"; | 240 | groups = "canfd0_data_a"; |
| 246 | function = "canfd0"; | 241 | function = "canfd0"; |
| 247 | }; | 242 | }; |
| 248 | 243 | ||
| 244 | gether_pins: gether { | ||
| 245 | groups = "gether_mdio_a", "gether_rgmii", | ||
| 246 | "gether_txcrefclk", "gether_txcrefclk_mega"; | ||
| 247 | function = "gether"; | ||
| 248 | }; | ||
| 249 | |||
| 249 | i2c0_pins: i2c0 { | 250 | i2c0_pins: i2c0 { |
| 250 | groups = "i2c0"; | 251 | groups = "i2c0"; |
| 251 | function = "i2c0"; | 252 | function = "i2c0"; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts index 2dceeea29b83..1e6a71066c16 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts +++ b/arch/arm64/boot/dts/rockchip/rk3399-puma-haikou.dts | |||
| @@ -153,7 +153,7 @@ | |||
| 153 | }; | 153 | }; |
| 154 | 154 | ||
| 155 | &pcie0 { | 155 | &pcie0 { |
| 156 | ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_LOW>; | 156 | ep-gpios = <&gpio4 RK_PC6 GPIO_ACTIVE_HIGH>; |
| 157 | num-lanes = <4>; | 157 | num-lanes = <4>; |
| 158 | pinctrl-names = "default"; | 158 | pinctrl-names = "default"; |
| 159 | pinctrl-0 = <&pcie_clkreqn_cpm>; | 159 | pinctrl-0 = <&pcie_clkreqn_cpm>; |
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi index 6c8c4ab044aa..56abbb08c133 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock960.dtsi | |||
| @@ -57,18 +57,6 @@ | |||
| 57 | regulator-always-on; | 57 | regulator-always-on; |
| 58 | vin-supply = <&vcc_sys>; | 58 | vin-supply = <&vcc_sys>; |
| 59 | }; | 59 | }; |
| 60 | |||
| 61 | vdd_log: vdd-log { | ||
| 62 | compatible = "pwm-regulator"; | ||
| 63 | pwms = <&pwm2 0 25000 0>; | ||
| 64 | regulator-name = "vdd_log"; | ||
| 65 | regulator-min-microvolt = <800000>; | ||
| 66 | regulator-max-microvolt = <1400000>; | ||
| 67 | regulator-always-on; | ||
| 68 | regulator-boot-on; | ||
| 69 | vin-supply = <&vcc_sys>; | ||
| 70 | }; | ||
| 71 | |||
| 72 | }; | 60 | }; |
| 73 | 61 | ||
| 74 | &cpu_l0 { | 62 | &cpu_l0 { |
diff --git a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi index affc3c309353..8d7b47f9dfbf 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-wakeup.dtsi | |||
| @@ -36,7 +36,7 @@ | |||
| 36 | 36 | ||
| 37 | wkup_uart0: serial@42300000 { | 37 | wkup_uart0: serial@42300000 { |
| 38 | compatible = "ti,am654-uart"; | 38 | compatible = "ti,am654-uart"; |
| 39 | reg = <0x00 0x42300000 0x00 0x100>; | 39 | reg = <0x42300000 0x100>; |
| 40 | reg-shift = <2>; | 40 | reg-shift = <2>; |
| 41 | reg-io-width = <4>; | 41 | reg-io-width = <4>; |
| 42 | interrupts = <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>; | 42 | interrupts = <GIC_SPI 697 IRQ_TYPE_LEVEL_HIGH>; |
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h index caa955f10e19..fac54fb050d0 100644 --- a/arch/arm64/include/asm/ftrace.h +++ b/arch/arm64/include/asm/ftrace.h | |||
| @@ -56,6 +56,19 @@ static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs) | |||
| 56 | { | 56 | { |
| 57 | return is_compat_task(); | 57 | return is_compat_task(); |
| 58 | } | 58 | } |
| 59 | |||
| 60 | #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME | ||
| 61 | |||
| 62 | static inline bool arch_syscall_match_sym_name(const char *sym, | ||
| 63 | const char *name) | ||
| 64 | { | ||
| 65 | /* | ||
| 66 | * Since all syscall functions have __arm64_ prefix, we must skip it. | ||
| 67 | * However, as we described above, we decided to ignore compat | ||
| 68 | * syscalls, so we don't care about __arm64_compat_ prefix here. | ||
| 69 | */ | ||
| 70 | return !strcmp(sym + 8, name); | ||
| 71 | } | ||
| 59 | #endif /* ifndef __ASSEMBLY__ */ | 72 | #endif /* ifndef __ASSEMBLY__ */ |
| 60 | 73 | ||
| 61 | #endif /* __ASM_FTRACE_H */ | 74 | #endif /* __ASM_FTRACE_H */ |
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 3e2091708b8e..6b0d4dff5012 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h | |||
| @@ -24,6 +24,14 @@ | |||
| 24 | #define KERNEL_DS UL(-1) | 24 | #define KERNEL_DS UL(-1) |
| 25 | #define USER_DS (TASK_SIZE_64 - 1) | 25 | #define USER_DS (TASK_SIZE_64 - 1) |
| 26 | 26 | ||
| 27 | /* | ||
| 28 | * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is | ||
| 29 | * no point in shifting all network buffers by 2 bytes just to make some IP | ||
| 30 | * header fields appear aligned in memory, potentially sacrificing some DMA | ||
| 31 | * performance on some platforms. | ||
| 32 | */ | ||
| 33 | #define NET_IP_ALIGN 0 | ||
| 34 | |||
| 27 | #ifndef __ASSEMBLY__ | 35 | #ifndef __ASSEMBLY__ |
| 28 | #ifdef __KERNEL__ | 36 | #ifdef __KERNEL__ |
| 29 | 37 | ||
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 0c909c4a932f..842fb9572661 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h | |||
| @@ -468,7 +468,7 @@ | |||
| 468 | SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ | 468 | SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ |
| 469 | SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) | 469 | SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) |
| 470 | 470 | ||
| 471 | #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffff | 471 | #if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL |
| 472 | #error "Inconsistent SCTLR_EL2 set/clear bits" | 472 | #error "Inconsistent SCTLR_EL2 set/clear bits" |
| 473 | #endif | 473 | #endif |
| 474 | 474 | ||
| @@ -509,7 +509,7 @@ | |||
| 509 | SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ | 509 | SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ |
| 510 | SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0) | 510 | SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0) |
| 511 | 511 | ||
| 512 | #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffff | 512 | #if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL |
| 513 | #error "Inconsistent SCTLR_EL1 set/clear bits" | 513 | #error "Inconsistent SCTLR_EL1 set/clear bits" |
| 514 | #endif | 514 | #endif |
| 515 | 515 | ||
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index c3c0387aee18..5dfd23897dea 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h | |||
| @@ -41,14 +41,14 @@ | |||
| 41 | ALTERNATIVE("nop\n nop", \ | 41 | ALTERNATIVE("nop\n nop", \ |
| 42 | "dsb ish\n tlbi " #op, \ | 42 | "dsb ish\n tlbi " #op, \ |
| 43 | ARM64_WORKAROUND_REPEAT_TLBI, \ | 43 | ARM64_WORKAROUND_REPEAT_TLBI, \ |
| 44 | CONFIG_QCOM_FALKOR_ERRATUM_1009) \ | 44 | CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \ |
| 45 | : : ) | 45 | : : ) |
| 46 | 46 | ||
| 47 | #define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \ | 47 | #define __TLBI_1(op, arg) asm ("tlbi " #op ", %0\n" \ |
| 48 | ALTERNATIVE("nop\n nop", \ | 48 | ALTERNATIVE("nop\n nop", \ |
| 49 | "dsb ish\n tlbi " #op ", %0", \ | 49 | "dsb ish\n tlbi " #op ", %0", \ |
| 50 | ARM64_WORKAROUND_REPEAT_TLBI, \ | 50 | ARM64_WORKAROUND_REPEAT_TLBI, \ |
| 51 | CONFIG_QCOM_FALKOR_ERRATUM_1009) \ | 51 | CONFIG_ARM64_WORKAROUND_REPEAT_TLBI) \ |
| 52 | : : "r" (arg)) | 52 | : : "r" (arg)) |
| 53 | 53 | ||
| 54 | #define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg) | 54 | #define __TLBI_N(op, arg, n, ...) __TLBI_##n(op, arg) |
diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index a509e35132d2..6ad715d67df8 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c | |||
| @@ -570,6 +570,20 @@ static const struct midr_range arm64_harden_el2_vectors[] = { | |||
| 570 | 570 | ||
| 571 | #endif | 571 | #endif |
| 572 | 572 | ||
| 573 | #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI | ||
| 574 | |||
| 575 | static const struct midr_range arm64_repeat_tlbi_cpus[] = { | ||
| 576 | #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009 | ||
| 577 | MIDR_RANGE(MIDR_QCOM_FALKOR_V1, 0, 0, 0, 0), | ||
| 578 | #endif | ||
| 579 | #ifdef CONFIG_ARM64_ERRATUM_1286807 | ||
| 580 | MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0), | ||
| 581 | #endif | ||
| 582 | {}, | ||
| 583 | }; | ||
| 584 | |||
| 585 | #endif | ||
| 586 | |||
| 573 | const struct arm64_cpu_capabilities arm64_errata[] = { | 587 | const struct arm64_cpu_capabilities arm64_errata[] = { |
| 574 | #if defined(CONFIG_ARM64_ERRATUM_826319) || \ | 588 | #if defined(CONFIG_ARM64_ERRATUM_826319) || \ |
| 575 | defined(CONFIG_ARM64_ERRATUM_827319) || \ | 589 | defined(CONFIG_ARM64_ERRATUM_827319) || \ |
| @@ -695,11 +709,11 @@ const struct arm64_cpu_capabilities arm64_errata[] = { | |||
| 695 | .matches = is_kryo_midr, | 709 | .matches = is_kryo_midr, |
| 696 | }, | 710 | }, |
| 697 | #endif | 711 | #endif |
| 698 | #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009 | 712 | #ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI |
| 699 | { | 713 | { |
| 700 | .desc = "Qualcomm Technologies Falkor erratum 1009", | 714 | .desc = "Qualcomm erratum 1009, ARM erratum 1286807", |
| 701 | .capability = ARM64_WORKAROUND_REPEAT_TLBI, | 715 | .capability = ARM64_WORKAROUND_REPEAT_TLBI, |
| 702 | ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0), | 716 | ERRATA_MIDR_RANGE_LIST(arm64_repeat_tlbi_cpus), |
| 703 | }, | 717 | }, |
| 704 | #endif | 718 | #endif |
| 705 | #ifdef CONFIG_ARM64_ERRATUM_858921 | 719 | #ifdef CONFIG_ARM64_ERRATUM_858921 |
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index af50064dea51..aec5ecb85737 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c | |||
| @@ -1333,7 +1333,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = { | |||
| 1333 | .cpu_enable = cpu_enable_hw_dbm, | 1333 | .cpu_enable = cpu_enable_hw_dbm, |
| 1334 | }, | 1334 | }, |
| 1335 | #endif | 1335 | #endif |
| 1336 | #ifdef CONFIG_ARM64_SSBD | ||
| 1337 | { | 1336 | { |
| 1338 | .desc = "CRC32 instructions", | 1337 | .desc = "CRC32 instructions", |
| 1339 | .capability = ARM64_HAS_CRC32, | 1338 | .capability = ARM64_HAS_CRC32, |
| @@ -1343,6 +1342,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { | |||
| 1343 | .field_pos = ID_AA64ISAR0_CRC32_SHIFT, | 1342 | .field_pos = ID_AA64ISAR0_CRC32_SHIFT, |
| 1344 | .min_field_value = 1, | 1343 | .min_field_value = 1, |
| 1345 | }, | 1344 | }, |
| 1345 | #ifdef CONFIG_ARM64_SSBD | ||
| 1346 | { | 1346 | { |
| 1347 | .desc = "Speculative Store Bypassing Safe (SSBS)", | 1347 | .desc = "Speculative Store Bypassing Safe (SSBS)", |
| 1348 | .capability = ARM64_SSBS, | 1348 | .capability = ARM64_SSBS, |
diff --git a/arch/arm64/kernel/ftrace.c b/arch/arm64/kernel/ftrace.c index 50986e388d2b..57e962290df3 100644 --- a/arch/arm64/kernel/ftrace.c +++ b/arch/arm64/kernel/ftrace.c | |||
| @@ -216,8 +216,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
| 216 | { | 216 | { |
| 217 | unsigned long return_hooker = (unsigned long)&return_to_handler; | 217 | unsigned long return_hooker = (unsigned long)&return_to_handler; |
| 218 | unsigned long old; | 218 | unsigned long old; |
| 219 | struct ftrace_graph_ent trace; | ||
| 220 | int err; | ||
| 221 | 219 | ||
| 222 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 220 | return; |
| 223 | return; | 221 | return; |
| @@ -229,18 +227,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
| 229 | */ | 227 | */ |
| 230 | old = *parent; | 228 | old = *parent; |
| 231 | 229 | ||
| 232 | trace.func = self_addr; | 230 | if (!function_graph_enter(old, self_addr, frame_pointer, NULL)) |
| 233 | trace.depth = current->curr_ret_stack + 1; | ||
| 234 | |||
| 235 | /* Only trace if the calling function expects to */ | ||
| 236 | if (!ftrace_graph_entry(&trace)) | ||
| 237 | return; | ||
| 238 | |||
| 239 | err = ftrace_push_return_trace(old, self_addr, &trace.depth, | ||
| 240 | frame_pointer, NULL); | ||
| 241 | if (err == -EBUSY) | ||
| 242 | return; | ||
| 243 | else | ||
| 244 | *parent = return_hooker; | 231 | *parent = return_hooker; |
| 245 | } | 232 | } |
| 246 | 233 | ||
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 953e316521fc..f4fc1e0544b7 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
| @@ -313,6 +313,7 @@ void __init setup_arch(char **cmdline_p) | |||
| 313 | arm64_memblock_init(); | 313 | arm64_memblock_init(); |
| 314 | 314 | ||
| 315 | paging_init(); | 315 | paging_init(); |
| 316 | efi_apply_persistent_mem_reservations(); | ||
| 316 | 317 | ||
| 317 | acpi_table_upgrade(); | 318 | acpi_table_upgrade(); |
| 318 | 319 | ||
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 9d9582cac6c4..9b432d9fcada 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
| @@ -483,8 +483,6 @@ void __init arm64_memblock_init(void) | |||
| 483 | high_memory = __va(memblock_end_of_DRAM() - 1) + 1; | 483 | high_memory = __va(memblock_end_of_DRAM() - 1) + 1; |
| 484 | 484 | ||
| 485 | dma_contiguous_reserve(arm64_dma_phys_limit); | 485 | dma_contiguous_reserve(arm64_dma_phys_limit); |
| 486 | |||
| 487 | memblock_allow_resize(); | ||
| 488 | } | 486 | } |
| 489 | 487 | ||
| 490 | void __init bootmem_init(void) | 488 | void __init bootmem_init(void) |
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 394b8d554def..d1d6601b385d 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
| @@ -659,6 +659,8 @@ void __init paging_init(void) | |||
| 659 | 659 | ||
| 660 | memblock_free(__pa_symbol(init_pg_dir), | 660 | memblock_free(__pa_symbol(init_pg_dir), |
| 661 | __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir)); | 661 | __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir)); |
| 662 | |||
| 663 | memblock_allow_resize(); | ||
| 662 | } | 664 | } |
| 663 | 665 | ||
| 664 | /* | 666 | /* |
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index a6fdaea07c63..89198017e8e6 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c | |||
| @@ -351,7 +351,8 @@ static void build_epilogue(struct jit_ctx *ctx) | |||
| 351 | * >0 - successfully JITed a 16-byte eBPF instruction. | 351 | * >0 - successfully JITed a 16-byte eBPF instruction. |
| 352 | * <0 - failed to JIT. | 352 | * <0 - failed to JIT. |
| 353 | */ | 353 | */ |
| 354 | static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) | 354 | static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, |
| 355 | bool extra_pass) | ||
| 355 | { | 356 | { |
| 356 | const u8 code = insn->code; | 357 | const u8 code = insn->code; |
| 357 | const u8 dst = bpf2a64[insn->dst_reg]; | 358 | const u8 dst = bpf2a64[insn->dst_reg]; |
| @@ -625,12 +626,19 @@ emit_cond_jmp: | |||
| 625 | case BPF_JMP | BPF_CALL: | 626 | case BPF_JMP | BPF_CALL: |
| 626 | { | 627 | { |
| 627 | const u8 r0 = bpf2a64[BPF_REG_0]; | 628 | const u8 r0 = bpf2a64[BPF_REG_0]; |
| 628 | const u64 func = (u64)__bpf_call_base + imm; | 629 | bool func_addr_fixed; |
| 630 | u64 func_addr; | ||
| 631 | int ret; | ||
| 629 | 632 | ||
| 630 | if (ctx->prog->is_func) | 633 | ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, |
| 631 | emit_addr_mov_i64(tmp, func, ctx); | 634 | &func_addr, &func_addr_fixed); |
| 635 | if (ret < 0) | ||
| 636 | return ret; | ||
| 637 | if (func_addr_fixed) | ||
| 638 | /* We can use optimized emission here. */ | ||
| 639 | emit_a64_mov_i64(tmp, func_addr, ctx); | ||
| 632 | else | 640 | else |
| 633 | emit_a64_mov_i64(tmp, func, ctx); | 641 | emit_addr_mov_i64(tmp, func_addr, ctx); |
| 634 | emit(A64_BLR(tmp), ctx); | 642 | emit(A64_BLR(tmp), ctx); |
| 635 | emit(A64_MOV(1, r0, A64_R(0)), ctx); | 643 | emit(A64_MOV(1, r0, A64_R(0)), ctx); |
| 636 | break; | 644 | break; |
| @@ -753,7 +761,7 @@ emit_cond_jmp: | |||
| 753 | return 0; | 761 | return 0; |
| 754 | } | 762 | } |
| 755 | 763 | ||
| 756 | static int build_body(struct jit_ctx *ctx) | 764 | static int build_body(struct jit_ctx *ctx, bool extra_pass) |
| 757 | { | 765 | { |
| 758 | const struct bpf_prog *prog = ctx->prog; | 766 | const struct bpf_prog *prog = ctx->prog; |
| 759 | int i; | 767 | int i; |
| @@ -762,7 +770,7 @@ static int build_body(struct jit_ctx *ctx) | |||
| 762 | const struct bpf_insn *insn = &prog->insnsi[i]; | 770 | const struct bpf_insn *insn = &prog->insnsi[i]; |
| 763 | int ret; | 771 | int ret; |
| 764 | 772 | ||
| 765 | ret = build_insn(insn, ctx); | 773 | ret = build_insn(insn, ctx, extra_pass); |
| 766 | if (ret > 0) { | 774 | if (ret > 0) { |
| 767 | i++; | 775 | i++; |
| 768 | if (ctx->image == NULL) | 776 | if (ctx->image == NULL) |
| @@ -858,7 +866,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) | |||
| 858 | /* 1. Initial fake pass to compute ctx->idx. */ | 866 | /* 1. Initial fake pass to compute ctx->idx. */ |
| 859 | 867 | ||
| 860 | /* Fake pass to fill in ctx->offset. */ | 868 | /* Fake pass to fill in ctx->offset. */ |
| 861 | if (build_body(&ctx)) { | 869 | if (build_body(&ctx, extra_pass)) { |
| 862 | prog = orig_prog; | 870 | prog = orig_prog; |
| 863 | goto out_off; | 871 | goto out_off; |
| 864 | } | 872 | } |
| @@ -888,7 +896,7 @@ skip_init_ctx: | |||
| 888 | 896 | ||
| 889 | build_prologue(&ctx, was_classic); | 897 | build_prologue(&ctx, was_classic); |
| 890 | 898 | ||
| 891 | if (build_body(&ctx)) { | 899 | if (build_body(&ctx, extra_pass)) { |
| 892 | bpf_jit_binary_free(header); | 900 | bpf_jit_binary_free(header); |
| 893 | prog = orig_prog; | 901 | prog = orig_prog; |
| 894 | goto out_off; | 902 | goto out_off; |
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h index ebef7f40aabb..c5c253cb9bd6 100644 --- a/arch/ia64/include/asm/numa.h +++ b/arch/ia64/include/asm/numa.h | |||
| @@ -59,7 +59,9 @@ extern struct node_cpuid_s node_cpuid[NR_CPUS]; | |||
| 59 | */ | 59 | */ |
| 60 | 60 | ||
| 61 | extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES]; | 61 | extern u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES]; |
| 62 | #define node_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)]) | 62 | #define slit_distance(from,to) (numa_slit[(from) * MAX_NUMNODES + (to)]) |
| 63 | extern int __node_distance(int from, int to); | ||
| 64 | #define node_distance(from,to) __node_distance(from, to) | ||
| 63 | 65 | ||
| 64 | extern int paddr_to_nid(unsigned long paddr); | 66 | extern int paddr_to_nid(unsigned long paddr); |
| 65 | 67 | ||
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 1dacbf5e9e09..41eb281709da 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
| @@ -578,8 +578,8 @@ void __init acpi_numa_fixup(void) | |||
| 578 | if (!slit_table) { | 578 | if (!slit_table) { |
| 579 | for (i = 0; i < MAX_NUMNODES; i++) | 579 | for (i = 0; i < MAX_NUMNODES; i++) |
| 580 | for (j = 0; j < MAX_NUMNODES; j++) | 580 | for (j = 0; j < MAX_NUMNODES; j++) |
| 581 | node_distance(i, j) = i == j ? LOCAL_DISTANCE : | 581 | slit_distance(i, j) = i == j ? |
| 582 | REMOTE_DISTANCE; | 582 | LOCAL_DISTANCE : REMOTE_DISTANCE; |
| 583 | return; | 583 | return; |
| 584 | } | 584 | } |
| 585 | 585 | ||
| @@ -592,7 +592,7 @@ void __init acpi_numa_fixup(void) | |||
| 592 | if (!pxm_bit_test(j)) | 592 | if (!pxm_bit_test(j)) |
| 593 | continue; | 593 | continue; |
| 594 | node_to = pxm_to_node(j); | 594 | node_to = pxm_to_node(j); |
| 595 | node_distance(node_from, node_to) = | 595 | slit_distance(node_from, node_to) = |
| 596 | slit_table->entry[i * slit_table->locality_count + j]; | 596 | slit_table->entry[i * slit_table->locality_count + j]; |
| 597 | } | 597 | } |
| 598 | } | 598 | } |
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c index 3861d6e32d5f..a03803506b0c 100644 --- a/arch/ia64/mm/numa.c +++ b/arch/ia64/mm/numa.c | |||
| @@ -36,6 +36,12 @@ struct node_cpuid_s node_cpuid[NR_CPUS] = | |||
| 36 | */ | 36 | */ |
| 37 | u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES]; | 37 | u8 numa_slit[MAX_NUMNODES * MAX_NUMNODES]; |
| 38 | 38 | ||
| 39 | int __node_distance(int from, int to) | ||
| 40 | { | ||
| 41 | return slit_distance(from, to); | ||
| 42 | } | ||
| 43 | EXPORT_SYMBOL(__node_distance); | ||
| 44 | |||
| 39 | /* Identify which cnode a physical address resides on */ | 45 | /* Identify which cnode a physical address resides on */ |
| 40 | int | 46 | int |
| 41 | paddr_to_nid(unsigned long paddr) | 47 | paddr_to_nid(unsigned long paddr) |
diff --git a/arch/m68k/include/asm/pgtable_mm.h b/arch/m68k/include/asm/pgtable_mm.h index 6181e4134483..fe3ddd73a0cc 100644 --- a/arch/m68k/include/asm/pgtable_mm.h +++ b/arch/m68k/include/asm/pgtable_mm.h | |||
| @@ -55,12 +55,12 @@ | |||
| 55 | */ | 55 | */ |
| 56 | #ifdef CONFIG_SUN3 | 56 | #ifdef CONFIG_SUN3 |
| 57 | #define PTRS_PER_PTE 16 | 57 | #define PTRS_PER_PTE 16 |
| 58 | #define __PAGETABLE_PMD_FOLDED | 58 | #define __PAGETABLE_PMD_FOLDED 1 |
| 59 | #define PTRS_PER_PMD 1 | 59 | #define PTRS_PER_PMD 1 |
| 60 | #define PTRS_PER_PGD 2048 | 60 | #define PTRS_PER_PGD 2048 |
| 61 | #elif defined(CONFIG_COLDFIRE) | 61 | #elif defined(CONFIG_COLDFIRE) |
| 62 | #define PTRS_PER_PTE 512 | 62 | #define PTRS_PER_PTE 512 |
| 63 | #define __PAGETABLE_PMD_FOLDED | 63 | #define __PAGETABLE_PMD_FOLDED 1 |
| 64 | #define PTRS_PER_PMD 1 | 64 | #define PTRS_PER_PMD 1 |
| 65 | #define PTRS_PER_PGD 1024 | 65 | #define PTRS_PER_PGD 1024 |
| 66 | #else | 66 | #else |
diff --git a/arch/microblaze/include/asm/pgtable.h b/arch/microblaze/include/asm/pgtable.h index f64ebb9c9a41..e14b6621c933 100644 --- a/arch/microblaze/include/asm/pgtable.h +++ b/arch/microblaze/include/asm/pgtable.h | |||
| @@ -63,7 +63,7 @@ extern int mem_init_done; | |||
| 63 | 63 | ||
| 64 | #include <asm-generic/4level-fixup.h> | 64 | #include <asm-generic/4level-fixup.h> |
| 65 | 65 | ||
| 66 | #define __PAGETABLE_PMD_FOLDED | 66 | #define __PAGETABLE_PMD_FOLDED 1 |
| 67 | 67 | ||
| 68 | #ifdef __KERNEL__ | 68 | #ifdef __KERNEL__ |
| 69 | #ifndef __ASSEMBLY__ | 69 | #ifndef __ASSEMBLY__ |
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c index d57563c58a26..224eea40e1ee 100644 --- a/arch/microblaze/kernel/ftrace.c +++ b/arch/microblaze/kernel/ftrace.c | |||
| @@ -22,8 +22,7 @@ | |||
| 22 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | 22 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) |
| 23 | { | 23 | { |
| 24 | unsigned long old; | 24 | unsigned long old; |
| 25 | int faulted, err; | 25 | int faulted; |
| 26 | struct ftrace_graph_ent trace; | ||
| 27 | unsigned long return_hooker = (unsigned long) | 26 | unsigned long return_hooker = (unsigned long) |
| 28 | &return_to_handler; | 27 | &return_to_handler; |
| 29 | 28 | ||
| @@ -63,18 +62,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
| 63 | return; | 62 | return; |
| 64 | } | 63 | } |
| 65 | 64 | ||
| 66 | err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL); | 65 | if (function_graph_enter(old, self_addr, 0, NULL)) |
| 67 | if (err == -EBUSY) { | ||
| 68 | *parent = old; | 66 | *parent = old; |
| 69 | return; | ||
| 70 | } | ||
| 71 | |||
| 72 | trace.func = self_addr; | ||
| 73 | /* Only trace if the calling function expects to */ | ||
| 74 | if (!ftrace_graph_entry(&trace)) { | ||
| 75 | current->curr_ret_stack--; | ||
| 76 | *parent = old; | ||
| 77 | } | ||
| 78 | } | 67 | } |
| 79 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 68 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 80 | 69 | ||
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c index 75108ec669eb..6c79e8a16a26 100644 --- a/arch/mips/cavium-octeon/executive/cvmx-helper.c +++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c | |||
| @@ -67,7 +67,7 @@ void (*cvmx_override_pko_queue_priority) (int pko_port, | |||
| 67 | void (*cvmx_override_ipd_port_setup) (int ipd_port); | 67 | void (*cvmx_override_ipd_port_setup) (int ipd_port); |
| 68 | 68 | ||
| 69 | /* Port count per interface */ | 69 | /* Port count per interface */ |
| 70 | static int interface_port_count[5]; | 70 | static int interface_port_count[9]; |
| 71 | 71 | ||
| 72 | /** | 72 | /** |
| 73 | * Return the number of interfaces the chip has. Each interface | 73 | * Return the number of interfaces the chip has. Each interface |
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig index 490b12af103c..c52d0efacd14 100644 --- a/arch/mips/configs/cavium_octeon_defconfig +++ b/arch/mips/configs/cavium_octeon_defconfig | |||
| @@ -140,6 +140,7 @@ CONFIG_RTC_CLASS=y | |||
| 140 | CONFIG_RTC_DRV_DS1307=y | 140 | CONFIG_RTC_DRV_DS1307=y |
| 141 | CONFIG_STAGING=y | 141 | CONFIG_STAGING=y |
| 142 | CONFIG_OCTEON_ETHERNET=y | 142 | CONFIG_OCTEON_ETHERNET=y |
| 143 | CONFIG_OCTEON_USB=y | ||
| 143 | # CONFIG_IOMMU_SUPPORT is not set | 144 | # CONFIG_IOMMU_SUPPORT is not set |
| 144 | CONFIG_RAS=y | 145 | CONFIG_RAS=y |
| 145 | CONFIG_EXT4_FS=y | 146 | CONFIG_EXT4_FS=y |
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h index 0170602a1e4e..6cf8ffb5367e 100644 --- a/arch/mips/include/asm/syscall.h +++ b/arch/mips/include/asm/syscall.h | |||
| @@ -73,7 +73,7 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg, | |||
| 73 | #ifdef CONFIG_64BIT | 73 | #ifdef CONFIG_64BIT |
| 74 | case 4: case 5: case 6: case 7: | 74 | case 4: case 5: case 6: case 7: |
| 75 | #ifdef CONFIG_MIPS32_O32 | 75 | #ifdef CONFIG_MIPS32_O32 |
| 76 | if (test_thread_flag(TIF_32BIT_REGS)) | 76 | if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) |
| 77 | return get_user(*arg, (int *)usp + n); | 77 | return get_user(*arg, (int *)usp + n); |
| 78 | else | 78 | else |
| 79 | #endif | 79 | #endif |
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 7f3dfdbc3657..b122cbb4aad1 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c | |||
| @@ -322,7 +322,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, | |||
| 322 | unsigned long fp) | 322 | unsigned long fp) |
| 323 | { | 323 | { |
| 324 | unsigned long old_parent_ra; | 324 | unsigned long old_parent_ra; |
| 325 | struct ftrace_graph_ent trace; | ||
| 326 | unsigned long return_hooker = (unsigned long) | 325 | unsigned long return_hooker = (unsigned long) |
| 327 | &return_to_handler; | 326 | &return_to_handler; |
| 328 | int faulted, insns; | 327 | int faulted, insns; |
| @@ -369,12 +368,6 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, | |||
| 369 | if (unlikely(faulted)) | 368 | if (unlikely(faulted)) |
| 370 | goto out; | 369 | goto out; |
| 371 | 370 | ||
| 372 | if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp, | ||
| 373 | NULL) == -EBUSY) { | ||
| 374 | *parent_ra_addr = old_parent_ra; | ||
| 375 | return; | ||
| 376 | } | ||
| 377 | |||
| 378 | /* | 371 | /* |
| 379 | * Get the recorded ip of the current mcount calling site in the | 372 | * Get the recorded ip of the current mcount calling site in the |
| 380 | * __mcount_loc section, which will be used to filter the function | 373 | * __mcount_loc section, which will be used to filter the function |
| @@ -382,13 +375,10 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, | |||
| 382 | */ | 375 | */ |
| 383 | 376 | ||
| 384 | insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; | 377 | insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; |
| 385 | trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); | 378 | self_ra -= (MCOUNT_INSN_SIZE * insns); |
| 386 | 379 | ||
| 387 | /* Only trace if the calling function expects to */ | 380 | if (function_graph_enter(old_parent_ra, self_ra, fp, NULL)) |
| 388 | if (!ftrace_graph_entry(&trace)) { | ||
| 389 | current->curr_ret_stack--; | ||
| 390 | *parent_ra_addr = old_parent_ra; | 381 | *parent_ra_addr = old_parent_ra; |
| 391 | } | ||
| 392 | return; | 382 | return; |
| 393 | out: | 383 | out: |
| 394 | ftrace_graph_stop(); | 384 | ftrace_graph_stop(); |
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index ea09ed6a80a9..8c6c48ed786a 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c | |||
| @@ -794,6 +794,7 @@ static void __init arch_mem_init(char **cmdline_p) | |||
| 794 | 794 | ||
| 795 | /* call board setup routine */ | 795 | /* call board setup routine */ |
| 796 | plat_mem_setup(); | 796 | plat_mem_setup(); |
| 797 | memblock_set_bottom_up(true); | ||
| 797 | 798 | ||
| 798 | /* | 799 | /* |
| 799 | * Make sure all kernel memory is in the maps. The "UP" and | 800 | * Make sure all kernel memory is in the maps. The "UP" and |
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 0f852e1b5891..15e103c6d799 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
| @@ -2260,10 +2260,8 @@ void __init trap_init(void) | |||
| 2260 | unsigned long size = 0x200 + VECTORSPACING*64; | 2260 | unsigned long size = 0x200 + VECTORSPACING*64; |
| 2261 | phys_addr_t ebase_pa; | 2261 | phys_addr_t ebase_pa; |
| 2262 | 2262 | ||
| 2263 | memblock_set_bottom_up(true); | ||
| 2264 | ebase = (unsigned long) | 2263 | ebase = (unsigned long) |
| 2265 | memblock_alloc_from(size, 1 << fls(size), 0); | 2264 | memblock_alloc_from(size, 1 << fls(size), 0); |
| 2266 | memblock_set_bottom_up(false); | ||
| 2267 | 2265 | ||
| 2268 | /* | 2266 | /* |
| 2269 | * Try to ensure ebase resides in KSeg0 if possible. | 2267 | * Try to ensure ebase resides in KSeg0 if possible. |
| @@ -2307,6 +2305,7 @@ void __init trap_init(void) | |||
| 2307 | if (board_ebase_setup) | 2305 | if (board_ebase_setup) |
| 2308 | board_ebase_setup(); | 2306 | board_ebase_setup(); |
| 2309 | per_cpu_trap_init(true); | 2307 | per_cpu_trap_init(true); |
| 2308 | memblock_set_bottom_up(false); | ||
| 2310 | 2309 | ||
| 2311 | /* | 2310 | /* |
| 2312 | * Copy the generic exception handlers to their final destination. | 2311 | * Copy the generic exception handlers to their final destination. |
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c index 622761878cd1..60bf0a1cb757 100644 --- a/arch/mips/loongson64/loongson-3/numa.c +++ b/arch/mips/loongson64/loongson-3/numa.c | |||
| @@ -231,6 +231,8 @@ static __init void prom_meminit(void) | |||
| 231 | cpumask_clear(&__node_data[(node)]->cpumask); | 231 | cpumask_clear(&__node_data[(node)]->cpumask); |
| 232 | } | 232 | } |
| 233 | } | 233 | } |
| 234 | max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); | ||
| 235 | |||
| 234 | for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { | 236 | for (cpu = 0; cpu < loongson_sysconf.nr_cpus; cpu++) { |
| 235 | node = cpu / loongson_sysconf.cores_per_node; | 237 | node = cpu / loongson_sysconf.cores_per_node; |
| 236 | if (node >= num_online_nodes()) | 238 | if (node >= num_online_nodes()) |
| @@ -248,19 +250,9 @@ static __init void prom_meminit(void) | |||
| 248 | 250 | ||
| 249 | void __init paging_init(void) | 251 | void __init paging_init(void) |
| 250 | { | 252 | { |
| 251 | unsigned node; | ||
| 252 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; | 253 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; |
| 253 | 254 | ||
| 254 | pagetable_init(); | 255 | pagetable_init(); |
| 255 | |||
| 256 | for_each_online_node(node) { | ||
| 257 | unsigned long start_pfn, end_pfn; | ||
| 258 | |||
| 259 | get_pfn_range_for_nid(node, &start_pfn, &end_pfn); | ||
| 260 | |||
| 261 | if (end_pfn > max_low_pfn) | ||
| 262 | max_low_pfn = end_pfn; | ||
| 263 | } | ||
| 264 | #ifdef CONFIG_ZONE_DMA32 | 256 | #ifdef CONFIG_ZONE_DMA32 |
| 265 | zones_size[ZONE_DMA32] = MAX_DMA32_PFN; | 257 | zones_size[ZONE_DMA32] = MAX_DMA32_PFN; |
| 266 | #endif | 258 | #endif |
diff --git a/arch/mips/mm/dma-noncoherent.c b/arch/mips/mm/dma-noncoherent.c index e6c9485cadcf..cb38461391cb 100644 --- a/arch/mips/mm/dma-noncoherent.c +++ b/arch/mips/mm/dma-noncoherent.c | |||
| @@ -50,7 +50,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, | |||
| 50 | void *ret; | 50 | void *ret; |
| 51 | 51 | ||
| 52 | ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); | 52 | ret = dma_direct_alloc_pages(dev, size, dma_handle, gfp, attrs); |
| 53 | if (!ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) { | 53 | if (ret && !(attrs & DMA_ATTR_NON_CONSISTENT)) { |
| 54 | dma_cache_wback_inv((unsigned long) ret, size); | 54 | dma_cache_wback_inv((unsigned long) ret, size); |
| 55 | ret = (void *)UNCAC_ADDR(ret); | 55 | ret = (void *)UNCAC_ADDR(ret); |
| 56 | } | 56 | } |
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c index 41b71c4352c2..c1ce6f43642b 100644 --- a/arch/mips/ralink/mt7620.c +++ b/arch/mips/ralink/mt7620.c | |||
| @@ -84,7 +84,7 @@ static struct rt2880_pmx_func pcie_rst_grp[] = { | |||
| 84 | }; | 84 | }; |
| 85 | static struct rt2880_pmx_func nd_sd_grp[] = { | 85 | static struct rt2880_pmx_func nd_sd_grp[] = { |
| 86 | FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), | 86 | FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), |
| 87 | FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15) | 87 | FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13) |
| 88 | }; | 88 | }; |
| 89 | 89 | ||
| 90 | static struct rt2880_pmx_group mt7620a_pinmux_data[] = { | 90 | static struct rt2880_pmx_group mt7620a_pinmux_data[] = { |
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index d8b8444d6795..813d13f92957 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c | |||
| @@ -435,6 +435,7 @@ void __init prom_meminit(void) | |||
| 435 | 435 | ||
| 436 | mlreset(); | 436 | mlreset(); |
| 437 | szmem(); | 437 | szmem(); |
| 438 | max_low_pfn = PHYS_PFN(memblock_end_of_DRAM()); | ||
| 438 | 439 | ||
| 439 | for (node = 0; node < MAX_COMPACT_NODES; node++) { | 440 | for (node = 0; node < MAX_COMPACT_NODES; node++) { |
| 440 | if (node_online(node)) { | 441 | if (node_online(node)) { |
| @@ -455,18 +456,8 @@ extern void setup_zero_pages(void); | |||
| 455 | void __init paging_init(void) | 456 | void __init paging_init(void) |
| 456 | { | 457 | { |
| 457 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; | 458 | unsigned long zones_size[MAX_NR_ZONES] = {0, }; |
| 458 | unsigned node; | ||
| 459 | 459 | ||
| 460 | pagetable_init(); | 460 | pagetable_init(); |
| 461 | |||
| 462 | for_each_online_node(node) { | ||
| 463 | unsigned long start_pfn, end_pfn; | ||
| 464 | |||
| 465 | get_pfn_range_for_nid(node, &start_pfn, &end_pfn); | ||
| 466 | |||
| 467 | if (end_pfn > max_low_pfn) | ||
| 468 | max_low_pfn = end_pfn; | ||
| 469 | } | ||
| 470 | zones_size[ZONE_NORMAL] = max_low_pfn; | 461 | zones_size[ZONE_NORMAL] = max_low_pfn; |
| 471 | free_area_init_nodes(zones_size); | 462 | free_area_init_nodes(zones_size); |
| 472 | } | 463 | } |
diff --git a/arch/nds32/include/asm/pgtable.h b/arch/nds32/include/asm/pgtable.h index d3e19a55cf53..9f52db930c00 100644 --- a/arch/nds32/include/asm/pgtable.h +++ b/arch/nds32/include/asm/pgtable.h | |||
| @@ -4,7 +4,7 @@ | |||
| 4 | #ifndef _ASMNDS32_PGTABLE_H | 4 | #ifndef _ASMNDS32_PGTABLE_H |
| 5 | #define _ASMNDS32_PGTABLE_H | 5 | #define _ASMNDS32_PGTABLE_H |
| 6 | 6 | ||
| 7 | #define __PAGETABLE_PMD_FOLDED | 7 | #define __PAGETABLE_PMD_FOLDED 1 |
| 8 | #include <asm-generic/4level-fixup.h> | 8 | #include <asm-generic/4level-fixup.h> |
| 9 | #include <asm-generic/sizes.h> | 9 | #include <asm-generic/sizes.h> |
| 10 | 10 | ||
diff --git a/arch/nds32/kernel/ftrace.c b/arch/nds32/kernel/ftrace.c index a0a9679ad5de..8a41372551ff 100644 --- a/arch/nds32/kernel/ftrace.c +++ b/arch/nds32/kernel/ftrace.c | |||
| @@ -211,29 +211,15 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
| 211 | unsigned long frame_pointer) | 211 | unsigned long frame_pointer) |
| 212 | { | 212 | { |
| 213 | unsigned long return_hooker = (unsigned long)&return_to_handler; | 213 | unsigned long return_hooker = (unsigned long)&return_to_handler; |
| 214 | struct ftrace_graph_ent trace; | ||
| 215 | unsigned long old; | 214 | unsigned long old; |
| 216 | int err; | ||
| 217 | 215 | ||
| 218 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 216 | return; |
| 219 | return; | 217 | return; |
| 220 | 218 | ||
| 221 | old = *parent; | 219 | old = *parent; |
| 222 | 220 | ||
| 223 | trace.func = self_addr; | 221 | if (!function_graph_enter(old, self_addr, frame_pointer, NULL)) |
| 224 | trace.depth = current->curr_ret_stack + 1; | 222 | *parent = return_hooker; |
| 225 | |||
| 226 | /* Only trace if the calling function expects to */ | ||
| 227 | if (!ftrace_graph_entry(&trace)) | ||
| 228 | return; | ||
| 229 | |||
| 230 | err = ftrace_push_return_trace(old, self_addr, &trace.depth, | ||
| 231 | frame_pointer, NULL); | ||
| 232 | |||
| 233 | if (err == -EBUSY) | ||
| 234 | return; | ||
| 235 | |||
| 236 | *parent = return_hooker; | ||
| 237 | } | 223 | } |
| 238 | 224 | ||
| 239 | noinline void ftrace_graph_caller(void) | 225 | noinline void ftrace_graph_caller(void) |
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index b941ac7d4e70..c7bb74e22436 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h | |||
| @@ -111,7 +111,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) | |||
| 111 | #if CONFIG_PGTABLE_LEVELS == 3 | 111 | #if CONFIG_PGTABLE_LEVELS == 3 |
| 112 | #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) | 112 | #define BITS_PER_PMD (PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY) |
| 113 | #else | 113 | #else |
| 114 | #define __PAGETABLE_PMD_FOLDED | 114 | #define __PAGETABLE_PMD_FOLDED 1 |
| 115 | #define BITS_PER_PMD 0 | 115 | #define BITS_PER_PMD 0 |
| 116 | #endif | 116 | #endif |
| 117 | #define PTRS_PER_PMD (1UL << BITS_PER_PMD) | 117 | #define PTRS_PER_PMD (1UL << BITS_PER_PMD) |
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index 16aec9ba2580..8a63515f03bf 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h | |||
| @@ -37,8 +37,8 @@ static inline void arch_spin_unlock(arch_spinlock_t *x) | |||
| 37 | volatile unsigned int *a; | 37 | volatile unsigned int *a; |
| 38 | 38 | ||
| 39 | a = __ldcw_align(x); | 39 | a = __ldcw_align(x); |
| 40 | /* Release with ordered store. */ | 40 | mb(); |
| 41 | __asm__ __volatile__("stw,ma %0,0(%1)" : : "r"(1), "r"(a) : "memory"); | 41 | *a = 1; |
| 42 | } | 42 | } |
| 43 | 43 | ||
| 44 | static inline int arch_spin_trylock(arch_spinlock_t *x) | 44 | static inline int arch_spin_trylock(arch_spinlock_t *x) |
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index 6fa8535d3cce..e46a4157a894 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c | |||
| @@ -30,7 +30,6 @@ static void __hot prepare_ftrace_return(unsigned long *parent, | |||
| 30 | unsigned long self_addr) | 30 | unsigned long self_addr) |
| 31 | { | 31 | { |
| 32 | unsigned long old; | 32 | unsigned long old; |
| 33 | struct ftrace_graph_ent trace; | ||
| 34 | extern int parisc_return_to_handler; | 33 | extern int parisc_return_to_handler; |
| 35 | 34 | ||
| 36 | if (unlikely(ftrace_graph_is_dead())) | 35 | if (unlikely(ftrace_graph_is_dead())) |
| @@ -41,19 +40,9 @@ static void __hot prepare_ftrace_return(unsigned long *parent, | |||
| 41 | 40 | ||
| 42 | old = *parent; | 41 | old = *parent; |
| 43 | 42 | ||
| 44 | trace.func = self_addr; | 43 | if (!function_graph_enter(old, self_addr, 0, NULL)) |
| 45 | trace.depth = current->curr_ret_stack + 1; | 44 | /* activate parisc_return_to_handler() as return point */ |
| 46 | 45 | *parent = (unsigned long) &parisc_return_to_handler; | |
| 47 | /* Only trace if the calling function expects to */ | ||
| 48 | if (!ftrace_graph_entry(&trace)) | ||
| 49 | return; | ||
| 50 | |||
| 51 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, | ||
| 52 | 0, NULL) == -EBUSY) | ||
| 53 | return; | ||
| 54 | |||
| 55 | /* activate parisc_return_to_handler() as return point */ | ||
| 56 | *parent = (unsigned long) &parisc_return_to_handler; | ||
| 57 | } | 46 | } |
| 58 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 47 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
| 59 | 48 | ||
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 9505c317818d..a9bc90dc4ae7 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S | |||
| @@ -640,7 +640,8 @@ cas_action: | |||
| 640 | sub,<> %r28, %r25, %r0 | 640 | sub,<> %r28, %r25, %r0 |
| 641 | 2: stw %r24, 0(%r26) | 641 | 2: stw %r24, 0(%r26) |
| 642 | /* Free lock */ | 642 | /* Free lock */ |
| 643 | stw,ma %r20, 0(%sr2,%r20) | 643 | sync |
| 644 | stw %r20, 0(%sr2,%r20) | ||
| 644 | #if ENABLE_LWS_DEBUG | 645 | #if ENABLE_LWS_DEBUG |
| 645 | /* Clear thread register indicator */ | 646 | /* Clear thread register indicator */ |
| 646 | stw %r0, 4(%sr2,%r20) | 647 | stw %r0, 4(%sr2,%r20) |
| @@ -654,7 +655,8 @@ cas_action: | |||
| 654 | 3: | 655 | 3: |
| 655 | /* Error occurred on load or store */ | 656 | /* Error occurred on load or store */ |
| 656 | /* Free lock */ | 657 | /* Free lock */ |
| 657 | stw,ma %r20, 0(%sr2,%r20) | 658 | sync |
| 659 | stw %r20, 0(%sr2,%r20) | ||
| 658 | #if ENABLE_LWS_DEBUG | 660 | #if ENABLE_LWS_DEBUG |
| 659 | stw %r0, 4(%sr2,%r20) | 661 | stw %r0, 4(%sr2,%r20) |
| 660 | #endif | 662 | #endif |
| @@ -855,7 +857,8 @@ cas2_action: | |||
| 855 | 857 | ||
| 856 | cas2_end: | 858 | cas2_end: |
| 857 | /* Free lock */ | 859 | /* Free lock */ |
| 858 | stw,ma %r20, 0(%sr2,%r20) | 860 | sync |
| 861 | stw %r20, 0(%sr2,%r20) | ||
| 859 | /* Enable interrupts */ | 862 | /* Enable interrupts */ |
| 860 | ssm PSW_SM_I, %r0 | 863 | ssm PSW_SM_I, %r0 |
| 861 | /* Return to userspace, set no error */ | 864 | /* Return to userspace, set no error */ |
| @@ -865,7 +868,8 @@ cas2_end: | |||
| 865 | 22: | 868 | 22: |
| 866 | /* Error occurred on load or store */ | 869 | /* Error occurred on load or store */ |
| 867 | /* Free lock */ | 870 | /* Free lock */ |
| 868 | stw,ma %r20, 0(%sr2,%r20) | 871 | sync |
| 872 | stw %r20, 0(%sr2,%r20) | ||
| 869 | ssm PSW_SM_I, %r0 | 873 | ssm PSW_SM_I, %r0 |
| 870 | ldo 1(%r0),%r28 | 874 | ldo 1(%r0),%r28 |
| 871 | b lws_exit | 875 | b lws_exit |
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 3ef40b703c4a..e746becd9d6f 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h | |||
| @@ -268,19 +268,13 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, | |||
| 268 | * their hooks, a bitfield is reserved for use by the platform near the | 268 | * their hooks, a bitfield is reserved for use by the platform near the |
| 269 | * top of MMIO addresses (not PIO, those have to cope the hard way). | 269 | * top of MMIO addresses (not PIO, those have to cope the hard way). |
| 270 | * | 270 | * |
| 271 | * This bit field is 12 bits and is at the top of the IO virtual | 271 | * The highest address in the kernel virtual space are: |
| 272 | * addresses PCI_IO_INDIRECT_TOKEN_MASK. | ||
| 273 | * | 272 | * |
| 274 | * The kernel virtual space is thus: | 273 | * d0003fffffffffff # with Hash MMU |
| 274 | * c00fffffffffffff # with Radix MMU | ||
| 275 | * | 275 | * |
| 276 | * 0xD000000000000000 : vmalloc | 276 | * The top 4 bits are reserved as the region ID on hash, leaving us 8 bits |
| 277 | * 0xD000080000000000 : PCI PHB IO space | 277 | * that can be used for the field. |
| 278 | * 0xD000080080000000 : ioremap | ||
| 279 | * 0xD0000fffffffffff : end of ioremap region | ||
| 280 | * | ||
| 281 | * Since the top 4 bits are reserved as the region ID, we use thus | ||
| 282 | * the next 12 bits and keep 4 bits available for the future if the | ||
| 283 | * virtual address space is ever to be extended. | ||
| 284 | * | 278 | * |
| 285 | * The direct IO mapping operations will then mask off those bits | 279 | * The direct IO mapping operations will then mask off those bits |
| 286 | * before doing the actual access, though that only happen when | 280 | * before doing the actual access, though that only happen when |
| @@ -292,8 +286,8 @@ extern void _memcpy_toio(volatile void __iomem *dest, const void *src, | |||
| 292 | */ | 286 | */ |
| 293 | 287 | ||
| 294 | #ifdef CONFIG_PPC_INDIRECT_MMIO | 288 | #ifdef CONFIG_PPC_INDIRECT_MMIO |
| 295 | #define PCI_IO_IND_TOKEN_MASK 0x0fff000000000000ul | 289 | #define PCI_IO_IND_TOKEN_SHIFT 52 |
| 296 | #define PCI_IO_IND_TOKEN_SHIFT 48 | 290 | #define PCI_IO_IND_TOKEN_MASK (0xfful << PCI_IO_IND_TOKEN_SHIFT) |
| 297 | #define PCI_FIX_ADDR(addr) \ | 291 | #define PCI_FIX_ADDR(addr) \ |
| 298 | ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK)) | 292 | ((PCI_IO_ADDR)(((unsigned long)(addr)) & ~PCI_IO_IND_TOKEN_MASK)) |
| 299 | #define PCI_GET_ADDR_TOKEN(addr) \ | 293 | #define PCI_GET_ADDR_TOKEN(addr) \ |
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 6093bc8f74e5..a6e9e314c707 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h | |||
| @@ -493,6 +493,8 @@ | |||
| 493 | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) | 493 | __PPC_RS(t) | __PPC_RA0(a) | __PPC_RB(b)) |
| 494 | #define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \ | 494 | #define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \ |
| 495 | __PPC_RT(t) | __PPC_RB(b)) | 495 | __PPC_RT(t) | __PPC_RB(b)) |
| 496 | #define __PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \ | ||
| 497 | ___PPC_RT(t) | ___PPC_RB(b)) | ||
| 496 | #define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \ | 498 | #define PPC_ICBT(c,a,b) stringify_in_c(.long PPC_INST_ICBT | \ |
| 497 | __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b)) | 499 | __PPC_CT(c) | __PPC_RA0(a) | __PPC_RB(b)) |
| 498 | /* PASemi instructions */ | 500 | /* PASemi instructions */ |
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index f73886a1a7f5..0b8a735b6d85 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h | |||
| @@ -54,6 +54,7 @@ struct pt_regs | |||
| 54 | 54 | ||
| 55 | #ifdef CONFIG_PPC64 | 55 | #ifdef CONFIG_PPC64 |
| 56 | unsigned long ppr; | 56 | unsigned long ppr; |
| 57 | unsigned long __pad; /* Maintain 16 byte interrupt stack alignment */ | ||
| 57 | #endif | 58 | #endif |
| 58 | }; | 59 | }; |
| 59 | #endif | 60 | #endif |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 2a51e4cc8246..236c1151a3a7 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
| @@ -636,6 +636,8 @@ static void *__init alloc_stack(unsigned long limit, int cpu) | |||
| 636 | { | 636 | { |
| 637 | unsigned long pa; | 637 | unsigned long pa; |
| 638 | 638 | ||
| 639 | BUILD_BUG_ON(STACK_INT_FRAME_SIZE % 16); | ||
| 640 | |||
| 639 | pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit, | 641 | pa = memblock_alloc_base_nid(THREAD_SIZE, THREAD_SIZE, limit, |
| 640 | early_cpu_to_node(cpu), MEMBLOCK_NONE); | 642 | early_cpu_to_node(cpu), MEMBLOCK_NONE); |
| 641 | if (!pa) { | 643 | if (!pa) { |
diff --git a/arch/powerpc/kernel/trace/ftrace.c b/arch/powerpc/kernel/trace/ftrace.c index 4bf051d3e21e..b65c8a34ad6e 100644 --- a/arch/powerpc/kernel/trace/ftrace.c +++ b/arch/powerpc/kernel/trace/ftrace.c | |||
| @@ -950,7 +950,6 @@ int ftrace_disable_ftrace_graph_caller(void) | |||
| 950 | */ | 950 | */ |
| 951 | unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) | 951 | unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) |
| 952 | { | 952 | { |
| 953 | struct ftrace_graph_ent trace; | ||
| 954 | unsigned long return_hooker; | 953 | unsigned long return_hooker; |
| 955 | 954 | ||
| 956 | if (unlikely(ftrace_graph_is_dead())) | 955 | if (unlikely(ftrace_graph_is_dead())) |
| @@ -961,18 +960,8 @@ unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) | |||
| 961 | 960 | ||
| 962 | return_hooker = ppc_function_entry(return_to_handler); | 961 | return_hooker = ppc_function_entry(return_to_handler); |
| 963 | 962 | ||
| 964 | trace.func = ip; | 963 | if (!function_graph_enter(parent, ip, 0, NULL)) |
| 965 | trace.depth = current->curr_ret_stack + 1; | 964 | parent = return_hooker; |
| 966 | |||
| 967 | /* Only trace if the calling function expects to */ | ||
| 968 | if (!ftrace_graph_entry(&trace)) | ||
| 969 | goto out; | ||
| 970 | |||
| 971 | if (ftrace_push_return_trace(parent, ip, &trace.depth, 0, | ||
| 972 | NULL) == -EBUSY) | ||
| 973 | goto out; | ||
| 974 | |||
| 975 | parent = return_hooker; | ||
| 976 | out: | 965 | out: |
| 977 | return parent; | 966 | return parent; |
| 978 | } | 967 | } |
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index d65b961661fb..a56f8413758a 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
| @@ -983,6 +983,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
| 983 | ret = kvmhv_enter_nested_guest(vcpu); | 983 | ret = kvmhv_enter_nested_guest(vcpu); |
| 984 | if (ret == H_INTERRUPT) { | 984 | if (ret == H_INTERRUPT) { |
| 985 | kvmppc_set_gpr(vcpu, 3, 0); | 985 | kvmppc_set_gpr(vcpu, 3, 0); |
| 986 | vcpu->arch.hcall_needed = 0; | ||
| 986 | return -EINTR; | 987 | return -EINTR; |
| 987 | } | 988 | } |
| 988 | break; | 989 | break; |
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h index 491b0f715d6b..ea1d7c808319 100644 --- a/arch/powerpc/kvm/trace.h +++ b/arch/powerpc/kvm/trace.h | |||
| @@ -6,8 +6,6 @@ | |||
| 6 | 6 | ||
| 7 | #undef TRACE_SYSTEM | 7 | #undef TRACE_SYSTEM |
| 8 | #define TRACE_SYSTEM kvm | 8 | #define TRACE_SYSTEM kvm |
| 9 | #define TRACE_INCLUDE_PATH . | ||
| 10 | #define TRACE_INCLUDE_FILE trace | ||
| 11 | 9 | ||
| 12 | /* | 10 | /* |
| 13 | * Tracepoint for guest mode entry. | 11 | * Tracepoint for guest mode entry. |
| @@ -120,4 +118,10 @@ TRACE_EVENT(kvm_check_requests, | |||
| 120 | #endif /* _TRACE_KVM_H */ | 118 | #endif /* _TRACE_KVM_H */ |
| 121 | 119 | ||
| 122 | /* This part must be outside protection */ | 120 | /* This part must be outside protection */ |
| 121 | #undef TRACE_INCLUDE_PATH | ||
| 122 | #undef TRACE_INCLUDE_FILE | ||
| 123 | |||
| 124 | #define TRACE_INCLUDE_PATH . | ||
| 125 | #define TRACE_INCLUDE_FILE trace | ||
| 126 | |||
| 123 | #include <trace/define_trace.h> | 127 | #include <trace/define_trace.h> |
diff --git a/arch/powerpc/kvm/trace_booke.h b/arch/powerpc/kvm/trace_booke.h index ac640e81fdc5..3837842986aa 100644 --- a/arch/powerpc/kvm/trace_booke.h +++ b/arch/powerpc/kvm/trace_booke.h | |||
| @@ -6,8 +6,6 @@ | |||
| 6 | 6 | ||
| 7 | #undef TRACE_SYSTEM | 7 | #undef TRACE_SYSTEM |
| 8 | #define TRACE_SYSTEM kvm_booke | 8 | #define TRACE_SYSTEM kvm_booke |
| 9 | #define TRACE_INCLUDE_PATH . | ||
| 10 | #define TRACE_INCLUDE_FILE trace_booke | ||
| 11 | 9 | ||
| 12 | #define kvm_trace_symbol_exit \ | 10 | #define kvm_trace_symbol_exit \ |
| 13 | {0, "CRITICAL"}, \ | 11 | {0, "CRITICAL"}, \ |
| @@ -218,4 +216,11 @@ TRACE_EVENT(kvm_booke_queue_irqprio, | |||
| 218 | #endif | 216 | #endif |
| 219 | 217 | ||
| 220 | /* This part must be outside protection */ | 218 | /* This part must be outside protection */ |
| 219 | |||
| 220 | #undef TRACE_INCLUDE_PATH | ||
| 221 | #undef TRACE_INCLUDE_FILE | ||
| 222 | |||
| 223 | #define TRACE_INCLUDE_PATH . | ||
| 224 | #define TRACE_INCLUDE_FILE trace_booke | ||
| 225 | |||
| 221 | #include <trace/define_trace.h> | 226 | #include <trace/define_trace.h> |
diff --git a/arch/powerpc/kvm/trace_hv.h b/arch/powerpc/kvm/trace_hv.h index bcfe8a987f6a..8a1e3b0047f1 100644 --- a/arch/powerpc/kvm/trace_hv.h +++ b/arch/powerpc/kvm/trace_hv.h | |||
| @@ -9,8 +9,6 @@ | |||
| 9 | 9 | ||
| 10 | #undef TRACE_SYSTEM | 10 | #undef TRACE_SYSTEM |
| 11 | #define TRACE_SYSTEM kvm_hv | 11 | #define TRACE_SYSTEM kvm_hv |
| 12 | #define TRACE_INCLUDE_PATH . | ||
| 13 | #define TRACE_INCLUDE_FILE trace_hv | ||
| 14 | 12 | ||
| 15 | #define kvm_trace_symbol_hcall \ | 13 | #define kvm_trace_symbol_hcall \ |
| 16 | {H_REMOVE, "H_REMOVE"}, \ | 14 | {H_REMOVE, "H_REMOVE"}, \ |
| @@ -497,4 +495,11 @@ TRACE_EVENT(kvmppc_run_vcpu_exit, | |||
| 497 | #endif /* _TRACE_KVM_HV_H */ | 495 | #endif /* _TRACE_KVM_HV_H */ |
| 498 | 496 | ||
| 499 | /* This part must be outside protection */ | 497 | /* This part must be outside protection */ |
| 498 | |||
| 499 | #undef TRACE_INCLUDE_PATH | ||
| 500 | #undef TRACE_INCLUDE_FILE | ||
| 501 | |||
| 502 | #define TRACE_INCLUDE_PATH . | ||
| 503 | #define TRACE_INCLUDE_FILE trace_hv | ||
| 504 | |||
| 500 | #include <trace/define_trace.h> | 505 | #include <trace/define_trace.h> |
diff --git a/arch/powerpc/kvm/trace_pr.h b/arch/powerpc/kvm/trace_pr.h index 2f9a8829552b..46a46d328fbf 100644 --- a/arch/powerpc/kvm/trace_pr.h +++ b/arch/powerpc/kvm/trace_pr.h | |||
| @@ -8,8 +8,6 @@ | |||
| 8 | 8 | ||
| 9 | #undef TRACE_SYSTEM | 9 | #undef TRACE_SYSTEM |
| 10 | #define TRACE_SYSTEM kvm_pr | 10 | #define TRACE_SYSTEM kvm_pr |
| 11 | #define TRACE_INCLUDE_PATH . | ||
| 12 | #define TRACE_INCLUDE_FILE trace_pr | ||
| 13 | 11 | ||
| 14 | TRACE_EVENT(kvm_book3s_reenter, | 12 | TRACE_EVENT(kvm_book3s_reenter, |
| 15 | TP_PROTO(int r, struct kvm_vcpu *vcpu), | 13 | TP_PROTO(int r, struct kvm_vcpu *vcpu), |
| @@ -257,4 +255,11 @@ TRACE_EVENT(kvm_exit, | |||
| 257 | #endif /* _TRACE_KVM_H */ | 255 | #endif /* _TRACE_KVM_H */ |
| 258 | 256 | ||
| 259 | /* This part must be outside protection */ | 257 | /* This part must be outside protection */ |
| 258 | |||
| 259 | #undef TRACE_INCLUDE_PATH | ||
| 260 | #undef TRACE_INCLUDE_FILE | ||
| 261 | |||
| 262 | #define TRACE_INCLUDE_PATH . | ||
| 263 | #define TRACE_INCLUDE_FILE trace_pr | ||
| 264 | |||
| 260 | #include <trace/define_trace.h> | 265 | #include <trace/define_trace.h> |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 3a048e98a132..ce28ae5ca080 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
| @@ -1178,7 +1178,7 @@ static long vphn_get_associativity(unsigned long cpu, | |||
| 1178 | 1178 | ||
| 1179 | switch (rc) { | 1179 | switch (rc) { |
| 1180 | case H_FUNCTION: | 1180 | case H_FUNCTION: |
| 1181 | printk(KERN_INFO | 1181 | printk_once(KERN_INFO |
| 1182 | "VPHN is not supported. Disabling polling...\n"); | 1182 | "VPHN is not supported. Disabling polling...\n"); |
| 1183 | stop_topology_update(); | 1183 | stop_topology_update(); |
| 1184 | break; | 1184 | break; |
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index c3fdf2969d9f..bc3914d54e26 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c | |||
| @@ -19,6 +19,7 @@ | |||
| 19 | #include <asm/mmu.h> | 19 | #include <asm/mmu.h> |
| 20 | #include <asm/mmu_context.h> | 20 | #include <asm/mmu_context.h> |
| 21 | #include <asm/paca.h> | 21 | #include <asm/paca.h> |
| 22 | #include <asm/ppc-opcode.h> | ||
| 22 | #include <asm/cputable.h> | 23 | #include <asm/cputable.h> |
| 23 | #include <asm/cacheflush.h> | 24 | #include <asm/cacheflush.h> |
| 24 | #include <asm/smp.h> | 25 | #include <asm/smp.h> |
| @@ -58,27 +59,19 @@ static inline unsigned long mk_vsid_data(unsigned long ea, int ssize, | |||
| 58 | return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); | 59 | return __mk_vsid_data(get_kernel_vsid(ea, ssize), ssize, flags); |
| 59 | } | 60 | } |
| 60 | 61 | ||
| 61 | static void assert_slb_exists(unsigned long ea) | 62 | static void assert_slb_presence(bool present, unsigned long ea) |
| 62 | { | 63 | { |
| 63 | #ifdef CONFIG_DEBUG_VM | 64 | #ifdef CONFIG_DEBUG_VM |
| 64 | unsigned long tmp; | 65 | unsigned long tmp; |
| 65 | 66 | ||
| 66 | WARN_ON_ONCE(mfmsr() & MSR_EE); | 67 | WARN_ON_ONCE(mfmsr() & MSR_EE); |
| 67 | 68 | ||
| 68 | asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0"); | 69 | if (!cpu_has_feature(CPU_FTR_ARCH_206)) |
| 69 | WARN_ON(tmp == 0); | 70 | return; |
| 70 | #endif | ||
| 71 | } | ||
| 72 | |||
| 73 | static void assert_slb_notexists(unsigned long ea) | ||
| 74 | { | ||
| 75 | #ifdef CONFIG_DEBUG_VM | ||
| 76 | unsigned long tmp; | ||
| 77 | 71 | ||
| 78 | WARN_ON_ONCE(mfmsr() & MSR_EE); | 72 | asm volatile(__PPC_SLBFEE_DOT(%0, %1) : "=r"(tmp) : "r"(ea) : "cr0"); |
| 79 | 73 | ||
| 80 | asm volatile("slbfee. %0, %1" : "=r"(tmp) : "r"(ea) : "cr0"); | 74 | WARN_ON(present == (tmp == 0)); |
| 81 | WARN_ON(tmp != 0); | ||
| 82 | #endif | 75 | #endif |
| 83 | } | 76 | } |
| 84 | 77 | ||
| @@ -114,7 +107,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize, | |||
| 114 | */ | 107 | */ |
| 115 | slb_shadow_update(ea, ssize, flags, index); | 108 | slb_shadow_update(ea, ssize, flags, index); |
| 116 | 109 | ||
| 117 | assert_slb_notexists(ea); | 110 | assert_slb_presence(false, ea); |
| 118 | asm volatile("slbmte %0,%1" : | 111 | asm volatile("slbmte %0,%1" : |
| 119 | : "r" (mk_vsid_data(ea, ssize, flags)), | 112 | : "r" (mk_vsid_data(ea, ssize, flags)), |
| 120 | "r" (mk_esid_data(ea, ssize, index)) | 113 | "r" (mk_esid_data(ea, ssize, index)) |
| @@ -137,7 +130,7 @@ void __slb_restore_bolted_realmode(void) | |||
| 137 | "r" (be64_to_cpu(p->save_area[index].esid))); | 130 | "r" (be64_to_cpu(p->save_area[index].esid))); |
| 138 | } | 131 | } |
| 139 | 132 | ||
| 140 | assert_slb_exists(local_paca->kstack); | 133 | assert_slb_presence(true, local_paca->kstack); |
| 141 | } | 134 | } |
| 142 | 135 | ||
| 143 | /* | 136 | /* |
| @@ -185,7 +178,7 @@ void slb_flush_and_restore_bolted(void) | |||
| 185 | :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)), | 178 | :: "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].vsid)), |
| 186 | "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid)) | 179 | "r" (be64_to_cpu(p->save_area[KSTACK_INDEX].esid)) |
| 187 | : "memory"); | 180 | : "memory"); |
| 188 | assert_slb_exists(get_paca()->kstack); | 181 | assert_slb_presence(true, get_paca()->kstack); |
| 189 | 182 | ||
| 190 | get_paca()->slb_cache_ptr = 0; | 183 | get_paca()->slb_cache_ptr = 0; |
| 191 | 184 | ||
| @@ -443,9 +436,9 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | |||
| 443 | ea = (unsigned long) | 436 | ea = (unsigned long) |
| 444 | get_paca()->slb_cache[i] << SID_SHIFT; | 437 | get_paca()->slb_cache[i] << SID_SHIFT; |
| 445 | /* | 438 | /* |
| 446 | * Could assert_slb_exists here, but hypervisor | 439 | * Could assert_slb_presence(true) here, but |
| 447 | * or machine check could have come in and | 440 | * hypervisor or machine check could have come |
| 448 | * removed the entry at this point. | 441 | * in and removed the entry at this point. |
| 449 | */ | 442 | */ |
| 450 | 443 | ||
| 451 | slbie_data = ea; | 444 | slbie_data = ea; |
| @@ -676,7 +669,7 @@ static long slb_insert_entry(unsigned long ea, unsigned long context, | |||
| 676 | * User preloads should add isync afterwards in case the kernel | 669 | * User preloads should add isync afterwards in case the kernel |
| 677 | * accesses user memory before it returns to userspace with rfid. | 670 | * accesses user memory before it returns to userspace with rfid. |
| 678 | */ | 671 | */ |
| 679 | assert_slb_notexists(ea); | 672 | assert_slb_presence(false, ea); |
| 680 | asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)); | 673 | asm volatile("slbmte %0, %1" : : "r" (vsid_data), "r" (esid_data)); |
| 681 | 674 | ||
| 682 | barrier(); | 675 | barrier(); |
| @@ -715,7 +708,7 @@ static long slb_allocate_kernel(unsigned long ea, unsigned long id) | |||
| 715 | return -EFAULT; | 708 | return -EFAULT; |
| 716 | 709 | ||
| 717 | if (ea < H_VMALLOC_END) | 710 | if (ea < H_VMALLOC_END) |
| 718 | flags = get_paca()->vmalloc_sllp; | 711 | flags = local_paca->vmalloc_sllp; |
| 719 | else | 712 | else |
| 720 | flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp; | 713 | flags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_io_psize].sllp; |
| 721 | } else { | 714 | } else { |
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c index 50b129785aee..17482f5de3e2 100644 --- a/arch/powerpc/net/bpf_jit_comp64.c +++ b/arch/powerpc/net/bpf_jit_comp64.c | |||
| @@ -166,7 +166,33 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx) | |||
| 166 | PPC_BLR(); | 166 | PPC_BLR(); |
| 167 | } | 167 | } |
| 168 | 168 | ||
| 169 | static void bpf_jit_emit_func_call(u32 *image, struct codegen_context *ctx, u64 func) | 169 | static void bpf_jit_emit_func_call_hlp(u32 *image, struct codegen_context *ctx, |
| 170 | u64 func) | ||
| 171 | { | ||
| 172 | #ifdef PPC64_ELF_ABI_v1 | ||
| 173 | /* func points to the function descriptor */ | ||
| 174 | PPC_LI64(b2p[TMP_REG_2], func); | ||
| 175 | /* Load actual entry point from function descriptor */ | ||
| 176 | PPC_BPF_LL(b2p[TMP_REG_1], b2p[TMP_REG_2], 0); | ||
| 177 | /* ... and move it to LR */ | ||
| 178 | PPC_MTLR(b2p[TMP_REG_1]); | ||
| 179 | /* | ||
| 180 | * Load TOC from function descriptor at offset 8. | ||
| 181 | * We can clobber r2 since we get called through a | ||
| 182 | * function pointer (so caller will save/restore r2) | ||
| 183 | * and since we don't use a TOC ourself. | ||
| 184 | */ | ||
| 185 | PPC_BPF_LL(2, b2p[TMP_REG_2], 8); | ||
| 186 | #else | ||
| 187 | /* We can clobber r12 */ | ||
| 188 | PPC_FUNC_ADDR(12, func); | ||
| 189 | PPC_MTLR(12); | ||
| 190 | #endif | ||
| 191 | PPC_BLRL(); | ||
| 192 | } | ||
| 193 | |||
| 194 | static void bpf_jit_emit_func_call_rel(u32 *image, struct codegen_context *ctx, | ||
| 195 | u64 func) | ||
| 170 | { | 196 | { |
| 171 | unsigned int i, ctx_idx = ctx->idx; | 197 | unsigned int i, ctx_idx = ctx->idx; |
| 172 | 198 | ||
| @@ -273,7 +299,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, | |||
| 273 | { | 299 | { |
| 274 | const struct bpf_insn *insn = fp->insnsi; | 300 | const struct bpf_insn *insn = fp->insnsi; |
| 275 | int flen = fp->len; | 301 | int flen = fp->len; |
| 276 | int i; | 302 | int i, ret; |
| 277 | 303 | ||
| 278 | /* Start of epilogue code - will only be valid 2nd pass onwards */ | 304 | /* Start of epilogue code - will only be valid 2nd pass onwards */ |
| 279 | u32 exit_addr = addrs[flen]; | 305 | u32 exit_addr = addrs[flen]; |
| @@ -284,8 +310,9 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image, | |||
| 284 | u32 src_reg = b2p[insn[i].src_reg]; | 310 | u32 src_reg = b2p[insn[i].src_reg]; |
| 285 | s16 off = insn[i].off; | 311 | s16 off = insn[i].off; |
| 286 | s32 imm = insn[i].imm; | 312 | s32 imm = insn[i].imm; |
| 313 | bool func_addr_fixed; | ||
| 314 | u64 func_addr; | ||
| 287 | u64 imm64; | 315 | u64 imm64; |
| 288 | u8 *func; | ||
| 289 | u32 true_cond; | 316 | u32 true_cond; |
| 290 | u32 tmp_idx; | 317 | u32 tmp_idx; |
| 291 | 318 | ||
| @@ -711,23 +738,15 @@ emit_clear: | |||
| 711 | case BPF_JMP | BPF_CALL: | 738 | case BPF_JMP | BPF_CALL: |
| 712 | ctx->seen |= SEEN_FUNC; | 739 | ctx->seen |= SEEN_FUNC; |
| 713 | 740 | ||
| 714 | /* bpf function call */ | 741 | ret = bpf_jit_get_func_addr(fp, &insn[i], extra_pass, |
| 715 | if (insn[i].src_reg == BPF_PSEUDO_CALL) | 742 | &func_addr, &func_addr_fixed); |
| 716 | if (!extra_pass) | 743 | if (ret < 0) |
| 717 | func = NULL; | 744 | return ret; |
| 718 | else if (fp->aux->func && off < fp->aux->func_cnt) | ||
| 719 | /* use the subprog id from the off | ||
| 720 | * field to lookup the callee address | ||
| 721 | */ | ||
| 722 | func = (u8 *) fp->aux->func[off]->bpf_func; | ||
| 723 | else | ||
| 724 | return -EINVAL; | ||
| 725 | /* kernel helper call */ | ||
| 726 | else | ||
| 727 | func = (u8 *) __bpf_call_base + imm; | ||
| 728 | |||
| 729 | bpf_jit_emit_func_call(image, ctx, (u64)func); | ||
| 730 | 745 | ||
| 746 | if (func_addr_fixed) | ||
| 747 | bpf_jit_emit_func_call_hlp(image, ctx, func_addr); | ||
| 748 | else | ||
| 749 | bpf_jit_emit_func_call_rel(image, ctx, func_addr); | ||
| 731 | /* move return value from r3 to BPF_REG_0 */ | 750 | /* move return value from r3 to BPF_REG_0 */ |
| 732 | PPC_MR(b2p[BPF_REG_0], 3); | 751 | PPC_MR(b2p[BPF_REG_0], 3); |
| 733 | break; | 752 | break; |
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c index 6f60e0931922..75b935252981 100644 --- a/arch/powerpc/platforms/powernv/npu-dma.c +++ b/arch/powerpc/platforms/powernv/npu-dma.c | |||
| @@ -102,63 +102,6 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index) | |||
| 102 | } | 102 | } |
| 103 | EXPORT_SYMBOL(pnv_pci_get_npu_dev); | 103 | EXPORT_SYMBOL(pnv_pci_get_npu_dev); |
| 104 | 104 | ||
| 105 | #define NPU_DMA_OP_UNSUPPORTED() \ | ||
| 106 | dev_err_once(dev, "%s operation unsupported for NVLink devices\n", \ | ||
| 107 | __func__) | ||
| 108 | |||
| 109 | static void *dma_npu_alloc(struct device *dev, size_t size, | ||
| 110 | dma_addr_t *dma_handle, gfp_t flag, | ||
| 111 | unsigned long attrs) | ||
| 112 | { | ||
| 113 | NPU_DMA_OP_UNSUPPORTED(); | ||
| 114 | return NULL; | ||
| 115 | } | ||
| 116 | |||
| 117 | static void dma_npu_free(struct device *dev, size_t size, | ||
| 118 | void *vaddr, dma_addr_t dma_handle, | ||
| 119 | unsigned long attrs) | ||
| 120 | { | ||
| 121 | NPU_DMA_OP_UNSUPPORTED(); | ||
| 122 | } | ||
| 123 | |||
| 124 | static dma_addr_t dma_npu_map_page(struct device *dev, struct page *page, | ||
| 125 | unsigned long offset, size_t size, | ||
| 126 | enum dma_data_direction direction, | ||
| 127 | unsigned long attrs) | ||
| 128 | { | ||
| 129 | NPU_DMA_OP_UNSUPPORTED(); | ||
| 130 | return 0; | ||
| 131 | } | ||
| 132 | |||
| 133 | static int dma_npu_map_sg(struct device *dev, struct scatterlist *sglist, | ||
| 134 | int nelems, enum dma_data_direction direction, | ||
| 135 | unsigned long attrs) | ||
| 136 | { | ||
| 137 | NPU_DMA_OP_UNSUPPORTED(); | ||
| 138 | return 0; | ||
| 139 | } | ||
| 140 | |||
| 141 | static int dma_npu_dma_supported(struct device *dev, u64 mask) | ||
| 142 | { | ||
| 143 | NPU_DMA_OP_UNSUPPORTED(); | ||
| 144 | return 0; | ||
| 145 | } | ||
| 146 | |||
| 147 | static u64 dma_npu_get_required_mask(struct device *dev) | ||
| 148 | { | ||
| 149 | NPU_DMA_OP_UNSUPPORTED(); | ||
| 150 | return 0; | ||
| 151 | } | ||
| 152 | |||
| 153 | static const struct dma_map_ops dma_npu_ops = { | ||
| 154 | .map_page = dma_npu_map_page, | ||
| 155 | .map_sg = dma_npu_map_sg, | ||
| 156 | .alloc = dma_npu_alloc, | ||
| 157 | .free = dma_npu_free, | ||
| 158 | .dma_supported = dma_npu_dma_supported, | ||
| 159 | .get_required_mask = dma_npu_get_required_mask, | ||
| 160 | }; | ||
| 161 | |||
| 162 | /* | 105 | /* |
| 163 | * Returns the PE assoicated with the PCI device of the given | 106 | * Returns the PE assoicated with the PCI device of the given |
| 164 | * NPU. Returns the linked pci device if pci_dev != NULL. | 107 | * NPU. Returns the linked pci device if pci_dev != NULL. |
| @@ -270,10 +213,11 @@ static void pnv_npu_dma_set_32(struct pnv_ioda_pe *npe) | |||
| 270 | rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]); | 213 | rc = pnv_npu_set_window(npe, 0, gpe->table_group.tables[0]); |
| 271 | 214 | ||
| 272 | /* | 215 | /* |
| 273 | * We don't initialise npu_pe->tce32_table as we always use | 216 | * NVLink devices use the same TCE table configuration as |
| 274 | * dma_npu_ops which are nops. | 217 | * their parent device so drivers shouldn't be doing DMA |
| 218 | * operations directly on these devices. | ||
| 275 | */ | 219 | */ |
| 276 | set_dma_ops(&npe->pdev->dev, &dma_npu_ops); | 220 | set_dma_ops(&npe->pdev->dev, NULL); |
| 277 | } | 221 | } |
| 278 | 222 | ||
| 279 | /* | 223 | /* |
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index d10146197533..4b594f2e4f7e 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile | |||
| @@ -71,10 +71,27 @@ KBUILD_CFLAGS += $(call cc-option,-mstrict-align) | |||
| 71 | # arch specific predefines for sparse | 71 | # arch specific predefines for sparse |
| 72 | CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) | 72 | CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS) |
| 73 | 73 | ||
| 74 | # Default target when executing plain make | ||
| 75 | boot := arch/riscv/boot | ||
| 76 | KBUILD_IMAGE := $(boot)/Image.gz | ||
| 77 | |||
| 74 | head-y := arch/riscv/kernel/head.o | 78 | head-y := arch/riscv/kernel/head.o |
| 75 | 79 | ||
| 76 | core-y += arch/riscv/kernel/ arch/riscv/mm/ | 80 | core-y += arch/riscv/kernel/ arch/riscv/mm/ |
| 77 | 81 | ||
| 78 | libs-y += arch/riscv/lib/ | 82 | libs-y += arch/riscv/lib/ |
| 79 | 83 | ||
| 80 | all: vmlinux | 84 | PHONY += vdso_install |
| 85 | vdso_install: | ||
| 86 | $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@ | ||
| 87 | |||
| 88 | all: Image.gz | ||
| 89 | |||
| 90 | Image: vmlinux | ||
| 91 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | ||
| 92 | |||
| 93 | Image.%: Image | ||
| 94 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | ||
| 95 | |||
| 96 | zinstall install: | ||
| 97 | $(Q)$(MAKE) $(build)=$(boot) $@ | ||
diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore new file mode 100644 index 000000000000..8dab0bb6ae66 --- /dev/null +++ b/arch/riscv/boot/.gitignore | |||
| @@ -0,0 +1,2 @@ | |||
| 1 | Image | ||
| 2 | Image.gz | ||
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile new file mode 100644 index 000000000000..0990a9fdbe5d --- /dev/null +++ b/arch/riscv/boot/Makefile | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | # | ||
| 2 | # arch/riscv/boot/Makefile | ||
| 3 | # | ||
| 4 | # This file is included by the global makefile so that you can add your own | ||
| 5 | # architecture-specific flags and dependencies. | ||
| 6 | # | ||
| 7 | # This file is subject to the terms and conditions of the GNU General Public | ||
| 8 | # License. See the file "COPYING" in the main directory of this archive | ||
| 9 | # for more details. | ||
| 10 | # | ||
| 11 | # Copyright (C) 2018, Anup Patel. | ||
| 12 | # Author: Anup Patel <anup@brainfault.org> | ||
| 13 | # | ||
| 14 | # Based on the ia64 and arm64 boot/Makefile. | ||
| 15 | # | ||
| 16 | |||
| 17 | OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S | ||
| 18 | |||
| 19 | targets := Image | ||
| 20 | |||
| 21 | $(obj)/Image: vmlinux FORCE | ||
| 22 | $(call if_changed,objcopy) | ||
| 23 | |||
| 24 | $(obj)/Image.gz: $(obj)/Image FORCE | ||
| 25 | $(call if_changed,gzip) | ||
| 26 | |||
| 27 | install: | ||
| 28 | $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ | ||
| 29 | $(obj)/Image System.map "$(INSTALL_PATH)" | ||
| 30 | |||
| 31 | zinstall: | ||
| 32 | $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \ | ||
| 33 | $(obj)/Image.gz System.map "$(INSTALL_PATH)" | ||
diff --git a/arch/riscv/boot/install.sh b/arch/riscv/boot/install.sh new file mode 100644 index 000000000000..18c39159c0ff --- /dev/null +++ b/arch/riscv/boot/install.sh | |||
| @@ -0,0 +1,60 @@ | |||
| 1 | #!/bin/sh | ||
| 2 | # | ||
| 3 | # arch/riscv/boot/install.sh | ||
| 4 | # | ||
| 5 | # This file is subject to the terms and conditions of the GNU General Public | ||
| 6 | # License. See the file "COPYING" in the main directory of this archive | ||
| 7 | # for more details. | ||
| 8 | # | ||
| 9 | # Copyright (C) 1995 by Linus Torvalds | ||
| 10 | # | ||
| 11 | # Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin | ||
| 12 | # Adapted from code in arch/i386/boot/install.sh by Russell King | ||
| 13 | # | ||
| 14 | # "make install" script for the RISC-V Linux port | ||
| 15 | # | ||
| 16 | # Arguments: | ||
| 17 | # $1 - kernel version | ||
| 18 | # $2 - kernel image file | ||
| 19 | # $3 - kernel map file | ||
| 20 | # $4 - default install path (blank if root directory) | ||
| 21 | # | ||
| 22 | |||
| 23 | verify () { | ||
| 24 | if [ ! -f "$1" ]; then | ||
| 25 | echo "" 1>&2 | ||
| 26 | echo " *** Missing file: $1" 1>&2 | ||
| 27 | echo ' *** You need to run "make" before "make install".' 1>&2 | ||
| 28 | echo "" 1>&2 | ||
| 29 | exit 1 | ||
| 30 | fi | ||
| 31 | } | ||
| 32 | |||
| 33 | # Make sure the files actually exist | ||
| 34 | verify "$2" | ||
| 35 | verify "$3" | ||
| 36 | |||
| 37 | # User may have a custom install script | ||
| 38 | if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi | ||
| 39 | if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi | ||
| 40 | |||
| 41 | if [ "$(basename $2)" = "Image.gz" ]; then | ||
| 42 | # Compressed install | ||
| 43 | echo "Installing compressed kernel" | ||
| 44 | base=vmlinuz | ||
| 45 | else | ||
| 46 | # Normal install | ||
| 47 | echo "Installing normal kernel" | ||
| 48 | base=vmlinux | ||
| 49 | fi | ||
| 50 | |||
| 51 | if [ -f $4/$base-$1 ]; then | ||
| 52 | mv $4/$base-$1 $4/$base-$1.old | ||
| 53 | fi | ||
| 54 | cat $2 > $4/$base-$1 | ||
| 55 | |||
| 56 | # Install system map file | ||
| 57 | if [ -f $4/System.map-$1 ]; then | ||
| 58 | mv $4/System.map-$1 $4/System.map-$1.old | ||
| 59 | fi | ||
| 60 | cp $3 $4/System.map-$1 | ||
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 07fa9ea75fea..ef4f15df9adf 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig | |||
| @@ -76,4 +76,5 @@ CONFIG_NFS_V4_1=y | |||
| 76 | CONFIG_NFS_V4_2=y | 76 | CONFIG_NFS_V4_2=y |
| 77 | CONFIG_ROOT_NFS=y | 77 | CONFIG_ROOT_NFS=y |
| 78 | CONFIG_CRYPTO_USER_API_HASH=y | 78 | CONFIG_CRYPTO_USER_API_HASH=y |
| 79 | CONFIG_PRINTK_TIME=y | ||
| 79 | # CONFIG_RCU_TRACE is not set | 80 | # CONFIG_RCU_TRACE is not set |
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h index 349df33808c4..cd2af4b013e3 100644 --- a/arch/riscv/include/asm/module.h +++ b/arch/riscv/include/asm/module.h | |||
| @@ -8,6 +8,7 @@ | |||
| 8 | 8 | ||
| 9 | #define MODULE_ARCH_VERMAGIC "riscv" | 9 | #define MODULE_ARCH_VERMAGIC "riscv" |
| 10 | 10 | ||
| 11 | struct module; | ||
| 11 | u64 module_emit_got_entry(struct module *mod, u64 val); | 12 | u64 module_emit_got_entry(struct module *mod, u64 val); |
| 12 | u64 module_emit_plt_entry(struct module *mod, u64 val); | 13 | u64 module_emit_plt_entry(struct module *mod, u64 val); |
| 13 | 14 | ||
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h index 2c5df945d43c..bbe1862e8f80 100644 --- a/arch/riscv/include/asm/ptrace.h +++ b/arch/riscv/include/asm/ptrace.h | |||
| @@ -56,8 +56,8 @@ struct pt_regs { | |||
| 56 | unsigned long sstatus; | 56 | unsigned long sstatus; |
| 57 | unsigned long sbadaddr; | 57 | unsigned long sbadaddr; |
| 58 | unsigned long scause; | 58 | unsigned long scause; |
| 59 | /* a0 value before the syscall */ | 59 | /* a0 value before the syscall */ |
| 60 | unsigned long orig_a0; | 60 | unsigned long orig_a0; |
| 61 | }; | 61 | }; |
| 62 | 62 | ||
| 63 | #ifdef CONFIG_64BIT | 63 | #ifdef CONFIG_64BIT |
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index 473cfc84e412..8c3e3e3c8be1 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h | |||
| @@ -400,13 +400,13 @@ extern unsigned long __must_check __asm_copy_from_user(void *to, | |||
| 400 | static inline unsigned long | 400 | static inline unsigned long |
| 401 | raw_copy_from_user(void *to, const void __user *from, unsigned long n) | 401 | raw_copy_from_user(void *to, const void __user *from, unsigned long n) |
| 402 | { | 402 | { |
| 403 | return __asm_copy_to_user(to, from, n); | 403 | return __asm_copy_from_user(to, from, n); |
| 404 | } | 404 | } |
| 405 | 405 | ||
| 406 | static inline unsigned long | 406 | static inline unsigned long |
| 407 | raw_copy_to_user(void __user *to, const void *from, unsigned long n) | 407 | raw_copy_to_user(void __user *to, const void *from, unsigned long n) |
| 408 | { | 408 | { |
| 409 | return __asm_copy_from_user(to, from, n); | 409 | return __asm_copy_to_user(to, from, n); |
| 410 | } | 410 | } |
| 411 | 411 | ||
| 412 | extern long strncpy_from_user(char *dest, const char __user *src, long count); | 412 | extern long strncpy_from_user(char *dest, const char __user *src, long count); |
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h index eff7aa9aa163..fef96f117b4d 100644 --- a/arch/riscv/include/asm/unistd.h +++ b/arch/riscv/include/asm/unistd.h | |||
| @@ -13,10 +13,9 @@ | |||
| 13 | 13 | ||
| 14 | /* | 14 | /* |
| 15 | * There is explicitly no include guard here because this file is expected to | 15 | * There is explicitly no include guard here because this file is expected to |
| 16 | * be included multiple times. See uapi/asm/syscalls.h for more info. | 16 | * be included multiple times. |
| 17 | */ | 17 | */ |
| 18 | 18 | ||
| 19 | #define __ARCH_WANT_NEW_STAT | ||
| 20 | #define __ARCH_WANT_SYS_CLONE | 19 | #define __ARCH_WANT_SYS_CLONE |
| 20 | |||
| 21 | #include <uapi/asm/unistd.h> | 21 | #include <uapi/asm/unistd.h> |
| 22 | #include <uapi/asm/syscalls.h> | ||
diff --git a/arch/riscv/include/uapi/asm/syscalls.h b/arch/riscv/include/uapi/asm/unistd.h index 206dc4b0f6ea..1f3bd3ebbb0d 100644 --- a/arch/riscv/include/uapi/asm/syscalls.h +++ b/arch/riscv/include/uapi/asm/unistd.h | |||
| @@ -1,13 +1,25 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ |
| 2 | /* | 2 | /* |
| 3 | * Copyright (C) 2017-2018 SiFive | 3 | * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com> |
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License version 2 as | ||
| 7 | * published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
| 4 | */ | 16 | */ |
| 5 | 17 | ||
| 6 | /* | 18 | #ifdef __LP64__ |
| 7 | * There is explicitly no include guard here because this file is expected to | 19 | #define __ARCH_WANT_NEW_STAT |
| 8 | * be included multiple times in order to define the syscall macros via | 20 | #endif /* __LP64__ */ |
| 9 | * __SYSCALL. | 21 | |
| 10 | */ | 22 | #include <asm-generic/unistd.h> |
| 11 | 23 | ||
| 12 | /* | 24 | /* |
| 13 | * Allows the instruction cache to be flushed from userspace. Despite RISC-V | 25 | * Allows the instruction cache to be flushed from userspace. Despite RISC-V |
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index 3a5a2ee31547..b4a7d4427fbb 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c | |||
| @@ -64,7 +64,7 @@ int riscv_of_processor_hartid(struct device_node *node) | |||
| 64 | 64 | ||
| 65 | static void print_isa(struct seq_file *f, const char *orig_isa) | 65 | static void print_isa(struct seq_file *f, const char *orig_isa) |
| 66 | { | 66 | { |
| 67 | static const char *ext = "mafdc"; | 67 | static const char *ext = "mafdcsu"; |
| 68 | const char *isa = orig_isa; | 68 | const char *isa = orig_isa; |
| 69 | const char *e; | 69 | const char *e; |
| 70 | 70 | ||
| @@ -88,11 +88,14 @@ static void print_isa(struct seq_file *f, const char *orig_isa) | |||
| 88 | /* | 88 | /* |
| 89 | * Check the rest of the ISA string for valid extensions, printing those | 89 | * Check the rest of the ISA string for valid extensions, printing those |
| 90 | * we find. RISC-V ISA strings define an order, so we only print the | 90 | * we find. RISC-V ISA strings define an order, so we only print the |
| 91 | * extension bits when they're in order. | 91 | * extension bits when they're in order. Hide the supervisor (S) |
| 92 | * extension from userspace as it's not accessible from there. | ||
| 92 | */ | 93 | */ |
| 93 | for (e = ext; *e != '\0'; ++e) { | 94 | for (e = ext; *e != '\0'; ++e) { |
| 94 | if (isa[0] == e[0]) { | 95 | if (isa[0] == e[0]) { |
| 95 | seq_write(f, isa, 1); | 96 | if (isa[0] != 's') |
| 97 | seq_write(f, isa, 1); | ||
| 98 | |||
| 96 | isa++; | 99 | isa++; |
| 97 | } | 100 | } |
| 98 | } | 101 | } |
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 1157b6b52d25..c433f6d3dd64 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c | |||
| @@ -132,7 +132,6 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
| 132 | { | 132 | { |
| 133 | unsigned long return_hooker = (unsigned long)&return_to_handler; | 133 | unsigned long return_hooker = (unsigned long)&return_to_handler; |
| 134 | unsigned long old; | 134 | unsigned long old; |
| 135 | struct ftrace_graph_ent trace; | ||
| 136 | int err; | 135 | int err; |
| 137 | 136 | ||
| 138 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 137 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
| @@ -144,17 +143,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
| 144 | */ | 143 | */ |
| 145 | old = *parent; | 144 | old = *parent; |
| 146 | 145 | ||
| 147 | trace.func = self_addr; | 146 | if (function_graph_enter(old, self_addr, frame_pointer, parent)) |
| 148 | trace.depth = current->curr_ret_stack + 1; | 147 | *parent = return_hooker; |
| 149 | |||
| 150 | if (!ftrace_graph_entry(&trace)) | ||
| 151 | return; | ||
| 152 | |||
| 153 | err = ftrace_push_return_trace(old, self_addr, &trace.depth, | ||
| 154 | frame_pointer, parent); | ||
| 155 | if (err == -EBUSY) | ||
| 156 | return; | ||
| 157 | *parent = return_hooker; | ||
| 158 | } | 148 | } |
| 159 | 149 | ||
| 160 | #ifdef CONFIG_DYNAMIC_FTRACE | 150 | #ifdef CONFIG_DYNAMIC_FTRACE |
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 711190d473d4..fe884cd69abd 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S | |||
| @@ -44,6 +44,16 @@ ENTRY(_start) | |||
| 44 | amoadd.w a3, a2, (a3) | 44 | amoadd.w a3, a2, (a3) |
| 45 | bnez a3, .Lsecondary_start | 45 | bnez a3, .Lsecondary_start |
| 46 | 46 | ||
| 47 | /* Clear BSS for flat non-ELF images */ | ||
| 48 | la a3, __bss_start | ||
| 49 | la a4, __bss_stop | ||
| 50 | ble a4, a3, clear_bss_done | ||
| 51 | clear_bss: | ||
| 52 | REG_S zero, (a3) | ||
| 53 | add a3, a3, RISCV_SZPTR | ||
| 54 | blt a3, a4, clear_bss | ||
| 55 | clear_bss_done: | ||
| 56 | |||
| 47 | /* Save hart ID and DTB physical address */ | 57 | /* Save hart ID and DTB physical address */ |
| 48 | mv s0, a0 | 58 | mv s0, a0 |
| 49 | mv s1, a1 | 59 | mv s1, a1 |
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 3303ed2cd419..7dd308129b40 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c | |||
| @@ -21,7 +21,7 @@ static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) | |||
| 21 | { | 21 | { |
| 22 | if (v != (u32)v) { | 22 | if (v != (u32)v) { |
| 23 | pr_err("%s: value %016llx out of range for 32-bit field\n", | 23 | pr_err("%s: value %016llx out of range for 32-bit field\n", |
| 24 | me->name, v); | 24 | me->name, (long long)v); |
| 25 | return -EINVAL; | 25 | return -EINVAL; |
| 26 | } | 26 | } |
| 27 | *location = v; | 27 | *location = v; |
| @@ -102,7 +102,7 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, | |||
| 102 | if (offset != (s32)offset) { | 102 | if (offset != (s32)offset) { |
| 103 | pr_err( | 103 | pr_err( |
| 104 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", | 104 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", |
| 105 | me->name, v, location); | 105 | me->name, (long long)v, location); |
| 106 | return -EINVAL; | 106 | return -EINVAL; |
| 107 | } | 107 | } |
| 108 | 108 | ||
| @@ -144,7 +144,7 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location, | |||
| 144 | if (IS_ENABLED(CMODEL_MEDLOW)) { | 144 | if (IS_ENABLED(CMODEL_MEDLOW)) { |
| 145 | pr_err( | 145 | pr_err( |
| 146 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", | 146 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", |
| 147 | me->name, v, location); | 147 | me->name, (long long)v, location); |
| 148 | return -EINVAL; | 148 | return -EINVAL; |
| 149 | } | 149 | } |
| 150 | 150 | ||
| @@ -188,7 +188,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, | |||
| 188 | } else { | 188 | } else { |
| 189 | pr_err( | 189 | pr_err( |
| 190 | "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n", | 190 | "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n", |
| 191 | me->name, v, location); | 191 | me->name, (long long)v, location); |
| 192 | return -EINVAL; | 192 | return -EINVAL; |
| 193 | } | 193 | } |
| 194 | 194 | ||
| @@ -212,7 +212,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, | |||
| 212 | } else { | 212 | } else { |
| 213 | pr_err( | 213 | pr_err( |
| 214 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", | 214 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", |
| 215 | me->name, v, location); | 215 | me->name, (long long)v, location); |
| 216 | return -EINVAL; | 216 | return -EINVAL; |
| 217 | } | 217 | } |
| 218 | } | 218 | } |
| @@ -234,7 +234,7 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location, | |||
| 234 | if (offset != fill_v) { | 234 | if (offset != fill_v) { |
| 235 | pr_err( | 235 | pr_err( |
| 236 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", | 236 | "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", |
| 237 | me->name, v, location); | 237 | me->name, (long long)v, location); |
| 238 | return -EINVAL; | 238 | return -EINVAL; |
| 239 | } | 239 | } |
| 240 | 240 | ||
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S index ece84991609c..65df1dfdc303 100644 --- a/arch/riscv/kernel/vmlinux.lds.S +++ b/arch/riscv/kernel/vmlinux.lds.S | |||
| @@ -74,7 +74,7 @@ SECTIONS | |||
| 74 | *(.sbss*) | 74 | *(.sbss*) |
| 75 | } | 75 | } |
| 76 | 76 | ||
| 77 | BSS_SECTION(0, 0, 0) | 77 | BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0) |
| 78 | 78 | ||
| 79 | EXCEPTION_TABLE(0x10) | 79 | EXCEPTION_TABLE(0x10) |
| 80 | NOTES | 80 | NOTES |
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile index 5739bd05d289..4e2e600f7d53 100644 --- a/arch/riscv/lib/Makefile +++ b/arch/riscv/lib/Makefile | |||
| @@ -3,6 +3,6 @@ lib-y += memcpy.o | |||
| 3 | lib-y += memset.o | 3 | lib-y += memset.o |
| 4 | lib-y += uaccess.o | 4 | lib-y += uaccess.o |
| 5 | 5 | ||
| 6 | lib-(CONFIG_64BIT) += tishift.o | 6 | lib-$(CONFIG_64BIT) += tishift.o |
| 7 | 7 | ||
| 8 | lib-$(CONFIG_32BIT) += udivdi3.o | 8 | lib-$(CONFIG_32BIT) += udivdi3.o |
diff --git a/arch/s390/Makefile b/arch/s390/Makefile index 0b33577932c3..e21053e5e0da 100644 --- a/arch/s390/Makefile +++ b/arch/s390/Makefile | |||
| @@ -27,7 +27,7 @@ KBUILD_CFLAGS_DECOMPRESSOR += $(call cc-option,-ffreestanding) | |||
| 27 | KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) | 27 | KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO),-g) |
| 28 | KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,)) | 28 | KBUILD_CFLAGS_DECOMPRESSOR += $(if $(CONFIG_DEBUG_INFO_DWARF4), $(call cc-option, -gdwarf-4,)) |
| 29 | UTS_MACHINE := s390x | 29 | UTS_MACHINE := s390x |
| 30 | STACK_SIZE := $(if $(CONFIG_KASAN),32768,16384) | 30 | STACK_SIZE := $(if $(CONFIG_KASAN),65536,16384) |
| 31 | CHECKFLAGS += -D__s390__ -D__s390x__ | 31 | CHECKFLAGS += -D__s390__ -D__s390x__ |
| 32 | 32 | ||
| 33 | export LD_BFD | 33 | export LD_BFD |
diff --git a/arch/s390/boot/compressed/Makefile b/arch/s390/boot/compressed/Makefile index 593039620487..b1bdd15e3429 100644 --- a/arch/s390/boot/compressed/Makefile +++ b/arch/s390/boot/compressed/Makefile | |||
| @@ -22,10 +22,10 @@ OBJCOPYFLAGS := | |||
| 22 | OBJECTS := $(addprefix $(obj)/,$(obj-y)) | 22 | OBJECTS := $(addprefix $(obj)/,$(obj-y)) |
| 23 | 23 | ||
| 24 | LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T | 24 | LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T |
| 25 | $(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) | 25 | $(obj)/vmlinux: $(obj)/vmlinux.lds $(objtree)/arch/s390/boot/startup.a $(OBJECTS) FORCE |
| 26 | $(call if_changed,ld) | 26 | $(call if_changed,ld) |
| 27 | 27 | ||
| 28 | OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info | 28 | OBJCOPYFLAGS_info.bin := -O binary --only-section=.vmlinux.info --set-section-flags .vmlinux.info=load |
| 29 | $(obj)/info.bin: vmlinux FORCE | 29 | $(obj)/info.bin: vmlinux FORCE |
| 30 | $(call if_changed,objcopy) | 30 | $(call if_changed,objcopy) |
| 31 | 31 | ||
| @@ -46,17 +46,17 @@ suffix-$(CONFIG_KERNEL_LZMA) := .lzma | |||
| 46 | suffix-$(CONFIG_KERNEL_LZO) := .lzo | 46 | suffix-$(CONFIG_KERNEL_LZO) := .lzo |
| 47 | suffix-$(CONFIG_KERNEL_XZ) := .xz | 47 | suffix-$(CONFIG_KERNEL_XZ) := .xz |
| 48 | 48 | ||
| 49 | $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) | 49 | $(obj)/vmlinux.bin.gz: $(vmlinux.bin.all-y) FORCE |
| 50 | $(call if_changed,gzip) | 50 | $(call if_changed,gzip) |
| 51 | $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) | 51 | $(obj)/vmlinux.bin.bz2: $(vmlinux.bin.all-y) FORCE |
| 52 | $(call if_changed,bzip2) | 52 | $(call if_changed,bzip2) |
| 53 | $(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) | 53 | $(obj)/vmlinux.bin.lz4: $(vmlinux.bin.all-y) FORCE |
| 54 | $(call if_changed,lz4) | 54 | $(call if_changed,lz4) |
| 55 | $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) | 55 | $(obj)/vmlinux.bin.lzma: $(vmlinux.bin.all-y) FORCE |
| 56 | $(call if_changed,lzma) | 56 | $(call if_changed,lzma) |
| 57 | $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) | 57 | $(obj)/vmlinux.bin.lzo: $(vmlinux.bin.all-y) FORCE |
| 58 | $(call if_changed,lzo) | 58 | $(call if_changed,lzo) |
| 59 | $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) | 59 | $(obj)/vmlinux.bin.xz: $(vmlinux.bin.all-y) FORCE |
| 60 | $(call if_changed,xzkern) | 60 | $(call if_changed,xzkern) |
| 61 | 61 | ||
| 62 | OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed | 62 | OBJCOPYFLAGS_piggy.o := -I binary -O elf64-s390 -B s390:64-bit --rename-section .data=.vmlinux.bin.compressed |
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 259d1698ac50..c69cb04b7a59 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig | |||
| @@ -64,6 +64,8 @@ CONFIG_NUMA=y | |||
| 64 | CONFIG_PREEMPT=y | 64 | CONFIG_PREEMPT=y |
| 65 | CONFIG_HZ_100=y | 65 | CONFIG_HZ_100=y |
| 66 | CONFIG_KEXEC_FILE=y | 66 | CONFIG_KEXEC_FILE=y |
| 67 | CONFIG_EXPOLINE=y | ||
| 68 | CONFIG_EXPOLINE_AUTO=y | ||
| 67 | CONFIG_MEMORY_HOTPLUG=y | 69 | CONFIG_MEMORY_HOTPLUG=y |
| 68 | CONFIG_MEMORY_HOTREMOVE=y | 70 | CONFIG_MEMORY_HOTREMOVE=y |
| 69 | CONFIG_KSM=y | 71 | CONFIG_KSM=y |
| @@ -84,9 +86,11 @@ CONFIG_PCI_DEBUG=y | |||
| 84 | CONFIG_HOTPLUG_PCI=y | 86 | CONFIG_HOTPLUG_PCI=y |
| 85 | CONFIG_HOTPLUG_PCI_S390=y | 87 | CONFIG_HOTPLUG_PCI_S390=y |
| 86 | CONFIG_CHSC_SCH=y | 88 | CONFIG_CHSC_SCH=y |
| 89 | CONFIG_VFIO_AP=m | ||
| 87 | CONFIG_CRASH_DUMP=y | 90 | CONFIG_CRASH_DUMP=y |
| 88 | CONFIG_BINFMT_MISC=m | 91 | CONFIG_BINFMT_MISC=m |
| 89 | CONFIG_HIBERNATION=y | 92 | CONFIG_HIBERNATION=y |
| 93 | CONFIG_PM_DEBUG=y | ||
| 90 | CONFIG_NET=y | 94 | CONFIG_NET=y |
| 91 | CONFIG_PACKET=y | 95 | CONFIG_PACKET=y |
| 92 | CONFIG_PACKET_DIAG=m | 96 | CONFIG_PACKET_DIAG=m |
| @@ -161,8 +165,6 @@ CONFIG_NF_CONNTRACK_TFTP=m | |||
| 161 | CONFIG_NF_CT_NETLINK=m | 165 | CONFIG_NF_CT_NETLINK=m |
| 162 | CONFIG_NF_CT_NETLINK_TIMEOUT=m | 166 | CONFIG_NF_CT_NETLINK_TIMEOUT=m |
| 163 | CONFIG_NF_TABLES=m | 167 | CONFIG_NF_TABLES=m |
| 164 | CONFIG_NFT_EXTHDR=m | ||
| 165 | CONFIG_NFT_META=m | ||
| 166 | CONFIG_NFT_CT=m | 168 | CONFIG_NFT_CT=m |
| 167 | CONFIG_NFT_COUNTER=m | 169 | CONFIG_NFT_COUNTER=m |
| 168 | CONFIG_NFT_LOG=m | 170 | CONFIG_NFT_LOG=m |
| @@ -365,6 +367,8 @@ CONFIG_NET_ACT_SKBEDIT=m | |||
| 365 | CONFIG_NET_ACT_CSUM=m | 367 | CONFIG_NET_ACT_CSUM=m |
| 366 | CONFIG_DNS_RESOLVER=y | 368 | CONFIG_DNS_RESOLVER=y |
| 367 | CONFIG_OPENVSWITCH=m | 369 | CONFIG_OPENVSWITCH=m |
| 370 | CONFIG_VSOCKETS=m | ||
| 371 | CONFIG_VIRTIO_VSOCKETS=m | ||
| 368 | CONFIG_NETLINK_DIAG=m | 372 | CONFIG_NETLINK_DIAG=m |
| 369 | CONFIG_CGROUP_NET_PRIO=y | 373 | CONFIG_CGROUP_NET_PRIO=y |
| 370 | CONFIG_BPF_JIT=y | 374 | CONFIG_BPF_JIT=y |
| @@ -461,6 +465,7 @@ CONFIG_PPTP=m | |||
| 461 | CONFIG_PPPOL2TP=m | 465 | CONFIG_PPPOL2TP=m |
| 462 | CONFIG_PPP_ASYNC=m | 466 | CONFIG_PPP_ASYNC=m |
| 463 | CONFIG_PPP_SYNC_TTY=m | 467 | CONFIG_PPP_SYNC_TTY=m |
| 468 | CONFIG_ISM=m | ||
| 464 | CONFIG_INPUT_EVDEV=y | 469 | CONFIG_INPUT_EVDEV=y |
| 465 | # CONFIG_INPUT_KEYBOARD is not set | 470 | # CONFIG_INPUT_KEYBOARD is not set |
| 466 | # CONFIG_INPUT_MOUSE is not set | 471 | # CONFIG_INPUT_MOUSE is not set |
| @@ -486,9 +491,12 @@ CONFIG_MLX4_INFINIBAND=m | |||
| 486 | CONFIG_MLX5_INFINIBAND=m | 491 | CONFIG_MLX5_INFINIBAND=m |
| 487 | CONFIG_VFIO=m | 492 | CONFIG_VFIO=m |
| 488 | CONFIG_VFIO_PCI=m | 493 | CONFIG_VFIO_PCI=m |
| 494 | CONFIG_VFIO_MDEV=m | ||
| 495 | CONFIG_VFIO_MDEV_DEVICE=m | ||
| 489 | CONFIG_VIRTIO_PCI=m | 496 | CONFIG_VIRTIO_PCI=m |
| 490 | CONFIG_VIRTIO_BALLOON=m | 497 | CONFIG_VIRTIO_BALLOON=m |
| 491 | CONFIG_VIRTIO_INPUT=y | 498 | CONFIG_VIRTIO_INPUT=y |
| 499 | CONFIG_S390_AP_IOMMU=y | ||
| 492 | CONFIG_EXT4_FS=y | 500 | CONFIG_EXT4_FS=y |
| 493 | CONFIG_EXT4_FS_POSIX_ACL=y | 501 | CONFIG_EXT4_FS_POSIX_ACL=y |
| 494 | CONFIG_EXT4_FS_SECURITY=y | 502 | CONFIG_EXT4_FS_SECURITY=y |
| @@ -615,7 +623,6 @@ CONFIG_DEBUG_CREDENTIALS=y | |||
| 615 | CONFIG_RCU_TORTURE_TEST=m | 623 | CONFIG_RCU_TORTURE_TEST=m |
| 616 | CONFIG_RCU_CPU_STALL_TIMEOUT=300 | 624 | CONFIG_RCU_CPU_STALL_TIMEOUT=300 |
| 617 | CONFIG_NOTIFIER_ERROR_INJECTION=m | 625 | CONFIG_NOTIFIER_ERROR_INJECTION=m |
| 618 | CONFIG_PM_NOTIFIER_ERROR_INJECT=m | ||
| 619 | CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m | 626 | CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m |
| 620 | CONFIG_FAULT_INJECTION=y | 627 | CONFIG_FAULT_INJECTION=y |
| 621 | CONFIG_FAILSLAB=y | 628 | CONFIG_FAILSLAB=y |
| @@ -727,3 +734,4 @@ CONFIG_APPLDATA_BASE=y | |||
| 727 | CONFIG_KVM=m | 734 | CONFIG_KVM=m |
| 728 | CONFIG_KVM_S390_UCONTROL=y | 735 | CONFIG_KVM_S390_UCONTROL=y |
| 729 | CONFIG_VHOST_NET=m | 736 | CONFIG_VHOST_NET=m |
| 737 | CONFIG_VHOST_VSOCK=m | ||
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 37fd60c20e22..32f539dc9c19 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig | |||
| @@ -65,6 +65,8 @@ CONFIG_NR_CPUS=512 | |||
| 65 | CONFIG_NUMA=y | 65 | CONFIG_NUMA=y |
| 66 | CONFIG_HZ_100=y | 66 | CONFIG_HZ_100=y |
| 67 | CONFIG_KEXEC_FILE=y | 67 | CONFIG_KEXEC_FILE=y |
| 68 | CONFIG_EXPOLINE=y | ||
| 69 | CONFIG_EXPOLINE_AUTO=y | ||
| 68 | CONFIG_MEMORY_HOTPLUG=y | 70 | CONFIG_MEMORY_HOTPLUG=y |
| 69 | CONFIG_MEMORY_HOTREMOVE=y | 71 | CONFIG_MEMORY_HOTREMOVE=y |
| 70 | CONFIG_KSM=y | 72 | CONFIG_KSM=y |
| @@ -82,9 +84,11 @@ CONFIG_PCI=y | |||
| 82 | CONFIG_HOTPLUG_PCI=y | 84 | CONFIG_HOTPLUG_PCI=y |
| 83 | CONFIG_HOTPLUG_PCI_S390=y | 85 | CONFIG_HOTPLUG_PCI_S390=y |
| 84 | CONFIG_CHSC_SCH=y | 86 | CONFIG_CHSC_SCH=y |
| 87 | CONFIG_VFIO_AP=m | ||
| 85 | CONFIG_CRASH_DUMP=y | 88 | CONFIG_CRASH_DUMP=y |
| 86 | CONFIG_BINFMT_MISC=m | 89 | CONFIG_BINFMT_MISC=m |
| 87 | CONFIG_HIBERNATION=y | 90 | CONFIG_HIBERNATION=y |
| 91 | CONFIG_PM_DEBUG=y | ||
| 88 | CONFIG_NET=y | 92 | CONFIG_NET=y |
| 89 | CONFIG_PACKET=y | 93 | CONFIG_PACKET=y |
| 90 | CONFIG_PACKET_DIAG=m | 94 | CONFIG_PACKET_DIAG=m |
| @@ -159,8 +163,6 @@ CONFIG_NF_CONNTRACK_TFTP=m | |||
| 159 | CONFIG_NF_CT_NETLINK=m | 163 | CONFIG_NF_CT_NETLINK=m |
| 160 | CONFIG_NF_CT_NETLINK_TIMEOUT=m | 164 | CONFIG_NF_CT_NETLINK_TIMEOUT=m |
| 161 | CONFIG_NF_TABLES=m | 165 | CONFIG_NF_TABLES=m |
| 162 | CONFIG_NFT_EXTHDR=m | ||
| 163 | CONFIG_NFT_META=m | ||
| 164 | CONFIG_NFT_CT=m | 166 | CONFIG_NFT_CT=m |
| 165 | CONFIG_NFT_COUNTER=m | 167 | CONFIG_NFT_COUNTER=m |
| 166 | CONFIG_NFT_LOG=m | 168 | CONFIG_NFT_LOG=m |
| @@ -362,6 +364,8 @@ CONFIG_NET_ACT_SKBEDIT=m | |||
| 362 | CONFIG_NET_ACT_CSUM=m | 364 | CONFIG_NET_ACT_CSUM=m |
| 363 | CONFIG_DNS_RESOLVER=y | 365 | CONFIG_DNS_RESOLVER=y |
| 364 | CONFIG_OPENVSWITCH=m | 366 | CONFIG_OPENVSWITCH=m |
| 367 | CONFIG_VSOCKETS=m | ||
| 368 | CONFIG_VIRTIO_VSOCKETS=m | ||
| 365 | CONFIG_NETLINK_DIAG=m | 369 | CONFIG_NETLINK_DIAG=m |
| 366 | CONFIG_CGROUP_NET_PRIO=y | 370 | CONFIG_CGROUP_NET_PRIO=y |
| 367 | CONFIG_BPF_JIT=y | 371 | CONFIG_BPF_JIT=y |
| @@ -458,6 +462,7 @@ CONFIG_PPTP=m | |||
| 458 | CONFIG_PPPOL2TP=m | 462 | CONFIG_PPPOL2TP=m |
| 459 | CONFIG_PPP_ASYNC=m | 463 | CONFIG_PPP_ASYNC=m |
| 460 | CONFIG_PPP_SYNC_TTY=m | 464 | CONFIG_PPP_SYNC_TTY=m |
| 465 | CONFIG_ISM=m | ||
| 461 | CONFIG_INPUT_EVDEV=y | 466 | CONFIG_INPUT_EVDEV=y |
| 462 | # CONFIG_INPUT_KEYBOARD is not set | 467 | # CONFIG_INPUT_KEYBOARD is not set |
| 463 | # CONFIG_INPUT_MOUSE is not set | 468 | # CONFIG_INPUT_MOUSE is not set |
| @@ -483,9 +488,12 @@ CONFIG_MLX4_INFINIBAND=m | |||
| 483 | CONFIG_MLX5_INFINIBAND=m | 488 | CONFIG_MLX5_INFINIBAND=m |
| 484 | CONFIG_VFIO=m | 489 | CONFIG_VFIO=m |
| 485 | CONFIG_VFIO_PCI=m | 490 | CONFIG_VFIO_PCI=m |
| 491 | CONFIG_VFIO_MDEV=m | ||
| 492 | CONFIG_VFIO_MDEV_DEVICE=m | ||
| 486 | CONFIG_VIRTIO_PCI=m | 493 | CONFIG_VIRTIO_PCI=m |
| 487 | CONFIG_VIRTIO_BALLOON=m | 494 | CONFIG_VIRTIO_BALLOON=m |
| 488 | CONFIG_VIRTIO_INPUT=y | 495 | CONFIG_VIRTIO_INPUT=y |
| 496 | CONFIG_S390_AP_IOMMU=y | ||
| 489 | CONFIG_EXT4_FS=y | 497 | CONFIG_EXT4_FS=y |
| 490 | CONFIG_EXT4_FS_POSIX_ACL=y | 498 | CONFIG_EXT4_FS_POSIX_ACL=y |
| 491 | CONFIG_EXT4_FS_SECURITY=y | 499 | CONFIG_EXT4_FS_SECURITY=y |
| @@ -666,3 +674,4 @@ CONFIG_APPLDATA_BASE=y | |||
| 666 | CONFIG_KVM=m | 674 | CONFIG_KVM=m |
| 667 | CONFIG_KVM_S390_UCONTROL=y | 675 | CONFIG_KVM_S390_UCONTROL=y |
| 668 | CONFIG_VHOST_NET=m | 676 | CONFIG_VHOST_NET=m |
| 677 | CONFIG_VHOST_VSOCK=m | ||
diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 7cb6a52f727d..4d58a92b5d97 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig | |||
| @@ -26,14 +26,23 @@ CONFIG_CGROUP_CPUACCT=y | |||
| 26 | CONFIG_CGROUP_PERF=y | 26 | CONFIG_CGROUP_PERF=y |
| 27 | CONFIG_NAMESPACES=y | 27 | CONFIG_NAMESPACES=y |
| 28 | CONFIG_USER_NS=y | 28 | CONFIG_USER_NS=y |
| 29 | CONFIG_CHECKPOINT_RESTORE=y | ||
| 29 | CONFIG_BLK_DEV_INITRD=y | 30 | CONFIG_BLK_DEV_INITRD=y |
| 30 | CONFIG_EXPERT=y | 31 | CONFIG_EXPERT=y |
| 31 | # CONFIG_SYSFS_SYSCALL is not set | 32 | # CONFIG_SYSFS_SYSCALL is not set |
| 32 | CONFIG_CHECKPOINT_RESTORE=y | ||
| 33 | CONFIG_BPF_SYSCALL=y | 33 | CONFIG_BPF_SYSCALL=y |
| 34 | CONFIG_USERFAULTFD=y | 34 | CONFIG_USERFAULTFD=y |
| 35 | # CONFIG_COMPAT_BRK is not set | 35 | # CONFIG_COMPAT_BRK is not set |
| 36 | CONFIG_PROFILING=y | 36 | CONFIG_PROFILING=y |
| 37 | CONFIG_LIVEPATCH=y | ||
| 38 | CONFIG_NR_CPUS=256 | ||
| 39 | CONFIG_NUMA=y | ||
| 40 | CONFIG_HZ_100=y | ||
| 41 | CONFIG_KEXEC_FILE=y | ||
| 42 | CONFIG_CRASH_DUMP=y | ||
| 43 | CONFIG_HIBERNATION=y | ||
| 44 | CONFIG_PM_DEBUG=y | ||
| 45 | CONFIG_CMM=m | ||
| 37 | CONFIG_OPROFILE=y | 46 | CONFIG_OPROFILE=y |
| 38 | CONFIG_KPROBES=y | 47 | CONFIG_KPROBES=y |
| 39 | CONFIG_JUMP_LABEL=y | 48 | CONFIG_JUMP_LABEL=y |
| @@ -44,11 +53,7 @@ CONFIG_BLK_DEV_INTEGRITY=y | |||
| 44 | CONFIG_PARTITION_ADVANCED=y | 53 | CONFIG_PARTITION_ADVANCED=y |
| 45 | CONFIG_IBM_PARTITION=y | 54 | CONFIG_IBM_PARTITION=y |
| 46 | CONFIG_DEFAULT_DEADLINE=y | 55 | CONFIG_DEFAULT_DEADLINE=y |
| 47 | CONFIG_LIVEPATCH=y | 56 | CONFIG_BINFMT_MISC=m |
| 48 | CONFIG_NR_CPUS=256 | ||
| 49 | CONFIG_NUMA=y | ||
| 50 | CONFIG_HZ_100=y | ||
| 51 | CONFIG_KEXEC_FILE=y | ||
| 52 | CONFIG_MEMORY_HOTPLUG=y | 57 | CONFIG_MEMORY_HOTPLUG=y |
| 53 | CONFIG_MEMORY_HOTREMOVE=y | 58 | CONFIG_MEMORY_HOTREMOVE=y |
| 54 | CONFIG_KSM=y | 59 | CONFIG_KSM=y |
| @@ -60,9 +65,6 @@ CONFIG_ZBUD=m | |||
| 60 | CONFIG_ZSMALLOC=m | 65 | CONFIG_ZSMALLOC=m |
| 61 | CONFIG_ZSMALLOC_STAT=y | 66 | CONFIG_ZSMALLOC_STAT=y |
| 62 | CONFIG_IDLE_PAGE_TRACKING=y | 67 | CONFIG_IDLE_PAGE_TRACKING=y |
| 63 | CONFIG_CRASH_DUMP=y | ||
| 64 | CONFIG_BINFMT_MISC=m | ||
| 65 | CONFIG_HIBERNATION=y | ||
| 66 | CONFIG_NET=y | 68 | CONFIG_NET=y |
| 67 | CONFIG_PACKET=y | 69 | CONFIG_PACKET=y |
| 68 | CONFIG_UNIX=y | 70 | CONFIG_UNIX=y |
| @@ -98,6 +100,7 @@ CONFIG_BLK_DEV_NBD=m | |||
| 98 | CONFIG_BLK_DEV_RAM=y | 100 | CONFIG_BLK_DEV_RAM=y |
| 99 | CONFIG_VIRTIO_BLK=y | 101 | CONFIG_VIRTIO_BLK=y |
| 100 | CONFIG_SCSI=y | 102 | CONFIG_SCSI=y |
| 103 | # CONFIG_SCSI_MQ_DEFAULT is not set | ||
| 101 | CONFIG_BLK_DEV_SD=y | 104 | CONFIG_BLK_DEV_SD=y |
| 102 | CONFIG_CHR_DEV_ST=y | 105 | CONFIG_CHR_DEV_ST=y |
| 103 | CONFIG_BLK_DEV_SR=y | 106 | CONFIG_BLK_DEV_SR=y |
| @@ -131,6 +134,7 @@ CONFIG_EQUALIZER=m | |||
| 131 | CONFIG_TUN=m | 134 | CONFIG_TUN=m |
| 132 | CONFIG_VIRTIO_NET=y | 135 | CONFIG_VIRTIO_NET=y |
| 133 | # CONFIG_NET_VENDOR_ALACRITECH is not set | 136 | # CONFIG_NET_VENDOR_ALACRITECH is not set |
| 137 | # CONFIG_NET_VENDOR_AURORA is not set | ||
| 134 | # CONFIG_NET_VENDOR_CORTINA is not set | 138 | # CONFIG_NET_VENDOR_CORTINA is not set |
| 135 | # CONFIG_NET_VENDOR_SOLARFLARE is not set | 139 | # CONFIG_NET_VENDOR_SOLARFLARE is not set |
| 136 | # CONFIG_NET_VENDOR_SOCIONEXT is not set | 140 | # CONFIG_NET_VENDOR_SOCIONEXT is not set |
| @@ -157,33 +161,6 @@ CONFIG_TMPFS=y | |||
| 157 | CONFIG_TMPFS_POSIX_ACL=y | 161 | CONFIG_TMPFS_POSIX_ACL=y |
| 158 | CONFIG_HUGETLBFS=y | 162 | CONFIG_HUGETLBFS=y |
| 159 | # CONFIG_NETWORK_FILESYSTEMS is not set | 163 | # CONFIG_NETWORK_FILESYSTEMS is not set |
| 160 | CONFIG_DEBUG_INFO=y | ||
| 161 | CONFIG_DEBUG_INFO_DWARF4=y | ||
| 162 | CONFIG_GDB_SCRIPTS=y | ||
| 163 | CONFIG_UNUSED_SYMBOLS=y | ||
| 164 | CONFIG_DEBUG_SECTION_MISMATCH=y | ||
| 165 | CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y | ||
| 166 | CONFIG_MAGIC_SYSRQ=y | ||
| 167 | CONFIG_DEBUG_PAGEALLOC=y | ||
| 168 | CONFIG_DETECT_HUNG_TASK=y | ||
| 169 | CONFIG_PANIC_ON_OOPS=y | ||
| 170 | CONFIG_PROVE_LOCKING=y | ||
| 171 | CONFIG_LOCK_STAT=y | ||
| 172 | CONFIG_DEBUG_LOCKDEP=y | ||
| 173 | CONFIG_DEBUG_ATOMIC_SLEEP=y | ||
| 174 | CONFIG_DEBUG_LIST=y | ||
| 175 | CONFIG_DEBUG_SG=y | ||
| 176 | CONFIG_DEBUG_NOTIFIERS=y | ||
| 177 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 | ||
| 178 | CONFIG_LATENCYTOP=y | ||
| 179 | CONFIG_SCHED_TRACER=y | ||
| 180 | CONFIG_FTRACE_SYSCALLS=y | ||
| 181 | CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y | ||
| 182 | CONFIG_STACK_TRACER=y | ||
| 183 | CONFIG_BLK_DEV_IO_TRACE=y | ||
| 184 | CONFIG_FUNCTION_PROFILER=y | ||
| 185 | # CONFIG_RUNTIME_TESTING_MENU is not set | ||
| 186 | CONFIG_S390_PTDUMP=y | ||
| 187 | CONFIG_CRYPTO_CRYPTD=m | 164 | CONFIG_CRYPTO_CRYPTD=m |
| 188 | CONFIG_CRYPTO_AUTHENC=m | 165 | CONFIG_CRYPTO_AUTHENC=m |
| 189 | CONFIG_CRYPTO_TEST=m | 166 | CONFIG_CRYPTO_TEST=m |
| @@ -193,6 +170,7 @@ CONFIG_CRYPTO_CBC=y | |||
| 193 | CONFIG_CRYPTO_CFB=m | 170 | CONFIG_CRYPTO_CFB=m |
| 194 | CONFIG_CRYPTO_CTS=m | 171 | CONFIG_CRYPTO_CTS=m |
| 195 | CONFIG_CRYPTO_LRW=m | 172 | CONFIG_CRYPTO_LRW=m |
| 173 | CONFIG_CRYPTO_OFB=m | ||
| 196 | CONFIG_CRYPTO_PCBC=m | 174 | CONFIG_CRYPTO_PCBC=m |
| 197 | CONFIG_CRYPTO_XTS=m | 175 | CONFIG_CRYPTO_XTS=m |
| 198 | CONFIG_CRYPTO_CMAC=m | 176 | CONFIG_CRYPTO_CMAC=m |
| @@ -231,7 +209,6 @@ CONFIG_CRYPTO_USER_API_HASH=m | |||
| 231 | CONFIG_CRYPTO_USER_API_SKCIPHER=m | 209 | CONFIG_CRYPTO_USER_API_SKCIPHER=m |
| 232 | CONFIG_CRYPTO_USER_API_RNG=m | 210 | CONFIG_CRYPTO_USER_API_RNG=m |
| 233 | CONFIG_ZCRYPT=m | 211 | CONFIG_ZCRYPT=m |
| 234 | CONFIG_ZCRYPT_MULTIDEVNODES=y | ||
| 235 | CONFIG_PKEY=m | 212 | CONFIG_PKEY=m |
| 236 | CONFIG_CRYPTO_PAES_S390=m | 213 | CONFIG_CRYPTO_PAES_S390=m |
| 237 | CONFIG_CRYPTO_SHA1_S390=m | 214 | CONFIG_CRYPTO_SHA1_S390=m |
| @@ -247,4 +224,30 @@ CONFIG_CRC7=m | |||
| 247 | # CONFIG_XZ_DEC_ARM is not set | 224 | # CONFIG_XZ_DEC_ARM is not set |
| 248 | # CONFIG_XZ_DEC_ARMTHUMB is not set | 225 | # CONFIG_XZ_DEC_ARMTHUMB is not set |
| 249 | # CONFIG_XZ_DEC_SPARC is not set | 226 | # CONFIG_XZ_DEC_SPARC is not set |
| 250 | CONFIG_CMM=m | 227 | CONFIG_DEBUG_INFO=y |
| 228 | CONFIG_DEBUG_INFO_DWARF4=y | ||
| 229 | CONFIG_GDB_SCRIPTS=y | ||
| 230 | CONFIG_UNUSED_SYMBOLS=y | ||
| 231 | CONFIG_DEBUG_SECTION_MISMATCH=y | ||
| 232 | CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y | ||
| 233 | CONFIG_MAGIC_SYSRQ=y | ||
| 234 | CONFIG_DEBUG_PAGEALLOC=y | ||
| 235 | CONFIG_DETECT_HUNG_TASK=y | ||
| 236 | CONFIG_PANIC_ON_OOPS=y | ||
| 237 | CONFIG_PROVE_LOCKING=y | ||
| 238 | CONFIG_LOCK_STAT=y | ||
| 239 | CONFIG_DEBUG_LOCKDEP=y | ||
| 240 | CONFIG_DEBUG_ATOMIC_SLEEP=y | ||
| 241 | CONFIG_DEBUG_LIST=y | ||
| 242 | CONFIG_DEBUG_SG=y | ||
| 243 | CONFIG_DEBUG_NOTIFIERS=y | ||
| 244 | CONFIG_RCU_CPU_STALL_TIMEOUT=60 | ||
| 245 | CONFIG_LATENCYTOP=y | ||
| 246 | CONFIG_SCHED_TRACER=y | ||
| 247 | CONFIG_FTRACE_SYSCALLS=y | ||
| 248 | CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y | ||
| 249 | CONFIG_STACK_TRACER=y | ||
| 250 | CONFIG_BLK_DEV_IO_TRACE=y | ||
| 251 | CONFIG_FUNCTION_PROFILER=y | ||
| 252 | # CONFIG_RUNTIME_TESTING_MENU is not set | ||
| 253 | CONFIG_S390_PTDUMP=y | ||
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h index dbd689d556ce..ccbb53e22024 100644 --- a/arch/s390/include/asm/mmu_context.h +++ b/arch/s390/include/asm/mmu_context.h | |||
| @@ -46,8 +46,6 @@ static inline int init_new_context(struct task_struct *tsk, | |||
| 46 | mm->context.asce_limit = STACK_TOP_MAX; | 46 | mm->context.asce_limit = STACK_TOP_MAX; |
| 47 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | | 47 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | |
| 48 | _ASCE_USER_BITS | _ASCE_TYPE_REGION3; | 48 | _ASCE_USER_BITS | _ASCE_TYPE_REGION3; |
| 49 | /* pgd_alloc() did not account this pud */ | ||
| 50 | mm_inc_nr_puds(mm); | ||
| 51 | break; | 49 | break; |
| 52 | case -PAGE_SIZE: | 50 | case -PAGE_SIZE: |
| 53 | /* forked 5-level task, set new asce with new_mm->pgd */ | 51 | /* forked 5-level task, set new asce with new_mm->pgd */ |
| @@ -63,9 +61,6 @@ static inline int init_new_context(struct task_struct *tsk, | |||
| 63 | /* forked 2-level compat task, set new asce with new mm->pgd */ | 61 | /* forked 2-level compat task, set new asce with new mm->pgd */ |
| 64 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | | 62 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | |
| 65 | _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; | 63 | _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT; |
| 66 | /* pgd_alloc() did not account this pmd */ | ||
| 67 | mm_inc_nr_pmds(mm); | ||
| 68 | mm_inc_nr_puds(mm); | ||
| 69 | } | 64 | } |
| 70 | crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); | 65 | crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm)); |
| 71 | return 0; | 66 | return 0; |
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h index f0f9bcf94c03..5ee733720a57 100644 --- a/arch/s390/include/asm/pgalloc.h +++ b/arch/s390/include/asm/pgalloc.h | |||
| @@ -36,11 +36,11 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry) | |||
| 36 | 36 | ||
| 37 | static inline unsigned long pgd_entry_type(struct mm_struct *mm) | 37 | static inline unsigned long pgd_entry_type(struct mm_struct *mm) |
| 38 | { | 38 | { |
| 39 | if (mm->context.asce_limit <= _REGION3_SIZE) | 39 | if (mm_pmd_folded(mm)) |
| 40 | return _SEGMENT_ENTRY_EMPTY; | 40 | return _SEGMENT_ENTRY_EMPTY; |
| 41 | if (mm->context.asce_limit <= _REGION2_SIZE) | 41 | if (mm_pud_folded(mm)) |
| 42 | return _REGION3_ENTRY_EMPTY; | 42 | return _REGION3_ENTRY_EMPTY; |
| 43 | if (mm->context.asce_limit <= _REGION1_SIZE) | 43 | if (mm_p4d_folded(mm)) |
| 44 | return _REGION2_ENTRY_EMPTY; | 44 | return _REGION2_ENTRY_EMPTY; |
| 45 | return _REGION1_ENTRY_EMPTY; | 45 | return _REGION1_ENTRY_EMPTY; |
| 46 | } | 46 | } |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 411d435e7a7d..063732414dfb 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
| @@ -493,6 +493,24 @@ static inline int is_module_addr(void *addr) | |||
| 493 | _REGION_ENTRY_PROTECT | \ | 493 | _REGION_ENTRY_PROTECT | \ |
| 494 | _REGION_ENTRY_NOEXEC) | 494 | _REGION_ENTRY_NOEXEC) |
| 495 | 495 | ||
| 496 | static inline bool mm_p4d_folded(struct mm_struct *mm) | ||
| 497 | { | ||
| 498 | return mm->context.asce_limit <= _REGION1_SIZE; | ||
| 499 | } | ||
| 500 | #define mm_p4d_folded(mm) mm_p4d_folded(mm) | ||
| 501 | |||
| 502 | static inline bool mm_pud_folded(struct mm_struct *mm) | ||
| 503 | { | ||
| 504 | return mm->context.asce_limit <= _REGION2_SIZE; | ||
| 505 | } | ||
| 506 | #define mm_pud_folded(mm) mm_pud_folded(mm) | ||
| 507 | |||
| 508 | static inline bool mm_pmd_folded(struct mm_struct *mm) | ||
| 509 | { | ||
| 510 | return mm->context.asce_limit <= _REGION3_SIZE; | ||
| 511 | } | ||
| 512 | #define mm_pmd_folded(mm) mm_pmd_folded(mm) | ||
| 513 | |||
| 496 | static inline int mm_has_pgste(struct mm_struct *mm) | 514 | static inline int mm_has_pgste(struct mm_struct *mm) |
| 497 | { | 515 | { |
| 498 | #ifdef CONFIG_PGSTE | 516 | #ifdef CONFIG_PGSTE |
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 302795c47c06..81038ab357ce 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
| @@ -236,7 +236,7 @@ static inline unsigned long current_stack_pointer(void) | |||
| 236 | return sp; | 236 | return sp; |
| 237 | } | 237 | } |
| 238 | 238 | ||
| 239 | static __no_sanitize_address_or_inline unsigned short stap(void) | 239 | static __no_kasan_or_inline unsigned short stap(void) |
| 240 | { | 240 | { |
| 241 | unsigned short cpu_address; | 241 | unsigned short cpu_address; |
| 242 | 242 | ||
| @@ -330,7 +330,7 @@ static inline void __load_psw(psw_t psw) | |||
| 330 | * Set PSW mask to specified value, while leaving the | 330 | * Set PSW mask to specified value, while leaving the |
| 331 | * PSW addr pointing to the next instruction. | 331 | * PSW addr pointing to the next instruction. |
| 332 | */ | 332 | */ |
| 333 | static __no_sanitize_address_or_inline void __load_psw_mask(unsigned long mask) | 333 | static __no_kasan_or_inline void __load_psw_mask(unsigned long mask) |
| 334 | { | 334 | { |
| 335 | unsigned long addr; | 335 | unsigned long addr; |
| 336 | psw_t psw; | 336 | psw_t psw; |
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h index 27248f42a03c..ce4e17c9aad6 100644 --- a/arch/s390/include/asm/thread_info.h +++ b/arch/s390/include/asm/thread_info.h | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | * General size of kernel stacks | 14 | * General size of kernel stacks |
| 15 | */ | 15 | */ |
| 16 | #ifdef CONFIG_KASAN | 16 | #ifdef CONFIG_KASAN |
| 17 | #define THREAD_SIZE_ORDER 3 | 17 | #define THREAD_SIZE_ORDER 4 |
| 18 | #else | 18 | #else |
| 19 | #define THREAD_SIZE_ORDER 2 | 19 | #define THREAD_SIZE_ORDER 2 |
| 20 | #endif | 20 | #endif |
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h index 457b7ba0fbb6..b31c779cf581 100644 --- a/arch/s390/include/asm/tlb.h +++ b/arch/s390/include/asm/tlb.h | |||
| @@ -136,7 +136,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
| 136 | static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, | 136 | static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, |
| 137 | unsigned long address) | 137 | unsigned long address) |
| 138 | { | 138 | { |
| 139 | if (tlb->mm->context.asce_limit <= _REGION3_SIZE) | 139 | if (mm_pmd_folded(tlb->mm)) |
| 140 | return; | 140 | return; |
| 141 | pgtable_pmd_page_dtor(virt_to_page(pmd)); | 141 | pgtable_pmd_page_dtor(virt_to_page(pmd)); |
| 142 | tlb_remove_table(tlb, pmd); | 142 | tlb_remove_table(tlb, pmd); |
| @@ -152,7 +152,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, | |||
| 152 | static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, | 152 | static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, |
| 153 | unsigned long address) | 153 | unsigned long address) |
| 154 | { | 154 | { |
| 155 | if (tlb->mm->context.asce_limit <= _REGION1_SIZE) | 155 | if (mm_p4d_folded(tlb->mm)) |
| 156 | return; | 156 | return; |
| 157 | tlb_remove_table(tlb, p4d); | 157 | tlb_remove_table(tlb, p4d); |
| 158 | } | 158 | } |
| @@ -167,7 +167,7 @@ static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, | |||
| 167 | static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, | 167 | static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, |
| 168 | unsigned long address) | 168 | unsigned long address) |
| 169 | { | 169 | { |
| 170 | if (tlb->mm->context.asce_limit <= _REGION2_SIZE) | 170 | if (mm_pud_folded(tlb->mm)) |
| 171 | return; | 171 | return; |
| 172 | tlb_remove_table(tlb, pud); | 172 | tlb_remove_table(tlb, pud); |
| 173 | } | 173 | } |
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 724fba4d09d2..39191a0feed1 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S | |||
| @@ -236,10 +236,10 @@ ENTRY(__switch_to) | |||
| 236 | stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task | 236 | stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task |
| 237 | lghi %r4,__TASK_stack | 237 | lghi %r4,__TASK_stack |
| 238 | lghi %r1,__TASK_thread | 238 | lghi %r1,__TASK_thread |
| 239 | lg %r5,0(%r4,%r3) # start of kernel stack of next | 239 | llill %r5,STACK_INIT |
| 240 | stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev | 240 | stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev |
| 241 | lgr %r15,%r5 | 241 | lg %r15,0(%r4,%r3) # start of kernel stack of next |
| 242 | aghi %r15,STACK_INIT # end of kernel stack of next | 242 | agr %r15,%r5 # end of kernel stack of next |
| 243 | stg %r3,__LC_CURRENT # store task struct of next | 243 | stg %r3,__LC_CURRENT # store task struct of next |
| 244 | stg %r15,__LC_KERNEL_STACK # store end of kernel stack | 244 | stg %r15,__LC_KERNEL_STACK # store end of kernel stack |
| 245 | lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next | 245 | lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next |
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c index 84be7f02d0c2..39b13d71a8fe 100644 --- a/arch/s390/kernel/ftrace.c +++ b/arch/s390/kernel/ftrace.c | |||
| @@ -203,22 +203,13 @@ device_initcall(ftrace_plt_init); | |||
| 203 | */ | 203 | */ |
| 204 | unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) | 204 | unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip) |
| 205 | { | 205 | { |
| 206 | struct ftrace_graph_ent trace; | ||
| 207 | |||
| 208 | if (unlikely(ftrace_graph_is_dead())) | 206 | if (unlikely(ftrace_graph_is_dead())) |
| 209 | goto out; | 207 | goto out; |
| 210 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 208 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
| 211 | goto out; | 209 | goto out; |
| 212 | ip -= MCOUNT_INSN_SIZE; | 210 | ip -= MCOUNT_INSN_SIZE; |
| 213 | trace.func = ip; | 211 | if (!function_graph_enter(parent, ip, 0, NULL)) |
| 214 | trace.depth = current->curr_ret_stack + 1; | 212 | parent = (unsigned long) return_to_handler; |
| 215 | /* Only trace if the calling function expects to. */ | ||
| 216 | if (!ftrace_graph_entry(&trace)) | ||
| 217 | goto out; | ||
| 218 | if (ftrace_push_return_trace(parent, ip, &trace.depth, 0, | ||
| 219 | NULL) == -EBUSY) | ||
| 220 | goto out; | ||
| 221 | parent = (unsigned long) return_to_handler; | ||
| 222 | out: | 213 | out: |
| 223 | return parent; | 214 | return parent; |
| 224 | } | 215 | } |
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index cc085e2d2ce9..d5523adeddbf 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c | |||
| @@ -346,6 +346,8 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
| 346 | break; | 346 | break; |
| 347 | 347 | ||
| 348 | case PERF_TYPE_HARDWARE: | 348 | case PERF_TYPE_HARDWARE: |
| 349 | if (is_sampling_event(event)) /* No sampling support */ | ||
| 350 | return -ENOENT; | ||
| 349 | ev = attr->config; | 351 | ev = attr->config; |
| 350 | /* Count user space (problem-state) only */ | 352 | /* Count user space (problem-state) only */ |
| 351 | if (!attr->exclude_user && attr->exclude_kernel) { | 353 | if (!attr->exclude_user && attr->exclude_kernel) { |
| @@ -373,7 +375,7 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
| 373 | return -ENOENT; | 375 | return -ENOENT; |
| 374 | 376 | ||
| 375 | if (ev > PERF_CPUM_CF_MAX_CTR) | 377 | if (ev > PERF_CPUM_CF_MAX_CTR) |
| 376 | return -EINVAL; | 378 | return -ENOENT; |
| 377 | 379 | ||
| 378 | /* Obtain the counter set to which the specified counter belongs */ | 380 | /* Obtain the counter set to which the specified counter belongs */ |
| 379 | set = get_counter_set(ev); | 381 | set = get_counter_set(ev); |
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index 7bf604ff50a1..bfabeb1889cc 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c | |||
| @@ -1842,10 +1842,30 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags) | |||
| 1842 | CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); | 1842 | CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); |
| 1843 | CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); | 1843 | CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); |
| 1844 | 1844 | ||
| 1845 | static struct attribute *cpumsf_pmu_events_attr[] = { | 1845 | /* Attribute list for CPU_SF. |
| 1846 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC), | 1846 | * |
| 1847 | NULL, | 1847 | * The availablitiy depends on the CPU_MF sampling facility authorization |
| 1848 | NULL, | 1848 | * for basic + diagnositic samples. This is determined at initialization |
| 1849 | * time by the sampling facility device driver. | ||
| 1850 | * If the authorization for basic samples is turned off, it should be | ||
| 1851 | * also turned off for diagnostic sampling. | ||
| 1852 | * | ||
| 1853 | * During initialization of the device driver, check the authorization | ||
| 1854 | * level for diagnostic sampling and installs the attribute | ||
| 1855 | * file for diagnostic sampling if necessary. | ||
| 1856 | * | ||
| 1857 | * For now install a placeholder to reference all possible attributes: | ||
| 1858 | * SF_CYCLES_BASIC and SF_CYCLES_BASIC_DIAG. | ||
| 1859 | * Add another entry for the final NULL pointer. | ||
| 1860 | */ | ||
| 1861 | enum { | ||
| 1862 | SF_CYCLES_BASIC_ATTR_IDX = 0, | ||
| 1863 | SF_CYCLES_BASIC_DIAG_ATTR_IDX, | ||
| 1864 | SF_CYCLES_ATTR_MAX | ||
| 1865 | }; | ||
| 1866 | |||
| 1867 | static struct attribute *cpumsf_pmu_events_attr[SF_CYCLES_ATTR_MAX + 1] = { | ||
| 1868 | [SF_CYCLES_BASIC_ATTR_IDX] = CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC) | ||
| 1849 | }; | 1869 | }; |
| 1850 | 1870 | ||
| 1851 | PMU_FORMAT_ATTR(event, "config:0-63"); | 1871 | PMU_FORMAT_ATTR(event, "config:0-63"); |
| @@ -2040,7 +2060,10 @@ static int __init init_cpum_sampling_pmu(void) | |||
| 2040 | 2060 | ||
| 2041 | if (si.ad) { | 2061 | if (si.ad) { |
| 2042 | sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); | 2062 | sfb_set_limits(CPUM_SF_MIN_SDB, CPUM_SF_MAX_SDB); |
| 2043 | cpumsf_pmu_events_attr[1] = | 2063 | /* Sampling of diagnostic data authorized, |
| 2064 | * install event into attribute list of PMU device. | ||
| 2065 | */ | ||
| 2066 | cpumsf_pmu_events_attr[SF_CYCLES_BASIC_DIAG_ATTR_IDX] = | ||
| 2044 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG); | 2067 | CPUMF_EVENT_PTR(SF, SF_CYCLES_BASIC_DIAG); |
| 2045 | } | 2068 | } |
| 2046 | 2069 | ||
diff --git a/arch/s390/kernel/vdso32/Makefile b/arch/s390/kernel/vdso32/Makefile index eb8aebea3ea7..e76309fbbcb3 100644 --- a/arch/s390/kernel/vdso32/Makefile +++ b/arch/s390/kernel/vdso32/Makefile | |||
| @@ -37,7 +37,7 @@ KASAN_SANITIZE := n | |||
| 37 | $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so | 37 | $(obj)/vdso32_wrapper.o : $(obj)/vdso32.so |
| 38 | 38 | ||
| 39 | # link rule for the .so file, .lds has to be first | 39 | # link rule for the .so file, .lds has to be first |
| 40 | $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) | 40 | $(obj)/vdso32.so.dbg: $(src)/vdso32.lds $(obj-vdso32) FORCE |
| 41 | $(call if_changed,vdso32ld) | 41 | $(call if_changed,vdso32ld) |
| 42 | 42 | ||
| 43 | # strip rule for the .so file | 43 | # strip rule for the .so file |
| @@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE | |||
| 46 | $(call if_changed,objcopy) | 46 | $(call if_changed,objcopy) |
| 47 | 47 | ||
| 48 | # assembly rules for the .S files | 48 | # assembly rules for the .S files |
| 49 | $(obj-vdso32): %.o: %.S | 49 | $(obj-vdso32): %.o: %.S FORCE |
| 50 | $(call if_changed_dep,vdso32as) | 50 | $(call if_changed_dep,vdso32as) |
| 51 | 51 | ||
| 52 | # actual build commands | 52 | # actual build commands |
| 53 | quiet_cmd_vdso32ld = VDSO32L $@ | 53 | quiet_cmd_vdso32ld = VDSO32L $@ |
| 54 | cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ | 54 | cmd_vdso32ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@ |
| 55 | quiet_cmd_vdso32as = VDSO32A $@ | 55 | quiet_cmd_vdso32as = VDSO32A $@ |
| 56 | cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $< | 56 | cmd_vdso32as = $(CC) $(a_flags) -c -o $@ $< |
| 57 | 57 | ||
diff --git a/arch/s390/kernel/vdso64/Makefile b/arch/s390/kernel/vdso64/Makefile index a22b2cf86eec..f849ac61c5da 100644 --- a/arch/s390/kernel/vdso64/Makefile +++ b/arch/s390/kernel/vdso64/Makefile | |||
| @@ -37,7 +37,7 @@ KASAN_SANITIZE := n | |||
| 37 | $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so | 37 | $(obj)/vdso64_wrapper.o : $(obj)/vdso64.so |
| 38 | 38 | ||
| 39 | # link rule for the .so file, .lds has to be first | 39 | # link rule for the .so file, .lds has to be first |
| 40 | $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) | 40 | $(obj)/vdso64.so.dbg: $(src)/vdso64.lds $(obj-vdso64) FORCE |
| 41 | $(call if_changed,vdso64ld) | 41 | $(call if_changed,vdso64ld) |
| 42 | 42 | ||
| 43 | # strip rule for the .so file | 43 | # strip rule for the .so file |
| @@ -46,12 +46,12 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE | |||
| 46 | $(call if_changed,objcopy) | 46 | $(call if_changed,objcopy) |
| 47 | 47 | ||
| 48 | # assembly rules for the .S files | 48 | # assembly rules for the .S files |
| 49 | $(obj-vdso64): %.o: %.S | 49 | $(obj-vdso64): %.o: %.S FORCE |
| 50 | $(call if_changed_dep,vdso64as) | 50 | $(call if_changed_dep,vdso64as) |
| 51 | 51 | ||
| 52 | # actual build commands | 52 | # actual build commands |
| 53 | quiet_cmd_vdso64ld = VDSO64L $@ | 53 | quiet_cmd_vdso64ld = VDSO64L $@ |
| 54 | cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $^ -o $@ | 54 | cmd_vdso64ld = $(CC) $(c_flags) -Wl,-T $(filter %.lds %.o,$^) -o $@ |
| 55 | quiet_cmd_vdso64as = VDSO64A $@ | 55 | quiet_cmd_vdso64as = VDSO64A $@ |
| 56 | cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< | 56 | cmd_vdso64as = $(CC) $(a_flags) -c -o $@ $< |
| 57 | 57 | ||
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S index 21eb7407d51b..8429ab079715 100644 --- a/arch/s390/kernel/vmlinux.lds.S +++ b/arch/s390/kernel/vmlinux.lds.S | |||
| @@ -154,14 +154,14 @@ SECTIONS | |||
| 154 | * uncompressed image info used by the decompressor | 154 | * uncompressed image info used by the decompressor |
| 155 | * it should match struct vmlinux_info | 155 | * it should match struct vmlinux_info |
| 156 | */ | 156 | */ |
| 157 | .vmlinux.info 0 : { | 157 | .vmlinux.info 0 (INFO) : { |
| 158 | QUAD(_stext) /* default_lma */ | 158 | QUAD(_stext) /* default_lma */ |
| 159 | QUAD(startup_continue) /* entry */ | 159 | QUAD(startup_continue) /* entry */ |
| 160 | QUAD(__bss_start - _stext) /* image_size */ | 160 | QUAD(__bss_start - _stext) /* image_size */ |
| 161 | QUAD(__bss_stop - __bss_start) /* bss_size */ | 161 | QUAD(__bss_stop - __bss_start) /* bss_size */ |
| 162 | QUAD(__boot_data_start) /* bootdata_off */ | 162 | QUAD(__boot_data_start) /* bootdata_off */ |
| 163 | QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */ | 163 | QUAD(__boot_data_end - __boot_data_start) /* bootdata_size */ |
| 164 | } | 164 | } :NONE |
| 165 | 165 | ||
| 166 | /* Debugging sections. */ | 166 | /* Debugging sections. */ |
| 167 | STABS_DEBUG | 167 | STABS_DEBUG |
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 76d89ee8b428..6791562779ee 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c | |||
| @@ -101,6 +101,7 @@ int crst_table_upgrade(struct mm_struct *mm, unsigned long end) | |||
| 101 | mm->context.asce_limit = _REGION1_SIZE; | 101 | mm->context.asce_limit = _REGION1_SIZE; |
| 102 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | | 102 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | |
| 103 | _ASCE_USER_BITS | _ASCE_TYPE_REGION2; | 103 | _ASCE_USER_BITS | _ASCE_TYPE_REGION2; |
| 104 | mm_inc_nr_puds(mm); | ||
| 104 | } else { | 105 | } else { |
| 105 | crst_table_init(table, _REGION1_ENTRY_EMPTY); | 106 | crst_table_init(table, _REGION1_ENTRY_EMPTY); |
| 106 | pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd); | 107 | pgd_populate(mm, (pgd_t *) table, (p4d_t *) pgd); |
| @@ -130,6 +131,7 @@ void crst_table_downgrade(struct mm_struct *mm) | |||
| 130 | } | 131 | } |
| 131 | 132 | ||
| 132 | pgd = mm->pgd; | 133 | pgd = mm->pgd; |
| 134 | mm_dec_nr_pmds(mm); | ||
| 133 | mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); | 135 | mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN); |
| 134 | mm->context.asce_limit = _REGION3_SIZE; | 136 | mm->context.asce_limit = _REGION3_SIZE; |
| 135 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | | 137 | mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH | |
diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c index ae0d9e889534..d31bde0870d8 100644 --- a/arch/s390/numa/numa.c +++ b/arch/s390/numa/numa.c | |||
| @@ -53,6 +53,7 @@ int __node_distance(int a, int b) | |||
| 53 | { | 53 | { |
| 54 | return mode->distance ? mode->distance(a, b) : 0; | 54 | return mode->distance ? mode->distance(a, b) : 0; |
| 55 | } | 55 | } |
| 56 | EXPORT_SYMBOL(__node_distance); | ||
| 56 | 57 | ||
| 57 | int numa_debug_enabled; | 58 | int numa_debug_enabled; |
| 58 | 59 | ||
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 96dd9f7da250..1b04270e5460 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c | |||
| @@ -321,8 +321,7 @@ int ftrace_disable_ftrace_graph_caller(void) | |||
| 321 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | 321 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) |
| 322 | { | 322 | { |
| 323 | unsigned long old; | 323 | unsigned long old; |
| 324 | int faulted, err; | 324 | int faulted; |
| 325 | struct ftrace_graph_ent trace; | ||
| 326 | unsigned long return_hooker = (unsigned long)&return_to_handler; | 325 | unsigned long return_hooker = (unsigned long)&return_to_handler; |
| 327 | 326 | ||
| 328 | if (unlikely(ftrace_graph_is_dead())) | 327 | if (unlikely(ftrace_graph_is_dead())) |
| @@ -365,18 +364,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
| 365 | return; | 364 | return; |
| 366 | } | 365 | } |
| 367 | 366 | ||
| 368 | err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0, NULL); | 367 | if (function_graph_enter(old, self_addr, 0, NULL)) |
| 369 | if (err == -EBUSY) { | ||
| 370 | __raw_writel(old, parent); | 368 | __raw_writel(old, parent); |
| 371 | return; | ||
| 372 | } | ||
| 373 | |||
| 374 | trace.func = self_addr; | ||
| 375 | |||
| 376 | /* Only trace if the calling function expects to */ | ||
| 377 | if (!ftrace_graph_entry(&trace)) { | ||
| 378 | current->curr_ret_stack--; | ||
| 379 | __raw_writel(old, parent); | ||
| 380 | } | ||
| 381 | } | 369 | } |
| 382 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 370 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index 915dda4ae412..684b84ce397f 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c | |||
| @@ -126,20 +126,11 @@ unsigned long prepare_ftrace_return(unsigned long parent, | |||
| 126 | unsigned long frame_pointer) | 126 | unsigned long frame_pointer) |
| 127 | { | 127 | { |
| 128 | unsigned long return_hooker = (unsigned long) &return_to_handler; | 128 | unsigned long return_hooker = (unsigned long) &return_to_handler; |
| 129 | struct ftrace_graph_ent trace; | ||
| 130 | 129 | ||
| 131 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 130 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
| 132 | return parent + 8UL; | 131 | return parent + 8UL; |
| 133 | 132 | ||
| 134 | trace.func = self_addr; | 133 | if (function_graph_enter(parent, self_addr, frame_pointer, NULL)) |
| 135 | trace.depth = current->curr_ret_stack + 1; | ||
| 136 | |||
| 137 | /* Only trace if the calling function expects to */ | ||
| 138 | if (!ftrace_graph_entry(&trace)) | ||
| 139 | return parent + 8UL; | ||
| 140 | |||
| 141 | if (ftrace_push_return_trace(parent, self_addr, &trace.depth, | ||
| 142 | frame_pointer, NULL) == -EBUSY) | ||
| 143 | return parent + 8UL; | 134 | return parent + 8UL; |
| 144 | 135 | ||
| 145 | return return_hooker; | 136 | return return_hooker; |
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c index 222785af550b..5fda4f7bf15d 100644 --- a/arch/sparc/net/bpf_jit_comp_64.c +++ b/arch/sparc/net/bpf_jit_comp_64.c | |||
| @@ -791,7 +791,7 @@ static int emit_compare_and_branch(const u8 code, const u8 dst, u8 src, | |||
| 791 | } | 791 | } |
| 792 | 792 | ||
| 793 | /* Just skip the save instruction and the ctx register move. */ | 793 | /* Just skip the save instruction and the ctx register move. */ |
| 794 | #define BPF_TAILCALL_PROLOGUE_SKIP 16 | 794 | #define BPF_TAILCALL_PROLOGUE_SKIP 32 |
| 795 | #define BPF_TAILCALL_CNT_SP_OFF (STACK_BIAS + 128) | 795 | #define BPF_TAILCALL_CNT_SP_OFF (STACK_BIAS + 128) |
| 796 | 796 | ||
| 797 | static void build_prologue(struct jit_ctx *ctx) | 797 | static void build_prologue(struct jit_ctx *ctx) |
| @@ -824,9 +824,15 @@ static void build_prologue(struct jit_ctx *ctx) | |||
| 824 | const u8 vfp = bpf2sparc[BPF_REG_FP]; | 824 | const u8 vfp = bpf2sparc[BPF_REG_FP]; |
| 825 | 825 | ||
| 826 | emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx); | 826 | emit(ADD | IMMED | RS1(FP) | S13(STACK_BIAS) | RD(vfp), ctx); |
| 827 | } else { | ||
| 828 | emit_nop(ctx); | ||
| 827 | } | 829 | } |
| 828 | 830 | ||
| 829 | emit_reg_move(I0, O0, ctx); | 831 | emit_reg_move(I0, O0, ctx); |
| 832 | emit_reg_move(I1, O1, ctx); | ||
| 833 | emit_reg_move(I2, O2, ctx); | ||
| 834 | emit_reg_move(I3, O3, ctx); | ||
| 835 | emit_reg_move(I4, O4, ctx); | ||
| 830 | /* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */ | 836 | /* If you add anything here, adjust BPF_TAILCALL_PROLOGUE_SKIP above. */ |
| 831 | } | 837 | } |
| 832 | 838 | ||
| @@ -1270,6 +1276,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) | |||
| 1270 | const u8 tmp2 = bpf2sparc[TMP_REG_2]; | 1276 | const u8 tmp2 = bpf2sparc[TMP_REG_2]; |
| 1271 | u32 opcode = 0, rs2; | 1277 | u32 opcode = 0, rs2; |
| 1272 | 1278 | ||
| 1279 | if (insn->dst_reg == BPF_REG_FP) | ||
| 1280 | ctx->saw_frame_pointer = true; | ||
| 1281 | |||
| 1273 | ctx->tmp_2_used = true; | 1282 | ctx->tmp_2_used = true; |
| 1274 | emit_loadimm(imm, tmp2, ctx); | 1283 | emit_loadimm(imm, tmp2, ctx); |
| 1275 | 1284 | ||
| @@ -1308,6 +1317,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) | |||
| 1308 | const u8 tmp = bpf2sparc[TMP_REG_1]; | 1317 | const u8 tmp = bpf2sparc[TMP_REG_1]; |
| 1309 | u32 opcode = 0, rs2; | 1318 | u32 opcode = 0, rs2; |
| 1310 | 1319 | ||
| 1320 | if (insn->dst_reg == BPF_REG_FP) | ||
| 1321 | ctx->saw_frame_pointer = true; | ||
| 1322 | |||
| 1311 | switch (BPF_SIZE(code)) { | 1323 | switch (BPF_SIZE(code)) { |
| 1312 | case BPF_W: | 1324 | case BPF_W: |
| 1313 | opcode = ST32; | 1325 | opcode = ST32; |
| @@ -1340,6 +1352,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) | |||
| 1340 | const u8 tmp2 = bpf2sparc[TMP_REG_2]; | 1352 | const u8 tmp2 = bpf2sparc[TMP_REG_2]; |
| 1341 | const u8 tmp3 = bpf2sparc[TMP_REG_3]; | 1353 | const u8 tmp3 = bpf2sparc[TMP_REG_3]; |
| 1342 | 1354 | ||
| 1355 | if (insn->dst_reg == BPF_REG_FP) | ||
| 1356 | ctx->saw_frame_pointer = true; | ||
| 1357 | |||
| 1343 | ctx->tmp_1_used = true; | 1358 | ctx->tmp_1_used = true; |
| 1344 | ctx->tmp_2_used = true; | 1359 | ctx->tmp_2_used = true; |
| 1345 | ctx->tmp_3_used = true; | 1360 | ctx->tmp_3_used = true; |
| @@ -1360,6 +1375,9 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) | |||
| 1360 | const u8 tmp2 = bpf2sparc[TMP_REG_2]; | 1375 | const u8 tmp2 = bpf2sparc[TMP_REG_2]; |
| 1361 | const u8 tmp3 = bpf2sparc[TMP_REG_3]; | 1376 | const u8 tmp3 = bpf2sparc[TMP_REG_3]; |
| 1362 | 1377 | ||
| 1378 | if (insn->dst_reg == BPF_REG_FP) | ||
| 1379 | ctx->saw_frame_pointer = true; | ||
| 1380 | |||
| 1363 | ctx->tmp_1_used = true; | 1381 | ctx->tmp_1_used = true; |
| 1364 | ctx->tmp_2_used = true; | 1382 | ctx->tmp_2_used = true; |
| 1365 | ctx->tmp_3_used = true; | 1383 | ctx->tmp_3_used = true; |
| @@ -1425,12 +1443,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) | |||
| 1425 | struct bpf_prog *tmp, *orig_prog = prog; | 1443 | struct bpf_prog *tmp, *orig_prog = prog; |
| 1426 | struct sparc64_jit_data *jit_data; | 1444 | struct sparc64_jit_data *jit_data; |
| 1427 | struct bpf_binary_header *header; | 1445 | struct bpf_binary_header *header; |
| 1446 | u32 prev_image_size, image_size; | ||
| 1428 | bool tmp_blinded = false; | 1447 | bool tmp_blinded = false; |
| 1429 | bool extra_pass = false; | 1448 | bool extra_pass = false; |
| 1430 | struct jit_ctx ctx; | 1449 | struct jit_ctx ctx; |
| 1431 | u32 image_size; | ||
| 1432 | u8 *image_ptr; | 1450 | u8 *image_ptr; |
| 1433 | int pass; | 1451 | int pass, i; |
| 1434 | 1452 | ||
| 1435 | if (!prog->jit_requested) | 1453 | if (!prog->jit_requested) |
| 1436 | return orig_prog; | 1454 | return orig_prog; |
| @@ -1461,61 +1479,82 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) | |||
| 1461 | header = jit_data->header; | 1479 | header = jit_data->header; |
| 1462 | extra_pass = true; | 1480 | extra_pass = true; |
| 1463 | image_size = sizeof(u32) * ctx.idx; | 1481 | image_size = sizeof(u32) * ctx.idx; |
| 1482 | prev_image_size = image_size; | ||
| 1483 | pass = 1; | ||
| 1464 | goto skip_init_ctx; | 1484 | goto skip_init_ctx; |
| 1465 | } | 1485 | } |
| 1466 | 1486 | ||
| 1467 | memset(&ctx, 0, sizeof(ctx)); | 1487 | memset(&ctx, 0, sizeof(ctx)); |
| 1468 | ctx.prog = prog; | 1488 | ctx.prog = prog; |
| 1469 | 1489 | ||
| 1470 | ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL); | 1490 | ctx.offset = kmalloc_array(prog->len, sizeof(unsigned int), GFP_KERNEL); |
| 1471 | if (ctx.offset == NULL) { | 1491 | if (ctx.offset == NULL) { |
| 1472 | prog = orig_prog; | 1492 | prog = orig_prog; |
| 1473 | goto out_off; | 1493 | goto out_off; |
| 1474 | } | 1494 | } |
| 1475 | 1495 | ||
| 1476 | /* Fake pass to detect features used, and get an accurate assessment | 1496 | /* Longest sequence emitted is for bswap32, 12 instructions. Pre-cook |
| 1477 | * of what the final image size will be. | 1497 | * the offset array so that we converge faster. |
| 1478 | */ | 1498 | */ |
| 1479 | if (build_body(&ctx)) { | 1499 | for (i = 0; i < prog->len; i++) |
| 1480 | prog = orig_prog; | 1500 | ctx.offset[i] = i * (12 * 4); |
| 1481 | goto out_off; | ||
| 1482 | } | ||
| 1483 | build_prologue(&ctx); | ||
| 1484 | build_epilogue(&ctx); | ||
| 1485 | |||
| 1486 | /* Now we know the actual image size. */ | ||
| 1487 | image_size = sizeof(u32) * ctx.idx; | ||
| 1488 | header = bpf_jit_binary_alloc(image_size, &image_ptr, | ||
| 1489 | sizeof(u32), jit_fill_hole); | ||
| 1490 | if (header == NULL) { | ||
| 1491 | prog = orig_prog; | ||
| 1492 | goto out_off; | ||
| 1493 | } | ||
| 1494 | 1501 | ||
| 1495 | ctx.image = (u32 *)image_ptr; | 1502 | prev_image_size = ~0U; |
| 1496 | skip_init_ctx: | 1503 | for (pass = 1; pass < 40; pass++) { |
| 1497 | for (pass = 1; pass < 3; pass++) { | ||
| 1498 | ctx.idx = 0; | 1504 | ctx.idx = 0; |
| 1499 | 1505 | ||
| 1500 | build_prologue(&ctx); | 1506 | build_prologue(&ctx); |
| 1501 | |||
| 1502 | if (build_body(&ctx)) { | 1507 | if (build_body(&ctx)) { |
| 1503 | bpf_jit_binary_free(header); | ||
| 1504 | prog = orig_prog; | 1508 | prog = orig_prog; |
| 1505 | goto out_off; | 1509 | goto out_off; |
| 1506 | } | 1510 | } |
| 1507 | |||
| 1508 | build_epilogue(&ctx); | 1511 | build_epilogue(&ctx); |
| 1509 | 1512 | ||
| 1510 | if (bpf_jit_enable > 1) | 1513 | if (bpf_jit_enable > 1) |
| 1511 | pr_info("Pass %d: shrink = %d, seen = [%c%c%c%c%c%c]\n", pass, | 1514 | pr_info("Pass %d: size = %u, seen = [%c%c%c%c%c%c]\n", pass, |
| 1512 | image_size - (ctx.idx * 4), | 1515 | ctx.idx * 4, |
| 1513 | ctx.tmp_1_used ? '1' : ' ', | 1516 | ctx.tmp_1_used ? '1' : ' ', |
| 1514 | ctx.tmp_2_used ? '2' : ' ', | 1517 | ctx.tmp_2_used ? '2' : ' ', |
| 1515 | ctx.tmp_3_used ? '3' : ' ', | 1518 | ctx.tmp_3_used ? '3' : ' ', |
| 1516 | ctx.saw_frame_pointer ? 'F' : ' ', | 1519 | ctx.saw_frame_pointer ? 'F' : ' ', |
| 1517 | ctx.saw_call ? 'C' : ' ', | 1520 | ctx.saw_call ? 'C' : ' ', |
| 1518 | ctx.saw_tail_call ? 'T' : ' '); | 1521 | ctx.saw_tail_call ? 'T' : ' '); |
| 1522 | |||
| 1523 | if (ctx.idx * 4 == prev_image_size) | ||
| 1524 | break; | ||
| 1525 | prev_image_size = ctx.idx * 4; | ||
| 1526 | cond_resched(); | ||
| 1527 | } | ||
| 1528 | |||
| 1529 | /* Now we know the actual image size. */ | ||
| 1530 | image_size = sizeof(u32) * ctx.idx; | ||
| 1531 | header = bpf_jit_binary_alloc(image_size, &image_ptr, | ||
| 1532 | sizeof(u32), jit_fill_hole); | ||
| 1533 | if (header == NULL) { | ||
| 1534 | prog = orig_prog; | ||
| 1535 | goto out_off; | ||
| 1536 | } | ||
| 1537 | |||
| 1538 | ctx.image = (u32 *)image_ptr; | ||
| 1539 | skip_init_ctx: | ||
| 1540 | ctx.idx = 0; | ||
| 1541 | |||
| 1542 | build_prologue(&ctx); | ||
| 1543 | |||
| 1544 | if (build_body(&ctx)) { | ||
| 1545 | bpf_jit_binary_free(header); | ||
| 1546 | prog = orig_prog; | ||
| 1547 | goto out_off; | ||
| 1548 | } | ||
| 1549 | |||
| 1550 | build_epilogue(&ctx); | ||
| 1551 | |||
| 1552 | if (ctx.idx * 4 != prev_image_size) { | ||
| 1553 | pr_err("bpf_jit: Failed to converge, prev_size=%u size=%d\n", | ||
| 1554 | prev_image_size, ctx.idx * 4); | ||
| 1555 | bpf_jit_binary_free(header); | ||
| 1556 | prog = orig_prog; | ||
| 1557 | goto out_off; | ||
| 1519 | } | 1558 | } |
| 1520 | 1559 | ||
| 1521 | if (bpf_jit_enable > 1) | 1560 | if (bpf_jit_enable > 1) |
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c index 74c002ddc0ce..28c40624bcb6 100644 --- a/arch/um/drivers/ubd_kern.c +++ b/arch/um/drivers/ubd_kern.c | |||
| @@ -1305,6 +1305,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, | |||
| 1305 | io_req->fds[0] = dev->cow.fd; | 1305 | io_req->fds[0] = dev->cow.fd; |
| 1306 | else | 1306 | else |
| 1307 | io_req->fds[0] = dev->fd; | 1307 | io_req->fds[0] = dev->fd; |
| 1308 | io_req->error = 0; | ||
| 1308 | 1309 | ||
| 1309 | if (req_op(req) == REQ_OP_FLUSH) { | 1310 | if (req_op(req) == REQ_OP_FLUSH) { |
| 1310 | io_req->op = UBD_FLUSH; | 1311 | io_req->op = UBD_FLUSH; |
| @@ -1313,9 +1314,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, | |||
| 1313 | io_req->cow_offset = -1; | 1314 | io_req->cow_offset = -1; |
| 1314 | io_req->offset = off; | 1315 | io_req->offset = off; |
| 1315 | io_req->length = bvec->bv_len; | 1316 | io_req->length = bvec->bv_len; |
| 1316 | io_req->error = 0; | ||
| 1317 | io_req->sector_mask = 0; | 1317 | io_req->sector_mask = 0; |
| 1318 | |||
| 1319 | io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE; | 1318 | io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE; |
| 1320 | io_req->offsets[0] = 0; | 1319 | io_req->offsets[0] = 0; |
| 1321 | io_req->offsets[1] = dev->cow.data_offset; | 1320 | io_req->offsets[1] = dev->cow.data_offset; |
| @@ -1341,11 +1340,14 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req, | |||
| 1341 | static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, | 1340 | static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, |
| 1342 | const struct blk_mq_queue_data *bd) | 1341 | const struct blk_mq_queue_data *bd) |
| 1343 | { | 1342 | { |
| 1343 | struct ubd *ubd_dev = hctx->queue->queuedata; | ||
| 1344 | struct request *req = bd->rq; | 1344 | struct request *req = bd->rq; |
| 1345 | int ret = 0; | 1345 | int ret = 0; |
| 1346 | 1346 | ||
| 1347 | blk_mq_start_request(req); | 1347 | blk_mq_start_request(req); |
| 1348 | 1348 | ||
| 1349 | spin_lock_irq(&ubd_dev->lock); | ||
| 1350 | |||
| 1349 | if (req_op(req) == REQ_OP_FLUSH) { | 1351 | if (req_op(req) == REQ_OP_FLUSH) { |
| 1350 | ret = ubd_queue_one_vec(hctx, req, 0, NULL); | 1352 | ret = ubd_queue_one_vec(hctx, req, 0, NULL); |
| 1351 | } else { | 1353 | } else { |
| @@ -1361,9 +1363,11 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
| 1361 | } | 1363 | } |
| 1362 | } | 1364 | } |
| 1363 | out: | 1365 | out: |
| 1364 | if (ret < 0) { | 1366 | spin_unlock_irq(&ubd_dev->lock); |
| 1367 | |||
| 1368 | if (ret < 0) | ||
| 1365 | blk_mq_requeue_request(req, true); | 1369 | blk_mq_requeue_request(req, true); |
| 1366 | } | 1370 | |
| 1367 | return BLK_STS_OK; | 1371 | return BLK_STS_OK; |
| 1368 | } | 1372 | } |
| 1369 | 1373 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index ba7e3464ee92..8689e794a43c 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
| @@ -444,10 +444,6 @@ config RETPOLINE | |||
| 444 | branches. Requires a compiler with -mindirect-branch=thunk-extern | 444 | branches. Requires a compiler with -mindirect-branch=thunk-extern |
| 445 | support for full protection. The kernel may run slower. | 445 | support for full protection. The kernel may run slower. |
| 446 | 446 | ||
| 447 | Without compiler support, at least indirect branches in assembler | ||
| 448 | code are eliminated. Since this includes the syscall entry path, | ||
| 449 | it is not entirely pointless. | ||
| 450 | |||
| 451 | config INTEL_RDT | 447 | config INTEL_RDT |
| 452 | bool "Intel Resource Director Technology support" | 448 | bool "Intel Resource Director Technology support" |
| 453 | depends on X86 && CPU_SUP_INTEL | 449 | depends on X86 && CPU_SUP_INTEL |
| @@ -525,7 +521,6 @@ config X86_VSMP | |||
| 525 | bool "ScaleMP vSMP" | 521 | bool "ScaleMP vSMP" |
| 526 | select HYPERVISOR_GUEST | 522 | select HYPERVISOR_GUEST |
| 527 | select PARAVIRT | 523 | select PARAVIRT |
| 528 | select PARAVIRT_XXL | ||
| 529 | depends on X86_64 && PCI | 524 | depends on X86_64 && PCI |
| 530 | depends on X86_EXTENDED_PLATFORM | 525 | depends on X86_EXTENDED_PLATFORM |
| 531 | depends on SMP | 526 | depends on SMP |
| @@ -1005,13 +1000,7 @@ config NR_CPUS | |||
| 1005 | to the kernel image. | 1000 | to the kernel image. |
| 1006 | 1001 | ||
| 1007 | config SCHED_SMT | 1002 | config SCHED_SMT |
| 1008 | bool "SMT (Hyperthreading) scheduler support" | 1003 | def_bool y if SMP |
| 1009 | depends on SMP | ||
| 1010 | ---help--- | ||
| 1011 | SMT scheduler support improves the CPU scheduler's decision making | ||
| 1012 | when dealing with Intel Pentium 4 chips with HyperThreading at a | ||
| 1013 | cost of slightly increased overhead in some places. If unsure say | ||
| 1014 | N here. | ||
| 1015 | 1004 | ||
| 1016 | config SCHED_MC | 1005 | config SCHED_MC |
| 1017 | def_bool y | 1006 | def_bool y |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 5b562e464009..f5d7f4134524 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
| @@ -213,8 +213,6 @@ ifdef CONFIG_X86_64 | |||
| 213 | KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000) | 213 | KBUILD_LDFLAGS += $(call ld-option, -z max-page-size=0x200000) |
| 214 | endif | 214 | endif |
| 215 | 215 | ||
| 216 | # Speed up the build | ||
| 217 | KBUILD_CFLAGS += -pipe | ||
| 218 | # Workaround for a gcc prelease that unfortunately was shipped in a suse release | 216 | # Workaround for a gcc prelease that unfortunately was shipped in a suse release |
| 219 | KBUILD_CFLAGS += -Wno-sign-compare | 217 | KBUILD_CFLAGS += -Wno-sign-compare |
| 220 | # | 218 | # |
| @@ -222,9 +220,10 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables | |||
| 222 | 220 | ||
| 223 | # Avoid indirect branches in kernel to deal with Spectre | 221 | # Avoid indirect branches in kernel to deal with Spectre |
| 224 | ifdef CONFIG_RETPOLINE | 222 | ifdef CONFIG_RETPOLINE |
| 225 | ifneq ($(RETPOLINE_CFLAGS),) | 223 | ifeq ($(RETPOLINE_CFLAGS),) |
| 226 | KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) -DRETPOLINE | 224 | $(error You are building kernel with non-retpoline compiler, please update your compiler.) |
| 227 | endif | 225 | endif |
| 226 | KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) | ||
| 228 | endif | 227 | endif |
| 229 | 228 | ||
| 230 | archscripts: scripts_basic | 229 | archscripts: scripts_basic |
| @@ -239,7 +238,7 @@ archheaders: | |||
| 239 | archmacros: | 238 | archmacros: |
| 240 | $(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s | 239 | $(Q)$(MAKE) $(build)=arch/x86/kernel arch/x86/kernel/macros.s |
| 241 | 240 | ||
| 242 | ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s -Wa,- | 241 | ASM_MACRO_FLAGS = -Wa,arch/x86/kernel/macros.s |
| 243 | export ASM_MACRO_FLAGS | 242 | export ASM_MACRO_FLAGS |
| 244 | KBUILD_CFLAGS += $(ASM_MACRO_FLAGS) | 243 | KBUILD_CFLAGS += $(ASM_MACRO_FLAGS) |
| 245 | 244 | ||
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 4c881c850125..850b8762e889 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S | |||
| @@ -300,7 +300,7 @@ _start: | |||
| 300 | # Part 2 of the header, from the old setup.S | 300 | # Part 2 of the header, from the old setup.S |
| 301 | 301 | ||
| 302 | .ascii "HdrS" # header signature | 302 | .ascii "HdrS" # header signature |
| 303 | .word 0x020e # header version number (>= 0x0105) | 303 | .word 0x020d # header version number (>= 0x0105) |
| 304 | # or else old loadlin-1.5 will fail) | 304 | # or else old loadlin-1.5 will fail) |
| 305 | .globl realmode_swtch | 305 | .globl realmode_swtch |
| 306 | realmode_swtch: .word 0, 0 # default_switch, SETUPSEG | 306 | realmode_swtch: .word 0, 0 # default_switch, SETUPSEG |
| @@ -558,10 +558,6 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr | |||
| 558 | init_size: .long INIT_SIZE # kernel initialization size | 558 | init_size: .long INIT_SIZE # kernel initialization size |
| 559 | handover_offset: .long 0 # Filled in by build.c | 559 | handover_offset: .long 0 # Filled in by build.c |
| 560 | 560 | ||
| 561 | acpi_rsdp_addr: .quad 0 # 64-bit physical pointer to the | ||
| 562 | # ACPI RSDP table, added with | ||
| 563 | # version 2.14 | ||
| 564 | |||
| 565 | # End of setup header ##################################################### | 561 | # End of setup header ##################################################### |
| 566 | 562 | ||
| 567 | .section ".entrytext", "ax" | 563 | .section ".entrytext", "ax" |
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 106911b603bd..374a19712e20 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c | |||
| @@ -438,26 +438,6 @@ int x86_setup_perfctr(struct perf_event *event) | |||
| 438 | if (config == -1LL) | 438 | if (config == -1LL) |
| 439 | return -EINVAL; | 439 | return -EINVAL; |
| 440 | 440 | ||
| 441 | /* | ||
| 442 | * Branch tracing: | ||
| 443 | */ | ||
| 444 | if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && | ||
| 445 | !attr->freq && hwc->sample_period == 1) { | ||
| 446 | /* BTS is not supported by this architecture. */ | ||
| 447 | if (!x86_pmu.bts_active) | ||
| 448 | return -EOPNOTSUPP; | ||
| 449 | |||
| 450 | /* BTS is currently only allowed for user-mode. */ | ||
| 451 | if (!attr->exclude_kernel) | ||
| 452 | return -EOPNOTSUPP; | ||
| 453 | |||
| 454 | /* disallow bts if conflicting events are present */ | ||
| 455 | if (x86_add_exclusive(x86_lbr_exclusive_lbr)) | ||
| 456 | return -EBUSY; | ||
| 457 | |||
| 458 | event->destroy = hw_perf_lbr_event_destroy; | ||
| 459 | } | ||
| 460 | |||
| 461 | hwc->config |= config; | 441 | hwc->config |= config; |
| 462 | 442 | ||
| 463 | return 0; | 443 | return 0; |
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 273c62e81546..ecc3e34ca955 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c | |||
| @@ -2306,14 +2306,18 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status) | |||
| 2306 | return handled; | 2306 | return handled; |
| 2307 | } | 2307 | } |
| 2308 | 2308 | ||
| 2309 | static bool disable_counter_freezing; | 2309 | static bool disable_counter_freezing = true; |
| 2310 | static int __init intel_perf_counter_freezing_setup(char *s) | 2310 | static int __init intel_perf_counter_freezing_setup(char *s) |
| 2311 | { | 2311 | { |
| 2312 | disable_counter_freezing = true; | 2312 | bool res; |
| 2313 | pr_info("Intel PMU Counter freezing feature disabled\n"); | 2313 | |
| 2314 | if (kstrtobool(s, &res)) | ||
| 2315 | return -EINVAL; | ||
| 2316 | |||
| 2317 | disable_counter_freezing = !res; | ||
| 2314 | return 1; | 2318 | return 1; |
| 2315 | } | 2319 | } |
| 2316 | __setup("disable_counter_freezing", intel_perf_counter_freezing_setup); | 2320 | __setup("perf_v4_pmi=", intel_perf_counter_freezing_setup); |
| 2317 | 2321 | ||
| 2318 | /* | 2322 | /* |
| 2319 | * Simplified handler for Arch Perfmon v4: | 2323 | * Simplified handler for Arch Perfmon v4: |
| @@ -2470,16 +2474,7 @@ done: | |||
| 2470 | static struct event_constraint * | 2474 | static struct event_constraint * |
| 2471 | intel_bts_constraints(struct perf_event *event) | 2475 | intel_bts_constraints(struct perf_event *event) |
| 2472 | { | 2476 | { |
| 2473 | struct hw_perf_event *hwc = &event->hw; | 2477 | if (unlikely(intel_pmu_has_bts(event))) |
| 2474 | unsigned int hw_event, bts_event; | ||
| 2475 | |||
| 2476 | if (event->attr.freq) | ||
| 2477 | return NULL; | ||
| 2478 | |||
| 2479 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; | ||
| 2480 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); | ||
| 2481 | |||
| 2482 | if (unlikely(hw_event == bts_event && hwc->sample_period == 1)) | ||
| 2483 | return &bts_constraint; | 2478 | return &bts_constraint; |
| 2484 | 2479 | ||
| 2485 | return NULL; | 2480 | return NULL; |
| @@ -3098,6 +3093,43 @@ static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) | |||
| 3098 | return flags; | 3093 | return flags; |
| 3099 | } | 3094 | } |
| 3100 | 3095 | ||
| 3096 | static int intel_pmu_bts_config(struct perf_event *event) | ||
| 3097 | { | ||
| 3098 | struct perf_event_attr *attr = &event->attr; | ||
| 3099 | |||
| 3100 | if (unlikely(intel_pmu_has_bts(event))) { | ||
| 3101 | /* BTS is not supported by this architecture. */ | ||
| 3102 | if (!x86_pmu.bts_active) | ||
| 3103 | return -EOPNOTSUPP; | ||
| 3104 | |||
| 3105 | /* BTS is currently only allowed for user-mode. */ | ||
| 3106 | if (!attr->exclude_kernel) | ||
| 3107 | return -EOPNOTSUPP; | ||
| 3108 | |||
| 3109 | /* BTS is not allowed for precise events. */ | ||
| 3110 | if (attr->precise_ip) | ||
| 3111 | return -EOPNOTSUPP; | ||
| 3112 | |||
| 3113 | /* disallow bts if conflicting events are present */ | ||
| 3114 | if (x86_add_exclusive(x86_lbr_exclusive_lbr)) | ||
| 3115 | return -EBUSY; | ||
| 3116 | |||
| 3117 | event->destroy = hw_perf_lbr_event_destroy; | ||
| 3118 | } | ||
| 3119 | |||
| 3120 | return 0; | ||
| 3121 | } | ||
| 3122 | |||
| 3123 | static int core_pmu_hw_config(struct perf_event *event) | ||
| 3124 | { | ||
| 3125 | int ret = x86_pmu_hw_config(event); | ||
| 3126 | |||
| 3127 | if (ret) | ||
| 3128 | return ret; | ||
| 3129 | |||
| 3130 | return intel_pmu_bts_config(event); | ||
| 3131 | } | ||
| 3132 | |||
| 3101 | static int intel_pmu_hw_config(struct perf_event *event) | 3133 | static int intel_pmu_hw_config(struct perf_event *event) |
| 3102 | { | 3134 | { |
| 3103 | int ret = x86_pmu_hw_config(event); | 3135 | int ret = x86_pmu_hw_config(event); |
| @@ -3105,6 +3137,10 @@ static int intel_pmu_hw_config(struct perf_event *event) | |||
| 3105 | if (ret) | 3137 | if (ret) |
| 3106 | return ret; | 3138 | return ret; |
| 3107 | 3139 | ||
| 3140 | ret = intel_pmu_bts_config(event); | ||
| 3141 | if (ret) | ||
| 3142 | return ret; | ||
| 3143 | |||
| 3108 | if (event->attr.precise_ip) { | 3144 | if (event->attr.precise_ip) { |
| 3109 | if (!event->attr.freq) { | 3145 | if (!event->attr.freq) { |
| 3110 | event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; | 3146 | event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; |
| @@ -3127,7 +3163,7 @@ static int intel_pmu_hw_config(struct perf_event *event) | |||
| 3127 | /* | 3163 | /* |
| 3128 | * BTS is set up earlier in this path, so don't account twice | 3164 | * BTS is set up earlier in this path, so don't account twice |
| 3129 | */ | 3165 | */ |
| 3130 | if (!intel_pmu_has_bts(event)) { | 3166 | if (!unlikely(intel_pmu_has_bts(event))) { |
| 3131 | /* disallow lbr if conflicting events are present */ | 3167 | /* disallow lbr if conflicting events are present */ |
| 3132 | if (x86_add_exclusive(x86_lbr_exclusive_lbr)) | 3168 | if (x86_add_exclusive(x86_lbr_exclusive_lbr)) |
| 3133 | return -EBUSY; | 3169 | return -EBUSY; |
| @@ -3596,7 +3632,7 @@ static __initconst const struct x86_pmu core_pmu = { | |||
| 3596 | .enable_all = core_pmu_enable_all, | 3632 | .enable_all = core_pmu_enable_all, |
| 3597 | .enable = core_pmu_enable_event, | 3633 | .enable = core_pmu_enable_event, |
| 3598 | .disable = x86_pmu_disable_event, | 3634 | .disable = x86_pmu_disable_event, |
| 3599 | .hw_config = x86_pmu_hw_config, | 3635 | .hw_config = core_pmu_hw_config, |
| 3600 | .schedule_events = x86_schedule_events, | 3636 | .schedule_events = x86_schedule_events, |
| 3601 | .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, | 3637 | .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, |
| 3602 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, | 3638 | .perfctr = MSR_ARCH_PERFMON_PERFCTR0, |
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index e17ab885b1e9..cb46d602a6b8 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h | |||
| @@ -129,8 +129,15 @@ struct intel_uncore_box { | |||
| 129 | struct intel_uncore_extra_reg shared_regs[0]; | 129 | struct intel_uncore_extra_reg shared_regs[0]; |
| 130 | }; | 130 | }; |
| 131 | 131 | ||
| 132 | #define UNCORE_BOX_FLAG_INITIATED 0 | 132 | /* CFL uncore 8th cbox MSRs */ |
| 133 | #define UNCORE_BOX_FLAG_CTL_OFFS8 1 /* event config registers are 8-byte apart */ | 133 | #define CFL_UNC_CBO_7_PERFEVTSEL0 0xf70 |
| 134 | #define CFL_UNC_CBO_7_PER_CTR0 0xf76 | ||
| 135 | |||
| 136 | #define UNCORE_BOX_FLAG_INITIATED 0 | ||
| 137 | /* event config registers are 8-byte apart */ | ||
| 138 | #define UNCORE_BOX_FLAG_CTL_OFFS8 1 | ||
| 139 | /* CFL 8th CBOX has different MSR space */ | ||
| 140 | #define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS 2 | ||
| 134 | 141 | ||
| 135 | struct uncore_event_desc { | 142 | struct uncore_event_desc { |
| 136 | struct kobj_attribute attr; | 143 | struct kobj_attribute attr; |
| @@ -297,17 +304,27 @@ unsigned int uncore_freerunning_counter(struct intel_uncore_box *box, | |||
| 297 | static inline | 304 | static inline |
| 298 | unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) | 305 | unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx) |
| 299 | { | 306 | { |
| 300 | return box->pmu->type->event_ctl + | 307 | if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) { |
| 301 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + | 308 | return CFL_UNC_CBO_7_PERFEVTSEL0 + |
| 302 | uncore_msr_box_offset(box); | 309 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); |
| 310 | } else { | ||
| 311 | return box->pmu->type->event_ctl + | ||
| 312 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + | ||
| 313 | uncore_msr_box_offset(box); | ||
| 314 | } | ||
| 303 | } | 315 | } |
| 304 | 316 | ||
| 305 | static inline | 317 | static inline |
| 306 | unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) | 318 | unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx) |
| 307 | { | 319 | { |
| 308 | return box->pmu->type->perf_ctr + | 320 | if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) { |
| 309 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + | 321 | return CFL_UNC_CBO_7_PER_CTR0 + |
| 310 | uncore_msr_box_offset(box); | 322 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx); |
| 323 | } else { | ||
| 324 | return box->pmu->type->perf_ctr + | ||
| 325 | (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) + | ||
| 326 | uncore_msr_box_offset(box); | ||
| 327 | } | ||
| 311 | } | 328 | } |
| 312 | 329 | ||
| 313 | static inline | 330 | static inline |
diff --git a/arch/x86/events/intel/uncore_snb.c b/arch/x86/events/intel/uncore_snb.c index 8527c3e1038b..2593b0d7aeee 100644 --- a/arch/x86/events/intel/uncore_snb.c +++ b/arch/x86/events/intel/uncore_snb.c | |||
| @@ -15,6 +15,25 @@ | |||
| 15 | #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 | 15 | #define PCI_DEVICE_ID_INTEL_SKL_HQ_IMC 0x1910 |
| 16 | #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f | 16 | #define PCI_DEVICE_ID_INTEL_SKL_SD_IMC 0x190f |
| 17 | #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f | 17 | #define PCI_DEVICE_ID_INTEL_SKL_SQ_IMC 0x191f |
| 18 | #define PCI_DEVICE_ID_INTEL_KBL_Y_IMC 0x590c | ||
| 19 | #define PCI_DEVICE_ID_INTEL_KBL_U_IMC 0x5904 | ||
| 20 | #define PCI_DEVICE_ID_INTEL_KBL_UQ_IMC 0x5914 | ||
| 21 | #define PCI_DEVICE_ID_INTEL_KBL_SD_IMC 0x590f | ||
| 22 | #define PCI_DEVICE_ID_INTEL_KBL_SQ_IMC 0x591f | ||
| 23 | #define PCI_DEVICE_ID_INTEL_CFL_2U_IMC 0x3ecc | ||
| 24 | #define PCI_DEVICE_ID_INTEL_CFL_4U_IMC 0x3ed0 | ||
| 25 | #define PCI_DEVICE_ID_INTEL_CFL_4H_IMC 0x3e10 | ||
| 26 | #define PCI_DEVICE_ID_INTEL_CFL_6H_IMC 0x3ec4 | ||
| 27 | #define PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC 0x3e0f | ||
| 28 | #define PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC 0x3e1f | ||
| 29 | #define PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC 0x3ec2 | ||
| 30 | #define PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC 0x3e30 | ||
| 31 | #define PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC 0x3e18 | ||
| 32 | #define PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC 0x3ec6 | ||
| 33 | #define PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC 0x3e31 | ||
| 34 | #define PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC 0x3e33 | ||
| 35 | #define PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC 0x3eca | ||
| 36 | #define PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC 0x3e32 | ||
| 18 | 37 | ||
| 19 | /* SNB event control */ | 38 | /* SNB event control */ |
| 20 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff | 39 | #define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff |
| @@ -202,6 +221,10 @@ static void skl_uncore_msr_init_box(struct intel_uncore_box *box) | |||
| 202 | wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, | 221 | wrmsrl(SKL_UNC_PERF_GLOBAL_CTL, |
| 203 | SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); | 222 | SNB_UNC_GLOBAL_CTL_EN | SKL_UNC_GLOBAL_CTL_CORE_ALL); |
| 204 | } | 223 | } |
| 224 | |||
| 225 | /* The 8th CBOX has different MSR space */ | ||
| 226 | if (box->pmu->pmu_idx == 7) | ||
| 227 | __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags); | ||
| 205 | } | 228 | } |
| 206 | 229 | ||
| 207 | static void skl_uncore_msr_enable_box(struct intel_uncore_box *box) | 230 | static void skl_uncore_msr_enable_box(struct intel_uncore_box *box) |
| @@ -228,7 +251,7 @@ static struct intel_uncore_ops skl_uncore_msr_ops = { | |||
| 228 | static struct intel_uncore_type skl_uncore_cbox = { | 251 | static struct intel_uncore_type skl_uncore_cbox = { |
| 229 | .name = "cbox", | 252 | .name = "cbox", |
| 230 | .num_counters = 4, | 253 | .num_counters = 4, |
| 231 | .num_boxes = 5, | 254 | .num_boxes = 8, |
| 232 | .perf_ctr_bits = 44, | 255 | .perf_ctr_bits = 44, |
| 233 | .fixed_ctr_bits = 48, | 256 | .fixed_ctr_bits = 48, |
| 234 | .perf_ctr = SNB_UNC_CBO_0_PER_CTR0, | 257 | .perf_ctr = SNB_UNC_CBO_0_PER_CTR0, |
| @@ -569,7 +592,82 @@ static const struct pci_device_id skl_uncore_pci_ids[] = { | |||
| 569 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), | 592 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SKL_SQ_IMC), |
| 570 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | 593 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), |
| 571 | }, | 594 | }, |
| 572 | 595 | { /* IMC */ | |
| 596 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_Y_IMC), | ||
| 597 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 598 | }, | ||
| 599 | { /* IMC */ | ||
| 600 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_U_IMC), | ||
| 601 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 602 | }, | ||
| 603 | { /* IMC */ | ||
| 604 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_UQ_IMC), | ||
| 605 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 606 | }, | ||
| 607 | { /* IMC */ | ||
| 608 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SD_IMC), | ||
| 609 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 610 | }, | ||
| 611 | { /* IMC */ | ||
| 612 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KBL_SQ_IMC), | ||
| 613 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 614 | }, | ||
| 615 | { /* IMC */ | ||
| 616 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2U_IMC), | ||
| 617 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 618 | }, | ||
| 619 | { /* IMC */ | ||
| 620 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4U_IMC), | ||
| 621 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 622 | }, | ||
| 623 | { /* IMC */ | ||
| 624 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4H_IMC), | ||
| 625 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 626 | }, | ||
| 627 | { /* IMC */ | ||
| 628 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6H_IMC), | ||
| 629 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 630 | }, | ||
| 631 | { /* IMC */ | ||
| 632 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_2S_D_IMC), | ||
| 633 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 634 | }, | ||
| 635 | { /* IMC */ | ||
| 636 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_D_IMC), | ||
| 637 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 638 | }, | ||
| 639 | { /* IMC */ | ||
| 640 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_D_IMC), | ||
| 641 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 642 | }, | ||
| 643 | { /* IMC */ | ||
| 644 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_D_IMC), | ||
| 645 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 646 | }, | ||
| 647 | { /* IMC */ | ||
| 648 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_W_IMC), | ||
| 649 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 650 | }, | ||
| 651 | { /* IMC */ | ||
| 652 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_W_IMC), | ||
| 653 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 654 | }, | ||
| 655 | { /* IMC */ | ||
| 656 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_W_IMC), | ||
| 657 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 658 | }, | ||
| 659 | { /* IMC */ | ||
| 660 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_4S_S_IMC), | ||
| 661 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 662 | }, | ||
| 663 | { /* IMC */ | ||
| 664 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_6S_S_IMC), | ||
| 665 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 666 | }, | ||
| 667 | { /* IMC */ | ||
| 668 | PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CFL_8S_S_IMC), | ||
| 669 | .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0), | ||
| 670 | }, | ||
| 573 | { /* end: all zeroes */ }, | 671 | { /* end: all zeroes */ }, |
| 574 | }; | 672 | }; |
| 575 | 673 | ||
| @@ -618,6 +716,25 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = { | |||
| 618 | IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ | 716 | IMC_DEV(SKL_HQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core H Quad Core */ |
| 619 | IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ | 717 | IMC_DEV(SKL_SD_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Dual Core */ |
| 620 | IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ | 718 | IMC_DEV(SKL_SQ_IMC, &skl_uncore_pci_driver), /* 6th Gen Core S Quad Core */ |
| 719 | IMC_DEV(KBL_Y_IMC, &skl_uncore_pci_driver), /* 7th Gen Core Y */ | ||
| 720 | IMC_DEV(KBL_U_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U */ | ||
| 721 | IMC_DEV(KBL_UQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core U Quad Core */ | ||
| 722 | IMC_DEV(KBL_SD_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Dual Core */ | ||
| 723 | IMC_DEV(KBL_SQ_IMC, &skl_uncore_pci_driver), /* 7th Gen Core S Quad Core */ | ||
| 724 | IMC_DEV(CFL_2U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 2 Cores */ | ||
| 725 | IMC_DEV(CFL_4U_IMC, &skl_uncore_pci_driver), /* 8th Gen Core U 4 Cores */ | ||
| 726 | IMC_DEV(CFL_4H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 4 Cores */ | ||
| 727 | IMC_DEV(CFL_6H_IMC, &skl_uncore_pci_driver), /* 8th Gen Core H 6 Cores */ | ||
| 728 | IMC_DEV(CFL_2S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 2 Cores Desktop */ | ||
| 729 | IMC_DEV(CFL_4S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Desktop */ | ||
| 730 | IMC_DEV(CFL_6S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Desktop */ | ||
| 731 | IMC_DEV(CFL_8S_D_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Desktop */ | ||
| 732 | IMC_DEV(CFL_4S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Work Station */ | ||
| 733 | IMC_DEV(CFL_6S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Work Station */ | ||
| 734 | IMC_DEV(CFL_8S_W_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Work Station */ | ||
| 735 | IMC_DEV(CFL_4S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 4 Cores Server */ | ||
| 736 | IMC_DEV(CFL_6S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 6 Cores Server */ | ||
| 737 | IMC_DEV(CFL_8S_S_IMC, &skl_uncore_pci_driver), /* 8th Gen Core S 8 Cores Server */ | ||
| 621 | { /* end marker */ } | 738 | { /* end marker */ } |
| 622 | }; | 739 | }; |
| 623 | 740 | ||
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index adae087cecdd..78d7b7031bfc 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h | |||
| @@ -859,11 +859,16 @@ static inline int amd_pmu_init(void) | |||
| 859 | 859 | ||
| 860 | static inline bool intel_pmu_has_bts(struct perf_event *event) | 860 | static inline bool intel_pmu_has_bts(struct perf_event *event) |
| 861 | { | 861 | { |
| 862 | if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS && | 862 | struct hw_perf_event *hwc = &event->hw; |
| 863 | !event->attr.freq && event->hw.sample_period == 1) | 863 | unsigned int hw_event, bts_event; |
| 864 | return true; | 864 | |
| 865 | if (event->attr.freq) | ||
| 866 | return false; | ||
| 867 | |||
| 868 | hw_event = hwc->config & INTEL_ARCH_EVENT_MASK; | ||
| 869 | bts_event = x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS); | ||
| 865 | 870 | ||
| 866 | return false; | 871 | return hw_event == bts_event && hwc->sample_period == 1; |
| 867 | } | 872 | } |
| 868 | 873 | ||
| 869 | int intel_pmu_save_and_restart(struct perf_event *event); | 874 | int intel_pmu_save_and_restart(struct perf_event *event); |
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index 5f7290e6e954..69dcdf195b61 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h | |||
| @@ -226,7 +226,7 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu) | |||
| 226 | "3: movl $-2,%[err]\n\t" \ | 226 | "3: movl $-2,%[err]\n\t" \ |
| 227 | "jmp 2b\n\t" \ | 227 | "jmp 2b\n\t" \ |
| 228 | ".popsection\n\t" \ | 228 | ".popsection\n\t" \ |
| 229 | _ASM_EXTABLE_UA(1b, 3b) \ | 229 | _ASM_EXTABLE(1b, 3b) \ |
| 230 | : [err] "=r" (err) \ | 230 | : [err] "=r" (err) \ |
| 231 | : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ | 231 | : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \ |
| 232 | : "memory") | 232 | : "memory") |
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 55e51ff7e421..fbda5a917c5b 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
| @@ -1094,7 +1094,8 @@ struct kvm_x86_ops { | |||
| 1094 | bool (*has_wbinvd_exit)(void); | 1094 | bool (*has_wbinvd_exit)(void); |
| 1095 | 1095 | ||
| 1096 | u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu); | 1096 | u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu); |
| 1097 | void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); | 1097 | /* Returns actual tsc_offset set in active VMCS */ |
| 1098 | u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset); | ||
| 1098 | 1099 | ||
| 1099 | void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); | 1100 | void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2); |
| 1100 | 1101 | ||
diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 4da9b1c58d28..c1a812bd5a27 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h | |||
| @@ -221,6 +221,8 @@ static inline void mce_hygon_feature_init(struct cpuinfo_x86 *c) { return mce_am | |||
| 221 | 221 | ||
| 222 | int mce_available(struct cpuinfo_x86 *c); | 222 | int mce_available(struct cpuinfo_x86 *c); |
| 223 | bool mce_is_memory_error(struct mce *m); | 223 | bool mce_is_memory_error(struct mce *m); |
| 224 | bool mce_is_correctable(struct mce *m); | ||
| 225 | int mce_usable_address(struct mce *m); | ||
| 224 | 226 | ||
| 225 | DECLARE_PER_CPU(unsigned, mce_exception_count); | 227 | DECLARE_PER_CPU(unsigned, mce_exception_count); |
| 226 | DECLARE_PER_CPU(unsigned, mce_poll_count); | 228 | DECLARE_PER_CPU(unsigned, mce_poll_count); |
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h index 0d6271cce198..1d0a7778e163 100644 --- a/arch/x86/include/asm/mshyperv.h +++ b/arch/x86/include/asm/mshyperv.h | |||
| @@ -232,7 +232,7 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2) | |||
| 232 | : "cc"); | 232 | : "cc"); |
| 233 | } | 233 | } |
| 234 | #endif | 234 | #endif |
| 235 | return hv_status; | 235 | return hv_status; |
| 236 | } | 236 | } |
| 237 | 237 | ||
| 238 | /* | 238 | /* |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 80f4a4f38c79..c8f73efb4ece 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
| @@ -41,9 +41,10 @@ | |||
| 41 | 41 | ||
| 42 | #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ | 42 | #define MSR_IA32_SPEC_CTRL 0x00000048 /* Speculation Control */ |
| 43 | #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ | 43 | #define SPEC_CTRL_IBRS (1 << 0) /* Indirect Branch Restricted Speculation */ |
| 44 | #define SPEC_CTRL_STIBP (1 << 1) /* Single Thread Indirect Branch Predictors */ | 44 | #define SPEC_CTRL_STIBP_SHIFT 1 /* Single Thread Indirect Branch Predictor (STIBP) bit */ |
| 45 | #define SPEC_CTRL_STIBP (1 << SPEC_CTRL_STIBP_SHIFT) /* STIBP mask */ | ||
| 45 | #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ | 46 | #define SPEC_CTRL_SSBD_SHIFT 2 /* Speculative Store Bypass Disable bit */ |
| 46 | #define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ | 47 | #define SPEC_CTRL_SSBD (1 << SPEC_CTRL_SSBD_SHIFT) /* Speculative Store Bypass Disable */ |
| 47 | 48 | ||
| 48 | #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ | 49 | #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ |
| 49 | #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ | 50 | #define PRED_CMD_IBPB (1 << 0) /* Indirect Branch Prediction Barrier */ |
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 80dc14422495..032b6009baab 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h | |||
| @@ -3,6 +3,8 @@ | |||
| 3 | #ifndef _ASM_X86_NOSPEC_BRANCH_H_ | 3 | #ifndef _ASM_X86_NOSPEC_BRANCH_H_ |
| 4 | #define _ASM_X86_NOSPEC_BRANCH_H_ | 4 | #define _ASM_X86_NOSPEC_BRANCH_H_ |
| 5 | 5 | ||
| 6 | #include <linux/static_key.h> | ||
| 7 | |||
| 6 | #include <asm/alternative.h> | 8 | #include <asm/alternative.h> |
| 7 | #include <asm/alternative-asm.h> | 9 | #include <asm/alternative-asm.h> |
| 8 | #include <asm/cpufeatures.h> | 10 | #include <asm/cpufeatures.h> |
| @@ -162,11 +164,12 @@ | |||
| 162 | _ASM_PTR " 999b\n\t" \ | 164 | _ASM_PTR " 999b\n\t" \ |
| 163 | ".popsection\n\t" | 165 | ".popsection\n\t" |
| 164 | 166 | ||
| 165 | #if defined(CONFIG_X86_64) && defined(RETPOLINE) | 167 | #ifdef CONFIG_RETPOLINE |
| 168 | #ifdef CONFIG_X86_64 | ||
| 166 | 169 | ||
| 167 | /* | 170 | /* |
| 168 | * Since the inline asm uses the %V modifier which is only in newer GCC, | 171 | * Inline asm uses the %V modifier which is only in newer GCC |
| 169 | * the 64-bit one is dependent on RETPOLINE not CONFIG_RETPOLINE. | 172 | * which is ensured when CONFIG_RETPOLINE is defined. |
| 170 | */ | 173 | */ |
| 171 | # define CALL_NOSPEC \ | 174 | # define CALL_NOSPEC \ |
| 172 | ANNOTATE_NOSPEC_ALTERNATIVE \ | 175 | ANNOTATE_NOSPEC_ALTERNATIVE \ |
| @@ -181,7 +184,7 @@ | |||
| 181 | X86_FEATURE_RETPOLINE_AMD) | 184 | X86_FEATURE_RETPOLINE_AMD) |
| 182 | # define THUNK_TARGET(addr) [thunk_target] "r" (addr) | 185 | # define THUNK_TARGET(addr) [thunk_target] "r" (addr) |
| 183 | 186 | ||
| 184 | #elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE) | 187 | #else /* CONFIG_X86_32 */ |
| 185 | /* | 188 | /* |
| 186 | * For i386 we use the original ret-equivalent retpoline, because | 189 | * For i386 we use the original ret-equivalent retpoline, because |
| 187 | * otherwise we'll run out of registers. We don't care about CET | 190 | * otherwise we'll run out of registers. We don't care about CET |
| @@ -211,6 +214,7 @@ | |||
| 211 | X86_FEATURE_RETPOLINE_AMD) | 214 | X86_FEATURE_RETPOLINE_AMD) |
| 212 | 215 | ||
| 213 | # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) | 216 | # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) |
| 217 | #endif | ||
| 214 | #else /* No retpoline for C / inline asm */ | 218 | #else /* No retpoline for C / inline asm */ |
| 215 | # define CALL_NOSPEC "call *%[thunk_target]\n" | 219 | # define CALL_NOSPEC "call *%[thunk_target]\n" |
| 216 | # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) | 220 | # define THUNK_TARGET(addr) [thunk_target] "rm" (addr) |
| @@ -219,13 +223,19 @@ | |||
| 219 | /* The Spectre V2 mitigation variants */ | 223 | /* The Spectre V2 mitigation variants */ |
| 220 | enum spectre_v2_mitigation { | 224 | enum spectre_v2_mitigation { |
| 221 | SPECTRE_V2_NONE, | 225 | SPECTRE_V2_NONE, |
| 222 | SPECTRE_V2_RETPOLINE_MINIMAL, | ||
| 223 | SPECTRE_V2_RETPOLINE_MINIMAL_AMD, | ||
| 224 | SPECTRE_V2_RETPOLINE_GENERIC, | 226 | SPECTRE_V2_RETPOLINE_GENERIC, |
| 225 | SPECTRE_V2_RETPOLINE_AMD, | 227 | SPECTRE_V2_RETPOLINE_AMD, |
| 226 | SPECTRE_V2_IBRS_ENHANCED, | 228 | SPECTRE_V2_IBRS_ENHANCED, |
| 227 | }; | 229 | }; |
| 228 | 230 | ||
| 231 | /* The indirect branch speculation control variants */ | ||
| 232 | enum spectre_v2_user_mitigation { | ||
| 233 | SPECTRE_V2_USER_NONE, | ||
| 234 | SPECTRE_V2_USER_STRICT, | ||
| 235 | SPECTRE_V2_USER_PRCTL, | ||
| 236 | SPECTRE_V2_USER_SECCOMP, | ||
| 237 | }; | ||
| 238 | |||
| 229 | /* The Speculative Store Bypass disable variants */ | 239 | /* The Speculative Store Bypass disable variants */ |
| 230 | enum ssb_mitigation { | 240 | enum ssb_mitigation { |
| 231 | SPEC_STORE_BYPASS_NONE, | 241 | SPEC_STORE_BYPASS_NONE, |
| @@ -303,6 +313,10 @@ do { \ | |||
| 303 | preempt_enable(); \ | 313 | preempt_enable(); \ |
| 304 | } while (0) | 314 | } while (0) |
| 305 | 315 | ||
| 316 | DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp); | ||
| 317 | DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); | ||
| 318 | DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb); | ||
| 319 | |||
| 306 | #endif /* __ASSEMBLY__ */ | 320 | #endif /* __ASSEMBLY__ */ |
| 307 | 321 | ||
| 308 | /* | 322 | /* |
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index cd0cf1c568b4..8f657286d599 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h | |||
| @@ -33,12 +33,14 @@ | |||
| 33 | 33 | ||
| 34 | /* | 34 | /* |
| 35 | * Set __PAGE_OFFSET to the most negative possible address + | 35 | * Set __PAGE_OFFSET to the most negative possible address + |
| 36 | * PGDIR_SIZE*16 (pgd slot 272). The gap is to allow a space for a | 36 | * PGDIR_SIZE*17 (pgd slot 273). |
| 37 | * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's | 37 | * |
| 38 | * what Xen requires. | 38 | * The gap is to allow a space for LDT remap for PTI (1 pgd slot) and space for |
| 39 | * a hypervisor (16 slots). Choosing 16 slots for a hypervisor is arbitrary, | ||
| 40 | * but it's what Xen requires. | ||
| 39 | */ | 41 | */ |
| 40 | #define __PAGE_OFFSET_BASE_L5 _AC(0xff10000000000000, UL) | 42 | #define __PAGE_OFFSET_BASE_L5 _AC(0xff11000000000000, UL) |
| 41 | #define __PAGE_OFFSET_BASE_L4 _AC(0xffff880000000000, UL) | 43 | #define __PAGE_OFFSET_BASE_L4 _AC(0xffff888000000000, UL) |
| 42 | 44 | ||
| 43 | #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT | 45 | #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT |
| 44 | #define __PAGE_OFFSET page_offset_base | 46 | #define __PAGE_OFFSET page_offset_base |
diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 04edd2d58211..84bd9bdc1987 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h | |||
| @@ -111,9 +111,7 @@ extern unsigned int ptrs_per_p4d; | |||
| 111 | */ | 111 | */ |
| 112 | #define MAXMEM (1UL << MAX_PHYSMEM_BITS) | 112 | #define MAXMEM (1UL << MAX_PHYSMEM_BITS) |
| 113 | 113 | ||
| 114 | #define LDT_PGD_ENTRY_L4 -3UL | 114 | #define LDT_PGD_ENTRY -240UL |
| 115 | #define LDT_PGD_ENTRY_L5 -112UL | ||
| 116 | #define LDT_PGD_ENTRY (pgtable_l5_enabled() ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) | ||
| 117 | #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) | 115 | #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) |
| 118 | #define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE) | 116 | #define LDT_END_ADDR (LDT_BASE_ADDR + PGDIR_SIZE) |
| 119 | 117 | ||
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h index 87623c6b13db..bd5ac6cc37db 100644 --- a/arch/x86/include/asm/qspinlock.h +++ b/arch/x86/include/asm/qspinlock.h | |||
| @@ -13,12 +13,15 @@ | |||
| 13 | #define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire | 13 | #define queued_fetch_set_pending_acquire queued_fetch_set_pending_acquire |
| 14 | static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock) | 14 | static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock) |
| 15 | { | 15 | { |
| 16 | u32 val = 0; | 16 | u32 val; |
| 17 | |||
| 18 | if (GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c, | ||
| 19 | "I", _Q_PENDING_OFFSET)) | ||
| 20 | val |= _Q_PENDING_VAL; | ||
| 21 | 17 | ||
| 18 | /* | ||
| 19 | * We can't use GEN_BINARY_RMWcc() inside an if() stmt because asm goto | ||
| 20 | * and CONFIG_PROFILE_ALL_BRANCHES=y results in a label inside a | ||
| 21 | * statement expression, which GCC doesn't like. | ||
| 22 | */ | ||
| 23 | val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c, | ||
| 24 | "I", _Q_PENDING_OFFSET) * _Q_PENDING_VAL; | ||
| 22 | val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK; | 25 | val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK; |
| 23 | 26 | ||
| 24 | return val; | 27 | return val; |
diff --git a/arch/x86/include/asm/spec-ctrl.h b/arch/x86/include/asm/spec-ctrl.h index ae7c2c5cd7f0..5393babc0598 100644 --- a/arch/x86/include/asm/spec-ctrl.h +++ b/arch/x86/include/asm/spec-ctrl.h | |||
| @@ -53,12 +53,24 @@ static inline u64 ssbd_tif_to_spec_ctrl(u64 tifn) | |||
| 53 | return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); | 53 | return (tifn & _TIF_SSBD) >> (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); |
| 54 | } | 54 | } |
| 55 | 55 | ||
| 56 | static inline u64 stibp_tif_to_spec_ctrl(u64 tifn) | ||
| 57 | { | ||
| 58 | BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT); | ||
| 59 | return (tifn & _TIF_SPEC_IB) >> (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT); | ||
| 60 | } | ||
| 61 | |||
| 56 | static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl) | 62 | static inline unsigned long ssbd_spec_ctrl_to_tif(u64 spec_ctrl) |
| 57 | { | 63 | { |
| 58 | BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); | 64 | BUILD_BUG_ON(TIF_SSBD < SPEC_CTRL_SSBD_SHIFT); |
| 59 | return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); | 65 | return (spec_ctrl & SPEC_CTRL_SSBD) << (TIF_SSBD - SPEC_CTRL_SSBD_SHIFT); |
| 60 | } | 66 | } |
| 61 | 67 | ||
| 68 | static inline unsigned long stibp_spec_ctrl_to_tif(u64 spec_ctrl) | ||
| 69 | { | ||
| 70 | BUILD_BUG_ON(TIF_SPEC_IB < SPEC_CTRL_STIBP_SHIFT); | ||
| 71 | return (spec_ctrl & SPEC_CTRL_STIBP) << (TIF_SPEC_IB - SPEC_CTRL_STIBP_SHIFT); | ||
| 72 | } | ||
| 73 | |||
| 62 | static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) | 74 | static inline u64 ssbd_tif_to_amd_ls_cfg(u64 tifn) |
| 63 | { | 75 | { |
| 64 | return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL; | 76 | return (tifn & _TIF_SSBD) ? x86_amd_ls_cfg_ssbd_mask : 0ULL; |
| @@ -70,11 +82,7 @@ extern void speculative_store_bypass_ht_init(void); | |||
| 70 | static inline void speculative_store_bypass_ht_init(void) { } | 82 | static inline void speculative_store_bypass_ht_init(void) { } |
| 71 | #endif | 83 | #endif |
| 72 | 84 | ||
| 73 | extern void speculative_store_bypass_update(unsigned long tif); | 85 | extern void speculation_ctrl_update(unsigned long tif); |
| 74 | 86 | extern void speculation_ctrl_update_current(void); | |
| 75 | static inline void speculative_store_bypass_update_current(void) | ||
| 76 | { | ||
| 77 | speculative_store_bypass_update(current_thread_info()->flags); | ||
| 78 | } | ||
| 79 | 87 | ||
| 80 | #endif | 88 | #endif |
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h index 36bd243843d6..7cf1a270d891 100644 --- a/arch/x86/include/asm/switch_to.h +++ b/arch/x86/include/asm/switch_to.h | |||
| @@ -11,9 +11,6 @@ struct task_struct *__switch_to_asm(struct task_struct *prev, | |||
| 11 | 11 | ||
| 12 | __visible struct task_struct *__switch_to(struct task_struct *prev, | 12 | __visible struct task_struct *__switch_to(struct task_struct *prev, |
| 13 | struct task_struct *next); | 13 | struct task_struct *next); |
| 14 | struct tss_struct; | ||
| 15 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | ||
| 16 | struct tss_struct *tss); | ||
| 17 | 14 | ||
| 18 | /* This runs runs on the previous thread's stack. */ | 15 | /* This runs runs on the previous thread's stack. */ |
| 19 | static inline void prepare_switch_to(struct task_struct *next) | 16 | static inline void prepare_switch_to(struct task_struct *next) |
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index 2ff2a30a264f..82b73b75d67c 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h | |||
| @@ -79,10 +79,12 @@ struct thread_info { | |||
| 79 | #define TIF_SIGPENDING 2 /* signal pending */ | 79 | #define TIF_SIGPENDING 2 /* signal pending */ |
| 80 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ | 80 | #define TIF_NEED_RESCHED 3 /* rescheduling necessary */ |
| 81 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ | 81 | #define TIF_SINGLESTEP 4 /* reenable singlestep on user return*/ |
| 82 | #define TIF_SSBD 5 /* Reduced data speculation */ | 82 | #define TIF_SSBD 5 /* Speculative store bypass disable */ |
| 83 | #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ | 83 | #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ |
| 84 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ | 84 | #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ |
| 85 | #define TIF_SECCOMP 8 /* secure computing */ | 85 | #define TIF_SECCOMP 8 /* secure computing */ |
| 86 | #define TIF_SPEC_IB 9 /* Indirect branch speculation mitigation */ | ||
| 87 | #define TIF_SPEC_FORCE_UPDATE 10 /* Force speculation MSR update in context switch */ | ||
| 86 | #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ | 88 | #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ |
| 87 | #define TIF_UPROBE 12 /* breakpointed or singlestepping */ | 89 | #define TIF_UPROBE 12 /* breakpointed or singlestepping */ |
| 88 | #define TIF_PATCH_PENDING 13 /* pending live patching update */ | 90 | #define TIF_PATCH_PENDING 13 /* pending live patching update */ |
| @@ -110,6 +112,8 @@ struct thread_info { | |||
| 110 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) | 112 | #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) |
| 111 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 113 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
| 112 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) | 114 | #define _TIF_SECCOMP (1 << TIF_SECCOMP) |
| 115 | #define _TIF_SPEC_IB (1 << TIF_SPEC_IB) | ||
| 116 | #define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE) | ||
| 113 | #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) | 117 | #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) |
| 114 | #define _TIF_UPROBE (1 << TIF_UPROBE) | 118 | #define _TIF_UPROBE (1 << TIF_UPROBE) |
| 115 | #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) | 119 | #define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING) |
| @@ -145,8 +149,18 @@ struct thread_info { | |||
| 145 | _TIF_FSCHECK) | 149 | _TIF_FSCHECK) |
| 146 | 150 | ||
| 147 | /* flags to check in __switch_to() */ | 151 | /* flags to check in __switch_to() */ |
| 148 | #define _TIF_WORK_CTXSW \ | 152 | #define _TIF_WORK_CTXSW_BASE \ |
| 149 | (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP|_TIF_SSBD) | 153 | (_TIF_IO_BITMAP|_TIF_NOCPUID|_TIF_NOTSC|_TIF_BLOCKSTEP| \ |
| 154 | _TIF_SSBD | _TIF_SPEC_FORCE_UPDATE) | ||
| 155 | |||
| 156 | /* | ||
| 157 | * Avoid calls to __switch_to_xtra() on UP as STIBP is not evaluated. | ||
| 158 | */ | ||
| 159 | #ifdef CONFIG_SMP | ||
| 160 | # define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE | _TIF_SPEC_IB) | ||
| 161 | #else | ||
| 162 | # define _TIF_WORK_CTXSW (_TIF_WORK_CTXSW_BASE) | ||
| 163 | #endif | ||
| 150 | 164 | ||
| 151 | #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) | 165 | #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) |
| 152 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) | 166 | #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) |
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index d760611cfc35..f4204bf377fc 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h | |||
| @@ -169,10 +169,14 @@ struct tlb_state { | |||
| 169 | 169 | ||
| 170 | #define LOADED_MM_SWITCHING ((struct mm_struct *)1) | 170 | #define LOADED_MM_SWITCHING ((struct mm_struct *)1) |
| 171 | 171 | ||
| 172 | /* Last user mm for optimizing IBPB */ | ||
| 173 | union { | ||
| 174 | struct mm_struct *last_user_mm; | ||
| 175 | unsigned long last_user_mm_ibpb; | ||
| 176 | }; | ||
| 177 | |||
| 172 | u16 loaded_mm_asid; | 178 | u16 loaded_mm_asid; |
| 173 | u16 next_asid; | 179 | u16 next_asid; |
| 174 | /* last user mm's ctx id */ | ||
| 175 | u64 last_ctx_id; | ||
| 176 | 180 | ||
| 177 | /* | 181 | /* |
| 178 | * We can be in one of several states: | 182 | * We can be in one of several states: |
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 0f842104862c..b85a7c54c6a1 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h | |||
| @@ -303,6 +303,4 @@ extern void x86_init_noop(void); | |||
| 303 | extern void x86_init_uint_noop(unsigned int unused); | 303 | extern void x86_init_uint_noop(unsigned int unused); |
| 304 | extern bool x86_pnpbios_disabled(void); | 304 | extern bool x86_pnpbios_disabled(void); |
| 305 | 305 | ||
| 306 | void x86_verify_bootdata_version(void); | ||
| 307 | |||
| 308 | #endif | 306 | #endif |
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index 123e669bf363..790ce08e41f2 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | #include <linux/mm.h> | 9 | #include <linux/mm.h> |
| 10 | #include <linux/device.h> | 10 | #include <linux/device.h> |
| 11 | 11 | ||
| 12 | #include <linux/uaccess.h> | 12 | #include <asm/extable.h> |
| 13 | #include <asm/page.h> | 13 | #include <asm/page.h> |
| 14 | #include <asm/pgtable.h> | 14 | #include <asm/pgtable.h> |
| 15 | 15 | ||
| @@ -93,12 +93,39 @@ clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, | |||
| 93 | */ | 93 | */ |
| 94 | static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val) | 94 | static inline int xen_safe_write_ulong(unsigned long *addr, unsigned long val) |
| 95 | { | 95 | { |
| 96 | return __put_user(val, (unsigned long __user *)addr); | 96 | int ret = 0; |
| 97 | |||
| 98 | asm volatile("1: mov %[val], %[ptr]\n" | ||
| 99 | "2:\n" | ||
| 100 | ".section .fixup, \"ax\"\n" | ||
| 101 | "3: sub $1, %[ret]\n" | ||
| 102 | " jmp 2b\n" | ||
| 103 | ".previous\n" | ||
| 104 | _ASM_EXTABLE(1b, 3b) | ||
| 105 | : [ret] "+r" (ret), [ptr] "=m" (*addr) | ||
| 106 | : [val] "r" (val)); | ||
| 107 | |||
| 108 | return ret; | ||
| 97 | } | 109 | } |
| 98 | 110 | ||
| 99 | static inline int xen_safe_read_ulong(unsigned long *addr, unsigned long *val) | 111 | static inline int xen_safe_read_ulong(const unsigned long *addr, |
| 112 | unsigned long *val) | ||
| 100 | { | 113 | { |
| 101 | return __get_user(*val, (unsigned long __user *)addr); | 114 | int ret = 0; |
| 115 | unsigned long rval = ~0ul; | ||
| 116 | |||
| 117 | asm volatile("1: mov %[ptr], %[rval]\n" | ||
| 118 | "2:\n" | ||
| 119 | ".section .fixup, \"ax\"\n" | ||
| 120 | "3: sub $1, %[ret]\n" | ||
| 121 | " jmp 2b\n" | ||
| 122 | ".previous\n" | ||
| 123 | _ASM_EXTABLE(1b, 3b) | ||
| 124 | : [ret] "+r" (ret), [rval] "+r" (rval) | ||
| 125 | : [ptr] "m" (*addr)); | ||
| 126 | *val = rval; | ||
| 127 | |||
| 128 | return ret; | ||
| 102 | } | 129 | } |
| 103 | 130 | ||
| 104 | #ifdef CONFIG_XEN_PV | 131 | #ifdef CONFIG_XEN_PV |
diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 22f89d040ddd..60733f137e9a 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h | |||
| @@ -16,9 +16,6 @@ | |||
| 16 | #define RAMDISK_PROMPT_FLAG 0x8000 | 16 | #define RAMDISK_PROMPT_FLAG 0x8000 |
| 17 | #define RAMDISK_LOAD_FLAG 0x4000 | 17 | #define RAMDISK_LOAD_FLAG 0x4000 |
| 18 | 18 | ||
| 19 | /* version flags */ | ||
| 20 | #define VERSION_WRITTEN 0x8000 | ||
| 21 | |||
| 22 | /* loadflags */ | 19 | /* loadflags */ |
| 23 | #define LOADED_HIGH (1<<0) | 20 | #define LOADED_HIGH (1<<0) |
| 24 | #define KASLR_FLAG (1<<1) | 21 | #define KASLR_FLAG (1<<1) |
| @@ -89,7 +86,6 @@ struct setup_header { | |||
| 89 | __u64 pref_address; | 86 | __u64 pref_address; |
| 90 | __u32 init_size; | 87 | __u32 init_size; |
| 91 | __u32 handover_offset; | 88 | __u32 handover_offset; |
| 92 | __u64 acpi_rsdp_addr; | ||
| 93 | } __attribute__((packed)); | 89 | } __attribute__((packed)); |
| 94 | 90 | ||
| 95 | struct sys_desc_table { | 91 | struct sys_desc_table { |
| @@ -159,7 +155,8 @@ struct boot_params { | |||
| 159 | __u8 _pad2[4]; /* 0x054 */ | 155 | __u8 _pad2[4]; /* 0x054 */ |
| 160 | __u64 tboot_addr; /* 0x058 */ | 156 | __u64 tboot_addr; /* 0x058 */ |
| 161 | struct ist_info ist_info; /* 0x060 */ | 157 | struct ist_info ist_info; /* 0x060 */ |
| 162 | __u8 _pad3[16]; /* 0x070 */ | 158 | __u64 acpi_rsdp_addr; /* 0x070 */ |
| 159 | __u8 _pad3[8]; /* 0x078 */ | ||
| 163 | __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */ | 160 | __u8 hd0_info[16]; /* obsolete! */ /* 0x080 */ |
| 164 | __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */ | 161 | __u8 hd1_info[16]; /* obsolete! */ /* 0x090 */ |
| 165 | struct sys_desc_table sys_desc_table; /* obsolete! */ /* 0x0a0 */ | 162 | struct sys_desc_table sys_desc_table; /* obsolete! */ /* 0x0a0 */ |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 92c76bf97ad8..06635fbca81c 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
| @@ -1776,5 +1776,5 @@ void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size) | |||
| 1776 | 1776 | ||
| 1777 | u64 x86_default_get_root_pointer(void) | 1777 | u64 x86_default_get_root_pointer(void) |
| 1778 | { | 1778 | { |
| 1779 | return boot_params.hdr.acpi_rsdp_addr; | 1779 | return boot_params.acpi_rsdp_addr; |
| 1780 | } | 1780 | } |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c37e66e493bf..500278f5308e 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
| 15 | #include <linux/nospec.h> | 15 | #include <linux/nospec.h> |
| 16 | #include <linux/prctl.h> | 16 | #include <linux/prctl.h> |
| 17 | #include <linux/sched/smt.h> | ||
| 17 | 18 | ||
| 18 | #include <asm/spec-ctrl.h> | 19 | #include <asm/spec-ctrl.h> |
| 19 | #include <asm/cmdline.h> | 20 | #include <asm/cmdline.h> |
| @@ -53,6 +54,13 @@ static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS; | |||
| 53 | u64 __ro_after_init x86_amd_ls_cfg_base; | 54 | u64 __ro_after_init x86_amd_ls_cfg_base; |
| 54 | u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; | 55 | u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask; |
| 55 | 56 | ||
| 57 | /* Control conditional STIPB in switch_to() */ | ||
| 58 | DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp); | ||
| 59 | /* Control conditional IBPB in switch_mm() */ | ||
| 60 | DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb); | ||
| 61 | /* Control unconditional IBPB in switch_mm() */ | ||
| 62 | DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb); | ||
| 63 | |||
| 56 | void __init check_bugs(void) | 64 | void __init check_bugs(void) |
| 57 | { | 65 | { |
| 58 | identify_boot_cpu(); | 66 | identify_boot_cpu(); |
| @@ -123,31 +131,6 @@ void __init check_bugs(void) | |||
| 123 | #endif | 131 | #endif |
| 124 | } | 132 | } |
| 125 | 133 | ||
| 126 | /* The kernel command line selection */ | ||
| 127 | enum spectre_v2_mitigation_cmd { | ||
| 128 | SPECTRE_V2_CMD_NONE, | ||
| 129 | SPECTRE_V2_CMD_AUTO, | ||
| 130 | SPECTRE_V2_CMD_FORCE, | ||
| 131 | SPECTRE_V2_CMD_RETPOLINE, | ||
| 132 | SPECTRE_V2_CMD_RETPOLINE_GENERIC, | ||
| 133 | SPECTRE_V2_CMD_RETPOLINE_AMD, | ||
| 134 | }; | ||
| 135 | |||
| 136 | static const char *spectre_v2_strings[] = { | ||
| 137 | [SPECTRE_V2_NONE] = "Vulnerable", | ||
| 138 | [SPECTRE_V2_RETPOLINE_MINIMAL] = "Vulnerable: Minimal generic ASM retpoline", | ||
| 139 | [SPECTRE_V2_RETPOLINE_MINIMAL_AMD] = "Vulnerable: Minimal AMD ASM retpoline", | ||
| 140 | [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", | ||
| 141 | [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", | ||
| 142 | [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS", | ||
| 143 | }; | ||
| 144 | |||
| 145 | #undef pr_fmt | ||
| 146 | #define pr_fmt(fmt) "Spectre V2 : " fmt | ||
| 147 | |||
| 148 | static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = | ||
| 149 | SPECTRE_V2_NONE; | ||
| 150 | |||
| 151 | void | 134 | void |
| 152 | x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) | 135 | x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) |
| 153 | { | 136 | { |
| @@ -169,6 +152,10 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) | |||
| 169 | static_cpu_has(X86_FEATURE_AMD_SSBD)) | 152 | static_cpu_has(X86_FEATURE_AMD_SSBD)) |
| 170 | hostval |= ssbd_tif_to_spec_ctrl(ti->flags); | 153 | hostval |= ssbd_tif_to_spec_ctrl(ti->flags); |
| 171 | 154 | ||
| 155 | /* Conditional STIBP enabled? */ | ||
| 156 | if (static_branch_unlikely(&switch_to_cond_stibp)) | ||
| 157 | hostval |= stibp_tif_to_spec_ctrl(ti->flags); | ||
| 158 | |||
| 172 | if (hostval != guestval) { | 159 | if (hostval != guestval) { |
| 173 | msrval = setguest ? guestval : hostval; | 160 | msrval = setguest ? guestval : hostval; |
| 174 | wrmsrl(MSR_IA32_SPEC_CTRL, msrval); | 161 | wrmsrl(MSR_IA32_SPEC_CTRL, msrval); |
| @@ -202,7 +189,7 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest) | |||
| 202 | tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : | 189 | tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) : |
| 203 | ssbd_spec_ctrl_to_tif(hostval); | 190 | ssbd_spec_ctrl_to_tif(hostval); |
| 204 | 191 | ||
| 205 | speculative_store_bypass_update(tif); | 192 | speculation_ctrl_update(tif); |
| 206 | } | 193 | } |
| 207 | } | 194 | } |
| 208 | EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); | 195 | EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl); |
| @@ -217,6 +204,15 @@ static void x86_amd_ssb_disable(void) | |||
| 217 | wrmsrl(MSR_AMD64_LS_CFG, msrval); | 204 | wrmsrl(MSR_AMD64_LS_CFG, msrval); |
| 218 | } | 205 | } |
| 219 | 206 | ||
| 207 | #undef pr_fmt | ||
| 208 | #define pr_fmt(fmt) "Spectre V2 : " fmt | ||
| 209 | |||
| 210 | static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init = | ||
| 211 | SPECTRE_V2_NONE; | ||
| 212 | |||
| 213 | static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init = | ||
| 214 | SPECTRE_V2_USER_NONE; | ||
| 215 | |||
| 220 | #ifdef RETPOLINE | 216 | #ifdef RETPOLINE |
| 221 | static bool spectre_v2_bad_module; | 217 | static bool spectre_v2_bad_module; |
| 222 | 218 | ||
| @@ -238,67 +234,217 @@ static inline const char *spectre_v2_module_string(void) | |||
| 238 | static inline const char *spectre_v2_module_string(void) { return ""; } | 234 | static inline const char *spectre_v2_module_string(void) { return ""; } |
| 239 | #endif | 235 | #endif |
| 240 | 236 | ||
| 241 | static void __init spec2_print_if_insecure(const char *reason) | 237 | static inline bool match_option(const char *arg, int arglen, const char *opt) |
| 242 | { | 238 | { |
| 243 | if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | 239 | int len = strlen(opt); |
| 244 | pr_info("%s selected on command line.\n", reason); | 240 | |
| 241 | return len == arglen && !strncmp(arg, opt, len); | ||
| 245 | } | 242 | } |
| 246 | 243 | ||
| 247 | static void __init spec2_print_if_secure(const char *reason) | 244 | /* The kernel command line selection for spectre v2 */ |
| 245 | enum spectre_v2_mitigation_cmd { | ||
| 246 | SPECTRE_V2_CMD_NONE, | ||
| 247 | SPECTRE_V2_CMD_AUTO, | ||
| 248 | SPECTRE_V2_CMD_FORCE, | ||
| 249 | SPECTRE_V2_CMD_RETPOLINE, | ||
| 250 | SPECTRE_V2_CMD_RETPOLINE_GENERIC, | ||
| 251 | SPECTRE_V2_CMD_RETPOLINE_AMD, | ||
| 252 | }; | ||
| 253 | |||
| 254 | enum spectre_v2_user_cmd { | ||
| 255 | SPECTRE_V2_USER_CMD_NONE, | ||
| 256 | SPECTRE_V2_USER_CMD_AUTO, | ||
| 257 | SPECTRE_V2_USER_CMD_FORCE, | ||
| 258 | SPECTRE_V2_USER_CMD_PRCTL, | ||
| 259 | SPECTRE_V2_USER_CMD_PRCTL_IBPB, | ||
| 260 | SPECTRE_V2_USER_CMD_SECCOMP, | ||
| 261 | SPECTRE_V2_USER_CMD_SECCOMP_IBPB, | ||
| 262 | }; | ||
| 263 | |||
| 264 | static const char * const spectre_v2_user_strings[] = { | ||
| 265 | [SPECTRE_V2_USER_NONE] = "User space: Vulnerable", | ||
| 266 | [SPECTRE_V2_USER_STRICT] = "User space: Mitigation: STIBP protection", | ||
| 267 | [SPECTRE_V2_USER_PRCTL] = "User space: Mitigation: STIBP via prctl", | ||
| 268 | [SPECTRE_V2_USER_SECCOMP] = "User space: Mitigation: STIBP via seccomp and prctl", | ||
| 269 | }; | ||
| 270 | |||
| 271 | static const struct { | ||
| 272 | const char *option; | ||
| 273 | enum spectre_v2_user_cmd cmd; | ||
| 274 | bool secure; | ||
| 275 | } v2_user_options[] __initdata = { | ||
| 276 | { "auto", SPECTRE_V2_USER_CMD_AUTO, false }, | ||
| 277 | { "off", SPECTRE_V2_USER_CMD_NONE, false }, | ||
| 278 | { "on", SPECTRE_V2_USER_CMD_FORCE, true }, | ||
| 279 | { "prctl", SPECTRE_V2_USER_CMD_PRCTL, false }, | ||
| 280 | { "prctl,ibpb", SPECTRE_V2_USER_CMD_PRCTL_IBPB, false }, | ||
| 281 | { "seccomp", SPECTRE_V2_USER_CMD_SECCOMP, false }, | ||
| 282 | { "seccomp,ibpb", SPECTRE_V2_USER_CMD_SECCOMP_IBPB, false }, | ||
| 283 | }; | ||
| 284 | |||
| 285 | static void __init spec_v2_user_print_cond(const char *reason, bool secure) | ||
| 248 | { | 286 | { |
| 249 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | 287 | if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) |
| 250 | pr_info("%s selected on command line.\n", reason); | 288 | pr_info("spectre_v2_user=%s forced on command line.\n", reason); |
| 251 | } | 289 | } |
| 252 | 290 | ||
| 253 | static inline bool retp_compiler(void) | 291 | static enum spectre_v2_user_cmd __init |
| 292 | spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd) | ||
| 254 | { | 293 | { |
| 255 | return __is_defined(RETPOLINE); | 294 | char arg[20]; |
| 295 | int ret, i; | ||
| 296 | |||
| 297 | switch (v2_cmd) { | ||
| 298 | case SPECTRE_V2_CMD_NONE: | ||
| 299 | return SPECTRE_V2_USER_CMD_NONE; | ||
| 300 | case SPECTRE_V2_CMD_FORCE: | ||
| 301 | return SPECTRE_V2_USER_CMD_FORCE; | ||
| 302 | default: | ||
| 303 | break; | ||
| 304 | } | ||
| 305 | |||
| 306 | ret = cmdline_find_option(boot_command_line, "spectre_v2_user", | ||
| 307 | arg, sizeof(arg)); | ||
| 308 | if (ret < 0) | ||
| 309 | return SPECTRE_V2_USER_CMD_AUTO; | ||
| 310 | |||
| 311 | for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) { | ||
| 312 | if (match_option(arg, ret, v2_user_options[i].option)) { | ||
| 313 | spec_v2_user_print_cond(v2_user_options[i].option, | ||
| 314 | v2_user_options[i].secure); | ||
| 315 | return v2_user_options[i].cmd; | ||
| 316 | } | ||
| 317 | } | ||
| 318 | |||
| 319 | pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg); | ||
| 320 | return SPECTRE_V2_USER_CMD_AUTO; | ||
| 256 | } | 321 | } |
| 257 | 322 | ||
| 258 | static inline bool match_option(const char *arg, int arglen, const char *opt) | 323 | static void __init |
| 324 | spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd) | ||
| 259 | { | 325 | { |
| 260 | int len = strlen(opt); | 326 | enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE; |
| 327 | bool smt_possible = IS_ENABLED(CONFIG_SMP); | ||
| 328 | enum spectre_v2_user_cmd cmd; | ||
| 261 | 329 | ||
| 262 | return len == arglen && !strncmp(arg, opt, len); | 330 | if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP)) |
| 331 | return; | ||
| 332 | |||
| 333 | if (cpu_smt_control == CPU_SMT_FORCE_DISABLED || | ||
| 334 | cpu_smt_control == CPU_SMT_NOT_SUPPORTED) | ||
| 335 | smt_possible = false; | ||
| 336 | |||
| 337 | cmd = spectre_v2_parse_user_cmdline(v2_cmd); | ||
| 338 | switch (cmd) { | ||
| 339 | case SPECTRE_V2_USER_CMD_NONE: | ||
| 340 | goto set_mode; | ||
| 341 | case SPECTRE_V2_USER_CMD_FORCE: | ||
| 342 | mode = SPECTRE_V2_USER_STRICT; | ||
| 343 | break; | ||
| 344 | case SPECTRE_V2_USER_CMD_PRCTL: | ||
| 345 | case SPECTRE_V2_USER_CMD_PRCTL_IBPB: | ||
| 346 | mode = SPECTRE_V2_USER_PRCTL; | ||
| 347 | break; | ||
| 348 | case SPECTRE_V2_USER_CMD_AUTO: | ||
| 349 | case SPECTRE_V2_USER_CMD_SECCOMP: | ||
| 350 | case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: | ||
| 351 | if (IS_ENABLED(CONFIG_SECCOMP)) | ||
| 352 | mode = SPECTRE_V2_USER_SECCOMP; | ||
| 353 | else | ||
| 354 | mode = SPECTRE_V2_USER_PRCTL; | ||
| 355 | break; | ||
| 356 | } | ||
| 357 | |||
| 358 | /* Initialize Indirect Branch Prediction Barrier */ | ||
| 359 | if (boot_cpu_has(X86_FEATURE_IBPB)) { | ||
| 360 | setup_force_cpu_cap(X86_FEATURE_USE_IBPB); | ||
| 361 | |||
| 362 | switch (cmd) { | ||
| 363 | case SPECTRE_V2_USER_CMD_FORCE: | ||
| 364 | case SPECTRE_V2_USER_CMD_PRCTL_IBPB: | ||
| 365 | case SPECTRE_V2_USER_CMD_SECCOMP_IBPB: | ||
| 366 | static_branch_enable(&switch_mm_always_ibpb); | ||
| 367 | break; | ||
| 368 | case SPECTRE_V2_USER_CMD_PRCTL: | ||
| 369 | case SPECTRE_V2_USER_CMD_AUTO: | ||
| 370 | case SPECTRE_V2_USER_CMD_SECCOMP: | ||
| 371 | static_branch_enable(&switch_mm_cond_ibpb); | ||
| 372 | break; | ||
| 373 | default: | ||
| 374 | break; | ||
| 375 | } | ||
| 376 | |||
| 377 | pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n", | ||
| 378 | static_key_enabled(&switch_mm_always_ibpb) ? | ||
| 379 | "always-on" : "conditional"); | ||
| 380 | } | ||
| 381 | |||
| 382 | /* If enhanced IBRS is enabled no STIPB required */ | ||
| 383 | if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) | ||
| 384 | return; | ||
| 385 | |||
| 386 | /* | ||
| 387 | * If SMT is not possible or STIBP is not available clear the STIPB | ||
| 388 | * mode. | ||
| 389 | */ | ||
| 390 | if (!smt_possible || !boot_cpu_has(X86_FEATURE_STIBP)) | ||
| 391 | mode = SPECTRE_V2_USER_NONE; | ||
| 392 | set_mode: | ||
| 393 | spectre_v2_user = mode; | ||
| 394 | /* Only print the STIBP mode when SMT possible */ | ||
| 395 | if (smt_possible) | ||
| 396 | pr_info("%s\n", spectre_v2_user_strings[mode]); | ||
| 263 | } | 397 | } |
| 264 | 398 | ||
| 399 | static const char * const spectre_v2_strings[] = { | ||
| 400 | [SPECTRE_V2_NONE] = "Vulnerable", | ||
| 401 | [SPECTRE_V2_RETPOLINE_GENERIC] = "Mitigation: Full generic retpoline", | ||
| 402 | [SPECTRE_V2_RETPOLINE_AMD] = "Mitigation: Full AMD retpoline", | ||
| 403 | [SPECTRE_V2_IBRS_ENHANCED] = "Mitigation: Enhanced IBRS", | ||
| 404 | }; | ||
| 405 | |||
| 265 | static const struct { | 406 | static const struct { |
| 266 | const char *option; | 407 | const char *option; |
| 267 | enum spectre_v2_mitigation_cmd cmd; | 408 | enum spectre_v2_mitigation_cmd cmd; |
| 268 | bool secure; | 409 | bool secure; |
| 269 | } mitigation_options[] = { | 410 | } mitigation_options[] __initdata = { |
| 270 | { "off", SPECTRE_V2_CMD_NONE, false }, | 411 | { "off", SPECTRE_V2_CMD_NONE, false }, |
| 271 | { "on", SPECTRE_V2_CMD_FORCE, true }, | 412 | { "on", SPECTRE_V2_CMD_FORCE, true }, |
| 272 | { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, | 413 | { "retpoline", SPECTRE_V2_CMD_RETPOLINE, false }, |
| 273 | { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, | 414 | { "retpoline,amd", SPECTRE_V2_CMD_RETPOLINE_AMD, false }, |
| 274 | { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, | 415 | { "retpoline,generic", SPECTRE_V2_CMD_RETPOLINE_GENERIC, false }, |
| 275 | { "auto", SPECTRE_V2_CMD_AUTO, false }, | 416 | { "auto", SPECTRE_V2_CMD_AUTO, false }, |
| 276 | }; | 417 | }; |
| 277 | 418 | ||
| 419 | static void __init spec_v2_print_cond(const char *reason, bool secure) | ||
| 420 | { | ||
| 421 | if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure) | ||
| 422 | pr_info("%s selected on command line.\n", reason); | ||
| 423 | } | ||
| 424 | |||
| 278 | static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) | 425 | static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) |
| 279 | { | 426 | { |
| 427 | enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; | ||
| 280 | char arg[20]; | 428 | char arg[20]; |
| 281 | int ret, i; | 429 | int ret, i; |
| 282 | enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO; | ||
| 283 | 430 | ||
| 284 | if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) | 431 | if (cmdline_find_option_bool(boot_command_line, "nospectre_v2")) |
| 285 | return SPECTRE_V2_CMD_NONE; | 432 | return SPECTRE_V2_CMD_NONE; |
| 286 | else { | ||
| 287 | ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); | ||
| 288 | if (ret < 0) | ||
| 289 | return SPECTRE_V2_CMD_AUTO; | ||
| 290 | 433 | ||
| 291 | for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { | 434 | ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg)); |
| 292 | if (!match_option(arg, ret, mitigation_options[i].option)) | 435 | if (ret < 0) |
| 293 | continue; | 436 | return SPECTRE_V2_CMD_AUTO; |
| 294 | cmd = mitigation_options[i].cmd; | ||
| 295 | break; | ||
| 296 | } | ||
| 297 | 437 | ||
| 298 | if (i >= ARRAY_SIZE(mitigation_options)) { | 438 | for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) { |
| 299 | pr_err("unknown option (%s). Switching to AUTO select\n", arg); | 439 | if (!match_option(arg, ret, mitigation_options[i].option)) |
| 300 | return SPECTRE_V2_CMD_AUTO; | 440 | continue; |
| 301 | } | 441 | cmd = mitigation_options[i].cmd; |
| 442 | break; | ||
| 443 | } | ||
| 444 | |||
| 445 | if (i >= ARRAY_SIZE(mitigation_options)) { | ||
| 446 | pr_err("unknown option (%s). Switching to AUTO select\n", arg); | ||
| 447 | return SPECTRE_V2_CMD_AUTO; | ||
| 302 | } | 448 | } |
| 303 | 449 | ||
| 304 | if ((cmd == SPECTRE_V2_CMD_RETPOLINE || | 450 | if ((cmd == SPECTRE_V2_CMD_RETPOLINE || |
| @@ -316,54 +462,11 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void) | |||
| 316 | return SPECTRE_V2_CMD_AUTO; | 462 | return SPECTRE_V2_CMD_AUTO; |
| 317 | } | 463 | } |
| 318 | 464 | ||
| 319 | if (mitigation_options[i].secure) | 465 | spec_v2_print_cond(mitigation_options[i].option, |
| 320 | spec2_print_if_secure(mitigation_options[i].option); | 466 | mitigation_options[i].secure); |
| 321 | else | ||
| 322 | spec2_print_if_insecure(mitigation_options[i].option); | ||
| 323 | |||
| 324 | return cmd; | 467 | return cmd; |
| 325 | } | 468 | } |
| 326 | 469 | ||
| 327 | static bool stibp_needed(void) | ||
| 328 | { | ||
| 329 | if (spectre_v2_enabled == SPECTRE_V2_NONE) | ||
| 330 | return false; | ||
| 331 | |||
| 332 | if (!boot_cpu_has(X86_FEATURE_STIBP)) | ||
| 333 | return false; | ||
| 334 | |||
| 335 | return true; | ||
| 336 | } | ||
| 337 | |||
| 338 | static void update_stibp_msr(void *info) | ||
| 339 | { | ||
| 340 | wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); | ||
| 341 | } | ||
| 342 | |||
| 343 | void arch_smt_update(void) | ||
| 344 | { | ||
| 345 | u64 mask; | ||
| 346 | |||
| 347 | if (!stibp_needed()) | ||
| 348 | return; | ||
| 349 | |||
| 350 | mutex_lock(&spec_ctrl_mutex); | ||
| 351 | mask = x86_spec_ctrl_base; | ||
| 352 | if (cpu_smt_control == CPU_SMT_ENABLED) | ||
| 353 | mask |= SPEC_CTRL_STIBP; | ||
| 354 | else | ||
| 355 | mask &= ~SPEC_CTRL_STIBP; | ||
| 356 | |||
| 357 | if (mask != x86_spec_ctrl_base) { | ||
| 358 | pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n", | ||
| 359 | cpu_smt_control == CPU_SMT_ENABLED ? | ||
| 360 | "Enabling" : "Disabling"); | ||
| 361 | x86_spec_ctrl_base = mask; | ||
| 362 | on_each_cpu(update_stibp_msr, NULL, 1); | ||
| 363 | } | ||
| 364 | mutex_unlock(&spec_ctrl_mutex); | ||
| 365 | } | ||
| 366 | |||
| 367 | static void __init spectre_v2_select_mitigation(void) | 470 | static void __init spectre_v2_select_mitigation(void) |
| 368 | { | 471 | { |
| 369 | enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); | 472 | enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline(); |
| @@ -417,14 +520,12 @@ retpoline_auto: | |||
| 417 | pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); | 520 | pr_err("Spectre mitigation: LFENCE not serializing, switching to generic retpoline\n"); |
| 418 | goto retpoline_generic; | 521 | goto retpoline_generic; |
| 419 | } | 522 | } |
| 420 | mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_AMD : | 523 | mode = SPECTRE_V2_RETPOLINE_AMD; |
| 421 | SPECTRE_V2_RETPOLINE_MINIMAL_AMD; | ||
| 422 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); | 524 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE_AMD); |
| 423 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE); | 525 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE); |
| 424 | } else { | 526 | } else { |
| 425 | retpoline_generic: | 527 | retpoline_generic: |
| 426 | mode = retp_compiler() ? SPECTRE_V2_RETPOLINE_GENERIC : | 528 | mode = SPECTRE_V2_RETPOLINE_GENERIC; |
| 427 | SPECTRE_V2_RETPOLINE_MINIMAL; | ||
| 428 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE); | 529 | setup_force_cpu_cap(X86_FEATURE_RETPOLINE); |
| 429 | } | 530 | } |
| 430 | 531 | ||
| @@ -443,12 +544,6 @@ specv2_set_mode: | |||
| 443 | setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); | 544 | setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW); |
| 444 | pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); | 545 | pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n"); |
| 445 | 546 | ||
| 446 | /* Initialize Indirect Branch Prediction Barrier if supported */ | ||
| 447 | if (boot_cpu_has(X86_FEATURE_IBPB)) { | ||
| 448 | setup_force_cpu_cap(X86_FEATURE_USE_IBPB); | ||
| 449 | pr_info("Spectre v2 mitigation: Enabling Indirect Branch Prediction Barrier\n"); | ||
| 450 | } | ||
| 451 | |||
| 452 | /* | 547 | /* |
| 453 | * Retpoline means the kernel is safe because it has no indirect | 548 | * Retpoline means the kernel is safe because it has no indirect |
| 454 | * branches. Enhanced IBRS protects firmware too, so, enable restricted | 549 | * branches. Enhanced IBRS protects firmware too, so, enable restricted |
| @@ -465,10 +560,67 @@ specv2_set_mode: | |||
| 465 | pr_info("Enabling Restricted Speculation for firmware calls\n"); | 560 | pr_info("Enabling Restricted Speculation for firmware calls\n"); |
| 466 | } | 561 | } |
| 467 | 562 | ||
| 563 | /* Set up IBPB and STIBP depending on the general spectre V2 command */ | ||
| 564 | spectre_v2_user_select_mitigation(cmd); | ||
| 565 | |||
| 468 | /* Enable STIBP if appropriate */ | 566 | /* Enable STIBP if appropriate */ |
| 469 | arch_smt_update(); | 567 | arch_smt_update(); |
| 470 | } | 568 | } |
| 471 | 569 | ||
| 570 | static void update_stibp_msr(void * __unused) | ||
| 571 | { | ||
| 572 | wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base); | ||
| 573 | } | ||
| 574 | |||
| 575 | /* Update x86_spec_ctrl_base in case SMT state changed. */ | ||
| 576 | static void update_stibp_strict(void) | ||
| 577 | { | ||
| 578 | u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP; | ||
| 579 | |||
| 580 | if (sched_smt_active()) | ||
| 581 | mask |= SPEC_CTRL_STIBP; | ||
| 582 | |||
| 583 | if (mask == x86_spec_ctrl_base) | ||
| 584 | return; | ||
| 585 | |||
| 586 | pr_info("Update user space SMT mitigation: STIBP %s\n", | ||
| 587 | mask & SPEC_CTRL_STIBP ? "always-on" : "off"); | ||
| 588 | x86_spec_ctrl_base = mask; | ||
| 589 | on_each_cpu(update_stibp_msr, NULL, 1); | ||
| 590 | } | ||
| 591 | |||
| 592 | /* Update the static key controlling the evaluation of TIF_SPEC_IB */ | ||
| 593 | static void update_indir_branch_cond(void) | ||
| 594 | { | ||
| 595 | if (sched_smt_active()) | ||
| 596 | static_branch_enable(&switch_to_cond_stibp); | ||
| 597 | else | ||
| 598 | static_branch_disable(&switch_to_cond_stibp); | ||
| 599 | } | ||
| 600 | |||
| 601 | void arch_smt_update(void) | ||
| 602 | { | ||
| 603 | /* Enhanced IBRS implies STIBP. No update required. */ | ||
| 604 | if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) | ||
| 605 | return; | ||
| 606 | |||
| 607 | mutex_lock(&spec_ctrl_mutex); | ||
| 608 | |||
| 609 | switch (spectre_v2_user) { | ||
| 610 | case SPECTRE_V2_USER_NONE: | ||
| 611 | break; | ||
| 612 | case SPECTRE_V2_USER_STRICT: | ||
| 613 | update_stibp_strict(); | ||
| 614 | break; | ||
| 615 | case SPECTRE_V2_USER_PRCTL: | ||
| 616 | case SPECTRE_V2_USER_SECCOMP: | ||
| 617 | update_indir_branch_cond(); | ||
| 618 | break; | ||
| 619 | } | ||
| 620 | |||
| 621 | mutex_unlock(&spec_ctrl_mutex); | ||
| 622 | } | ||
| 623 | |||
| 472 | #undef pr_fmt | 624 | #undef pr_fmt |
| 473 | #define pr_fmt(fmt) "Speculative Store Bypass: " fmt | 625 | #define pr_fmt(fmt) "Speculative Store Bypass: " fmt |
| 474 | 626 | ||
| @@ -483,7 +635,7 @@ enum ssb_mitigation_cmd { | |||
| 483 | SPEC_STORE_BYPASS_CMD_SECCOMP, | 635 | SPEC_STORE_BYPASS_CMD_SECCOMP, |
| 484 | }; | 636 | }; |
| 485 | 637 | ||
| 486 | static const char *ssb_strings[] = { | 638 | static const char * const ssb_strings[] = { |
| 487 | [SPEC_STORE_BYPASS_NONE] = "Vulnerable", | 639 | [SPEC_STORE_BYPASS_NONE] = "Vulnerable", |
| 488 | [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", | 640 | [SPEC_STORE_BYPASS_DISABLE] = "Mitigation: Speculative Store Bypass disabled", |
| 489 | [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", | 641 | [SPEC_STORE_BYPASS_PRCTL] = "Mitigation: Speculative Store Bypass disabled via prctl", |
| @@ -493,7 +645,7 @@ static const char *ssb_strings[] = { | |||
| 493 | static const struct { | 645 | static const struct { |
| 494 | const char *option; | 646 | const char *option; |
| 495 | enum ssb_mitigation_cmd cmd; | 647 | enum ssb_mitigation_cmd cmd; |
| 496 | } ssb_mitigation_options[] = { | 648 | } ssb_mitigation_options[] __initdata = { |
| 497 | { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ | 649 | { "auto", SPEC_STORE_BYPASS_CMD_AUTO }, /* Platform decides */ |
| 498 | { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ | 650 | { "on", SPEC_STORE_BYPASS_CMD_ON }, /* Disable Speculative Store Bypass */ |
| 499 | { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ | 651 | { "off", SPEC_STORE_BYPASS_CMD_NONE }, /* Don't touch Speculative Store Bypass */ |
| @@ -604,10 +756,25 @@ static void ssb_select_mitigation(void) | |||
| 604 | #undef pr_fmt | 756 | #undef pr_fmt |
| 605 | #define pr_fmt(fmt) "Speculation prctl: " fmt | 757 | #define pr_fmt(fmt) "Speculation prctl: " fmt |
| 606 | 758 | ||
| 607 | static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) | 759 | static void task_update_spec_tif(struct task_struct *tsk) |
| 608 | { | 760 | { |
| 609 | bool update; | 761 | /* Force the update of the real TIF bits */ |
| 762 | set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE); | ||
| 610 | 763 | ||
| 764 | /* | ||
| 765 | * Immediately update the speculation control MSRs for the current | ||
| 766 | * task, but for a non-current task delay setting the CPU | ||
| 767 | * mitigation until it is scheduled next. | ||
| 768 | * | ||
| 769 | * This can only happen for SECCOMP mitigation. For PRCTL it's | ||
| 770 | * always the current task. | ||
| 771 | */ | ||
| 772 | if (tsk == current) | ||
| 773 | speculation_ctrl_update_current(); | ||
| 774 | } | ||
| 775 | |||
| 776 | static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) | ||
| 777 | { | ||
| 611 | if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && | 778 | if (ssb_mode != SPEC_STORE_BYPASS_PRCTL && |
| 612 | ssb_mode != SPEC_STORE_BYPASS_SECCOMP) | 779 | ssb_mode != SPEC_STORE_BYPASS_SECCOMP) |
| 613 | return -ENXIO; | 780 | return -ENXIO; |
| @@ -618,28 +785,56 @@ static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl) | |||
| 618 | if (task_spec_ssb_force_disable(task)) | 785 | if (task_spec_ssb_force_disable(task)) |
| 619 | return -EPERM; | 786 | return -EPERM; |
| 620 | task_clear_spec_ssb_disable(task); | 787 | task_clear_spec_ssb_disable(task); |
| 621 | update = test_and_clear_tsk_thread_flag(task, TIF_SSBD); | 788 | task_update_spec_tif(task); |
| 622 | break; | 789 | break; |
| 623 | case PR_SPEC_DISABLE: | 790 | case PR_SPEC_DISABLE: |
| 624 | task_set_spec_ssb_disable(task); | 791 | task_set_spec_ssb_disable(task); |
| 625 | update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); | 792 | task_update_spec_tif(task); |
| 626 | break; | 793 | break; |
| 627 | case PR_SPEC_FORCE_DISABLE: | 794 | case PR_SPEC_FORCE_DISABLE: |
| 628 | task_set_spec_ssb_disable(task); | 795 | task_set_spec_ssb_disable(task); |
| 629 | task_set_spec_ssb_force_disable(task); | 796 | task_set_spec_ssb_force_disable(task); |
| 630 | update = !test_and_set_tsk_thread_flag(task, TIF_SSBD); | 797 | task_update_spec_tif(task); |
| 631 | break; | 798 | break; |
| 632 | default: | 799 | default: |
| 633 | return -ERANGE; | 800 | return -ERANGE; |
| 634 | } | 801 | } |
| 802 | return 0; | ||
| 803 | } | ||
| 635 | 804 | ||
| 636 | /* | 805 | static int ib_prctl_set(struct task_struct *task, unsigned long ctrl) |
| 637 | * If being set on non-current task, delay setting the CPU | 806 | { |
| 638 | * mitigation until it is next scheduled. | 807 | switch (ctrl) { |
| 639 | */ | 808 | case PR_SPEC_ENABLE: |
| 640 | if (task == current && update) | 809 | if (spectre_v2_user == SPECTRE_V2_USER_NONE) |
| 641 | speculative_store_bypass_update_current(); | 810 | return 0; |
| 642 | 811 | /* | |
| 812 | * Indirect branch speculation is always disabled in strict | ||
| 813 | * mode. | ||
| 814 | */ | ||
| 815 | if (spectre_v2_user == SPECTRE_V2_USER_STRICT) | ||
| 816 | return -EPERM; | ||
| 817 | task_clear_spec_ib_disable(task); | ||
| 818 | task_update_spec_tif(task); | ||
| 819 | break; | ||
| 820 | case PR_SPEC_DISABLE: | ||
| 821 | case PR_SPEC_FORCE_DISABLE: | ||
| 822 | /* | ||
| 823 | * Indirect branch speculation is always allowed when | ||
| 824 | * mitigation is force disabled. | ||
| 825 | */ | ||
| 826 | if (spectre_v2_user == SPECTRE_V2_USER_NONE) | ||
| 827 | return -EPERM; | ||
| 828 | if (spectre_v2_user == SPECTRE_V2_USER_STRICT) | ||
| 829 | return 0; | ||
| 830 | task_set_spec_ib_disable(task); | ||
| 831 | if (ctrl == PR_SPEC_FORCE_DISABLE) | ||
| 832 | task_set_spec_ib_force_disable(task); | ||
| 833 | task_update_spec_tif(task); | ||
| 834 | break; | ||
| 835 | default: | ||
| 836 | return -ERANGE; | ||
| 837 | } | ||
| 643 | return 0; | 838 | return 0; |
| 644 | } | 839 | } |
| 645 | 840 | ||
| @@ -649,6 +844,8 @@ int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which, | |||
| 649 | switch (which) { | 844 | switch (which) { |
| 650 | case PR_SPEC_STORE_BYPASS: | 845 | case PR_SPEC_STORE_BYPASS: |
| 651 | return ssb_prctl_set(task, ctrl); | 846 | return ssb_prctl_set(task, ctrl); |
| 847 | case PR_SPEC_INDIRECT_BRANCH: | ||
| 848 | return ib_prctl_set(task, ctrl); | ||
| 652 | default: | 849 | default: |
| 653 | return -ENODEV; | 850 | return -ENODEV; |
| 654 | } | 851 | } |
| @@ -659,6 +856,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task) | |||
| 659 | { | 856 | { |
| 660 | if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) | 857 | if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP) |
| 661 | ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); | 858 | ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE); |
| 859 | if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP) | ||
| 860 | ib_prctl_set(task, PR_SPEC_FORCE_DISABLE); | ||
| 662 | } | 861 | } |
| 663 | #endif | 862 | #endif |
| 664 | 863 | ||
| @@ -681,11 +880,35 @@ static int ssb_prctl_get(struct task_struct *task) | |||
| 681 | } | 880 | } |
| 682 | } | 881 | } |
| 683 | 882 | ||
| 883 | static int ib_prctl_get(struct task_struct *task) | ||
| 884 | { | ||
| 885 | if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2)) | ||
| 886 | return PR_SPEC_NOT_AFFECTED; | ||
| 887 | |||
| 888 | switch (spectre_v2_user) { | ||
| 889 | case SPECTRE_V2_USER_NONE: | ||
| 890 | return PR_SPEC_ENABLE; | ||
| 891 | case SPECTRE_V2_USER_PRCTL: | ||
| 892 | case SPECTRE_V2_USER_SECCOMP: | ||
| 893 | if (task_spec_ib_force_disable(task)) | ||
| 894 | return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE; | ||
| 895 | if (task_spec_ib_disable(task)) | ||
| 896 | return PR_SPEC_PRCTL | PR_SPEC_DISABLE; | ||
| 897 | return PR_SPEC_PRCTL | PR_SPEC_ENABLE; | ||
| 898 | case SPECTRE_V2_USER_STRICT: | ||
| 899 | return PR_SPEC_DISABLE; | ||
| 900 | default: | ||
| 901 | return PR_SPEC_NOT_AFFECTED; | ||
| 902 | } | ||
| 903 | } | ||
| 904 | |||
| 684 | int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) | 905 | int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) |
| 685 | { | 906 | { |
| 686 | switch (which) { | 907 | switch (which) { |
| 687 | case PR_SPEC_STORE_BYPASS: | 908 | case PR_SPEC_STORE_BYPASS: |
| 688 | return ssb_prctl_get(task); | 909 | return ssb_prctl_get(task); |
| 910 | case PR_SPEC_INDIRECT_BRANCH: | ||
| 911 | return ib_prctl_get(task); | ||
| 689 | default: | 912 | default: |
| 690 | return -ENODEV; | 913 | return -ENODEV; |
| 691 | } | 914 | } |
| @@ -823,7 +1046,7 @@ early_param("l1tf", l1tf_cmdline); | |||
| 823 | #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" | 1046 | #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion" |
| 824 | 1047 | ||
| 825 | #if IS_ENABLED(CONFIG_KVM_INTEL) | 1048 | #if IS_ENABLED(CONFIG_KVM_INTEL) |
| 826 | static const char *l1tf_vmx_states[] = { | 1049 | static const char * const l1tf_vmx_states[] = { |
| 827 | [VMENTER_L1D_FLUSH_AUTO] = "auto", | 1050 | [VMENTER_L1D_FLUSH_AUTO] = "auto", |
| 828 | [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", | 1051 | [VMENTER_L1D_FLUSH_NEVER] = "vulnerable", |
| 829 | [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", | 1052 | [VMENTER_L1D_FLUSH_COND] = "conditional cache flushes", |
| @@ -839,13 +1062,14 @@ static ssize_t l1tf_show_state(char *buf) | |||
| 839 | 1062 | ||
| 840 | if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || | 1063 | if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED || |
| 841 | (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && | 1064 | (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER && |
| 842 | cpu_smt_control == CPU_SMT_ENABLED)) | 1065 | sched_smt_active())) { |
| 843 | return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, | 1066 | return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG, |
| 844 | l1tf_vmx_states[l1tf_vmx_mitigation]); | 1067 | l1tf_vmx_states[l1tf_vmx_mitigation]); |
| 1068 | } | ||
| 845 | 1069 | ||
| 846 | return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, | 1070 | return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG, |
| 847 | l1tf_vmx_states[l1tf_vmx_mitigation], | 1071 | l1tf_vmx_states[l1tf_vmx_mitigation], |
| 848 | cpu_smt_control == CPU_SMT_ENABLED ? "vulnerable" : "disabled"); | 1072 | sched_smt_active() ? "vulnerable" : "disabled"); |
| 849 | } | 1073 | } |
| 850 | #else | 1074 | #else |
| 851 | static ssize_t l1tf_show_state(char *buf) | 1075 | static ssize_t l1tf_show_state(char *buf) |
| @@ -854,11 +1078,39 @@ static ssize_t l1tf_show_state(char *buf) | |||
| 854 | } | 1078 | } |
| 855 | #endif | 1079 | #endif |
| 856 | 1080 | ||
| 1081 | static char *stibp_state(void) | ||
| 1082 | { | ||
| 1083 | if (spectre_v2_enabled == SPECTRE_V2_IBRS_ENHANCED) | ||
| 1084 | return ""; | ||
| 1085 | |||
| 1086 | switch (spectre_v2_user) { | ||
| 1087 | case SPECTRE_V2_USER_NONE: | ||
| 1088 | return ", STIBP: disabled"; | ||
| 1089 | case SPECTRE_V2_USER_STRICT: | ||
| 1090 | return ", STIBP: forced"; | ||
| 1091 | case SPECTRE_V2_USER_PRCTL: | ||
| 1092 | case SPECTRE_V2_USER_SECCOMP: | ||
| 1093 | if (static_key_enabled(&switch_to_cond_stibp)) | ||
| 1094 | return ", STIBP: conditional"; | ||
| 1095 | } | ||
| 1096 | return ""; | ||
| 1097 | } | ||
| 1098 | |||
| 1099 | static char *ibpb_state(void) | ||
| 1100 | { | ||
| 1101 | if (boot_cpu_has(X86_FEATURE_IBPB)) { | ||
| 1102 | if (static_key_enabled(&switch_mm_always_ibpb)) | ||
| 1103 | return ", IBPB: always-on"; | ||
| 1104 | if (static_key_enabled(&switch_mm_cond_ibpb)) | ||
| 1105 | return ", IBPB: conditional"; | ||
| 1106 | return ", IBPB: disabled"; | ||
| 1107 | } | ||
| 1108 | return ""; | ||
| 1109 | } | ||
| 1110 | |||
| 857 | static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, | 1111 | static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, |
| 858 | char *buf, unsigned int bug) | 1112 | char *buf, unsigned int bug) |
| 859 | { | 1113 | { |
| 860 | int ret; | ||
| 861 | |||
| 862 | if (!boot_cpu_has_bug(bug)) | 1114 | if (!boot_cpu_has_bug(bug)) |
| 863 | return sprintf(buf, "Not affected\n"); | 1115 | return sprintf(buf, "Not affected\n"); |
| 864 | 1116 | ||
| @@ -876,13 +1128,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr | |||
| 876 | return sprintf(buf, "Mitigation: __user pointer sanitization\n"); | 1128 | return sprintf(buf, "Mitigation: __user pointer sanitization\n"); |
| 877 | 1129 | ||
| 878 | case X86_BUG_SPECTRE_V2: | 1130 | case X86_BUG_SPECTRE_V2: |
| 879 | ret = sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], | 1131 | return sprintf(buf, "%s%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled], |
| 880 | boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "", | 1132 | ibpb_state(), |
| 881 | boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", | 1133 | boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "", |
| 882 | (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "", | 1134 | stibp_state(), |
| 883 | boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", | 1135 | boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "", |
| 884 | spectre_v2_module_string()); | 1136 | spectre_v2_module_string()); |
| 885 | return ret; | ||
| 886 | 1137 | ||
| 887 | case X86_BUG_SPEC_STORE_BYPASS: | 1138 | case X86_BUG_SPEC_STORE_BYPASS: |
| 888 | return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); | 1139 | return sprintf(buf, "%s\n", ssb_strings[ssb_mode]); |
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8c66d2fc8f81..36d2696c9563 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
| @@ -485,7 +485,7 @@ static void mce_report_event(struct pt_regs *regs) | |||
| 485 | * be somewhat complicated (e.g. segment offset would require an instruction | 485 | * be somewhat complicated (e.g. segment offset would require an instruction |
| 486 | * parser). So only support physical addresses up to page granuality for now. | 486 | * parser). So only support physical addresses up to page granuality for now. |
| 487 | */ | 487 | */ |
| 488 | static int mce_usable_address(struct mce *m) | 488 | int mce_usable_address(struct mce *m) |
| 489 | { | 489 | { |
| 490 | if (!(m->status & MCI_STATUS_ADDRV)) | 490 | if (!(m->status & MCI_STATUS_ADDRV)) |
| 491 | return 0; | 491 | return 0; |
| @@ -505,6 +505,7 @@ static int mce_usable_address(struct mce *m) | |||
| 505 | 505 | ||
| 506 | return 1; | 506 | return 1; |
| 507 | } | 507 | } |
| 508 | EXPORT_SYMBOL_GPL(mce_usable_address); | ||
| 508 | 509 | ||
| 509 | bool mce_is_memory_error(struct mce *m) | 510 | bool mce_is_memory_error(struct mce *m) |
| 510 | { | 511 | { |
| @@ -534,7 +535,7 @@ bool mce_is_memory_error(struct mce *m) | |||
| 534 | } | 535 | } |
| 535 | EXPORT_SYMBOL_GPL(mce_is_memory_error); | 536 | EXPORT_SYMBOL_GPL(mce_is_memory_error); |
| 536 | 537 | ||
| 537 | static bool mce_is_correctable(struct mce *m) | 538 | bool mce_is_correctable(struct mce *m) |
| 538 | { | 539 | { |
| 539 | if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) | 540 | if (m->cpuvendor == X86_VENDOR_AMD && m->status & MCI_STATUS_DEFERRED) |
| 540 | return false; | 541 | return false; |
| @@ -547,6 +548,7 @@ static bool mce_is_correctable(struct mce *m) | |||
| 547 | 548 | ||
| 548 | return true; | 549 | return true; |
| 549 | } | 550 | } |
| 551 | EXPORT_SYMBOL_GPL(mce_is_correctable); | ||
| 550 | 552 | ||
| 551 | static bool cec_add_mce(struct mce *m) | 553 | static bool cec_add_mce(struct mce *m) |
| 552 | { | 554 | { |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 9c8e2daa48cd..9f915a8791cc 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
| @@ -57,7 +57,7 @@ | |||
| 57 | /* Threshold LVT offset is at MSR0xC0000410[15:12] */ | 57 | /* Threshold LVT offset is at MSR0xC0000410[15:12] */ |
| 58 | #define SMCA_THR_LVT_OFF 0xF000 | 58 | #define SMCA_THR_LVT_OFF 0xF000 |
| 59 | 59 | ||
| 60 | static bool thresholding_en; | 60 | static bool thresholding_irq_en; |
| 61 | 61 | ||
| 62 | static const char * const th_names[] = { | 62 | static const char * const th_names[] = { |
| 63 | "load_store", | 63 | "load_store", |
| @@ -535,9 +535,8 @@ prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, | |||
| 535 | 535 | ||
| 536 | set_offset: | 536 | set_offset: |
| 537 | offset = setup_APIC_mce_threshold(offset, new); | 537 | offset = setup_APIC_mce_threshold(offset, new); |
| 538 | 538 | if (offset == new) | |
| 539 | if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt)) | 539 | thresholding_irq_en = true; |
| 540 | mce_threshold_vector = amd_threshold_interrupt; | ||
| 541 | 540 | ||
| 542 | done: | 541 | done: |
| 543 | mce_threshold_block_init(&b, offset); | 542 | mce_threshold_block_init(&b, offset); |
| @@ -1358,9 +1357,6 @@ int mce_threshold_remove_device(unsigned int cpu) | |||
| 1358 | { | 1357 | { |
| 1359 | unsigned int bank; | 1358 | unsigned int bank; |
| 1360 | 1359 | ||
| 1361 | if (!thresholding_en) | ||
| 1362 | return 0; | ||
| 1363 | |||
| 1364 | for (bank = 0; bank < mca_cfg.banks; ++bank) { | 1360 | for (bank = 0; bank < mca_cfg.banks; ++bank) { |
| 1365 | if (!(per_cpu(bank_map, cpu) & (1 << bank))) | 1361 | if (!(per_cpu(bank_map, cpu) & (1 << bank))) |
| 1366 | continue; | 1362 | continue; |
| @@ -1378,9 +1374,6 @@ int mce_threshold_create_device(unsigned int cpu) | |||
| 1378 | struct threshold_bank **bp; | 1374 | struct threshold_bank **bp; |
| 1379 | int err = 0; | 1375 | int err = 0; |
| 1380 | 1376 | ||
| 1381 | if (!thresholding_en) | ||
| 1382 | return 0; | ||
| 1383 | |||
| 1384 | bp = per_cpu(threshold_banks, cpu); | 1377 | bp = per_cpu(threshold_banks, cpu); |
| 1385 | if (bp) | 1378 | if (bp) |
| 1386 | return 0; | 1379 | return 0; |
| @@ -1409,9 +1402,6 @@ static __init int threshold_init_device(void) | |||
| 1409 | { | 1402 | { |
| 1410 | unsigned lcpu = 0; | 1403 | unsigned lcpu = 0; |
| 1411 | 1404 | ||
| 1412 | if (mce_threshold_vector == amd_threshold_interrupt) | ||
| 1413 | thresholding_en = true; | ||
| 1414 | |||
| 1415 | /* to hit CPUs online before the notifier is up */ | 1405 | /* to hit CPUs online before the notifier is up */ |
| 1416 | for_each_online_cpu(lcpu) { | 1406 | for_each_online_cpu(lcpu) { |
| 1417 | int err = mce_threshold_create_device(lcpu); | 1407 | int err = mce_threshold_create_device(lcpu); |
| @@ -1420,6 +1410,9 @@ static __init int threshold_init_device(void) | |||
| 1420 | return err; | 1410 | return err; |
| 1421 | } | 1411 | } |
| 1422 | 1412 | ||
| 1413 | if (thresholding_irq_en) | ||
| 1414 | mce_threshold_vector = amd_threshold_interrupt; | ||
| 1415 | |||
| 1423 | return 0; | 1416 | return 0; |
| 1424 | } | 1417 | } |
| 1425 | /* | 1418 | /* |
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 1c72f3819eb1..e81a2db42df7 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c | |||
| @@ -20,6 +20,7 @@ | |||
| 20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
| 21 | #include <linux/irq.h> | 21 | #include <linux/irq.h> |
| 22 | #include <linux/kexec.h> | 22 | #include <linux/kexec.h> |
| 23 | #include <linux/i8253.h> | ||
| 23 | #include <asm/processor.h> | 24 | #include <asm/processor.h> |
| 24 | #include <asm/hypervisor.h> | 25 | #include <asm/hypervisor.h> |
| 25 | #include <asm/hyperv-tlfs.h> | 26 | #include <asm/hyperv-tlfs.h> |
| @@ -295,6 +296,16 @@ static void __init ms_hyperv_init_platform(void) | |||
| 295 | if (efi_enabled(EFI_BOOT)) | 296 | if (efi_enabled(EFI_BOOT)) |
| 296 | x86_platform.get_nmi_reason = hv_get_nmi_reason; | 297 | x86_platform.get_nmi_reason = hv_get_nmi_reason; |
| 297 | 298 | ||
| 299 | /* | ||
| 300 | * Hyper-V VMs have a PIT emulation quirk such that zeroing the | ||
| 301 | * counter register during PIT shutdown restarts the PIT. So it | ||
| 302 | * continues to interrupt @18.2 HZ. Setting i8253_clear_counter | ||
| 303 | * to false tells pit_shutdown() not to zero the counter so that | ||
| 304 | * the PIT really is shutdown. Generation 2 VMs don't have a PIT, | ||
| 305 | * and setting this value has no effect. | ||
| 306 | */ | ||
| 307 | i8253_clear_counter_on_shutdown = false; | ||
| 308 | |||
| 298 | #if IS_ENABLED(CONFIG_HYPERV) | 309 | #if IS_ENABLED(CONFIG_HYPERV) |
| 299 | /* | 310 | /* |
| 300 | * Setup the hook to get control post apic initialization. | 311 | * Setup the hook to get control post apic initialization. |
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index d9ab49bed8af..0eda91f8eeac 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
| @@ -77,7 +77,7 @@ static __init int setup_vmw_sched_clock(char *s) | |||
| 77 | } | 77 | } |
| 78 | early_param("no-vmw-sched-clock", setup_vmw_sched_clock); | 78 | early_param("no-vmw-sched-clock", setup_vmw_sched_clock); |
| 79 | 79 | ||
| 80 | static unsigned long long vmware_sched_clock(void) | 80 | static unsigned long long notrace vmware_sched_clock(void) |
| 81 | { | 81 | { |
| 82 | unsigned long long ns; | 82 | unsigned long long ns; |
| 83 | 83 | ||
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c index 61a949d84dfa..d99a8ee9e185 100644 --- a/arch/x86/kernel/fpu/signal.c +++ b/arch/x86/kernel/fpu/signal.c | |||
| @@ -344,10 +344,10 @@ static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size) | |||
| 344 | sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); | 344 | sanitize_restored_xstate(tsk, &env, xfeatures, fx_only); |
| 345 | } | 345 | } |
| 346 | 346 | ||
| 347 | local_bh_disable(); | ||
| 347 | fpu->initialized = 1; | 348 | fpu->initialized = 1; |
| 348 | preempt_disable(); | ||
| 349 | fpu__restore(fpu); | 349 | fpu__restore(fpu); |
| 350 | preempt_enable(); | 350 | local_bh_enable(); |
| 351 | 351 | ||
| 352 | return err; | 352 | return err; |
| 353 | } else { | 353 | } else { |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 01ebcb6f263e..7ee8067cbf45 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
| @@ -994,7 +994,6 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, | |||
| 994 | { | 994 | { |
| 995 | unsigned long old; | 995 | unsigned long old; |
| 996 | int faulted; | 996 | int faulted; |
| 997 | struct ftrace_graph_ent trace; | ||
| 998 | unsigned long return_hooker = (unsigned long) | 997 | unsigned long return_hooker = (unsigned long) |
| 999 | &return_to_handler; | 998 | &return_to_handler; |
| 1000 | 999 | ||
| @@ -1046,19 +1045,7 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent, | |||
| 1046 | return; | 1045 | return; |
| 1047 | } | 1046 | } |
| 1048 | 1047 | ||
| 1049 | trace.func = self_addr; | 1048 | if (function_graph_enter(old, self_addr, frame_pointer, parent)) |
| 1050 | trace.depth = current->curr_ret_stack + 1; | ||
| 1051 | |||
| 1052 | /* Only trace if the calling function expects to */ | ||
| 1053 | if (!ftrace_graph_entry(&trace)) { | ||
| 1054 | *parent = old; | 1049 | *parent = old; |
| 1055 | return; | ||
| 1056 | } | ||
| 1057 | |||
| 1058 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, | ||
| 1059 | frame_pointer, parent) == -EBUSY) { | ||
| 1060 | *parent = old; | ||
| 1061 | return; | ||
| 1062 | } | ||
| 1063 | } | 1050 | } |
| 1064 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 1051 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c index 76fa3b836598..ec6fefbfd3c0 100644 --- a/arch/x86/kernel/head32.c +++ b/arch/x86/kernel/head32.c | |||
| @@ -37,7 +37,6 @@ asmlinkage __visible void __init i386_start_kernel(void) | |||
| 37 | cr4_init_shadow(); | 37 | cr4_init_shadow(); |
| 38 | 38 | ||
| 39 | sanitize_boot_params(&boot_params); | 39 | sanitize_boot_params(&boot_params); |
| 40 | x86_verify_bootdata_version(); | ||
| 41 | 40 | ||
| 42 | x86_early_init_platform_quirks(); | 41 | x86_early_init_platform_quirks(); |
| 43 | 42 | ||
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 7663a8eb602b..16b1cbd3a61e 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
| @@ -457,8 +457,6 @@ void __init x86_64_start_reservations(char *real_mode_data) | |||
| 457 | if (!boot_params.hdr.version) | 457 | if (!boot_params.hdr.version) |
| 458 | copy_bootdata(__va(real_mode_data)); | 458 | copy_bootdata(__va(real_mode_data)); |
| 459 | 459 | ||
| 460 | x86_verify_bootdata_version(); | ||
| 461 | |||
| 462 | x86_early_init_platform_quirks(); | 460 | x86_early_init_platform_quirks(); |
| 463 | 461 | ||
| 464 | switch (boot_params.hdr.hardware_subarch) { | 462 | switch (boot_params.hdr.hardware_subarch) { |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index ab18e0884dc6..6135ae8ce036 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
| @@ -199,14 +199,6 @@ static void sanity_check_ldt_mapping(struct mm_struct *mm) | |||
| 199 | /* | 199 | /* |
| 200 | * If PTI is enabled, this maps the LDT into the kernelmode and | 200 | * If PTI is enabled, this maps the LDT into the kernelmode and |
| 201 | * usermode tables for the given mm. | 201 | * usermode tables for the given mm. |
| 202 | * | ||
| 203 | * There is no corresponding unmap function. Even if the LDT is freed, we | ||
| 204 | * leave the PTEs around until the slot is reused or the mm is destroyed. | ||
| 205 | * This is harmless: the LDT is always in ordinary memory, and no one will | ||
| 206 | * access the freed slot. | ||
| 207 | * | ||
| 208 | * If we wanted to unmap freed LDTs, we'd also need to do a flush to make | ||
| 209 | * it useful, and the flush would slow down modify_ldt(). | ||
| 210 | */ | 202 | */ |
| 211 | static int | 203 | static int |
| 212 | map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) | 204 | map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) |
| @@ -214,8 +206,7 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) | |||
| 214 | unsigned long va; | 206 | unsigned long va; |
| 215 | bool is_vmalloc; | 207 | bool is_vmalloc; |
| 216 | spinlock_t *ptl; | 208 | spinlock_t *ptl; |
| 217 | pgd_t *pgd; | 209 | int i, nr_pages; |
| 218 | int i; | ||
| 219 | 210 | ||
| 220 | if (!static_cpu_has(X86_FEATURE_PTI)) | 211 | if (!static_cpu_has(X86_FEATURE_PTI)) |
| 221 | return 0; | 212 | return 0; |
| @@ -229,16 +220,11 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) | |||
| 229 | /* Check if the current mappings are sane */ | 220 | /* Check if the current mappings are sane */ |
| 230 | sanity_check_ldt_mapping(mm); | 221 | sanity_check_ldt_mapping(mm); |
| 231 | 222 | ||
| 232 | /* | ||
| 233 | * Did we already have the top level entry allocated? We can't | ||
| 234 | * use pgd_none() for this because it doens't do anything on | ||
| 235 | * 4-level page table kernels. | ||
| 236 | */ | ||
| 237 | pgd = pgd_offset(mm, LDT_BASE_ADDR); | ||
| 238 | |||
| 239 | is_vmalloc = is_vmalloc_addr(ldt->entries); | 223 | is_vmalloc = is_vmalloc_addr(ldt->entries); |
| 240 | 224 | ||
| 241 | for (i = 0; i * PAGE_SIZE < ldt->nr_entries * LDT_ENTRY_SIZE; i++) { | 225 | nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); |
| 226 | |||
| 227 | for (i = 0; i < nr_pages; i++) { | ||
| 242 | unsigned long offset = i << PAGE_SHIFT; | 228 | unsigned long offset = i << PAGE_SHIFT; |
| 243 | const void *src = (char *)ldt->entries + offset; | 229 | const void *src = (char *)ldt->entries + offset; |
| 244 | unsigned long pfn; | 230 | unsigned long pfn; |
| @@ -272,13 +258,39 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) | |||
| 272 | /* Propagate LDT mapping to the user page-table */ | 258 | /* Propagate LDT mapping to the user page-table */ |
| 273 | map_ldt_struct_to_user(mm); | 259 | map_ldt_struct_to_user(mm); |
| 274 | 260 | ||
| 275 | va = (unsigned long)ldt_slot_va(slot); | ||
| 276 | flush_tlb_mm_range(mm, va, va + LDT_SLOT_STRIDE, PAGE_SHIFT, false); | ||
| 277 | |||
| 278 | ldt->slot = slot; | 261 | ldt->slot = slot; |
| 279 | return 0; | 262 | return 0; |
| 280 | } | 263 | } |
| 281 | 264 | ||
| 265 | static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) | ||
| 266 | { | ||
| 267 | unsigned long va; | ||
| 268 | int i, nr_pages; | ||
| 269 | |||
| 270 | if (!ldt) | ||
| 271 | return; | ||
| 272 | |||
| 273 | /* LDT map/unmap is only required for PTI */ | ||
| 274 | if (!static_cpu_has(X86_FEATURE_PTI)) | ||
| 275 | return; | ||
| 276 | |||
| 277 | nr_pages = DIV_ROUND_UP(ldt->nr_entries * LDT_ENTRY_SIZE, PAGE_SIZE); | ||
| 278 | |||
| 279 | for (i = 0; i < nr_pages; i++) { | ||
| 280 | unsigned long offset = i << PAGE_SHIFT; | ||
| 281 | spinlock_t *ptl; | ||
| 282 | pte_t *ptep; | ||
| 283 | |||
| 284 | va = (unsigned long)ldt_slot_va(ldt->slot) + offset; | ||
| 285 | ptep = get_locked_pte(mm, va, &ptl); | ||
| 286 | pte_clear(mm, va, ptep); | ||
| 287 | pte_unmap_unlock(ptep, ptl); | ||
| 288 | } | ||
| 289 | |||
| 290 | va = (unsigned long)ldt_slot_va(ldt->slot); | ||
| 291 | flush_tlb_mm_range(mm, va, va + nr_pages * PAGE_SIZE, PAGE_SHIFT, false); | ||
| 292 | } | ||
| 293 | |||
| 282 | #else /* !CONFIG_PAGE_TABLE_ISOLATION */ | 294 | #else /* !CONFIG_PAGE_TABLE_ISOLATION */ |
| 283 | 295 | ||
| 284 | static int | 296 | static int |
| @@ -286,6 +298,10 @@ map_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt, int slot) | |||
| 286 | { | 298 | { |
| 287 | return 0; | 299 | return 0; |
| 288 | } | 300 | } |
| 301 | |||
| 302 | static void unmap_ldt_struct(struct mm_struct *mm, struct ldt_struct *ldt) | ||
| 303 | { | ||
| 304 | } | ||
| 289 | #endif /* CONFIG_PAGE_TABLE_ISOLATION */ | 305 | #endif /* CONFIG_PAGE_TABLE_ISOLATION */ |
| 290 | 306 | ||
| 291 | static void free_ldt_pgtables(struct mm_struct *mm) | 307 | static void free_ldt_pgtables(struct mm_struct *mm) |
| @@ -524,6 +540,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode) | |||
| 524 | } | 540 | } |
| 525 | 541 | ||
| 526 | install_ldt(mm, new_ldt); | 542 | install_ldt(mm, new_ldt); |
| 543 | unmap_ldt_struct(mm, old_ldt); | ||
| 527 | free_ldt_struct(old_ldt); | 544 | free_ldt_struct(old_ldt); |
| 528 | error = 0; | 545 | error = 0; |
| 529 | 546 | ||
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index b7cb5348f37f..90ae0ca51083 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
| @@ -43,6 +43,8 @@ | |||
| 43 | #include <asm/spec-ctrl.h> | 43 | #include <asm/spec-ctrl.h> |
| 44 | #include <asm/proto.h> | 44 | #include <asm/proto.h> |
| 45 | 45 | ||
| 46 | #include "process.h" | ||
| 47 | |||
| 46 | /* | 48 | /* |
| 47 | * per-CPU TSS segments. Threads are completely 'soft' on Linux, | 49 | * per-CPU TSS segments. Threads are completely 'soft' on Linux, |
| 48 | * no more per-task TSS's. The TSS size is kept cacheline-aligned | 50 | * no more per-task TSS's. The TSS size is kept cacheline-aligned |
| @@ -255,11 +257,12 @@ void arch_setup_new_exec(void) | |||
| 255 | enable_cpuid(); | 257 | enable_cpuid(); |
| 256 | } | 258 | } |
| 257 | 259 | ||
| 258 | static inline void switch_to_bitmap(struct tss_struct *tss, | 260 | static inline void switch_to_bitmap(struct thread_struct *prev, |
| 259 | struct thread_struct *prev, | ||
| 260 | struct thread_struct *next, | 261 | struct thread_struct *next, |
| 261 | unsigned long tifp, unsigned long tifn) | 262 | unsigned long tifp, unsigned long tifn) |
| 262 | { | 263 | { |
| 264 | struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); | ||
| 265 | |||
| 263 | if (tifn & _TIF_IO_BITMAP) { | 266 | if (tifn & _TIF_IO_BITMAP) { |
| 264 | /* | 267 | /* |
| 265 | * Copy the relevant range of the IO bitmap. | 268 | * Copy the relevant range of the IO bitmap. |
| @@ -398,32 +401,85 @@ static __always_inline void amd_set_ssb_virt_state(unsigned long tifn) | |||
| 398 | wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); | 401 | wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, ssbd_tif_to_spec_ctrl(tifn)); |
| 399 | } | 402 | } |
| 400 | 403 | ||
| 401 | static __always_inline void intel_set_ssb_state(unsigned long tifn) | 404 | /* |
| 405 | * Update the MSRs managing speculation control, during context switch. | ||
| 406 | * | ||
| 407 | * tifp: Previous task's thread flags | ||
| 408 | * tifn: Next task's thread flags | ||
| 409 | */ | ||
| 410 | static __always_inline void __speculation_ctrl_update(unsigned long tifp, | ||
| 411 | unsigned long tifn) | ||
| 402 | { | 412 | { |
| 403 | u64 msr = x86_spec_ctrl_base | ssbd_tif_to_spec_ctrl(tifn); | 413 | unsigned long tif_diff = tifp ^ tifn; |
| 414 | u64 msr = x86_spec_ctrl_base; | ||
| 415 | bool updmsr = false; | ||
| 416 | |||
| 417 | /* | ||
| 418 | * If TIF_SSBD is different, select the proper mitigation | ||
| 419 | * method. Note that if SSBD mitigation is disabled or permanentely | ||
| 420 | * enabled this branch can't be taken because nothing can set | ||
| 421 | * TIF_SSBD. | ||
| 422 | */ | ||
| 423 | if (tif_diff & _TIF_SSBD) { | ||
| 424 | if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) { | ||
| 425 | amd_set_ssb_virt_state(tifn); | ||
| 426 | } else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) { | ||
| 427 | amd_set_core_ssb_state(tifn); | ||
| 428 | } else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) || | ||
| 429 | static_cpu_has(X86_FEATURE_AMD_SSBD)) { | ||
| 430 | msr |= ssbd_tif_to_spec_ctrl(tifn); | ||
| 431 | updmsr = true; | ||
| 432 | } | ||
| 433 | } | ||
| 434 | |||
| 435 | /* | ||
| 436 | * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled, | ||
| 437 | * otherwise avoid the MSR write. | ||
| 438 | */ | ||
| 439 | if (IS_ENABLED(CONFIG_SMP) && | ||
| 440 | static_branch_unlikely(&switch_to_cond_stibp)) { | ||
| 441 | updmsr |= !!(tif_diff & _TIF_SPEC_IB); | ||
| 442 | msr |= stibp_tif_to_spec_ctrl(tifn); | ||
| 443 | } | ||
| 404 | 444 | ||
| 405 | wrmsrl(MSR_IA32_SPEC_CTRL, msr); | 445 | if (updmsr) |
| 446 | wrmsrl(MSR_IA32_SPEC_CTRL, msr); | ||
| 406 | } | 447 | } |
| 407 | 448 | ||
| 408 | static __always_inline void __speculative_store_bypass_update(unsigned long tifn) | 449 | static unsigned long speculation_ctrl_update_tif(struct task_struct *tsk) |
| 409 | { | 450 | { |
| 410 | if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) | 451 | if (test_and_clear_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE)) { |
| 411 | amd_set_ssb_virt_state(tifn); | 452 | if (task_spec_ssb_disable(tsk)) |
| 412 | else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) | 453 | set_tsk_thread_flag(tsk, TIF_SSBD); |
| 413 | amd_set_core_ssb_state(tifn); | 454 | else |
| 414 | else | 455 | clear_tsk_thread_flag(tsk, TIF_SSBD); |
| 415 | intel_set_ssb_state(tifn); | 456 | |
| 457 | if (task_spec_ib_disable(tsk)) | ||
| 458 | set_tsk_thread_flag(tsk, TIF_SPEC_IB); | ||
| 459 | else | ||
| 460 | clear_tsk_thread_flag(tsk, TIF_SPEC_IB); | ||
| 461 | } | ||
| 462 | /* Return the updated threadinfo flags*/ | ||
| 463 | return task_thread_info(tsk)->flags; | ||
| 416 | } | 464 | } |
| 417 | 465 | ||
| 418 | void speculative_store_bypass_update(unsigned long tif) | 466 | void speculation_ctrl_update(unsigned long tif) |
| 419 | { | 467 | { |
| 468 | /* Forced update. Make sure all relevant TIF flags are different */ | ||
| 420 | preempt_disable(); | 469 | preempt_disable(); |
| 421 | __speculative_store_bypass_update(tif); | 470 | __speculation_ctrl_update(~tif, tif); |
| 422 | preempt_enable(); | 471 | preempt_enable(); |
| 423 | } | 472 | } |
| 424 | 473 | ||
| 425 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | 474 | /* Called from seccomp/prctl update */ |
| 426 | struct tss_struct *tss) | 475 | void speculation_ctrl_update_current(void) |
| 476 | { | ||
| 477 | preempt_disable(); | ||
| 478 | speculation_ctrl_update(speculation_ctrl_update_tif(current)); | ||
| 479 | preempt_enable(); | ||
| 480 | } | ||
| 481 | |||
| 482 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p) | ||
| 427 | { | 483 | { |
| 428 | struct thread_struct *prev, *next; | 484 | struct thread_struct *prev, *next; |
| 429 | unsigned long tifp, tifn; | 485 | unsigned long tifp, tifn; |
| @@ -433,7 +489,7 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
| 433 | 489 | ||
| 434 | tifn = READ_ONCE(task_thread_info(next_p)->flags); | 490 | tifn = READ_ONCE(task_thread_info(next_p)->flags); |
| 435 | tifp = READ_ONCE(task_thread_info(prev_p)->flags); | 491 | tifp = READ_ONCE(task_thread_info(prev_p)->flags); |
| 436 | switch_to_bitmap(tss, prev, next, tifp, tifn); | 492 | switch_to_bitmap(prev, next, tifp, tifn); |
| 437 | 493 | ||
| 438 | propagate_user_return_notify(prev_p, next_p); | 494 | propagate_user_return_notify(prev_p, next_p); |
| 439 | 495 | ||
| @@ -454,8 +510,15 @@ void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |||
| 454 | if ((tifp ^ tifn) & _TIF_NOCPUID) | 510 | if ((tifp ^ tifn) & _TIF_NOCPUID) |
| 455 | set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); | 511 | set_cpuid_faulting(!!(tifn & _TIF_NOCPUID)); |
| 456 | 512 | ||
| 457 | if ((tifp ^ tifn) & _TIF_SSBD) | 513 | if (likely(!((tifp | tifn) & _TIF_SPEC_FORCE_UPDATE))) { |
| 458 | __speculative_store_bypass_update(tifn); | 514 | __speculation_ctrl_update(tifp, tifn); |
| 515 | } else { | ||
| 516 | speculation_ctrl_update_tif(prev_p); | ||
| 517 | tifn = speculation_ctrl_update_tif(next_p); | ||
| 518 | |||
| 519 | /* Enforce MSR update to ensure consistent state */ | ||
| 520 | __speculation_ctrl_update(~tifn, tifn); | ||
| 521 | } | ||
| 459 | } | 522 | } |
| 460 | 523 | ||
| 461 | /* | 524 | /* |
diff --git a/arch/x86/kernel/process.h b/arch/x86/kernel/process.h new file mode 100644 index 000000000000..898e97cf6629 --- /dev/null +++ b/arch/x86/kernel/process.h | |||
| @@ -0,0 +1,39 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | // | ||
| 3 | // Code shared between 32 and 64 bit | ||
| 4 | |||
| 5 | #include <asm/spec-ctrl.h> | ||
| 6 | |||
| 7 | void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p); | ||
| 8 | |||
| 9 | /* | ||
| 10 | * This needs to be inline to optimize for the common case where no extra | ||
| 11 | * work needs to be done. | ||
| 12 | */ | ||
| 13 | static inline void switch_to_extra(struct task_struct *prev, | ||
| 14 | struct task_struct *next) | ||
| 15 | { | ||
| 16 | unsigned long next_tif = task_thread_info(next)->flags; | ||
| 17 | unsigned long prev_tif = task_thread_info(prev)->flags; | ||
| 18 | |||
| 19 | if (IS_ENABLED(CONFIG_SMP)) { | ||
| 20 | /* | ||
| 21 | * Avoid __switch_to_xtra() invocation when conditional | ||
| 22 | * STIPB is disabled and the only different bit is | ||
| 23 | * TIF_SPEC_IB. For CONFIG_SMP=n TIF_SPEC_IB is not | ||
| 24 | * in the TIF_WORK_CTXSW masks. | ||
| 25 | */ | ||
| 26 | if (!static_branch_likely(&switch_to_cond_stibp)) { | ||
| 27 | prev_tif &= ~_TIF_SPEC_IB; | ||
| 28 | next_tif &= ~_TIF_SPEC_IB; | ||
| 29 | } | ||
| 30 | } | ||
| 31 | |||
| 32 | /* | ||
| 33 | * __switch_to_xtra() handles debug registers, i/o bitmaps, | ||
| 34 | * speculation mitigations etc. | ||
| 35 | */ | ||
| 36 | if (unlikely(next_tif & _TIF_WORK_CTXSW_NEXT || | ||
| 37 | prev_tif & _TIF_WORK_CTXSW_PREV)) | ||
| 38 | __switch_to_xtra(prev, next); | ||
| 39 | } | ||
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index 5046a3c9dec2..d3e593eb189f 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c | |||
| @@ -59,6 +59,8 @@ | |||
| 59 | #include <asm/intel_rdt_sched.h> | 59 | #include <asm/intel_rdt_sched.h> |
| 60 | #include <asm/proto.h> | 60 | #include <asm/proto.h> |
| 61 | 61 | ||
| 62 | #include "process.h" | ||
| 63 | |||
| 62 | void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) | 64 | void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) |
| 63 | { | 65 | { |
| 64 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; | 66 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; |
| @@ -232,7 +234,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
| 232 | struct fpu *prev_fpu = &prev->fpu; | 234 | struct fpu *prev_fpu = &prev->fpu; |
| 233 | struct fpu *next_fpu = &next->fpu; | 235 | struct fpu *next_fpu = &next->fpu; |
| 234 | int cpu = smp_processor_id(); | 236 | int cpu = smp_processor_id(); |
| 235 | struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); | ||
| 236 | 237 | ||
| 237 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ | 238 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ |
| 238 | 239 | ||
| @@ -264,12 +265,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
| 264 | if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) | 265 | if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) |
| 265 | set_iopl_mask(next->iopl); | 266 | set_iopl_mask(next->iopl); |
| 266 | 267 | ||
| 267 | /* | 268 | switch_to_extra(prev_p, next_p); |
| 268 | * Now maybe handle debug registers and/or IO bitmaps | ||
| 269 | */ | ||
| 270 | if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV || | ||
| 271 | task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) | ||
| 272 | __switch_to_xtra(prev_p, next_p, tss); | ||
| 273 | 269 | ||
| 274 | /* | 270 | /* |
| 275 | * Leave lazy mode, flushing any hypercalls made here. | 271 | * Leave lazy mode, flushing any hypercalls made here. |
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 0e0b4288a4b2..bbfbf017065c 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c | |||
| @@ -60,6 +60,8 @@ | |||
| 60 | #include <asm/unistd_32_ia32.h> | 60 | #include <asm/unistd_32_ia32.h> |
| 61 | #endif | 61 | #endif |
| 62 | 62 | ||
| 63 | #include "process.h" | ||
| 64 | |||
| 63 | /* Prints also some state that isn't saved in the pt_regs */ | 65 | /* Prints also some state that isn't saved in the pt_regs */ |
| 64 | void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) | 66 | void __show_regs(struct pt_regs *regs, enum show_regs_mode mode) |
| 65 | { | 67 | { |
| @@ -553,7 +555,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
| 553 | struct fpu *prev_fpu = &prev->fpu; | 555 | struct fpu *prev_fpu = &prev->fpu; |
| 554 | struct fpu *next_fpu = &next->fpu; | 556 | struct fpu *next_fpu = &next->fpu; |
| 555 | int cpu = smp_processor_id(); | 557 | int cpu = smp_processor_id(); |
| 556 | struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu); | ||
| 557 | 558 | ||
| 558 | WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && | 559 | WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) && |
| 559 | this_cpu_read(irq_count) != -1); | 560 | this_cpu_read(irq_count) != -1); |
| @@ -617,12 +618,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |||
| 617 | /* Reload sp0. */ | 618 | /* Reload sp0. */ |
| 618 | update_task_stack(next_p); | 619 | update_task_stack(next_p); |
| 619 | 620 | ||
| 620 | /* | 621 | switch_to_extra(prev_p, next_p); |
| 621 | * Now maybe reload the debug registers and handle I/O bitmaps | ||
| 622 | */ | ||
| 623 | if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT || | ||
| 624 | task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) | ||
| 625 | __switch_to_xtra(prev_p, next_p, tss); | ||
| 626 | 622 | ||
| 627 | #ifdef CONFIG_XEN_PV | 623 | #ifdef CONFIG_XEN_PV |
| 628 | /* | 624 | /* |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index b74e7bfed6ab..d494b9bfe618 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
| @@ -1280,23 +1280,6 @@ void __init setup_arch(char **cmdline_p) | |||
| 1280 | unwind_init(); | 1280 | unwind_init(); |
| 1281 | } | 1281 | } |
| 1282 | 1282 | ||
| 1283 | /* | ||
| 1284 | * From boot protocol 2.14 onwards we expect the bootloader to set the | ||
| 1285 | * version to "0x8000 | <used version>". In case we find a version >= 2.14 | ||
| 1286 | * without the 0x8000 we assume the boot loader supports 2.13 only and | ||
| 1287 | * reset the version accordingly. The 0x8000 flag is removed in any case. | ||
| 1288 | */ | ||
| 1289 | void __init x86_verify_bootdata_version(void) | ||
| 1290 | { | ||
| 1291 | if (boot_params.hdr.version & VERSION_WRITTEN) | ||
| 1292 | boot_params.hdr.version &= ~VERSION_WRITTEN; | ||
| 1293 | else if (boot_params.hdr.version >= 0x020e) | ||
| 1294 | boot_params.hdr.version = 0x020d; | ||
| 1295 | |||
| 1296 | if (boot_params.hdr.version < 0x020e) | ||
| 1297 | boot_params.hdr.acpi_rsdp_addr = 0; | ||
| 1298 | } | ||
| 1299 | |||
| 1300 | #ifdef CONFIG_X86_32 | 1283 | #ifdef CONFIG_X86_32 |
| 1301 | 1284 | ||
| 1302 | static struct resource video_ram_resource = { | 1285 | static struct resource video_ram_resource = { |
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c index 1eae5af491c2..891a75dbc131 100644 --- a/arch/x86/kernel/vsmp_64.c +++ b/arch/x86/kernel/vsmp_64.c | |||
| @@ -26,65 +26,8 @@ | |||
| 26 | 26 | ||
| 27 | #define TOPOLOGY_REGISTER_OFFSET 0x10 | 27 | #define TOPOLOGY_REGISTER_OFFSET 0x10 |
| 28 | 28 | ||
| 29 | #if defined CONFIG_PCI && defined CONFIG_PARAVIRT_XXL | 29 | #ifdef CONFIG_PCI |
| 30 | /* | 30 | static void __init set_vsmp_ctl(void) |
| 31 | * Interrupt control on vSMPowered systems: | ||
| 32 | * ~AC is a shadow of IF. If IF is 'on' AC should be 'off' | ||
| 33 | * and vice versa. | ||
| 34 | */ | ||
| 35 | |||
| 36 | asmlinkage __visible unsigned long vsmp_save_fl(void) | ||
| 37 | { | ||
| 38 | unsigned long flags = native_save_fl(); | ||
| 39 | |||
| 40 | if (!(flags & X86_EFLAGS_IF) || (flags & X86_EFLAGS_AC)) | ||
| 41 | flags &= ~X86_EFLAGS_IF; | ||
| 42 | return flags; | ||
| 43 | } | ||
| 44 | PV_CALLEE_SAVE_REGS_THUNK(vsmp_save_fl); | ||
| 45 | |||
| 46 | __visible void vsmp_restore_fl(unsigned long flags) | ||
| 47 | { | ||
| 48 | if (flags & X86_EFLAGS_IF) | ||
| 49 | flags &= ~X86_EFLAGS_AC; | ||
| 50 | else | ||
| 51 | flags |= X86_EFLAGS_AC; | ||
| 52 | native_restore_fl(flags); | ||
| 53 | } | ||
| 54 | PV_CALLEE_SAVE_REGS_THUNK(vsmp_restore_fl); | ||
| 55 | |||
| 56 | asmlinkage __visible void vsmp_irq_disable(void) | ||
| 57 | { | ||
| 58 | unsigned long flags = native_save_fl(); | ||
| 59 | |||
| 60 | native_restore_fl((flags & ~X86_EFLAGS_IF) | X86_EFLAGS_AC); | ||
| 61 | } | ||
| 62 | PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_disable); | ||
| 63 | |||
| 64 | asmlinkage __visible void vsmp_irq_enable(void) | ||
| 65 | { | ||
| 66 | unsigned long flags = native_save_fl(); | ||
| 67 | |||
| 68 | native_restore_fl((flags | X86_EFLAGS_IF) & (~X86_EFLAGS_AC)); | ||
| 69 | } | ||
| 70 | PV_CALLEE_SAVE_REGS_THUNK(vsmp_irq_enable); | ||
| 71 | |||
| 72 | static unsigned __init vsmp_patch(u8 type, void *ibuf, | ||
| 73 | unsigned long addr, unsigned len) | ||
| 74 | { | ||
| 75 | switch (type) { | ||
| 76 | case PARAVIRT_PATCH(irq.irq_enable): | ||
| 77 | case PARAVIRT_PATCH(irq.irq_disable): | ||
| 78 | case PARAVIRT_PATCH(irq.save_fl): | ||
| 79 | case PARAVIRT_PATCH(irq.restore_fl): | ||
| 80 | return paravirt_patch_default(type, ibuf, addr, len); | ||
| 81 | default: | ||
| 82 | return native_patch(type, ibuf, addr, len); | ||
| 83 | } | ||
| 84 | |||
| 85 | } | ||
| 86 | |||
| 87 | static void __init set_vsmp_pv_ops(void) | ||
| 88 | { | 31 | { |
| 89 | void __iomem *address; | 32 | void __iomem *address; |
| 90 | unsigned int cap, ctl, cfg; | 33 | unsigned int cap, ctl, cfg; |
| @@ -109,28 +52,12 @@ static void __init set_vsmp_pv_ops(void) | |||
| 109 | } | 52 | } |
| 110 | #endif | 53 | #endif |
| 111 | 54 | ||
| 112 | if (cap & ctl & (1 << 4)) { | ||
| 113 | /* Setup irq ops and turn on vSMP IRQ fastpath handling */ | ||
| 114 | pv_ops.irq.irq_disable = PV_CALLEE_SAVE(vsmp_irq_disable); | ||
| 115 | pv_ops.irq.irq_enable = PV_CALLEE_SAVE(vsmp_irq_enable); | ||
| 116 | pv_ops.irq.save_fl = PV_CALLEE_SAVE(vsmp_save_fl); | ||
| 117 | pv_ops.irq.restore_fl = PV_CALLEE_SAVE(vsmp_restore_fl); | ||
| 118 | pv_ops.init.patch = vsmp_patch; | ||
| 119 | ctl &= ~(1 << 4); | ||
| 120 | } | ||
| 121 | writel(ctl, address + 4); | 55 | writel(ctl, address + 4); |
| 122 | ctl = readl(address + 4); | 56 | ctl = readl(address + 4); |
| 123 | pr_info("vSMP CTL: control set to:0x%08x\n", ctl); | 57 | pr_info("vSMP CTL: control set to:0x%08x\n", ctl); |
| 124 | 58 | ||
| 125 | early_iounmap(address, 8); | 59 | early_iounmap(address, 8); |
| 126 | } | 60 | } |
| 127 | #else | ||
| 128 | static void __init set_vsmp_pv_ops(void) | ||
| 129 | { | ||
| 130 | } | ||
| 131 | #endif | ||
| 132 | |||
| 133 | #ifdef CONFIG_PCI | ||
| 134 | static int is_vsmp = -1; | 61 | static int is_vsmp = -1; |
| 135 | 62 | ||
| 136 | static void __init detect_vsmp_box(void) | 63 | static void __init detect_vsmp_box(void) |
| @@ -164,11 +91,14 @@ static int is_vsmp_box(void) | |||
| 164 | { | 91 | { |
| 165 | return 0; | 92 | return 0; |
| 166 | } | 93 | } |
| 94 | static void __init set_vsmp_ctl(void) | ||
| 95 | { | ||
| 96 | } | ||
| 167 | #endif | 97 | #endif |
| 168 | 98 | ||
| 169 | static void __init vsmp_cap_cpus(void) | 99 | static void __init vsmp_cap_cpus(void) |
| 170 | { | 100 | { |
| 171 | #if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) | 101 | #if !defined(CONFIG_X86_VSMP) && defined(CONFIG_SMP) && defined(CONFIG_PCI) |
| 172 | void __iomem *address; | 102 | void __iomem *address; |
| 173 | unsigned int cfg, topology, node_shift, maxcpus; | 103 | unsigned int cfg, topology, node_shift, maxcpus; |
| 174 | 104 | ||
| @@ -221,6 +151,6 @@ void __init vsmp_init(void) | |||
| 221 | 151 | ||
| 222 | vsmp_cap_cpus(); | 152 | vsmp_cap_cpus(); |
| 223 | 153 | ||
| 224 | set_vsmp_pv_ops(); | 154 | set_vsmp_ctl(); |
| 225 | return; | 155 | return; |
| 226 | } | 156 | } |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 89db20f8cb70..c4533d05c214 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
| @@ -55,7 +55,7 @@ | |||
| 55 | #define PRIo64 "o" | 55 | #define PRIo64 "o" |
| 56 | 56 | ||
| 57 | /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ | 57 | /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ |
| 58 | #define apic_debug(fmt, arg...) | 58 | #define apic_debug(fmt, arg...) do {} while (0) |
| 59 | 59 | ||
| 60 | /* 14 is the version for Xeon and Pentium 8.4.8*/ | 60 | /* 14 is the version for Xeon and Pentium 8.4.8*/ |
| 61 | #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16)) | 61 | #define APIC_VERSION (0x14UL | ((KVM_APIC_LVT_NUM - 1) << 16)) |
| @@ -576,6 +576,11 @@ int kvm_pv_send_ipi(struct kvm *kvm, unsigned long ipi_bitmap_low, | |||
| 576 | rcu_read_lock(); | 576 | rcu_read_lock(); |
| 577 | map = rcu_dereference(kvm->arch.apic_map); | 577 | map = rcu_dereference(kvm->arch.apic_map); |
| 578 | 578 | ||
| 579 | if (unlikely(!map)) { | ||
| 580 | count = -EOPNOTSUPP; | ||
| 581 | goto out; | ||
| 582 | } | ||
| 583 | |||
| 579 | if (min > map->max_apic_id) | 584 | if (min > map->max_apic_id) |
| 580 | goto out; | 585 | goto out; |
| 581 | /* Bits above cluster_size are masked in the caller. */ | 586 | /* Bits above cluster_size are masked in the caller. */ |
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index cf5f572f2305..7c03c0f35444 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
| @@ -5074,9 +5074,9 @@ static bool need_remote_flush(u64 old, u64 new) | |||
| 5074 | } | 5074 | } |
| 5075 | 5075 | ||
| 5076 | static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, | 5076 | static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, |
| 5077 | const u8 *new, int *bytes) | 5077 | int *bytes) |
| 5078 | { | 5078 | { |
| 5079 | u64 gentry; | 5079 | u64 gentry = 0; |
| 5080 | int r; | 5080 | int r; |
| 5081 | 5081 | ||
| 5082 | /* | 5082 | /* |
| @@ -5088,22 +5088,12 @@ static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa, | |||
| 5088 | /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ | 5088 | /* Handle a 32-bit guest writing two halves of a 64-bit gpte */ |
| 5089 | *gpa &= ~(gpa_t)7; | 5089 | *gpa &= ~(gpa_t)7; |
| 5090 | *bytes = 8; | 5090 | *bytes = 8; |
| 5091 | r = kvm_vcpu_read_guest(vcpu, *gpa, &gentry, 8); | ||
| 5092 | if (r) | ||
| 5093 | gentry = 0; | ||
| 5094 | new = (const u8 *)&gentry; | ||
| 5095 | } | 5091 | } |
| 5096 | 5092 | ||
| 5097 | switch (*bytes) { | 5093 | if (*bytes == 4 || *bytes == 8) { |
| 5098 | case 4: | 5094 | r = kvm_vcpu_read_guest_atomic(vcpu, *gpa, &gentry, *bytes); |
| 5099 | gentry = *(const u32 *)new; | 5095 | if (r) |
| 5100 | break; | 5096 | gentry = 0; |
| 5101 | case 8: | ||
| 5102 | gentry = *(const u64 *)new; | ||
| 5103 | break; | ||
| 5104 | default: | ||
| 5105 | gentry = 0; | ||
| 5106 | break; | ||
| 5107 | } | 5097 | } |
| 5108 | 5098 | ||
| 5109 | return gentry; | 5099 | return gentry; |
| @@ -5207,8 +5197,6 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
| 5207 | 5197 | ||
| 5208 | pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); | 5198 | pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes); |
| 5209 | 5199 | ||
| 5210 | gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, new, &bytes); | ||
| 5211 | |||
| 5212 | /* | 5200 | /* |
| 5213 | * No need to care whether allocation memory is successful | 5201 | * No need to care whether allocation memory is successful |
| 5214 | * or not since pte prefetch is skiped if it does not have | 5202 | * or not since pte prefetch is skiped if it does not have |
| @@ -5217,6 +5205,9 @@ static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
| 5217 | mmu_topup_memory_caches(vcpu); | 5205 | mmu_topup_memory_caches(vcpu); |
| 5218 | 5206 | ||
| 5219 | spin_lock(&vcpu->kvm->mmu_lock); | 5207 | spin_lock(&vcpu->kvm->mmu_lock); |
| 5208 | |||
| 5209 | gentry = mmu_pte_write_fetch_gpte(vcpu, &gpa, &bytes); | ||
| 5210 | |||
| 5220 | ++vcpu->kvm->stat.mmu_pte_write; | 5211 | ++vcpu->kvm->stat.mmu_pte_write; |
| 5221 | kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); | 5212 | kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE); |
| 5222 | 5213 | ||
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 0e21ccc46792..cc6467b35a85 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
| @@ -1446,7 +1446,7 @@ static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu) | |||
| 1446 | return vcpu->arch.tsc_offset; | 1446 | return vcpu->arch.tsc_offset; |
| 1447 | } | 1447 | } |
| 1448 | 1448 | ||
| 1449 | static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | 1449 | static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
| 1450 | { | 1450 | { |
| 1451 | struct vcpu_svm *svm = to_svm(vcpu); | 1451 | struct vcpu_svm *svm = to_svm(vcpu); |
| 1452 | u64 g_tsc_offset = 0; | 1452 | u64 g_tsc_offset = 0; |
| @@ -1464,6 +1464,7 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | |||
| 1464 | svm->vmcb->control.tsc_offset = offset + g_tsc_offset; | 1464 | svm->vmcb->control.tsc_offset = offset + g_tsc_offset; |
| 1465 | 1465 | ||
| 1466 | mark_dirty(svm->vmcb, VMCB_INTERCEPTS); | 1466 | mark_dirty(svm->vmcb, VMCB_INTERCEPTS); |
| 1467 | return svm->vmcb->control.tsc_offset; | ||
| 1467 | } | 1468 | } |
| 1468 | 1469 | ||
| 1469 | static void avic_init_vmcb(struct vcpu_svm *svm) | 1470 | static void avic_init_vmcb(struct vcpu_svm *svm) |
| @@ -1664,20 +1665,23 @@ static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu, | |||
| 1664 | static int avic_init_access_page(struct kvm_vcpu *vcpu) | 1665 | static int avic_init_access_page(struct kvm_vcpu *vcpu) |
| 1665 | { | 1666 | { |
| 1666 | struct kvm *kvm = vcpu->kvm; | 1667 | struct kvm *kvm = vcpu->kvm; |
| 1667 | int ret; | 1668 | int ret = 0; |
| 1668 | 1669 | ||
| 1670 | mutex_lock(&kvm->slots_lock); | ||
| 1669 | if (kvm->arch.apic_access_page_done) | 1671 | if (kvm->arch.apic_access_page_done) |
| 1670 | return 0; | 1672 | goto out; |
| 1671 | 1673 | ||
| 1672 | ret = x86_set_memory_region(kvm, | 1674 | ret = __x86_set_memory_region(kvm, |
| 1673 | APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, | 1675 | APIC_ACCESS_PAGE_PRIVATE_MEMSLOT, |
| 1674 | APIC_DEFAULT_PHYS_BASE, | 1676 | APIC_DEFAULT_PHYS_BASE, |
| 1675 | PAGE_SIZE); | 1677 | PAGE_SIZE); |
| 1676 | if (ret) | 1678 | if (ret) |
| 1677 | return ret; | 1679 | goto out; |
| 1678 | 1680 | ||
| 1679 | kvm->arch.apic_access_page_done = true; | 1681 | kvm->arch.apic_access_page_done = true; |
| 1680 | return 0; | 1682 | out: |
| 1683 | mutex_unlock(&kvm->slots_lock); | ||
| 1684 | return ret; | ||
| 1681 | } | 1685 | } |
| 1682 | 1686 | ||
| 1683 | static int avic_init_backing_page(struct kvm_vcpu *vcpu) | 1687 | static int avic_init_backing_page(struct kvm_vcpu *vcpu) |
| @@ -2189,21 +2193,31 @@ out: | |||
| 2189 | return ERR_PTR(err); | 2193 | return ERR_PTR(err); |
| 2190 | } | 2194 | } |
| 2191 | 2195 | ||
| 2196 | static void svm_clear_current_vmcb(struct vmcb *vmcb) | ||
| 2197 | { | ||
| 2198 | int i; | ||
| 2199 | |||
| 2200 | for_each_online_cpu(i) | ||
| 2201 | cmpxchg(&per_cpu(svm_data, i)->current_vmcb, vmcb, NULL); | ||
| 2202 | } | ||
| 2203 | |||
| 2192 | static void svm_free_vcpu(struct kvm_vcpu *vcpu) | 2204 | static void svm_free_vcpu(struct kvm_vcpu *vcpu) |
| 2193 | { | 2205 | { |
| 2194 | struct vcpu_svm *svm = to_svm(vcpu); | 2206 | struct vcpu_svm *svm = to_svm(vcpu); |
| 2195 | 2207 | ||
| 2208 | /* | ||
| 2209 | * The vmcb page can be recycled, causing a false negative in | ||
| 2210 | * svm_vcpu_load(). So, ensure that no logical CPU has this | ||
| 2211 | * vmcb page recorded as its current vmcb. | ||
| 2212 | */ | ||
| 2213 | svm_clear_current_vmcb(svm->vmcb); | ||
| 2214 | |||
| 2196 | __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); | 2215 | __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT)); |
| 2197 | __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); | 2216 | __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER); |
| 2198 | __free_page(virt_to_page(svm->nested.hsave)); | 2217 | __free_page(virt_to_page(svm->nested.hsave)); |
| 2199 | __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); | 2218 | __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER); |
| 2200 | kvm_vcpu_uninit(vcpu); | 2219 | kvm_vcpu_uninit(vcpu); |
| 2201 | kmem_cache_free(kvm_vcpu_cache, svm); | 2220 | kmem_cache_free(kvm_vcpu_cache, svm); |
| 2202 | /* | ||
| 2203 | * The vmcb page can be recycled, causing a false negative in | ||
| 2204 | * svm_vcpu_load(). So do a full IBPB now. | ||
| 2205 | */ | ||
| 2206 | indirect_branch_prediction_barrier(); | ||
| 2207 | } | 2221 | } |
| 2208 | 2222 | ||
| 2209 | static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 2223 | static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
| @@ -7149,7 +7163,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { | |||
| 7149 | .has_wbinvd_exit = svm_has_wbinvd_exit, | 7163 | .has_wbinvd_exit = svm_has_wbinvd_exit, |
| 7150 | 7164 | ||
| 7151 | .read_l1_tsc_offset = svm_read_l1_tsc_offset, | 7165 | .read_l1_tsc_offset = svm_read_l1_tsc_offset, |
| 7152 | .write_tsc_offset = svm_write_tsc_offset, | 7166 | .write_l1_tsc_offset = svm_write_l1_tsc_offset, |
| 7153 | 7167 | ||
| 7154 | .set_tdp_cr3 = set_tdp_cr3, | 7168 | .set_tdp_cr3 = set_tdp_cr3, |
| 7155 | 7169 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 4555077d69ce..02edd9960e9d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
| @@ -174,6 +174,7 @@ module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); | |||
| 174 | * refer SDM volume 3b section 21.6.13 & 22.1.3. | 174 | * refer SDM volume 3b section 21.6.13 & 22.1.3. |
| 175 | */ | 175 | */ |
| 176 | static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP; | 176 | static unsigned int ple_gap = KVM_DEFAULT_PLE_GAP; |
| 177 | module_param(ple_gap, uint, 0444); | ||
| 177 | 178 | ||
| 178 | static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; | 179 | static unsigned int ple_window = KVM_VMX_DEFAULT_PLE_WINDOW; |
| 179 | module_param(ple_window, uint, 0444); | 180 | module_param(ple_window, uint, 0444); |
| @@ -984,6 +985,7 @@ struct vcpu_vmx { | |||
| 984 | struct shared_msr_entry *guest_msrs; | 985 | struct shared_msr_entry *guest_msrs; |
| 985 | int nmsrs; | 986 | int nmsrs; |
| 986 | int save_nmsrs; | 987 | int save_nmsrs; |
| 988 | bool guest_msrs_dirty; | ||
| 987 | unsigned long host_idt_base; | 989 | unsigned long host_idt_base; |
| 988 | #ifdef CONFIG_X86_64 | 990 | #ifdef CONFIG_X86_64 |
| 989 | u64 msr_host_kernel_gs_base; | 991 | u64 msr_host_kernel_gs_base; |
| @@ -1306,7 +1308,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked); | |||
| 1306 | static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, | 1308 | static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12, |
| 1307 | u16 error_code); | 1309 | u16 error_code); |
| 1308 | static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); | 1310 | static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu); |
| 1309 | static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, | 1311 | static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, |
| 1310 | u32 msr, int type); | 1312 | u32 msr, int type); |
| 1311 | 1313 | ||
| 1312 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); | 1314 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); |
| @@ -1610,12 +1612,6 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu, | |||
| 1610 | { | 1612 | { |
| 1611 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 1613 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
| 1612 | 1614 | ||
| 1613 | /* We don't support disabling the feature for simplicity. */ | ||
| 1614 | if (vmx->nested.enlightened_vmcs_enabled) | ||
| 1615 | return 0; | ||
| 1616 | |||
| 1617 | vmx->nested.enlightened_vmcs_enabled = true; | ||
| 1618 | |||
| 1619 | /* | 1615 | /* |
| 1620 | * vmcs_version represents the range of supported Enlightened VMCS | 1616 | * vmcs_version represents the range of supported Enlightened VMCS |
| 1621 | * versions: lower 8 bits is the minimal version, higher 8 bits is the | 1617 | * versions: lower 8 bits is the minimal version, higher 8 bits is the |
| @@ -1625,6 +1621,12 @@ static int nested_enable_evmcs(struct kvm_vcpu *vcpu, | |||
| 1625 | if (vmcs_version) | 1621 | if (vmcs_version) |
| 1626 | *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1; | 1622 | *vmcs_version = (KVM_EVMCS_VERSION << 8) | 1; |
| 1627 | 1623 | ||
| 1624 | /* We don't support disabling the feature for simplicity. */ | ||
| 1625 | if (vmx->nested.enlightened_vmcs_enabled) | ||
| 1626 | return 0; | ||
| 1627 | |||
| 1628 | vmx->nested.enlightened_vmcs_enabled = true; | ||
| 1629 | |||
| 1628 | vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; | 1630 | vmx->nested.msrs.pinbased_ctls_high &= ~EVMCS1_UNSUPPORTED_PINCTRL; |
| 1629 | vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; | 1631 | vmx->nested.msrs.entry_ctls_high &= ~EVMCS1_UNSUPPORTED_VMENTRY_CTRL; |
| 1630 | vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; | 1632 | vmx->nested.msrs.exit_ctls_high &= ~EVMCS1_UNSUPPORTED_VMEXIT_CTRL; |
| @@ -2897,6 +2899,20 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) | |||
| 2897 | 2899 | ||
| 2898 | vmx->req_immediate_exit = false; | 2900 | vmx->req_immediate_exit = false; |
| 2899 | 2901 | ||
| 2902 | /* | ||
| 2903 | * Note that guest MSRs to be saved/restored can also be changed | ||
| 2904 | * when guest state is loaded. This happens when guest transitions | ||
| 2905 | * to/from long-mode by setting MSR_EFER.LMA. | ||
| 2906 | */ | ||
| 2907 | if (!vmx->loaded_cpu_state || vmx->guest_msrs_dirty) { | ||
| 2908 | vmx->guest_msrs_dirty = false; | ||
| 2909 | for (i = 0; i < vmx->save_nmsrs; ++i) | ||
| 2910 | kvm_set_shared_msr(vmx->guest_msrs[i].index, | ||
| 2911 | vmx->guest_msrs[i].data, | ||
| 2912 | vmx->guest_msrs[i].mask); | ||
| 2913 | |||
| 2914 | } | ||
| 2915 | |||
| 2900 | if (vmx->loaded_cpu_state) | 2916 | if (vmx->loaded_cpu_state) |
| 2901 | return; | 2917 | return; |
| 2902 | 2918 | ||
| @@ -2957,11 +2973,6 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) | |||
| 2957 | vmcs_writel(HOST_GS_BASE, gs_base); | 2973 | vmcs_writel(HOST_GS_BASE, gs_base); |
| 2958 | host_state->gs_base = gs_base; | 2974 | host_state->gs_base = gs_base; |
| 2959 | } | 2975 | } |
| 2960 | |||
| 2961 | for (i = 0; i < vmx->save_nmsrs; ++i) | ||
| 2962 | kvm_set_shared_msr(vmx->guest_msrs[i].index, | ||
| 2963 | vmx->guest_msrs[i].data, | ||
| 2964 | vmx->guest_msrs[i].mask); | ||
| 2965 | } | 2976 | } |
| 2966 | 2977 | ||
| 2967 | static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) | 2978 | static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx) |
| @@ -3436,6 +3447,7 @@ static void setup_msrs(struct vcpu_vmx *vmx) | |||
| 3436 | move_msr_up(vmx, index, save_nmsrs++); | 3447 | move_msr_up(vmx, index, save_nmsrs++); |
| 3437 | 3448 | ||
| 3438 | vmx->save_nmsrs = save_nmsrs; | 3449 | vmx->save_nmsrs = save_nmsrs; |
| 3450 | vmx->guest_msrs_dirty = true; | ||
| 3439 | 3451 | ||
| 3440 | if (cpu_has_vmx_msr_bitmap()) | 3452 | if (cpu_has_vmx_msr_bitmap()) |
| 3441 | vmx_update_msr_bitmap(&vmx->vcpu); | 3453 | vmx_update_msr_bitmap(&vmx->vcpu); |
| @@ -3452,11 +3464,9 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu) | |||
| 3452 | return vcpu->arch.tsc_offset; | 3464 | return vcpu->arch.tsc_offset; |
| 3453 | } | 3465 | } |
| 3454 | 3466 | ||
| 3455 | /* | 3467 | static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
| 3456 | * writes 'offset' into guest's timestamp counter offset register | ||
| 3457 | */ | ||
| 3458 | static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | ||
| 3459 | { | 3468 | { |
| 3469 | u64 active_offset = offset; | ||
| 3460 | if (is_guest_mode(vcpu)) { | 3470 | if (is_guest_mode(vcpu)) { |
| 3461 | /* | 3471 | /* |
| 3462 | * We're here if L1 chose not to trap WRMSR to TSC. According | 3472 | * We're here if L1 chose not to trap WRMSR to TSC. According |
| @@ -3464,17 +3474,16 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | |||
| 3464 | * set for L2 remains unchanged, and still needs to be added | 3474 | * set for L2 remains unchanged, and still needs to be added |
| 3465 | * to the newly set TSC to get L2's TSC. | 3475 | * to the newly set TSC to get L2's TSC. |
| 3466 | */ | 3476 | */ |
| 3467 | struct vmcs12 *vmcs12; | 3477 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
| 3468 | /* recalculate vmcs02.TSC_OFFSET: */ | 3478 | if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING)) |
| 3469 | vmcs12 = get_vmcs12(vcpu); | 3479 | active_offset += vmcs12->tsc_offset; |
| 3470 | vmcs_write64(TSC_OFFSET, offset + | ||
| 3471 | (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ? | ||
| 3472 | vmcs12->tsc_offset : 0)); | ||
| 3473 | } else { | 3480 | } else { |
| 3474 | trace_kvm_write_tsc_offset(vcpu->vcpu_id, | 3481 | trace_kvm_write_tsc_offset(vcpu->vcpu_id, |
| 3475 | vmcs_read64(TSC_OFFSET), offset); | 3482 | vmcs_read64(TSC_OFFSET), offset); |
| 3476 | vmcs_write64(TSC_OFFSET, offset); | ||
| 3477 | } | 3483 | } |
| 3484 | |||
| 3485 | vmcs_write64(TSC_OFFSET, active_offset); | ||
| 3486 | return active_offset; | ||
| 3478 | } | 3487 | } |
| 3479 | 3488 | ||
| 3480 | /* | 3489 | /* |
| @@ -5944,7 +5953,7 @@ static void free_vpid(int vpid) | |||
| 5944 | spin_unlock(&vmx_vpid_lock); | 5953 | spin_unlock(&vmx_vpid_lock); |
| 5945 | } | 5954 | } |
| 5946 | 5955 | ||
| 5947 | static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, | 5956 | static __always_inline void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, |
| 5948 | u32 msr, int type) | 5957 | u32 msr, int type) |
| 5949 | { | 5958 | { |
| 5950 | int f = sizeof(unsigned long); | 5959 | int f = sizeof(unsigned long); |
| @@ -5982,7 +5991,7 @@ static void __always_inline vmx_disable_intercept_for_msr(unsigned long *msr_bit | |||
| 5982 | } | 5991 | } |
| 5983 | } | 5992 | } |
| 5984 | 5993 | ||
| 5985 | static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, | 5994 | static __always_inline void vmx_enable_intercept_for_msr(unsigned long *msr_bitmap, |
| 5986 | u32 msr, int type) | 5995 | u32 msr, int type) |
| 5987 | { | 5996 | { |
| 5988 | int f = sizeof(unsigned long); | 5997 | int f = sizeof(unsigned long); |
| @@ -6020,7 +6029,7 @@ static void __always_inline vmx_enable_intercept_for_msr(unsigned long *msr_bitm | |||
| 6020 | } | 6029 | } |
| 6021 | } | 6030 | } |
| 6022 | 6031 | ||
| 6023 | static void __always_inline vmx_set_intercept_for_msr(unsigned long *msr_bitmap, | 6032 | static __always_inline void vmx_set_intercept_for_msr(unsigned long *msr_bitmap, |
| 6024 | u32 msr, int type, bool value) | 6033 | u32 msr, int type, bool value) |
| 6025 | { | 6034 | { |
| 6026 | if (value) | 6035 | if (value) |
| @@ -8664,8 +8673,6 @@ static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx) | |||
| 8664 | struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; | 8673 | struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12; |
| 8665 | struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; | 8674 | struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs; |
| 8666 | 8675 | ||
| 8667 | vmcs12->hdr.revision_id = evmcs->revision_id; | ||
| 8668 | |||
| 8669 | /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ | 8676 | /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */ |
| 8670 | vmcs12->tpr_threshold = evmcs->tpr_threshold; | 8677 | vmcs12->tpr_threshold = evmcs->tpr_threshold; |
| 8671 | vmcs12->guest_rip = evmcs->guest_rip; | 8678 | vmcs12->guest_rip = evmcs->guest_rip; |
| @@ -9369,7 +9376,30 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, | |||
| 9369 | 9376 | ||
| 9370 | vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page); | 9377 | vmx->nested.hv_evmcs = kmap(vmx->nested.hv_evmcs_page); |
| 9371 | 9378 | ||
| 9372 | if (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION) { | 9379 | /* |
| 9380 | * Currently, KVM only supports eVMCS version 1 | ||
| 9381 | * (== KVM_EVMCS_VERSION) and thus we expect guest to set this | ||
| 9382 | * value to first u32 field of eVMCS which should specify eVMCS | ||
| 9383 | * VersionNumber. | ||
| 9384 | * | ||
| 9385 | * Guest should be aware of supported eVMCS versions by host by | ||
| 9386 | * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is | ||
| 9387 | * expected to set this CPUID leaf according to the value | ||
| 9388 | * returned in vmcs_version from nested_enable_evmcs(). | ||
| 9389 | * | ||
| 9390 | * However, it turns out that Microsoft Hyper-V fails to comply | ||
| 9391 | * to their own invented interface: When Hyper-V use eVMCS, it | ||
| 9392 | * just sets first u32 field of eVMCS to revision_id specified | ||
| 9393 | * in MSR_IA32_VMX_BASIC. Instead of used eVMCS version number | ||
| 9394 | * which is one of the supported versions specified in | ||
| 9395 | * CPUID.0x4000000A.EAX[0:15]. | ||
| 9396 | * | ||
| 9397 | * To overcome Hyper-V bug, we accept here either a supported | ||
| 9398 | * eVMCS version or VMCS12 revision_id as valid values for first | ||
| 9399 | * u32 field of eVMCS. | ||
| 9400 | */ | ||
| 9401 | if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) && | ||
| 9402 | (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) { | ||
| 9373 | nested_release_evmcs(vcpu); | 9403 | nested_release_evmcs(vcpu); |
| 9374 | return 0; | 9404 | return 0; |
| 9375 | } | 9405 | } |
| @@ -9390,9 +9420,11 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu, | |||
| 9390 | * present in struct hv_enlightened_vmcs, ...). Make sure there | 9420 | * present in struct hv_enlightened_vmcs, ...). Make sure there |
| 9391 | * are no leftovers. | 9421 | * are no leftovers. |
| 9392 | */ | 9422 | */ |
| 9393 | if (from_launch) | 9423 | if (from_launch) { |
| 9394 | memset(vmx->nested.cached_vmcs12, 0, | 9424 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
| 9395 | sizeof(*vmx->nested.cached_vmcs12)); | 9425 | memset(vmcs12, 0, sizeof(*vmcs12)); |
| 9426 | vmcs12->hdr.revision_id = VMCS12_REVISION; | ||
| 9427 | } | ||
| 9396 | 9428 | ||
| 9397 | } | 9429 | } |
| 9398 | return 1; | 9430 | return 1; |
| @@ -15062,7 +15094,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { | |||
| 15062 | .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, | 15094 | .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit, |
| 15063 | 15095 | ||
| 15064 | .read_l1_tsc_offset = vmx_read_l1_tsc_offset, | 15096 | .read_l1_tsc_offset = vmx_read_l1_tsc_offset, |
| 15065 | .write_tsc_offset = vmx_write_tsc_offset, | 15097 | .write_l1_tsc_offset = vmx_write_l1_tsc_offset, |
| 15066 | 15098 | ||
| 15067 | .set_tdp_cr3 = vmx_set_cr3, | 15099 | .set_tdp_cr3 = vmx_set_cr3, |
| 15068 | 15100 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 5cd5647120f2..d02937760c3b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
| @@ -1665,8 +1665,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc); | |||
| 1665 | 1665 | ||
| 1666 | static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | 1666 | static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
| 1667 | { | 1667 | { |
| 1668 | kvm_x86_ops->write_tsc_offset(vcpu, offset); | 1668 | vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset); |
| 1669 | vcpu->arch.tsc_offset = offset; | ||
| 1670 | } | 1669 | } |
| 1671 | 1670 | ||
| 1672 | static inline bool kvm_check_tsc_unstable(void) | 1671 | static inline bool kvm_check_tsc_unstable(void) |
| @@ -1794,7 +1793,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc); | |||
| 1794 | static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, | 1793 | static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, |
| 1795 | s64 adjustment) | 1794 | s64 adjustment) |
| 1796 | { | 1795 | { |
| 1797 | kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment); | 1796 | u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu); |
| 1797 | kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment); | ||
| 1798 | } | 1798 | } |
| 1799 | 1799 | ||
| 1800 | static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) | 1800 | static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment) |
| @@ -6918,6 +6918,7 @@ static int kvm_pv_clock_pairing(struct kvm_vcpu *vcpu, gpa_t paddr, | |||
| 6918 | clock_pairing.nsec = ts.tv_nsec; | 6918 | clock_pairing.nsec = ts.tv_nsec; |
| 6919 | clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); | 6919 | clock_pairing.tsc = kvm_read_l1_tsc(vcpu, cycle); |
| 6920 | clock_pairing.flags = 0; | 6920 | clock_pairing.flags = 0; |
| 6921 | memset(&clock_pairing.pad, 0, sizeof(clock_pairing.pad)); | ||
| 6921 | 6922 | ||
| 6922 | ret = 0; | 6923 | ret = 0; |
| 6923 | if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, | 6924 | if (kvm_write_guest(vcpu->kvm, paddr, &clock_pairing, |
| @@ -7455,7 +7456,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu) | |||
| 7455 | else { | 7456 | else { |
| 7456 | if (vcpu->arch.apicv_active) | 7457 | if (vcpu->arch.apicv_active) |
| 7457 | kvm_x86_ops->sync_pir_to_irr(vcpu); | 7458 | kvm_x86_ops->sync_pir_to_irr(vcpu); |
| 7458 | kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); | 7459 | if (ioapic_in_kernel(vcpu->kvm)) |
| 7460 | kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors); | ||
| 7459 | } | 7461 | } |
| 7460 | 7462 | ||
| 7461 | if (is_guest_mode(vcpu)) | 7463 | if (is_guest_mode(vcpu)) |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index bddd6b3cee1d..03b6b4c2238d 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
| @@ -7,7 +7,6 @@ | |||
| 7 | #include <linux/export.h> | 7 | #include <linux/export.h> |
| 8 | #include <linux/cpu.h> | 8 | #include <linux/cpu.h> |
| 9 | #include <linux/debugfs.h> | 9 | #include <linux/debugfs.h> |
| 10 | #include <linux/ptrace.h> | ||
| 11 | 10 | ||
| 12 | #include <asm/tlbflush.h> | 11 | #include <asm/tlbflush.h> |
| 13 | #include <asm/mmu_context.h> | 12 | #include <asm/mmu_context.h> |
| @@ -31,6 +30,12 @@ | |||
| 31 | */ | 30 | */ |
| 32 | 31 | ||
| 33 | /* | 32 | /* |
| 33 | * Use bit 0 to mangle the TIF_SPEC_IB state into the mm pointer which is | ||
| 34 | * stored in cpu_tlb_state.last_user_mm_ibpb. | ||
| 35 | */ | ||
| 36 | #define LAST_USER_MM_IBPB 0x1UL | ||
| 37 | |||
| 38 | /* | ||
| 34 | * We get here when we do something requiring a TLB invalidation | 39 | * We get here when we do something requiring a TLB invalidation |
| 35 | * but could not go invalidate all of the contexts. We do the | 40 | * but could not go invalidate all of the contexts. We do the |
| 36 | * necessary invalidation by clearing out the 'ctx_id' which | 41 | * necessary invalidation by clearing out the 'ctx_id' which |
| @@ -181,17 +186,87 @@ static void sync_current_stack_to_mm(struct mm_struct *mm) | |||
| 181 | } | 186 | } |
| 182 | } | 187 | } |
| 183 | 188 | ||
| 184 | static bool ibpb_needed(struct task_struct *tsk, u64 last_ctx_id) | 189 | static inline unsigned long mm_mangle_tif_spec_ib(struct task_struct *next) |
| 190 | { | ||
| 191 | unsigned long next_tif = task_thread_info(next)->flags; | ||
| 192 | unsigned long ibpb = (next_tif >> TIF_SPEC_IB) & LAST_USER_MM_IBPB; | ||
| 193 | |||
| 194 | return (unsigned long)next->mm | ibpb; | ||
| 195 | } | ||
| 196 | |||
| 197 | static void cond_ibpb(struct task_struct *next) | ||
| 185 | { | 198 | { |
| 199 | if (!next || !next->mm) | ||
| 200 | return; | ||
| 201 | |||
| 186 | /* | 202 | /* |
| 187 | * Check if the current (previous) task has access to the memory | 203 | * Both, the conditional and the always IBPB mode use the mm |
| 188 | * of the @tsk (next) task. If access is denied, make sure to | 204 | * pointer to avoid the IBPB when switching between tasks of the |
| 189 | * issue a IBPB to stop user->user Spectre-v2 attacks. | 205 | * same process. Using the mm pointer instead of mm->context.ctx_id |
| 190 | * | 206 | * opens a hypothetical hole vs. mm_struct reuse, which is more or |
| 191 | * Note: __ptrace_may_access() returns 0 or -ERRNO. | 207 | * less impossible to control by an attacker. Aside of that it |
| 208 | * would only affect the first schedule so the theoretically | ||
| 209 | * exposed data is not really interesting. | ||
| 192 | */ | 210 | */ |
| 193 | return (tsk && tsk->mm && tsk->mm->context.ctx_id != last_ctx_id && | 211 | if (static_branch_likely(&switch_mm_cond_ibpb)) { |
| 194 | ptrace_may_access_sched(tsk, PTRACE_MODE_SPEC_IBPB)); | 212 | unsigned long prev_mm, next_mm; |
| 213 | |||
| 214 | /* | ||
| 215 | * This is a bit more complex than the always mode because | ||
| 216 | * it has to handle two cases: | ||
| 217 | * | ||
| 218 | * 1) Switch from a user space task (potential attacker) | ||
| 219 | * which has TIF_SPEC_IB set to a user space task | ||
| 220 | * (potential victim) which has TIF_SPEC_IB not set. | ||
| 221 | * | ||
| 222 | * 2) Switch from a user space task (potential attacker) | ||
| 223 | * which has TIF_SPEC_IB not set to a user space task | ||
| 224 | * (potential victim) which has TIF_SPEC_IB set. | ||
| 225 | * | ||
| 226 | * This could be done by unconditionally issuing IBPB when | ||
| 227 | * a task which has TIF_SPEC_IB set is either scheduled in | ||
| 228 | * or out. Though that results in two flushes when: | ||
| 229 | * | ||
| 230 | * - the same user space task is scheduled out and later | ||
| 231 | * scheduled in again and only a kernel thread ran in | ||
| 232 | * between. | ||
| 233 | * | ||
| 234 | * - a user space task belonging to the same process is | ||
| 235 | * scheduled in after a kernel thread ran in between | ||
| 236 | * | ||
| 237 | * - a user space task belonging to the same process is | ||
| 238 | * scheduled in immediately. | ||
| 239 | * | ||
| 240 | * Optimize this with reasonably small overhead for the | ||
| 241 | * above cases. Mangle the TIF_SPEC_IB bit into the mm | ||
| 242 | * pointer of the incoming task which is stored in | ||
| 243 | * cpu_tlbstate.last_user_mm_ibpb for comparison. | ||
| 244 | */ | ||
| 245 | next_mm = mm_mangle_tif_spec_ib(next); | ||
| 246 | prev_mm = this_cpu_read(cpu_tlbstate.last_user_mm_ibpb); | ||
| 247 | |||
| 248 | /* | ||
| 249 | * Issue IBPB only if the mm's are different and one or | ||
| 250 | * both have the IBPB bit set. | ||
| 251 | */ | ||
| 252 | if (next_mm != prev_mm && | ||
| 253 | (next_mm | prev_mm) & LAST_USER_MM_IBPB) | ||
| 254 | indirect_branch_prediction_barrier(); | ||
| 255 | |||
| 256 | this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, next_mm); | ||
| 257 | } | ||
| 258 | |||
| 259 | if (static_branch_unlikely(&switch_mm_always_ibpb)) { | ||
| 260 | /* | ||
| 261 | * Only flush when switching to a user space task with a | ||
| 262 | * different context than the user space task which ran | ||
| 263 | * last on this CPU. | ||
| 264 | */ | ||
| 265 | if (this_cpu_read(cpu_tlbstate.last_user_mm) != next->mm) { | ||
| 266 | indirect_branch_prediction_barrier(); | ||
| 267 | this_cpu_write(cpu_tlbstate.last_user_mm, next->mm); | ||
| 268 | } | ||
| 269 | } | ||
| 195 | } | 270 | } |
| 196 | 271 | ||
| 197 | void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | 272 | void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, |
| @@ -292,22 +367,12 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
| 292 | new_asid = prev_asid; | 367 | new_asid = prev_asid; |
| 293 | need_flush = true; | 368 | need_flush = true; |
| 294 | } else { | 369 | } else { |
| 295 | u64 last_ctx_id = this_cpu_read(cpu_tlbstate.last_ctx_id); | ||
| 296 | |||
| 297 | /* | 370 | /* |
| 298 | * Avoid user/user BTB poisoning by flushing the branch | 371 | * Avoid user/user BTB poisoning by flushing the branch |
| 299 | * predictor when switching between processes. This stops | 372 | * predictor when switching between processes. This stops |
| 300 | * one process from doing Spectre-v2 attacks on another. | 373 | * one process from doing Spectre-v2 attacks on another. |
| 301 | * | ||
| 302 | * As an optimization, flush indirect branches only when | ||
| 303 | * switching into a processes that can't be ptrace by the | ||
| 304 | * current one (as in such case, attacker has much more | ||
| 305 | * convenient way how to tamper with the next process than | ||
| 306 | * branch buffer poisoning). | ||
| 307 | */ | 374 | */ |
| 308 | if (static_cpu_has(X86_FEATURE_USE_IBPB) && | 375 | cond_ibpb(tsk); |
| 309 | ibpb_needed(tsk, last_ctx_id)) | ||
| 310 | indirect_branch_prediction_barrier(); | ||
| 311 | 376 | ||
| 312 | if (IS_ENABLED(CONFIG_VMAP_STACK)) { | 377 | if (IS_ENABLED(CONFIG_VMAP_STACK)) { |
| 313 | /* | 378 | /* |
| @@ -365,14 +430,6 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next, | |||
| 365 | trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); | 430 | trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, 0); |
| 366 | } | 431 | } |
| 367 | 432 | ||
| 368 | /* | ||
| 369 | * Record last user mm's context id, so we can avoid | ||
| 370 | * flushing branch buffer with IBPB if we switch back | ||
| 371 | * to the same user. | ||
| 372 | */ | ||
| 373 | if (next != &init_mm) | ||
| 374 | this_cpu_write(cpu_tlbstate.last_ctx_id, next->context.ctx_id); | ||
| 375 | |||
| 376 | /* Make sure we write CR3 before loaded_mm. */ | 433 | /* Make sure we write CR3 before loaded_mm. */ |
| 377 | barrier(); | 434 | barrier(); |
| 378 | 435 | ||
| @@ -441,7 +498,7 @@ void initialize_tlbstate_and_flush(void) | |||
| 441 | write_cr3(build_cr3(mm->pgd, 0)); | 498 | write_cr3(build_cr3(mm->pgd, 0)); |
| 442 | 499 | ||
| 443 | /* Reinitialize tlbstate. */ | 500 | /* Reinitialize tlbstate. */ |
| 444 | this_cpu_write(cpu_tlbstate.last_ctx_id, mm->context.ctx_id); | 501 | this_cpu_write(cpu_tlbstate.last_user_mm_ibpb, LAST_USER_MM_IBPB); |
| 445 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); | 502 | this_cpu_write(cpu_tlbstate.loaded_mm_asid, 0); |
| 446 | this_cpu_write(cpu_tlbstate.next_asid, 1); | 503 | this_cpu_write(cpu_tlbstate.next_asid, 1); |
| 447 | this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); | 504 | this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, mm->context.ctx_id); |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index e996e8e744cb..750f46ad018a 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
| @@ -10,7 +10,6 @@ | |||
| 10 | #include <xen/xen.h> | 10 | #include <xen/xen.h> |
| 11 | #include <xen/features.h> | 11 | #include <xen/features.h> |
| 12 | #include <xen/page.h> | 12 | #include <xen/page.h> |
| 13 | #include <xen/interface/memory.h> | ||
| 14 | 13 | ||
| 15 | #include <asm/xen/hypercall.h> | 14 | #include <asm/xen/hypercall.h> |
| 16 | #include <asm/xen/hypervisor.h> | 15 | #include <asm/xen/hypervisor.h> |
| @@ -346,80 +345,3 @@ void xen_arch_unregister_cpu(int num) | |||
| 346 | } | 345 | } |
| 347 | EXPORT_SYMBOL(xen_arch_unregister_cpu); | 346 | EXPORT_SYMBOL(xen_arch_unregister_cpu); |
| 348 | #endif | 347 | #endif |
| 349 | |||
| 350 | #ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG | ||
| 351 | void __init arch_xen_balloon_init(struct resource *hostmem_resource) | ||
| 352 | { | ||
| 353 | struct xen_memory_map memmap; | ||
| 354 | int rc; | ||
| 355 | unsigned int i, last_guest_ram; | ||
| 356 | phys_addr_t max_addr = PFN_PHYS(max_pfn); | ||
| 357 | struct e820_table *xen_e820_table; | ||
| 358 | const struct e820_entry *entry; | ||
| 359 | struct resource *res; | ||
| 360 | |||
| 361 | if (!xen_initial_domain()) | ||
| 362 | return; | ||
| 363 | |||
| 364 | xen_e820_table = kmalloc(sizeof(*xen_e820_table), GFP_KERNEL); | ||
| 365 | if (!xen_e820_table) | ||
| 366 | return; | ||
| 367 | |||
| 368 | memmap.nr_entries = ARRAY_SIZE(xen_e820_table->entries); | ||
| 369 | set_xen_guest_handle(memmap.buffer, xen_e820_table->entries); | ||
| 370 | rc = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap); | ||
| 371 | if (rc) { | ||
| 372 | pr_warn("%s: Can't read host e820 (%d)\n", __func__, rc); | ||
| 373 | goto out; | ||
| 374 | } | ||
| 375 | |||
| 376 | last_guest_ram = 0; | ||
| 377 | for (i = 0; i < memmap.nr_entries; i++) { | ||
| 378 | if (xen_e820_table->entries[i].addr >= max_addr) | ||
| 379 | break; | ||
| 380 | if (xen_e820_table->entries[i].type == E820_TYPE_RAM) | ||
| 381 | last_guest_ram = i; | ||
| 382 | } | ||
| 383 | |||
| 384 | entry = &xen_e820_table->entries[last_guest_ram]; | ||
| 385 | if (max_addr >= entry->addr + entry->size) | ||
| 386 | goto out; /* No unallocated host RAM. */ | ||
| 387 | |||
| 388 | hostmem_resource->start = max_addr; | ||
| 389 | hostmem_resource->end = entry->addr + entry->size; | ||
| 390 | |||
| 391 | /* | ||
| 392 | * Mark non-RAM regions between the end of dom0 RAM and end of host RAM | ||
| 393 | * as unavailable. The rest of that region can be used for hotplug-based | ||
| 394 | * ballooning. | ||
| 395 | */ | ||
| 396 | for (; i < memmap.nr_entries; i++) { | ||
| 397 | entry = &xen_e820_table->entries[i]; | ||
| 398 | |||
| 399 | if (entry->type == E820_TYPE_RAM) | ||
| 400 | continue; | ||
| 401 | |||
| 402 | if (entry->addr >= hostmem_resource->end) | ||
| 403 | break; | ||
| 404 | |||
| 405 | res = kzalloc(sizeof(*res), GFP_KERNEL); | ||
| 406 | if (!res) | ||
| 407 | goto out; | ||
| 408 | |||
| 409 | res->name = "Unavailable host RAM"; | ||
| 410 | res->start = entry->addr; | ||
| 411 | res->end = (entry->addr + entry->size < hostmem_resource->end) ? | ||
| 412 | entry->addr + entry->size : hostmem_resource->end; | ||
| 413 | rc = insert_resource(hostmem_resource, res); | ||
| 414 | if (rc) { | ||
| 415 | pr_warn("%s: Can't insert [%llx - %llx) (%d)\n", | ||
| 416 | __func__, res->start, res->end, rc); | ||
| 417 | kfree(res); | ||
| 418 | goto out; | ||
| 419 | } | ||
| 420 | } | ||
| 421 | |||
| 422 | out: | ||
| 423 | kfree(xen_e820_table); | ||
| 424 | } | ||
| 425 | #endif /* CONFIG_XEN_BALLOON_MEMORY_HOTPLUG */ | ||
diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index 0d7b3ae4960b..a5d7ed125337 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c | |||
| @@ -1905,7 +1905,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
| 1905 | init_top_pgt[0] = __pgd(0); | 1905 | init_top_pgt[0] = __pgd(0); |
| 1906 | 1906 | ||
| 1907 | /* Pre-constructed entries are in pfn, so convert to mfn */ | 1907 | /* Pre-constructed entries are in pfn, so convert to mfn */ |
| 1908 | /* L4[272] -> level3_ident_pgt */ | 1908 | /* L4[273] -> level3_ident_pgt */ |
| 1909 | /* L4[511] -> level3_kernel_pgt */ | 1909 | /* L4[511] -> level3_kernel_pgt */ |
| 1910 | convert_pfn_mfn(init_top_pgt); | 1910 | convert_pfn_mfn(init_top_pgt); |
| 1911 | 1911 | ||
| @@ -1925,8 +1925,8 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |||
| 1925 | addr[0] = (unsigned long)pgd; | 1925 | addr[0] = (unsigned long)pgd; |
| 1926 | addr[1] = (unsigned long)l3; | 1926 | addr[1] = (unsigned long)l3; |
| 1927 | addr[2] = (unsigned long)l2; | 1927 | addr[2] = (unsigned long)l2; |
| 1928 | /* Graft it onto L4[272][0]. Note that we creating an aliasing problem: | 1928 | /* Graft it onto L4[273][0]. Note that we creating an aliasing problem: |
| 1929 | * Both L4[272][0] and L4[511][510] have entries that point to the same | 1929 | * Both L4[273][0] and L4[511][510] have entries that point to the same |
| 1930 | * L2 (PMD) tables. Meaning that if you modify it in __va space | 1930 | * L2 (PMD) tables. Meaning that if you modify it in __va space |
| 1931 | * it will be also modified in the __ka space! (But if you just | 1931 | * it will be also modified in the __ka space! (But if you just |
| 1932 | * modify the PMD table to point to other PTE's or none, then you | 1932 | * modify the PMD table to point to other PTE's or none, then you |
diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c index 2bce7958ce8b..0766a08bdf45 100644 --- a/arch/x86/xen/multicalls.c +++ b/arch/x86/xen/multicalls.c | |||
| @@ -69,6 +69,11 @@ void xen_mc_flush(void) | |||
| 69 | 69 | ||
| 70 | trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx); | 70 | trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx); |
| 71 | 71 | ||
| 72 | #if MC_DEBUG | ||
| 73 | memcpy(b->debug, b->entries, | ||
| 74 | b->mcidx * sizeof(struct multicall_entry)); | ||
| 75 | #endif | ||
| 76 | |||
| 72 | switch (b->mcidx) { | 77 | switch (b->mcidx) { |
| 73 | case 0: | 78 | case 0: |
| 74 | /* no-op */ | 79 | /* no-op */ |
| @@ -87,32 +92,34 @@ void xen_mc_flush(void) | |||
| 87 | break; | 92 | break; |
| 88 | 93 | ||
| 89 | default: | 94 | default: |
| 90 | #if MC_DEBUG | ||
| 91 | memcpy(b->debug, b->entries, | ||
| 92 | b->mcidx * sizeof(struct multicall_entry)); | ||
| 93 | #endif | ||
| 94 | |||
| 95 | if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0) | 95 | if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0) |
| 96 | BUG(); | 96 | BUG(); |
| 97 | for (i = 0; i < b->mcidx; i++) | 97 | for (i = 0; i < b->mcidx; i++) |
| 98 | if (b->entries[i].result < 0) | 98 | if (b->entries[i].result < 0) |
| 99 | ret++; | 99 | ret++; |
| 100 | } | ||
| 100 | 101 | ||
| 102 | if (WARN_ON(ret)) { | ||
| 103 | pr_err("%d of %d multicall(s) failed: cpu %d\n", | ||
| 104 | ret, b->mcidx, smp_processor_id()); | ||
| 105 | for (i = 0; i < b->mcidx; i++) { | ||
| 106 | if (b->entries[i].result < 0) { | ||
| 101 | #if MC_DEBUG | 107 | #if MC_DEBUG |
| 102 | if (ret) { | 108 | pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pF\n", |
| 103 | printk(KERN_ERR "%d multicall(s) failed: cpu %d\n", | 109 | i + 1, |
| 104 | ret, smp_processor_id()); | ||
| 105 | dump_stack(); | ||
| 106 | for (i = 0; i < b->mcidx; i++) { | ||
| 107 | printk(KERN_DEBUG " call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n", | ||
| 108 | i+1, b->mcidx, | ||
| 109 | b->debug[i].op, | 110 | b->debug[i].op, |
| 110 | b->debug[i].args[0], | 111 | b->debug[i].args[0], |
| 111 | b->entries[i].result, | 112 | b->entries[i].result, |
| 112 | b->caller[i]); | 113 | b->caller[i]); |
| 114 | #else | ||
| 115 | pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\n", | ||
| 116 | i + 1, | ||
| 117 | b->entries[i].op, | ||
| 118 | b->entries[i].args[0], | ||
| 119 | b->entries[i].result); | ||
| 120 | #endif | ||
| 113 | } | 121 | } |
| 114 | } | 122 | } |
| 115 | #endif | ||
| 116 | } | 123 | } |
| 117 | 124 | ||
| 118 | b->mcidx = 0; | 125 | b->mcidx = 0; |
| @@ -126,8 +133,6 @@ void xen_mc_flush(void) | |||
| 126 | b->cbidx = 0; | 133 | b->cbidx = 0; |
| 127 | 134 | ||
| 128 | local_irq_restore(flags); | 135 | local_irq_restore(flags); |
| 129 | |||
| 130 | WARN_ON(ret); | ||
| 131 | } | 136 | } |
| 132 | 137 | ||
| 133 | struct multicall_space __xen_mc_entry(size_t args) | 138 | struct multicall_space __xen_mc_entry(size_t args) |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index b06731705529..055e37e43541 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
| @@ -656,8 +656,7 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
| 656 | 656 | ||
| 657 | /* | 657 | /* |
| 658 | * The interface requires atomic updates on p2m elements. | 658 | * The interface requires atomic updates on p2m elements. |
| 659 | * xen_safe_write_ulong() is using __put_user which does an atomic | 659 | * xen_safe_write_ulong() is using an atomic store via asm(). |
| 660 | * store via asm(). | ||
| 661 | */ | 660 | */ |
| 662 | if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn))) | 661 | if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn))) |
| 663 | return true; | 662 | return true; |
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 1163e33121fb..075ed47993bb 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
| @@ -808,6 +808,7 @@ char * __init xen_memory_setup(void) | |||
| 808 | addr = xen_e820_table.entries[0].addr; | 808 | addr = xen_e820_table.entries[0].addr; |
| 809 | size = xen_e820_table.entries[0].size; | 809 | size = xen_e820_table.entries[0].size; |
| 810 | while (i < xen_e820_table.nr_entries) { | 810 | while (i < xen_e820_table.nr_entries) { |
| 811 | bool discard = false; | ||
| 811 | 812 | ||
| 812 | chunk_size = size; | 813 | chunk_size = size; |
| 813 | type = xen_e820_table.entries[i].type; | 814 | type = xen_e820_table.entries[i].type; |
| @@ -823,10 +824,11 @@ char * __init xen_memory_setup(void) | |||
| 823 | xen_add_extra_mem(pfn_s, n_pfns); | 824 | xen_add_extra_mem(pfn_s, n_pfns); |
| 824 | xen_max_p2m_pfn = pfn_s + n_pfns; | 825 | xen_max_p2m_pfn = pfn_s + n_pfns; |
| 825 | } else | 826 | } else |
| 826 | type = E820_TYPE_UNUSABLE; | 827 | discard = true; |
| 827 | } | 828 | } |
| 828 | 829 | ||
| 829 | xen_align_and_add_e820_region(addr, chunk_size, type); | 830 | if (!discard) |
| 831 | xen_align_and_add_e820_region(addr, chunk_size, type); | ||
| 830 | 832 | ||
| 831 | addr += chunk_size; | 833 | addr += chunk_size; |
| 832 | size -= chunk_size; | 834 | size -= chunk_size; |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 441c88262169..3776122c87cc 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
| @@ -3,24 +3,21 @@ | |||
| 3 | * Split spinlock implementation out into its own file, so it can be | 3 | * Split spinlock implementation out into its own file, so it can be |
| 4 | * compiled in a FTRACE-compatible way. | 4 | * compiled in a FTRACE-compatible way. |
| 5 | */ | 5 | */ |
| 6 | #include <linux/kernel_stat.h> | 6 | #include <linux/kernel.h> |
| 7 | #include <linux/spinlock.h> | 7 | #include <linux/spinlock.h> |
| 8 | #include <linux/debugfs.h> | ||
| 9 | #include <linux/log2.h> | ||
| 10 | #include <linux/gfp.h> | ||
| 11 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
| 9 | #include <linux/atomic.h> | ||
| 12 | 10 | ||
| 13 | #include <asm/paravirt.h> | 11 | #include <asm/paravirt.h> |
| 14 | #include <asm/qspinlock.h> | 12 | #include <asm/qspinlock.h> |
| 15 | 13 | ||
| 16 | #include <xen/interface/xen.h> | ||
| 17 | #include <xen/events.h> | 14 | #include <xen/events.h> |
| 18 | 15 | ||
| 19 | #include "xen-ops.h" | 16 | #include "xen-ops.h" |
| 20 | #include "debugfs.h" | ||
| 21 | 17 | ||
| 22 | static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; | 18 | static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; |
| 23 | static DEFINE_PER_CPU(char *, irq_name); | 19 | static DEFINE_PER_CPU(char *, irq_name); |
| 20 | static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest); | ||
| 24 | static bool xen_pvspin = true; | 21 | static bool xen_pvspin = true; |
| 25 | 22 | ||
| 26 | static void xen_qlock_kick(int cpu) | 23 | static void xen_qlock_kick(int cpu) |
| @@ -39,25 +36,25 @@ static void xen_qlock_kick(int cpu) | |||
| 39 | */ | 36 | */ |
| 40 | static void xen_qlock_wait(u8 *byte, u8 val) | 37 | static void xen_qlock_wait(u8 *byte, u8 val) |
| 41 | { | 38 | { |
| 42 | unsigned long flags; | ||
| 43 | int irq = __this_cpu_read(lock_kicker_irq); | 39 | int irq = __this_cpu_read(lock_kicker_irq); |
| 40 | atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest); | ||
| 44 | 41 | ||
| 45 | /* If kicker interrupts not initialized yet, just spin */ | 42 | /* If kicker interrupts not initialized yet, just spin */ |
| 46 | if (irq == -1 || in_nmi()) | 43 | if (irq == -1 || in_nmi()) |
| 47 | return; | 44 | return; |
| 48 | 45 | ||
| 49 | /* Guard against reentry. */ | 46 | /* Detect reentry. */ |
| 50 | local_irq_save(flags); | 47 | atomic_inc(nest_cnt); |
| 51 | 48 | ||
| 52 | /* If irq pending already clear it. */ | 49 | /* If irq pending already and no nested call clear it. */ |
| 53 | if (xen_test_irq_pending(irq)) { | 50 | if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) { |
| 54 | xen_clear_irq_pending(irq); | 51 | xen_clear_irq_pending(irq); |
| 55 | } else if (READ_ONCE(*byte) == val) { | 52 | } else if (READ_ONCE(*byte) == val) { |
| 56 | /* Block until irq becomes pending (or a spurious wakeup) */ | 53 | /* Block until irq becomes pending (or a spurious wakeup) */ |
| 57 | xen_poll_irq(irq); | 54 | xen_poll_irq(irq); |
| 58 | } | 55 | } |
| 59 | 56 | ||
| 60 | local_irq_restore(flags); | 57 | atomic_dec(nest_cnt); |
| 61 | } | 58 | } |
| 62 | 59 | ||
| 63 | static irqreturn_t dummy_handler(int irq, void *dev_id) | 60 | static irqreturn_t dummy_handler(int irq, void *dev_id) |
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index be9bfd9aa865..34a23016dd14 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h | |||
| @@ -23,7 +23,11 @@ | |||
| 23 | # error Linux requires the Xtensa Windowed Registers Option. | 23 | # error Linux requires the Xtensa Windowed Registers Option. |
| 24 | #endif | 24 | #endif |
| 25 | 25 | ||
| 26 | #define ARCH_SLAB_MINALIGN XCHAL_DATA_WIDTH | 26 | /* Xtensa ABI requires stack alignment to be at least 16 */ |
| 27 | |||
| 28 | #define STACK_ALIGN (XCHAL_DATA_WIDTH > 16 ? XCHAL_DATA_WIDTH : 16) | ||
| 29 | |||
| 30 | #define ARCH_SLAB_MINALIGN STACK_ALIGN | ||
| 27 | 31 | ||
| 28 | /* | 32 | /* |
| 29 | * User space process size: 1 GB. | 33 | * User space process size: 1 GB. |
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index 67904f55f188..120dd746a147 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c | |||
| @@ -94,14 +94,14 @@ int main(void) | |||
| 94 | DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); | 94 | DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp)); |
| 95 | DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable)); | 95 | DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable)); |
| 96 | #if XTENSA_HAVE_COPROCESSORS | 96 | #if XTENSA_HAVE_COPROCESSORS |
| 97 | DEFINE(THREAD_XTREGS_CP0, offsetof (struct thread_info, xtregs_cp)); | 97 | DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0)); |
| 98 | DEFINE(THREAD_XTREGS_CP1, offsetof (struct thread_info, xtregs_cp)); | 98 | DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1)); |
| 99 | DEFINE(THREAD_XTREGS_CP2, offsetof (struct thread_info, xtregs_cp)); | 99 | DEFINE(THREAD_XTREGS_CP2, offsetof(struct thread_info, xtregs_cp.cp2)); |
| 100 | DEFINE(THREAD_XTREGS_CP3, offsetof (struct thread_info, xtregs_cp)); | 100 | DEFINE(THREAD_XTREGS_CP3, offsetof(struct thread_info, xtregs_cp.cp3)); |
| 101 | DEFINE(THREAD_XTREGS_CP4, offsetof (struct thread_info, xtregs_cp)); | 101 | DEFINE(THREAD_XTREGS_CP4, offsetof(struct thread_info, xtregs_cp.cp4)); |
| 102 | DEFINE(THREAD_XTREGS_CP5, offsetof (struct thread_info, xtregs_cp)); | 102 | DEFINE(THREAD_XTREGS_CP5, offsetof(struct thread_info, xtregs_cp.cp5)); |
| 103 | DEFINE(THREAD_XTREGS_CP6, offsetof (struct thread_info, xtregs_cp)); | 103 | DEFINE(THREAD_XTREGS_CP6, offsetof(struct thread_info, xtregs_cp.cp6)); |
| 104 | DEFINE(THREAD_XTREGS_CP7, offsetof (struct thread_info, xtregs_cp)); | 104 | DEFINE(THREAD_XTREGS_CP7, offsetof(struct thread_info, xtregs_cp.cp7)); |
| 105 | #endif | 105 | #endif |
| 106 | DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user)); | 106 | DEFINE(THREAD_XTREGS_USER, offsetof (struct thread_info, xtregs_user)); |
| 107 | DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t)); | 107 | DEFINE(XTREGS_USER_SIZE, sizeof(xtregs_user_t)); |
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S index 2f76118ecf62..9053a5622d2c 100644 --- a/arch/xtensa/kernel/head.S +++ b/arch/xtensa/kernel/head.S | |||
| @@ -88,9 +88,12 @@ _SetupMMU: | |||
| 88 | initialize_mmu | 88 | initialize_mmu |
| 89 | #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY | 89 | #if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY |
| 90 | rsr a2, excsave1 | 90 | rsr a2, excsave1 |
| 91 | movi a3, 0x08000000 | 91 | movi a3, XCHAL_KSEG_PADDR |
| 92 | bltu a2, a3, 1f | ||
| 93 | sub a2, a2, a3 | ||
| 94 | movi a3, XCHAL_KSEG_SIZE | ||
| 92 | bgeu a2, a3, 1f | 95 | bgeu a2, a3, 1f |
| 93 | movi a3, 0xd0000000 | 96 | movi a3, XCHAL_KSEG_CACHED_VADDR |
| 94 | add a2, a2, a3 | 97 | add a2, a2, a3 |
| 95 | wsr a2, excsave1 | 98 | wsr a2, excsave1 |
| 96 | 1: | 99 | 1: |
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c index 483dcfb6e681..4bb68133a72a 100644 --- a/arch/xtensa/kernel/process.c +++ b/arch/xtensa/kernel/process.c | |||
| @@ -94,18 +94,21 @@ void coprocessor_release_all(struct thread_info *ti) | |||
| 94 | 94 | ||
| 95 | void coprocessor_flush_all(struct thread_info *ti) | 95 | void coprocessor_flush_all(struct thread_info *ti) |
| 96 | { | 96 | { |
| 97 | unsigned long cpenable; | 97 | unsigned long cpenable, old_cpenable; |
| 98 | int i; | 98 | int i; |
| 99 | 99 | ||
| 100 | preempt_disable(); | 100 | preempt_disable(); |
| 101 | 101 | ||
| 102 | RSR_CPENABLE(old_cpenable); | ||
| 102 | cpenable = ti->cpenable; | 103 | cpenable = ti->cpenable; |
| 104 | WSR_CPENABLE(cpenable); | ||
| 103 | 105 | ||
| 104 | for (i = 0; i < XCHAL_CP_MAX; i++) { | 106 | for (i = 0; i < XCHAL_CP_MAX; i++) { |
| 105 | if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti) | 107 | if ((cpenable & 1) != 0 && coprocessor_owner[i] == ti) |
| 106 | coprocessor_flush(ti, i); | 108 | coprocessor_flush(ti, i); |
| 107 | cpenable >>= 1; | 109 | cpenable >>= 1; |
| 108 | } | 110 | } |
| 111 | WSR_CPENABLE(old_cpenable); | ||
| 109 | 112 | ||
| 110 | preempt_enable(); | 113 | preempt_enable(); |
| 111 | } | 114 | } |
diff --git a/arch/xtensa/kernel/ptrace.c b/arch/xtensa/kernel/ptrace.c index c0845cb1cbb9..d9541be0605a 100644 --- a/arch/xtensa/kernel/ptrace.c +++ b/arch/xtensa/kernel/ptrace.c | |||
| @@ -127,12 +127,37 @@ static int ptrace_setregs(struct task_struct *child, void __user *uregs) | |||
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | 129 | ||
| 130 | #if XTENSA_HAVE_COPROCESSORS | ||
| 131 | #define CP_OFFSETS(cp) \ | ||
| 132 | { \ | ||
| 133 | .elf_xtregs_offset = offsetof(elf_xtregs_t, cp), \ | ||
| 134 | .ti_offset = offsetof(struct thread_info, xtregs_cp.cp), \ | ||
| 135 | .sz = sizeof(xtregs_ ## cp ## _t), \ | ||
| 136 | } | ||
| 137 | |||
| 138 | static const struct { | ||
| 139 | size_t elf_xtregs_offset; | ||
| 140 | size_t ti_offset; | ||
| 141 | size_t sz; | ||
| 142 | } cp_offsets[] = { | ||
| 143 | CP_OFFSETS(cp0), | ||
| 144 | CP_OFFSETS(cp1), | ||
| 145 | CP_OFFSETS(cp2), | ||
| 146 | CP_OFFSETS(cp3), | ||
| 147 | CP_OFFSETS(cp4), | ||
| 148 | CP_OFFSETS(cp5), | ||
| 149 | CP_OFFSETS(cp6), | ||
| 150 | CP_OFFSETS(cp7), | ||
| 151 | }; | ||
| 152 | #endif | ||
| 153 | |||
| 130 | static int ptrace_getxregs(struct task_struct *child, void __user *uregs) | 154 | static int ptrace_getxregs(struct task_struct *child, void __user *uregs) |
| 131 | { | 155 | { |
| 132 | struct pt_regs *regs = task_pt_regs(child); | 156 | struct pt_regs *regs = task_pt_regs(child); |
| 133 | struct thread_info *ti = task_thread_info(child); | 157 | struct thread_info *ti = task_thread_info(child); |
| 134 | elf_xtregs_t __user *xtregs = uregs; | 158 | elf_xtregs_t __user *xtregs = uregs; |
| 135 | int ret = 0; | 159 | int ret = 0; |
| 160 | int i __maybe_unused; | ||
| 136 | 161 | ||
| 137 | if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t))) | 162 | if (!access_ok(VERIFY_WRITE, uregs, sizeof(elf_xtregs_t))) |
| 138 | return -EIO; | 163 | return -EIO; |
| @@ -140,8 +165,13 @@ static int ptrace_getxregs(struct task_struct *child, void __user *uregs) | |||
| 140 | #if XTENSA_HAVE_COPROCESSORS | 165 | #if XTENSA_HAVE_COPROCESSORS |
| 141 | /* Flush all coprocessor registers to memory. */ | 166 | /* Flush all coprocessor registers to memory. */ |
| 142 | coprocessor_flush_all(ti); | 167 | coprocessor_flush_all(ti); |
| 143 | ret |= __copy_to_user(&xtregs->cp0, &ti->xtregs_cp, | 168 | |
| 144 | sizeof(xtregs_coprocessor_t)); | 169 | for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i) |
| 170 | ret |= __copy_to_user((char __user *)xtregs + | ||
| 171 | cp_offsets[i].elf_xtregs_offset, | ||
| 172 | (const char *)ti + | ||
| 173 | cp_offsets[i].ti_offset, | ||
| 174 | cp_offsets[i].sz); | ||
| 145 | #endif | 175 | #endif |
| 146 | ret |= __copy_to_user(&xtregs->opt, ®s->xtregs_opt, | 176 | ret |= __copy_to_user(&xtregs->opt, ®s->xtregs_opt, |
| 147 | sizeof(xtregs->opt)); | 177 | sizeof(xtregs->opt)); |
| @@ -157,6 +187,7 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs) | |||
| 157 | struct pt_regs *regs = task_pt_regs(child); | 187 | struct pt_regs *regs = task_pt_regs(child); |
| 158 | elf_xtregs_t *xtregs = uregs; | 188 | elf_xtregs_t *xtregs = uregs; |
| 159 | int ret = 0; | 189 | int ret = 0; |
| 190 | int i __maybe_unused; | ||
| 160 | 191 | ||
| 161 | if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t))) | 192 | if (!access_ok(VERIFY_READ, uregs, sizeof(elf_xtregs_t))) |
| 162 | return -EFAULT; | 193 | return -EFAULT; |
| @@ -166,8 +197,11 @@ static int ptrace_setxregs(struct task_struct *child, void __user *uregs) | |||
| 166 | coprocessor_flush_all(ti); | 197 | coprocessor_flush_all(ti); |
| 167 | coprocessor_release_all(ti); | 198 | coprocessor_release_all(ti); |
| 168 | 199 | ||
| 169 | ret |= __copy_from_user(&ti->xtregs_cp, &xtregs->cp0, | 200 | for (i = 0; i < ARRAY_SIZE(cp_offsets); ++i) |
| 170 | sizeof(xtregs_coprocessor_t)); | 201 | ret |= __copy_from_user((char *)ti + cp_offsets[i].ti_offset, |
| 202 | (const char __user *)xtregs + | ||
| 203 | cp_offsets[i].elf_xtregs_offset, | ||
| 204 | cp_offsets[i].sz); | ||
| 171 | #endif | 205 | #endif |
| 172 | ret |= __copy_from_user(®s->xtregs_opt, &xtregs->opt, | 206 | ret |= __copy_from_user(®s->xtregs_opt, &xtregs->opt, |
| 173 | sizeof(xtregs->opt)); | 207 | sizeof(xtregs->opt)); |
