Diffstat (limited to 'arch')
316 files changed, 8095 insertions, 4033 deletions
diff --git a/arch/alpha/include/asm/processor.h b/arch/alpha/include/asm/processor.h
index 6cb7fe85c4b5..b4cf03690394 100644
--- a/arch/alpha/include/asm/processor.h
+++ b/arch/alpha/include/asm/processor.h
@@ -57,6 +57,7 @@ unsigned long get_wchan(struct task_struct *p);
 	((tsk) == current ? rdusp() : task_thread_info(tsk)->pcb.usp)
 
 #define cpu_relax()	barrier()
+#define cpu_relax_lowlatency() cpu_relax()
 
 #define ARCH_HAS_PREFETCH
 #define ARCH_HAS_PREFETCHW
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index d99f9b37cd15..82588f3ba77f 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -62,6 +62,8 @@ unsigned long thread_saved_pc(struct task_struct *t);
 #define cpu_relax()	do { } while (0)
 #endif
 
+#define cpu_relax_lowlatency() cpu_relax()
+
 #define copy_segments(tsk, mm)      do { } while (0)
 #define release_segments(mm)        do { } while (0)
 
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 63177e4cb66d..b9a5685a990e 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -99,10 +99,6 @@ static int arc_pmu_event_init(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int ret;
 
-	/* ARC 700 PMU does not support sampling events */
-	if (is_sampling_event(event))
-		return -ENOENT;
-
 	switch (event->attr.type) {
 	case PERF_TYPE_HARDWARE:
 		if (event->attr.config >= PERF_COUNT_HW_MAX)
@@ -298,6 +294,9 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 		.read		= arc_pmu_read,
 	};
 
+	/* ARC 700 PMU does not support sampling events */
+	arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
+
 	ret = perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
 
 	return ret;
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 88acf8bc1490..290f02ee0157 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -313,7 +313,7 @@ config ARCH_MULTIPLATFORM
 config ARCH_INTEGRATOR
 	bool "ARM Ltd. Integrator family"
 	select ARM_AMBA
-	select ARM_PATCH_PHYS_VIRT
+	select ARM_PATCH_PHYS_VIRT if MMU
 	select AUTO_ZRELADDR
 	select COMMON_CLK
 	select COMMON_CLK_VERSATILE
@@ -659,7 +659,7 @@ config ARCH_MSM
 config ARCH_SHMOBILE_LEGACY
 	bool "Renesas ARM SoCs (non-multiplatform)"
 	select ARCH_SHMOBILE
-	select ARM_PATCH_PHYS_VIRT
+	select ARM_PATCH_PHYS_VIRT if MMU
 	select CLKDEV_LOOKUP
 	select GENERIC_CLOCKEVENTS
 	select HAVE_ARM_SCU if SMP
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index ee3001f38821..97ea7a9b1f62 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -31,6 +31,16 @@
 		pinctrl2 = &pinctrl_2;
 	};
 
+	pmu_system_controller: system-controller@10020000 {
+		clock-names = "clkout0", "clkout1", "clkout2", "clkout3",
+				"clkout4", "clkout8", "clkout9";
+		clocks = <&clock CLK_OUT_DMC>, <&clock CLK_OUT_TOP>,
+			<&clock CLK_OUT_LEFTBUS>, <&clock CLK_OUT_RIGHTBUS>,
+			<&clock CLK_OUT_CPU>, <&clock CLK_XXTI>,
+			<&clock CLK_XUSBXTI>;
+		#clock-cells = <1>;
+	};
+
 	sysram@02020000 {
 		compatible = "mmio-sram";
 		reg = <0x02020000 0x20000>;
diff --git a/arch/arm/boot/dts/exynos4x12.dtsi b/arch/arm/boot/dts/exynos4x12.dtsi
index c5a943df1cd7..de1f9c77b589 100644
--- a/arch/arm/boot/dts/exynos4x12.dtsi
+++ b/arch/arm/boot/dts/exynos4x12.dtsi
@@ -139,6 +139,13 @@
 
 	pmu_system_controller: system-controller@10020000 {
 		compatible = "samsung,exynos4212-pmu", "syscon";
+		clock-names = "clkout0", "clkout1", "clkout2", "clkout3",
+				"clkout4", "clkout8", "clkout9";
+		clocks = <&clock CLK_OUT_DMC>, <&clock CLK_OUT_TOP>,
+			<&clock CLK_OUT_LEFTBUS>, <&clock CLK_OUT_RIGHTBUS>,
+			<&clock CLK_OUT_CPU>, <&clock CLK_XXTI>,
+			<&clock CLK_XUSBXTI>;
+		#clock-cells = <1>;
 	};
 
 	g2d@10800000 {
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 834fb5a5306f..492e1eff37bd 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -191,6 +191,9 @@
 	pmu_system_controller: system-controller@10040000 {
 		compatible = "samsung,exynos5250-pmu", "syscon";
 		reg = <0x10040000 0x5000>;
+		clock-names = "clkout16";
+		clocks = <&clock CLK_FIN_PLL>;
+		#clock-cells = <1>;
 	};
 
 	sysreg_system_controller: syscon@10050000 {
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 15957227ffda..a40a5c2b5a4f 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -727,6 +727,9 @@
 	pmu_system_controller: system-controller@10040000 {
 		compatible = "samsung,exynos5420-pmu", "syscon";
 		reg = <0x10040000 0x5000>;
+		clock-names = "clkout16";
+		clocks = <&clock CLK_FIN_PLL>;
+		#clock-cells = <1>;
 	};
 
 	sysreg_system_controller: syscon@10050000 {
diff --git a/arch/arm/boot/dts/hi3620.dtsi b/arch/arm/boot/dts/hi3620.dtsi
index ab1116d086be..83a5b8685bd9 100644
--- a/arch/arm/boot/dts/hi3620.dtsi
+++ b/arch/arm/boot/dts/hi3620.dtsi
@@ -73,7 +73,7 @@
 
 		L2: l2-cache {
 			compatible = "arm,pl310-cache";
-			reg = <0xfc10000 0x100000>;
+			reg = <0x100000 0x100000>;
 			interrupts = <0 15 4>;
 			cache-unified;
 			cache-level = <2>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 1fe45d1f75ec..b15f1a77d684 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -353,7 +353,7 @@
 		};
 
 		twl_power: power {
-			compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
+			compatible = "ti,twl4030-power-n900";
 			ti,use_poweroff;
 		};
 	};
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 8d7ffaeff6e0..79f68acfd5d4 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -540,9 +540,9 @@
 			#clock-cells = <0>;
 			clock-output-names = "sd1";
 		};
-		sd2_clk: sd3_clk@e615007c {
+		sd2_clk: sd3_clk@e615026c {
 			compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
-			reg = <0 0xe615007c 0 4>;
+			reg = <0 0xe615026c 0 4>;
 			clocks = <&pll1_div2_clk>;
 			#clock-cells = <0>;
 			clock-output-names = "sd2";
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index f557feb997f4..90d8b6c7a205 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -4,7 +4,7 @@
  */
 
 /dts-v1/;
-/include/ "ste-nomadik-stn8815.dtsi"
+#include "ste-nomadik-stn8815.dtsi"
 
 / {
 	model = "Calao Systems USB-S8815";
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index d316c955bd5f..dbcf521b017f 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -1,7 +1,9 @@
 /*
  * Device Tree for the ST-Ericsson Nomadik 8815 STn8815 SoC
  */
-/include/ "skeleton.dtsi"
+
+#include <dt-bindings/gpio/gpio.h>
+#include "skeleton.dtsi"
 
 / {
 	#address-cells = <1>;
@@ -842,8 +844,7 @@
 			bus-width = <4>;
 			cap-mmc-highspeed;
 			cap-sd-highspeed;
-			cd-gpios = <&gpio3 15 0x1>;
-			cd-inverted;
+			cd-gpios = <&gpio3 15 GPIO_ACTIVE_LOW>;
 			pinctrl-names = "default";
 			pinctrl-0 = <&mmcsd_default_mux>, <&mmcsd_default_mode>;
 			vmmc-supply = <&vmmc_regulator>;
diff --git a/arch/arm/crypto/aesbs-glue.c b/arch/arm/crypto/aesbs-glue.c
index 4522366da759..15468fbbdea3 100644
--- a/arch/arm/crypto/aesbs-glue.c
+++ b/arch/arm/crypto/aesbs-glue.c
@@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
 				dst += AES_BLOCK_SIZE;
 			} while (--blocks);
 		}
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
@@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
 			bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 					  walk.nbytes, &ctx->dec, walk.iv);
 			kernel_neon_end();
-			err = blkcipher_walk_done(desc, &walk, 0);
+			err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 		}
 		while (walk.nbytes) {
 			u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
 				dst += AES_BLOCK_SIZE;
 				src += AES_BLOCK_SIZE;
 			} while (--blocks);
-			err = blkcipher_walk_done(desc, &walk, 0);
+			err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 		}
 	return err;
 }
@@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
 		bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->enc, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
@@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
 		bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
 				  walk.nbytes, &ctx->dec, walk.iv);
 		kernel_neon_end();
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	return err;
 }
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 060a75e99263..0406cb3f1af7 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -50,6 +50,7 @@ struct machine_desc {
 	struct smp_operations	*smp;		/* SMP operations	*/
 	bool			(*smp_init)(void);
 	void			(*fixup)(struct tag *, char **);
+	void			(*dt_fixup)(void);
 	void			(*init_meminfo)(void);
 	void			(*reserve)(void);/* reserve mem blocks	*/
 	void			(*map_io)(void);/* IO mapping function	*/
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index c3d5fc124a05..8a1e8e995dae 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -82,6 +82,8 @@ unsigned long get_wchan(struct task_struct *p);
 #define cpu_relax()	barrier()
 #endif
 
+#define cpu_relax_lowlatency() cpu_relax()
+
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
diff --git a/arch/arm/kernel/devtree.c b/arch/arm/kernel/devtree.c
index e94a157ddff1..11c54de9f8cf 100644
--- a/arch/arm/kernel/devtree.c
+++ b/arch/arm/kernel/devtree.c
@@ -212,7 +212,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
 	mdesc_best = &__mach_desc_GENERIC_DT;
 #endif
 
-	if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys)))
+	if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
 		return NULL;
 
 	mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
@@ -237,6 +237,12 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
 		dump_machine_table(); /* does not return */
 	}
 
+	/* We really don't want to do this, but sometimes firmware provides buggy data */
+	if (mdesc->dt_fixup)
+		mdesc->dt_fixup();
+
+	early_init_dt_scan_nodes();
+
 	/* Change machine number to match the mdesc we're using */
 	__machine_arch_type = mdesc->nr;
 
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index a5599cfc43cb..2b32978ae905 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -94,13 +94,19 @@ ENTRY(iwmmxt_task_enable)
 
 	mrc	p15, 0, r2, c2, c0, 0
 	mov	r2, r2				@ cpwait
+	bl	concan_save
 
-	teq	r1, #0				@ test for last ownership
-	mov	lr, r9				@ normal exit from exception
-	beq	concan_load			@ no owner, skip save
+#ifdef CONFIG_PREEMPT_COUNT
+	get_thread_info r10
+#endif
+4:	dec_preempt_count r10, r3
+	mov	pc, r9				@ normal exit from exception
 
 concan_save:
 
+	teq	r1, #0				@ test for last ownership
+	beq	concan_load			@ no owner, skip save
+
 	tmrc	r2, wCon
 
 	@ CUP? wCx
@@ -138,7 +144,7 @@ concan_dump:
 	wstrd	wR15, [r1, #MMX_WR15]
 
 2:	teq	r0, #0				@ anything to load?
-	beq	3f
+	moveq	pc, lr				@ if not, return
 
 concan_load:
 
@@ -171,14 +177,9 @@ concan_load:
 	@ clear CUP/MUP (only if r1 != 0)
 	teq	r1, #0
 	mov	r2, #0
-	beq	3f
-	tmcr	wCon, r2
+	moveq	pc, lr
 
-3:
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info r10
-#endif
-4:	dec_preempt_count r10, r3
+	tmcr	wCon, r2
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 778c2f7024ff..a74b53c1b7df 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -160,12 +160,16 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
 static struct undef_hook kgdb_brkpt_hook = {
 	.instr_mask		= 0xffffffff,
 	.instr_val		= KGDB_BREAKINST,
+	.cpsr_mask		= MODE_MASK,
+	.cpsr_val		= SVC_MODE,
 	.fn			= kgdb_brk_fn
 };
 
 static struct undef_hook kgdb_compiled_brkpt_hook = {
 	.instr_mask		= 0xffffffff,
 	.instr_val		= KGDB_COMPILED_BREAK,
+	.cpsr_mask		= MODE_MASK,
+	.cpsr_val		= SVC_MODE,
 	.fn			= kgdb_compiled_brk_fn
 };
 
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index 46d893fcbe85..66c9b9614f3c 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -335,6 +335,15 @@ static void __init exynos_reserve(void)
 #endif
 }
 
+static void __init exynos_dt_fixup(void)
+{
+	/*
+	 * Some versions of uboot pass garbage entries in the memory node,
+	 * use the old CONFIG_ARM_NR_BANKS
+	 */
+	of_fdt_limit_memory(8);
+}
+
 DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
 	/* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
 	/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
@@ -348,4 +357,5 @@ DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
 	.dt_compat	= exynos_dt_compat,
 	.restart	= exynos_restart,
 	.reserve	= exynos_reserve,
+	.dt_fixup	= exynos_dt_fixup,
 MACHINE_END
diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c
index 17cd39360afe..93914d220069 100644
--- a/arch/arm/mach-omap2/gpmc-nand.c
+++ b/arch/arm/mach-omap2/gpmc-nand.c
@@ -50,6 +50,16 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
 	    soc_is_omap54xx() || soc_is_dra7xx())
 		return 1;
 
+	if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
+	    ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
+		if (cpu_is_omap24xx())
+			return 0;
+		else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
+			return 0;
+		else
+			return 1;
+	}
+
 	/* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
 	 * which require H/W based ECC error detection */
 	if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
@@ -57,14 +67,6 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
 	    (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
 		return 0;
 
-	/*
-	 * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
-	 * and AM33xx derivates. Other chips may be added if confirmed to work.
-	 */
-	if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
-	    (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
-		return 0;
-
 	/* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
 	if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
 		return 1;
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 539e8106eb96..a0fe747634c1 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -168,6 +168,10 @@ static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
 		smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
 		break;
 
+	case L310_POWER_CTRL:
+		pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
+		return;
+
 	default:
 		WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
 		return;
diff --git a/arch/arm/mach-rockchip/Kconfig b/arch/arm/mach-rockchip/Kconfig
index 1caee6d548b8..e4564c259ed1 100644
--- a/arch/arm/mach-rockchip/Kconfig
+++ b/arch/arm/mach-rockchip/Kconfig
@@ -2,6 +2,7 @@ config ARCH_ROCKCHIP
 	bool "Rockchip RK2928 and RK3xxx SOCs" if ARCH_MULTI_V7
 	select PINCTRL
 	select PINCTRL_ROCKCHIP
+	select ARCH_HAS_RESET_CONTROLLER
 	select ARCH_REQUIRE_GPIOLIB
 	select ARM_GIC
 	select CACHE_L2X0
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4c88935654ca..1f88db06b133 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -461,12 +461,21 @@ void __init dma_contiguous_remap(void)
 		map.type = MT_MEMORY_DMA_READY;
 
 		/*
-		 * Clear previous low-memory mapping
+		 * Clear previous low-memory mapping to ensure that the
+		 * TLB does not see any conflicting entries, then flush
+		 * the TLB of the old entries before creating new mappings.
+		 *
+		 * This ensures that any speculatively loaded TLB entries
+		 * (even though they may be rare) can not cause any problems,
+		 * and ensures that this code is architecturally compliant.
 		 */
 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
 		     addr += PMD_SIZE)
 			pmd_clear(pmd_off_k(addr));
 
+		flush_tlb_kernel_range(__phys_to_virt(start),
+				       __phys_to_virt(end));
+
 		iotable_init(&map, 1);
 	}
 }
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 8e0e52eb76b5..c447ec70e868 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -9,6 +9,11 @@
 #include <asm/sections.h>
 #include <asm/system_info.h>
 
+/*
+ * Note: accesses outside of the kernel image and the identity map area
+ * are not supported on any CPU using the idmap tables as its current
+ * page tables.
+ */
 pgd_t *idmap_pgd;
 phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
 
@@ -25,6 +30,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 			pr_warning("Failed to allocate identity pmd.\n");
 			return;
 		}
+		/*
+		 * Copy the original PMD to ensure that the PMD entries for
+		 * the kernel image are preserved.
+		 */
+		if (!pud_none(*pud))
+			memcpy(pmd, pmd_offset(pud, 0),
+			       PTRS_PER_PMD * sizeof(pmd_t));
 		pud_populate(&init_mm, pud, pmd);
 		pmd += pmd_index(addr);
 	} else
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ab14b79b03f0..6e3ba8d112a2 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1406,8 +1406,8 @@ void __init early_paging_init(const struct machine_desc *mdesc,
 		return;
 
 	/* remap kernel code and data */
-	map_start = init_mm.start_code;
-	map_end   = init_mm.brk;
+	map_start = init_mm.start_code & PMD_MASK;
+	map_end   = ALIGN(init_mm.brk, PMD_SIZE);
 
 	/* get a handle on things... */
 	pgd0 = pgd_offset_k(0);
@@ -1442,7 +1442,7 @@
 	}
 
 	/* remap pmds for kernel mapping */
-	phys = __pa(map_start) & PMD_MASK;
+	phys = __pa(map_start);
 	do {
 		*pmdk++ = __pmd(phys | pmdprot);
 		phys += PMD_SIZE;
diff --git a/arch/arm/xen/grant-table.c b/arch/arm/xen/grant-table.c
index 859a9bb002d5..91cf08ba1e95 100644
--- a/arch/arm/xen/grant-table.c
+++ b/arch/arm/xen/grant-table.c
@@ -51,3 +51,8 @@ int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
 {
 	return -ENOSYS;
 }
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+	return 0;
+}
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 839f48c26ef0..b0f9c9db9590 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig | |||
@@ -1,7 +1,6 @@ | |||
1 | config ARM64 | 1 | config ARM64 |
2 | def_bool y | 2 | def_bool y |
3 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE | 3 | select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE |
4 | select ARCH_HAS_OPP | ||
5 | select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST | 4 | select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST |
6 | select ARCH_USE_CMPXCHG_LOCKREF | 5 | select ARCH_USE_CMPXCHG_LOCKREF |
7 | select ARCH_SUPPORTS_ATOMIC_RMW | 6 | select ARCH_SUPPORTS_ATOMIC_RMW |
@@ -11,6 +10,8 @@ config ARM64 | |||
11 | select ARM_AMBA | 10 | select ARM_AMBA |
12 | select ARM_ARCH_TIMER | 11 | select ARM_ARCH_TIMER |
13 | select ARM_GIC | 12 | select ARM_GIC |
13 | select AUDIT_ARCH_COMPAT_GENERIC | ||
14 | select ARM_GIC_V3 | ||
14 | select BUILDTIME_EXTABLE_SORT | 15 | select BUILDTIME_EXTABLE_SORT |
15 | select CLONE_BACKWARDS | 16 | select CLONE_BACKWARDS |
16 | select COMMON_CLK | 17 | select COMMON_CLK |
@@ -29,10 +30,12 @@ config ARM64 | |||
29 | select GENERIC_STRNLEN_USER | 30 | select GENERIC_STRNLEN_USER |
30 | select GENERIC_TIME_VSYSCALL | 31 | select GENERIC_TIME_VSYSCALL |
31 | select HARDIRQS_SW_RESEND | 32 | select HARDIRQS_SW_RESEND |
33 | select HAVE_ARCH_AUDITSYSCALL | ||
32 | select HAVE_ARCH_JUMP_LABEL | 34 | select HAVE_ARCH_JUMP_LABEL |
33 | select HAVE_ARCH_KGDB | 35 | select HAVE_ARCH_KGDB |
34 | select HAVE_ARCH_TRACEHOOK | 36 | select HAVE_ARCH_TRACEHOOK |
35 | select HAVE_C_RECORDMCOUNT | 37 | select HAVE_C_RECORDMCOUNT |
38 | select HAVE_CC_STACKPROTECTOR | ||
36 | select HAVE_DEBUG_BUGVERBOSE | 39 | select HAVE_DEBUG_BUGVERBOSE |
37 | select HAVE_DEBUG_KMEMLEAK | 40 | select HAVE_DEBUG_KMEMLEAK |
38 | select HAVE_DMA_API_DEBUG | 41 | select HAVE_DMA_API_DEBUG |
@@ -63,6 +66,7 @@ config ARM64 | |||
63 | select RTC_LIB | 66 | select RTC_LIB |
64 | select SPARSE_IRQ | 67 | select SPARSE_IRQ |
65 | select SYSCTL_EXCEPTION_TRACE | 68 | select SYSCTL_EXCEPTION_TRACE |
69 | select HAVE_CONTEXT_TRACKING | ||
66 | help | 70 | help |
67 | ARM 64-bit (AArch64) Linux support. | 71 | ARM 64-bit (AArch64) Linux support. |
68 | 72 | ||
@@ -155,14 +159,63 @@ endmenu | |||
155 | 159 | ||
156 | menu "Kernel Features" | 160 | menu "Kernel Features" |
157 | 161 | ||
162 | choice | ||
163 | prompt "Page size" | ||
164 | default ARM64_4K_PAGES | ||
165 | help | ||
166 | Page size (translation granule) configuration. | ||
167 | |||
168 | config ARM64_4K_PAGES | ||
169 | bool "4KB" | ||
170 | help | ||
171 | This feature enables 4KB pages support. | ||
172 | |||
158 | config ARM64_64K_PAGES | 173 | config ARM64_64K_PAGES |
159 | bool "Enable 64KB pages support" | 174 | bool "64KB" |
160 | help | 175 | help |
161 | This feature enables 64KB pages support (4KB by default) | 176 | This feature enables 64KB pages support (4KB by default) |
162 | allowing only two levels of page tables and faster TLB | 177 | allowing only two levels of page tables and faster TLB |
163 | look-up. AArch32 emulation is not available when this feature | 178 | look-up. AArch32 emulation is not available when this feature |
164 | is enabled. | 179 | is enabled. |
165 | 180 | ||
181 | endchoice | ||
182 | |||
183 | choice | ||
184 | prompt "Virtual address space size" | ||
185 | default ARM64_VA_BITS_39 if ARM64_4K_PAGES | ||
186 | default ARM64_VA_BITS_42 if ARM64_64K_PAGES | ||
187 | help | ||
188 | Allows choosing one of multiple possible virtual address | ||
189 | space sizes. The level of translation table is determined by | ||
190 | a combination of page size and virtual address space size. | ||
191 | |||
192 | config ARM64_VA_BITS_39 | ||
193 | bool "39-bit" | ||
194 | depends on ARM64_4K_PAGES | ||
195 | |||
196 | config ARM64_VA_BITS_42 | ||
197 | bool "42-bit" | ||
198 | depends on ARM64_64K_PAGES | ||
199 | |||
200 | config ARM64_VA_BITS_48 | ||
201 | bool "48-bit" | ||
202 | depends on BROKEN | ||
203 | |||
204 | endchoice | ||
205 | |||
206 | config ARM64_VA_BITS | ||
207 | int | ||
208 | default 39 if ARM64_VA_BITS_39 | ||
209 | default 42 if ARM64_VA_BITS_42 | ||
210 | default 48 if ARM64_VA_BITS_48 | ||
211 | |||
212 | config ARM64_PGTABLE_LEVELS | ||
213 | int | ||
214 | default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42 | ||
215 | default 3 if ARM64_64K_PAGES && ARM64_VA_BITS_48 | ||
216 | default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39 | ||
217 | default 4 if ARM64_4K_PAGES && ARM64_VA_BITS_48 | ||
218 | |||
166 | config CPU_BIG_ENDIAN | 219 | config CPU_BIG_ENDIAN |
167 | bool "Build big-endian kernel" | 220 | bool "Build big-endian kernel" |
168 | help | 221 | help |
@@ -294,12 +347,18 @@ config CMDLINE_FORCE | |||
294 | This is useful if you cannot or don't want to change the | 347 | This is useful if you cannot or don't want to change the |
295 | command-line options your boot loader passes to the kernel. | 348 | command-line options your boot loader passes to the kernel. |
296 | 349 | ||
350 | config EFI_STUB | ||
351 | bool | ||
352 | |||
297 | config EFI | 353 | config EFI |
298 | bool "UEFI runtime support" | 354 | bool "UEFI runtime support" |
299 | depends on OF && !CPU_BIG_ENDIAN | 355 | depends on OF && !CPU_BIG_ENDIAN |
300 | select LIBFDT | 356 | select LIBFDT |
301 | select UCS2_STRING | 357 | select UCS2_STRING |
302 | select EFI_PARAMS_FROM_FDT | 358 | select EFI_PARAMS_FROM_FDT |
359 | select EFI_RUNTIME_WRAPPERS | ||
360 | select EFI_STUB | ||
361 | select EFI_ARMSTUB | ||
303 | default y | 362 | default y |
304 | help | 363 | help |
305 | This option provides support for runtime services provided | 364 | This option provides support for runtime services provided |
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 1c1b75629842..4ee8e90b7a45 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -28,4 +28,19 @@ config PID_IN_CONTEXTIDR
 	  instructions during context switch. Say Y here only if you are
 	  planning to use hardware trace tools with this kernel.
 
+config ARM64_RANDOMIZE_TEXT_OFFSET
+	bool "Randomize TEXT_OFFSET at build time"
+	help
+	  Say Y here if you want the image load offset (AKA TEXT_OFFSET)
+	  of the kernel to be randomized at build-time. When selected,
+	  this option will cause TEXT_OFFSET to be randomized upon any
+	  build of the kernel, and the offset will be reflected in the
+	  text_offset field of the resulting Image. This can be used to
+	  fuzz-test bootloaders which respect text_offset.
+
+	  This option is intended for bootloader and/or kernel testing
+	  only. Bootloaders must make no assumptions regarding the value
+	  of TEXT_OFFSET and platforms must not require a specific
+	  value.
+
 endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 8185a913c5ed..57833546bf00 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -38,7 +38,11 @@ CHECKFLAGS += -D__aarch64__
 head-y		:= arch/arm64/kernel/head.o
 
 # The byte offset of the kernel image in RAM from the start of RAM.
+ifeq ($(CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET), y)
+TEXT_OFFSET := $(shell awk 'BEGIN {srand(); printf "0x%04x0\n", int(65535 * rand())}')
+else
 TEXT_OFFSET := 0x00080000
+endif
 
 export	TEXT_OFFSET GZFLAGS
 
@@ -48,6 +52,7 @@ core-$(CONFIG_XEN) += arch/arm64/xen/
 core-$(CONFIG_CRYPTO) += arch/arm64/crypto/
 libs-y		:= arch/arm64/lib/ $(libs-y)
 libs-y		+= $(LIBGCC)
+libs-$(CONFIG_EFI_STUB) += drivers/firmware/efi/libstub/
 
 # Default target when executing plain make
 KBUILD_IMAGE	:= Image.gz
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 3421f316f5dc..1e52b741d806 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -52,8 +52,11 @@ CONFIG_IP_PNP_BOOTP=y
 # CONFIG_INET_LRO is not set
 # CONFIG_IPV6 is not set
 # CONFIG_WIRELESS is not set
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_DMA_CMA=y
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_VIRTIO_BLK=y
@@ -65,6 +68,7 @@ CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
 CONFIG_TUN=y
+CONFIG_VIRTIO_NET=y
 CONFIG_SMC91X=y
 CONFIG_SMSC911X=y
 # CONFIG_WLAN is not set
@@ -76,6 +80,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_AMBA_PL011=y
 CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 CONFIG_REGULATOR=y
@@ -90,6 +95,7 @@ CONFIG_USB_ISP1760_HCD=y
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
 CONFIG_MMC_ARMMMCI=y
+CONFIG_VIRTIO_BALLOON=y
 CONFIG_VIRTIO_MMIO=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXT2_FS=y
@@ -107,6 +113,7 @@ CONFIG_HUGETLBFS=y
 # CONFIG_MISC_FILESYSTEMS is not set
 CONFIG_NFS_FS=y
 CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
 CONFIG_NLS_CODEPAGE_437=y
 CONFIG_NLS_ISO8859_1=y
 CONFIG_VIRTUALIZATION=y
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 2070a56ecc46..a3f935fde975 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -35,4 +35,4 @@ AFLAGS_aes-neon.o	:= -DINTERLEAVE=4
 CFLAGS_aes-glue-ce.o	:= -DUSE_V8_CRYPTO_EXTENSIONS
 
 $(obj)/aes-glue-%.o: $(src)/aes-glue.c FORCE
-	$(call if_changed_dep,cc_o_c)
+	$(call if_changed_rule,cc_o_c)
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 60f2f4c12256..79cd911ef88c 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 	for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
 		aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_enc, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key_dec, rounds, blocks, walk.iv,
 				first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 	return err;
@@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_enc, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
@@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
 				(u8 *)ctx->key1.key_dec, rounds, blocks,
 				(u8 *)ctx->key2.key_enc, walk.iv, first);
-		err = blkcipher_walk_done(desc, &walk, 0);
+		err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
 	}
 	kernel_neon_end();
 
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index a5176cf32dad..f2defe1c380c 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -138,19 +138,10 @@ static inline void __flush_icache_all(void)
 #define flush_icache_page(vma,page)	do { } while (0)
 
 /*
- * flush_cache_vmap() is used when creating mappings (eg, via vmap,
- * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
- * caches, since the direct-mappings of these pages may contain cached
- * data, we need to do a full cache flush to ensure that writebacks
- * don't corrupt data placed into these pages via the new mappings.
+ * Not required on AArch64 (PIPT or VIPT non-aliasing D-cache).
  */
 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
-	/*
-	 * set_pte_at() called from vmap_pte_range() does not
-	 * have a DSB after cleaning the cache line.
-	 */
-	dsb(ish);
 }
 
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
diff --git a/arch/arm64/include/asm/cachetype.h b/arch/arm64/include/asm/cachetype.h
index 4b23e758d5e0..7a2e0762cb40 100644
--- a/arch/arm64/include/asm/cachetype.h
+++ b/arch/arm64/include/asm/cachetype.h
@@ -30,10 +30,14 @@
 
 #ifndef __ASSEMBLY__
 
-static inline u32 icache_policy(void)
-{
-	return (read_cpuid_cachetype() >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK;
-}
+#include <linux/bitops.h>
+
+#define CTR_L1IP(ctr)	(((ctr) >> CTR_L1IP_SHIFT) & CTR_L1IP_MASK)
+
+#define ICACHEF_ALIASING	BIT(0)
+#define ICACHEF_AIVIVT		BIT(1)
+
+extern unsigned long __icache_flags;
 
 /*
  * Whilst the D-side always behaves as PIPT on AArch64, aliasing is
@@ -41,12 +45,12 @@ static inline u32 icache_policy(void)
  */
 static inline int icache_is_aliasing(void)
 {
-	return icache_policy() != ICACHE_POLICY_PIPT;
+	return test_bit(ICACHEF_ALIASING, &__icache_flags);
 }
 
 static inline int icache_is_aivivt(void)
 {
-	return icache_policy() == ICACHE_POLICY_AIVIVT;
+	return test_bit(ICACHEF_AIVIVT, &__icache_flags);
 }
 
 static inline u32 cache_type_cwg(void)
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
new file mode 100644
index 000000000000..056443086019
--- /dev/null
+++ b/arch/arm64/include/asm/cpu.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_CPU_H
+#define __ASM_CPU_H
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+
+/*
+ * Records attributes of an individual CPU.
+ */
+struct cpuinfo_arm64 {
+	struct cpu	cpu;
+	u32		reg_ctr;
+	u32		reg_cntfrq;
+	u32		reg_dczid;
+	u32		reg_midr;
+
+	u64		reg_id_aa64isar0;
+	u64		reg_id_aa64isar1;
+	u64		reg_id_aa64mmfr0;
+	u64		reg_id_aa64mmfr1;
+	u64		reg_id_aa64pfr0;
+	u64		reg_id_aa64pfr1;
+
+	u32		reg_id_isar0;
+	u32		reg_id_isar1;
+	u32		reg_id_isar2;
+	u32		reg_id_isar3;
+	u32		reg_id_isar4;
+	u32		reg_id_isar5;
+	u32		reg_id_mmfr0;
+	u32		reg_id_mmfr1;
+	u32		reg_id_mmfr2;
+	u32		reg_id_mmfr3;
+	u32		reg_id_pfr0;
+	u32		reg_id_pfr1;
+};
+
+DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
+
+void cpuinfo_store_cpu(void);
+void __init cpuinfo_store_boot_cpu(void);
+
+#endif /* __ASM_CPU_H */
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 27f54a7cc81b..379d0b874328 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -18,6 +18,8 @@
 
 #define INVALID_HWID		ULONG_MAX
 
+#define MPIDR_UP_BITMASK	(0x1 << 30)
+#define MPIDR_MT_BITMASK	(0x1 << 24)
 #define MPIDR_HWID_BITMASK	0xff00ffffff
 
 #define MPIDR_LEVEL_BITS_SHIFT	3
@@ -36,15 +38,34 @@
 	__val;							\
 })
 
+#define MIDR_REVISION_MASK	0xf
+#define MIDR_REVISION(midr)	((midr) & MIDR_REVISION_MASK)
+#define MIDR_PARTNUM_SHIFT	4
+#define MIDR_PARTNUM_MASK	(0xfff << MIDR_PARTNUM_SHIFT)
+#define MIDR_PARTNUM(midr)	\
+	(((midr) & MIDR_PARTNUM_MASK) >> MIDR_PARTNUM_SHIFT)
+#define MIDR_ARCHITECTURE_SHIFT	16
+#define MIDR_ARCHITECTURE_MASK	(0xf << MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_ARCHITECTURE(midr)	\
+	(((midr) & MIDR_ARCHITECTURE_MASK) >> MIDR_ARCHITECTURE_SHIFT)
+#define MIDR_VARIANT_SHIFT	20
+#define MIDR_VARIANT_MASK	(0xf << MIDR_VARIANT_SHIFT)
+#define MIDR_VARIANT(midr)	\
+	(((midr) & MIDR_VARIANT_MASK) >> MIDR_VARIANT_SHIFT)
+#define MIDR_IMPLEMENTOR_SHIFT	24
+#define MIDR_IMPLEMENTOR_MASK	(0xff << MIDR_IMPLEMENTOR_SHIFT)
+#define MIDR_IMPLEMENTOR(midr)	\
+	(((midr) & MIDR_IMPLEMENTOR_MASK) >> MIDR_IMPLEMENTOR_SHIFT)
+
 #define ARM_CPU_IMP_ARM		0x41
 #define ARM_CPU_IMP_APM		0x50
 
-#define ARM_CPU_PART_AEM_V8	0xD0F0
-#define ARM_CPU_PART_FOUNDATION	0xD000
-#define ARM_CPU_PART_CORTEX_A53	0xD030
-#define ARM_CPU_PART_CORTEX_A57	0xD070
+#define ARM_CPU_PART_AEM_V8	0xD0F
+#define ARM_CPU_PART_FOUNDATION	0xD00
+#define ARM_CPU_PART_CORTEX_A57	0xD07
+#define ARM_CPU_PART_CORTEX_A53	0xD03
 
-#define APM_CPU_PART_POTENZA	0x0000
+#define APM_CPU_PART_POTENZA	0x000
 
 #ifndef __ASSEMBLY__
 
@@ -65,12 +86,12 @@ static inline u64 __attribute_const__ read_cpuid_mpidr(void)
 
 static inline unsigned int __attribute_const__ read_cpuid_implementor(void)
 {
-	return (read_cpuid_id() & 0xFF000000) >> 24;
+	return MIDR_IMPLEMENTOR(read_cpuid_id());
 }
 
 static inline unsigned int __attribute_const__ read_cpuid_part_number(void)
 {
-	return (read_cpuid_id() & 0xFFF0);
+	return MIDR_PARTNUM(read_cpuid_id());
 }
 
 static inline u32 __attribute_const__ read_cpuid_cachetype(void)
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index 5a46c4e7f539..a34fd3b12e2b 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -2,6 +2,7 @@
 #define _ASM_EFI_H
 
 #include <asm/io.h>
+#include <asm/neon.h>
 
 #ifdef CONFIG_EFI
 extern void efi_init(void);
@@ -11,4 +12,36 @@ extern void efi_idmap_init(void);
 #define efi_idmap_init()
 #endif
 
+#define efi_call_virt(f, ...)					\
+({								\
+	efi_##f##_t *__f = efi.systab->runtime->f;		\
+	efi_status_t __s;					\
+								\
+	kernel_neon_begin();					\
+	__s = __f(__VA_ARGS__);					\
+	kernel_neon_end();					\
+	__s;							\
+})
+
+#define __efi_call_virt(f, ...)					\
+({								\
+	efi_##f##_t *__f = efi.systab->runtime->f;		\
+								\
+	kernel_neon_begin();					\
+	__f(__VA_ARGS__);					\
+	kernel_neon_end();					\
+})
+
+/* arch specific definitions used by the stub code */
+
+/*
+ * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from
+ * start of kernel and may not cross a 2MiB boundary. We set alignment to
+ * 2MiB so we know it won't cross a 2MiB boundary.
+ */
+#define EFI_FDT_ALIGN	SZ_2M   /* used by allocate_new_fdt_and_exit_boot() */
+#define MAX_FDT_OFFSET	SZ_512M
+
+#define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__)
+
 #endif /* _ASM_EFI_H */
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h index 768414d55e64..007618b8188c 100644 --- a/arch/arm64/include/asm/fpsimdmacros.h +++ b/arch/arm64/include/asm/fpsimdmacros.h | |||
@@ -40,6 +40,19 @@ | |||
40 | str w\tmpnr, [\state, #16 * 2 + 4] | 40 | str w\tmpnr, [\state, #16 * 2 + 4] |
41 | .endm | 41 | .endm |
42 | 42 | ||
43 | .macro fpsimd_restore_fpcr state, tmp | ||
44 | /* | ||
45 | * Writes to fpcr may be self-synchronising, so avoid restoring | ||
46 | * the register if it hasn't changed. | ||
47 | */ | ||
48 | mrs \tmp, fpcr | ||
49 | cmp \tmp, \state | ||
50 | b.eq 9999f | ||
51 | msr fpcr, \state | ||
52 | 9999: | ||
53 | .endm | ||
54 | |||
55 | /* Clobbers \state */ | ||
43 | .macro fpsimd_restore state, tmpnr | 56 | .macro fpsimd_restore state, tmpnr |
44 | ldp q0, q1, [\state, #16 * 0] | 57 | ldp q0, q1, [\state, #16 * 0] |
45 | ldp q2, q3, [\state, #16 * 2] | 58 | ldp q2, q3, [\state, #16 * 2] |
@@ -60,7 +73,7 @@ | |||
60 | ldr w\tmpnr, [\state, #16 * 2] | 73 | ldr w\tmpnr, [\state, #16 * 2] |
61 | msr fpsr, x\tmpnr | 74 | msr fpsr, x\tmpnr |
62 | ldr w\tmpnr, [\state, #16 * 2 + 4] | 75 | ldr w\tmpnr, [\state, #16 * 2 + 4] |
63 | msr fpcr, x\tmpnr | 76 | fpsimd_restore_fpcr x\tmpnr, \state |
64 | .endm | 77 | .endm |
65 | 78 | ||
66 | .altmacro | 79 | .altmacro |
@@ -84,7 +97,7 @@ | |||
84 | .macro fpsimd_restore_partial state, tmpnr1, tmpnr2 | 97 | .macro fpsimd_restore_partial state, tmpnr1, tmpnr2 |
85 | ldp w\tmpnr1, w\tmpnr2, [\state] | 98 | ldp w\tmpnr1, w\tmpnr2, [\state] |
86 | msr fpsr, x\tmpnr1 | 99 | msr fpsr, x\tmpnr1 |
87 | msr fpcr, x\tmpnr2 | 100 | fpsimd_restore_fpcr x\tmpnr2, x\tmpnr1 |
88 | adr x\tmpnr1, 0f | 101 | adr x\tmpnr1, 0f |
89 | ldr w\tmpnr2, [\state, #8] | 102 | ldr w\tmpnr2, [\state, #8] |
90 | add \state, \state, x\tmpnr2, lsl #4 | 103 | add \state, \state, x\tmpnr2, lsl #4 |
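
fpsimd_restore_fpcr trades a cheap read-and-compare for a potentially serialising register write. In C terms the guard is equivalent to the sketch below (read_fpcr()/write_fpcr() are hypothetical accessors standing in for the mrs/msr instructions, not kernel API):

    static inline void restore_fpcr(unsigned long saved)
    {
            /* Writes to FPCR may be self-synchronising, so skip the
             * write entirely when the value is already correct. */
            if (read_fpcr() != saved)       /* hypothetical mrs wrapper */
                    write_fpcr(saved);      /* hypothetical msr wrapper */
    }
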
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 902eb708804a..ccc7087d3c4e 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h | |||
@@ -41,11 +41,7 @@ | |||
41 | * The module space lives between the addresses given by TASK_SIZE | 41 | * The module space lives between the addresses given by TASK_SIZE |
42 | * and PAGE_OFFSET - it must be within 128MB of the kernel text. | 42 | * and PAGE_OFFSET - it must be within 128MB of the kernel text. |
43 | */ | 43 | */ |
44 | #ifdef CONFIG_ARM64_64K_PAGES | 44 | #define VA_BITS (CONFIG_ARM64_VA_BITS) |
45 | #define VA_BITS (42) | ||
46 | #else | ||
47 | #define VA_BITS (39) | ||
48 | #endif | ||
49 | #define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1)) | 45 | #define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1)) |
50 | #define MODULES_END (PAGE_OFFSET) | 46 | #define MODULES_END (PAGE_OFFSET) |
51 | #define MODULES_VADDR (MODULES_END - SZ_64M) | 47 | #define MODULES_VADDR (MODULES_END - SZ_64M) |
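
With VA_BITS now taken straight from CONFIG_ARM64_VA_BITS, the derived constants follow the same arithmetic as before. A worked example assuming VA_BITS = 39:

    /* PAGE_OFFSET   = 0xffffffffffffffff << (39 - 1)
     *               = 0xffffffc000000000  (upper half of the kernel VA space)
     * MODULES_END   = PAGE_OFFSET
     * MODULES_VADDR = PAGE_OFFSET - 64MB  (module area just below the linear map)
     */
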
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 46bf66628b6a..7a3f462133b0 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h | |||
@@ -31,14 +31,26 @@ | |||
31 | /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */ | 31 | /* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */ |
32 | #define __HAVE_ARCH_GATE_AREA 1 | 32 | #define __HAVE_ARCH_GATE_AREA 1 |
33 | 33 | ||
34 | #ifndef __ASSEMBLY__ | 34 | /* |
35 | 35 | * The idmap and swapper page tables need some space reserved in the kernel | |
36 | * image. Both require a pgd, a pud (4 levels only) and pmd tables to | ||
37 | * section-map the kernel. With the 64K page configuration, swapper and | ||
38 | * idmap need to map down to pte level. The swapper also maps the FDT (see | ||
39 | * __create_page_tables for more information). | ||
40 | */ | ||
36 | #ifdef CONFIG_ARM64_64K_PAGES | 41 | #ifdef CONFIG_ARM64_64K_PAGES |
37 | #include <asm/pgtable-2level-types.h> | 42 | #define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS) |
38 | #else | 43 | #else |
39 | #include <asm/pgtable-3level-types.h> | 44 | #define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS - 1) |
40 | #endif | 45 | #endif |
41 | 46 | ||
47 | #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) | ||
48 | #define IDMAP_DIR_SIZE (SWAPPER_DIR_SIZE) | ||
49 | |||
50 | #ifndef __ASSEMBLY__ | ||
51 | |||
52 | #include <asm/pgtable-types.h> | ||
53 | |||
42 | extern void __cpu_clear_user_page(void *p, unsigned long user); | 54 | extern void __cpu_clear_user_page(void *p, unsigned long user); |
43 | extern void __cpu_copy_user_page(void *to, const void *from, | 55 | extern void __cpu_copy_user_page(void *to, const void *from, |
44 | unsigned long user); | 56 | unsigned long user); |
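
The reservation is simply one page per table level that the early boot code has to build. Worked out for two common configurations (values assumed from the Kconfig options referenced above):

    /* 4KB pages, CONFIG_ARM64_PGTABLE_LEVELS = 3 (kernel section-mapped):
     *   SWAPPER_PGTABLE_LEVELS = 3 - 1 = 2           (pgd + pmd)
     *   SWAPPER_DIR_SIZE       = 2 * 4KB  = 8KB
     *
     * 64KB pages, CONFIG_ARM64_PGTABLE_LEVELS = 2 (mapped down to pte):
     *   SWAPPER_PGTABLE_LEVELS = 2                   (pgd + pte)
     *   SWAPPER_DIR_SIZE       = 2 * 64KB = 128KB
     */
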
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h index 9bea6e74a001..d5bed02073d6 100644 --- a/arch/arm64/include/asm/pgalloc.h +++ b/arch/arm64/include/asm/pgalloc.h | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #define check_pgt_cache() do { } while (0) | 27 | #define check_pgt_cache() do { } while (0) |
28 | 28 | ||
29 | #ifndef CONFIG_ARM64_64K_PAGES | 29 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 |
30 | 30 | ||
31 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) | 31 | static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) |
32 | { | 32 | { |
@@ -44,7 +44,27 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) | |||
44 | set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); | 44 | set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); |
45 | } | 45 | } |
46 | 46 | ||
47 | #endif /* CONFIG_ARM64_64K_PAGES */ | 47 | #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */ |
48 | |||
49 | #if CONFIG_ARM64_PGTABLE_LEVELS > 3 | ||
50 | |||
51 | static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) | ||
52 | { | ||
53 | return (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); | ||
54 | } | ||
55 | |||
56 | static inline void pud_free(struct mm_struct *mm, pud_t *pud) | ||
57 | { | ||
58 | BUG_ON((unsigned long)pud & (PAGE_SIZE-1)); | ||
59 | free_page((unsigned long)pud); | ||
60 | } | ||
61 | |||
62 | static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) | ||
63 | { | ||
64 | set_pgd(pgd, __pgd(__pa(pud) | PUD_TYPE_TABLE)); | ||
65 | } | ||
66 | |||
67 | #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */ | ||
48 | 68 | ||
49 | extern pgd_t *pgd_alloc(struct mm_struct *mm); | 69 | extern pgd_t *pgd_alloc(struct mm_struct *mm); |
50 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); | 70 | extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); |
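
The new pud-level helpers mirror the pmd-level ones one step up: allocate a zeroed page for the table, then link it into the level above with a table descriptor. A hedged usage sketch (error handling elided; mm, pgd and addr assumed in scope):

    pud_t *pud = pud_alloc_one(mm, addr);       /* zeroed page, may fail */
    if (pud)
            pgd_populate(mm, pgd, pud);         /* *pgd = __pa(pud) | PUD_TYPE_TABLE */
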
diff --git a/arch/arm64/include/asm/pgtable-2level-hwdef.h b/arch/arm64/include/asm/pgtable-2level-hwdef.h deleted file mode 100644 index 2593b490c56a..000000000000 --- a/arch/arm64/include/asm/pgtable-2level-hwdef.h +++ /dev/null | |||
@@ -1,43 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 ARM Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | */ | ||
16 | #ifndef __ASM_PGTABLE_2LEVEL_HWDEF_H | ||
17 | #define __ASM_PGTABLE_2LEVEL_HWDEF_H | ||
18 | |||
19 | /* | ||
20 | * With LPAE and 64KB pages, there are 2 levels of page tables. Each level has | ||
21 | * 8192 entries of 8 bytes each, occupying a 64KB page. Levels 0 and 1 are not | ||
22 | * used. The 2nd level table (PGD for Linux) can cover a range of 4TB, each | ||
23 | * entry representing 512MB. The user and kernel address spaces are limited to | ||
24 | * 4TB in the 64KB page configuration. | ||
25 | */ | ||
26 | #define PTRS_PER_PTE 8192 | ||
27 | #define PTRS_PER_PGD 8192 | ||
28 | |||
29 | /* | ||
30 | * PGDIR_SHIFT determines the size a top-level page table entry can map. | ||
31 | */ | ||
32 | #define PGDIR_SHIFT 29 | ||
33 | #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) | ||
34 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
35 | |||
36 | /* | ||
37 | * section address mask and size definitions. | ||
38 | */ | ||
39 | #define SECTION_SHIFT 29 | ||
40 | #define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT) | ||
41 | #define SECTION_MASK (~(SECTION_SIZE-1)) | ||
42 | |||
43 | #endif | ||
diff --git a/arch/arm64/include/asm/pgtable-2level-types.h b/arch/arm64/include/asm/pgtable-2level-types.h deleted file mode 100644 index 5f101e63dfc1..000000000000 --- a/arch/arm64/include/asm/pgtable-2level-types.h +++ /dev/null | |||
@@ -1,62 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 ARM Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | */ | ||
16 | #ifndef __ASM_PGTABLE_2LEVEL_TYPES_H | ||
17 | #define __ASM_PGTABLE_2LEVEL_TYPES_H | ||
18 | |||
19 | #include <asm/types.h> | ||
20 | |||
21 | typedef u64 pteval_t; | ||
22 | typedef u64 pgdval_t; | ||
23 | typedef pgdval_t pmdval_t; | ||
24 | |||
25 | #undef STRICT_MM_TYPECHECKS | ||
26 | |||
27 | #ifdef STRICT_MM_TYPECHECKS | ||
28 | |||
29 | /* | ||
30 | * These are used to make use of C type-checking.. | ||
31 | */ | ||
32 | typedef struct { pteval_t pte; } pte_t; | ||
33 | typedef struct { pgdval_t pgd; } pgd_t; | ||
34 | typedef struct { pteval_t pgprot; } pgprot_t; | ||
35 | |||
36 | #define pte_val(x) ((x).pte) | ||
37 | #define pgd_val(x) ((x).pgd) | ||
38 | #define pgprot_val(x) ((x).pgprot) | ||
39 | |||
40 | #define __pte(x) ((pte_t) { (x) } ) | ||
41 | #define __pgd(x) ((pgd_t) { (x) } ) | ||
42 | #define __pgprot(x) ((pgprot_t) { (x) } ) | ||
43 | |||
44 | #else /* !STRICT_MM_TYPECHECKS */ | ||
45 | |||
46 | typedef pteval_t pte_t; | ||
47 | typedef pgdval_t pgd_t; | ||
48 | typedef pteval_t pgprot_t; | ||
49 | |||
50 | #define pte_val(x) (x) | ||
51 | #define pgd_val(x) (x) | ||
52 | #define pgprot_val(x) (x) | ||
53 | |||
54 | #define __pte(x) (x) | ||
55 | #define __pgd(x) (x) | ||
56 | #define __pgprot(x) (x) | ||
57 | |||
58 | #endif /* STRICT_MM_TYPECHECKS */ | ||
59 | |||
60 | #include <asm-generic/pgtable-nopmd.h> | ||
61 | |||
62 | #endif /* __ASM_PGTABLE_2LEVEL_TYPES_H */ | ||
diff --git a/arch/arm64/include/asm/pgtable-3level-hwdef.h b/arch/arm64/include/asm/pgtable-3level-hwdef.h deleted file mode 100644 index 3dbf941d7767..000000000000 --- a/arch/arm64/include/asm/pgtable-3level-hwdef.h +++ /dev/null | |||
@@ -1,50 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 ARM Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
15 | */ | ||
16 | #ifndef __ASM_PGTABLE_3LEVEL_HWDEF_H | ||
17 | #define __ASM_PGTABLE_3LEVEL_HWDEF_H | ||
18 | |||
19 | /* | ||
20 | * With LPAE and 4KB pages, there are 3 levels of page tables. Each level has | ||
21 | * 512 entries of 8 bytes each, occupying a 4K page. The first level table | ||
22 | * covers a range of 512GB, each entry representing 1GB. The user and kernel | ||
23 | * address spaces are limited to 512GB each. | ||
24 | */ | ||
25 | #define PTRS_PER_PTE 512 | ||
26 | #define PTRS_PER_PMD 512 | ||
27 | #define PTRS_PER_PGD 512 | ||
28 | |||
29 | /* | ||
30 | * PGDIR_SHIFT determines the size a top-level page table entry can map. | ||
31 | */ | ||
32 | #define PGDIR_SHIFT 30 | ||
33 | #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) | ||
34 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
35 | |||
36 | /* | ||
37 | * PMD_SHIFT determines the size a middle-level page table entry can map. | ||
38 | */ | ||
39 | #define PMD_SHIFT 21 | ||
40 | #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) | ||
41 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
42 | |||
43 | /* | ||
44 | * section address mask and size definitions. | ||
45 | */ | ||
46 | #define SECTION_SHIFT 21 | ||
47 | #define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT) | ||
48 | #define SECTION_MASK (~(SECTION_SIZE-1)) | ||
49 | |||
50 | #endif | ||
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index 955e8c5f0afb..88174e0bfafe 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h | |||
@@ -16,18 +16,50 @@ | |||
16 | #ifndef __ASM_PGTABLE_HWDEF_H | 16 | #ifndef __ASM_PGTABLE_HWDEF_H |
17 | #define __ASM_PGTABLE_HWDEF_H | 17 | #define __ASM_PGTABLE_HWDEF_H |
18 | 18 | ||
19 | #ifdef CONFIG_ARM64_64K_PAGES | 19 | #define PTRS_PER_PTE (1 << (PAGE_SHIFT - 3)) |
20 | #include <asm/pgtable-2level-hwdef.h> | 20 | |
21 | #else | 21 | /* |
22 | #include <asm/pgtable-3level-hwdef.h> | 22 | * PMD_SHIFT determines the size a level 2 page table entry can map. |
23 | */ | ||
24 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 | ||
25 | #define PMD_SHIFT ((PAGE_SHIFT - 3) * 2 + 3) | ||
26 | #define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) | ||
27 | #define PMD_MASK (~(PMD_SIZE-1)) | ||
28 | #define PTRS_PER_PMD PTRS_PER_PTE | ||
29 | #endif | ||
30 | |||
31 | /* | ||
32 | * PUD_SHIFT determines the size a level 1 page table entry can map. | ||
33 | */ | ||
34 | #if CONFIG_ARM64_PGTABLE_LEVELS > 3 | ||
35 | #define PUD_SHIFT ((PAGE_SHIFT - 3) * 3 + 3) | ||
36 | #define PUD_SIZE (_AC(1, UL) << PUD_SHIFT) | ||
37 | #define PUD_MASK (~(PUD_SIZE-1)) | ||
38 | #define PTRS_PER_PUD PTRS_PER_PTE | ||
23 | #endif | 39 | #endif |
24 | 40 | ||
25 | /* | 41 | /* |
42 | * PGDIR_SHIFT determines the size a top-level page table entry can map | ||
43 | * (depending on the configuration, this level can be 0, 1 or 2). | ||
44 | */ | ||
45 | #define PGDIR_SHIFT ((PAGE_SHIFT - 3) * CONFIG_ARM64_PGTABLE_LEVELS + 3) | ||
46 | #define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) | ||
47 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | ||
48 | #define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT)) | ||
49 | |||
50 | /* | ||
51 | * Section address mask and size definitions. | ||
52 | */ | ||
53 | #define SECTION_SHIFT PMD_SHIFT | ||
54 | #define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT) | ||
55 | #define SECTION_MASK (~(SECTION_SIZE-1)) | ||
56 | |||
57 | /* | ||
26 | * Hardware page table definitions. | 58 | * Hardware page table definitions. |
27 | * | 59 | * |
28 | * Level 1 descriptor (PUD). | 60 | * Level 1 descriptor (PUD). |
29 | */ | 61 | */ |
30 | 62 | #define PUD_TYPE_TABLE (_AT(pudval_t, 3) << 0) | |
31 | #define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1) | 63 | #define PUD_TABLE_BIT (_AT(pgdval_t, 1) << 1) |
32 | #define PUD_TYPE_MASK (_AT(pgdval_t, 3) << 0) | 64 | #define PUD_TYPE_MASK (_AT(pgdval_t, 3) << 0) |
33 | #define PUD_TYPE_SECT (_AT(pgdval_t, 1) << 0) | 65 | #define PUD_TYPE_SECT (_AT(pgdval_t, 1) << 0) |
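
The generic shift formulas encode the fact that each translation level resolves PAGE_SHIFT - 3 bits (2^(PAGE_SHIFT-3) eight-byte descriptors per table page). Worked through for 4KB pages (PAGE_SHIFT = 12):

    /* PTRS_PER_PTE = 1 << (12 - 3) = 512
     *
     * 3 levels, VA_BITS = 39:
     *   PMD_SHIFT    = (12 - 3) * 2 + 3 = 21         -> 2MB per pmd entry
     *   PGDIR_SHIFT  = (12 - 3) * 3 + 3 = 30         -> 1GB per pgd entry
     *   PTRS_PER_PGD = 1 << (39 - 30)   = 512
     *
     * 4 levels, VA_BITS = 48:
     *   PUD_SHIFT    = (12 - 3) * 3 + 3 = 30         -> 1GB per pud entry
     *   PGDIR_SHIFT  = (12 - 3) * 4 + 3 = 39         -> 512GB per pgd entry
     *   PTRS_PER_PGD = 1 << (48 - 39)   = 512
     */
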
diff --git a/arch/arm64/include/asm/pgtable-3level-types.h b/arch/arm64/include/asm/pgtable-types.h index 4e94424938a4..ca9df80af896 100644 --- a/arch/arm64/include/asm/pgtable-3level-types.h +++ b/arch/arm64/include/asm/pgtable-types.h | |||
@@ -1,7 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2012 ARM Ltd. | 2 | * Page table type definitions. |
3 | * | 3 | * |
4 | * This program is free software; you can redistribute it and/or modify | 4 | * Copyright (C) 2014 ARM Ltd. |
5 | * Author: Catalin Marinas <catalin.marinas@arm.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | 8 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 9 | * published by the Free Software Foundation. |
7 | * | 10 | * |
@@ -13,13 +16,15 @@ | |||
13 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU General Public License |
14 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
15 | */ | 18 | */ |
16 | #ifndef __ASM_PGTABLE_3LEVEL_TYPES_H | 19 | |
17 | #define __ASM_PGTABLE_3LEVEL_TYPES_H | 20 | #ifndef __ASM_PGTABLE_TYPES_H |
21 | #define __ASM_PGTABLE_TYPES_H | ||
18 | 22 | ||
19 | #include <asm/types.h> | 23 | #include <asm/types.h> |
20 | 24 | ||
21 | typedef u64 pteval_t; | 25 | typedef u64 pteval_t; |
22 | typedef u64 pmdval_t; | 26 | typedef u64 pmdval_t; |
27 | typedef u64 pudval_t; | ||
23 | typedef u64 pgdval_t; | 28 | typedef u64 pgdval_t; |
24 | 29 | ||
25 | #undef STRICT_MM_TYPECHECKS | 30 | #undef STRICT_MM_TYPECHECKS |
@@ -30,39 +35,61 @@ typedef u64 pgdval_t; | |||
30 | * These are used to make use of C type-checking.. | 35 | * These are used to make use of C type-checking.. |
31 | */ | 36 | */ |
32 | typedef struct { pteval_t pte; } pte_t; | 37 | typedef struct { pteval_t pte; } pte_t; |
38 | #define pte_val(x) ((x).pte) | ||
39 | #define __pte(x) ((pte_t) { (x) } ) | ||
40 | |||
41 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 | ||
33 | typedef struct { pmdval_t pmd; } pmd_t; | 42 | typedef struct { pmdval_t pmd; } pmd_t; |
34 | typedef struct { pgdval_t pgd; } pgd_t; | 43 | #define pmd_val(x) ((x).pmd) |
35 | typedef struct { pteval_t pgprot; } pgprot_t; | 44 | #define __pmd(x) ((pmd_t) { (x) } ) |
45 | #endif | ||
36 | 46 | ||
37 | #define pte_val(x) ((x).pte) | 47 | #if CONFIG_ARM64_PGTABLE_LEVELS > 3 |
38 | #define pmd_val(x) ((x).pmd) | 48 | typedef struct { pudval_t pud; } pud_t; |
39 | #define pgd_val(x) ((x).pgd) | 49 | #define pud_val(x) ((x).pud) |
40 | #define pgprot_val(x) ((x).pgprot) | 50 | #define __pud(x) ((pud_t) { (x) } ) |
51 | #endif | ||
41 | 52 | ||
42 | #define __pte(x) ((pte_t) { (x) } ) | 53 | typedef struct { pgdval_t pgd; } pgd_t; |
43 | #define __pmd(x) ((pmd_t) { (x) } ) | 54 | #define pgd_val(x) ((x).pgd) |
44 | #define __pgd(x) ((pgd_t) { (x) } ) | 55 | #define __pgd(x) ((pgd_t) { (x) } ) |
45 | #define __pgprot(x) ((pgprot_t) { (x) } ) | 56 | |
57 | typedef struct { pteval_t pgprot; } pgprot_t; | ||
58 | #define pgprot_val(x) ((x).pgprot) | ||
59 | #define __pgprot(x) ((pgprot_t) { (x) } ) | ||
46 | 60 | ||
47 | #else /* !STRICT_MM_TYPECHECKS */ | 61 | #else /* !STRICT_MM_TYPECHECKS */ |
48 | 62 | ||
49 | typedef pteval_t pte_t; | 63 | typedef pteval_t pte_t; |
50 | typedef pmdval_t pmd_t; | ||
51 | typedef pgdval_t pgd_t; | ||
52 | typedef pteval_t pgprot_t; | ||
53 | |||
54 | #define pte_val(x) (x) | 64 | #define pte_val(x) (x) |
55 | #define pmd_val(x) (x) | ||
56 | #define pgd_val(x) (x) | ||
57 | #define pgprot_val(x) (x) | ||
58 | |||
59 | #define __pte(x) (x) | 65 | #define __pte(x) (x) |
66 | |||
67 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 | ||
68 | typedef pmdval_t pmd_t; | ||
69 | #define pmd_val(x) (x) | ||
60 | #define __pmd(x) (x) | 70 | #define __pmd(x) (x) |
71 | #endif | ||
72 | |||
73 | #if CONFIG_ARM64_PGTABLE_LEVELS > 3 | ||
74 | typedef pudval_t pud_t; | ||
75 | #define pud_val(x) (x) | ||
76 | #define __pud(x) (x) | ||
77 | #endif | ||
78 | |||
79 | typedef pgdval_t pgd_t; | ||
80 | #define pgd_val(x) (x) | ||
61 | #define __pgd(x) (x) | 81 | #define __pgd(x) (x) |
82 | |||
83 | typedef pteval_t pgprot_t; | ||
84 | #define pgprot_val(x) (x) | ||
62 | #define __pgprot(x) (x) | 85 | #define __pgprot(x) (x) |
63 | 86 | ||
64 | #endif /* STRICT_MM_TYPECHECKS */ | 87 | #endif /* STRICT_MM_TYPECHECKS */ |
65 | 88 | ||
89 | #if CONFIG_ARM64_PGTABLE_LEVELS == 2 | ||
90 | #include <asm-generic/pgtable-nopmd.h> | ||
91 | #elif CONFIG_ARM64_PGTABLE_LEVELS == 3 | ||
66 | #include <asm-generic/pgtable-nopud.h> | 92 | #include <asm-generic/pgtable-nopud.h> |
93 | #endif | ||
67 | 94 | ||
68 | #endif /* __ASM_PGTABLE_3LEVEL_TYPES_H */ | 95 | #endif /* __ASM_PGTABLE_TYPES_H */ |
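
STRICT_MM_TYPECHECKS is the reason for the struct wrappers: with them, accidentally mixing table levels is a compile error, while the plain-typedef variant reduces every level to a bare u64 and lets such bugs through. A minimal illustration (hypothetical snippet, not kernel code):

    pte_t pte = __pte(0);
    pmd_t pmd = __pmd(0);

    pte = pmd;      /* STRICT_MM_TYPECHECKS: error, incompatible struct types */
                    /* plain typedefs: both are u64, compiles silently */
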
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index e0ccceb317d9..ffe1ba0506d1 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -33,9 +33,16 @@ | |||
33 | 33 | ||
34 | /* | 34 | /* |
35 | * VMALLOC and SPARSEMEM_VMEMMAP ranges. | 35 | * VMALLOC and SPARSEMEM_VMEMMAP ranges. |
36 | * | ||
37 | * VMEMMAP_SIZE: allows the whole VA space to be covered by a struct page array | ||
38 | * (rounded up to PUD_SIZE). | ||
39 | * VMALLOC_START: beginning of the kernel VA space | ||
40 | * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space, | ||
41 | * fixed mappings and modules | ||
36 | */ | 42 | */ |
43 | #define VMEMMAP_SIZE ALIGN((1UL << (VA_BITS - PAGE_SHIFT)) * sizeof(struct page), PUD_SIZE) | ||
37 | #define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS) | 44 | #define VMALLOC_START (UL(0xffffffffffffffff) << VA_BITS) |
38 | #define VMALLOC_END (PAGE_OFFSET - UL(0x400000000) - SZ_64K) | 45 | #define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) |
39 | 46 | ||
40 | #define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) | 47 | #define vmemmap ((struct page *)(VMALLOC_END + SZ_64K)) |
41 | 48 | ||
@@ -44,14 +51,9 @@ | |||
44 | #ifndef __ASSEMBLY__ | 51 | #ifndef __ASSEMBLY__ |
45 | extern void __pte_error(const char *file, int line, unsigned long val); | 52 | extern void __pte_error(const char *file, int line, unsigned long val); |
46 | extern void __pmd_error(const char *file, int line, unsigned long val); | 53 | extern void __pmd_error(const char *file, int line, unsigned long val); |
54 | extern void __pud_error(const char *file, int line, unsigned long val); | ||
47 | extern void __pgd_error(const char *file, int line, unsigned long val); | 55 | extern void __pgd_error(const char *file, int line, unsigned long val); |
48 | 56 | ||
49 | #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) | ||
50 | #ifndef CONFIG_ARM64_64K_PAGES | ||
51 | #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) | ||
52 | #endif | ||
53 | #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) | ||
54 | |||
55 | #ifdef CONFIG_SMP | 57 | #ifdef CONFIG_SMP |
56 | #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) | 58 | #define PROT_DEFAULT (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED) |
57 | #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) | 59 | #define PROT_SECT_DEFAULT (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S) |
@@ -112,6 +114,8 @@ extern void __pgd_error(const char *file, int line, unsigned long val); | |||
112 | extern struct page *empty_zero_page; | 114 | extern struct page *empty_zero_page; |
113 | #define ZERO_PAGE(vaddr) (empty_zero_page) | 115 | #define ZERO_PAGE(vaddr) (empty_zero_page) |
114 | 116 | ||
117 | #define pte_ERROR(pte) __pte_error(__FILE__, __LINE__, pte_val(pte)) | ||
118 | |||
115 | #define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT) | 119 | #define pte_pfn(pte) ((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT) |
116 | 120 | ||
117 | #define pfn_pte(pfn,prot) (__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) | 121 | #define pfn_pte(pfn,prot) (__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot))) |
@@ -119,6 +123,10 @@ extern struct page *empty_zero_page; | |||
119 | #define pte_none(pte) (!pte_val(pte)) | 123 | #define pte_none(pte) (!pte_val(pte)) |
120 | #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) | 124 | #define pte_clear(mm,addr,ptep) set_pte(ptep, __pte(0)) |
121 | #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) | 125 | #define pte_page(pte) (pfn_to_page(pte_pfn(pte))) |
126 | |||
127 | /* Find an entry in the third-level page table. */ | ||
128 | #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
129 | |||
122 | #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr)) | 130 | #define pte_offset_kernel(dir,addr) (pmd_page_vaddr(*(dir)) + pte_index(addr)) |
123 | 131 | ||
124 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) | 132 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) |
@@ -138,6 +146,8 @@ extern struct page *empty_zero_page; | |||
138 | 146 | ||
139 | #define pte_valid_user(pte) \ | 147 | #define pte_valid_user(pte) \ |
140 | ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) | 148 | ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) |
149 | #define pte_valid_not_user(pte) \ | ||
150 | ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID) | ||
141 | 151 | ||
142 | static inline pte_t pte_wrprotect(pte_t pte) | 152 | static inline pte_t pte_wrprotect(pte_t pte) |
143 | { | 153 | { |
@@ -184,6 +194,15 @@ static inline pte_t pte_mkspecial(pte_t pte) | |||
184 | static inline void set_pte(pte_t *ptep, pte_t pte) | 194 | static inline void set_pte(pte_t *ptep, pte_t pte) |
185 | { | 195 | { |
186 | *ptep = pte; | 196 | *ptep = pte; |
197 | |||
198 | /* | ||
199 | * Only synchronise here if the new pte is valid and a kernel mapping; | ||
200 | * otherwise TLB maintenance or update_mmu_cache() provide the barriers. | ||
201 | */ | ||
202 | if (pte_valid_not_user(pte)) { | ||
203 | dsb(ishst); | ||
204 | isb(); | ||
205 | } | ||
187 | } | 206 | } |
188 | 207 | ||
189 | extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); | 208 | extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); |
@@ -303,6 +322,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) | |||
303 | { | 322 | { |
304 | *pmdp = pmd; | 323 | *pmdp = pmd; |
305 | dsb(ishst); | 324 | dsb(ishst); |
325 | isb(); | ||
306 | } | 326 | } |
307 | 327 | ||
308 | static inline void pmd_clear(pmd_t *pmdp) | 328 | static inline void pmd_clear(pmd_t *pmdp) |
@@ -323,7 +343,9 @@ static inline pte_t *pmd_page_vaddr(pmd_t pmd) | |||
323 | */ | 343 | */ |
324 | #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) | 344 | #define mk_pte(page,prot) pfn_pte(page_to_pfn(page),prot) |
325 | 345 | ||
326 | #ifndef CONFIG_ARM64_64K_PAGES | 346 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 |
347 | |||
348 | #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd_val(pmd)) | ||
327 | 349 | ||
328 | #define pud_none(pud) (!pud_val(pud)) | 350 | #define pud_none(pud) (!pud_val(pud)) |
329 | #define pud_bad(pud) (!(pud_val(pud) & 2)) | 351 | #define pud_bad(pud) (!(pud_val(pud) & 2)) |
@@ -333,6 +355,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud) | |||
333 | { | 355 | { |
334 | *pudp = pud; | 356 | *pudp = pud; |
335 | dsb(ishst); | 357 | dsb(ishst); |
358 | isb(); | ||
336 | } | 359 | } |
337 | 360 | ||
338 | static inline void pud_clear(pud_t *pudp) | 361 | static inline void pud_clear(pud_t *pudp) |
@@ -345,7 +368,51 @@ static inline pmd_t *pud_page_vaddr(pud_t pud) | |||
345 | return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK); | 368 | return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK); |
346 | } | 369 | } |
347 | 370 | ||
348 | #endif /* CONFIG_ARM64_64K_PAGES */ | 371 | /* Find an entry in the second-level page table. */ |
372 | #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) | ||
373 | |||
374 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) | ||
375 | { | ||
376 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr); | ||
377 | } | ||
378 | |||
379 | #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */ | ||
380 | |||
381 | #if CONFIG_ARM64_PGTABLE_LEVELS > 3 | ||
382 | |||
383 | #define pud_ERROR(pud) __pud_error(__FILE__, __LINE__, pud_val(pud)) | ||
384 | |||
385 | #define pgd_none(pgd) (!pgd_val(pgd)) | ||
386 | #define pgd_bad(pgd) (!(pgd_val(pgd) & 2)) | ||
387 | #define pgd_present(pgd) (pgd_val(pgd)) | ||
388 | |||
389 | static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) | ||
390 | { | ||
391 | *pgdp = pgd; | ||
392 | dsb(ishst); | ||
393 | } | ||
394 | |||
395 | static inline void pgd_clear(pgd_t *pgdp) | ||
396 | { | ||
397 | set_pgd(pgdp, __pgd(0)); | ||
398 | } | ||
399 | |||
400 | static inline pud_t *pgd_page_vaddr(pgd_t pgd) | ||
401 | { | ||
402 | return __va(pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK); | ||
403 | } | ||
404 | |||
405 | /* Find an entry in the first-level page table. */ | ||
406 | #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) | ||
407 | |||
408 | static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr) | ||
409 | { | ||
410 | return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr); | ||
411 | } | ||
412 | |||
413 | #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */ | ||
414 | |||
415 | #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd)) | ||
349 | 416 | ||
350 | /* to find an entry in a page-table-directory */ | 417 | /* to find an entry in a page-table-directory */ |
351 | #define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) | 418 | #define pgd_index(addr) (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)) |
@@ -355,18 +422,6 @@ static inline pmd_t *pud_page_vaddr(pud_t pud) | |||
355 | /* to find an entry in a kernel page-table-directory */ | 422 | /* to find an entry in a kernel page-table-directory */ |
356 | #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) | 423 | #define pgd_offset_k(addr) pgd_offset(&init_mm, addr) |
357 | 424 | ||
358 | /* Find an entry in the second-level page table.. */ | ||
359 | #ifndef CONFIG_ARM64_64K_PAGES | ||
360 | #define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) | ||
361 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) | ||
362 | { | ||
363 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr); | ||
364 | } | ||
365 | #endif | ||
366 | |||
367 | /* Find an entry in the third-level page table.. */ | ||
368 | #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)) | ||
369 | |||
370 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) | 425 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
371 | { | 426 | { |
372 | const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | | 427 | const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | |
@@ -383,9 +438,6 @@ static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) | |||
383 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; | 438 | extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; |
384 | extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; | 439 | extern pgd_t idmap_pg_dir[PTRS_PER_PGD]; |
385 | 440 | ||
386 | #define SWAPPER_DIR_SIZE (3 * PAGE_SIZE) | ||
387 | #define IDMAP_DIR_SIZE (2 * PAGE_SIZE) | ||
388 | |||
389 | /* | 441 | /* |
390 | * Encode and decode a swap entry: | 442 | * Encode and decode a swap entry: |
391 | * bits 0-1: present (must be zero) | 443 | * bits 0-1: present (must be zero) |
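
The new VMALLOC_END derivation reserves exactly enough vmemmap space for every page in the VA range instead of a fixed gap. A worked example assuming VA_BITS = 39, 4KB pages and sizeof(struct page) = 64:

    /* VMEMMAP_SIZE = ALIGN((1UL << (39 - 12)) * 64, PUD_SIZE)
     *              = ALIGN(2^27 pages * 64 bytes, 1GB) = 8GB
     * VMALLOC_END  = PAGE_OFFSET - PUD_SIZE - 8GB - 64KB
     * vmemmap      = (struct page *)(VMALLOC_END + 64KB)
     */
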
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 34de2a8f7d93..3df21feeabdd 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h | |||
@@ -129,6 +129,7 @@ extern void release_thread(struct task_struct *); | |||
129 | unsigned long get_wchan(struct task_struct *p); | 129 | unsigned long get_wchan(struct task_struct *p); |
130 | 130 | ||
131 | #define cpu_relax() barrier() | 131 | #define cpu_relax() barrier() |
132 | #define cpu_relax_lowlatency() cpu_relax() | ||
132 | 133 | ||
133 | /* Thread switching */ | 134 | /* Thread switching */ |
134 | extern struct task_struct *cpu_switch_to(struct task_struct *prev, | 135 | extern struct task_struct *cpu_switch_to(struct task_struct *prev, |
@@ -137,8 +138,8 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev, | |||
137 | #define task_pt_regs(p) \ | 138 | #define task_pt_regs(p) \ |
138 | ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) | 139 | ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) |
139 | 140 | ||
140 | #define KSTK_EIP(tsk) task_pt_regs(tsk)->pc | 141 | #define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc) |
141 | #define KSTK_ESP(tsk) task_pt_regs(tsk)->sp | 142 | #define KSTK_ESP(tsk) ((unsigned long)task_pt_regs(tsk)->sp) |
142 | 143 | ||
143 | /* | 144 | /* |
144 | * Prefetching support | 145 | * Prefetching support |
diff --git a/arch/arm64/include/asm/stackprotector.h b/arch/arm64/include/asm/stackprotector.h new file mode 100644 index 000000000000..fe5e287dc56b --- /dev/null +++ b/arch/arm64/include/asm/stackprotector.h | |||
@@ -0,0 +1,38 @@ | |||
1 | /* | ||
2 | * GCC stack protector support. | ||
3 | * | ||
4 | * Stack protector works by putting a predefined pattern at the start of | ||
5 | * the stack frame and verifying that it hasn't been overwritten when | ||
6 | * returning from the function. The pattern is called the stack canary | ||
7 | * and gcc expects it to be defined by a global variable called | ||
8 | * "__stack_chk_guard" on ARM. This unfortunately means that on SMP | ||
9 | * we cannot have a different canary value per task. | ||
10 | */ | ||
11 | |||
12 | #ifndef __ASM_STACKPROTECTOR_H | ||
13 | #define __ASM_STACKPROTECTOR_H | ||
14 | |||
15 | #include <linux/random.h> | ||
16 | #include <linux/version.h> | ||
17 | |||
18 | extern unsigned long __stack_chk_guard; | ||
19 | |||
20 | /* | ||
21 | * Initialize the stackprotector canary value. | ||
22 | * | ||
23 | * NOTE: this must only be called from functions that never return, | ||
24 | * and it must always be inlined. | ||
25 | */ | ||
26 | static __always_inline void boot_init_stack_canary(void) | ||
27 | { | ||
28 | unsigned long canary; | ||
29 | |||
30 | /* Try to get a semi-random initial value. */ | ||
31 | get_random_bytes(&canary, sizeof(canary)); | ||
32 | canary ^= LINUX_VERSION_CODE; | ||
33 | |||
34 | current->stack_canary = canary; | ||
35 | __stack_chk_guard = current->stack_canary; | ||
36 | } | ||
37 | |||
38 | #endif /* __ASM_STACKPROTECTOR_H */ | ||
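
boot_init_stack_canary() only seeds the value; the checking itself is emitted by the compiler when the kernel is built with -fstack-protector. Roughly, each instrumented function behaves as in the sketch below (an approximation of the generated code, not literal compiler output):

    int copy_name(const char *src)
    {
            unsigned long canary = __stack_chk_guard;   /* prologue: stash */
            char buf[32];

            strcpy(buf, src);                           /* possible overflow */

            if (canary != __stack_chk_guard)            /* epilogue: verify */
                    __stack_chk_fail();                 /* noreturn on corruption */
            return 0;
    }
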
diff --git a/arch/arm64/include/asm/syscall.h b/arch/arm64/include/asm/syscall.h index 383771eb0b87..709a574468f0 100644 --- a/arch/arm64/include/asm/syscall.h +++ b/arch/arm64/include/asm/syscall.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #ifndef __ASM_SYSCALL_H | 16 | #ifndef __ASM_SYSCALL_H |
17 | #define __ASM_SYSCALL_H | 17 | #define __ASM_SYSCALL_H |
18 | 18 | ||
19 | #include <uapi/linux/audit.h> | ||
20 | #include <linux/compat.h> | ||
19 | #include <linux/err.h> | 21 | #include <linux/err.h> |
20 | 22 | ||
21 | extern const void *sys_call_table[]; | 23 | extern const void *sys_call_table[]; |
@@ -105,4 +107,16 @@ static inline void syscall_set_arguments(struct task_struct *task, | |||
105 | memcpy(®s->regs[i], args, n * sizeof(args[0])); | 107 | memcpy(®s->regs[i], args, n * sizeof(args[0])); |
106 | } | 108 | } |
107 | 109 | ||
110 | /* | ||
111 | * We don't care about endianness (__AUDIT_ARCH_LE bit) here because | ||
112 | * AArch64 has the same system calls on both little- and big-endian. | ||
113 | */ | ||
114 | static inline int syscall_get_arch(void) | ||
115 | { | ||
116 | if (is_compat_task()) | ||
117 | return AUDIT_ARCH_ARM; | ||
118 | |||
119 | return AUDIT_ARCH_AARCH64; | ||
120 | } | ||
121 | |||
108 | #endif /* __ASM_SYSCALL_H */ | 122 | #endif /* __ASM_SYSCALL_H */ |
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h new file mode 100644 index 000000000000..5c89df0acbcb --- /dev/null +++ b/arch/arm64/include/asm/sysreg.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* | ||
2 | * Macros for accessing system registers with older binutils. | ||
3 | * | ||
4 | * Copyright (C) 2014 ARM Ltd. | ||
5 | * Author: Catalin Marinas <catalin.marinas@arm.com> | ||
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
18 | */ | ||
19 | |||
20 | #ifndef __ASM_SYSREG_H | ||
21 | #define __ASM_SYSREG_H | ||
22 | |||
23 | #define sys_reg(op0, op1, crn, crm, op2) \ | ||
24 | ((((op0)-2)<<19)|((op1)<<16)|((crn)<<12)|((crm)<<8)|((op2)<<5)) | ||
25 | |||
26 | #ifdef __ASSEMBLY__ | ||
27 | |||
28 | .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30 | ||
29 | .equ __reg_num_x\num, \num | ||
30 | .endr | ||
31 | .equ __reg_num_xzr, 31 | ||
32 | |||
33 | .macro mrs_s, rt, sreg | ||
34 | .inst 0xd5300000|(\sreg)|(__reg_num_\rt) | ||
35 | .endm | ||
36 | |||
37 | .macro msr_s, sreg, rt | ||
38 | .inst 0xd5100000|(\sreg)|(__reg_num_\rt) | ||
39 | .endm | ||
40 | |||
41 | #else | ||
42 | |||
43 | asm( | ||
44 | " .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30\n" | ||
45 | " .equ __reg_num_x\\num, \\num\n" | ||
46 | " .endr\n" | ||
47 | " .equ __reg_num_xzr, 31\n" | ||
48 | "\n" | ||
49 | " .macro mrs_s, rt, sreg\n" | ||
50 | " .inst 0xd5300000|(\\sreg)|(__reg_num_\\rt)\n" | ||
51 | " .endm\n" | ||
52 | "\n" | ||
53 | " .macro msr_s, sreg, rt\n" | ||
54 | " .inst 0xd5100000|(\\sreg)|(__reg_num_\\rt)\n" | ||
55 | " .endm\n" | ||
56 | ); | ||
57 | |||
58 | #endif | ||
59 | |||
60 | #endif /* __ASM_SYSREG_H */ | ||
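
mrs_s/msr_s exist because older binutils cannot assemble mrs/msr for system registers it does not know about, so the instruction word is built by hand: 0xd5300000/0xd5100000 are the MRS/MSR opcode templates, sys_reg() packs the op0/op1/CRn/CRm/op2 operand fields (op0 is encoded relative to 2, hence the subtraction), and the low five bits name the GPR. A worked encoding with illustrative operands:

    /* sys_reg(3, 0, 12, 12, 5)
     *   = ((3-2) << 19) | (0 << 16) | (12 << 12) | (12 << 8) | (5 << 5)
     *   = 0x0008cca0
     *
     * mrs_s x1, sys_reg(3, 0, 12, 12, 5)
     *   -> .inst 0xd5300000 | 0x0008cca0 | 1 = 0xd538cca1
     */
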
diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index e40b6d06d515..45108d802f5e 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h | |||
@@ -103,6 +103,7 @@ static inline struct thread_info *current_thread_info(void) | |||
103 | #define TIF_NEED_RESCHED 1 | 103 | #define TIF_NEED_RESCHED 1 |
104 | #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ | 104 | #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ |
105 | #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ | 105 | #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ |
106 | #define TIF_NOHZ 7 | ||
106 | #define TIF_SYSCALL_TRACE 8 | 107 | #define TIF_SYSCALL_TRACE 8 |
107 | #define TIF_SYSCALL_AUDIT 9 | 108 | #define TIF_SYSCALL_AUDIT 9 |
108 | #define TIF_SYSCALL_TRACEPOINT 10 | 109 | #define TIF_SYSCALL_TRACEPOINT 10 |
@@ -118,6 +119,7 @@ static inline struct thread_info *current_thread_info(void) | |||
118 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) | 119 | #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) |
119 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) | 120 | #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) |
120 | #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) | 121 | #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) |
122 | #define _TIF_NOHZ (1 << TIF_NOHZ) | ||
121 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) | 123 | #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) |
122 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) | 124 | #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) |
123 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) | 125 | #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) |
@@ -128,7 +130,8 @@ static inline struct thread_info *current_thread_info(void) | |||
128 | _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE) | 130 | _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE) |
129 | 131 | ||
130 | #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ | 132 | #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
131 | _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP) | 133 | _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ |
134 | _TIF_NOHZ) | ||
132 | 135 | ||
133 | #endif /* __KERNEL__ */ | 136 | #endif /* __KERNEL__ */ |
134 | #endif /* __ASM_THREAD_INFO_H */ | 137 | #endif /* __ASM_THREAD_INFO_H */ |
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h index 80e2c08900d6..62731ef9749a 100644 --- a/arch/arm64/include/asm/tlb.h +++ b/arch/arm64/include/asm/tlb.h | |||
@@ -91,7 +91,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | |||
91 | tlb_remove_page(tlb, pte); | 91 | tlb_remove_page(tlb, pte); |
92 | } | 92 | } |
93 | 93 | ||
94 | #ifndef CONFIG_ARM64_64K_PAGES | 94 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 |
95 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | 95 | static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, |
96 | unsigned long addr) | 96 | unsigned long addr) |
97 | { | 97 | { |
@@ -100,6 +100,15 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp, | |||
100 | } | 100 | } |
101 | #endif | 101 | #endif |
102 | 102 | ||
103 | #if CONFIG_ARM64_PGTABLE_LEVELS > 3 | ||
104 | static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pudp, | ||
105 | unsigned long addr) | ||
106 | { | ||
107 | tlb_add_flush(tlb, addr); | ||
108 | tlb_remove_page(tlb, virt_to_page(pudp)); | ||
109 | } | ||
110 | #endif | ||
111 | |||
103 | static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, | 112 | static inline void __tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, |
104 | unsigned long address) | 113 | unsigned long address) |
105 | { | 114 | { |
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index b9349c4513ea..73f0ce570fb3 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h | |||
@@ -98,8 +98,8 @@ static inline void flush_tlb_page(struct vm_area_struct *vma, | |||
98 | dsb(ish); | 98 | dsb(ish); |
99 | } | 99 | } |
100 | 100 | ||
101 | static inline void flush_tlb_range(struct vm_area_struct *vma, | 101 | static inline void __flush_tlb_range(struct vm_area_struct *vma, |
102 | unsigned long start, unsigned long end) | 102 | unsigned long start, unsigned long end) |
103 | { | 103 | { |
104 | unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48; | 104 | unsigned long asid = (unsigned long)ASID(vma->vm_mm) << 48; |
105 | unsigned long addr; | 105 | unsigned long addr; |
@@ -112,7 +112,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, | |||
112 | dsb(ish); | 112 | dsb(ish); |
113 | } | 113 | } |
114 | 114 | ||
115 | static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) | 115 | static inline void __flush_tlb_kernel_range(unsigned long start, unsigned long end) |
116 | { | 116 | { |
117 | unsigned long addr; | 117 | unsigned long addr; |
118 | start >>= 12; | 118 | start >>= 12; |
@@ -122,6 +122,30 @@ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end | |||
122 | for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) | 122 | for (addr = start; addr < end; addr += 1 << (PAGE_SHIFT - 12)) |
123 | asm("tlbi vaae1is, %0" : : "r"(addr)); | 123 | asm("tlbi vaae1is, %0" : : "r"(addr)); |
124 | dsb(ish); | 124 | dsb(ish); |
125 | isb(); | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * This is meant to avoid soft lock-ups on large TLB flushing ranges; it | ||
130 | * is not necessarily a performance improvement. | ||
131 | */ | ||
132 | #define MAX_TLB_RANGE (1024UL << PAGE_SHIFT) | ||
133 | |||
134 | static inline void flush_tlb_range(struct vm_area_struct *vma, | ||
135 | unsigned long start, unsigned long end) | ||
136 | { | ||
137 | if ((end - start) <= MAX_TLB_RANGE) | ||
138 | __flush_tlb_range(vma, start, end); | ||
139 | else | ||
140 | flush_tlb_mm(vma->vm_mm); | ||
141 | } | ||
142 | |||
143 | static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) | ||
144 | { | ||
145 | if ((end - start) <= MAX_TLB_RANGE) | ||
146 | __flush_tlb_kernel_range(start, end); | ||
147 | else | ||
148 | flush_tlb_all(); | ||
125 | } | 149 | } |
126 | 150 | ||
127 | /* | 151 | /* |
@@ -131,8 +155,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, | |||
131 | unsigned long addr, pte_t *ptep) | 155 | unsigned long addr, pte_t *ptep) |
132 | { | 156 | { |
133 | /* | 157 | /* |
134 | * set_pte() does not have a DSB, so make sure that the page table | 158 | * set_pte() does not have a DSB for user mappings, so make sure that |
135 | * write is visible. | 159 | * the page table write is visible. |
136 | */ | 160 | */ |
137 | dsb(ishst); | 161 | dsb(ishst); |
138 | } | 162 | } |
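
MAX_TLB_RANGE bounds how many per-page TLBI operations a single flush may issue before falling back to a full-ASID (or full-kernel) invalidation. Worked out for 4KB pages:

    /* MAX_TLB_RANGE = 1024 << 12 = 4MB
     *
     * flush_tlb_range() over 2MB  -> 512 per-page invalidations + dsb
     * flush_tlb_range() over 1GB  -> would be 262144 invalidations;
     *                                flush_tlb_mm() drops the whole ASID
     *                                instead, avoiding soft lock-ups.
     */
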
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index e5f47df00c24..4bc95d27e063 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h | |||
@@ -26,7 +26,24 @@ | |||
26 | #define __ARCH_WANT_COMPAT_SYS_SENDFILE | 26 | #define __ARCH_WANT_COMPAT_SYS_SENDFILE |
27 | #define __ARCH_WANT_SYS_FORK | 27 | #define __ARCH_WANT_SYS_FORK |
28 | #define __ARCH_WANT_SYS_VFORK | 28 | #define __ARCH_WANT_SYS_VFORK |
29 | |||
30 | /* | ||
31 | * Compat syscall numbers used by the AArch64 kernel. | ||
32 | */ | ||
33 | #define __NR_compat_restart_syscall 0 | ||
34 | #define __NR_compat_sigreturn 119 | ||
35 | #define __NR_compat_rt_sigreturn 173 | ||
36 | |||
37 | /* | ||
38 | * The following SVCs are ARM private. | ||
39 | */ | ||
40 | #define __ARM_NR_COMPAT_BASE 0x0f0000 | ||
41 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) | ||
42 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) | ||
43 | |||
44 | #define __NR_compat_syscalls 383 | ||
29 | #endif | 45 | #endif |
46 | |||
30 | #define __ARCH_WANT_SYS_CLONE | 47 | #define __ARCH_WANT_SYS_CLONE |
31 | #include <uapi/asm/unistd.h> | 48 | #include <uapi/asm/unistd.h> |
32 | 49 | ||
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index c8d8fc17bd5a..e242600c4046 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h | |||
@@ -21,403 +21,769 @@ | |||
21 | #define __SYSCALL(x, y) | 21 | #define __SYSCALL(x, y) |
22 | #endif | 22 | #endif |
23 | 23 | ||
24 | __SYSCALL(0, sys_restart_syscall) | 24 | #define __NR_restart_syscall 0 |
25 | __SYSCALL(1, sys_exit) | 25 | __SYSCALL(__NR_restart_syscall, sys_restart_syscall) |
26 | __SYSCALL(2, sys_fork) | 26 | #define __NR_exit 1 |
27 | __SYSCALL(3, sys_read) | 27 | __SYSCALL(__NR_exit, sys_exit) |
28 | __SYSCALL(4, sys_write) | 28 | #define __NR_fork 2 |
29 | __SYSCALL(5, compat_sys_open) | 29 | __SYSCALL(__NR_fork, sys_fork) |
30 | __SYSCALL(6, sys_close) | 30 | #define __NR_read 3 |
31 | __SYSCALL(7, sys_ni_syscall) /* 7 was sys_waitpid */ | 31 | __SYSCALL(__NR_read, sys_read) |
32 | __SYSCALL(8, sys_creat) | 32 | #define __NR_write 4 |
33 | __SYSCALL(9, sys_link) | 33 | __SYSCALL(__NR_write, sys_write) |
34 | __SYSCALL(10, sys_unlink) | 34 | #define __NR_open 5 |
35 | __SYSCALL(11, compat_sys_execve) | 35 | __SYSCALL(__NR_open, compat_sys_open) |
36 | __SYSCALL(12, sys_chdir) | 36 | #define __NR_close 6 |
37 | __SYSCALL(13, sys_ni_syscall) /* 13 was sys_time */ | 37 | __SYSCALL(__NR_close, sys_close) |
38 | __SYSCALL(14, sys_mknod) | 38 | /* 7 was sys_waitpid */ |
39 | __SYSCALL(15, sys_chmod) | 39 | __SYSCALL(7, sys_ni_syscall) |
40 | __SYSCALL(16, sys_lchown16) | 40 | #define __NR_creat 8 |
41 | __SYSCALL(17, sys_ni_syscall) /* 17 was sys_break */ | 41 | __SYSCALL(__NR_creat, sys_creat) |
42 | __SYSCALL(18, sys_ni_syscall) /* 18 was sys_stat */ | 42 | #define __NR_link 9 |
43 | __SYSCALL(19, compat_sys_lseek) | 43 | __SYSCALL(__NR_link, sys_link) |
44 | __SYSCALL(20, sys_getpid) | 44 | #define __NR_unlink 10 |
45 | __SYSCALL(21, compat_sys_mount) | 45 | __SYSCALL(__NR_unlink, sys_unlink) |
46 | __SYSCALL(22, sys_ni_syscall) /* 22 was sys_umount */ | 46 | #define __NR_execve 11 |
47 | __SYSCALL(23, sys_setuid16) | 47 | __SYSCALL(__NR_execve, compat_sys_execve) |
48 | __SYSCALL(24, sys_getuid16) | 48 | #define __NR_chdir 12 |
49 | __SYSCALL(25, sys_ni_syscall) /* 25 was sys_stime */ | 49 | __SYSCALL(__NR_chdir, sys_chdir) |
50 | __SYSCALL(26, compat_sys_ptrace) | 50 | /* 13 was sys_time */ |
51 | __SYSCALL(27, sys_ni_syscall) /* 27 was sys_alarm */ | 51 | __SYSCALL(13, sys_ni_syscall) |
52 | __SYSCALL(28, sys_ni_syscall) /* 28 was sys_fstat */ | 52 | #define __NR_mknod 14 |
53 | __SYSCALL(29, sys_pause) | 53 | __SYSCALL(__NR_mknod, sys_mknod) |
54 | __SYSCALL(30, sys_ni_syscall) /* 30 was sys_utime */ | 54 | #define __NR_chmod 15 |
55 | __SYSCALL(31, sys_ni_syscall) /* 31 was sys_stty */ | 55 | __SYSCALL(__NR_chmod, sys_chmod) |
56 | __SYSCALL(32, sys_ni_syscall) /* 32 was sys_gtty */ | 56 | #define __NR_lchown 16 |
57 | __SYSCALL(33, sys_access) | 57 | __SYSCALL(__NR_lchown, sys_lchown16) |
58 | __SYSCALL(34, sys_nice) | 58 | /* 17 was sys_break */ |
59 | __SYSCALL(35, sys_ni_syscall) /* 35 was sys_ftime */ | 59 | __SYSCALL(17, sys_ni_syscall) |
60 | __SYSCALL(36, sys_sync) | 60 | /* 18 was sys_stat */ |
61 | __SYSCALL(37, sys_kill) | 61 | __SYSCALL(18, sys_ni_syscall) |
62 | __SYSCALL(38, sys_rename) | 62 | #define __NR_lseek 19 |
63 | __SYSCALL(39, sys_mkdir) | 63 | __SYSCALL(__NR_lseek, compat_sys_lseek) |
64 | __SYSCALL(40, sys_rmdir) | 64 | #define __NR_getpid 20 |
65 | __SYSCALL(41, sys_dup) | 65 | __SYSCALL(__NR_getpid, sys_getpid) |
66 | __SYSCALL(42, sys_pipe) | 66 | #define __NR_mount 21 |
67 | __SYSCALL(43, compat_sys_times) | 67 | __SYSCALL(__NR_mount, compat_sys_mount) |
68 | __SYSCALL(44, sys_ni_syscall) /* 44 was sys_prof */ | 68 | /* 22 was sys_umount */ |
69 | __SYSCALL(45, sys_brk) | 69 | __SYSCALL(22, sys_ni_syscall) |
70 | __SYSCALL(46, sys_setgid16) | 70 | #define __NR_setuid 23 |
71 | __SYSCALL(47, sys_getgid16) | 71 | __SYSCALL(__NR_setuid, sys_setuid16) |
72 | __SYSCALL(48, sys_ni_syscall) /* 48 was sys_signal */ | 72 | #define __NR_getuid 24 |
73 | __SYSCALL(49, sys_geteuid16) | 73 | __SYSCALL(__NR_getuid, sys_getuid16) |
74 | __SYSCALL(50, sys_getegid16) | 74 | /* 25 was sys_stime */ |
75 | __SYSCALL(51, sys_acct) | 75 | __SYSCALL(25, sys_ni_syscall) |
76 | __SYSCALL(52, sys_umount) | 76 | #define __NR_ptrace 26 |
77 | __SYSCALL(53, sys_ni_syscall) /* 53 was sys_lock */ | 77 | __SYSCALL(__NR_ptrace, compat_sys_ptrace) |
78 | __SYSCALL(54, compat_sys_ioctl) | 78 | /* 27 was sys_alarm */ |
79 | __SYSCALL(55, compat_sys_fcntl) | 79 | __SYSCALL(27, sys_ni_syscall) |
80 | __SYSCALL(56, sys_ni_syscall) /* 56 was sys_mpx */ | 80 | /* 28 was sys_fstat */ |
81 | __SYSCALL(57, sys_setpgid) | 81 | __SYSCALL(28, sys_ni_syscall) |
82 | __SYSCALL(58, sys_ni_syscall) /* 58 was sys_ulimit */ | 82 | #define __NR_pause 29 |
83 | __SYSCALL(59, sys_ni_syscall) /* 59 was sys_olduname */ | 83 | __SYSCALL(__NR_pause, sys_pause) |
84 | __SYSCALL(60, sys_umask) | 84 | /* 30 was sys_utime */ |
85 | __SYSCALL(61, sys_chroot) | 85 | __SYSCALL(30, sys_ni_syscall) |
86 | __SYSCALL(62, compat_sys_ustat) | 86 | /* 31 was sys_stty */ |
87 | __SYSCALL(63, sys_dup2) | 87 | __SYSCALL(31, sys_ni_syscall) |
88 | __SYSCALL(64, sys_getppid) | 88 | /* 32 was sys_gtty */ |
89 | __SYSCALL(65, sys_getpgrp) | 89 | __SYSCALL(32, sys_ni_syscall) |
90 | __SYSCALL(66, sys_setsid) | 90 | #define __NR_access 33 |
91 | __SYSCALL(67, compat_sys_sigaction) | 91 | __SYSCALL(__NR_access, sys_access) |
92 | __SYSCALL(68, sys_ni_syscall) /* 68 was sys_sgetmask */ | 92 | #define __NR_nice 34 |
93 | __SYSCALL(69, sys_ni_syscall) /* 69 was sys_ssetmask */ | 93 | __SYSCALL(__NR_nice, sys_nice) |
94 | __SYSCALL(70, sys_setreuid16) | 94 | /* 35 was sys_ftime */ |
95 | __SYSCALL(71, sys_setregid16) | 95 | __SYSCALL(35, sys_ni_syscall) |
96 | __SYSCALL(72, sys_sigsuspend) | 96 | #define __NR_sync 36 |
97 | __SYSCALL(73, compat_sys_sigpending) | 97 | __SYSCALL(__NR_sync, sys_sync) |
98 | __SYSCALL(74, sys_sethostname) | 98 | #define __NR_kill 37 |
99 | __SYSCALL(75, compat_sys_setrlimit) | 99 | __SYSCALL(__NR_kill, sys_kill) |
100 | __SYSCALL(76, sys_ni_syscall) /* 76 was compat_sys_getrlimit */ | 100 | #define __NR_rename 38 |
101 | __SYSCALL(77, compat_sys_getrusage) | 101 | __SYSCALL(__NR_rename, sys_rename) |
102 | __SYSCALL(78, compat_sys_gettimeofday) | 102 | #define __NR_mkdir 39 |
103 | __SYSCALL(79, compat_sys_settimeofday) | 103 | __SYSCALL(__NR_mkdir, sys_mkdir) |
104 | __SYSCALL(80, sys_getgroups16) | 104 | #define __NR_rmdir 40 |
105 | __SYSCALL(81, sys_setgroups16) | 105 | __SYSCALL(__NR_rmdir, sys_rmdir) |
106 | __SYSCALL(82, sys_ni_syscall) /* 82 was compat_sys_select */ | 106 | #define __NR_dup 41 |
107 | __SYSCALL(83, sys_symlink) | 107 | __SYSCALL(__NR_dup, sys_dup) |
108 | __SYSCALL(84, sys_ni_syscall) /* 84 was sys_lstat */ | 108 | #define __NR_pipe 42 |
109 | __SYSCALL(85, sys_readlink) | 109 | __SYSCALL(__NR_pipe, sys_pipe) |
110 | __SYSCALL(86, sys_uselib) | 110 | #define __NR_times 43 |
111 | __SYSCALL(87, sys_swapon) | 111 | __SYSCALL(__NR_times, compat_sys_times) |
112 | __SYSCALL(88, sys_reboot) | 112 | /* 44 was sys_prof */ |
113 | __SYSCALL(89, sys_ni_syscall) /* 89 was sys_readdir */ | 113 | __SYSCALL(44, sys_ni_syscall) |
114 | __SYSCALL(90, sys_ni_syscall) /* 90 was sys_mmap */ | 114 | #define __NR_brk 45 |
115 | __SYSCALL(91, sys_munmap) | 115 | __SYSCALL(__NR_brk, sys_brk) |
116 | __SYSCALL(92, compat_sys_truncate) | 116 | #define __NR_setgid 46 |
117 | __SYSCALL(93, compat_sys_ftruncate) | 117 | __SYSCALL(__NR_setgid, sys_setgid16) |
118 | __SYSCALL(94, sys_fchmod) | 118 | #define __NR_getgid 47 |
119 | __SYSCALL(95, sys_fchown16) | 119 | __SYSCALL(__NR_getgid, sys_getgid16) |
120 | __SYSCALL(96, sys_getpriority) | 120 | /* 48 was sys_signal */ |
121 | __SYSCALL(97, sys_setpriority) | 121 | __SYSCALL(48, sys_ni_syscall) |
122 | __SYSCALL(98, sys_ni_syscall) /* 98 was sys_profil */ | 122 | #define __NR_geteuid 49 |
123 | __SYSCALL(99, compat_sys_statfs) | 123 | __SYSCALL(__NR_geteuid, sys_geteuid16) |
124 | __SYSCALL(100, compat_sys_fstatfs) | 124 | #define __NR_getegid 50 |
125 | __SYSCALL(101, sys_ni_syscall) /* 101 was sys_ioperm */ | 125 | __SYSCALL(__NR_getegid, sys_getegid16) |
126 | __SYSCALL(102, sys_ni_syscall) /* 102 was sys_socketcall */ | 126 | #define __NR_acct 51 |
127 | __SYSCALL(103, sys_syslog) | 127 | __SYSCALL(__NR_acct, sys_acct) |
128 | __SYSCALL(104, compat_sys_setitimer) | 128 | #define __NR_umount2 52 |
129 | __SYSCALL(105, compat_sys_getitimer) | 129 | __SYSCALL(__NR_umount2, sys_umount) |
130 | __SYSCALL(106, compat_sys_newstat) | 130 | /* 53 was sys_lock */ |
131 | __SYSCALL(107, compat_sys_newlstat) | 131 | __SYSCALL(53, sys_ni_syscall) |
132 | __SYSCALL(108, compat_sys_newfstat) | 132 | #define __NR_ioctl 54 |
133 | __SYSCALL(109, sys_ni_syscall) /* 109 was sys_uname */ | 133 | __SYSCALL(__NR_ioctl, compat_sys_ioctl) |
134 | __SYSCALL(110, sys_ni_syscall) /* 110 was sys_iopl */ | 134 | #define __NR_fcntl 55 |
135 | __SYSCALL(111, sys_vhangup) | 135 | __SYSCALL(__NR_fcntl, compat_sys_fcntl) |
136 | __SYSCALL(112, sys_ni_syscall) /* 112 was sys_idle */ | 136 | /* 56 was sys_mpx */ |
137 | __SYSCALL(113, sys_ni_syscall) /* 113 was sys_syscall */ | 137 | __SYSCALL(56, sys_ni_syscall) |
138 | __SYSCALL(114, compat_sys_wait4) | 138 | #define __NR_setpgid 57 |
139 | __SYSCALL(115, sys_swapoff) | 139 | __SYSCALL(__NR_setpgid, sys_setpgid) |
140 | __SYSCALL(116, compat_sys_sysinfo) | 140 | /* 58 was sys_ulimit */ |
141 | __SYSCALL(117, sys_ni_syscall) /* 117 was sys_ipc */ | 141 | __SYSCALL(58, sys_ni_syscall) |
142 | __SYSCALL(118, sys_fsync) | 142 | /* 59 was sys_olduname */ |
143 | __SYSCALL(119, compat_sys_sigreturn_wrapper) | 143 | __SYSCALL(59, sys_ni_syscall) |
144 | __SYSCALL(120, sys_clone) | 144 | #define __NR_umask 60 |
145 | __SYSCALL(121, sys_setdomainname) | 145 | __SYSCALL(__NR_umask, sys_umask) |
146 | __SYSCALL(122, sys_newuname) | 146 | #define __NR_chroot 61 |
147 | __SYSCALL(123, sys_ni_syscall) /* 123 was sys_modify_ldt */ | 147 | __SYSCALL(__NR_chroot, sys_chroot) |
148 | __SYSCALL(124, compat_sys_adjtimex) | 148 | #define __NR_ustat 62 |
149 | __SYSCALL(125, sys_mprotect) | 149 | __SYSCALL(__NR_ustat, compat_sys_ustat) |
150 | __SYSCALL(126, compat_sys_sigprocmask) | 150 | #define __NR_dup2 63 |
151 | __SYSCALL(127, sys_ni_syscall) /* 127 was sys_create_module */ | 151 | __SYSCALL(__NR_dup2, sys_dup2) |
152 | __SYSCALL(128, sys_init_module) | 152 | #define __NR_getppid 64 |
153 | __SYSCALL(129, sys_delete_module) | 153 | __SYSCALL(__NR_getppid, sys_getppid) |
154 | __SYSCALL(130, sys_ni_syscall) /* 130 was sys_get_kernel_syms */ | 154 | #define __NR_getpgrp 65 |
155 | __SYSCALL(131, sys_quotactl) | 155 | __SYSCALL(__NR_getpgrp, sys_getpgrp) |
156 | __SYSCALL(132, sys_getpgid) | 156 | #define __NR_setsid 66 |
157 | __SYSCALL(133, sys_fchdir) | 157 | __SYSCALL(__NR_setsid, sys_setsid) |
158 | __SYSCALL(134, sys_bdflush) | 158 | #define __NR_sigaction 67 |
159 | __SYSCALL(135, sys_sysfs) | 159 | __SYSCALL(__NR_sigaction, compat_sys_sigaction) |
160 | __SYSCALL(136, sys_personality) | 160 | /* 68 was sys_sgetmask */ |
161 | __SYSCALL(137, sys_ni_syscall) /* 137 was sys_afs_syscall */ | 161 | __SYSCALL(68, sys_ni_syscall) |
162 | __SYSCALL(138, sys_setfsuid16) | 162 | /* 69 was sys_ssetmask */ |
163 | __SYSCALL(139, sys_setfsgid16) | 163 | __SYSCALL(69, sys_ni_syscall) |
164 | __SYSCALL(140, sys_llseek) | 164 | #define __NR_setreuid 70 |
165 | __SYSCALL(141, compat_sys_getdents) | 165 | __SYSCALL(__NR_setreuid, sys_setreuid16) |
166 | __SYSCALL(142, compat_sys_select) | 166 | #define __NR_setregid 71 |
167 | __SYSCALL(143, sys_flock) | 167 | __SYSCALL(__NR_setregid, sys_setregid16) |
168 | __SYSCALL(144, sys_msync) | 168 | #define __NR_sigsuspend 72 |
169 | __SYSCALL(145, compat_sys_readv) | 169 | __SYSCALL(__NR_sigsuspend, sys_sigsuspend) |
170 | __SYSCALL(146, compat_sys_writev) | 170 | #define __NR_sigpending 73 |
171 | __SYSCALL(147, sys_getsid) | 171 | __SYSCALL(__NR_sigpending, compat_sys_sigpending) |
172 | __SYSCALL(148, sys_fdatasync) | 172 | #define __NR_sethostname 74 |
173 | __SYSCALL(149, compat_sys_sysctl) | 173 | __SYSCALL(__NR_sethostname, sys_sethostname) |
174 | __SYSCALL(150, sys_mlock) | 174 | #define __NR_setrlimit 75 |
175 | __SYSCALL(151, sys_munlock) | 175 | __SYSCALL(__NR_setrlimit, compat_sys_setrlimit) |
176 | __SYSCALL(152, sys_mlockall) | 176 | /* 76 was compat_sys_getrlimit */ |
177 | __SYSCALL(153, sys_munlockall) | 177 | __SYSCALL(76, sys_ni_syscall) |
178 | __SYSCALL(154, sys_sched_setparam) | 178 | #define __NR_getrusage 77 |
179 | __SYSCALL(155, sys_sched_getparam) | 179 | __SYSCALL(__NR_getrusage, compat_sys_getrusage) |
180 | __SYSCALL(156, sys_sched_setscheduler) | 180 | #define __NR_gettimeofday 78 |
181 | __SYSCALL(157, sys_sched_getscheduler) | 181 | __SYSCALL(__NR_gettimeofday, compat_sys_gettimeofday) |
182 | __SYSCALL(158, sys_sched_yield) | 182 | #define __NR_settimeofday 79 |
183 | __SYSCALL(159, sys_sched_get_priority_max) | 183 | __SYSCALL(__NR_settimeofday, compat_sys_settimeofday) |
184 | __SYSCALL(160, sys_sched_get_priority_min) | 184 | #define __NR_getgroups 80 |
185 | __SYSCALL(161, compat_sys_sched_rr_get_interval) | 185 | __SYSCALL(__NR_getgroups, sys_getgroups16) |
186 | __SYSCALL(162, compat_sys_nanosleep) | 186 | #define __NR_setgroups 81 |
187 | __SYSCALL(163, sys_mremap) | 187 | __SYSCALL(__NR_setgroups, sys_setgroups16) |
188 | __SYSCALL(164, sys_setresuid16) | 188 | /* 82 was compat_sys_select */ |
189 | __SYSCALL(165, sys_getresuid16) | 189 | __SYSCALL(82, sys_ni_syscall) |
190 | __SYSCALL(166, sys_ni_syscall) /* 166 was sys_vm86 */ | 190 | #define __NR_symlink 83 |
191 | __SYSCALL(167, sys_ni_syscall) /* 167 was sys_query_module */ | 191 | __SYSCALL(__NR_symlink, sys_symlink) |
192 | __SYSCALL(168, sys_poll) | 192 | /* 84 was sys_lstat */ |
193 | __SYSCALL(169, sys_ni_syscall) | 193 | __SYSCALL(84, sys_ni_syscall) |
194 | __SYSCALL(170, sys_setresgid16) | 194 | #define __NR_readlink 85 |
195 | __SYSCALL(171, sys_getresgid16) | 195 | __SYSCALL(__NR_readlink, sys_readlink) |
196 | __SYSCALL(172, sys_prctl) | 196 | #define __NR_uselib 86 |
197 | __SYSCALL(173, compat_sys_rt_sigreturn_wrapper) | 197 | __SYSCALL(__NR_uselib, sys_uselib) |
198 | __SYSCALL(174, compat_sys_rt_sigaction) | 198 | #define __NR_swapon 87 |
199 | __SYSCALL(175, compat_sys_rt_sigprocmask) | 199 | __SYSCALL(__NR_swapon, sys_swapon) |
200 | __SYSCALL(176, compat_sys_rt_sigpending) | 200 | #define __NR_reboot 88 |
201 | __SYSCALL(177, compat_sys_rt_sigtimedwait) | 201 | __SYSCALL(__NR_reboot, sys_reboot) |
202 | __SYSCALL(178, compat_sys_rt_sigqueueinfo) | 202 | /* 89 was sys_readdir */ |
203 | __SYSCALL(179, compat_sys_rt_sigsuspend) | 203 | __SYSCALL(89, sys_ni_syscall) |
204 | __SYSCALL(180, compat_sys_pread64_wrapper) | 204 | /* 90 was sys_mmap */ |
205 | __SYSCALL(181, compat_sys_pwrite64_wrapper) | 205 | __SYSCALL(90, sys_ni_syscall) |
206 | __SYSCALL(182, sys_chown16) | 206 | #define __NR_munmap 91 |
207 | __SYSCALL(183, sys_getcwd) | 207 | __SYSCALL(__NR_munmap, sys_munmap) |
208 | __SYSCALL(184, sys_capget) | 208 | #define __NR_truncate 92 |
209 | __SYSCALL(185, sys_capset) | 209 | __SYSCALL(__NR_truncate, compat_sys_truncate) |
210 | __SYSCALL(186, compat_sys_sigaltstack) | 210 | #define __NR_ftruncate 93 |
211 | __SYSCALL(187, compat_sys_sendfile) | 211 | __SYSCALL(__NR_ftruncate, compat_sys_ftruncate) |
212 | __SYSCALL(188, sys_ni_syscall) /* 188 reserved */ | 212 | #define __NR_fchmod 94 |
213 | __SYSCALL(189, sys_ni_syscall) /* 189 reserved */ | 213 | __SYSCALL(__NR_fchmod, sys_fchmod) |
214 | __SYSCALL(190, sys_vfork) | 214 | #define __NR_fchown 95 |
215 | __SYSCALL(191, compat_sys_getrlimit) /* SuS compliant getrlimit */ | 215 | __SYSCALL(__NR_fchown, sys_fchown16) |
216 | __SYSCALL(192, sys_mmap_pgoff) | 216 | #define __NR_getpriority 96 |
217 | __SYSCALL(193, compat_sys_truncate64_wrapper) | 217 | __SYSCALL(__NR_getpriority, sys_getpriority) |
218 | __SYSCALL(194, compat_sys_ftruncate64_wrapper) | 218 | #define __NR_setpriority 97 |
219 | __SYSCALL(195, sys_stat64) | 219 | __SYSCALL(__NR_setpriority, sys_setpriority) |
220 | __SYSCALL(196, sys_lstat64) | 220 | /* 98 was sys_profil */ |
221 | __SYSCALL(197, sys_fstat64) | 221 | __SYSCALL(98, sys_ni_syscall) |
222 | __SYSCALL(198, sys_lchown) | 222 | #define __NR_statfs 99 |
223 | __SYSCALL(199, sys_getuid) | 223 | __SYSCALL(__NR_statfs, compat_sys_statfs) |
224 | __SYSCALL(200, sys_getgid) | 224 | #define __NR_fstatfs 100 |
225 | __SYSCALL(201, sys_geteuid) | 225 | __SYSCALL(__NR_fstatfs, compat_sys_fstatfs) |
226 | __SYSCALL(202, sys_getegid) | 226 | /* 101 was sys_ioperm */ |
227 | __SYSCALL(203, sys_setreuid) | 227 | __SYSCALL(101, sys_ni_syscall) |
228 | __SYSCALL(204, sys_setregid) | 228 | /* 102 was sys_socketcall */ |
229 | __SYSCALL(205, sys_getgroups) | 229 | __SYSCALL(102, sys_ni_syscall) |
230 | __SYSCALL(206, sys_setgroups) | 230 | #define __NR_syslog 103 |
231 | __SYSCALL(207, sys_fchown) | 231 | __SYSCALL(__NR_syslog, sys_syslog) |
232 | __SYSCALL(208, sys_setresuid) | 232 | #define __NR_setitimer 104 |
233 | __SYSCALL(209, sys_getresuid) | 233 | __SYSCALL(__NR_setitimer, compat_sys_setitimer) |
234 | __SYSCALL(210, sys_setresgid) | 234 | #define __NR_getitimer 105 |
235 | __SYSCALL(211, sys_getresgid) | 235 | __SYSCALL(__NR_getitimer, compat_sys_getitimer) |
236 | __SYSCALL(212, sys_chown) | 236 | #define __NR_stat 106 |
237 | __SYSCALL(213, sys_setuid) | 237 | __SYSCALL(__NR_stat, compat_sys_newstat) |
238 | __SYSCALL(214, sys_setgid) | 238 | #define __NR_lstat 107 |
239 | __SYSCALL(215, sys_setfsuid) | 239 | __SYSCALL(__NR_lstat, compat_sys_newlstat) |
240 | __SYSCALL(216, sys_setfsgid) | 240 | #define __NR_fstat 108 |
241 | __SYSCALL(217, compat_sys_getdents64) | 241 | __SYSCALL(__NR_fstat, compat_sys_newfstat) |
242 | __SYSCALL(218, sys_pivot_root) | 242 | /* 109 was sys_uname */ |
243 | __SYSCALL(219, sys_mincore) | 243 | __SYSCALL(109, sys_ni_syscall) |
244 | __SYSCALL(220, sys_madvise) | 244 | /* 110 was sys_iopl */ |
245 | __SYSCALL(221, compat_sys_fcntl64) | 245 | __SYSCALL(110, sys_ni_syscall) |
246 | __SYSCALL(222, sys_ni_syscall) /* 222 for tux */ | 246 | #define __NR_vhangup 111 |
247 | __SYSCALL(223, sys_ni_syscall) /* 223 is unused */ | 247 | __SYSCALL(__NR_vhangup, sys_vhangup) |
248 | __SYSCALL(224, sys_gettid) | 248 | /* 112 was sys_idle */ |
249 | __SYSCALL(225, compat_sys_readahead_wrapper) | 249 | __SYSCALL(112, sys_ni_syscall) |
250 | __SYSCALL(226, sys_setxattr) | 250 | /* 113 was sys_syscall */ |
251 | __SYSCALL(227, sys_lsetxattr) | 251 | __SYSCALL(113, sys_ni_syscall) |
252 | __SYSCALL(228, sys_fsetxattr) | 252 | #define __NR_wait4 114 |
253 | __SYSCALL(229, sys_getxattr) | 253 | __SYSCALL(__NR_wait4, compat_sys_wait4) |
254 | __SYSCALL(230, sys_lgetxattr) | 254 | #define __NR_swapoff 115 |
255 | __SYSCALL(231, sys_fgetxattr) | 255 | __SYSCALL(__NR_swapoff, sys_swapoff) |
256 | __SYSCALL(232, sys_listxattr) | 256 | #define __NR_sysinfo 116 |
257 | __SYSCALL(233, sys_llistxattr) | 257 | __SYSCALL(__NR_sysinfo, compat_sys_sysinfo) |
258 | __SYSCALL(234, sys_flistxattr) | 258 | /* 117 was sys_ipc */ |
259 | __SYSCALL(235, sys_removexattr) | 259 | __SYSCALL(117, sys_ni_syscall) |
260 | __SYSCALL(236, sys_lremovexattr) | 260 | #define __NR_fsync 118 |
261 | __SYSCALL(237, sys_fremovexattr) | 261 | __SYSCALL(__NR_fsync, sys_fsync) |
262 | __SYSCALL(238, sys_tkill) | 262 | #define __NR_sigreturn 119 |
263 | __SYSCALL(239, sys_sendfile64) | 263 | __SYSCALL(__NR_sigreturn, compat_sys_sigreturn_wrapper) |
264 | __SYSCALL(240, compat_sys_futex) | 264 | #define __NR_clone 120 |
265 | __SYSCALL(241, compat_sys_sched_setaffinity) | 265 | __SYSCALL(__NR_clone, sys_clone) |
266 | __SYSCALL(242, compat_sys_sched_getaffinity) | 266 | #define __NR_setdomainname 121 |
267 | __SYSCALL(243, compat_sys_io_setup) | 267 | __SYSCALL(__NR_setdomainname, sys_setdomainname) |
268 | __SYSCALL(244, sys_io_destroy) | 268 | #define __NR_uname 122 |
269 | __SYSCALL(245, compat_sys_io_getevents) | 269 | __SYSCALL(__NR_uname, sys_newuname) |
270 | __SYSCALL(246, compat_sys_io_submit) | 270 | /* 123 was sys_modify_ldt */ |
271 | __SYSCALL(247, sys_io_cancel) | 271 | __SYSCALL(123, sys_ni_syscall) |
272 | __SYSCALL(248, sys_exit_group) | 272 | #define __NR_adjtimex 124 |
273 | __SYSCALL(249, compat_sys_lookup_dcookie) | 273 | __SYSCALL(__NR_adjtimex, compat_sys_adjtimex) |
274 | __SYSCALL(250, sys_epoll_create) | 274 | #define __NR_mprotect 125 |
275 | __SYSCALL(251, sys_epoll_ctl) | 275 | __SYSCALL(__NR_mprotect, sys_mprotect) |
276 | __SYSCALL(252, sys_epoll_wait) | 276 | #define __NR_sigprocmask 126 |
277 | __SYSCALL(253, sys_remap_file_pages) | 277 | __SYSCALL(__NR_sigprocmask, compat_sys_sigprocmask) |
278 | __SYSCALL(254, sys_ni_syscall) /* 254 for set_thread_area */ | 278 | /* 127 was sys_create_module */ |
279 | __SYSCALL(255, sys_ni_syscall) /* 255 for get_thread_area */ | 279 | __SYSCALL(127, sys_ni_syscall) |
280 | __SYSCALL(256, sys_set_tid_address) | 280 | #define __NR_init_module 128 |
281 | __SYSCALL(257, compat_sys_timer_create) | 281 | __SYSCALL(__NR_init_module, sys_init_module) |
282 | __SYSCALL(258, compat_sys_timer_settime) | 282 | #define __NR_delete_module 129 |
283 | __SYSCALL(259, compat_sys_timer_gettime) | 283 | __SYSCALL(__NR_delete_module, sys_delete_module) |
284 | __SYSCALL(260, sys_timer_getoverrun) | 284 | /* 130 was sys_get_kernel_syms */ |
285 | __SYSCALL(261, sys_timer_delete) | 285 | __SYSCALL(130, sys_ni_syscall) |
286 | __SYSCALL(262, compat_sys_clock_settime) | 286 | #define __NR_quotactl 131 |
287 | __SYSCALL(263, compat_sys_clock_gettime) | 287 | __SYSCALL(__NR_quotactl, sys_quotactl) |
288 | __SYSCALL(264, compat_sys_clock_getres) | 288 | #define __NR_getpgid 132 |
289 | __SYSCALL(265, compat_sys_clock_nanosleep) | 289 | __SYSCALL(__NR_getpgid, sys_getpgid) |
290 | __SYSCALL(266, compat_sys_statfs64_wrapper) | 290 | #define __NR_fchdir 133 |
291 | __SYSCALL(267, compat_sys_fstatfs64_wrapper) | 291 | __SYSCALL(__NR_fchdir, sys_fchdir) |
292 | __SYSCALL(268, sys_tgkill) | 292 | #define __NR_bdflush 134 |
293 | __SYSCALL(269, compat_sys_utimes) | 293 | __SYSCALL(__NR_bdflush, sys_bdflush) |
294 | __SYSCALL(270, compat_sys_fadvise64_64_wrapper) | 294 | #define __NR_sysfs 135 |
295 | __SYSCALL(271, sys_pciconfig_iobase) | 295 | __SYSCALL(__NR_sysfs, sys_sysfs) |
296 | __SYSCALL(272, sys_pciconfig_read) | 296 | #define __NR_personality 136 |
297 | __SYSCALL(273, sys_pciconfig_write) | 297 | __SYSCALL(__NR_personality, sys_personality) |
298 | __SYSCALL(274, compat_sys_mq_open) | 298 | /* 137 was sys_afs_syscall */ |
299 | __SYSCALL(275, sys_mq_unlink) | 299 | __SYSCALL(137, sys_ni_syscall) |
300 | __SYSCALL(276, compat_sys_mq_timedsend) | 300 | #define __NR_setfsuid 138 |
301 | __SYSCALL(277, compat_sys_mq_timedreceive) | 301 | __SYSCALL(__NR_setfsuid, sys_setfsuid16) |
302 | __SYSCALL(278, compat_sys_mq_notify) | 302 | #define __NR_setfsgid 139 |
303 | __SYSCALL(279, compat_sys_mq_getsetattr) | 303 | __SYSCALL(__NR_setfsgid, sys_setfsgid16) |
304 | __SYSCALL(280, compat_sys_waitid) | 304 | #define __NR__llseek 140 |
305 | __SYSCALL(281, sys_socket) | 305 | __SYSCALL(__NR__llseek, sys_llseek) |
306 | __SYSCALL(282, sys_bind) | 306 | #define __NR_getdents 141 |
307 | __SYSCALL(283, sys_connect) | 307 | __SYSCALL(__NR_getdents, compat_sys_getdents) |
308 | __SYSCALL(284, sys_listen) | 308 | #define __NR__newselect 142 |
309 | __SYSCALL(285, sys_accept) | 309 | __SYSCALL(__NR__newselect, compat_sys_select) |
310 | __SYSCALL(286, sys_getsockname) | 310 | #define __NR_flock 143 |
311 | __SYSCALL(287, sys_getpeername) | 311 | __SYSCALL(__NR_flock, sys_flock) |
312 | __SYSCALL(288, sys_socketpair) | 312 | #define __NR_msync 144 |
313 | __SYSCALL(289, sys_send) | 313 | __SYSCALL(__NR_msync, sys_msync) |
314 | __SYSCALL(290, sys_sendto) | 314 | #define __NR_readv 145 |
315 | __SYSCALL(291, compat_sys_recv) | 315 | __SYSCALL(__NR_readv, compat_sys_readv) |
316 | __SYSCALL(292, compat_sys_recvfrom) | 316 | #define __NR_writev 146 |
317 | __SYSCALL(293, sys_shutdown) | 317 | __SYSCALL(__NR_writev, compat_sys_writev) |
318 | __SYSCALL(294, compat_sys_setsockopt) | 318 | #define __NR_getsid 147 |
319 | __SYSCALL(295, compat_sys_getsockopt) | 319 | __SYSCALL(__NR_getsid, sys_getsid) |
320 | __SYSCALL(296, compat_sys_sendmsg) | 320 | #define __NR_fdatasync 148 |
321 | __SYSCALL(297, compat_sys_recvmsg) | 321 | __SYSCALL(__NR_fdatasync, sys_fdatasync) |
322 | __SYSCALL(298, sys_semop) | 322 | #define __NR__sysctl 149 |
323 | __SYSCALL(299, sys_semget) | 323 | __SYSCALL(__NR__sysctl, compat_sys_sysctl) |
324 | __SYSCALL(300, compat_sys_semctl) | 324 | #define __NR_mlock 150 |
325 | __SYSCALL(301, compat_sys_msgsnd) | 325 | __SYSCALL(__NR_mlock, sys_mlock) |
326 | __SYSCALL(302, compat_sys_msgrcv) | 326 | #define __NR_munlock 151 |
327 | __SYSCALL(303, sys_msgget) | 327 | __SYSCALL(__NR_munlock, sys_munlock) |
328 | __SYSCALL(304, compat_sys_msgctl) | 328 | #define __NR_mlockall 152 |
329 | __SYSCALL(305, compat_sys_shmat) | 329 | __SYSCALL(__NR_mlockall, sys_mlockall) |
330 | __SYSCALL(306, sys_shmdt) | 330 | #define __NR_munlockall 153 |
331 | __SYSCALL(307, sys_shmget) | 331 | __SYSCALL(__NR_munlockall, sys_munlockall) |
332 | __SYSCALL(308, compat_sys_shmctl) | 332 | #define __NR_sched_setparam 154 |
333 | __SYSCALL(309, sys_add_key) | 333 | __SYSCALL(__NR_sched_setparam, sys_sched_setparam) |
334 | __SYSCALL(310, sys_request_key) | 334 | #define __NR_sched_getparam 155 |
335 | __SYSCALL(311, compat_sys_keyctl) | 335 | __SYSCALL(__NR_sched_getparam, sys_sched_getparam) |
336 | __SYSCALL(312, compat_sys_semtimedop) | 336 | #define __NR_sched_setscheduler 156 |
337 | __SYSCALL(313, sys_ni_syscall) | 337 | __SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler) |
338 | __SYSCALL(314, sys_ioprio_set) | 338 | #define __NR_sched_getscheduler 157 |
339 | __SYSCALL(315, sys_ioprio_get) | 339 | __SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler) |
340 | __SYSCALL(316, sys_inotify_init) | 340 | #define __NR_sched_yield 158 |
341 | __SYSCALL(317, sys_inotify_add_watch) | 341 | __SYSCALL(__NR_sched_yield, sys_sched_yield) |
342 | __SYSCALL(318, sys_inotify_rm_watch) | 342 | #define __NR_sched_get_priority_max 159 |
343 | __SYSCALL(319, compat_sys_mbind) | 343 | __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max) |
344 | __SYSCALL(320, compat_sys_get_mempolicy) | 344 | #define __NR_sched_get_priority_min 160 |
345 | __SYSCALL(321, compat_sys_set_mempolicy) | 345 | __SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) |
346 | __SYSCALL(322, compat_sys_openat) | 346 | #define __NR_sched_rr_get_interval 161 |
347 | __SYSCALL(323, sys_mkdirat) | 347 | __SYSCALL(__NR_sched_rr_get_interval, compat_sys_sched_rr_get_interval) |
348 | __SYSCALL(324, sys_mknodat) | 348 | #define __NR_nanosleep 162 |
349 | __SYSCALL(325, sys_fchownat) | 349 | __SYSCALL(__NR_nanosleep, compat_sys_nanosleep) |
350 | __SYSCALL(326, compat_sys_futimesat) | 350 | #define __NR_mremap 163 |
351 | __SYSCALL(327, sys_fstatat64) | 351 | __SYSCALL(__NR_mremap, sys_mremap) |
352 | __SYSCALL(328, sys_unlinkat) | 352 | #define __NR_setresuid 164 |
353 | __SYSCALL(329, sys_renameat) | 353 | __SYSCALL(__NR_setresuid, sys_setresuid16) |
354 | __SYSCALL(330, sys_linkat) | 354 | #define __NR_getresuid 165 |
355 | __SYSCALL(331, sys_symlinkat) | 355 | __SYSCALL(__NR_getresuid, sys_getresuid16) |
356 | __SYSCALL(332, sys_readlinkat) | 356 | /* 166 was sys_vm86 */ |
357 | __SYSCALL(333, sys_fchmodat) | 357 | __SYSCALL(166, sys_ni_syscall) |
358 | __SYSCALL(334, sys_faccessat) | 358 | /* 167 was sys_query_module */ |
359 | __SYSCALL(335, compat_sys_pselect6) | 359 | __SYSCALL(167, sys_ni_syscall) |
360 | __SYSCALL(336, compat_sys_ppoll) | 360 | #define __NR_poll 168 |
361 | __SYSCALL(337, sys_unshare) | 361 | __SYSCALL(__NR_poll, sys_poll) |
362 | __SYSCALL(338, compat_sys_set_robust_list) | 362 | #define __NR_nfsservctl 169 |
363 | __SYSCALL(339, compat_sys_get_robust_list) | 363 | __SYSCALL(__NR_nfsservctl, sys_ni_syscall) |
364 | __SYSCALL(340, sys_splice) | 364 | #define __NR_setresgid 170 |
365 | __SYSCALL(341, compat_sys_sync_file_range2_wrapper) | 365 | __SYSCALL(__NR_setresgid, sys_setresgid16) |
366 | __SYSCALL(342, sys_tee) | 366 | #define __NR_getresgid 171 |
367 | __SYSCALL(343, compat_sys_vmsplice) | 367 | __SYSCALL(__NR_getresgid, sys_getresgid16) |
368 | __SYSCALL(344, compat_sys_move_pages) | 368 | #define __NR_prctl 172 |
369 | __SYSCALL(345, sys_getcpu) | 369 | __SYSCALL(__NR_prctl, sys_prctl) |
370 | __SYSCALL(346, compat_sys_epoll_pwait) | 370 | #define __NR_rt_sigreturn 173 |
371 | __SYSCALL(347, compat_sys_kexec_load) | 371 | __SYSCALL(__NR_rt_sigreturn, compat_sys_rt_sigreturn_wrapper) |
372 | __SYSCALL(348, compat_sys_utimensat) | 372 | #define __NR_rt_sigaction 174 |
373 | __SYSCALL(349, compat_sys_signalfd) | 373 | __SYSCALL(__NR_rt_sigaction, compat_sys_rt_sigaction) |
374 | __SYSCALL(350, sys_timerfd_create) | 374 | #define __NR_rt_sigprocmask 175 |
375 | __SYSCALL(351, sys_eventfd) | 375 | __SYSCALL(__NR_rt_sigprocmask, compat_sys_rt_sigprocmask) |
376 | __SYSCALL(352, compat_sys_fallocate_wrapper) | 376 | #define __NR_rt_sigpending 176 |
377 | __SYSCALL(353, compat_sys_timerfd_settime) | 377 | __SYSCALL(__NR_rt_sigpending, compat_sys_rt_sigpending) |
378 | __SYSCALL(354, compat_sys_timerfd_gettime) | 378 | #define __NR_rt_sigtimedwait 177 |
379 | __SYSCALL(355, compat_sys_signalfd4) | 379 | __SYSCALL(__NR_rt_sigtimedwait, compat_sys_rt_sigtimedwait) |
380 | __SYSCALL(356, sys_eventfd2) | 380 | #define __NR_rt_sigqueueinfo 178 |
381 | __SYSCALL(357, sys_epoll_create1) | 381 | __SYSCALL(__NR_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo) |
382 | __SYSCALL(358, sys_dup3) | 382 | #define __NR_rt_sigsuspend 179 |
383 | __SYSCALL(359, sys_pipe2) | 383 | __SYSCALL(__NR_rt_sigsuspend, compat_sys_rt_sigsuspend) |
384 | __SYSCALL(360, sys_inotify_init1) | 384 | #define __NR_pread64 180 |
385 | __SYSCALL(361, compat_sys_preadv) | 385 | __SYSCALL(__NR_pread64, compat_sys_pread64_wrapper) |
386 | __SYSCALL(362, compat_sys_pwritev) | 386 | #define __NR_pwrite64 181 |
387 | __SYSCALL(363, compat_sys_rt_tgsigqueueinfo) | 387 | __SYSCALL(__NR_pwrite64, compat_sys_pwrite64_wrapper) |
388 | __SYSCALL(364, sys_perf_event_open) | 388 | #define __NR_chown 182 |
389 | __SYSCALL(365, compat_sys_recvmmsg) | 389 | __SYSCALL(__NR_chown, sys_chown16) |
390 | __SYSCALL(366, sys_accept4) | 390 | #define __NR_getcwd 183 |
391 | __SYSCALL(367, sys_fanotify_init) | 391 | __SYSCALL(__NR_getcwd, sys_getcwd) |
392 | __SYSCALL(368, compat_sys_fanotify_mark) | 392 | #define __NR_capget 184 |
393 | __SYSCALL(369, sys_prlimit64) | 393 | __SYSCALL(__NR_capget, sys_capget) |
394 | __SYSCALL(370, sys_name_to_handle_at) | 394 | #define __NR_capset 185 |
395 | __SYSCALL(371, compat_sys_open_by_handle_at) | 395 | __SYSCALL(__NR_capset, sys_capset) |
396 | __SYSCALL(372, compat_sys_clock_adjtime) | 396 | #define __NR_sigaltstack 186 |
397 | __SYSCALL(373, sys_syncfs) | 397 | __SYSCALL(__NR_sigaltstack, compat_sys_sigaltstack) |
398 | __SYSCALL(374, compat_sys_sendmmsg) | 398 | #define __NR_sendfile 187 |
399 | __SYSCALL(375, sys_setns) | 399 | __SYSCALL(__NR_sendfile, compat_sys_sendfile) |
400 | __SYSCALL(376, compat_sys_process_vm_readv) | 400 | /* 188 reserved */ |
401 | __SYSCALL(377, compat_sys_process_vm_writev) | 401 | __SYSCALL(188, sys_ni_syscall) |
402 | __SYSCALL(378, sys_kcmp) | 402 | /* 189 reserved */ |
403 | __SYSCALL(379, sys_finit_module) | 403 | __SYSCALL(189, sys_ni_syscall) |
404 | __SYSCALL(380, sys_sched_setattr) | 404 | #define __NR_vfork 190 |
405 | __SYSCALL(381, sys_sched_getattr) | 405 | __SYSCALL(__NR_vfork, sys_vfork) |
406 | __SYSCALL(382, sys_renameat2) | 406 | #define __NR_ugetrlimit 191 /* SuS compliant getrlimit */ |
407 | 407 | __SYSCALL(__NR_ugetrlimit, compat_sys_getrlimit) /* SuS compliant getrlimit */ | |
408 | #define __NR_compat_syscalls 383 | 408 | #define __NR_mmap2 192 |
409 | 409 | __SYSCALL(__NR_mmap2, sys_mmap_pgoff) | |
410 | /* | 410 | #define __NR_truncate64 193 |
411 | * Compat syscall numbers used by the AArch64 kernel. | 411 | __SYSCALL(__NR_truncate64, compat_sys_truncate64_wrapper) |
412 | */ | 412 | #define __NR_ftruncate64 194 |
413 | #define __NR_compat_restart_syscall 0 | 413 | __SYSCALL(__NR_ftruncate64, compat_sys_ftruncate64_wrapper) |
414 | #define __NR_compat_sigreturn 119 | 414 | #define __NR_stat64 195 |
415 | #define __NR_compat_rt_sigreturn 173 | 415 | __SYSCALL(__NR_stat64, sys_stat64) |
416 | 416 | #define __NR_lstat64 196 | |
417 | 417 | __SYSCALL(__NR_lstat64, sys_lstat64) | |
418 | /* | 418 | #define __NR_fstat64 197 |
419 | * The following SVCs are ARM private. | 419 | __SYSCALL(__NR_fstat64, sys_fstat64) |
420 | */ | 420 | #define __NR_lchown32 198 |
421 | #define __ARM_NR_COMPAT_BASE 0x0f0000 | 421 | __SYSCALL(__NR_lchown32, sys_lchown) |
422 | #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) | 422 | #define __NR_getuid32 199 |
423 | #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) | 423 | __SYSCALL(__NR_getuid32, sys_getuid) |
424 | #define __NR_getgid32 200 | ||
425 | __SYSCALL(__NR_getgid32, sys_getgid) | ||
426 | #define __NR_geteuid32 201 | ||
427 | __SYSCALL(__NR_geteuid32, sys_geteuid) | ||
428 | #define __NR_getegid32 202 | ||
429 | __SYSCALL(__NR_getegid32, sys_getegid) | ||
430 | #define __NR_setreuid32 203 | ||
431 | __SYSCALL(__NR_setreuid32, sys_setreuid) | ||
432 | #define __NR_setregid32 204 | ||
433 | __SYSCALL(__NR_setregid32, sys_setregid) | ||
434 | #define __NR_getgroups32 205 | ||
435 | __SYSCALL(__NR_getgroups32, sys_getgroups) | ||
436 | #define __NR_setgroups32 206 | ||
437 | __SYSCALL(__NR_setgroups32, sys_setgroups) | ||
438 | #define __NR_fchown32 207 | ||
439 | __SYSCALL(__NR_fchown32, sys_fchown) | ||
440 | #define __NR_setresuid32 208 | ||
441 | __SYSCALL(__NR_setresuid32, sys_setresuid) | ||
442 | #define __NR_getresuid32 209 | ||
443 | __SYSCALL(__NR_getresuid32, sys_getresuid) | ||
444 | #define __NR_setresgid32 210 | ||
445 | __SYSCALL(__NR_setresgid32, sys_setresgid) | ||
446 | #define __NR_getresgid32 211 | ||
447 | __SYSCALL(__NR_getresgid32, sys_getresgid) | ||
448 | #define __NR_chown32 212 | ||
449 | __SYSCALL(__NR_chown32, sys_chown) | ||
450 | #define __NR_setuid32 213 | ||
451 | __SYSCALL(__NR_setuid32, sys_setuid) | ||
452 | #define __NR_setgid32 214 | ||
453 | __SYSCALL(__NR_setgid32, sys_setgid) | ||
454 | #define __NR_setfsuid32 215 | ||
455 | __SYSCALL(__NR_setfsuid32, sys_setfsuid) | ||
456 | #define __NR_setfsgid32 216 | ||
457 | __SYSCALL(__NR_setfsgid32, sys_setfsgid) | ||
458 | #define __NR_getdents64 217 | ||
459 | __SYSCALL(__NR_getdents64, compat_sys_getdents64) | ||
460 | #define __NR_pivot_root 218 | ||
461 | __SYSCALL(__NR_pivot_root, sys_pivot_root) | ||
462 | #define __NR_mincore 219 | ||
463 | __SYSCALL(__NR_mincore, sys_mincore) | ||
464 | #define __NR_madvise 220 | ||
465 | __SYSCALL(__NR_madvise, sys_madvise) | ||
466 | #define __NR_fcntl64 221 | ||
467 | __SYSCALL(__NR_fcntl64, compat_sys_fcntl64) | ||
468 | /* 222 for tux */ | ||
469 | __SYSCALL(222, sys_ni_syscall) | ||
470 | /* 223 is unused */ | ||
471 | __SYSCALL(223, sys_ni_syscall) | ||
472 | #define __NR_gettid 224 | ||
473 | __SYSCALL(__NR_gettid, sys_gettid) | ||
474 | #define __NR_readahead 225 | ||
475 | __SYSCALL(__NR_readahead, compat_sys_readahead_wrapper) | ||
476 | #define __NR_setxattr 226 | ||
477 | __SYSCALL(__NR_setxattr, sys_setxattr) | ||
478 | #define __NR_lsetxattr 227 | ||
479 | __SYSCALL(__NR_lsetxattr, sys_lsetxattr) | ||
480 | #define __NR_fsetxattr 228 | ||
481 | __SYSCALL(__NR_fsetxattr, sys_fsetxattr) | ||
482 | #define __NR_getxattr 229 | ||
483 | __SYSCALL(__NR_getxattr, sys_getxattr) | ||
484 | #define __NR_lgetxattr 230 | ||
485 | __SYSCALL(__NR_lgetxattr, sys_lgetxattr) | ||
486 | #define __NR_fgetxattr 231 | ||
487 | __SYSCALL(__NR_fgetxattr, sys_fgetxattr) | ||
488 | #define __NR_listxattr 232 | ||
489 | __SYSCALL(__NR_listxattr, sys_listxattr) | ||
490 | #define __NR_llistxattr 233 | ||
491 | __SYSCALL(__NR_llistxattr, sys_llistxattr) | ||
492 | #define __NR_flistxattr 234 | ||
493 | __SYSCALL(__NR_flistxattr, sys_flistxattr) | ||
494 | #define __NR_removexattr 235 | ||
495 | __SYSCALL(__NR_removexattr, sys_removexattr) | ||
496 | #define __NR_lremovexattr 236 | ||
497 | __SYSCALL(__NR_lremovexattr, sys_lremovexattr) | ||
498 | #define __NR_fremovexattr 237 | ||
499 | __SYSCALL(__NR_fremovexattr, sys_fremovexattr) | ||
500 | #define __NR_tkill 238 | ||
501 | __SYSCALL(__NR_tkill, sys_tkill) | ||
502 | #define __NR_sendfile64 239 | ||
503 | __SYSCALL(__NR_sendfile64, sys_sendfile64) | ||
504 | #define __NR_futex 240 | ||
505 | __SYSCALL(__NR_futex, compat_sys_futex) | ||
506 | #define __NR_sched_setaffinity 241 | ||
507 | __SYSCALL(__NR_sched_setaffinity, compat_sys_sched_setaffinity) | ||
508 | #define __NR_sched_getaffinity 242 | ||
509 | __SYSCALL(__NR_sched_getaffinity, compat_sys_sched_getaffinity) | ||
510 | #define __NR_io_setup 243 | ||
511 | __SYSCALL(__NR_io_setup, compat_sys_io_setup) | ||
512 | #define __NR_io_destroy 244 | ||
513 | __SYSCALL(__NR_io_destroy, sys_io_destroy) | ||
514 | #define __NR_io_getevents 245 | ||
515 | __SYSCALL(__NR_io_getevents, compat_sys_io_getevents) | ||
516 | #define __NR_io_submit 246 | ||
517 | __SYSCALL(__NR_io_submit, compat_sys_io_submit) | ||
518 | #define __NR_io_cancel 247 | ||
519 | __SYSCALL(__NR_io_cancel, sys_io_cancel) | ||
520 | #define __NR_exit_group 248 | ||
521 | __SYSCALL(__NR_exit_group, sys_exit_group) | ||
522 | #define __NR_lookup_dcookie 249 | ||
523 | __SYSCALL(__NR_lookup_dcookie, compat_sys_lookup_dcookie) | ||
524 | #define __NR_epoll_create 250 | ||
525 | __SYSCALL(__NR_epoll_create, sys_epoll_create) | ||
526 | #define __NR_epoll_ctl 251 | ||
527 | __SYSCALL(__NR_epoll_ctl, sys_epoll_ctl) | ||
528 | #define __NR_epoll_wait 252 | ||
529 | __SYSCALL(__NR_epoll_wait, sys_epoll_wait) | ||
530 | #define __NR_remap_file_pages 253 | ||
531 | __SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) | ||
532 | /* 254 for set_thread_area */ | ||
533 | __SYSCALL(254, sys_ni_syscall) | ||
534 | /* 255 for get_thread_area */ | ||
535 | __SYSCALL(255, sys_ni_syscall) | ||
536 | #define __NR_set_tid_address 256 | ||
537 | __SYSCALL(__NR_set_tid_address, sys_set_tid_address) | ||
538 | #define __NR_timer_create 257 | ||
539 | __SYSCALL(__NR_timer_create, compat_sys_timer_create) | ||
540 | #define __NR_timer_settime 258 | ||
541 | __SYSCALL(__NR_timer_settime, compat_sys_timer_settime) | ||
542 | #define __NR_timer_gettime 259 | ||
543 | __SYSCALL(__NR_timer_gettime, compat_sys_timer_gettime) | ||
544 | #define __NR_timer_getoverrun 260 | ||
545 | __SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) | ||
546 | #define __NR_timer_delete 261 | ||
547 | __SYSCALL(__NR_timer_delete, sys_timer_delete) | ||
548 | #define __NR_clock_settime 262 | ||
549 | __SYSCALL(__NR_clock_settime, compat_sys_clock_settime) | ||
550 | #define __NR_clock_gettime 263 | ||
551 | __SYSCALL(__NR_clock_gettime, compat_sys_clock_gettime) | ||
552 | #define __NR_clock_getres 264 | ||
553 | __SYSCALL(__NR_clock_getres, compat_sys_clock_getres) | ||
554 | #define __NR_clock_nanosleep 265 | ||
555 | __SYSCALL(__NR_clock_nanosleep, compat_sys_clock_nanosleep) | ||
556 | #define __NR_statfs64 266 | ||
557 | __SYSCALL(__NR_statfs64, compat_sys_statfs64_wrapper) | ||
558 | #define __NR_fstatfs64 267 | ||
559 | __SYSCALL(__NR_fstatfs64, compat_sys_fstatfs64_wrapper) | ||
560 | #define __NR_tgkill 268 | ||
561 | __SYSCALL(__NR_tgkill, sys_tgkill) | ||
562 | #define __NR_utimes 269 | ||
563 | __SYSCALL(__NR_utimes, compat_sys_utimes) | ||
564 | #define __NR_arm_fadvise64_64 270 | ||
565 | __SYSCALL(__NR_arm_fadvise64_64, compat_sys_fadvise64_64_wrapper) | ||
566 | #define __NR_pciconfig_iobase 271 | ||
567 | __SYSCALL(__NR_pciconfig_iobase, sys_pciconfig_iobase) | ||
568 | #define __NR_pciconfig_read 272 | ||
569 | __SYSCALL(__NR_pciconfig_read, sys_pciconfig_read) | ||
570 | #define __NR_pciconfig_write 273 | ||
571 | __SYSCALL(__NR_pciconfig_write, sys_pciconfig_write) | ||
572 | #define __NR_mq_open 274 | ||
573 | __SYSCALL(__NR_mq_open, compat_sys_mq_open) | ||
574 | #define __NR_mq_unlink 275 | ||
575 | __SYSCALL(__NR_mq_unlink, sys_mq_unlink) | ||
576 | #define __NR_mq_timedsend 276 | ||
577 | __SYSCALL(__NR_mq_timedsend, compat_sys_mq_timedsend) | ||
578 | #define __NR_mq_timedreceive 277 | ||
579 | __SYSCALL(__NR_mq_timedreceive, compat_sys_mq_timedreceive) | ||
580 | #define __NR_mq_notify 278 | ||
581 | __SYSCALL(__NR_mq_notify, compat_sys_mq_notify) | ||
582 | #define __NR_mq_getsetattr 279 | ||
583 | __SYSCALL(__NR_mq_getsetattr, compat_sys_mq_getsetattr) | ||
584 | #define __NR_waitid 280 | ||
585 | __SYSCALL(__NR_waitid, compat_sys_waitid) | ||
586 | #define __NR_socket 281 | ||
587 | __SYSCALL(__NR_socket, sys_socket) | ||
588 | #define __NR_bind 282 | ||
589 | __SYSCALL(__NR_bind, sys_bind) | ||
590 | #define __NR_connect 283 | ||
591 | __SYSCALL(__NR_connect, sys_connect) | ||
592 | #define __NR_listen 284 | ||
593 | __SYSCALL(__NR_listen, sys_listen) | ||
594 | #define __NR_accept 285 | ||
595 | __SYSCALL(__NR_accept, sys_accept) | ||
596 | #define __NR_getsockname 286 | ||
597 | __SYSCALL(__NR_getsockname, sys_getsockname) | ||
598 | #define __NR_getpeername 287 | ||
599 | __SYSCALL(__NR_getpeername, sys_getpeername) | ||
600 | #define __NR_socketpair 288 | ||
601 | __SYSCALL(__NR_socketpair, sys_socketpair) | ||
602 | #define __NR_send 289 | ||
603 | __SYSCALL(__NR_send, sys_send) | ||
604 | #define __NR_sendto 290 | ||
605 | __SYSCALL(__NR_sendto, sys_sendto) | ||
606 | #define __NR_recv 291 | ||
607 | __SYSCALL(__NR_recv, compat_sys_recv) | ||
608 | #define __NR_recvfrom 292 | ||
609 | __SYSCALL(__NR_recvfrom, compat_sys_recvfrom) | ||
610 | #define __NR_shutdown 293 | ||
611 | __SYSCALL(__NR_shutdown, sys_shutdown) | ||
612 | #define __NR_setsockopt 294 | ||
613 | __SYSCALL(__NR_setsockopt, compat_sys_setsockopt) | ||
614 | #define __NR_getsockopt 295 | ||
615 | __SYSCALL(__NR_getsockopt, compat_sys_getsockopt) | ||
616 | #define __NR_sendmsg 296 | ||
617 | __SYSCALL(__NR_sendmsg, compat_sys_sendmsg) | ||
618 | #define __NR_recvmsg 297 | ||
619 | __SYSCALL(__NR_recvmsg, compat_sys_recvmsg) | ||
620 | #define __NR_semop 298 | ||
621 | __SYSCALL(__NR_semop, sys_semop) | ||
622 | #define __NR_semget 299 | ||
623 | __SYSCALL(__NR_semget, sys_semget) | ||
624 | #define __NR_semctl 300 | ||
625 | __SYSCALL(__NR_semctl, compat_sys_semctl) | ||
626 | #define __NR_msgsnd 301 | ||
627 | __SYSCALL(__NR_msgsnd, compat_sys_msgsnd) | ||
628 | #define __NR_msgrcv 302 | ||
629 | __SYSCALL(__NR_msgrcv, compat_sys_msgrcv) | ||
630 | #define __NR_msgget 303 | ||
631 | __SYSCALL(__NR_msgget, sys_msgget) | ||
632 | #define __NR_msgctl 304 | ||
633 | __SYSCALL(__NR_msgctl, compat_sys_msgctl) | ||
634 | #define __NR_shmat 305 | ||
635 | __SYSCALL(__NR_shmat, compat_sys_shmat) | ||
636 | #define __NR_shmdt 306 | ||
637 | __SYSCALL(__NR_shmdt, sys_shmdt) | ||
638 | #define __NR_shmget 307 | ||
639 | __SYSCALL(__NR_shmget, sys_shmget) | ||
640 | #define __NR_shmctl 308 | ||
641 | __SYSCALL(__NR_shmctl, compat_sys_shmctl) | ||
642 | #define __NR_add_key 309 | ||
643 | __SYSCALL(__NR_add_key, sys_add_key) | ||
644 | #define __NR_request_key 310 | ||
645 | __SYSCALL(__NR_request_key, sys_request_key) | ||
646 | #define __NR_keyctl 311 | ||
647 | __SYSCALL(__NR_keyctl, compat_sys_keyctl) | ||
648 | #define __NR_semtimedop 312 | ||
649 | __SYSCALL(__NR_semtimedop, compat_sys_semtimedop) | ||
650 | #define __NR_vserver 313 | ||
651 | __SYSCALL(__NR_vserver, sys_ni_syscall) | ||
652 | #define __NR_ioprio_set 314 | ||
653 | __SYSCALL(__NR_ioprio_set, sys_ioprio_set) | ||
654 | #define __NR_ioprio_get 315 | ||
655 | __SYSCALL(__NR_ioprio_get, sys_ioprio_get) | ||
656 | #define __NR_inotify_init 316 | ||
657 | __SYSCALL(__NR_inotify_init, sys_inotify_init) | ||
658 | #define __NR_inotify_add_watch 317 | ||
659 | __SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch) | ||
660 | #define __NR_inotify_rm_watch 318 | ||
661 | __SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch) | ||
662 | #define __NR_mbind 319 | ||
663 | __SYSCALL(__NR_mbind, compat_sys_mbind) | ||
664 | #define __NR_get_mempolicy 320 | ||
665 | __SYSCALL(__NR_get_mempolicy, compat_sys_get_mempolicy) | ||
666 | #define __NR_set_mempolicy 321 | ||
667 | __SYSCALL(__NR_set_mempolicy, compat_sys_set_mempolicy) | ||
668 | #define __NR_openat 322 | ||
669 | __SYSCALL(__NR_openat, compat_sys_openat) | ||
670 | #define __NR_mkdirat 323 | ||
671 | __SYSCALL(__NR_mkdirat, sys_mkdirat) | ||
672 | #define __NR_mknodat 324 | ||
673 | __SYSCALL(__NR_mknodat, sys_mknodat) | ||
674 | #define __NR_fchownat 325 | ||
675 | __SYSCALL(__NR_fchownat, sys_fchownat) | ||
676 | #define __NR_futimesat 326 | ||
677 | __SYSCALL(__NR_futimesat, compat_sys_futimesat) | ||
678 | #define __NR_fstatat64 327 | ||
679 | __SYSCALL(__NR_fstatat64, sys_fstatat64) | ||
680 | #define __NR_unlinkat 328 | ||
681 | __SYSCALL(__NR_unlinkat, sys_unlinkat) | ||
682 | #define __NR_renameat 329 | ||
683 | __SYSCALL(__NR_renameat, sys_renameat) | ||
684 | #define __NR_linkat 330 | ||
685 | __SYSCALL(__NR_linkat, sys_linkat) | ||
686 | #define __NR_symlinkat 331 | ||
687 | __SYSCALL(__NR_symlinkat, sys_symlinkat) | ||
688 | #define __NR_readlinkat 332 | ||
689 | __SYSCALL(__NR_readlinkat, sys_readlinkat) | ||
690 | #define __NR_fchmodat 333 | ||
691 | __SYSCALL(__NR_fchmodat, sys_fchmodat) | ||
692 | #define __NR_faccessat 334 | ||
693 | __SYSCALL(__NR_faccessat, sys_faccessat) | ||
694 | #define __NR_pselect6 335 | ||
695 | __SYSCALL(__NR_pselect6, compat_sys_pselect6) | ||
696 | #define __NR_ppoll 336 | ||
697 | __SYSCALL(__NR_ppoll, compat_sys_ppoll) | ||
698 | #define __NR_unshare 337 | ||
699 | __SYSCALL(__NR_unshare, sys_unshare) | ||
700 | #define __NR_set_robust_list 338 | ||
701 | __SYSCALL(__NR_set_robust_list, compat_sys_set_robust_list) | ||
702 | #define __NR_get_robust_list 339 | ||
703 | __SYSCALL(__NR_get_robust_list, compat_sys_get_robust_list) | ||
704 | #define __NR_splice 340 | ||
705 | __SYSCALL(__NR_splice, sys_splice) | ||
706 | #define __NR_sync_file_range2 341 | ||
707 | __SYSCALL(__NR_sync_file_range2, compat_sys_sync_file_range2_wrapper) | ||
708 | #define __NR_tee 342 | ||
709 | __SYSCALL(__NR_tee, sys_tee) | ||
710 | #define __NR_vmsplice 343 | ||
711 | __SYSCALL(__NR_vmsplice, compat_sys_vmsplice) | ||
712 | #define __NR_move_pages 344 | ||
713 | __SYSCALL(__NR_move_pages, compat_sys_move_pages) | ||
714 | #define __NR_getcpu 345 | ||
715 | __SYSCALL(__NR_getcpu, sys_getcpu) | ||
716 | #define __NR_epoll_pwait 346 | ||
717 | __SYSCALL(__NR_epoll_pwait, compat_sys_epoll_pwait) | ||
718 | #define __NR_kexec_load 347 | ||
719 | __SYSCALL(__NR_kexec_load, compat_sys_kexec_load) | ||
720 | #define __NR_utimensat 348 | ||
721 | __SYSCALL(__NR_utimensat, compat_sys_utimensat) | ||
722 | #define __NR_signalfd 349 | ||
723 | __SYSCALL(__NR_signalfd, compat_sys_signalfd) | ||
724 | #define __NR_timerfd_create 350 | ||
725 | __SYSCALL(__NR_timerfd_create, sys_timerfd_create) | ||
726 | #define __NR_eventfd 351 | ||
727 | __SYSCALL(__NR_eventfd, sys_eventfd) | ||
728 | #define __NR_fallocate 352 | ||
729 | __SYSCALL(__NR_fallocate, compat_sys_fallocate_wrapper) | ||
730 | #define __NR_timerfd_settime 353 | ||
731 | __SYSCALL(__NR_timerfd_settime, compat_sys_timerfd_settime) | ||
732 | #define __NR_timerfd_gettime 354 | ||
733 | __SYSCALL(__NR_timerfd_gettime, compat_sys_timerfd_gettime) | ||
734 | #define __NR_signalfd4 355 | ||
735 | __SYSCALL(__NR_signalfd4, compat_sys_signalfd4) | ||
736 | #define __NR_eventfd2 356 | ||
737 | __SYSCALL(__NR_eventfd2, sys_eventfd2) | ||
738 | #define __NR_epoll_create1 357 | ||
739 | __SYSCALL(__NR_epoll_create1, sys_epoll_create1) | ||
740 | #define __NR_dup3 358 | ||
741 | __SYSCALL(__NR_dup3, sys_dup3) | ||
742 | #define __NR_pipe2 359 | ||
743 | __SYSCALL(__NR_pipe2, sys_pipe2) | ||
744 | #define __NR_inotify_init1 360 | ||
745 | __SYSCALL(__NR_inotify_init1, sys_inotify_init1) | ||
746 | #define __NR_preadv 361 | ||
747 | __SYSCALL(__NR_preadv, compat_sys_preadv) | ||
748 | #define __NR_pwritev 362 | ||
749 | __SYSCALL(__NR_pwritev, compat_sys_pwritev) | ||
750 | #define __NR_rt_tgsigqueueinfo 363 | ||
751 | __SYSCALL(__NR_rt_tgsigqueueinfo, compat_sys_rt_tgsigqueueinfo) | ||
752 | #define __NR_perf_event_open 364 | ||
753 | __SYSCALL(__NR_perf_event_open, sys_perf_event_open) | ||
754 | #define __NR_recvmmsg 365 | ||
755 | __SYSCALL(__NR_recvmmsg, compat_sys_recvmmsg) | ||
756 | #define __NR_accept4 366 | ||
757 | __SYSCALL(__NR_accept4, sys_accept4) | ||
758 | #define __NR_fanotify_init 367 | ||
759 | __SYSCALL(__NR_fanotify_init, sys_fanotify_init) | ||
760 | #define __NR_fanotify_mark 368 | ||
761 | __SYSCALL(__NR_fanotify_mark, compat_sys_fanotify_mark) | ||
762 | #define __NR_prlimit64 369 | ||
763 | __SYSCALL(__NR_prlimit64, sys_prlimit64) | ||
764 | #define __NR_name_to_handle_at 370 | ||
765 | __SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) | ||
766 | #define __NR_open_by_handle_at 371 | ||
767 | __SYSCALL(__NR_open_by_handle_at, compat_sys_open_by_handle_at) | ||
768 | #define __NR_clock_adjtime 372 | ||
769 | __SYSCALL(__NR_clock_adjtime, compat_sys_clock_adjtime) | ||
770 | #define __NR_syncfs 373 | ||
771 | __SYSCALL(__NR_syncfs, sys_syncfs) | ||
772 | #define __NR_sendmmsg 374 | ||
773 | __SYSCALL(__NR_sendmmsg, compat_sys_sendmmsg) | ||
774 | #define __NR_setns 375 | ||
775 | __SYSCALL(__NR_setns, sys_setns) | ||
776 | #define __NR_process_vm_readv 376 | ||
777 | __SYSCALL(__NR_process_vm_readv, compat_sys_process_vm_readv) | ||
778 | #define __NR_process_vm_writev 377 | ||
779 | __SYSCALL(__NR_process_vm_writev, compat_sys_process_vm_writev) | ||
780 | #define __NR_kcmp 378 | ||
781 | __SYSCALL(__NR_kcmp, sys_kcmp) | ||
782 | #define __NR_finit_module 379 | ||
783 | __SYSCALL(__NR_finit_module, sys_finit_module) | ||
784 | #define __NR_sched_setattr 380 | ||
785 | __SYSCALL(__NR_sched_setattr, sys_sched_setattr) | ||
786 | #define __NR_sched_getattr 381 | ||
787 | __SYSCALL(__NR_sched_getattr, sys_sched_getattr) | ||
788 | #define __NR_renameat2 382 | ||
789 | __SYSCALL(__NR_renameat2, sys_renameat2) | ||
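Each #define/__SYSCALL pair above fixes a compat syscall number and binds it to a handler, while retired numbers stay pinned to sys_ni_syscall so the 32-bit ABI numbering never shifts. One common way such a header is consumed is the redefine-and-include idiom sketched below (a hedged sketch only; the arm64 tree of this era actually built the compat table in assembly, and moved to a C table like this in later kernels):

	/* Sketch: build a syscall table from the __SYSCALL entries above. */
	#undef __SYSCALL
	#define __SYSCALL(nr, sym)	[nr] = sym,

	void * const compat_sys_call_table[__NR_compat_syscalls] = {
		[0 ... __NR_compat_syscalls - 1] = sys_ni_syscall,	/* default */
	#include <asm/unistd32.h>
	};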
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile index cdaedad3afe5..df7ef8768fc2 100644 --- a/arch/arm64/kernel/Makefile +++ b/arch/arm64/kernel/Makefile | |||
@@ -4,8 +4,7 @@ | |||
4 | 4 | ||
5 | CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) | 5 | CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET) |
6 | AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) | 6 | AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET) |
7 | CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) \ | 7 | CFLAGS_efi-stub.o := -DTEXT_OFFSET=$(TEXT_OFFSET) |
8 | -I$(src)/../../../scripts/dtc/libfdt | ||
9 | 8 | ||
10 | CFLAGS_REMOVE_ftrace.o = -pg | 9 | CFLAGS_REMOVE_ftrace.o = -pg |
11 | CFLAGS_REMOVE_insn.o = -pg | 10 | CFLAGS_REMOVE_insn.o = -pg |
@@ -15,7 +14,8 @@ CFLAGS_REMOVE_return_address.o = -pg | |||
15 | arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ | 14 | arm64-obj-y := cputable.o debug-monitors.o entry.o irq.o fpsimd.o \ |
16 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ | 15 | entry-fpsimd.o process.o ptrace.o setup.o signal.o \ |
17 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ | 16 | sys.o stacktrace.o time.o traps.o io.o vdso.o \ |
18 | hyp-stub.o psci.o cpu_ops.o insn.o return_address.o | 17 | hyp-stub.o psci.o cpu_ops.o insn.o return_address.o \ |
18 | cpuinfo.o | ||
19 | 19 | ||
20 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ | 20 | arm64-obj-$(CONFIG_COMPAT) += sys32.o kuser32.o signal32.o \ |
21 | sys_compat.o | 21 | sys_compat.o |
diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c index d62d12fb36c8..cce952440c64 100644 --- a/arch/arm64/kernel/cpu_ops.c +++ b/arch/arm64/kernel/cpu_ops.c | |||
@@ -30,8 +30,8 @@ const struct cpu_operations *cpu_ops[NR_CPUS]; | |||
30 | static const struct cpu_operations *supported_cpu_ops[] __initconst = { | 30 | static const struct cpu_operations *supported_cpu_ops[] __initconst = { |
31 | #ifdef CONFIG_SMP | 31 | #ifdef CONFIG_SMP |
32 | &smp_spin_table_ops, | 32 | &smp_spin_table_ops, |
33 | &cpu_psci_ops, | ||
34 | #endif | 33 | #endif |
34 | &cpu_psci_ops, | ||
35 | NULL, | 35 | NULL, |
36 | }; | 36 | }; |
37 | 37 | ||
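With &cpu_psci_ops moved out of the CONFIG_SMP guard, PSCI-backed cpu_ops (used for CPU suspend, among other things) are also registered on uniprocessor kernels; the resulting table reads:

	static const struct cpu_operations *supported_cpu_ops[] __initconst = {
	#ifdef CONFIG_SMP
		&smp_spin_table_ops,
	#endif
		&cpu_psci_ops,
		NULL,
	};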
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c new file mode 100644 index 000000000000..f798f66634af --- /dev/null +++ b/arch/arm64/kernel/cpuinfo.c | |||
@@ -0,0 +1,192 @@ | |||
1 | /* | ||
2 | * Record and handle CPU attributes. | ||
3 | * | ||
4 | * Copyright (C) 2014 ARM Ltd. | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | #include <asm/arch_timer.h> | ||
18 | #include <asm/cachetype.h> | ||
19 | #include <asm/cpu.h> | ||
20 | #include <asm/cputype.h> | ||
21 | |||
22 | #include <linux/bitops.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/printk.h> | ||
26 | #include <linux/smp.h> | ||
27 | |||
28 | /* | ||
29 | * In case the boot CPU is hotpluggable, we record its initial state and | ||
30 | * current state separately. Certain system registers may contain different | ||
31 | * values depending on configuration at or after reset. | ||
32 | */ | ||
33 | DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); | ||
34 | static struct cpuinfo_arm64 boot_cpu_data; | ||
35 | |||
36 | static char *icache_policy_str[] = { | ||
37 | [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN", | ||
38 | [ICACHE_POLICY_AIVIVT] = "AIVIVT", | ||
39 | [ICACHE_POLICY_VIPT] = "VIPT", | ||
40 | [ICACHE_POLICY_PIPT] = "PIPT", | ||
41 | }; | ||
42 | |||
43 | unsigned long __icache_flags; | ||
44 | |||
45 | static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) | ||
46 | { | ||
47 | unsigned int cpu = smp_processor_id(); | ||
48 | u32 l1ip = CTR_L1IP(info->reg_ctr); | ||
49 | |||
50 | if (l1ip != ICACHE_POLICY_PIPT) | ||
51 | set_bit(ICACHEF_ALIASING, &__icache_flags); | ||
52 | if (l1ip == ICACHE_POLICY_AIVIVT) | ||
53 | set_bit(ICACHEF_AIVIVT, &__icache_flags); | ||
54 | |||
55 | pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); | ||
56 | } | ||
57 | |||
58 | static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu) | ||
59 | { | ||
60 | if ((boot & mask) == (cur & mask)) | ||
61 | return 0; | ||
62 | |||
63 | pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016lx, CPU%d: %#016lx\n", | ||
64 | name, (unsigned long)boot, cpu, (unsigned long)cur); | ||
65 | |||
66 | return 1; | ||
67 | } | ||
68 | |||
69 | #define CHECK_MASK(field, mask, boot, cur, cpu) \ | ||
70 | check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu) | ||
71 | |||
72 | #define CHECK(field, boot, cur, cpu) \ | ||
73 | CHECK_MASK(field, ~0ULL, boot, cur, cpu) | ||
74 | |||
75 | /* | ||
76 | * Verify that CPUs don't have unexpected differences that will cause problems. | ||
77 | */ | ||
78 | static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) | ||
79 | { | ||
80 | unsigned int cpu = smp_processor_id(); | ||
81 | struct cpuinfo_arm64 *boot = &boot_cpu_data; | ||
82 | unsigned int diff = 0; | ||
83 | |||
84 | /* | ||
85 | * The kernel can handle differing I-cache policies, but otherwise | ||
86 | * caches should look identical. Userspace JITs will make use of | ||
87 | * *minLine. | ||
88 | */ | ||
89 | diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu); | ||
90 | |||
91 | /* | ||
92 | * Userspace may perform DC ZVA instructions. Mismatched block sizes | ||
93 | * could result in too much or too little memory being zeroed if a | ||
94 | * process is preempted and migrated between CPUs. | ||
95 | */ | ||
96 | diff |= CHECK(dczid, boot, cur, cpu); | ||
97 | |||
98 | /* If different, timekeeping will be broken (especially with KVM) */ | ||
99 | diff |= CHECK(cntfrq, boot, cur, cpu); | ||
100 | |||
101 | /* | ||
102 | * Even in big.LITTLE, processors should be identical instruction-set | ||
103 | * wise. | ||
104 | */ | ||
105 | diff |= CHECK(id_aa64isar0, boot, cur, cpu); | ||
106 | diff |= CHECK(id_aa64isar1, boot, cur, cpu); | ||
107 | |||
108 | /* | ||
109 | * Differing PARange support is fine as long as all peripherals and | ||
110 | * memory are mapped within the minimum PARange of all CPUs. | ||
111 | * Linux should not care about secure memory. | ||
112 | * ID_AA64MMFR1 is currently RES0. | ||
113 | */ | ||
114 | diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu); | ||
115 | diff |= CHECK(id_aa64mmfr1, boot, cur, cpu); | ||
116 | |||
117 | /* | ||
118 | * EL3 is not our concern. | ||
119 | * ID_AA64PFR1 is currently RES0. | ||
120 | */ | ||
121 | diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu); | ||
122 | diff |= CHECK(id_aa64pfr1, boot, cur, cpu); | ||
123 | |||
124 | /* | ||
125 | * If we have AArch32, we care about 32-bit features for compat. These | ||
126 | * registers should be RES0 otherwise. | ||
127 | */ | ||
128 | diff |= CHECK(id_isar0, boot, cur, cpu); | ||
129 | diff |= CHECK(id_isar1, boot, cur, cpu); | ||
130 | diff |= CHECK(id_isar2, boot, cur, cpu); | ||
131 | diff |= CHECK(id_isar3, boot, cur, cpu); | ||
132 | diff |= CHECK(id_isar4, boot, cur, cpu); | ||
133 | diff |= CHECK(id_isar5, boot, cur, cpu); | ||
134 | diff |= CHECK(id_mmfr0, boot, cur, cpu); | ||
135 | diff |= CHECK(id_mmfr1, boot, cur, cpu); | ||
136 | diff |= CHECK(id_mmfr2, boot, cur, cpu); | ||
137 | diff |= CHECK(id_mmfr3, boot, cur, cpu); | ||
138 | diff |= CHECK(id_pfr0, boot, cur, cpu); | ||
139 | diff |= CHECK(id_pfr1, boot, cur, cpu); | ||
140 | |||
141 | /* | ||
142 | * Mismatched CPU features are a recipe for disaster. Don't even | ||
143 | * pretend to support them. | ||
144 | */ | ||
145 | WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC, | ||
146 | "Unsupported CPU feature variation."); | ||
147 | } | ||
148 | |||
149 | static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) | ||
150 | { | ||
151 | info->reg_cntfrq = arch_timer_get_cntfrq(); | ||
152 | info->reg_ctr = read_cpuid_cachetype(); | ||
153 | info->reg_dczid = read_cpuid(DCZID_EL0); | ||
154 | info->reg_midr = read_cpuid_id(); | ||
155 | |||
156 | info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); | ||
157 | info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); | ||
158 | info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); | ||
159 | info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); | ||
160 | info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); | ||
161 | info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); | ||
162 | |||
163 | info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); | ||
164 | info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); | ||
165 | info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); | ||
166 | info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1); | ||
167 | info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1); | ||
168 | info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1); | ||
169 | info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1); | ||
170 | info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1); | ||
171 | info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1); | ||
172 | info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1); | ||
173 | info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); | ||
174 | info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); | ||
175 | |||
176 | cpuinfo_detect_icache_policy(info); | ||
177 | } | ||
178 | |||
179 | void cpuinfo_store_cpu(void) | ||
180 | { | ||
181 | struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data); | ||
182 | __cpuinfo_store_cpu(info); | ||
183 | cpuinfo_sanity_check(info); | ||
184 | } | ||
185 | |||
186 | void __init cpuinfo_store_boot_cpu(void) | ||
187 | { | ||
188 | struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0); | ||
189 | __cpuinfo_store_cpu(info); | ||
190 | |||
191 | boot_cpu_data = *info; | ||
192 | } | ||
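The CHECK/CHECK_MASK helpers in the new file are thin wrappers around check_reg_mask(); written out, a full-width check and a masked check expand to (a worked expansion of the macros as defined above, nothing new in the tree):

	/* CHECK(dczid, boot, cur, cpu) becomes: */
	check_reg_mask("dczid", ~0ULL, boot->reg_dczid, cur->reg_dczid, cpu);

	/* CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu) becomes: */
	check_reg_mask("ctr", 0xffff3fff, boot->reg_ctr, cur->reg_ctr, cpu);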
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c index a7fb874b595e..fe5b94078d82 100644 --- a/arch/arm64/kernel/debug-monitors.c +++ b/arch/arm64/kernel/debug-monitors.c | |||
@@ -315,20 +315,20 @@ static int brk_handler(unsigned long addr, unsigned int esr, | |||
315 | { | 315 | { |
316 | siginfo_t info; | 316 | siginfo_t info; |
317 | 317 | ||
318 | if (call_break_hook(regs, esr) == DBG_HOOK_HANDLED) | 318 | if (user_mode(regs)) { |
319 | return 0; | 319 | info = (siginfo_t) { |
320 | .si_signo = SIGTRAP, | ||
321 | .si_errno = 0, | ||
322 | .si_code = TRAP_BRKPT, | ||
323 | .si_addr = (void __user *)instruction_pointer(regs), | ||
324 | }; | ||
320 | 325 | ||
321 | if (!user_mode(regs)) | 326 | force_sig_info(SIGTRAP, &info, current); |
327 | } else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) { | ||
328 | pr_warning("Unexpected kernel BRK exception at EL1\n"); | ||
322 | return -EFAULT; | 329 | return -EFAULT; |
330 | } | ||
323 | 331 | ||
324 | info = (siginfo_t) { | ||
325 | .si_signo = SIGTRAP, | ||
326 | .si_errno = 0, | ||
327 | .si_code = TRAP_BRKPT, | ||
328 | .si_addr = (void __user *)instruction_pointer(regs), | ||
329 | }; | ||
330 | |||
331 | force_sig_info(SIGTRAP, &info, current); | ||
332 | return 0; | 332 | return 0; |
333 | } | 333 | } |
334 | 334 | ||
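After this rework, a user-space BRK always raises SIGTRAP directly, and the break-hook list is only consulted for kernel-mode BRKs. A kernel consumer attaches roughly as sketched below (hypothetical ESR encoding and handler name; struct break_hook and register_break_hook() come from asm/debug-monitors.h):

	static int my_brk_fn(struct pt_regs *regs, unsigned int esr)
	{
		/* handle the kernel BRK, then step over the instruction */
		instruction_pointer(regs) += 4;	/* AArch64 insns are 4 bytes */
		return DBG_HOOK_HANDLED;
	}

	static struct break_hook my_brk_hook = {
		.esr_val	= 0xf2000400,	/* hypothetical BRK immediate */
		.esr_mask	= 0xffffffff,
		.fn		= my_brk_fn,
	};

	/* at init time: */
	register_break_hook(&my_brk_hook);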
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c index e786e6cdc400..1317fef8dde9 100644 --- a/arch/arm64/kernel/efi-stub.c +++ b/arch/arm64/kernel/efi-stub.c | |||
@@ -10,46 +10,16 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | #include <linux/efi.h> | 12 | #include <linux/efi.h> |
13 | #include <linux/libfdt.h> | 13 | #include <asm/efi.h> |
14 | #include <asm/sections.h> | 14 | #include <asm/sections.h> |
15 | 15 | ||
16 | /* | 16 | efi_status_t handle_kernel_image(efi_system_table_t *sys_table, |
17 | * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from | 17 | unsigned long *image_addr, |
18 | * start of kernel and may not cross a 2MiB boundary. We set alignment to | 18 | unsigned long *image_size, |
19 | * 2MiB so we know it won't cross a 2MiB boundary. | 19 | unsigned long *reserve_addr, |
20 | */ | 20 | unsigned long *reserve_size, |
21 | #define EFI_FDT_ALIGN SZ_2M /* used by allocate_new_fdt_and_exit_boot() */ | 21 | unsigned long dram_base, |
22 | #define MAX_FDT_OFFSET SZ_512M | 22 | efi_loaded_image_t *image) |
23 | |||
24 | #define efi_call_early(f, ...) sys_table_arg->boottime->f(__VA_ARGS__) | ||
25 | |||
26 | static void efi_char16_printk(efi_system_table_t *sys_table_arg, | ||
27 | efi_char16_t *str); | ||
28 | |||
29 | static efi_status_t efi_open_volume(efi_system_table_t *sys_table, | ||
30 | void *__image, void **__fh); | ||
31 | static efi_status_t efi_file_close(void *handle); | ||
32 | |||
33 | static efi_status_t | ||
34 | efi_file_read(void *handle, unsigned long *size, void *addr); | ||
35 | |||
36 | static efi_status_t | ||
37 | efi_file_size(efi_system_table_t *sys_table, void *__fh, | ||
38 | efi_char16_t *filename_16, void **handle, u64 *file_sz); | ||
39 | |||
40 | /* Include shared EFI stub code */ | ||
41 | #include "../../../drivers/firmware/efi/efi-stub-helper.c" | ||
42 | #include "../../../drivers/firmware/efi/fdt.c" | ||
43 | #include "../../../drivers/firmware/efi/arm-stub.c" | ||
44 | |||
45 | |||
46 | static efi_status_t handle_kernel_image(efi_system_table_t *sys_table, | ||
47 | unsigned long *image_addr, | ||
48 | unsigned long *image_size, | ||
49 | unsigned long *reserve_addr, | ||
50 | unsigned long *reserve_size, | ||
51 | unsigned long dram_base, | ||
52 | efi_loaded_image_t *image) | ||
53 | { | 23 | { |
54 | efi_status_t status; | 24 | efi_status_t status; |
55 | unsigned long kernel_size, kernel_memsize = 0; | 25 | unsigned long kernel_size, kernel_memsize = 0; |
@@ -69,7 +39,7 @@ static efi_status_t handle_kernel_image(efi_system_table_t *sys_table, | |||
69 | if (*image_addr != (dram_base + TEXT_OFFSET)) { | 39 | if (*image_addr != (dram_base + TEXT_OFFSET)) { |
70 | pr_efi_err(sys_table, "Failed to alloc kernel memory\n"); | 40 | pr_efi_err(sys_table, "Failed to alloc kernel memory\n"); |
71 | efi_free(sys_table, kernel_memsize, *image_addr); | 41 | efi_free(sys_table, kernel_memsize, *image_addr); |
72 | return EFI_ERROR; | 42 | return EFI_LOAD_ERROR; |
73 | } | 43 | } |
74 | *image_size = kernel_memsize; | 44 | *image_size = kernel_memsize; |
75 | } | 45 | } |
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c index 14db1f6e8d7f..e72f3100958f 100644 --- a/arch/arm64/kernel/efi.c +++ b/arch/arm64/kernel/efi.c | |||
@@ -414,13 +414,24 @@ static int __init arm64_enter_virtual_mode(void) | |||
414 | for_each_efi_memory_desc(&memmap, md) { | 414 | for_each_efi_memory_desc(&memmap, md) { |
415 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) | 415 | if (!(md->attribute & EFI_MEMORY_RUNTIME)) |
416 | continue; | 416 | continue; |
417 | if (remap_region(md, &virt_md)) | 417 | if (!remap_region(md, &virt_md)) |
418 | ++count; | 418 | goto err_unmap; |
419 | ++count; | ||
419 | } | 420 | } |
420 | 421 | ||
421 | efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table); | 422 | efi.systab = (__force void *)efi_lookup_mapped_addr(efi_system_table); |
422 | if (efi.systab) | 423 | if (!efi.systab) { |
423 | set_bit(EFI_SYSTEM_TABLES, &efi.flags); | 424 | /* |
425 | * If we have no virtual mapping for the System Table at this | ||
426 | * point, the memory map doesn't cover the physical offset where | ||
427 | * it resides. This means the System Table will be inaccessible | ||
428 | * to Runtime Services themselves once the virtual mapping is | ||
429 | * installed. | ||
430 | */ | ||
431 | pr_err("Failed to remap EFI System Table -- buggy firmware?\n"); | ||
432 | goto err_unmap; | ||
433 | } | ||
434 | set_bit(EFI_SYSTEM_TABLES, &efi.flags); | ||
424 | 435 | ||
425 | local_irq_save(flags); | 436 | local_irq_save(flags); |
426 | cpu_switch_mm(idmap_pg_dir, &init_mm); | 437 | cpu_switch_mm(idmap_pg_dir, &init_mm); |
@@ -449,21 +460,18 @@ static int __init arm64_enter_virtual_mode(void) | |||
449 | 460 | ||
450 | /* Set up runtime services function pointers */ | 461 | /* Set up runtime services function pointers */ |
451 | runtime = efi.systab->runtime; | 462 | runtime = efi.systab->runtime; |
452 | efi.get_time = runtime->get_time; | 463 | efi_native_runtime_setup(); |
453 | efi.set_time = runtime->set_time; | ||
454 | efi.get_wakeup_time = runtime->get_wakeup_time; | ||
455 | efi.set_wakeup_time = runtime->set_wakeup_time; | ||
456 | efi.get_variable = runtime->get_variable; | ||
457 | efi.get_next_variable = runtime->get_next_variable; | ||
458 | efi.set_variable = runtime->set_variable; | ||
459 | efi.query_variable_info = runtime->query_variable_info; | ||
460 | efi.update_capsule = runtime->update_capsule; | ||
461 | efi.query_capsule_caps = runtime->query_capsule_caps; | ||
462 | efi.get_next_high_mono_count = runtime->get_next_high_mono_count; | ||
463 | efi.reset_system = runtime->reset_system; | ||
464 | |||
465 | set_bit(EFI_RUNTIME_SERVICES, &efi.flags); | 464 | set_bit(EFI_RUNTIME_SERVICES, &efi.flags); |
466 | 465 | ||
467 | return 0; | 466 | return 0; |
467 | |||
468 | err_unmap: | ||
469 | /* unmap all mappings that succeeded: there are 'count' of those */ | ||
470 | for (virt_md = virtmap; count--; virt_md += memmap.desc_size) { | ||
471 | md = virt_md; | ||
472 | iounmap((__force void __iomem *)md->virt_addr); | ||
473 | } | ||
474 | kfree(virtmap); | ||
475 | return -1; | ||
468 | } | 476 | } |
469 | early_initcall(arm64_enter_virtual_mode); | 477 | early_initcall(arm64_enter_virtual_mode); |
diff --git a/arch/arm64/kernel/entry-fpsimd.S b/arch/arm64/kernel/entry-fpsimd.S index d358ccacfc00..c44a82f146b1 100644 --- a/arch/arm64/kernel/entry-fpsimd.S +++ b/arch/arm64/kernel/entry-fpsimd.S | |||
@@ -52,7 +52,7 @@ ENDPROC(fpsimd_load_state) | |||
52 | ENTRY(fpsimd_save_partial_state) | 52 | ENTRY(fpsimd_save_partial_state) |
53 | fpsimd_save_partial x0, 1, 8, 9 | 53 | fpsimd_save_partial x0, 1, 8, 9 |
54 | ret | 54 | ret |
55 | ENDPROC(fpsimd_load_partial_state) | 55 | ENDPROC(fpsimd_save_partial_state) |
56 | 56 | ||
57 | /* | 57 | /* |
58 | * Load the bottom n FP registers. | 58 | * Load the bottom n FP registers. |
diff --git a/arch/arm64/kernel/entry-ftrace.S b/arch/arm64/kernel/entry-ftrace.S index aa5f9fcbf9ee..38e704e597f7 100644 --- a/arch/arm64/kernel/entry-ftrace.S +++ b/arch/arm64/kernel/entry-ftrace.S | |||
@@ -96,11 +96,6 @@ | |||
96 | * - ftrace_graph_caller to set up an exit hook | 96 | * - ftrace_graph_caller to set up an exit hook |
97 | */ | 97 | */ |
98 | ENTRY(_mcount) | 98 | ENTRY(_mcount) |
99 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
100 | ldr x0, =ftrace_trace_stop | ||
101 | ldr x0, [x0] // if ftrace_trace_stop | ||
102 | ret // return; | ||
103 | #endif | ||
104 | mcount_enter | 99 | mcount_enter |
105 | 100 | ||
106 | ldr x0, =ftrace_trace_function | 101 | ldr x0, =ftrace_trace_function |
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 9ce04ba6bcb0..f0b5e5120a87 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S | |||
@@ -27,7 +27,32 @@ | |||
27 | #include <asm/esr.h> | 27 | #include <asm/esr.h> |
28 | #include <asm/thread_info.h> | 28 | #include <asm/thread_info.h> |
29 | #include <asm/unistd.h> | 29 | #include <asm/unistd.h> |
30 | #include <asm/unistd32.h> | 30 | |
31 | /* | ||
32 | * Context tracking subsystem. Used to instrument transitions | ||
33 | * between user and kernel mode. | ||
34 | */ | ||
35 | .macro ct_user_exit, syscall = 0 | ||
36 | #ifdef CONFIG_CONTEXT_TRACKING | ||
37 | bl context_tracking_user_exit | ||
38 | .if \syscall == 1 | ||
39 | /* | ||
40 | * Save/restore needed during syscalls. Restore syscall arguments from | ||
41 | * the values already saved on stack during kernel_entry. | ||
42 | */ | ||
43 | ldp x0, x1, [sp] | ||
44 | ldp x2, x3, [sp, #S_X2] | ||
45 | ldp x4, x5, [sp, #S_X4] | ||
46 | ldp x6, x7, [sp, #S_X6] | ||
47 | .endif | ||
48 | #endif | ||
49 | .endm | ||
50 | |||
51 | .macro ct_user_enter | ||
52 | #ifdef CONFIG_CONTEXT_TRACKING | ||
53 | bl context_tracking_user_enter | ||
54 | #endif | ||
55 | .endm | ||
31 | 56 | ||
32 | /* | 57 | /* |
33 | * Bad Abort numbers | 58 | * Bad Abort numbers |
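Note: the two macros above wrap the CONFIG_CONTEXT_TRACKING hooks. context_tracking_user_exit() tells the core kernel (RCU, dynticks accounting) that the CPU has left user mode; its counterpart marks the return. Because the bl clobbers caller-saved registers that still hold live syscall arguments, the syscall variant reloads x0-x7 from the pt_regs frame saved by kernel_entry. A rough C model of the per-CPU state being toggled -- an illustration with invented names, not the real implementation:

    enum ctx_state { CONTEXT_KERNEL, CONTEXT_USER };

    static enum ctx_state cpu_ctx_state;   /* stand-in for the real per-cpu data */

    void context_tracking_user_exit(void)
    {
        if (cpu_ctx_state == CONTEXT_USER) {
            cpu_ctx_state = CONTEXT_KERNEL;
            /* the real hook also ends RCU's extended quiescent state */
        }
    }

    void context_tracking_user_enter(void)
    {
        if (cpu_ctx_state == CONTEXT_KERNEL) {
            cpu_ctx_state = CONTEXT_USER;
            /* ...and re-enters it on the way back to user mode */
        }
    }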
@@ -91,6 +116,7 @@ | |||
91 | .macro kernel_exit, el, ret = 0 | 116 | .macro kernel_exit, el, ret = 0 |
92 | ldp x21, x22, [sp, #S_PC] // load ELR, SPSR | 117 | ldp x21, x22, [sp, #S_PC] // load ELR, SPSR |
93 | .if \el == 0 | 118 | .if \el == 0 |
119 | ct_user_enter | ||
94 | ldr x23, [sp, #S_SP] // load return stack pointer | 120 | ldr x23, [sp, #S_SP] // load return stack pointer |
95 | .endif | 121 | .endif |
96 | .if \ret | 122 | .if \ret |
@@ -353,7 +379,6 @@ el0_sync: | |||
353 | lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class | 379 | lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class |
354 | cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state | 380 | cmp x24, #ESR_EL1_EC_SVC64 // SVC in 64-bit state |
355 | b.eq el0_svc | 381 | b.eq el0_svc |
356 | adr lr, ret_to_user | ||
357 | cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 | 382 | cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 |
358 | b.eq el0_da | 383 | b.eq el0_da |
359 | cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 | 384 | cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 |
@@ -382,7 +407,6 @@ el0_sync_compat: | |||
382 | lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class | 407 | lsr x24, x25, #ESR_EL1_EC_SHIFT // exception class |
383 | cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state | 408 | cmp x24, #ESR_EL1_EC_SVC32 // SVC in 32-bit state |
384 | b.eq el0_svc_compat | 409 | b.eq el0_svc_compat |
385 | adr lr, ret_to_user | ||
386 | cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 | 410 | cmp x24, #ESR_EL1_EC_DABT_EL0 // data abort in EL0 |
387 | b.eq el0_da | 411 | b.eq el0_da |
388 | cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 | 412 | cmp x24, #ESR_EL1_EC_IABT_EL0 // instruction abort in EL0 |
@@ -425,48 +449,59 @@ el0_da: | |||
425 | /* | 449 | /* |
426 | * Data abort handling | 450 | * Data abort handling |
427 | */ | 451 | */ |
428 | mrs x0, far_el1 | 452 | mrs x26, far_el1 |
429 | bic x0, x0, #(0xff << 56) | ||
430 | // enable interrupts before calling the main handler | 453 | // enable interrupts before calling the main handler |
431 | enable_dbg_and_irq | 454 | enable_dbg_and_irq |
455 | ct_user_exit | ||
456 | bic x0, x26, #(0xff << 56) | ||
432 | mov x1, x25 | 457 | mov x1, x25 |
433 | mov x2, sp | 458 | mov x2, sp |
459 | adr lr, ret_to_user | ||
434 | b do_mem_abort | 460 | b do_mem_abort |
435 | el0_ia: | 461 | el0_ia: |
436 | /* | 462 | /* |
437 | * Instruction abort handling | 463 | * Instruction abort handling |
438 | */ | 464 | */ |
439 | mrs x0, far_el1 | 465 | mrs x26, far_el1 |
440 | // enable interrupts before calling the main handler | 466 | // enable interrupts before calling the main handler |
441 | enable_dbg_and_irq | 467 | enable_dbg_and_irq |
468 | ct_user_exit | ||
469 | mov x0, x26 | ||
442 | orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts | 470 | orr x1, x25, #1 << 24 // use reserved ISS bit for instruction aborts |
443 | mov x2, sp | 471 | mov x2, sp |
472 | adr lr, ret_to_user | ||
444 | b do_mem_abort | 473 | b do_mem_abort |
445 | el0_fpsimd_acc: | 474 | el0_fpsimd_acc: |
446 | /* | 475 | /* |
447 | * Floating Point or Advanced SIMD access | 476 | * Floating Point or Advanced SIMD access |
448 | */ | 477 | */ |
449 | enable_dbg | 478 | enable_dbg |
479 | ct_user_exit | ||
450 | mov x0, x25 | 480 | mov x0, x25 |
451 | mov x1, sp | 481 | mov x1, sp |
482 | adr lr, ret_to_user | ||
452 | b do_fpsimd_acc | 483 | b do_fpsimd_acc |
453 | el0_fpsimd_exc: | 484 | el0_fpsimd_exc: |
454 | /* | 485 | /* |
455 | * Floating Point or Advanced SIMD exception | 486 | * Floating Point or Advanced SIMD exception |
456 | */ | 487 | */ |
457 | enable_dbg | 488 | enable_dbg |
489 | ct_user_exit | ||
458 | mov x0, x25 | 490 | mov x0, x25 |
459 | mov x1, sp | 491 | mov x1, sp |
492 | adr lr, ret_to_user | ||
460 | b do_fpsimd_exc | 493 | b do_fpsimd_exc |
461 | el0_sp_pc: | 494 | el0_sp_pc: |
462 | /* | 495 | /* |
463 | * Stack or PC alignment exception handling | 496 | * Stack or PC alignment exception handling |
464 | */ | 497 | */ |
465 | mrs x0, far_el1 | 498 | mrs x26, far_el1 |
466 | // enable interrupts before calling the main handler | 499 | // enable interrupts before calling the main handler |
467 | enable_dbg_and_irq | 500 | enable_dbg_and_irq |
501 | mov x0, x26 | ||
468 | mov x1, x25 | 502 | mov x1, x25 |
469 | mov x2, sp | 503 | mov x2, sp |
504 | adr lr, ret_to_user | ||
470 | b do_sp_pc_abort | 505 | b do_sp_pc_abort |
471 | el0_undef: | 506 | el0_undef: |
472 | /* | 507 | /* |
@@ -474,7 +509,9 @@ el0_undef: | |||
474 | */ | 509 | */ |
475 | // enable interrupts before calling the main handler | 510 | // enable interrupts before calling the main handler |
476 | enable_dbg_and_irq | 511 | enable_dbg_and_irq |
512 | ct_user_exit | ||
477 | mov x0, sp | 513 | mov x0, sp |
514 | adr lr, ret_to_user | ||
478 | b do_undefinstr | 515 | b do_undefinstr |
479 | el0_dbg: | 516 | el0_dbg: |
480 | /* | 517 | /* |
@@ -486,12 +523,15 @@ el0_dbg: | |||
486 | mov x2, sp | 523 | mov x2, sp |
487 | bl do_debug_exception | 524 | bl do_debug_exception |
488 | enable_dbg | 525 | enable_dbg |
526 | ct_user_exit | ||
489 | b ret_to_user | 527 | b ret_to_user |
490 | el0_inv: | 528 | el0_inv: |
491 | enable_dbg | 529 | enable_dbg |
530 | ct_user_exit | ||
492 | mov x0, sp | 531 | mov x0, sp |
493 | mov x1, #BAD_SYNC | 532 | mov x1, #BAD_SYNC |
494 | mrs x2, esr_el1 | 533 | mrs x2, esr_el1 |
534 | adr lr, ret_to_user | ||
495 | b bad_mode | 535 | b bad_mode |
496 | ENDPROC(el0_sync) | 536 | ENDPROC(el0_sync) |
497 | 537 | ||
@@ -504,6 +544,7 @@ el0_irq_naked: | |||
504 | bl trace_hardirqs_off | 544 | bl trace_hardirqs_off |
505 | #endif | 545 | #endif |
506 | 546 | ||
547 | ct_user_exit | ||
507 | irq_handler | 548 | irq_handler |
508 | 549 | ||
509 | #ifdef CONFIG_TRACE_IRQFLAGS | 550 | #ifdef CONFIG_TRACE_IRQFLAGS |
@@ -608,6 +649,7 @@ el0_svc: | |||
608 | el0_svc_naked: // compat entry point | 649 | el0_svc_naked: // compat entry point |
609 | stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number | 650 | stp x0, scno, [sp, #S_ORIG_X0] // save the original x0 and syscall number |
610 | enable_dbg_and_irq | 651 | enable_dbg_and_irq |
652 | ct_user_exit 1 | ||
611 | 653 | ||
612 | ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks | 654 | ldr x16, [tsk, #TI_FLAGS] // check for syscall hooks |
613 | tst x16, #_TIF_SYSCALL_WORK | 655 | tst x16, #_TIF_SYSCALL_WORK |
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a2c1195abb7f..144f10567f82 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #include <linux/linkage.h> | 23 | #include <linux/linkage.h> |
24 | #include <linux/init.h> | 24 | #include <linux/init.h> |
25 | #include <linux/irqchip/arm-gic-v3.h> | ||
25 | 26 | ||
26 | #include <asm/assembler.h> | 27 | #include <asm/assembler.h> |
27 | #include <asm/ptrace.h> | 28 | #include <asm/ptrace.h> |
@@ -35,37 +36,31 @@ | |||
35 | #include <asm/page.h> | 36 | #include <asm/page.h> |
36 | #include <asm/virt.h> | 37 | #include <asm/virt.h> |
37 | 38 | ||
38 | /* | ||
39 | * swapper_pg_dir is the virtual address of the initial page table. We place | ||
40 | * the page tables 3 * PAGE_SIZE below KERNEL_RAM_VADDR. The idmap_pg_dir has | ||
41 | * 2 pages and is placed below swapper_pg_dir. | ||
42 | */ | ||
43 | #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) | 39 | #define KERNEL_RAM_VADDR (PAGE_OFFSET + TEXT_OFFSET) |
44 | 40 | ||
45 | #if (KERNEL_RAM_VADDR & 0xfffff) != 0x80000 | 41 | #if (TEXT_OFFSET & 0xf) != 0 |
46 | #error KERNEL_RAM_VADDR must start at 0xXXX80000 | 42 | #error TEXT_OFFSET must be at least 16B aligned |
43 | #elif (PAGE_OFFSET & 0xfffff) != 0 | ||
44 | #error PAGE_OFFSET must be at least 2MB aligned | ||
45 | #elif TEXT_OFFSET > 0xfffff | ||
46 | #error TEXT_OFFSET must be less than 2MB | ||
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | #define SWAPPER_DIR_SIZE (3 * PAGE_SIZE) | 49 | .macro pgtbl, ttb0, ttb1, virt_to_phys |
50 | #define IDMAP_DIR_SIZE (2 * PAGE_SIZE) | 50 | ldr \ttb1, =swapper_pg_dir |
51 | 51 | ldr \ttb0, =idmap_pg_dir | |
52 | .globl swapper_pg_dir | 52 | add \ttb1, \ttb1, \virt_to_phys |
53 | .equ swapper_pg_dir, KERNEL_RAM_VADDR - SWAPPER_DIR_SIZE | 53 | add \ttb0, \ttb0, \virt_to_phys |
54 | |||
55 | .globl idmap_pg_dir | ||
56 | .equ idmap_pg_dir, swapper_pg_dir - IDMAP_DIR_SIZE | ||
57 | |||
58 | .macro pgtbl, ttb0, ttb1, phys | ||
59 | add \ttb1, \phys, #TEXT_OFFSET - SWAPPER_DIR_SIZE | ||
60 | sub \ttb0, \ttb1, #IDMAP_DIR_SIZE | ||
61 | .endm | 54 | .endm |
62 | 55 | ||
63 | #ifdef CONFIG_ARM64_64K_PAGES | 56 | #ifdef CONFIG_ARM64_64K_PAGES |
64 | #define BLOCK_SHIFT PAGE_SHIFT | 57 | #define BLOCK_SHIFT PAGE_SHIFT |
65 | #define BLOCK_SIZE PAGE_SIZE | 58 | #define BLOCK_SIZE PAGE_SIZE |
59 | #define TABLE_SHIFT PMD_SHIFT | ||
66 | #else | 60 | #else |
67 | #define BLOCK_SHIFT SECTION_SHIFT | 61 | #define BLOCK_SHIFT SECTION_SHIFT |
68 | #define BLOCK_SIZE SECTION_SIZE | 62 | #define BLOCK_SIZE SECTION_SIZE |
63 | #define TABLE_SHIFT PUD_SHIFT | ||
69 | #endif | 64 | #endif |
70 | 65 | ||
71 | #define KERNEL_START KERNEL_RAM_VADDR | 66 | #define KERNEL_START KERNEL_RAM_VADDR |
@@ -120,9 +115,9 @@ efi_head: | |||
120 | b stext // branch to kernel start, magic | 115 | b stext // branch to kernel start, magic |
121 | .long 0 // reserved | 116 | .long 0 // reserved |
122 | #endif | 117 | #endif |
123 | .quad TEXT_OFFSET // Image load offset from start of RAM | 118 | .quad _kernel_offset_le // Image load offset from start of RAM, little-endian |
124 | .quad 0 // reserved | 119 | .quad _kernel_size_le // Effective size of kernel image, little-endian |
125 | .quad 0 // reserved | 120 | .quad _kernel_flags_le // Informative flags, little-endian |
126 | .quad 0 // reserved | 121 | .quad 0 // reserved |
127 | .quad 0 // reserved | 122 | .quad 0 // reserved |
128 | .quad 0 // reserved | 123 | .quad 0 // reserved |
@@ -295,6 +290,23 @@ CPU_LE( bic x0, x0, #(3 << 24) ) // Clear the EE and E0E bits for EL1 | |||
295 | msr cnthctl_el2, x0 | 290 | msr cnthctl_el2, x0 |
296 | msr cntvoff_el2, xzr // Clear virtual offset | 291 | msr cntvoff_el2, xzr // Clear virtual offset |
297 | 292 | ||
293 | #ifdef CONFIG_ARM_GIC_V3 | ||
294 | /* GICv3 system register access */ | ||
295 | mrs x0, id_aa64pfr0_el1 | ||
296 | ubfx x0, x0, #24, #4 | ||
297 | cmp x0, #1 | ||
298 | b.ne 3f | ||
299 | |||
300 | mrs_s x0, ICC_SRE_EL2 | ||
301 | orr x0, x0, #ICC_SRE_EL2_SRE // Set ICC_SRE_EL2.SRE==1 | ||
302 | orr x0, x0, #ICC_SRE_EL2_ENABLE // Set ICC_SRE_EL2.Enable==1 | ||
303 | msr_s ICC_SRE_EL2, x0 | ||
304 | isb // Make sure SRE is now set | ||
305 | msr_s ICH_HCR_EL2, xzr // Reset ICC_HCR_EL2 to defaults | ||
306 | |||
307 | 3: | ||
308 | #endif | ||
309 | |||
298 | /* Populate ID registers. */ | 310 | /* Populate ID registers. */ |
299 | mrs x0, midr_el1 | 311 | mrs x0, midr_el1 |
300 | mrs x1, mpidr_el1 | 312 | mrs x1, mpidr_el1 |
@@ -413,7 +425,7 @@ ENTRY(secondary_startup) | |||
413 | mov x23, x0 // x23=current cpu_table | 425 | mov x23, x0 // x23=current cpu_table |
414 | cbz x23, __error_p // invalid processor (x23=0)? | 426 | cbz x23, __error_p // invalid processor (x23=0)? |
415 | 427 | ||
416 | pgtbl x25, x26, x24 // x25=TTBR0, x26=TTBR1 | 428 | pgtbl x25, x26, x28 // x25=TTBR0, x26=TTBR1 |
417 | ldr x12, [x23, #CPU_INFO_SETUP] | 429 | ldr x12, [x23, #CPU_INFO_SETUP] |
418 | add x12, x12, x28 // __virt_to_phys | 430 | add x12, x12, x28 // __virt_to_phys |
419 | blr x12 // initialise processor | 431 | blr x12 // initialise processor |
@@ -455,8 +467,13 @@ ENDPROC(__enable_mmu) | |||
455 | * x27 = *virtual* address to jump to upon completion | 467 | * x27 = *virtual* address to jump to upon completion |
456 | * | 468 | * |
457 | * other registers depend on the function called upon completion | 469 | * other registers depend on the function called upon completion |
470 | * | ||
471 | * We align the entire function to the smallest power of two larger than it to | ||
472 | * ensure it fits within a single block map entry. Otherwise, were PHYS_OFFSET | ||
473 | * close to the end of a 512MB or 1GB block, we might require an additional | ||
474 | * table to map the entire function. | ||
458 | */ | 475 | */ |
459 | .align 6 | 476 | .align 4 |
460 | __turn_mmu_on: | 477 | __turn_mmu_on: |
461 | msr sctlr_el1, x0 | 478 | msr sctlr_el1, x0 |
462 | isb | 479 | isb |
@@ -479,17 +496,38 @@ ENDPROC(__calc_phys_offset) | |||
479 | .quad PAGE_OFFSET | 496 | .quad PAGE_OFFSET |
480 | 497 | ||
481 | /* | 498 | /* |
482 | * Macro to populate the PGD for the corresponding block entry in the next | 499 | * Macro to create a table entry to the next page. |
483 | * level (tbl) for the given virtual address. | 500 | * |
501 | * tbl: page table address | ||
502 | * virt: virtual address | ||
503 | * shift: #imm page table shift | ||
504 | * ptrs: #imm pointers per table page | ||
505 | * | ||
506 | * Preserves: virt | ||
507 | * Corrupts: tmp1, tmp2 | ||
508 | * Returns: tbl -> next level table page address | ||
509 | */ | ||
510 | .macro create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2 | ||
511 | lsr \tmp1, \virt, #\shift | ||
512 | and \tmp1, \tmp1, #\ptrs - 1 // table index | ||
513 | add \tmp2, \tbl, #PAGE_SIZE | ||
514 | orr \tmp2, \tmp2, #PMD_TYPE_TABLE // address of next table and entry type | ||
515 | str \tmp2, [\tbl, \tmp1, lsl #3] | ||
516 | add \tbl, \tbl, #PAGE_SIZE // next level table page | ||
517 | .endm | ||
518 | |||
519 | /* | ||
520 | * Macro to populate the PGD (and possibly PUD) for the corresponding | ||
521 | * block entry in the next level (tbl) for the given virtual address. | ||
484 | * | 522 | * |
485 | * Preserves: pgd, tbl, virt | 523 | * Preserves: tbl, next, virt |
486 | * Corrupts: tmp1, tmp2 | 524 | * Corrupts: tmp1, tmp2 |
487 | */ | 525 | */ |
488 | .macro create_pgd_entry, pgd, tbl, virt, tmp1, tmp2 | 526 | .macro create_pgd_entry, tbl, virt, tmp1, tmp2 |
489 | lsr \tmp1, \virt, #PGDIR_SHIFT | 527 | create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2 |
490 | and \tmp1, \tmp1, #PTRS_PER_PGD - 1 // PGD index | 528 | #if SWAPPER_PGTABLE_LEVELS == 3 |
491 | orr \tmp2, \tbl, #3 // PGD entry table type | 529 | create_table_entry \tbl, \virt, TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2 |
492 | str \tmp2, [\pgd, \tmp1, lsl #3] | 530 | #endif |
493 | .endm | 531 | .endm |
494 | 532 | ||
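Note: the arithmetic inside create_table_entry is easier to follow in C -- shift the virtual address down by the level's shift, mask to an index, and fill that slot with the address of the next-level table plus the TABLE descriptor type. A sketch under the same assumptions as the macro (8-byte descriptors, the next-level table in the following physical page, MMU still off so addresses are physical):

    #include <stdint.h>

    #define PAGE_SIZE      4096ULL
    #define PMD_TYPE_TABLE 0x3ULL    /* "table" descriptor type bits */

    static uint64_t *create_table_entry(uint64_t *tbl, uint64_t virt,
                                        unsigned int shift, unsigned int ptrs)
    {
        uint64_t idx = (virt >> shift) & (ptrs - 1);   /* table index */
        uint64_t next = (uint64_t)tbl + PAGE_SIZE;     /* next level table page */

        tbl[idx] = next | PMD_TYPE_TABLE;   /* address of next table + entry type */
        return (uint64_t *)next;
    }

create_pgd_entry then applies this once for the PGD and, on three-level swapper configurations, a second time for the intermediate level.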
495 | /* | 533 | /* |
@@ -522,7 +560,7 @@ ENDPROC(__calc_phys_offset) | |||
522 | * - pgd entry for fixed mappings (TTBR1) | 560 | * - pgd entry for fixed mappings (TTBR1) |
523 | */ | 561 | */ |
524 | __create_page_tables: | 562 | __create_page_tables: |
525 | pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses | 563 | pgtbl x25, x26, x28 // idmap_pg_dir and swapper_pg_dir addresses |
526 | mov x27, lr | 564 | mov x27, lr |
527 | 565 | ||
528 | /* | 566 | /* |
@@ -550,10 +588,10 @@ __create_page_tables: | |||
550 | /* | 588 | /* |
551 | * Create the identity mapping. | 589 | * Create the identity mapping. |
552 | */ | 590 | */ |
553 | add x0, x25, #PAGE_SIZE // section table address | 591 | mov x0, x25 // idmap_pg_dir |
554 | ldr x3, =KERNEL_START | 592 | ldr x3, =KERNEL_START |
555 | add x3, x3, x28 // __pa(KERNEL_START) | 593 | add x3, x3, x28 // __pa(KERNEL_START) |
556 | create_pgd_entry x25, x0, x3, x5, x6 | 594 | create_pgd_entry x0, x3, x5, x6 |
557 | ldr x6, =KERNEL_END | 595 | ldr x6, =KERNEL_END |
558 | mov x5, x3 // __pa(KERNEL_START) | 596 | mov x5, x3 // __pa(KERNEL_START) |
559 | add x6, x6, x28 // __pa(KERNEL_END) | 597 | add x6, x6, x28 // __pa(KERNEL_END) |
@@ -562,9 +600,9 @@ __create_page_tables: | |||
562 | /* | 600 | /* |
563 | * Map the kernel image (starting with PHYS_OFFSET). | 601 | * Map the kernel image (starting with PHYS_OFFSET). |
564 | */ | 602 | */ |
565 | add x0, x26, #PAGE_SIZE // section table address | 603 | mov x0, x26 // swapper_pg_dir |
566 | mov x5, #PAGE_OFFSET | 604 | mov x5, #PAGE_OFFSET |
567 | create_pgd_entry x26, x0, x5, x3, x6 | 605 | create_pgd_entry x0, x5, x3, x6 |
568 | ldr x6, =KERNEL_END | 606 | ldr x6, =KERNEL_END |
569 | mov x3, x24 // phys offset | 607 | mov x3, x24 // phys offset |
570 | create_block_map x0, x7, x3, x5, x6 | 608 | create_block_map x0, x7, x3, x5, x6 |
@@ -586,13 +624,6 @@ __create_page_tables: | |||
586 | create_block_map x0, x7, x3, x5, x6 | 624 | create_block_map x0, x7, x3, x5, x6 |
587 | 1: | 625 | 1: |
588 | /* | 626 | /* |
589 | * Create the pgd entry for the fixed mappings. | ||
590 | */ | ||
591 | ldr x5, =FIXADDR_TOP // Fixed mapping virtual address | ||
592 | add x0, x26, #2 * PAGE_SIZE // section table address | ||
593 | create_pgd_entry x26, x0, x5, x6, x7 | ||
594 | |||
595 | /* | ||
596 | * Since the page tables have been populated with non-cacheable | 627 | * Since the page tables have been populated with non-cacheable |
597 | * accesses (MMU disabled), invalidate the idmap and swapper page | 628 | * accesses (MMU disabled), invalidate the idmap and swapper page |
598 | * tables again to remove any speculatively loaded cache lines. | 629 | * tables again to remove any speculatively loaded cache lines. |
@@ -611,7 +642,7 @@ ENDPROC(__create_page_tables) | |||
611 | __switch_data: | 642 | __switch_data: |
612 | .quad __mmap_switched | 643 | .quad __mmap_switched |
613 | .quad __bss_start // x6 | 644 | .quad __bss_start // x6 |
614 | .quad _end // x7 | 645 | .quad __bss_stop // x7 |
615 | .quad processor_id // x4 | 646 | .quad processor_id // x4 |
616 | .quad __fdt_pointer // x5 | 647 | .quad __fdt_pointer // x5 |
617 | .quad memstart_addr // x6 | 648 | .quad memstart_addr // x6 |
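Note: the __switch_data change at the end (x7: _end becomes __bss_stop) pairs with the vmlinux.lds.S hunk further down. idmap_pg_dir and swapper_pg_dir now sit between __bss_stop and _end, and they are already populated and live by the time the BSS is cleared, so zeroing up to the old bound would wipe them. Expressed in C (a sketch of the intent, not the actual assembly):

    #include <string.h>

    extern char __bss_start[], __bss_stop[], _end[];

    static void zero_bss(void)
    {
        /*
         * Zero only [__bss_start, __bss_stop). The region up to _end
         * now holds the initial page tables, which the MMU is using.
         */
        memset(__bss_start, 0, __bss_stop - __bss_start);
    }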
diff --git a/arch/arm64/kernel/hyp-stub.S b/arch/arm64/kernel/hyp-stub.S index 0959611d9ff1..a272f335c289 100644 --- a/arch/arm64/kernel/hyp-stub.S +++ b/arch/arm64/kernel/hyp-stub.S | |||
@@ -19,6 +19,7 @@ | |||
19 | 19 | ||
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | #include <linux/linkage.h> | 21 | #include <linux/linkage.h> |
22 | #include <linux/irqchip/arm-gic-v3.h> | ||
22 | 23 | ||
23 | #include <asm/assembler.h> | 24 | #include <asm/assembler.h> |
24 | #include <asm/ptrace.h> | 25 | #include <asm/ptrace.h> |
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h new file mode 100644 index 000000000000..8fae0756e175 --- /dev/null +++ b/arch/arm64/kernel/image.h | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * Linker script macros to generate Image header fields. | ||
3 | * | ||
4 | * Copyright (C) 2014 ARM Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | ||
17 | */ | ||
18 | #ifndef __ASM_IMAGE_H | ||
19 | #define __ASM_IMAGE_H | ||
20 | |||
21 | #ifndef LINKER_SCRIPT | ||
22 | #error This file should only be included in vmlinux.lds.S | ||
23 | #endif | ||
24 | |||
25 | /* | ||
26 | * There aren't any ELF relocations we can use to endian-swap values known only | ||
27 | * at link time (e.g. the subtraction of two symbol addresses), so we must get | ||
28 | * the linker to endian-swap certain values before emitting them. | ||
29 | */ | ||
30 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
31 | #define DATA_LE64(data) \ | ||
32 | ((((data) & 0x00000000000000ff) << 56) | \ | ||
33 | (((data) & 0x000000000000ff00) << 40) | \ | ||
34 | (((data) & 0x0000000000ff0000) << 24) | \ | ||
35 | (((data) & 0x00000000ff000000) << 8) | \ | ||
36 | (((data) & 0x000000ff00000000) >> 8) | \ | ||
37 | (((data) & 0x0000ff0000000000) >> 24) | \ | ||
38 | (((data) & 0x00ff000000000000) >> 40) | \ | ||
39 | (((data) & 0xff00000000000000) >> 56)) | ||
40 | #else | ||
41 | #define DATA_LE64(data) ((data) & 0xffffffffffffffff) | ||
42 | #endif | ||
43 | |||
44 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
45 | #define __HEAD_FLAG_BE 1 | ||
46 | #else | ||
47 | #define __HEAD_FLAG_BE 0 | ||
48 | #endif | ||
49 | |||
50 | #define __HEAD_FLAGS (__HEAD_FLAG_BE << 0) | ||
51 | |||
52 | /* | ||
53 | * These will be output as part of the Image header, which should be little-endian | ||
54 | * regardless of the endianness of the kernel. While constant values could be | ||
55 | * endian swapped in head.S, all are done here for consistency. | ||
56 | */ | ||
57 | #define HEAD_SYMBOLS \ | ||
58 | _kernel_size_le = DATA_LE64(_end - _text); \ | ||
59 | _kernel_offset_le = DATA_LE64(TEXT_OFFSET); \ | ||
60 | _kernel_flags_le = DATA_LE64(__HEAD_FLAGS); | ||
61 | |||
62 | #endif /* __ASM_IMAGE_H */ | ||
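Note: a quick way to check that DATA_LE64 really is a byte swap: each mask isolates one byte and the shift moves it to the mirrored position, so a constant emitted on a big-endian build reads back little-endian. A standalone C check of the same expression:

    #include <assert.h>

    #define DATA_LE64(data) \
        ((((data) & 0x00000000000000ffULL) << 56) | \
         (((data) & 0x000000000000ff00ULL) << 40) | \
         (((data) & 0x0000000000ff0000ULL) << 24) | \
         (((data) & 0x00000000ff000000ULL) <<  8) | \
         (((data) & 0x000000ff00000000ULL) >>  8) | \
         (((data) & 0x0000ff0000000000ULL) >> 24) | \
         (((data) & 0x00ff000000000000ULL) >> 40) | \
         (((data) & 0xff00000000000000ULL) >> 56))

    int main(void)
    {
        /* each byte of 0x0102030405060708 lands in the mirrored position */
        assert(DATA_LE64(0x0102030405060708ULL) == 0x0807060504030201ULL);
        return 0;
    }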
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S index 7787208e8cc6..997e6b27ff6a 100644 --- a/arch/arm64/kernel/kuser32.S +++ b/arch/arm64/kernel/kuser32.S | |||
@@ -28,7 +28,7 @@ | |||
28 | * See Documentation/arm/kernel_user_helpers.txt for formal definitions. | 28 | * See Documentation/arm/kernel_user_helpers.txt for formal definitions. |
29 | */ | 29 | */ |
30 | 30 | ||
31 | #include <asm/unistd32.h> | 31 | #include <asm/unistd.h> |
32 | 32 | ||
33 | .align 5 | 33 | .align 5 |
34 | .globl __kuser_helper_start | 34 | .globl __kuser_helper_start |
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 43b7c34f92cb..1309d64aa926 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c | |||
@@ -51,6 +51,12 @@ | |||
51 | #include <asm/processor.h> | 51 | #include <asm/processor.h> |
52 | #include <asm/stacktrace.h> | 52 | #include <asm/stacktrace.h> |
53 | 53 | ||
54 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
55 | #include <linux/stackprotector.h> | ||
56 | unsigned long __stack_chk_guard __read_mostly; | ||
57 | EXPORT_SYMBOL(__stack_chk_guard); | ||
58 | #endif | ||
59 | |||
54 | static void setup_restart(void) | 60 | static void setup_restart(void) |
55 | { | 61 | { |
56 | /* | 62 | /* |
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 9e9798f91172..553954771a67 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c | |||
@@ -235,7 +235,7 @@ static void psci_sys_poweroff(void) | |||
235 | * PSCI Function IDs for v0.2+ are well defined so use | 235 | * PSCI Function IDs for v0.2+ are well defined so use |
236 | * standard values. | 236 | * standard values. |
237 | */ | 237 | */ |
238 | static int psci_0_2_init(struct device_node *np) | 238 | static int __init psci_0_2_init(struct device_node *np) |
239 | { | 239 | { |
240 | int err, ver; | 240 | int err, ver; |
241 | 241 | ||
@@ -296,7 +296,7 @@ out_put_node: | |||
296 | /* | 296 | /* |
297 | * PSCI < v0.2 get PSCI Function IDs via DT. | 297 | * PSCI < v0.2 get PSCI Function IDs via DT. |
298 | */ | 298 | */ |
299 | static int psci_0_1_init(struct device_node *np) | 299 | static int __init psci_0_1_init(struct device_node *np) |
300 | { | 300 | { |
301 | u32 id; | 301 | u32 id; |
302 | int err; | 302 | int err; |
@@ -434,9 +434,11 @@ static int cpu_psci_cpu_kill(unsigned int cpu) | |||
434 | return 0; | 434 | return 0; |
435 | } | 435 | } |
436 | #endif | 436 | #endif |
437 | #endif | ||
437 | 438 | ||
438 | const struct cpu_operations cpu_psci_ops = { | 439 | const struct cpu_operations cpu_psci_ops = { |
439 | .name = "psci", | 440 | .name = "psci", |
441 | #ifdef CONFIG_SMP | ||
440 | .cpu_init = cpu_psci_cpu_init, | 442 | .cpu_init = cpu_psci_cpu_init, |
441 | .cpu_prepare = cpu_psci_cpu_prepare, | 443 | .cpu_prepare = cpu_psci_cpu_prepare, |
442 | .cpu_boot = cpu_psci_cpu_boot, | 444 | .cpu_boot = cpu_psci_cpu_boot, |
@@ -445,6 +447,6 @@ const struct cpu_operations cpu_psci_ops = { | |||
445 | .cpu_die = cpu_psci_cpu_die, | 447 | .cpu_die = cpu_psci_cpu_die, |
446 | .cpu_kill = cpu_psci_cpu_kill, | 448 | .cpu_kill = cpu_psci_cpu_kill, |
447 | #endif | 449 | #endif |
450 | #endif | ||
448 | }; | 451 | }; |
449 | 452 | ||
450 | #endif | ||
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 9fde010c945f..0310811bd77d 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c | |||
@@ -19,6 +19,7 @@ | |||
19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. | 19 | * along with this program. If not, see <http://www.gnu.org/licenses/>. |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/audit.h> | ||
22 | #include <linux/compat.h> | 23 | #include <linux/compat.h> |
23 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
24 | #include <linux/sched.h> | 25 | #include <linux/sched.h> |
@@ -39,6 +40,7 @@ | |||
39 | #include <asm/compat.h> | 40 | #include <asm/compat.h> |
40 | #include <asm/debug-monitors.h> | 41 | #include <asm/debug-monitors.h> |
41 | #include <asm/pgtable.h> | 42 | #include <asm/pgtable.h> |
43 | #include <asm/syscall.h> | ||
42 | #include <asm/traps.h> | 44 | #include <asm/traps.h> |
43 | #include <asm/system_misc.h> | 45 | #include <asm/system_misc.h> |
44 | 46 | ||
@@ -1113,11 +1115,20 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) | |||
1113 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) | 1115 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) |
1114 | trace_sys_enter(regs, regs->syscallno); | 1116 | trace_sys_enter(regs, regs->syscallno); |
1115 | 1117 | ||
1118 | #ifdef CONFIG_AUDITSYSCALL | ||
1119 | audit_syscall_entry(syscall_get_arch(), regs->syscallno, | ||
1120 | regs->orig_x0, regs->regs[1], regs->regs[2], regs->regs[3]); | ||
1121 | #endif | ||
1122 | |||
1116 | return regs->syscallno; | 1123 | return regs->syscallno; |
1117 | } | 1124 | } |
1118 | 1125 | ||
1119 | asmlinkage void syscall_trace_exit(struct pt_regs *regs) | 1126 | asmlinkage void syscall_trace_exit(struct pt_regs *regs) |
1120 | { | 1127 | { |
1128 | #ifdef CONFIG_AUDITSYSCALL | ||
1129 | audit_syscall_exit(regs); | ||
1130 | #endif | ||
1131 | |||
1121 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) | 1132 | if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) |
1122 | trace_sys_exit(regs, regs_return_value(regs)); | 1133 | trace_sys_exit(regs, regs_return_value(regs)); |
1123 | 1134 | ||
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 46d1125571f6..f6f0ccf35ae6 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/efi.h> | 45 | #include <linux/efi.h> |
46 | 46 | ||
47 | #include <asm/fixmap.h> | 47 | #include <asm/fixmap.h> |
48 | #include <asm/cpu.h> | ||
48 | #include <asm/cputype.h> | 49 | #include <asm/cputype.h> |
49 | #include <asm/elf.h> | 50 | #include <asm/elf.h> |
50 | #include <asm/cputable.h> | 51 | #include <asm/cputable.h> |
@@ -77,7 +78,6 @@ unsigned int compat_elf_hwcap2 __read_mostly; | |||
77 | #endif | 78 | #endif |
78 | 79 | ||
79 | static const char *cpu_name; | 80 | static const char *cpu_name; |
80 | static const char *machine_name; | ||
81 | phys_addr_t __fdt_pointer __initdata; | 81 | phys_addr_t __fdt_pointer __initdata; |
82 | 82 | ||
83 | /* | 83 | /* |
@@ -219,6 +219,8 @@ static void __init setup_processor(void) | |||
219 | sprintf(init_utsname()->machine, ELF_PLATFORM); | 219 | sprintf(init_utsname()->machine, ELF_PLATFORM); |
220 | elf_hwcap = 0; | 220 | elf_hwcap = 0; |
221 | 221 | ||
222 | cpuinfo_store_boot_cpu(); | ||
223 | |||
222 | /* | 224 | /* |
223 | * Check for sane CTR_EL0.CWG value. | 225 | * Check for sane CTR_EL0.CWG value. |
224 | */ | 226 | */ |
@@ -307,8 +309,6 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) | |||
307 | while (true) | 309 | while (true) |
308 | cpu_relax(); | 310 | cpu_relax(); |
309 | } | 311 | } |
310 | |||
311 | machine_name = of_flat_dt_get_machine_name(); | ||
312 | } | 312 | } |
313 | 313 | ||
314 | /* | 314 | /* |
@@ -417,14 +417,12 @@ static int __init arm64_device_init(void) | |||
417 | } | 417 | } |
418 | arch_initcall_sync(arm64_device_init); | 418 | arch_initcall_sync(arm64_device_init); |
419 | 419 | ||
420 | static DEFINE_PER_CPU(struct cpu, cpu_data); | ||
421 | |||
422 | static int __init topology_init(void) | 420 | static int __init topology_init(void) |
423 | { | 421 | { |
424 | int i; | 422 | int i; |
425 | 423 | ||
426 | for_each_possible_cpu(i) { | 424 | for_each_possible_cpu(i) { |
427 | struct cpu *cpu = &per_cpu(cpu_data, i); | 425 | struct cpu *cpu = &per_cpu(cpu_data.cpu, i); |
428 | cpu->hotpluggable = 1; | 426 | cpu->hotpluggable = 1; |
429 | register_cpu(cpu, i); | 427 | register_cpu(cpu, i); |
430 | } | 428 | } |
@@ -449,10 +447,21 @@ static int c_show(struct seq_file *m, void *v) | |||
449 | { | 447 | { |
450 | int i; | 448 | int i; |
451 | 449 | ||
452 | seq_printf(m, "Processor\t: %s rev %d (%s)\n", | 450 | /* |
453 | cpu_name, read_cpuid_id() & 15, ELF_PLATFORM); | 451 | * Dump out the common processor features in a single line. Userspace |
452 | * should read the hwcaps with getauxval(AT_HWCAP) rather than | ||
453 | * attempting to parse this. | ||
454 | */ | ||
455 | seq_puts(m, "features\t:"); | ||
456 | for (i = 0; hwcap_str[i]; i++) | ||
457 | if (elf_hwcap & (1 << i)) | ||
458 | seq_printf(m, " %s", hwcap_str[i]); | ||
459 | seq_puts(m, "\n\n"); | ||
454 | 460 | ||
455 | for_each_online_cpu(i) { | 461 | for_each_online_cpu(i) { |
462 | struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); | ||
463 | u32 midr = cpuinfo->reg_midr; | ||
464 | |||
456 | /* | 465 | /* |
457 | * glibc reads /proc/cpuinfo to determine the number of | 466 | * glibc reads /proc/cpuinfo to determine the number of |
458 | * online processors, looking for lines beginning with | 467 | * online processors, looking for lines beginning with |
@@ -461,25 +470,13 @@ static int c_show(struct seq_file *m, void *v) | |||
461 | #ifdef CONFIG_SMP | 470 | #ifdef CONFIG_SMP |
462 | seq_printf(m, "processor\t: %d\n", i); | 471 | seq_printf(m, "processor\t: %d\n", i); |
463 | #endif | 472 | #endif |
473 | seq_printf(m, "implementer\t: 0x%02x\n", | ||
474 | MIDR_IMPLEMENTOR(midr)); | ||
475 | seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr)); | ||
476 | seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr)); | ||
477 | seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr)); | ||
464 | } | 478 | } |
465 | 479 | ||
466 | /* dump out the processor features */ | ||
467 | seq_puts(m, "Features\t: "); | ||
468 | |||
469 | for (i = 0; hwcap_str[i]; i++) | ||
470 | if (elf_hwcap & (1 << i)) | ||
471 | seq_printf(m, "%s ", hwcap_str[i]); | ||
472 | |||
473 | seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); | ||
474 | seq_printf(m, "CPU architecture: AArch64\n"); | ||
475 | seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15); | ||
476 | seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff); | ||
477 | seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); | ||
478 | |||
479 | seq_puts(m, "\n"); | ||
480 | |||
481 | seq_printf(m, "Hardware\t: %s\n", machine_name); | ||
482 | |||
483 | return 0; | 480 | return 0; |
484 | } | 481 | } |
485 | 482 | ||
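Note: following the new comment's advice, userspace should query hardware capabilities through the ELF auxiliary vector rather than parsing the reworked /proc/cpuinfo. For example, using glibc's getauxval(3); the HWCAP_FP/HWCAP_ASIMD fallback values below mirror the arm64 <asm/hwcap.h> definitions:

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_FP
    #define HWCAP_FP    (1 << 0)
    #endif
    #ifndef HWCAP_ASIMD
    #define HWCAP_ASIMD (1 << 1)
    #endif

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("fp: %s, asimd: %s\n",
               (hwcap & HWCAP_FP) ? "yes" : "no",
               (hwcap & HWCAP_ASIMD) ? "yes" : "no");
        return 0;
    }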
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c index 3491c638f172..c5ee208321c3 100644 --- a/arch/arm64/kernel/signal32.c +++ b/arch/arm64/kernel/signal32.c | |||
@@ -27,7 +27,7 @@ | |||
27 | #include <asm/fpsimd.h> | 27 | #include <asm/fpsimd.h> |
28 | #include <asm/signal32.h> | 28 | #include <asm/signal32.h> |
29 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
30 | #include <asm/unistd32.h> | 30 | #include <asm/unistd.h> |
31 | 31 | ||
32 | struct compat_sigcontext { | 32 | struct compat_sigcontext { |
33 | /* We always set these two fields to 0 */ | 33 | /* We always set these two fields to 0 */ |
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 40f38f46c8e0..3e2f5ebbf63e 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c | |||
@@ -39,6 +39,7 @@ | |||
39 | 39 | ||
40 | #include <asm/atomic.h> | 40 | #include <asm/atomic.h> |
41 | #include <asm/cacheflush.h> | 41 | #include <asm/cacheflush.h> |
42 | #include <asm/cpu.h> | ||
42 | #include <asm/cputype.h> | 43 | #include <asm/cputype.h> |
43 | #include <asm/cpu_ops.h> | 44 | #include <asm/cpu_ops.h> |
44 | #include <asm/mmu_context.h> | 45 | #include <asm/mmu_context.h> |
@@ -155,6 +156,11 @@ asmlinkage void secondary_start_kernel(void) | |||
155 | cpu_ops[cpu]->cpu_postboot(); | 156 | cpu_ops[cpu]->cpu_postboot(); |
156 | 157 | ||
157 | /* | 158 | /* |
159 | * Log the CPU info before it is marked online and might get read. | ||
160 | */ | ||
161 | cpuinfo_store_cpu(); | ||
162 | |||
163 | /* | ||
158 | * Enable GIC and timers. | 164 | * Enable GIC and timers. |
159 | */ | 165 | */ |
160 | notify_cpu_starting(cpu); | 166 | notify_cpu_starting(cpu); |
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c index 1fa9ce4afd8f..55a99b9a97e0 100644 --- a/arch/arm64/kernel/suspend.c +++ b/arch/arm64/kernel/suspend.c | |||
@@ -119,7 +119,7 @@ int cpu_suspend(unsigned long arg) | |||
119 | extern struct sleep_save_sp sleep_save_sp; | 119 | extern struct sleep_save_sp sleep_save_sp; |
120 | extern phys_addr_t sleep_idmap_phys; | 120 | extern phys_addr_t sleep_idmap_phys; |
121 | 121 | ||
122 | static int cpu_suspend_init(void) | 122 | static int __init cpu_suspend_init(void) |
123 | { | 123 | { |
124 | void *ctx_ptr; | 124 | void *ctx_ptr; |
125 | 125 | ||
diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index 26e9c4eeaba8..de2b0226e06d 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
27 | 27 | ||
28 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
29 | #include <asm/unistd32.h> | 29 | #include <asm/unistd.h> |
30 | 30 | ||
31 | static inline void | 31 | static inline void |
32 | do_compat_cache_op(unsigned long start, unsigned long end, int flags) | 32 | do_compat_cache_op(unsigned long start, unsigned long end, int flags) |
diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 43514f905916..b6ee26b0939a 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/of.h> | 20 | #include <linux/of.h> |
21 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
22 | 22 | ||
23 | #include <asm/cputype.h> | ||
23 | #include <asm/topology.h> | 24 | #include <asm/topology.h> |
24 | 25 | ||
25 | static int __init get_cpu_for_node(struct device_node *node) | 26 | static int __init get_cpu_for_node(struct device_node *node) |
@@ -188,13 +189,9 @@ static int __init parse_dt_topology(void) | |||
188 | * Check that all cores are in the topology; the SMP code will | 189 | * Check that all cores are in the topology; the SMP code will |
189 | * only mark cores described in the DT as possible. | 190 | * only mark cores described in the DT as possible. |
190 | */ | 191 | */ |
191 | for_each_possible_cpu(cpu) { | 192 | for_each_possible_cpu(cpu) |
192 | if (cpu_topology[cpu].cluster_id == -1) { | 193 | if (cpu_topology[cpu].cluster_id == -1) |
193 | pr_err("CPU%d: No topology information specified\n", | ||
194 | cpu); | ||
195 | ret = -EINVAL; | 194 | ret = -EINVAL; |
196 | } | ||
197 | } | ||
198 | 195 | ||
199 | out_map: | 196 | out_map: |
200 | of_node_put(map); | 197 | of_node_put(map); |
@@ -219,14 +216,6 @@ static void update_siblings_masks(unsigned int cpuid) | |||
219 | struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; | 216 | struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; |
220 | int cpu; | 217 | int cpu; |
221 | 218 | ||
222 | if (cpuid_topo->cluster_id == -1) { | ||
223 | /* | ||
224 | * DT does not contain topology information for this cpu. | ||
225 | */ | ||
226 | pr_debug("CPU%u: No topology information configured\n", cpuid); | ||
227 | return; | ||
228 | } | ||
229 | |||
230 | /* update core and thread sibling masks */ | 219 | /* update core and thread sibling masks */ |
231 | for_each_possible_cpu(cpu) { | 220 | for_each_possible_cpu(cpu) { |
232 | cpu_topo = &cpu_topology[cpu]; | 221 | cpu_topo = &cpu_topology[cpu]; |
@@ -249,6 +238,36 @@ static void update_siblings_masks(unsigned int cpuid) | |||
249 | 238 | ||
250 | void store_cpu_topology(unsigned int cpuid) | 239 | void store_cpu_topology(unsigned int cpuid) |
251 | { | 240 | { |
241 | struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; | ||
242 | u64 mpidr; | ||
243 | |||
244 | if (cpuid_topo->cluster_id != -1) | ||
245 | goto topology_populated; | ||
246 | |||
247 | mpidr = read_cpuid_mpidr(); | ||
248 | |||
249 | /* Uniprocessor systems can rely on default topology values */ | ||
250 | if (mpidr & MPIDR_UP_BITMASK) | ||
251 | return; | ||
252 | |||
253 | /* Create cpu topology mapping based on MPIDR. */ | ||
254 | if (mpidr & MPIDR_MT_BITMASK) { | ||
255 | /* Multiprocessor system : Multi-threads per core */ | ||
256 | cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); | ||
257 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); | ||
258 | cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); | ||
259 | } else { | ||
260 | /* Multiprocessor system : Single-thread per core */ | ||
261 | cpuid_topo->thread_id = -1; | ||
262 | cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); | ||
263 | cpuid_topo->cluster_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); | ||
264 | } | ||
265 | |||
266 | pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n", | ||
267 | cpuid, cpuid_topo->cluster_id, cpuid_topo->core_id, | ||
268 | cpuid_topo->thread_id, mpidr); | ||
269 | |||
270 | topology_populated: | ||
252 | update_siblings_masks(cpuid); | 271 | update_siblings_masks(cpuid); |
253 | } | 272 | } |
254 | 273 | ||
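Note: store_cpu_topology() above decodes the architectural MPIDR_EL1 affinity fields: Aff0 is bits [7:0], Aff1 [15:8], Aff2 [23:16], with bit 24 (the MT bit) deciding whether Aff0 numbers hardware threads or cores. The same extraction in plain C -- a simplified MPIDR_AFF valid for levels 0-2 only (the kernel's MPIDR_AFFINITY_LEVEL also handles Aff3 at bits [39:32]):

    #include <stdint.h>

    #define MPIDR_MT_BITMASK (1ULL << 24)
    #define MPIDR_AFF(mpidr, level) ((int)(((mpidr) >> (8 * (level))) & 0xff))

    struct topo { int thread_id, core_id, cluster_id; };

    static struct topo decode_mpidr(uint64_t mpidr)
    {
        struct topo t;

        if (mpidr & MPIDR_MT_BITMASK) {
            /* multi-threaded: Aff0 = thread, Aff1 = core, Aff2 = cluster */
            t.thread_id  = MPIDR_AFF(mpidr, 0);
            t.core_id    = MPIDR_AFF(mpidr, 1);
            t.cluster_id = MPIDR_AFF(mpidr, 2);
        } else {
            /* single-threaded: Aff0 = core, Aff1 = cluster */
            t.thread_id  = -1;
            t.core_id    = MPIDR_AFF(mpidr, 0);
            t.cluster_id = MPIDR_AFF(mpidr, 1);
        }
        return t;
    }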
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index c43cfa9b8304..02cd3f023e9a 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c | |||
@@ -156,7 +156,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk) | |||
156 | frame.pc = thread_saved_pc(tsk); | 156 | frame.pc = thread_saved_pc(tsk); |
157 | } | 157 | } |
158 | 158 | ||
159 | printk("Call trace:\n"); | 159 | pr_emerg("Call trace:\n"); |
160 | while (1) { | 160 | while (1) { |
161 | unsigned long where = frame.pc; | 161 | unsigned long where = frame.pc; |
162 | int ret; | 162 | int ret; |
@@ -331,17 +331,22 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr) | |||
331 | 331 | ||
332 | void __pte_error(const char *file, int line, unsigned long val) | 332 | void __pte_error(const char *file, int line, unsigned long val) |
333 | { | 333 | { |
334 | printk("%s:%d: bad pte %016lx.\n", file, line, val); | 334 | pr_crit("%s:%d: bad pte %016lx.\n", file, line, val); |
335 | } | 335 | } |
336 | 336 | ||
337 | void __pmd_error(const char *file, int line, unsigned long val) | 337 | void __pmd_error(const char *file, int line, unsigned long val) |
338 | { | 338 | { |
339 | printk("%s:%d: bad pmd %016lx.\n", file, line, val); | 339 | pr_crit("%s:%d: bad pmd %016lx.\n", file, line, val); |
340 | } | ||
341 | |||
342 | void __pud_error(const char *file, int line, unsigned long val) | ||
343 | { | ||
344 | pr_crit("%s:%d: bad pud %016lx.\n", file, line, val); | ||
340 | } | 345 | } |
341 | 346 | ||
342 | void __pgd_error(const char *file, int line, unsigned long val) | 347 | void __pgd_error(const char *file, int line, unsigned long val) |
343 | { | 348 | { |
344 | printk("%s:%d: bad pgd %016lx.\n", file, line, val); | 349 | pr_crit("%s:%d: bad pgd %016lx.\n", file, line, val); |
345 | } | 350 | } |
346 | 351 | ||
347 | void __init trap_init(void) | 352 | void __init trap_init(void) |
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c index 50384fec56c4..24f2e8c62479 100644 --- a/arch/arm64/kernel/vdso.c +++ b/arch/arm64/kernel/vdso.c | |||
@@ -88,22 +88,29 @@ int aarch32_setup_vectors_page(struct linux_binprm *bprm, int uses_interp) | |||
88 | { | 88 | { |
89 | struct mm_struct *mm = current->mm; | 89 | struct mm_struct *mm = current->mm; |
90 | unsigned long addr = AARCH32_VECTORS_BASE; | 90 | unsigned long addr = AARCH32_VECTORS_BASE; |
91 | int ret; | 91 | static struct vm_special_mapping spec = { |
92 | .name = "[vectors]", | ||
93 | .pages = vectors_page, | ||
94 | |||
95 | }; | ||
96 | void *ret; | ||
92 | 97 | ||
93 | down_write(&mm->mmap_sem); | 98 | down_write(&mm->mmap_sem); |
94 | current->mm->context.vdso = (void *)addr; | 99 | current->mm->context.vdso = (void *)addr; |
95 | 100 | ||
96 | /* Map vectors page at the high address. */ | 101 | /* Map vectors page at the high address. */ |
97 | ret = install_special_mapping(mm, addr, PAGE_SIZE, | 102 | ret = _install_special_mapping(mm, addr, PAGE_SIZE, |
98 | VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC, | 103 | VM_READ|VM_EXEC|VM_MAYREAD|VM_MAYEXEC, |
99 | vectors_page); | 104 | &spec); |
100 | 105 | ||
101 | up_write(&mm->mmap_sem); | 106 | up_write(&mm->mmap_sem); |
102 | 107 | ||
103 | return ret; | 108 | return PTR_ERR_OR_ZERO(ret); |
104 | } | 109 | } |
105 | #endif /* CONFIG_COMPAT */ | 110 | #endif /* CONFIG_COMPAT */ |
106 | 111 | ||
112 | static struct vm_special_mapping vdso_spec[2]; | ||
113 | |||
107 | static int __init vdso_init(void) | 114 | static int __init vdso_init(void) |
108 | { | 115 | { |
109 | int i; | 116 | int i; |
@@ -114,8 +121,8 @@ static int __init vdso_init(void) | |||
114 | } | 121 | } |
115 | 122 | ||
116 | vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; | 123 | vdso_pages = (&vdso_end - &vdso_start) >> PAGE_SHIFT; |
117 | pr_info("vdso: %ld pages (%ld code, %ld data) at base %p\n", | 124 | pr_info("vdso: %ld pages (%ld code @ %p, %ld data @ %p)\n", |
118 | vdso_pages + 1, vdso_pages, 1L, &vdso_start); | 125 | vdso_pages + 1, vdso_pages, &vdso_start, 1L, vdso_data); |
119 | 126 | ||
120 | /* Allocate the vDSO pagelist, plus a page for the data. */ | 127 | /* Allocate the vDSO pagelist, plus a page for the data. */ |
121 | vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), | 128 | vdso_pagelist = kcalloc(vdso_pages + 1, sizeof(struct page *), |
@@ -123,12 +130,23 @@ static int __init vdso_init(void) | |||
123 | if (vdso_pagelist == NULL) | 130 | if (vdso_pagelist == NULL) |
124 | return -ENOMEM; | 131 | return -ENOMEM; |
125 | 132 | ||
133 | /* Grab the vDSO data page. */ | ||
134 | vdso_pagelist[0] = virt_to_page(vdso_data); | ||
135 | |||
126 | /* Grab the vDSO code pages. */ | 136 | /* Grab the vDSO code pages. */ |
127 | for (i = 0; i < vdso_pages; i++) | 137 | for (i = 0; i < vdso_pages; i++) |
128 | vdso_pagelist[i] = virt_to_page(&vdso_start + i * PAGE_SIZE); | 138 | vdso_pagelist[i + 1] = virt_to_page(&vdso_start + i * PAGE_SIZE); |
129 | 139 | ||
130 | /* Grab the vDSO data page. */ | 140 | /* Populate the special mapping structures */ |
131 | vdso_pagelist[i] = virt_to_page(vdso_data); | 141 | vdso_spec[0] = (struct vm_special_mapping) { |
142 | .name = "[vvar]", | ||
143 | .pages = vdso_pagelist, | ||
144 | }; | ||
145 | |||
146 | vdso_spec[1] = (struct vm_special_mapping) { | ||
147 | .name = "[vdso]", | ||
148 | .pages = &vdso_pagelist[1], | ||
149 | }; | ||
132 | 150 | ||
133 | return 0; | 151 | return 0; |
134 | } | 152 | } |
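Note: with the data page moved to the front, one allocation backs both special mappings: index 0 is the data page exposed as "[vvar]" and indices 1..n are the code pages exposed as "[vdso]", so the two vm_special_mapping entries simply point at different offsets into the same array. Schematically (illustrative types only, not the kernel structs):

    /*
     * vdso_pagelist layout after this change:
     *   [0]     data page   -> "[vvar]"  (VM_READ)
     *   [1..n]  code pages  -> "[vdso]"  (VM_READ|VM_EXEC)
     */
    struct page;

    struct mapping_view {
        const char   *name;
        struct page **pages;   /* first page of the mapping */
    };

    static void build_views(struct page **vdso_pagelist,
                            struct mapping_view view[2])
    {
        view[0] = (struct mapping_view){ "[vvar]", vdso_pagelist };
        view[1] = (struct mapping_view){ "[vdso]", &vdso_pagelist[1] };
    }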
@@ -138,52 +156,42 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, | |||
138 | int uses_interp) | 156 | int uses_interp) |
139 | { | 157 | { |
140 | struct mm_struct *mm = current->mm; | 158 | struct mm_struct *mm = current->mm; |
141 | unsigned long vdso_base, vdso_mapping_len; | 159 | unsigned long vdso_base, vdso_text_len, vdso_mapping_len; |
142 | int ret; | 160 | void *ret; |
143 | 161 | ||
162 | vdso_text_len = vdso_pages << PAGE_SHIFT; | ||
144 | /* Be sure to map the data page */ | 163 | /* Be sure to map the data page */ |
145 | vdso_mapping_len = (vdso_pages + 1) << PAGE_SHIFT; | 164 | vdso_mapping_len = vdso_text_len + PAGE_SIZE; |
146 | 165 | ||
147 | down_write(&mm->mmap_sem); | 166 | down_write(&mm->mmap_sem); |
148 | vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); | 167 | vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0); |
149 | if (IS_ERR_VALUE(vdso_base)) { | 168 | if (IS_ERR_VALUE(vdso_base)) { |
150 | ret = vdso_base; | 169 | ret = ERR_PTR(vdso_base); |
151 | goto up_fail; | 170 | goto up_fail; |
152 | } | 171 | } |
153 | mm->context.vdso = (void *)vdso_base; | 172 | ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE, |
154 | 173 | VM_READ|VM_MAYREAD, | |
155 | ret = install_special_mapping(mm, vdso_base, vdso_mapping_len, | 174 | &vdso_spec[0]); |
156 | VM_READ|VM_EXEC| | 175 | if (IS_ERR(ret)) |
157 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, | ||
158 | vdso_pagelist); | ||
159 | if (ret) { | ||
160 | mm->context.vdso = NULL; | ||
161 | goto up_fail; | 176 | goto up_fail; |
162 | } | ||
163 | 177 | ||
164 | up_fail: | 178 | vdso_base += PAGE_SIZE; |
165 | up_write(&mm->mmap_sem); | 179 | mm->context.vdso = (void *)vdso_base; |
180 | ret = _install_special_mapping(mm, vdso_base, vdso_text_len, | ||
181 | VM_READ|VM_EXEC| | ||
182 | VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, | ||
183 | &vdso_spec[1]); | ||
184 | if (IS_ERR(ret)) | ||
185 | goto up_fail; | ||
166 | 186 | ||
167 | return ret; | ||
168 | } | ||
169 | 187 | ||
170 | const char *arch_vma_name(struct vm_area_struct *vma) | 188 | up_write(&mm->mmap_sem); |
171 | { | 189 | return 0; |
172 | /* | ||
173 | * We can re-use the vdso pointer in mm_context_t for identifying | ||
174 | * the vectors page for compat applications. The vDSO will always | ||
175 | * sit above TASK_UNMAPPED_BASE and so we don't need to worry about | ||
176 | * it conflicting with the vectors base. | ||
177 | */ | ||
178 | if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) { | ||
179 | #ifdef CONFIG_COMPAT | ||
180 | if (vma->vm_start == AARCH32_VECTORS_BASE) | ||
181 | return "[vectors]"; | ||
182 | #endif | ||
183 | return "[vdso]"; | ||
184 | } | ||
185 | 190 | ||
186 | return NULL; | 191 | up_fail: |
192 | mm->context.vdso = NULL; | ||
193 | up_write(&mm->mmap_sem); | ||
194 | return PTR_ERR(ret); | ||
187 | } | 195 | } |
188 | 196 | ||
189 | /* | 197 | /* |
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile index 6d20b7d162d8..ff3bddea482d 100644 --- a/arch/arm64/kernel/vdso/Makefile +++ b/arch/arm64/kernel/vdso/Makefile | |||
@@ -43,13 +43,13 @@ $(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE | |||
43 | $(call if_changed,vdsosym) | 43 | $(call if_changed,vdsosym) |
44 | 44 | ||
45 | # Assembly rules for the .S files | 45 | # Assembly rules for the .S files |
46 | $(obj-vdso): %.o: %.S | 46 | $(obj-vdso): %.o: %.S FORCE |
47 | $(call if_changed_dep,vdsoas) | 47 | $(call if_changed_dep,vdsoas) |
48 | 48 | ||
49 | # Actual build commands | 49 | # Actual build commands |
50 | quiet_cmd_vdsold = VDSOL $@ | 50 | quiet_cmd_vdsold = VDSOL $@ |
51 | cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ | 51 | cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@ |
52 | quiet_cmd_vdsoas = VDSOA $@ | 52 | quiet_cmd_vdsoas = VDSOA $@ |
53 | cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< | 53 | cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $< |
54 | 54 | ||
55 | # Install commands for the unstripped file | 55 | # Install commands for the unstripped file |
diff --git a/arch/arm64/kernel/vdso/vdso.lds.S b/arch/arm64/kernel/vdso/vdso.lds.S index 8154b8d1c826..beca249bc2f3 100644 --- a/arch/arm64/kernel/vdso/vdso.lds.S +++ b/arch/arm64/kernel/vdso/vdso.lds.S | |||
@@ -28,6 +28,7 @@ OUTPUT_ARCH(aarch64) | |||
28 | 28 | ||
29 | SECTIONS | 29 | SECTIONS |
30 | { | 30 | { |
31 | PROVIDE(_vdso_data = . - PAGE_SIZE); | ||
31 | . = VDSO_LBASE + SIZEOF_HEADERS; | 32 | . = VDSO_LBASE + SIZEOF_HEADERS; |
32 | 33 | ||
33 | .hash : { *(.hash) } :text | 34 | .hash : { *(.hash) } :text |
@@ -57,9 +58,6 @@ SECTIONS | |||
57 | _end = .; | 58 | _end = .; |
58 | PROVIDE(end = .); | 59 | PROVIDE(end = .); |
59 | 60 | ||
60 | . = ALIGN(PAGE_SIZE); | ||
61 | PROVIDE(_vdso_data = .); | ||
62 | |||
63 | /DISCARD/ : { | 61 | /DISCARD/ : { |
64 | *(.note.GNU-stack) | 62 | *(.note.GNU-stack) |
65 | *(.data .data.* .gnu.linkonce.d.* .sdata*) | 63 | *(.data .data.* .gnu.linkonce.d.* .sdata*) |
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index f1e6d5c032e1..97f0c0429dfa 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S | |||
@@ -9,6 +9,8 @@ | |||
9 | #include <asm/memory.h> | 9 | #include <asm/memory.h> |
10 | #include <asm/page.h> | 10 | #include <asm/page.h> |
11 | 11 | ||
12 | #include "image.h" | ||
13 | |||
12 | #define ARM_EXIT_KEEP(x) | 14 | #define ARM_EXIT_KEEP(x) |
13 | #define ARM_EXIT_DISCARD(x) x | 15 | #define ARM_EXIT_DISCARD(x) x |
14 | 16 | ||
@@ -104,9 +106,18 @@ SECTIONS | |||
104 | _edata = .; | 106 | _edata = .; |
105 | 107 | ||
106 | BSS_SECTION(0, 0, 0) | 108 | BSS_SECTION(0, 0, 0) |
109 | |||
110 | . = ALIGN(PAGE_SIZE); | ||
111 | idmap_pg_dir = .; | ||
112 | . += IDMAP_DIR_SIZE; | ||
113 | swapper_pg_dir = .; | ||
114 | . += SWAPPER_DIR_SIZE; | ||
115 | |||
107 | _end = .; | 116 | _end = .; |
108 | 117 | ||
109 | STABS_DEBUG | 118 | STABS_DEBUG |
119 | |||
120 | HEAD_SYMBOLS | ||
110 | } | 121 | } |
111 | 122 | ||
112 | /* | 123 | /* |
@@ -114,3 +125,8 @@ SECTIONS | |||
114 | */ | 125 | */ |
115 | ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end), | 126 | ASSERT(((__hyp_idmap_text_start + PAGE_SIZE) > __hyp_idmap_text_end), |
116 | "HYP init code too big") | 127 | "HYP init code too big") |
128 | |||
129 | /* | ||
130 | * If padding is applied before .head.text, virt<->phys conversions will fail. | ||
131 | */ | ||
132 | ASSERT(_text == (PAGE_OFFSET + TEXT_OFFSET), "HEAD is misaligned") | ||
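Note: the new ASSERT pins down the invariant the boot code depends on. Assuming the usual linear-map form pa = va - PAGE_OFFSET + PHYS_OFFSET, any linker padding inserted before .head.text would shift _text away from PAGE_OFFSET + TEXT_OFFSET and skew every conversion. A toy check under those assumptions (the constants are illustrative, not the only valid configuration):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_OFFSET 0xffffffc000000000ULL   /* example VA base */
    #define TEXT_OFFSET 0x80000ULL              /* typical image offset */

    static uint64_t virt_to_phys(uint64_t va, uint64_t phys_offset)
    {
        return va - PAGE_OFFSET + phys_offset;  /* assumed linear-map form */
    }

    int main(void)
    {
        uint64_t phys_offset = 0x80000000ULL;       /* start of RAM (example) */
        uint64_t text = PAGE_OFFSET + TEXT_OFFSET;  /* what the ASSERT enforces */

        /* padding before .head.text would break this equality */
        assert(virt_to_phys(text, phys_offset) == phys_offset + TEXT_OFFSET);
        return 0;
    }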
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index bcc965e2cce1..41cb6d3d6075 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c | |||
@@ -62,6 +62,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr) | |||
62 | break; | 62 | break; |
63 | 63 | ||
64 | pud = pud_offset(pgd, addr); | 64 | pud = pud_offset(pgd, addr); |
65 | printk(", *pud=%016llx", pud_val(*pud)); | ||
65 | if (pud_none(*pud) || pud_bad(*pud)) | 66 | if (pud_none(*pud) || pud_bad(*pud)) |
66 | break; | 67 | break; |
67 | 68 | ||
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index f43db8a69262..5b4526ee3a01 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/dma-mapping.h> | 33 | #include <linux/dma-mapping.h> |
34 | #include <linux/dma-contiguous.h> | 34 | #include <linux/dma-contiguous.h> |
35 | 35 | ||
36 | #include <asm/fixmap.h> | ||
36 | #include <asm/sections.h> | 37 | #include <asm/sections.h> |
37 | #include <asm/setup.h> | 38 | #include <asm/setup.h> |
38 | #include <asm/sizes.h> | 39 | #include <asm/sizes.h> |
@@ -60,6 +61,17 @@ static int __init early_initrd(char *p) | |||
60 | early_param("initrd", early_initrd); | 61 | early_param("initrd", early_initrd); |
61 | #endif | 62 | #endif |
62 | 63 | ||
64 | /* | ||
65 | * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It | ||
66 | * currently assumes that for memory starting above 4G, 32-bit devices will | ||
67 | * use a DMA offset. | ||
68 | */ | ||
69 | static phys_addr_t max_zone_dma_phys(void) | ||
70 | { | ||
71 | phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32); | ||
72 | return min(offset + (1ULL << 32), memblock_end_of_DRAM()); | ||
73 | } | ||
74 | |||
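Note: a worked example of max_zone_dma_phys(): GENMASK_ULL(63, 32) keeps the upper 32 bits of the DRAM base, i.e. rounds it down to a 4GB boundary, so the ZONE_DMA limit lands 4GB above that, capped at the end of DRAM. With hypothetical memory layouts:

    #include <assert.h>
    #include <stdint.h>

    #define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    static uint64_t max_zone_dma_phys(uint64_t dram_start, uint64_t dram_end)
    {
        uint64_t offset = dram_start & GENMASK_ULL(63, 32);
        uint64_t limit = offset + (1ULL << 32);

        return limit < dram_end ? limit : dram_end;
    }

    int main(void)
    {
        /* DRAM at 8GB..16GB: 32-bit devices see the first 4GB of it */
        assert(max_zone_dma_phys(0x200000000ULL, 0x400000000ULL)
               == 0x300000000ULL);

        /* DRAM at 2GB..3GB: capped at the end of DRAM */
        assert(max_zone_dma_phys(0x80000000ULL, 0xc0000000ULL)
               == 0xc0000000ULL);
        return 0;
    }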
63 | static void __init zone_sizes_init(unsigned long min, unsigned long max) | 75 | static void __init zone_sizes_init(unsigned long min, unsigned long max) |
64 | { | 76 | { |
65 | struct memblock_region *reg; | 77 | struct memblock_region *reg; |
@@ -70,9 +82,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max) | |||
70 | 82 | ||
71 | /* 4GB maximum for 32-bit only capable devices */ | 83 | /* 4GB maximum for 32-bit only capable devices */ |
72 | if (IS_ENABLED(CONFIG_ZONE_DMA)) { | 84 | if (IS_ENABLED(CONFIG_ZONE_DMA)) { |
73 | unsigned long max_dma_phys = | 85 | max_dma = PFN_DOWN(max_zone_dma_phys()); |
74 | (unsigned long)(dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1); | ||
75 | max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT)); | ||
76 | zone_size[ZONE_DMA] = max_dma - min; | 86 | zone_size[ZONE_DMA] = max_dma - min; |
77 | } | 87 | } |
78 | zone_size[ZONE_NORMAL] = max - max_dma; | 88 | zone_size[ZONE_NORMAL] = max - max_dma; |
@@ -128,25 +138,21 @@ void __init arm64_memblock_init(void) | |||
128 | { | 138 | { |
129 | phys_addr_t dma_phys_limit = 0; | 139 | phys_addr_t dma_phys_limit = 0; |
130 | 140 | ||
131 | /* Register the kernel text, kernel data and initrd with memblock */ | 141 | /* |
142 | * Register the kernel text, kernel data, initrd, and initial | ||
143 | * pagetables with memblock. | ||
144 | */ | ||
132 | memblock_reserve(__pa(_text), _end - _text); | 145 | memblock_reserve(__pa(_text), _end - _text); |
133 | #ifdef CONFIG_BLK_DEV_INITRD | 146 | #ifdef CONFIG_BLK_DEV_INITRD |
134 | if (initrd_start) | 147 | if (initrd_start) |
135 | memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); | 148 | memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); |
136 | #endif | 149 | #endif |
137 | 150 | ||
138 | /* | ||
139 | * Reserve the page tables. These are already in use, | ||
140 | * and can only be in node 0. | ||
141 | */ | ||
142 | memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE); | ||
143 | memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE); | ||
144 | |||
145 | early_init_fdt_scan_reserved_mem(); | 151 | early_init_fdt_scan_reserved_mem(); |
146 | 152 | ||
147 | /* 4GB maximum for 32-bit only capable devices */ | 153 | /* 4GB maximum for 32-bit only capable devices */ |
148 | if (IS_ENABLED(CONFIG_ZONE_DMA)) | 154 | if (IS_ENABLED(CONFIG_ZONE_DMA)) |
149 | dma_phys_limit = dma_to_phys(NULL, DMA_BIT_MASK(32)) + 1; | 155 | dma_phys_limit = max_zone_dma_phys(); |
150 | dma_contiguous_reserve(dma_phys_limit); | 156 | dma_contiguous_reserve(dma_phys_limit); |
151 | 157 | ||
152 | memblock_allow_resize(); | 158 | memblock_allow_resize(); |
@@ -260,26 +266,33 @@ void __init mem_init(void) | |||
260 | 266 | ||
261 | #define MLK(b, t) b, t, ((t) - (b)) >> 10 | 267 | #define MLK(b, t) b, t, ((t) - (b)) >> 10 |
262 | #define MLM(b, t) b, t, ((t) - (b)) >> 20 | 268 | #define MLM(b, t) b, t, ((t) - (b)) >> 20 |
269 | #define MLG(b, t) b, t, ((t) - (b)) >> 30 | ||
263 | #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) | 270 | #define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) |
264 | 271 | ||
265 | pr_notice("Virtual kernel memory layout:\n" | 272 | pr_notice("Virtual kernel memory layout:\n" |
266 | " vmalloc : 0x%16lx - 0x%16lx (%6ld MB)\n" | 273 | " vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n" |
267 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 274 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
268 | " vmemmap : 0x%16lx - 0x%16lx (%6ld MB)\n" | 275 | " vmemmap : 0x%16lx - 0x%16lx (%6ld GB maximum)\n" |
276 | " 0x%16lx - 0x%16lx (%6ld MB actual)\n" | ||
269 | #endif | 277 | #endif |
278 | " PCI I/O : 0x%16lx - 0x%16lx (%6ld MB)\n" | ||
279 | " fixed : 0x%16lx - 0x%16lx (%6ld KB)\n" | ||
270 | " modules : 0x%16lx - 0x%16lx (%6ld MB)\n" | 280 | " modules : 0x%16lx - 0x%16lx (%6ld MB)\n" |
271 | " memory : 0x%16lx - 0x%16lx (%6ld MB)\n" | 281 | " memory : 0x%16lx - 0x%16lx (%6ld MB)\n" |
272 | " .init : 0x%p" " - 0x%p" " (%6ld kB)\n" | 282 | " .init : 0x%p" " - 0x%p" " (%6ld KB)\n" |
273 | " .text : 0x%p" " - 0x%p" " (%6ld kB)\n" | 283 | " .text : 0x%p" " - 0x%p" " (%6ld KB)\n" |
274 | " .data : 0x%p" " - 0x%p" " (%6ld kB)\n", | 284 | " .data : 0x%p" " - 0x%p" " (%6ld KB)\n", |
275 | MLM(VMALLOC_START, VMALLOC_END), | 285 | MLG(VMALLOC_START, VMALLOC_END), |
276 | #ifdef CONFIG_SPARSEMEM_VMEMMAP | 286 | #ifdef CONFIG_SPARSEMEM_VMEMMAP |
287 | MLG((unsigned long)vmemmap, | ||
288 | (unsigned long)vmemmap + VMEMMAP_SIZE), | ||
277 | MLM((unsigned long)virt_to_page(PAGE_OFFSET), | 289 | MLM((unsigned long)virt_to_page(PAGE_OFFSET), |
278 | (unsigned long)virt_to_page(high_memory)), | 290 | (unsigned long)virt_to_page(high_memory)), |
279 | #endif | 291 | #endif |
292 | MLM((unsigned long)PCI_IOBASE, (unsigned long)PCI_IOBASE + SZ_16M), | ||
293 | MLK(FIXADDR_START, FIXADDR_TOP), | ||
280 | MLM(MODULES_VADDR, MODULES_END), | 294 | MLM(MODULES_VADDR, MODULES_END), |
281 | MLM(PAGE_OFFSET, (unsigned long)high_memory), | 295 | MLM(PAGE_OFFSET, (unsigned long)high_memory), |
282 | |||
283 | MLK_ROUNDUP(__init_begin, __init_end), | 296 | MLK_ROUNDUP(__init_begin, __init_end), |
284 | MLK_ROUNDUP(_text, _etext), | 297 | MLK_ROUNDUP(_text, _etext), |
285 | MLK_ROUNDUP(_sdata, _edata)); | 298 | MLK_ROUNDUP(_sdata, _edata)); |
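Worth noting for readers of the new format string: each MLK/MLM/MLG macro expands to three printf arguments (base, top, and the span shifted down to KB/MB/GB). A runnable sketch of the same pattern, with invented example addresses:

#include <stdio.h>

#define MLK(b, t) (b), (t), ((t) - (b)) >> 10
#define MLM(b, t) (b), (t), ((t) - (b)) >> 20
#define MLG(b, t) (b), (t), ((t) - (b)) >> 30

int main(void)
{
	unsigned long long vstart = 0xffffffbc00000000ULL; /* example only */
	unsigned long long vend   = 0xffffffbdfff00000ULL;

	/* One format fragment consumes the three arguments of one macro. */
	printf("vmalloc : 0x%16llx - 0x%16llx (%6llu GB)\n", MLG(vstart, vend));
	return 0;
}

The new MLG variant exists because ranges such as vmalloc and vmemmap are now reported in GB rather than MB.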
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c index 7ec328392ae0..fa324bd5a5c4 100644 --- a/arch/arm64/mm/ioremap.c +++ b/arch/arm64/mm/ioremap.c | |||
@@ -103,19 +103,28 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size) | |||
103 | } | 103 | } |
104 | EXPORT_SYMBOL(ioremap_cache); | 104 | EXPORT_SYMBOL(ioremap_cache); |
105 | 105 | ||
106 | #ifndef CONFIG_ARM64_64K_PAGES | ||
107 | static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; | 106 | static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; |
107 | #if CONFIG_ARM64_PGTABLE_LEVELS > 2 | ||
108 | static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss; | ||
109 | #endif | ||
110 | #if CONFIG_ARM64_PGTABLE_LEVELS > 3 | ||
111 | static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss; | ||
108 | #endif | 112 | #endif |
109 | 113 | ||
110 | static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) | 114 | static inline pud_t * __init early_ioremap_pud(unsigned long addr) |
111 | { | 115 | { |
112 | pgd_t *pgd; | 116 | pgd_t *pgd; |
113 | pud_t *pud; | ||
114 | 117 | ||
115 | pgd = pgd_offset_k(addr); | 118 | pgd = pgd_offset_k(addr); |
116 | BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd)); | 119 | BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd)); |
117 | 120 | ||
118 | pud = pud_offset(pgd, addr); | 121 | return pud_offset(pgd, addr); |
122 | } | ||
123 | |||
124 | static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) | ||
125 | { | ||
126 | pud_t *pud = early_ioremap_pud(addr); | ||
127 | |||
119 | BUG_ON(pud_none(*pud) || pud_bad(*pud)); | 128 | BUG_ON(pud_none(*pud) || pud_bad(*pud)); |
120 | 129 | ||
121 | return pmd_offset(pud, addr); | 130 | return pmd_offset(pud, addr); |
@@ -132,13 +141,18 @@ static inline pte_t * __init early_ioremap_pte(unsigned long addr) | |||
132 | 141 | ||
133 | void __init early_ioremap_init(void) | 142 | void __init early_ioremap_init(void) |
134 | { | 143 | { |
144 | pgd_t *pgd; | ||
145 | pud_t *pud; | ||
135 | pmd_t *pmd; | 146 | pmd_t *pmd; |
147 | unsigned long addr = fix_to_virt(FIX_BTMAP_BEGIN); | ||
136 | 148 | ||
137 | pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); | 149 | pgd = pgd_offset_k(addr); |
138 | #ifndef CONFIG_ARM64_64K_PAGES | 150 | pgd_populate(&init_mm, pgd, bm_pud); |
139 | /* need to populate pmd for 4k pagesize only */ | 151 | pud = pud_offset(pgd, addr); |
152 | pud_populate(&init_mm, pud, bm_pmd); | ||
153 | pmd = pmd_offset(pud, addr); | ||
140 | pmd_populate_kernel(&init_mm, pmd, bm_pte); | 154 | pmd_populate_kernel(&init_mm, pmd, bm_pte); |
141 | #endif | 155 | |
142 | /* | 156 | /* |
143 | * The boot-ioremap range spans multiple pmds, for which | 157 | * The boot-ioremap range spans multiple pmds, for which |
144 | * we are not prepared: | 158 | * we are not prepared: |
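The net effect of the ioremap.c hunk: the early fixmap chain is wired through statically allocated tables at every configured level instead of special-casing 64K pages. A runnable userspace model of that wiring (table sizes and shift values are illustrative only, not the arm64 constants):

#include <stdint.h>
#include <stdio.h>

#define ENTRIES 512

static uintptr_t pgd[ENTRIES], pud[ENTRIES], pmd[ENTRIES], pte[ENTRIES];

int main(void)
{
	uint64_t addr = 0x123456789000ULL;	/* example fixmap-style address */
	unsigned pgd_i = (addr >> 39) & 511, pud_i = (addr >> 30) & 511;
	unsigned pmd_i = (addr >> 21) & 511, pte_i = (addr >> 12) & 511;

	/* Populate each level with the next static table, top to bottom,
	 * as early_ioremap_init() now does with bm_pud/bm_pmd/bm_pte. */
	pgd[pgd_i] = (uintptr_t)pud;
	pud[pud_i] = (uintptr_t)pmd;
	pmd[pmd_i] = (uintptr_t)pte;
	pte[pte_i] = 0xdeadb000;		/* pretend output page */

	printf("pgd[%u] -> pud[%u] -> pmd[%u] -> pte[%u] = %#lx\n",
	       pgd_i, pud_i, pmd_i, pte_i, (unsigned long)pte[pte_i]);
	return 0;
}

On configurations that fold a level away, the corresponding pud_offset()/pmd_offset() step degenerates to a no-op, so the same init path serves 2-, 3-, and 4-level layouts.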
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index c43f1dd19489..c55567283cde 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <asm/setup.h> | 32 | #include <asm/setup.h> |
33 | #include <asm/sizes.h> | 33 | #include <asm/sizes.h> |
34 | #include <asm/tlb.h> | 34 | #include <asm/tlb.h> |
35 | #include <asm/memblock.h> | ||
35 | #include <asm/mmu_context.h> | 36 | #include <asm/mmu_context.h> |
36 | 37 | ||
37 | #include "mm.h" | 38 | #include "mm.h" |
@@ -204,9 +205,16 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, | |||
204 | unsigned long end, unsigned long phys, | 205 | unsigned long end, unsigned long phys, |
205 | int map_io) | 206 | int map_io) |
206 | { | 207 | { |
207 | pud_t *pud = pud_offset(pgd, addr); | 208 | pud_t *pud; |
208 | unsigned long next; | 209 | unsigned long next; |
209 | 210 | ||
211 | if (pgd_none(*pgd)) { | ||
212 | pud = early_alloc(PTRS_PER_PUD * sizeof(pud_t)); | ||
213 | pgd_populate(&init_mm, pgd, pud); | ||
214 | } | ||
215 | BUG_ON(pgd_bad(*pgd)); | ||
216 | |||
217 | pud = pud_offset(pgd, addr); | ||
210 | do { | 218 | do { |
211 | next = pud_addr_end(addr, end); | 219 | next = pud_addr_end(addr, end); |
212 | 220 | ||
@@ -290,10 +298,10 @@ static void __init map_mem(void) | |||
290 | * memory addressable from the initial direct kernel mapping. | 298 | * memory addressable from the initial direct kernel mapping. |
291 | * | 299 | * |
292 | * The initial direct kernel mapping, located at swapper_pg_dir, | 300 | * The initial direct kernel mapping, located at swapper_pg_dir, |
293 | * gives us PGDIR_SIZE memory starting from PHYS_OFFSET (which must be | 301 | * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be |
294 | * aligned to 2MB as per Documentation/arm64/booting.txt). | 302 | * aligned to 2MB as per Documentation/arm64/booting.txt). |
295 | */ | 303 | */ |
296 | limit = PHYS_OFFSET + PGDIR_SIZE; | 304 | limit = PHYS_OFFSET + PUD_SIZE; |
297 | memblock_set_current_limit(limit); | 305 | memblock_set_current_limit(limit); |
298 | 306 | ||
299 | /* map all the memory banks */ | 307 | /* map all the memory banks */ |
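The PGDIR_SIZE to PUD_SIZE change is a genuine tightening once four levels exist. Back-of-envelope check, assuming the 4KB-granule shifts (12/21/30/39, stated here as an assumption rather than taken from the headers):

#include <stdio.h>

int main(void)
{
	unsigned long long pud_size   = 1ULL << 30;	/* PUD_SHIFT   = 30 */
	unsigned long long pgdir_size = 1ULL << 39;	/* PGDIR_SHIFT = 39 */

	printf("PUD_SIZE   = %llu GB\n", pud_size >> 30);	/* 1   */
	printf("PGDIR_SIZE = %llu GB\n", pgdir_size >> 30);	/* 512 */
	return 0;
}

Only one PUD-sized block below swapper_pg_dir is guaranteed mapped at this point, so memblock allocations must be capped at 1GB above PHYS_OFFSET, not 512GB.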
diff --git a/arch/avr32/include/asm/processor.h b/arch/avr32/include/asm/processor.h index 972adcc1e8f4..941593c7d9f3 100644 --- a/arch/avr32/include/asm/processor.h +++ b/arch/avr32/include/asm/processor.h | |||
@@ -92,6 +92,7 @@ extern struct avr32_cpuinfo boot_cpu_data; | |||
92 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) | 92 | #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) |
93 | 93 | ||
94 | #define cpu_relax() barrier() | 94 | #define cpu_relax() barrier() |
95 | #define cpu_relax_lowlatency() cpu_relax() | ||
95 | #define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory") | 96 | #define cpu_sync_pipeline() asm volatile("sub pc, -2" : : : "memory") |
96 | 97 | ||
97 | struct cpu_context { | 98 | struct cpu_context { |
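Context for the cpu_relax_lowlatency() definitions being added across every architecture in this series: generic spin-wait loops (optimistic mutex/rwsem spinning and similar) are expected to call the new hook, and almost every architecture simply aliases it to cpu_relax(). A hedged sketch of the intended call pattern; struct my_lock and its field are invented for illustration:

/* Not kernel source: shape of a spin loop that prefers low latency
 * over saving power while waiting. */
struct my_lock {
	volatile int locked;
};

static inline void spin_until_released(struct my_lock *lock)
{
	while (lock->locked)
		cpu_relax_lowlatency();	/* pause cheaply where the arch can */
}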
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index f81e7b989fff..ed30699cc635 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig | |||
@@ -18,7 +18,6 @@ config BLACKFIN | |||
18 | select HAVE_FTRACE_MCOUNT_RECORD | 18 | select HAVE_FTRACE_MCOUNT_RECORD |
19 | select HAVE_FUNCTION_GRAPH_TRACER | 19 | select HAVE_FUNCTION_GRAPH_TRACER |
20 | select HAVE_FUNCTION_TRACER | 20 | select HAVE_FUNCTION_TRACER |
21 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
22 | select HAVE_IDE | 21 | select HAVE_IDE |
23 | select HAVE_KERNEL_GZIP if RAMKERNEL | 22 | select HAVE_KERNEL_GZIP if RAMKERNEL |
24 | select HAVE_KERNEL_BZIP2 if RAMKERNEL | 23 | select HAVE_KERNEL_BZIP2 if RAMKERNEL |
diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig index a7e9bfd84183..fcec5ce71392 100644 --- a/arch/blackfin/configs/BF609-EZKIT_defconfig +++ b/arch/blackfin/configs/BF609-EZKIT_defconfig | |||
@@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y | |||
102 | CONFIG_I2C_BLACKFIN_TWI=y | 102 | CONFIG_I2C_BLACKFIN_TWI=y |
103 | CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 | 103 | CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100 |
104 | CONFIG_SPI=y | 104 | CONFIG_SPI=y |
105 | CONFIG_SPI_BFIN_V3=y | 105 | CONFIG_SPI_ADI_V3=y |
106 | CONFIG_GPIOLIB=y | 106 | CONFIG_GPIOLIB=y |
107 | CONFIG_GPIO_SYSFS=y | 107 | CONFIG_GPIO_SYSFS=y |
108 | # CONFIG_HWMON is not set | 108 | # CONFIG_HWMON is not set |
diff --git a/arch/blackfin/include/asm/processor.h b/arch/blackfin/include/asm/processor.h index d0e72e9475a6..7acd46653df3 100644 --- a/arch/blackfin/include/asm/processor.h +++ b/arch/blackfin/include/asm/processor.h | |||
@@ -99,7 +99,7 @@ unsigned long get_wchan(struct task_struct *p); | |||
99 | #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) | 99 | #define KSTK_ESP(tsk) ((tsk) == current ? rdusp() : (tsk)->thread.usp) |
100 | 100 | ||
101 | #define cpu_relax() smp_mb() | 101 | #define cpu_relax() smp_mb() |
102 | | 102 | #define cpu_relax_lowlatency() cpu_relax() |
103 | 103 | ||
104 | /* Get the Silicon Revision of the chip */ | 104 | /* Get the Silicon Revision of the chip */ |
105 | static inline uint32_t __pure bfin_revid(void) | 105 | static inline uint32_t __pure bfin_revid(void) |
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S index 7eed00bbd26d..28d059540424 100644 --- a/arch/blackfin/kernel/ftrace-entry.S +++ b/arch/blackfin/kernel/ftrace-entry.S | |||
@@ -33,15 +33,6 @@ ENDPROC(__mcount) | |||
33 | * function will be waiting there. mmmm pie. | 33 | * function will be waiting there. mmmm pie. |
34 | */ | 34 | */ |
35 | ENTRY(_ftrace_caller) | 35 | ENTRY(_ftrace_caller) |
36 | # ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
37 | /* optional micro optimization: return if stopped */ | ||
38 | p1.l = _function_trace_stop; | ||
39 | p1.h = _function_trace_stop; | ||
40 | r3 = [p1]; | ||
41 | cc = r3 == 0; | ||
42 | if ! cc jump _ftrace_stub (bp); | ||
43 | # endif | ||
44 | |||
45 | /* save first/second/third function arg and the return register */ | 36 | /* save first/second/third function arg and the return register */ |
46 | [--sp] = r2; | 37 | [--sp] = r2; |
47 | [--sp] = r0; | 38 | [--sp] = r0; |
@@ -83,15 +74,6 @@ ENDPROC(_ftrace_caller) | |||
83 | 74 | ||
84 | /* See documentation for _ftrace_caller */ | 75 | /* See documentation for _ftrace_caller */ |
85 | ENTRY(__mcount) | 76 | ENTRY(__mcount) |
86 | # ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
87 | /* optional micro optimization: return if stopped */ | ||
88 | p1.l = _function_trace_stop; | ||
89 | p1.h = _function_trace_stop; | ||
90 | r3 = [p1]; | ||
91 | cc = r3 == 0; | ||
92 | if ! cc jump _ftrace_stub (bp); | ||
93 | # endif | ||
94 | |||
95 | /* save third function arg early so we can do testing below */ | 77 | /* save third function arg early so we can do testing below */ |
96 | [--sp] = r2; | 78 | [--sp] = r2; |
97 | 79 | ||
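Each of the stubs losing this boilerplate was open-coding the same early-out in assembly. Its rough C equivalent, for reference (a sketch, not kernel source; function_trace_stop was the generic ftrace flag, and the ftrace core now performs this check itself):

extern int function_trace_stop;	/* former generic ftrace flag */

void ftrace_caller_prologue(void)
{
	if (function_trace_stop)
		return;		/* skip tracing entirely */
	/* ... save argument registers, call the trace function ... */
}

Dropping HAVE_FUNCTION_TRACE_MCOUNT_TEST from the arch Kconfigs in this series is the other half of the same cleanup.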
diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c index 974e55496db3..ea2032013cc2 100644 --- a/arch/blackfin/kernel/perf_event.c +++ b/arch/blackfin/kernel/perf_event.c | |||
@@ -389,14 +389,6 @@ static int bfin_pmu_event_init(struct perf_event *event) | |||
389 | if (attr->exclude_hv || attr->exclude_idle) | 389 | if (attr->exclude_hv || attr->exclude_idle) |
390 | return -EPERM; | 390 | return -EPERM; |
391 | 391 | ||
392 | /* | ||
393 | * All of the on-chip counters are "limited", in that they have | ||
394 | * no interrupts, and are therefore unable to do sampling without | ||
395 | * further work and timer assistance. | ||
396 | */ | ||
397 | if (hwc->sample_period) | ||
398 | return -EINVAL; | ||
399 | |||
400 | ret = 0; | 392 | ret = 0; |
401 | switch (attr->type) { | 393 | switch (attr->type) { |
402 | case PERF_TYPE_RAW: | 394 | case PERF_TYPE_RAW: |
@@ -490,6 +482,13 @@ static int __init bfin_pmu_init(void) | |||
490 | { | 482 | { |
491 | int ret; | 483 | int ret; |
492 | 484 | ||
485 | /* | ||
486 | * All of the on-chip counters are "limited", in that they have | ||
487 | * no interrupts, and are therefore unable to do sampling without | ||
488 | * further work and timer assistance. | ||
489 | */ | ||
490 | pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; | ||
491 | |||
493 | ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | 492 | ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); |
494 | if (!ret) | 493 | if (!ret) |
495 | perf_cpu_notifier(bfin_pmu_notifier); | 494 | perf_cpu_notifier(bfin_pmu_notifier); |
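Moving the sample_period test out of event_init works because the perf core now refuses sampling events itself for any PMU advertising PERF_PMU_CAP_NO_INTERRUPT. A hedged sketch of the core-side gate (paraphrased, not copied from kernel/events/core.c):

static int can_sample(struct perf_event *event)
{
	if (is_sampling_event(event) &&
	    (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT))
		return -ENOTSUPP;	/* no overflow IRQ, so no sampling */
	return 0;
}

The driver-side win is that the restriction is declared once at registration instead of being re-implemented in each event_init.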
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index ba35864b2b74..c9eec84aa258 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S | |||
@@ -145,7 +145,7 @@ SECTIONS | |||
145 | 145 | ||
146 | .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) | 146 | .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) |
147 | #else | 147 | #else |
148 | .init.data : AT(__data_lma + __data_len) | 148 | .init.data : AT(__data_lma + __data_len + 32) |
149 | { | 149 | { |
150 | __sinitdata = .; | 150 | __sinitdata = .; |
151 | INIT_DATA | 151 | INIT_DATA |
diff --git a/arch/blackfin/mach-bf533/boards/blackstamp.c b/arch/blackfin/mach-bf533/boards/blackstamp.c index 63b0e4fe760c..0ccf0cf4daaf 100644 --- a/arch/blackfin/mach-bf533/boards/blackstamp.c +++ b/arch/blackfin/mach-bf533/boards/blackstamp.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/spi/spi.h> | 20 | #include <linux/spi/spi.h> |
21 | #include <linux/spi/flash.h> | 21 | #include <linux/spi/flash.h> |
22 | #include <linux/irq.h> | 22 | #include <linux/irq.h> |
23 | #include <linux/gpio.h> | ||
23 | #include <linux/i2c.h> | 24 | #include <linux/i2c.h> |
24 | #include <asm/dma.h> | 25 | #include <asm/dma.h> |
25 | #include <asm/bfin5xx_spi.h> | 26 | #include <asm/bfin5xx_spi.h> |
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537e.c b/arch/blackfin/mach-bf537/boards/cm_bf537e.c index c65c6dbda3da..1e7290ef3525 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537e.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537e.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #endif | 21 | #endif |
22 | #include <linux/ata_platform.h> | 22 | #include <linux/ata_platform.h> |
23 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
24 | #include <linux/gpio.h> | ||
24 | #include <asm/dma.h> | 25 | #include <asm/dma.h> |
25 | #include <asm/bfin5xx_spi.h> | 26 | #include <asm/bfin5xx_spi.h> |
26 | #include <asm/portmux.h> | 27 | #include <asm/portmux.h> |
diff --git a/arch/blackfin/mach-bf537/boards/cm_bf537u.c b/arch/blackfin/mach-bf537/boards/cm_bf537u.c index af58454b4bff..c7495dc74690 100644 --- a/arch/blackfin/mach-bf537/boards/cm_bf537u.c +++ b/arch/blackfin/mach-bf537/boards/cm_bf537u.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #endif | 21 | #endif |
22 | #include <linux/ata_platform.h> | 22 | #include <linux/ata_platform.h> |
23 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
24 | #include <linux/gpio.h> | ||
24 | #include <asm/dma.h> | 25 | #include <asm/dma.h> |
25 | #include <asm/bfin5xx_spi.h> | 26 | #include <asm/bfin5xx_spi.h> |
26 | #include <asm/portmux.h> | 27 | #include <asm/portmux.h> |
diff --git a/arch/blackfin/mach-bf537/boards/tcm_bf537.c b/arch/blackfin/mach-bf537/boards/tcm_bf537.c index a0211225748d..6b988ad653d8 100644 --- a/arch/blackfin/mach-bf537/boards/tcm_bf537.c +++ b/arch/blackfin/mach-bf537/boards/tcm_bf537.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #endif | 21 | #endif |
22 | #include <linux/ata_platform.h> | 22 | #include <linux/ata_platform.h> |
23 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
24 | #include <linux/gpio.h> | ||
24 | #include <asm/dma.h> | 25 | #include <asm/dma.h> |
25 | #include <asm/bfin5xx_spi.h> | 26 | #include <asm/bfin5xx_spi.h> |
26 | #include <asm/portmux.h> | 27 | #include <asm/portmux.h> |
diff --git a/arch/blackfin/mach-bf548/boards/ezkit.c b/arch/blackfin/mach-bf548/boards/ezkit.c index 90138e6112c1..1fe7ff286619 100644 --- a/arch/blackfin/mach-bf548/boards/ezkit.c +++ b/arch/blackfin/mach-bf548/boards/ezkit.c | |||
@@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = { | |||
2118 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), | 2118 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), |
2119 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), | 2119 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), |
2120 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"), | 2120 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1", "pinctrl-adi2.0", NULL, "can1"), |
2121 | PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", NULL, "ppi0_24b"), | 2121 | PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"), |
2122 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), | 2122 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), |
2123 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), | 2123 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), |
2124 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"), | 2124 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0", "pinctrl-adi2.0", NULL, "sport0"), |
@@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = { | |||
2140 | PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"), | 2140 | PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x", "pinctrl-adi2.0", NULL, "atapi_alter"), |
2141 | #endif | 2141 | #endif |
2142 | PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"), | 2142 | PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0", "pinctrl-adi2.0", NULL, "nfc0"), |
2143 | PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", NULL, "keys_4x4"), | 2143 | PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys", "pinctrl-adi2.0", "keys_4x4grp", "keys"), |
2144 | PIN_MAP_MUX_GROUP("bf54x-keys", "4bit", "pinctrl-adi2.0", "keys_4x4grp", "keys"), | ||
2145 | PIN_MAP_MUX_GROUP("bf54x-keys", "8bit", "pinctrl-adi2.0", "keys_8x8grp", "keys"), | ||
2144 | }; | 2146 | }; |
2145 | 2147 | ||
2146 | static int __init ezkit_init(void) | 2148 | static int __init ezkit_init(void) |
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c index 430b16d5ccb1..6ab951534d79 100644 --- a/arch/blackfin/mach-bf561/boards/acvilon.c +++ b/arch/blackfin/mach-bf561/boards/acvilon.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/spi/flash.h> | 44 | #include <linux/spi/flash.h> |
45 | #include <linux/irq.h> | 45 | #include <linux/irq.h> |
46 | #include <linux/interrupt.h> | 46 | #include <linux/interrupt.h> |
47 | #include <linux/gpio.h> | ||
47 | #include <linux/jiffies.h> | 48 | #include <linux/jiffies.h> |
48 | #include <linux/i2c-pca-platform.h> | 49 | #include <linux/i2c-pca-platform.h> |
49 | #include <linux/delay.h> | 50 | #include <linux/delay.h> |
diff --git a/arch/blackfin/mach-bf561/boards/cm_bf561.c b/arch/blackfin/mach-bf561/boards/cm_bf561.c index 9f777df4cacc..e862f7823e68 100644 --- a/arch/blackfin/mach-bf561/boards/cm_bf561.c +++ b/arch/blackfin/mach-bf561/boards/cm_bf561.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #endif | 18 | #endif |
19 | #include <linux/ata_platform.h> | 19 | #include <linux/ata_platform.h> |
20 | #include <linux/irq.h> | 20 | #include <linux/irq.h> |
21 | #include <linux/gpio.h> | ||
21 | #include <asm/dma.h> | 22 | #include <asm/dma.h> |
22 | #include <asm/bfin5xx_spi.h> | 23 | #include <asm/bfin5xx_spi.h> |
23 | #include <asm/portmux.h> | 24 | #include <asm/portmux.h> |
diff --git a/arch/blackfin/mach-bf561/boards/ezkit.c b/arch/blackfin/mach-bf561/boards/ezkit.c index 88dee43e7abe..2de71e8c104b 100644 --- a/arch/blackfin/mach-bf561/boards/ezkit.c +++ b/arch/blackfin/mach-bf561/boards/ezkit.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/spi/spi.h> | 14 | #include <linux/spi/spi.h> |
15 | #include <linux/irq.h> | 15 | #include <linux/irq.h> |
16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
17 | #include <linux/gpio.h> | ||
17 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
18 | #include <asm/dma.h> | 19 | #include <asm/dma.h> |
19 | #include <asm/bfin5xx_spi.h> | 20 | #include <asm/bfin5xx_spi.h> |
diff --git a/arch/blackfin/mach-bf609/boards/ezkit.c b/arch/blackfin/mach-bf609/boards/ezkit.c index 1ba4600de69f..e2c0b024ce88 100644 --- a/arch/blackfin/mach-bf609/boards/ezkit.c +++ b/arch/blackfin/mach-bf609/boards/ezkit.c | |||
@@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev) | |||
698 | { | 698 | { |
699 | #define CONFIG_SMC_GCTL_VAL 0x00000010 | 699 | #define CONFIG_SMC_GCTL_VAL 0x00000010 |
700 | 700 | ||
701 | if (!devm_pinctrl_get_select_default(&pdev->dev)) | ||
702 | return -EBUSY; | ||
703 | bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL); | 701 | bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL); |
704 | bfin_write32(SMC_B0CTL, 0x01002011); | 702 | bfin_write32(SMC_B0CTL, 0x01002011); |
705 | bfin_write32(SMC_B0TIM, 0x08170977); | 703 | bfin_write32(SMC_B0TIM, 0x08170977); |
@@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev) | |||
709 | 707 | ||
710 | void bf609_nor_flash_exit(struct platform_device *pdev) | 708 | void bf609_nor_flash_exit(struct platform_device *pdev) |
711 | { | 709 | { |
712 | devm_pinctrl_put(pdev->dev.pins->p); | ||
713 | bfin_write32(SMC_GCTL, 0); | 710 | bfin_write32(SMC_GCTL, 0); |
714 | } | 711 | } |
715 | 712 | ||
@@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = { | |||
2058 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), | 2055 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary", "pinctrl-adi2.0", NULL, "rotary"), |
2059 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), | 2056 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0", "pinctrl-adi2.0", NULL, "can0"), |
2060 | PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"), | 2057 | PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0", "pinctrl-adi2.0", NULL, "smc0"), |
2061 | PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", NULL, "ppi2_16b"), | 2058 | PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"), |
2062 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), | 2059 | PIN_MAP_MUX_GROUP("bfin_display.0", "8bit", "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"), |
2063 | #if IS_ENABLED(CONFIG_VIDEO_MT9M114) | 2060 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"), |
2064 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_8b"), | 2061 | PIN_MAP_MUX_GROUP("bfin_display.0", "16bit", "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"), |
2065 | #elif IS_ENABLED(CONFIG_VIDEO_VS6624) | 2062 | PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit", "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"), |
2066 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_16b"), | 2063 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"), |
2067 | #else | 2064 | PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit", "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"), |
2068 | PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0", "pinctrl-adi2.0", NULL, "ppi0_24b"), | 2065 | PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit", "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"), |
2069 | #endif | ||
2070 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), | 2066 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0", "pinctrl-adi2.0", NULL, "sport0"), |
2071 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), | 2067 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0", "pinctrl-adi2.0", NULL, "sport0"), |
2072 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"), | 2068 | PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1", "pinctrl-adi2.0", NULL, "sport1"), |
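The named "8bit"/"16bit"/"24bit" map entries replace the compile-time #if selection with runtime pinctrl states. A hedged sketch of the consumer side using the standard pinctrl API (the state name matches the map above; the probe context is illustrative):

#include <linux/pinctrl/consumer.h>

static int select_ppi_16bit(struct device *dev)
{
	struct pinctrl *p = devm_pinctrl_get(dev);
	struct pinctrl_state *s;

	if (IS_ERR(p))
		return PTR_ERR(p);

	s = pinctrl_lookup_state(p, "16bit");	/* name from the map table */
	if (IS_ERR(s))
		return PTR_ERR(s);

	return pinctrl_select_state(p, s);
}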
diff --git a/arch/blackfin/mach-bf609/include/mach/pm.h b/arch/blackfin/mach-bf609/include/mach/pm.h index 3ca0fb965636..a1efd936dd30 100644 --- a/arch/blackfin/mach-bf609/include/mach/pm.h +++ b/arch/blackfin/mach-bf609/include/mach/pm.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #define __MACH_BF609_PM_H__ | 10 | #define __MACH_BF609_PM_H__ |
11 | 11 | ||
12 | #include <linux/suspend.h> | 12 | #include <linux/suspend.h> |
13 | #include <linux/platform_device.h> | ||
13 | 14 | ||
14 | extern int bfin609_pm_enter(suspend_state_t state); | 15 | extern int bfin609_pm_enter(suspend_state_t state); |
15 | extern int bf609_pm_prepare(void); | 16 | extern int bf609_pm_prepare(void); |
@@ -19,6 +20,6 @@ void bf609_hibernate(void); | |||
19 | void bfin_sec_raise_irq(unsigned int sid); | 20 | void bfin_sec_raise_irq(unsigned int sid); |
20 | void coreb_enable(void); | 21 | void coreb_enable(void); |
21 | 22 | ||
22 | int bf609_nor_flash_init(void); | 23 | int bf609_nor_flash_init(struct platform_device *pdev); |
23 | void bf609_nor_flash_exit(void); | 24 | void bf609_nor_flash_exit(struct platform_device *pdev); |
24 | #endif | 25 | #endif |
diff --git a/arch/blackfin/mach-bf609/pm.c b/arch/blackfin/mach-bf609/pm.c index 0cdd6955c7be..b1bfcf434d16 100644 --- a/arch/blackfin/mach-bf609/pm.c +++ b/arch/blackfin/mach-bf609/pm.c | |||
@@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = { | |||
291 | #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) | 291 | #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) |
292 | static int smc_pm_syscore_suspend(void) | 292 | static int smc_pm_syscore_suspend(void) |
293 | { | 293 | { |
294 | bf609_nor_flash_exit(); | 294 | bf609_nor_flash_exit(NULL); |
295 | return 0; | 295 | return 0; |
296 | } | 296 | } |
297 | 297 | ||
298 | static void smc_pm_syscore_resume(void) | 298 | static void smc_pm_syscore_resume(void) |
299 | { | 299 | { |
300 | bf609_nor_flash_init(); | 300 | bf609_nor_flash_init(NULL); |
301 | } | 301 | } |
302 | 302 | ||
303 | static struct syscore_ops smc_pm_syscore_ops = { | 303 | static struct syscore_ops smc_pm_syscore_ops = { |
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c index 867b7cef204c..1f94784eab6d 100644 --- a/arch/blackfin/mach-common/ints-priority.c +++ b/arch/blackfin/mach-common/ints-priority.c | |||
@@ -1208,8 +1208,6 @@ int __init init_arch_irq(void) | |||
1208 | 1208 | ||
1209 | bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority); | 1209 | bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority); |
1210 | 1210 | ||
1211 | bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority); | ||
1212 | |||
1213 | /* Enable interrupts IVG7-15 */ | 1211 | /* Enable interrupts IVG7-15 */ |
1214 | bfin_irq_flags |= IMASK_IVG15 | | 1212 | bfin_irq_flags |= IMASK_IVG15 | |
1215 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | | 1213 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | |
diff --git a/arch/c6x/include/asm/processor.h b/arch/c6x/include/asm/processor.h index b9eb3da7f278..f2ef31be2f8b 100644 --- a/arch/c6x/include/asm/processor.h +++ b/arch/c6x/include/asm/processor.h | |||
@@ -121,6 +121,7 @@ extern unsigned long get_wchan(struct task_struct *p); | |||
121 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) | 121 | #define KSTK_ESP(task) (task_pt_regs(task)->sp) |
122 | 122 | ||
123 | #define cpu_relax() do { } while (0) | 123 | #define cpu_relax() do { } while (0) |
124 | #define cpu_relax_lowlatency() cpu_relax() | ||
124 | 125 | ||
125 | extern const struct seq_operations cpuinfo_op; | 126 | extern const struct seq_operations cpuinfo_op; |
126 | 127 | ||
diff --git a/arch/cris/include/asm/processor.h b/arch/cris/include/asm/processor.h index 15b815df29c1..862126b58116 100644 --- a/arch/cris/include/asm/processor.h +++ b/arch/cris/include/asm/processor.h | |||
@@ -63,6 +63,7 @@ static inline void release_thread(struct task_struct *dead_task) | |||
63 | #define init_stack (init_thread_union.stack) | 63 | #define init_stack (init_thread_union.stack) |
64 | 64 | ||
65 | #define cpu_relax() barrier() | 65 | #define cpu_relax() barrier() |
66 | #define cpu_relax_lowlatency() cpu_relax() | ||
66 | 67 | ||
67 | void default_idle(void); | 68 | void default_idle(void); |
68 | 69 | ||
diff --git a/arch/hexagon/include/asm/processor.h b/arch/hexagon/include/asm/processor.h index 45a825402f63..d8501137c8d0 100644 --- a/arch/hexagon/include/asm/processor.h +++ b/arch/hexagon/include/asm/processor.h | |||
@@ -56,6 +56,7 @@ struct thread_struct { | |||
56 | } | 56 | } |
57 | 57 | ||
58 | #define cpu_relax() __vmyield() | 58 | #define cpu_relax() __vmyield() |
59 | #define cpu_relax_lowlatency() cpu_relax() | ||
59 | 60 | ||
60 | /* | 61 | /* |
61 | * Decides where the kernel will search for a free chunk of vm space during | 62 | * Decides where the kernel will search for a free chunk of vm space during |
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h index 0d2bcb37ec35..bee0acd52f7e 100644 --- a/arch/ia64/include/asm/io.h +++ b/arch/ia64/include/asm/io.h | |||
@@ -426,6 +426,7 @@ extern void iounmap (volatile void __iomem *addr); | |||
426 | extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); | 426 | extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size); |
427 | #define early_memremap(phys_addr, size) early_ioremap(phys_addr, size) | 427 | #define early_memremap(phys_addr, size) early_ioremap(phys_addr, size) |
428 | extern void early_iounmap (volatile void __iomem *addr, unsigned long size); | 428 | extern void early_iounmap (volatile void __iomem *addr, unsigned long size); |
429 | #define early_memunmap(addr, size) early_iounmap(addr, size) | ||
429 | static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size) | 430 | static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size) |
430 | { | 431 | { |
431 | return ioremap(phys_addr, size); | 432 | return ioremap(phys_addr, size); |
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h index efd1b927ccb7..c7367130ab14 100644 --- a/arch/ia64/include/asm/processor.h +++ b/arch/ia64/include/asm/processor.h | |||
@@ -548,6 +548,7 @@ ia64_eoi (void) | |||
548 | } | 548 | } |
549 | 549 | ||
550 | #define cpu_relax() ia64_hint(ia64_hint_pause) | 550 | #define cpu_relax() ia64_hint(ia64_hint_pause) |
551 | #define cpu_relax_lowlatency() cpu_relax() | ||
551 | 552 | ||
552 | static inline int | 553 | static inline int |
553 | ia64_get_irr(unsigned int vector) | 554 | ia64_get_irr(unsigned int vector) |
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 55d4ba47a907..deed6fa96bb0 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -662,7 +662,7 @@ void | |||
662 | machine_restart (char *restart_cmd) | 662 | machine_restart (char *restart_cmd) |
663 | { | 663 | { |
664 | (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0); | 664 | (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0); |
665 | (*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL); | 665 | efi_reboot(REBOOT_WARM, NULL); |
666 | } | 666 | } |
667 | 667 | ||
668 | void | 668 | void |
diff --git a/arch/ia64/pci/fixup.c b/arch/ia64/pci/fixup.c index 1fe9aa5068ea..ec73b2cf912a 100644 --- a/arch/ia64/pci/fixup.c +++ b/arch/ia64/pci/fixup.c | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/pci.h> | 6 | #include <linux/pci.h> |
7 | #include <linux/init.h> | 7 | #include <linux/init.h> |
8 | #include <linux/vgaarb.h> | 8 | #include <linux/vgaarb.h> |
9 | #include <linux/screen_info.h> | ||
9 | 10 | ||
10 | #include <asm/machvec.h> | 11 | #include <asm/machvec.h> |
11 | 12 | ||
@@ -37,6 +38,27 @@ static void pci_fixup_video(struct pci_dev *pdev) | |||
37 | return; | 38 | return; |
38 | /* Maybe, this machine supports legacy memory map. */ | 39 | /* Maybe, this machine supports legacy memory map. */ |
39 | 40 | ||
41 | if (!vga_default_device()) { | ||
42 | resource_size_t start, end; | ||
43 | int i; | ||
44 | |||
45 | /* Does firmware framebuffer belong to us? */ | ||
46 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | ||
47 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) | ||
48 | continue; | ||
49 | |||
50 | start = pci_resource_start(pdev, i); | ||
51 | end = pci_resource_end(pdev, i); | ||
52 | |||
53 | if (!start || !end) | ||
54 | continue; | ||
55 | |||
56 | if (screen_info.lfb_base >= start && | ||
57 | (screen_info.lfb_base + screen_info.lfb_size) < end) | ||
58 | vga_set_default_device(pdev); | ||
59 | } | ||
60 | } | ||
61 | |||
40 | /* Is VGA routed to us? */ | 62 | /* Is VGA routed to us? */ |
41 | bus = pdev->bus; | 63 | bus = pdev->bus; |
42 | while (bus) { | 64 | while (bus) { |
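The new ia64 fixup claims a device as the default VGA device only when the firmware framebuffer from screen_info sits entirely inside one of the device's memory BARs. The test is plain interval containment; a runnable sketch with invented numbers:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the hunk's (deliberately strict '<') containment test. */
static int fb_inside_bar(uint64_t lfb_base, uint64_t lfb_size,
			 uint64_t start, uint64_t end)
{
	return lfb_base >= start && (lfb_base + lfb_size) < end;
}

int main(void)
{
	printf("%d\n", fb_inside_bar(0xd0000000, 0x300000,
				     0xd0000000, 0xdfffffff));	/* 1 */
	printf("%d\n", fb_inside_bar(0xc0000000, 0x300000,
				     0xd0000000, 0xdfffffff));	/* 0 */
	return 0;
}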
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c index cad775a1a157..b2eb48490754 100644 --- a/arch/ia64/sn/kernel/bte.c +++ b/arch/ia64/sn/kernel/bte.c | |||
@@ -114,7 +114,7 @@ bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification) | |||
114 | if (mode & BTE_USE_ANY) { | 114 | if (mode & BTE_USE_ANY) { |
115 | nasid_to_try[1] = my_nasid; | 115 | nasid_to_try[1] = my_nasid; |
116 | } else { | 116 | } else { |
117 | nasid_to_try[1] = (int)NULL; | 117 | nasid_to_try[1] = 0; |
118 | } | 118 | } |
119 | } else { | 119 | } else { |
120 | /* try local then remote */ | 120 | /* try local then remote */ |
@@ -122,7 +122,7 @@ bte_result_t bte_copy(u64 src, u64 dest, u64 len, u64 mode, void *notification) | |||
122 | if (mode & BTE_USE_ANY) { | 122 | if (mode & BTE_USE_ANY) { |
123 | nasid_to_try[1] = NASID_GET(dest); | 123 | nasid_to_try[1] = NASID_GET(dest); |
124 | } else { | 124 | } else { |
125 | nasid_to_try[1] = (int)NULL; | 125 | nasid_to_try[1] = 0; |
126 | } | 126 | } |
127 | } | 127 | } |
128 | 128 | ||
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 53b01b8e2f19..36182c84363c 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c | |||
@@ -579,7 +579,7 @@ void sn_cpu_init(void) | |||
579 | (sn_prom_type == 1) ? "real" : "fake"); | 579 | (sn_prom_type == 1) ? "real" : "fake"); |
580 | } | 580 | } |
581 | 581 | ||
582 | memset(pda, 0, sizeof(pda)); | 582 | memset(pda, 0, sizeof(*pda)); |
583 | if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, | 583 | if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, |
584 | &sn_hub_info->nasid_bitmask, | 584 | &sn_hub_info->nasid_bitmask, |
585 | &sn_hub_info->nasid_shift, | 585 | &sn_hub_info->nasid_shift, |
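The one-character memset fix matters because pda is a pointer: sizeof(pda) is the pointer's size (4 or 8 bytes), while sizeof(*pda) is the size of the structure it points to. A runnable illustration:

#include <stdio.h>
#include <string.h>

struct pda_s { char buf[128]; };	/* stand-in for the real pda type */

int main(void)
{
	struct pda_s obj;
	struct pda_s *pda = &obj;

	printf("sizeof(pda)  = %zu\n", sizeof(pda));	/* pointer: 4 or 8 */
	printf("sizeof(*pda) = %zu\n", sizeof(*pda));	/* struct: 128     */

	memset(pda, 0, sizeof(*pda));	/* the corrected form */
	return 0;
}

The buggy form zeroed only the first few bytes of the per-CPU data area and silently left the rest stale.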
diff --git a/arch/m32r/include/asm/processor.h b/arch/m32r/include/asm/processor.h index 5767367550c6..9f8fd9bef70f 100644 --- a/arch/m32r/include/asm/processor.h +++ b/arch/m32r/include/asm/processor.h | |||
@@ -133,5 +133,6 @@ unsigned long get_wchan(struct task_struct *p); | |||
133 | #define KSTK_ESP(tsk) ((tsk)->thread.sp) | 133 | #define KSTK_ESP(tsk) ((tsk)->thread.sp) |
134 | 134 | ||
135 | #define cpu_relax() barrier() | 135 | #define cpu_relax() barrier() |
136 | #define cpu_relax_lowlatency() cpu_relax() | ||
136 | 137 | ||
137 | #endif /* _ASM_M32R_PROCESSOR_H */ | 138 | #endif /* _ASM_M32R_PROCESSOR_H */ |
diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h index b0768a657920..20dda1d4b860 100644 --- a/arch/m68k/include/asm/processor.h +++ b/arch/m68k/include/asm/processor.h | |||
@@ -176,5 +176,6 @@ unsigned long get_wchan(struct task_struct *p); | |||
176 | #define task_pt_regs(tsk) ((struct pt_regs *) ((tsk)->thread.esp0)) | 176 | #define task_pt_regs(tsk) ((struct pt_regs *) ((tsk)->thread.esp0)) |
177 | 177 | ||
178 | #define cpu_relax() barrier() | 178 | #define cpu_relax() barrier() |
179 | #define cpu_relax_lowlatency() cpu_relax() | ||
179 | 180 | ||
180 | #endif | 181 | #endif |
diff --git a/arch/m68k/include/asm/sun3_pgalloc.h b/arch/m68k/include/asm/sun3_pgalloc.h index f868506e3350..0931388de47f 100644 --- a/arch/m68k/include/asm/sun3_pgalloc.h +++ b/arch/m68k/include/asm/sun3_pgalloc.h | |||
@@ -12,10 +12,6 @@ | |||
12 | 12 | ||
13 | #include <asm/tlb.h> | 13 | #include <asm/tlb.h> |
14 | 14 | ||
15 | /* FIXME - when we get this compiling */ | ||
16 | /* erm, now that it's compiling, what do we do with it? */ | ||
17 | #define _KERNPG_TABLE 0 | ||
18 | |||
19 | extern const char bad_pmd_string[]; | 15 | extern const char bad_pmd_string[]; |
20 | 16 | ||
21 | #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) | 17 | #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) |
diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index 499b7610eaaf..0b389a81c43a 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig | |||
@@ -13,7 +13,6 @@ config METAG | |||
13 | select HAVE_DYNAMIC_FTRACE | 13 | select HAVE_DYNAMIC_FTRACE |
14 | select HAVE_FTRACE_MCOUNT_RECORD | 14 | select HAVE_FTRACE_MCOUNT_RECORD |
15 | select HAVE_FUNCTION_TRACER | 15 | select HAVE_FUNCTION_TRACER |
16 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
17 | select HAVE_KERNEL_BZIP2 | 16 | select HAVE_KERNEL_BZIP2 |
18 | select HAVE_KERNEL_GZIP | 17 | select HAVE_KERNEL_GZIP |
19 | select HAVE_KERNEL_LZO | 18 | select HAVE_KERNEL_LZO |
diff --git a/arch/metag/include/asm/processor.h b/arch/metag/include/asm/processor.h index a8a37477c66e..881071c07942 100644 --- a/arch/metag/include/asm/processor.h +++ b/arch/metag/include/asm/processor.h | |||
@@ -155,6 +155,7 @@ unsigned long get_wchan(struct task_struct *p); | |||
155 | #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) | 155 | #define user_stack_pointer(regs) ((regs)->ctx.AX[0].U0) |
156 | 156 | ||
157 | #define cpu_relax() barrier() | 157 | #define cpu_relax() barrier() |
158 | #define cpu_relax_lowlatency() cpu_relax() | ||
158 | 159 | ||
159 | extern void setup_priv(void); | 160 | extern void setup_priv(void); |
160 | 161 | ||
diff --git a/arch/metag/kernel/ftrace_stub.S b/arch/metag/kernel/ftrace_stub.S index e70bff745bdd..3acc288217c0 100644 --- a/arch/metag/kernel/ftrace_stub.S +++ b/arch/metag/kernel/ftrace_stub.S | |||
@@ -16,13 +16,6 @@ _mcount_wrapper: | |||
16 | .global _ftrace_caller | 16 | .global _ftrace_caller |
17 | .type _ftrace_caller,function | 17 | .type _ftrace_caller,function |
18 | _ftrace_caller: | 18 | _ftrace_caller: |
19 | MOVT D0Re0,#HI(_function_trace_stop) | ||
20 | ADD D0Re0,D0Re0,#LO(_function_trace_stop) | ||
21 | GETD D0Re0,[D0Re0] | ||
22 | CMP D0Re0,#0 | ||
23 | BEQ $Lcall_stub | ||
24 | MOV PC,D0.4 | ||
25 | $Lcall_stub: | ||
26 | MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 | 19 | MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 |
27 | MOV D1Ar1, D0.4 | 20 | MOV D1Ar1, D0.4 |
28 | MOV D0Ar2, D1RtP | 21 | MOV D0Ar2, D1RtP |
@@ -42,13 +35,6 @@ _ftrace_call: | |||
42 | .global _mcount_wrapper | 35 | .global _mcount_wrapper |
43 | .type _mcount_wrapper,function | 36 | .type _mcount_wrapper,function |
44 | _mcount_wrapper: | 37 | _mcount_wrapper: |
45 | MOVT D0Re0,#HI(_function_trace_stop) | ||
46 | ADD D0Re0,D0Re0,#LO(_function_trace_stop) | ||
47 | GETD D0Re0,[D0Re0] | ||
48 | CMP D0Re0,#0 | ||
49 | BEQ $Lcall_mcount | ||
50 | MOV PC,D0.4 | ||
51 | $Lcall_mcount: | ||
52 | MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 | 38 | MSETL [A0StP], D0Ar6, D0Ar4, D0Ar2, D0.4 |
53 | MOV D1Ar1, D0.4 | 39 | MOV D1Ar1, D0.4 |
54 | MOV D0Ar2, D1RtP | 40 | MOV D0Ar2, D1RtP |
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c index 5cc4d4dcf3cf..02c08737f6aa 100644 --- a/arch/metag/kernel/perf/perf_event.c +++ b/arch/metag/kernel/perf/perf_event.c | |||
@@ -568,16 +568,6 @@ static int _hw_perf_event_init(struct perf_event *event) | |||
568 | return -EINVAL; | 568 | return -EINVAL; |
569 | 569 | ||
570 | /* | 570 | /* |
571 | * Early cores have "limited" counters - they have no overflow | ||
572 | * interrupts - and so are unable to do sampling without extra work | ||
573 | * and timer assistance. | ||
574 | */ | ||
575 | if (metag_pmu->max_period == 0) { | ||
576 | if (hwc->sample_period) | ||
577 | return -EINVAL; | ||
578 | } | ||
579 | |||
580 | /* | ||
581 | * Don't assign an index until the event is placed into the hardware. | 571 | * Don't assign an index until the event is placed into the hardware. |
582 | * -1 signifies that we're still deciding where to put it. On SMP | 572 | * -1 signifies that we're still deciding where to put it. On SMP |
583 | * systems each core has its own set of counters, so we can't do any | 573 | * systems each core has its own set of counters, so we can't do any |
@@ -866,6 +856,15 @@ static int __init init_hw_perf_events(void) | |||
866 | pr_info("enabled with %s PMU driver, %d counters available\n", | 856 | pr_info("enabled with %s PMU driver, %d counters available\n", |
867 | metag_pmu->name, metag_pmu->max_events); | 857 | metag_pmu->name, metag_pmu->max_events); |
868 | 858 | ||
859 | /* | ||
860 | * Early cores have "limited" counters - they have no overflow | ||
861 | * interrupts - and so are unable to do sampling without extra work | ||
862 | * and timer assistance. | ||
863 | */ | ||
864 | if (metag_pmu->max_period == 0) { | ||
865 | metag_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; | ||
866 | } | ||
867 | |||
869 | /* Initialise the active events and reservation mutex */ | 868 | /* Initialise the active events and reservation mutex */ |
870 | atomic_set(&metag_pmu->active_events, 0); | 869 | atomic_set(&metag_pmu->active_events, 0); |
871 | mutex_init(&metag_pmu->reserve_mutex); | 870 | mutex_init(&metag_pmu->reserve_mutex); |
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 9ae08541e30d..40e1c1dd0e24 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -22,7 +22,6 @@ config MICROBLAZE | |||
22 | select HAVE_DYNAMIC_FTRACE | 22 | select HAVE_DYNAMIC_FTRACE |
23 | select HAVE_FTRACE_MCOUNT_RECORD | 23 | select HAVE_FTRACE_MCOUNT_RECORD |
24 | select HAVE_FUNCTION_GRAPH_TRACER | 24 | select HAVE_FUNCTION_GRAPH_TRACER |
25 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
26 | select HAVE_FUNCTION_TRACER | 25 | select HAVE_FUNCTION_TRACER |
27 | select HAVE_MEMBLOCK | 26 | select HAVE_MEMBLOCK |
28 | select HAVE_MEMBLOCK_NODE_MAP | 27 | select HAVE_MEMBLOCK_NODE_MAP |
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h index 9d31b057c355..497a988d79c2 100644 --- a/arch/microblaze/include/asm/processor.h +++ b/arch/microblaze/include/asm/processor.h | |||
@@ -22,6 +22,7 @@ | |||
22 | extern const struct seq_operations cpuinfo_op; | 22 | extern const struct seq_operations cpuinfo_op; |
23 | 23 | ||
24 | # define cpu_relax() barrier() | 24 | # define cpu_relax() barrier() |
25 | # define cpu_relax_lowlatency() cpu_relax() | ||
25 | 26 | ||
26 | #define task_pt_regs(tsk) \ | 27 | #define task_pt_regs(tsk) \ |
27 | (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) | 28 | (((struct pt_regs *)(THREAD_SIZE + task_stack_page(tsk))) - 1) |
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c index bbcd2533766c..fc7b48a52cd5 100644 --- a/arch/microblaze/kernel/ftrace.c +++ b/arch/microblaze/kernel/ftrace.c | |||
@@ -27,6 +27,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
27 | unsigned long return_hooker = (unsigned long) | 27 | unsigned long return_hooker = (unsigned long) |
28 | &return_to_handler; | 28 | &return_to_handler; |
29 | 29 | ||
30 | if (unlikely(ftrace_graph_is_dead())) | ||
31 | return; | ||
32 | |||
30 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 33 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
31 | return; | 34 | return; |
32 | 35 | ||
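ftrace_graph_is_dead() reports that the graph tracer shut itself down after an internal error, so arch hooks must bail out before touching the return address. The guard pattern the affected architectures now share, in kernel-style C (a sketch; only the two checks and their order are the point):

void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	if (unlikely(ftrace_graph_is_dead()))
		return;		/* tracer shut down after an error */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;		/* graph tracing paused for this task */
	/* ... hook *parent with return_to_handler ... */
}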
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S index fc1e1322ce4c..fed9da5de8c4 100644 --- a/arch/microblaze/kernel/mcount.S +++ b/arch/microblaze/kernel/mcount.S | |||
@@ -91,11 +91,6 @@ ENTRY(ftrace_caller) | |||
91 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 91 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
92 | SAVE_REGS | 92 | SAVE_REGS |
93 | swi r15, r1, 0; | 93 | swi r15, r1, 0; |
94 | /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST begin of checking */ | ||
95 | lwi r5, r0, function_trace_stop; | ||
96 | bneid r5, end; | ||
97 | nop; | ||
98 | /* MS: HAVE_FUNCTION_TRACE_MCOUNT_TEST end of checking */ | ||
99 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 94 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
100 | #ifndef CONFIG_DYNAMIC_FTRACE | 95 | #ifndef CONFIG_DYNAMIC_FTRACE |
101 | lwi r5, r0, ftrace_graph_return; | 96 | lwi r5, r0, ftrace_graph_return; |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 4e238e6e661c..10f270bd3e25 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -15,7 +15,6 @@ config MIPS | |||
15 | select HAVE_BPF_JIT if !CPU_MICROMIPS | 15 | select HAVE_BPF_JIT if !CPU_MICROMIPS |
16 | select ARCH_HAVE_CUSTOM_GPIO_H | 16 | select ARCH_HAVE_CUSTOM_GPIO_H |
17 | select HAVE_FUNCTION_TRACER | 17 | select HAVE_FUNCTION_TRACER |
18 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
19 | select HAVE_DYNAMIC_FTRACE | 18 | select HAVE_DYNAMIC_FTRACE |
20 | select HAVE_FTRACE_MCOUNT_RECORD | 19 | select HAVE_FTRACE_MCOUNT_RECORD |
21 | select HAVE_C_RECORDMCOUNT | 20 | select HAVE_C_RECORDMCOUNT |
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h index b0aa95565752..7a3fc67bd7f9 100644 --- a/arch/mips/include/asm/kvm_host.h +++ b/arch/mips/include/asm/kvm_host.h | |||
@@ -359,13 +359,17 @@ enum emulation_result { | |||
359 | #define MIPS3_PG_FRAME 0x3fffffc0 | 359 | #define MIPS3_PG_FRAME 0x3fffffc0 |
360 | 360 | ||
361 | #define VPN2_MASK 0xffffe000 | 361 | #define VPN2_MASK 0xffffe000 |
362 | #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \ | 362 | #define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \ |
363 | ((x).tlb_lo1 & MIPS3_PG_G)) | 363 | ((x).tlb_lo1 & MIPS3_PG_G)) |
364 | #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) | 364 | #define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK) |
365 | #define TLB_ASID(x) ((x).tlb_hi & ASID_MASK) | 365 | #define TLB_ASID(x) ((x).tlb_hi & ASID_MASK) |
366 | #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \ | 366 | #define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \ |
367 | ? ((x).tlb_lo1 & MIPS3_PG_V) \ | 367 | ? ((x).tlb_lo1 & MIPS3_PG_V) \ |
368 | : ((x).tlb_lo0 & MIPS3_PG_V)) | 368 | : ((x).tlb_lo0 & MIPS3_PG_V)) |
369 | #define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \ | ||
370 | ((y) & VPN2_MASK & ~(x).tlb_mask)) | ||
371 | #define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \ | ||
372 | TLB_ASID(x) == ((y) & ASID_MASK)) | ||
369 | 373 | ||
370 | struct kvm_mips_tlb { | 374 | struct kvm_mips_tlb { |
371 | long tlb_mask; | 375 | long tlb_mask; |
@@ -760,7 +764,7 @@ extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, | |||
760 | struct kvm_vcpu *vcpu); | 764 | struct kvm_vcpu *vcpu); |
761 | 765 | ||
762 | /* Misc */ | 766 | /* Misc */ |
763 | extern int kvm_mips_dump_stats(struct kvm_vcpu *vcpu); | 767 | extern void kvm_mips_dump_stats(struct kvm_vcpu *vcpu); |
764 | extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); | 768 | extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm); |
765 | 769 | ||
766 | 770 | ||
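The two new helpers bundle the masked VPN2 compare and the ASID-or-global check that the TLB lookup paths previously open-coded. A hedged sketch of a guest TLB probe built on them (the loop is illustrative, not copied from tlb.c):

/* Return the index of the guest TLB entry matching entryhi, or -1. */
static int guest_tlb_probe(struct kvm_mips_tlb *tlb, int entries,
			   unsigned long entryhi)
{
	int i;

	for (i = 0; i < entries; i++) {
		if (TLB_HI_VPN2_HIT(tlb[i], entryhi) &&
		    TLB_HI_ASID_HIT(tlb[i], entryhi))
			return i;	/* VPN2 and ASID/global both match */
	}
	return -1;
}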
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h index ad70cba8daff..d5098bc554f4 100644 --- a/arch/mips/include/asm/processor.h +++ b/arch/mips/include/asm/processor.h | |||
@@ -367,6 +367,7 @@ unsigned long get_wchan(struct task_struct *p); | |||
367 | #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status) | 367 | #define KSTK_STATUS(tsk) (task_pt_regs(tsk)->cp0_status) |
368 | 368 | ||
369 | #define cpu_relax() barrier() | 369 | #define cpu_relax() barrier() |
370 | #define cpu_relax_lowlatency() cpu_relax() | ||
370 | 371 | ||
371 | /* | 372 | /* |
372 | * Return_address is a replacement for __builtin_return_address(count) | 373 | * Return_address is a replacement for __builtin_return_address(count) |
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 0b8bd28a0df1..4520adc8699b 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h | |||
@@ -19,6 +19,9 @@ | |||
19 | #include <asm/mipsmtregs.h> | 19 | #include <asm/mipsmtregs.h> |
20 | #include <asm/uaccess.h> /* for segment_eq() */ | 20 | #include <asm/uaccess.h> /* for segment_eq() */ |
21 | 21 | ||
22 | extern void (*r4k_blast_dcache)(void); | ||
23 | extern void (*r4k_blast_icache)(void); | ||
24 | |||
22 | /* | 25 | /* |
23 | * This macro return a properly sign-extended address suitable as base address | 26 | * This macro return a properly sign-extended address suitable as base address |
24 | * for indexed cache operations. Two issues here: | 27 | * for indexed cache operations. Two issues here: |
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 60e7e5e45af1..8b6538750fe1 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c | |||
@@ -302,6 +302,9 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, | |||
302 | &return_to_handler; | 302 | &return_to_handler; |
303 | int faulted, insns; | 303 | int faulted, insns; |
304 | 304 | ||
305 | if (unlikely(ftrace_graph_is_dead())) | ||
306 | return; | ||
307 | |||
305 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 308 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
306 | return; | 309 | return; |
307 | 310 | ||
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S index 539b6294b613..00940d1d5c4f 100644 --- a/arch/mips/kernel/mcount.S +++ b/arch/mips/kernel/mcount.S | |||
@@ -74,10 +74,6 @@ _mcount: | |||
74 | #endif | 74 | #endif |
75 | 75 | ||
76 | /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ | 76 | /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ |
77 | lw t1, function_trace_stop | ||
78 | bnez t1, ftrace_stub | ||
79 | nop | ||
80 | |||
81 | MCOUNT_SAVE_REGS | 77 | MCOUNT_SAVE_REGS |
82 | #ifdef KBUILD_MCOUNT_RA_ADDRESS | 78 | #ifdef KBUILD_MCOUNT_RA_ADDRESS |
83 | PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp) | 79 | PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp) |
@@ -105,9 +101,6 @@ ftrace_stub: | |||
105 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | 101 | #else /* ! CONFIG_DYNAMIC_FTRACE */ |
106 | 102 | ||
107 | NESTED(_mcount, PT_SIZE, ra) | 103 | NESTED(_mcount, PT_SIZE, ra) |
108 | lw t1, function_trace_stop | ||
109 | bnez t1, ftrace_stub | ||
110 | nop | ||
111 | PTR_LA t1, ftrace_stub | 104 | PTR_LA t1, ftrace_stub |
112 | PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ | 105 | PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ |
113 | bne t1, t2, static_trace | 106 | bne t1, t2, static_trace |
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile index 78d87bbc99db..401fe027c261 100644 --- a/arch/mips/kvm/Makefile +++ b/arch/mips/kvm/Makefile | |||
@@ -5,9 +5,9 @@ common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | |||
5 | 5 | ||
6 | EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm | 6 | EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm |
7 | 7 | ||
8 | kvm-objs := $(common-objs) kvm_mips.o kvm_mips_emul.o kvm_locore.o \ | 8 | kvm-objs := $(common-objs) mips.o emulate.o locore.o \ |
9 | kvm_mips_int.o kvm_mips_stats.o kvm_mips_commpage.o \ | 9 | interrupt.o stats.o commpage.o \ |
10 | kvm_mips_dyntrans.o kvm_trap_emul.o | 10 | dyntrans.o trap_emul.o |
11 | 11 | ||
12 | obj-$(CONFIG_KVM) += kvm.o | 12 | obj-$(CONFIG_KVM) += kvm.o |
13 | obj-y += kvm_cb.o kvm_tlb.o | 13 | obj-y += callback.o tlb.o |
diff --git a/arch/mips/kvm/kvm_cb.c b/arch/mips/kvm/callback.c index 313c2e37b978..313c2e37b978 100644 --- a/arch/mips/kvm/kvm_cb.c +++ b/arch/mips/kvm/callback.c | |||
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c new file mode 100644 index 000000000000..2d6e976d1add --- /dev/null +++ b/arch/mips/kvm/commpage.c | |||
@@ -0,0 +1,33 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * commpage, currently used for Virtual COP0 registers. | ||
7 | * Mapped into the guest kernel @ 0x0. | ||
8 | * | ||
9 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
10 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/cacheflush.h> | ||
21 | #include <asm/mmu_context.h> | ||
22 | |||
23 | #include <linux/kvm_host.h> | ||
24 | |||
25 | #include "commpage.h" | ||
26 | |||
27 | void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) | ||
28 | { | ||
29 | struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; | ||
30 | |||
31 | /* Specific init values for fields */ | ||
32 | vcpu->arch.cop0 = &page->cop0; | ||
33 | } | ||
diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h new file mode 100644 index 000000000000..08c5fa2bbc0f --- /dev/null +++ b/arch/mips/kvm/commpage.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: commpage: mapped into guest kernel space | ||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
10 | */ | ||
11 | |||
12 | #ifndef __KVM_MIPS_COMMPAGE_H__ | ||
13 | #define __KVM_MIPS_COMMPAGE_H__ | ||
14 | |||
15 | struct kvm_mips_commpage { | ||
16 | /* COP0 state is mapped into Guest kernel via commpage */ | ||
17 | struct mips_coproc cop0; | ||
18 | }; | ||
19 | |||
20 | #define KVM_MIPS_COMM_EIDI_OFFSET 0x0 | ||
21 | |||
22 | extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu); | ||
23 | |||
24 | #endif /* __KVM_MIPS_COMMPAGE_H__ */ | ||
diff --git a/arch/mips/kvm/kvm_mips_dyntrans.c b/arch/mips/kvm/dyntrans.c index b80e41d858fd..521121bdebff 100644 --- a/arch/mips/kvm/kvm_mips_dyntrans.c +++ b/arch/mips/kvm/dyntrans.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Binary Patching for privileged instructions, reduces traps. | 6 | * KVM/MIPS: Binary Patching for privileged instructions, reduces traps. |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/bootmem.h> | 18 | #include <linux/bootmem.h> |
19 | #include <asm/cacheflush.h> | 19 | #include <asm/cacheflush.h> |
20 | 20 | ||
21 | #include "kvm_mips_comm.h" | 21 | #include "commpage.h" |
22 | 22 | ||
23 | #define SYNCI_TEMPLATE 0x041f0000 | 23 | #define SYNCI_TEMPLATE 0x041f0000 |
24 | #define SYNCI_BASE(x) (((x) >> 21) & 0x1f) | 24 | #define SYNCI_BASE(x) (((x) >> 21) & 0x1f) |
@@ -28,9 +28,8 @@ | |||
28 | #define CLEAR_TEMPLATE 0x00000020 | 28 | #define CLEAR_TEMPLATE 0x00000020 |
29 | #define SW_TEMPLATE 0xac000000 | 29 | #define SW_TEMPLATE 0xac000000 |
30 | 30 | ||
31 | int | 31 | int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, |
32 | kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, | 32 | struct kvm_vcpu *vcpu) |
33 | struct kvm_vcpu *vcpu) | ||
34 | { | 33 | { |
35 | int result = 0; | 34 | int result = 0; |
36 | unsigned long kseg0_opc; | 35 | unsigned long kseg0_opc; |
@@ -47,12 +46,11 @@ kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc, | |||
47 | } | 46 | } |
48 | 47 | ||
49 | /* | 48 | /* |
50 | * Address based CACHE instructions are transformed into synci(s). A little heavy | 49 | * Address based CACHE instructions are transformed into synci(s). A little |
51 | * for just D-cache invalidates, but avoids an expensive trap | 50 | * heavy for just D-cache invalidates, but avoids an expensive trap |
52 | */ | 51 | */ |
53 | int | 52 | int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, |
54 | kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, | 53 | struct kvm_vcpu *vcpu) |
55 | struct kvm_vcpu *vcpu) | ||
56 | { | 54 | { |
57 | int result = 0; | 55 | int result = 0; |
58 | unsigned long kseg0_opc; | 56 | unsigned long kseg0_opc; |
@@ -72,8 +70,7 @@ kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc, | |||
72 | return result; | 70 | return result; |
73 | } | 71 | } |
74 | 72 | ||
75 | int | 73 | int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) |
76 | kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) | ||
77 | { | 74 | { |
78 | int32_t rt, rd, sel; | 75 | int32_t rt, rd, sel; |
79 | uint32_t mfc0_inst; | 76 | uint32_t mfc0_inst; |
@@ -115,8 +112,7 @@ kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) | |||
115 | return 0; | 112 | return 0; |
116 | } | 113 | } |
117 | 114 | ||
118 | int | 115 | int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) |
119 | kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu) | ||
120 | { | 116 | { |
121 | int32_t rt, rd, sel; | 117 | int32_t rt, rd, sel; |
122 | uint32_t mtc0_inst = SW_TEMPLATE; | 118 | uint32_t mtc0_inst = SW_TEMPLATE; |
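Note: as a rough sketch of the translation above, given SYNCI_TEMPLATE and the field position implied by SYNCI_BASE() (base register in bits 25:21), the base and 16-bit offset of a trapped address-based CACHE instruction can be folded into a SYNCI encoding along these lines. The helper name is hypothetical and the 16-bit offset field is the standard MIPS i_format immediate.

/* Hypothetical illustration, not part of the patch. */
static uint32_t example_build_synci(uint32_t base, uint32_t offset)
{
	/* base register in bits 25:21, 16-bit offset in bits 15:0 */
	return SYNCI_TEMPLATE | ((base & 0x1f) << 21) | (offset & 0xffff);
}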
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/emulate.c index 8d4840090082..fb3e8dfd1ff6 100644 --- a/arch/mips/kvm/kvm_mips_emul.c +++ b/arch/mips/kvm/emulate.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Instruction/Exception emulation | 6 | * KVM/MIPS: Instruction/Exception emulation |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
@@ -29,9 +29,9 @@ | |||
29 | #include <asm/r4kcache.h> | 29 | #include <asm/r4kcache.h> |
30 | #define CONFIG_MIPS_MT | 30 | #define CONFIG_MIPS_MT |
31 | 31 | ||
32 | #include "kvm_mips_opcode.h" | 32 | #include "opcode.h" |
33 | #include "kvm_mips_int.h" | 33 | #include "interrupt.h" |
34 | #include "kvm_mips_comm.h" | 34 | #include "commpage.h" |
35 | 35 | ||
36 | #include "trace.h" | 36 | #include "trace.h" |
37 | 37 | ||
@@ -51,18 +51,14 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
51 | if (epc & 3) | 51 | if (epc & 3) |
52 | goto unaligned; | 52 | goto unaligned; |
53 | 53 | ||
54 | /* | 54 | /* Read the instruction */ |
55 | * Read the instruction | ||
56 | */ | ||
57 | insn.word = kvm_get_inst((uint32_t *) epc, vcpu); | 55 | insn.word = kvm_get_inst((uint32_t *) epc, vcpu); |
58 | 56 | ||
59 | if (insn.word == KVM_INVALID_INST) | 57 | if (insn.word == KVM_INVALID_INST) |
60 | return KVM_INVALID_INST; | 58 | return KVM_INVALID_INST; |
61 | 59 | ||
62 | switch (insn.i_format.opcode) { | 60 | switch (insn.i_format.opcode) { |
63 | /* | 61 | /* jr and jalr are in r_format format. */ |
64 | * jr and jalr are in r_format format. | ||
65 | */ | ||
66 | case spec_op: | 62 | case spec_op: |
67 | switch (insn.r_format.func) { | 63 | switch (insn.r_format.func) { |
68 | case jalr_op: | 64 | case jalr_op: |
@@ -124,18 +120,16 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
124 | 120 | ||
125 | dspcontrol = rddsp(0x01); | 121 | dspcontrol = rddsp(0x01); |
126 | 122 | ||
127 | if (dspcontrol >= 32) { | 123 | if (dspcontrol >= 32) |
128 | epc = epc + 4 + (insn.i_format.simmediate << 2); | 124 | epc = epc + 4 + (insn.i_format.simmediate << 2); |
129 | } else | 125 | else |
130 | epc += 8; | 126 | epc += 8; |
131 | nextpc = epc; | 127 | nextpc = epc; |
132 | break; | 128 | break; |
133 | } | 129 | } |
134 | break; | 130 | break; |
135 | 131 | ||
136 | /* | 132 | /* These are unconditional and in j_format. */ |
137 | * These are unconditional and in j_format. | ||
138 | */ | ||
139 | case jal_op: | 133 | case jal_op: |
140 | arch->gprs[31] = instpc + 8; | 134 | arch->gprs[31] = instpc + 8; |
141 | case j_op: | 135 | case j_op: |
@@ -146,9 +140,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
146 | nextpc = epc; | 140 | nextpc = epc; |
147 | break; | 141 | break; |
148 | 142 | ||
149 | /* | 143 | /* These are conditional and in i_format. */ |
150 | * These are conditional and in i_format. | ||
151 | */ | ||
152 | case beq_op: | 144 | case beq_op: |
153 | case beql_op: | 145 | case beql_op: |
154 | if (arch->gprs[insn.i_format.rs] == | 146 | if (arch->gprs[insn.i_format.rs] == |
@@ -189,22 +181,20 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu, | |||
189 | nextpc = epc; | 181 | nextpc = epc; |
190 | break; | 182 | break; |
191 | 183 | ||
192 | /* | 184 | /* And now the FPA/cp1 branch instructions. */ |
193 | * And now the FPA/cp1 branch instructions. | ||
194 | */ | ||
195 | case cop1_op: | 185 | case cop1_op: |
196 | printk("%s: unsupported cop1_op\n", __func__); | 186 | kvm_err("%s: unsupported cop1_op\n", __func__); |
197 | break; | 187 | break; |
198 | } | 188 | } |
199 | 189 | ||
200 | return nextpc; | 190 | return nextpc; |
201 | 191 | ||
202 | unaligned: | 192 | unaligned: |
203 | printk("%s: unaligned epc\n", __func__); | 193 | kvm_err("%s: unaligned epc\n", __func__); |
204 | return nextpc; | 194 | return nextpc; |
205 | 195 | ||
206 | sigill: | 196 | sigill: |
207 | printk("%s: DSP branch but not DSP ASE\n", __func__); | 197 | kvm_err("%s: DSP branch but not DSP ASE\n", __func__); |
208 | return nextpc; | 198 | return nextpc; |
209 | } | 199 | } |
210 | 200 | ||
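Note: for reference, the taken i_format branches in kvm_compute_return_epc() above resolve relative to the delay-slot instruction: nextpc = epc + 4 + (simmediate << 2). A minimal worked example (hypothetical helper name):

/* Hypothetical illustration, not part of the patch. */
static unsigned long example_branch_target(unsigned long epc, int16_t simm)
{
	/* offset is in words, relative to the delay slot at epc + 4 */
	return epc + 4 + ((long)simm << 2);
}

/* e.g. a branch at 0x80001000 with simm == -2 targets 0x80000ffc */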
@@ -219,7 +209,8 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) | |||
219 | er = EMULATE_FAIL; | 209 | er = EMULATE_FAIL; |
220 | } else { | 210 | } else { |
221 | vcpu->arch.pc = branch_pc; | 211 | vcpu->arch.pc = branch_pc; |
222 | kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc); | 212 | kvm_debug("BD update_pc(): New PC: %#lx\n", |
213 | vcpu->arch.pc); | ||
223 | } | 214 | } |
224 | } else | 215 | } else |
225 | vcpu->arch.pc += 4; | 216 | vcpu->arch.pc += 4; |
@@ -240,6 +231,7 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause) | |||
240 | static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) | 231 | static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) |
241 | { | 232 | { |
242 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 233 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
234 | |||
243 | return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || | 235 | return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) || |
244 | (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); | 236 | (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC); |
245 | } | 237 | } |
@@ -392,7 +384,6 @@ static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, | |||
392 | return now; | 384 | return now; |
393 | } | 385 | } |
394 | 386 | ||
395 | |||
396 | /** | 387 | /** |
397 | * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. | 388 | * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry. |
398 | * @vcpu: Virtual CPU. | 389 | * @vcpu: Virtual CPU. |
@@ -760,8 +751,8 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) | |||
760 | kvm_clear_c0_guest_status(cop0, ST0_ERL); | 751 | kvm_clear_c0_guest_status(cop0, ST0_ERL); |
761 | vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); | 752 | vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); |
762 | } else { | 753 | } else { |
763 | printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", | 754 | kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", |
764 | vcpu->arch.pc); | 755 | vcpu->arch.pc); |
765 | er = EMULATE_FAIL; | 756 | er = EMULATE_FAIL; |
766 | } | 757 | } |
767 | 758 | ||
@@ -770,8 +761,6 @@ enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) | |||
770 | 761 | ||
771 | enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) | 762 | enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) |
772 | { | 763 | { |
773 | enum emulation_result er = EMULATE_DONE; | ||
774 | |||
775 | kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, | 764 | kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, |
776 | vcpu->arch.pending_exceptions); | 765 | vcpu->arch.pending_exceptions); |
777 | 766 | ||
@@ -781,8 +770,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) | |||
781 | vcpu->arch.wait = 1; | 770 | vcpu->arch.wait = 1; |
782 | kvm_vcpu_block(vcpu); | 771 | kvm_vcpu_block(vcpu); |
783 | 772 | ||
784 | /* We we are runnable, then definitely go off to user space to check if any | 773 | /* |
785 | * I/O interrupts are pending. | 774 | * If we are runnable, then definitely go off to user space to
775 | * check if any I/O interrupts are pending. | ||
786 | */ | 776 | */ |
787 | if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { | 777 | if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { |
788 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | 778 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); |
@@ -790,20 +780,20 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) | |||
790 | } | 780 | } |
791 | } | 781 | } |
792 | 782 | ||
793 | return er; | 783 | return EMULATE_DONE; |
794 | } | 784 | } |
795 | 785 | ||
796 | /* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch | 786 | /* |
797 | * this, if things ever change | 787 | * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that |
788 | * we can catch this, if things ever change | ||
798 | */ | 789 | */ |
799 | enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) | 790 | enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) |
800 | { | 791 | { |
801 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 792 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
802 | enum emulation_result er = EMULATE_FAIL; | ||
803 | uint32_t pc = vcpu->arch.pc; | 793 | uint32_t pc = vcpu->arch.pc; |
804 | 794 | ||
805 | printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); | 795 | kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0)); |
806 | return er; | 796 | return EMULATE_FAIL; |
807 | } | 797 | } |
808 | 798 | ||
809 | /* Write Guest TLB Entry @ Index */ | 799 | /* Write Guest TLB Entry @ Index */ |
@@ -811,88 +801,76 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) | |||
811 | { | 801 | { |
812 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 802 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
813 | int index = kvm_read_c0_guest_index(cop0); | 803 | int index = kvm_read_c0_guest_index(cop0); |
814 | enum emulation_result er = EMULATE_DONE; | ||
815 | struct kvm_mips_tlb *tlb = NULL; | 804 | struct kvm_mips_tlb *tlb = NULL; |
816 | uint32_t pc = vcpu->arch.pc; | 805 | uint32_t pc = vcpu->arch.pc; |
817 | 806 | ||
818 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { | 807 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { |
819 | printk("%s: illegal index: %d\n", __func__, index); | 808 | kvm_debug("%s: illegal index: %d\n", __func__, index); |
820 | printk | 809 | kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", |
821 | ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", | 810 | pc, index, kvm_read_c0_guest_entryhi(cop0), |
822 | pc, index, kvm_read_c0_guest_entryhi(cop0), | 811 | kvm_read_c0_guest_entrylo0(cop0), |
823 | kvm_read_c0_guest_entrylo0(cop0), | 812 | kvm_read_c0_guest_entrylo1(cop0), |
824 | kvm_read_c0_guest_entrylo1(cop0), | 813 | kvm_read_c0_guest_pagemask(cop0)); |
825 | kvm_read_c0_guest_pagemask(cop0)); | ||
826 | index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; | 814 | index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; |
827 | } | 815 | } |
828 | 816 | ||
829 | tlb = &vcpu->arch.guest_tlb[index]; | 817 | tlb = &vcpu->arch.guest_tlb[index]; |
830 | #if 1 | 818 | /* |
831 | /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ | 819 | * Probe the shadow host TLB for the entry being overwritten, if one |
820 | * matches, invalidate it | ||
821 | */ | ||
832 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); | 822 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); |
833 | #endif | ||
834 | 823 | ||
835 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); | 824 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); |
836 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); | 825 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); |
837 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); | 826 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); |
838 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); | 827 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); |
839 | 828 | ||
840 | kvm_debug | 829 | kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", |
841 | ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", | 830 | pc, index, kvm_read_c0_guest_entryhi(cop0), |
842 | pc, index, kvm_read_c0_guest_entryhi(cop0), | 831 | kvm_read_c0_guest_entrylo0(cop0), |
843 | kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0), | 832 | kvm_read_c0_guest_entrylo1(cop0), |
844 | kvm_read_c0_guest_pagemask(cop0)); | 833 | kvm_read_c0_guest_pagemask(cop0)); |
845 | 834 | ||
846 | return er; | 835 | return EMULATE_DONE; |
847 | } | 836 | } |
848 | 837 | ||
849 | /* Write Guest TLB Entry @ Random Index */ | 838 | /* Write Guest TLB Entry @ Random Index */ |
850 | enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) | 839 | enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) |
851 | { | 840 | { |
852 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 841 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
853 | enum emulation_result er = EMULATE_DONE; | ||
854 | struct kvm_mips_tlb *tlb = NULL; | 842 | struct kvm_mips_tlb *tlb = NULL; |
855 | uint32_t pc = vcpu->arch.pc; | 843 | uint32_t pc = vcpu->arch.pc; |
856 | int index; | 844 | int index; |
857 | 845 | ||
858 | #if 1 | ||
859 | get_random_bytes(&index, sizeof(index)); | 846 | get_random_bytes(&index, sizeof(index)); |
860 | index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); | 847 | index &= (KVM_MIPS_GUEST_TLB_SIZE - 1); |
861 | #else | ||
862 | index = jiffies % KVM_MIPS_GUEST_TLB_SIZE; | ||
863 | #endif | ||
864 | |||
865 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { | ||
866 | printk("%s: illegal index: %d\n", __func__, index); | ||
867 | return EMULATE_FAIL; | ||
868 | } | ||
869 | 848 | ||
870 | tlb = &vcpu->arch.guest_tlb[index]; | 849 | tlb = &vcpu->arch.guest_tlb[index]; |
871 | 850 | ||
872 | #if 1 | 851 | /* |
873 | /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */ | 852 | * Probe the shadow host TLB for the entry being overwritten, if one |
853 | * matches, invalidate it | ||
854 | */ | ||
874 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); | 855 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi); |
875 | #endif | ||
876 | 856 | ||
877 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); | 857 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); |
878 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); | 858 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); |
879 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); | 859 | tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0); |
880 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); | 860 | tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0); |
881 | 861 | ||
882 | kvm_debug | 862 | kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", |
883 | ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", | 863 | pc, index, kvm_read_c0_guest_entryhi(cop0), |
884 | pc, index, kvm_read_c0_guest_entryhi(cop0), | 864 | kvm_read_c0_guest_entrylo0(cop0), |
885 | kvm_read_c0_guest_entrylo0(cop0), | 865 | kvm_read_c0_guest_entrylo1(cop0)); |
886 | kvm_read_c0_guest_entrylo1(cop0)); | ||
887 | 866 | ||
888 | return er; | 867 | return EMULATE_DONE; |
889 | } | 868 | } |
890 | 869 | ||
891 | enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) | 870 | enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) |
892 | { | 871 | { |
893 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 872 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
894 | long entryhi = kvm_read_c0_guest_entryhi(cop0); | 873 | long entryhi = kvm_read_c0_guest_entryhi(cop0); |
895 | enum emulation_result er = EMULATE_DONE; | ||
896 | uint32_t pc = vcpu->arch.pc; | 874 | uint32_t pc = vcpu->arch.pc; |
897 | int index = -1; | 875 | int index = -1; |
898 | 876 | ||
@@ -903,12 +881,12 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) | |||
903 | kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, | 881 | kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, |
904 | index); | 882 | index); |
905 | 883 | ||
906 | return er; | 884 | return EMULATE_DONE; |
907 | } | 885 | } |
908 | 886 | ||
909 | enum emulation_result | 887 | enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, |
910 | kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | 888 | uint32_t cause, struct kvm_run *run, |
911 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 889 | struct kvm_vcpu *vcpu) |
912 | { | 890 | { |
913 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 891 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
914 | enum emulation_result er = EMULATE_DONE; | 892 | enum emulation_result er = EMULATE_DONE; |
@@ -922,9 +900,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
922 | */ | 900 | */ |
923 | curr_pc = vcpu->arch.pc; | 901 | curr_pc = vcpu->arch.pc; |
924 | er = update_pc(vcpu, cause); | 902 | er = update_pc(vcpu, cause); |
925 | if (er == EMULATE_FAIL) { | 903 | if (er == EMULATE_FAIL) |
926 | return er; | 904 | return er; |
927 | } | ||
928 | 905 | ||
929 | copz = (inst >> 21) & 0x1f; | 906 | copz = (inst >> 21) & 0x1f; |
930 | rt = (inst >> 16) & 0x1f; | 907 | rt = (inst >> 16) & 0x1f; |
@@ -949,7 +926,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
949 | er = kvm_mips_emul_tlbp(vcpu); | 926 | er = kvm_mips_emul_tlbp(vcpu); |
950 | break; | 927 | break; |
951 | case rfe_op: | 928 | case rfe_op: |
952 | printk("!!!COP0_RFE!!!\n"); | 929 | kvm_err("!!!COP0_RFE!!!\n"); |
953 | break; | 930 | break; |
954 | case eret_op: | 931 | case eret_op: |
955 | er = kvm_mips_emul_eret(vcpu); | 932 | er = kvm_mips_emul_eret(vcpu); |
@@ -973,8 +950,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
973 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 950 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
974 | kvm_mips_trans_mfc0(inst, opc, vcpu); | 951 | kvm_mips_trans_mfc0(inst, opc, vcpu); |
975 | #endif | 952 | #endif |
976 | } | 953 | } else { |
977 | else { | ||
978 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; | 954 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; |
979 | 955 | ||
980 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 956 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
@@ -999,8 +975,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
999 | if ((rd == MIPS_CP0_TLB_INDEX) | 975 | if ((rd == MIPS_CP0_TLB_INDEX) |
1000 | && (vcpu->arch.gprs[rt] >= | 976 | && (vcpu->arch.gprs[rt] >= |
1001 | KVM_MIPS_GUEST_TLB_SIZE)) { | 977 | KVM_MIPS_GUEST_TLB_SIZE)) { |
1002 | printk("Invalid TLB Index: %ld", | 978 | kvm_err("Invalid TLB Index: %ld", |
1003 | vcpu->arch.gprs[rt]); | 979 | vcpu->arch.gprs[rt]); |
1004 | er = EMULATE_FAIL; | 980 | er = EMULATE_FAIL; |
1005 | break; | 981 | break; |
1006 | } | 982 | } |
@@ -1010,21 +986,19 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1010 | kvm_change_c0_guest_ebase(cop0, | 986 | kvm_change_c0_guest_ebase(cop0, |
1011 | ~(C0_EBASE_CORE_MASK), | 987 | ~(C0_EBASE_CORE_MASK), |
1012 | vcpu->arch.gprs[rt]); | 988 | vcpu->arch.gprs[rt]); |
1013 | printk("MTCz, cop0->reg[EBASE]: %#lx\n", | 989 | kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n", |
1014 | kvm_read_c0_guest_ebase(cop0)); | 990 | kvm_read_c0_guest_ebase(cop0)); |
1015 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { | 991 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { |
1016 | uint32_t nasid = | 992 | uint32_t nasid = |
1017 | vcpu->arch.gprs[rt] & ASID_MASK; | 993 | vcpu->arch.gprs[rt] & ASID_MASK; |
1018 | if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) | 994 | if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) && |
1019 | && | ||
1020 | ((kvm_read_c0_guest_entryhi(cop0) & | 995 | ((kvm_read_c0_guest_entryhi(cop0) & |
1021 | ASID_MASK) != nasid)) { | 996 | ASID_MASK) != nasid)) { |
1022 | 997 | kvm_debug("MTCz, change ASID from %#lx to %#lx\n", | |
1023 | kvm_debug | 998 | kvm_read_c0_guest_entryhi(cop0) |
1024 | ("MTCz, change ASID from %#lx to %#lx\n", | 999 | & ASID_MASK, |
1025 | kvm_read_c0_guest_entryhi(cop0) & | 1000 | vcpu->arch.gprs[rt] |
1026 | ASID_MASK, | 1001 | & ASID_MASK); |
1027 | vcpu->arch.gprs[rt] & ASID_MASK); | ||
1028 | 1002 | ||
1029 | /* Blow away the shadow host TLBs */ | 1003 | /* Blow away the shadow host TLBs */ |
1030 | kvm_mips_flush_host_tlb(1); | 1004 | kvm_mips_flush_host_tlb(1); |
@@ -1049,7 +1023,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1049 | } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { | 1023 | } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { |
1050 | kvm_write_c0_guest_status(cop0, | 1024 | kvm_write_c0_guest_status(cop0, |
1051 | vcpu->arch.gprs[rt]); | 1025 | vcpu->arch.gprs[rt]); |
1052 | /* Make sure that CU1 and NMI bits are never set */ | 1026 | /* |
1027 | * Make sure that CU1 and NMI bits are | ||
1028 | * never set | ||
1029 | */ | ||
1053 | kvm_clear_c0_guest_status(cop0, | 1030 | kvm_clear_c0_guest_status(cop0, |
1054 | (ST0_CU1 | ST0_NMI)); | 1031 | (ST0_CU1 | ST0_NMI)); |
1055 | 1032 | ||
@@ -1058,6 +1035,7 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1058 | #endif | 1035 | #endif |
1059 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { | 1036 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { |
1060 | uint32_t old_cause, new_cause; | 1037 | uint32_t old_cause, new_cause; |
1038 | |||
1061 | old_cause = kvm_read_c0_guest_cause(cop0); | 1039 | old_cause = kvm_read_c0_guest_cause(cop0); |
1062 | new_cause = vcpu->arch.gprs[rt]; | 1040 | new_cause = vcpu->arch.gprs[rt]; |
1063 | /* Update R/W bits */ | 1041 | /* Update R/W bits */ |
@@ -1082,9 +1060,8 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1082 | break; | 1060 | break; |
1083 | 1061 | ||
1084 | case dmtc_op: | 1062 | case dmtc_op: |
1085 | printk | 1063 | kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", |
1086 | ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", | 1064 | vcpu->arch.pc, rt, rd, sel); |
1087 | vcpu->arch.pc, rt, rd, sel); | ||
1088 | er = EMULATE_FAIL; | 1065 | er = EMULATE_FAIL; |
1089 | break; | 1066 | break; |
1090 | 1067 | ||
@@ -1115,7 +1092,10 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1115 | cop0->reg[MIPS_CP0_STATUS][2] & 0xf; | 1092 | cop0->reg[MIPS_CP0_STATUS][2] & 0xf; |
1116 | uint32_t pss = | 1093 | uint32_t pss = |
1117 | (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; | 1094 | (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; |
1118 | /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */ | 1095 | /* |
1096 | * We don't support any shadow register sets, so | ||
1097 | * SRSCtl[PSS] == SRSCtl[CSS] = 0 | ||
1098 | */ | ||
1119 | if (css || pss) { | 1099 | if (css || pss) { |
1120 | er = EMULATE_FAIL; | 1100 | er = EMULATE_FAIL; |
1121 | break; | 1101 | break; |
@@ -1126,21 +1106,17 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1126 | } | 1106 | } |
1127 | break; | 1107 | break; |
1128 | default: | 1108 | default: |
1129 | printk | 1109 | kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", |
1130 | ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", | 1110 | vcpu->arch.pc, copz); |
1131 | vcpu->arch.pc, copz); | ||
1132 | er = EMULATE_FAIL; | 1111 | er = EMULATE_FAIL; |
1133 | break; | 1112 | break; |
1134 | } | 1113 | } |
1135 | } | 1114 | } |
1136 | 1115 | ||
1137 | done: | 1116 | done: |
1138 | /* | 1117 | /* Rollback PC only if emulation was unsuccessful */ |
1139 | * Rollback PC only if emulation was unsuccessful | 1118 | if (er == EMULATE_FAIL) |
1140 | */ | ||
1141 | if (er == EMULATE_FAIL) { | ||
1142 | vcpu->arch.pc = curr_pc; | 1119 | vcpu->arch.pc = curr_pc; |
1143 | } | ||
1144 | 1120 | ||
1145 | dont_update_pc: | 1121 | dont_update_pc: |
1146 | /* | 1122 | /* |
@@ -1152,9 +1128,9 @@ dont_update_pc: | |||
1152 | return er; | 1128 | return er; |
1153 | } | 1129 | } |
1154 | 1130 | ||
1155 | enum emulation_result | 1131 | enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause, |
1156 | kvm_mips_emulate_store(uint32_t inst, uint32_t cause, | 1132 | struct kvm_run *run, |
1157 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1133 | struct kvm_vcpu *vcpu) |
1158 | { | 1134 | { |
1159 | enum emulation_result er = EMULATE_DO_MMIO; | 1135 | enum emulation_result er = EMULATE_DO_MMIO; |
1160 | int32_t op, base, rt, offset; | 1136 | int32_t op, base, rt, offset; |
@@ -1252,24 +1228,21 @@ kvm_mips_emulate_store(uint32_t inst, uint32_t cause, | |||
1252 | break; | 1228 | break; |
1253 | 1229 | ||
1254 | default: | 1230 | default: |
1255 | printk("Store not yet supported"); | 1231 | kvm_err("Store not yet supported"); |
1256 | er = EMULATE_FAIL; | 1232 | er = EMULATE_FAIL; |
1257 | break; | 1233 | break; |
1258 | } | 1234 | } |
1259 | 1235 | ||
1260 | /* | 1236 | /* Rollback PC if emulation was unsuccessful */ |
1261 | * Rollback PC if emulation was unsuccessful | 1237 | if (er == EMULATE_FAIL) |
1262 | */ | ||
1263 | if (er == EMULATE_FAIL) { | ||
1264 | vcpu->arch.pc = curr_pc; | 1238 | vcpu->arch.pc = curr_pc; |
1265 | } | ||
1266 | 1239 | ||
1267 | return er; | 1240 | return er; |
1268 | } | 1241 | } |
1269 | 1242 | ||
1270 | enum emulation_result | 1243 | enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause, |
1271 | kvm_mips_emulate_load(uint32_t inst, uint32_t cause, | 1244 | struct kvm_run *run, |
1272 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1245 | struct kvm_vcpu *vcpu) |
1273 | { | 1246 | { |
1274 | enum emulation_result er = EMULATE_DO_MMIO; | 1247 | enum emulation_result er = EMULATE_DO_MMIO; |
1275 | int32_t op, base, rt, offset; | 1248 | int32_t op, base, rt, offset; |
@@ -1364,7 +1337,7 @@ kvm_mips_emulate_load(uint32_t inst, uint32_t cause, | |||
1364 | break; | 1337 | break; |
1365 | 1338 | ||
1366 | default: | 1339 | default: |
1367 | printk("Load not yet supported"); | 1340 | kvm_err("Load not yet supported"); |
1368 | er = EMULATE_FAIL; | 1341 | er = EMULATE_FAIL; |
1369 | break; | 1342 | break; |
1370 | } | 1343 | } |
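Note: the store/load emulation above follows the same PC bookkeeping pattern as kvm_mips_emulate_CP0(): latch the PC, advance past the trapping instruction, and roll back on failure so the exception can be re-delivered accurately. In outline:

	/* Sketch of the shared pattern, not a literal excerpt. */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);		/* step over trapping inst */
	if (er == EMULATE_FAIL)
		return er;
	/* ... emulate the instruction ... */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;	/* rollback for re-delivery */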
@@ -1383,7 +1356,7 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) | |||
1383 | gfn = va >> PAGE_SHIFT; | 1356 | gfn = va >> PAGE_SHIFT; |
1384 | 1357 | ||
1385 | if (gfn >= kvm->arch.guest_pmap_npages) { | 1358 | if (gfn >= kvm->arch.guest_pmap_npages) { |
1386 | printk("%s: Invalid gfn: %#llx\n", __func__, gfn); | 1359 | kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn); |
1387 | kvm_mips_dump_host_tlbs(); | 1360 | kvm_mips_dump_host_tlbs(); |
1388 | kvm_arch_vcpu_dump_regs(vcpu); | 1361 | kvm_arch_vcpu_dump_regs(vcpu); |
1389 | return -1; | 1362 | return -1; |
@@ -1391,7 +1364,8 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) | |||
1391 | pfn = kvm->arch.guest_pmap[gfn]; | 1364 | pfn = kvm->arch.guest_pmap[gfn]; |
1392 | pa = (pfn << PAGE_SHIFT) | offset; | 1365 | pa = (pfn << PAGE_SHIFT) | offset; |
1393 | 1366 | ||
1394 | printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa)); | 1367 | kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va, |
1368 | CKSEG0ADDR(pa)); | ||
1395 | 1369 | ||
1396 | local_flush_icache_range(CKSEG0ADDR(pa), 32); | 1370 | local_flush_icache_range(CKSEG0ADDR(pa), 32); |
1397 | return 0; | 1371 | return 0; |
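Note: the address translation in kvm_mips_sync_icache() above reduces to three steps; spelled out here for reference, using the fields shown in the hunk:

	/* Sketch, not a literal excerpt. */
	gfn = va >> PAGE_SHIFT;			/* guest frame number */
	pfn = kvm->arch.guest_pmap[gfn];	/* host frame from guest pmap */
	pa = (pfn << PAGE_SHIFT) | offset;	/* host physical address */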
@@ -1410,13 +1384,12 @@ int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu) | |||
1410 | #define MIPS_CACHE_DCACHE 0x1 | 1384 | #define MIPS_CACHE_DCACHE 0x1 |
1411 | #define MIPS_CACHE_SEC 0x3 | 1385 | #define MIPS_CACHE_SEC 0x3 |
1412 | 1386 | ||
1413 | enum emulation_result | 1387 | enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, |
1414 | kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, | 1388 | uint32_t cause, |
1415 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1389 | struct kvm_run *run, |
1390 | struct kvm_vcpu *vcpu) | ||
1416 | { | 1391 | { |
1417 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1392 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1418 | extern void (*r4k_blast_dcache) (void); | ||
1419 | extern void (*r4k_blast_icache) (void); | ||
1420 | enum emulation_result er = EMULATE_DONE; | 1393 | enum emulation_result er = EMULATE_DONE; |
1421 | int32_t offset, cache, op_inst, op, base; | 1394 | int32_t offset, cache, op_inst, op, base; |
1422 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1395 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
@@ -1443,22 +1416,23 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1443 | kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1416 | kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1444 | cache, op, base, arch->gprs[base], offset); | 1417 | cache, op, base, arch->gprs[base], offset); |
1445 | 1418 | ||
1446 | /* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate | 1419 | /* |
1447 | * the caches entirely by stepping through all the ways/indexes | 1420 | * Treat INDEX_INV as a nop, basically issued by Linux on startup to |
1421 | * invalidate the caches entirely by stepping through all the | ||
1422 | * ways/indexes | ||
1448 | */ | 1423 | */ |
1449 | if (op == MIPS_CACHE_OP_INDEX_INV) { | 1424 | if (op == MIPS_CACHE_OP_INDEX_INV) { |
1450 | kvm_debug | 1425 | kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1451 | ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1426 | vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, |
1452 | vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, | 1427 | arch->gprs[base], offset); |
1453 | arch->gprs[base], offset); | ||
1454 | 1428 | ||
1455 | if (cache == MIPS_CACHE_DCACHE) | 1429 | if (cache == MIPS_CACHE_DCACHE) |
1456 | r4k_blast_dcache(); | 1430 | r4k_blast_dcache(); |
1457 | else if (cache == MIPS_CACHE_ICACHE) | 1431 | else if (cache == MIPS_CACHE_ICACHE) |
1458 | r4k_blast_icache(); | 1432 | r4k_blast_icache(); |
1459 | else { | 1433 | else { |
1460 | printk("%s: unsupported CACHE INDEX operation\n", | 1434 | kvm_err("%s: unsupported CACHE INDEX operation\n", |
1461 | __func__); | 1435 | __func__); |
1462 | return EMULATE_FAIL; | 1436 | return EMULATE_FAIL; |
1463 | } | 1437 | } |
1464 | 1438 | ||
@@ -1470,21 +1444,19 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1470 | 1444 | ||
1471 | preempt_disable(); | 1445 | preempt_disable(); |
1472 | if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { | 1446 | if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) { |
1473 | 1447 | if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) | |
1474 | if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) { | ||
1475 | kvm_mips_handle_kseg0_tlb_fault(va, vcpu); | 1448 | kvm_mips_handle_kseg0_tlb_fault(va, vcpu); |
1476 | } | ||
1477 | } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || | 1449 | } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) || |
1478 | KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { | 1450 | KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) { |
1479 | int index; | 1451 | int index; |
1480 | 1452 | ||
1481 | /* If an entry already exists then skip */ | 1453 | /* If an entry already exists then skip */ |
1482 | if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) { | 1454 | if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) |
1483 | goto skip_fault; | 1455 | goto skip_fault; |
1484 | } | ||
1485 | 1456 | ||
1486 | /* If address not in the guest TLB, then give the guest a fault, the | 1457 | /* |
1487 | * resulting handler will do the right thing | 1458 | * If address not in the guest TLB, then give the guest a fault, |
1459 | * the resulting handler will do the right thing | ||
1488 | */ | 1460 | */ |
1489 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | | 1461 | index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) | |
1490 | (kvm_read_c0_guest_entryhi | 1462 | (kvm_read_c0_guest_entryhi |
@@ -1499,23 +1471,28 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause, | |||
1499 | goto dont_update_pc; | 1471 | goto dont_update_pc; |
1500 | } else { | 1472 | } else { |
1501 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | 1473 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; |
1502 | /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ | 1474 | /* |
1475 | * Check if the entry is valid, if not then setup a TLB | ||
1476 | * invalid exception to the guest | ||
1477 | */ | ||
1503 | if (!TLB_IS_VALID(*tlb, va)) { | 1478 | if (!TLB_IS_VALID(*tlb, va)) { |
1504 | er = kvm_mips_emulate_tlbinv_ld(cause, NULL, | 1479 | er = kvm_mips_emulate_tlbinv_ld(cause, NULL, |
1505 | run, vcpu); | 1480 | run, vcpu); |
1506 | preempt_enable(); | 1481 | preempt_enable(); |
1507 | goto dont_update_pc; | 1482 | goto dont_update_pc; |
1508 | } else { | 1483 | } else { |
1509 | /* We fault an entry from the guest tlb to the shadow host TLB */ | 1484 | /* |
1485 | * We fault an entry from the guest tlb to the | ||
1486 | * shadow host TLB | ||
1487 | */ | ||
1510 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, | 1488 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, |
1511 | NULL, | 1489 | NULL, |
1512 | NULL); | 1490 | NULL); |
1513 | } | 1491 | } |
1514 | } | 1492 | } |
1515 | } else { | 1493 | } else { |
1516 | printk | 1494 | kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1517 | ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1495 | cache, op, base, arch->gprs[base], offset); |
1518 | cache, op, base, arch->gprs[base], offset); | ||
1519 | er = EMULATE_FAIL; | 1496 | er = EMULATE_FAIL; |
1520 | preempt_enable(); | 1497 | preempt_enable(); |
1521 | goto dont_update_pc; | 1498 | goto dont_update_pc; |
@@ -1530,7 +1507,10 @@ skip_fault: | |||
1530 | flush_dcache_line(va); | 1507 | flush_dcache_line(va); |
1531 | 1508 | ||
1532 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | 1509 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS |
1533 | /* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */ | 1510 | /* |
1511 | * Replace the CACHE instruction, with a SYNCI, not the same, | ||
1512 | * but avoids a trap | ||
1513 | */ | ||
1534 | kvm_mips_trans_cache_va(inst, opc, vcpu); | 1514 | kvm_mips_trans_cache_va(inst, opc, vcpu); |
1535 | #endif | 1515 | #endif |
1536 | } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { | 1516 | } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) { |
@@ -1542,9 +1522,8 @@ skip_fault: | |||
1542 | kvm_mips_trans_cache_va(inst, opc, vcpu); | 1522 | kvm_mips_trans_cache_va(inst, opc, vcpu); |
1543 | #endif | 1523 | #endif |
1544 | } else { | 1524 | } else { |
1545 | printk | 1525 | kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", |
1546 | ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | 1526 | cache, op, base, arch->gprs[base], offset); |
1547 | cache, op, base, arch->gprs[base], offset); | ||
1548 | er = EMULATE_FAIL; | 1527 | er = EMULATE_FAIL; |
1549 | preempt_enable(); | 1528 | preempt_enable(); |
1550 | goto dont_update_pc; | 1529 | goto dont_update_pc; |
@@ -1552,28 +1531,23 @@ skip_fault: | |||
1552 | 1531 | ||
1553 | preempt_enable(); | 1532 | preempt_enable(); |
1554 | 1533 | ||
1555 | dont_update_pc: | 1534 | dont_update_pc: |
1556 | /* | 1535 | /* Rollback PC */ |
1557 | * Rollback PC | ||
1558 | */ | ||
1559 | vcpu->arch.pc = curr_pc; | 1536 | vcpu->arch.pc = curr_pc; |
1560 | done: | 1537 | done: |
1561 | return er; | 1538 | return er; |
1562 | } | 1539 | } |
1563 | 1540 | ||
1564 | enum emulation_result | 1541 | enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, |
1565 | kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, | 1542 | struct kvm_run *run, |
1566 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1543 | struct kvm_vcpu *vcpu) |
1567 | { | 1544 | { |
1568 | enum emulation_result er = EMULATE_DONE; | 1545 | enum emulation_result er = EMULATE_DONE; |
1569 | uint32_t inst; | 1546 | uint32_t inst; |
1570 | 1547 | ||
1571 | /* | 1548 | /* Fetch the instruction. */ |
1572 | * Fetch the instruction. | 1549 | if (cause & CAUSEF_BD) |
1573 | */ | ||
1574 | if (cause & CAUSEF_BD) { | ||
1575 | opc += 1; | 1550 | opc += 1; |
1576 | } | ||
1577 | 1551 | ||
1578 | inst = kvm_get_inst(opc, vcpu); | 1552 | inst = kvm_get_inst(opc, vcpu); |
1579 | 1553 | ||
@@ -1601,8 +1575,8 @@ kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, | |||
1601 | break; | 1575 | break; |
1602 | 1576 | ||
1603 | default: | 1577 | default: |
1604 | printk("Instruction emulation not supported (%p/%#x)\n", opc, | 1578 | kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, |
1605 | inst); | 1579 | inst); |
1606 | kvm_arch_vcpu_dump_regs(vcpu); | 1580 | kvm_arch_vcpu_dump_regs(vcpu); |
1607 | er = EMULATE_FAIL; | 1581 | er = EMULATE_FAIL; |
1608 | break; | 1582 | break; |
@@ -1611,9 +1585,10 @@ kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc, | |||
1611 | return er; | 1585 | return er; |
1612 | } | 1586 | } |
1613 | 1587 | ||
1614 | enum emulation_result | 1588 | enum emulation_result kvm_mips_emulate_syscall(unsigned long cause, |
1615 | kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, | 1589 | uint32_t *opc, |
1616 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1590 | struct kvm_run *run, |
1591 | struct kvm_vcpu *vcpu) | ||
1617 | { | 1592 | { |
1618 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1593 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1619 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1594 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
@@ -1638,20 +1613,20 @@ kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc, | |||
1638 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1613 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
1639 | 1614 | ||
1640 | } else { | 1615 | } else { |
1641 | printk("Trying to deliver SYSCALL when EXL is already set\n"); | 1616 | kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); |
1642 | er = EMULATE_FAIL; | 1617 | er = EMULATE_FAIL; |
1643 | } | 1618 | } |
1644 | 1619 | ||
1645 | return er; | 1620 | return er; |
1646 | } | 1621 | } |
1647 | 1622 | ||
1648 | enum emulation_result | 1623 | enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause, |
1649 | kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, | 1624 | uint32_t *opc, |
1650 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1625 | struct kvm_run *run, |
1626 | struct kvm_vcpu *vcpu) | ||
1651 | { | 1627 | { |
1652 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1628 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1653 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1629 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1654 | enum emulation_result er = EMULATE_DONE; | ||
1655 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1630 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1656 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1631 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1657 | 1632 | ||
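Note: kvm_mips_emulate_syscall() above and the tlbmiss/tlbinv helpers that follow share one delivery shape: latch EPC, set Status.EXL, encode the exception in Cause, then jump to a guest vector (KVM_GUEST_KSEG0 + 0x180 for general exceptions; the TLB refill cases use the base of the vector area instead). A condensed sketch; kvm_write_c0_guest_epc() and kvm_set_c0_guest_status() are assumed from the MIPS KVM headers, matching the other accessors used in this file:

	/* Sketch, not a literal excerpt. */
	kvm_write_c0_guest_epc(cop0, arch->pc);	/* latch faulting PC */
	kvm_set_c0_guest_status(cop0, ST0_EXL);	/* enter exception level */
	kvm_change_c0_guest_cause(cop0, (0xff),
				  (T_SYSCALL << CAUSEB_EXCCODE));
	arch->pc = KVM_GUEST_KSEG0 + 0x180;	/* guest general vector */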
@@ -1688,16 +1663,16 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc, | |||
1688 | /* Blow away the shadow host TLBs */ | 1663 | /* Blow away the shadow host TLBs */ |
1689 | kvm_mips_flush_host_tlb(1); | 1664 | kvm_mips_flush_host_tlb(1); |
1690 | 1665 | ||
1691 | return er; | 1666 | return EMULATE_DONE; |
1692 | } | 1667 | } |
1693 | 1668 | ||
1694 | enum emulation_result | 1669 | enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause, |
1695 | kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, | 1670 | uint32_t *opc, |
1696 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1671 | struct kvm_run *run, |
1672 | struct kvm_vcpu *vcpu) | ||
1697 | { | 1673 | { |
1698 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1674 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1699 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1675 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1700 | enum emulation_result er = EMULATE_DONE; | ||
1701 | unsigned long entryhi = | 1676 | unsigned long entryhi = |
1702 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1677 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1703 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1678 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
@@ -1734,16 +1709,16 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc, | |||
1734 | /* Blow away the shadow host TLBs */ | 1709 | /* Blow away the shadow host TLBs */ |
1735 | kvm_mips_flush_host_tlb(1); | 1710 | kvm_mips_flush_host_tlb(1); |
1736 | 1711 | ||
1737 | return er; | 1712 | return EMULATE_DONE; |
1738 | } | 1713 | } |
1739 | 1714 | ||
1740 | enum emulation_result | 1715 | enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause, |
1741 | kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, | 1716 | uint32_t *opc, |
1742 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1717 | struct kvm_run *run, |
1718 | struct kvm_vcpu *vcpu) | ||
1743 | { | 1719 | { |
1744 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1720 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1745 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1721 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1746 | enum emulation_result er = EMULATE_DONE; | ||
1747 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1722 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1748 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1723 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1749 | 1724 | ||
@@ -1778,16 +1753,16 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc, | |||
1778 | /* Blow away the shadow host TLBs */ | 1753 | /* Blow away the shadow host TLBs */ |
1779 | kvm_mips_flush_host_tlb(1); | 1754 | kvm_mips_flush_host_tlb(1); |
1780 | 1755 | ||
1781 | return er; | 1756 | return EMULATE_DONE; |
1782 | } | 1757 | } |
1783 | 1758 | ||
1784 | enum emulation_result | 1759 | enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause, |
1785 | kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, | 1760 | uint32_t *opc, |
1786 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1761 | struct kvm_run *run, |
1762 | struct kvm_vcpu *vcpu) | ||
1787 | { | 1763 | { |
1788 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1764 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1789 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1765 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1790 | enum emulation_result er = EMULATE_DONE; | ||
1791 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1766 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1792 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1767 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1793 | 1768 | ||
@@ -1822,13 +1797,13 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc, | |||
1822 | /* Blow away the shadow host TLBs */ | 1797 | /* Blow away the shadow host TLBs */ |
1823 | kvm_mips_flush_host_tlb(1); | 1798 | kvm_mips_flush_host_tlb(1); |
1824 | 1799 | ||
1825 | return er; | 1800 | return EMULATE_DONE; |
1826 | } | 1801 | } |
1827 | 1802 | ||
1828 | /* TLBMOD: store into address matching TLB with Dirty bit off */ | 1803 | /* TLBMOD: store into address matching TLB with Dirty bit off */ |
1829 | enum emulation_result | 1804 | enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, |
1830 | kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, | 1805 | struct kvm_run *run, |
1831 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1806 | struct kvm_vcpu *vcpu) |
1832 | { | 1807 | { |
1833 | enum emulation_result er = EMULATE_DONE; | 1808 | enum emulation_result er = EMULATE_DONE; |
1834 | #ifdef DEBUG | 1809 | #ifdef DEBUG |
@@ -1837,9 +1812,7 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, | |||
1837 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1812 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1838 | int index; | 1813 | int index; |
1839 | 1814 | ||
1840 | /* | 1815 | /* If address not in the guest TLB, then we are in trouble */ |
1841 | * If address not in the guest TLB, then we are in trouble | ||
1842 | */ | ||
1843 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); | 1816 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); |
1844 | if (index < 0) { | 1817 | if (index < 0) { |
1845 | /* XXXKYMA Invalidate and retry */ | 1818 | /* XXXKYMA Invalidate and retry */ |
@@ -1856,15 +1829,15 @@ kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc, | |||
1856 | return er; | 1829 | return er; |
1857 | } | 1830 | } |
1858 | 1831 | ||
1859 | enum emulation_result | 1832 | enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause, |
1860 | kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, | 1833 | uint32_t *opc, |
1861 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1834 | struct kvm_run *run, |
1835 | struct kvm_vcpu *vcpu) | ||
1862 | { | 1836 | { |
1863 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1837 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1864 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | 1838 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | |
1865 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); | 1839 | (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK); |
1866 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1840 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1867 | enum emulation_result er = EMULATE_DONE; | ||
1868 | 1841 | ||
1869 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1842 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1870 | /* save old pc */ | 1843 | /* save old pc */ |
@@ -1895,16 +1868,16 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc, | |||
1895 | /* Blow away the shadow host TLBs */ | 1868 | /* Blow away the shadow host TLBs */ |
1896 | kvm_mips_flush_host_tlb(1); | 1869 | kvm_mips_flush_host_tlb(1); |
1897 | 1870 | ||
1898 | return er; | 1871 | return EMULATE_DONE; |
1899 | } | 1872 | } |
1900 | 1873 | ||
1901 | enum emulation_result | 1874 | enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause, |
1902 | kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, | 1875 | uint32_t *opc, |
1903 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1876 | struct kvm_run *run, |
1877 | struct kvm_vcpu *vcpu) | ||
1904 | { | 1878 | { |
1905 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1879 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1906 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1880 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
1907 | enum emulation_result er = EMULATE_DONE; | ||
1908 | 1881 | ||
1909 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 1882 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
1910 | /* save old pc */ | 1883 | /* save old pc */ |
@@ -1924,12 +1897,13 @@ kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc, | |||
1924 | (T_COP_UNUSABLE << CAUSEB_EXCCODE)); | 1897 | (T_COP_UNUSABLE << CAUSEB_EXCCODE)); |
1925 | kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); | 1898 | kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); |
1926 | 1899 | ||
1927 | return er; | 1900 | return EMULATE_DONE; |
1928 | } | 1901 | } |
1929 | 1902 | ||
1930 | enum emulation_result | 1903 | enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause, |
1931 | kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, | 1904 | uint32_t *opc, |
1932 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1905 | struct kvm_run *run, |
1906 | struct kvm_vcpu *vcpu) | ||
1933 | { | 1907 | { |
1934 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1908 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1935 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1909 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
@@ -1961,9 +1935,10 @@ kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc, | |||
1961 | return er; | 1935 | return er; |
1962 | } | 1936 | } |
1963 | 1937 | ||
1964 | enum emulation_result | 1938 | enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause, |
1965 | kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, | 1939 | uint32_t *opc, |
1966 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1940 | struct kvm_run *run, |
1941 | struct kvm_vcpu *vcpu) | ||
1967 | { | 1942 | { |
1968 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1943 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
1969 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1944 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
@@ -1988,16 +1963,14 @@ kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, | |||
1988 | arch->pc = KVM_GUEST_KSEG0 + 0x180; | 1963 | arch->pc = KVM_GUEST_KSEG0 + 0x180; |
1989 | 1964 | ||
1990 | } else { | 1965 | } else { |
1991 | printk("Trying to deliver BP when EXL is already set\n"); | 1966 | kvm_err("Trying to deliver BP when EXL is already set\n"); |
1992 | er = EMULATE_FAIL; | 1967 | er = EMULATE_FAIL; |
1993 | } | 1968 | } |
1994 | 1969 | ||
1995 | return er; | 1970 | return er; |
1996 | } | 1971 | } |
1997 | 1972 | ||
1998 | /* | 1973 | /* ll/sc, rdhwr, sync emulation */ |
1999 | * ll/sc, rdhwr, sync emulation | ||
2000 | */ | ||
2001 | 1974 | ||
2002 | #define OPCODE 0xfc000000 | 1975 | #define OPCODE 0xfc000000 |
2003 | #define BASE 0x03e00000 | 1976 | #define BASE 0x03e00000 |
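Note: with the masks above, kvm_mips_handle_ri() can pick out the emulatable encodings from a trapped reserved instruction. A sketch of the rdhwr match; the SPEC3 major opcode value 0x7c000000 and the 0x3f function-field mask are assumptions here, since their #defines fall outside this hunk, and the helper name is hypothetical:

/* Hypothetical illustration, not part of the patch. */
static int example_is_rdhwr(uint32_t inst)
{
	/* major opcode SPEC3, function field RDHWR (0x3b) */
	return (inst & OPCODE) == 0x7c000000 && (inst & 0x3f) == RDHWR;
}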
@@ -2012,9 +1985,9 @@ kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc, | |||
2012 | #define SYNC 0x0000000f | 1985 | #define SYNC 0x0000000f |
2013 | #define RDHWR 0x0000003b | 1986 | #define RDHWR 0x0000003b |
2014 | 1987 | ||
2015 | enum emulation_result | 1988 | enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, |
2016 | kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, | 1989 | struct kvm_run *run, |
2017 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 1990 | struct kvm_vcpu *vcpu) |
2018 | { | 1991 | { |
2019 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 1992 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
2020 | struct kvm_vcpu_arch *arch = &vcpu->arch; | 1993 | struct kvm_vcpu_arch *arch = &vcpu->arch; |
@@ -2031,16 +2004,14 @@ kvm_mips_handle_ri(unsigned long cause, uint32_t *opc, | |||
2031 | if (er == EMULATE_FAIL) | 2004 | if (er == EMULATE_FAIL) |
2032 | return er; | 2005 | return er; |
2033 | 2006 | ||
2034 | /* | 2007 | /* Fetch the instruction. */ |
2035 | * Fetch the instruction. | ||
2036 | */ | ||
2037 | if (cause & CAUSEF_BD) | 2008 | if (cause & CAUSEF_BD) |
2038 | opc += 1; | 2009 | opc += 1; |
2039 | 2010 | ||
2040 | inst = kvm_get_inst(opc, vcpu); | 2011 | inst = kvm_get_inst(opc, vcpu); |
2041 | 2012 | ||
2042 | if (inst == KVM_INVALID_INST) { | 2013 | if (inst == KVM_INVALID_INST) { |
2043 | printk("%s: Cannot get inst @ %p\n", __func__, opc); | 2014 | kvm_err("%s: Cannot get inst @ %p\n", __func__, opc); |
2044 | return EMULATE_FAIL; | 2015 | return EMULATE_FAIL; |
2045 | } | 2016 | } |
2046 | 2017 | ||
@@ -2099,15 +2070,15 @@ emulate_ri: | |||
2099 | return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); | 2070 | return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu); |
2100 | } | 2071 | } |
2101 | 2072 | ||
2102 | enum emulation_result | 2073 | enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, |
2103 | kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) | 2074 | struct kvm_run *run) |
2104 | { | 2075 | { |
2105 | unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; | 2076 | unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; |
2106 | enum emulation_result er = EMULATE_DONE; | 2077 | enum emulation_result er = EMULATE_DONE; |
2107 | unsigned long curr_pc; | 2078 | unsigned long curr_pc; |
2108 | 2079 | ||
2109 | if (run->mmio.len > sizeof(*gpr)) { | 2080 | if (run->mmio.len > sizeof(*gpr)) { |
2110 | printk("Bad MMIO length: %d", run->mmio.len); | 2081 | kvm_err("Bad MMIO length: %d", run->mmio.len); |
2111 | er = EMULATE_FAIL; | 2082 | er = EMULATE_FAIL; |
2112 | goto done; | 2083 | goto done; |
2113 | } | 2084 | } |
@@ -2142,18 +2113,18 @@ kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
2142 | } | 2113 | } |
2143 | 2114 | ||
2144 | if (vcpu->arch.pending_load_cause & CAUSEF_BD) | 2115 | if (vcpu->arch.pending_load_cause & CAUSEF_BD) |
2145 | kvm_debug | 2116 | kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", |
2146 | ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n", | 2117 | vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, |
2147 | vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr, | 2118 | vcpu->mmio_needed); |
2148 | vcpu->mmio_needed); | ||
2149 | 2119 | ||
2150 | done: | 2120 | done: |
2151 | return er; | 2121 | return er; |
2152 | } | 2122 | } |
2153 | 2123 | ||
2154 | static enum emulation_result | 2124 | static enum emulation_result kvm_mips_emulate_exc(unsigned long cause, |
2155 | kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, | 2125 | uint32_t *opc, |
2156 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 2126 | struct kvm_run *run, |
2127 | struct kvm_vcpu *vcpu) | ||
2157 | { | 2128 | { |
2158 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | 2129 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
2159 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 2130 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
@@ -2181,16 +2152,17 @@ kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc, | |||
2181 | exccode, kvm_read_c0_guest_epc(cop0), | 2152 | exccode, kvm_read_c0_guest_epc(cop0), |
2182 | kvm_read_c0_guest_badvaddr(cop0)); | 2153 | kvm_read_c0_guest_badvaddr(cop0)); |
2183 | } else { | 2154 | } else { |
2184 | printk("Trying to deliver EXC when EXL is already set\n"); | 2155 | kvm_err("Trying to deliver EXC when EXL is already set\n"); |
2185 | er = EMULATE_FAIL; | 2156 | er = EMULATE_FAIL; |
2186 | } | 2157 | } |
2187 | 2158 | ||
2188 | return er; | 2159 | return er; |
2189 | } | 2160 | } |
2190 | 2161 | ||
2191 | enum emulation_result | 2162 | enum emulation_result kvm_mips_check_privilege(unsigned long cause, |
2192 | kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | 2163 | uint32_t *opc, |
2193 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 2164 | struct kvm_run *run, |
2165 | struct kvm_vcpu *vcpu) | ||
2194 | { | 2166 | { |
2195 | enum emulation_result er = EMULATE_DONE; | 2167 | enum emulation_result er = EMULATE_DONE; |
2196 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | 2168 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
@@ -2215,10 +2187,13 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | |||
2215 | break; | 2187 | break; |
2216 | 2188 | ||
2217 | case T_TLB_LD_MISS: | 2189 | case T_TLB_LD_MISS: |
2218 | /* If we are accessing Guest kernel space, then send an address error exception to the guest */ | 2190 | /* |
2191 | * If we are accessing Guest kernel space, then send an | ||
2192 | * address error exception to the guest | ||
2193 | */ | ||
2219 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { | 2194 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { |
2220 | printk("%s: LD MISS @ %#lx\n", __func__, | 2195 | kvm_debug("%s: LD MISS @ %#lx\n", __func__, |
2221 | badvaddr); | 2196 | badvaddr); |
2222 | cause &= ~0xff; | 2197 | cause &= ~0xff; |
2223 | cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); | 2198 | cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE); |
2224 | er = EMULATE_PRIV_FAIL; | 2199 | er = EMULATE_PRIV_FAIL; |
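Both miss cases rewrite the exception code the same way: clear the low byte of the cause value, then OR in the new code at the ExcCode position. A standalone restatement; the constants mirror the standard MIPS encodings (ExcCode in Cause[6:2], TLBL = 2, AdEL = 4):

#include <stdio.h>

#define CAUSEB_EXCCODE  2
#define T_TLB_LD_MISS   2
#define T_ADDR_ERR_LD   4

/* Turn a guest-KSEG0 load miss into an address error for the guest. */
static unsigned long set_exccode(unsigned long cause, unsigned int code)
{
        cause &= ~0xff;
        cause |= (unsigned long)code << CAUSEB_EXCCODE;
        return cause;
}

int main(void)
{
        unsigned long cause = (unsigned long)T_TLB_LD_MISS << CAUSEB_EXCCODE;

        printf("cause %#lx -> %#lx\n", cause,
               set_exccode(cause, T_ADDR_ERR_LD));
        return 0;
}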
@@ -2226,10 +2201,13 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | |||
2226 | break; | 2201 | break; |
2227 | 2202 | ||
2228 | case T_TLB_ST_MISS: | 2203 | case T_TLB_ST_MISS: |
2229 | /* If we are accessing Guest kernel space, then send an address error exception to the guest */ | 2204 | /* |
2205 | * If we are accessing Guest kernel space, then send an | ||
2206 | * address error exception to the guest | ||
2207 | */ | ||
2230 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { | 2208 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { |
2231 | printk("%s: ST MISS @ %#lx\n", __func__, | 2209 | kvm_debug("%s: ST MISS @ %#lx\n", __func__, |
2232 | badvaddr); | 2210 | badvaddr); |
2233 | cause &= ~0xff; | 2211 | cause &= ~0xff; |
2234 | cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); | 2212 | cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE); |
2235 | er = EMULATE_PRIV_FAIL; | 2213 | er = EMULATE_PRIV_FAIL; |
@@ -2237,8 +2215,8 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | |||
2237 | break; | 2215 | break; |
2238 | 2216 | ||
2239 | case T_ADDR_ERR_ST: | 2217 | case T_ADDR_ERR_ST: |
2240 | printk("%s: address error ST @ %#lx\n", __func__, | 2218 | kvm_debug("%s: address error ST @ %#lx\n", __func__, |
2241 | badvaddr); | 2219 | badvaddr); |
2242 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { | 2220 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { |
2243 | cause &= ~0xff; | 2221 | cause &= ~0xff; |
2244 | cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); | 2222 | cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE); |
@@ -2246,8 +2224,8 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | |||
2246 | er = EMULATE_PRIV_FAIL; | 2224 | er = EMULATE_PRIV_FAIL; |
2247 | break; | 2225 | break; |
2248 | case T_ADDR_ERR_LD: | 2226 | case T_ADDR_ERR_LD: |
2249 | printk("%s: address error LD @ %#lx\n", __func__, | 2227 | kvm_debug("%s: address error LD @ %#lx\n", __func__, |
2250 | badvaddr); | 2228 | badvaddr); |
2251 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { | 2229 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { |
2252 | cause &= ~0xff; | 2230 | cause &= ~0xff; |
2253 | cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); | 2231 | cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE); |
@@ -2260,21 +2238,23 @@ kvm_mips_check_privilege(unsigned long cause, uint32_t *opc, | |||
2260 | } | 2238 | } |
2261 | } | 2239 | } |
2262 | 2240 | ||
2263 | if (er == EMULATE_PRIV_FAIL) { | 2241 | if (er == EMULATE_PRIV_FAIL) |
2264 | kvm_mips_emulate_exc(cause, opc, run, vcpu); | 2242 | kvm_mips_emulate_exc(cause, opc, run, vcpu); |
2265 | } | 2243 | |
2266 | return er; | 2244 | return er; |
2267 | } | 2245 | } |
2268 | 2246 | ||
2269 | /* User Address (UA) fault, this could happen if | 2247 | /* |
2248 | * User Address (UA) fault, this could happen if | ||
2270 | * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this | 2249 | * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this |
2271 | * case we pass on the fault to the guest kernel and let it handle it. | 2250 | * case we pass on the fault to the guest kernel and let it handle it. |
2272 | * (2) TLB entry is present in the Guest TLB but not in the shadow, in this | 2251 | * (2) TLB entry is present in the Guest TLB but not in the shadow, in this |
2273 | * case we inject the TLB from the Guest TLB into the shadow host TLB | 2252 | * case we inject the TLB from the Guest TLB into the shadow host TLB |
2274 | */ | 2253 | */ |
2275 | enum emulation_result | 2254 | enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause, |
2276 | kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, | 2255 | uint32_t *opc, |
2277 | struct kvm_run *run, struct kvm_vcpu *vcpu) | 2256 | struct kvm_run *run, |
2257 | struct kvm_vcpu *vcpu) | ||
2278 | { | 2258 | { |
2279 | enum emulation_result er = EMULATE_DONE; | 2259 | enum emulation_result er = EMULATE_DONE; |
2280 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | 2260 | uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; |
@@ -2284,10 +2264,11 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, | |||
2284 | kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", | 2264 | kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n", |
2285 | vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); | 2265 | vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi); |
2286 | 2266 | ||
2287 | /* KVM would not have got the exception if this entry was valid in the shadow host TLB | 2267 | /* |
2288 | * Check the Guest TLB, if the entry is not there then send the guest an | 2268 | * KVM would not have got the exception if this entry was valid in the |
2289 | * exception. The guest exc handler should then inject an entry into the | 2269 | * shadow host TLB. Check the Guest TLB, if the entry is not there then |
2290 | * guest TLB | 2270 | * send the guest an exception. The guest exc handler should then inject |
2271 | * an entry into the guest TLB. | ||
2291 | */ | 2272 | */ |
2292 | index = kvm_mips_guest_tlb_lookup(vcpu, | 2273 | index = kvm_mips_guest_tlb_lookup(vcpu, |
2293 | (va & VPN2_MASK) | | 2274 | (va & VPN2_MASK) | |
@@ -2299,13 +2280,17 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, | |||
2299 | } else if (exccode == T_TLB_ST_MISS) { | 2280 | } else if (exccode == T_TLB_ST_MISS) { |
2300 | er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); | 2281 | er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu); |
2301 | } else { | 2282 | } else { |
2302 | printk("%s: invalid exc code: %d\n", __func__, exccode); | 2283 | kvm_err("%s: invalid exc code: %d\n", __func__, |
2284 | exccode); | ||
2303 | er = EMULATE_FAIL; | 2285 | er = EMULATE_FAIL; |
2304 | } | 2286 | } |
2305 | } else { | 2287 | } else { |
2306 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | 2288 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; |
2307 | 2289 | ||
2308 | /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */ | 2290 | /* |
2291 | * Check if the entry is valid, if not then setup a TLB invalid | ||
2292 | * exception to the guest | ||
2293 | */ | ||
2309 | if (!TLB_IS_VALID(*tlb, va)) { | 2294 | if (!TLB_IS_VALID(*tlb, va)) { |
2310 | if (exccode == T_TLB_LD_MISS) { | 2295 | if (exccode == T_TLB_LD_MISS) { |
2311 | er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, | 2296 | er = kvm_mips_emulate_tlbinv_ld(cause, opc, run, |
@@ -2314,15 +2299,17 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc, | |||
2314 | er = kvm_mips_emulate_tlbinv_st(cause, opc, run, | 2299 | er = kvm_mips_emulate_tlbinv_st(cause, opc, run, |
2315 | vcpu); | 2300 | vcpu); |
2316 | } else { | 2301 | } else { |
2317 | printk("%s: invalid exc code: %d\n", __func__, | 2302 | kvm_err("%s: invalid exc code: %d\n", __func__, |
2318 | exccode); | 2303 | exccode); |
2319 | er = EMULATE_FAIL; | 2304 | er = EMULATE_FAIL; |
2320 | } | 2305 | } |
2321 | } else { | 2306 | } else { |
2322 | kvm_debug | 2307 | kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", |
2323 | ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", | 2308 | tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); |
2324 | tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1); | 2309 | /* |
2325 | /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */ | 2310 | * OK we have a Guest TLB entry, now inject it into the |
2311 | * shadow host TLB | ||
2312 | */ | ||
2326 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, | 2313 | kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL, |
2327 | NULL); | 2314 | NULL); |
2328 | } | 2315 | } |
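The rewrapped comments above spell out a three-way policy: no guest TLB entry means the guest gets a TLB miss exception, an invalid entry means a TLB invalid exception, and a present, valid entry is injected into the shadow host TLB. A compact user-space model of that dispatch; the entry struct and return codes are simplified stand-ins:

#include <stdio.h>

enum emu_result { EMULATE_DONE, EMULATE_FAIL };

struct toy_tlb_entry { int present; int valid; };

/* Guest TLB lookup first, validity check second, shadow host TLB
 * injection only when both pass -- the order used above. */
static enum emu_result handle_tlbmiss(const struct toy_tlb_entry *entry)
{
        if (!entry || !entry->present) {
                puts("deliver TLB miss exception to guest");
                return EMULATE_DONE;
        }
        if (!entry->valid) {
                puts("deliver TLB invalid exception to guest");
                return EMULATE_DONE;
        }
        puts("inject guest entry into shadow host TLB");
        return EMULATE_DONE;
}

int main(void)
{
        struct toy_tlb_entry missing = { 0, 0 }, invalid = { 1, 0 }, ok = { 1, 1 };

        handle_tlbmiss(&missing);
        handle_tlbmiss(&invalid);
        handle_tlbmiss(&ok);
        return 0;
}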
diff --git a/arch/mips/kvm/kvm_mips_int.c b/arch/mips/kvm/interrupt.c index 1e5de16afe29..9b4445940c2b 100644 --- a/arch/mips/kvm/kvm_mips_int.c +++ b/arch/mips/kvm/interrupt.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Interrupt delivery | 6 | * KVM/MIPS: Interrupt delivery |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
@@ -20,7 +20,7 @@ | |||
20 | 20 | ||
21 | #include <linux/kvm_host.h> | 21 | #include <linux/kvm_host.h> |
22 | 22 | ||
23 | #include "kvm_mips_int.h" | 23 | #include "interrupt.h" |
24 | 24 | ||
25 | void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority) | 25 | void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority) |
26 | { | 26 | { |
@@ -34,7 +34,8 @@ void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority) | |||
34 | 34 | ||
35 | void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) | 35 | void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) |
36 | { | 36 | { |
37 | /* Cause bits to reflect the pending timer interrupt, | 37 | /* |
38 | * Cause bits to reflect the pending timer interrupt, | ||
38 | * the EXC code will be set when we are actually | 39 | * the EXC code will be set when we are actually |
39 | * delivering the interrupt: | 40 | * delivering the interrupt: |
40 | */ | 41 | */ |
@@ -51,12 +52,13 @@ void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) | |||
51 | kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); | 52 | kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); |
52 | } | 53 | } |
53 | 54 | ||
54 | void | 55 | void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, |
55 | kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) | 56 | struct kvm_mips_interrupt *irq) |
56 | { | 57 | { |
57 | int intr = (int)irq->irq; | 58 | int intr = (int)irq->irq; |
58 | 59 | ||
59 | /* Cause bits to reflect the pending IO interrupt, | 60 | /* |
61 | * Cause bits to reflect the pending IO interrupt, | ||
60 | * the EXC code will be set when we are actually | 62 | * the EXC code will be set when we are actually |
61 | * delivering the interrupt: | 63 | * delivering the interrupt: |
62 | */ | 64 | */ |
@@ -83,11 +85,11 @@ kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) | |||
83 | 85 | ||
84 | } | 86 | } |
85 | 87 | ||
86 | void | 88 | void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, |
87 | kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, | 89 | struct kvm_mips_interrupt *irq) |
88 | struct kvm_mips_interrupt *irq) | ||
89 | { | 90 | { |
90 | int intr = (int)irq->irq; | 91 | int intr = (int)irq->irq; |
92 | |||
91 | switch (intr) { | 93 | switch (intr) { |
92 | case -2: | 94 | case -2: |
93 | kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); | 95 | kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ0)); |
@@ -111,9 +113,8 @@ kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, | |||
111 | } | 113 | } |
112 | 114 | ||
113 | /* Deliver the interrupt of the corresponding priority, if possible. */ | 115 | /* Deliver the interrupt of the corresponding priority, if possible. */ |
114 | int | 116 | int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, |
115 | kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, | 117 | uint32_t cause) |
116 | uint32_t cause) | ||
117 | { | 118 | { |
118 | int allowed = 0; | 119 | int allowed = 0; |
119 | uint32_t exccode; | 120 | uint32_t exccode; |
@@ -164,7 +165,6 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, | |||
164 | 165 | ||
165 | /* Are we allowed to deliver the interrupt ??? */ | 166 | /* Are we allowed to deliver the interrupt ??? */ |
166 | if (allowed) { | 167 | if (allowed) { |
167 | |||
168 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | 168 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { |
169 | /* save old pc */ | 169 | /* save old pc */ |
170 | kvm_write_c0_guest_epc(cop0, arch->pc); | 170 | kvm_write_c0_guest_epc(cop0, arch->pc); |
@@ -195,9 +195,8 @@ kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, | |||
195 | return allowed; | 195 | return allowed; |
196 | } | 196 | } |
197 | 197 | ||
198 | int | 198 | int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, |
199 | kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, | 199 | uint32_t cause) |
200 | uint32_t cause) | ||
201 | { | 200 | { |
202 | return 1; | 201 | return 1; |
203 | } | 202 | } |
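Throughout this file a pending interrupt is nothing more than an IP bit in the guest's Cause register; the ExcCode is only filled in at delivery time, as the reflowed comments note. A toy model of the queue/dequeue bookkeeping, assuming the conventional MIPS mapping of external IRQ lines 0/1 to Cause.IP2/IP3 (bits 10 and 11); the bit positions are illustrative, not taken from the headers:

#include <stdio.h>

#define C_IRQ0  (1u << 10)      /* Cause.IP2, assumed mapping */
#define C_IRQ1  (1u << 11)      /* Cause.IP3, assumed mapping */

static unsigned int guest_cause;

/* Queue: set the Cause bit for the pending source. */
static void queue_io_int(int intr)
{
        if (intr == 2)
                guest_cause |= C_IRQ0;
        else if (intr == 3)
                guest_cause |= C_IRQ1;
}

/* Dequeue: negative numbers clear the matching source, as in the
 * switch over irq->irq above. */
static void dequeue_io_int(int intr)
{
        if (intr == -2)
                guest_cause &= ~C_IRQ0;
        else if (intr == -3)
                guest_cause &= ~C_IRQ1;
}

int main(void)
{
        queue_io_int(2);
        printf("cause after queue:   %#x\n", guest_cause);
        dequeue_io_int(-2);
        printf("cause after dequeue: %#x\n", guest_cause);
        return 0;
}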
diff --git a/arch/mips/kvm/kvm_mips_int.h b/arch/mips/kvm/interrupt.h index 20da7d29eede..4ab4bdfad703 100644 --- a/arch/mips/kvm/kvm_mips_int.h +++ b/arch/mips/kvm/interrupt.h | |||
@@ -1,14 +1,15 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Interrupts | 6 | * KVM/MIPS: Interrupts |
7 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 7 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
8 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 8 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | /* MIPS Exception Priorities, exceptions (including interrupts) are queued up | 11 | /* |
12 | * MIPS Exception Priorities, exceptions (including interrupts) are queued up | ||
12 | * for the guest in the order specified by their priorities | 13 | * for the guest in the order specified by their priorities |
13 | */ | 14 | */ |
14 | 15 | ||
@@ -27,6 +28,9 @@ | |||
27 | #define MIPS_EXC_MAX 12 | 28 | #define MIPS_EXC_MAX 12 |
28 | /* XXXSL More to follow */ | 29 | /* XXXSL More to follow */ |
29 | 30 | ||
31 | extern char mips32_exception[], mips32_exceptionEnd[]; | ||
32 | extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; | ||
33 | |||
30 | #define C_TI (_ULCAST_(1) << 30) | 34 | #define C_TI (_ULCAST_(1) << 30) |
31 | 35 | ||
32 | #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) | 36 | #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) |
diff --git a/arch/mips/kvm/kvm_mips_comm.h b/arch/mips/kvm/kvm_mips_comm.h deleted file mode 100644 index a4a8c85cc8f7..000000000000 --- a/arch/mips/kvm/kvm_mips_comm.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: commpage: mapped into guest kernel space | ||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
10 | */ | ||
11 | |||
12 | #ifndef __KVM_MIPS_COMMPAGE_H__ | ||
13 | #define __KVM_MIPS_COMMPAGE_H__ | ||
14 | |||
15 | struct kvm_mips_commpage { | ||
16 | struct mips_coproc cop0; /* COP0 state is mapped into Guest kernel via commpage */ | ||
17 | }; | ||
18 | |||
19 | #define KVM_MIPS_COMM_EIDI_OFFSET 0x0 | ||
20 | |||
21 | extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu); | ||
22 | |||
23 | #endif /* __KVM_MIPS_COMMPAGE_H__ */ | ||
diff --git a/arch/mips/kvm/kvm_mips_commpage.c b/arch/mips/kvm/kvm_mips_commpage.c deleted file mode 100644 index 3873b1ecc40f..000000000000 --- a/arch/mips/kvm/kvm_mips_commpage.c +++ /dev/null | |||
@@ -1,37 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * commpage, currently used for Virtual COP0 registers. | ||
7 | * Mapped into the guest kernel @ 0x0. | ||
8 | * | ||
9 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
10 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/errno.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/bootmem.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/cacheflush.h> | ||
21 | #include <asm/mmu_context.h> | ||
22 | |||
23 | #include <linux/kvm_host.h> | ||
24 | |||
25 | #include "kvm_mips_comm.h" | ||
26 | |||
27 | void kvm_mips_commpage_init(struct kvm_vcpu *vcpu) | ||
28 | { | ||
29 | struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage; | ||
30 | memset(page, 0, sizeof(struct kvm_mips_commpage)); | ||
31 | |||
32 | /* Specific init values for fields */ | ||
33 | vcpu->arch.cop0 = &page->cop0; | ||
34 | memset(vcpu->arch.cop0, 0, sizeof(struct mips_coproc)); | ||
35 | |||
36 | return; | ||
37 | } | ||
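The deleted commpage code (it moves elsewhere in this series) aliases the vcpu's COP0 state into a page that is also mapped into the guest, so guest and hypervisor read and write the same registers. A user-space sketch of that aliasing, with the structs reduced to stand-ins:

#include <stdio.h>
#include <string.h>

struct mips_coproc { unsigned long reg[32]; };
struct kvm_mips_commpage { struct mips_coproc cop0; };
struct toy_vcpu {
        struct kvm_mips_commpage *kseg0_commpage;
        struct mips_coproc *cop0;
};

/* Zero the page and point vcpu->cop0 into it, as the deleted
 * kvm_mips_commpage_init() did. */
static void commpage_init(struct toy_vcpu *vcpu)
{
        memset(vcpu->kseg0_commpage, 0, sizeof(*vcpu->kseg0_commpage));
        vcpu->cop0 = &vcpu->kseg0_commpage->cop0;
}

int main(void)
{
        struct kvm_mips_commpage page;
        struct toy_vcpu vcpu = { .kseg0_commpage = &page };

        commpage_init(&vcpu);
        vcpu.cop0->reg[12] = 0x10000000;        /* write via the vcpu view... */
        printf("%#lx\n", page.cop0.reg[12]);    /* ...visible via the page */
        return 0;
}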
diff --git a/arch/mips/kvm/kvm_mips_opcode.h b/arch/mips/kvm/kvm_mips_opcode.h deleted file mode 100644 index 86d3b4cc348b..000000000000 --- a/arch/mips/kvm/kvm_mips_opcode.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * Define opcode values not defined in <asm/inst.h> | ||
12 | */ | ||
13 | |||
14 | #ifndef __KVM_MIPS_OPCODE_H__ | ||
15 | #define __KVM_MIPS_OPCODE_H__ | ||
16 | |||
17 | /* COP0 Ops */ | ||
18 | #define mfmcz_op 0x0b /* 01011 */ | ||
19 | #define wrpgpr_op 0x0e /* 01110 */ | ||
20 | |||
21 | /* COP0 opcodes (only if COP0 and CO=1): */ | ||
22 | #define wait_op 0x20 /* 100000 */ | ||
23 | |||
24 | #endif /* __KVM_MIPS_OPCODE_H__ */ | ||
diff --git a/arch/mips/kvm/kvm_locore.S b/arch/mips/kvm/locore.S index 033ac343e72c..d7279c03c517 100644 --- a/arch/mips/kvm/kvm_locore.S +++ b/arch/mips/kvm/locore.S | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <asm/stackframe.h> | 16 | #include <asm/stackframe.h> |
17 | #include <asm/asm-offsets.h> | 17 | #include <asm/asm-offsets.h> |
18 | 18 | ||
19 | |||
20 | #define _C_LABEL(x) x | 19 | #define _C_LABEL(x) x |
21 | #define MIPSX(name) mips32_ ## name | 20 | #define MIPSX(name) mips32_ ## name |
22 | #define CALLFRAME_SIZ 32 | 21 | #define CALLFRAME_SIZ 32 |
@@ -91,7 +90,10 @@ FEXPORT(__kvm_mips_vcpu_run) | |||
91 | LONG_S $24, PT_R24(k1) | 90 | LONG_S $24, PT_R24(k1) |
92 | LONG_S $25, PT_R25(k1) | 91 | LONG_S $25, PT_R25(k1) |
93 | 92 | ||
94 | /* XXXKYMA k0/k1 not saved, not being used if we got here through an ioctl() */ | 93 | /* |
94 | * XXXKYMA k0/k1 not saved, not being used if we got here through | ||
95 | * an ioctl() | ||
96 | */ | ||
95 | 97 | ||
96 | LONG_S $28, PT_R28(k1) | 98 | LONG_S $28, PT_R28(k1) |
97 | LONG_S $29, PT_R29(k1) | 99 | LONG_S $29, PT_R29(k1) |
@@ -132,7 +134,10 @@ FEXPORT(__kvm_mips_vcpu_run) | |||
132 | /* Save the kernel gp as well */ | 134 | /* Save the kernel gp as well */ |
133 | LONG_S gp, VCPU_HOST_GP(k1) | 135 | LONG_S gp, VCPU_HOST_GP(k1) |
134 | 136 | ||
135 | /* Setup status register for running the guest in UM, interrupts are disabled */ | 137 | /* |
138 | * Setup status register for running the guest in UM, interrupts | ||
139 | * are disabled | ||
140 | */ | ||
136 | li k0, (ST0_EXL | KSU_USER | ST0_BEV) | 141 | li k0, (ST0_EXL | KSU_USER | ST0_BEV) |
137 | mtc0 k0, CP0_STATUS | 142 | mtc0 k0, CP0_STATUS |
138 | ehb | 143 | ehb |
@@ -152,7 +157,6 @@ FEXPORT(__kvm_mips_vcpu_run) | |||
152 | mtc0 k0, CP0_STATUS | 157 | mtc0 k0, CP0_STATUS |
153 | ehb | 158 | ehb |
154 | 159 | ||
155 | |||
156 | /* Set Guest EPC */ | 160 | /* Set Guest EPC */ |
157 | LONG_L t0, VCPU_PC(k1) | 161 | LONG_L t0, VCPU_PC(k1) |
158 | mtc0 t0, CP0_EPC | 162 | mtc0 t0, CP0_EPC |
@@ -165,7 +169,7 @@ FEXPORT(__kvm_mips_load_asid) | |||
165 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ | 169 | INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */ |
166 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ | 170 | INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */ |
167 | 1: | 171 | 1: |
168 | /* t1: contains the base of the ASID array, need to get the cpu id */ | 172 | /* t1: contains the base of the ASID array, need to get the cpu id */ |
169 | LONG_L t2, TI_CPU($28) /* smp_processor_id */ | 173 | LONG_L t2, TI_CPU($28) /* smp_processor_id */ |
170 | INT_SLL t2, t2, 2 /* x4 */ | 174 | INT_SLL t2, t2, 2 /* x4 */ |
171 | REG_ADDU t3, t1, t2 | 175 | REG_ADDU t3, t1, t2 |
@@ -229,9 +233,7 @@ FEXPORT(__kvm_mips_load_k0k1) | |||
229 | eret | 233 | eret |
230 | 234 | ||
231 | VECTOR(MIPSX(exception), unknown) | 235 | VECTOR(MIPSX(exception), unknown) |
232 | /* | 236 | /* Find out what mode we came from and jump to the proper handler. */ |
233 | * Find out what mode we came from and jump to the proper handler. | ||
234 | */ | ||
235 | mtc0 k0, CP0_ERROREPC #01: Save guest k0 | 237 | mtc0 k0, CP0_ERROREPC #01: Save guest k0 |
236 | ehb #02: | 238 | ehb #02: |
237 | 239 | ||
@@ -239,7 +241,8 @@ VECTOR(MIPSX(exception), unknown) | |||
239 | INT_SRL k0, k0, 10 #03: Get rid of CPUNum | 241 | INT_SRL k0, k0, 10 #03: Get rid of CPUNum |
240 | INT_SLL k0, k0, 10 #04 | 242 | INT_SLL k0, k0, 10 #04 |
241 | LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 | 243 | LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000 |
242 | INT_ADDIU k0, k0, 0x2000 #06: Exception handler is installed @ offset 0x2000 | 244 | INT_ADDIU k0, k0, 0x2000 #06: Exception handler is |
245 | # installed @ offset 0x2000 | ||
243 | j k0 #07: jump to the function | 246 | j k0 #07: jump to the function |
244 | nop #08: branch delay slot | 247 | nop #08: branch delay slot |
245 | VECTOR_END(MIPSX(exceptionEnd)) | 248 | VECTOR_END(MIPSX(exceptionEnd)) |
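The vector above recovers the handler base from EBASE by shifting right then left by 10, which discards the CPUNum field, and jumps to the handler installed 0x2000 bytes past that base (k1 is parked at offset 0x3000). The same arithmetic restated in C, with the offsets taken from the comments in the hunk:

#include <stdio.h>

/* srl/sll by 10 clears EBASE[9:0]; the guest exception handler is
 * installed 0x2000 bytes past the resulting base. */
static unsigned long handler_addr(unsigned long ebase)
{
        unsigned long base = (ebase >> 10) << 10;

        return base + 0x2000;
}

int main(void)
{
        printf("%#lx\n", handler_addr(0x80000123UL));   /* -> 0x80002000 */
        return 0;
}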
@@ -248,7 +251,6 @@ VECTOR_END(MIPSX(exceptionEnd)) | |||
248 | /* | 251 | /* |
249 | * Generic Guest exception handler. We end up here when the guest | 252 | * Generic Guest exception handler. We end up here when the guest |
250 | * does something that causes a trap to kernel mode. | 253 | * does something that causes a trap to kernel mode. |
251 | * | ||
252 | */ | 254 | */ |
253 | NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | 255 | NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) |
254 | /* Get the VCPU pointer from DDATA_LO */ | 256 | /* Get the VCPU pointer from DDATA_LO */ |
@@ -290,9 +292,7 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | |||
290 | LONG_S $30, VCPU_R30(k1) | 292 | LONG_S $30, VCPU_R30(k1) |
291 | LONG_S $31, VCPU_R31(k1) | 293 | LONG_S $31, VCPU_R31(k1) |
292 | 294 | ||
293 | /* We need to save hi/lo and restore them on | 295 | /* We need to save hi/lo and restore them on the way out */ |
294 | * the way out | ||
295 | */ | ||
296 | mfhi t0 | 296 | mfhi t0 |
297 | LONG_S t0, VCPU_HI(k1) | 297 | LONG_S t0, VCPU_HI(k1) |
298 | 298 | ||
@@ -321,8 +321,10 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | |||
321 | /* Save pointer to run in s0, will be saved by the compiler */ | 321 | /* Save pointer to run in s0, will be saved by the compiler */ |
322 | move s0, a0 | 322 | move s0, a0 |
323 | 323 | ||
324 | /* Save Host level EPC, BadVaddr and Cause to VCPU, useful to | 324 | /* |
325 | * process the exception */ | 325 | * Save Host level EPC, BadVaddr and Cause to VCPU, useful to |
326 | * process the exception | ||
327 | */ | ||
326 | mfc0 k0,CP0_EPC | 328 | mfc0 k0,CP0_EPC |
327 | LONG_S k0, VCPU_PC(k1) | 329 | LONG_S k0, VCPU_PC(k1) |
328 | 330 | ||
@@ -351,7 +353,6 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | |||
351 | LONG_L k0, VCPU_HOST_EBASE(k1) | 353 | LONG_L k0, VCPU_HOST_EBASE(k1) |
352 | mtc0 k0,CP0_EBASE | 354 | mtc0 k0,CP0_EBASE |
353 | 355 | ||
354 | |||
355 | /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ | 356 | /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ |
356 | .set at | 357 | .set at |
357 | and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) | 358 | and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE) |
@@ -369,7 +370,8 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | |||
369 | /* Saved host state */ | 370 | /* Saved host state */ |
370 | INT_ADDIU sp, sp, -PT_SIZE | 371 | INT_ADDIU sp, sp, -PT_SIZE |
371 | 372 | ||
372 | /* XXXKYMA do we need to load the host ASID, maybe not because the | 373 | /* |
374 | * XXXKYMA do we need to load the host ASID, maybe not because the | ||
373 | * kernel entries are marked GLOBAL, need to verify | 375 | * kernel entries are marked GLOBAL, need to verify |
374 | */ | 376 | */ |
375 | 377 | ||
@@ -383,9 +385,11 @@ NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra) | |||
383 | 385 | ||
384 | /* Jump to handler */ | 386 | /* Jump to handler */ |
385 | FEXPORT(__kvm_mips_jump_to_handler) | 387 | FEXPORT(__kvm_mips_jump_to_handler) |
386 | /* XXXKYMA: not sure if this is safe, how large is the stack?? | 388 | /* |
389 | * XXXKYMA: not sure if this is safe, how large is the stack?? | ||
387 | * Now jump to the kvm_mips_handle_exit() to see if we can deal | 390 | * Now jump to the kvm_mips_handle_exit() to see if we can deal |
388 | * with this in the kernel */ | 391 | * with this in the kernel |
392 | */ | ||
389 | PTR_LA t9, kvm_mips_handle_exit | 393 | PTR_LA t9, kvm_mips_handle_exit |
390 | jalr.hb t9 | 394 | jalr.hb t9 |
391 | INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */ | 395 | INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */ |
@@ -394,7 +398,8 @@ FEXPORT(__kvm_mips_jump_to_handler) | |||
394 | di | 398 | di |
395 | ehb | 399 | ehb |
396 | 400 | ||
397 | /* XXXKYMA: k0/k1 could have been blown away if we processed | 401 | /* |
402 | * XXXKYMA: k0/k1 could have been blown away if we processed | ||
398 | * an exception while we were handling the exception from the | 403 | * an exception while we were handling the exception from the |
399 | * guest, reload k1 | 404 | * guest, reload k1 |
400 | */ | 405 | */ |
@@ -402,7 +407,8 @@ FEXPORT(__kvm_mips_jump_to_handler) | |||
402 | move k1, s1 | 407 | move k1, s1 |
403 | INT_ADDIU k1, k1, VCPU_HOST_ARCH | 408 | INT_ADDIU k1, k1, VCPU_HOST_ARCH |
404 | 409 | ||
405 | /* Check return value, should tell us if we are returning to the | 410 | /* |
411 | * Check return value, should tell us if we are returning to the | ||
406 | * host (handle I/O etc) or resuming the guest | 412 | * host (handle I/O etc) or resuming the guest |
407 | */ | 413 | */ |
408 | andi t0, v0, RESUME_HOST | 414 | andi t0, v0, RESUME_HOST |
@@ -521,8 +527,10 @@ __kvm_mips_return_to_host: | |||
521 | LONG_L $0, PT_R0(k1) | 527 | LONG_L $0, PT_R0(k1) |
522 | LONG_L $1, PT_R1(k1) | 528 | LONG_L $1, PT_R1(k1) |
523 | 529 | ||
524 | /* r2/v0 is the return code, shift it down by 2 (arithmetic) | 530 | /* |
525 | * to recover the err code */ | 531 | * r2/v0 is the return code, shift it down by 2 (arithmetic) |
532 | * to recover the err code | ||
533 | */ | ||
526 | INT_SRA k0, v0, 2 | 534 | INT_SRA k0, v0, 2 |
527 | move $2, k0 | 535 | move $2, k0 |
528 | 536 | ||
@@ -566,7 +574,6 @@ __kvm_mips_return_to_host: | |||
566 | PTR_LI k0, 0x2000000F | 574 | PTR_LI k0, 0x2000000F |
567 | mtc0 k0, CP0_HWRENA | 575 | mtc0 k0, CP0_HWRENA |
568 | 576 | ||
569 | |||
570 | /* Restore RA, which is the address we will return to */ | 577 | /* Restore RA, which is the address we will return to */ |
571 | LONG_L ra, PT_R31(k1) | 578 | LONG_L ra, PT_R31(k1) |
572 | j ra | 579 | j ra |
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/mips.c index f3c56a182fd8..4fda672cb58e 100644 --- a/arch/mips/kvm/kvm_mips.c +++ b/arch/mips/kvm/mips.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
@@ -21,8 +21,8 @@ | |||
21 | 21 | ||
22 | #include <linux/kvm_host.h> | 22 | #include <linux/kvm_host.h> |
23 | 23 | ||
24 | #include "kvm_mips_int.h" | 24 | #include "interrupt.h" |
25 | #include "kvm_mips_comm.h" | 25 | #include "commpage.h" |
26 | 26 | ||
27 | #define CREATE_TRACE_POINTS | 27 | #define CREATE_TRACE_POINTS |
28 | #include "trace.h" | 28 | #include "trace.h" |
@@ -31,38 +31,41 @@ | |||
31 | #define VECTORSPACING 0x100 /* for EI/VI mode */ | 31 | #define VECTORSPACING 0x100 /* for EI/VI mode */ |
32 | #endif | 32 | #endif |
33 | 33 | ||
34 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | 34 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x) |
35 | struct kvm_stats_debugfs_item debugfs_entries[] = { | 35 | struct kvm_stats_debugfs_item debugfs_entries[] = { |
36 | { "wait", VCPU_STAT(wait_exits) }, | 36 | { "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU }, |
37 | { "cache", VCPU_STAT(cache_exits) }, | 37 | { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU }, |
38 | { "signal", VCPU_STAT(signal_exits) }, | 38 | { "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU }, |
39 | { "interrupt", VCPU_STAT(int_exits) }, | 39 | { "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU }, |
40 | { "cop_unsuable", VCPU_STAT(cop_unusable_exits) }, | 40 | { "cop_unsuable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU }, |
41 | { "tlbmod", VCPU_STAT(tlbmod_exits) }, | 41 | { "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU }, |
42 | { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits) }, | 42 | { "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU }, |
43 | { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits) }, | 43 | { "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU }, |
44 | { "addrerr_st", VCPU_STAT(addrerr_st_exits) }, | 44 | { "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU }, |
45 | { "addrerr_ld", VCPU_STAT(addrerr_ld_exits) }, | 45 | { "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU }, |
46 | { "syscall", VCPU_STAT(syscall_exits) }, | 46 | { "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU }, |
47 | { "resvd_inst", VCPU_STAT(resvd_inst_exits) }, | 47 | { "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU }, |
48 | { "break_inst", VCPU_STAT(break_inst_exits) }, | 48 | { "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU }, |
49 | { "flush_dcache", VCPU_STAT(flush_dcache_exits) }, | 49 | { "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU }, |
50 | { "halt_wakeup", VCPU_STAT(halt_wakeup) }, | 50 | { "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU }, |
51 | {NULL} | 51 | {NULL} |
52 | }; | 52 | }; |
53 | 53 | ||
54 | static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) | 54 | static int kvm_mips_reset_vcpu(struct kvm_vcpu *vcpu) |
55 | { | 55 | { |
56 | int i; | 56 | int i; |
57 | |||
57 | for_each_possible_cpu(i) { | 58 | for_each_possible_cpu(i) { |
58 | vcpu->arch.guest_kernel_asid[i] = 0; | 59 | vcpu->arch.guest_kernel_asid[i] = 0; |
59 | vcpu->arch.guest_user_asid[i] = 0; | 60 | vcpu->arch.guest_user_asid[i] = 0; |
60 | } | 61 | } |
62 | |||
61 | return 0; | 63 | return 0; |
62 | } | 64 | } |
63 | 65 | ||
64 | /* XXXKYMA: We are simulating a processor that has the WII bit set in Config7, so we | 66 | /* |
65 | * are "runnable" if interrupts are pending | 67 | * XXXKYMA: We are simulating a processor that has the WII bit set in |
68 | * Config7, so we are "runnable" if interrupts are pending | ||
66 | */ | 69 | */ |
67 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | 70 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) |
68 | { | 71 | { |
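The debugfs table rewrite at the top of this hunk is mechanical but worth pinning down: VCPU_STAT used to expand to two initializer elements at once (offset and kind), which is fragile and hard to read; it now yields only the offset, with KVM_STAT_VCPU spelled out per row. A self-contained sketch of the new shape, with the kernel structs replaced by minimal stand-ins:

#include <stddef.h>
#include <stdio.h>

struct kvm_vcpu_stat { unsigned long wait_exits, cache_exits; };
struct toy_vcpu { struct kvm_vcpu_stat stat; };
enum kvm_stat_kind { KVM_STAT_VM, KVM_STAT_VCPU };
struct stats_item { const char *name; size_t offset; enum kvm_stat_kind kind; };

/* One value per macro expansion; the kind is visible at each use. */
#define VCPU_STAT(x) offsetof(struct toy_vcpu, stat.x)

static struct stats_item debugfs_entries[] = {
        { "wait",  VCPU_STAT(wait_exits),  KVM_STAT_VCPU },
        { "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
        { NULL }
};

int main(void)
{
        for (const struct stats_item *p = debugfs_entries; p->name; p++)
                printf("%s at offset %zu, kind %d\n", p->name, p->offset, p->kind);
        return 0;
}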
@@ -94,16 +97,17 @@ void kvm_arch_hardware_unsetup(void) | |||
94 | 97 | ||
95 | void kvm_arch_check_processor_compat(void *rtn) | 98 | void kvm_arch_check_processor_compat(void *rtn) |
96 | { | 99 | { |
97 | int *r = (int *)rtn; | 100 | *(int *)rtn = 0; |
98 | *r = 0; | ||
99 | return; | ||
100 | } | 101 | } |
101 | 102 | ||
102 | static void kvm_mips_init_tlbs(struct kvm *kvm) | 103 | static void kvm_mips_init_tlbs(struct kvm *kvm) |
103 | { | 104 | { |
104 | unsigned long wired; | 105 | unsigned long wired; |
105 | 106 | ||
106 | /* Add a wired entry to the TLB, it is used to map the commpage to the Guest kernel */ | 107 | /* |
108 | * Add a wired entry to the TLB, it is used to map the commpage to | ||
109 | * the Guest kernel | ||
110 | */ | ||
107 | wired = read_c0_wired(); | 111 | wired = read_c0_wired(); |
108 | write_c0_wired(wired + 1); | 112 | write_c0_wired(wired + 1); |
109 | mtc0_tlbw_hazard(); | 113 | mtc0_tlbw_hazard(); |
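kvm_mips_init_tlbs() reserves the commpage mapping by bumping the CP0 Wired register: TLB entries below the Wired threshold are exempt from random replacement, so the mapping can never be evicted. A toy restatement of the read/increment/write pattern, with the CP0 accessors replaced by a plain variable (on real hardware the mtc0 hazard barrier shown above is also required):

#include <stdio.h>

static unsigned int c0_wired;   /* stand-in for read_c0_wired()/write_c0_wired() */

/* Claim the next wired slot and return its index. */
static unsigned int reserve_wired_entry(void)
{
        unsigned int slot = c0_wired;

        c0_wired = slot + 1;
        return slot;
}

int main(void)
{
        printf("commpage wired into TLB slot %u\n", reserve_wired_entry());
        return 0;
}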
@@ -130,7 +134,6 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | |||
130 | on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); | 134 | on_each_cpu(kvm_mips_init_vm_percpu, kvm, 1); |
131 | } | 135 | } |
132 | 136 | ||
133 | |||
134 | return 0; | 137 | return 0; |
135 | } | 138 | } |
136 | 139 | ||
@@ -185,8 +188,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) | |||
185 | } | 188 | } |
186 | } | 189 | } |
187 | 190 | ||
188 | long | 191 | long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, |
189 | kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | 192 | unsigned long arg) |
190 | { | 193 | { |
191 | return -ENOIOCTLCMD; | 194 | return -ENOIOCTLCMD; |
192 | } | 195 | } |
@@ -207,20 +210,20 @@ void kvm_arch_memslots_updated(struct kvm *kvm) | |||
207 | } | 210 | } |
208 | 211 | ||
209 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | 212 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
210 | struct kvm_memory_slot *memslot, | 213 | struct kvm_memory_slot *memslot, |
211 | struct kvm_userspace_memory_region *mem, | 214 | struct kvm_userspace_memory_region *mem, |
212 | enum kvm_mr_change change) | 215 | enum kvm_mr_change change) |
213 | { | 216 | { |
214 | return 0; | 217 | return 0; |
215 | } | 218 | } |
216 | 219 | ||
217 | void kvm_arch_commit_memory_region(struct kvm *kvm, | 220 | void kvm_arch_commit_memory_region(struct kvm *kvm, |
218 | struct kvm_userspace_memory_region *mem, | 221 | struct kvm_userspace_memory_region *mem, |
219 | const struct kvm_memory_slot *old, | 222 | const struct kvm_memory_slot *old, |
220 | enum kvm_mr_change change) | 223 | enum kvm_mr_change change) |
221 | { | 224 | { |
222 | unsigned long npages = 0; | 225 | unsigned long npages = 0; |
223 | int i, err = 0; | 226 | int i; |
224 | 227 | ||
225 | kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", | 228 | kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", |
226 | __func__, kvm, mem->slot, mem->guest_phys_addr, | 229 | __func__, kvm, mem->slot, mem->guest_phys_addr, |
@@ -238,21 +241,17 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
238 | 241 | ||
239 | if (!kvm->arch.guest_pmap) { | 242 | if (!kvm->arch.guest_pmap) { |
240 | kvm_err("Failed to allocate guest PMAP"); | 243 | kvm_err("Failed to allocate guest PMAP"); |
241 | err = -ENOMEM; | 244 | return; |
242 | goto out; | ||
243 | } | 245 | } |
244 | 246 | ||
245 | kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", | 247 | kvm_debug("Allocated space for Guest PMAP Table (%ld pages) @ %p\n", |
246 | npages, kvm->arch.guest_pmap); | 248 | npages, kvm->arch.guest_pmap); |
247 | 249 | ||
248 | /* Now setup the page table */ | 250 | /* Now setup the page table */ |
249 | for (i = 0; i < npages; i++) { | 251 | for (i = 0; i < npages; i++) |
250 | kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; | 252 | kvm->arch.guest_pmap[i] = KVM_INVALID_PAGE; |
251 | } | ||
252 | } | 253 | } |
253 | } | 254 | } |
254 | out: | ||
255 | return; | ||
256 | } | 255 | } |
257 | 256 | ||
258 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | 257 | void kvm_arch_flush_shadow_all(struct kvm *kvm) |
@@ -270,8 +269,6 @@ void kvm_arch_flush_shadow(struct kvm *kvm) | |||
270 | 269 | ||
271 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | 270 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) |
272 | { | 271 | { |
273 | extern char mips32_exception[], mips32_exceptionEnd[]; | ||
274 | extern char mips32_GuestException[], mips32_GuestExceptionEnd[]; | ||
275 | int err, size, offset; | 272 | int err, size, offset; |
276 | void *gebase; | 273 | void *gebase; |
277 | int i; | 274 | int i; |
@@ -290,14 +287,14 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | |||
290 | 287 | ||
291 | kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); | 288 | kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu); |
292 | 289 | ||
293 | /* Allocate space for host mode exception handlers that handle | 290 | /* |
291 | * Allocate space for host mode exception handlers that handle | ||
294 | * guest mode exits | 292 | * guest mode exits |
295 | */ | 293 | */ |
296 | if (cpu_has_veic || cpu_has_vint) { | 294 | if (cpu_has_veic || cpu_has_vint) |
297 | size = 0x200 + VECTORSPACING * 64; | 295 | size = 0x200 + VECTORSPACING * 64; |
298 | } else { | 296 | else |
299 | size = 0x4000; | 297 | size = 0x4000; |
300 | } | ||
301 | 298 | ||
302 | /* Save Linux EBASE */ | 299 | /* Save Linux EBASE */ |
303 | vcpu->arch.host_ebase = (void *)read_c0_ebase(); | 300 | vcpu->arch.host_ebase = (void *)read_c0_ebase(); |
@@ -345,7 +342,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) | |||
345 | local_flush_icache_range((unsigned long)gebase, | 342 | local_flush_icache_range((unsigned long)gebase, |
346 | (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); | 343 | (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); |
347 | 344 | ||
348 | /* Allocate comm page for guest kernel, a TLB will be reserved for mapping GVA @ 0xFFFF8000 to this page */ | 345 | /* |
346 | * Allocate comm page for guest kernel, a TLB will be reserved for | ||
347 | * mapping GVA @ 0xFFFF8000 to this page | ||
348 | */ | ||
349 | vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); | 349 | vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); |
350 | 350 | ||
351 | if (!vcpu->arch.kseg0_commpage) { | 351 | if (!vcpu->arch.kseg0_commpage) { |
@@ -392,9 +392,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | |||
392 | kvm_arch_vcpu_free(vcpu); | 392 | kvm_arch_vcpu_free(vcpu); |
393 | } | 393 | } |
394 | 394 | ||
395 | int | 395 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
396 | kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 396 | struct kvm_guest_debug *dbg) |
397 | struct kvm_guest_debug *dbg) | ||
398 | { | 397 | { |
399 | return -ENOIOCTLCMD; | 398 | return -ENOIOCTLCMD; |
400 | } | 399 | } |
@@ -431,8 +430,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) | |||
431 | return r; | 430 | return r; |
432 | } | 431 | } |
433 | 432 | ||
434 | int | 433 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, |
435 | kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) | 434 | struct kvm_mips_interrupt *irq) |
436 | { | 435 | { |
437 | int intr = (int)irq->irq; | 436 | int intr = (int)irq->irq; |
438 | struct kvm_vcpu *dvcpu = NULL; | 437 | struct kvm_vcpu *dvcpu = NULL; |
@@ -459,23 +458,20 @@ kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_mips_interrupt *irq) | |||
459 | 458 | ||
460 | dvcpu->arch.wait = 0; | 459 | dvcpu->arch.wait = 0; |
461 | 460 | ||
462 | if (waitqueue_active(&dvcpu->wq)) { | 461 | if (waitqueue_active(&dvcpu->wq)) |
463 | wake_up_interruptible(&dvcpu->wq); | 462 | wake_up_interruptible(&dvcpu->wq); |
464 | } | ||
465 | 463 | ||
466 | return 0; | 464 | return 0; |
467 | } | 465 | } |
468 | 466 | ||
469 | int | 467 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
470 | kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 468 | struct kvm_mp_state *mp_state) |
471 | struct kvm_mp_state *mp_state) | ||
472 | { | 469 | { |
473 | return -ENOIOCTLCMD; | 470 | return -ENOIOCTLCMD; |
474 | } | 471 | } |
475 | 472 | ||
476 | int | 473 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
477 | kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | 474 | struct kvm_mp_state *mp_state) |
478 | struct kvm_mp_state *mp_state) | ||
479 | { | 475 | { |
480 | return -ENOIOCTLCMD; | 476 | return -ENOIOCTLCMD; |
481 | } | 477 | } |
@@ -632,10 +628,12 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | |||
632 | } | 628 | } |
633 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { | 629 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { |
634 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; | 630 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; |
631 | |||
635 | return put_user(v, uaddr64); | 632 | return put_user(v, uaddr64); |
636 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { | 633 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { |
637 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; | 634 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; |
638 | u32 v32 = (u32)v; | 635 | u32 v32 = (u32)v; |
636 | |||
639 | return put_user(v32, uaddr32); | 637 | return put_user(v32, uaddr32); |
640 | } else { | 638 | } else { |
641 | return -EINVAL; | 639 | return -EINVAL; |
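kvm_mips_get_reg() chooses the copy-out width from the size field encoded in the register id, as the reflowed hunk above shows. A user-space model of that dispatch; the KVM_REG_SIZE_* values follow the UAPI encoding (size in bits 55:52), and memcpy stands in for put_user():

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KVM_REG_SIZE_MASK       0x00f0000000000000ULL
#define KVM_REG_SIZE_U32        0x0020000000000000ULL
#define KVM_REG_SIZE_U64        0x0030000000000000ULL

/* Write v to the caller's buffer at the width the register id asks for. */
static int copy_reg_out(uint64_t id, uint64_t v, void *uaddr)
{
        if ((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
                memcpy(uaddr, &v, sizeof(v));
                return 0;
        } else if ((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
                uint32_t v32 = (uint32_t)v;

                memcpy(uaddr, &v32, sizeof(v32));
                return 0;
        }
        return -1;      /* -EINVAL in the hunk above */
}

int main(void)
{
        uint32_t out32 = 0;

        copy_reg_out(KVM_REG_SIZE_U32, 0x12345678, &out32);
        printf("%#x\n", out32);
        return 0;
}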
@@ -728,8 +726,8 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | |||
728 | return 0; | 726 | return 0; |
729 | } | 727 | } |
730 | 728 | ||
731 | long | 729 | long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, |
732 | kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | 730 | unsigned long arg) |
733 | { | 731 | { |
734 | struct kvm_vcpu *vcpu = filp->private_data; | 732 | struct kvm_vcpu *vcpu = filp->private_data; |
735 | void __user *argp = (void __user *)arg; | 733 | void __user *argp = (void __user *)arg; |
@@ -739,6 +737,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |||
739 | case KVM_SET_ONE_REG: | 737 | case KVM_SET_ONE_REG: |
740 | case KVM_GET_ONE_REG: { | 738 | case KVM_GET_ONE_REG: { |
741 | struct kvm_one_reg reg; | 739 | struct kvm_one_reg reg; |
740 | |||
742 | if (copy_from_user(®, argp, sizeof(reg))) | 741 | if (copy_from_user(®, argp, sizeof(reg))) |
743 | return -EFAULT; | 742 | return -EFAULT; |
744 | if (ioctl == KVM_SET_ONE_REG) | 743 | if (ioctl == KVM_SET_ONE_REG) |
@@ -773,6 +772,7 @@ kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |||
773 | case KVM_INTERRUPT: | 772 | case KVM_INTERRUPT: |
774 | { | 773 | { |
775 | struct kvm_mips_interrupt irq; | 774 | struct kvm_mips_interrupt irq; |
775 | |||
776 | r = -EFAULT; | 776 | r = -EFAULT; |
777 | if (copy_from_user(&irq, argp, sizeof(irq))) | 777 | if (copy_from_user(&irq, argp, sizeof(irq))) |
778 | goto out; | 778 | goto out; |
@@ -791,9 +791,7 @@ out: | |||
791 | return r; | 791 | return r; |
792 | } | 792 | } |
793 | 793 | ||
794 | /* | 794 | /* Get (and clear) the dirty memory log for a memory slot. */ |
795 | * Get (and clear) the dirty memory log for a memory slot. | ||
796 | */ | ||
797 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | 795 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) |
798 | { | 796 | { |
799 | struct kvm_memory_slot *memslot; | 797 | struct kvm_memory_slot *memslot; |
@@ -815,8 +813,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) | |||
815 | ga = memslot->base_gfn << PAGE_SHIFT; | 813 | ga = memslot->base_gfn << PAGE_SHIFT; |
816 | ga_end = ga + (memslot->npages << PAGE_SHIFT); | 814 | ga_end = ga + (memslot->npages << PAGE_SHIFT); |
817 | 815 | ||
818 | printk("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, | 816 | kvm_info("%s: dirty, ga: %#lx, ga_end %#lx\n", __func__, ga, |
819 | ga_end); | 817 | ga_end); |
820 | 818 | ||
821 | n = kvm_dirty_bitmap_bytes(memslot); | 819 | n = kvm_dirty_bitmap_bytes(memslot); |
822 | memset(memslot->dirty_bitmap, 0, n); | 820 | memset(memslot->dirty_bitmap, 0, n); |
@@ -843,16 +841,12 @@ long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | |||
843 | 841 | ||
844 | int kvm_arch_init(void *opaque) | 842 | int kvm_arch_init(void *opaque) |
845 | { | 843 | { |
846 | int ret; | ||
847 | |||
848 | if (kvm_mips_callbacks) { | 844 | if (kvm_mips_callbacks) { |
849 | kvm_err("kvm: module already exists\n"); | 845 | kvm_err("kvm: module already exists\n"); |
850 | return -EEXIST; | 846 | return -EEXIST; |
851 | } | 847 | } |
852 | 848 | ||
853 | ret = kvm_mips_emulation_init(&kvm_mips_callbacks); | 849 | return kvm_mips_emulation_init(&kvm_mips_callbacks); |
854 | |||
855 | return ret; | ||
856 | } | 850 | } |
857 | 851 | ||
858 | void kvm_arch_exit(void) | 852 | void kvm_arch_exit(void) |
@@ -860,14 +854,14 @@ void kvm_arch_exit(void) | |||
860 | kvm_mips_callbacks = NULL; | 854 | kvm_mips_callbacks = NULL; |
861 | } | 855 | } |
862 | 856 | ||
863 | int | 857 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
864 | kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 858 | struct kvm_sregs *sregs) |
865 | { | 859 | { |
866 | return -ENOIOCTLCMD; | 860 | return -ENOIOCTLCMD; |
867 | } | 861 | } |
868 | 862 | ||
869 | int | 863 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
870 | kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | 864 | struct kvm_sregs *sregs) |
871 | { | 865 | { |
872 | return -ENOIOCTLCMD; | 866 | return -ENOIOCTLCMD; |
873 | } | 867 | } |
@@ -923,24 +917,25 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) | |||
923 | if (!vcpu) | 917 | if (!vcpu) |
924 | return -1; | 918 | return -1; |
925 | 919 | ||
926 | printk("VCPU Register Dump:\n"); | 920 | kvm_debug("VCPU Register Dump:\n"); |
927 | printk("\tpc = 0x%08lx\n", vcpu->arch.pc); | 921 | kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); |
928 | printk("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); | 922 | kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); |
929 | 923 | ||
930 | for (i = 0; i < 32; i += 4) { | 924 | for (i = 0; i < 32; i += 4) { |
931 | printk("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, | 925 | kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, |
932 | vcpu->arch.gprs[i], | 926 | vcpu->arch.gprs[i], |
933 | vcpu->arch.gprs[i + 1], | 927 | vcpu->arch.gprs[i + 1], |
934 | vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); | 928 | vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); |
935 | } | 929 | } |
936 | printk("\thi: 0x%08lx\n", vcpu->arch.hi); | 930 | kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); |
937 | printk("\tlo: 0x%08lx\n", vcpu->arch.lo); | 931 | kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); |
938 | 932 | ||
939 | cop0 = vcpu->arch.cop0; | 933 | cop0 = vcpu->arch.cop0; |
940 | printk("\tStatus: 0x%08lx, Cause: 0x%08lx\n", | 934 | kvm_debug("\tStatus: 0x%08lx, Cause: 0x%08lx\n", |
941 | kvm_read_c0_guest_status(cop0), kvm_read_c0_guest_cause(cop0)); | 935 | kvm_read_c0_guest_status(cop0), |
936 | kvm_read_c0_guest_cause(cop0)); | ||
942 | 937 | ||
943 | printk("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); | 938 | kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); |
944 | 939 | ||
945 | return 0; | 940 | return 0; |
946 | } | 941 | } |
@@ -980,14 +975,11 @@ static void kvm_mips_comparecount_func(unsigned long data) | |||
980 | kvm_mips_callbacks->queue_timer_int(vcpu); | 975 | kvm_mips_callbacks->queue_timer_int(vcpu); |
981 | 976 | ||
982 | vcpu->arch.wait = 0; | 977 | vcpu->arch.wait = 0; |
983 | if (waitqueue_active(&vcpu->wq)) { | 978 | if (waitqueue_active(&vcpu->wq)) |
984 | wake_up_interruptible(&vcpu->wq); | 979 | wake_up_interruptible(&vcpu->wq); |
985 | } | ||
986 | } | 980 | } |
987 | 981 | ||
988 | /* | 982 | /* low level hrtimer wake routine */ |
989 | * low level hrtimer wake routine. | ||
990 | */ | ||
991 | static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) | 983 | static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) |
992 | { | 984 | { |
993 | struct kvm_vcpu *vcpu; | 985 | struct kvm_vcpu *vcpu; |
@@ -1008,11 +1000,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
1008 | 1000 | ||
1009 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | 1001 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) |
1010 | { | 1002 | { |
1011 | return; | ||
1012 | } | 1003 | } |
1013 | 1004 | ||
1014 | int | 1005 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
1015 | kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, struct kvm_translation *tr) | 1006 | struct kvm_translation *tr) |
1016 | { | 1007 | { |
1017 | return 0; | 1008 | return 0; |
1018 | } | 1009 | } |
@@ -1023,8 +1014,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
1023 | return kvm_mips_callbacks->vcpu_setup(vcpu); | 1014 | return kvm_mips_callbacks->vcpu_setup(vcpu); |
1024 | } | 1015 | } |
1025 | 1016 | ||
1026 | static | 1017 | static void kvm_mips_set_c0_status(void) |
1027 | void kvm_mips_set_c0_status(void) | ||
1028 | { | 1018 | { |
1029 | uint32_t status = read_c0_status(); | 1019 | uint32_t status = read_c0_status(); |
1030 | 1020 | ||
@@ -1054,7 +1044,10 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1054 | run->exit_reason = KVM_EXIT_UNKNOWN; | 1044 | run->exit_reason = KVM_EXIT_UNKNOWN; |
1055 | run->ready_for_interrupt_injection = 1; | 1045 | run->ready_for_interrupt_injection = 1; |
1056 | 1046 | ||
1057 | /* Set the appropriate status bits based on host CPU features, before we hit the scheduler */ | 1047 | /* |
1048 | * Set the appropriate status bits based on host CPU features, | ||
1049 | * before we hit the scheduler | ||
1050 | */ | ||
1058 | kvm_mips_set_c0_status(); | 1051 | kvm_mips_set_c0_status(); |
1059 | 1052 | ||
1060 | local_irq_enable(); | 1053 | local_irq_enable(); |
@@ -1062,7 +1055,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1062 | kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", | 1055 | kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", |
1063 | cause, opc, run, vcpu); | 1056 | cause, opc, run, vcpu); |
1064 | 1057 | ||
1065 | /* Do a privilege check, if in UM most of these exit conditions end up | 1058 | /* |
1059 | * Do a privilege check, if in UM most of these exit conditions end up | ||
1066 | * causing an exception to be delivered to the Guest Kernel | 1060 | * causing an exception to be delivered to the Guest Kernel |
1067 | */ | 1061 | */ |
1068 | er = kvm_mips_check_privilege(cause, opc, run, vcpu); | 1062 | er = kvm_mips_check_privilege(cause, opc, run, vcpu); |
@@ -1081,9 +1075,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1081 | ++vcpu->stat.int_exits; | 1075 | ++vcpu->stat.int_exits; |
1082 | trace_kvm_exit(vcpu, INT_EXITS); | 1076 | trace_kvm_exit(vcpu, INT_EXITS); |
1083 | 1077 | ||
1084 | if (need_resched()) { | 1078 | if (need_resched()) |
1085 | cond_resched(); | 1079 | cond_resched(); |
1086 | } | ||
1087 | 1080 | ||
1088 | ret = RESUME_GUEST; | 1081 | ret = RESUME_GUEST; |
1089 | break; | 1082 | break; |
@@ -1095,9 +1088,8 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1095 | trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); | 1088 | trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS); |
1096 | ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); | 1089 | ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); |
1097 | /* XXXKYMA: Might need to return to user space */ | 1090 | /* XXXKYMA: Might need to return to user space */ |
1098 | if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) { | 1091 | if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) |
1099 | ret = RESUME_HOST; | 1092 | ret = RESUME_HOST; |
1100 | } | ||
1101 | break; | 1093 | break; |
1102 | 1094 | ||
1103 | case T_TLB_MOD: | 1095 | case T_TLB_MOD: |
@@ -1107,10 +1099,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1107 | break; | 1099 | break; |
1108 | 1100 | ||
1109 | case T_TLB_ST_MISS: | 1101 | case T_TLB_ST_MISS: |
1110 | kvm_debug | 1102 | kvm_debug("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", |
1111 | ("TLB ST fault: cause %#x, status %#lx, PC: %p, BadVaddr: %#lx\n", | 1103 | cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, |
1112 | cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, | 1104 | badvaddr); |
1113 | badvaddr); | ||
1114 | 1105 | ||
1115 | ++vcpu->stat.tlbmiss_st_exits; | 1106 | ++vcpu->stat.tlbmiss_st_exits; |
1116 | trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); | 1107 | trace_kvm_exit(vcpu, TLBMISS_ST_EXITS); |
@@ -1157,10 +1148,9 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1157 | break; | 1148 | break; |
1158 | 1149 | ||
1159 | default: | 1150 | default: |
1160 | kvm_err | 1151 | kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", |
1161 | ("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#lx\n", | 1152 | exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, |
1162 | exccode, opc, kvm_get_inst(opc, vcpu), badvaddr, | 1153 | kvm_read_c0_guest_status(vcpu->arch.cop0)); |
1163 | kvm_read_c0_guest_status(vcpu->arch.cop0)); | ||
1164 | kvm_arch_vcpu_dump_regs(vcpu); | 1154 | kvm_arch_vcpu_dump_regs(vcpu); |
1165 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 1155 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
1166 | ret = RESUME_HOST; | 1156 | ret = RESUME_HOST; |
@@ -1175,7 +1165,7 @@ skip_emul: | |||
1175 | kvm_mips_deliver_interrupts(vcpu, cause); | 1165 | kvm_mips_deliver_interrupts(vcpu, cause); |
1176 | 1166 | ||
1177 | if (!(ret & RESUME_HOST)) { | 1167 | if (!(ret & RESUME_HOST)) { |
1178 | /* Only check for signals if not already exiting to userspace */ | 1168 | /* Only check for signals if not already exiting to userspace */ |
1179 | if (signal_pending(current)) { | 1169 | if (signal_pending(current)) { |
1180 | run->exit_reason = KVM_EXIT_INTR; | 1170 | run->exit_reason = KVM_EXIT_INTR; |
1181 | ret = (-EINTR << 2) | RESUME_HOST; | 1171 | ret = (-EINTR << 2) | RESUME_HOST; |
@@ -1196,11 +1186,13 @@ int __init kvm_mips_init(void) | |||
1196 | if (ret) | 1186 | if (ret) |
1197 | return ret; | 1187 | return ret; |
1198 | 1188 | ||
1199 | /* On MIPS, kernel modules are executed from "mapped space", which requires TLBs. | 1189 | /* |
1200 | * The TLB handling code is statically linked with the rest of the kernel (kvm_tlb.c) | 1190 | * On MIPS, kernel modules are executed from "mapped space", which |
1201 | * to avoid the possibility of double faulting. The issue is that the TLB code | 1191 | * requires TLBs. The TLB handling code is statically linked with |
1202 | * references routines that are part of the KVM module, | 1192 | * the rest of the kernel (tlb.c) to avoid the possibility of |
1203 | * which are only available once the module is loaded. | 1193 | * double faulting. The issue is that the TLB code references |
1194 | * routines that are part of the the KVM module, which are only | ||
1195 | * available once the module is loaded. | ||
1204 | */ | 1196 | */ |
1205 | kvm_mips_gfn_to_pfn = gfn_to_pfn; | 1197 | kvm_mips_gfn_to_pfn = gfn_to_pfn; |
1206 | kvm_mips_release_pfn_clean = kvm_release_pfn_clean; | 1198 | kvm_mips_release_pfn_clean = kvm_release_pfn_clean; |
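The comment above motivates the function-pointer indirection set up in kvm_mips_init(): the statically linked TLB code must not call module symbols directly, so the module fills in pointers at load time. A minimal sketch of that pattern with simplified types (illustrative, not the kernel's exact declarations):

    /* Sketch: builtin code calls the module through a pointer filled at init. */
    typedef unsigned long pfn_t;
    typedef unsigned long gfn_t;
    struct kvm;

    /* Lives in the statically linked tlb.c; NULL until the KVM module loads. */
    pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);

    /* Module init resolves the indirection once its symbols exist. */
    static void wire_up_tlb_helpers(pfn_t (*mod_gfn_to_pfn)(struct kvm *, gfn_t))
    {
            kvm_mips_gfn_to_pfn = mod_gfn_to_pfn;
    }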
diff --git a/arch/mips/kvm/opcode.h b/arch/mips/kvm/opcode.h new file mode 100644 index 000000000000..03a6ae84c7df --- /dev/null +++ b/arch/mips/kvm/opcode.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
8 | */ | ||
9 | |||
10 | /* Define opcode values not defined in <asm/inst.h> */ | ||
11 | |||
12 | #ifndef __KVM_MIPS_OPCODE_H__ | ||
13 | #define __KVM_MIPS_OPCODE_H__ | ||
14 | |||
15 | /* COP0 Ops */ | ||
16 | #define mfmcz_op 0x0b /* 01011 */ | ||
17 | #define wrpgpr_op 0x0e /* 01110 */ | ||
18 | |||
19 | /* COP0 opcodes (only if COP0 and CO=1): */ | ||
20 | #define wait_op 0x20 /* 100000 */ | ||
21 | |||
22 | #endif /* __KVM_MIPS_OPCODE_H__ */ | ||
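For orientation, these values land in standard MIPS32 instruction fields: mfmcz_op and wrpgpr_op are rs-field (bits 25:21) selectors of a COP0 instruction, while wait_op is a function-field (bits 5:0) value that only applies when the CO bit is set. A hedged decode sketch (field layout assumed from the base ISA, not taken from this patch):

    #include <stdint.h>

    #define mfmcz_op  0x0b
    #define wrpgpr_op 0x0e
    #define wait_op   0x20

    /* Return non-zero if 'inst' is a COP0 WAIT instruction. */
    static int is_wait_insn(uint32_t inst)
    {
            uint32_t major = (inst >> 26) & 0x3f;   /* bits 31:26: 0x10 = COP0 */
            uint32_t co    = (inst >> 25) & 0x1;    /* CO bit */
            uint32_t funct = inst & 0x3f;           /* bits 5:0 */

            return major == 0x10 && co && funct == wait_op;
    }

    /* Return non-zero if 'inst' is an MFMC0 (di/ei-style) instruction. */
    static int is_mfmcz_insn(uint32_t inst)
    {
            return ((inst >> 26) & 0x3f) == 0x10 &&
                   ((inst >> 21) & 0x1f) == mfmcz_op;   /* rs field */
    }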
diff --git a/arch/mips/kvm/kvm_mips_stats.c b/arch/mips/kvm/stats.c index 075904bcac1b..a74d6024c5ad 100644 --- a/arch/mips/kvm/kvm_mips_stats.c +++ b/arch/mips/kvm/stats.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: COP0 access histogram | 6 | * KVM/MIPS: COP0 access histogram |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/kvm_host.h> | 12 | #include <linux/kvm_host.h> |
13 | 13 | ||
@@ -63,20 +63,18 @@ char *kvm_cop0_str[N_MIPS_COPROC_REGS] = { | |||
63 | "DESAVE" | 63 | "DESAVE" |
64 | }; | 64 | }; |
65 | 65 | ||
66 | int kvm_mips_dump_stats(struct kvm_vcpu *vcpu) | 66 | void kvm_mips_dump_stats(struct kvm_vcpu *vcpu) |
67 | { | 67 | { |
68 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS | 68 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS |
69 | int i, j; | 69 | int i, j; |
70 | 70 | ||
71 | printk("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); | 71 | kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); |
72 | for (i = 0; i < N_MIPS_COPROC_REGS; i++) { | 72 | for (i = 0; i < N_MIPS_COPROC_REGS; i++) { |
73 | for (j = 0; j < N_MIPS_COPROC_SEL; j++) { | 73 | for (j = 0; j < N_MIPS_COPROC_SEL; j++) { |
74 | if (vcpu->arch.cop0->stat[i][j]) | 74 | if (vcpu->arch.cop0->stat[i][j]) |
75 | printk("%s[%d]: %lu\n", kvm_cop0_str[i], j, | 75 | kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j, |
76 | vcpu->arch.cop0->stat[i][j]); | 76 | vcpu->arch.cop0->stat[i][j]); |
77 | } | 77 | } |
78 | } | 78 | } |
79 | #endif | 79 | #endif |
80 | |||
81 | return 0; | ||
82 | } | 80 | } |
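The histogram being dumped is a simple 2-D counter array indexed by COP0 register number and select field. A condensed sketch of that layout (the dimensions below are assumptions standing in for the kernel's N_MIPS_COPROC_* constants):

    #define N_MIPS_COPROC_REGS 32
    #define N_MIPS_COPROC_SEL   8

    static unsigned long cop0_stat[N_MIPS_COPROC_REGS][N_MIPS_COPROC_SEL];

    /* Bump the counter for (reg, sel) on each emulated COP0 access. */
    static void cop0_stat_hit(unsigned int reg, unsigned int sel)
    {
            if (reg < N_MIPS_COPROC_REGS && sel < N_MIPS_COPROC_SEL)
                    ++cop0_stat[reg][sel];
    }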
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/tlb.c index 8a5a700ad8de..bbcd82242059 100644 --- a/arch/mips/kvm/kvm_tlb.c +++ b/arch/mips/kvm/tlb.c | |||
@@ -1,14 +1,14 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that | 6 | * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that |
7 | * TLB handlers run from KSEG0 | 7 | * TLB handlers run from KSEG0 |
8 | * | 8 | * |
9 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 9 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
10 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 10 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/kvm_host.h> | 18 | #include <linux/kvm_host.h> |
19 | #include <linux/srcu.h> | 19 | #include <linux/srcu.h> |
20 | 20 | ||
21 | |||
22 | #include <asm/cpu.h> | 21 | #include <asm/cpu.h> |
23 | #include <asm/bootinfo.h> | 22 | #include <asm/bootinfo.h> |
24 | #include <asm/mmu_context.h> | 23 | #include <asm/mmu_context.h> |
@@ -39,13 +38,13 @@ atomic_t kvm_mips_instance; | |||
39 | EXPORT_SYMBOL(kvm_mips_instance); | 38 | EXPORT_SYMBOL(kvm_mips_instance); |
40 | 39 | ||
41 | /* These function pointers are initialized once the KVM module is loaded */ | 40 | /* These function pointers are initialized once the KVM module is loaded */ |
42 | pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn); | 41 | pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn); |
43 | EXPORT_SYMBOL(kvm_mips_gfn_to_pfn); | 42 | EXPORT_SYMBOL(kvm_mips_gfn_to_pfn); |
44 | 43 | ||
45 | void (*kvm_mips_release_pfn_clean) (pfn_t pfn); | 44 | void (*kvm_mips_release_pfn_clean)(pfn_t pfn); |
46 | EXPORT_SYMBOL(kvm_mips_release_pfn_clean); | 45 | EXPORT_SYMBOL(kvm_mips_release_pfn_clean); |
47 | 46 | ||
48 | bool(*kvm_mips_is_error_pfn) (pfn_t pfn); | 47 | bool (*kvm_mips_is_error_pfn)(pfn_t pfn); |
49 | EXPORT_SYMBOL(kvm_mips_is_error_pfn); | 48 | EXPORT_SYMBOL(kvm_mips_is_error_pfn); |
50 | 49 | ||
51 | uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) | 50 | uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) |
@@ -53,21 +52,17 @@ uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) | |||
53 | return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; | 52 | return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK; |
54 | } | 53 | } |
55 | 54 | ||
56 | |||
57 | uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) | 55 | uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) |
58 | { | 56 | { |
59 | return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; | 57 | return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK; |
60 | } | 58 | } |
61 | 59 | ||
62 | inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu) | 60 | inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu) |
63 | { | 61 | { |
64 | return vcpu->kvm->arch.commpage_tlb; | 62 | return vcpu->kvm->arch.commpage_tlb; |
65 | } | 63 | } |
66 | 64 | ||
67 | 65 | /* Structure defining a TLB entry data set. */ | |
68 | /* | ||
69 | * Structure defining a TLB entry data set. | ||
70 | */ | ||
71 | 66 | ||
72 | void kvm_mips_dump_host_tlbs(void) | 67 | void kvm_mips_dump_host_tlbs(void) |
73 | { | 68 | { |
@@ -82,8 +77,8 @@ void kvm_mips_dump_host_tlbs(void) | |||
82 | old_entryhi = read_c0_entryhi(); | 77 | old_entryhi = read_c0_entryhi(); |
83 | old_pagemask = read_c0_pagemask(); | 78 | old_pagemask = read_c0_pagemask(); |
84 | 79 | ||
85 | printk("HOST TLBs:\n"); | 80 | kvm_info("HOST TLBs:\n"); |
86 | printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); | 81 | kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK); |
87 | 82 | ||
88 | for (i = 0; i < current_cpu_data.tlbsize; i++) { | 83 | for (i = 0; i < current_cpu_data.tlbsize; i++) { |
89 | write_c0_index(i); | 84 | write_c0_index(i); |
@@ -97,25 +92,26 @@ void kvm_mips_dump_host_tlbs(void) | |||
97 | tlb.tlb_lo1 = read_c0_entrylo1(); | 92 | tlb.tlb_lo1 = read_c0_entrylo1(); |
98 | tlb.tlb_mask = read_c0_pagemask(); | 93 | tlb.tlb_mask = read_c0_pagemask(); |
99 | 94 | ||
100 | printk("TLB%c%3d Hi 0x%08lx ", | 95 | kvm_info("TLB%c%3d Hi 0x%08lx ", |
101 | (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', | 96 | (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', |
102 | i, tlb.tlb_hi); | 97 | i, tlb.tlb_hi); |
103 | printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", | 98 | kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ", |
104 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), | 99 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), |
105 | (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', | 100 | (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', |
106 | (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', | 101 | (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', |
107 | (tlb.tlb_lo0 >> 3) & 7); | 102 | (tlb.tlb_lo0 >> 3) & 7); |
108 | printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", | 103 | kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", |
109 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), | 104 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), |
110 | (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', | 105 | (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', |
111 | (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', | 106 | (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', |
112 | (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); | 107 | (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); |
113 | } | 108 | } |
114 | write_c0_entryhi(old_entryhi); | 109 | write_c0_entryhi(old_entryhi); |
115 | write_c0_pagemask(old_pagemask); | 110 | write_c0_pagemask(old_pagemask); |
116 | mtc0_tlbw_hazard(); | 111 | mtc0_tlbw_hazard(); |
117 | local_irq_restore(flags); | 112 | local_irq_restore(flags); |
118 | } | 113 | } |
114 | EXPORT_SYMBOL(kvm_mips_dump_host_tlbs); | ||
119 | 115 | ||
120 | void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) | 116 | void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) |
121 | { | 117 | { |
@@ -123,26 +119,27 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) | |||
123 | struct kvm_mips_tlb tlb; | 119 | struct kvm_mips_tlb tlb; |
124 | int i; | 120 | int i; |
125 | 121 | ||
126 | printk("Guest TLBs:\n"); | 122 | kvm_info("Guest TLBs:\n"); |
127 | printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); | 123 | kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); |
128 | 124 | ||
129 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { | 125 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { |
130 | tlb = vcpu->arch.guest_tlb[i]; | 126 | tlb = vcpu->arch.guest_tlb[i]; |
131 | printk("TLB%c%3d Hi 0x%08lx ", | 127 | kvm_info("TLB%c%3d Hi 0x%08lx ", |
132 | (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', | 128 | (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*', |
133 | i, tlb.tlb_hi); | 129 | i, tlb.tlb_hi); |
134 | printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ", | 130 | kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ", |
135 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), | 131 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0), |
136 | (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', | 132 | (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ', |
137 | (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', | 133 | (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ', |
138 | (tlb.tlb_lo0 >> 3) & 7); | 134 | (tlb.tlb_lo0 >> 3) & 7); |
139 | printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", | 135 | kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n", |
140 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), | 136 | (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1), |
141 | (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', | 137 | (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ', |
142 | (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', | 138 | (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ', |
143 | (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); | 139 | (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask); |
144 | } | 140 | } |
145 | } | 141 | } |
142 | EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs); | ||
146 | 143 | ||
147 | static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) | 144 | static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) |
148 | { | 145 | { |
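The two dump routines above pick each EntryLo word apart by hand. The same decoding as a standalone sketch, using the MIPS3_PG_* bit positions visible in the code (G, V, D in bits 0-2; cache attribute in bits 5:3):

    #include <stdio.h>

    #define MIPS3_PG_G 0x1  /* global: ignore ASID on match */
    #define MIPS3_PG_V 0x2  /* valid */
    #define MIPS3_PG_D 0x4  /* dirty: writes permitted */

    static void dump_entrylo(unsigned long lo)
    {
            printf("%c%c attr %lx\n",
                   (lo & MIPS3_PG_D) ? 'D' : ' ',
                   (lo & MIPS3_PG_G) ? 'G' : ' ',
                   (lo >> 3) & 7);  /* cache coherency attribute */
    }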
@@ -152,7 +149,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn) | |||
152 | if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) | 149 | if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE) |
153 | return 0; | 150 | return 0; |
154 | 151 | ||
155 | srcu_idx = srcu_read_lock(&kvm->srcu); | 152 | srcu_idx = srcu_read_lock(&kvm->srcu); |
156 | pfn = kvm_mips_gfn_to_pfn(kvm, gfn); | 153 | pfn = kvm_mips_gfn_to_pfn(kvm, gfn); |
157 | 154 | ||
158 | if (kvm_mips_is_error_pfn(pfn)) { | 155 | if (kvm_mips_is_error_pfn(pfn)) { |
@@ -169,7 +166,7 @@ out: | |||
169 | 166 | ||
170 | /* Translate guest KSEG0 addresses to Host PA */ | 167 | /* Translate guest KSEG0 addresses to Host PA */ |
171 | unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, | 168 | unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, |
172 | unsigned long gva) | 169 | unsigned long gva) |
173 | { | 170 | { |
174 | gfn_t gfn; | 171 | gfn_t gfn; |
175 | uint32_t offset = gva & ~PAGE_MASK; | 172 | uint32_t offset = gva & ~PAGE_MASK; |
@@ -194,20 +191,20 @@ unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu, | |||
194 | 191 | ||
195 | return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; | 192 | return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset; |
196 | } | 193 | } |
194 | EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa); | ||
197 | 195 | ||
198 | /* XXXKYMA: Must be called with interrupts disabled */ | 196 | /* XXXKYMA: Must be called with interrupts disabled */ |
199 | /* set flush_dcache_mask == 0 if no dcache flush required */ | 197 | /* set flush_dcache_mask == 0 if no dcache flush required */ |
200 | int | 198 | int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, |
201 | kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, | 199 | unsigned long entrylo0, unsigned long entrylo1, |
202 | unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask) | 200 | int flush_dcache_mask) |
203 | { | 201 | { |
204 | unsigned long flags; | 202 | unsigned long flags; |
205 | unsigned long old_entryhi; | 203 | unsigned long old_entryhi; |
206 | volatile int idx; | 204 | int idx; |
207 | 205 | ||
208 | local_irq_save(flags); | 206 | local_irq_save(flags); |
209 | 207 | ||
210 | |||
211 | old_entryhi = read_c0_entryhi(); | 208 | old_entryhi = read_c0_entryhi(); |
212 | write_c0_entryhi(entryhi); | 209 | write_c0_entryhi(entryhi); |
213 | mtc0_tlbw_hazard(); | 210 | mtc0_tlbw_hazard(); |
@@ -240,12 +237,14 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, | |||
240 | if (flush_dcache_mask) { | 237 | if (flush_dcache_mask) { |
241 | if (entrylo0 & MIPS3_PG_V) { | 238 | if (entrylo0 & MIPS3_PG_V) { |
242 | ++vcpu->stat.flush_dcache_exits; | 239 | ++vcpu->stat.flush_dcache_exits; |
243 | flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask); | 240 | flush_data_cache_page((entryhi & VPN2_MASK) & |
241 | ~flush_dcache_mask); | ||
244 | } | 242 | } |
245 | if (entrylo1 & MIPS3_PG_V) { | 243 | if (entrylo1 & MIPS3_PG_V) { |
246 | ++vcpu->stat.flush_dcache_exits; | 244 | ++vcpu->stat.flush_dcache_exits; |
247 | flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) | | 245 | flush_data_cache_page(((entryhi & VPN2_MASK) & |
248 | (0x1 << PAGE_SHIFT)); | 246 | ~flush_dcache_mask) | |
247 | (0x1 << PAGE_SHIFT)); | ||
249 | } | 248 | } |
250 | } | 249 | } |
251 | 250 | ||
@@ -257,10 +256,9 @@ kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi, | |||
257 | return 0; | 256 | return 0; |
258 | } | 257 | } |
259 | 258 | ||
260 | |||
261 | /* XXXKYMA: Must be called with interrupts disabled */ | 259 | /* XXXKYMA: Must be called with interrupts disabled */ |
262 | int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, | 260 | int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, |
263 | struct kvm_vcpu *vcpu) | 261 | struct kvm_vcpu *vcpu) |
264 | { | 262 | { |
265 | gfn_t gfn; | 263 | gfn_t gfn; |
266 | pfn_t pfn0, pfn1; | 264 | pfn_t pfn0, pfn1; |
@@ -270,7 +268,6 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, | |||
270 | struct kvm *kvm = vcpu->kvm; | 268 | struct kvm *kvm = vcpu->kvm; |
271 | const int flush_dcache_mask = 0; | 269 | const int flush_dcache_mask = 0; |
272 | 270 | ||
273 | |||
274 | if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { | 271 | if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { |
275 | kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); | 272 | kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); |
276 | kvm_mips_dump_host_tlbs(); | 273 | kvm_mips_dump_host_tlbs(); |
@@ -302,14 +299,15 @@ int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, | |||
302 | } | 299 | } |
303 | 300 | ||
304 | entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu)); | 301 | entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu)); |
305 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | | 302 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | |
306 | (0x1 << 1); | 303 | (1 << 2) | (0x1 << 1); |
307 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | | 304 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | |
308 | (0x1 << 1); | 305 | (1 << 2) | (0x1 << 1); |
309 | 306 | ||
310 | return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, | 307 | return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, |
311 | flush_dcache_mask); | 308 | flush_dcache_mask); |
312 | } | 309 | } |
310 | EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault); | ||
313 | 311 | ||
314 | int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, | 312 | int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, |
315 | struct kvm_vcpu *vcpu) | 313 | struct kvm_vcpu *vcpu) |
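The literal (0x3 << 3) | (1 << 2) | (0x1 << 1) composition above packs cache attribute 3 (cacheable, noncoherent), the dirty bit, and the valid bit into an EntryLo value. The same composition with the fields named (macro names below are illustrative, not the kernel's):

    #define ENTRYLO_CCA_SHIFT 3             /* cache attribute, bits 5:3 */
    #define ENTRYLO_CCA_CACHEABLE 0x3
    #define ENTRYLO_D (1 << 2)              /* dirty */
    #define ENTRYLO_V (1 << 1)              /* valid */

    /* Cacheable, writable, valid mapping for an already-shifted TLB PFN. */
    static unsigned long mk_entrylo(unsigned long tlbpfn)
    {
            return tlbpfn | (ENTRYLO_CCA_CACHEABLE << ENTRYLO_CCA_SHIFT) |
                   ENTRYLO_D | ENTRYLO_V;
    }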
@@ -318,11 +316,10 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, | |||
318 | unsigned long flags, old_entryhi = 0, vaddr = 0; | 316 | unsigned long flags, old_entryhi = 0, vaddr = 0; |
319 | unsigned long entrylo0 = 0, entrylo1 = 0; | 317 | unsigned long entrylo0 = 0, entrylo1 = 0; |
320 | 318 | ||
321 | |||
322 | pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; | 319 | pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT; |
323 | pfn1 = 0; | 320 | pfn1 = 0; |
324 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) | | 321 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | |
325 | (0x1 << 1); | 322 | (1 << 2) | (0x1 << 1); |
326 | entrylo1 = 0; | 323 | entrylo1 = 0; |
327 | 324 | ||
328 | local_irq_save(flags); | 325 | local_irq_save(flags); |
@@ -341,9 +338,9 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, | |||
341 | mtc0_tlbw_hazard(); | 338 | mtc0_tlbw_hazard(); |
342 | tlbw_use_hazard(); | 339 | tlbw_use_hazard(); |
343 | 340 | ||
344 | kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", | 341 | kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n", |
345 | vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), | 342 | vcpu->arch.pc, read_c0_index(), read_c0_entryhi(), |
346 | read_c0_entrylo0(), read_c0_entrylo1()); | 343 | read_c0_entrylo0(), read_c0_entrylo1()); |
347 | 344 | ||
348 | /* Restore old ASID */ | 345 | /* Restore old ASID */ |
349 | write_c0_entryhi(old_entryhi); | 346 | write_c0_entryhi(old_entryhi); |
@@ -353,28 +350,33 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, | |||
353 | 350 | ||
354 | return 0; | 351 | return 0; |
355 | } | 352 | } |
353 | EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault); | ||
356 | 354 | ||
357 | int | 355 | int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, |
358 | kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, | 356 | struct kvm_mips_tlb *tlb, |
359 | struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1) | 357 | unsigned long *hpa0, |
358 | unsigned long *hpa1) | ||
360 | { | 359 | { |
361 | unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; | 360 | unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0; |
362 | struct kvm *kvm = vcpu->kvm; | 361 | struct kvm *kvm = vcpu->kvm; |
363 | pfn_t pfn0, pfn1; | 362 | pfn_t pfn0, pfn1; |
364 | 363 | ||
365 | |||
366 | if ((tlb->tlb_hi & VPN2_MASK) == 0) { | 364 | if ((tlb->tlb_hi & VPN2_MASK) == 0) { |
367 | pfn0 = 0; | 365 | pfn0 = 0; |
368 | pfn1 = 0; | 366 | pfn1 = 0; |
369 | } else { | 367 | } else { |
370 | if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0) | 368 | if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) |
369 | >> PAGE_SHIFT) < 0) | ||
371 | return -1; | 370 | return -1; |
372 | 371 | ||
373 | if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0) | 372 | if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) |
373 | >> PAGE_SHIFT) < 0) | ||
374 | return -1; | 374 | return -1; |
375 | 375 | ||
376 | pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT]; | 376 | pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) |
377 | pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT]; | 377 | >> PAGE_SHIFT]; |
378 | pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) | ||
379 | >> PAGE_SHIFT]; | ||
378 | } | 380 | } |
379 | 381 | ||
380 | if (hpa0) | 382 | if (hpa0) |
@@ -385,11 +387,12 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, | |||
385 | 387 | ||
386 | /* Get attributes from the Guest TLB */ | 388 | /* Get attributes from the Guest TLB */ |
387 | entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? | 389 | entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ? |
388 | kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu)); | 390 | kvm_mips_get_kernel_asid(vcpu) : |
391 | kvm_mips_get_user_asid(vcpu)); | ||
389 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | | 392 | entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | |
390 | (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); | 393 | (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V); |
391 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | | 394 | entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | |
392 | (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); | 395 | (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V); |
393 | 396 | ||
394 | kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, | 397 | kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, |
395 | tlb->tlb_lo0, tlb->tlb_lo1); | 398 | tlb->tlb_lo0, tlb->tlb_lo1); |
@@ -397,6 +400,7 @@ kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, | |||
397 | return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, | 400 | return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1, |
398 | tlb->tlb_mask); | 401 | tlb->tlb_mask); |
399 | } | 402 | } |
403 | EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault); | ||
400 | 404 | ||
401 | int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) | 405 | int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) |
402 | { | 406 | { |
@@ -404,10 +408,9 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) | |||
404 | int index = -1; | 408 | int index = -1; |
405 | struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; | 409 | struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; |
406 | 410 | ||
407 | |||
408 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { | 411 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { |
409 | if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) && | 412 | if (TLB_HI_VPN2_HIT(tlb[i], entryhi) && |
410 | (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) { | 413 | TLB_HI_ASID_HIT(tlb[i], entryhi)) { |
411 | index = i; | 414 | index = i; |
412 | break; | 415 | break; |
413 | } | 416 | } |
@@ -418,21 +421,23 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) | |||
418 | 421 | ||
419 | return index; | 422 | return index; |
420 | } | 423 | } |
424 | EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup); | ||
421 | 425 | ||
422 | int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) | 426 | int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) |
423 | { | 427 | { |
424 | unsigned long old_entryhi, flags; | 428 | unsigned long old_entryhi, flags; |
425 | volatile int idx; | 429 | int idx; |
426 | |||
427 | 430 | ||
428 | local_irq_save(flags); | 431 | local_irq_save(flags); |
429 | 432 | ||
430 | old_entryhi = read_c0_entryhi(); | 433 | old_entryhi = read_c0_entryhi(); |
431 | 434 | ||
432 | if (KVM_GUEST_KERNEL_MODE(vcpu)) | 435 | if (KVM_GUEST_KERNEL_MODE(vcpu)) |
433 | write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu)); | 436 | write_c0_entryhi((vaddr & VPN2_MASK) | |
437 | kvm_mips_get_kernel_asid(vcpu)); | ||
434 | else { | 438 | else { |
435 | write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); | 439 | write_c0_entryhi((vaddr & VPN2_MASK) | |
440 | kvm_mips_get_user_asid(vcpu)); | ||
436 | } | 441 | } |
437 | 442 | ||
438 | mtc0_tlbw_hazard(); | 443 | mtc0_tlbw_hazard(); |
@@ -452,6 +457,7 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr) | |||
452 | 457 | ||
453 | return idx; | 458 | return idx; |
454 | } | 459 | } |
460 | EXPORT_SYMBOL(kvm_mips_host_tlb_lookup); | ||
455 | 461 | ||
456 | int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) | 462 | int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) |
457 | { | 463 | { |
@@ -460,7 +466,6 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) | |||
460 | 466 | ||
461 | local_irq_save(flags); | 467 | local_irq_save(flags); |
462 | 468 | ||
463 | |||
464 | old_entryhi = read_c0_entryhi(); | 469 | old_entryhi = read_c0_entryhi(); |
465 | 470 | ||
466 | write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); | 471 | write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu)); |
@@ -499,8 +504,9 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) | |||
499 | 504 | ||
500 | return 0; | 505 | return 0; |
501 | } | 506 | } |
507 | EXPORT_SYMBOL(kvm_mips_host_tlb_inv); | ||
502 | 508 | ||
503 | /* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/ | 509 | /* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID */ |
504 | int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) | 510 | int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) |
505 | { | 511 | { |
506 | unsigned long flags, old_entryhi; | 512 | unsigned long flags, old_entryhi; |
@@ -510,7 +516,6 @@ int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index) | |||
510 | 516 | ||
511 | local_irq_save(flags); | 517 | local_irq_save(flags); |
512 | 518 | ||
513 | |||
514 | old_entryhi = read_c0_entryhi(); | 519 | old_entryhi = read_c0_entryhi(); |
515 | 520 | ||
516 | write_c0_entryhi(UNIQUE_ENTRYHI(index)); | 521 | write_c0_entryhi(UNIQUE_ENTRYHI(index)); |
@@ -546,7 +551,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) | |||
546 | int entry = 0; | 551 | int entry = 0; |
547 | int maxentry = current_cpu_data.tlbsize; | 552 | int maxentry = current_cpu_data.tlbsize; |
548 | 553 | ||
549 | |||
550 | local_irq_save(flags); | 554 | local_irq_save(flags); |
551 | 555 | ||
552 | old_entryhi = read_c0_entryhi(); | 556 | old_entryhi = read_c0_entryhi(); |
@@ -554,7 +558,6 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) | |||
554 | 558 | ||
555 | /* Blast 'em all away. */ | 559 | /* Blast 'em all away. */ |
556 | for (entry = 0; entry < maxentry; entry++) { | 560 | for (entry = 0; entry < maxentry; entry++) { |
557 | |||
558 | write_c0_index(entry); | 561 | write_c0_index(entry); |
559 | mtc0_tlbw_hazard(); | 562 | mtc0_tlbw_hazard(); |
560 | 563 | ||
@@ -565,9 +568,8 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) | |||
565 | entryhi = read_c0_entryhi(); | 568 | entryhi = read_c0_entryhi(); |
566 | 569 | ||
567 | /* Don't blow away guest kernel entries */ | 570 | /* Don't blow away guest kernel entries */ |
568 | if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) { | 571 | if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) |
569 | continue; | 572 | continue; |
570 | } | ||
571 | } | 573 | } |
572 | 574 | ||
573 | /* Make sure all entries differ. */ | 575 | /* Make sure all entries differ. */ |
@@ -591,17 +593,17 @@ void kvm_mips_flush_host_tlb(int skip_kseg0) | |||
591 | 593 | ||
592 | local_irq_restore(flags); | 594 | local_irq_restore(flags); |
593 | } | 595 | } |
596 | EXPORT_SYMBOL(kvm_mips_flush_host_tlb); | ||
594 | 597 | ||
595 | void | 598 | void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, |
596 | kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu, | 599 | struct kvm_vcpu *vcpu) |
597 | struct kvm_vcpu *vcpu) | ||
598 | { | 600 | { |
599 | unsigned long asid = asid_cache(cpu); | 601 | unsigned long asid = asid_cache(cpu); |
600 | 602 | ||
601 | if (!((asid += ASID_INC) & ASID_MASK)) { | 603 | asid += ASID_INC; |
602 | if (cpu_has_vtag_icache) { | 604 | if (!(asid & ASID_MASK)) { |
605 | if (cpu_has_vtag_icache) | ||
603 | flush_icache_all(); | 606 | flush_icache_all(); |
604 | } | ||
605 | 607 | ||
606 | kvm_local_flush_tlb_all(); /* start new asid cycle */ | 608 | kvm_local_flush_tlb_all(); /* start new asid cycle */ |
607 | 609 | ||
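The restructured wrap check above is the classic MIPS ASID allocator: ASIDs come from incrementing a per-CPU cache, and when the in-hardware ASID bits wrap to zero the TLB is flushed and a new cycle begins. A compact model of that policy (field widths assumed purely for illustration):

    #define ASID_INC  0x1
    #define ASID_MASK 0xff          /* illustrative 8-bit hardware ASID */

    static unsigned long asid_cache_model;

    static unsigned long model_next_asid(void)
    {
            unsigned long asid = asid_cache_model + ASID_INC;

            if (!(asid & ASID_MASK)) {
                    /* Hardware bits wrapped: the real kernel flushes the
                     * TLB here (kvm_local_flush_tlb_all) and skips ASID 0. */
                    asid += ASID_INC;
            }
            return asid_cache_model = asid;
    }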
@@ -639,6 +641,7 @@ void kvm_local_flush_tlb_all(void) | |||
639 | 641 | ||
640 | local_irq_restore(flags); | 642 | local_irq_restore(flags); |
641 | } | 643 | } |
644 | EXPORT_SYMBOL(kvm_local_flush_tlb_all); | ||
642 | 645 | ||
643 | /** | 646 | /** |
644 | * kvm_mips_migrate_count() - Migrate timer. | 647 | * kvm_mips_migrate_count() - Migrate timer. |
@@ -699,7 +702,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
699 | } | 702 | } |
700 | 703 | ||
701 | if (!newasid) { | 704 | if (!newasid) { |
702 | /* If we preempted while the guest was executing, then reload the pre-empted ASID */ | 705 | /* |
706 | * If we preempted while the guest was executing, then reload | ||
707 | * the pre-empted ASID | ||
708 | */ | ||
703 | if (current->flags & PF_VCPU) { | 709 | if (current->flags & PF_VCPU) { |
704 | write_c0_entryhi(vcpu->arch. | 710 | write_c0_entryhi(vcpu->arch. |
705 | preempt_entryhi & ASID_MASK); | 711 | preempt_entryhi & ASID_MASK); |
@@ -708,9 +714,10 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
708 | } else { | 714 | } else { |
709 | /* New ASIDs were allocated for the VM */ | 715 | /* New ASIDs were allocated for the VM */ |
710 | 716 | ||
711 | /* Were we in guest context? If so then the pre-empted ASID is no longer | 717 | /* |
712 | * valid; we need to set it to what it should be based on the mode of | 718 | * Were we in guest context? If so then the pre-empted ASID is |
713 | * the Guest (Kernel/User) | 719 | * no longer valid; we need to set it to what it should be based |
720 | * on the mode of the Guest (Kernel/User) | ||
714 | */ | 721 | */ |
715 | if (current->flags & PF_VCPU) { | 722 | if (current->flags & PF_VCPU) { |
716 | if (KVM_GUEST_KERNEL_MODE(vcpu)) | 723 | if (KVM_GUEST_KERNEL_MODE(vcpu)) |
@@ -728,6 +735,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
728 | local_irq_restore(flags); | 735 | local_irq_restore(flags); |
729 | 736 | ||
730 | } | 737 | } |
738 | EXPORT_SYMBOL(kvm_arch_vcpu_load); | ||
731 | 739 | ||
732 | /* ASID can change if another task is scheduled during preemption */ | 740 | /* ASID can change if another task is scheduled during preemption */ |
733 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 741 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
@@ -739,7 +747,6 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |||
739 | 747 | ||
740 | cpu = smp_processor_id(); | 748 | cpu = smp_processor_id(); |
741 | 749 | ||
742 | |||
743 | vcpu->arch.preempt_entryhi = read_c0_entryhi(); | 750 | vcpu->arch.preempt_entryhi = read_c0_entryhi(); |
744 | vcpu->arch.last_sched_cpu = cpu; | 751 | vcpu->arch.last_sched_cpu = cpu; |
745 | 752 | ||
@@ -754,11 +761,12 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | |||
754 | 761 | ||
755 | local_irq_restore(flags); | 762 | local_irq_restore(flags); |
756 | } | 763 | } |
764 | EXPORT_SYMBOL(kvm_arch_vcpu_put); | ||
757 | 765 | ||
758 | uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) | 766 | uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) |
759 | { | 767 | { |
760 | struct mips_coproc *cop0 = vcpu->arch.cop0; | 768 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
761 | unsigned long paddr, flags; | 769 | unsigned long paddr, flags, vpn2, asid; |
762 | uint32_t inst; | 770 | uint32_t inst; |
763 | int index; | 771 | int index; |
764 | 772 | ||
@@ -769,16 +777,12 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) | |||
769 | if (index >= 0) { | 777 | if (index >= 0) { |
770 | inst = *(opc); | 778 | inst = *(opc); |
771 | } else { | 779 | } else { |
772 | index = | 780 | vpn2 = (unsigned long) opc & VPN2_MASK; |
773 | kvm_mips_guest_tlb_lookup(vcpu, | 781 | asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK; |
774 | ((unsigned long) opc & VPN2_MASK) | 782 | index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid); |
775 | | | ||
776 | (kvm_read_c0_guest_entryhi | ||
777 | (cop0) & ASID_MASK)); | ||
778 | if (index < 0) { | 783 | if (index < 0) { |
779 | kvm_err | 784 | kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", |
780 | ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n", | 785 | __func__, opc, vcpu, read_c0_entryhi()); |
781 | __func__, opc, vcpu, read_c0_entryhi()); | ||
782 | kvm_mips_dump_host_tlbs(); | 786 | kvm_mips_dump_host_tlbs(); |
783 | local_irq_restore(flags); | 787 | local_irq_restore(flags); |
784 | return KVM_INVALID_INST; | 788 | return KVM_INVALID_INST; |
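The refactored slow path above probes the guest TLB with an EntryHi-style key: the VPN2 of the faulting PC OR'd with the guest's current ASID. Schematically (the mask values below assume 4K pages and an 8-bit ASID, purely for illustration):

    #define VPN2_MASK 0xffffe000UL  /* virtual double-page number, 4K pages */
    #define ASID_MASK 0x000000ffUL

    /* Key for kvm_mips_guest_tlb_lookup()-style probes. */
    static unsigned long guest_tlb_key(unsigned long opc,
                                       unsigned long guest_entryhi)
    {
            return (opc & VPN2_MASK) | (guest_entryhi & ASID_MASK);
    }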
@@ -793,7 +797,7 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) | |||
793 | } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { | 797 | } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) { |
794 | paddr = | 798 | paddr = |
795 | kvm_mips_translate_guest_kseg0_to_hpa(vcpu, | 799 | kvm_mips_translate_guest_kseg0_to_hpa(vcpu, |
796 | (unsigned long) opc); | 800 | (unsigned long) opc); |
797 | inst = *(uint32_t *) CKSEG0ADDR(paddr); | 801 | inst = *(uint32_t *) CKSEG0ADDR(paddr); |
798 | } else { | 802 | } else { |
799 | kvm_err("%s: illegal address: %p\n", __func__, opc); | 803 | kvm_err("%s: illegal address: %p\n", __func__, opc); |
@@ -802,18 +806,4 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu) | |||
802 | 806 | ||
803 | return inst; | 807 | return inst; |
804 | } | 808 | } |
805 | |||
806 | EXPORT_SYMBOL(kvm_local_flush_tlb_all); | ||
807 | EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault); | ||
808 | EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault); | ||
809 | EXPORT_SYMBOL(kvm_mips_dump_host_tlbs); | ||
810 | EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault); | ||
811 | EXPORT_SYMBOL(kvm_mips_host_tlb_lookup); | ||
812 | EXPORT_SYMBOL(kvm_mips_flush_host_tlb); | ||
813 | EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup); | ||
814 | EXPORT_SYMBOL(kvm_mips_host_tlb_inv); | ||
815 | EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa); | ||
816 | EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs); | ||
817 | EXPORT_SYMBOL(kvm_get_inst); | 809 | EXPORT_SYMBOL(kvm_get_inst); |
818 | EXPORT_SYMBOL(kvm_arch_vcpu_load); | ||
819 | EXPORT_SYMBOL(kvm_arch_vcpu_put); | ||
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h index bc9e0f406c08..c1388d40663b 100644 --- a/arch/mips/kvm/trace.h +++ b/arch/mips/kvm/trace.h | |||
@@ -1,11 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
7 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 7 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) | 10 | #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) |
11 | #define _TRACE_KVM_H | 11 | #define _TRACE_KVM_H |
@@ -17,9 +17,7 @@ | |||
17 | #define TRACE_INCLUDE_PATH . | 17 | #define TRACE_INCLUDE_PATH . |
18 | #define TRACE_INCLUDE_FILE trace | 18 | #define TRACE_INCLUDE_FILE trace |
19 | 19 | ||
20 | /* | 20 | /* Tracepoints for VM exits */ |
21 | * Tracepoints for VM exits | ||
22 | */ | ||
23 | extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES]; | 21 | extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES]; |
24 | 22 | ||
25 | TRACE_EVENT(kvm_exit, | 23 | TRACE_EVENT(kvm_exit, |
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/trap_emul.c index 693f952b2fbb..fd7257b70e65 100644 --- a/arch/mips/kvm/kvm_trap_emul.c +++ b/arch/mips/kvm/trap_emul.c | |||
@@ -1,13 +1,13 @@ | |||
1 | /* | 1 | /* |
2 | * This file is subject to the terms and conditions of the GNU General Public | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * License. See the file "COPYING" in the main directory of this archive | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * for more details. | 4 | * for more details. |
5 | * | 5 | * |
6 | * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel | 6 | * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel |
7 | * | 7 | * |
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
@@ -16,8 +16,8 @@ | |||
16 | 16 | ||
17 | #include <linux/kvm_host.h> | 17 | #include <linux/kvm_host.h> |
18 | 18 | ||
19 | #include "kvm_mips_opcode.h" | 19 | #include "opcode.h" |
20 | #include "kvm_mips_int.h" | 20 | #include "interrupt.h" |
21 | 21 | ||
22 | static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | 22 | static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) |
23 | { | 23 | { |
@@ -27,7 +27,7 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | |||
27 | if ((kseg == CKSEG0) || (kseg == CKSEG1)) | 27 | if ((kseg == CKSEG0) || (kseg == CKSEG1)) |
28 | gpa = CPHYSADDR(gva); | 28 | gpa = CPHYSADDR(gva); |
29 | else { | 29 | else { |
30 | printk("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); | 30 | kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); |
31 | kvm_mips_dump_host_tlbs(); | 31 | kvm_mips_dump_host_tlbs(); |
32 | gpa = KVM_INVALID_ADDR; | 32 | gpa = KVM_INVALID_ADDR; |
33 | } | 33 | } |
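CPHYSADDR() works here because CKSEG0/CKSEG1 are fixed, unmapped 512 MB windows onto low physical memory, so the GPA is just the GVA with the segment bits stripped. The arithmetic, per the standard MIPS32 memory map (constants assumed, not taken from the patch):

    #define CKSEG0 0x80000000UL     /* unmapped, cached */
    #define CKSEG1 0xa0000000UL     /* unmapped, uncached */

    /* GVA -> GPA for the unmapped kernel segments; ~0UL otherwise. */
    static unsigned long kseg_gva_to_gpa(unsigned long gva)
    {
            unsigned long kseg = gva & 0xe0000000UL;  /* top three bits */

            if (kseg == CKSEG0 || kseg == CKSEG1)
                    return gva & 0x1fffffffUL;        /* CPHYSADDR() */
            return ~0UL;
    }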
@@ -37,7 +37,6 @@ static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | |||
37 | return gpa; | 37 | return gpa; |
38 | } | 38 | } |
39 | 39 | ||
40 | |||
41 | static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) | 40 | static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) |
42 | { | 41 | { |
43 | struct kvm_run *run = vcpu->run; | 42 | struct kvm_run *run = vcpu->run; |
@@ -46,9 +45,9 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) | |||
46 | enum emulation_result er = EMULATE_DONE; | 45 | enum emulation_result er = EMULATE_DONE; |
47 | int ret = RESUME_GUEST; | 46 | int ret = RESUME_GUEST; |
48 | 47 | ||
49 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { | 48 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) |
50 | er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); | 49 | er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu); |
51 | } else | 50 | else |
52 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | 51 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
53 | 52 | ||
54 | switch (er) { | 53 | switch (er) { |
@@ -83,9 +82,8 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) | |||
83 | 82 | ||
84 | if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | 83 | if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 |
85 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | 84 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { |
86 | kvm_debug | 85 | kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", |
87 | ("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", | 86 | cause, opc, badvaddr); |
88 | cause, opc, badvaddr); | ||
89 | er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); | 87 | er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu); |
90 | 88 | ||
91 | if (er == EMULATE_DONE) | 89 | if (er == EMULATE_DONE) |
@@ -95,20 +93,20 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) | |||
95 | ret = RESUME_HOST; | 93 | ret = RESUME_HOST; |
96 | } | 94 | } |
97 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | 95 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { |
98 | /* XXXKYMA: The guest kernel does not expect to get this fault when we are not | 96 | /* |
99 | * using HIGHMEM. Need to address this in a HIGHMEM kernel | 97 | * XXXKYMA: The guest kernel does not expect to get this fault |
98 | * when we are not using HIGHMEM. Need to address this in a | ||
99 | * HIGHMEM kernel | ||
100 | */ | 100 | */ |
101 | printk | 101 | kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", |
102 | ("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n", | 102 | cause, opc, badvaddr); |
103 | cause, opc, badvaddr); | ||
104 | kvm_mips_dump_host_tlbs(); | 103 | kvm_mips_dump_host_tlbs(); |
105 | kvm_arch_vcpu_dump_regs(vcpu); | 104 | kvm_arch_vcpu_dump_regs(vcpu); |
106 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 105 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
107 | ret = RESUME_HOST; | 106 | ret = RESUME_HOST; |
108 | } else { | 107 | } else { |
109 | printk | 108 | kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", |
110 | ("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", | 109 | cause, opc, badvaddr); |
111 | cause, opc, badvaddr); | ||
112 | kvm_mips_dump_host_tlbs(); | 110 | kvm_mips_dump_host_tlbs(); |
113 | kvm_arch_vcpu_dump_regs(vcpu); | 111 | kvm_arch_vcpu_dump_regs(vcpu); |
114 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 112 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
@@ -134,9 +132,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) | |||
134 | } | 132 | } |
135 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | 133 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 |
136 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | 134 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { |
137 | kvm_debug | 135 | kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", |
138 | ("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n", | 136 | cause, opc, badvaddr); |
139 | cause, opc, badvaddr); | ||
140 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); | 137 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); |
141 | if (er == EMULATE_DONE) | 138 | if (er == EMULATE_DONE) |
142 | ret = RESUME_GUEST; | 139 | ret = RESUME_GUEST; |
@@ -145,8 +142,9 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) | |||
145 | ret = RESUME_HOST; | 142 | ret = RESUME_HOST; |
146 | } | 143 | } |
147 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | 144 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { |
148 | /* All KSEG0 faults are handled by KVM, as the guest kernel does not | 145 | /* |
149 | * expect to ever get them | 146 | * All KSEG0 faults are handled by KVM, as the guest kernel does |
147 | * not expect to ever get them | ||
150 | */ | 148 | */ |
151 | if (kvm_mips_handle_kseg0_tlb_fault | 149 | if (kvm_mips_handle_kseg0_tlb_fault |
152 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { | 150 | (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) { |
@@ -154,9 +152,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) | |||
154 | ret = RESUME_HOST; | 152 | ret = RESUME_HOST; |
155 | } | 153 | } |
156 | } else { | 154 | } else { |
157 | kvm_err | 155 | kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", |
158 | ("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", | 156 | cause, opc, badvaddr); |
159 | cause, opc, badvaddr); | ||
160 | kvm_mips_dump_host_tlbs(); | 157 | kvm_mips_dump_host_tlbs(); |
161 | kvm_arch_vcpu_dump_regs(vcpu); | 158 | kvm_arch_vcpu_dump_regs(vcpu); |
162 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 159 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
@@ -185,11 +182,14 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) | |||
185 | kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", | 182 | kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n", |
186 | vcpu->arch.pc, badvaddr); | 183 | vcpu->arch.pc, badvaddr); |
187 | 184 | ||
188 | /* User Address (UA) fault, this could happen if | 185 | /* |
189 | * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this | 186 | * User Address (UA) fault, this could happen if |
190 | * case we pass on the fault to the guest kernel and let it handle it. | 187 | * (1) TLB entry not present/valid in both Guest and shadow host |
191 | * (2) TLB entry is present in the Guest TLB but not in the shadow, in this | 188 | * TLBs, in this case we pass on the fault to the guest |
192 | * case we inject the TLB from the Guest TLB into the shadow host TLB | 189 | * kernel and let it handle it. |
190 | * (2) TLB entry is present in the Guest TLB but not in the | ||
191 | * shadow, in this case we inject the TLB from the Guest TLB | ||
192 | * into the shadow host TLB | ||
193 | */ | 193 | */ |
194 | 194 | ||
195 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); | 195 | er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu); |
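The two numbered cases in the comment above reduce to one decision: does the guest TLB hold a matching entry? A schematic of that dispatch (the helper and enum below are illustrative):

    enum ua_action { DELIVER_TO_GUEST, REFILL_SHADOW_TLB };

    /* guest_tlb_index < 0 means the probe of the guest TLB missed. */
    static enum ua_action classify_ua_fault(int guest_tlb_index)
    {
            if (guest_tlb_index < 0)
                    return DELIVER_TO_GUEST;        /* case (1) */
            return REFILL_SHADOW_TLB;               /* case (2) */
    }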
@@ -206,9 +206,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) | |||
206 | ret = RESUME_HOST; | 206 | ret = RESUME_HOST; |
207 | } | 207 | } |
208 | } else { | 208 | } else { |
209 | printk | 209 | kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", |
210 | ("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n", | 210 | cause, opc, badvaddr); |
211 | cause, opc, badvaddr); | ||
212 | kvm_mips_dump_host_tlbs(); | 211 | kvm_mips_dump_host_tlbs(); |
213 | kvm_arch_vcpu_dump_regs(vcpu); | 212 | kvm_arch_vcpu_dump_regs(vcpu); |
214 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 213 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
@@ -231,7 +230,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) | |||
231 | kvm_debug("Emulate Store to MMIO space\n"); | 230 | kvm_debug("Emulate Store to MMIO space\n"); |
232 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | 231 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
233 | if (er == EMULATE_FAIL) { | 232 | if (er == EMULATE_FAIL) { |
234 | printk("Emulate Store to MMIO space failed\n"); | 233 | kvm_err("Emulate Store to MMIO space failed\n"); |
235 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 234 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
236 | ret = RESUME_HOST; | 235 | ret = RESUME_HOST; |
237 | } else { | 236 | } else { |
@@ -239,9 +238,8 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) | |||
239 | ret = RESUME_HOST; | 238 | ret = RESUME_HOST; |
240 | } | 239 | } |
241 | } else { | 240 | } else { |
242 | printk | 241 | kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", |
243 | ("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n", | 242 | cause, opc, badvaddr); |
244 | cause, opc, badvaddr); | ||
245 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 243 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
246 | ret = RESUME_HOST; | 244 | ret = RESUME_HOST; |
247 | } | 245 | } |
@@ -261,7 +259,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) | |||
261 | kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); | 259 | kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr); |
262 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); | 260 | er = kvm_mips_emulate_inst(cause, opc, run, vcpu); |
263 | if (er == EMULATE_FAIL) { | 261 | if (er == EMULATE_FAIL) { |
264 | printk("Emulate Load from MMIO space failed\n"); | 262 | kvm_err("Emulate Load from MMIO space failed\n"); |
265 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 263 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
266 | ret = RESUME_HOST; | 264 | ret = RESUME_HOST; |
267 | } else { | 265 | } else { |
@@ -269,9 +267,8 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) | |||
269 | ret = RESUME_HOST; | 267 | ret = RESUME_HOST; |
270 | } | 268 | } |
271 | } else { | 269 | } else { |
272 | printk | 270 | kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", |
273 | ("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n", | 271 | cause, opc, badvaddr); |
274 | cause, opc, badvaddr); | ||
275 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | 272 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
276 | ret = RESUME_HOST; | 273 | ret = RESUME_HOST; |
277 | er = EMULATE_FAIL; | 274 | er = EMULATE_FAIL; |
@@ -349,9 +346,9 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | |||
349 | uint32_t config1; | 346 | uint32_t config1; |
350 | int vcpu_id = vcpu->vcpu_id; | 347 | int vcpu_id = vcpu->vcpu_id; |
351 | 348 | ||
352 | /* Arch specific stuff, set up config registers properly so that the | 349 | /* |
353 | * guest will come up as expected, for now we simulate a | 350 | * Arch specific stuff, set up config registers properly so that the |
354 | * MIPS 24kc | 351 | * guest will come up as expected, for now we simulate a MIPS 24kc |
355 | */ | 352 | */ |
356 | kvm_write_c0_guest_prid(cop0, 0x00019300); | 353 | kvm_write_c0_guest_prid(cop0, 0x00019300); |
357 | kvm_write_c0_guest_config(cop0, | 354 | kvm_write_c0_guest_config(cop0, |
@@ -373,14 +370,15 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | |||
373 | 370 | ||
374 | kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); | 371 | kvm_write_c0_guest_config2(cop0, MIPS_CONFIG2); |
375 | /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ | 372 | /* MIPS_CONFIG2 | (read_c0_config2() & 0xfff) */ |
376 | kvm_write_c0_guest_config3(cop0, | 373 | kvm_write_c0_guest_config3(cop0, MIPS_CONFIG3 | (0 << CP0C3_VInt) | |
377 | MIPS_CONFIG3 | (0 << CP0C3_VInt) | (1 << | 374 | (1 << CP0C3_ULRI)); |
378 | CP0C3_ULRI)); | ||
379 | 375 | ||
380 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ | 376 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ |
381 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); | 377 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); |
382 | 378 | ||
383 | /* Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) */ | 379 | /* |
380 | * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5) | ||
381 | */ | ||
384 | kvm_write_c0_guest_intctl(cop0, 0xFC000000); | 382 | kvm_write_c0_guest_intctl(cop0, 0xFC000000); |
385 | 383 | ||
386 | /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ | 384 | /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ |
diff --git a/arch/mn10300/include/asm/processor.h b/arch/mn10300/include/asm/processor.h index 8b80b19d0c8a..769d5ed8e992 100644 --- a/arch/mn10300/include/asm/processor.h +++ b/arch/mn10300/include/asm/processor.h | |||
@@ -68,7 +68,9 @@ extern struct mn10300_cpuinfo cpu_data[]; | |||
68 | extern void identify_cpu(struct mn10300_cpuinfo *); | 68 | extern void identify_cpu(struct mn10300_cpuinfo *); |
69 | extern void print_cpu_info(struct mn10300_cpuinfo *); | 69 | extern void print_cpu_info(struct mn10300_cpuinfo *); |
70 | extern void dodgy_tsc(void); | 70 | extern void dodgy_tsc(void); |
71 | |||
71 | #define cpu_relax() barrier() | 72 | #define cpu_relax() barrier() |
73 | #define cpu_relax_lowlatency() cpu_relax() | ||
72 | 74 | ||
73 | /* | 75 | /* |
74 | * User space process size: 1.75GB (default). | 76 | * User space process size: 1.75GB (default). |
diff --git a/arch/openrisc/include/asm/processor.h b/arch/openrisc/include/asm/processor.h index cab746fa9e87..4d235e3d2534 100644 --- a/arch/openrisc/include/asm/processor.h +++ b/arch/openrisc/include/asm/processor.h | |||
@@ -101,6 +101,7 @@ extern unsigned long thread_saved_pc(struct task_struct *t); | |||
101 | #define init_stack (init_thread_union.stack) | 101 | #define init_stack (init_thread_union.stack) |
102 | 102 | ||
103 | #define cpu_relax() barrier() | 103 | #define cpu_relax() barrier() |
104 | #define cpu_relax_lowlatency() cpu_relax() | ||
104 | 105 | ||
105 | #endif /* __ASSEMBLY__ */ | 106 | #endif /* __ASSEMBLY__ */ |
106 | #endif /* __ASM_OPENRISC_PROCESSOR_H */ | 107 | #endif /* __ASM_OPENRISC_PROCESSOR_H */ |
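The mn10300 and openrisc hunks above follow the same pattern as the other architectures in this series: cpu_relax_lowlatency() is the variant that generic spin-wait loops (e.g. optimistic mutex spinning) call, and on most architectures it simply aliases cpu_relax(). A sketch of the intended call site (the caller below is hypothetical, not from this patch):

    #ifndef cpu_relax_lowlatency
    #define cpu_relax_lowlatency() do { } while (0) /* stand-in outside the kernel */
    #endif

    /* Busy-wait of the kind the new macro is meant for. */
    static inline void spin_until_clear(volatile int *flag)
    {
            while (*flag)
                    cpu_relax_lowlatency();         /* ease off shared resources */
    }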
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 108d48e652af..6e75e2030927 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -6,7 +6,6 @@ config PARISC | |||
6 | select HAVE_OPROFILE | 6 | select HAVE_OPROFILE |
7 | select HAVE_FUNCTION_TRACER if 64BIT | 7 | select HAVE_FUNCTION_TRACER if 64BIT |
8 | select HAVE_FUNCTION_GRAPH_TRACER if 64BIT | 8 | select HAVE_FUNCTION_GRAPH_TRACER if 64BIT |
9 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST if 64BIT | ||
10 | select ARCH_WANT_FRAME_POINTERS | 9 | select ARCH_WANT_FRAME_POINTERS |
11 | select RTC_CLASS | 10 | select RTC_CLASS |
12 | select RTC_DRV_GENERIC | 11 | select RTC_DRV_GENERIC |
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h index d951c9681ab3..689a8ade3606 100644 --- a/arch/parisc/include/asm/processor.h +++ b/arch/parisc/include/asm/processor.h | |||
@@ -338,6 +338,7 @@ extern unsigned long get_wchan(struct task_struct *p); | |||
338 | #define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30]) | 338 | #define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30]) |
339 | 339 | ||
340 | #define cpu_relax() barrier() | 340 | #define cpu_relax() barrier() |
341 | #define cpu_relax_lowlatency() cpu_relax() | ||
341 | 342 | ||
342 | /* Used as a macro to identify the combined VIPT/PIPT cached | 343 | /* Used as a macro to identify the combined VIPT/PIPT cached |
343 | * CPUs which require a guarantee of coherency (no inequivalent | 344 | * CPUs which require a guarantee of coherency (no inequivalent |
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h index a2fa297196bc..f5645d6a89f2 100644 --- a/arch/parisc/include/uapi/asm/signal.h +++ b/arch/parisc/include/uapi/asm/signal.h | |||
@@ -69,8 +69,6 @@ | |||
69 | #define SA_NOMASK SA_NODEFER | 69 | #define SA_NOMASK SA_NODEFER |
70 | #define SA_ONESHOT SA_RESETHAND | 70 | #define SA_ONESHOT SA_RESETHAND |
71 | 71 | ||
72 | #define SA_RESTORER 0x04000000 /* obsolete -- ignored */ | ||
73 | |||
74 | #define MINSIGSTKSZ 2048 | 72 | #define MINSIGSTKSZ 2048 |
75 | #define SIGSTKSZ 8192 | 73 | #define SIGSTKSZ 8192 |
76 | 74 | ||
diff --git a/arch/parisc/kernel/ftrace.c b/arch/parisc/kernel/ftrace.c index 5beb97bafbb1..559d400f9385 100644 --- a/arch/parisc/kernel/ftrace.c +++ b/arch/parisc/kernel/ftrace.c | |||
@@ -112,6 +112,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
112 | unsigned long long calltime; | 112 | unsigned long long calltime; |
113 | struct ftrace_graph_ent trace; | 113 | struct ftrace_graph_ent trace; |
114 | 114 | ||
115 | if (unlikely(ftrace_graph_is_dead())) | ||
116 | return; | ||
117 | |||
115 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 118 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
116 | return; | 119 | return; |
117 | 120 | ||
@@ -152,9 +155,6 @@ void ftrace_function_trampoline(unsigned long parent, | |||
152 | { | 155 | { |
153 | extern ftrace_func_t ftrace_trace_function; | 156 | extern ftrace_func_t ftrace_trace_function; |
154 | 157 | ||
155 | if (function_trace_stop) | ||
156 | return; | ||
157 | |||
158 | if (ftrace_trace_function != ftrace_stub) { | 158 | if (ftrace_trace_function != ftrace_stub) { |
159 | ftrace_trace_function(parent, self_addr); | 159 | ftrace_trace_function(parent, self_addr); |
160 | return; | 160 | return; |
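The two parisc ftrace hunks above pair up: the trampoline drops its global function_trace_stop gate (the flag is removed from the core in this release), and the graph-entry hook instead bails out via ftrace_graph_is_dead() once the graph tracer has shut itself down after an internal error. A sketch of the resulting hook shape, with the actual return-address rewrite elided:

void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	/* graph tracer killed itself after an error: stop hooking */
	if (unlikely(ftrace_graph_is_dead()))
		return;

	/* per-task pause flag, as before */
	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/* ... record *parent and redirect it to return_to_handler ... */
}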
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index ae085ad0fba0..0bef864264c0 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
@@ -728,7 +728,6 @@ static void __init pagetable_init(void) | |||
728 | #endif | 728 | #endif |
729 | 729 | ||
730 | empty_zero_page = alloc_bootmem_pages(PAGE_SIZE); | 730 | empty_zero_page = alloc_bootmem_pages(PAGE_SIZE); |
731 | memset(empty_zero_page, 0, PAGE_SIZE); | ||
732 | } | 731 | } |
733 | 732 | ||
734 | static void __init gateway_init(void) | 733 | static void __init gateway_init(void) |
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi index f75b4f820c3c..7d4a6a2354f4 100644 --- a/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi +++ b/arch/powerpc/boot/dts/fsl/qoriq-sec6.0-0.dtsi | |||
@@ -32,7 +32,8 @@ | |||
32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 32 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
33 | */ | 33 | */ |
34 | 34 | ||
35 | compatible = "fsl,sec-v6.0"; | 35 | compatible = "fsl,sec-v6.0", "fsl,sec-v5.0", |
36 | "fsl,sec-v4.0"; | ||
36 | fsl,sec-era = <6>; | 37 | fsl,sec-era = <6>; |
37 | #address-cells = <1>; | 38 | #address-cells = <1>; |
38 | #size-cells = <1>; | 39 | #size-cells = <1>; |
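Appending the older strings works because OF device matching compares a driver's of_device_id table against each entry of the node's compatible list, so a driver that only knows the v4.0 binding can still bind this backward-compatible SEC 6.0 block. A minimal sketch of the driver side (table shape as in the CAAM driver; names illustrative):

#include <linux/mod_devicetable.h>

/* Driver advertising only the oldest binding it understands; the
 * node above now reaches it through its "fsl,sec-v4.0" fallback. */
static const struct of_device_id sec_match[] = {
	{ .compatible = "fsl,sec-v4.0" },
	{ /* sentinel */ },
};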
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index bc2347774f0a..0fdd7eece6d9 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h | |||
@@ -447,6 +447,7 @@ extern const char *powerpc_base_platform; | |||
447 | CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ | 447 | CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \ |
448 | CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP) | 448 | CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP) |
449 | #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG) | 449 | #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG) |
450 | #define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL) | ||
450 | #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 451 | #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
451 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 452 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
452 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 453 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h index fddb72b48ce9..d645428a65a4 100644 --- a/arch/powerpc/include/asm/kvm_book3s_64.h +++ b/arch/powerpc/include/asm/kvm_book3s_64.h | |||
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, | |||
198 | return rb; | 198 | return rb; |
199 | } | 199 | } |
200 | 200 | ||
201 | static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) | 201 | static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l, |
202 | bool is_base_size) | ||
202 | { | 203 | { |
204 | |||
203 | int size, a_psize; | 205 | int size, a_psize; |
204 | /* Look at the 8 bit LP value */ | 206 | /* Look at the 8 bit LP value */ |
205 | unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1); | 207 | unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1); |
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) | |||
214 | continue; | 216 | continue; |
215 | 217 | ||
216 | a_psize = __hpte_actual_psize(lp, size); | 218 | a_psize = __hpte_actual_psize(lp, size); |
217 | if (a_psize != -1) | 219 | if (a_psize != -1) { |
220 | if (is_base_size) | ||
221 | return 1ul << mmu_psize_defs[size].shift; | ||
218 | return 1ul << mmu_psize_defs[a_psize].shift; | 222 | return 1ul << mmu_psize_defs[a_psize].shift; |
223 | } | ||
219 | } | 224 | } |
220 | 225 | ||
221 | } | 226 | } |
222 | return 0; | 227 | return 0; |
223 | } | 228 | } |
224 | 229 | ||
230 | static inline unsigned long hpte_page_size(unsigned long h, unsigned long l) | ||
231 | { | ||
232 | return __hpte_page_size(h, l, 0); | ||
233 | } | ||
234 | |||
235 | static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l) | ||
236 | { | ||
237 | return __hpte_page_size(h, l, 1); | ||
238 | } | ||
239 | |||
225 | static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize) | 240 | static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize) |
226 | { | 241 | { |
227 | return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT; | 242 | return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT; |
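The split above exists because of mixed page-size segments (MPSS): a segment's base page size and the actual page size backing a particular HPTE can differ, and some callers (the VRMA setup and HPTE lookup changed below) specifically need the base size. A toy userspace model of the selector, with the real LP-bit decoding elided and only illustrative shift values:

#include <stdio.h>

/* 4K, 64K, 16M -- the usual powerpc page-size shifts */
static const int psize_shift[] = { 12, 16, 24 };

/* mirrors the is_base_size switch in __hpte_page_size() */
static unsigned long hpte_size(int base, int actual, int is_base)
{
	return 1ul << psize_shift[is_base ? base : actual];
}

int main(void)
{
	/* MPSS example: 4K base segment backed by a 64K page */
	printf("actual: %lu\n", hpte_size(0, 1, 0));	/* 65536 */
	printf("base:   %lu\n", hpte_size(0, 1, 1));	/* 4096 */
	return 0;
}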
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index 807014dde821..c2b4dcf23d03 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h | |||
@@ -22,6 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | #include <asm/pgtable-ppc64.h> | 23 | #include <asm/pgtable-ppc64.h> |
24 | #include <asm/bug.h> | 24 | #include <asm/bug.h> |
25 | #include <asm/processor.h> | ||
25 | 26 | ||
26 | /* | 27 | /* |
27 | * Segment table | 28 | * Segment table |
@@ -496,7 +497,7 @@ extern void slb_set_size(u16 size); | |||
496 | */ | 497 | */ |
497 | struct subpage_prot_table { | 498 | struct subpage_prot_table { |
498 | unsigned long maxaddr; /* only addresses < this are protected */ | 499 | unsigned long maxaddr; /* only addresses < this are protected */ |
499 | unsigned int **protptrs[2]; | 500 | unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)]; |
500 | unsigned int *low_prot[4]; | 501 | unsigned int *low_prot[4]; |
501 | }; | 502 | }; |
502 | 503 | ||
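The new bound is straight arithmetic: each protptrs[] slot covers 1UL << 43 bytes of user address space, so sizing the array as TASK_SIZE_USER64 >> 43 tracks the real limit instead of hard-coding two slots (which only reach 16TB). A quick check, assuming the 64TB TASK_SIZE_USER64 of this kernel generation:

#include <assert.h>

int main(void)
{
	unsigned long task_size = 1ul << 46;	/* 64TB, assumed limit */

	/* 2 hard-coded slots covered only 2 << 43 bytes = 16TB */
	assert((task_size >> 43) == 8);		/* slots actually needed */
	return 0;
}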
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 9ea266eae33e..7e4612528546 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h | |||
@@ -277,6 +277,8 @@ n: | |||
277 | .globl n; \ | 277 | .globl n; \ |
278 | n: | 278 | n: |
279 | 279 | ||
280 | #define _GLOBAL_TOC(name) _GLOBAL(name) | ||
281 | |||
280 | #define _KPROBE(n) \ | 282 | #define _KPROBE(n) \ |
281 | .section ".kprobes.text","a"; \ | 283 | .section ".kprobes.text","a"; \ |
282 | .globl n; \ | 284 | .globl n; \ |
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 6d59072e13a7..dda7ac4c80bd 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h | |||
@@ -400,6 +400,8 @@ static inline unsigned long __pack_fe01(unsigned int fpmode) | |||
400 | #define cpu_relax() barrier() | 400 | #define cpu_relax() barrier() |
401 | #endif | 401 | #endif |
402 | 402 | ||
403 | #define cpu_relax_lowlatency() cpu_relax() | ||
404 | |||
403 | /* Check that a certain kernel stack pointer is valid in task_struct p */ | 405 | /* Check that a certain kernel stack pointer is valid in task_struct p */ |
404 | int validate_sp(unsigned long sp, struct task_struct *p, | 406 | int validate_sp(unsigned long sp, struct task_struct *p, |
405 | unsigned long nbytes); | 407 | unsigned long nbytes); |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 965291b4c2fa..0c157642c2a1 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
527 | .machine_check_early = __machine_check_early_realmode_p8, | 527 | .machine_check_early = __machine_check_early_realmode_p8, |
528 | .platform = "power8", | 528 | .platform = "power8", |
529 | }, | 529 | }, |
530 | { /* Power8 DD1: Does not support doorbell IPIs */ | ||
531 | .pvr_mask = 0xffffff00, | ||
532 | .pvr_value = 0x004d0100, | ||
533 | .cpu_name = "POWER8 (raw)", | ||
534 | .cpu_features = CPU_FTRS_POWER8_DD1, | ||
535 | .cpu_user_features = COMMON_USER_POWER8, | ||
536 | .cpu_user_features2 = COMMON_USER2_POWER8, | ||
537 | .mmu_features = MMU_FTRS_POWER8, | ||
538 | .icache_bsize = 128, | ||
539 | .dcache_bsize = 128, | ||
540 | .num_pmcs = 6, | ||
541 | .pmc_type = PPC_PMC_IBM, | ||
542 | .oprofile_cpu_type = "ppc64/power8", | ||
543 | .oprofile_type = PPC_OPROFILE_INVALID, | ||
544 | .cpu_setup = __setup_cpu_power8, | ||
545 | .cpu_restore = __restore_cpu_power8, | ||
546 | .flush_tlb = __flush_tlb_power8, | ||
547 | .machine_check_early = __machine_check_early_realmode_p8, | ||
548 | .platform = "power8", | ||
549 | }, | ||
530 | { /* Power8 */ | 550 | { /* Power8 */ |
531 | .pvr_mask = 0xffff0000, | 551 | .pvr_mask = 0xffff0000, |
532 | .pvr_value = 0x004d0000, | 552 | .pvr_value = 0x004d0000, |
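The new DD1 entry relies on mask specificity and table order: the cputable scan returns the first entry whose (pvr & pvr_mask) == pvr_value, so the narrower 0xffffff00 DD1 match must sit before the generic 0xffff0000 POWER8 entry. A self-contained sketch of that matching rule, using the PVR values from the hunk:

#include <stdio.h>
#include <stdint.h>

struct spec { uint32_t mask, value; const char *name; };

static const struct spec specs[] = {
	{ 0xffffff00, 0x004d0100, "POWER8 DD1 (no doorbell IPIs)" },
	{ 0xffff0000, 0x004d0000, "POWER8" },
};

/* first match wins, exactly like the cputable scan */
static const char *identify(uint32_t pvr)
{
	for (unsigned int i = 0; i < sizeof(specs) / sizeof(specs[0]); i++)
		if ((pvr & specs[i].mask) == specs[i].value)
			return specs[i].name;
	return "unknown";
}

int main(void)
{
	printf("%s\n", identify(0x004d0100));	/* hits the DD1 entry */
	printf("%s\n", identify(0x004d0200));	/* falls through to POWER8 */
	return 0;
}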
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index d178834fe508..390311c0f03d 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c | |||
@@ -525,6 +525,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
525 | struct ftrace_graph_ent trace; | 525 | struct ftrace_graph_ent trace; |
526 | unsigned long return_hooker = (unsigned long)&return_to_handler; | 526 | unsigned long return_hooker = (unsigned long)&return_to_handler; |
527 | 527 | ||
528 | if (unlikely(ftrace_graph_is_dead())) | ||
529 | return; | ||
530 | |||
528 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 531 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
529 | return; | 532 | return; |
530 | 533 | ||
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index b49c72fd7f16..b2814e23e1ed 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -123,21 +123,12 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus, | |||
123 | 123 | ||
124 | void pcibios_reset_secondary_bus(struct pci_dev *dev) | 124 | void pcibios_reset_secondary_bus(struct pci_dev *dev) |
125 | { | 125 | { |
126 | u16 ctrl; | ||
127 | |||
128 | if (ppc_md.pcibios_reset_secondary_bus) { | 126 | if (ppc_md.pcibios_reset_secondary_bus) { |
129 | ppc_md.pcibios_reset_secondary_bus(dev); | 127 | ppc_md.pcibios_reset_secondary_bus(dev); |
130 | return; | 128 | return; |
131 | } | 129 | } |
132 | 130 | ||
133 | pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl); | 131 | pci_reset_secondary_bus(dev); |
134 | ctrl |= PCI_BRIDGE_CTL_BUS_RESET; | ||
135 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); | ||
136 | msleep(2); | ||
137 | |||
138 | ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET; | ||
139 | pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl); | ||
140 | ssleep(1); | ||
141 | } | 132 | } |
142 | 133 | ||
143 | static resource_size_t pcibios_io_size(const struct pci_controller *hose) | 134 | static resource_size_t pcibios_io_size(const struct pci_controller *hose) |
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 658e89d2025b..db2b482af658 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c | |||
@@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type) | |||
611 | for (f = flist; f; f = next) { | 611 | for (f = flist; f; f = next) { |
612 | /* Translate data addrs to absolute */ | 612 | /* Translate data addrs to absolute */ |
613 | for (i = 0; i < f->num_blocks; i++) { | 613 | for (i = 0; i < f->num_blocks; i++) { |
614 | f->blocks[i].data = (char *)__pa(f->blocks[i].data); | 614 | f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data)); |
615 | image_size += f->blocks[i].length; | 615 | image_size += f->blocks[i].length; |
616 | f->blocks[i].length = cpu_to_be64(f->blocks[i].length); | ||
616 | } | 617 | } |
617 | next = f->next; | 618 | next = f->next; |
618 | /* Don't translate NULL pointer for last entry */ | 619 | /* Don't translate NULL pointer for last entry */ |
619 | if (f->next) | 620 | if (f->next) |
620 | f->next = (struct flash_block_list *)__pa(f->next); | 621 | f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next)); |
621 | else | 622 | else |
622 | f->next = NULL; | 623 | f->next = NULL; |
623 | /* make num_blocks into the version/length field */ | 624 | /* make num_blocks into the version/length field */ |
624 | f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16); | 625 | f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16); |
626 | f->num_blocks = cpu_to_be64(f->num_blocks); | ||
625 | } | 627 | } |
626 | 628 | ||
627 | printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size); | 629 | printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size); |
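The added cpu_to_be64() calls exist because RTAS parses the flash block list in big-endian byte order, so on a little-endian kernel every pointer and length must be stored swapped. A userspace model of the conversion (on big-endian the real macro is an identity; the builtin here always swaps):

#include <stdio.h>
#include <stdint.h>

/* little-endian-host model of cpu_to_be64() */
static uint64_t to_be64(uint64_t v)
{
	return __builtin_bswap64(v);
}

int main(void)
{
	uint64_t phys = 0x12345678ull;	/* stand-in physical address */

	/* stored bytes now read most-significant first, as RTAS expects */
	printf("%016llx\n", (unsigned long long)to_be64(phys));
	/* prints 7856341200000000 */
	return 0;
}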
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 80561074078d..68468d695f12 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c | |||
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf, | |||
1562 | goto out; | 1562 | goto out; |
1563 | } | 1563 | } |
1564 | if (!rma_setup && is_vrma_hpte(v)) { | 1564 | if (!rma_setup && is_vrma_hpte(v)) { |
1565 | unsigned long psize = hpte_page_size(v, r); | 1565 | unsigned long psize = hpte_base_page_size(v, r); |
1566 | unsigned long senc = slb_pgsize_encoding(psize); | 1566 | unsigned long senc = slb_pgsize_encoding(psize); |
1567 | unsigned long lpcr; | 1567 | unsigned long lpcr; |
1568 | 1568 | ||
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 6e6224318c36..5a24d3c2b6b8 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c | |||
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, | |||
814 | r = hpte[i+1]; | 814 | r = hpte[i+1]; |
815 | 815 | ||
816 | /* | 816 | /* |
817 | * Check the HPTE again, including large page size | 817 | * Check the HPTE again, including base page size |
818 | * Since we don't currently allow any MPSS (mixed | ||
819 | * page-size segment) page sizes, it is sufficient | ||
820 | * to check against the actual page size. | ||
821 | */ | 818 | */ |
822 | if ((v & valid) && (v & mask) == val && | 819 | if ((v & valid) && (v & mask) == val && |
823 | hpte_page_size(v, r) == (1ul << pshift)) | 820 | hpte_base_page_size(v, r) == (1ul << pshift)) |
824 | /* Return with the HPTE still locked */ | 821 | /* Return with the HPTE still locked */ |
825 | return (hash << 3) + (i >> 1); | 822 | return (hash << 3) + (i >> 1); |
826 | 823 | ||
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 868347ef09fd..558a67df8126 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S | |||
@@ -48,7 +48,7 @@ | |||
48 | * | 48 | * |
49 | * LR = return address to continue at after eventually re-enabling MMU | 49 | * LR = return address to continue at after eventually re-enabling MMU |
50 | */ | 50 | */ |
51 | _GLOBAL(kvmppc_hv_entry_trampoline) | 51 | _GLOBAL_TOC(kvmppc_hv_entry_trampoline) |
52 | mflr r0 | 52 | mflr r0 |
53 | std r0, PPC_LR_STKOFF(r1) | 53 | std r0, PPC_LR_STKOFF(r1) |
54 | stdu r1, -112(r1) | 54 | stdu r1, -112(r1) |
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S index e2c29e381dc7..d044b8b7c69d 100644 --- a/arch/powerpc/kvm/book3s_interrupts.S +++ b/arch/powerpc/kvm/book3s_interrupts.S | |||
@@ -25,7 +25,11 @@ | |||
25 | #include <asm/exception-64s.h> | 25 | #include <asm/exception-64s.h> |
26 | 26 | ||
27 | #if defined(CONFIG_PPC_BOOK3S_64) | 27 | #if defined(CONFIG_PPC_BOOK3S_64) |
28 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | ||
29 | #define FUNC(name) name | ||
30 | #else | ||
28 | #define FUNC(name) GLUE(.,name) | 31 | #define FUNC(name) GLUE(.,name) |
32 | #endif | ||
29 | #define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU | 33 | #define GET_SHADOW_VCPU(reg) addi reg, r13, PACA_SVCPU |
30 | 34 | ||
31 | #elif defined(CONFIG_PPC_BOOK3S_32) | 35 | #elif defined(CONFIG_PPC_BOOK3S_32) |
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 9eec675220e6..16c4d88ba27d 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S | |||
@@ -36,7 +36,11 @@ | |||
36 | 36 | ||
37 | #if defined(CONFIG_PPC_BOOK3S_64) | 37 | #if defined(CONFIG_PPC_BOOK3S_64) |
38 | 38 | ||
39 | #if defined(_CALL_ELF) && _CALL_ELF == 2 | ||
40 | #define FUNC(name) name | ||
41 | #else | ||
39 | #define FUNC(name) GLUE(.,name) | 42 | #define FUNC(name) GLUE(.,name) |
43 | #endif | ||
40 | 44 | ||
41 | #elif defined(CONFIG_PPC_BOOK3S_32) | 45 | #elif defined(CONFIG_PPC_BOOK3S_32) |
42 | 46 | ||
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins: | |||
146 | * On entry, r4 contains the guest shadow MSR | 150 | * On entry, r4 contains the guest shadow MSR |
147 | * MSR.EE has to be 0 when calling this function | 151 | * MSR.EE has to be 0 when calling this function |
148 | */ | 152 | */ |
149 | _GLOBAL(kvmppc_entry_trampoline) | 153 | _GLOBAL_TOC(kvmppc_entry_trampoline) |
150 | mfmsr r5 | 154 | mfmsr r5 |
151 | LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter) | 155 | LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter) |
152 | toreal(r7) | 156 | toreal(r7) |
diff --git a/arch/powerpc/kvm/book3s_rtas.c b/arch/powerpc/kvm/book3s_rtas.c index edb14ba992b3..ef27fbd5d9c5 100644 --- a/arch/powerpc/kvm/book3s_rtas.c +++ b/arch/powerpc/kvm/book3s_rtas.c | |||
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) | |||
23 | u32 irq, server, priority; | 23 | u32 irq, server, priority; |
24 | int rc; | 24 | int rc; |
25 | 25 | ||
26 | if (args->nargs != 3 || args->nret != 1) { | 26 | if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) { |
27 | rc = -3; | 27 | rc = -3; |
28 | goto out; | 28 | goto out; |
29 | } | 29 | } |
30 | 30 | ||
31 | irq = args->args[0]; | 31 | irq = be32_to_cpu(args->args[0]); |
32 | server = args->args[1]; | 32 | server = be32_to_cpu(args->args[1]); |
33 | priority = args->args[2]; | 33 | priority = be32_to_cpu(args->args[2]); |
34 | 34 | ||
35 | rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); | 35 | rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority); |
36 | if (rc) | 36 | if (rc) |
37 | rc = -3; | 37 | rc = -3; |
38 | out: | 38 | out: |
39 | args->rets[0] = rc; | 39 | args->rets[0] = cpu_to_be32(rc); |
40 | } | 40 | } |
41 | 41 | ||
42 | static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) | 42 | static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) |
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) | |||
44 | u32 irq, server, priority; | 44 | u32 irq, server, priority; |
45 | int rc; | 45 | int rc; |
46 | 46 | ||
47 | if (args->nargs != 1 || args->nret != 3) { | 47 | if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) { |
48 | rc = -3; | 48 | rc = -3; |
49 | goto out; | 49 | goto out; |
50 | } | 50 | } |
51 | 51 | ||
52 | irq = args->args[0]; | 52 | irq = be32_to_cpu(args->args[0]); |
53 | 53 | ||
54 | server = priority = 0; | 54 | server = priority = 0; |
55 | rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); | 55 | rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority); |
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args) | |||
58 | goto out; | 58 | goto out; |
59 | } | 59 | } |
60 | 60 | ||
61 | args->rets[1] = server; | 61 | args->rets[1] = cpu_to_be32(server); |
62 | args->rets[2] = priority; | 62 | args->rets[2] = cpu_to_be32(priority); |
63 | out: | 63 | out: |
64 | args->rets[0] = rc; | 64 | args->rets[0] = cpu_to_be32(rc); |
65 | } | 65 | } |
66 | 66 | ||
67 | static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args) | 67 | static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args) |
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args) | |||
69 | u32 irq; | 69 | u32 irq; |
70 | int rc; | 70 | int rc; |
71 | 71 | ||
72 | if (args->nargs != 1 || args->nret != 1) { | 72 | if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) { |
73 | rc = -3; | 73 | rc = -3; |
74 | goto out; | 74 | goto out; |
75 | } | 75 | } |
76 | 76 | ||
77 | irq = args->args[0]; | 77 | irq = be32_to_cpu(args->args[0]); |
78 | 78 | ||
79 | rc = kvmppc_xics_int_off(vcpu->kvm, irq); | 79 | rc = kvmppc_xics_int_off(vcpu->kvm, irq); |
80 | if (rc) | 80 | if (rc) |
81 | rc = -3; | 81 | rc = -3; |
82 | out: | 82 | out: |
83 | args->rets[0] = rc; | 83 | args->rets[0] = cpu_to_be32(rc); |
84 | } | 84 | } |
85 | 85 | ||
86 | static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) | 86 | static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) |
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args) | |||
88 | u32 irq; | 88 | u32 irq; |
89 | int rc; | 89 | int rc; |
90 | 90 | ||
91 | if (args->nargs != 1 || args->nret != 1) { | 91 | if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) { |
92 | rc = -3; | 92 | rc = -3; |
93 | goto out; | 93 | goto out; |
94 | } | 94 | } |
95 | 95 | ||
96 | irq = args->args[0]; | 96 | irq = be32_to_cpu(args->args[0]); |
97 | 97 | ||
98 | rc = kvmppc_xics_int_on(vcpu->kvm, irq); | 98 | rc = kvmppc_xics_int_on(vcpu->kvm, irq); |
99 | if (rc) | 99 | if (rc) |
100 | rc = -3; | 100 | rc = -3; |
101 | out: | 101 | out: |
102 | args->rets[0] = rc; | 102 | args->rets[0] = cpu_to_be32(rc); |
103 | } | 103 | } |
104 | #endif /* CONFIG_KVM_XICS */ | 104 | #endif /* CONFIG_KVM_XICS */ |
105 | 105 | ||
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp) | |||
205 | return rc; | 205 | return rc; |
206 | } | 206 | } |
207 | 207 | ||
208 | static void kvmppc_rtas_swap_endian_in(struct rtas_args *args) | ||
209 | { | ||
210 | #ifdef __LITTLE_ENDIAN__ | ||
211 | int i; | ||
212 | |||
213 | args->token = be32_to_cpu(args->token); | ||
214 | args->nargs = be32_to_cpu(args->nargs); | ||
215 | args->nret = be32_to_cpu(args->nret); | ||
216 | for (i = 0; i < args->nargs; i++) | ||
217 | args->args[i] = be32_to_cpu(args->args[i]); | ||
218 | #endif | ||
219 | } | ||
220 | |||
221 | static void kvmppc_rtas_swap_endian_out(struct rtas_args *args) | ||
222 | { | ||
223 | #ifdef __LITTLE_ENDIAN__ | ||
224 | int i; | ||
225 | |||
226 | for (i = 0; i < args->nret; i++) | ||
227 | args->args[i] = cpu_to_be32(args->args[i]); | ||
228 | args->token = cpu_to_be32(args->token); | ||
229 | args->nargs = cpu_to_be32(args->nargs); | ||
230 | args->nret = cpu_to_be32(args->nret); | ||
231 | #endif | ||
232 | } | ||
233 | |||
234 | int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) | 208 | int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) |
235 | { | 209 | { |
236 | struct rtas_token_definition *d; | 210 | struct rtas_token_definition *d; |
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) | |||
249 | if (rc) | 223 | if (rc) |
250 | goto fail; | 224 | goto fail; |
251 | 225 | ||
252 | kvmppc_rtas_swap_endian_in(&args); | ||
253 | |||
254 | /* | 226 | /* |
255 | * args->rets is a pointer into args->args. Now that we've | 227 | * args->rets is a pointer into args->args. Now that we've |
256 | * copied args we need to fix it up to point into our copy, | 228 | * copied args we need to fix it up to point into our copy, |
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) | |||
258 | * value so we can restore it on the way out. | 230 | * value so we can restore it on the way out. |
259 | */ | 231 | */ |
260 | orig_rets = args.rets; | 232 | orig_rets = args.rets; |
261 | args.rets = &args.args[args.nargs]; | 233 | args.rets = &args.args[be32_to_cpu(args.nargs)]; |
262 | 234 | ||
263 | mutex_lock(&vcpu->kvm->lock); | 235 | mutex_lock(&vcpu->kvm->lock); |
264 | 236 | ||
265 | rc = -ENOENT; | 237 | rc = -ENOENT; |
266 | list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { | 238 | list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) { |
267 | if (d->token == args.token) { | 239 | if (d->token == be32_to_cpu(args.token)) { |
268 | d->handler->handler(vcpu, &args); | 240 | d->handler->handler(vcpu, &args); |
269 | rc = 0; | 241 | rc = 0; |
270 | break; | 242 | break; |
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu) | |||
275 | 247 | ||
276 | if (rc == 0) { | 248 | if (rc == 0) { |
277 | args.rets = orig_rets; | 249 | args.rets = orig_rets; |
278 | kvmppc_rtas_swap_endian_out(&args); | ||
279 | rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args)); | 250 | rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args)); |
280 | if (rc) | 251 | if (rc) |
281 | goto fail; | 252 | goto fail; |
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c index dd2cc03f406f..86903d3f5a03 100644 --- a/arch/powerpc/kvm/e500_mmu_host.c +++ b/arch/powerpc/kvm/e500_mmu_host.c | |||
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
473 | if (printk_ratelimit()) | 473 | if (printk_ratelimit()) |
474 | pr_err("%s: pte not present: gfn %lx, pfn %lx\n", | 474 | pr_err("%s: pte not present: gfn %lx, pfn %lx\n", |
475 | __func__, (long)gfn, pfn); | 475 | __func__, (long)gfn, pfn); |
476 | return -EINVAL; | 476 | ret = -EINVAL; |
477 | goto out; | ||
477 | } | 478 | } |
478 | kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); | 479 | kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg); |
479 | 480 | ||
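The two-line fix above matters because by this point the function already holds a reference on the guest page it looked up; a bare return leaks it, while goto out funnels every failure through the common release path at the end of the function. The generic shape of that convention, as a sketch with a hypothetical check_failed() helper:

#include <linux/slab.h>

static bool check_failed(void);		/* hypothetical validity check */

static int shadow_map_sketch(void)
{
	int ret = 0;
	void *buf = kmalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;	/* nothing held yet: plain return is fine */

	if (check_failed()) {
		ret = -EINVAL;
		goto out;	/* not "return -EINVAL": that leaks buf */
	}
	/* ... use buf ... */
out:
	kfree(buf);
	return ret;
}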
diff --git a/arch/powerpc/lib/mem_64.S b/arch/powerpc/lib/mem_64.S index 0738f96befbf..43435c6892fb 100644 --- a/arch/powerpc/lib/mem_64.S +++ b/arch/powerpc/lib/mem_64.S | |||
@@ -77,7 +77,7 @@ _GLOBAL(memset) | |||
77 | stb r4,0(r6) | 77 | stb r4,0(r6) |
78 | blr | 78 | blr |
79 | 79 | ||
80 | _GLOBAL(memmove) | 80 | _GLOBAL_TOC(memmove) |
81 | cmplw 0,r3,r4 | 81 | cmplw 0,r3,r4 |
82 | bgt backwards_memcpy | 82 | bgt backwards_memcpy |
83 | b memcpy | 83 | b memcpy |
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 412dd46dd0b7..5c09f365c842 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c | |||
@@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1198 | sh = regs->gpr[rb] & 0x3f; | 1198 | sh = regs->gpr[rb] & 0x3f; |
1199 | ival = (signed int) regs->gpr[rd]; | 1199 | ival = (signed int) regs->gpr[rd]; |
1200 | regs->gpr[ra] = ival >> (sh < 32 ? sh : 31); | 1200 | regs->gpr[ra] = ival >> (sh < 32 ? sh : 31); |
1201 | if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0)) | 1201 | if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0)) |
1202 | regs->xer |= XER_CA; | 1202 | regs->xer |= XER_CA; |
1203 | else | 1203 | else |
1204 | regs->xer &= ~XER_CA; | 1204 | regs->xer &= ~XER_CA; |
@@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1208 | sh = rb; | 1208 | sh = rb; |
1209 | ival = (signed int) regs->gpr[rd]; | 1209 | ival = (signed int) regs->gpr[rd]; |
1210 | regs->gpr[ra] = ival >> sh; | 1210 | regs->gpr[ra] = ival >> sh; |
1211 | if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) | 1211 | if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) |
1212 | regs->xer |= XER_CA; | 1212 | regs->xer |= XER_CA; |
1213 | else | 1213 | else |
1214 | regs->xer &= ~XER_CA; | 1214 | regs->xer &= ~XER_CA; |
@@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1216 | 1216 | ||
1217 | #ifdef __powerpc64__ | 1217 | #ifdef __powerpc64__ |
1218 | case 27: /* sld */ | 1218 | case 27: /* sld */ |
1219 | sh = regs->gpr[rd] & 0x7f; | 1219 | sh = regs->gpr[rb] & 0x7f; |
1220 | if (sh < 64) | 1220 | if (sh < 64) |
1221 | regs->gpr[ra] = regs->gpr[rd] << sh; | 1221 | regs->gpr[ra] = regs->gpr[rd] << sh; |
1222 | else | 1222 | else |
@@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1235 | sh = regs->gpr[rb] & 0x7f; | 1235 | sh = regs->gpr[rb] & 0x7f; |
1236 | ival = (signed long int) regs->gpr[rd]; | 1236 | ival = (signed long int) regs->gpr[rd]; |
1237 | regs->gpr[ra] = ival >> (sh < 64 ? sh : 63); | 1237 | regs->gpr[ra] = ival >> (sh < 64 ? sh : 63); |
1238 | if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0)) | 1238 | if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0)) |
1239 | regs->xer |= XER_CA; | 1239 | regs->xer |= XER_CA; |
1240 | else | 1240 | else |
1241 | regs->xer &= ~XER_CA; | 1241 | regs->xer &= ~XER_CA; |
@@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1246 | sh = rb | ((instr & 2) << 4); | 1246 | sh = rb | ((instr & 2) << 4); |
1247 | ival = (signed long int) regs->gpr[rd]; | 1247 | ival = (signed long int) regs->gpr[rd]; |
1248 | regs->gpr[ra] = ival >> sh; | 1248 | regs->gpr[ra] = ival >> sh; |
1249 | if (ival < 0 && (ival & ((1 << sh) - 1)) != 0) | 1249 | if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0) |
1250 | regs->xer |= XER_CA; | 1250 | regs->xer |= XER_CA; |
1251 | else | 1251 | else |
1252 | regs->xer &= ~XER_CA; | 1252 | regs->xer &= ~XER_CA; |
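Three of the sstep.c hunks fix the carry (XER_CA) computation for the shift-right-algebraic emulation: the constant 1 is a 32-bit int, so 1 << sh is undefined for sh >= 32 and the "were any one-bits shifted out?" test misfired for large shift counts; 1ul << sh keeps the arithmetic in 64 bits (the sh >= 32/64 guards already short-circuit the out-of-range cases). The sld hunk separately takes the shift count from rb, not rd. A userspace demonstration of the corrected mask, assuming an LP64 host:

#include <stdio.h>
#include <stdint.h>

/* CA: a negative value shifted right algebraically lost one-bits */
static int sra_carry(int64_t ival, unsigned int sh)	/* sh < 64 */
{
	return ival < 0 && (ival & ((1ul << sh) - 1)) != 0;
}

int main(void)
{
	printf("%d\n", sra_carry(-1, 40));			/* 1 */
	printf("%d\n", sra_carry((int64_t)(-1ull << 40), 40));	/* 0 */
	return 0;
}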
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 6b0641c3f03f..fe52db2eea6a 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -1307,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu) | |||
1307 | out_enable: | 1307 | out_enable: |
1308 | pmao_restore_workaround(ebb); | 1308 | pmao_restore_workaround(ebb); |
1309 | 1309 | ||
1310 | if (ppmu->flags & PPMU_ARCH_207S) | ||
1311 | mtspr(SPRN_MMCR2, 0); | ||
1312 | |||
1310 | mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); | 1313 | mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]); |
1311 | 1314 | ||
1312 | mb(); | 1315 | mb(); |
@@ -1315,9 +1318,6 @@ static void power_pmu_enable(struct pmu *pmu) | |||
1315 | 1318 | ||
1316 | write_mmcr0(cpuhw, mmcr0); | 1319 | write_mmcr0(cpuhw, mmcr0); |
1317 | 1320 | ||
1318 | if (ppmu->flags & PPMU_ARCH_207S) | ||
1319 | mtspr(SPRN_MMCR2, 0); | ||
1320 | |||
1321 | /* | 1321 | /* |
1322 | * Enable instruction sampling if necessary | 1322 | * Enable instruction sampling if necessary |
1323 | */ | 1323 | */ |
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c index e0766b82e165..66d0f179650f 100644 --- a/arch/powerpc/perf/hv-24x7.c +++ b/arch/powerpc/perf/hv-24x7.c | |||
@@ -387,8 +387,7 @@ static int h_24x7_event_init(struct perf_event *event) | |||
387 | event->attr.exclude_hv || | 387 | event->attr.exclude_hv || |
388 | event->attr.exclude_idle || | 388 | event->attr.exclude_idle || |
389 | event->attr.exclude_host || | 389 | event->attr.exclude_host || |
390 | event->attr.exclude_guest || | 390 | event->attr.exclude_guest) |
391 | is_sampling_event(event)) /* no sampling */ | ||
392 | return -EINVAL; | 391 | return -EINVAL; |
393 | 392 | ||
394 | /* no branch sampling */ | 393 | /* no branch sampling */ |
@@ -513,6 +512,9 @@ static int hv_24x7_init(void) | |||
513 | if (!hv_page_cache) | 512 | if (!hv_page_cache) |
514 | return -ENOMEM; | 513 | return -ENOMEM; |
515 | 514 | ||
515 | /* sampling not supported */ | ||
516 | h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; | ||
517 | |||
516 | r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1); | 518 | r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1); |
517 | if (r) | 519 | if (r) |
518 | return r; | 520 | return r; |
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c index c9d399a2df82..15fc76c93022 100644 --- a/arch/powerpc/perf/hv-gpci.c +++ b/arch/powerpc/perf/hv-gpci.c | |||
@@ -210,8 +210,7 @@ static int h_gpci_event_init(struct perf_event *event) | |||
210 | event->attr.exclude_hv || | 210 | event->attr.exclude_hv || |
211 | event->attr.exclude_idle || | 211 | event->attr.exclude_idle || |
212 | event->attr.exclude_host || | 212 | event->attr.exclude_host || |
213 | event->attr.exclude_guest || | 213 | event->attr.exclude_guest) |
214 | is_sampling_event(event)) /* no sampling */ | ||
215 | return -EINVAL; | 214 | return -EINVAL; |
216 | 215 | ||
217 | /* no branch sampling */ | 216 | /* no branch sampling */ |
@@ -284,6 +283,9 @@ static int hv_gpci_init(void) | |||
284 | return -ENODEV; | 283 | return -ENODEV; |
285 | } | 284 | } |
286 | 285 | ||
286 | /* sampling not supported */ | ||
287 | h_gpci_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; | ||
288 | |||
287 | r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1); | 289 | r = perf_pmu_register(&h_gpci_pmu, h_gpci_pmu.name, -1); |
288 | if (r) | 290 | if (r) |
289 | return r; | 291 | return r; |
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c index 10268c41d830..0ad533b617f7 100644 --- a/arch/powerpc/platforms/powernv/opal-elog.c +++ b/arch/powerpc/platforms/powernv/opal-elog.c | |||
@@ -249,7 +249,7 @@ static void elog_work_fn(struct work_struct *work) | |||
249 | 249 | ||
250 | rc = opal_get_elog_size(&id, &size, &type); | 250 | rc = opal_get_elog_size(&id, &size, &type); |
251 | if (rc != OPAL_SUCCESS) { | 251 | if (rc != OPAL_SUCCESS) { |
252 | pr_err("ELOG: Opal log read failed\n"); | 252 | pr_err("ELOG: OPAL log info read failed\n"); |
253 | return; | 253 | return; |
254 | } | 254 | } |
255 | 255 | ||
@@ -257,7 +257,7 @@ static void elog_work_fn(struct work_struct *work) | |||
257 | log_id = be64_to_cpu(id); | 257 | log_id = be64_to_cpu(id); |
258 | elog_type = be64_to_cpu(type); | 258 | elog_type = be64_to_cpu(type); |
259 | 259 | ||
260 | BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE); | 260 | WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE); |
261 | 261 | ||
262 | if (elog_size >= OPAL_MAX_ERRLOG_SIZE) | 262 | if (elog_size >= OPAL_MAX_ERRLOG_SIZE) |
263 | elog_size = OPAL_MAX_ERRLOG_SIZE; | 263 | elog_size = OPAL_MAX_ERRLOG_SIZE; |
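Demoting the BUG_ON follows the usual rule that firmware-reported sizes are untrusted input: warn and clamp rather than take the machine down. Since WARN_ON() returns its condition, the pair of statements above could even fold into one; behaviour differs only in the harmless == case, where clamping is a no-op. A sketch:

if (WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE))
	elog_size = OPAL_MAX_ERRLOG_SIZE;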
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 022b38e6a80b..2d0b4d68a40a 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
@@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa, | |||
86 | } | 86 | } |
87 | 87 | ||
88 | of_node_set_flag(dn, OF_DYNAMIC); | 88 | of_node_set_flag(dn, OF_DYNAMIC); |
89 | of_node_init(dn); | ||
89 | 90 | ||
90 | return dn; | 91 | return dn; |
91 | } | 92 | } |
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 0435bb65d0aa..1c0a60d98867 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c | |||
@@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist | |||
69 | 69 | ||
70 | np->properties = proplist; | 70 | np->properties = proplist; |
71 | of_node_set_flag(np, OF_DYNAMIC); | 71 | of_node_set_flag(np, OF_DYNAMIC); |
72 | of_node_init(np); | ||
72 | 73 | ||
73 | np->parent = derive_parent(path); | 74 | np->parent = derive_parent(path); |
74 | if (IS_ERR(np->parent)) { | 75 | if (IS_ERR(np->parent)) { |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index bb63499fc5d3..f5af5f6ef0f4 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -116,7 +116,6 @@ config S390 | |||
116 | select HAVE_FTRACE_MCOUNT_RECORD | 116 | select HAVE_FTRACE_MCOUNT_RECORD |
117 | select HAVE_FUNCTION_GRAPH_TRACER | 117 | select HAVE_FUNCTION_GRAPH_TRACER |
118 | select HAVE_FUNCTION_TRACER | 118 | select HAVE_FUNCTION_TRACER |
119 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
120 | select HAVE_FUTEX_CMPXCHG if FUTEX | 119 | select HAVE_FUTEX_CMPXCHG if FUTEX |
121 | select HAVE_KERNEL_BZIP2 | 120 | select HAVE_KERNEL_BZIP2 |
122 | select HAVE_KERNEL_GZIP | 121 | select HAVE_KERNEL_GZIP |
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h index 4181d7baabba..773bef7614d8 100644 --- a/arch/s390/include/asm/kvm_host.h +++ b/arch/s390/include/asm/kvm_host.h | |||
@@ -305,7 +305,6 @@ struct kvm_s390_local_interrupt { | |||
305 | struct list_head list; | 305 | struct list_head list; |
306 | atomic_t active; | 306 | atomic_t active; |
307 | struct kvm_s390_float_interrupt *float_int; | 307 | struct kvm_s390_float_interrupt *float_int; |
308 | int timer_due; /* event indicator for waitqueue below */ | ||
309 | wait_queue_head_t *wq; | 308 | wait_queue_head_t *wq; |
310 | atomic_t *cpuflags; | 309 | atomic_t *cpuflags; |
311 | unsigned int action_bits; | 310 | unsigned int action_bits; |
@@ -367,7 +366,6 @@ struct kvm_vcpu_arch { | |||
367 | s390_fp_regs guest_fpregs; | 366 | s390_fp_regs guest_fpregs; |
368 | struct kvm_s390_local_interrupt local_int; | 367 | struct kvm_s390_local_interrupt local_int; |
369 | struct hrtimer ckc_timer; | 368 | struct hrtimer ckc_timer; |
370 | struct tasklet_struct tasklet; | ||
371 | struct kvm_s390_pgm_info pgm; | 369 | struct kvm_s390_pgm_info pgm; |
372 | union { | 370 | union { |
373 | struct cpuid cpu_id; | 371 | struct cpuid cpu_id; |
@@ -418,6 +416,7 @@ struct kvm_arch{ | |||
418 | int css_support; | 416 | int css_support; |
419 | int use_irqchip; | 417 | int use_irqchip; |
420 | int use_cmma; | 418 | int use_cmma; |
419 | int user_cpu_state_ctrl; | ||
421 | struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; | 420 | struct s390_io_adapter *adapters[MAX_S390_IO_ADAPTERS]; |
422 | wait_queue_head_t ipte_wq; | 421 | wait_queue_head_t ipte_wq; |
423 | spinlock_t start_stop_lock; | 422 | spinlock_t start_stop_lock; |
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h index 6f02d452bbee..e568fc8a7250 100644 --- a/arch/s390/include/asm/processor.h +++ b/arch/s390/include/asm/processor.h | |||
@@ -217,7 +217,7 @@ static inline void cpu_relax(void) | |||
217 | barrier(); | 217 | barrier(); |
218 | } | 218 | } |
219 | 219 | ||
220 | #define arch_mutex_cpu_relax() barrier() | 220 | #define cpu_relax_lowlatency() barrier() |
221 | 221 | ||
222 | static inline void psw_set_key(unsigned int key) | 222 | static inline void psw_set_key(unsigned int key) |
223 | { | 223 | { |
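This is the s390 end of the tree-wide cpu_relax_lowlatency() introduction visible throughout this diff: the old arch_mutex_cpu_relax() override is renamed. Most architectures alias the new hook to cpu_relax(), but s390's cpu_relax() may hand the cpu to the hypervisor (diagnose 0x44), far too heavy for optimistic-spinning lock loops, so the low-latency variant stays a plain compiler barrier. Side by side:

#ifdef CONFIG_S390	/* cpu_relax() may yield to the hypervisor */
#define cpu_relax_lowlatency()	barrier()
#else			/* generic: the archs patched above just alias it */
#define cpu_relax_lowlatency()	cpu_relax()
#endif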
diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h index df38c70cd59e..18ea9e3f8142 100644 --- a/arch/s390/include/asm/switch_to.h +++ b/arch/s390/include/asm/switch_to.h | |||
@@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc) | |||
51 | return 0; | 51 | return 0; |
52 | 52 | ||
53 | asm volatile( | 53 | asm volatile( |
54 | "0: lfpc %1\n" | 54 | " lfpc %1\n" |
55 | " la %0,0\n" | 55 | "0: la %0,0\n" |
56 | "1:\n" | 56 | "1:\n" |
57 | EX_TABLE(0b,1b) | 57 | EX_TABLE(0b,1b) |
58 | : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL)); | 58 | : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL)); |
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild index 736637363d31..08fe6dad9026 100644 --- a/arch/s390/include/uapi/asm/Kbuild +++ b/arch/s390/include/uapi/asm/Kbuild | |||
@@ -16,6 +16,7 @@ header-y += ioctls.h | |||
16 | header-y += ipcbuf.h | 16 | header-y += ipcbuf.h |
17 | header-y += kvm.h | 17 | header-y += kvm.h |
18 | header-y += kvm_para.h | 18 | header-y += kvm_para.h |
19 | header-y += kvm_perf.h | ||
19 | header-y += kvm_virtio.h | 20 | header-y += kvm_virtio.h |
20 | header-y += mman.h | 21 | header-y += mman.h |
21 | header-y += monwriter.h | 22 | header-y += monwriter.h |
diff --git a/arch/s390/include/uapi/asm/kvm_perf.h b/arch/s390/include/uapi/asm/kvm_perf.h new file mode 100644 index 000000000000..397282727e21 --- /dev/null +++ b/arch/s390/include/uapi/asm/kvm_perf.h | |||
@@ -0,0 +1,25 @@ | |||
1 | /* | ||
2 | * Definitions for perf-kvm on s390 | ||
3 | * | ||
4 | * Copyright 2014 IBM Corp. | ||
5 | * Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License (version 2 only) | ||
9 | * as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #ifndef __LINUX_KVM_PERF_S390_H | ||
13 | #define __LINUX_KVM_PERF_S390_H | ||
14 | |||
15 | #include <asm/sie.h> | ||
16 | |||
17 | #define DECODE_STR_LEN 40 | ||
18 | |||
19 | #define VCPU_ID "id" | ||
20 | |||
21 | #define KVM_ENTRY_TRACE "kvm:kvm_s390_sie_enter" | ||
22 | #define KVM_EXIT_TRACE "kvm:kvm_s390_sie_exit" | ||
23 | #define KVM_EXIT_REASON "icptcode" | ||
24 | |||
25 | #endif | ||
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h index 5d9cc19462c4..d4096fdfc6ab 100644 --- a/arch/s390/include/uapi/asm/sie.h +++ b/arch/s390/include/uapi/asm/sie.h | |||
@@ -108,6 +108,7 @@ | |||
108 | exit_code_ipa0(0xB2, 0x17, "STETR"), \ | 108 | exit_code_ipa0(0xB2, 0x17, "STETR"), \ |
109 | exit_code_ipa0(0xB2, 0x18, "PC"), \ | 109 | exit_code_ipa0(0xB2, 0x18, "PC"), \ |
110 | exit_code_ipa0(0xB2, 0x20, "SERVC"), \ | 110 | exit_code_ipa0(0xB2, 0x20, "SERVC"), \ |
111 | exit_code_ipa0(0xB2, 0x21, "IPTE"), \ | ||
111 | exit_code_ipa0(0xB2, 0x28, "PT"), \ | 112 | exit_code_ipa0(0xB2, 0x28, "PT"), \ |
112 | exit_code_ipa0(0xB2, 0x29, "ISKE"), \ | 113 | exit_code_ipa0(0xB2, 0x29, "ISKE"), \ |
113 | exit_code_ipa0(0xB2, 0x2a, "RRBE"), \ | 114 | exit_code_ipa0(0xB2, 0x2a, "RRBE"), \ |
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 7ba7d6784510..e88d35d74950 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S | |||
@@ -437,11 +437,11 @@ ENTRY(startup_kdump) | |||
437 | 437 | ||
438 | #if defined(CONFIG_64BIT) | 438 | #if defined(CONFIG_64BIT) |
439 | #if defined(CONFIG_MARCH_ZEC12) | 439 | #if defined(CONFIG_MARCH_ZEC12) |
440 | .long 3, 0xc100efea, 0xf46ce800, 0x00400000 | 440 | .long 3, 0xc100eff2, 0xf46ce800, 0x00400000 |
441 | #elif defined(CONFIG_MARCH_Z196) | 441 | #elif defined(CONFIG_MARCH_Z196) |
442 | .long 2, 0xc100efea, 0xf46c0000 | 442 | .long 2, 0xc100eff2, 0xf46c0000 |
443 | #elif defined(CONFIG_MARCH_Z10) | 443 | #elif defined(CONFIG_MARCH_Z10) |
444 | .long 2, 0xc100efea, 0xf0680000 | 444 | .long 2, 0xc100eff2, 0xf0680000 |
445 | #elif defined(CONFIG_MARCH_Z9_109) | 445 | #elif defined(CONFIG_MARCH_Z9_109) |
446 | .long 1, 0xc100efc2 | 446 | .long 1, 0xc100efc2 |
447 | #elif defined(CONFIG_MARCH_Z990) | 447 | #elif defined(CONFIG_MARCH_Z990) |
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S index 08dcf21cb8df..433c6dbfa442 100644 --- a/arch/s390/kernel/mcount.S +++ b/arch/s390/kernel/mcount.S | |||
@@ -21,13 +21,9 @@ ENTRY(_mcount) | |||
21 | ENTRY(ftrace_caller) | 21 | ENTRY(ftrace_caller) |
22 | #endif | 22 | #endif |
23 | stm %r2,%r5,16(%r15) | 23 | stm %r2,%r5,16(%r15) |
24 | bras %r1,2f | 24 | bras %r1,1f |
25 | 0: .long ftrace_trace_function | 25 | 0: .long ftrace_trace_function |
26 | 1: .long function_trace_stop | 26 | 1: st %r14,56(%r15) |
27 | 2: l %r2,1b-0b(%r1) | ||
28 | icm %r2,0xf,0(%r2) | ||
29 | jnz 3f | ||
30 | st %r14,56(%r15) | ||
31 | lr %r0,%r15 | 27 | lr %r0,%r15 |
32 | ahi %r15,-96 | 28 | ahi %r15,-96 |
33 | l %r3,100(%r15) | 29 | l %r3,100(%r15) |
@@ -50,7 +46,7 @@ ENTRY(ftrace_graph_caller) | |||
50 | #endif | 46 | #endif |
51 | ahi %r15,96 | 47 | ahi %r15,96 |
52 | l %r14,56(%r15) | 48 | l %r14,56(%r15) |
53 | 3: lm %r2,%r5,16(%r15) | 49 | lm %r2,%r5,16(%r15) |
54 | br %r14 | 50 | br %r14 |
55 | 51 | ||
56 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 52 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
diff --git a/arch/s390/kernel/mcount64.S b/arch/s390/kernel/mcount64.S index 1c52eae3396a..c67a8bf0fd9a 100644 --- a/arch/s390/kernel/mcount64.S +++ b/arch/s390/kernel/mcount64.S | |||
@@ -20,9 +20,6 @@ ENTRY(_mcount) | |||
20 | 20 | ||
21 | ENTRY(ftrace_caller) | 21 | ENTRY(ftrace_caller) |
22 | #endif | 22 | #endif |
23 | larl %r1,function_trace_stop | ||
24 | icm %r1,0xf,0(%r1) | ||
25 | bnzr %r14 | ||
26 | stmg %r2,%r5,32(%r15) | 23 | stmg %r2,%r5,32(%r15) |
27 | stg %r14,112(%r15) | 24 | stg %r14,112(%r15) |
28 | lgr %r1,%r15 | 25 | lgr %r1,%r15 |
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index ea75d011a6fc..d3194de7ae1e 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c | |||
@@ -411,12 +411,6 @@ static int cpumf_pmu_event_init(struct perf_event *event) | |||
411 | case PERF_TYPE_HARDWARE: | 411 | case PERF_TYPE_HARDWARE: |
412 | case PERF_TYPE_HW_CACHE: | 412 | case PERF_TYPE_HW_CACHE: |
413 | case PERF_TYPE_RAW: | 413 | case PERF_TYPE_RAW: |
414 | /* The CPU measurement counter facility does not have overflow | ||
415 | * interrupts to do sampling. Sampling must be provided by | ||
416 | * external means, for example, by timers. | ||
417 | */ | ||
418 | if (is_sampling_event(event)) | ||
419 | return -ENOENT; | ||
420 | err = __hw_perf_event_init(event); | 414 | err = __hw_perf_event_init(event); |
421 | break; | 415 | break; |
422 | default: | 416 | default: |
@@ -681,6 +675,12 @@ static int __init cpumf_pmu_init(void) | |||
681 | goto out; | 675 | goto out; |
682 | } | 676 | } |
683 | 677 | ||
678 | /* The CPU measurement counter facility does not have overflow | ||
679 | * interrupts to do sampling. Sampling must be provided by | ||
680 | * external means, for example, by timers. | ||
681 | */ | ||
682 | cpumf_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; | ||
683 | |||
684 | cpumf_pmu.attr_groups = cpumf_cf_event_group(); | 684 | cpumf_pmu.attr_groups = cpumf_cf_event_group(); |
685 | rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); | 685 | rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW); |
686 | if (rc) { | 686 | if (rc) { |
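This completes the same conversion applied to hv-24x7 and hv-gpci above: instead of every event_init rejecting is_sampling_event() on its own, the PMU advertises PERF_PMU_CAP_NO_INTERRUPT at registration time and the perf core refuses sampling events for it centrally. Minimal registration sketch (my_pmu is a placeholder struct pmu with its callbacks elided):

#include <linux/perf_event.h>

static struct pmu my_pmu;	/* placeholder; .event_init etc. elided */

static int __init my_pmu_init(void)
{
	/* no overflow interrupt -> core must reject sampling events */
	my_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

	return perf_pmu_register(&my_pmu, "my_pmu", -1);
}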
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c index 2d716734b5b1..5dc7ad9e2fbf 100644 --- a/arch/s390/kernel/ptrace.c +++ b/arch/s390/kernel/ptrace.c | |||
@@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data) | |||
334 | unsigned long mask = PSW_MASK_USER; | 334 | unsigned long mask = PSW_MASK_USER; |
335 | 335 | ||
336 | mask |= is_ri_task(child) ? PSW_MASK_RI : 0; | 336 | mask |= is_ri_task(child) ? PSW_MASK_RI : 0; |
337 | if ((data & ~mask) != PSW_USER_BITS) | 337 | if ((data ^ PSW_USER_BITS) & ~mask) |
338 | /* Invalid psw mask. */ | ||
339 | return -EINVAL; | ||
340 | if ((data & PSW_MASK_ASC) == PSW_ASC_HOME) | ||
341 | /* Invalid address-space-control bits */ | ||
338 | return -EINVAL; | 342 | return -EINVAL; |
339 | if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) | 343 | if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA)) |
344 | /* Invalid addressing mode bits */ | ||
340 | return -EINVAL; | 345 | return -EINVAL; |
341 | } | 346 | } |
342 | *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; | 347 | *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data; |
@@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child, | |||
672 | 677 | ||
673 | mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; | 678 | mask |= is_ri_task(child) ? PSW32_MASK_RI : 0; |
674 | /* Build a 64 bit psw mask from 31 bit mask. */ | 679 | /* Build a 64 bit psw mask from 31 bit mask. */ |
675 | if ((tmp & ~mask) != PSW32_USER_BITS) | 680 | if ((tmp ^ PSW32_USER_BITS) & ~mask) |
676 | /* Invalid psw mask. */ | 681 | /* Invalid psw mask. */ |
677 | return -EINVAL; | 682 | return -EINVAL; |
683 | if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME) | ||
684 | /* Invalid address-space-control bits */ | ||
685 | return -EINVAL; | ||
678 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | | 686 | regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | |
679 | (regs->psw.mask & PSW_MASK_BA) | | 687 | (regs->psw.mask & PSW_MASK_BA) | |
680 | (__u64)(tmp & mask) << 32; | 688 | (__u64)(tmp & mask) << 32; |
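The rewritten test (data ^ PSW_USER_BITS) & ~mask asks whether every bit outside the user-changeable mask matches PSW_USER_BITS, which stays correct even when PSW_USER_BITS itself carries bits inside the mask; the old (data & ~mask) != PSW_USER_BITS form then rejected valid masks. A toy demonstration (bit values invented for the example):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask      = 0x00ff;	/* user-changeable bits */
	uint64_t user_bits = 0x0a01;	/* note: bit 0 lies inside mask */
	uint64_t data      = 0x0a55;	/* fixed bits intact, free bits used */

	/* new check accepts: nothing outside the mask deviates */
	assert(((data ^ user_bits) & ~mask) == 0);

	/* old check wrongly rejects: masking data strips low bits
	 * that user_bits still carries on the other side */
	assert((data & ~mask) != user_bits);
	return 0;
}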
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c index 0161675878a2..59bd8f991b98 100644 --- a/arch/s390/kvm/diag.c +++ b/arch/s390/kvm/diag.c | |||
@@ -176,7 +176,8 @@ static int __diag_ipl_functions(struct kvm_vcpu *vcpu) | |||
176 | return -EOPNOTSUPP; | 176 | return -EOPNOTSUPP; |
177 | } | 177 | } |
178 | 178 | ||
179 | kvm_s390_vcpu_stop(vcpu); | 179 | if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) |
180 | kvm_s390_vcpu_stop(vcpu); | ||
180 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; | 181 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; |
181 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; | 182 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; |
182 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; | 183 | vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; |
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index a0b586c1913c..eaf46291d361 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c | |||
@@ -56,32 +56,26 @@ static int handle_noop(struct kvm_vcpu *vcpu) | |||
56 | static int handle_stop(struct kvm_vcpu *vcpu) | 56 | static int handle_stop(struct kvm_vcpu *vcpu) |
57 | { | 57 | { |
58 | int rc = 0; | 58 | int rc = 0; |
59 | unsigned int action_bits; | ||
59 | 60 | ||
60 | vcpu->stat.exit_stop_request++; | 61 | vcpu->stat.exit_stop_request++; |
61 | spin_lock_bh(&vcpu->arch.local_int.lock); | ||
62 | |||
63 | trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits); | 62 | trace_kvm_s390_stop_request(vcpu->arch.local_int.action_bits); |
64 | 63 | ||
65 | if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) { | 64 | action_bits = vcpu->arch.local_int.action_bits; |
66 | kvm_s390_vcpu_stop(vcpu); | ||
67 | vcpu->arch.local_int.action_bits &= ~ACTION_STOP_ON_STOP; | ||
68 | VCPU_EVENT(vcpu, 3, "%s", "cpu stopped"); | ||
69 | rc = -EOPNOTSUPP; | ||
70 | } | ||
71 | 65 | ||
72 | if (vcpu->arch.local_int.action_bits & ACTION_STORE_ON_STOP) { | 66 | if (!(action_bits & ACTION_STOP_ON_STOP)) |
73 | vcpu->arch.local_int.action_bits &= ~ACTION_STORE_ON_STOP; | 67 | return 0; |
74 | /* store status must be called unlocked. Since local_int.lock | 68 | |
75 | * only protects local_int.* and not guest memory we can give | 69 | if (action_bits & ACTION_STORE_ON_STOP) { |
76 | * up the lock here */ | ||
77 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
78 | rc = kvm_s390_vcpu_store_status(vcpu, | 70 | rc = kvm_s390_vcpu_store_status(vcpu, |
79 | KVM_S390_STORE_STATUS_NOADDR); | 71 | KVM_S390_STORE_STATUS_NOADDR); |
80 | if (rc >= 0) | 72 | if (rc) |
81 | rc = -EOPNOTSUPP; | 73 | return rc; |
82 | } else | 74 | } |
83 | spin_unlock_bh(&vcpu->arch.local_int.lock); | 75 | |
84 | return rc; | 76 | if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) |
77 | kvm_s390_vcpu_stop(vcpu); | ||
78 | return -EOPNOTSUPP; | ||
85 | } | 79 | } |
86 | 80 | ||
87 | static int handle_validity(struct kvm_vcpu *vcpu) | 81 | static int handle_validity(struct kvm_vcpu *vcpu) |
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index 90c8de22a2a0..92528a0bdda6 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c | |||
@@ -158,6 +158,9 @@ static void __reset_intercept_indicators(struct kvm_vcpu *vcpu) | |||
158 | LCTL_CR10 | LCTL_CR11); | 158 | LCTL_CR10 | LCTL_CR11); |
159 | vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); | 159 | vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT); |
160 | } | 160 | } |
161 | |||
162 | if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP) | ||
163 | atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags); | ||
161 | } | 164 | } |
162 | 165 | ||
163 | static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) | 166 | static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag) |
@@ -544,13 +547,13 @@ int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu) | |||
544 | int rc = 0; | 547 | int rc = 0; |
545 | 548 | ||
546 | if (atomic_read(&li->active)) { | 549 | if (atomic_read(&li->active)) { |
547 | spin_lock_bh(&li->lock); | 550 | spin_lock(&li->lock); |
548 | list_for_each_entry(inti, &li->list, list) | 551 | list_for_each_entry(inti, &li->list, list) |
549 | if (__interrupt_is_deliverable(vcpu, inti)) { | 552 | if (__interrupt_is_deliverable(vcpu, inti)) { |
550 | rc = 1; | 553 | rc = 1; |
551 | break; | 554 | break; |
552 | } | 555 | } |
553 | spin_unlock_bh(&li->lock); | 556 | spin_unlock(&li->lock); |
554 | } | 557 | } |
555 | 558 | ||
556 | if ((!rc) && atomic_read(&fi->active)) { | 559 | if ((!rc) && atomic_read(&fi->active)) { |
@@ -585,88 +588,56 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | |||
585 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) | 588 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) |
586 | { | 589 | { |
587 | u64 now, sltime; | 590 | u64 now, sltime; |
588 | DECLARE_WAITQUEUE(wait, current); | ||
589 | 591 | ||
590 | vcpu->stat.exit_wait_state++; | 592 | vcpu->stat.exit_wait_state++; |
591 | if (kvm_cpu_has_interrupt(vcpu)) | ||
592 | return 0; | ||
593 | 593 | ||
594 | __set_cpu_idle(vcpu); | 594 | /* fast path */ |
595 | spin_lock_bh(&vcpu->arch.local_int.lock); | 595 | if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu)) |
596 | vcpu->arch.local_int.timer_due = 0; | 596 | return 0; |
597 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
598 | 597 | ||
599 | if (psw_interrupts_disabled(vcpu)) { | 598 | if (psw_interrupts_disabled(vcpu)) { |
600 | VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); | 599 | VCPU_EVENT(vcpu, 3, "%s", "disabled wait"); |
601 | __unset_cpu_idle(vcpu); | ||
602 | return -EOPNOTSUPP; /* disabled wait */ | 600 | return -EOPNOTSUPP; /* disabled wait */ |
603 | } | 601 | } |
604 | 602 | ||
603 | __set_cpu_idle(vcpu); | ||
605 | if (!ckc_interrupts_enabled(vcpu)) { | 604 | if (!ckc_interrupts_enabled(vcpu)) { |
606 | VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); | 605 | VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer"); |
607 | goto no_timer; | 606 | goto no_timer; |
608 | } | 607 | } |
609 | 608 | ||
610 | now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; | 609 | now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch; |
611 | if (vcpu->arch.sie_block->ckc < now) { | ||
612 | __unset_cpu_idle(vcpu); | ||
613 | return 0; | ||
614 | } | ||
615 | |||
616 | sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); | 610 | sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); |
617 | |||
618 | hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); | 611 | hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); |
619 | VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); | 612 | VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); |
620 | no_timer: | 613 | no_timer: |
621 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); | 614 | srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); |
622 | spin_lock(&vcpu->arch.local_int.float_int->lock); | 615 | kvm_vcpu_block(vcpu); |
623 | spin_lock_bh(&vcpu->arch.local_int.lock); | ||
624 | add_wait_queue(&vcpu->wq, &wait); | ||
625 | while (list_empty(&vcpu->arch.local_int.list) && | ||
626 | list_empty(&vcpu->arch.local_int.float_int->list) && | ||
627 | (!vcpu->arch.local_int.timer_due) && | ||
628 | !signal_pending(current) && | ||
629 | !kvm_s390_si_ext_call_pending(vcpu)) { | ||
630 | set_current_state(TASK_INTERRUPTIBLE); | ||
631 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
632 | spin_unlock(&vcpu->arch.local_int.float_int->lock); | ||
633 | schedule(); | ||
634 | spin_lock(&vcpu->arch.local_int.float_int->lock); | ||
635 | spin_lock_bh(&vcpu->arch.local_int.lock); | ||
636 | } | ||
637 | __unset_cpu_idle(vcpu); | 616 | __unset_cpu_idle(vcpu); |
638 | __set_current_state(TASK_RUNNING); | ||
639 | remove_wait_queue(&vcpu->wq, &wait); | ||
640 | spin_unlock_bh(&vcpu->arch.local_int.lock); | ||
641 | spin_unlock(&vcpu->arch.local_int.float_int->lock); | ||
642 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | 617 | vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); |
643 | 618 | ||
644 | hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); | 619 | hrtimer_try_to_cancel(&vcpu->arch.ckc_timer); |
645 | return 0; | 620 | return 0; |
646 | } | 621 | } |
647 | 622 | ||
648 | void kvm_s390_tasklet(unsigned long parm) | 623 | void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) |
649 | { | 624 | { |
650 | struct kvm_vcpu *vcpu = (struct kvm_vcpu *) parm; | 625 | if (waitqueue_active(&vcpu->wq)) { |
651 | 626 | /* | |
652 | spin_lock(&vcpu->arch.local_int.lock); | 627 | * The vcpu gave up the cpu voluntarily, mark it as a good |
653 | vcpu->arch.local_int.timer_due = 1; | 628 | * yield-candidate. |
654 | if (waitqueue_active(&vcpu->wq)) | 629 | */ |
630 | vcpu->preempted = true; | ||
655 | wake_up_interruptible(&vcpu->wq); | 631 | wake_up_interruptible(&vcpu->wq); |
656 | spin_unlock(&vcpu->arch.local_int.lock); | 632 | } |
657 | } | 633 | } |
658 | 634 | ||
659 | /* | ||
660 | * low level hrtimer wake routine. Because this runs in hardirq context | ||
661 | * we schedule a tasklet to do the real work. | ||
662 | */ | ||
663 | enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) | 635 | enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer) |
664 | { | 636 | { |
665 | struct kvm_vcpu *vcpu; | 637 | struct kvm_vcpu *vcpu; |
666 | 638 | ||
667 | vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); | 639 | vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer); |
668 | vcpu->preempted = true; | 640 | kvm_s390_vcpu_wakeup(vcpu); |
669 | tasklet_schedule(&vcpu->arch.tasklet); | ||
670 | 641 | ||
671 | return HRTIMER_NORESTART; | 642 | return HRTIMER_NORESTART; |
672 | } | 643 | } |
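The two hunks above delete the tasklet bounce entirely: wake_up_interruptible() is safe from hardirq context, so the hrtimer callback can wake the waiter directly. A minimal, hypothetical sketch of the same pattern outside KVM (all names here are illustrative):

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static struct hrtimer demo_timer;
    static bool demo_expired;

    /* Runs in hardirq context; waking a waitqueue is legal here, so no
     * tasklet hop is required. */
    static enum hrtimer_restart demo_wakeup(struct hrtimer *timer)
    {
        demo_expired = true;
        wake_up_interruptible(&demo_wq);
        return HRTIMER_NORESTART;
    }

    static void demo_sleep_ns(u64 ns)
    {
        demo_expired = false;
        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_timer.function = demo_wakeup;
        hrtimer_start(&demo_timer, ns_to_ktime(ns), HRTIMER_MODE_REL);
        wait_event_interruptible(demo_wq, demo_expired);
        hrtimer_cancel(&demo_timer);
    }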
@@ -676,13 +647,13 @@ void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu) | |||
676 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; | 647 | struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int; |
677 | struct kvm_s390_interrupt_info *n, *inti = NULL; | 648 | struct kvm_s390_interrupt_info *n, *inti = NULL; |
678 | 649 | ||
679 | spin_lock_bh(&li->lock); | 650 | spin_lock(&li->lock); |
680 | list_for_each_entry_safe(inti, n, &li->list, list) { | 651 | list_for_each_entry_safe(inti, n, &li->list, list) { |
681 | list_del(&inti->list); | 652 | list_del(&inti->list); |
682 | kfree(inti); | 653 | kfree(inti); |
683 | } | 654 | } |
684 | atomic_set(&li->active, 0); | 655 | atomic_set(&li->active, 0); |
685 | spin_unlock_bh(&li->lock); | 656 | spin_unlock(&li->lock); |
686 | 657 | ||
687 | /* clear pending external calls set by sigp interpretation facility */ | 658 | /* clear pending external calls set by sigp interpretation facility */ |
688 | atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); | 659 | atomic_clear_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags); |
@@ -701,7 +672,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
701 | if (atomic_read(&li->active)) { | 672 | if (atomic_read(&li->active)) { |
702 | do { | 673 | do { |
703 | deliver = 0; | 674 | deliver = 0; |
704 | spin_lock_bh(&li->lock); | 675 | spin_lock(&li->lock); |
705 | list_for_each_entry_safe(inti, n, &li->list, list) { | 676 | list_for_each_entry_safe(inti, n, &li->list, list) { |
706 | if (__interrupt_is_deliverable(vcpu, inti)) { | 677 | if (__interrupt_is_deliverable(vcpu, inti)) { |
707 | list_del(&inti->list); | 678 | list_del(&inti->list); |
@@ -712,7 +683,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu) | |||
712 | } | 683 | } |
713 | if (list_empty(&li->list)) | 684 | if (list_empty(&li->list)) |
714 | atomic_set(&li->active, 0); | 685 | atomic_set(&li->active, 0); |
715 | spin_unlock_bh(&li->lock); | 686 | spin_unlock(&li->lock); |
716 | if (deliver) { | 687 | if (deliver) { |
717 | __do_deliver_interrupt(vcpu, inti); | 688 | __do_deliver_interrupt(vcpu, inti); |
718 | kfree(inti); | 689 | kfree(inti); |
@@ -758,7 +729,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu) | |||
758 | if (atomic_read(&li->active)) { | 729 | if (atomic_read(&li->active)) { |
759 | do { | 730 | do { |
760 | deliver = 0; | 731 | deliver = 0; |
761 | spin_lock_bh(&li->lock); | 732 | spin_lock(&li->lock); |
762 | list_for_each_entry_safe(inti, n, &li->list, list) { | 733 | list_for_each_entry_safe(inti, n, &li->list, list) { |
763 | if ((inti->type == KVM_S390_MCHK) && | 734 | if ((inti->type == KVM_S390_MCHK) && |
764 | __interrupt_is_deliverable(vcpu, inti)) { | 735 | __interrupt_is_deliverable(vcpu, inti)) { |
@@ -770,7 +741,7 @@ void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu) | |||
770 | } | 741 | } |
771 | if (list_empty(&li->list)) | 742 | if (list_empty(&li->list)) |
772 | atomic_set(&li->active, 0); | 743 | atomic_set(&li->active, 0); |
773 | spin_unlock_bh(&li->lock); | 744 | spin_unlock(&li->lock); |
774 | if (deliver) { | 745 | if (deliver) { |
775 | __do_deliver_interrupt(vcpu, inti); | 746 | __do_deliver_interrupt(vcpu, inti); |
776 | kfree(inti); | 747 | kfree(inti); |
@@ -817,11 +788,11 @@ int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code) | |||
817 | 788 | ||
818 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); | 789 | VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code); |
819 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1); | 790 | trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, inti->type, code, 0, 1); |
820 | spin_lock_bh(&li->lock); | 791 | spin_lock(&li->lock); |
821 | list_add(&inti->list, &li->list); | 792 | list_add(&inti->list, &li->list); |
822 | atomic_set(&li->active, 1); | 793 | atomic_set(&li->active, 1); |
823 | BUG_ON(waitqueue_active(li->wq)); | 794 | BUG_ON(waitqueue_active(li->wq)); |
824 | spin_unlock_bh(&li->lock); | 795 | spin_unlock(&li->lock); |
825 | return 0; | 796 | return 0; |
826 | } | 797 | } |
827 | 798 | ||
@@ -842,11 +813,11 @@ int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu, | |||
842 | 813 | ||
843 | inti->type = KVM_S390_PROGRAM_INT; | 814 | inti->type = KVM_S390_PROGRAM_INT; |
844 | memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); | 815 | memcpy(&inti->pgm, pgm_info, sizeof(inti->pgm)); |
845 | spin_lock_bh(&li->lock); | 816 | spin_lock(&li->lock); |
846 | list_add(&inti->list, &li->list); | 817 | list_add(&inti->list, &li->list); |
847 | atomic_set(&li->active, 1); | 818 | atomic_set(&li->active, 1); |
848 | BUG_ON(waitqueue_active(li->wq)); | 819 | BUG_ON(waitqueue_active(li->wq)); |
849 | spin_unlock_bh(&li->lock); | 820 | spin_unlock(&li->lock); |
850 | return 0; | 821 | return 0; |
851 | } | 822 | } |
852 | 823 | ||
@@ -934,12 +905,10 @@ static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti) | |||
934 | } | 905 | } |
935 | dst_vcpu = kvm_get_vcpu(kvm, sigcpu); | 906 | dst_vcpu = kvm_get_vcpu(kvm, sigcpu); |
936 | li = &dst_vcpu->arch.local_int; | 907 | li = &dst_vcpu->arch.local_int; |
937 | spin_lock_bh(&li->lock); | 908 | spin_lock(&li->lock); |
938 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 909 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); |
939 | if (waitqueue_active(li->wq)) | 910 | spin_unlock(&li->lock); |
940 | wake_up_interruptible(li->wq); | 911 | kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu)); |
941 | kvm_get_vcpu(kvm, sigcpu)->preempted = true; | ||
942 | spin_unlock_bh(&li->lock); | ||
943 | unlock_fi: | 912 | unlock_fi: |
944 | spin_unlock(&fi->lock); | 913 | spin_unlock(&fi->lock); |
945 | mutex_unlock(&kvm->lock); | 914 | mutex_unlock(&kvm->lock); |
@@ -1081,7 +1050,7 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
1081 | 1050 | ||
1082 | mutex_lock(&vcpu->kvm->lock); | 1051 | mutex_lock(&vcpu->kvm->lock); |
1083 | li = &vcpu->arch.local_int; | 1052 | li = &vcpu->arch.local_int; |
1084 | spin_lock_bh(&li->lock); | 1053 | spin_lock(&li->lock); |
1085 | if (inti->type == KVM_S390_PROGRAM_INT) | 1054 | if (inti->type == KVM_S390_PROGRAM_INT) |
1086 | list_add(&inti->list, &li->list); | 1055 | list_add(&inti->list, &li->list); |
1087 | else | 1056 | else |
@@ -1090,11 +1059,9 @@ int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, | |||
1090 | if (inti->type == KVM_S390_SIGP_STOP) | 1059 | if (inti->type == KVM_S390_SIGP_STOP) |
1091 | li->action_bits |= ACTION_STOP_ON_STOP; | 1060 | li->action_bits |= ACTION_STOP_ON_STOP; |
1092 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); | 1061 | atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags); |
1093 | if (waitqueue_active(&vcpu->wq)) | 1062 | spin_unlock(&li->lock); |
1094 | wake_up_interruptible(&vcpu->wq); | ||
1095 | vcpu->preempted = true; | ||
1096 | spin_unlock_bh(&li->lock); | ||
1097 | mutex_unlock(&vcpu->kvm->lock); | 1063 | mutex_unlock(&vcpu->kvm->lock); |
1064 | kvm_s390_vcpu_wakeup(vcpu); | ||
1098 | return 0; | 1065 | return 0; |
1099 | } | 1066 | } |
1100 | 1067 | ||
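Note the reordering in the hunk above: the wakeup now happens after li->lock (and the kvm mutex) are dropped. A fragment-style sketch of the resulting pattern, using the types of the surrounding file (illustrative, not the literal patch):

    /* Publish the interrupt under the lock, wake outside of it. The
     * waiter re-checks the pending lists after waking, so a wakeup
     * issued after unlock cannot be lost. */
    static void queue_irq_and_wake(struct kvm_vcpu *vcpu,
                                   struct kvm_s390_interrupt_info *inti)
    {
        struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

        spin_lock(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        spin_unlock(&li->lock);
        kvm_s390_vcpu_wakeup(vcpu);    /* outside the critical section */
    }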
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 2f3e14fe91a4..339b34a02fb8 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -166,7 +166,9 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
166 | case KVM_CAP_IOEVENTFD: | 166 | case KVM_CAP_IOEVENTFD: |
167 | case KVM_CAP_DEVICE_CTRL: | 167 | case KVM_CAP_DEVICE_CTRL: |
168 | case KVM_CAP_ENABLE_CAP_VM: | 168 | case KVM_CAP_ENABLE_CAP_VM: |
169 | case KVM_CAP_S390_IRQCHIP: | ||
169 | case KVM_CAP_VM_ATTRIBUTES: | 170 | case KVM_CAP_VM_ATTRIBUTES: |
171 | case KVM_CAP_MP_STATE: | ||
170 | r = 1; | 172 | r = 1; |
171 | break; | 173 | break; |
172 | case KVM_CAP_NR_VCPUS: | 174 | case KVM_CAP_NR_VCPUS: |
@@ -595,7 +597,8 @@ static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu) | |||
595 | vcpu->arch.sie_block->pp = 0; | 597 | vcpu->arch.sie_block->pp = 0; |
596 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; | 598 | vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID; |
597 | kvm_clear_async_pf_completion_queue(vcpu); | 599 | kvm_clear_async_pf_completion_queue(vcpu); |
598 | kvm_s390_vcpu_stop(vcpu); | 600 | if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) |
601 | kvm_s390_vcpu_stop(vcpu); | ||
599 | kvm_s390_clear_local_irqs(vcpu); | 602 | kvm_s390_clear_local_irqs(vcpu); |
600 | } | 603 | } |
601 | 604 | ||
@@ -647,8 +650,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | |||
647 | return rc; | 650 | return rc; |
648 | } | 651 | } |
649 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); | 652 | hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); |
650 | tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet, | ||
651 | (unsigned long) vcpu); | ||
652 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; | 653 | vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup; |
653 | get_cpu_id(&vcpu->arch.cpu_id); | 654 | get_cpu_id(&vcpu->arch.cpu_id); |
654 | vcpu->arch.cpu_id.version = 0xff; | 655 | vcpu->arch.cpu_id.version = 0xff; |
@@ -926,7 +927,7 @@ static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw) | |||
926 | { | 927 | { |
927 | int rc = 0; | 928 | int rc = 0; |
928 | 929 | ||
929 | if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED)) | 930 | if (!is_vcpu_stopped(vcpu)) |
930 | rc = -EBUSY; | 931 | rc = -EBUSY; |
931 | else { | 932 | else { |
932 | vcpu->run->psw_mask = psw.mask; | 933 | vcpu->run->psw_mask = psw.mask; |
@@ -980,13 +981,34 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | |||
980 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | 981 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, |
981 | struct kvm_mp_state *mp_state) | 982 | struct kvm_mp_state *mp_state) |
982 | { | 983 | { |
983 | return -EINVAL; /* not implemented yet */ | 984 | /* CHECK_STOP and LOAD are not supported yet */ |
985 | return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED : | ||
986 | KVM_MP_STATE_OPERATING; | ||
984 | } | 987 | } |
985 | 988 | ||
986 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | 989 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, |
987 | struct kvm_mp_state *mp_state) | 990 | struct kvm_mp_state *mp_state) |
988 | { | 991 | { |
989 | return -EINVAL; /* not implemented yet */ | 992 | int rc = 0; |
993 | |||
994 | /* user space knows about this interface - let it control the state */ | ||
995 | vcpu->kvm->arch.user_cpu_state_ctrl = 1; | ||
996 | |||
997 | switch (mp_state->mp_state) { | ||
998 | case KVM_MP_STATE_STOPPED: | ||
999 | kvm_s390_vcpu_stop(vcpu); | ||
1000 | break; | ||
1001 | case KVM_MP_STATE_OPERATING: | ||
1002 | kvm_s390_vcpu_start(vcpu); | ||
1003 | break; | ||
1004 | case KVM_MP_STATE_LOAD: | ||
1005 | case KVM_MP_STATE_CHECK_STOP: | ||
1006 | /* fall through - CHECK_STOP and LOAD are not supported yet */ | ||
1007 | default: | ||
1008 | rc = -ENXIO; | ||
1009 | } | ||
1010 | |||
1011 | return rc; | ||
990 | } | 1012 | } |
991 | 1013 | ||
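With KVM_CAP_MP_STATE now advertised, user space can drive s390 vcpu run states directly. A hedged userspace sketch (assumes a vcpu fd from KVM_CREATE_VCPU and headers that define KVM_MP_STATE_STOPPED/OPERATING; error handling trimmed):

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <sys/ioctl.h>

    /* Stop a vcpu, read the state back, then restart it. The first
     * KVM_SET_MP_STATE call also flips the VM to user-controlled cpu
     * states (kvm->arch.user_cpu_state_ctrl above). */
    static int toggle_vcpu(int vcpu_fd)
    {
        struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };

        if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
            return -1;
        if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
            return -1;
        printf("mp_state now %u\n", mp.mp_state);

        mp.mp_state = KVM_MP_STATE_OPERATING;
        return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
    }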
992 | bool kvm_s390_cmma_enabled(struct kvm *kvm) | 1014 | bool kvm_s390_cmma_enabled(struct kvm *kvm) |
@@ -1045,6 +1067,9 @@ retry: | |||
1045 | goto retry; | 1067 | goto retry; |
1046 | } | 1068 | } |
1047 | 1069 | ||
1070 | /* nothing to do, just clear the request */ | ||
1071 | clear_bit(KVM_REQ_UNHALT, &vcpu->requests); | ||
1072 | |||
1048 | return 0; | 1073 | return 0; |
1049 | } | 1074 | } |
1050 | 1075 | ||
@@ -1284,7 +1309,13 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1284 | if (vcpu->sigset_active) | 1309 | if (vcpu->sigset_active) |
1285 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); | 1310 | sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); |
1286 | 1311 | ||
1287 | kvm_s390_vcpu_start(vcpu); | 1312 | if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) { |
1313 | kvm_s390_vcpu_start(vcpu); | ||
1314 | } else if (is_vcpu_stopped(vcpu)) { | ||
1315 | pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n", | ||
1316 | vcpu->vcpu_id); | ||
1317 | return -EINVAL; | ||
1318 | } | ||
1288 | 1319 | ||
1289 | switch (kvm_run->exit_reason) { | 1320 | switch (kvm_run->exit_reason) { |
1290 | case KVM_EXIT_S390_SIEIC: | 1321 | case KVM_EXIT_S390_SIEIC: |
@@ -1413,11 +1444,6 @@ int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr) | |||
1413 | return kvm_s390_store_status_unloaded(vcpu, addr); | 1444 | return kvm_s390_store_status_unloaded(vcpu, addr); |
1414 | } | 1445 | } |
1415 | 1446 | ||
1416 | static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) | ||
1417 | { | ||
1418 | return atomic_read(&(vcpu)->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; | ||
1419 | } | ||
1420 | |||
1421 | static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) | 1447 | static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu) |
1422 | { | 1448 | { |
1423 | kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); | 1449 | kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu); |
@@ -1451,7 +1477,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) | |||
1451 | 1477 | ||
1452 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); | 1478 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1); |
1453 | /* Only one cpu at a time may enter/leave the STOPPED state. */ | 1479 | /* Only one cpu at a time may enter/leave the STOPPED state. */ |
1454 | spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); | 1480 | spin_lock(&vcpu->kvm->arch.start_stop_lock); |
1455 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); | 1481 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); |
1456 | 1482 | ||
1457 | for (i = 0; i < online_vcpus; i++) { | 1483 | for (i = 0; i < online_vcpus; i++) { |
@@ -1477,7 +1503,7 @@ void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu) | |||
1477 | * Let's play safe and flush the VCPU at startup. | 1503 | * Let's play safe and flush the VCPU at startup. |
1478 | */ | 1504 | */ |
1479 | vcpu->arch.sie_block->ihcpu = 0xffff; | 1505 | vcpu->arch.sie_block->ihcpu = 0xffff; |
1480 | spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); | 1506 | spin_unlock(&vcpu->kvm->arch.start_stop_lock); |
1481 | return; | 1507 | return; |
1482 | } | 1508 | } |
1483 | 1509 | ||
@@ -1491,10 +1517,18 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) | |||
1491 | 1517 | ||
1492 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); | 1518 | trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0); |
1493 | /* Only one cpu at a time may enter/leave the STOPPED state. */ | 1519 | /* Only one cpu at a time may enter/leave the STOPPED state. */ |
1494 | spin_lock_bh(&vcpu->kvm->arch.start_stop_lock); | 1520 | spin_lock(&vcpu->kvm->arch.start_stop_lock); |
1495 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); | 1521 | online_vcpus = atomic_read(&vcpu->kvm->online_vcpus); |
1496 | 1522 | ||
1523 | /* Need to lock access to action_bits to avoid a SIGP race condition */ | ||
1524 | spin_lock(&vcpu->arch.local_int.lock); | ||
1497 | atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); | 1525 | atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags); |
1526 | |||
1527 | /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */ ||
1528 | vcpu->arch.local_int.action_bits &= | ||
1529 | ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP); | ||
1530 | spin_unlock(&vcpu->arch.local_int.lock); | ||
1531 | |||
1498 | __disable_ibs_on_vcpu(vcpu); | 1532 | __disable_ibs_on_vcpu(vcpu); |
1499 | 1533 | ||
1500 | for (i = 0; i < online_vcpus; i++) { | 1534 | for (i = 0; i < online_vcpus; i++) { |
@@ -1512,7 +1546,7 @@ void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu) | |||
1512 | __enable_ibs_on_vcpu(started_vcpu); | 1546 | __enable_ibs_on_vcpu(started_vcpu); |
1513 | } | 1547 | } |
1514 | 1548 | ||
1515 | spin_unlock_bh(&vcpu->kvm->arch.start_stop_lock); | 1549 | spin_unlock(&vcpu->kvm->arch.start_stop_lock); |
1516 | return; | 1550 | return; |
1517 | } | 1551 | } |
1518 | 1552 | ||
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h index a8655ed31616..3862fa2cefe0 100644 --- a/arch/s390/kvm/kvm-s390.h +++ b/arch/s390/kvm/kvm-s390.h | |||
@@ -45,9 +45,9 @@ do { \ | |||
45 | d_args); \ | 45 | d_args); \ |
46 | } while (0) | 46 | } while (0) |
47 | 47 | ||
48 | static inline int __cpu_is_stopped(struct kvm_vcpu *vcpu) | 48 | static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu) |
49 | { | 49 | { |
50 | return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOP_INT; | 50 | return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED; |
51 | } | 51 | } |
52 | 52 | ||
53 | static inline int kvm_is_ucontrol(struct kvm *kvm) | 53 | static inline int kvm_is_ucontrol(struct kvm *kvm) |
@@ -129,9 +129,15 @@ static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc) | |||
129 | vcpu->arch.sie_block->gpsw.mask |= cc << 44; | 129 | vcpu->arch.sie_block->gpsw.mask |= cc << 44; |
130 | } | 130 | } |
131 | 131 | ||
132 | /* are cpu states controlled by user space */ | ||
133 | static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm) | ||
134 | { | ||
135 | return kvm->arch.user_cpu_state_ctrl != 0; | ||
136 | } | ||
137 | |||
132 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); | 138 | int kvm_s390_handle_wait(struct kvm_vcpu *vcpu); |
139 | void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu); | ||
133 | enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); | 140 | enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer); |
134 | void kvm_s390_tasklet(unsigned long parm); | ||
135 | void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); | 141 | void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu); |
136 | void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); | 142 | void kvm_s390_deliver_pending_machine_checks(struct kvm_vcpu *vcpu); |
137 | void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu); | 143 | void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu); |
diff --git a/arch/s390/kvm/sigp.c b/arch/s390/kvm/sigp.c index 43079a48cc98..cf243ba3d50f 100644 --- a/arch/s390/kvm/sigp.c +++ b/arch/s390/kvm/sigp.c | |||
@@ -125,8 +125,9 @@ static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
125 | return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; | 125 | return rc ? rc : SIGP_CC_ORDER_CODE_ACCEPTED; |
126 | } | 126 | } |
127 | 127 | ||
128 | static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) | 128 | static int __inject_sigp_stop(struct kvm_vcpu *dst_vcpu, int action) |
129 | { | 129 | { |
130 | struct kvm_s390_local_interrupt *li = &dst_vcpu->arch.local_int; | ||
130 | struct kvm_s390_interrupt_info *inti; | 131 | struct kvm_s390_interrupt_info *inti; |
131 | int rc = SIGP_CC_ORDER_CODE_ACCEPTED; | 132 | int rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
132 | 133 | ||
@@ -135,7 +136,13 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) | |||
135 | return -ENOMEM; | 136 | return -ENOMEM; |
136 | inti->type = KVM_S390_SIGP_STOP; | 137 | inti->type = KVM_S390_SIGP_STOP; |
137 | 138 | ||
138 | spin_lock_bh(&li->lock); | 139 | spin_lock(&li->lock); |
140 | if (li->action_bits & ACTION_STOP_ON_STOP) { | ||
141 | /* another SIGP STOP is pending */ | ||
142 | kfree(inti); | ||
143 | rc = SIGP_CC_BUSY; | ||
144 | goto out; | ||
145 | } | ||
139 | if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { | 146 | if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { |
140 | kfree(inti); | 147 | kfree(inti); |
141 | if ((action & ACTION_STORE_ON_STOP) != 0) | 148 | if ((action & ACTION_STORE_ON_STOP) != 0) |
@@ -144,19 +151,17 @@ static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action) | |||
144 | } | 151 | } |
145 | list_add_tail(&inti->list, &li->list); | 152 | list_add_tail(&inti->list, &li->list); |
146 | atomic_set(&li->active, 1); | 153 | atomic_set(&li->active, 1); |
147 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); | ||
148 | li->action_bits |= action; | 154 | li->action_bits |= action; |
149 | if (waitqueue_active(li->wq)) | 155 | atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags); |
150 | wake_up_interruptible(li->wq); | 156 | kvm_s390_vcpu_wakeup(dst_vcpu); |
151 | out: | 157 | out: |
152 | spin_unlock_bh(&li->lock); | 158 | spin_unlock(&li->lock); |
153 | 159 | ||
154 | return rc; | 160 | return rc; |
155 | } | 161 | } |
156 | 162 | ||
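The busy check added above changes the guest-visible contract: a SIGP STOP issued while a previous one is still pending now completes with condition code 2 (busy), which the guest is expected to retry. A guest-side sketch (sigp() is a hypothetical wrapper around the SIGP instruction; 0x05 is the architected STOP order):

    #define SIGP_CC_BUSY 2

    /* Hypothetical guest helper: reissue SIGP STOP until the order is
     * accepted, instead of the host queueing duplicate stop requests. */
    extern int sigp(unsigned short cpu_addr, unsigned char order,
                    unsigned long parm, unsigned long *status);

    static void stop_cpu(unsigned short cpu_addr)
    {
        unsigned long status;

        while (sigp(cpu_addr, 0x05 /* SIGP STOP */, 0, &status) ==
               SIGP_CC_BUSY)
            ;    /* busy: a previous STOP is still being processed */
    }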
157 | static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) | 163 | static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) |
158 | { | 164 | { |
159 | struct kvm_s390_local_interrupt *li; | ||
160 | struct kvm_vcpu *dst_vcpu = NULL; | 165 | struct kvm_vcpu *dst_vcpu = NULL; |
161 | int rc; | 166 | int rc; |
162 | 167 | ||
@@ -166,9 +171,8 @@ static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action) | |||
166 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | 171 | dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
167 | if (!dst_vcpu) | 172 | if (!dst_vcpu) |
168 | return SIGP_CC_NOT_OPERATIONAL; | 173 | return SIGP_CC_NOT_OPERATIONAL; |
169 | li = &dst_vcpu->arch.local_int; | ||
170 | 174 | ||
171 | rc = __inject_sigp_stop(li, action); | 175 | rc = __inject_sigp_stop(dst_vcpu, action); |
172 | 176 | ||
173 | VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); | 177 | VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr); |
174 | 178 | ||
@@ -238,7 +242,7 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
238 | if (!inti) | 242 | if (!inti) |
239 | return SIGP_CC_BUSY; | 243 | return SIGP_CC_BUSY; |
240 | 244 | ||
241 | spin_lock_bh(&li->lock); | 245 | spin_lock(&li->lock); |
242 | /* cpu must be in stopped state */ | 246 | /* cpu must be in stopped state */ |
243 | if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { | 247 | if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) { |
244 | *reg &= 0xffffffff00000000UL; | 248 | *reg &= 0xffffffff00000000UL; |
@@ -253,13 +257,12 @@ static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address, | |||
253 | 257 | ||
254 | list_add_tail(&inti->list, &li->list); | 258 | list_add_tail(&inti->list, &li->list); |
255 | atomic_set(&li->active, 1); | 259 | atomic_set(&li->active, 1); |
256 | if (waitqueue_active(li->wq)) | 260 | kvm_s390_vcpu_wakeup(dst_vcpu); |
257 | wake_up_interruptible(li->wq); | ||
258 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; | 261 | rc = SIGP_CC_ORDER_CODE_ACCEPTED; |
259 | 262 | ||
260 | VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); | 263 | VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address); |
261 | out_li: | 264 | out_li: |
262 | spin_unlock_bh(&li->lock); | 265 | spin_unlock(&li->lock); |
263 | return rc; | 266 | return rc; |
264 | } | 267 | } |
265 | 268 | ||
@@ -275,9 +278,9 @@ static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id, | |||
275 | if (!dst_vcpu) | 278 | if (!dst_vcpu) |
276 | return SIGP_CC_NOT_OPERATIONAL; | 279 | return SIGP_CC_NOT_OPERATIONAL; |
277 | 280 | ||
278 | spin_lock_bh(&dst_vcpu->arch.local_int.lock); | 281 | spin_lock(&dst_vcpu->arch.local_int.lock); |
279 | flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); | 282 | flags = atomic_read(dst_vcpu->arch.local_int.cpuflags); |
280 | spin_unlock_bh(&dst_vcpu->arch.local_int.lock); | 283 | spin_unlock(&dst_vcpu->arch.local_int.lock); |
281 | if (!(flags & CPUSTAT_STOPPED)) { | 284 | if (!(flags & CPUSTAT_STOPPED)) { |
282 | *reg &= 0xffffffff00000000UL; | 285 | *reg &= 0xffffffff00000000UL; |
283 | *reg |= SIGP_STATUS_INCORRECT_STATE; | 286 | *reg |= SIGP_STATUS_INCORRECT_STATE; |
@@ -338,10 +341,10 @@ static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr) | |||
338 | if (!dst_vcpu) | 341 | if (!dst_vcpu) |
339 | return SIGP_CC_NOT_OPERATIONAL; | 342 | return SIGP_CC_NOT_OPERATIONAL; |
340 | li = &dst_vcpu->arch.local_int; | 343 | li = &dst_vcpu->arch.local_int; |
341 | spin_lock_bh(&li->lock); | 344 | spin_lock(&li->lock); |
342 | if (li->action_bits & ACTION_STOP_ON_STOP) | 345 | if (li->action_bits & ACTION_STOP_ON_STOP) |
343 | rc = SIGP_CC_BUSY; | 346 | rc = SIGP_CC_BUSY; |
344 | spin_unlock_bh(&li->lock); | 347 | spin_unlock(&li->lock); |
345 | 348 | ||
346 | return rc; | 349 | return rc; |
347 | } | 350 | } |
@@ -461,12 +464,7 @@ int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu) | |||
461 | dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); | 464 | dest_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr); |
462 | BUG_ON(dest_vcpu == NULL); | 465 | BUG_ON(dest_vcpu == NULL); |
463 | 466 | ||
464 | spin_lock_bh(&dest_vcpu->arch.local_int.lock); | 467 | kvm_s390_vcpu_wakeup(dest_vcpu); |
465 | if (waitqueue_active(&dest_vcpu->wq)) | ||
466 | wake_up_interruptible(&dest_vcpu->wq); | ||
467 | dest_vcpu->preempted = true; | ||
468 | spin_unlock_bh(&dest_vcpu->arch.local_int.lock); | ||
469 | |||
470 | kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED); | 468 | kvm_s390_set_psw_cc(vcpu, SIGP_CC_ORDER_CODE_ACCEPTED); |
471 | return 0; | 469 | return 0; |
472 | } | 470 | } |
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 9ddc51eeb8d6..30de42730b2f 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c | |||
@@ -48,13 +48,10 @@ | |||
48 | static LIST_HEAD(zpci_list); | 48 | static LIST_HEAD(zpci_list); |
49 | static DEFINE_SPINLOCK(zpci_list_lock); | 49 | static DEFINE_SPINLOCK(zpci_list_lock); |
50 | 50 | ||
51 | static void zpci_enable_irq(struct irq_data *data); | ||
52 | static void zpci_disable_irq(struct irq_data *data); | ||
53 | |||
54 | static struct irq_chip zpci_irq_chip = { | 51 | static struct irq_chip zpci_irq_chip = { |
55 | .name = "zPCI", | 52 | .name = "zPCI", |
56 | .irq_unmask = zpci_enable_irq, | 53 | .irq_unmask = unmask_msi_irq, |
57 | .irq_mask = zpci_disable_irq, | 54 | .irq_mask = mask_msi_irq, |
58 | }; | 55 | }; |
59 | 56 | ||
60 | static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); | 57 | static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); |
@@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len) | |||
244 | return rc; | 241 | return rc; |
245 | } | 242 | } |
246 | 243 | ||
247 | static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag) | ||
248 | { | ||
249 | int offset, pos; | ||
250 | u32 mask_bits; | ||
251 | |||
252 | if (msi->msi_attrib.is_msix) { | ||
253 | offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + | ||
254 | PCI_MSIX_ENTRY_VECTOR_CTRL; | ||
255 | msi->masked = readl(msi->mask_base + offset); | ||
256 | writel(flag, msi->mask_base + offset); | ||
257 | } else if (msi->msi_attrib.maskbit) { | ||
258 | pos = (long) msi->mask_base; | ||
259 | pci_read_config_dword(msi->dev, pos, &mask_bits); | ||
260 | mask_bits &= ~(mask); | ||
261 | mask_bits |= flag & mask; | ||
262 | pci_write_config_dword(msi->dev, pos, mask_bits); | ||
263 | } else | ||
264 | return 0; | ||
265 | |||
266 | msi->msi_attrib.maskbit = !!flag; | ||
267 | return 1; | ||
268 | } | ||
269 | |||
270 | static void zpci_enable_irq(struct irq_data *data) | ||
271 | { | ||
272 | struct msi_desc *msi = irq_get_msi_desc(data->irq); | ||
273 | |||
274 | zpci_msi_set_mask_bits(msi, 1, 0); | ||
275 | } | ||
276 | |||
277 | static void zpci_disable_irq(struct irq_data *data) | ||
278 | { | ||
279 | struct msi_desc *msi = irq_get_msi_desc(data->irq); | ||
280 | |||
281 | zpci_msi_set_mask_bits(msi, 1, 1); | ||
282 | } | ||
283 | |||
284 | void pcibios_fixup_bus(struct pci_bus *bus) | 244 | void pcibios_fixup_bus(struct pci_bus *bus) |
285 | { | 245 | { |
286 | } | 246 | } |
@@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev) | |||
487 | 447 | ||
488 | /* Release MSI interrupts */ | 448 | /* Release MSI interrupts */ |
489 | list_for_each_entry(msi, &pdev->msi_list, list) { | 449 | list_for_each_entry(msi, &pdev->msi_list, list) { |
490 | zpci_msi_set_mask_bits(msi, 1, 1); | 450 | if (msi->msi_attrib.is_msix) |
451 | default_msix_mask_irq(msi, 1); | ||
452 | else | ||
453 | default_msi_mask_irq(msi, 1, 1); | ||
491 | irq_set_msi_desc(msi->irq, NULL); | 454 | irq_set_msi_desc(msi->irq, NULL); |
492 | irq_free_desc(msi->irq); | 455 | irq_free_desc(msi->irq); |
493 | msi->msg.address_lo = 0; | 456 | msi->msg.address_lo = 0; |
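The zPCI conversion above drops roughly forty lines of hand-rolled MSI mask handling in favor of the generic helpers, which already know whether to mask via the MSI-X vector-control word or the MSI mask-bit capability. A sketch of the resulting irq_chip shape (demo names are illustrative):

    #include <linux/irq.h>
    #include <linux/msi.h>

    /* Defer masking to the generic PCI MSI code instead of poking the
     * mask bits by hand in the driver. */
    static struct irq_chip demo_msi_chip = {
        .name       = "demo-MSI",
        .irq_mask   = mask_msi_irq,
        .irq_unmask = unmask_msi_irq,
    };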
diff --git a/arch/score/include/asm/processor.h b/arch/score/include/asm/processor.h index d9a922d8711b..851f441991d2 100644 --- a/arch/score/include/asm/processor.h +++ b/arch/score/include/asm/processor.h | |||
@@ -24,6 +24,7 @@ extern unsigned long get_wchan(struct task_struct *p); | |||
24 | #define current_text_addr() ({ __label__ _l; _l: &&_l; }) | 24 | #define current_text_addr() ({ __label__ _l; _l: &&_l; }) |
25 | 25 | ||
26 | #define cpu_relax() barrier() | 26 | #define cpu_relax() barrier() |
27 | #define cpu_relax_lowlatency() cpu_relax() | ||
27 | #define release_thread(thread) do {} while (0) | 28 | #define release_thread(thread) do {} while (0) |
28 | 29 | ||
29 | /* | 30 | /* |
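cpu_relax_lowlatency() (added here and in the other processor.h hunks of this patch) exists for tight optimistic-spin loops where an architecture may want a cheaper relax than cpu_relax(); most architectures, as above, simply alias it. A hedged sketch of the kind of loop that calls it (illustrative, not the actual locking code):

    #include <linux/atomic.h>
    #include <asm/processor.h>

    /* Poll an owner word, relaxing between iterations with the
     * low-latency variant so the spin itself stays cheap. */
    static void spin_until_free(atomic_t *owner)
    {
        while (atomic_read(owner) != 0)
            cpu_relax_lowlatency();
    }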
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 834b67c4db5a..aa2df3eaeb29 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -57,7 +57,6 @@ config SUPERH32 | |||
57 | select HAVE_FUNCTION_TRACER | 57 | select HAVE_FUNCTION_TRACER |
58 | select HAVE_FTRACE_MCOUNT_RECORD | 58 | select HAVE_FTRACE_MCOUNT_RECORD |
59 | select HAVE_DYNAMIC_FTRACE | 59 | select HAVE_DYNAMIC_FTRACE |
60 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
61 | select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE | 60 | select HAVE_FTRACE_NMI_ENTER if DYNAMIC_FTRACE |
62 | select ARCH_WANT_IPC_PARSE_VERSION | 61 | select ARCH_WANT_IPC_PARSE_VERSION |
63 | select HAVE_FUNCTION_GRAPH_TRACER | 62 | select HAVE_FUNCTION_GRAPH_TRACER |
diff --git a/arch/sh/Makefile b/arch/sh/Makefile index d4d16e4be07c..bf5b3f5f4962 100644 --- a/arch/sh/Makefile +++ b/arch/sh/Makefile | |||
@@ -32,7 +32,8 @@ endif | |||
32 | 32 | ||
33 | cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,) | 33 | cflags-$(CONFIG_CPU_SH2) := $(call cc-option,-m2,) |
34 | cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \ | 34 | cflags-$(CONFIG_CPU_SH2A) += $(call cc-option,-m2a,) \ |
35 | $(call cc-option,-m2a-nofpu,) | 35 | $(call cc-option,-m2a-nofpu,) \ |
36 | $(call cc-option,-m4-nofpu,) | ||
36 | cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,) | 37 | cflags-$(CONFIG_CPU_SH3) := $(call cc-option,-m3,) |
37 | cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \ | 38 | cflags-$(CONFIG_CPU_SH4) := $(call cc-option,-m4,) \ |
38 | $(call cc-option,-mno-implicit-fp,-m4-nofpu) | 39 | $(call cc-option,-mno-implicit-fp,-m4-nofpu) |
diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index 5448f9bbf4ab..1506897648aa 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h | |||
@@ -97,6 +97,7 @@ extern struct sh_cpuinfo cpu_data[]; | |||
97 | 97 | ||
98 | #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") | 98 | #define cpu_sleep() __asm__ __volatile__ ("sleep" : : : "memory") |
99 | #define cpu_relax() barrier() | 99 | #define cpu_relax() barrier() |
100 | #define cpu_relax_lowlatency() cpu_relax() | ||
100 | 101 | ||
101 | void default_idle(void); | 102 | void default_idle(void); |
102 | void stop_this_cpu(void *); | 103 | void stop_this_cpu(void *); |
diff --git a/arch/sh/kernel/ftrace.c b/arch/sh/kernel/ftrace.c index 3c74f53db6db..079d70e6d74b 100644 --- a/arch/sh/kernel/ftrace.c +++ b/arch/sh/kernel/ftrace.c | |||
@@ -344,6 +344,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
344 | struct ftrace_graph_ent trace; | 344 | struct ftrace_graph_ent trace; |
345 | unsigned long return_hooker = (unsigned long)&return_to_handler; | 345 | unsigned long return_hooker = (unsigned long)&return_to_handler; |
346 | 346 | ||
347 | if (unlikely(ftrace_graph_is_dead())) | ||
348 | return; | ||
349 | |||
347 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 350 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
348 | return; | 351 | return; |
349 | 352 | ||
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 02331672b6db..7cfd7f153966 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c | |||
@@ -129,14 +129,6 @@ static int __hw_perf_event_init(struct perf_event *event) | |||
129 | return -ENODEV; | 129 | return -ENODEV; |
130 | 130 | ||
131 | /* | 131 | /* |
132 | * All of the on-chip counters are "limited", in that they have | ||
133 | * no interrupts, and are therefore unable to do sampling without | ||
134 | * further work and timer assistance. | ||
135 | */ | ||
136 | if (hwc->sample_period) | ||
137 | return -EINVAL; | ||
138 | |||
139 | /* | ||
140 | * See if we need to reserve the counter. | 132 | * See if we need to reserve the counter. |
141 | * | 133 | * |
142 | * If no events are currently in use, then we have to take a | 134 | * If no events are currently in use, then we have to take a |
@@ -392,6 +384,13 @@ int register_sh_pmu(struct sh_pmu *_pmu) | |||
392 | 384 | ||
393 | pr_info("Performance Events: %s support registered\n", _pmu->name); | 385 | pr_info("Performance Events: %s support registered\n", _pmu->name); |
394 | 386 | ||
387 | /* | ||
388 | * All of the on-chip counters are "limited", in that they have | ||
389 | * no interrupts, and are therefore unable to do sampling without | ||
390 | * further work and timer assistance. | ||
391 | */ | ||
392 | pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; | ||
393 | |||
395 | WARN_ON(_pmu->num_events > MAX_HWEVENTS); | 394 | WARN_ON(_pmu->num_events > MAX_HWEVENTS); |
396 | 395 | ||
397 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | 396 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); |
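Instead of each driver rejecting sampling in its event-init path, the PMU now declares PERF_PMU_CAP_NO_INTERRUPT and the perf core refuses sampling events centrally. Roughly the check this capability enables (simplified; not a verbatim copy of the core code):

    #include <linux/perf_event.h>

    /* A PMU without an overflow interrupt cannot drive sampling, so
     * sampling events for it are rejected up front. */
    static int check_sampling(struct perf_event *event, struct pmu *pmu)
    {
        if (is_sampling_event(event) &&
            (pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT))
            return -EOPNOTSUPP;
        return 0;
    }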
diff --git a/arch/sh/lib/mcount.S b/arch/sh/lib/mcount.S index 52aa2011d753..7a8572f9d58b 100644 --- a/arch/sh/lib/mcount.S +++ b/arch/sh/lib/mcount.S | |||
@@ -92,13 +92,6 @@ mcount: | |||
92 | rts | 92 | rts |
93 | nop | 93 | nop |
94 | #else | 94 | #else |
95 | #ifndef CONFIG_DYNAMIC_FTRACE | ||
96 | mov.l .Lfunction_trace_stop, r0 | ||
97 | mov.l @r0, r0 | ||
98 | tst r0, r0 | ||
99 | bf ftrace_stub | ||
100 | #endif | ||
101 | |||
102 | MCOUNT_ENTER() | 95 | MCOUNT_ENTER() |
103 | 96 | ||
104 | #ifdef CONFIG_DYNAMIC_FTRACE | 97 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -174,11 +167,6 @@ ftrace_graph_call: | |||
174 | 167 | ||
175 | .globl ftrace_caller | 168 | .globl ftrace_caller |
176 | ftrace_caller: | 169 | ftrace_caller: |
177 | mov.l .Lfunction_trace_stop, r0 | ||
178 | mov.l @r0, r0 | ||
179 | tst r0, r0 | ||
180 | bf ftrace_stub | ||
181 | |||
182 | MCOUNT_ENTER() | 170 | MCOUNT_ENTER() |
183 | 171 | ||
184 | .globl ftrace_call | 172 | .globl ftrace_call |
@@ -196,8 +184,6 @@ ftrace_call: | |||
196 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 184 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
197 | 185 | ||
198 | .align 2 | 186 | .align 2 |
199 | .Lfunction_trace_stop: | ||
200 | .long function_trace_stop | ||
201 | 187 | ||
202 | /* | 188 | /* |
203 | * NOTE: From here on the locations of the .Lftrace_stub label and | 189 | * NOTE: From here on the locations of the .Lftrace_stub label and |
@@ -217,12 +203,7 @@ ftrace_stub: | |||
217 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 203 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
218 | .globl ftrace_graph_caller | 204 | .globl ftrace_graph_caller |
219 | ftrace_graph_caller: | 205 | ftrace_graph_caller: |
220 | mov.l 2f, r0 | 206 | mov.l 2f, r1 |
221 | mov.l @r0, r0 | ||
222 | tst r0, r0 | ||
223 | bt 1f | ||
224 | |||
225 | mov.l 3f, r1 | ||
226 | jmp @r1 | 207 | jmp @r1 |
227 | nop | 208 | nop |
228 | 1: | 209 | 1: |
@@ -242,8 +223,7 @@ ftrace_graph_caller: | |||
242 | MCOUNT_LEAVE() | 223 | MCOUNT_LEAVE() |
243 | 224 | ||
244 | .align 2 | 225 | .align 2 |
245 | 2: .long function_trace_stop | 226 | 2: .long skip_trace |
246 | 3: .long skip_trace | ||
247 | .Lprepare_ftrace_return: | 227 | .Lprepare_ftrace_return: |
248 | .long prepare_ftrace_return | 228 | .long prepare_ftrace_return |
249 | 229 | ||
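The assembly removed above open-coded, on every traced call, a guard equivalent to the following C; with the function_trace_stop checks retired from the arch trampolines, the per-call load-and-test disappears (an illustrative translation of the deleted instructions, not code from the patch):

    extern int function_trace_stop;

    /* What the removed mcount prologue did: bail out before invoking
     * any tracer callback whenever tracing was stopped. */
    static inline int trace_this_call(void)
    {
        if (function_trace_stop)
            return 0;    /* return straight to the caller */
        return 1;        /* fall through to ftrace_trace_function */
    }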
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 407c87d9879a..4692c90936f1 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -55,7 +55,6 @@ config SPARC64 | |||
55 | select HAVE_FUNCTION_TRACER | 55 | select HAVE_FUNCTION_TRACER |
56 | select HAVE_FUNCTION_GRAPH_TRACER | 56 | select HAVE_FUNCTION_GRAPH_TRACER |
57 | select HAVE_FUNCTION_GRAPH_FP_TEST | 57 | select HAVE_FUNCTION_GRAPH_FP_TEST |
58 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
59 | select HAVE_KRETPROBES | 58 | select HAVE_KRETPROBES |
60 | select HAVE_KPROBES | 59 | select HAVE_KPROBES |
61 | select HAVE_RCU_TABLE_FREE if SMP | 60 | select HAVE_RCU_TABLE_FREE if SMP |
diff --git a/arch/sparc/include/asm/processor_32.h b/arch/sparc/include/asm/processor_32.h index a564817bbc2e..812fd08f3e62 100644 --- a/arch/sparc/include/asm/processor_32.h +++ b/arch/sparc/include/asm/processor_32.h | |||
@@ -119,6 +119,8 @@ extern struct task_struct *last_task_used_math; | |||
119 | int do_mathemu(struct pt_regs *regs, struct task_struct *fpt); | 119 | int do_mathemu(struct pt_regs *regs, struct task_struct *fpt); |
120 | 120 | ||
121 | #define cpu_relax() barrier() | 121 | #define cpu_relax() barrier() |
122 | #define cpu_relax_lowlatency() cpu_relax() | ||
123 | |||
122 | extern void (*sparc_idle)(void); | 124 | extern void (*sparc_idle)(void); |
123 | 125 | ||
124 | #endif | 126 | #endif |
diff --git a/arch/sparc/include/asm/processor_64.h b/arch/sparc/include/asm/processor_64.h index 7028fe1a7c04..6924bdefe148 100644 --- a/arch/sparc/include/asm/processor_64.h +++ b/arch/sparc/include/asm/processor_64.h | |||
@@ -216,6 +216,7 @@ unsigned long get_wchan(struct task_struct *task); | |||
216 | "nop\n\t" \ | 216 | "nop\n\t" \ |
217 | ".previous" \ | 217 | ".previous" \ |
218 | ::: "memory") | 218 | ::: "memory") |
219 | #define cpu_relax_lowlatency() cpu_relax() | ||
219 | 220 | ||
220 | /* Prefetch support. This is tuned for UltraSPARC-III and later. | 221 | /* Prefetch support. This is tuned for UltraSPARC-III and later. |
221 | * UltraSPARC-I will treat these as nops, and UltraSPARC-II has | 222 | * UltraSPARC-I will treat these as nops, and UltraSPARC-II has |
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h index b73274fb961a..42f2bca1d338 100644 --- a/arch/sparc/include/uapi/asm/unistd.h +++ b/arch/sparc/include/uapi/asm/unistd.h | |||
@@ -410,8 +410,9 @@ | |||
410 | #define __NR_finit_module 342 | 410 | #define __NR_finit_module 342 |
411 | #define __NR_sched_setattr 343 | 411 | #define __NR_sched_setattr 343 |
412 | #define __NR_sched_getattr 344 | 412 | #define __NR_sched_getattr 344 |
413 | #define __NR_renameat2 345 | ||
413 | 414 | ||
414 | #define NR_syscalls 345 | 415 | #define NR_syscalls 346 |
415 | 416 | ||
416 | /* Bitmask values returned from kern_features system call. */ | 417 | /* Bitmask values returned from kern_features system call. */ |
417 | #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 | 418 | #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 |
diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index d066eb18650c..f834224208ed 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S | |||
@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1) | |||
48 | SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) | 48 | SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) |
49 | SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0) | 49 | SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0) |
50 | SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0) | 50 | SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0) |
51 | SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2) | ||
51 | 52 | ||
52 | .globl sys32_mmap2 | 53 | .globl sys32_mmap2 |
53 | sys32_mmap2: | 54 | sys32_mmap2: |
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index 151ace8766cc..85fe9b1087cd 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S | |||
@@ -86,3 +86,4 @@ sys_call_table: | |||
86 | /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime | 86 | /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime |
87 | /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev | 87 | /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev |
88 | /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr | 88 | /*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr |
89 | /*345*/ .long sys_renameat2 | ||
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 4bd4e2bb26cf..33ecba2826ea 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S | |||
@@ -87,6 +87,7 @@ sys_call_table32: | |||
87 | /*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime | 87 | /*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime |
88 | .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev | 88 | .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev |
89 | /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr | 89 | /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr |
90 | .word sys32_renameat2 | ||
90 | 91 | ||
91 | #endif /* CONFIG_COMPAT */ | 92 | #endif /* CONFIG_COMPAT */ |
92 | 93 | ||
@@ -165,3 +166,4 @@ sys_call_table: | |||
165 | /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime | 166 | /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime |
166 | .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev | 167 | .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev |
167 | /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr | 168 | /*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr |
169 | .word sys_renameat2 | ||
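With renameat2 wired up as syscall 345, sparc user space can reach it via syscall(2) even before libc grows a wrapper. A small sketch (the fallback __NR_renameat2 value is the sparc number from the tables above; RENAME_NOREPLACE comes from the uapi fs headers):

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_renameat2
    #define __NR_renameat2 345          /* sparc, per the tables above */
    #endif
    #ifndef RENAME_NOREPLACE
    #define RENAME_NOREPLACE (1 << 0)   /* fail instead of clobbering */
    #endif

    int main(void)
    {
        long rc = syscall(__NR_renameat2, AT_FDCWD, "old.txt",
                          AT_FDCWD, "new.txt", RENAME_NOREPLACE);
        if (rc < 0)
            perror("renameat2");
        return rc ? 1 : 0;
    }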
diff --git a/arch/sparc/lib/mcount.S b/arch/sparc/lib/mcount.S index 3ad6cbdc2163..0b0ed4d34219 100644 --- a/arch/sparc/lib/mcount.S +++ b/arch/sparc/lib/mcount.S | |||
@@ -24,10 +24,7 @@ mcount: | |||
24 | #ifdef CONFIG_DYNAMIC_FTRACE | 24 | #ifdef CONFIG_DYNAMIC_FTRACE |
25 | /* Do nothing, the retl/nop below is all we need. */ | 25 | /* Do nothing, the retl/nop below is all we need. */ |
26 | #else | 26 | #else |
27 | sethi %hi(function_trace_stop), %g1 | 27 | sethi %hi(ftrace_trace_function), %g1 |
28 | lduw [%g1 + %lo(function_trace_stop)], %g2 | ||
29 | brnz,pn %g2, 2f | ||
30 | sethi %hi(ftrace_trace_function), %g1 | ||
31 | sethi %hi(ftrace_stub), %g2 | 28 | sethi %hi(ftrace_stub), %g2 |
32 | ldx [%g1 + %lo(ftrace_trace_function)], %g1 | 29 | ldx [%g1 + %lo(ftrace_trace_function)], %g1 |
33 | or %g2, %lo(ftrace_stub), %g2 | 30 | or %g2, %lo(ftrace_stub), %g2 |
@@ -80,11 +77,8 @@ ftrace_stub: | |||
80 | .globl ftrace_caller | 77 | .globl ftrace_caller |
81 | .type ftrace_caller,#function | 78 | .type ftrace_caller,#function |
82 | ftrace_caller: | 79 | ftrace_caller: |
83 | sethi %hi(function_trace_stop), %g1 | ||
84 | mov %i7, %g2 | 80 | mov %i7, %g2 |
85 | lduw [%g1 + %lo(function_trace_stop)], %g1 | 81 | mov %fp, %g3 |
86 | brnz,pn %g1, ftrace_stub | ||
87 | mov %fp, %g3 | ||
88 | save %sp, -176, %sp | 82 | save %sp, -176, %sp |
89 | mov %g2, %o1 | 83 | mov %g2, %o1 |
90 | mov %g2, %l0 | 84 | mov %g2, %l0 |
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 4f3006b600e3..7fcd492adbfc 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig | |||
@@ -128,7 +128,6 @@ config TILEGX | |||
128 | select SPARSE_IRQ | 128 | select SPARSE_IRQ |
129 | select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ | 129 | select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ |
130 | select HAVE_FUNCTION_TRACER | 130 | select HAVE_FUNCTION_TRACER |
131 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
132 | select HAVE_FUNCTION_GRAPH_TRACER | 131 | select HAVE_FUNCTION_GRAPH_TRACER |
133 | select HAVE_DYNAMIC_FTRACE | 132 | select HAVE_DYNAMIC_FTRACE |
134 | select HAVE_FTRACE_MCOUNT_RECORD | 133 | select HAVE_FTRACE_MCOUNT_RECORD |
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h index 42323636c459..dd4f9f17e30a 100644 --- a/arch/tile/include/asm/processor.h +++ b/arch/tile/include/asm/processor.h | |||
@@ -266,6 +266,8 @@ static inline void cpu_relax(void) | |||
266 | barrier(); | 266 | barrier(); |
267 | } | 267 | } |
268 | 268 | ||
269 | #define cpu_relax_lowlatency() cpu_relax() | ||
270 | |||
269 | /* Info on this processor (see fs/proc/cpuinfo.c) */ | 271 | /* Info on this processor (see fs/proc/cpuinfo.c) */ |
270 | struct seq_operations; | 272 | struct seq_operations; |
271 | extern const struct seq_operations cpuinfo_op; | 273 | extern const struct seq_operations cpuinfo_op; |
diff --git a/arch/tile/kernel/mcount_64.S b/arch/tile/kernel/mcount_64.S index 70d7bb0c4d8f..3c2b8d5e1d1a 100644 --- a/arch/tile/kernel/mcount_64.S +++ b/arch/tile/kernel/mcount_64.S | |||
@@ -77,15 +77,6 @@ STD_ENDPROC(__mcount) | |||
77 | 77 | ||
78 | .align 64 | 78 | .align 64 |
79 | STD_ENTRY(ftrace_caller) | 79 | STD_ENTRY(ftrace_caller) |
80 | moveli r11, hw2_last(function_trace_stop) | ||
81 | { shl16insli r11, r11, hw1(function_trace_stop); move r12, lr } | ||
82 | { shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 } | ||
83 | ld r11, r11 | ||
84 | beqz r11, 1f | ||
85 | jrp r12 | ||
86 | |||
87 | 1: | ||
88 | { move r10, lr; move lr, r12 } | ||
89 | MCOUNT_SAVE_REGS | 80 | MCOUNT_SAVE_REGS |
90 | 81 | ||
91 | /* arg1: self return address */ | 82 | /* arg1: self return address */ |
@@ -119,15 +110,6 @@ STD_ENDPROC(ftrace_caller) | |||
119 | 110 | ||
120 | .align 64 | 111 | .align 64 |
121 | STD_ENTRY(__mcount) | 112 | STD_ENTRY(__mcount) |
122 | moveli r11, hw2_last(function_trace_stop) | ||
123 | { shl16insli r11, r11, hw1(function_trace_stop); move r12, lr } | ||
124 | { shl16insli r11, r11, hw0(function_trace_stop); move lr, r10 } | ||
125 | ld r11, r11 | ||
126 | beqz r11, 1f | ||
127 | jrp r12 | ||
128 | |||
129 | 1: | ||
130 | { move r10, lr; move lr, r12 } | ||
131 | { | 113 | { |
132 | moveli r11, hw2_last(ftrace_trace_function) | 114 | moveli r11, hw2_last(ftrace_trace_function) |
133 | moveli r13, hw2_last(ftrace_stub) | 115 | moveli r13, hw2_last(ftrace_stub) |
diff --git a/arch/unicore32/include/asm/processor.h b/arch/unicore32/include/asm/processor.h index 4eaa42167667..8d21b7adf26b 100644 --- a/arch/unicore32/include/asm/processor.h +++ b/arch/unicore32/include/asm/processor.h | |||
@@ -71,6 +71,7 @@ extern void release_thread(struct task_struct *); | |||
71 | unsigned long get_wchan(struct task_struct *p); | 71 | unsigned long get_wchan(struct task_struct *p); |
72 | 72 | ||
73 | #define cpu_relax() barrier() | 73 | #define cpu_relax() barrier() |
74 | #define cpu_relax_lowlatency() cpu_relax() | ||
74 | 75 | ||
75 | #define task_pt_regs(p) \ | 76 | #define task_pt_regs(p) \ |
76 | ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) | 77 | ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 43873442dee1..6b71f0417293 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -54,7 +54,6 @@ config X86 | |||
54 | select HAVE_FUNCTION_TRACER | 54 | select HAVE_FUNCTION_TRACER |
55 | select HAVE_FUNCTION_GRAPH_TRACER | 55 | select HAVE_FUNCTION_GRAPH_TRACER |
56 | select HAVE_FUNCTION_GRAPH_FP_TEST | 56 | select HAVE_FUNCTION_GRAPH_FP_TEST |
57 | select HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
58 | select HAVE_SYSCALL_TRACEPOINTS | 57 | select HAVE_SYSCALL_TRACEPOINTS |
59 | select SYSCTL_EXCEPTION_TRACE | 58 | select SYSCTL_EXCEPTION_TRACE |
60 | select HAVE_KVM | 59 | select HAVE_KVM |
@@ -1525,6 +1524,7 @@ config EFI | |||
1525 | bool "EFI runtime service support" | 1524 | bool "EFI runtime service support" |
1526 | depends on ACPI | 1525 | depends on ACPI |
1527 | select UCS2_STRING | 1526 | select UCS2_STRING |
1527 | select EFI_RUNTIME_WRAPPERS | ||
1528 | ---help--- | 1528 | ---help--- |
1529 | This enables the kernel to use EFI runtime services that are | 1529 | This enables the kernel to use EFI runtime services that are |
1530 | available (such as the EFI variable services). | 1530 | available (such as the EFI variable services). |
@@ -2406,6 +2406,10 @@ config IOSF_MBI | |||
2406 | default m | 2406 | default m |
2407 | depends on PCI | 2407 | depends on PCI |
2408 | 2408 | ||
2409 | config PMC_ATOM | ||
2410 | def_bool y | ||
2411 | depends on PCI | ||
2412 | |||
2409 | source "net/Kconfig" | 2413 | source "net/Kconfig" |
2410 | 2414 | ||
2411 | source "drivers/Kconfig" | 2415 | source "drivers/Kconfig" |
diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 33f71b01fd22..c65fd9650467 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile | |||
@@ -15,12 +15,9 @@ endif | |||
15 | # that way we can complain to the user if the CPU is insufficient. | 15 | # that way we can complain to the user if the CPU is insufficient. |
16 | # | 16 | # |
17 | # The -m16 option is supported by GCC >= 4.9 and clang >= 3.5. For | 17 | # The -m16 option is supported by GCC >= 4.9 and clang >= 3.5. For |
18 | # older versions of GCC, we need to play evil and unreliable tricks to | 18 | # older versions of GCC, include an *assembly* header to make sure that |
19 | # attempt to ensure that our asm(".code16gcc") is first in the asm | 19 | # gcc doesn't play any games behind our back. |
20 | # output. | 20 | CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/arch/x86/boot/code16gcc.h |
21 | CODE16GCC_CFLAGS := -m32 -include $(srctree)/arch/x86/boot/code16gcc.h \ | ||
22 | $(call cc-option, -fno-toplevel-reorder,\ | ||
23 | $(call cc-option, -fno-unit-at-a-time)) | ||
24 | M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS)) | 21 | M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS)) |
25 | 22 | ||
26 | REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \ | 23 | REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \ |
diff --git a/arch/x86/boot/code16gcc.h b/arch/x86/boot/code16gcc.h index d93e48010b61..5ff426535397 100644 --- a/arch/x86/boot/code16gcc.h +++ b/arch/x86/boot/code16gcc.h | |||
@@ -1,15 +1,11 @@ | |||
1 | /* | 1 | # |
2 | * code16gcc.h | 2 | # code16gcc.h |
3 | * | 3 | # |
4 | * This file is -include'd when compiling 16-bit C code. | 4 | # This file is added to the assembler via -Wa when compiling 16-bit C code. |
5 | * Note: this asm() needs to be emitted before gcc emits any code. | 5 | # This is done this way instead of via asm() to make sure gcc does not reorder |
6 | * Depending on gcc version, this requires -fno-unit-at-a-time or | 6 | # things around us. |
7 | * -fno-toplevel-reorder. | 7 | # |
8 | * | 8 | # gcc 4.9+ has a real -m16 option so we can drop this hack long term. |
9 | * Hopefully gcc will eventually have a real -m16 option so we can | 9 | # |
10 | * drop this hack long term. | ||
11 | */ | ||
12 | 10 | ||
13 | #ifndef __ASSEMBLY__ | 11 | .code16gcc |
14 | asm(".code16gcc"); | ||
15 | #endif | ||
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index 0fcd9133790c..7a801a310e37 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile | |||
@@ -33,7 +33,8 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ | |||
33 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone | 33 | $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone |
34 | 34 | ||
35 | ifeq ($(CONFIG_EFI_STUB), y) | 35 | ifeq ($(CONFIG_EFI_STUB), y) |
36 | VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o | 36 | VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \ |
37 | $(objtree)/drivers/firmware/efi/libstub/lib.a | ||
37 | endif | 38 | endif |
38 | 39 | ||
39 | $(obj)/vmlinux: $(VMLINUX_OBJS) FORCE | 40 | $(obj)/vmlinux: $(VMLINUX_OBJS) FORCE |
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 0331d765c2bb..f277184e2ac1 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c | |||
@@ -19,10 +19,7 @@ | |||
19 | 19 | ||
20 | static efi_system_table_t *sys_table; | 20 | static efi_system_table_t *sys_table; |
21 | 21 | ||
22 | static struct efi_config *efi_early; | 22 | struct efi_config *efi_early; |
23 | |||
24 | #define efi_call_early(f, ...) \ | ||
25 | efi_early->call(efi_early->f, __VA_ARGS__); | ||
26 | 23 | ||
27 | #define BOOT_SERVICES(bits) \ | 24 | #define BOOT_SERVICES(bits) \ |
28 | static void setup_boot_services##bits(struct efi_config *c) \ | 25 | static void setup_boot_services##bits(struct efi_config *c) \ |
@@ -48,8 +45,7 @@ static void setup_boot_services##bits(struct efi_config *c) \ | |||
48 | BOOT_SERVICES(32); | 45 | BOOT_SERVICES(32); |
49 | BOOT_SERVICES(64); | 46 | BOOT_SERVICES(64); |
50 | 47 | ||
51 | static void efi_printk(efi_system_table_t *, char *); | 48 | void efi_char16_printk(efi_system_table_t *, efi_char16_t *); |
52 | static void efi_char16_printk(efi_system_table_t *, efi_char16_t *); | ||
53 | 49 | ||
54 | static efi_status_t | 50 | static efi_status_t |
55 | __file_size32(void *__fh, efi_char16_t *filename_16, | 51 | __file_size32(void *__fh, efi_char16_t *filename_16, |
@@ -156,7 +152,7 @@ grow: | |||
156 | 152 | ||
157 | return status; | 153 | return status; |
158 | } | 154 | } |
159 | static efi_status_t | 155 | efi_status_t |
160 | efi_file_size(efi_system_table_t *sys_table, void *__fh, | 156 | efi_file_size(efi_system_table_t *sys_table, void *__fh, |
161 | efi_char16_t *filename_16, void **handle, u64 *file_sz) | 157 | efi_char16_t *filename_16, void **handle, u64 *file_sz) |
162 | { | 158 | { |
@@ -166,7 +162,7 @@ efi_file_size(efi_system_table_t *sys_table, void *__fh, | |||
166 | return __file_size32(__fh, filename_16, handle, file_sz); | 162 | return __file_size32(__fh, filename_16, handle, file_sz); |
167 | } | 163 | } |
168 | 164 | ||
169 | static inline efi_status_t | 165 | efi_status_t |
170 | efi_file_read(void *handle, unsigned long *size, void *addr) | 166 | efi_file_read(void *handle, unsigned long *size, void *addr) |
171 | { | 167 | { |
172 | unsigned long func; | 168 | unsigned long func; |
@@ -184,7 +180,7 @@ efi_file_read(void *handle, unsigned long *size, void *addr) | |||
184 | } | 180 | } |
185 | } | 181 | } |
186 | 182 | ||
187 | static inline efi_status_t efi_file_close(void *handle) | 183 | efi_status_t efi_file_close(void *handle) |
188 | { | 184 | { |
189 | if (efi_early->is64) { | 185 | if (efi_early->is64) { |
190 | efi_file_handle_64_t *fh = handle; | 186 | efi_file_handle_64_t *fh = handle; |
@@ -249,7 +245,7 @@ static inline efi_status_t __open_volume64(void *__image, void **__fh) | |||
249 | return status; | 245 | return status; |
250 | } | 246 | } |
251 | 247 | ||
252 | static inline efi_status_t | 248 | efi_status_t |
253 | efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh) | 249 | efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh) |
254 | { | 250 | { |
255 | if (efi_early->is64) | 251 | if (efi_early->is64) |
@@ -258,7 +254,7 @@ efi_open_volume(efi_system_table_t *sys_table, void *__image, void **__fh) | |||
258 | return __open_volume32(__image, __fh); | 254 | return __open_volume32(__image, __fh); |
259 | } | 255 | } |
260 | 256 | ||
261 | static void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) | 257 | void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) |
262 | { | 258 | { |
263 | unsigned long output_string; | 259 | unsigned long output_string; |
264 | size_t offset; | 260 | size_t offset; |
@@ -284,8 +280,6 @@ static void efi_char16_printk(efi_system_table_t *table, efi_char16_t *str) | |||
284 | } | 280 | } |
285 | } | 281 | } |
286 | 282 | ||
287 | #include "../../../../drivers/firmware/efi/efi-stub-helper.c" | ||
288 | |||
289 | static void find_bits(unsigned long mask, u8 *pos, u8 *size) | 283 | static void find_bits(unsigned long mask, u8 *pos, u8 *size) |
290 | { | 284 | { |
291 | u8 first, len; | 285 | u8 first, len; |
@@ -1038,6 +1032,7 @@ struct boot_params *make_boot_params(struct efi_config *c) | |||
1038 | int i; | 1032 | int i; |
1039 | unsigned long ramdisk_addr; | 1033 | unsigned long ramdisk_addr; |
1040 | unsigned long ramdisk_size; | 1034 | unsigned long ramdisk_size; |
1035 | unsigned long initrd_addr_max; | ||
1041 | 1036 | ||
1042 | efi_early = c; | 1037 | efi_early = c; |
1043 | sys_table = (efi_system_table_t *)(unsigned long)efi_early->table; | 1038 | sys_table = (efi_system_table_t *)(unsigned long)efi_early->table; |
@@ -1100,14 +1095,21 @@ struct boot_params *make_boot_params(struct efi_config *c) | |||
1100 | 1095 | ||
1101 | memset(sdt, 0, sizeof(*sdt)); | 1096 | memset(sdt, 0, sizeof(*sdt)); |
1102 | 1097 | ||
1098 | if (hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G) | ||
1099 | initrd_addr_max = -1UL; | ||
1100 | else | ||
1101 | initrd_addr_max = hdr->initrd_addr_max; | ||
1102 | |||
1103 | status = handle_cmdline_files(sys_table, image, | 1103 | status = handle_cmdline_files(sys_table, image, |
1104 | (char *)(unsigned long)hdr->cmd_line_ptr, | 1104 | (char *)(unsigned long)hdr->cmd_line_ptr, |
1105 | "initrd=", hdr->initrd_addr_max, | 1105 | "initrd=", initrd_addr_max, |
1106 | &ramdisk_addr, &ramdisk_size); | 1106 | &ramdisk_addr, &ramdisk_size); |
1107 | if (status != EFI_SUCCESS) | 1107 | if (status != EFI_SUCCESS) |
1108 | goto fail2; | 1108 | goto fail2; |
1109 | hdr->ramdisk_image = ramdisk_addr; | 1109 | hdr->ramdisk_image = ramdisk_addr & 0xffffffff; |
1110 | hdr->ramdisk_size = ramdisk_size; | 1110 | hdr->ramdisk_size = ramdisk_size & 0xffffffff; |
1111 | boot_params->ext_ramdisk_image = (u64)ramdisk_addr >> 32; | ||
1112 | boot_params->ext_ramdisk_size = (u64)ramdisk_size >> 32; | ||
1111 | 1113 | ||
1112 | return boot_params; | 1114 | return boot_params; |
1113 | fail2: | 1115 | fail2: |
@@ -1374,7 +1376,10 @@ struct boot_params *efi_main(struct efi_config *c, | |||
1374 | 1376 | ||
1375 | setup_graphics(boot_params); | 1377 | setup_graphics(boot_params); |
1376 | 1378 | ||
1377 | setup_efi_pci(boot_params); | 1379 | status = setup_efi_pci(boot_params); |
1380 | if (status != EFI_SUCCESS) { | ||
1381 | efi_printk(sys_table, "setup_efi_pci() failed!\n"); | ||
1382 | } | ||
1378 | 1383 | ||
1379 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, | 1384 | status = efi_call_early(allocate_pool, EFI_LOADER_DATA, |
1380 | sizeof(*gdt), (void **)&gdt); | 1385 | sizeof(*gdt), (void **)&gdt); |
@@ -1401,16 +1406,20 @@ struct boot_params *efi_main(struct efi_config *c, | |||
1401 | hdr->init_size, hdr->init_size, | 1406 | hdr->init_size, hdr->init_size, |
1402 | hdr->pref_address, | 1407 | hdr->pref_address, |
1403 | hdr->kernel_alignment); | 1408 | hdr->kernel_alignment); |
1404 | if (status != EFI_SUCCESS) | 1409 | if (status != EFI_SUCCESS) { |
1410 | efi_printk(sys_table, "efi_relocate_kernel() failed!\n"); | ||
1405 | goto fail; | 1411 | goto fail; |
1412 | } | ||
1406 | 1413 | ||
1407 | hdr->pref_address = hdr->code32_start; | 1414 | hdr->pref_address = hdr->code32_start; |
1408 | hdr->code32_start = bzimage_addr; | 1415 | hdr->code32_start = bzimage_addr; |
1409 | } | 1416 | } |
1410 | 1417 | ||
1411 | status = exit_boot(boot_params, handle, is64); | 1418 | status = exit_boot(boot_params, handle, is64); |
1412 | if (status != EFI_SUCCESS) | 1419 | if (status != EFI_SUCCESS) { |
1420 | efi_printk(sys_table, "exit_boot() failed!\n"); | ||
1413 | goto fail; | 1421 | goto fail; |
1422 | } | ||
1414 | 1423 | ||
1415 | memset((char *)gdt->address, 0x0, gdt->size); | 1424 | memset((char *)gdt->address, 0x0, gdt->size); |
1416 | desc = (struct desc_struct *)gdt->address; | 1425 | desc = (struct desc_struct *)gdt->address; |
@@ -1470,5 +1479,6 @@ struct boot_params *efi_main(struct efi_config *c, | |||
1470 | 1479 | ||
1471 | return boot_params; | 1480 | return boot_params; |
1472 | fail: | 1481 | fail: |
1482 | efi_printk(sys_table, "efi_main() failed!\n"); | ||
1473 | return NULL; | 1483 | return NULL; |
1474 | } | 1484 | } |
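Editor's note: the make_boot_params() hunk above does two related things. It lifts the initrd ceiling to the whole address space when the kernel advertises XLF_CAN_BE_LOADED_ABOVE_4G, and it splits the now possibly >4G ramdisk address and size across the legacy 32-bit setup_header fields and the ext_ramdisk_* extension fields. A minimal sketch of that split, using stand-in struct types (the real fields live in struct setup_header and struct boot_params):

    #include <stdint.h>

    /* Stand-ins for the relevant setup_header / boot_params fields. */
    struct hdr_model { uint32_t ramdisk_image, ramdisk_size; };
    struct bp_model  { uint32_t ext_ramdisk_image, ext_ramdisk_size; };

    static void store_ramdisk(struct hdr_model *hdr, struct bp_model *bp,
                              uint64_t addr, uint64_t size)
    {
        hdr->ramdisk_image = addr & 0xffffffff;   /* low 32 bits, legacy */
        hdr->ramdisk_size  = size & 0xffffffff;
        bp->ext_ramdisk_image = addr >> 32;       /* high bits, extension */
        bp->ext_ramdisk_size  = size >> 32;
    }

Old bootloaders that only read the legacy fields keep working whenever the ramdisk lands below 4G, since the high halves are then zero.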
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h index c88c31ecad12..d487e727f1ec 100644 --- a/arch/x86/boot/compressed/eboot.h +++ b/arch/x86/boot/compressed/eboot.h | |||
@@ -103,20 +103,4 @@ struct efi_uga_draw_protocol { | |||
103 | void *blt; | 103 | void *blt; |
104 | }; | 104 | }; |
105 | 105 | ||
106 | struct efi_config { | ||
107 | u64 image_handle; | ||
108 | u64 table; | ||
109 | u64 allocate_pool; | ||
110 | u64 allocate_pages; | ||
111 | u64 get_memory_map; | ||
112 | u64 free_pool; | ||
113 | u64 free_pages; | ||
114 | u64 locate_handle; | ||
115 | u64 handle_protocol; | ||
116 | u64 exit_boot_services; | ||
117 | u64 text_output; | ||
118 | efi_status_t (*call)(unsigned long, ...); | ||
119 | bool is64; | ||
120 | } __packed; | ||
121 | |||
122 | #endif /* BOOT_COMPRESSED_EBOOT_H */ | 106 | #endif /* BOOT_COMPRESSED_EBOOT_H */ |
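Editor's note: with struct efi_config gone from this private header it can live somewhere shared, which is what lets eboot.c stop #including the stub helper source directly and instead call helpers such as efi_file_size() and efi_char16_printk() as ordinary external functions (the de-static-ing visible in the eboot.c hunks). The struct itself encodes the mixed-mode call pattern: each boot service is recorded as a raw u64 entry point, and `call` is a thunk that bridges the 32- vs 64-bit calling conventions. A rough C model of that dispatch, with illustrative names only:

    typedef unsigned long efi_status_model_t;

    /* Illustrative subset of struct efi_config. */
    struct efi_config_model {
        unsigned long long allocate_pool;              /* raw entry point */
        efi_status_model_t (*call)(unsigned long, ...); /* convention thunk */
    };

    /* Every early boot-service call goes through the thunk, never
     * through the firmware pointer directly. */
    static efi_status_model_t alloc_pool(struct efi_config_model *c,
                                         int pool_type, unsigned long size,
                                         void **buffer)
    {
        return c->call((unsigned long)c->allocate_pool,
                       pool_type, size, buffer);
    }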
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S index 7a6d43a554d7..16ef02596db2 100644 --- a/arch/x86/boot/header.S +++ b/arch/x86/boot/header.S | |||
@@ -154,7 +154,7 @@ extra_header_fields: | |||
154 | #else | 154 | #else |
155 | .quad 0 # ImageBase | 155 | .quad 0 # ImageBase |
156 | #endif | 156 | #endif |
157 | .long 0x20 # SectionAlignment | 157 | .long CONFIG_PHYSICAL_ALIGN # SectionAlignment |
158 | .long 0x20 # FileAlignment | 158 | .long 0x20 # FileAlignment |
159 | .word 0 # MajorOperatingSystemVersion | 159 | .word 0 # MajorOperatingSystemVersion |
160 | .word 0 # MinorOperatingSystemVersion | 160 | .word 0 # MinorOperatingSystemVersion |
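Editor's note: raising SectionAlignment from 0x20 to CONFIG_PHYSICAL_ALIGN makes the PE header advertise the kernel's real in-memory alignment requirement, so a conforming EFI loader can place the image at an address the kernel can run from directly rather than forcing a relocation. A sanity-check sketch, assuming the usual power-of-two alignment (0x200000, i.e. 2 MiB, by default on x86-64):

    #include <stdint.h>

    #define PHYSICAL_ALIGN 0x200000UL  /* typical CONFIG_PHYSICAL_ALIGN */

    /* What the PE SectionAlignment field now promises the loader. */
    static int is_kernel_aligned(uint64_t load_addr)
    {
        return (load_addr & (PHYSICAL_ALIGN - 1)) == 0;
    }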
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index 61d6e281898b..d551165a3159 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile | |||
@@ -14,6 +14,7 @@ obj-$(CONFIG_CRYPTO_SALSA20_586) += salsa20-i586.o | |||
14 | obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o | 14 | obj-$(CONFIG_CRYPTO_SERPENT_SSE2_586) += serpent-sse2-i586.o |
15 | 15 | ||
16 | obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o | 16 | obj-$(CONFIG_CRYPTO_AES_X86_64) += aes-x86_64.o |
17 | obj-$(CONFIG_CRYPTO_DES3_EDE_X86_64) += des3_ede-x86_64.o | ||
17 | obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o | 18 | obj-$(CONFIG_CRYPTO_CAMELLIA_X86_64) += camellia-x86_64.o |
18 | obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o | 19 | obj-$(CONFIG_CRYPTO_BLOWFISH_X86_64) += blowfish-x86_64.o |
19 | obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o | 20 | obj-$(CONFIG_CRYPTO_TWOFISH_X86_64) += twofish-x86_64.o |
@@ -52,6 +53,7 @@ salsa20-i586-y := salsa20-i586-asm_32.o salsa20_glue.o | |||
52 | serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o | 53 | serpent-sse2-i586-y := serpent-sse2-i586-asm_32.o serpent_sse2_glue.o |
53 | 54 | ||
54 | aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o | 55 | aes-x86_64-y := aes-x86_64-asm_64.o aes_glue.o |
56 | des3_ede-x86_64-y := des3_ede-asm_64.o des3_ede_glue.o | ||
55 | camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o | 57 | camellia-x86_64-y := camellia-x86_64-asm_64.o camellia_glue.o |
56 | blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o | 58 | blowfish-x86_64-y := blowfish-x86_64-asm_64.o blowfish_glue.o |
57 | twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o | 59 | twofish-x86_64-y := twofish-x86_64-asm_64.o twofish_glue.o |
@@ -76,7 +78,7 @@ ifeq ($(avx2_supported),yes) | |||
76 | endif | 78 | endif |
77 | 79 | ||
78 | aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o | 80 | aesni-intel-y := aesni-intel_asm.o aesni-intel_glue.o fpu.o |
79 | aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o | 81 | aesni-intel-$(CONFIG_64BIT) += aesni-intel_avx-x86_64.o aes_ctrby8_avx-x86_64.o |
80 | ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o | 82 | ghash-clmulni-intel-y := ghash-clmulni-intel_asm.o ghash-clmulni-intel_glue.o |
81 | sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o | 83 | sha1-ssse3-y := sha1_ssse3_asm.o sha1_ssse3_glue.o |
82 | ifeq ($(avx2_supported),yes) | 84 | ifeq ($(avx2_supported),yes) |
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S new file mode 100644 index 000000000000..f091f122ed24 --- /dev/null +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S | |||
@@ -0,0 +1,546 @@ | |||
1 | /* | ||
2 | * Implement AES CTR mode by8 optimization with AVX instructions. (x86_64) | ||
3 | * | ||
4 | * This is AES128/192/256 CTR mode optimization implementation. It requires | ||
5 | * the support of Intel(R) AESNI and AVX instructions. | ||
6 | * | ||
7 | * This work was inspired by the AES CTR mode optimization published | ||
8 | * in Intel Optimized IPSEC Cryptographic library. | ||
9 | * Additional information on it can be found at: | ||
10 | * http://downloadcenter.intel.com/Detail_Desc.aspx?agr=Y&DwnldID=22972 | ||
11 | * | ||
12 | * This file is provided under a dual BSD/GPLv2 license. When using or | ||
13 | * redistributing this file, you may do so under either license. | ||
14 | * | ||
15 | * GPL LICENSE SUMMARY | ||
16 | * | ||
17 | * Copyright(c) 2014 Intel Corporation. | ||
18 | * | ||
19 | * This program is free software; you can redistribute it and/or modify | ||
20 | * it under the terms of version 2 of the GNU General Public License as | ||
21 | * published by the Free Software Foundation. | ||
22 | * | ||
23 | * This program is distributed in the hope that it will be useful, but | ||
24 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
25 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
26 | * General Public License for more details. | ||
27 | * | ||
28 | * Contact Information: | ||
29 | * James Guilford <james.guilford@intel.com> | ||
30 | * Sean Gulley <sean.m.gulley@intel.com> | ||
31 | * Chandramouli Narayanan <mouli@linux.intel.com> | ||
32 | * | ||
33 | * BSD LICENSE | ||
34 | * | ||
35 | * Copyright(c) 2014 Intel Corporation. | ||
36 | * | ||
37 | * Redistribution and use in source and binary forms, with or without | ||
38 | * modification, are permitted provided that the following conditions | ||
39 | * are met: | ||
40 | * | ||
41 | * Redistributions of source code must retain the above copyright | ||
42 | * notice, this list of conditions and the following disclaimer. | ||
43 | * Redistributions in binary form must reproduce the above copyright | ||
44 | * notice, this list of conditions and the following disclaimer in | ||
45 | * the documentation and/or other materials provided with the | ||
46 | * distribution. | ||
47 | * Neither the name of Intel Corporation nor the names of its | ||
48 | * contributors may be used to endorse or promote products derived | ||
49 | * from this software without specific prior written permission. | ||
50 | * | ||
51 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||
52 | * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||
53 | * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||
54 | * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||
55 | * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||
56 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||
57 | * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||
58 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||
59 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||
60 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||
61 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||
62 | * | ||
63 | */ | ||
64 | |||
65 | #include <linux/linkage.h> | ||
66 | #include <asm/inst.h> | ||
67 | |||
68 | #define CONCAT(a,b) a##b | ||
69 | #define VMOVDQ vmovdqu | ||
70 | |||
71 | #define xdata0 %xmm0 | ||
72 | #define xdata1 %xmm1 | ||
73 | #define xdata2 %xmm2 | ||
74 | #define xdata3 %xmm3 | ||
75 | #define xdata4 %xmm4 | ||
76 | #define xdata5 %xmm5 | ||
77 | #define xdata6 %xmm6 | ||
78 | #define xdata7 %xmm7 | ||
79 | #define xcounter %xmm8 | ||
80 | #define xbyteswap %xmm9 | ||
81 | #define xkey0 %xmm10 | ||
82 | #define xkey3 %xmm11 | ||
83 | #define xkey6 %xmm12 | ||
84 | #define xkey9 %xmm13 | ||
85 | #define xkey4 %xmm11 | ||
86 | #define xkey8 %xmm12 | ||
87 | #define xkey12 %xmm13 | ||
88 | #define xkeyA %xmm14 | ||
89 | #define xkeyB %xmm15 | ||
90 | |||
91 | #define p_in %rdi | ||
92 | #define p_iv %rsi | ||
93 | #define p_keys %rdx | ||
94 | #define p_out %rcx | ||
95 | #define num_bytes %r8 | ||
96 | |||
97 | #define tmp %r10 | ||
98 | #define DDQ(i) CONCAT(ddq_add_,i) | ||
99 | #define XMM(i) CONCAT(%xmm, i) | ||
100 | #define DDQ_DATA 0 | ||
101 | #define XDATA 1 | ||
102 | #define KEY_128 1 | ||
103 | #define KEY_192 2 | ||
104 | #define KEY_256 3 | ||
105 | |||
106 | .section .rodata | ||
107 | .align 16 | ||
108 | |||
109 | byteswap_const: | ||
110 | .octa 0x000102030405060708090A0B0C0D0E0F | ||
111 | ddq_add_1: | ||
112 | .octa 0x00000000000000000000000000000001 | ||
113 | ddq_add_2: | ||
114 | .octa 0x00000000000000000000000000000002 | ||
115 | ddq_add_3: | ||
116 | .octa 0x00000000000000000000000000000003 | ||
117 | ddq_add_4: | ||
118 | .octa 0x00000000000000000000000000000004 | ||
119 | ddq_add_5: | ||
120 | .octa 0x00000000000000000000000000000005 | ||
121 | ddq_add_6: | ||
122 | .octa 0x00000000000000000000000000000006 | ||
123 | ddq_add_7: | ||
124 | .octa 0x00000000000000000000000000000007 | ||
125 | ddq_add_8: | ||
126 | .octa 0x00000000000000000000000000000008 | ||
127 | |||
128 | .text | ||
129 | |||
130 | /* generate a unique variable for ddq_add_x */ | ||
131 | |||
132 | .macro setddq n | ||
133 | var_ddq_add = DDQ(\n) | ||
134 | .endm | ||
135 | |||
136 | /* generate a unique variable for xmm register */ | ||
137 | .macro setxdata n | ||
138 | var_xdata = XMM(\n) | ||
139 | .endm | ||
140 | |||
141 | /* combine the numeric 'id' with the symbol 'name' */ | ||
142 | |||
143 | .macro club name, id | ||
144 | .altmacro | ||
145 | .if \name == DDQ_DATA | ||
146 | setddq %\id | ||
147 | .elseif \name == XDATA | ||
148 | setxdata %\id | ||
149 | .endif | ||
150 | .noaltmacro | ||
151 | .endm | ||
152 | |||
153 | /* | ||
154 | * do_aes num_in_par load_keys key_len | ||
155 | * This increments p_in, but not p_out | ||
156 | */ | ||
157 | .macro do_aes b, k, key_len | ||
158 | .set by, \b | ||
159 | .set load_keys, \k | ||
160 | .set klen, \key_len | ||
161 | |||
162 | .if (load_keys) | ||
163 | vmovdqa 0*16(p_keys), xkey0 | ||
164 | .endif | ||
165 | |||
166 | vpshufb xbyteswap, xcounter, xdata0 | ||
167 | |||
168 | .set i, 1 | ||
169 | .rept (by - 1) | ||
170 | club DDQ_DATA, i | ||
171 | club XDATA, i | ||
172 | vpaddd var_ddq_add(%rip), xcounter, var_xdata | ||
173 | vpshufb xbyteswap, var_xdata, var_xdata | ||
174 | .set i, (i +1) | ||
175 | .endr | ||
176 | |||
177 | vmovdqa 1*16(p_keys), xkeyA | ||
178 | |||
179 | vpxor xkey0, xdata0, xdata0 | ||
180 | club DDQ_DATA, by | ||
181 | vpaddd var_ddq_add(%rip), xcounter, xcounter | ||
182 | |||
183 | .set i, 1 | ||
184 | .rept (by - 1) | ||
185 | club XDATA, i | ||
186 | vpxor xkey0, var_xdata, var_xdata | ||
187 | .set i, (i +1) | ||
188 | .endr | ||
189 | |||
190 | vmovdqa 2*16(p_keys), xkeyB | ||
191 | |||
192 | .set i, 0 | ||
193 | .rept by | ||
194 | club XDATA, i | ||
195 | vaesenc xkeyA, var_xdata, var_xdata /* key 1 */ | ||
196 | .set i, (i +1) | ||
197 | .endr | ||
198 | |||
199 | .if (klen == KEY_128) | ||
200 | .if (load_keys) | ||
201 | vmovdqa 3*16(p_keys), xkeyA | ||
202 | .endif | ||
203 | .else | ||
204 | vmovdqa 3*16(p_keys), xkeyA | ||
205 | .endif | ||
206 | |||
207 | .set i, 0 | ||
208 | .rept by | ||
209 | club XDATA, i | ||
210 | vaesenc xkeyB, var_xdata, var_xdata /* key 2 */ | ||
211 | .set i, (i +1) | ||
212 | .endr | ||
213 | |||
214 | add $(16*by), p_in | ||
215 | |||
216 | .if (klen == KEY_128) | ||
217 | vmovdqa 4*16(p_keys), xkey4 | ||
218 | .else | ||
219 | .if (load_keys) | ||
220 | vmovdqa 4*16(p_keys), xkey4 | ||
221 | .endif | ||
222 | .endif | ||
223 | |||
224 | .set i, 0 | ||
225 | .rept by | ||
226 | club XDATA, i | ||
227 | vaesenc xkeyA, var_xdata, var_xdata /* key 3 */ | ||
228 | .set i, (i +1) | ||
229 | .endr | ||
230 | |||
231 | vmovdqa 5*16(p_keys), xkeyA | ||
232 | |||
233 | .set i, 0 | ||
234 | .rept by | ||
235 | club XDATA, i | ||
236 | vaesenc xkey4, var_xdata, var_xdata /* key 4 */ | ||
237 | .set i, (i +1) | ||
238 | .endr | ||
239 | |||
240 | .if (klen == KEY_128) | ||
241 | .if (load_keys) | ||
242 | vmovdqa 6*16(p_keys), xkeyB | ||
243 | .endif | ||
244 | .else | ||
245 | vmovdqa 6*16(p_keys), xkeyB | ||
246 | .endif | ||
247 | |||
248 | .set i, 0 | ||
249 | .rept by | ||
250 | club XDATA, i | ||
251 | vaesenc xkeyA, var_xdata, var_xdata /* key 5 */ | ||
252 | .set i, (i +1) | ||
253 | .endr | ||
254 | |||
255 | vmovdqa 7*16(p_keys), xkeyA | ||
256 | |||
257 | .set i, 0 | ||
258 | .rept by | ||
259 | club XDATA, i | ||
260 | vaesenc xkeyB, var_xdata, var_xdata /* key 6 */ | ||
261 | .set i, (i +1) | ||
262 | .endr | ||
263 | |||
264 | .if (klen == KEY_128) | ||
265 | vmovdqa 8*16(p_keys), xkey8 | ||
266 | .else | ||
267 | .if (load_keys) | ||
268 | vmovdqa 8*16(p_keys), xkey8 | ||
269 | .endif | ||
270 | .endif | ||
271 | |||
272 | .set i, 0 | ||
273 | .rept by | ||
274 | club XDATA, i | ||
275 | vaesenc xkeyA, var_xdata, var_xdata /* key 7 */ | ||
276 | .set i, (i +1) | ||
277 | .endr | ||
278 | |||
279 | .if (klen == KEY_128) | ||
280 | .if (load_keys) | ||
281 | vmovdqa 9*16(p_keys), xkeyA | ||
282 | .endif | ||
283 | .else | ||
284 | vmovdqa 9*16(p_keys), xkeyA | ||
285 | .endif | ||
286 | |||
287 | .set i, 0 | ||
288 | .rept by | ||
289 | club XDATA, i | ||
290 | vaesenc xkey8, var_xdata, var_xdata /* key 8 */ | ||
291 | .set i, (i +1) | ||
292 | .endr | ||
293 | |||
294 | vmovdqa 10*16(p_keys), xkeyB | ||
295 | |||
296 | .set i, 0 | ||
297 | .rept by | ||
298 | club XDATA, i | ||
299 | vaesenc xkeyA, var_xdata, var_xdata /* key 9 */ | ||
300 | .set i, (i +1) | ||
301 | .endr | ||
302 | |||
303 | .if (klen != KEY_128) | ||
304 | vmovdqa 11*16(p_keys), xkeyA | ||
305 | .endif | ||
306 | |||
307 | .set i, 0 | ||
308 | .rept by | ||
309 | club XDATA, i | ||
310 | /* key 10 */ | ||
311 | .if (klen == KEY_128) | ||
312 | vaesenclast xkeyB, var_xdata, var_xdata | ||
313 | .else | ||
314 | vaesenc xkeyB, var_xdata, var_xdata | ||
315 | .endif | ||
316 | .set i, (i +1) | ||
317 | .endr | ||
318 | |||
319 | .if (klen != KEY_128) | ||
320 | .if (load_keys) | ||
321 | vmovdqa 12*16(p_keys), xkey12 | ||
322 | .endif | ||
323 | |||
324 | .set i, 0 | ||
325 | .rept by | ||
326 | club XDATA, i | ||
327 | vaesenc xkeyA, var_xdata, var_xdata /* key 11 */ | ||
328 | .set i, (i +1) | ||
329 | .endr | ||
330 | |||
331 | .if (klen == KEY_256) | ||
332 | vmovdqa 13*16(p_keys), xkeyA | ||
333 | .endif | ||
334 | |||
335 | .set i, 0 | ||
336 | .rept by | ||
337 | club XDATA, i | ||
338 | .if (klen == KEY_256) | ||
339 | /* key 12 */ | ||
340 | vaesenc xkey12, var_xdata, var_xdata | ||
341 | .else | ||
342 | vaesenclast xkey12, var_xdata, var_xdata | ||
343 | .endif | ||
344 | .set i, (i +1) | ||
345 | .endr | ||
346 | |||
347 | .if (klen == KEY_256) | ||
348 | vmovdqa 14*16(p_keys), xkeyB | ||
349 | |||
350 | .set i, 0 | ||
351 | .rept by | ||
352 | club XDATA, i | ||
353 | /* key 13 */ | ||
354 | vaesenc xkeyA, var_xdata, var_xdata | ||
355 | .set i, (i +1) | ||
356 | .endr | ||
357 | |||
358 | .set i, 0 | ||
359 | .rept by | ||
360 | club XDATA, i | ||
361 | /* key 14 */ | ||
362 | vaesenclast xkeyB, var_xdata, var_xdata | ||
363 | .set i, (i +1) | ||
364 | .endr | ||
365 | .endif | ||
366 | .endif | ||
367 | |||
368 | .set i, 0 | ||
369 | .rept (by / 2) | ||
370 | .set j, (i+1) | ||
371 | VMOVDQ (i*16 - 16*by)(p_in), xkeyA | ||
372 | VMOVDQ (j*16 - 16*by)(p_in), xkeyB | ||
373 | club XDATA, i | ||
374 | vpxor xkeyA, var_xdata, var_xdata | ||
375 | club XDATA, j | ||
376 | vpxor xkeyB, var_xdata, var_xdata | ||
377 | .set i, (i+2) | ||
378 | .endr | ||
379 | |||
380 | .if (i < by) | ||
381 | VMOVDQ (i*16 - 16*by)(p_in), xkeyA | ||
382 | club XDATA, i | ||
383 | vpxor xkeyA, var_xdata, var_xdata | ||
384 | .endif | ||
385 | |||
386 | .set i, 0 | ||
387 | .rept by | ||
388 | club XDATA, i | ||
389 | VMOVDQ var_xdata, i*16(p_out) | ||
390 | .set i, (i+1) | ||
391 | .endr | ||
392 | .endm | ||
393 | |||
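Editor's note: the do_aes macro above is the "by8" kernel of the standard CTR construction: byte-swap the big-endian counter into register order, derive up to eight per-block counters with vpaddd, push them through the round keys in parallel, then XOR the keystream into the input. A scalar C model of the same data flow, assuming a helper aes_encrypt_block() and using a full 128-bit increment where the asm's vpaddd only adds within the low lanes:

    #include <stddef.h>
    #include <stdint.h>

    /* Assumed helper: single-block AES with an expanded key schedule. */
    void aes_encrypt_block(const void *keys, const uint8_t in[16],
                           uint8_t out[16]);

    /* The IV/counter lives in memory big-endian; model the advance as
     * a big-endian increment. */
    static void ctr128_inc(uint8_t ctr[16])
    {
        for (int i = 15; i >= 0; i--)
            if (++ctr[i])
                break;
    }

    void ctr_by_n(const void *keys, uint8_t ctr[16],
                  const uint8_t *in, uint8_t *out, size_t blocks)
    {
        uint8_t ks[16];

        for (size_t b = 0; b < blocks; b++) {  /* the asm keeps 8 in flight */
            aes_encrypt_block(keys, ctr, ks);
            ctr128_inc(ctr);
            for (int i = 0; i < 16; i++)
                out[b * 16 + i] = in[b * 16 + i] ^ ks[i];
        }
    }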
394 | .macro do_aes_load val, key_len | ||
395 | do_aes \val, 1, \key_len | ||
396 | .endm | ||
397 | |||
398 | .macro do_aes_noload val, key_len | ||
399 | do_aes \val, 0, \key_len | ||
400 | .endm | ||
401 | |||
402 | /* main body of the aes ctr by8 transform */ | ||
403 | |||
404 | .macro do_aes_ctrmain key_len | ||
405 | |||
406 | cmp $16, num_bytes | ||
407 | jb .Ldo_return2\key_len | ||
408 | |||
409 | vmovdqa byteswap_const(%rip), xbyteswap | ||
410 | vmovdqu (p_iv), xcounter | ||
411 | vpshufb xbyteswap, xcounter, xcounter | ||
412 | |||
413 | mov num_bytes, tmp | ||
414 | and $(7*16), tmp | ||
415 | jz .Lmult_of_8_blks\key_len | ||
416 | |||
417 | /* tmp now holds 1..7 leftover blocks (tmp = 16..112 bytes) */ | ||
418 | cmp $(4*16), tmp | ||
419 | jg .Lgt4\key_len | ||
420 | je .Leq4\key_len | ||
421 | |||
422 | .Llt4\key_len: | ||
423 | cmp $(2*16), tmp | ||
424 | jg .Leq3\key_len | ||
425 | je .Leq2\key_len | ||
426 | |||
427 | .Leq1\key_len: | ||
428 | do_aes_load 1, \key_len | ||
429 | add $(1*16), p_out | ||
430 | and $(~7*16), num_bytes | ||
431 | jz .Ldo_return2\key_len | ||
432 | jmp .Lmain_loop2\key_len | ||
433 | |||
434 | .Leq2\key_len: | ||
435 | do_aes_load 2, \key_len | ||
436 | add $(2*16), p_out | ||
437 | and $(~7*16), num_bytes | ||
438 | jz .Ldo_return2\key_len | ||
439 | jmp .Lmain_loop2\key_len | ||
440 | |||
441 | |||
442 | .Leq3\key_len: | ||
443 | do_aes_load 3, \key_len | ||
444 | add $(3*16), p_out | ||
445 | and $(~7*16), num_bytes | ||
446 | jz .Ldo_return2\key_len | ||
447 | jmp .Lmain_loop2\key_len | ||
448 | |||
449 | .Leq4\key_len: | ||
450 | do_aes_load 4, \key_len | ||
451 | add $(4*16), p_out | ||
452 | and $(~7*16), num_bytes | ||
453 | jz .Ldo_return2\key_len | ||
454 | jmp .Lmain_loop2\key_len | ||
455 | |||
456 | .Lgt4\key_len: | ||
457 | cmp $(6*16), tmp | ||
458 | jg .Leq7\key_len | ||
459 | je .Leq6\key_len | ||
460 | |||
461 | .Leq5\key_len: | ||
462 | do_aes_load 5, \key_len | ||
463 | add $(5*16), p_out | ||
464 | and $(~7*16), num_bytes | ||
465 | jz .Ldo_return2\key_len | ||
466 | jmp .Lmain_loop2\key_len | ||
467 | |||
468 | .Leq6\key_len: | ||
469 | do_aes_load 6, \key_len | ||
470 | add $(6*16), p_out | ||
471 | and $(~7*16), num_bytes | ||
472 | jz .Ldo_return2\key_len | ||
473 | jmp .Lmain_loop2\key_len | ||
474 | |||
475 | .Leq7\key_len: | ||
476 | do_aes_load 7, \key_len | ||
477 | add $(7*16), p_out | ||
478 | and $(~7*16), num_bytes | ||
479 | jz .Ldo_return2\key_len | ||
480 | jmp .Lmain_loop2\key_len | ||
481 | |||
482 | .Lmult_of_8_blks\key_len: | ||
483 | .if (\key_len != KEY_128) | ||
484 | vmovdqa 0*16(p_keys), xkey0 | ||
485 | vmovdqa 4*16(p_keys), xkey4 | ||
486 | vmovdqa 8*16(p_keys), xkey8 | ||
487 | vmovdqa 12*16(p_keys), xkey12 | ||
488 | .else | ||
489 | vmovdqa 0*16(p_keys), xkey0 | ||
490 | vmovdqa 3*16(p_keys), xkey4 | ||
491 | vmovdqa 6*16(p_keys), xkey8 | ||
492 | vmovdqa 9*16(p_keys), xkey12 | ||
493 | .endif | ||
494 | .align 16 | ||
495 | .Lmain_loop2\key_len: | ||
496 | /* num_bytes is a multiple of 8 blocks (8*16 bytes) and > 0 */ | ||
497 | do_aes_noload 8, \key_len | ||
498 | add $(8*16), p_out | ||
499 | sub $(8*16), num_bytes | ||
500 | jne .Lmain_loop2\key_len | ||
501 | |||
502 | .Ldo_return2\key_len: | ||
503 | /* return updated IV */ | ||
504 | vpshufb xbyteswap, xcounter, xcounter | ||
505 | vmovdqu xcounter, (p_iv) | ||
506 | ret | ||
507 | .endm | ||
508 | |||
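Editor's note: do_aes_ctrmain handles the tail first. num_bytes & (7*16) selects how many leftover blocks (1..7) to encrypt with the key-loading variant; num_bytes is then rounded down to a multiple of eight blocks for the no-reload main loop. The control flow, roughly, in C, assuming num_bytes is a multiple of the 16-byte block size (the glue guarantees this via AES_BLOCK_MASK) and reusing the ctr_by_n() sketch above:

    #include <stddef.h>
    #include <stdint.h>

    void ctr_by_n(const void *keys, uint8_t ctr[16],
                  const uint8_t *in, uint8_t *out, size_t blocks);

    void ctr_main_model(const void *keys, uint8_t ctr[16],
                        const uint8_t *in, uint8_t *out, size_t num_bytes)
    {
        size_t rem = num_bytes & (7 * 16);      /* and $(7*16), tmp */

        if (num_bytes < 16)
            return;                             /* cmp $16 / jb return */
        if (rem) {                              /* do_aes_load 1..7 */
            ctr_by_n(keys, ctr, in, out, rem / 16);
            in  += rem;
            out += rem;
            num_bytes &= ~(size_t)(7 * 16);     /* and $(~7*16), num_bytes */
        }
        for (; num_bytes; num_bytes -= 8 * 16) {
            ctr_by_n(keys, ctr, in, out, 8);    /* do_aes_noload 8 */
            in  += 8 * 16;
            out += 8 * 16;
        }
    }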
509 | /* | ||
510 | * routine to do AES128 CTR enc/decrypt "by8" | ||
511 | * XMM registers are clobbered. | ||
512 | * Saving/restoring must be done at a higher level | ||
513 | * aes_ctr_enc_128_avx_by8(void *in, void *iv, void *keys, void *out, | ||
514 | * unsigned int num_bytes) | ||
515 | */ | ||
516 | ENTRY(aes_ctr_enc_128_avx_by8) | ||
517 | /* call the aes main loop */ | ||
518 | do_aes_ctrmain KEY_128 | ||
519 | |||
520 | ENDPROC(aes_ctr_enc_128_avx_by8) | ||
521 | |||
522 | /* | ||
523 | * routine to do AES192 CTR enc/decrypt "by8" | ||
524 | * XMM registers are clobbered. | ||
525 | * Saving/restoring must be done at a higher level | ||
526 | * aes_ctr_enc_192_avx_by8(void *in, void *iv, void *keys, void *out, | ||
527 | * unsigned int num_bytes) | ||
528 | */ | ||
529 | ENTRY(aes_ctr_enc_192_avx_by8) | ||
530 | /* call the aes main loop */ | ||
531 | do_aes_ctrmain KEY_192 | ||
532 | |||
533 | ENDPROC(aes_ctr_enc_192_avx_by8) | ||
534 | |||
535 | /* | ||
536 | * routine to do AES256 CTR enc/decrypt "by8" | ||
537 | * XMM registers are clobbered. | ||
538 | * Saving/restoring must be done at a higher level | ||
539 | * aes_ctr_enc_256_avx_by8(void *in, void *iv, void *keys, void *out, | ||
540 | * unsigned int num_bytes) | ||
541 | */ | ||
542 | ENTRY(aes_ctr_enc_256_avx_by8) | ||
543 | /* call the aes main loop */ | ||
544 | do_aes_ctrmain KEY_256 | ||
545 | |||
546 | ENDPROC(aes_ctr_enc_256_avx_by8) | ||
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index 948ad0e77741..888950f29fd9 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -105,6 +105,9 @@ void crypto_fpu_exit(void); | |||
105 | #define AVX_GEN4_OPTSIZE 4096 | 105 | #define AVX_GEN4_OPTSIZE 4096 |
106 | 106 | ||
107 | #ifdef CONFIG_X86_64 | 107 | #ifdef CONFIG_X86_64 |
108 | |||
109 | static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out, | ||
110 | const u8 *in, unsigned int len, u8 *iv); | ||
108 | asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, | 111 | asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out, |
109 | const u8 *in, unsigned int len, u8 *iv); | 112 | const u8 *in, unsigned int len, u8 *iv); |
110 | 113 | ||
@@ -155,6 +158,12 @@ asmlinkage void aesni_gcm_dec(void *ctx, u8 *out, | |||
155 | 158 | ||
156 | 159 | ||
157 | #ifdef CONFIG_AS_AVX | 160 | #ifdef CONFIG_AS_AVX |
161 | asmlinkage void aes_ctr_enc_128_avx_by8(const u8 *in, u8 *iv, | ||
162 | void *keys, u8 *out, unsigned int num_bytes); | ||
163 | asmlinkage void aes_ctr_enc_192_avx_by8(const u8 *in, u8 *iv, | ||
164 | void *keys, u8 *out, unsigned int num_bytes); | ||
165 | asmlinkage void aes_ctr_enc_256_avx_by8(const u8 *in, u8 *iv, | ||
166 | void *keys, u8 *out, unsigned int num_bytes); | ||
158 | /* | 167 | /* |
159 | * asmlinkage void aesni_gcm_precomp_avx_gen2() | 168 | * asmlinkage void aesni_gcm_precomp_avx_gen2() |
160 | * gcm_data *my_ctx_data, context data | 169 | * gcm_data *my_ctx_data, context data |
@@ -472,6 +481,25 @@ static void ctr_crypt_final(struct crypto_aes_ctx *ctx, | |||
472 | crypto_inc(ctrblk, AES_BLOCK_SIZE); | 481 | crypto_inc(ctrblk, AES_BLOCK_SIZE); |
473 | } | 482 | } |
474 | 483 | ||
484 | #ifdef CONFIG_AS_AVX | ||
485 | static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, | ||
486 | const u8 *in, unsigned int len, u8 *iv) | ||
487 | { | ||
488 | /* | ||
489 | * based on key length, override with the by8 version | ||
490 | * of ctr mode encryption/decryption for improved performance | ||
491 | * aes_set_key_common() ensures that key length is one of | ||
492 | * {128,192,256} | ||
493 | */ | ||
494 | if (ctx->key_length == AES_KEYSIZE_128) | ||
495 | aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len); | ||
496 | else if (ctx->key_length == AES_KEYSIZE_192) | ||
497 | aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len); | ||
498 | else | ||
499 | aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len); | ||
500 | } | ||
501 | #endif | ||
502 | |||
475 | static int ctr_crypt(struct blkcipher_desc *desc, | 503 | static int ctr_crypt(struct blkcipher_desc *desc, |
476 | struct scatterlist *dst, struct scatterlist *src, | 504 | struct scatterlist *dst, struct scatterlist *src, |
477 | unsigned int nbytes) | 505 | unsigned int nbytes) |
@@ -486,8 +514,8 @@ static int ctr_crypt(struct blkcipher_desc *desc, | |||
486 | 514 | ||
487 | kernel_fpu_begin(); | 515 | kernel_fpu_begin(); |
488 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { | 516 | while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { |
489 | aesni_ctr_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, | 517 | aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, |
490 | nbytes & AES_BLOCK_MASK, walk.iv); | 518 | nbytes & AES_BLOCK_MASK, walk.iv); |
491 | nbytes &= AES_BLOCK_SIZE - 1; | 519 | nbytes &= AES_BLOCK_SIZE - 1; |
492 | err = blkcipher_walk_done(desc, &walk, nbytes); | 520 | err = blkcipher_walk_done(desc, &walk, nbytes); |
493 | } | 521 | } |
@@ -1493,6 +1521,14 @@ static int __init aesni_init(void) | |||
1493 | aesni_gcm_enc_tfm = aesni_gcm_enc; | 1521 | aesni_gcm_enc_tfm = aesni_gcm_enc; |
1494 | aesni_gcm_dec_tfm = aesni_gcm_dec; | 1522 | aesni_gcm_dec_tfm = aesni_gcm_dec; |
1495 | } | 1523 | } |
1524 | aesni_ctr_enc_tfm = aesni_ctr_enc; | ||
1525 | #ifdef CONFIG_AS_AVX | ||
1526 | if (cpu_has_avx) { | ||
1527 | /* optimize performance of ctr mode encryption transform */ | ||
1528 | aesni_ctr_enc_tfm = aesni_ctr_enc_avx_tfm; | ||
1529 | pr_info("AES CTR mode by8 optimization enabled\n"); | ||
1530 | } | ||
1531 | #endif | ||
1496 | #endif | 1532 | #endif |
1497 | 1533 | ||
1498 | err = crypto_fpu_init(); | 1534 | err = crypto_fpu_init(); |
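Editor's note: the glue change follows the pattern this file already uses for GCM: a module-local function pointer defaults to the existing SSE routine and is retargeted once at init when AVX is detected, so the per-request hot path in ctr_crypt() stays branch-free. A minimal model of that one-time dispatch, with placeholder routine names:

    #include <stdbool.h>
    #include <stdint.h>

    typedef void (*ctr_fn)(void *ctx, uint8_t *out, const uint8_t *in,
                           unsigned int len, uint8_t *iv);

    void ctr_sse(void *, uint8_t *, const uint8_t *, unsigned int, uint8_t *);
    void ctr_avx_by8(void *, uint8_t *, const uint8_t *, unsigned int,
                     uint8_t *);
    bool cpu_has_avx_feature(void);     /* stand-in for cpu_has_avx */

    static ctr_fn ctr_tfm = ctr_sse;    /* safe default */

    static void ctr_init_once(void)
    {
        if (cpu_has_avx_feature())
            ctr_tfm = ctr_avx_by8;      /* retarget once at module init */
    }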
diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S index dbc4339b5417..26d49ebae040 100644 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S | |||
@@ -72,6 +72,7 @@ | |||
72 | 72 | ||
73 | # unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init); | 73 | # unsigned int crc_pcl(u8 *buffer, int len, unsigned int crc_init); |
74 | 74 | ||
75 | .text | ||
75 | ENTRY(crc_pcl) | 76 | ENTRY(crc_pcl) |
76 | #define bufp %rdi | 77 | #define bufp %rdi |
77 | #define bufp_dw %edi | 78 | #define bufp_dw %edi |
@@ -216,15 +217,11 @@ LABEL crc_ %i | |||
216 | ## 4) Combine three results: | 217 | ## 4) Combine three results: |
217 | ################################################################ | 218 | ################################################################ |
218 | 219 | ||
219 | lea (K_table-16)(%rip), bufp # first entry is for idx 1 | 220 | lea (K_table-8)(%rip), bufp # first entry is for idx 1 |
220 | shlq $3, %rax # rax *= 8 | 221 | shlq $3, %rax # rax *= 8 |
221 | subq %rax, tmp # tmp -= rax*8 | 222 | pmovzxdq (bufp,%rax), %xmm0 # 2 consts: K1:K2 |
222 | shlq $1, %rax | 223 | leal (%eax,%eax,2), %eax # rax *= 3 (total *24) |
223 | subq %rax, tmp # tmp -= rax*16 | 224 | subq %rax, tmp # tmp -= rax*24 |
224 | # (total tmp -= rax*24) | ||
225 | addq %rax, bufp | ||
226 | |||
227 | movdqa (bufp), %xmm0 # 2 consts: K1:K2 | ||
228 | 225 | ||
229 | movq crc_init, %xmm1 # CRC for block 1 | 226 | movq crc_init, %xmm1 # CRC for block 1 |
230 | PCLMULQDQ 0x00,%xmm0,%xmm1 # Multiply by K2 | 227 | PCLMULQDQ 0x00,%xmm0,%xmm1 # Multiply by K2 |
@@ -238,9 +235,9 @@ LABEL crc_ %i | |||
238 | mov crc2, crc_init | 235 | mov crc2, crc_init |
239 | crc32 %rax, crc_init | 236 | crc32 %rax, crc_init |
240 | 237 | ||
241 | ################################################################ | 238 | ################################################################ |
242 | ## 5) Check for end: | 239 | ## 5) Check for end: |
243 | ################################################################ | 240 | ################################################################ |
244 | 241 | ||
245 | LABEL crc_ 0 | 242 | LABEL crc_ 0 |
246 | mov tmp, len | 243 | mov tmp, len |
@@ -331,136 +328,136 @@ ENDPROC(crc_pcl) | |||
331 | 328 | ||
332 | ################################################################ | 329 | ################################################################ |
333 | ## PCLMULQDQ tables | 330 | ## PCLMULQDQ tables |
334 | ## Table is 128 entries x 2 quad words each | 331 | ## Table is 128 entries x 2 words (8 bytes) each |
335 | ################################################################ | 332 | ################################################################ |
336 | .data | 333 | .section .rodata, "a", %progbits |
337 | .align 64 | 334 | .align 8 |
338 | K_table: | 335 | K_table: |
339 | .quad 0x14cd00bd6,0x105ec76f0 | 336 | .long 0x493c7d27, 0x00000001 |
340 | .quad 0x0ba4fc28e,0x14cd00bd6 | 337 | .long 0xba4fc28e, 0x493c7d27 |
341 | .quad 0x1d82c63da,0x0f20c0dfe | 338 | .long 0xddc0152b, 0xf20c0dfe |
342 | .quad 0x09e4addf8,0x0ba4fc28e | 339 | .long 0x9e4addf8, 0xba4fc28e |
343 | .quad 0x039d3b296,0x1384aa63a | 340 | .long 0x39d3b296, 0x3da6d0cb |
344 | .quad 0x102f9b8a2,0x1d82c63da | 341 | .long 0x0715ce53, 0xddc0152b |
345 | .quad 0x14237f5e6,0x01c291d04 | 342 | .long 0x47db8317, 0x1c291d04 |
346 | .quad 0x00d3b6092,0x09e4addf8 | 343 | .long 0x0d3b6092, 0x9e4addf8 |
347 | .quad 0x0c96cfdc0,0x0740eef02 | 344 | .long 0xc96cfdc0, 0x740eef02 |
348 | .quad 0x18266e456,0x039d3b296 | 345 | .long 0x878a92a7, 0x39d3b296 |
349 | .quad 0x0daece73e,0x0083a6eec | 346 | .long 0xdaece73e, 0x083a6eec |
350 | .quad 0x0ab7aff2a,0x102f9b8a2 | 347 | .long 0xab7aff2a, 0x0715ce53 |
351 | .quad 0x1248ea574,0x1c1733996 | 348 | .long 0x2162d385, 0xc49f4f67 |
352 | .quad 0x083348832,0x14237f5e6 | 349 | .long 0x83348832, 0x47db8317 |
353 | .quad 0x12c743124,0x02ad91c30 | 350 | .long 0x299847d5, 0x2ad91c30 |
354 | .quad 0x0b9e02b86,0x00d3b6092 | 351 | .long 0xb9e02b86, 0x0d3b6092 |
355 | .quad 0x018b33a4e,0x06992cea2 | 352 | .long 0x18b33a4e, 0x6992cea2 |
356 | .quad 0x1b331e26a,0x0c96cfdc0 | 353 | .long 0xb6dd949b, 0xc96cfdc0 |
357 | .quad 0x17d35ba46,0x07e908048 | 354 | .long 0x78d9ccb7, 0x7e908048 |
358 | .quad 0x1bf2e8b8a,0x18266e456 | 355 | .long 0xbac2fd7b, 0x878a92a7 |
359 | .quad 0x1a3e0968a,0x11ed1f9d8 | 356 | .long 0xa60ce07b, 0x1b3d8f29 |
360 | .quad 0x0ce7f39f4,0x0daece73e | 357 | .long 0xce7f39f4, 0xdaece73e |
361 | .quad 0x061d82e56,0x0f1d0f55e | 358 | .long 0x61d82e56, 0xf1d0f55e |
362 | .quad 0x0d270f1a2,0x0ab7aff2a | 359 | .long 0xd270f1a2, 0xab7aff2a |
363 | .quad 0x1c3f5f66c,0x0a87ab8a8 | 360 | .long 0xc619809d, 0xa87ab8a8 |
364 | .quad 0x12ed0daac,0x1248ea574 | 361 | .long 0x2b3cac5d, 0x2162d385 |
365 | .quad 0x065863b64,0x08462d800 | 362 | .long 0x65863b64, 0x8462d800 |
366 | .quad 0x11eef4f8e,0x083348832 | 363 | .long 0x1b03397f, 0x83348832 |
367 | .quad 0x1ee54f54c,0x071d111a8 | 364 | .long 0xebb883bd, 0x71d111a8 |
368 | .quad 0x0b3e32c28,0x12c743124 | 365 | .long 0xb3e32c28, 0x299847d5 |
369 | .quad 0x0064f7f26,0x0ffd852c6 | 366 | .long 0x064f7f26, 0xffd852c6 |
370 | .quad 0x0dd7e3b0c,0x0b9e02b86 | 367 | .long 0xdd7e3b0c, 0xb9e02b86 |
371 | .quad 0x0f285651c,0x0dcb17aa4 | 368 | .long 0xf285651c, 0xdcb17aa4 |
372 | .quad 0x010746f3c,0x018b33a4e | 369 | .long 0x10746f3c, 0x18b33a4e |
373 | .quad 0x1c24afea4,0x0f37c5aee | 370 | .long 0xc7a68855, 0xf37c5aee |
374 | .quad 0x0271d9844,0x1b331e26a | 371 | .long 0x271d9844, 0xb6dd949b |
375 | .quad 0x08e766a0c,0x06051d5a2 | 372 | .long 0x8e766a0c, 0x6051d5a2 |
376 | .quad 0x093a5f730,0x17d35ba46 | 373 | .long 0x93a5f730, 0x78d9ccb7 |
377 | .quad 0x06cb08e5c,0x11d5ca20e | 374 | .long 0x6cb08e5c, 0x18b0d4ff |
378 | .quad 0x06b749fb2,0x1bf2e8b8a | 375 | .long 0x6b749fb2, 0xbac2fd7b |
379 | .quad 0x1167f94f2,0x021f3d99c | 376 | .long 0x1393e203, 0x21f3d99c |
380 | .quad 0x0cec3662e,0x1a3e0968a | 377 | .long 0xcec3662e, 0xa60ce07b |
381 | .quad 0x19329634a,0x08f158014 | 378 | .long 0x96c515bb, 0x8f158014 |
382 | .quad 0x0e6fc4e6a,0x0ce7f39f4 | 379 | .long 0xe6fc4e6a, 0xce7f39f4 |
383 | .quad 0x08227bb8a,0x1a5e82106 | 380 | .long 0x8227bb8a, 0xa00457f7 |
384 | .quad 0x0b0cd4768,0x061d82e56 | 381 | .long 0xb0cd4768, 0x61d82e56 |
385 | .quad 0x13c2b89c4,0x188815ab2 | 382 | .long 0x39c7ff35, 0x8d6d2c43 |
386 | .quad 0x0d7a4825c,0x0d270f1a2 | 383 | .long 0xd7a4825c, 0xd270f1a2 |
387 | .quad 0x10f5ff2ba,0x105405f3e | 384 | .long 0x0ab3844b, 0x00ac29cf |
388 | .quad 0x00167d312,0x1c3f5f66c | 385 | .long 0x0167d312, 0xc619809d |
389 | .quad 0x0f6076544,0x0e9adf796 | 386 | .long 0xf6076544, 0xe9adf796 |
390 | .quad 0x026f6a60a,0x12ed0daac | 387 | .long 0x26f6a60a, 0x2b3cac5d |
391 | .quad 0x1a2adb74e,0x096638b34 | 388 | .long 0xa741c1bf, 0x96638b34 |
392 | .quad 0x19d34af3a,0x065863b64 | 389 | .long 0x98d8d9cb, 0x65863b64 |
393 | .quad 0x049c3cc9c,0x1e50585a0 | 390 | .long 0x49c3cc9c, 0xe0e9f351 |
394 | .quad 0x068bce87a,0x11eef4f8e | 391 | .long 0x68bce87a, 0x1b03397f |
395 | .quad 0x1524fa6c6,0x19f1c69dc | 392 | .long 0x57a3d037, 0x9af01f2d |
396 | .quad 0x16cba8aca,0x1ee54f54c | 393 | .long 0x6956fc3b, 0xebb883bd |
397 | .quad 0x042d98888,0x12913343e | 394 | .long 0x42d98888, 0x2cff42cf |
398 | .quad 0x1329d9f7e,0x0b3e32c28 | 395 | .long 0x3771e98f, 0xb3e32c28 |
399 | .quad 0x1b1c69528,0x088f25a3a | 396 | .long 0xb42ae3d9, 0x88f25a3a |
400 | .quad 0x02178513a,0x0064f7f26 | 397 | .long 0x2178513a, 0x064f7f26 |
401 | .quad 0x0e0ac139e,0x04e36f0b0 | 398 | .long 0xe0ac139e, 0x4e36f0b0 |
402 | .quad 0x0170076fa,0x0dd7e3b0c | 399 | .long 0x170076fa, 0xdd7e3b0c |
403 | .quad 0x141a1a2e2,0x0bd6f81f8 | 400 | .long 0x444dd413, 0xbd6f81f8 |
404 | .quad 0x16ad828b4,0x0f285651c | 401 | .long 0x6f345e45, 0xf285651c |
405 | .quad 0x041d17b64,0x19425cbba | 402 | .long 0x41d17b64, 0x91c9bd4b |
406 | .quad 0x1fae1cc66,0x010746f3c | 403 | .long 0xff0dba97, 0x10746f3c |
407 | .quad 0x1a75b4b00,0x18db37e8a | 404 | .long 0xa2b73df1, 0x885f087b |
408 | .quad 0x0f872e54c,0x1c24afea4 | 405 | .long 0xf872e54c, 0xc7a68855 |
409 | .quad 0x01e41e9fc,0x04c144932 | 406 | .long 0x1e41e9fc, 0x4c144932 |
410 | .quad 0x086d8e4d2,0x0271d9844 | 407 | .long 0x86d8e4d2, 0x271d9844 |
411 | .quad 0x160f7af7a,0x052148f02 | 408 | .long 0x651bd98b, 0x52148f02 |
412 | .quad 0x05bb8f1bc,0x08e766a0c | 409 | .long 0x5bb8f1bc, 0x8e766a0c |
413 | .quad 0x0a90fd27a,0x0a3c6f37a | 410 | .long 0xa90fd27a, 0xa3c6f37a |
414 | .quad 0x0b3af077a,0x093a5f730 | 411 | .long 0xb3af077a, 0x93a5f730 |
415 | .quad 0x04984d782,0x1d22c238e | 412 | .long 0x4984d782, 0xd7c0557f |
416 | .quad 0x0ca6ef3ac,0x06cb08e5c | 413 | .long 0xca6ef3ac, 0x6cb08e5c |
417 | .quad 0x0234e0b26,0x063ded06a | 414 | .long 0x234e0b26, 0x63ded06a |
418 | .quad 0x1d88abd4a,0x06b749fb2 | 415 | .long 0xdd66cbbb, 0x6b749fb2 |
419 | .quad 0x04597456a,0x04d56973c | 416 | .long 0x4597456a, 0x4d56973c |
420 | .quad 0x0e9e28eb4,0x1167f94f2 | 417 | .long 0xe9e28eb4, 0x1393e203 |
421 | .quad 0x07b3ff57a,0x19385bf2e | 418 | .long 0x7b3ff57a, 0x9669c9df |
422 | .quad 0x0c9c8b782,0x0cec3662e | 419 | .long 0xc9c8b782, 0xcec3662e |
423 | .quad 0x13a9cba9e,0x0e417f38a | 420 | .long 0x3f70cc6f, 0xe417f38a |
424 | .quad 0x093e106a4,0x19329634a | 421 | .long 0x93e106a4, 0x96c515bb |
425 | .quad 0x167001a9c,0x14e727980 | 422 | .long 0x62ec6c6d, 0x4b9e0f71 |
426 | .quad 0x1ddffc5d4,0x0e6fc4e6a | 423 | .long 0xd813b325, 0xe6fc4e6a |
427 | .quad 0x00df04680,0x0d104b8fc | 424 | .long 0x0df04680, 0xd104b8fc |
428 | .quad 0x02342001e,0x08227bb8a | 425 | .long 0x2342001e, 0x8227bb8a |
429 | .quad 0x00a2a8d7e,0x05b397730 | 426 | .long 0x0a2a8d7e, 0x5b397730 |
430 | .quad 0x168763fa6,0x0b0cd4768 | 427 | .long 0x6d9a4957, 0xb0cd4768 |
431 | .quad 0x1ed5a407a,0x0e78eb416 | 428 | .long 0xe8b6368b, 0xe78eb416 |
432 | .quad 0x0d2c3ed1a,0x13c2b89c4 | 429 | .long 0xd2c3ed1a, 0x39c7ff35 |
433 | .quad 0x0995a5724,0x1641378f0 | 430 | .long 0x995a5724, 0x61ff0e01 |
434 | .quad 0x19b1afbc4,0x0d7a4825c | 431 | .long 0x9ef68d35, 0xd7a4825c |
435 | .quad 0x109ffedc0,0x08d96551c | 432 | .long 0x0c139b31, 0x8d96551c |
436 | .quad 0x0f2271e60,0x10f5ff2ba | 433 | .long 0xf2271e60, 0x0ab3844b |
437 | .quad 0x00b0bf8ca,0x00bf80dd2 | 434 | .long 0x0b0bf8ca, 0x0bf80dd2 |
438 | .quad 0x123888b7a,0x00167d312 | 435 | .long 0x2664fd8b, 0x0167d312 |
439 | .quad 0x1e888f7dc,0x18dcddd1c | 436 | .long 0xed64812d, 0x8821abed |
440 | .quad 0x002ee03b2,0x0f6076544 | 437 | .long 0x02ee03b2, 0xf6076544 |
441 | .quad 0x183e8d8fe,0x06a45d2b2 | 438 | .long 0x8604ae0f, 0x6a45d2b2 |
442 | .quad 0x133d7a042,0x026f6a60a | 439 | .long 0x363bd6b3, 0x26f6a60a |
443 | .quad 0x116b0f50c,0x1dd3e10e8 | 440 | .long 0x135c83fd, 0xd8d26619 |
444 | .quad 0x05fabe670,0x1a2adb74e | 441 | .long 0x5fabe670, 0xa741c1bf |
445 | .quad 0x130004488,0x0de87806c | 442 | .long 0x35ec3279, 0xde87806c |
446 | .quad 0x000bcf5f6,0x19d34af3a | 443 | .long 0x00bcf5f6, 0x98d8d9cb |
447 | .quad 0x18f0c7078,0x014338754 | 444 | .long 0x8ae00689, 0x14338754 |
448 | .quad 0x017f27698,0x049c3cc9c | 445 | .long 0x17f27698, 0x49c3cc9c |
449 | .quad 0x058ca5f00,0x15e3e77ee | 446 | .long 0x58ca5f00, 0x5bd2011f |
450 | .quad 0x1af900c24,0x068bce87a | 447 | .long 0xaa7c7ad5, 0x68bce87a |
451 | .quad 0x0b5cfca28,0x0dd07448e | 448 | .long 0xb5cfca28, 0xdd07448e |
452 | .quad 0x0ded288f8,0x1524fa6c6 | 449 | .long 0xded288f8, 0x57a3d037 |
453 | .quad 0x059f229bc,0x1d8048348 | 450 | .long 0x59f229bc, 0xdde8f5b9 |
454 | .quad 0x06d390dec,0x16cba8aca | 451 | .long 0x6d390dec, 0x6956fc3b |
455 | .quad 0x037170390,0x0a3e3e02c | 452 | .long 0x37170390, 0xa3e3e02c |
456 | .quad 0x06353c1cc,0x042d98888 | 453 | .long 0x6353c1cc, 0x42d98888 |
457 | .quad 0x0c4584f5c,0x0d73c7bea | 454 | .long 0xc4584f5c, 0xd73c7bea |
458 | .quad 0x1f16a3418,0x1329d9f7e | 455 | .long 0xf48642e9, 0x3771e98f |
459 | .quad 0x0531377e2,0x185137662 | 456 | .long 0x531377e2, 0x80ff0093 |
460 | .quad 0x1d8d9ca7c,0x1b1c69528 | 457 | .long 0xdd35bc8d, 0xb42ae3d9 |
461 | .quad 0x0b25b29f2,0x18a08b5bc | 458 | .long 0xb25b29f2, 0x8fe4c34d |
462 | .quad 0x19fb2a8b0,0x02178513a | 459 | .long 0x9a5ede41, 0x2178513a |
463 | .quad 0x1a08fe6ac,0x1da758ae0 | 460 | .long 0xa563905d, 0xdf99fc11 |
464 | .quad 0x045cddf4e,0x0e0ac139e | 461 | .long 0x45cddf4e, 0xe0ac139e |
465 | .quad 0x1a91647f2,0x169cf9eb0 | 462 | .long 0xacfa3103, 0x6c23e841 |
466 | .quad 0x1a0f717c4,0x0170076fa | 463 | .long 0xa51b6135, 0x170076fa |
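Editor's note: each K_table entry shrinks from two quadwords (16 bytes) to two longs (8 bytes); pmovzxdq zero-extends the pair back into the two 64-bit lanes the carry-less multiply consumes, and the index arithmetic is reworked for the 8-byte stride (base K_table-8 instead of K_table-16, and the old shift/sub sequence for tmp -= i*24 becomes a single lea on top of the i*8 scaling). A sketch of the two pieces:

    #include <stdint.h>

    /* pmovzxdq model: two packed 32-bit table entries zero-extended
     * into two 64-bit operands. */
    static void load_k_pair(const uint32_t entry[2], uint64_t k[2])
    {
        k[0] = entry[0];
        k[1] = entry[1];
    }

    /* Index math from the hunk: rax is already scaled to i*8 for the
     * table stride, so one lea turns it into i*24 for tmp. */
    static uint64_t times24(uint64_t i8 /* == i*8 */)
    {
        return i8 + i8 * 2;     /* leal (%eax,%eax,2), %eax */
    }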
diff --git a/arch/x86/crypto/des3_ede-asm_64.S b/arch/x86/crypto/des3_ede-asm_64.S new file mode 100644 index 000000000000..038f6ae87c5e --- /dev/null +++ b/arch/x86/crypto/des3_ede-asm_64.S | |||
@@ -0,0 +1,805 @@ | |||
1 | /* | ||
2 | * des3_ede-asm_64.S - x86-64 assembly implementation of 3DES cipher | ||
3 | * | ||
4 | * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | |||
17 | #include <linux/linkage.h> | ||
18 | |||
19 | .file "des3_ede-asm_64.S" | ||
20 | .text | ||
21 | |||
22 | #define s1 .L_s1 | ||
23 | #define s2 ((s1) + (64*8)) | ||
24 | #define s3 ((s2) + (64*8)) | ||
25 | #define s4 ((s3) + (64*8)) | ||
26 | #define s5 ((s4) + (64*8)) | ||
27 | #define s6 ((s5) + (64*8)) | ||
28 | #define s7 ((s6) + (64*8)) | ||
29 | #define s8 ((s7) + (64*8)) | ||
30 | |||
31 | /* register macros */ | ||
32 | #define CTX %rdi | ||
33 | |||
34 | #define RL0 %r8 | ||
35 | #define RL1 %r9 | ||
36 | #define RL2 %r10 | ||
37 | |||
38 | #define RL0d %r8d | ||
39 | #define RL1d %r9d | ||
40 | #define RL2d %r10d | ||
41 | |||
42 | #define RR0 %r11 | ||
43 | #define RR1 %r12 | ||
44 | #define RR2 %r13 | ||
45 | |||
46 | #define RR0d %r11d | ||
47 | #define RR1d %r12d | ||
48 | #define RR2d %r13d | ||
49 | |||
50 | #define RW0 %rax | ||
51 | #define RW1 %rbx | ||
52 | #define RW2 %rcx | ||
53 | |||
54 | #define RW0d %eax | ||
55 | #define RW1d %ebx | ||
56 | #define RW2d %ecx | ||
57 | |||
58 | #define RW0bl %al | ||
59 | #define RW1bl %bl | ||
60 | #define RW2bl %cl | ||
61 | |||
62 | #define RW0bh %ah | ||
63 | #define RW1bh %bh | ||
64 | #define RW2bh %ch | ||
65 | |||
66 | #define RT0 %r15 | ||
67 | #define RT1 %rbp | ||
68 | #define RT2 %r14 | ||
69 | #define RT3 %rdx | ||
70 | |||
71 | #define RT0d %r15d | ||
72 | #define RT1d %ebp | ||
73 | #define RT2d %r14d | ||
74 | #define RT3d %edx | ||
75 | |||
76 | /*********************************************************************** | ||
77 | * 1-way 3DES | ||
78 | ***********************************************************************/ | ||
79 | #define do_permutation(a, b, offset, mask) \ | ||
80 | movl a, RT0d; \ | ||
81 | shrl $(offset), RT0d; \ | ||
82 | xorl b, RT0d; \ | ||
83 | andl $(mask), RT0d; \ | ||
84 | xorl RT0d, b; \ | ||
85 | shll $(offset), RT0d; \ | ||
86 | xorl RT0d, a; | ||
87 | |||
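Editor's note: do_permutation is the classic delta swap used throughout DES bit permutations: the bits of `a` and `b` selected by `mask` are exchanged at distance `offset` with three XORs and no branches. The same step in C:

    #include <stdint.h>

    /* Exchange the bits of *a at positions (mask << offset) with the
     * bits of *b at positions mask, mirroring do_permutation. */
    static void delta_swap(uint32_t *a, uint32_t *b,
                           unsigned offset, uint32_t mask)
    {
        uint32_t t = ((*a >> offset) ^ *b) & mask;

        *b ^= t;
        *a ^= t << offset;
    }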
88 | #define expand_to_64bits(val, mask) \ | ||
89 | movl val##d, RT0d; \ | ||
90 | rorl $4, RT0d; \ | ||
91 | shlq $32, RT0; \ | ||
92 | orq RT0, val; \ | ||
93 | andq mask, val; | ||
94 | |||
95 | #define compress_to_64bits(val) \ | ||
96 | movq val, RT0; \ | ||
97 | shrq $32, RT0; \ | ||
98 | roll $4, RT0d; \ | ||
99 | orl RT0d, val##d; | ||
100 | |||
101 | #define initial_permutation(left, right) \ | ||
102 | do_permutation(left##d, right##d, 4, 0x0f0f0f0f); \ | ||
103 | do_permutation(left##d, right##d, 16, 0x0000ffff); \ | ||
104 | do_permutation(right##d, left##d, 2, 0x33333333); \ | ||
105 | do_permutation(right##d, left##d, 8, 0x00ff00ff); \ | ||
106 | movabs $0x3f3f3f3f3f3f3f3f, RT3; \ | ||
107 | movl left##d, RW0d; \ | ||
108 | roll $1, right##d; \ | ||
109 | xorl right##d, RW0d; \ | ||
110 | andl $0xaaaaaaaa, RW0d; \ | ||
111 | xorl RW0d, left##d; \ | ||
112 | xorl RW0d, right##d; \ | ||
113 | roll $1, left##d; \ | ||
114 | expand_to_64bits(right, RT3); \ | ||
115 | expand_to_64bits(left, RT3); | ||
116 | |||
117 | #define final_permutation(left, right) \ | ||
118 | compress_to_64bits(right); \ | ||
119 | compress_to_64bits(left); \ | ||
120 | movl right##d, RW0d; \ | ||
121 | rorl $1, left##d; \ | ||
122 | xorl left##d, RW0d; \ | ||
123 | andl $0xaaaaaaaa, RW0d; \ | ||
124 | xorl RW0d, right##d; \ | ||
125 | xorl RW0d, left##d; \ | ||
126 | rorl $1, right##d; \ | ||
127 | do_permutation(right##d, left##d, 8, 0x00ff00ff); \ | ||
128 | do_permutation(right##d, left##d, 2, 0x33333333); \ | ||
129 | do_permutation(left##d, right##d, 16, 0x0000ffff); \ | ||
130 | do_permutation(left##d, right##d, 4, 0x0f0f0f0f); | ||
131 | |||
132 | #define round1(n, from, to, load_next_key) \ | ||
133 | xorq from, RW0; \ | ||
134 | \ | ||
135 | movzbl RW0bl, RT0d; \ | ||
136 | movzbl RW0bh, RT1d; \ | ||
137 | shrq $16, RW0; \ | ||
138 | movzbl RW0bl, RT2d; \ | ||
139 | movzbl RW0bh, RT3d; \ | ||
140 | shrq $16, RW0; \ | ||
141 | movq s8(, RT0, 8), RT0; \ | ||
142 | xorq s6(, RT1, 8), to; \ | ||
143 | movzbl RW0bl, RL1d; \ | ||
144 | movzbl RW0bh, RT1d; \ | ||
145 | shrl $16, RW0d; \ | ||
146 | xorq s4(, RT2, 8), RT0; \ | ||
147 | xorq s2(, RT3, 8), to; \ | ||
148 | movzbl RW0bl, RT2d; \ | ||
149 | movzbl RW0bh, RT3d; \ | ||
150 | xorq s7(, RL1, 8), RT0; \ | ||
151 | xorq s5(, RT1, 8), to; \ | ||
152 | xorq s3(, RT2, 8), RT0; \ | ||
153 | load_next_key(n, RW0); \ | ||
154 | xorq RT0, to; \ | ||
155 | xorq s1(, RT3, 8), to; \ | ||
156 | |||
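Editor's note: round1 is one Feistel round with the S-boxes and the P permutation fused into the eight 64-entry quadword tables laid out above (s1..s8). Because expand_to_64bits() masks the halves with 0x3f3f3f3f3f3f3f3f, every byte of the subkey-mixed word is already a 6-bit table index, so plain byte extraction drives the lookups. A C model, with the byte-to-box mapping read off the macro (even-numbered boxes sit in the low dword, odd ones in the high dword):

    #include <stdint.h>

    /* Assumed: eight 64-entry tables of pre-permuted round output,
     * matching the s1..s8 layout (S-box + P permutation fused). */
    extern const uint64_t S[8][64];

    /* Byte lanes, low to high, feed s8, s6, s4, s2, s7, s5, s3, s1. */
    static const int box_for_byte[8] = { 7, 5, 3, 1, 6, 4, 2, 0 };

    uint64_t des_round(uint64_t from, uint64_t subkey, uint64_t to)
    {
        uint64_t w = from ^ subkey;

        for (int i = 0; i < 8; i++) {
            to ^= S[box_for_byte[i]][w & 0x3f];
            w >>= 8;
        }
        return to;
    }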
157 | #define load_next_key(n, RWx) \ | ||
158 | movq (((n) + 1) * 8)(CTX), RWx; | ||
159 | |||
160 | #define dummy2(a, b) /*_*/ | ||
161 | |||
162 | #define read_block(io, left, right) \ | ||
163 | movl (io), left##d; \ | ||
164 | movl 4(io), right##d; \ | ||
165 | bswapl left##d; \ | ||
166 | bswapl right##d; | ||
167 | |||
168 | #define write_block(io, left, right) \ | ||
169 | bswapl left##d; \ | ||
170 | bswapl right##d; \ | ||
171 | movl left##d, (io); \ | ||
172 | movl right##d, 4(io); | ||
173 | |||
174 | ENTRY(des3_ede_x86_64_crypt_blk) | ||
175 | /* input: | ||
176 | * %rdi: round keys, CTX | ||
177 | * %rsi: dst | ||
178 | * %rdx: src | ||
179 | */ | ||
180 | pushq %rbp; | ||
181 | pushq %rbx; | ||
182 | pushq %r12; | ||
183 | pushq %r13; | ||
184 | pushq %r14; | ||
185 | pushq %r15; | ||
186 | |||
187 | read_block(%rdx, RL0, RR0); | ||
188 | initial_permutation(RL0, RR0); | ||
189 | |||
190 | movq (CTX), RW0; | ||
191 | |||
192 | round1(0, RR0, RL0, load_next_key); | ||
193 | round1(1, RL0, RR0, load_next_key); | ||
194 | round1(2, RR0, RL0, load_next_key); | ||
195 | round1(3, RL0, RR0, load_next_key); | ||
196 | round1(4, RR0, RL0, load_next_key); | ||
197 | round1(5, RL0, RR0, load_next_key); | ||
198 | round1(6, RR0, RL0, load_next_key); | ||
199 | round1(7, RL0, RR0, load_next_key); | ||
200 | round1(8, RR0, RL0, load_next_key); | ||
201 | round1(9, RL0, RR0, load_next_key); | ||
202 | round1(10, RR0, RL0, load_next_key); | ||
203 | round1(11, RL0, RR0, load_next_key); | ||
204 | round1(12, RR0, RL0, load_next_key); | ||
205 | round1(13, RL0, RR0, load_next_key); | ||
206 | round1(14, RR0, RL0, load_next_key); | ||
207 | round1(15, RL0, RR0, load_next_key); | ||
208 | |||
209 | round1(16+0, RL0, RR0, load_next_key); | ||
210 | round1(16+1, RR0, RL0, load_next_key); | ||
211 | round1(16+2, RL0, RR0, load_next_key); | ||
212 | round1(16+3, RR0, RL0, load_next_key); | ||
213 | round1(16+4, RL0, RR0, load_next_key); | ||
214 | round1(16+5, RR0, RL0, load_next_key); | ||
215 | round1(16+6, RL0, RR0, load_next_key); | ||
216 | round1(16+7, RR0, RL0, load_next_key); | ||
217 | round1(16+8, RL0, RR0, load_next_key); | ||
218 | round1(16+9, RR0, RL0, load_next_key); | ||
219 | round1(16+10, RL0, RR0, load_next_key); | ||
220 | round1(16+11, RR0, RL0, load_next_key); | ||
221 | round1(16+12, RL0, RR0, load_next_key); | ||
222 | round1(16+13, RR0, RL0, load_next_key); | ||
223 | round1(16+14, RL0, RR0, load_next_key); | ||
224 | round1(16+15, RR0, RL0, load_next_key); | ||
225 | |||
226 | round1(32+0, RR0, RL0, load_next_key); | ||
227 | round1(32+1, RL0, RR0, load_next_key); | ||
228 | round1(32+2, RR0, RL0, load_next_key); | ||
229 | round1(32+3, RL0, RR0, load_next_key); | ||
230 | round1(32+4, RR0, RL0, load_next_key); | ||
231 | round1(32+5, RL0, RR0, load_next_key); | ||
232 | round1(32+6, RR0, RL0, load_next_key); | ||
233 | round1(32+7, RL0, RR0, load_next_key); | ||
234 | round1(32+8, RR0, RL0, load_next_key); | ||
235 | round1(32+9, RL0, RR0, load_next_key); | ||
236 | round1(32+10, RR0, RL0, load_next_key); | ||
237 | round1(32+11, RL0, RR0, load_next_key); | ||
238 | round1(32+12, RR0, RL0, load_next_key); | ||
239 | round1(32+13, RL0, RR0, load_next_key); | ||
240 | round1(32+14, RR0, RL0, load_next_key); | ||
241 | round1(32+15, RL0, RR0, dummy2); | ||
242 | |||
243 | final_permutation(RR0, RL0); | ||
244 | write_block(%rsi, RR0, RL0); | ||
245 | |||
246 | popq %r15; | ||
247 | popq %r14; | ||
248 | popq %r13; | ||
249 | popq %r12; | ||
250 | popq %rbx; | ||
251 | popq %rbp; | ||
252 | |||
253 | ret; | ||
254 | ENDPROC(des3_ede_x86_64_crypt_blk) | ||
255 | |||
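Editor's note: des3_ede_x86_64_crypt_blk runs all three DES passes as one flat sequence of 48 table-driven rounds over a linear key schedule, so encryption and decryption differ only in how the subkeys were expanded. The break in the from/to alternation at rounds 16+0 and 32+0 stands in for the half exchange between the constituent DES operations. The overall shape in C, ignoring the initial/final permutations and the expand/compress steps, and reusing the des_round() sketch above:

    #include <stdint.h>

    uint64_t des_round(uint64_t from, uint64_t subkey, uint64_t to);

    /* One DES pass in the alternating-destination style of the rounds
     * above: even rounds rewrite L from R, odd rounds R from L. */
    static void des_pass(const uint64_t k[16], uint64_t *L, uint64_t *R)
    {
        for (int i = 0; i < 16; i += 2) {
            *L = des_round(*R, k[i],     *L);
            *R = des_round(*L, k[i + 1], *R);
        }
    }

    /* 48 subkeys flat in CTX; swapping the half pointers for the
     * middle pass models the inter-DES half exchange. */
    void des3_ede_model(const uint64_t keys[48], uint64_t *L, uint64_t *R)
    {
        des_pass(keys +  0, L, R);  /* rounds 0..15        */
        des_pass(keys + 16, R, L);  /* rounds 16+0..16+15  */
        des_pass(keys + 32, L, R);  /* rounds 32+0..32+15  */
    }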
256 | /*********************************************************************** | ||
257 | * 3-way 3DES | ||
258 | ***********************************************************************/ | ||
259 | #define expand_to_64bits(val, mask) \ | ||
260 | movl val##d, RT0d; \ | ||
261 | rorl $4, RT0d; \ | ||
262 | shlq $32, RT0; \ | ||
263 | orq RT0, val; \ | ||
264 | andq mask, val; | ||
265 | |||
266 | #define compress_to_64bits(val) \ | ||
267 | movq val, RT0; \ | ||
268 | shrq $32, RT0; \ | ||
269 | roll $4, RT0d; \ | ||
270 | orl RT0d, val##d; | ||
271 | |||
272 | #define initial_permutation3(left, right) \ | ||
273 | do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ | ||
274 | do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ | ||
275 | do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ | ||
276 | do_permutation(left##1d, right##1d, 16, 0x0000ffff); \ | ||
277 | do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); \ | ||
278 | do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ | ||
279 | \ | ||
280 | do_permutation(right##0d, left##0d, 2, 0x33333333); \ | ||
281 | do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ | ||
282 | do_permutation(right##1d, left##1d, 2, 0x33333333); \ | ||
283 | do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ | ||
284 | do_permutation(right##2d, left##2d, 2, 0x33333333); \ | ||
285 | do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ | ||
286 | \ | ||
287 | movabs $0x3f3f3f3f3f3f3f3f, RT3; \ | ||
288 | \ | ||
289 | movl left##0d, RW0d; \ | ||
290 | roll $1, right##0d; \ | ||
291 | xorl right##0d, RW0d; \ | ||
292 | andl $0xaaaaaaaa, RW0d; \ | ||
293 | xorl RW0d, left##0d; \ | ||
294 | xorl RW0d, right##0d; \ | ||
295 | roll $1, left##0d; \ | ||
296 | expand_to_64bits(right##0, RT3); \ | ||
297 | expand_to_64bits(left##0, RT3); \ | ||
298 | movl left##1d, RW1d; \ | ||
299 | roll $1, right##1d; \ | ||
300 | xorl right##1d, RW1d; \ | ||
301 | andl $0xaaaaaaaa, RW1d; \ | ||
302 | xorl RW1d, left##1d; \ | ||
303 | xorl RW1d, right##1d; \ | ||
304 | roll $1, left##1d; \ | ||
305 | expand_to_64bits(right##1, RT3); \ | ||
306 | expand_to_64bits(left##1, RT3); \ | ||
307 | movl left##2d, RW2d; \ | ||
308 | roll $1, right##2d; \ | ||
309 | xorl right##2d, RW2d; \ | ||
310 | andl $0xaaaaaaaa, RW2d; \ | ||
311 | xorl RW2d, left##2d; \ | ||
312 | xorl RW2d, right##2d; \ | ||
313 | roll $1, left##2d; \ | ||
314 | expand_to_64bits(right##2, RT3); \ | ||
315 | expand_to_64bits(left##2, RT3); | ||
316 | |||
317 | #define final_permutation3(left, right) \ | ||
318 | compress_to_64bits(right##0); \ | ||
319 | compress_to_64bits(left##0); \ | ||
320 | movl right##0d, RW0d; \ | ||
321 | rorl $1, left##0d; \ | ||
322 | xorl left##0d, RW0d; \ | ||
323 | andl $0xaaaaaaaa, RW0d; \ | ||
324 | xorl RW0d, right##0d; \ | ||
325 | xorl RW0d, left##0d; \ | ||
326 | rorl $1, right##0d; \ | ||
327 | compress_to_64bits(right##1); \ | ||
328 | compress_to_64bits(left##1); \ | ||
329 | movl right##1d, RW1d; \ | ||
330 | rorl $1, left##1d; \ | ||
331 | xorl left##1d, RW1d; \ | ||
332 | andl $0xaaaaaaaa, RW1d; \ | ||
333 | xorl RW1d, right##1d; \ | ||
334 | xorl RW1d, left##1d; \ | ||
335 | rorl $1, right##1d; \ | ||
336 | compress_to_64bits(right##2); \ | ||
337 | compress_to_64bits(left##2); \ | ||
338 | movl right##2d, RW2d; \ | ||
339 | rorl $1, left##2d; \ | ||
340 | xorl left##2d, RW2d; \ | ||
341 | andl $0xaaaaaaaa, RW2d; \ | ||
342 | xorl RW2d, right##2d; \ | ||
343 | xorl RW2d, left##2d; \ | ||
344 | rorl $1, right##2d; \ | ||
345 | \ | ||
346 | do_permutation(right##0d, left##0d, 8, 0x00ff00ff); \ | ||
347 | do_permutation(right##0d, left##0d, 2, 0x33333333); \ | ||
348 | do_permutation(right##1d, left##1d, 8, 0x00ff00ff); \ | ||
349 | do_permutation(right##1d, left##1d, 2, 0x33333333); \ | ||
350 | do_permutation(right##2d, left##2d, 8, 0x00ff00ff); \ | ||
351 | do_permutation(right##2d, left##2d, 2, 0x33333333); \ | ||
352 | \ | ||
353 | do_permutation(left##0d, right##0d, 16, 0x0000ffff); \ | ||
354 | do_permutation(left##0d, right##0d, 4, 0x0f0f0f0f); \ | ||
355 | do_permutation(left##1d, right##1d, 16, 0x0000ffff); \ | ||
356 | do_permutation(left##1d, right##1d, 4, 0x0f0f0f0f); \ | ||
357 | do_permutation(left##2d, right##2d, 16, 0x0000ffff); \ | ||
358 | do_permutation(left##2d, right##2d, 4, 0x0f0f0f0f); | ||
359 | |||
360 | #define round3(n, from, to, load_next_key, do_movq) \ | ||
361 | xorq from##0, RW0; \ | ||
362 | movzbl RW0bl, RT3d; \ | ||
363 | movzbl RW0bh, RT1d; \ | ||
364 | shrq $16, RW0; \ | ||
365 | xorq s8(, RT3, 8), to##0; \ | ||
366 | xorq s6(, RT1, 8), to##0; \ | ||
367 | movzbl RW0bl, RT3d; \ | ||
368 | movzbl RW0bh, RT1d; \ | ||
369 | shrq $16, RW0; \ | ||
370 | xorq s4(, RT3, 8), to##0; \ | ||
371 | xorq s2(, RT1, 8), to##0; \ | ||
372 | movzbl RW0bl, RT3d; \ | ||
373 | movzbl RW0bh, RT1d; \ | ||
374 | shrl $16, RW0d; \ | ||
375 | xorq s7(, RT3, 8), to##0; \ | ||
376 | xorq s5(, RT1, 8), to##0; \ | ||
377 | movzbl RW0bl, RT3d; \ | ||
378 | movzbl RW0bh, RT1d; \ | ||
379 | load_next_key(n, RW0); \ | ||
380 | xorq s3(, RT3, 8), to##0; \ | ||
381 | xorq s1(, RT1, 8), to##0; \ | ||
382 | xorq from##1, RW1; \ | ||
383 | movzbl RW1bl, RT3d; \ | ||
384 | movzbl RW1bh, RT1d; \ | ||
385 | shrq $16, RW1; \ | ||
386 | xorq s8(, RT3, 8), to##1; \ | ||
387 | xorq s6(, RT1, 8), to##1; \ | ||
388 | movzbl RW1bl, RT3d; \ | ||
389 | movzbl RW1bh, RT1d; \ | ||
390 | shrq $16, RW1; \ | ||
391 | xorq s4(, RT3, 8), to##1; \ | ||
392 | xorq s2(, RT1, 8), to##1; \ | ||
393 | movzbl RW1bl, RT3d; \ | ||
394 | movzbl RW1bh, RT1d; \ | ||
395 | shrl $16, RW1d; \ | ||
396 | xorq s7(, RT3, 8), to##1; \ | ||
397 | xorq s5(, RT1, 8), to##1; \ | ||
398 | movzbl RW1bl, RT3d; \ | ||
399 | movzbl RW1bh, RT1d; \ | ||
400 | do_movq(RW0, RW1); \ | ||
401 | xorq s3(, RT3, 8), to##1; \ | ||
402 | xorq s1(, RT1, 8), to##1; \ | ||
403 | xorq from##2, RW2; \ | ||
404 | movzbl RW2bl, RT3d; \ | ||
405 | movzbl RW2bh, RT1d; \ | ||
406 | shrq $16, RW2; \ | ||
407 | xorq s8(, RT3, 8), to##2; \ | ||
408 | xorq s6(, RT1, 8), to##2; \ | ||
409 | movzbl RW2bl, RT3d; \ | ||
410 | movzbl RW2bh, RT1d; \ | ||
411 | shrq $16, RW2; \ | ||
412 | xorq s4(, RT3, 8), to##2; \ | ||
413 | xorq s2(, RT1, 8), to##2; \ | ||
414 | movzbl RW2bl, RT3d; \ | ||
415 | movzbl RW2bh, RT1d; \ | ||
416 | shrl $16, RW2d; \ | ||
417 | xorq s7(, RT3, 8), to##2; \ | ||
418 | xorq s5(, RT1, 8), to##2; \ | ||
419 | movzbl RW2bl, RT3d; \ | ||
420 | movzbl RW2bh, RT1d; \ | ||
421 | do_movq(RW0, RW2); \ | ||
422 | xorq s3(, RT3, 8), to##2; \ | ||
423 | xorq s1(, RT1, 8), to##2; | ||
424 | |||
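Each round3() invocation computes one DES Feistel round for three independent blocks, peeling two table-index bytes at a time out of the subkey-XORed half and folding 64-bit S-box entries (which, on a hedged reading of the .data tables below, already have the DES P permutation merged in) into the other half. Per block, the data flow is roughly:

    #include <stdint.h>

    extern const uint64_t s1[256], s2[256], s3[256], s4[256],
                          s5[256], s6[256], s7[256], s8[256];

    /* Hedged C paraphrase of one round3 lane: 'subkey' is the 64-bit
     * round key fetched by load_next_key(). */
    static uint64_t des_round(uint64_t to, uint64_t from, uint64_t subkey)
    {
            uint64_t w = from ^ subkey;

            to ^= s8[(w >>  0) & 0xff] ^ s6[(w >>  8) & 0xff];
            to ^= s4[(w >> 16) & 0xff] ^ s2[(w >> 24) & 0xff];
            to ^= s7[(w >> 32) & 0xff] ^ s5[(w >> 40) & 0xff];
            to ^= s3[(w >> 48) & 0xff] ^ s1[(w >> 56) & 0xff];
            return to;
    }

Interleaving three blocks gives the out-of-order core independent dependency chains, hiding the latency of the back-to-back table loads.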
425 | #define __movq(src, dst) \ | ||
426 | movq src, dst; | ||
427 | |||
428 | ENTRY(des3_ede_x86_64_crypt_blk_3way) | ||
429 | /* input: | ||
430 | * %rdi: ctx, round keys | ||
431 | * %rsi: dst (3 blocks) | ||
432 | * %rdx: src (3 blocks) | ||
433 | */ | ||
434 | |||
435 | pushq %rbp; | ||
436 | pushq %rbx; | ||
437 | pushq %r12; | ||
438 | pushq %r13; | ||
439 | pushq %r14; | ||
440 | pushq %r15; | ||
441 | |||
442 | /* load input */ | ||
443 | movl 0 * 4(%rdx), RL0d; | ||
444 | movl 1 * 4(%rdx), RR0d; | ||
445 | movl 2 * 4(%rdx), RL1d; | ||
446 | movl 3 * 4(%rdx), RR1d; | ||
447 | movl 4 * 4(%rdx), RL2d; | ||
448 | movl 5 * 4(%rdx), RR2d; | ||
449 | |||
450 | bswapl RL0d; | ||
451 | bswapl RR0d; | ||
452 | bswapl RL1d; | ||
453 | bswapl RR1d; | ||
454 | bswapl RL2d; | ||
455 | bswapl RR2d; | ||
456 | |||
457 | initial_permutation3(RL, RR); | ||
458 | |||
459 | movq 0(CTX), RW0; | ||
460 | movq RW0, RW1; | ||
461 | movq RW0, RW2; | ||
462 | |||
463 | round3(0, RR, RL, load_next_key, __movq); | ||
464 | round3(1, RL, RR, load_next_key, __movq); | ||
465 | round3(2, RR, RL, load_next_key, __movq); | ||
466 | round3(3, RL, RR, load_next_key, __movq); | ||
467 | round3(4, RR, RL, load_next_key, __movq); | ||
468 | round3(5, RL, RR, load_next_key, __movq); | ||
469 | round3(6, RR, RL, load_next_key, __movq); | ||
470 | round3(7, RL, RR, load_next_key, __movq); | ||
471 | round3(8, RR, RL, load_next_key, __movq); | ||
472 | round3(9, RL, RR, load_next_key, __movq); | ||
473 | round3(10, RR, RL, load_next_key, __movq); | ||
474 | round3(11, RL, RR, load_next_key, __movq); | ||
475 | round3(12, RR, RL, load_next_key, __movq); | ||
476 | round3(13, RL, RR, load_next_key, __movq); | ||
477 | round3(14, RR, RL, load_next_key, __movq); | ||
478 | round3(15, RL, RR, load_next_key, __movq); | ||
479 | |||
480 | round3(16+0, RL, RR, load_next_key, __movq); | ||
481 | round3(16+1, RR, RL, load_next_key, __movq); | ||
482 | round3(16+2, RL, RR, load_next_key, __movq); | ||
483 | round3(16+3, RR, RL, load_next_key, __movq); | ||
484 | round3(16+4, RL, RR, load_next_key, __movq); | ||
485 | round3(16+5, RR, RL, load_next_key, __movq); | ||
486 | round3(16+6, RL, RR, load_next_key, __movq); | ||
487 | round3(16+7, RR, RL, load_next_key, __movq); | ||
488 | round3(16+8, RL, RR, load_next_key, __movq); | ||
489 | round3(16+9, RR, RL, load_next_key, __movq); | ||
490 | round3(16+10, RL, RR, load_next_key, __movq); | ||
491 | round3(16+11, RR, RL, load_next_key, __movq); | ||
492 | round3(16+12, RL, RR, load_next_key, __movq); | ||
493 | round3(16+13, RR, RL, load_next_key, __movq); | ||
494 | round3(16+14, RL, RR, load_next_key, __movq); | ||
495 | round3(16+15, RR, RL, load_next_key, __movq); | ||
496 | |||
497 | round3(32+0, RR, RL, load_next_key, __movq); | ||
498 | round3(32+1, RL, RR, load_next_key, __movq); | ||
499 | round3(32+2, RR, RL, load_next_key, __movq); | ||
500 | round3(32+3, RL, RR, load_next_key, __movq); | ||
501 | round3(32+4, RR, RL, load_next_key, __movq); | ||
502 | round3(32+5, RL, RR, load_next_key, __movq); | ||
503 | round3(32+6, RR, RL, load_next_key, __movq); | ||
504 | round3(32+7, RL, RR, load_next_key, __movq); | ||
505 | round3(32+8, RR, RL, load_next_key, __movq); | ||
506 | round3(32+9, RL, RR, load_next_key, __movq); | ||
507 | round3(32+10, RR, RL, load_next_key, __movq); | ||
508 | round3(32+11, RL, RR, load_next_key, __movq); | ||
509 | round3(32+12, RR, RL, load_next_key, __movq); | ||
510 | round3(32+13, RL, RR, load_next_key, __movq); | ||
511 | round3(32+14, RR, RL, load_next_key, __movq); | ||
512 | round3(32+15, RL, RR, dummy2, dummy2); | ||
513 | |||
514 | final_permutation3(RR, RL); | ||
515 | |||
516 | bswapl RR0d; | ||
517 | bswapl RL0d; | ||
518 | bswapl RR1d; | ||
519 | bswapl RL1d; | ||
520 | bswapl RR2d; | ||
521 | bswapl RL2d; | ||
522 | |||
523 | movl RR0d, 0 * 4(%rsi); | ||
524 | movl RL0d, 1 * 4(%rsi); | ||
525 | movl RR1d, 2 * 4(%rsi); | ||
526 | movl RL1d, 3 * 4(%rsi); | ||
527 | movl RR2d, 4 * 4(%rsi); | ||
528 | movl RL2d, 5 * 4(%rsi); | ||
529 | |||
530 | popq %r15; | ||
531 | popq %r14; | ||
532 | popq %r13; | ||
533 | popq %r12; | ||
534 | popq %rbx; | ||
535 | popq %rbp; | ||
536 | |||
537 | ret; | ||
538 | ENDPROC(des3_ede_x86_64_crypt_blk_3way) | ||
539 | |||
540 | .data | ||
541 | .align 16 | ||
542 | .L_s1: | ||
543 | .quad 0x0010100001010400, 0x0000000000000000 | ||
544 | .quad 0x0000100000010000, 0x0010100001010404 | ||
545 | .quad 0x0010100001010004, 0x0000100000010404 | ||
546 | .quad 0x0000000000000004, 0x0000100000010000 | ||
547 | .quad 0x0000000000000400, 0x0010100001010400 | ||
548 | .quad 0x0010100001010404, 0x0000000000000400 | ||
549 | .quad 0x0010000001000404, 0x0010100001010004 | ||
550 | .quad 0x0010000001000000, 0x0000000000000004 | ||
551 | .quad 0x0000000000000404, 0x0010000001000400 | ||
552 | .quad 0x0010000001000400, 0x0000100000010400 | ||
553 | .quad 0x0000100000010400, 0x0010100001010000 | ||
554 | .quad 0x0010100001010000, 0x0010000001000404 | ||
555 | .quad 0x0000100000010004, 0x0010000001000004 | ||
556 | .quad 0x0010000001000004, 0x0000100000010004 | ||
557 | .quad 0x0000000000000000, 0x0000000000000404 | ||
558 | .quad 0x0000100000010404, 0x0010000001000000 | ||
559 | .quad 0x0000100000010000, 0x0010100001010404 | ||
560 | .quad 0x0000000000000004, 0x0010100001010000 | ||
561 | .quad 0x0010100001010400, 0x0010000001000000 | ||
562 | .quad 0x0010000001000000, 0x0000000000000400 | ||
563 | .quad 0x0010100001010004, 0x0000100000010000 | ||
564 | .quad 0x0000100000010400, 0x0010000001000004 | ||
565 | .quad 0x0000000000000400, 0x0000000000000004 | ||
566 | .quad 0x0010000001000404, 0x0000100000010404 | ||
567 | .quad 0x0010100001010404, 0x0000100000010004 | ||
568 | .quad 0x0010100001010000, 0x0010000001000404 | ||
569 | .quad 0x0010000001000004, 0x0000000000000404 | ||
570 | .quad 0x0000100000010404, 0x0010100001010400 | ||
571 | .quad 0x0000000000000404, 0x0010000001000400 | ||
572 | .quad 0x0010000001000400, 0x0000000000000000 | ||
573 | .quad 0x0000100000010004, 0x0000100000010400 | ||
574 | .quad 0x0000000000000000, 0x0010100001010004 | ||
575 | .L_s2: | ||
576 | .quad 0x0801080200100020, 0x0800080000000000 | ||
577 | .quad 0x0000080000000000, 0x0001080200100020 | ||
578 | .quad 0x0001000000100000, 0x0000000200000020 | ||
579 | .quad 0x0801000200100020, 0x0800080200000020 | ||
580 | .quad 0x0800000200000020, 0x0801080200100020 | ||
581 | .quad 0x0801080000100000, 0x0800000000000000 | ||
582 | .quad 0x0800080000000000, 0x0001000000100000 | ||
583 | .quad 0x0000000200000020, 0x0801000200100020 | ||
584 | .quad 0x0001080000100000, 0x0001000200100020 | ||
585 | .quad 0x0800080200000020, 0x0000000000000000 | ||
586 | .quad 0x0800000000000000, 0x0000080000000000 | ||
587 | .quad 0x0001080200100020, 0x0801000000100000 | ||
588 | .quad 0x0001000200100020, 0x0800000200000020 | ||
589 | .quad 0x0000000000000000, 0x0001080000100000 | ||
590 | .quad 0x0000080200000020, 0x0801080000100000 | ||
591 | .quad 0x0801000000100000, 0x0000080200000020 | ||
592 | .quad 0x0000000000000000, 0x0001080200100020 | ||
593 | .quad 0x0801000200100020, 0x0001000000100000 | ||
594 | .quad 0x0800080200000020, 0x0801000000100000 | ||
595 | .quad 0x0801080000100000, 0x0000080000000000 | ||
596 | .quad 0x0801000000100000, 0x0800080000000000 | ||
597 | .quad 0x0000000200000020, 0x0801080200100020 | ||
598 | .quad 0x0001080200100020, 0x0000000200000020 | ||
599 | .quad 0x0000080000000000, 0x0800000000000000 | ||
600 | .quad 0x0000080200000020, 0x0801080000100000 | ||
601 | .quad 0x0001000000100000, 0x0800000200000020 | ||
602 | .quad 0x0001000200100020, 0x0800080200000020 | ||
603 | .quad 0x0800000200000020, 0x0001000200100020 | ||
604 | .quad 0x0001080000100000, 0x0000000000000000 | ||
605 | .quad 0x0800080000000000, 0x0000080200000020 | ||
606 | .quad 0x0800000000000000, 0x0801000200100020 | ||
607 | .quad 0x0801080200100020, 0x0001080000100000 | ||
608 | .L_s3: | ||
609 | .quad 0x0000002000000208, 0x0000202008020200 | ||
610 | .quad 0x0000000000000000, 0x0000200008020008 | ||
611 | .quad 0x0000002008000200, 0x0000000000000000 | ||
612 | .quad 0x0000202000020208, 0x0000002008000200 | ||
613 | .quad 0x0000200000020008, 0x0000000008000008 | ||
614 | .quad 0x0000000008000008, 0x0000200000020000 | ||
615 | .quad 0x0000202008020208, 0x0000200000020008 | ||
616 | .quad 0x0000200008020000, 0x0000002000000208 | ||
617 | .quad 0x0000000008000000, 0x0000000000000008 | ||
618 | .quad 0x0000202008020200, 0x0000002000000200 | ||
619 | .quad 0x0000202000020200, 0x0000200008020000 | ||
620 | .quad 0x0000200008020008, 0x0000202000020208 | ||
621 | .quad 0x0000002008000208, 0x0000202000020200 | ||
622 | .quad 0x0000200000020000, 0x0000002008000208 | ||
623 | .quad 0x0000000000000008, 0x0000202008020208 | ||
624 | .quad 0x0000002000000200, 0x0000000008000000 | ||
625 | .quad 0x0000202008020200, 0x0000000008000000 | ||
626 | .quad 0x0000200000020008, 0x0000002000000208 | ||
627 | .quad 0x0000200000020000, 0x0000202008020200 | ||
628 | .quad 0x0000002008000200, 0x0000000000000000 | ||
629 | .quad 0x0000002000000200, 0x0000200000020008 | ||
630 | .quad 0x0000202008020208, 0x0000002008000200 | ||
631 | .quad 0x0000000008000008, 0x0000002000000200 | ||
632 | .quad 0x0000000000000000, 0x0000200008020008 | ||
633 | .quad 0x0000002008000208, 0x0000200000020000 | ||
634 | .quad 0x0000000008000000, 0x0000202008020208 | ||
635 | .quad 0x0000000000000008, 0x0000202000020208 | ||
636 | .quad 0x0000202000020200, 0x0000000008000008 | ||
637 | .quad 0x0000200008020000, 0x0000002008000208 | ||
638 | .quad 0x0000002000000208, 0x0000200008020000 | ||
639 | .quad 0x0000202000020208, 0x0000000000000008 | ||
640 | .quad 0x0000200008020008, 0x0000202000020200 | ||
641 | .L_s4: | ||
642 | .quad 0x1008020000002001, 0x1000020800002001 | ||
643 | .quad 0x1000020800002001, 0x0000000800000000 | ||
644 | .quad 0x0008020800002000, 0x1008000800000001 | ||
645 | .quad 0x1008000000000001, 0x1000020000002001 | ||
646 | .quad 0x0000000000000000, 0x0008020000002000 | ||
647 | .quad 0x0008020000002000, 0x1008020800002001 | ||
648 | .quad 0x1000000800000001, 0x0000000000000000 | ||
649 | .quad 0x0008000800000000, 0x1008000000000001 | ||
650 | .quad 0x1000000000000001, 0x0000020000002000 | ||
651 | .quad 0x0008000000000000, 0x1008020000002001 | ||
652 | .quad 0x0000000800000000, 0x0008000000000000 | ||
653 | .quad 0x1000020000002001, 0x0000020800002000 | ||
654 | .quad 0x1008000800000001, 0x1000000000000001 | ||
655 | .quad 0x0000020800002000, 0x0008000800000000 | ||
656 | .quad 0x0000020000002000, 0x0008020800002000 | ||
657 | .quad 0x1008020800002001, 0x1000000800000001 | ||
658 | .quad 0x0008000800000000, 0x1008000000000001 | ||
659 | .quad 0x0008020000002000, 0x1008020800002001 | ||
660 | .quad 0x1000000800000001, 0x0000000000000000 | ||
661 | .quad 0x0000000000000000, 0x0008020000002000 | ||
662 | .quad 0x0000020800002000, 0x0008000800000000 | ||
663 | .quad 0x1008000800000001, 0x1000000000000001 | ||
664 | .quad 0x1008020000002001, 0x1000020800002001 | ||
665 | .quad 0x1000020800002001, 0x0000000800000000 | ||
666 | .quad 0x1008020800002001, 0x1000000800000001 | ||
667 | .quad 0x1000000000000001, 0x0000020000002000 | ||
668 | .quad 0x1008000000000001, 0x1000020000002001 | ||
669 | .quad 0x0008020800002000, 0x1008000800000001 | ||
670 | .quad 0x1000020000002001, 0x0000020800002000 | ||
671 | .quad 0x0008000000000000, 0x1008020000002001 | ||
672 | .quad 0x0000000800000000, 0x0008000000000000 | ||
673 | .quad 0x0000020000002000, 0x0008020800002000 | ||
674 | .L_s5: | ||
675 | .quad 0x0000001000000100, 0x0020001002080100 | ||
676 | .quad 0x0020000002080000, 0x0420001002000100 | ||
677 | .quad 0x0000000000080000, 0x0000001000000100 | ||
678 | .quad 0x0400000000000000, 0x0020000002080000 | ||
679 | .quad 0x0400001000080100, 0x0000000000080000 | ||
680 | .quad 0x0020001002000100, 0x0400001000080100 | ||
681 | .quad 0x0420001002000100, 0x0420000002080000 | ||
682 | .quad 0x0000001000080100, 0x0400000000000000 | ||
683 | .quad 0x0020000002000000, 0x0400000000080000 | ||
684 | .quad 0x0400000000080000, 0x0000000000000000 | ||
685 | .quad 0x0400001000000100, 0x0420001002080100 | ||
686 | .quad 0x0420001002080100, 0x0020001002000100 | ||
687 | .quad 0x0420000002080000, 0x0400001000000100 | ||
688 | .quad 0x0000000000000000, 0x0420000002000000 | ||
689 | .quad 0x0020001002080100, 0x0020000002000000 | ||
690 | .quad 0x0420000002000000, 0x0000001000080100 | ||
691 | .quad 0x0000000000080000, 0x0420001002000100 | ||
692 | .quad 0x0000001000000100, 0x0020000002000000 | ||
693 | .quad 0x0400000000000000, 0x0020000002080000 | ||
694 | .quad 0x0420001002000100, 0x0400001000080100 | ||
695 | .quad 0x0020001002000100, 0x0400000000000000 | ||
696 | .quad 0x0420000002080000, 0x0020001002080100 | ||
697 | .quad 0x0400001000080100, 0x0000001000000100 | ||
698 | .quad 0x0020000002000000, 0x0420000002080000 | ||
699 | .quad 0x0420001002080100, 0x0000001000080100 | ||
700 | .quad 0x0420000002000000, 0x0420001002080100 | ||
701 | .quad 0x0020000002080000, 0x0000000000000000 | ||
702 | .quad 0x0400000000080000, 0x0420000002000000 | ||
703 | .quad 0x0000001000080100, 0x0020001002000100 | ||
704 | .quad 0x0400001000000100, 0x0000000000080000 | ||
705 | .quad 0x0000000000000000, 0x0400000000080000 | ||
706 | .quad 0x0020001002080100, 0x0400001000000100 | ||
707 | .L_s6: | ||
708 | .quad 0x0200000120000010, 0x0204000020000000 | ||
709 | .quad 0x0000040000000000, 0x0204040120000010 | ||
710 | .quad 0x0204000020000000, 0x0000000100000010 | ||
711 | .quad 0x0204040120000010, 0x0004000000000000 | ||
712 | .quad 0x0200040020000000, 0x0004040100000010 | ||
713 | .quad 0x0004000000000000, 0x0200000120000010 | ||
714 | .quad 0x0004000100000010, 0x0200040020000000 | ||
715 | .quad 0x0200000020000000, 0x0000040100000010 | ||
716 | .quad 0x0000000000000000, 0x0004000100000010 | ||
717 | .quad 0x0200040120000010, 0x0000040000000000 | ||
718 | .quad 0x0004040000000000, 0x0200040120000010 | ||
719 | .quad 0x0000000100000010, 0x0204000120000010 | ||
720 | .quad 0x0204000120000010, 0x0000000000000000 | ||
721 | .quad 0x0004040100000010, 0x0204040020000000 | ||
722 | .quad 0x0000040100000010, 0x0004040000000000 | ||
723 | .quad 0x0204040020000000, 0x0200000020000000 | ||
724 | .quad 0x0200040020000000, 0x0000000100000010 | ||
725 | .quad 0x0204000120000010, 0x0004040000000000 | ||
726 | .quad 0x0204040120000010, 0x0004000000000000 | ||
727 | .quad 0x0000040100000010, 0x0200000120000010 | ||
728 | .quad 0x0004000000000000, 0x0200040020000000 | ||
729 | .quad 0x0200000020000000, 0x0000040100000010 | ||
730 | .quad 0x0200000120000010, 0x0204040120000010 | ||
731 | .quad 0x0004040000000000, 0x0204000020000000 | ||
732 | .quad 0x0004040100000010, 0x0204040020000000 | ||
733 | .quad 0x0000000000000000, 0x0204000120000010 | ||
734 | .quad 0x0000000100000010, 0x0000040000000000 | ||
735 | .quad 0x0204000020000000, 0x0004040100000010 | ||
736 | .quad 0x0000040000000000, 0x0004000100000010 | ||
737 | .quad 0x0200040120000010, 0x0000000000000000 | ||
738 | .quad 0x0204040020000000, 0x0200000020000000 | ||
739 | .quad 0x0004000100000010, 0x0200040120000010 | ||
740 | .L_s7: | ||
741 | .quad 0x0002000000200000, 0x2002000004200002 | ||
742 | .quad 0x2000000004000802, 0x0000000000000000 | ||
743 | .quad 0x0000000000000800, 0x2000000004000802 | ||
744 | .quad 0x2002000000200802, 0x0002000004200800 | ||
745 | .quad 0x2002000004200802, 0x0002000000200000 | ||
746 | .quad 0x0000000000000000, 0x2000000004000002 | ||
747 | .quad 0x2000000000000002, 0x0000000004000000 | ||
748 | .quad 0x2002000004200002, 0x2000000000000802 | ||
749 | .quad 0x0000000004000800, 0x2002000000200802 | ||
750 | .quad 0x2002000000200002, 0x0000000004000800 | ||
751 | .quad 0x2000000004000002, 0x0002000004200000 | ||
752 | .quad 0x0002000004200800, 0x2002000000200002 | ||
753 | .quad 0x0002000004200000, 0x0000000000000800 | ||
754 | .quad 0x2000000000000802, 0x2002000004200802 | ||
755 | .quad 0x0002000000200800, 0x2000000000000002 | ||
756 | .quad 0x0000000004000000, 0x0002000000200800 | ||
757 | .quad 0x0000000004000000, 0x0002000000200800 | ||
758 | .quad 0x0002000000200000, 0x2000000004000802 | ||
759 | .quad 0x2000000004000802, 0x2002000004200002 | ||
760 | .quad 0x2002000004200002, 0x2000000000000002 | ||
761 | .quad 0x2002000000200002, 0x0000000004000000 | ||
762 | .quad 0x0000000004000800, 0x0002000000200000 | ||
763 | .quad 0x0002000004200800, 0x2000000000000802 | ||
764 | .quad 0x2002000000200802, 0x0002000004200800 | ||
765 | .quad 0x2000000000000802, 0x2000000004000002 | ||
766 | .quad 0x2002000004200802, 0x0002000004200000 | ||
767 | .quad 0x0002000000200800, 0x0000000000000000 | ||
768 | .quad 0x2000000000000002, 0x2002000004200802 | ||
769 | .quad 0x0000000000000000, 0x2002000000200802 | ||
770 | .quad 0x0002000004200000, 0x0000000000000800 | ||
771 | .quad 0x2000000004000002, 0x0000000004000800 | ||
772 | .quad 0x0000000000000800, 0x2002000000200002 | ||
773 | .L_s8: | ||
774 | .quad 0x0100010410001000, 0x0000010000001000 | ||
775 | .quad 0x0000000000040000, 0x0100010410041000 | ||
776 | .quad 0x0100000010000000, 0x0100010410001000 | ||
777 | .quad 0x0000000400000000, 0x0100000010000000 | ||
778 | .quad 0x0000000400040000, 0x0100000010040000 | ||
779 | .quad 0x0100010410041000, 0x0000010000041000 | ||
780 | .quad 0x0100010010041000, 0x0000010400041000 | ||
781 | .quad 0x0000010000001000, 0x0000000400000000 | ||
782 | .quad 0x0100000010040000, 0x0100000410000000 | ||
783 | .quad 0x0100010010001000, 0x0000010400001000 | ||
784 | .quad 0x0000010000041000, 0x0000000400040000 | ||
785 | .quad 0x0100000410040000, 0x0100010010041000 | ||
786 | .quad 0x0000010400001000, 0x0000000000000000 | ||
787 | .quad 0x0000000000000000, 0x0100000410040000 | ||
788 | .quad 0x0100000410000000, 0x0100010010001000 | ||
789 | .quad 0x0000010400041000, 0x0000000000040000 | ||
790 | .quad 0x0000010400041000, 0x0000000000040000 | ||
791 | .quad 0x0100010010041000, 0x0000010000001000 | ||
792 | .quad 0x0000000400000000, 0x0100000410040000 | ||
793 | .quad 0x0000010000001000, 0x0000010400041000 | ||
794 | .quad 0x0100010010001000, 0x0000000400000000 | ||
795 | .quad 0x0100000410000000, 0x0100000010040000 | ||
796 | .quad 0x0100000410040000, 0x0100000010000000 | ||
797 | .quad 0x0000000000040000, 0x0100010410001000 | ||
798 | .quad 0x0000000000000000, 0x0100010410041000 | ||
799 | .quad 0x0000000400040000, 0x0100000410000000 | ||
800 | .quad 0x0100000010040000, 0x0100010010001000 | ||
801 | .quad 0x0100010410001000, 0x0000000000000000 | ||
802 | .quad 0x0100010410041000, 0x0000010000041000 | ||
803 | .quad 0x0000010000041000, 0x0000010400001000 | ||
804 | .quad 0x0000010400001000, 0x0000000400040000 | ||
805 | .quad 0x0100000010000000, 0x0100010010041000 | ||
diff --git a/arch/x86/crypto/des3_ede_glue.c b/arch/x86/crypto/des3_ede_glue.c new file mode 100644 index 000000000000..0e9c0668fe4e --- /dev/null +++ b/arch/x86/crypto/des3_ede_glue.c | |||
@@ -0,0 +1,509 @@ | |||
1 | /* | ||
2 | * Glue Code for the assembler-optimized version of 3DES | ||
3 | * | ||
4 | * Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@mbnet.fi> | ||
5 | * | ||
6 | * CBC & ECB parts based on code (crypto/cbc.c,ecb.c) by: | ||
7 | * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> | ||
8 | * CTR part based on code (crypto/ctr.c) by: | ||
9 | * (C) Copyright IBM Corp. 2007 - Joy Latten <latten@us.ibm.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <asm/processor.h> | ||
24 | #include <crypto/des.h> | ||
25 | #include <linux/crypto.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/types.h> | ||
29 | #include <crypto/algapi.h> | ||
30 | |||
31 | struct des3_ede_x86_ctx { | ||
32 | u32 enc_expkey[DES3_EDE_EXPKEY_WORDS]; | ||
33 | u32 dec_expkey[DES3_EDE_EXPKEY_WORDS]; | ||
34 | }; | ||
35 | |||
36 | /* regular block cipher functions */ | ||
37 | asmlinkage void des3_ede_x86_64_crypt_blk(const u32 *expkey, u8 *dst, | ||
38 | const u8 *src); | ||
39 | |||
40 | /* 3-way parallel cipher functions */ | ||
41 | asmlinkage void des3_ede_x86_64_crypt_blk_3way(const u32 *expkey, u8 *dst, | ||
42 | const u8 *src); | ||
43 | |||
44 | static inline void des3_ede_enc_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, | ||
45 | const u8 *src) | ||
46 | { | ||
47 | u32 *enc_ctx = ctx->enc_expkey; | ||
48 | |||
49 | des3_ede_x86_64_crypt_blk(enc_ctx, dst, src); | ||
50 | } | ||
51 | |||
52 | static inline void des3_ede_dec_blk(struct des3_ede_x86_ctx *ctx, u8 *dst, | ||
53 | const u8 *src) | ||
54 | { | ||
55 | u32 *dec_ctx = ctx->dec_expkey; | ||
56 | |||
57 | des3_ede_x86_64_crypt_blk(dec_ctx, dst, src); | ||
58 | } | ||
59 | |||
60 | static inline void des3_ede_enc_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, | ||
61 | const u8 *src) | ||
62 | { | ||
63 | u32 *enc_ctx = ctx->enc_expkey; | ||
64 | |||
65 | des3_ede_x86_64_crypt_blk_3way(enc_ctx, dst, src); | ||
66 | } | ||
67 | |||
68 | static inline void des3_ede_dec_blk_3way(struct des3_ede_x86_ctx *ctx, u8 *dst, | ||
69 | const u8 *src) | ||
70 | { | ||
71 | u32 *dec_ctx = ctx->dec_expkey; | ||
72 | |||
73 | des3_ede_x86_64_crypt_blk_3way(dec_ctx, dst, src); | ||
74 | } | ||
75 | |||
76 | static void des3_ede_x86_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | ||
77 | { | ||
78 | des3_ede_enc_blk(crypto_tfm_ctx(tfm), dst, src); | ||
79 | } | ||
80 | |||
81 | static void des3_ede_x86_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) | ||
82 | { | ||
83 | des3_ede_dec_blk(crypto_tfm_ctx(tfm), dst, src); | ||
84 | } | ||
85 | |||
86 | static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, | ||
87 | const u32 *expkey) | ||
88 | { | ||
89 | unsigned int bsize = DES3_EDE_BLOCK_SIZE; | ||
90 | unsigned int nbytes; | ||
91 | int err; | ||
92 | |||
93 | err = blkcipher_walk_virt(desc, walk); | ||
94 | |||
95 | while ((nbytes = walk->nbytes)) { | ||
96 | u8 *wsrc = walk->src.virt.addr; | ||
97 | u8 *wdst = walk->dst.virt.addr; | ||
98 | |||
99 | /* Process three block batch */ | ||
100 | if (nbytes >= bsize * 3) { | ||
101 | do { | ||
102 | des3_ede_x86_64_crypt_blk_3way(expkey, wdst, | ||
103 | wsrc); | ||
104 | |||
105 | wsrc += bsize * 3; | ||
106 | wdst += bsize * 3; | ||
107 | nbytes -= bsize * 3; | ||
108 | } while (nbytes >= bsize * 3); | ||
109 | |||
110 | if (nbytes < bsize) | ||
111 | goto done; | ||
112 | } | ||
113 | |||
114 | /* Handle leftovers */ | ||
115 | do { | ||
116 | des3_ede_x86_64_crypt_blk(expkey, wdst, wsrc); | ||
117 | |||
118 | wsrc += bsize; | ||
119 | wdst += bsize; | ||
120 | nbytes -= bsize; | ||
121 | } while (nbytes >= bsize); | ||
122 | |||
123 | done: | ||
124 | err = blkcipher_walk_done(desc, walk, nbytes); | ||
125 | } | ||
126 | |||
127 | return err; | ||
128 | } | ||
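This batch-then-singles walk is the usual shape for N-way assembler glue: run the 3-way routine while at least three blocks remain, then mop up one block at a time. Schematically, with crypt3()/crypt1() as hypothetical stand-ins for the two asmlinkage entry points:

    #include <stddef.h>

    #define BSIZE 8 /* DES3_EDE_BLOCK_SIZE */

    /* Hypothetical stand-ins for the 3-way and 1-block asm routines. */
    extern void crypt3(const unsigned int *expkey, unsigned char *dst,
                       const unsigned char *src);
    extern void crypt1(const unsigned int *expkey, unsigned char *dst,
                       const unsigned char *src);

    static void walk_chunk(const unsigned int *expkey, unsigned char *dst,
                           const unsigned char *src, size_t nbytes)
    {
            while (nbytes >= 3 * BSIZE) {   /* 3-way fast path */
                    crypt3(expkey, dst, src);
                    src += 3 * BSIZE;
                    dst += 3 * BSIZE;
                    nbytes -= 3 * BSIZE;
            }
            while (nbytes >= BSIZE) {       /* single-block leftovers */
                    crypt1(expkey, dst, src);
                    src += BSIZE;
                    dst += BSIZE;
                    nbytes -= BSIZE;
            }
    }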
129 | |||
130 | static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
131 | struct scatterlist *src, unsigned int nbytes) | ||
132 | { | ||
133 | struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
134 | struct blkcipher_walk walk; | ||
135 | |||
136 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
137 | return ecb_crypt(desc, &walk, ctx->enc_expkey); | ||
138 | } | ||
139 | |||
140 | static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
141 | struct scatterlist *src, unsigned int nbytes) | ||
142 | { | ||
143 | struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
144 | struct blkcipher_walk walk; | ||
145 | |||
146 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
147 | return ecb_crypt(desc, &walk, ctx->dec_expkey); | ||
148 | } | ||
149 | |||
150 | static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, | ||
151 | struct blkcipher_walk *walk) | ||
152 | { | ||
153 | struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
154 | unsigned int bsize = DES3_EDE_BLOCK_SIZE; | ||
155 | unsigned int nbytes = walk->nbytes; | ||
156 | u64 *src = (u64 *)walk->src.virt.addr; | ||
157 | u64 *dst = (u64 *)walk->dst.virt.addr; | ||
158 | u64 *iv = (u64 *)walk->iv; | ||
159 | |||
160 | do { | ||
161 | *dst = *src ^ *iv; | ||
162 | des3_ede_enc_blk(ctx, (u8 *)dst, (u8 *)dst); | ||
163 | iv = dst; | ||
164 | |||
165 | src += 1; | ||
166 | dst += 1; | ||
167 | nbytes -= bsize; | ||
168 | } while (nbytes >= bsize); | ||
169 | |||
170 | *(u64 *)walk->iv = *iv; | ||
171 | return nbytes; | ||
172 | } | ||
173 | |||
174 | static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
175 | struct scatterlist *src, unsigned int nbytes) | ||
176 | { | ||
177 | struct blkcipher_walk walk; | ||
178 | int err; | ||
179 | |||
180 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
181 | err = blkcipher_walk_virt(desc, &walk); | ||
182 | |||
183 | while ((nbytes = walk.nbytes)) { | ||
184 | nbytes = __cbc_encrypt(desc, &walk); | ||
185 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
186 | } | ||
187 | |||
188 | return err; | ||
189 | } | ||
190 | |||
191 | static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, | ||
192 | struct blkcipher_walk *walk) | ||
193 | { | ||
194 | struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
195 | unsigned int bsize = DES3_EDE_BLOCK_SIZE; | ||
196 | unsigned int nbytes = walk->nbytes; | ||
197 | u64 *src = (u64 *)walk->src.virt.addr; | ||
198 | u64 *dst = (u64 *)walk->dst.virt.addr; | ||
199 | u64 ivs[3 - 1]; | ||
200 | u64 last_iv; | ||
201 | |||
202 | /* Start of the last block. */ | ||
203 | src += nbytes / bsize - 1; | ||
204 | dst += nbytes / bsize - 1; | ||
205 | |||
206 | last_iv = *src; | ||
207 | |||
208 | /* Process three block batch */ | ||
209 | if (nbytes >= bsize * 3) { | ||
210 | do { | ||
211 | nbytes -= bsize * 3 - bsize; | ||
212 | src -= 3 - 1; | ||
213 | dst -= 3 - 1; | ||
214 | |||
215 | ivs[0] = src[0]; | ||
216 | ivs[1] = src[1]; | ||
217 | |||
218 | des3_ede_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); | ||
219 | |||
220 | dst[1] ^= ivs[0]; | ||
221 | dst[2] ^= ivs[1]; | ||
222 | |||
223 | nbytes -= bsize; | ||
224 | if (nbytes < bsize) | ||
225 | goto done; | ||
226 | |||
227 | *dst ^= *(src - 1); | ||
228 | src -= 1; | ||
229 | dst -= 1; | ||
230 | } while (nbytes >= bsize * 3); | ||
231 | } | ||
232 | |||
233 | /* Handle leftovers */ | ||
234 | for (;;) { | ||
235 | des3_ede_dec_blk(ctx, (u8 *)dst, (u8 *)src); | ||
236 | |||
237 | nbytes -= bsize; | ||
238 | if (nbytes < bsize) | ||
239 | break; | ||
240 | |||
241 | *dst ^= *(src - 1); | ||
242 | src -= 1; | ||
243 | dst -= 1; | ||
244 | } | ||
245 | |||
246 | done: | ||
247 | *dst ^= *(u64 *)walk->iv; | ||
248 | *(u64 *)walk->iv = last_iv; | ||
249 | |||
250 | return nbytes; | ||
251 | } | ||
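__cbc_decrypt() runs back to front so the operation can be done in place: CBC recovers P[i] = D(C[i]) ^ C[i-1] (and P[0] = D(C[0]) ^ IV), so decrypting the last block first guarantees each needed C[i-1] is still intact in the buffer. A single-block sketch of the same idea, with decrypt_block() as a hypothetical stand-in for des3_ede_dec_blk():

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical in-place single-block decrypt. */
    extern void decrypt_block(void *ctx, uint64_t *blk);

    /* In-place CBC decrypt of n 8-byte blocks, last block first. */
    static void cbc_decrypt_sketch(void *ctx, uint64_t *buf, size_t n,
                                   uint64_t iv)
    {
            for (size_t i = n; i-- > 1; ) {
                    decrypt_block(ctx, &buf[i]); /* buf[i] = D(C[i]) */
                    buf[i] ^= buf[i - 1];        /* C[i-1] still untouched */
            }
            decrypt_block(ctx, &buf[0]);
            buf[0] ^= iv;
    }

The 3-way path above is the same computation with the decrypts batched three at a time; ivs[] stashes the two ciphertext words that the batched, in-place call would otherwise overwrite.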
252 | |||
253 | static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
254 | struct scatterlist *src, unsigned int nbytes) | ||
255 | { | ||
256 | struct blkcipher_walk walk; | ||
257 | int err; | ||
258 | |||
259 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
260 | err = blkcipher_walk_virt(desc, &walk); | ||
261 | |||
262 | while ((nbytes = walk.nbytes)) { | ||
263 | nbytes = __cbc_decrypt(desc, &walk); | ||
264 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
265 | } | ||
266 | |||
267 | return err; | ||
268 | } | ||
269 | |||
270 | static void ctr_crypt_final(struct des3_ede_x86_ctx *ctx, | ||
271 | struct blkcipher_walk *walk) | ||
272 | { | ||
273 | u8 *ctrblk = walk->iv; | ||
274 | u8 keystream[DES3_EDE_BLOCK_SIZE]; | ||
275 | u8 *src = walk->src.virt.addr; | ||
276 | u8 *dst = walk->dst.virt.addr; | ||
277 | unsigned int nbytes = walk->nbytes; | ||
278 | |||
279 | des3_ede_enc_blk(ctx, keystream, ctrblk); | ||
280 | crypto_xor(keystream, src, nbytes); | ||
281 | memcpy(dst, keystream, nbytes); | ||
282 | |||
283 | crypto_inc(ctrblk, DES3_EDE_BLOCK_SIZE); | ||
284 | } | ||
285 | |||
286 | static unsigned int __ctr_crypt(struct blkcipher_desc *desc, | ||
287 | struct blkcipher_walk *walk) | ||
288 | { | ||
289 | struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); | ||
290 | unsigned int bsize = DES3_EDE_BLOCK_SIZE; | ||
291 | unsigned int nbytes = walk->nbytes; | ||
292 | __be64 *src = (__be64 *)walk->src.virt.addr; | ||
293 | __be64 *dst = (__be64 *)walk->dst.virt.addr; | ||
294 | u64 ctrblk = be64_to_cpu(*(__be64 *)walk->iv); | ||
295 | __be64 ctrblocks[3]; | ||
296 | |||
297 | /* Process three block batch */ | ||
298 | if (nbytes >= bsize * 3) { | ||
299 | do { | ||
300 | /* create ctrblks for parallel encrypt */ | ||
301 | ctrblocks[0] = cpu_to_be64(ctrblk++); | ||
302 | ctrblocks[1] = cpu_to_be64(ctrblk++); | ||
303 | ctrblocks[2] = cpu_to_be64(ctrblk++); | ||
304 | |||
305 | des3_ede_enc_blk_3way(ctx, (u8 *)ctrblocks, | ||
306 | (u8 *)ctrblocks); | ||
307 | |||
308 | dst[0] = src[0] ^ ctrblocks[0]; | ||
309 | dst[1] = src[1] ^ ctrblocks[1]; | ||
310 | dst[2] = src[2] ^ ctrblocks[2]; | ||
311 | |||
312 | src += 3; | ||
313 | dst += 3; | ||
314 | } while ((nbytes -= bsize * 3) >= bsize * 3); | ||
315 | |||
316 | if (nbytes < bsize) | ||
317 | goto done; | ||
318 | } | ||
319 | |||
320 | /* Handle leftovers */ | ||
321 | do { | ||
322 | ctrblocks[0] = cpu_to_be64(ctrblk++); | ||
323 | |||
324 | des3_ede_enc_blk(ctx, (u8 *)ctrblocks, (u8 *)ctrblocks); | ||
325 | |||
326 | dst[0] = src[0] ^ ctrblocks[0]; | ||
327 | |||
328 | src += 1; | ||
329 | dst += 1; | ||
330 | } while ((nbytes -= bsize) >= bsize); | ||
331 | |||
332 | done: | ||
333 | *(__be64 *)walk->iv = cpu_to_be64(ctrblk); | ||
334 | return nbytes; | ||
335 | } | ||
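CTR mode turns the block cipher into a stream cipher: C[i] = P[i] ^ E(K, ctr + i), with the counter kept as a big-endian 64-bit block. Since XORing the keystream is its own inverse, the algorithm table below registers ctr_crypt() as both .encrypt and .decrypt, and a short final block needs no padding. A compact sketch, with encrypt_block() as a hypothetical stand-in for des3_ede_enc_blk() (assuming a little-endian host, as this x86 glue may):

    #include <stddef.h>
    #include <stdint.h>

    /* Hypothetical single-block encrypt: dst = E(K, src), 8 bytes. */
    extern void encrypt_block(void *ctx, uint8_t *dst, const uint8_t *src);

    static void ctr_sketch(void *ctx, uint8_t *dst, const uint8_t *src,
                           size_t nbytes, uint64_t ctr)
    {
            uint8_t ks[8];

            while (nbytes) {
                    uint64_t be = __builtin_bswap64(ctr++); /* big-endian ctr */
                    size_t n = nbytes < 8 ? nbytes : 8;
                    size_t i;

                    encrypt_block(ctx, ks, (const uint8_t *)&be);
                    for (i = 0; i < n; i++)  /* a tail xors only n bytes */
                            dst[i] = src[i] ^ ks[i];
                    src += n;
                    dst += n;
                    nbytes -= n;
            }
    }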
336 | |||
337 | static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, | ||
338 | struct scatterlist *src, unsigned int nbytes) | ||
339 | { | ||
340 | struct blkcipher_walk walk; | ||
341 | int err; | ||
342 | |||
343 | blkcipher_walk_init(&walk, dst, src, nbytes); | ||
344 | err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE); | ||
345 | |||
346 | while ((nbytes = walk.nbytes) >= DES3_EDE_BLOCK_SIZE) { | ||
347 | nbytes = __ctr_crypt(desc, &walk); | ||
348 | err = blkcipher_walk_done(desc, &walk, nbytes); | ||
349 | } | ||
350 | |||
351 | if (walk.nbytes) { | ||
352 | ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); | ||
353 | err = blkcipher_walk_done(desc, &walk, 0); | ||
354 | } | ||
355 | |||
356 | return err; | ||
357 | } | ||
358 | |||
359 | static int des3_ede_x86_setkey(struct crypto_tfm *tfm, const u8 *key, | ||
360 | unsigned int keylen) | ||
361 | { | ||
362 | struct des3_ede_x86_ctx *ctx = crypto_tfm_ctx(tfm); | ||
363 | u32 i, j, tmp; | ||
364 | int err; | ||
365 | |||
366 | /* Generate encryption context using generic implementation. */ | ||
367 | err = __des3_ede_setkey(ctx->enc_expkey, &tfm->crt_flags, key, keylen); | ||
368 | if (err < 0) | ||
369 | return err; | ||
370 | |||
371 | /* Fix encryption context for this implementation and form decryption | ||
372 | * context. */ | ||
373 | j = DES3_EDE_EXPKEY_WORDS - 2; | ||
374 | for (i = 0; i < DES3_EDE_EXPKEY_WORDS; i += 2, j -= 2) { | ||
375 | tmp = ror32(ctx->enc_expkey[i + 1], 4); | ||
376 | ctx->enc_expkey[i + 1] = tmp; | ||
377 | |||
378 | ctx->dec_expkey[j + 0] = ctx->enc_expkey[i + 0]; | ||
379 | ctx->dec_expkey[j + 1] = tmp; | ||
380 | } | ||
381 | |||
382 | return 0; | ||
383 | } | ||
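The decryption schedule built here is the encryption schedule consumed in reverse: j walks down from the end as i walks up, copying each two-word subkey pair across after pre-rotating the odd word by 4 bits so it matches how the assembly indexes its S-box tables. Stripped of that layout fix-up, the core idea is just:

    #include <stdint.h>

    /* Hedged sketch: DES decryption runs the same rounds with the
     * per-round subkeys in reverse order. */
    static void reverse_subkeys(uint64_t *dec, const uint64_t *enc,
                                int nrounds)
    {
            int i;

            for (i = 0; i < nrounds; i++)
                    dec[i] = enc[nrounds - 1 - i];
    }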
384 | |||
385 | static struct crypto_alg des3_ede_algs[4] = { { | ||
386 | .cra_name = "des3_ede", | ||
387 | .cra_driver_name = "des3_ede-asm", | ||
388 | .cra_priority = 200, | ||
389 | .cra_flags = CRYPTO_ALG_TYPE_CIPHER, | ||
390 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
391 | .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), | ||
392 | .cra_alignmask = 0, | ||
393 | .cra_module = THIS_MODULE, | ||
394 | .cra_u = { | ||
395 | .cipher = { | ||
396 | .cia_min_keysize = DES3_EDE_KEY_SIZE, | ||
397 | .cia_max_keysize = DES3_EDE_KEY_SIZE, | ||
398 | .cia_setkey = des3_ede_x86_setkey, | ||
399 | .cia_encrypt = des3_ede_x86_encrypt, | ||
400 | .cia_decrypt = des3_ede_x86_decrypt, | ||
401 | } | ||
402 | } | ||
403 | }, { | ||
404 | .cra_name = "ecb(des3_ede)", | ||
405 | .cra_driver_name = "ecb-des3_ede-asm", | ||
406 | .cra_priority = 300, | ||
407 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
408 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
409 | .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), | ||
410 | .cra_alignmask = 0, | ||
411 | .cra_type = &crypto_blkcipher_type, | ||
412 | .cra_module = THIS_MODULE, | ||
413 | .cra_u = { | ||
414 | .blkcipher = { | ||
415 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
416 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
417 | .setkey = des3_ede_x86_setkey, | ||
418 | .encrypt = ecb_encrypt, | ||
419 | .decrypt = ecb_decrypt, | ||
420 | }, | ||
421 | }, | ||
422 | }, { | ||
423 | .cra_name = "cbc(des3_ede)", | ||
424 | .cra_driver_name = "cbc-des3_ede-asm", | ||
425 | .cra_priority = 300, | ||
426 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
427 | .cra_blocksize = DES3_EDE_BLOCK_SIZE, | ||
428 | .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), | ||
429 | .cra_alignmask = 0, | ||
430 | .cra_type = &crypto_blkcipher_type, | ||
431 | .cra_module = THIS_MODULE, | ||
432 | .cra_u = { | ||
433 | .blkcipher = { | ||
434 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
435 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
436 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
437 | .setkey = des3_ede_x86_setkey, | ||
438 | .encrypt = cbc_encrypt, | ||
439 | .decrypt = cbc_decrypt, | ||
440 | }, | ||
441 | }, | ||
442 | }, { | ||
443 | .cra_name = "ctr(des3_ede)", | ||
444 | .cra_driver_name = "ctr-des3_ede-asm", | ||
445 | .cra_priority = 300, | ||
446 | .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER, | ||
447 | .cra_blocksize = 1, | ||
448 | .cra_ctxsize = sizeof(struct des3_ede_x86_ctx), | ||
449 | .cra_alignmask = 0, | ||
450 | .cra_type = &crypto_blkcipher_type, | ||
451 | .cra_module = THIS_MODULE, | ||
452 | .cra_u = { | ||
453 | .blkcipher = { | ||
454 | .min_keysize = DES3_EDE_KEY_SIZE, | ||
455 | .max_keysize = DES3_EDE_KEY_SIZE, | ||
456 | .ivsize = DES3_EDE_BLOCK_SIZE, | ||
457 | .setkey = des3_ede_x86_setkey, | ||
458 | .encrypt = ctr_crypt, | ||
459 | .decrypt = ctr_crypt, | ||
460 | }, | ||
461 | }, | ||
462 | } }; | ||
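The cra_priority values drive algorithm selection: 300 for the asm ECB/CBC/CTR modes (and 200 for the bare cipher) is intended to outrank the generic C des3_ede implementation, so ordinary users pick these up transparently by name. A hedged usage sketch, kernel context assumed:

    #include <linux/crypto.h>
    #include <linux/err.h>

    /* The crypto core resolves "cbc(des3_ede)" to the highest-priority
     * registered provider -- cbc-des3_ede-asm once this module loads. */
    static int get_des3_cbc(struct crypto_blkcipher **out)
    {
            struct crypto_blkcipher *tfm;

            tfm = crypto_alloc_blkcipher("cbc(des3_ede)", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            *out = tfm;
            return 0;
    }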
463 | |||
464 | static bool is_blacklisted_cpu(void) | ||
465 | { | ||
466 | if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) | ||
467 | return false; | ||
468 | |||
469 | if (boot_cpu_data.x86 == 0x0f) { | ||
470 | /* | ||
471 | * On Pentium 4, des3_ede-x86_64 is slower than the generic C | ||
472 | * implementation because it uses 64-bit rotates (which are really | ||
473 | * slow on P4). Therefore blacklist P4s. | ||
474 | */ | ||
475 | return true; | ||
476 | } | ||
477 | |||
478 | return false; | ||
479 | } | ||
480 | |||
481 | static int force; | ||
482 | module_param(force, int, 0); | ||
483 | MODULE_PARM_DESC(force, "Force module load, ignore CPU blacklist"); | ||
484 | |||
485 | static int __init des3_ede_x86_init(void) | ||
486 | { | ||
487 | if (!force && is_blacklisted_cpu()) { | ||
488 | pr_info("des3_ede-x86_64: performance on this CPU would be suboptimal: disabling des3_ede-x86_64.\n"); | ||
489 | return -ENODEV; | ||
490 | } | ||
491 | |||
492 | return crypto_register_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs)); | ||
493 | } | ||
494 | |||
495 | static void __exit des3_ede_x86_fini(void) | ||
496 | { | ||
497 | crypto_unregister_algs(des3_ede_algs, ARRAY_SIZE(des3_ede_algs)); | ||
498 | } | ||
499 | |||
500 | module_init(des3_ede_x86_init); | ||
501 | module_exit(des3_ede_x86_fini); | ||
502 | |||
503 | MODULE_LICENSE("GPL"); | ||
504 | MODULE_DESCRIPTION("Triple DES EDE Cipher Algorithm, asm optimized"); | ||
505 | MODULE_ALIAS("des3_ede"); | ||
506 | MODULE_ALIAS("des3_ede-asm"); | ||
507 | MODULE_ALIAS("des"); | ||
508 | MODULE_ALIAS("des-asm"); | ||
509 | MODULE_AUTHOR("Jussi Kivilinna <jussi.kivilinna@iki.fi>"); | ||
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 19b0ebafcd3e..79752f2bdec5 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h | |||
@@ -99,7 +99,7 @@ static inline void native_apic_mem_write(u32 reg, u32 v) | |||
99 | { | 99 | { |
100 | volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); | 100 | volatile u32 *addr = (volatile u32 *)(APIC_BASE + reg); |
101 | 101 | ||
102 | alternative_io("movl %0, %1", "xchgl %0, %1", X86_FEATURE_11AP, | 102 | alternative_io("movl %0, %1", "xchgl %0, %1", X86_BUG_11AP, |
103 | ASM_OUTPUT2("=r" (v), "=m" (*addr)), | 103 | ASM_OUTPUT2("=r" (v), "=m" (*addr)), |
104 | ASM_OUTPUT2("0" (v), "m" (*addr))); | 104 | ASM_OUTPUT2("0" (v), "m" (*addr))); |
105 | } | 105 | } |
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 5c7198cca5ed..0f4460b5636d 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h | |||
@@ -99,7 +99,7 @@ | |||
99 | #if defined(CONFIG_X86_PPRO_FENCE) | 99 | #if defined(CONFIG_X86_PPRO_FENCE) |
100 | 100 | ||
101 | /* | 101 | /* |
102 | * For either of these options x86 doesn't have a strong TSO memory | 102 | * For this option x86 doesn't have a strong TSO memory |
103 | * model and we should fall back to full barriers. | 103 | * model and we should fall back to full barriers. |
104 | */ | 104 | */ |
105 | 105 | ||
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h index d47786acb016..99c105d78b7e 100644 --- a/arch/x86/include/asm/cmpxchg.h +++ b/arch/x86/include/asm/cmpxchg.h | |||
@@ -4,6 +4,8 @@ | |||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ | 5 | #include <asm/alternative.h> /* Provides LOCK_PREFIX */ |
6 | 6 | ||
7 | #define __HAVE_ARCH_CMPXCHG 1 | ||
8 | |||
7 | /* | 9 | /* |
8 | * Non-existent functions to indicate usage errors at link time | 10 | * Non-existent functions to indicate usage errors at link time |
9 | * (or compile-time if the compiler implements __compiletime_error()). | 11 | * (or compile-time if the compiler implements __compiletime_error()). |
@@ -143,7 +145,6 @@ extern void __add_wrong_size(void) | |||
143 | # include <asm/cmpxchg_64.h> | 145 | # include <asm/cmpxchg_64.h> |
144 | #endif | 146 | #endif |
145 | 147 | ||
146 | #ifdef __HAVE_ARCH_CMPXCHG | ||
147 | #define cmpxchg(ptr, old, new) \ | 148 | #define cmpxchg(ptr, old, new) \ |
148 | __cmpxchg(ptr, old, new, sizeof(*(ptr))) | 149 | __cmpxchg(ptr, old, new, sizeof(*(ptr))) |
149 | 150 | ||
@@ -152,7 +153,6 @@ extern void __add_wrong_size(void) | |||
152 | 153 | ||
153 | #define cmpxchg_local(ptr, old, new) \ | 154 | #define cmpxchg_local(ptr, old, new) \ |
154 | __cmpxchg_local(ptr, old, new, sizeof(*(ptr))) | 155 | __cmpxchg_local(ptr, old, new, sizeof(*(ptr))) |
155 | #endif | ||
156 | 156 | ||
157 | /* | 157 | /* |
158 | * xadd() adds "inc" to "*ptr" and atomically returns the previous | 158 | * xadd() adds "inc" to "*ptr" and atomically returns the previous |
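For context on the cmpxchg() hunks above: the macro atomically compares *ptr with old and stores new only on a match, returning the value actually read, and with __HAVE_ARCH_CMPXCHG now defined unconditionally for x86 the #ifdef guard around the generic wrappers can go. The canonical lock-free usage pattern is a retry loop; a hedged kernel-style sketch:

    /* Lock-free increment built on cmpxchg(); retries until no other
     * CPU has changed the value between the read and the swap. */
    static void lockfree_inc(unsigned long *counter)
    {
            unsigned long old, new;

            do {
                    old = ACCESS_ONCE(*counter);
                    new = old + 1;
            } while (cmpxchg(counter, old, new) != old);
    }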
diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h index f8bf2eecab86..f7e142926481 100644 --- a/arch/x86/include/asm/cmpxchg_32.h +++ b/arch/x86/include/asm/cmpxchg_32.h | |||
@@ -34,8 +34,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 value) | |||
34 | : "memory"); | 34 | : "memory"); |
35 | } | 35 | } |
36 | 36 | ||
37 | #define __HAVE_ARCH_CMPXCHG 1 | ||
38 | |||
39 | #ifdef CONFIG_X86_CMPXCHG64 | 37 | #ifdef CONFIG_X86_CMPXCHG64 |
40 | #define cmpxchg64(ptr, o, n) \ | 38 | #define cmpxchg64(ptr, o, n) \ |
41 | ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ | 39 | ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ |
diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h index 614be87f1a9b..1af94697aae5 100644 --- a/arch/x86/include/asm/cmpxchg_64.h +++ b/arch/x86/include/asm/cmpxchg_64.h | |||
@@ -6,8 +6,6 @@ static inline void set_64bit(volatile u64 *ptr, u64 val) | |||
6 | *ptr = val; | 6 | *ptr = val; |
7 | } | 7 | } |
8 | 8 | ||
9 | #define __HAVE_ARCH_CMPXCHG 1 | ||
10 | |||
11 | #define cmpxchg64(ptr, o, n) \ | 9 | #define cmpxchg64(ptr, o, n) \ |
12 | ({ \ | 10 | ({ \ |
13 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ | 11 | BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ |
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index e265ff95d16d..bb9b258d60e7 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <asm/required-features.h> | 8 | #include <asm/required-features.h> |
9 | #endif | 9 | #endif |
10 | 10 | ||
11 | #define NCAPINTS 10 /* N 32-bit words worth of info */ | 11 | #define NCAPINTS 11 /* N 32-bit words worth of info */ |
12 | #define NBUGINTS 1 /* N 32-bit bug flags */ | 12 | #define NBUGINTS 1 /* N 32-bit bug flags */ |
13 | 13 | ||
14 | /* | 14 | /* |
@@ -18,213 +18,218 @@ | |||
18 | */ | 18 | */ |
19 | 19 | ||
20 | /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ | 20 | /* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */ |
21 | #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ | 21 | #define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */ |
22 | #define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */ | 22 | #define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */ |
23 | #define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */ | 23 | #define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */ |
24 | #define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */ | 24 | #define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */ |
25 | #define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */ | 25 | #define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */ |
26 | #define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers */ | 26 | #define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */ |
27 | #define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */ | 27 | #define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */ |
28 | #define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Exception */ | 28 | #define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */ |
29 | #define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */ | 29 | #define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */ |
30 | #define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */ | 30 | #define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */ |
31 | #define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */ | 31 | #define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */ |
32 | #define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */ | 32 | #define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */ |
33 | #define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */ | 33 | #define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */ |
34 | #define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */ | 34 | #define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */ |
35 | #define X86_FEATURE_CMOV (0*32+15) /* CMOV instructions */ | 35 | #define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions */ |
36 | /* (plus FCMOVcc, FCOMI with FPU) */ | 36 | /* (plus FCMOVcc, FCOMI with FPU) */ |
37 | #define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */ | 37 | #define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */ |
38 | #define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */ | 38 | #define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */ |
39 | #define X86_FEATURE_PN (0*32+18) /* Processor serial number */ | 39 | #define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */ |
40 | #define X86_FEATURE_CLFLUSH (0*32+19) /* CLFLUSH instruction */ | 40 | #define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */ |
41 | #define X86_FEATURE_DS (0*32+21) /* "dts" Debug Store */ | 41 | #define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */ |
42 | #define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */ | 42 | #define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */ |
43 | #define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */ | 43 | #define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */ |
44 | #define X86_FEATURE_FXSR (0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ | 44 | #define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */ |
45 | #define X86_FEATURE_XMM (0*32+25) /* "sse" */ | 45 | #define X86_FEATURE_XMM ( 0*32+25) /* "sse" */ |
46 | #define X86_FEATURE_XMM2 (0*32+26) /* "sse2" */ | 46 | #define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */ |
47 | #define X86_FEATURE_SELFSNOOP (0*32+27) /* "ss" CPU self snoop */ | 47 | #define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */ |
48 | #define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */ | 48 | #define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */ |
49 | #define X86_FEATURE_ACC (0*32+29) /* "tm" Automatic clock control */ | 49 | #define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */ |
50 | #define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */ | 50 | #define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */ |
51 | #define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */ | 51 | #define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */ |
52 | 52 | ||
53 | /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ | 53 | /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */ |
54 | /* Don't duplicate feature flags which are redundant with Intel! */ | 54 | /* Don't duplicate feature flags which are redundant with Intel! */ |
55 | #define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */ | 55 | #define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */ |
56 | #define X86_FEATURE_MP (1*32+19) /* MP Capable. */ | 56 | #define X86_FEATURE_MP ( 1*32+19) /* MP Capable. */ |
57 | #define X86_FEATURE_NX (1*32+20) /* Execute Disable */ | 57 | #define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */ |
58 | #define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */ | 58 | #define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */ |
59 | #define X86_FEATURE_FXSR_OPT (1*32+25) /* FXSAVE/FXRSTOR optimizations */ | 59 | #define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */ |
60 | #define X86_FEATURE_GBPAGES (1*32+26) /* "pdpe1gb" GB pages */ | 60 | #define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */ |
61 | #define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */ | 61 | #define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */ |
62 | #define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */ | 62 | #define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64) */ |
63 | #define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */ | 63 | #define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow! extensions */ |
64 | #define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */ | 64 | #define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow! */ |
65 | 65 | ||
66 | /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ | 66 | /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */ |
67 | #define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */ | 67 | #define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */ |
68 | #define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */ | 68 | #define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */ |
69 | #define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */ | 69 | #define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */ |
70 | 70 | ||
71 | /* Other features, Linux-defined mapping, word 3 */ | 71 | /* Other features, Linux-defined mapping, word 3 */ |
72 | /* This range is used for feature bits which conflict or are synthesized */ | 72 | /* This range is used for feature bits which conflict or are synthesized */ |
73 | #define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */ | 73 | #define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */ |
74 | #define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */ | 74 | #define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */ |
75 | #define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */ | 75 | #define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */ |
76 | #define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */ | 76 | #define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */ |
77 | /* cpu types for specific tunings: */ | 77 | /* cpu types for specific tunings: */ |
78 | #define X86_FEATURE_K8 (3*32+ 4) /* "" Opteron, Athlon64 */ | 78 | #define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */ |
79 | #define X86_FEATURE_K7 (3*32+ 5) /* "" Athlon */ | 79 | #define X86_FEATURE_K7 ( 3*32+ 5) /* "" Athlon */ |
80 | #define X86_FEATURE_P3 (3*32+ 6) /* "" P3 */ | 80 | #define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */ |
81 | #define X86_FEATURE_P4 (3*32+ 7) /* "" P4 */ | 81 | #define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ |
82 | #define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */ | 82 | #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ |
83 | #define X86_FEATURE_UP (3*32+ 9) /* smp kernel running on up */ | 83 | #define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ |
84 | #define X86_FEATURE_FXSAVE_LEAK (3*32+10) /* "" FXSAVE leaks FOP/FIP/FOP */ | 84 | /* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */ |
85 | #define X86_FEATURE_ARCH_PERFMON (3*32+11) /* Intel Architectural PerfMon */ | 85 | #define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ |
86 | #define X86_FEATURE_PEBS (3*32+12) /* Precise-Event Based Sampling */ | 86 | #define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ |
87 | #define X86_FEATURE_BTS (3*32+13) /* Branch Trace Store */ | 87 | #define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ |
88 | #define X86_FEATURE_SYSCALL32 (3*32+14) /* "" syscall in ia32 userspace */ | 88 | #define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in ia32 userspace */ |
89 | #define X86_FEATURE_SYSENTER32 (3*32+15) /* "" sysenter in ia32 userspace */ | 89 | #define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in ia32 userspace */ |
90 | #define X86_FEATURE_REP_GOOD (3*32+16) /* rep microcode works well */ | 90 | #define X86_FEATURE_REP_GOOD ( 3*32+16) /* rep microcode works well */ |
91 | #define X86_FEATURE_MFENCE_RDTSC (3*32+17) /* "" Mfence synchronizes RDTSC */ | 91 | #define X86_FEATURE_MFENCE_RDTSC ( 3*32+17) /* "" Mfence synchronizes RDTSC */ |
92 | #define X86_FEATURE_LFENCE_RDTSC (3*32+18) /* "" Lfence synchronizes RDTSC */ | 92 | #define X86_FEATURE_LFENCE_RDTSC ( 3*32+18) /* "" Lfence synchronizes RDTSC */ |
93 | #define X86_FEATURE_11AP (3*32+19) /* "" Bad local APIC aka 11AP */ | 93 | /* free, was #define X86_FEATURE_11AP ( 3*32+19) * "" Bad local APIC aka 11AP */ |
94 | #define X86_FEATURE_NOPL (3*32+20) /* The NOPL (0F 1F) instructions */ | 94 | #define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */ |
95 | #define X86_FEATURE_ALWAYS (3*32+21) /* "" Always-present feature */ | 95 | #define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */ |
96 | #define X86_FEATURE_XTOPOLOGY (3*32+22) /* cpu topology enum extensions */ | 96 | #define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* cpu topology enum extensions */ |
97 | #define X86_FEATURE_TSC_RELIABLE (3*32+23) /* TSC is known to be reliable */ | 97 | #define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */ |
98 | #define X86_FEATURE_NONSTOP_TSC (3*32+24) /* TSC does not stop in C states */ | 98 | #define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */ |
99 | #define X86_FEATURE_CLFLUSH_MONITOR (3*32+25) /* "" clflush reqd with monitor */ | 99 | /* free, was #define X86_FEATURE_CLFLUSH_MONITOR ( 3*32+25) * "" clflush reqd with monitor */ |
100 | #define X86_FEATURE_EXTD_APICID (3*32+26) /* has extended APICID (8 bits) */ | 100 | #define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */ |
101 | #define X86_FEATURE_AMD_DCM (3*32+27) /* multi-node processor */ | 101 | #define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */ |
102 | #define X86_FEATURE_APERFMPERF (3*32+28) /* APERFMPERF */ | 102 | #define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */ |
103 | #define X86_FEATURE_EAGER_FPU (3*32+29) /* "eagerfpu" Non lazy FPU restore */ | 103 | #define X86_FEATURE_EAGER_FPU ( 3*32+29) /* "eagerfpu" Non lazy FPU restore */ |
104 | #define X86_FEATURE_NONSTOP_TSC_S3 (3*32+30) /* TSC doesn't stop in S3 state */ | 104 | #define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */ |
105 | 105 | ||
106 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ | 106 | /* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */ |
107 | #define X86_FEATURE_XMM3 (4*32+ 0) /* "pni" SSE-3 */ | 107 | #define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */ |
108 | #define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* PCLMULQDQ instruction */ | 108 | #define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */ |
109 | #define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */ | 109 | #define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */ |
110 | #define X86_FEATURE_MWAIT (4*32+ 3) /* "monitor" Monitor/Mwait support */ | 110 | #define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" Monitor/Mwait support */ |
111 | #define X86_FEATURE_DSCPL (4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ | 111 | #define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL Qual. Debug Store */ |
112 | #define X86_FEATURE_VMX (4*32+ 5) /* Hardware virtualization */ | 112 | #define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */ |
113 | #define X86_FEATURE_SMX (4*32+ 6) /* Safer mode */ | 113 | #define X86_FEATURE_SMX ( 4*32+ 6) /* Safer mode */ |
114 | #define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */ | 114 | #define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */ |
115 | #define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */ | 115 | #define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */ |
116 | #define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental SSE-3 */ | 116 | #define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */ |
117 | #define X86_FEATURE_CID (4*32+10) /* Context ID */ | 117 | #define X86_FEATURE_CID ( 4*32+10) /* Context ID */ |
118 | #define X86_FEATURE_FMA (4*32+12) /* Fused multiply-add */ | 118 | #define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */ |
119 | #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ | 119 | #define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B */ |
120 | #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ | 120 | #define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */ |
121 | #define X86_FEATURE_PDCM (4*32+15) /* Performance Capabilities */ | 121 | #define X86_FEATURE_PDCM ( 4*32+15) /* Performance Capabilities */ |
122 | #define X86_FEATURE_PCID (4*32+17) /* Process Context Identifiers */ | 122 | #define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */ |
123 | #define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */ | 123 | #define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */ |
124 | #define X86_FEATURE_XMM4_1 (4*32+19) /* "sse4_1" SSE-4.1 */ | 124 | #define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */ |
125 | #define X86_FEATURE_XMM4_2 (4*32+20) /* "sse4_2" SSE-4.2 */ | 125 | #define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */ |
126 | #define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */ | 126 | #define X86_FEATURE_X2APIC ( 4*32+21) /* x2APIC */ |
127 | #define X86_FEATURE_MOVBE (4*32+22) /* MOVBE instruction */ | 127 | #define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */ |
128 | #define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */ | 128 | #define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */ |
129 | #define X86_FEATURE_TSC_DEADLINE_TIMER (4*32+24) /* Tsc deadline timer */ | 129 | #define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* Tsc deadline timer */ |
130 | #define X86_FEATURE_AES (4*32+25) /* AES instructions */ | 130 | #define X86_FEATURE_AES ( 4*32+25) /* AES instructions */ |
131 | #define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ | 131 | #define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */ |
132 | #define X86_FEATURE_OSXSAVE (4*32+27) /* "" XSAVE enabled in the OS */ | 132 | #define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE enabled in the OS */ |
133 | #define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */ | 133 | #define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */ |
134 | #define X86_FEATURE_F16C (4*32+29) /* 16-bit fp conversions */ | 134 | #define X86_FEATURE_F16C ( 4*32+29) /* 16-bit fp conversions */ |
135 | #define X86_FEATURE_RDRAND (4*32+30) /* The RDRAND instruction */ | 135 | #define X86_FEATURE_RDRAND ( 4*32+30) /* The RDRAND instruction */ |
136 | #define X86_FEATURE_HYPERVISOR (4*32+31) /* Running on a hypervisor */ | 136 | #define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */ |
137 | 137 | ||
138 | /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ | 138 | /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ |
139 | #define X86_FEATURE_XSTORE (5*32+ 2) /* "rng" RNG present (xstore) */ | 139 | #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */ |
140 | #define X86_FEATURE_XSTORE_EN (5*32+ 3) /* "rng_en" RNG enabled */ | 140 | #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */ |
141 | #define X86_FEATURE_XCRYPT (5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ | 141 | #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */ |
142 | #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* "ace_en" on-CPU crypto enabled */ | 142 | #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */ |
143 | #define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */ | 143 | #define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */ |
144 | #define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */ | 144 | #define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */ |
145 | #define X86_FEATURE_PHE (5*32+10) /* PadLock Hash Engine */ | 145 | #define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */ |
146 | #define X86_FEATURE_PHE_EN (5*32+11) /* PHE enabled */ | 146 | #define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */ |
147 | #define X86_FEATURE_PMM (5*32+12) /* PadLock Montgomery Multiplier */ | 147 | #define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */ |
148 | #define X86_FEATURE_PMM_EN (5*32+13) /* PMM enabled */ | 148 | #define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */ |
149 | 149 | ||
150 | /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ | 150 | /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ |
151 | #define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ | 151 | #define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */ |
152 | #define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ | 152 | #define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */ |
153 | #define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */ | 153 | #define X86_FEATURE_SVM ( 6*32+ 2) /* Secure virtual machine */ |
154 | #define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */ | 154 | #define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */ |
155 | #define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */ | 155 | #define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */ |
156 | #define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */ | 156 | #define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */ |
157 | #define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */ | 157 | #define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */ |
158 | #define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */ | 158 | #define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */ |
159 | #define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */ | 159 | #define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */ |
160 | #define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */ | 160 | #define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */ |
161 | #define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */ | 161 | #define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */ |
162 | #define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */ | 162 | #define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */ |
163 | #define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */ | 163 | #define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */ |
164 | #define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */ | 164 | #define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */ |
165 | #define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */ | 165 | #define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */ |
166 | #define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */ | 166 | #define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */ |
167 | #define X86_FEATURE_TCE (6*32+17) /* translation cache extension */ | 167 | #define X86_FEATURE_TCE ( 6*32+17) /* translation cache extension */ |
168 | #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */ | 168 | #define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */ |
169 | #define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */ | 169 | #define X86_FEATURE_TBM ( 6*32+21) /* trailing bit manipulations */ |
170 | #define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */ | 170 | #define X86_FEATURE_TOPOEXT ( 6*32+22) /* topology extensions CPUID leafs */ |
171 | #define X86_FEATURE_PERFCTR_CORE (6*32+23) /* core performance counter extensions */ | 171 | #define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* core performance counter extensions */ |
172 | #define X86_FEATURE_PERFCTR_NB (6*32+24) /* NB performance counter extensions */ | 172 | #define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */ |
173 | #define X86_FEATURE_PERFCTR_L2 (6*32+28) /* L2 performance counter extensions */ | 173 | #define X86_FEATURE_PERFCTR_L2 ( 6*32+28) /* L2 performance counter extensions */ |
174 | 174 | ||
175 | /* | 175 | /* |
176 | * Auxiliary flags: Linux defined - For features scattered in various | 176 | * Auxiliary flags: Linux defined - For features scattered in various |
177 | * CPUID levels like 0x6, 0xA etc, word 7 | 177 | * CPUID levels like 0x6, 0xA etc, word 7 |
178 | */ | 178 | */ |
179 | #define X86_FEATURE_IDA (7*32+ 0) /* Intel Dynamic Acceleration */ | 179 | #define X86_FEATURE_IDA ( 7*32+ 0) /* Intel Dynamic Acceleration */ |
180 | #define X86_FEATURE_ARAT (7*32+ 1) /* Always Running APIC Timer */ | 180 | #define X86_FEATURE_ARAT ( 7*32+ 1) /* Always Running APIC Timer */ |
181 | #define X86_FEATURE_CPB (7*32+ 2) /* AMD Core Performance Boost */ | 181 | #define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */ |
182 | #define X86_FEATURE_EPB (7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ | 182 | #define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */ |
183 | #define X86_FEATURE_XSAVEOPT (7*32+ 4) /* Optimized Xsave */ | 183 | #define X86_FEATURE_PLN ( 7*32+ 5) /* Intel Power Limit Notification */ |
184 | #define X86_FEATURE_PLN (7*32+ 5) /* Intel Power Limit Notification */ | 184 | #define X86_FEATURE_PTS ( 7*32+ 6) /* Intel Package Thermal Status */ |
185 | #define X86_FEATURE_PTS (7*32+ 6) /* Intel Package Thermal Status */ | 185 | #define X86_FEATURE_DTHERM ( 7*32+ 7) /* Digital Thermal Sensor */ |
186 | #define X86_FEATURE_DTHERM (7*32+ 7) /* Digital Thermal Sensor */ | 186 | #define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */ |
187 | #define X86_FEATURE_HW_PSTATE (7*32+ 8) /* AMD HW-PState */ | 187 | #define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */ |
188 | #define X86_FEATURE_PROC_FEEDBACK (7*32+ 9) /* AMD ProcFeedbackInterface */ | ||
189 | 188 | ||
190 | /* Virtualization flags: Linux defined, word 8 */ | 189 | /* Virtualization flags: Linux defined, word 8 */ |
191 | #define X86_FEATURE_TPR_SHADOW (8*32+ 0) /* Intel TPR Shadow */ | 190 | #define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */ |
192 | #define X86_FEATURE_VNMI (8*32+ 1) /* Intel Virtual NMI */ | 191 | #define X86_FEATURE_VNMI ( 8*32+ 1) /* Intel Virtual NMI */ |
193 | #define X86_FEATURE_FLEXPRIORITY (8*32+ 2) /* Intel FlexPriority */ | 192 | #define X86_FEATURE_FLEXPRIORITY ( 8*32+ 2) /* Intel FlexPriority */ |
194 | #define X86_FEATURE_EPT (8*32+ 3) /* Intel Extended Page Table */ | 193 | #define X86_FEATURE_EPT ( 8*32+ 3) /* Intel Extended Page Table */ |
195 | #define X86_FEATURE_VPID (8*32+ 4) /* Intel Virtual Processor ID */ | 194 | #define X86_FEATURE_VPID ( 8*32+ 4) /* Intel Virtual Processor ID */ |
196 | #define X86_FEATURE_NPT (8*32+ 5) /* AMD Nested Page Table support */ | 195 | #define X86_FEATURE_NPT ( 8*32+ 5) /* AMD Nested Page Table support */ |
197 | #define X86_FEATURE_LBRV (8*32+ 6) /* AMD LBR Virtualization support */ | 196 | #define X86_FEATURE_LBRV ( 8*32+ 6) /* AMD LBR Virtualization support */ |
198 | #define X86_FEATURE_SVML (8*32+ 7) /* "svm_lock" AMD SVM locking MSR */ | 197 | #define X86_FEATURE_SVML ( 8*32+ 7) /* "svm_lock" AMD SVM locking MSR */ |
199 | #define X86_FEATURE_NRIPS (8*32+ 8) /* "nrip_save" AMD SVM next_rip save */ | 198 | #define X86_FEATURE_NRIPS ( 8*32+ 8) /* "nrip_save" AMD SVM next_rip save */ |
200 | #define X86_FEATURE_TSCRATEMSR (8*32+ 9) /* "tsc_scale" AMD TSC scaling support */ | 199 | #define X86_FEATURE_TSCRATEMSR ( 8*32+ 9) /* "tsc_scale" AMD TSC scaling support */ |
201 | #define X86_FEATURE_VMCBCLEAN (8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */ | 200 | #define X86_FEATURE_VMCBCLEAN ( 8*32+10) /* "vmcb_clean" AMD VMCB clean bits support */ |
202 | #define X86_FEATURE_FLUSHBYASID (8*32+11) /* AMD flush-by-ASID support */ | 201 | #define X86_FEATURE_FLUSHBYASID ( 8*32+11) /* AMD flush-by-ASID support */ |
203 | #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */ | 202 | #define X86_FEATURE_DECODEASSISTS ( 8*32+12) /* AMD Decode Assists support */ |
204 | #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */ | 203 | #define X86_FEATURE_PAUSEFILTER ( 8*32+13) /* AMD filtered pause intercept */ |
205 | #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */ | 204 | #define X86_FEATURE_PFTHRESHOLD ( 8*32+14) /* AMD pause filter threshold */ |
206 | 205 | ||
207 | 206 | ||
208 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ | 207 | /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */ |
209 | #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ | 208 | #define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/ |
210 | #define X86_FEATURE_TSC_ADJUST (9*32+ 1) /* TSC adjustment MSR 0x3b */ | 209 | #define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3b */ |
211 | #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */ | 210 | #define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */ |
212 | #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */ | 211 | #define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */ |
213 | #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */ | 212 | #define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */ |
214 | #define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */ | 213 | #define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */ |
215 | #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */ | 214 | #define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */ |
216 | #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */ | 215 | #define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB */ |
217 | #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */ | 216 | #define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */ |
218 | #define X86_FEATURE_RTM (9*32+11) /* Restricted Transactional Memory */ | 217 | #define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */ |
219 | #define X86_FEATURE_MPX (9*32+14) /* Memory Protection Extension */ | 218 | #define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */ |
220 | #define X86_FEATURE_AVX512F (9*32+16) /* AVX-512 Foundation */ | 219 | #define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */ |
221 | #define X86_FEATURE_RDSEED (9*32+18) /* The RDSEED instruction */ | 220 | #define X86_FEATURE_RDSEED ( 9*32+18) /* The RDSEED instruction */ |
222 | #define X86_FEATURE_ADX (9*32+19) /* The ADCX and ADOX instructions */ | 221 | #define X86_FEATURE_ADX ( 9*32+19) /* The ADCX and ADOX instructions */ |
223 | #define X86_FEATURE_SMAP (9*32+20) /* Supervisor Mode Access Prevention */ | 222 | #define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */ |
224 | #define X86_FEATURE_CLFLUSHOPT (9*32+23) /* CLFLUSHOPT instruction */ | 223 | #define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */ |
225 | #define X86_FEATURE_AVX512PF (9*32+26) /* AVX-512 Prefetch */ | 224 | #define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */ |
226 | #define X86_FEATURE_AVX512ER (9*32+27) /* AVX-512 Exponential and Reciprocal */ | 225 | #define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */ |
227 | #define X86_FEATURE_AVX512CD (9*32+28) /* AVX-512 Conflict Detection */ | 226 | #define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */ |
227 | |||
228 | /* Extended state features, CPUID level 0x0000000d:1 (eax), word 10 */ | ||
229 | #define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT */ | ||
230 | #define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC */ | ||
231 | #define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 */ | ||
232 | #define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS */ | ||
228 | 233 | ||
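Each X86_FEATURE_* constant above encodes a capability word and a bit position as word*32+bit, which is why the new XSAVE-related entries land in word 10. A minimal sketch of the decode, assuming a caps[] array laid out like the kernel's x86_capability (illustrative only; the real accessor is the test_cpu_cap() machinery below):

    /* Sketch: split word*32+bit and test it against a capability array.
     * u32 as in <linux/types.h>; caps[] stands in for x86_capability. */
    static int has_feature(const u32 *caps, unsigned int feature)
    {
            unsigned int word = feature / 32;   /* X86_FEATURE_XSAVES -> 10 */
            unsigned int bit  = feature % 32;   /* X86_FEATURE_XSAVES -> 3  */

            return (caps[word] >> bit) & 1;
    }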
229 | /* | 234 | /* |
230 | * BUG word(s) | 235 | * BUG word(s) |
@@ -234,8 +239,11 @@ | |||
234 | #define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ | 239 | #define X86_BUG_F00F X86_BUG(0) /* Intel F00F */ |
235 | #define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ | 240 | #define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */ |
236 | #define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ | 241 | #define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */ |
237 | #define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* AMD Erratum 383 */ | 242 | #define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */ |
238 | #define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* AMD Erratum 400 */ | 243 | #define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */ |
244 | #define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */ | ||
245 | #define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FDP/FIP/FOP */ | ||
246 | #define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */ | ||
239 | 247 | ||
240 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) | 248 | #if defined(__KERNEL__) && !defined(__ASSEMBLY__) |
241 | 249 | ||
@@ -245,6 +253,12 @@ | |||
245 | extern const char * const x86_cap_flags[NCAPINTS*32]; | 253 | extern const char * const x86_cap_flags[NCAPINTS*32]; |
246 | extern const char * const x86_power_flags[32]; | 254 | extern const char * const x86_power_flags[32]; |
247 | 255 | ||
256 | /* | ||
257 | * In order to save room, we index into this array by doing | ||
258 | * X86_BUG_<name> - NCAPINTS*32. | ||
259 | */ | ||
260 | extern const char * const x86_bug_flags[NBUGINTS*32]; | ||
261 | |||
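Since bug bits are numbered after the NCAPINTS feature words (X86_BUG(x) expands to NCAPINTS*32 + x), a name lookup subtracts that offset first, exactly as the new comment says; schematically:

    /* Illustrative: fetch the /proc/cpuinfo name for a bug bit. */
    static const char *bug_flag_name(unsigned int bug_bit)
    {
            return x86_bug_flags[bug_bit - NCAPINTS * 32];
    }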
248 | #define test_cpu_cap(c, bit) \ | 262 | #define test_cpu_cap(c, bit) \ |
249 | test_bit(bit, (unsigned long *)((c)->x86_capability)) | 263 | test_bit(bit, (unsigned long *)((c)->x86_capability)) |
250 | 264 | ||
@@ -301,7 +315,6 @@ extern const char * const x86_power_flags[32]; | |||
301 | #define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) | 315 | #define cpu_has_avx boot_cpu_has(X86_FEATURE_AVX) |
302 | #define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2) | 316 | #define cpu_has_avx2 boot_cpu_has(X86_FEATURE_AVX2) |
303 | #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) | 317 | #define cpu_has_ht boot_cpu_has(X86_FEATURE_HT) |
304 | #define cpu_has_mp boot_cpu_has(X86_FEATURE_MP) | ||
305 | #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) | 318 | #define cpu_has_nx boot_cpu_has(X86_FEATURE_NX) |
306 | #define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) | 319 | #define cpu_has_k6_mtrr boot_cpu_has(X86_FEATURE_K6_MTRR) |
307 | #define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) | 320 | #define cpu_has_cyrix_arr boot_cpu_has(X86_FEATURE_CYRIX_ARR) |
@@ -328,6 +341,7 @@ extern const char * const x86_power_flags[32]; | |||
328 | #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) | 341 | #define cpu_has_x2apic boot_cpu_has(X86_FEATURE_X2APIC) |
329 | #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) | 342 | #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE) |
330 | #define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT) | 343 | #define cpu_has_xsaveopt boot_cpu_has(X86_FEATURE_XSAVEOPT) |
344 | #define cpu_has_xsaves boot_cpu_has(X86_FEATURE_XSAVES) | ||
331 | #define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) | 345 | #define cpu_has_osxsave boot_cpu_has(X86_FEATURE_OSXSAVE) |
332 | #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) | 346 | #define cpu_has_hypervisor boot_cpu_has(X86_FEATURE_HYPERVISOR) |
333 | #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) | 347 | #define cpu_has_pclmulqdq boot_cpu_has(X86_FEATURE_PCLMULQDQ) |
@@ -347,9 +361,6 @@ extern const char * const x86_power_flags[32]; | |||
347 | #undef cpu_has_pae | 361 | #undef cpu_has_pae |
348 | #define cpu_has_pae ___BUG___ | 362 | #define cpu_has_pae ___BUG___ |
349 | 363 | ||
350 | #undef cpu_has_mp | ||
351 | #define cpu_has_mp 1 | ||
352 | |||
353 | #undef cpu_has_k6_mtrr | 364 | #undef cpu_has_k6_mtrr |
354 | #define cpu_has_k6_mtrr 0 | 365 | #define cpu_has_k6_mtrr 0 |
355 | 366 | ||
@@ -539,20 +550,20 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit) | |||
539 | #define static_cpu_has_safe(bit) boot_cpu_has(bit) | 550 | #define static_cpu_has_safe(bit) boot_cpu_has(bit) |
540 | #endif | 551 | #endif |
541 | 552 | ||
542 | #define cpu_has_bug(c, bit) cpu_has(c, (bit)) | 553 | #define cpu_has_bug(c, bit) cpu_has(c, (bit)) |
543 | #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) | 554 | #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) |
544 | #define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit)); | 555 | #define clear_cpu_bug(c, bit) clear_cpu_cap(c, (bit)) |
545 | 556 | ||
546 | #define static_cpu_has_bug(bit) static_cpu_has((bit)) | 557 | #define static_cpu_has_bug(bit) static_cpu_has((bit)) |
547 | #define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit)) | 558 | #define static_cpu_has_bug_safe(bit) static_cpu_has_safe((bit)) |
559 | #define boot_cpu_has_bug(bit) cpu_has_bug(&boot_cpu_data, (bit)) | ||
548 | 560 | ||
549 | #define MAX_CPU_FEATURES (NCAPINTS * 32) | 561 | #define MAX_CPU_FEATURES (NCAPINTS * 32) |
550 | #define cpu_have_feature boot_cpu_has | 562 | #define cpu_have_feature boot_cpu_has |
551 | 563 | ||
552 | #define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X" | 564 | #define CPU_FEATURE_TYPEFMT "x86,ven%04Xfam%04Xmod%04X" |
553 | #define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \ | 565 | #define CPU_FEATURE_TYPEVAL boot_cpu_data.x86_vendor, boot_cpu_data.x86, \ |
554 | boot_cpu_data.x86_model | 566 | boot_cpu_data.x86_model |
555 | 567 | ||
556 | #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ | 568 | #endif /* defined(__KERNEL__) && !defined(__ASSEMBLY__) */ |
557 | |||
558 | #endif /* _ASM_X86_CPUFEATURE_H */ | 569 | #endif /* _ASM_X86_CPUFEATURE_H */ |
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 1eb5f6433ad8..044a2fd3c5fe 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -104,6 +104,8 @@ extern void __init runtime_code_page_mkexec(void); | |||
104 | extern void __init efi_runtime_mkexec(void); | 104 | extern void __init efi_runtime_mkexec(void); |
105 | extern void __init efi_dump_pagetable(void); | 105 | extern void __init efi_dump_pagetable(void); |
106 | extern void __init efi_apply_memmap_quirks(void); | 106 | extern void __init efi_apply_memmap_quirks(void); |
107 | extern int __init efi_reuse_config(u64 tables, int nr_tables); | ||
108 | extern void efi_delete_dummy_variable(void); | ||
107 | 109 | ||
108 | struct efi_setup_data { | 110 | struct efi_setup_data { |
109 | u64 fw_vendor; | 111 | u64 fw_vendor; |
@@ -156,6 +158,33 @@ static inline efi_status_t efi_thunk_set_virtual_address_map( | |||
156 | return EFI_SUCCESS; | 158 | return EFI_SUCCESS; |
157 | } | 159 | } |
158 | #endif /* CONFIG_EFI_MIXED */ | 160 | #endif /* CONFIG_EFI_MIXED */ |
161 | |||
162 | |||
163 | /* arch specific definitions used by the stub code */ | ||
164 | |||
165 | struct efi_config { | ||
166 | u64 image_handle; | ||
167 | u64 table; | ||
168 | u64 allocate_pool; | ||
169 | u64 allocate_pages; | ||
170 | u64 get_memory_map; | ||
171 | u64 free_pool; | ||
172 | u64 free_pages; | ||
173 | u64 locate_handle; | ||
174 | u64 handle_protocol; | ||
175 | u64 exit_boot_services; | ||
176 | u64 text_output; | ||
177 | efi_status_t (*call)(unsigned long, ...); | ||
178 | bool is64; | ||
179 | } __packed; | ||
180 | |||
181 | extern struct efi_config *efi_early; | ||
182 | |||
183 | #define efi_call_early(f, ...) \ | ||
184 | efi_early->call(efi_early->f, __VA_ARGS__); | ||
185 | |||
186 | extern bool efi_reboot_required(void); | ||
187 | |||
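The efi_call_early() macro lets the 32-bit and 64-bit boot stubs share one call site by indirecting through the u64 service slots of struct efi_config. A hedged usage sketch (boot-service and constant names per the EFI spec; error handling trimmed):

    /* Sketch: allocate a buffer from EFI boot services via the early table. */
    static efi_status_t early_alloc(unsigned long size, void **buf)
    {
            return efi_call_early(allocate_pool, EFI_LOADER_DATA, size, buf);
    }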
159 | #else | 188 | #else |
160 | /* | 189 | /* |
161 | * IF EFI is not configured, have the EFI calls return -ENOSYS. | 190 | * IF EFI is not configured, have the EFI calls return -ENOSYS. |
@@ -168,6 +197,10 @@ static inline efi_status_t efi_thunk_set_virtual_address_map( | |||
168 | #define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS) | 197 | #define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS) |
169 | #define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS) | 198 | #define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS) |
170 | static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {} | 199 | static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {} |
200 | static inline bool efi_reboot_required(void) | ||
201 | { | ||
202 | return false; | ||
203 | } | ||
171 | #endif /* CONFIG_EFI */ | 204 | #endif /* CONFIG_EFI */ |
172 | 205 | ||
173 | #endif /* _ASM_X86_EFI_H */ | 206 | #endif /* _ASM_X86_EFI_H */ |
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h index 115e3689cd53..e3b85422cf12 100644 --- a/arch/x86/include/asm/fpu-internal.h +++ b/arch/x86/include/asm/fpu-internal.h | |||
@@ -293,7 +293,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk) | |||
293 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception | 293 | /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception |
294 | is pending. Clear the x87 state here by setting it to fixed | 294 | is pending. Clear the x87 state here by setting it to fixed |
295 | values. "m" is a random variable that should be in L1 */ | 295 | values. "m" is a random variable that should be in L1 */ |
296 | if (unlikely(static_cpu_has_safe(X86_FEATURE_FXSAVE_LEAK))) { | 296 | if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) { |
297 | asm volatile( | 297 | asm volatile( |
298 | "fnclex\n\t" | 298 | "fnclex\n\t" |
299 | "emms\n\t" | 299 | "emms\n\t" |
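The hunk above shows only the first two instructions of the workaround; it finishes by loading from a location likely to be in L1 so that FDP/FIP end up pointing at defined memory. Roughly (a sketch, not a verbatim quote of the surrounding code; the operand name is assumed from this kernel era):

    /* Sketch of the complete leak-clearing sequence inside the unlikely branch. */
    asm volatile("fnclex\n\t"
                 "emms\n\t"
                 "fildl %P[addr]"       /* set F?P to a defined value */
                 : : [addr] "m" (tsk->thread.fpu.has_fpu));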
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h index 0525a8bdf65d..e1f7fecaa7d6 100644 --- a/arch/x86/include/asm/ftrace.h +++ b/arch/x86/include/asm/ftrace.h | |||
@@ -68,6 +68,8 @@ struct dyn_arch_ftrace { | |||
68 | 68 | ||
69 | int ftrace_int3_handler(struct pt_regs *regs); | 69 | int ftrace_int3_handler(struct pt_regs *regs); |
70 | 70 | ||
71 | #define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR | ||
72 | |||
71 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 73 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
72 | #endif /* __ASSEMBLY__ */ | 74 | #endif /* __ASSEMBLY__ */ |
73 | #endif /* CONFIG_FUNCTION_TRACER */ | 75 | #endif /* CONFIG_FUNCTION_TRACER */ |
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h index bba3cf88e624..0a8b519226b8 100644 --- a/arch/x86/include/asm/irqflags.h +++ b/arch/x86/include/asm/irqflags.h | |||
@@ -129,7 +129,7 @@ static inline notrace unsigned long arch_local_irq_save(void) | |||
129 | 129 | ||
130 | #define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */ | 130 | #define PARAVIRT_ADJUST_EXCEPTION_FRAME /* */ |
131 | 131 | ||
132 | #define INTERRUPT_RETURN iretq | 132 | #define INTERRUPT_RETURN jmp native_iret |
133 | #define USERGS_SYSRET64 \ | 133 | #define USERGS_SYSRET64 \ |
134 | swapgs; \ | 134 | swapgs; \ |
135 | sysretq; | 135 | sysretq; |
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h index a04fe4eb237d..eb181178fe0b 100644 --- a/arch/x86/include/asm/kvm_emulate.h +++ b/arch/x86/include/asm/kvm_emulate.h | |||
@@ -37,6 +37,7 @@ struct x86_instruction_info { | |||
37 | u8 modrm_reg; /* index of register used */ | 37 | u8 modrm_reg; /* index of register used */ |
38 | u8 modrm_rm; /* rm part of modrm */ | 38 | u8 modrm_rm; /* rm part of modrm */ |
39 | u64 src_val; /* value of source operand */ | 39 | u64 src_val; /* value of source operand */ |
40 | u64 dst_val; /* value of destination operand */ | ||
40 | u8 src_bytes; /* size of source operand */ | 41 | u8 src_bytes; /* size of source operand */ |
41 | u8 dst_bytes; /* size of destination operand */ | 42 | u8 dst_bytes; /* size of destination operand */ |
42 | u8 ad_bytes; /* size of src/dst address */ | 43 | u8 ad_bytes; /* size of src/dst address */ |
@@ -194,6 +195,7 @@ struct x86_emulate_ops { | |||
194 | int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); | 195 | int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value); |
195 | int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); | 196 | int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data); |
196 | int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); | 197 | int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata); |
198 | int (*check_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc); | ||
197 | int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata); | 199 | int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata); |
198 | void (*halt)(struct x86_emulate_ctxt *ctxt); | 200 | void (*halt)(struct x86_emulate_ctxt *ctxt); |
199 | void (*wbinvd)(struct x86_emulate_ctxt *ctxt); | 201 | void (*wbinvd)(struct x86_emulate_ctxt *ctxt); |
@@ -231,7 +233,7 @@ struct operand { | |||
231 | union { | 233 | union { |
232 | unsigned long val; | 234 | unsigned long val; |
233 | u64 val64; | 235 | u64 val64; |
234 | char valptr[sizeof(unsigned long) + 2]; | 236 | char valptr[sizeof(sse128_t)]; |
235 | sse128_t vec_val; | 237 | sse128_t vec_val; |
236 | u64 mm_val; | 238 | u64 mm_val; |
237 | void *data; | 239 | void *data; |
@@ -240,8 +242,8 @@ struct operand { | |||
240 | 242 | ||
241 | struct fetch_cache { | 243 | struct fetch_cache { |
242 | u8 data[15]; | 244 | u8 data[15]; |
243 | unsigned long start; | 245 | u8 *ptr; |
244 | unsigned long end; | 246 | u8 *end; |
245 | }; | 247 | }; |
246 | 248 | ||
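Replacing the (start, end) addresses with raw pointers turns the hot opcode-fetch path into a bounds check plus a pointer bump; conceptually (simplified, refill from guest memory omitted):

    /* Simplified sketch of a byte fetch against the pointer-based cache. */
    static int fetch_cached_byte(struct fetch_cache *fc, u8 *out)
    {
            if (fc->ptr >= fc->end)
                    return -1;      /* the real code refills the cache here */
            *out = *fc->ptr++;
            return 0;
    }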
247 | struct read_cache { | 249 | struct read_cache { |
@@ -286,30 +288,36 @@ struct x86_emulate_ctxt { | |||
286 | u8 opcode_len; | 288 | u8 opcode_len; |
287 | u8 b; | 289 | u8 b; |
288 | u8 intercept; | 290 | u8 intercept; |
289 | u8 lock_prefix; | ||
290 | u8 rep_prefix; | ||
291 | u8 op_bytes; | 291 | u8 op_bytes; |
292 | u8 ad_bytes; | 292 | u8 ad_bytes; |
293 | u8 rex_prefix; | ||
294 | struct operand src; | 293 | struct operand src; |
295 | struct operand src2; | 294 | struct operand src2; |
296 | struct operand dst; | 295 | struct operand dst; |
297 | bool has_seg_override; | ||
298 | u8 seg_override; | ||
299 | u64 d; | ||
300 | int (*execute)(struct x86_emulate_ctxt *ctxt); | 296 | int (*execute)(struct x86_emulate_ctxt *ctxt); |
301 | int (*check_perm)(struct x86_emulate_ctxt *ctxt); | 297 | int (*check_perm)(struct x86_emulate_ctxt *ctxt); |
298 | /* | ||
299 | * The following six fields are cleared together, | ||
300 | * the rest are initialized unconditionally in x86_decode_insn | ||
301 | * or elsewhere | ||
302 | */ | ||
303 | bool rip_relative; | ||
304 | u8 rex_prefix; | ||
305 | u8 lock_prefix; | ||
306 | u8 rep_prefix; | ||
307 | /* bitmaps of registers in _regs[] that can be read */ | ||
308 | u32 regs_valid; | ||
309 | /* bitmaps of registers in _regs[] that have been written */ | ||
310 | u32 regs_dirty; | ||
302 | /* modrm */ | 311 | /* modrm */ |
303 | u8 modrm; | 312 | u8 modrm; |
304 | u8 modrm_mod; | 313 | u8 modrm_mod; |
305 | u8 modrm_reg; | 314 | u8 modrm_reg; |
306 | u8 modrm_rm; | 315 | u8 modrm_rm; |
307 | u8 modrm_seg; | 316 | u8 modrm_seg; |
308 | bool rip_relative; | 317 | u8 seg_override; |
318 | u64 d; | ||
309 | unsigned long _eip; | 319 | unsigned long _eip; |
310 | struct operand memop; | 320 | struct operand memop; |
311 | u32 regs_valid; /* bitmaps of registers in _regs[] that can be read */ | ||
312 | u32 regs_dirty; /* bitmaps of registers in _regs[] that have been written */ | ||
313 | /* Fields above regs are cleared together. */ | 321 | /* Fields above regs are cleared together. */ |
314 | unsigned long _regs[NR_VCPU_REGS]; | 322 | unsigned long _regs[NR_VCPU_REGS]; |
315 | struct operand *memopp; | 323 | struct operand *memopp; |
@@ -407,6 +415,7 @@ bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt); | |||
407 | #define EMULATION_OK 0 | 415 | #define EMULATION_OK 0 |
408 | #define EMULATION_RESTART 1 | 416 | #define EMULATION_RESTART 1 |
409 | #define EMULATION_INTERCEPTED 2 | 417 | #define EMULATION_INTERCEPTED 2 |
418 | void init_decode_cache(struct x86_emulate_ctxt *ctxt); | ||
410 | int x86_emulate_insn(struct x86_emulate_ctxt *ctxt); | 419 | int x86_emulate_insn(struct x86_emulate_ctxt *ctxt); |
411 | int emulator_task_switch(struct x86_emulate_ctxt *ctxt, | 420 | int emulator_task_switch(struct x86_emulate_ctxt *ctxt, |
412 | u16 tss_selector, int idt_index, int reason, | 421 | u16 tss_selector, int idt_index, int reason, |
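Grouping the mutable decode fields contiguously (rip_relative through regs_dirty) is what lets the newly exported init_decode_cache() reset them with a single bounded memset instead of per-field stores; the idea, sketched from the layout above (exact body assumed):

    /* Sketch: clear the per-instruction decode state in one memset. */
    void init_decode_cache(struct x86_emulate_ctxt *ctxt)
    {
            memset(&ctxt->rip_relative, 0,
                   (void *)&ctxt->modrm - (void *)&ctxt->rip_relative);
    }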
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 49205d01b9ad..572460175ba5 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -152,14 +152,16 @@ enum { | |||
152 | 152 | ||
153 | #define DR6_BD (1 << 13) | 153 | #define DR6_BD (1 << 13) |
154 | #define DR6_BS (1 << 14) | 154 | #define DR6_BS (1 << 14) |
155 | #define DR6_FIXED_1 0xffff0ff0 | 155 | #define DR6_RTM (1 << 16) |
156 | #define DR6_VOLATILE 0x0000e00f | 156 | #define DR6_FIXED_1 0xfffe0ff0 |
157 | #define DR6_INIT 0xffff0ff0 | ||
158 | #define DR6_VOLATILE 0x0001e00f | ||
157 | 159 | ||
158 | #define DR7_BP_EN_MASK 0x000000ff | 160 | #define DR7_BP_EN_MASK 0x000000ff |
159 | #define DR7_GE (1 << 9) | 161 | #define DR7_GE (1 << 9) |
160 | #define DR7_GD (1 << 13) | 162 | #define DR7_GD (1 << 13) |
161 | #define DR7_FIXED_1 0x00000400 | 163 | #define DR7_FIXED_1 0x00000400 |
162 | #define DR7_VOLATILE 0xffff23ff | 164 | #define DR7_VOLATILE 0xffff2bff |
163 | 165 | ||
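DR6_FIXED_1 drops bit 16 because on RTM-capable parts it is the DR6.RTM flag (cleared on a #DB inside a transaction) rather than a forced-one bit, so it moves into DR6_VOLATILE instead. Guest writes are then sanitized against these masks, roughly (illustrative; the real code additionally forces bit 16 to 1 when the guest lacks RTM):

    /* Illustrative: keep guest-controlled bits, force the fixed ones. */
    static u64 sanitize_dr6_write(u64 val)
    {
            return (val & DR6_VOLATILE) | DR6_FIXED_1;
    }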
164 | /* apic attention bits */ | 166 | /* apic attention bits */ |
165 | #define KVM_APIC_CHECK_VAPIC 0 | 167 | #define KVM_APIC_CHECK_VAPIC 0 |
@@ -448,7 +450,7 @@ struct kvm_vcpu_arch { | |||
448 | u64 tsc_offset_adjustment; | 450 | u64 tsc_offset_adjustment; |
449 | u64 this_tsc_nsec; | 451 | u64 this_tsc_nsec; |
450 | u64 this_tsc_write; | 452 | u64 this_tsc_write; |
451 | u8 this_tsc_generation; | 453 | u64 this_tsc_generation; |
452 | bool tsc_catchup; | 454 | bool tsc_catchup; |
453 | bool tsc_always_catchup; | 455 | bool tsc_always_catchup; |
454 | s8 virtual_tsc_shift; | 456 | s8 virtual_tsc_shift; |
@@ -591,7 +593,7 @@ struct kvm_arch { | |||
591 | u64 cur_tsc_nsec; | 593 | u64 cur_tsc_nsec; |
592 | u64 cur_tsc_write; | 594 | u64 cur_tsc_write; |
593 | u64 cur_tsc_offset; | 595 | u64 cur_tsc_offset; |
594 | u8 cur_tsc_generation; | 596 | u64 cur_tsc_generation; |
595 | int nr_vcpus_matched_tsc; | 597 | int nr_vcpus_matched_tsc; |
596 | 598 | ||
597 | spinlock_t pvclock_gtod_sync_lock; | 599 | spinlock_t pvclock_gtod_sync_lock; |
@@ -717,7 +719,7 @@ struct kvm_x86_ops { | |||
717 | int (*handle_exit)(struct kvm_vcpu *vcpu); | 719 | int (*handle_exit)(struct kvm_vcpu *vcpu); |
718 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); | 720 | void (*skip_emulated_instruction)(struct kvm_vcpu *vcpu); |
719 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | 721 | void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); |
720 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask); | 722 | u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu); |
721 | void (*patch_hypercall)(struct kvm_vcpu *vcpu, | 723 | void (*patch_hypercall)(struct kvm_vcpu *vcpu, |
722 | unsigned char *hypercall_addr); | 724 | unsigned char *hypercall_addr); |
723 | void (*set_irq)(struct kvm_vcpu *vcpu); | 725 | void (*set_irq)(struct kvm_vcpu *vcpu); |
@@ -1070,6 +1072,7 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu); | |||
1070 | bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr); | 1072 | bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr); |
1071 | int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data); | 1073 | int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data); |
1072 | int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); | 1074 | int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info); |
1075 | int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc); | ||
1073 | int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data); | 1076 | int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data); |
1074 | void kvm_handle_pmu_event(struct kvm_vcpu *vcpu); | 1077 | void kvm_handle_pmu_event(struct kvm_vcpu *vcpu); |
1075 | void kvm_deliver_pmi(struct kvm_vcpu *vcpu); | 1078 | void kvm_deliver_pmi(struct kvm_vcpu *vcpu); |
diff --git a/arch/x86/include/asm/mc146818rtc.h b/arch/x86/include/asm/mc146818rtc.h index a55c7efcc4ed..0f555cc31984 100644 --- a/arch/x86/include/asm/mc146818rtc.h +++ b/arch/x86/include/asm/mc146818rtc.h | |||
@@ -13,7 +13,7 @@ | |||
13 | #define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ | 13 | #define RTC_ALWAYS_BCD 1 /* RTC operates in binary mode */ |
14 | #endif | 14 | #endif |
15 | 15 | ||
16 | #if defined(CONFIG_X86_32) && defined(__HAVE_ARCH_CMPXCHG) | 16 | #if defined(CONFIG_X86_32) |
17 | /* | 17 | /* |
18 | * This lock provides nmi access to the CMOS/RTC registers. It has some | 18 | * This lock provides nmi access to the CMOS/RTC registers. It has some |
19 | * special properties. It is owned by a CPU and stores the index register | 19 | * special properties. It is owned by a CPU and stores the index register |
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index be12c534fd59..166af2a8e865 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h | |||
@@ -3,6 +3,10 @@ | |||
3 | 3 | ||
4 | #include <asm/desc.h> | 4 | #include <asm/desc.h> |
5 | #include <linux/atomic.h> | 5 | #include <linux/atomic.h> |
6 | #include <linux/mm_types.h> | ||
7 | |||
8 | #include <trace/events/tlb.h> | ||
9 | |||
6 | #include <asm/pgalloc.h> | 10 | #include <asm/pgalloc.h> |
7 | #include <asm/tlbflush.h> | 11 | #include <asm/tlbflush.h> |
8 | #include <asm/paravirt.h> | 12 | #include <asm/paravirt.h> |
@@ -44,6 +48,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
44 | 48 | ||
45 | /* Re-load page tables */ | 49 | /* Re-load page tables */ |
46 | load_cr3(next->pgd); | 50 | load_cr3(next->pgd); |
51 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); | ||
47 | 52 | ||
48 | /* Stop flush ipis for the previous mm */ | 53 | /* Stop flush ipis for the previous mm */ |
49 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); | 54 | cpumask_clear_cpu(cpu, mm_cpumask(prev)); |
@@ -71,6 +76,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
71 | * to make sure to use no freed page tables. | 76 | * to make sure to use no freed page tables. |
72 | */ | 77 | */ |
73 | load_cr3(next->pgd); | 78 | load_cr3(next->pgd); |
79 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); | ||
74 | load_LDT_nolock(&next->context); | 80 | load_LDT_nolock(&next->context); |
75 | } | 81 | } |
76 | } | 82 | } |
diff --git a/arch/x86/include/asm/mutex_32.h b/arch/x86/include/asm/mutex_32.h index 0208c3c2cbc6..85e6cda45a02 100644 --- a/arch/x86/include/asm/mutex_32.h +++ b/arch/x86/include/asm/mutex_32.h | |||
@@ -100,23 +100,11 @@ do { \ | |||
100 | static inline int __mutex_fastpath_trylock(atomic_t *count, | 100 | static inline int __mutex_fastpath_trylock(atomic_t *count, |
101 | int (*fail_fn)(atomic_t *)) | 101 | int (*fail_fn)(atomic_t *)) |
102 | { | 102 | { |
103 | /* | 103 | /* cmpxchg because it never induces a false contention state. */ |
104 | * We have two variants here. The cmpxchg based one is the best one | ||
105 | * because it never induce a false contention state. It is included | ||
106 | * here because architectures using the inc/dec algorithms over the | ||
107 | * xchg ones are much more likely to support cmpxchg natively. | ||
108 | * | ||
109 | * If not we fall back to the spinlock based variant - that is | ||
110 | * just as efficient (and simpler) as a 'destructive' probing of | ||
111 | * the mutex state would be. | ||
112 | */ | ||
113 | #ifdef __HAVE_ARCH_CMPXCHG | ||
114 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) | 104 | if (likely(atomic_cmpxchg(count, 1, 0) == 1)) |
115 | return 1; | 105 | return 1; |
106 | |||
116 | return 0; | 107 | return 0; |
117 | #else | ||
118 | return fail_fn(count); | ||
119 | #endif | ||
120 | } | 108 | } |
121 | 109 | ||
122 | #endif /* _ASM_X86_MUTEX_32_H */ | 110 | #endif /* _ASM_X86_MUTEX_32_H */ |
diff --git a/arch/x86/include/asm/mwait.h b/arch/x86/include/asm/mwait.h index 1da25a5f96f9..a1410db38a1a 100644 --- a/arch/x86/include/asm/mwait.h +++ b/arch/x86/include/asm/mwait.h | |||
@@ -43,7 +43,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx) | |||
43 | static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) | 43 | static inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) |
44 | { | 44 | { |
45 | if (!current_set_polling_and_test()) { | 45 | if (!current_set_polling_and_test()) { |
46 | if (static_cpu_has(X86_FEATURE_CLFLUSH_MONITOR)) { | 46 | if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) { |
47 | mb(); | 47 | mb(); |
48 | clflush((void *)¤t_thread_info()->flags); | 48 | clflush((void *)¤t_thread_info()->flags); |
49 | mb(); | 49 | mb(); |
diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index 851bcdc5db04..fd472181a1d0 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h | |||
@@ -52,10 +52,9 @@ | |||
52 | * Compared to the generic __my_cpu_offset version, the following | 52 | * Compared to the generic __my_cpu_offset version, the following |
53 | * saves one instruction and avoids clobbering a temp register. | 53 | * saves one instruction and avoids clobbering a temp register. |
54 | */ | 54 | */ |
55 | #define raw_cpu_ptr(ptr) \ | 55 | #define arch_raw_cpu_ptr(ptr) \ |
56 | ({ \ | 56 | ({ \ |
57 | unsigned long tcp_ptr__; \ | 57 | unsigned long tcp_ptr__; \ |
58 | __verify_pcpu_ptr(ptr); \ | ||
59 | asm volatile("add " __percpu_arg(1) ", %0" \ | 58 | asm volatile("add " __percpu_arg(1) ", %0" \ |
60 | : "=r" (tcp_ptr__) \ | 59 | : "=r" (tcp_ptr__) \ |
61 | : "m" (this_cpu_off), "0" (ptr)); \ | 60 | : "m" (this_cpu_off), "0" (ptr)); \ |
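Renaming the x86 override to arch_raw_cpu_ptr() lets the generic raw_cpu_ptr() keep the __verify_pcpu_ptr() type check while this asm supplies only the address math; callers are unchanged. For example (illustrative, variable name hypothetical):

    /* Illustrative: callers still use raw_cpu_ptr(); the arch hook is internal. */
    DEFINE_PER_CPU(int, demo_counter);

    static void bump_demo_counter(void)
    {
            int *p = raw_cpu_ptr(&demo_counter); /* expands via arch_raw_cpu_ptr */
            (*p)++;
    }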
diff --git a/arch/x86/include/asm/pmc_atom.h b/arch/x86/include/asm/pmc_atom.h new file mode 100644 index 000000000000..fc7a17c05d35 --- /dev/null +++ b/arch/x86/include/asm/pmc_atom.h | |||
@@ -0,0 +1,107 @@ | |||
1 | /* | ||
2 | * Intel Atom SOC Power Management Controller Header File | ||
3 | * Copyright (c) 2014, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef PMC_ATOM_H | ||
17 | #define PMC_ATOM_H | ||
18 | |||
19 | /* ValleyView Power Control Unit PCI Device ID */ | ||
20 | #define PCI_DEVICE_ID_VLV_PMC 0x0F1C | ||
21 | |||
22 | /* PMC Memory mapped IO registers */ | ||
23 | #define PMC_BASE_ADDR_OFFSET 0x44 | ||
24 | #define PMC_BASE_ADDR_MASK 0xFFFFFE00 | ||
25 | #define PMC_MMIO_REG_LEN 0x100 | ||
26 | #define PMC_REG_BIT_WIDTH 32 | ||
27 | |||
28 | /* BIOS uses FUNC_DIS to disable specific functions */ | ||
29 | #define PMC_FUNC_DIS 0x34 | ||
30 | #define PMC_FUNC_DIS_2 0x38 | ||
31 | |||
32 | /* S0ix wake event control */ | ||
33 | #define PMC_S0IX_WAKE_EN 0x3C | ||
34 | |||
35 | #define BIT_LPC_CLOCK_RUN BIT(4) | ||
36 | #define BIT_SHARED_IRQ_GPSC BIT(5) | ||
37 | #define BIT_ORED_DEDICATED_IRQ_GPSS BIT(18) | ||
38 | #define BIT_ORED_DEDICATED_IRQ_GPSC BIT(19) | ||
39 | #define BIT_SHARED_IRQ_GPSS BIT(20) | ||
40 | |||
41 | #define PMC_WAKE_EN_SETTING ~(BIT_LPC_CLOCK_RUN | \ | ||
42 | BIT_SHARED_IRQ_GPSC | \ | ||
43 | BIT_ORED_DEDICATED_IRQ_GPSS | \ | ||
44 | BIT_ORED_DEDICATED_IRQ_GPSC | \ | ||
45 | BIT_SHARED_IRQ_GPSS) | ||
46 | |||
47 | /* The timers accumulate time spent in each sleep state */ | ||
48 | #define PMC_S0IR_TMR 0x80 | ||
49 | #define PMC_S0I1_TMR 0x84 | ||
50 | #define PMC_S0I2_TMR 0x88 | ||
51 | #define PMC_S0I3_TMR 0x8C | ||
52 | #define PMC_S0_TMR 0x90 | ||
53 | /* Sleep state counter is in units of 32us */ | ||
54 | #define PMC_TMR_SHIFT 5 | ||
55 | |||
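With one count per 32us, converting a raw residency counter to microseconds is just a left shift by PMC_TMR_SHIFT; for instance (helper name hypothetical):

    /* Hypothetical helper: raw S0ix residency count -> microseconds. */
    static u64 pmc_tmr_to_us(u32 raw)
    {
            return (u64)raw << PMC_TMR_SHIFT;   /* 1 count = 32us */
    }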
56 | /* These registers reflect D3 status of functions */ | ||
57 | #define PMC_D3_STS_0 0xA0 | ||
58 | |||
59 | #define BIT_LPSS1_F0_DMA BIT(0) | ||
60 | #define BIT_LPSS1_F1_PWM1 BIT(1) | ||
61 | #define BIT_LPSS1_F2_PWM2 BIT(2) | ||
62 | #define BIT_LPSS1_F3_HSUART1 BIT(3) | ||
63 | #define BIT_LPSS1_F4_HSUART2 BIT(4) | ||
64 | #define BIT_LPSS1_F5_SPI BIT(5) | ||
65 | #define BIT_LPSS1_F6_XXX BIT(6) | ||
66 | #define BIT_LPSS1_F7_XXX BIT(7) | ||
67 | #define BIT_SCC_EMMC BIT(8) | ||
68 | #define BIT_SCC_SDIO BIT(9) | ||
69 | #define BIT_SCC_SDCARD BIT(10) | ||
70 | #define BIT_SCC_MIPI BIT(11) | ||
71 | #define BIT_HDA BIT(12) | ||
72 | #define BIT_LPE BIT(13) | ||
73 | #define BIT_OTG BIT(14) | ||
74 | #define BIT_USH BIT(15) | ||
75 | #define BIT_GBE BIT(16) | ||
76 | #define BIT_SATA BIT(17) | ||
77 | #define BIT_USB_EHCI BIT(18) | ||
78 | #define BIT_SEC BIT(19) | ||
79 | #define BIT_PCIE_PORT0 BIT(20) | ||
80 | #define BIT_PCIE_PORT1 BIT(21) | ||
81 | #define BIT_PCIE_PORT2 BIT(22) | ||
82 | #define BIT_PCIE_PORT3 BIT(23) | ||
83 | #define BIT_LPSS2_F0_DMA BIT(24) | ||
84 | #define BIT_LPSS2_F1_I2C1 BIT(25) | ||
85 | #define BIT_LPSS2_F2_I2C2 BIT(26) | ||
86 | #define BIT_LPSS2_F3_I2C3 BIT(27) | ||
87 | #define BIT_LPSS2_F4_I2C4 BIT(28) | ||
88 | #define BIT_LPSS2_F5_I2C5 BIT(29) | ||
89 | #define BIT_LPSS2_F6_I2C6 BIT(30) | ||
90 | #define BIT_LPSS2_F7_I2C7 BIT(31) | ||
91 | |||
92 | #define PMC_D3_STS_1 0xA4 | ||
93 | #define BIT_SMB BIT(0) | ||
94 | #define BIT_OTG_SS_PHY BIT(1) | ||
95 | #define BIT_USH_SS_PHY BIT(2) | ||
96 | #define BIT_DFX BIT(3) | ||
97 | |||
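Each D3 status bit can be tested with a plain MMIO read of the corresponding register; a hypothetical sketch, assuming pmc_base is an ioremap() of the region found via PMC_BASE_ADDR_OFFSET:

    /* Hypothetical: is the given function in D3? e.g. (PMC_D3_STS_0, BIT_HDA). */
    static bool pmc_dev_in_d3(void __iomem *pmc_base, u32 sts_reg, u32 bit)
    {
            return readl(pmc_base + sts_reg) & bit;
    }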
98 | /* PMC I/O Registers */ | ||
99 | #define ACPI_BASE_ADDR_OFFSET 0x40 | ||
100 | #define ACPI_BASE_ADDR_MASK 0xFFFFFE00 | ||
101 | #define ACPI_MMIO_REG_LEN 0x100 | ||
102 | |||
103 | #define PM1_CNT 0x4 | ||
104 | #define SLEEP_TYPE_MASK 0xFFFFECFF | ||
105 | #define SLEEP_TYPE_S5 0x1C00 | ||
106 | #define SLEEP_ENABLE 0x2000 | ||
107 | #endif /* PMC_ATOM_H */ | ||
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index a4ea02351f4d..ee30b9f0b91c 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -72,7 +72,6 @@ extern u16 __read_mostly tlb_lld_4k[NR_INFO]; | |||
72 | extern u16 __read_mostly tlb_lld_2m[NR_INFO]; | 72 | extern u16 __read_mostly tlb_lld_2m[NR_INFO]; |
73 | extern u16 __read_mostly tlb_lld_4m[NR_INFO]; | 73 | extern u16 __read_mostly tlb_lld_4m[NR_INFO]; |
74 | extern u16 __read_mostly tlb_lld_1g[NR_INFO]; | 74 | extern u16 __read_mostly tlb_lld_1g[NR_INFO]; |
75 | extern s8 __read_mostly tlb_flushall_shift; | ||
76 | 75 | ||
77 | /* | 76 | /* |
78 | * CPU type and hardware bug flags. Kept separately for each CPU. | 77 | * CPU type and hardware bug flags. Kept separately for each CPU. |
@@ -696,6 +695,8 @@ static inline void cpu_relax(void) | |||
696 | rep_nop(); | 695 | rep_nop(); |
697 | } | 696 | } |
698 | 697 | ||
698 | #define cpu_relax_lowlatency() cpu_relax() | ||
699 | |||
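cpu_relax_lowlatency() gives spin-wait callers (e.g. mutex/rwsem optimistic spinning) a hint that some architectures can make cheaper than cpu_relax(); on x86 the two are identical. A typical caller shape (illustrative):

    /* Illustrative busy-wait loop as an optimistic-spinning caller writes it. */
    static void spin_until_set(volatile int *flag)
    {
            while (!*flag)
                    cpu_relax_lowlatency();
    }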
699 | /* Stop speculative execution and prefetching of modified code. */ | 700 | /* Stop speculative execution and prefetching of modified code. */ |
700 | static inline void sync_core(void) | 701 | static inline void sync_core(void) |
701 | { | 702 | { |
diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h index 70f46f07f94e..ae0e241e228b 100644 --- a/arch/x86/include/asm/qrwlock.h +++ b/arch/x86/include/asm/qrwlock.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm-generic/qrwlock_types.h> | 4 | #include <asm-generic/qrwlock_types.h> |
5 | 5 | ||
6 | #if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) | 6 | #ifndef CONFIG_X86_PPRO_FENCE |
7 | #define queue_write_unlock queue_write_unlock | 7 | #define queue_write_unlock queue_write_unlock |
8 | static inline void queue_write_unlock(struct qrwlock *lock) | 8 | static inline void queue_write_unlock(struct qrwlock *lock) |
9 | { | 9 | { |
diff --git a/arch/x86/include/asm/vga.h b/arch/x86/include/asm/vga.h index 44282fbf7bf9..c4b9dc2f67c5 100644 --- a/arch/x86/include/asm/vga.h +++ b/arch/x86/include/asm/vga.h | |||
@@ -17,10 +17,4 @@ | |||
17 | #define vga_readb(x) (*(x)) | 17 | #define vga_readb(x) (*(x)) |
18 | #define vga_writeb(x, y) (*(y) = (x)) | 18 | #define vga_writeb(x, y) (*(y) = (x)) |
19 | 19 | ||
20 | #ifdef CONFIG_FB_EFI | ||
21 | #define __ARCH_HAS_VGA_DEFAULT_DEVICE | ||
22 | extern struct pci_dev *vga_default_device(void); | ||
23 | extern void vga_set_default_device(struct pci_dev *pdev); | ||
24 | #endif | ||
25 | |||
26 | #endif /* _ASM_X86_VGA_H */ | 20 | #endif /* _ASM_X86_VGA_H */ |
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 7004d21e6219..bcbfade26d8d 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h | |||
@@ -51,6 +51,9 @@ | |||
51 | #define CPU_BASED_MONITOR_EXITING 0x20000000 | 51 | #define CPU_BASED_MONITOR_EXITING 0x20000000 |
52 | #define CPU_BASED_PAUSE_EXITING 0x40000000 | 52 | #define CPU_BASED_PAUSE_EXITING 0x40000000 |
53 | #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 | 53 | #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000 |
54 | |||
55 | #define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172 | ||
56 | |||
54 | /* | 57 | /* |
55 | * Definitions of Secondary Processor-Based VM-Execution Controls. | 58 | * Definitions of Secondary Processor-Based VM-Execution Controls. |
56 | */ | 59 | */ |
@@ -76,7 +79,7 @@ | |||
76 | 79 | ||
77 | #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016 | 80 | #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016 |
78 | 81 | ||
79 | #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000002 | 82 | #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004 |
80 | #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 | 83 | #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200 |
81 | #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 | 84 | #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000 |
82 | #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 | 85 | #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000 |
@@ -89,7 +92,7 @@ | |||
89 | 92 | ||
90 | #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff | 93 | #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff |
91 | 94 | ||
92 | #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000002 | 95 | #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004 |
93 | #define VM_ENTRY_IA32E_MODE 0x00000200 | 96 | #define VM_ENTRY_IA32E_MODE 0x00000200 |
94 | #define VM_ENTRY_SMM 0x00000400 | 97 | #define VM_ENTRY_SMM 0x00000400 |
95 | #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 | 98 | #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800 |
diff --git a/arch/x86/include/uapi/asm/Kbuild b/arch/x86/include/uapi/asm/Kbuild index 09409c44f9a5..3dec769cadf7 100644 --- a/arch/x86/include/uapi/asm/Kbuild +++ b/arch/x86/include/uapi/asm/Kbuild | |||
@@ -22,6 +22,7 @@ header-y += ipcbuf.h | |||
22 | header-y += ist.h | 22 | header-y += ist.h |
23 | header-y += kvm.h | 23 | header-y += kvm.h |
24 | header-y += kvm_para.h | 24 | header-y += kvm_para.h |
25 | header-y += kvm_perf.h | ||
25 | header-y += ldt.h | 26 | header-y += ldt.h |
26 | header-y += mce.h | 27 | header-y += mce.h |
27 | header-y += mman.h | 28 | header-y += mman.h |
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h index d3a87780c70b..d7dcef58aefa 100644 --- a/arch/x86/include/uapi/asm/kvm.h +++ b/arch/x86/include/uapi/asm/kvm.h | |||
@@ -23,7 +23,10 @@ | |||
23 | #define GP_VECTOR 13 | 23 | #define GP_VECTOR 13 |
24 | #define PF_VECTOR 14 | 24 | #define PF_VECTOR 14 |
25 | #define MF_VECTOR 16 | 25 | #define MF_VECTOR 16 |
26 | #define AC_VECTOR 17 | ||
26 | #define MC_VECTOR 18 | 27 | #define MC_VECTOR 18 |
28 | #define XM_VECTOR 19 | ||
29 | #define VE_VECTOR 20 | ||
27 | 30 | ||
28 | /* Select x86 specific features in <linux/kvm.h> */ | 31 | /* Select x86 specific features in <linux/kvm.h> */ |
29 | #define __KVM_HAVE_PIT | 32 | #define __KVM_HAVE_PIT |
diff --git a/arch/x86/include/uapi/asm/kvm_perf.h b/arch/x86/include/uapi/asm/kvm_perf.h new file mode 100644 index 000000000000..3bb964f88aa1 --- /dev/null +++ b/arch/x86/include/uapi/asm/kvm_perf.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef _ASM_X86_KVM_PERF_H | ||
2 | #define _ASM_X86_KVM_PERF_H | ||
3 | |||
4 | #include <asm/svm.h> | ||
5 | #include <asm/vmx.h> | ||
6 | #include <asm/kvm.h> | ||
7 | |||
8 | #define DECODE_STR_LEN 20 | ||
9 | |||
10 | #define VCPU_ID "vcpu_id" | ||
11 | |||
12 | #define KVM_ENTRY_TRACE "kvm:kvm_entry" | ||
13 | #define KVM_EXIT_TRACE "kvm:kvm_exit" | ||
14 | #define KVM_EXIT_REASON "exit_reason" | ||
15 | |||
16 | #endif /* _ASM_X86_KVM_PERF_H */ | ||
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h index fcf2b3ae1bf0..eac9e92fe181 100644 --- a/arch/x86/include/uapi/asm/msr-index.h +++ b/arch/x86/include/uapi/asm/msr-index.h | |||
@@ -297,6 +297,8 @@ | |||
297 | #define MSR_IA32_TSC_ADJUST 0x0000003b | 297 | #define MSR_IA32_TSC_ADJUST 0x0000003b |
298 | #define MSR_IA32_BNDCFGS 0x00000d90 | 298 | #define MSR_IA32_BNDCFGS 0x00000d90 |
299 | 299 | ||
300 | #define MSR_IA32_XSS 0x00000da0 | ||
301 | |||
300 | #define FEATURE_CONTROL_LOCKED (1<<0) | 302 | #define FEATURE_CONTROL_LOCKED (1<<0) |
301 | #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1) | 303 | #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX (1<<1) |
302 | #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) | 304 | #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) |
@@ -558,6 +560,7 @@ | |||
558 | 560 | ||
559 | /* VMX_BASIC bits and bitmasks */ | 561 | /* VMX_BASIC bits and bitmasks */ |
560 | #define VMX_BASIC_VMCS_SIZE_SHIFT 32 | 562 | #define VMX_BASIC_VMCS_SIZE_SHIFT 32 |
563 | #define VMX_BASIC_TRUE_CTLS (1ULL << 55) | ||
561 | #define VMX_BASIC_64 0x0001000000000000LLU | 564 | #define VMX_BASIC_64 0x0001000000000000LLU |
562 | #define VMX_BASIC_MEM_TYPE_SHIFT 50 | 565 | #define VMX_BASIC_MEM_TYPE_SHIFT 50 |
563 | #define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU | 566 | #define VMX_BASIC_MEM_TYPE_MASK 0x003c000000000000LLU |
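VMX_BASIC bit 55 advertises the IA32_VMX_TRUE_* control MSRs, which report which default-on controls may really be cleared; consumers pick the TRUE variant when it is present, roughly (illustrative):

    /* Illustrative: prefer the TRUE pin-based control MSR when advertised. */
    static u32 pinbased_ctl_msr(u64 vmx_basic)
    {
            return (vmx_basic & VMX_BASIC_TRUE_CTLS) ?
                    MSR_IA32_VMX_TRUE_PINBASED_CTLS : MSR_IA32_VMX_PINBASED_CTLS;
    }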
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 047f9ff2e36c..bde3993624f1 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -106,6 +106,7 @@ obj-$(CONFIG_EFI) += sysfb_efi.o | |||
106 | obj-$(CONFIG_PERF_EVENTS) += perf_regs.o | 106 | obj-$(CONFIG_PERF_EVENTS) += perf_regs.o |
107 | obj-$(CONFIG_TRACING) += tracepoint.o | 107 | obj-$(CONFIG_TRACING) += tracepoint.o |
108 | obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o | 108 | obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o |
109 | obj-$(CONFIG_PMC_ATOM) += pmc_atom.o | ||
109 | 110 | ||
110 | ### | 111 | ### |
111 | # 64 bit specific files | 112 | # 64 bit specific files |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 86281ffb96d6..a531f6564ed0 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -74,10 +74,6 @@ int acpi_fix_pin2_polarity __initdata; | |||
74 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; | 74 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; |
75 | #endif | 75 | #endif |
76 | 76 | ||
77 | #ifndef __HAVE_ARCH_CMPXCHG | ||
78 | #warning ACPI uses CMPXCHG, i486 and later hardware | ||
79 | #endif | ||
80 | |||
81 | /* -------------------------------------------------------------------------- | 77 | /* -------------------------------------------------------------------------- |
82 | Boot-time Configuration | 78 | Boot-time Configuration |
83 | -------------------------------------------------------------------------- */ | 79 | -------------------------------------------------------------------------- */ |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index ce8b8ff0e0ef..60e5497681f5 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <asm/processor.h> | 8 | #include <asm/processor.h> |
9 | #include <asm/apic.h> | 9 | #include <asm/apic.h> |
10 | #include <asm/cpu.h> | 10 | #include <asm/cpu.h> |
11 | #include <asm/smp.h> | ||
11 | #include <asm/pci-direct.h> | 12 | #include <asm/pci-direct.h> |
12 | 13 | ||
13 | #ifdef CONFIG_X86_64 | 14 | #ifdef CONFIG_X86_64 |
@@ -50,7 +51,6 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) | |||
50 | return wrmsr_safe_regs(gprs); | 51 | return wrmsr_safe_regs(gprs); |
51 | } | 52 | } |
52 | 53 | ||
53 | #ifdef CONFIG_X86_32 | ||
54 | /* | 54 | /* |
55 | * B step AMD K6 before B 9730xxxx have hardware bugs that can cause | 55 | * B step AMD K6 before B 9730xxxx have hardware bugs that can cause |
56 | * misexecution of code under Linux. Owners of such processors should | 56 | * misexecution of code under Linux. Owners of such processors should |
@@ -70,6 +70,7 @@ __asm__(".globl vide\n\t.align 4\nvide: ret"); | |||
70 | 70 | ||
71 | static void init_amd_k5(struct cpuinfo_x86 *c) | 71 | static void init_amd_k5(struct cpuinfo_x86 *c) |
72 | { | 72 | { |
73 | #ifdef CONFIG_X86_32 | ||
73 | /* | 74 | /* |
74 | * General Systems BIOSen alias the cpu frequency registers | 75 | * General Systems BIOSen alias the cpu frequency registers |
74 | * of the Elan at 0x000df000. Unfortunately, one of the Linux | 75 | * of the Elan at 0x000df000. Unfortunately, one of the Linux |
@@ -83,11 +84,12 @@ static void init_amd_k5(struct cpuinfo_x86 *c) | |||
83 | if (inl(CBAR) & CBAR_ENB) | 84 | if (inl(CBAR) & CBAR_ENB) |
84 | outl(0 | CBAR_KEY, CBAR); | 85 | outl(0 | CBAR_KEY, CBAR); |
85 | } | 86 | } |
87 | #endif | ||
86 | } | 88 | } |
87 | 89 | ||
88 | |||
89 | static void init_amd_k6(struct cpuinfo_x86 *c) | 90 | static void init_amd_k6(struct cpuinfo_x86 *c) |
90 | { | 91 | { |
92 | #ifdef CONFIG_X86_32 | ||
91 | u32 l, h; | 93 | u32 l, h; |
92 | int mbytes = get_num_physpages() >> (20-PAGE_SHIFT); | 94 | int mbytes = get_num_physpages() >> (20-PAGE_SHIFT); |
93 | 95 | ||
@@ -176,10 +178,44 @@ static void init_amd_k6(struct cpuinfo_x86 *c) | |||
176 | /* placeholder for any needed mods */ | 178 | /* placeholder for any needed mods */ |
177 | return; | 179 | return; |
178 | } | 180 | } |
181 | #endif | ||
179 | } | 182 | } |
180 | 183 | ||
181 | static void amd_k7_smp_check(struct cpuinfo_x86 *c) | 184 | static void init_amd_k7(struct cpuinfo_x86 *c) |
182 | { | 185 | { |
186 | #ifdef CONFIG_X86_32 | ||
187 | u32 l, h; | ||
188 | |||
189 | /* | ||
190 | * Bit 15 of Athlon specific MSR 15, needs to be 0 | ||
191 | * to enable SSE on Palomino/Morgan/Barton CPU's. | ||
192 | * If the BIOS didn't enable it already, enable it here. | ||
193 | */ | ||
194 | if (c->x86_model >= 6 && c->x86_model <= 10) { | ||
195 | if (!cpu_has(c, X86_FEATURE_XMM)) { | ||
196 | printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); | ||
197 | msr_clear_bit(MSR_K7_HWCR, 15); | ||
198 | set_cpu_cap(c, X86_FEATURE_XMM); | ||
199 | } | ||
200 | } | ||
201 | |||
202 | /* | ||
203 | * It's been determined by AMD that Athlons since model 8 stepping 1 | ||
204 | * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx, | ||
205 | * as per AMD technical note 27212 0.2. | ||
206 | */ | ||
207 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { | ||
208 | rdmsr(MSR_K7_CLK_CTL, l, h); | ||
209 | if ((l & 0xfff00000) != 0x20000000) { | ||
210 | printk(KERN_INFO | ||
211 | "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", | ||
212 | l, ((l & 0x000fffff)|0x20000000)); | ||
213 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); | ||
214 | } | ||
215 | } | ||
216 | |||
217 | set_cpu_cap(c, X86_FEATURE_K7); | ||
218 | |||
183 | /* calling is from identify_secondary_cpu() ? */ | 219 | /* calling is from identify_secondary_cpu() ? */ |
184 | if (!c->cpu_index) | 220 | if (!c->cpu_index) |
185 | return; | 221 | return; |
@@ -207,7 +243,7 @@ static void amd_k7_smp_check(struct cpuinfo_x86 *c) | |||
207 | if (((c->x86_model == 6) && (c->x86_mask >= 2)) || | 243 | if (((c->x86_model == 6) && (c->x86_mask >= 2)) || |
208 | ((c->x86_model == 7) && (c->x86_mask >= 1)) || | 244 | ((c->x86_model == 7) && (c->x86_mask >= 1)) || |
209 | (c->x86_model > 7)) | 245 | (c->x86_model > 7)) |
210 | if (cpu_has_mp) | 246 | if (cpu_has(c, X86_FEATURE_MP)) |
211 | return; | 247 | return; |
212 | 248 | ||
213 | /* If we get here, not a certified SMP capable AMD system. */ | 249 | /* If we get here, not a certified SMP capable AMD system. */ |
@@ -219,45 +255,8 @@ static void amd_k7_smp_check(struct cpuinfo_x86 *c) | |||
219 | WARN_ONCE(1, "WARNING: This combination of AMD" | 255 | WARN_ONCE(1, "WARNING: This combination of AMD" |
220 | " processors is not suitable for SMP.\n"); | 256 | " processors is not suitable for SMP.\n"); |
221 | add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE); | 257 | add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_NOW_UNRELIABLE); |
222 | } | ||
223 | |||
224 | static void init_amd_k7(struct cpuinfo_x86 *c) | ||
225 | { | ||
226 | u32 l, h; | ||
227 | |||
228 | /* | ||
229 | * Bit 15 of Athlon specific MSR 15, needs to be 0 | ||
230 | * to enable SSE on Palomino/Morgan/Barton CPU's. | ||
231 | * If the BIOS didn't enable it already, enable it here. | ||
232 | */ | ||
233 | if (c->x86_model >= 6 && c->x86_model <= 10) { | ||
234 | if (!cpu_has(c, X86_FEATURE_XMM)) { | ||
235 | printk(KERN_INFO "Enabling disabled K7/SSE Support.\n"); | ||
236 | msr_clear_bit(MSR_K7_HWCR, 15); | ||
237 | set_cpu_cap(c, X86_FEATURE_XMM); | ||
238 | } | ||
239 | } | ||
240 | |||
241 | /* | ||
242 | * It's been determined by AMD that Athlons since model 8 stepping 1 | ||
243 | * are more robust with CLK_CTL set to 200xxxxx instead of 600xxxxx | ||
244 | * As per AMD technical note 27212 0.2 | ||
245 | */ | ||
246 | if ((c->x86_model == 8 && c->x86_mask >= 1) || (c->x86_model > 8)) { | ||
247 | rdmsr(MSR_K7_CLK_CTL, l, h); | ||
248 | if ((l & 0xfff00000) != 0x20000000) { | ||
249 | printk(KERN_INFO | ||
250 | "CPU: CLK_CTL MSR was %x. Reprogramming to %x\n", | ||
251 | l, ((l & 0x000fffff)|0x20000000)); | ||
252 | wrmsr(MSR_K7_CLK_CTL, (l & 0x000fffff)|0x20000000, h); | ||
253 | } | ||
254 | } | ||
255 | |||
256 | set_cpu_cap(c, X86_FEATURE_K7); | ||
257 | |||
258 | amd_k7_smp_check(c); | ||
259 | } | ||
260 | #endif | 258 | #endif |
259 | } | ||
261 | 260 | ||
262 | #ifdef CONFIG_NUMA | 261 | #ifdef CONFIG_NUMA |
263 | /* | 262 | /* |
@@ -446,6 +445,26 @@ static void early_init_amd_mc(struct cpuinfo_x86 *c) | |||
446 | 445 | ||
447 | static void bsp_init_amd(struct cpuinfo_x86 *c) | 446 | static void bsp_init_amd(struct cpuinfo_x86 *c) |
448 | { | 447 | { |
448 | |||
449 | #ifdef CONFIG_X86_64 | ||
450 | if (c->x86 >= 0xf) { | ||
451 | unsigned long long tseg; | ||
452 | |||
453 | /* | ||
454 | * Split up direct mapping around the TSEG SMM area. | ||
455 | * Don't do it for gbpages because there seems very little | ||
456 | * benefit in doing so. | ||
457 | */ | ||
458 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { | ||
459 | unsigned long pfn = tseg >> PAGE_SHIFT; | ||
460 | |||
461 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); | ||
462 | if (pfn_range_is_mapped(pfn, pfn + 1)) | ||
463 | set_memory_4k((unsigned long)__va(tseg), 1); | ||
464 | } | ||
465 | } | ||
466 | #endif | ||
467 | |||
449 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { | 468 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { |
450 | 469 | ||
451 | if (c->x86 > 0x10 || | 470 | if (c->x86 > 0x10 || |
@@ -515,101 +534,74 @@ static const int amd_erratum_383[]; | |||
515 | static const int amd_erratum_400[]; | 534 | static const int amd_erratum_400[]; |
516 | static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); | 535 | static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum); |
517 | 536 | ||
518 | static void init_amd(struct cpuinfo_x86 *c) | 537 | static void init_amd_k8(struct cpuinfo_x86 *c) |
519 | { | 538 | { |
520 | u32 dummy; | 539 | u32 level; |
521 | unsigned long long value; | 540 | u64 value; |
522 | 541 | ||
523 | #ifdef CONFIG_SMP | 542 | /* On C+ stepping K8 rep microcode works well for copy/memset */ |
524 | /* | 543 | level = cpuid_eax(1); |
525 | * Disable TLB flush filter by setting HWCR.FFDIS on K8 | 544 | if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) |
526 | * bit 6 of msr C001_0015 | 545 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); |
527 | * | ||
528 | * Errata 63 for SH-B3 steppings | ||
529 | * Errata 122 for all steppings (F+ have it disabled by default) | ||
530 | */ | ||
531 | if (c->x86 == 0xf) | ||
532 | msr_set_bit(MSR_K7_HWCR, 6); | ||
533 | #endif | ||
534 | |||
535 | early_init_amd(c); | ||
536 | 546 | ||
537 | /* | 547 | /* |
538 | * Bit 31 in normal CPUID used for nonstandard 3DNow ID; | 548 | * Some BIOSes incorrectly force this feature, but only K8 revision D |
539 | * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway | 549 | * (model = 0x14) and later actually support it. |
550 | * (AMD Erratum #110, docId: 25759). | ||
540 | */ | 551 | */ |
541 | clear_cpu_cap(c, 0*32+31); | 552 | if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { |
542 | 553 | clear_cpu_cap(c, X86_FEATURE_LAHF_LM); | |
543 | #ifdef CONFIG_X86_64 | 554 | if (!rdmsrl_amd_safe(0xc001100d, &value)) { |
544 | /* On C+ stepping K8 rep microcode works well for copy/memset */ | 555 | value &= ~BIT_64(32); |
545 | if (c->x86 == 0xf) { | 556 | wrmsrl_amd_safe(0xc001100d, value); |
546 | u32 level; | ||
547 | |||
548 | level = cpuid_eax(1); | ||
549 | if ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) | ||
550 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
551 | |||
552 | /* | ||
553 | * Some BIOSes incorrectly force this feature, but only K8 | ||
554 | * revision D (model = 0x14) and later actually support it. | ||
555 | * (AMD Erratum #110, docId: 25759). | ||
556 | */ | ||
557 | if (c->x86_model < 0x14 && cpu_has(c, X86_FEATURE_LAHF_LM)) { | ||
558 | clear_cpu_cap(c, X86_FEATURE_LAHF_LM); | ||
559 | if (!rdmsrl_amd_safe(0xc001100d, &value)) { | ||
560 | value &= ~(1ULL << 32); | ||
561 | wrmsrl_amd_safe(0xc001100d, value); | ||
562 | } | ||
563 | } | 557 | } |
564 | |||
565 | } | 558 | } |
566 | if (c->x86 >= 0x10) | ||
567 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
568 | 559 | ||
569 | /* get apicid instead of initial apic id from cpuid */ | 560 | if (!c->x86_model_id[0]) |
570 | c->apicid = hard_smp_processor_id(); | 561 | strcpy(c->x86_model_id, "Hammer"); |
571 | #else | 562 | } |
563 | |||
564 | static void init_amd_gh(struct cpuinfo_x86 *c) | ||
565 | { | ||
566 | #ifdef CONFIG_X86_64 | ||
567 | /* do this for boot cpu */ | ||
568 | if (c == &boot_cpu_data) | ||
569 | check_enable_amd_mmconf_dmi(); | ||
570 | |||
571 | fam10h_check_enable_mmcfg(); | ||
572 | #endif | ||
572 | 573 | ||
573 | /* | 574 | /* |
574 | * FIXME: We should handle the K5 here. Set up the write | 575 | * Disable GART TLB Walk Errors on Fam10h. We do this here because this |
575 | * range and also turn on MSR 83 bits 4 and 31 (write alloc, | 576 | * is always needed when GART is enabled, even in a kernel which has no |
576 | * no bus pipeline) | 577 | * MCE support built in. BIOS should disable GartTlbWlk Errors already. |
578 | * If it doesn't, we do it here as suggested by the BKDG. | ||
579 | * | ||
580 | * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 | ||
577 | */ | 581 | */ |
582 | msr_set_bit(MSR_AMD64_MCx_MASK(4), 10); | ||
578 | 583 | ||
579 | switch (c->x86) { | 584 | /* |
580 | case 4: | 585 | * On family 10h BIOS may not have properly enabled WC+ support, causing |
581 | init_amd_k5(c); | 586 | * it to be converted to CD memtype. This may result in performance |
582 | break; | 587 | * degradation for certain nested-paging guests. Prevent this conversion |
583 | case 5: | 588 | * by clearing bit 24 in MSR_AMD64_BU_CFG2. |
584 | init_amd_k6(c); | 589 | * |
585 | break; | 590 | * NOTE: we want to use the _safe accessors so as not to #GP kvm |
586 | case 6: /* An Athlon/Duron */ | 591 | * guests on older kvm hosts. |
587 | init_amd_k7(c); | 592 | */ |
588 | break; | 593 | msr_clear_bit(MSR_AMD64_BU_CFG2, 24); |
589 | } | ||
590 | 594 | ||
591 | /* K6s report MCEs but don't actually have all the MSRs */ | 595 | if (cpu_has_amd_erratum(c, amd_erratum_383)) |
592 | if (c->x86 < 6) | 596 | set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); |
593 | clear_cpu_cap(c, X86_FEATURE_MCE); | 597 | } |
594 | #endif | ||
595 | 598 | ||
596 | /* Enable workaround for FXSAVE leak */ | 599 | static void init_amd_bd(struct cpuinfo_x86 *c) |
597 | if (c->x86 >= 6) | 600 | { |
598 | set_cpu_cap(c, X86_FEATURE_FXSAVE_LEAK); | 601 | u64 value; |
599 | |||
600 | if (!c->x86_model_id[0]) { | ||
601 | switch (c->x86) { | ||
602 | case 0xf: | ||
603 | /* Should distinguish Models here, but this is only | ||
604 | a fallback anyways. */ | ||
605 | strcpy(c->x86_model_id, "Hammer"); | ||
606 | break; | ||
607 | } | ||
608 | } | ||
609 | 602 | ||
610 | /* re-enable TopologyExtensions if switched off by BIOS */ | 603 | /* re-enable TopologyExtensions if switched off by BIOS */ |
611 | if ((c->x86 == 0x15) && | 604 | if ((c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && |
612 | (c->x86_model >= 0x10) && (c->x86_model <= 0x1f) && | ||
613 | !cpu_has(c, X86_FEATURE_TOPOEXT)) { | 605 | !cpu_has(c, X86_FEATURE_TOPOEXT)) { |
614 | 606 | ||
615 | if (msr_set_bit(0xc0011005, 54) > 0) { | 607 | if (msr_set_bit(0xc0011005, 54) > 0) { |
@@ -625,14 +617,60 @@ static void init_amd(struct cpuinfo_x86 *c) | |||
625 | * The way access filter has a performance penalty on some workloads. | 617 | * The way access filter has a performance penalty on some workloads. |
626 | * Disable it on the affected CPUs. | 618 | * Disable it on the affected CPUs. |
627 | */ | 619 | */ |
628 | if ((c->x86 == 0x15) && | 620 | if ((c->x86_model >= 0x02) && (c->x86_model < 0x20)) { |
629 | (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { | ||
630 | |||
631 | if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) { | 621 | if (!rdmsrl_safe(0xc0011021, &value) && !(value & 0x1E)) { |
632 | value |= 0x1E; | 622 | value |= 0x1E; |
633 | wrmsrl_safe(0xc0011021, value); | 623 | wrmsrl_safe(0xc0011021, value); |
634 | } | 624 | } |
635 | } | 625 | } |
626 | } | ||
627 | |||
628 | static void init_amd(struct cpuinfo_x86 *c) | ||
629 | { | ||
630 | u32 dummy; | ||
631 | |||
632 | #ifdef CONFIG_SMP | ||
633 | /* | ||
634 | * Disable TLB flush filter by setting HWCR.FFDIS on K8 | ||
635 | * bit 6 of msr C001_0015 | ||
636 | * | ||
637 | * Errata 63 for SH-B3 steppings | ||
638 | * Errata 122 for all steppings (F+ have it disabled by default) | ||
639 | */ | ||
640 | if (c->x86 == 0xf) | ||
641 | msr_set_bit(MSR_K7_HWCR, 6); | ||
642 | #endif | ||
643 | |||
644 | early_init_amd(c); | ||
645 | |||
646 | /* | ||
647 | * Bit 31 in normal CPUID used for nonstandard 3DNow ID; | ||
648 | * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway | ||
649 | */ | ||
650 | clear_cpu_cap(c, 0*32+31); | ||
651 | |||
652 | if (c->x86 >= 0x10) | ||
653 | set_cpu_cap(c, X86_FEATURE_REP_GOOD); | ||
654 | |||
655 | /* get apicid instead of initial apic id from cpuid */ | ||
656 | c->apicid = hard_smp_processor_id(); | ||
657 | |||
658 | /* K6s report MCEs but don't actually have all the MSRs */ | ||
659 | if (c->x86 < 6) | ||
660 | clear_cpu_cap(c, X86_FEATURE_MCE); | ||
661 | |||
662 | switch (c->x86) { | ||
663 | case 4: init_amd_k5(c); break; | ||
664 | case 5: init_amd_k6(c); break; | ||
665 | case 6: init_amd_k7(c); break; | ||
666 | case 0xf: init_amd_k8(c); break; | ||
667 | case 0x10: init_amd_gh(c); break; | ||
668 | case 0x15: init_amd_bd(c); break; | ||
669 | } | ||
670 | |||
671 | /* Enable workaround for FXSAVE leak */ | ||
672 | if (c->x86 >= 6) | ||
673 | set_cpu_bug(c, X86_BUG_FXSAVE_LEAK); | ||
636 | 674 | ||
637 | cpu_detect_cache_sizes(c); | 675 | cpu_detect_cache_sizes(c); |
638 | 676 | ||
@@ -656,33 +694,6 @@ static void init_amd(struct cpuinfo_x86 *c) | |||
656 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); | 694 | set_cpu_cap(c, X86_FEATURE_MFENCE_RDTSC); |
657 | } | 695 | } |
658 | 696 | ||
659 | #ifdef CONFIG_X86_64 | ||
660 | if (c->x86 == 0x10) { | ||
661 | /* do this for boot cpu */ | ||
662 | if (c == &boot_cpu_data) | ||
663 | check_enable_amd_mmconf_dmi(); | ||
664 | |||
665 | fam10h_check_enable_mmcfg(); | ||
666 | } | ||
667 | |||
668 | if (c == &boot_cpu_data && c->x86 >= 0xf) { | ||
669 | unsigned long long tseg; | ||
670 | |||
671 | /* | ||
672 | * Split up direct mapping around the TSEG SMM area. | ||
673 | * Don't do it for gbpages because there seems very little | ||
674 | * benefit in doing so. | ||
675 | */ | ||
676 | if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) { | ||
677 | unsigned long pfn = tseg >> PAGE_SHIFT; | ||
678 | |||
679 | printk(KERN_DEBUG "tseg: %010llx\n", tseg); | ||
680 | if (pfn_range_is_mapped(pfn, pfn + 1)) | ||
681 | set_memory_4k((unsigned long)__va(tseg), 1); | ||
682 | } | ||
683 | } | ||
684 | #endif | ||
685 | |||
686 | /* | 697 | /* |
687 | * Family 0x12 and above processors have APIC timer | 698 | * Family 0x12 and above processors have APIC timer |
688 | * running in deep C states. | 699 | * running in deep C states. |
@@ -690,34 +701,6 @@ static void init_amd(struct cpuinfo_x86 *c) | |||
690 | if (c->x86 > 0x11) | 701 | if (c->x86 > 0x11) |
691 | set_cpu_cap(c, X86_FEATURE_ARAT); | 702 | set_cpu_cap(c, X86_FEATURE_ARAT); |
692 | 703 | ||
693 | if (c->x86 == 0x10) { | ||
694 | /* | ||
695 | * Disable GART TLB Walk Errors on Fam10h. We do this here | ||
696 | * because this is always needed when GART is enabled, even in a | ||
697 | * kernel which has no MCE support built in. | ||
698 | * BIOS should disable GartTlbWlk Errors already. If | ||
699 | * it doesn't, do it here as suggested by the BKDG. | ||
700 | * | ||
701 | * Fixes: https://bugzilla.kernel.org/show_bug.cgi?id=33012 | ||
702 | */ | ||
703 | msr_set_bit(MSR_AMD64_MCx_MASK(4), 10); | ||
704 | |||
705 | /* | ||
706 | * On family 10h BIOS may not have properly enabled WC+ support, | ||
707 | * causing it to be converted to CD memtype. This may result in | ||
708 | * performance degradation for certain nested-paging guests. | ||
709 | * Prevent this conversion by clearing bit 24 in | ||
710 | * MSR_AMD64_BU_CFG2. | ||
711 | * | ||
712 | * NOTE: we want to use the _safe accessors so as not to #GP kvm | ||
713 | * guests on older kvm hosts. | ||
714 | */ | ||
715 | msr_clear_bit(MSR_AMD64_BU_CFG2, 24); | ||
716 | |||
717 | if (cpu_has_amd_erratum(c, amd_erratum_383)) | ||
718 | set_cpu_bug(c, X86_BUG_AMD_TLB_MMATCH); | ||
719 | } | ||
720 | |||
721 | if (cpu_has_amd_erratum(c, amd_erratum_400)) | 704 | if (cpu_has_amd_erratum(c, amd_erratum_400)) |
722 | set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); | 705 | set_cpu_bug(c, X86_BUG_AMD_APIC_C1E); |
723 | 706 | ||
@@ -741,11 +724,6 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) | |||
741 | } | 724 | } |
742 | #endif | 725 | #endif |
743 | 726 | ||
744 | static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c) | ||
745 | { | ||
746 | tlb_flushall_shift = 6; | ||
747 | } | ||
748 | |||
749 | static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) | 727 | static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) |
750 | { | 728 | { |
751 | u32 ebx, eax, ecx, edx; | 729 | u32 ebx, eax, ecx, edx; |
@@ -793,8 +771,6 @@ static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) | |||
793 | tlb_lli_2m[ENTRIES] = eax & mask; | 771 | tlb_lli_2m[ENTRIES] = eax & mask; |
794 | 772 | ||
795 | tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1; | 773 | tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1; |
796 | |||
797 | cpu_set_tlb_flushall_shift(c); | ||
798 | } | 774 | } |
799 | 775 | ||
800 | static const struct cpu_dev amd_cpu_dev = { | 776 | static const struct cpu_dev amd_cpu_dev = { |
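The net effect of the amd.c changes above is structural: the nested family checks that used to live inline in init_amd() become one helper per family (init_amd_k5() through init_amd_bd()), selected by a single switch on c->x86. A standalone sketch of that dispatch shape, with placeholder bodies rather than the kernel's quirk code:

    #include <stdio.h>

    struct cpuinfo { unsigned family; };

    static void init_k5(struct cpuinfo *c) { (void)c; printf("K5 quirks\n"); }
    static void init_k6(struct cpuinfo *c) { (void)c; printf("K6 quirks\n"); }
    static void init_k7(struct cpuinfo *c) { (void)c; printf("K7 quirks\n"); }
    static void init_k8(struct cpuinfo *c) { (void)c; printf("K8 quirks\n"); }
    static void init_gh(struct cpuinfo *c) { (void)c; printf("Fam 10h quirks\n"); }
    static void init_bd(struct cpuinfo *c) { (void)c; printf("Fam 15h quirks\n"); }

    static void init_cpu(struct cpuinfo *c)
    {
        /* Family-independent setup would run first; then one family hook. */
        switch (c->family) {
        case 4:    init_k5(c); break;
        case 5:    init_k6(c); break;
        case 6:    init_k7(c); break;
        case 0xf:  init_k8(c); break;
        case 0x10: init_gh(c); break;
        case 0x15: init_bd(c); break;
        }
    }

    int main(void)
    {
        struct cpuinfo c = { .family = 0x15 };
        init_cpu(&c);
        return 0;
    }

Each helper keeps its own #ifdef (CONFIG_X86_32 or CONFIG_X86_64) inside the body, so the switch itself stays free of config conditionals.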
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index ef1b93f18ed1..333fd5209336 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -481,26 +481,17 @@ u16 __read_mostly tlb_lld_2m[NR_INFO]; | |||
481 | u16 __read_mostly tlb_lld_4m[NR_INFO]; | 481 | u16 __read_mostly tlb_lld_4m[NR_INFO]; |
482 | u16 __read_mostly tlb_lld_1g[NR_INFO]; | 482 | u16 __read_mostly tlb_lld_1g[NR_INFO]; |
483 | 483 | ||
484 | /* | ||
485 | * tlb_flushall_shift shows the balance point in replacing cr3 write | ||
486 | * with multiple 'invlpg'. It will do this replacement when | ||
487 | * flush_tlb_lines <= active_lines/2^tlb_flushall_shift. | ||
488 | * If tlb_flushall_shift is -1, means the replacement will be disabled. | ||
489 | */ | ||
490 | s8 __read_mostly tlb_flushall_shift = -1; | ||
491 | |||
492 | void cpu_detect_tlb(struct cpuinfo_x86 *c) | 484 | void cpu_detect_tlb(struct cpuinfo_x86 *c) |
493 | { | 485 | { |
494 | if (this_cpu->c_detect_tlb) | 486 | if (this_cpu->c_detect_tlb) |
495 | this_cpu->c_detect_tlb(c); | 487 | this_cpu->c_detect_tlb(c); |
496 | 488 | ||
497 | printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" | 489 | printk(KERN_INFO "Last level iTLB entries: 4KB %d, 2MB %d, 4MB %d\n" |
498 | "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n" | 490 | "Last level dTLB entries: 4KB %d, 2MB %d, 4MB %d, 1GB %d\n", |
499 | "tlb_flushall_shift: %d\n", | ||
500 | tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], | 491 | tlb_lli_4k[ENTRIES], tlb_lli_2m[ENTRIES], |
501 | tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES], | 492 | tlb_lli_4m[ENTRIES], tlb_lld_4k[ENTRIES], |
502 | tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES], | 493 | tlb_lld_2m[ENTRIES], tlb_lld_4m[ENTRIES], |
503 | tlb_lld_1g[ENTRIES], tlb_flushall_shift); | 494 | tlb_lld_1g[ENTRIES]); |
504 | } | 495 | } |
505 | 496 | ||
506 | void detect_ht(struct cpuinfo_x86 *c) | 497 | void detect_ht(struct cpuinfo_x86 *c) |
@@ -634,6 +625,15 @@ void get_cpu_cap(struct cpuinfo_x86 *c) | |||
634 | c->x86_capability[9] = ebx; | 625 | c->x86_capability[9] = ebx; |
635 | } | 626 | } |
636 | 627 | ||
628 | /* Extended state features: level 0x0000000d */ | ||
629 | if (c->cpuid_level >= 0x0000000d) { | ||
630 | u32 eax, ebx, ecx, edx; | ||
631 | |||
632 | cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx); | ||
633 | |||
634 | c->x86_capability[10] = eax; | ||
635 | } | ||
636 | |||
637 | /* AMD-defined flags: level 0x80000001 */ | 637 | /* AMD-defined flags: level 0x80000001 */ |
638 | xlvl = cpuid_eax(0x80000000); | 638 | xlvl = cpuid_eax(0x80000000); |
639 | c->extended_cpuid_level = xlvl; | 639 | c->extended_cpuid_level = xlvl; |
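The new hunk above caches CPUID leaf 0xD, sub-leaf 1 EAX as capability word 10, which is why XSAVEOPT disappears from scattered.c later in this series. For reference, a user-space sketch of the same query; it assumes a GCC/clang <cpuid.h> that provides __get_cpuid_count, and that bit 0 of this EAX is XSAVEOPT:

    #include <stdio.h>
    #include <cpuid.h>   /* GCC/clang helper; x86 only */

    int main(void)
    {
        unsigned eax, ebx, ecx, edx;

        /* Leaf 0xd, sub-leaf 1: extended-state (XSAVE) feature flags. */
        if (!__get_cpuid_count(0x0000000d, 1, &eax, &ebx, &ecx, &edx))
            return 1;   /* leaf not supported on this CPU */

        printf("leaf 0xd/1 eax=%#x, xsaveopt=%u\n", eax, eax & 1);
        return 0;
    }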
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index a80029035bf2..74e804ddc5c7 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -253,7 +253,7 @@ static void intel_workarounds(struct cpuinfo_x86 *c) | |||
253 | */ | 253 | */ |
254 | if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && | 254 | if (cpu_has_apic && (c->x86<<8 | c->x86_model<<4) == 0x520 && |
255 | (c->x86_mask < 0x6 || c->x86_mask == 0xb)) | 255 | (c->x86_mask < 0x6 || c->x86_mask == 0xb)) |
256 | set_cpu_cap(c, X86_FEATURE_11AP); | 256 | set_cpu_bug(c, X86_BUG_11AP); |
257 | 257 | ||
258 | 258 | ||
259 | #ifdef CONFIG_X86_INTEL_USERCOPY | 259 | #ifdef CONFIG_X86_INTEL_USERCOPY |
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c) | |||
370 | */ | 370 | */ |
371 | detect_extended_topology(c); | 371 | detect_extended_topology(c); |
372 | 372 | ||
373 | if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { | ||
374 | /* | ||
375 | * let's use the legacy cpuid vectors 0x1 and 0x4 for topology | ||
376 | * detection. | ||
377 | */ | ||
378 | c->x86_max_cores = intel_num_cpu_cores(c); | ||
379 | #ifdef CONFIG_X86_32 | ||
380 | detect_ht(c); | ||
381 | #endif | ||
382 | } | ||
383 | |||
373 | l2 = init_intel_cacheinfo(c); | 384 | l2 = init_intel_cacheinfo(c); |
374 | if (c->cpuid_level > 9) { | 385 | if (c->cpuid_level > 9) { |
375 | unsigned eax = cpuid_eax(10); | 386 | unsigned eax = cpuid_eax(10); |
@@ -391,7 +402,7 @@ static void init_intel(struct cpuinfo_x86 *c) | |||
391 | 402 | ||
392 | if (c->x86 == 6 && cpu_has_clflush && | 403 | if (c->x86 == 6 && cpu_has_clflush && |
393 | (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) | 404 | (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47)) |
394 | set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR); | 405 | set_cpu_bug(c, X86_BUG_CLFLUSH_MONITOR); |
395 | 406 | ||
396 | #ifdef CONFIG_X86_64 | 407 | #ifdef CONFIG_X86_64 |
397 | if (c->x86 == 15) | 408 | if (c->x86 == 15) |
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c) | |||
438 | set_cpu_cap(c, X86_FEATURE_P3); | 449 | set_cpu_cap(c, X86_FEATURE_P3); |
439 | #endif | 450 | #endif |
440 | 451 | ||
441 | if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) { | ||
442 | /* | ||
443 | * let's use the legacy cpuid vector 0x1 and 0x4 for topology | ||
444 | * detection. | ||
445 | */ | ||
446 | c->x86_max_cores = intel_num_cpu_cores(c); | ||
447 | #ifdef CONFIG_X86_32 | ||
448 | detect_ht(c); | ||
449 | #endif | ||
450 | } | ||
451 | |||
452 | /* Work around errata */ | 452 | /* Work around errata */ |
453 | srat_detect_node(c); | 453 | srat_detect_node(c); |
454 | 454 | ||
@@ -634,31 +634,6 @@ static void intel_tlb_lookup(const unsigned char desc) | |||
634 | } | 634 | } |
635 | } | 635 | } |
636 | 636 | ||
637 | static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) | ||
638 | { | ||
639 | switch ((c->x86 << 8) + c->x86_model) { | ||
640 | case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ | ||
641 | case 0x616: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */ | ||
642 | case 0x617: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */ | ||
643 | case 0x61d: /* six-core 45 nm xeon "Dunnington" */ | ||
644 | tlb_flushall_shift = -1; | ||
645 | break; | ||
646 | case 0x63a: /* Ivybridge */ | ||
647 | tlb_flushall_shift = 2; | ||
648 | break; | ||
649 | case 0x61a: /* 45 nm nehalem, "Bloomfield" */ | ||
650 | case 0x61e: /* 45 nm nehalem, "Lynnfield" */ | ||
651 | case 0x625: /* 32 nm nehalem, "Clarkdale" */ | ||
652 | case 0x62c: /* 32 nm nehalem, "Gulftown" */ | ||
653 | case 0x62e: /* 45 nm nehalem-ex, "Beckton" */ | ||
654 | case 0x62f: /* 32 nm Xeon E7 */ | ||
655 | case 0x62a: /* SandyBridge */ | ||
656 | case 0x62d: /* SandyBridge, "Romely-EP" */ | ||
657 | default: | ||
658 | tlb_flushall_shift = 6; | ||
659 | } | ||
660 | } | ||
661 | |||
662 | static void intel_detect_tlb(struct cpuinfo_x86 *c) | 637 | static void intel_detect_tlb(struct cpuinfo_x86 *c) |
663 | { | 638 | { |
664 | int i, j, n; | 639 | int i, j, n; |
@@ -683,7 +658,6 @@ static void intel_detect_tlb(struct cpuinfo_x86 *c) | |||
683 | for (j = 1 ; j < 16 ; j++) | 658 | for (j = 1 ; j < 16 ; j++) |
684 | intel_tlb_lookup(desc[j]); | 659 | intel_tlb_lookup(desc[j]); |
685 | } | 660 | } |
686 | intel_tlb_flushall_shift_set(c); | ||
687 | } | 661 | } |
688 | 662 | ||
689 | static const struct cpu_dev intel_cpu_dev = { | 663 | static const struct cpu_dev intel_cpu_dev = { |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index a952e9c85b6f..9c8f7394c612 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c) | |||
730 | #endif | 730 | #endif |
731 | } | 731 | } |
732 | 732 | ||
733 | #ifdef CONFIG_X86_HT | ||
734 | /* | ||
735 | * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in | ||
736 | * turns means that the only possibility is SMT (as indicated in | ||
737 | * cpuid1). Since cpuid2 doesn't specify shared caches, and we know | ||
738 | * that SMT shares all caches, we can unconditionally set cpu_llc_id to | ||
739 | * c->phys_proc_id. | ||
740 | */ | ||
741 | if (per_cpu(cpu_llc_id, cpu) == BAD_APICID) | ||
742 | per_cpu(cpu_llc_id, cpu) = c->phys_proc_id; | ||
743 | #endif | ||
744 | |||
733 | c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); | 745 | c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d)); |
734 | 746 | ||
735 | return l2; | 747 | return l2; |
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh index 2bf616505499..e2b22df964cd 100644 --- a/arch/x86/kernel/cpu/mkcapflags.sh +++ b/arch/x86/kernel/cpu/mkcapflags.sh | |||
@@ -1,23 +1,25 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | # | 2 | # |
3 | # Generate the x86_cap_flags[] array from include/asm/cpufeature.h | 3 | # Generate the x86_cap/bug_flags[] arrays from include/asm/cpufeature.h |
4 | # | 4 | # |
5 | 5 | ||
6 | IN=$1 | 6 | IN=$1 |
7 | OUT=$2 | 7 | OUT=$2 |
8 | 8 | ||
9 | TABS="$(printf '\t\t\t\t\t')" | 9 | dump_array() |
10 | trap 'rm "$OUT"' EXIT | 10 | { |
11 | ARRAY=$1 | ||
12 | SIZE=$2 | ||
13 | PFX=$3 | ||
14 | POSTFIX=$4 | ||
11 | 15 | ||
12 | ( | 16 | PFX_SZ=$(echo $PFX | wc -c) |
13 | echo "#ifndef _ASM_X86_CPUFEATURE_H" | 17 | TABS="$(printf '\t\t\t\t\t')" |
14 | echo "#include <asm/cpufeature.h>" | 18 | |
15 | echo "#endif" | 19 | echo "const char * const $ARRAY[$SIZE] = {" |
16 | echo "" | ||
17 | echo "const char * const x86_cap_flags[NCAPINTS*32] = {" | ||
18 | 20 | ||
19 | # Iterate through any input lines starting with #define X86_FEATURE_ | 21 | # Iterate through any input lines starting with #define $PFX |
20 | sed -n -e 's/\t/ /g' -e 's/^ *# *define *X86_FEATURE_//p' $IN | | 22 | sed -n -e 's/\t/ /g' -e "s/^ *# *define *$PFX//p" $IN | |
21 | while read i | 23 | while read i |
22 | do | 24 | do |
23 | # Name is everything up to the first whitespace | 25 | # Name is everything up to the first whitespace |
@@ -31,11 +33,32 @@ trap 'rm "$OUT"' EXIT | |||
31 | # Name is uppercase, VALUE is all lowercase | 33 | # Name is uppercase, VALUE is all lowercase |
32 | VALUE="$(echo "$VALUE" | tr A-Z a-z)" | 34 | VALUE="$(echo "$VALUE" | tr A-Z a-z)" |
33 | 35 | ||
34 | TABCOUNT=$(( ( 5*8 - 14 - $(echo "$NAME" | wc -c) ) / 8 )) | 36 | if [ -n "$POSTFIX" ]; then |
35 | printf "\t[%s]%.*s = %s,\n" \ | 37 | T=$(( $PFX_SZ + $(echo $POSTFIX | wc -c) + 2 )) |
36 | "X86_FEATURE_$NAME" "$TABCOUNT" "$TABS" "$VALUE" | 38 | TABS="$(printf '\t\t\t\t\t\t')" |
39 | TABCOUNT=$(( ( 6*8 - ($T + 1) - $(echo "$NAME" | wc -c) ) / 8 )) | ||
40 | printf "\t[%s - %s]%.*s = %s,\n" "$PFX$NAME" "$POSTFIX" "$TABCOUNT" "$TABS" "$VALUE" | ||
41 | else | ||
42 | TABCOUNT=$(( ( 5*8 - ($PFX_SZ + 1) - $(echo "$NAME" | wc -c) ) / 8 )) | ||
43 | printf "\t[%s]%.*s = %s,\n" "$PFX$NAME" "$TABCOUNT" "$TABS" "$VALUE" | ||
44 | fi | ||
37 | done | 45 | done |
38 | echo "};" | 46 | echo "};" |
47 | } | ||
48 | |||
49 | trap 'rm "$OUT"' EXIT | ||
50 | |||
51 | ( | ||
52 | echo "#ifndef _ASM_X86_CPUFEATURE_H" | ||
53 | echo "#include <asm/cpufeature.h>" | ||
54 | echo "#endif" | ||
55 | echo "" | ||
56 | |||
57 | dump_array "x86_cap_flags" "NCAPINTS*32" "X86_FEATURE_" "" | ||
58 | echo "" | ||
59 | |||
60 | dump_array "x86_bug_flags" "NBUGINTS*32" "X86_BUG_" "NCAPINTS*32" | ||
61 | |||
39 | ) > $OUT | 62 | ) > $OUT |
40 | 63 | ||
41 | trap - EXIT | 64 | trap - EXIT |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 2bdfbff8a4f6..2879ecdaac43 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event) | |||
118 | continue; | 118 | continue; |
119 | if (event->attr.config1 & ~er->valid_mask) | 119 | if (event->attr.config1 & ~er->valid_mask) |
120 | return -EINVAL; | 120 | return -EINVAL; |
121 | /* Check if the extra MSRs can be safely accessed */ | ||
122 | if (!er->extra_msr_access) | ||
123 | return -ENXIO; | ||
121 | 124 | ||
122 | reg->idx = er->idx; | 125 | reg->idx = er->idx; |
123 | reg->config = event->attr.config1; | 126 | reg->config = event->attr.config1; |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 3b2f9bdd974b..8ade93111e03 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -295,14 +295,16 @@ struct extra_reg { | |||
295 | u64 config_mask; | 295 | u64 config_mask; |
296 | u64 valid_mask; | 296 | u64 valid_mask; |
297 | int idx; /* per_xxx->regs[] reg index */ | 297 | int idx; /* per_xxx->regs[] reg index */ |
298 | bool extra_msr_access; | ||
298 | }; | 299 | }; |
299 | 300 | ||
300 | #define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ | 301 | #define EVENT_EXTRA_REG(e, ms, m, vm, i) { \ |
301 | .event = (e), \ | 302 | .event = (e), \ |
302 | .msr = (ms), \ | 303 | .msr = (ms), \ |
303 | .config_mask = (m), \ | 304 | .config_mask = (m), \ |
304 | .valid_mask = (vm), \ | 305 | .valid_mask = (vm), \ |
305 | .idx = EXTRA_REG_##i, \ | 306 | .idx = EXTRA_REG_##i, \ |
307 | .extra_msr_access = true, \ | ||
306 | } | 308 | } |
307 | 309 | ||
308 | #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ | 310 | #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx) \ |
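Because EVENT_EXTRA_REG expands to a designated initializer, adding `.extra_msr_access = true` to the macro flips the default for every table entry at once; the probe code added elsewhere in this series then clears the flag only for MSRs that fault. A small sketch of that pattern with illustrative names (not the kernel's full struct):

    #include <stdbool.h>
    #include <stdio.h>

    struct extra_reg {
        unsigned event;
        unsigned msr;
        bool     extra_msr_access;
    };

    /* Every entry built through the macro starts out marked accessible. */
    #define EVENT_EXTRA_REG(e, ms) \
        { .event = (e), .msr = (ms), .extra_msr_access = true }

    static struct extra_reg regs[] = {
        EVENT_EXTRA_REG(0x01b7, 0x1a6),
        EVENT_EXTRA_REG(0x01bb, 0x1a7),
    };

    int main(void)
    {
        for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
            printf("msr %#x accessible=%d\n",
                   regs[i].msr, regs[i].extra_msr_access);
        return 0;
    }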
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c index 3bbdf4cd38b9..30790d798e6b 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c | |||
@@ -294,31 +294,41 @@ static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) | |||
294 | cpu_to_node(cpu)); | 294 | cpu_to_node(cpu)); |
295 | } | 295 | } |
296 | 296 | ||
297 | static void amd_uncore_cpu_up_prepare(unsigned int cpu) | 297 | static int amd_uncore_cpu_up_prepare(unsigned int cpu) |
298 | { | 298 | { |
299 | struct amd_uncore *uncore; | 299 | struct amd_uncore *uncore_nb = NULL, *uncore_l2; |
300 | 300 | ||
301 | if (amd_uncore_nb) { | 301 | if (amd_uncore_nb) { |
302 | uncore = amd_uncore_alloc(cpu); | 302 | uncore_nb = amd_uncore_alloc(cpu); |
303 | uncore->cpu = cpu; | 303 | if (!uncore_nb) |
304 | uncore->num_counters = NUM_COUNTERS_NB; | 304 | goto fail; |
305 | uncore->rdpmc_base = RDPMC_BASE_NB; | 305 | uncore_nb->cpu = cpu; |
306 | uncore->msr_base = MSR_F15H_NB_PERF_CTL; | 306 | uncore_nb->num_counters = NUM_COUNTERS_NB; |
307 | uncore->active_mask = &amd_nb_active_mask; | 307 | uncore_nb->rdpmc_base = RDPMC_BASE_NB; |
308 | uncore->pmu = &amd_nb_pmu; | 308 | uncore_nb->msr_base = MSR_F15H_NB_PERF_CTL; |
309 | *per_cpu_ptr(amd_uncore_nb, cpu) = uncore; | 309 | uncore_nb->active_mask = &amd_nb_active_mask; |
310 | uncore_nb->pmu = &amd_nb_pmu; | ||
311 | *per_cpu_ptr(amd_uncore_nb, cpu) = uncore_nb; | ||
310 | } | 312 | } |
311 | 313 | ||
312 | if (amd_uncore_l2) { | 314 | if (amd_uncore_l2) { |
313 | uncore = amd_uncore_alloc(cpu); | 315 | uncore_l2 = amd_uncore_alloc(cpu); |
314 | uncore->cpu = cpu; | 316 | if (!uncore_l2) |
315 | uncore->num_counters = NUM_COUNTERS_L2; | 317 | goto fail; |
316 | uncore->rdpmc_base = RDPMC_BASE_L2; | 318 | uncore_l2->cpu = cpu; |
317 | uncore->msr_base = MSR_F16H_L2I_PERF_CTL; | 319 | uncore_l2->num_counters = NUM_COUNTERS_L2; |
318 | uncore->active_mask = &amd_l2_active_mask; | 320 | uncore_l2->rdpmc_base = RDPMC_BASE_L2; |
319 | uncore->pmu = &amd_l2_pmu; | 321 | uncore_l2->msr_base = MSR_F16H_L2I_PERF_CTL; |
320 | *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; | 322 | uncore_l2->active_mask = &amd_l2_active_mask; |
323 | uncore_l2->pmu = &amd_l2_pmu; | ||
324 | *per_cpu_ptr(amd_uncore_l2, cpu) = uncore_l2; | ||
321 | } | 325 | } |
326 | |||
327 | return 0; | ||
328 | |||
329 | fail: | ||
330 | kfree(uncore_nb); | ||
331 | return -ENOMEM; | ||
322 | } | 332 | } |
323 | 333 | ||
324 | static struct amd_uncore * | 334 | static struct amd_uncore * |
@@ -441,7 +451,7 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) | |||
441 | 451 | ||
442 | if (!--uncore->refcnt) | 452 | if (!--uncore->refcnt) |
443 | kfree(uncore); | 453 | kfree(uncore); |
444 | *per_cpu_ptr(amd_uncore_nb, cpu) = NULL; | 454 | *per_cpu_ptr(uncores, cpu) = NULL; |
445 | } | 455 | } |
446 | 456 | ||
447 | static void amd_uncore_cpu_dead(unsigned int cpu) | 457 | static void amd_uncore_cpu_dead(unsigned int cpu) |
@@ -461,7 +471,8 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, | |||
461 | 471 | ||
462 | switch (action & ~CPU_TASKS_FROZEN) { | 472 | switch (action & ~CPU_TASKS_FROZEN) { |
463 | case CPU_UP_PREPARE: | 473 | case CPU_UP_PREPARE: |
464 | amd_uncore_cpu_up_prepare(cpu); | 474 | if (amd_uncore_cpu_up_prepare(cpu)) |
475 | return notifier_from_errno(-ENOMEM); | ||
465 | break; | 476 | break; |
466 | 477 | ||
467 | case CPU_STARTING: | 478 | case CPU_STARTING: |
@@ -501,20 +512,33 @@ static void __init init_cpu_already_online(void *dummy) | |||
501 | amd_uncore_cpu_online(cpu); | 512 | amd_uncore_cpu_online(cpu); |
502 | } | 513 | } |
503 | 514 | ||
515 | static void cleanup_cpu_online(void *dummy) | ||
516 | { | ||
517 | unsigned int cpu = smp_processor_id(); | ||
518 | |||
519 | amd_uncore_cpu_dead(cpu); | ||
520 | } | ||
521 | |||
504 | static int __init amd_uncore_init(void) | 522 | static int __init amd_uncore_init(void) |
505 | { | 523 | { |
506 | unsigned int cpu; | 524 | unsigned int cpu, cpu2; |
507 | int ret = -ENODEV; | 525 | int ret = -ENODEV; |
508 | 526 | ||
509 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) | 527 | if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) |
510 | return -ENODEV; | 528 | goto fail_nodev; |
511 | 529 | ||
512 | if (!cpu_has_topoext) | 530 | if (!cpu_has_topoext) |
513 | return -ENODEV; | 531 | goto fail_nodev; |
514 | 532 | ||
515 | if (cpu_has_perfctr_nb) { | 533 | if (cpu_has_perfctr_nb) { |
516 | amd_uncore_nb = alloc_percpu(struct amd_uncore *); | 534 | amd_uncore_nb = alloc_percpu(struct amd_uncore *); |
517 | perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1); | 535 | if (!amd_uncore_nb) { |
536 | ret = -ENOMEM; | ||
537 | goto fail_nb; | ||
538 | } | ||
539 | ret = perf_pmu_register(&amd_nb_pmu, amd_nb_pmu.name, -1); | ||
540 | if (ret) | ||
541 | goto fail_nb; | ||
518 | 542 | ||
519 | printk(KERN_INFO "perf: AMD NB counters detected\n"); | 543 | printk(KERN_INFO "perf: AMD NB counters detected\n"); |
520 | ret = 0; | 544 | ret = 0; |
@@ -522,20 +546,28 @@ static int __init amd_uncore_init(void) | |||
522 | 546 | ||
523 | if (cpu_has_perfctr_l2) { | 547 | if (cpu_has_perfctr_l2) { |
524 | amd_uncore_l2 = alloc_percpu(struct amd_uncore *); | 548 | amd_uncore_l2 = alloc_percpu(struct amd_uncore *); |
525 | perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1); | 549 | if (!amd_uncore_l2) { |
550 | ret = -ENOMEM; | ||
551 | goto fail_l2; | ||
552 | } | ||
553 | ret = perf_pmu_register(&amd_l2_pmu, amd_l2_pmu.name, -1); | ||
554 | if (ret) | ||
555 | goto fail_l2; | ||
526 | 556 | ||
527 | printk(KERN_INFO "perf: AMD L2I counters detected\n"); | 557 | printk(KERN_INFO "perf: AMD L2I counters detected\n"); |
528 | ret = 0; | 558 | ret = 0; |
529 | } | 559 | } |
530 | 560 | ||
531 | if (ret) | 561 | if (ret) |
532 | return -ENODEV; | 562 | goto fail_nodev; |
533 | 563 | ||
534 | cpu_notifier_register_begin(); | 564 | cpu_notifier_register_begin(); |
535 | 565 | ||
536 | /* init cpus already online before registering for hotplug notifier */ | 566 | /* init cpus already online before registering for hotplug notifier */ |
537 | for_each_online_cpu(cpu) { | 567 | for_each_online_cpu(cpu) { |
538 | amd_uncore_cpu_up_prepare(cpu); | 568 | ret = amd_uncore_cpu_up_prepare(cpu); |
569 | if (ret) | ||
570 | goto fail_online; | ||
539 | smp_call_function_single(cpu, init_cpu_already_online, NULL, 1); | 571 | smp_call_function_single(cpu, init_cpu_already_online, NULL, 1); |
540 | } | 572 | } |
541 | 573 | ||
@@ -543,5 +575,30 @@ static int __init amd_uncore_init(void) | |||
543 | cpu_notifier_register_done(); | 575 | cpu_notifier_register_done(); |
544 | 576 | ||
545 | return 0; | 577 | return 0; |
578 | |||
579 | |||
580 | fail_online: | ||
581 | for_each_online_cpu(cpu2) { | ||
582 | if (cpu2 == cpu) | ||
583 | break; | ||
584 | smp_call_function_single(cpu2, cleanup_cpu_online, NULL, 1); | ||
585 | } | ||
586 | cpu_notifier_register_done(); | ||
587 | |||
588 | /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */ | ||
589 | amd_uncore_nb = amd_uncore_l2 = NULL; | ||
590 | if (cpu_has_perfctr_l2) | ||
591 | perf_pmu_unregister(&amd_l2_pmu); | ||
592 | fail_l2: | ||
593 | if (cpu_has_perfctr_nb) | ||
594 | perf_pmu_unregister(&amd_nb_pmu); | ||
595 | if (amd_uncore_l2) | ||
596 | free_percpu(amd_uncore_l2); | ||
597 | fail_nb: | ||
598 | if (amd_uncore_nb) | ||
599 | free_percpu(amd_uncore_nb); | ||
600 | |||
601 | fail_nodev: | ||
602 | return ret; | ||
546 | } | 603 | } |
547 | device_initcall(amd_uncore_init); | 604 | device_initcall(amd_uncore_init); |
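The reworked amd_uncore_init() is a textbook instance of the kernel's goto-unwind idiom: each acquisition has a matching label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal standalone sketch of the idiom with stand-in resources (plain malloc here, not the driver's percpu allocations):

    #include <stdio.h>
    #include <stdlib.h>

    static int init_all(void)
    {
        void *a, *b;

        a = malloc(64);             /* first resource */
        if (!a)
            goto fail_a;

        b = malloc(64);             /* second resource */
        if (!b)
            goto fail_b;

        printf("all resources acquired\n");
        free(b);
        free(a);
        return 0;

    fail_b:                         /* unwind in reverse acquisition order */
        free(a);
    fail_a:
        return -1;
    }

    int main(void) { return init_all() ? 1 : 0; }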
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 07846d738bdb..2502d0d9d246 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -2182,6 +2182,41 @@ static void intel_snb_check_microcode(void) | |||
2182 | } | 2182 | } |
2183 | } | 2183 | } |
2184 | 2184 | ||
2185 | /* | ||
2186 | * Under certain circumstances, accessing certain MSRs may cause #GP. | ||
2187 | * This function tests whether the input MSR can be safely accessed. | ||
2188 | */ | ||
2189 | static bool check_msr(unsigned long msr, u64 mask) | ||
2190 | { | ||
2191 | u64 val_old, val_new, val_tmp; | ||
2192 | |||
2193 | /* | ||
2194 | * Read the current value, change it and read it back to see if it | ||
2195 | * matches; this is needed to detect certain hardware emulators | ||
2196 | * (qemu/kvm) that don't trap on the MSR access and always return 0s. | ||
2197 | */ | ||
2198 | if (rdmsrl_safe(msr, &val_old)) | ||
2199 | return false; | ||
2200 | |||
2201 | /* | ||
2202 | * Only change the bits which can be updated by wrmsrl. | ||
2203 | */ | ||
2204 | val_tmp = val_old ^ mask; | ||
2205 | if (wrmsrl_safe(msr, val_tmp) || | ||
2206 | rdmsrl_safe(msr, &val_new)) | ||
2207 | return false; | ||
2208 | |||
2209 | if (val_new != val_tmp) | ||
2210 | return false; | ||
2211 | |||
2212 | /* At this point the MSR is known to be safely accessible. | ||
2213 | * Restore the old value and return. | ||
2214 | */ | ||
2215 | wrmsrl(msr, val_old); | ||
2216 | |||
2217 | return true; | ||
2218 | } | ||
2219 | |||
2185 | static __init void intel_sandybridge_quirk(void) | 2220 | static __init void intel_sandybridge_quirk(void) |
2186 | { | 2221 | { |
2187 | x86_pmu.check_microcode = intel_snb_check_microcode; | 2222 | x86_pmu.check_microcode = intel_snb_check_microcode; |
@@ -2271,7 +2306,8 @@ __init int intel_pmu_init(void) | |||
2271 | union cpuid10_ebx ebx; | 2306 | union cpuid10_ebx ebx; |
2272 | struct event_constraint *c; | 2307 | struct event_constraint *c; |
2273 | unsigned int unused; | 2308 | unsigned int unused; |
2274 | int version; | 2309 | struct extra_reg *er; |
2310 | int version, i; | ||
2275 | 2311 | ||
2276 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { | 2312 | if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) { |
2277 | switch (boot_cpu_data.x86) { | 2313 | switch (boot_cpu_data.x86) { |
@@ -2474,6 +2510,9 @@ __init int intel_pmu_init(void) | |||
2474 | case 62: /* IvyBridge EP */ | 2510 | case 62: /* IvyBridge EP */ |
2475 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, | 2511 | memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, |
2476 | sizeof(hw_cache_event_ids)); | 2512 | sizeof(hw_cache_event_ids)); |
2513 | /* dTLB-load-misses on IVB differs from SNB */ | ||
2514 | hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */ | ||
2515 | |||
2477 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, | 2516 | memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, |
2478 | sizeof(hw_cache_extra_regs)); | 2517 | sizeof(hw_cache_extra_regs)); |
2479 | 2518 | ||
@@ -2574,6 +2613,34 @@ __init int intel_pmu_init(void) | |||
2574 | } | 2613 | } |
2575 | } | 2614 | } |
2576 | 2615 | ||
2616 | /* | ||
2617 | * Accessing LBR MSRs may cause #GP under certain circumstances. | ||
2618 | * E.g. KVM doesn't support LBR MSRs. | ||
2619 | * Check all LBR MSRs here. | ||
2620 | * Disable LBR access if any LBR MSR cannot be accessed. | ||
2621 | */ | ||
2622 | if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL)) | ||
2623 | x86_pmu.lbr_nr = 0; | ||
2624 | for (i = 0; i < x86_pmu.lbr_nr; i++) { | ||
2625 | if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) && | ||
2626 | check_msr(x86_pmu.lbr_to + i, 0xffffUL))) | ||
2627 | x86_pmu.lbr_nr = 0; | ||
2628 | } | ||
2629 | |||
2630 | /* | ||
2631 | * Accessing extra MSRs may cause #GP under certain circumstances. | ||
2632 | * E.g. KVM doesn't support offcore events. | ||
2633 | * Check all extra_regs here. | ||
2634 | */ | ||
2635 | if (x86_pmu.extra_regs) { | ||
2636 | for (er = x86_pmu.extra_regs; er->msr; er++) { | ||
2637 | er->extra_msr_access = check_msr(er->msr, 0x1ffUL); | ||
2638 | /* Disable LBR select mapping */ | ||
2639 | if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access) | ||
2640 | x86_pmu.lbr_sel_map = NULL; | ||
2641 | } | ||
2642 | } | ||
2643 | |||
2577 | /* Support full width counters using alternative MSR range */ | 2644 | /* Support full width counters using alternative MSR range */ |
2578 | if (x86_pmu.intel_cap.full_width_write) { | 2645 | if (x86_pmu.intel_cap.full_width_write) { |
2579 | x86_pmu.max_period = x86_pmu.cntval_mask; | 2646 | x86_pmu.max_period = x86_pmu.cntval_mask; |
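check_msr() above works because a hypervisor that doesn't implement an MSR typically either faults (caught by the _safe accessors) or silently returns zeros; flipping bits and reading them back catches the second case. A user-space model of that read/flip/verify/restore logic, with the MSR accessors simulated rather than real:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simulated backend: 'emulated' models a hypervisor that ignores
     * writes and always reads back 0 -- the case the probe must detect. */
    static bool emulated = true;
    static uint64_t fake_msr;

    static int rdmsr_safe(uint64_t *val) { *val = emulated ? 0 : fake_msr; return 0; }
    static int wrmsr_safe(uint64_t val)  { if (!emulated) fake_msr = val; return 0; }

    static bool check_msr(uint64_t mask)
    {
        uint64_t val_old, val_tmp, val_new;

        if (rdmsr_safe(&val_old))
            return false;
        val_tmp = val_old ^ mask;             /* flip only the probe-able bits */
        if (wrmsr_safe(val_tmp) || rdmsr_safe(&val_new))
            return false;
        if (val_new != val_tmp)               /* write didn't stick: unusable */
            return false;
        wrmsr_safe(val_old);                  /* restore the original value */
        return true;
    }

    int main(void)
    {
        printf("emulated MSR usable: %d\n", check_msr(0x3));
        emulated = false;
        printf("real MSR usable: %d\n", check_msr(0x3));
        return 0;
    }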
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 980970cb744d..696ade311ded 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c | |||
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu) | |||
311 | if (!x86_pmu.bts) | 311 | if (!x86_pmu.bts) |
312 | return 0; | 312 | return 0; |
313 | 313 | ||
314 | buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node); | 314 | buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node); |
315 | if (unlikely(!buffer)) | 315 | if (unlikely(!buffer)) { |
316 | WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__); | ||
316 | return -ENOMEM; | 317 | return -ENOMEM; |
318 | } | ||
317 | 319 | ||
318 | max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; | 320 | max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE; |
319 | thresh = max / 16; | 321 | thresh = max / 16; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 65bbbea38b9c..cfc6f9dfcd90 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = { | |||
550 | SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), | 550 | SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6), |
551 | SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), | 551 | SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8), |
552 | SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), | 552 | SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8), |
553 | SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc), | 553 | SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa), |
554 | SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc), | 554 | SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa), |
555 | SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), | 555 | SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2), |
556 | SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), | 556 | SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2), |
557 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), | 557 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2), |
558 | SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), | 558 | SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2), |
559 | SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), | 559 | SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8), |
560 | SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), | 560 | SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8), |
561 | SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc), | 561 | SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa), |
562 | SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc), | 562 | SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa), |
563 | SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), | 563 | SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2), |
564 | SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), | 564 | SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2), |
565 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), | 565 | SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2), |
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = { | |||
1222 | SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, | 1222 | SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN, |
1223 | SNBEP_CBO_PMON_CTL_TID_EN, 0x1), | 1223 | SNBEP_CBO_PMON_CTL_TID_EN, 0x1), |
1224 | SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), | 1224 | SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2), |
1225 | |||
1225 | SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), | 1226 | SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4), |
1226 | SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), | 1227 | SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc), |
1227 | SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), | 1228 | SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc), |
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = { | |||
1245 | SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), | 1246 | SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10), |
1246 | SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), | 1247 | SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10), |
1247 | SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), | 1248 | SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10), |
1248 | SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), | 1249 | SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10), |
1249 | SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), | 1250 | SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10), |
1250 | SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), | 1251 | SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18), |
1251 | SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), | 1252 | SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18), |
@@ -2946,10 +2947,7 @@ again: | |||
2946 | * extra registers. If we failed to take an extra | 2947 | * extra registers. If we failed to take an extra |
2947 | * register, try the alternative. | 2948 | * register, try the alternative. |
2948 | */ | 2949 | */ |
2949 | if (idx % 2) | 2950 | idx ^= 1; |
2950 | idx--; | ||
2951 | else | ||
2952 | idx++; | ||
2953 | if (idx != reg1->idx % 6) { | 2951 | if (idx != reg1->idx % 6) { |
2954 | if (idx == 2) | 2952 | if (idx == 2) |
2955 | config1 >>= 8; | 2953 | config1 >>= 8; |
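The `idx ^= 1` simplification above works because XOR with 1 toggles within each (even, odd) pair -- 0<->1, 2<->3, 4<->5 -- which is exactly "try the other register of the pair" without a branch. A quick standalone check:

    #include <stdio.h>

    int main(void)
    {
        /* XOR with 1 maps each even index to the next odd one and back. */
        for (int idx = 0; idx < 6; idx++)
            printf("%d -> %d\n", idx, idx ^ 1);
        return 0;
    }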
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c index 06fe3ed8b851..5433658e598d 100644 --- a/arch/x86/kernel/cpu/proc.c +++ b/arch/x86/kernel/cpu/proc.c | |||
@@ -97,6 +97,14 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
97 | if (cpu_has(c, i) && x86_cap_flags[i] != NULL) | 97 | if (cpu_has(c, i) && x86_cap_flags[i] != NULL) |
98 | seq_printf(m, " %s", x86_cap_flags[i]); | 98 | seq_printf(m, " %s", x86_cap_flags[i]); |
99 | 99 | ||
100 | seq_printf(m, "\nbugs\t\t:"); | ||
101 | for (i = 0; i < 32*NBUGINTS; i++) { | ||
102 | unsigned int bug_bit = 32*NCAPINTS + i; | ||
103 | |||
104 | if (cpu_has_bug(c, bug_bit) && x86_bug_flags[i]) | ||
105 | seq_printf(m, " %s", x86_bug_flags[i]); | ||
106 | } | ||
107 | |||
100 | seq_printf(m, "\nbogomips\t: %lu.%02lu\n", | 108 | seq_printf(m, "\nbogomips\t: %lu.%02lu\n", |
101 | c->loops_per_jiffy/(500000/HZ), | 109 | c->loops_per_jiffy/(500000/HZ), |
102 | (c->loops_per_jiffy/(5000/HZ)) % 100); | 110 | (c->loops_per_jiffy/(5000/HZ)) % 100); |
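The show_cpuinfo() loop above tests bit 32*NCAPINTS + i while indexing x86_bug_flags[] with plain i: bug bits live in the word space directly after the NCAPINTS capability words, and the array generated by mkcapflags.sh is indexed from the start of that bug space (hence the `[X86_BUG_x - NCAPINTS*32]` initializers it emits). A toy model of the split index space, with the sizes shrunk for illustration:

    #include <stdio.h>

    #define NCAPINTS 2   /* toy sizes; the kernel's are larger */
    #define NBUGINTS 1

    static const char *bug_flags[NBUGINTS * 32] = { [0] = "11ap" };

    /* Pretend only the first bug bit is set on this CPU. */
    static int cpu_has_bug_bit(unsigned bit)
    {
        return bit == NCAPINTS * 32;
    }

    int main(void)
    {
        printf("bugs\t\t:");
        for (unsigned i = 0; i < NBUGINTS * 32; i++) {
            unsigned bug_bit = 32 * NCAPINTS + i;

            if (cpu_has_bug_bit(bug_bit) && bug_flags[i])
                printf(" %s", bug_flags[i]);
        }
        printf("\n");
        return 0;
    }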
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index b6f794aa1693..4a8013d55947 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -38,7 +38,6 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c) | |||
38 | { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, | 38 | { X86_FEATURE_PTS, CR_EAX, 6, 0x00000006, 0 }, |
39 | { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, | 39 | { X86_FEATURE_APERFMPERF, CR_ECX, 0, 0x00000006, 0 }, |
40 | { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, | 40 | { X86_FEATURE_EPB, CR_ECX, 3, 0x00000006, 0 }, |
41 | { X86_FEATURE_XSAVEOPT, CR_EAX, 0, 0x0000000d, 1 }, | ||
42 | { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, | 41 | { X86_FEATURE_HW_PSTATE, CR_EDX, 7, 0x80000007, 0 }, |
43 | { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, | 42 | { X86_FEATURE_CPB, CR_EDX, 9, 0x80000007, 0 }, |
44 | { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 }, | 43 | { X86_FEATURE_PROC_FEEDBACK, CR_EDX,11, 0x80000007, 0 }, |
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index dbaa23e78b36..47c410d99f5d 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S | |||
@@ -425,8 +425,8 @@ sysenter_do_call: | |||
425 | cmpl $(NR_syscalls), %eax | 425 | cmpl $(NR_syscalls), %eax |
426 | jae sysenter_badsys | 426 | jae sysenter_badsys |
427 | call *sys_call_table(,%eax,4) | 427 | call *sys_call_table(,%eax,4) |
428 | movl %eax,PT_EAX(%esp) | ||
429 | sysenter_after_call: | 428 | sysenter_after_call: |
429 | movl %eax,PT_EAX(%esp) | ||
430 | LOCKDEP_SYS_EXIT | 430 | LOCKDEP_SYS_EXIT |
431 | DISABLE_INTERRUPTS(CLBR_ANY) | 431 | DISABLE_INTERRUPTS(CLBR_ANY) |
432 | TRACE_IRQS_OFF | 432 | TRACE_IRQS_OFF |
@@ -502,6 +502,7 @@ ENTRY(system_call) | |||
502 | jae syscall_badsys | 502 | jae syscall_badsys |
503 | syscall_call: | 503 | syscall_call: |
504 | call *sys_call_table(,%eax,4) | 504 | call *sys_call_table(,%eax,4) |
505 | syscall_after_call: | ||
505 | movl %eax,PT_EAX(%esp) # store the return value | 506 | movl %eax,PT_EAX(%esp) # store the return value |
506 | syscall_exit: | 507 | syscall_exit: |
507 | LOCKDEP_SYS_EXIT | 508 | LOCKDEP_SYS_EXIT |
@@ -675,12 +676,12 @@ syscall_fault: | |||
675 | END(syscall_fault) | 676 | END(syscall_fault) |
676 | 677 | ||
677 | syscall_badsys: | 678 | syscall_badsys: |
678 | movl $-ENOSYS,PT_EAX(%esp) | 679 | movl $-ENOSYS,%eax |
679 | jmp syscall_exit | 680 | jmp syscall_after_call |
680 | END(syscall_badsys) | 681 | END(syscall_badsys) |
681 | 682 | ||
682 | sysenter_badsys: | 683 | sysenter_badsys: |
683 | movl $-ENOSYS,PT_EAX(%esp) | 684 | movl $-ENOSYS,%eax |
684 | jmp sysenter_after_call | 685 | jmp sysenter_after_call |
685 | END(syscall_badsys) | 686 | END(syscall_badsys) |
686 | CFI_ENDPROC | 687 | CFI_ENDPROC |
@@ -1058,9 +1059,6 @@ ENTRY(mcount) | |||
1058 | END(mcount) | 1059 | END(mcount) |
1059 | 1060 | ||
1060 | ENTRY(ftrace_caller) | 1061 | ENTRY(ftrace_caller) |
1061 | cmpl $0, function_trace_stop | ||
1062 | jne ftrace_stub | ||
1063 | |||
1064 | pushl %eax | 1062 | pushl %eax |
1065 | pushl %ecx | 1063 | pushl %ecx |
1066 | pushl %edx | 1064 | pushl %edx |
@@ -1092,8 +1090,6 @@ END(ftrace_caller) | |||
1092 | 1090 | ||
1093 | ENTRY(ftrace_regs_caller) | 1091 | ENTRY(ftrace_regs_caller) |
1094 | pushf /* push flags before compare (in cs location) */ | 1092 | pushf /* push flags before compare (in cs location) */ |
1095 | cmpl $0, function_trace_stop | ||
1096 | jne ftrace_restore_flags | ||
1097 | 1093 | ||
1098 | /* | 1094 | /* |
1099 | * i386 does not save SS and ESP when coming from kernel. | 1095 | * i386 does not save SS and ESP when coming from kernel. |
@@ -1152,7 +1148,6 @@ GLOBAL(ftrace_regs_call) | |||
1152 | popf /* Pop flags at end (no addl to corrupt flags) */ | 1148 | popf /* Pop flags at end (no addl to corrupt flags) */ |
1153 | jmp ftrace_ret | 1149 | jmp ftrace_ret |
1154 | 1150 | ||
1155 | ftrace_restore_flags: | ||
1156 | popf | 1151 | popf |
1157 | jmp ftrace_stub | 1152 | jmp ftrace_stub |
1158 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | 1153 | #else /* ! CONFIG_DYNAMIC_FTRACE */ |
@@ -1161,9 +1156,6 @@ ENTRY(mcount) | |||
1161 | cmpl $__PAGE_OFFSET, %esp | 1156 | cmpl $__PAGE_OFFSET, %esp |
1162 | jb ftrace_stub /* Paging not enabled yet? */ | 1157 | jb ftrace_stub /* Paging not enabled yet? */ |
1163 | 1158 | ||
1164 | cmpl $0, function_trace_stop | ||
1165 | jne ftrace_stub | ||
1166 | |||
1167 | cmpl $ftrace_stub, ftrace_trace_function | 1159 | cmpl $ftrace_stub, ftrace_trace_function |
1168 | jnz trace | 1160 | jnz trace |
1169 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 1161 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
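The badsys changes above reduce two stores of the return value to one: instead of writing -ENOSYS straight into PT_EAX and jumping past the common store, the error path now loads -ENOSYS into %eax and falls through the same `movl %eax,PT_EAX(%esp)` as a normal syscall. A C-shaped analogy of the fixed control flow, with toy names standing in for the assembly labels:

    #include <stdio.h>

    #define NR_SYSCALLS 2
    #define ENOSYS 38

    static long sys_a(void) { return 11; }
    static long sys_b(void) { return 22; }
    static long (*const table[NR_SYSCALLS])(void) = { sys_a, sys_b };

    static long saved_eax;   /* plays the role of PT_EAX on the stack */

    static void dispatch(unsigned nr)
    {
        long ax;                   /* plays the role of %eax */

        if (nr >= NR_SYSCALLS)
            ax = -ENOSYS;          /* badsys: load error, fall through */
        else
            ax = table[nr]();      /* normal syscall */

        saved_eax = ax;            /* single store site (the *_after_call label) */
    }

    int main(void)
    {
        dispatch(1);  printf("%ld\n", saved_eax);
        dispatch(99); printf("%ld\n", saved_eax);
        return 0;
    }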
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index b25ca969edd2..2fac1343a90b 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -207,7 +207,6 @@ ENDPROC(native_usergs_sysret64) | |||
207 | */ | 207 | */ |
208 | .macro XCPT_FRAME start=1 offset=0 | 208 | .macro XCPT_FRAME start=1 offset=0 |
209 | INTR_FRAME \start, RIP+\offset-ORIG_RAX | 209 | INTR_FRAME \start, RIP+\offset-ORIG_RAX |
210 | /*CFI_REL_OFFSET orig_rax, ORIG_RAX-ORIG_RAX*/ | ||
211 | .endm | 210 | .endm |
212 | 211 | ||
213 | /* | 212 | /* |
@@ -287,21 +286,21 @@ ENDPROC(native_usergs_sysret64) | |||
287 | ENTRY(save_paranoid) | 286 | ENTRY(save_paranoid) |
288 | XCPT_FRAME 1 RDI+8 | 287 | XCPT_FRAME 1 RDI+8 |
289 | cld | 288 | cld |
290 | movq_cfi rdi, RDI+8 | 289 | movq %rdi, RDI+8(%rsp) |
291 | movq_cfi rsi, RSI+8 | 290 | movq %rsi, RSI+8(%rsp) |
292 | movq_cfi rdx, RDX+8 | 291 | movq_cfi rdx, RDX+8 |
293 | movq_cfi rcx, RCX+8 | 292 | movq_cfi rcx, RCX+8 |
294 | movq_cfi rax, RAX+8 | 293 | movq_cfi rax, RAX+8 |
295 | movq_cfi r8, R8+8 | 294 | movq %r8, R8+8(%rsp) |
296 | movq_cfi r9, R9+8 | 295 | movq %r9, R9+8(%rsp) |
297 | movq_cfi r10, R10+8 | 296 | movq %r10, R10+8(%rsp) |
298 | movq_cfi r11, R11+8 | 297 | movq %r11, R11+8(%rsp) |
299 | movq_cfi rbx, RBX+8 | 298 | movq_cfi rbx, RBX+8 |
300 | movq_cfi rbp, RBP+8 | 299 | movq %rbp, RBP+8(%rsp) |
301 | movq_cfi r12, R12+8 | 300 | movq %r12, R12+8(%rsp) |
302 | movq_cfi r13, R13+8 | 301 | movq %r13, R13+8(%rsp) |
303 | movq_cfi r14, R14+8 | 302 | movq %r14, R14+8(%rsp) |
304 | movq_cfi r15, R15+8 | 303 | movq %r15, R15+8(%rsp) |
305 | movl $1,%ebx | 304 | movl $1,%ebx |
306 | movl $MSR_GS_BASE,%ecx | 305 | movl $MSR_GS_BASE,%ecx |
307 | rdmsr | 306 | rdmsr |
@@ -830,27 +829,24 @@ restore_args: | |||
830 | RESTORE_ARGS 1,8,1 | 829 | RESTORE_ARGS 1,8,1 |
831 | 830 | ||
832 | irq_return: | 831 | irq_return: |
832 | INTERRUPT_RETURN | ||
833 | |||
834 | ENTRY(native_iret) | ||
833 | /* | 835 | /* |
834 | * Are we returning to a stack segment from the LDT? Note: in | 836 | * Are we returning to a stack segment from the LDT? Note: in |
835 | * 64-bit mode SS:RSP on the exception stack is always valid. | 837 | * 64-bit mode SS:RSP on the exception stack is always valid. |
836 | */ | 838 | */ |
837 | #ifdef CONFIG_X86_ESPFIX64 | 839 | #ifdef CONFIG_X86_ESPFIX64 |
838 | testb $4,(SS-RIP)(%rsp) | 840 | testb $4,(SS-RIP)(%rsp) |
839 | jnz irq_return_ldt | 841 | jnz native_irq_return_ldt |
840 | #endif | 842 | #endif |
841 | 843 | ||
842 | irq_return_iret: | 844 | native_irq_return_iret: |
843 | INTERRUPT_RETURN | ||
844 | _ASM_EXTABLE(irq_return_iret, bad_iret) | ||
845 | |||
846 | #ifdef CONFIG_PARAVIRT | ||
847 | ENTRY(native_iret) | ||
848 | iretq | 845 | iretq |
849 | _ASM_EXTABLE(native_iret, bad_iret) | 846 | _ASM_EXTABLE(native_irq_return_iret, bad_iret) |
850 | #endif | ||
851 | 847 | ||
852 | #ifdef CONFIG_X86_ESPFIX64 | 848 | #ifdef CONFIG_X86_ESPFIX64 |
853 | irq_return_ldt: | 849 | native_irq_return_ldt: |
854 | pushq_cfi %rax | 850 | pushq_cfi %rax |
855 | pushq_cfi %rdi | 851 | pushq_cfi %rdi |
856 | SWAPGS | 852 | SWAPGS |
@@ -872,7 +868,7 @@ irq_return_ldt: | |||
872 | SWAPGS | 868 | SWAPGS |
873 | movq %rax,%rsp | 869 | movq %rax,%rsp |
874 | popq_cfi %rax | 870 | popq_cfi %rax |
875 | jmp irq_return_iret | 871 | jmp native_irq_return_iret |
876 | #endif | 872 | #endif |
877 | 873 | ||
878 | .section .fixup,"ax" | 874 | .section .fixup,"ax" |
@@ -956,13 +952,8 @@ __do_double_fault: | |||
956 | cmpl $__KERNEL_CS,CS(%rdi) | 952 | cmpl $__KERNEL_CS,CS(%rdi) |
957 | jne do_double_fault | 953 | jne do_double_fault |
958 | movq RIP(%rdi),%rax | 954 | movq RIP(%rdi),%rax |
959 | cmpq $irq_return_iret,%rax | 955 | cmpq $native_irq_return_iret,%rax |
960 | #ifdef CONFIG_PARAVIRT | ||
961 | je 1f | ||
962 | cmpq $native_iret,%rax | ||
963 | #endif | ||
964 | jne do_double_fault /* This shouldn't happen... */ | 956 | jne do_double_fault /* This shouldn't happen... */ |
965 | 1: | ||
966 | movq PER_CPU_VAR(kernel_stack),%rax | 957 | movq PER_CPU_VAR(kernel_stack),%rax |
967 | subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */ | 958 | subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */ |
968 | movq %rax,RSP(%rdi) | 959 | movq %rax,RSP(%rdi) |
@@ -1395,21 +1386,21 @@ ENTRY(error_entry) | |||
1395 | CFI_ADJUST_CFA_OFFSET 15*8 | 1386 | CFI_ADJUST_CFA_OFFSET 15*8 |
1396 | /* oldrax contains error code */ | 1387 | /* oldrax contains error code */ |
1397 | cld | 1388 | cld |
1398 | movq_cfi rdi, RDI+8 | 1389 | movq %rdi, RDI+8(%rsp) |
1399 | movq_cfi rsi, RSI+8 | 1390 | movq %rsi, RSI+8(%rsp) |
1400 | movq_cfi rdx, RDX+8 | 1391 | movq %rdx, RDX+8(%rsp) |
1401 | movq_cfi rcx, RCX+8 | 1392 | movq %rcx, RCX+8(%rsp) |
1402 | movq_cfi rax, RAX+8 | 1393 | movq %rax, RAX+8(%rsp) |
1403 | movq_cfi r8, R8+8 | 1394 | movq %r8, R8+8(%rsp) |
1404 | movq_cfi r9, R9+8 | 1395 | movq %r9, R9+8(%rsp) |
1405 | movq_cfi r10, R10+8 | 1396 | movq %r10, R10+8(%rsp) |
1406 | movq_cfi r11, R11+8 | 1397 | movq %r11, R11+8(%rsp) |
1407 | movq_cfi rbx, RBX+8 | 1398 | movq_cfi rbx, RBX+8 |
1408 | movq_cfi rbp, RBP+8 | 1399 | movq %rbp, RBP+8(%rsp) |
1409 | movq_cfi r12, R12+8 | 1400 | movq %r12, R12+8(%rsp) |
1410 | movq_cfi r13, R13+8 | 1401 | movq %r13, R13+8(%rsp) |
1411 | movq_cfi r14, R14+8 | 1402 | movq %r14, R14+8(%rsp) |
1412 | movq_cfi r15, R15+8 | 1403 | movq %r15, R15+8(%rsp) |
1413 | xorl %ebx,%ebx | 1404 | xorl %ebx,%ebx |
1414 | testl $3,CS+8(%rsp) | 1405 | testl $3,CS+8(%rsp) |
1415 | je error_kernelspace | 1406 | je error_kernelspace |
@@ -1427,8 +1418,9 @@ error_sti: | |||
1427 | * compat mode. Check for these here too. | 1418 | * compat mode. Check for these here too. |
1428 | */ | 1419 | */ |
1429 | error_kernelspace: | 1420 | error_kernelspace: |
1421 | CFI_REL_OFFSET rcx, RCX+8 | ||
1430 | incl %ebx | 1422 | incl %ebx |
1431 | leaq irq_return_iret(%rip),%rcx | 1423 | leaq native_irq_return_iret(%rip),%rcx |
1432 | cmpq %rcx,RIP+8(%rsp) | 1424 | cmpq %rcx,RIP+8(%rsp) |
1433 | je error_swapgs | 1425 | je error_swapgs |
1434 | movl %ecx,%eax /* zero extend */ | 1426 | movl %ecx,%eax /* zero extend */ |
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index cbc4a91b131e..3386dc9aa333 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c | |||
@@ -703,6 +703,9 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
703 | unsigned long return_hooker = (unsigned long) | 703 | unsigned long return_hooker = (unsigned long) |
704 | &return_to_handler; | 704 | &return_to_handler; |
705 | 705 | ||
706 | if (unlikely(ftrace_graph_is_dead())) | ||
707 | return; | ||
708 | |||
706 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 709 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
707 | return; | 710 | return; |
708 | 711 | ||
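
The new check makes prepare_ftrace_return() bail out first on the global "graph tracing is dead" condition, before the per-task pause test. A minimal sketch of that guard ordering, with hypothetical helper names standing in for the kernel's:

/* Sketch: guard ordering in a return-hooking entry callback. */
static int graph_dead;                    /* set once after a fatal tracer error */
static int task_paused;                   /* stands in for tracing_graph_pause */

static void hook_return_address(unsigned long *parent, unsigned long hooker)
{
	*parent = hooker;                 /* redirect the return to the trampoline */
}

void sketch_prepare_return(unsigned long *parent, unsigned long hooker)
{
	if (graph_dead)                   /* global kill switch: checked first */
		return;
	if (task_paused)                  /* per-task pause, as in the hunk above */
		return;
	hook_return_address(parent, hooker);
}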
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 7596df664901..67e6d19ef1be 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c | |||
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs) | |||
574 | struct kprobe *p; | 574 | struct kprobe *p; |
575 | struct kprobe_ctlblk *kcb; | 575 | struct kprobe_ctlblk *kcb; |
576 | 576 | ||
577 | if (user_mode_vm(regs)) | ||
578 | return 0; | ||
579 | |||
577 | addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); | 580 | addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t)); |
578 | /* | 581 | /* |
579 | * We don't want to be preempted for the entire | 582 | * We don't want to be preempted for the entire |
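
The added user_mode_vm() test lets kprobe_int3_handler() reject int3 traps raised in user space before it touches any per-CPU kprobe state; kprobes only ever plant breakpoints in kernel text. A hedged sketch of the pattern (the types and the helper are illustrative, not the kernel's):

#include <stdbool.h>

struct regs { unsigned long ip; unsigned long cs; };

/* Illustrative stand-in for user_mode_vm(): CPL 3 means user space. */
static bool came_from_user(const struct regs *regs)
{
	return (regs->cs & 3) == 3;
}

/* Returns nonzero only if the trap was consumed by the probe layer. */
int sketch_int3_handler(struct regs *regs)
{
	if (came_from_user(regs))
		return 0;   /* not ours: let the normal #BP handling run */
	/* ...kernel-space probe lookup and dispatch would go here... */
	return 1;
}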
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S index c050a0153168..c73aecf10d34 100644 --- a/arch/x86/kernel/mcount_64.S +++ b/arch/x86/kernel/mcount_64.S | |||
@@ -46,10 +46,6 @@ END(function_hook) | |||
46 | .endm | 46 | .endm |
47 | 47 | ||
48 | ENTRY(ftrace_caller) | 48 | ENTRY(ftrace_caller) |
49 | /* Check if tracing was disabled (quick check) */ | ||
50 | cmpl $0, function_trace_stop | ||
51 | jne ftrace_stub | ||
52 | |||
53 | ftrace_caller_setup | 49 | ftrace_caller_setup |
54 | /* regs go into 4th parameter (but make it NULL) */ | 50 | /* regs go into 4th parameter (but make it NULL) */ |
55 | movq $0, %rcx | 51 | movq $0, %rcx |
@@ -73,10 +69,6 @@ ENTRY(ftrace_regs_caller) | |||
73 | /* Save the current flags before compare (in SS location)*/ | 69 | /* Save the current flags before compare (in SS location)*/ |
74 | pushfq | 70 | pushfq |
75 | 71 | ||
76 | /* Check if tracing was disabled (quick check) */ | ||
77 | cmpl $0, function_trace_stop | ||
78 | jne ftrace_restore_flags | ||
79 | |||
80 | /* skip=8 to skip flags saved in SS */ | 72 | /* skip=8 to skip flags saved in SS */ |
81 | ftrace_caller_setup 8 | 73 | ftrace_caller_setup 8 |
82 | 74 | ||
@@ -131,7 +123,7 @@ GLOBAL(ftrace_regs_call) | |||
131 | popfq | 123 | popfq |
132 | 124 | ||
133 | jmp ftrace_return | 125 | jmp ftrace_return |
134 | ftrace_restore_flags: | 126 | |
135 | popfq | 127 | popfq |
136 | jmp ftrace_stub | 128 | jmp ftrace_stub |
137 | 129 | ||
@@ -141,9 +133,6 @@ END(ftrace_regs_caller) | |||
141 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | 133 | #else /* ! CONFIG_DYNAMIC_FTRACE */ |
142 | 134 | ||
143 | ENTRY(function_hook) | 135 | ENTRY(function_hook) |
144 | cmpl $0, function_trace_stop | ||
145 | jne ftrace_stub | ||
146 | |||
147 | cmpq $ftrace_stub, ftrace_trace_function | 136 | cmpq $ftrace_stub, ftrace_trace_function |
148 | jnz trace | 137 | jnz trace |
149 | 138 | ||
diff --git a/arch/x86/kernel/paravirt_patch_64.c b/arch/x86/kernel/paravirt_patch_64.c index 3f08f34f93eb..a1da6737ba5b 100644 --- a/arch/x86/kernel/paravirt_patch_64.c +++ b/arch/x86/kernel/paravirt_patch_64.c | |||
@@ -6,7 +6,6 @@ DEF_NATIVE(pv_irq_ops, irq_disable, "cli"); | |||
6 | DEF_NATIVE(pv_irq_ops, irq_enable, "sti"); | 6 | DEF_NATIVE(pv_irq_ops, irq_enable, "sti"); |
7 | DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq"); | 7 | DEF_NATIVE(pv_irq_ops, restore_fl, "pushq %rdi; popfq"); |
8 | DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); | 8 | DEF_NATIVE(pv_irq_ops, save_fl, "pushfq; popq %rax"); |
9 | DEF_NATIVE(pv_cpu_ops, iret, "iretq"); | ||
10 | DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); | 9 | DEF_NATIVE(pv_mmu_ops, read_cr2, "movq %cr2, %rax"); |
11 | DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); | 10 | DEF_NATIVE(pv_mmu_ops, read_cr3, "movq %cr3, %rax"); |
12 | DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); | 11 | DEF_NATIVE(pv_mmu_ops, write_cr3, "movq %rdi, %cr3"); |
@@ -50,7 +49,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf, | |||
50 | PATCH_SITE(pv_irq_ops, save_fl); | 49 | PATCH_SITE(pv_irq_ops, save_fl); |
51 | PATCH_SITE(pv_irq_ops, irq_enable); | 50 | PATCH_SITE(pv_irq_ops, irq_enable); |
52 | PATCH_SITE(pv_irq_ops, irq_disable); | 51 | PATCH_SITE(pv_irq_ops, irq_disable); |
53 | PATCH_SITE(pv_cpu_ops, iret); | ||
54 | PATCH_SITE(pv_cpu_ops, irq_enable_sysexit); | 52 | PATCH_SITE(pv_cpu_ops, irq_enable_sysexit); |
55 | PATCH_SITE(pv_cpu_ops, usergs_sysret32); | 53 | PATCH_SITE(pv_cpu_ops, usergs_sysret32); |
56 | PATCH_SITE(pv_cpu_ops, usergs_sysret64); | 54 | PATCH_SITE(pv_cpu_ops, usergs_sysret64); |
diff --git a/arch/x86/kernel/pmc_atom.c b/arch/x86/kernel/pmc_atom.c new file mode 100644 index 000000000000..0c424a67985d --- /dev/null +++ b/arch/x86/kernel/pmc_atom.c | |||
@@ -0,0 +1,321 @@ | |||
1 | /* | ||
2 | * Intel Atom SOC Power Management Controller Driver | ||
3 | * Copyright (c) 2014, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/pci.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <linux/debugfs.h> | ||
23 | #include <linux/seq_file.h> | ||
24 | #include <linux/io.h> | ||
25 | |||
26 | #include <asm/pmc_atom.h> | ||
27 | |||
28 | #define DRIVER_NAME KBUILD_MODNAME | ||
29 | |||
30 | struct pmc_dev { | ||
31 | u32 base_addr; | ||
32 | void __iomem *regmap; | ||
33 | #ifdef CONFIG_DEBUG_FS | ||
34 | struct dentry *dbgfs_dir; | ||
35 | #endif /* CONFIG_DEBUG_FS */ | ||
36 | }; | ||
37 | |||
38 | static struct pmc_dev pmc_device; | ||
39 | static u32 acpi_base_addr; | ||
40 | |||
41 | struct pmc_dev_map { | ||
42 | const char *name; | ||
43 | u32 bit_mask; | ||
44 | }; | ||
45 | |||
46 | static const struct pmc_dev_map dev_map[] = { | ||
47 | {"0 - LPSS1_F0_DMA", BIT_LPSS1_F0_DMA}, | ||
48 | {"1 - LPSS1_F1_PWM1", BIT_LPSS1_F1_PWM1}, | ||
49 | {"2 - LPSS1_F2_PWM2", BIT_LPSS1_F2_PWM2}, | ||
50 | {"3 - LPSS1_F3_HSUART1", BIT_LPSS1_F3_HSUART1}, | ||
51 | {"4 - LPSS1_F4_HSUART2", BIT_LPSS1_F4_HSUART2}, | ||
52 | {"5 - LPSS1_F5_SPI", BIT_LPSS1_F5_SPI}, | ||
53 | {"6 - LPSS1_F6_Reserved", BIT_LPSS1_F6_XXX}, | ||
54 | {"7 - LPSS1_F7_Reserved", BIT_LPSS1_F7_XXX}, | ||
55 | {"8 - SCC_EMMC", BIT_SCC_EMMC}, | ||
56 | {"9 - SCC_SDIO", BIT_SCC_SDIO}, | ||
57 | {"10 - SCC_SDCARD", BIT_SCC_SDCARD}, | ||
58 | {"11 - SCC_MIPI", BIT_SCC_MIPI}, | ||
59 | {"12 - HDA", BIT_HDA}, | ||
60 | {"13 - LPE", BIT_LPE}, | ||
61 | {"14 - OTG", BIT_OTG}, | ||
62 | {"15 - USH", BIT_USH}, | ||
63 | {"16 - GBE", BIT_GBE}, | ||
64 | {"17 - SATA", BIT_SATA}, | ||
65 | {"18 - USB_EHCI", BIT_USB_EHCI}, | ||
66 | {"19 - SEC", BIT_SEC}, | ||
67 | {"20 - PCIE_PORT0", BIT_PCIE_PORT0}, | ||
68 | {"21 - PCIE_PORT1", BIT_PCIE_PORT1}, | ||
69 | {"22 - PCIE_PORT2", BIT_PCIE_PORT2}, | ||
70 | {"23 - PCIE_PORT3", BIT_PCIE_PORT3}, | ||
71 | {"24 - LPSS2_F0_DMA", BIT_LPSS2_F0_DMA}, | ||
72 | {"25 - LPSS2_F1_I2C1", BIT_LPSS2_F1_I2C1}, | ||
73 | {"26 - LPSS2_F2_I2C2", BIT_LPSS2_F2_I2C2}, | ||
74 | {"27 - LPSS2_F3_I2C3", BIT_LPSS2_F3_I2C3}, | ||
75 | {"28 - LPSS2_F3_I2C4", BIT_LPSS2_F4_I2C4}, | ||
76 | {"29 - LPSS2_F5_I2C5", BIT_LPSS2_F5_I2C5}, | ||
77 | {"30 - LPSS2_F6_I2C6", BIT_LPSS2_F6_I2C6}, | ||
78 | {"31 - LPSS2_F7_I2C7", BIT_LPSS2_F7_I2C7}, | ||
79 | {"32 - SMB", BIT_SMB}, | ||
80 | {"33 - OTG_SS_PHY", BIT_OTG_SS_PHY}, | ||
81 | {"34 - USH_SS_PHY", BIT_USH_SS_PHY}, | ||
82 | {"35 - DFX", BIT_DFX}, | ||
83 | }; | ||
84 | |||
85 | static inline u32 pmc_reg_read(struct pmc_dev *pmc, int reg_offset) | ||
86 | { | ||
87 | return readl(pmc->regmap + reg_offset); | ||
88 | } | ||
89 | |||
90 | static inline void pmc_reg_write(struct pmc_dev *pmc, int reg_offset, u32 val) | ||
91 | { | ||
92 | writel(val, pmc->regmap + reg_offset); | ||
93 | } | ||
94 | |||
95 | static void pmc_power_off(void) | ||
96 | { | ||
97 | u16 pm1_cnt_port; | ||
98 | u32 pm1_cnt_value; | ||
99 | |||
100 | pr_info("Preparing to enter system sleep state S5\n"); | ||
101 | |||
102 | pm1_cnt_port = acpi_base_addr + PM1_CNT; | ||
103 | |||
104 | pm1_cnt_value = inl(pm1_cnt_port); | ||
105 | pm1_cnt_value &= SLEEP_TYPE_MASK; | ||
106 | pm1_cnt_value |= SLEEP_TYPE_S5; | ||
107 | pm1_cnt_value |= SLEEP_ENABLE; | ||
108 | |||
109 | outl(pm1_cnt_value, pm1_cnt_port); | ||
110 | } | ||
111 | |||
112 | static void pmc_hw_reg_setup(struct pmc_dev *pmc) | ||
113 | { | ||
114 | /* | ||
115 | * Disable PMC S0IX_WAKE_EN events coming from: | ||
116 | * - LPC clock run | ||
117 | * - GPIO_SUS ORed dedicated IRQs | ||
118 | * - GPIO_SCORE ORed dedicated IRQs | ||
119 | * - GPIO_SUS shared IRQ | ||
120 | * - GPIO_SCORE shared IRQ | ||
121 | */ | ||
122 | pmc_reg_write(pmc, PMC_S0IX_WAKE_EN, (u32)PMC_WAKE_EN_SETTING); | ||
123 | } | ||
124 | |||
125 | #ifdef CONFIG_DEBUG_FS | ||
126 | static int pmc_dev_state_show(struct seq_file *s, void *unused) | ||
127 | { | ||
128 | struct pmc_dev *pmc = s->private; | ||
129 | u32 func_dis, func_dis_2, func_dis_index; | ||
130 | u32 d3_sts_0, d3_sts_1, d3_sts_index; | ||
131 | int dev_num, dev_index, reg_index; | ||
132 | |||
133 | func_dis = pmc_reg_read(pmc, PMC_FUNC_DIS); | ||
134 | func_dis_2 = pmc_reg_read(pmc, PMC_FUNC_DIS_2); | ||
135 | d3_sts_0 = pmc_reg_read(pmc, PMC_D3_STS_0); | ||
136 | d3_sts_1 = pmc_reg_read(pmc, PMC_D3_STS_1); | ||
137 | |||
138 | dev_num = ARRAY_SIZE(dev_map); | ||
139 | |||
140 | for (dev_index = 0; dev_index < dev_num; dev_index++) { | ||
141 | reg_index = dev_index / PMC_REG_BIT_WIDTH; | ||
142 | if (reg_index) { | ||
143 | func_dis_index = func_dis_2; | ||
144 | d3_sts_index = d3_sts_1; | ||
145 | } else { | ||
146 | func_dis_index = func_dis; | ||
147 | d3_sts_index = d3_sts_0; | ||
148 | } | ||
149 | |||
150 | seq_printf(s, "Dev: %-32s\tState: %s [%s]\n", | ||
151 | dev_map[dev_index].name, | ||
152 | dev_map[dev_index].bit_mask & func_dis_index ? | ||
153 | "Disabled" : "Enabled ", | ||
154 | dev_map[dev_index].bit_mask & d3_sts_index ? | ||
155 | "D3" : "D0"); | ||
156 | } | ||
157 | return 0; | ||
158 | } | ||
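
The reg_index test above splits the 36 entries of dev_map across two register pairs: indices below PMC_REG_BIT_WIDTH are checked against FUNC_DIS/D3_STS_0, the rest against FUNC_DIS_2/D3_STS_1. Assuming PMC_REG_BIT_WIDTH is 32 (the natural width of these 32-bit registers), the mapping works out as in this sketch:

#include <stdio.h>

#define REG_BIT_WIDTH 32   /* assumed value of PMC_REG_BIT_WIDTH */

/* Which register bank and bit position a dev_map index selects. */
static void locate(int dev_index, int *bank, int *bit)
{
	*bank = dev_index / REG_BIT_WIDTH;   /* 0..31 -> bank 0, 32..35 -> bank 1 */
	*bit  = dev_index % REG_BIT_WIDTH;   /* bit inside that bank's registers */
}

int main(void)
{
	int bank, bit;

	locate(35, &bank, &bit);                 /* entry 35: "DFX" */
	printf("bank=%d bit=%d\n", bank, bit);   /* bank=1 bit=3: FUNC_DIS_2/D3_STS_1 */
	return 0;
}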
159 | |||
160 | static int pmc_dev_state_open(struct inode *inode, struct file *file) | ||
161 | { | ||
162 | return single_open(file, pmc_dev_state_show, inode->i_private); | ||
163 | } | ||
164 | |||
165 | static const struct file_operations pmc_dev_state_ops = { | ||
166 | .open = pmc_dev_state_open, | ||
167 | .read = seq_read, | ||
168 | .llseek = seq_lseek, | ||
169 | .release = single_release, | ||
170 | }; | ||
171 | |||
172 | static int pmc_sleep_tmr_show(struct seq_file *s, void *unused) | ||
173 | { | ||
174 | struct pmc_dev *pmc = s->private; | ||
175 | u64 s0ir_tmr, s0i1_tmr, s0i2_tmr, s0i3_tmr, s0_tmr; | ||
176 | |||
177 | s0ir_tmr = (u64)pmc_reg_read(pmc, PMC_S0IR_TMR) << PMC_TMR_SHIFT; | ||
178 | s0i1_tmr = (u64)pmc_reg_read(pmc, PMC_S0I1_TMR) << PMC_TMR_SHIFT; | ||
179 | s0i2_tmr = (u64)pmc_reg_read(pmc, PMC_S0I2_TMR) << PMC_TMR_SHIFT; | ||
180 | s0i3_tmr = (u64)pmc_reg_read(pmc, PMC_S0I3_TMR) << PMC_TMR_SHIFT; | ||
181 | s0_tmr = (u64)pmc_reg_read(pmc, PMC_S0_TMR) << PMC_TMR_SHIFT; | ||
182 | |||
183 | seq_printf(s, "S0IR Residency:\t%lldus\n", s0ir_tmr); | ||
184 | seq_printf(s, "S0I1 Residency:\t%lldus\n", s0i1_tmr); | ||
185 | seq_printf(s, "S0I2 Residency:\t%lldus\n", s0i2_tmr); | ||
186 | seq_printf(s, "S0I3 Residency:\t%lldus\n", s0i3_tmr); | ||
187 | seq_printf(s, "S0 Residency:\t%lldus\n", s0_tmr); | ||
188 | return 0; | ||
189 | } | ||
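
Each residency counter is widened to u64 and shifted left by PMC_TMR_SHIFT before being printed, i.e. one hardware tick stands for 2^PMC_TMR_SHIFT microseconds; widening first keeps the shift from overflowing 32 bits. A worked sketch (the shift value is an assumption for illustration only):

#include <stdint.h>
#include <stdio.h>

#define TMR_SHIFT 3   /* assumed PMC_TMR_SHIFT: one tick == 8 us */

int main(void)
{
	uint32_t raw = 125000;                     /* hypothetical S0I3 counter */
	uint64_t us  = (uint64_t)raw << TMR_SHIFT; /* widen first, then scale */

	printf("S0I3 Residency:\t%lluus\n", (unsigned long long)us); /* 1000000us */
	return 0;
}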
190 | |||
191 | static int pmc_sleep_tmr_open(struct inode *inode, struct file *file) | ||
192 | { | ||
193 | return single_open(file, pmc_sleep_tmr_show, inode->i_private); | ||
194 | } | ||
195 | |||
196 | static const struct file_operations pmc_sleep_tmr_ops = { | ||
197 | .open = pmc_sleep_tmr_open, | ||
198 | .read = seq_read, | ||
199 | .llseek = seq_lseek, | ||
200 | .release = single_release, | ||
201 | }; | ||
202 | |||
203 | static void pmc_dbgfs_unregister(struct pmc_dev *pmc) | ||
204 | { | ||
205 | if (!pmc->dbgfs_dir) | ||
206 | return; | ||
207 | |||
208 | debugfs_remove_recursive(pmc->dbgfs_dir); | ||
209 | pmc->dbgfs_dir = NULL; | ||
210 | } | ||
211 | |||
212 | static int pmc_dbgfs_register(struct pmc_dev *pmc, struct pci_dev *pdev) | ||
213 | { | ||
214 | struct dentry *dir, *f; | ||
215 | |||
216 | dir = debugfs_create_dir("pmc_atom", NULL); | ||
217 | if (!dir) | ||
218 | return -ENOMEM; | ||
219 | |||
220 | f = debugfs_create_file("dev_state", S_IFREG | S_IRUGO, | ||
221 | dir, pmc, &pmc_dev_state_ops); | ||
222 | if (!f) { | ||
223 | dev_err(&pdev->dev, "dev_states register failed\n"); | ||
224 | goto err; | ||
225 | } | ||
226 | f = debugfs_create_file("sleep_state", S_IFREG | S_IRUGO, | ||
227 | dir, pmc, &pmc_sleep_tmr_ops); | ||
228 | if (!f) { | ||
229 | dev_err(&pdev->dev, "sleep_state register failed\n"); | ||
230 | goto err; | ||
231 | } | ||
232 | pmc->dbgfs_dir = dir; | ||
233 | return 0; | ||
234 | err: | ||
235 | pmc_dbgfs_unregister(pmc); | ||
236 | return -ENODEV; | ||
237 | } | ||
238 | #endif /* CONFIG_DEBUG_FS */ | ||
239 | |||
240 | static int pmc_setup_dev(struct pci_dev *pdev) | ||
241 | { | ||
242 | struct pmc_dev *pmc = &pmc_device; | ||
243 | int ret; | ||
244 | |||
245 | /* Obtain ACPI base address */ | ||
246 | pci_read_config_dword(pdev, ACPI_BASE_ADDR_OFFSET, &acpi_base_addr); | ||
247 | acpi_base_addr &= ACPI_BASE_ADDR_MASK; | ||
248 | |||
249 | /* Install power off function */ | ||
250 | if (acpi_base_addr != 0 && pm_power_off == NULL) | ||
251 | pm_power_off = pmc_power_off; | ||
252 | |||
253 | pci_read_config_dword(pdev, PMC_BASE_ADDR_OFFSET, &pmc->base_addr); | ||
254 | pmc->base_addr &= PMC_BASE_ADDR_MASK; | ||
255 | |||
256 | pmc->regmap = ioremap_nocache(pmc->base_addr, PMC_MMIO_REG_LEN); | ||
257 | if (!pmc->regmap) { | ||
258 | dev_err(&pdev->dev, "error: ioremap failed\n"); | ||
259 | return -ENOMEM; | ||
260 | } | ||
261 | |||
262 | /* PMC hardware registers setup */ | ||
263 | pmc_hw_reg_setup(pmc); | ||
264 | |||
265 | #ifdef CONFIG_DEBUG_FS | ||
266 | ret = pmc_dbgfs_register(pmc, pdev); | ||
267 | if (ret) { | ||
268 | iounmap(pmc->regmap); | ||
269 | return ret; | ||
270 | } | ||
271 | #endif /* CONFIG_DEBUG_FS */ | ||
272 | return 0; | ||
273 | } | ||
274 | |||
275 | /* | ||
276 | * Data for PCI driver interface | ||
277 | * | ||
278 | * This data only exists for exporting the supported | ||
279 | * PCI ids via MODULE_DEVICE_TABLE. We do not actually | ||
280 | * register a pci_driver, because lpc_ich will register | ||
281 | * a driver on the same PCI id. | ||
282 | */ | ||
283 | static const struct pci_device_id pmc_pci_ids[] = { | ||
284 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_VLV_PMC) }, | ||
285 | { 0, }, | ||
286 | }; | ||
287 | |||
288 | MODULE_DEVICE_TABLE(pci, pmc_pci_ids); | ||
289 | |||
290 | static int __init pmc_atom_init(void) | ||
291 | { | ||
292 | int err = -ENODEV; | ||
293 | struct pci_dev *pdev = NULL; | ||
294 | const struct pci_device_id *ent; | ||
295 | |||
296 | /* We look for our device - the PCU PMC - and | ||
297 | * assume that there is at most one such device. | ||
298 | * | ||
299 | * We can't use the plain pci_driver mechanism: | ||
300 | * the device is really a multi-function device and | ||
301 | * the main driver that binds to the pci_device is | ||
302 | * lpc_ich, so we have to find and bind to it by hand. | ||
303 | */ | ||
304 | for_each_pci_dev(pdev) { | ||
305 | ent = pci_match_id(pmc_pci_ids, pdev); | ||
306 | if (ent) { | ||
307 | err = pmc_setup_dev(pdev); | ||
308 | goto out; | ||
309 | } | ||
310 | } | ||
311 | /* Device not found. */ | ||
312 | out: | ||
313 | return err; | ||
314 | } | ||
315 | |||
316 | module_init(pmc_atom_init); | ||
317 | /* no module_exit, this driver shouldn't be unloaded */ | ||
318 | |||
319 | MODULE_AUTHOR("Aubrey Li <aubrey.li@linux.intel.com>"); | ||
320 | MODULE_DESCRIPTION("Intel Atom SOC Power Management Controller Interface"); | ||
321 | MODULE_LICENSE("GPL v2"); | ||
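
Since lpc_ich owns the pci_driver for this ID, the file keeps pmc_pci_ids only so MODULE_DEVICE_TABLE can export it for module autoloading, and locates the function by walking the bus at init time. The discovery idiom, reduced to a sketch (for_each_pci_dev and pci_match_id are the real helpers; sketch_setup is a hypothetical hook):

#include <linux/module.h>
#include <linux/pci.h>

static int sketch_setup(struct pci_dev *pdev);   /* hypothetical */

static const struct pci_device_id sketch_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_VLV_PMC) },
	{ 0, },
};

static int __init sketch_init(void)
{
	struct pci_dev *pdev = NULL;

	for_each_pci_dev(pdev) {                   /* walks every PCI device */
		if (pci_match_id(sketch_ids, pdev))
			return sketch_setup(pdev); /* found: set it up */
	}
	return -ENODEV;                            /* device not present */
}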
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 52b1157c53eb..17962e667a91 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/mc146818rtc.h> | 28 | #include <linux/mc146818rtc.h> |
29 | #include <asm/realmode.h> | 29 | #include <asm/realmode.h> |
30 | #include <asm/x86_init.h> | 30 | #include <asm/x86_init.h> |
31 | #include <asm/efi.h> | ||
31 | 32 | ||
32 | /* | 33 | /* |
33 | * Power off function, if any | 34 | * Power off function, if any |
@@ -401,12 +402,25 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
401 | 402 | ||
402 | static int __init reboot_init(void) | 403 | static int __init reboot_init(void) |
403 | { | 404 | { |
405 | int rv; | ||
406 | |||
404 | /* | 407 | /* |
405 | * Only do the DMI check if reboot_type hasn't been overridden | 408 | * Only do the DMI check if reboot_type hasn't been overridden |
406 | * on the command line | 409 | * on the command line |
407 | */ | 410 | */ |
408 | if (reboot_default) | 411 | if (!reboot_default) |
409 | dmi_check_system(reboot_dmi_table); | 412 | return 0; |
413 | |||
414 | /* | ||
415 | * The DMI quirks table takes precedence. If no quirks entry | ||
416 | * matches and the ACPI Hardware Reduced bit is set, force EFI | ||
417 | * reboot. | ||
418 | */ | ||
419 | rv = dmi_check_system(reboot_dmi_table); | ||
420 | |||
421 | if (!rv && efi_reboot_required()) | ||
422 | reboot_type = BOOT_EFI; | ||
423 | |||
410 | return 0; | 424 | return 0; |
411 | } | 425 | } |
412 | core_initcall(reboot_init); | 426 | core_initcall(reboot_init); |
@@ -528,11 +542,7 @@ static void native_machine_emergency_restart(void) | |||
528 | break; | 542 | break; |
529 | 543 | ||
530 | case BOOT_EFI: | 544 | case BOOT_EFI: |
531 | if (efi_enabled(EFI_RUNTIME_SERVICES)) | 545 | efi_reboot(reboot_mode, NULL); |
532 | efi.reset_system(reboot_mode == REBOOT_WARM ? | ||
533 | EFI_RESET_WARM : | ||
534 | EFI_RESET_COLD, | ||
535 | EFI_SUCCESS, 0, NULL); | ||
536 | reboot_type = BOOT_BIOS; | 546 | reboot_type = BOOT_BIOS; |
537 | break; | 547 | break; |
538 | 548 | ||
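
The rewritten reboot_init() establishes an explicit precedence: a reboot= override on the command line wins outright, a matching DMI quirk wins next, and only then does an ACPI Hardware Reduced platform get forced to BOOT_EFI. Condensed into one readable flow (a sketch of the resulting logic, not a verbatim copy):

static int __init sketch_reboot_init(void)
{
	if (!reboot_default)                 /* reboot= was given: respect it */
		return 0;
	if (dmi_check_system(reboot_dmi_table))
		return 0;                    /* a DMI quirk chose the method */
	if (efi_reboot_required())           /* ACPI Hardware Reduced system */
		reboot_type = BOOT_EFI;
	return 0;
}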
diff --git a/arch/x86/kernel/resource.c b/arch/x86/kernel/resource.c index 2a26819bb6a8..80eab01c1a68 100644 --- a/arch/x86/kernel/resource.c +++ b/arch/x86/kernel/resource.c | |||
@@ -37,10 +37,12 @@ static void remove_e820_regions(struct resource *avail) | |||
37 | 37 | ||
38 | void arch_remove_reservations(struct resource *avail) | 38 | void arch_remove_reservations(struct resource *avail) |
39 | { | 39 | { |
40 | /* Trim out BIOS areas (low 1MB and high 2MB) and E820 regions */ | 40 | /* |
41 | * Trim out BIOS area (high 2MB) and E820 regions. We do not remove | ||
42 | * the low 1MB unconditionally, as this area is needed for some ISA | ||
43 | * cards requiring a memory range, e.g. the i82365 PCMCIA controller. | ||
44 | */ | ||
41 | if (avail->flags & IORESOURCE_MEM) { | 45 | if (avail->flags & IORESOURCE_MEM) { |
42 | if (avail->start < BIOS_END) | ||
43 | avail->start = BIOS_END; | ||
44 | resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END); | 46 | resource_clip(avail, BIOS_ROM_BASE, BIOS_ROM_END); |
45 | 47 | ||
46 | remove_e820_regions(avail); | 48 | remove_e820_regions(avail); |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 78a0e6298922..41ead8d3bc0b 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -924,10 +924,10 @@ void __init setup_arch(char **cmdline_p) | |||
924 | #endif | 924 | #endif |
925 | #ifdef CONFIG_EFI | 925 | #ifdef CONFIG_EFI |
926 | if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, | 926 | if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, |
927 | "EL32", 4)) { | 927 | EFI32_LOADER_SIGNATURE, 4)) { |
928 | set_bit(EFI_BOOT, &efi.flags); | 928 | set_bit(EFI_BOOT, &efi.flags); |
929 | } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, | 929 | } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature, |
930 | "EL64", 4)) { | 930 | EFI64_LOADER_SIGNATURE, 4)) { |
931 | set_bit(EFI_BOOT, &efi.flags); | 931 | set_bit(EFI_BOOT, &efi.flags); |
932 | set_bit(EFI_64BIT, &efi.flags); | 932 | set_bit(EFI_64BIT, &efi.flags); |
933 | } | 933 | } |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index ea030319b321..56b0c338061e 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -234,9 +234,6 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc) | |||
234 | return ns; | 234 | return ns; |
235 | } | 235 | } |
236 | 236 | ||
237 | /* XXX surely we already have this someplace in the kernel?! */ | ||
238 | #define DIV_ROUND(n, d) (((n) + ((d) / 2)) / (d)) | ||
239 | |||
240 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | 237 | static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) |
241 | { | 238 | { |
242 | unsigned long long tsc_now, ns_now; | 239 | unsigned long long tsc_now, ns_now; |
@@ -259,7 +256,9 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu) | |||
259 | * time function is continuous; see the comment near struct | 256 | * time function is continuous; see the comment near struct |
260 | * cyc2ns_data. | 257 | * cyc2ns_data. |
261 | */ | 258 | */ |
262 | data->cyc2ns_mul = DIV_ROUND(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, cpu_khz); | 259 | data->cyc2ns_mul = |
260 | DIV_ROUND_CLOSEST(NSEC_PER_MSEC << CYC2NS_SCALE_FACTOR, | ||
261 | cpu_khz); | ||
263 | data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; | 262 | data->cyc2ns_shift = CYC2NS_SCALE_FACTOR; |
264 | data->cyc2ns_offset = ns_now - | 263 | data->cyc2ns_offset = ns_now - |
265 | mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); | 264 | mul_u64_u32_shr(tsc_now, data->cyc2ns_mul, CYC2NS_SCALE_FACTOR); |
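
The one-off DIV_ROUND macro is replaced by DIV_ROUND_CLOSEST() from the generic headers; both divide, but the generic helper rounds to nearest instead of truncating, which cuts the worst-case error in the cycles-to-ns scale factor. Plugging in numbers (assuming CYC2NS_SCALE_FACTOR is 10, as in this file, and a hypothetical 2.5 GHz TSC):

#include <stdint.h>
#include <stdio.h>

#define SCALE 10                                 /* assumed CYC2NS_SCALE_FACTOR */
#define NSEC_PER_MSEC 1000000ULL
#define DIV_ROUND_CLOSEST(n, d) (((n) + (d) / 2) / (d))

int main(void)
{
	uint64_t khz = 2500000;                  /* hypothetical 2.5 GHz TSC */
	uint64_t trunc = (NSEC_PER_MSEC << SCALE) / khz;                 /* 409 */
	uint64_t round = DIV_ROUND_CLOSEST(NSEC_PER_MSEC << SCALE, khz); /* 410 */

	/* The exact ratio is 409.6.  Converting one second of cycles
	 * (2.5e9) gives 998535156 ns with 409 but 1000976562 ns with
	 * 410 -- rounding to nearest roughly halves the scale error. */
	printf("trunc=%llu closest=%llu\n",
	       (unsigned long long)trunc, (unsigned long long)round);
	return 0;
}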
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h index f9087315e0cd..a5380590ab0e 100644 --- a/arch/x86/kvm/cpuid.h +++ b/arch/x86/kvm/cpuid.h | |||
@@ -95,4 +95,12 @@ static inline bool guest_cpuid_has_gbpages(struct kvm_vcpu *vcpu) | |||
95 | best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); | 95 | best = kvm_find_cpuid_entry(vcpu, 0x80000001, 0); |
96 | return best && (best->edx & bit(X86_FEATURE_GBPAGES)); | 96 | return best && (best->edx & bit(X86_FEATURE_GBPAGES)); |
97 | } | 97 | } |
98 | |||
99 | static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu) | ||
100 | { | ||
101 | struct kvm_cpuid_entry2 *best; | ||
102 | |||
103 | best = kvm_find_cpuid_entry(vcpu, 7, 0); | ||
104 | return best && (best->ebx & bit(X86_FEATURE_RTM)); | ||
105 | } | ||
98 | #endif | 106 | #endif |
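
guest_cpuid_has_rtm() follows the same shape as the predicates above it: look up the guest's cached CPUID leaf (leaf 7, subleaf 0) and test one feature bit in the relevant output register. On bare metal the RTM bit is CPUID.(EAX=7,ECX=0):EBX[11]; a userspace sketch of the equivalent check, using the __get_cpuid_count helper from recent GCC/Clang cpuid.h:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int a, b, c, d;

	/* Leaf 7, subleaf 0: structured extended feature flags. */
	if (!__get_cpuid_count(7, 0, &a, &b, &c, &d))
		return 1;
	printf("RTM %ssupported\n", (b & (1u << 11)) ? "" : "not ");
	return 0;
}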
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index e4e833d3d7d7..56657b0bb3bb 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -162,6 +162,10 @@ | |||
162 | #define NoWrite ((u64)1 << 45) /* No writeback */ | 162 | #define NoWrite ((u64)1 << 45) /* No writeback */ |
163 | #define SrcWrite ((u64)1 << 46) /* Write back src operand */ | 163 | #define SrcWrite ((u64)1 << 46) /* Write back src operand */ |
164 | #define NoMod ((u64)1 << 47) /* Mod field is ignored */ | 164 | #define NoMod ((u64)1 << 47) /* Mod field is ignored */ |
165 | #define Intercept ((u64)1 << 48) /* Has valid intercept field */ | ||
166 | #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */ | ||
167 | #define NoBigReal ((u64)1 << 50) /* No big real mode */ | ||
168 | #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */ | ||
165 | 169 | ||
166 | #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) | 170 | #define DstXacc (DstAccLo | SrcAccHi | SrcWrite) |
167 | 171 | ||
@@ -426,6 +430,7 @@ static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, | |||
426 | .modrm_reg = ctxt->modrm_reg, | 430 | .modrm_reg = ctxt->modrm_reg, |
427 | .modrm_rm = ctxt->modrm_rm, | 431 | .modrm_rm = ctxt->modrm_rm, |
428 | .src_val = ctxt->src.val64, | 432 | .src_val = ctxt->src.val64, |
433 | .dst_val = ctxt->dst.val64, | ||
429 | .src_bytes = ctxt->src.bytes, | 434 | .src_bytes = ctxt->src.bytes, |
430 | .dst_bytes = ctxt->dst.bytes, | 435 | .dst_bytes = ctxt->dst.bytes, |
431 | .ad_bytes = ctxt->ad_bytes, | 436 | .ad_bytes = ctxt->ad_bytes, |
@@ -511,12 +516,6 @@ static u32 desc_limit_scaled(struct desc_struct *desc) | |||
511 | return desc->g ? (limit << 12) | 0xfff : limit; | 516 | return desc->g ? (limit << 12) | 0xfff : limit; |
512 | } | 517 | } |
513 | 518 | ||
514 | static void set_seg_override(struct x86_emulate_ctxt *ctxt, int seg) | ||
515 | { | ||
516 | ctxt->has_seg_override = true; | ||
517 | ctxt->seg_override = seg; | ||
518 | } | ||
519 | |||
520 | static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) | 519 | static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) |
521 | { | 520 | { |
522 | if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) | 521 | if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) |
@@ -525,14 +524,6 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg) | |||
525 | return ctxt->ops->get_cached_segment_base(ctxt, seg); | 524 | return ctxt->ops->get_cached_segment_base(ctxt, seg); |
526 | } | 525 | } |
527 | 526 | ||
528 | static unsigned seg_override(struct x86_emulate_ctxt *ctxt) | ||
529 | { | ||
530 | if (!ctxt->has_seg_override) | ||
531 | return 0; | ||
532 | |||
533 | return ctxt->seg_override; | ||
534 | } | ||
535 | |||
536 | static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, | 527 | static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, |
537 | u32 error, bool valid) | 528 | u32 error, bool valid) |
538 | { | 529 | { |
@@ -651,7 +642,12 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, | |||
651 | if (!fetch && (desc.type & 8) && !(desc.type & 2)) | 642 | if (!fetch && (desc.type & 8) && !(desc.type & 2)) |
652 | goto bad; | 643 | goto bad; |
653 | lim = desc_limit_scaled(&desc); | 644 | lim = desc_limit_scaled(&desc); |
654 | if ((desc.type & 8) || !(desc.type & 4)) { | 645 | if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch && |
646 | (ctxt->d & NoBigReal)) { | ||
647 | /* la is between zero and 0xffff */ | ||
648 | if (la > 0xffff || (u32)(la + size - 1) > 0xffff) | ||
649 | goto bad; | ||
650 | } else if ((desc.type & 8) || !(desc.type & 4)) { | ||
655 | /* expand-up segment */ | 651 | /* expand-up segment */ |
656 | if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) | 652 | if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) |
657 | goto bad; | 653 | goto bad; |
@@ -716,68 +712,71 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt, | |||
716 | } | 712 | } |
717 | 713 | ||
718 | /* | 714 | /* |
719 | * Fetch the next byte of the instruction being emulated which is pointed to | 715 | * Prefetch the remaining bytes of the instruction without crossing page |
720 | * by ctxt->_eip, then increment ctxt->_eip. | ||
721 | * | ||
722 | * Also prefetch the remaining bytes of the instruction without crossing page | ||
723 | * boundary if they are not in fetch_cache yet. | 716 | * boundary if they are not in fetch_cache yet. |
724 | */ | 717 | */ |
725 | static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest) | 718 | static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) |
726 | { | 719 | { |
727 | struct fetch_cache *fc = &ctxt->fetch; | ||
728 | int rc; | 720 | int rc; |
729 | int size, cur_size; | 721 | unsigned size; |
730 | 722 | unsigned long linear; | |
731 | if (ctxt->_eip == fc->end) { | 723 | int cur_size = ctxt->fetch.end - ctxt->fetch.data; |
732 | unsigned long linear; | 724 | struct segmented_address addr = { .seg = VCPU_SREG_CS, |
733 | struct segmented_address addr = { .seg = VCPU_SREG_CS, | 725 | .ea = ctxt->eip + cur_size }; |
734 | .ea = ctxt->_eip }; | 726 | |
735 | cur_size = fc->end - fc->start; | 727 | size = 15UL ^ cur_size; |
736 | size = min(15UL - cur_size, | 728 | rc = __linearize(ctxt, addr, size, false, true, &linear); |
737 | PAGE_SIZE - offset_in_page(ctxt->_eip)); | 729 | if (unlikely(rc != X86EMUL_CONTINUE)) |
738 | rc = __linearize(ctxt, addr, size, false, true, &linear); | 730 | return rc; |
739 | if (unlikely(rc != X86EMUL_CONTINUE)) | ||
740 | return rc; | ||
741 | rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size, | ||
742 | size, &ctxt->exception); | ||
743 | if (unlikely(rc != X86EMUL_CONTINUE)) | ||
744 | return rc; | ||
745 | fc->end += size; | ||
746 | } | ||
747 | *dest = fc->data[ctxt->_eip - fc->start]; | ||
748 | ctxt->_eip++; | ||
749 | return X86EMUL_CONTINUE; | ||
750 | } | ||
751 | 731 | ||
752 | static int do_insn_fetch(struct x86_emulate_ctxt *ctxt, | 732 | size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); |
753 | void *dest, unsigned size) | ||
754 | { | ||
755 | int rc; | ||
756 | 733 | ||
757 | /* x86 instructions are limited to 15 bytes. */ | 734 | /* |
758 | if (unlikely(ctxt->_eip + size - ctxt->eip > 15)) | 735 | * One instruction can only straddle two pages, |
736 | * and one has been loaded at the beginning of | ||
737 | * x86_decode_insn. So, if there still are not | ||
738 | * enough bytes, we must have hit the 15-byte limit. | ||
739 | */ | ||
740 | if (unlikely(size < op_size)) | ||
759 | return X86EMUL_UNHANDLEABLE; | 741 | return X86EMUL_UNHANDLEABLE; |
760 | while (size--) { | 742 | rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, |
761 | rc = do_insn_fetch_byte(ctxt, dest++); | 743 | size, &ctxt->exception); |
762 | if (rc != X86EMUL_CONTINUE) | 744 | if (unlikely(rc != X86EMUL_CONTINUE)) |
763 | return rc; | 745 | return rc; |
764 | } | 746 | ctxt->fetch.end += size; |
765 | return X86EMUL_CONTINUE; | 747 | return X86EMUL_CONTINUE; |
766 | } | 748 | } |
767 | 749 | ||
750 | static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, | ||
751 | unsigned size) | ||
752 | { | ||
753 | if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size)) | ||
754 | return __do_insn_fetch_bytes(ctxt, size); | ||
755 | else | ||
756 | return X86EMUL_CONTINUE; | ||
757 | } | ||
758 | |||
768 | /* Fetch next part of the instruction being emulated. */ | 759 | /* Fetch next part of the instruction being emulated. */ |
769 | #define insn_fetch(_type, _ctxt) \ | 760 | #define insn_fetch(_type, _ctxt) \ |
770 | ({ unsigned long _x; \ | 761 | ({ _type _x; \ |
771 | rc = do_insn_fetch(_ctxt, &_x, sizeof(_type)); \ | 762 | \ |
763 | rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \ | ||
772 | if (rc != X86EMUL_CONTINUE) \ | 764 | if (rc != X86EMUL_CONTINUE) \ |
773 | goto done; \ | 765 | goto done; \ |
774 | (_type)_x; \ | 766 | ctxt->_eip += sizeof(_type); \ |
767 | _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \ | ||
768 | ctxt->fetch.ptr += sizeof(_type); \ | ||
769 | _x; \ | ||
775 | }) | 770 | }) |
776 | 771 | ||
777 | #define insn_fetch_arr(_arr, _size, _ctxt) \ | 772 | #define insn_fetch_arr(_arr, _size, _ctxt) \ |
778 | ({ rc = do_insn_fetch(_ctxt, _arr, (_size)); \ | 773 | ({ \ |
774 | rc = do_insn_fetch_bytes(_ctxt, _size); \ | ||
779 | if (rc != X86EMUL_CONTINUE) \ | 775 | if (rc != X86EMUL_CONTINUE) \ |
780 | goto done; \ | 776 | goto done; \ |
777 | ctxt->_eip += (_size); \ | ||
778 | memcpy(_arr, ctxt->fetch.ptr, _size); \ | ||
779 | ctxt->fetch.ptr += (_size); \ | ||
781 | }) | 780 | }) |
782 | 781 | ||
783 | /* | 782 | /* |
@@ -1063,19 +1062,17 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
1063 | struct operand *op) | 1062 | struct operand *op) |
1064 | { | 1063 | { |
1065 | u8 sib; | 1064 | u8 sib; |
1066 | int index_reg = 0, base_reg = 0, scale; | 1065 | int index_reg, base_reg, scale; |
1067 | int rc = X86EMUL_CONTINUE; | 1066 | int rc = X86EMUL_CONTINUE; |
1068 | ulong modrm_ea = 0; | 1067 | ulong modrm_ea = 0; |
1069 | 1068 | ||
1070 | if (ctxt->rex_prefix) { | 1069 | ctxt->modrm_reg = ((ctxt->rex_prefix << 1) & 8); /* REX.R */ |
1071 | ctxt->modrm_reg = (ctxt->rex_prefix & 4) << 1; /* REX.R */ | 1070 | index_reg = (ctxt->rex_prefix << 2) & 8; /* REX.X */ |
1072 | index_reg = (ctxt->rex_prefix & 2) << 2; /* REX.X */ | 1071 | base_reg = (ctxt->rex_prefix << 3) & 8; /* REX.B */ |
1073 | ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */ | ||
1074 | } | ||
1075 | 1072 | ||
1076 | ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6; | 1073 | ctxt->modrm_mod = (ctxt->modrm & 0xc0) >> 6; |
1077 | ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; | 1074 | ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; |
1078 | ctxt->modrm_rm |= (ctxt->modrm & 0x07); | 1075 | ctxt->modrm_rm = base_reg | (ctxt->modrm & 0x07); |
1079 | ctxt->modrm_seg = VCPU_SREG_DS; | 1076 | ctxt->modrm_seg = VCPU_SREG_DS; |
1080 | 1077 | ||
1081 | if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { | 1078 | if (ctxt->modrm_mod == 3 || (ctxt->d & NoMod)) { |
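
The conditional goes away because ctxt->rex_prefix is zero for non-REX instructions, so the three shifts above contribute nothing in that case. A REX byte is 0100WRXB; shifting it left by 1, 2 or 3 parks the R, X or B bit at bit 3, the top bit of a 4-bit register number. Worked through in a sketch:

#include <stdio.h>

int main(void)
{
	unsigned char rex   = 0x44;   /* REX.R set: 0100 0100 */
	unsigned char modrm = 0xd8;   /* mod=3, reg=3, rm=0 */

	int reg = ((rex << 1) & 8) | ((modrm & 0x38) >> 3); /* 8+3 = 11 -> r11 */
	int rm  = ((rex << 3) & 8) | (modrm & 0x07);        /* 0+0 = 0  -> rax */
	int idx = (rex << 2) & 8;                           /* REX.X, for the SIB index */

	printf("reg=%d rm=%d index_high_bit=%d\n", reg, rm, idx);
	return 0;
}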
@@ -1093,7 +1090,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
1093 | if (ctxt->d & Mmx) { | 1090 | if (ctxt->d & Mmx) { |
1094 | op->type = OP_MM; | 1091 | op->type = OP_MM; |
1095 | op->bytes = 8; | 1092 | op->bytes = 8; |
1096 | op->addr.xmm = ctxt->modrm_rm & 7; | 1093 | op->addr.mm = ctxt->modrm_rm & 7; |
1097 | return rc; | 1094 | return rc; |
1098 | } | 1095 | } |
1099 | fetch_register_operand(op); | 1096 | fetch_register_operand(op); |
@@ -1190,6 +1187,9 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
1190 | } | 1187 | } |
1191 | } | 1188 | } |
1192 | op->addr.mem.ea = modrm_ea; | 1189 | op->addr.mem.ea = modrm_ea; |
1190 | if (ctxt->ad_bytes != 8) | ||
1191 | ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; | ||
1192 | |||
1193 | done: | 1193 | done: |
1194 | return rc; | 1194 | return rc; |
1195 | } | 1195 | } |
@@ -1220,12 +1220,14 @@ static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt) | |||
1220 | long sv = 0, mask; | 1220 | long sv = 0, mask; |
1221 | 1221 | ||
1222 | if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { | 1222 | if (ctxt->dst.type == OP_MEM && ctxt->src.type == OP_REG) { |
1223 | mask = ~(ctxt->dst.bytes * 8 - 1); | 1223 | mask = ~((long)ctxt->dst.bytes * 8 - 1); |
1224 | 1224 | ||
1225 | if (ctxt->src.bytes == 2) | 1225 | if (ctxt->src.bytes == 2) |
1226 | sv = (s16)ctxt->src.val & (s16)mask; | 1226 | sv = (s16)ctxt->src.val & (s16)mask; |
1227 | else if (ctxt->src.bytes == 4) | 1227 | else if (ctxt->src.bytes == 4) |
1228 | sv = (s32)ctxt->src.val & (s32)mask; | 1228 | sv = (s32)ctxt->src.val & (s32)mask; |
1229 | else | ||
1230 | sv = (s64)ctxt->src.val & (s64)mask; | ||
1229 | 1231 | ||
1230 | ctxt->dst.addr.mem.ea += (sv >> 3); | 1232 | ctxt->dst.addr.mem.ea += (sv >> 3); |
1231 | } | 1233 | } |
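
Two fixes land here: the mask is computed as long so that ~(64 - 1) is not truncated to 32 bits, and a new branch sign-extends 8-byte sources. The mask/shift pair folds a large bit offset into a byte displacement, leaving only an in-word offset for the ALU op. Sketch arithmetic for a 64-bit BT:

#include <stdio.h>

int main(void)
{
	long bytes = 8;                  /* BT with a 64-bit destination */
	long mask  = ~(bytes * 8 - 1);   /* ~63: keep whole multiples of 64 */
	long long off = 100;             /* bit offset from the source operand */
	long long sv  = off & mask;      /* 64: bits to convert into bytes */

	/* ea += sv >> 3 moves the effective address forward 8 bytes; the
	 * remaining offset (100 - 64 = 36) is handled by the op itself. */
	printf("byte adjust=%lld residual bit=%lld\n", sv >> 3, off - sv);
	return 0;
}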
@@ -1315,8 +1317,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, | |||
1315 | in_page = (ctxt->eflags & EFLG_DF) ? | 1317 | in_page = (ctxt->eflags & EFLG_DF) ? |
1316 | offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : | 1318 | offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) : |
1317 | PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); | 1319 | PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)); |
1318 | n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size, | 1320 | n = min3(in_page, (unsigned int)sizeof(rc->data) / size, count); |
1319 | count); | ||
1320 | if (n == 0) | 1321 | if (n == 0) |
1321 | n = 1; | 1322 | n = 1; |
1322 | rc->pos = rc->end = 0; | 1323 | rc->pos = rc->end = 0; |
@@ -1358,17 +1359,19 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, | |||
1358 | u16 selector, struct desc_ptr *dt) | 1359 | u16 selector, struct desc_ptr *dt) |
1359 | { | 1360 | { |
1360 | const struct x86_emulate_ops *ops = ctxt->ops; | 1361 | const struct x86_emulate_ops *ops = ctxt->ops; |
1362 | u32 base3 = 0; | ||
1361 | 1363 | ||
1362 | if (selector & 1 << 2) { | 1364 | if (selector & 1 << 2) { |
1363 | struct desc_struct desc; | 1365 | struct desc_struct desc; |
1364 | u16 sel; | 1366 | u16 sel; |
1365 | 1367 | ||
1366 | memset (dt, 0, sizeof *dt); | 1368 | memset (dt, 0, sizeof *dt); |
1367 | if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR)) | 1369 | if (!ops->get_segment(ctxt, &sel, &desc, &base3, |
1370 | VCPU_SREG_LDTR)) | ||
1368 | return; | 1371 | return; |
1369 | 1372 | ||
1370 | dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ | 1373 | dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ |
1371 | dt->address = get_desc_base(&desc); | 1374 | dt->address = get_desc_base(&desc) | ((u64)base3 << 32); |
1372 | } else | 1375 | } else |
1373 | ops->get_gdt(ctxt, dt); | 1376 | ops->get_gdt(ctxt, dt); |
1374 | } | 1377 | } |
@@ -1422,6 +1425,7 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, | |||
1422 | ulong desc_addr; | 1425 | ulong desc_addr; |
1423 | int ret; | 1426 | int ret; |
1424 | u16 dummy; | 1427 | u16 dummy; |
1428 | u32 base3 = 0; | ||
1425 | 1429 | ||
1426 | memset(&seg_desc, 0, sizeof seg_desc); | 1430 | memset(&seg_desc, 0, sizeof seg_desc); |
1427 | 1431 | ||
@@ -1538,9 +1542,14 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, | |||
1538 | ret = write_segment_descriptor(ctxt, selector, &seg_desc); | 1542 | ret = write_segment_descriptor(ctxt, selector, &seg_desc); |
1539 | if (ret != X86EMUL_CONTINUE) | 1543 | if (ret != X86EMUL_CONTINUE) |
1540 | return ret; | 1544 | return ret; |
1545 | } else if (ctxt->mode == X86EMUL_MODE_PROT64) { | ||
1546 | ret = ctxt->ops->read_std(ctxt, desc_addr+8, &base3, | ||
1547 | sizeof(base3), &ctxt->exception); | ||
1548 | if (ret != X86EMUL_CONTINUE) | ||
1549 | return ret; | ||
1541 | } | 1550 | } |
1542 | load: | 1551 | load: |
1543 | ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg); | 1552 | ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); |
1544 | return X86EMUL_CONTINUE; | 1553 | return X86EMUL_CONTINUE; |
1545 | exception: | 1554 | exception: |
1546 | emulate_exception(ctxt, err_vec, err_code, true); | 1555 | emulate_exception(ctxt, err_vec, err_code, true); |
@@ -1575,34 +1584,28 @@ static void write_register_operand(struct operand *op) | |||
1575 | 1584 | ||
1576 | static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) | 1585 | static int writeback(struct x86_emulate_ctxt *ctxt, struct operand *op) |
1577 | { | 1586 | { |
1578 | int rc; | ||
1579 | |||
1580 | switch (op->type) { | 1587 | switch (op->type) { |
1581 | case OP_REG: | 1588 | case OP_REG: |
1582 | write_register_operand(op); | 1589 | write_register_operand(op); |
1583 | break; | 1590 | break; |
1584 | case OP_MEM: | 1591 | case OP_MEM: |
1585 | if (ctxt->lock_prefix) | 1592 | if (ctxt->lock_prefix) |
1586 | rc = segmented_cmpxchg(ctxt, | 1593 | return segmented_cmpxchg(ctxt, |
1594 | op->addr.mem, | ||
1595 | &op->orig_val, | ||
1596 | &op->val, | ||
1597 | op->bytes); | ||
1598 | else | ||
1599 | return segmented_write(ctxt, | ||
1587 | op->addr.mem, | 1600 | op->addr.mem, |
1588 | &op->orig_val, | ||
1589 | &op->val, | 1601 | &op->val, |
1590 | op->bytes); | 1602 | op->bytes); |
1591 | else | ||
1592 | rc = segmented_write(ctxt, | ||
1593 | op->addr.mem, | ||
1594 | &op->val, | ||
1595 | op->bytes); | ||
1596 | if (rc != X86EMUL_CONTINUE) | ||
1597 | return rc; | ||
1598 | break; | 1603 | break; |
1599 | case OP_MEM_STR: | 1604 | case OP_MEM_STR: |
1600 | rc = segmented_write(ctxt, | 1605 | return segmented_write(ctxt, |
1601 | op->addr.mem, | 1606 | op->addr.mem, |
1602 | op->data, | 1607 | op->data, |
1603 | op->bytes * op->count); | 1608 | op->bytes * op->count); |
1604 | if (rc != X86EMUL_CONTINUE) | ||
1605 | return rc; | ||
1606 | break; | 1609 | break; |
1607 | case OP_XMM: | 1610 | case OP_XMM: |
1608 | write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); | 1611 | write_sse_reg(ctxt, &op->vec_val, op->addr.xmm); |
@@ -1671,7 +1674,7 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, | |||
1671 | return rc; | 1674 | return rc; |
1672 | 1675 | ||
1673 | change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF | 1676 | change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF |
1674 | | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID; | 1677 | | EFLG_TF | EFLG_DF | EFLG_NT | EFLG_AC | EFLG_ID; |
1675 | 1678 | ||
1676 | switch(ctxt->mode) { | 1679 | switch(ctxt->mode) { |
1677 | case X86EMUL_MODE_PROT64: | 1680 | case X86EMUL_MODE_PROT64: |
@@ -1754,6 +1757,9 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt) | |||
1754 | if (rc != X86EMUL_CONTINUE) | 1757 | if (rc != X86EMUL_CONTINUE) |
1755 | return rc; | 1758 | return rc; |
1756 | 1759 | ||
1760 | if (ctxt->modrm_reg == VCPU_SREG_SS) | ||
1761 | ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; | ||
1762 | |||
1757 | rc = load_segment_descriptor(ctxt, (u16)selector, seg); | 1763 | rc = load_segment_descriptor(ctxt, (u16)selector, seg); |
1758 | return rc; | 1764 | return rc; |
1759 | } | 1765 | } |
@@ -1991,6 +1997,9 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt) | |||
1991 | { | 1997 | { |
1992 | u64 old = ctxt->dst.orig_val64; | 1998 | u64 old = ctxt->dst.orig_val64; |
1993 | 1999 | ||
2000 | if (ctxt->dst.bytes == 16) | ||
2001 | return X86EMUL_UNHANDLEABLE; | ||
2002 | |||
1994 | if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || | 2003 | if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) || |
1995 | ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { | 2004 | ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) { |
1996 | *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); | 2005 | *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0); |
@@ -2017,6 +2026,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) | |||
2017 | { | 2026 | { |
2018 | int rc; | 2027 | int rc; |
2019 | unsigned long cs; | 2028 | unsigned long cs; |
2029 | int cpl = ctxt->ops->cpl(ctxt); | ||
2020 | 2030 | ||
2021 | rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); | 2031 | rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); |
2022 | if (rc != X86EMUL_CONTINUE) | 2032 | if (rc != X86EMUL_CONTINUE) |
@@ -2026,6 +2036,9 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt) | |||
2026 | rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); | 2036 | rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); |
2027 | if (rc != X86EMUL_CONTINUE) | 2037 | if (rc != X86EMUL_CONTINUE) |
2028 | return rc; | 2038 | return rc; |
2039 | /* Outer-privilege level return is not implemented */ | ||
2040 | if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) | ||
2041 | return X86EMUL_UNHANDLEABLE; | ||
2029 | rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); | 2042 | rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); |
2030 | return rc; | 2043 | return rc; |
2031 | } | 2044 | } |
@@ -2044,8 +2057,10 @@ static int em_ret_far_imm(struct x86_emulate_ctxt *ctxt) | |||
2044 | static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) | 2057 | static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) |
2045 | { | 2058 | { |
2046 | /* Save real source value, then compare EAX against destination. */ | 2059 | /* Save real source value, then compare EAX against destination. */ |
2060 | ctxt->dst.orig_val = ctxt->dst.val; | ||
2061 | ctxt->dst.val = reg_read(ctxt, VCPU_REGS_RAX); | ||
2047 | ctxt->src.orig_val = ctxt->src.val; | 2062 | ctxt->src.orig_val = ctxt->src.val; |
2048 | ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX); | 2063 | ctxt->src.val = ctxt->dst.orig_val; |
2049 | fastop(ctxt, em_cmp); | 2064 | fastop(ctxt, em_cmp); |
2050 | 2065 | ||
2051 | if (ctxt->eflags & EFLG_ZF) { | 2066 | if (ctxt->eflags & EFLG_ZF) { |
@@ -2055,6 +2070,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt) | |||
2055 | /* Failure: write the value we saw to EAX. */ | 2070 | /* Failure: write the value we saw to EAX. */ |
2056 | ctxt->dst.type = OP_REG; | 2071 | ctxt->dst.type = OP_REG; |
2057 | ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); | 2072 | ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX); |
2073 | ctxt->dst.val = ctxt->dst.orig_val; | ||
2058 | } | 2074 | } |
2059 | return X86EMUL_CONTINUE; | 2075 | return X86EMUL_CONTINUE; |
2060 | } | 2076 | } |
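
With this change the emulated CMPXCHG computes its flags from RAX minus the destination (the hardware operand order, so CF/SF/OF come out right as well, not just ZF) and, on a miss, loads the destination's old value into RAX. The architectural semantics, as a sketch:

#include <stdbool.h>
#include <stdio.h>

/* CMPXCHG dst, src semantics (sketch); the return value models ZF. */
static bool cmpxchg_sketch(unsigned long *rax, unsigned long *dst,
			   unsigned long src)
{
	if (*rax == *dst) {
		*dst = src;    /* hit: destination takes the source */
		return true;   /* ZF = 1 */
	}
	*rax = *dst;           /* miss: RAX learns the current value */
	return false;          /* ZF = 0 */
}

int main(void)
{
	unsigned long rax = 1, mem = 2;

	if (!cmpxchg_sketch(&rax, &mem, 3))
		printf("miss: rax=%lu mem=%lu\n", rax, mem);  /* rax=2 mem=2 */
	return 0;
}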
@@ -2194,7 +2210,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) | |||
2194 | *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; | 2210 | *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip; |
2195 | if (efer & EFER_LMA) { | 2211 | if (efer & EFER_LMA) { |
2196 | #ifdef CONFIG_X86_64 | 2212 | #ifdef CONFIG_X86_64 |
2197 | *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF; | 2213 | *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags; |
2198 | 2214 | ||
2199 | ops->get_msr(ctxt, | 2215 | ops->get_msr(ctxt, |
2200 | ctxt->mode == X86EMUL_MODE_PROT64 ? | 2216 | ctxt->mode == X86EMUL_MODE_PROT64 ? |
@@ -2202,14 +2218,14 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt) | |||
2202 | ctxt->_eip = msr_data; | 2218 | ctxt->_eip = msr_data; |
2203 | 2219 | ||
2204 | ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); | 2220 | ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); |
2205 | ctxt->eflags &= ~(msr_data | EFLG_RF); | 2221 | ctxt->eflags &= ~msr_data; |
2206 | #endif | 2222 | #endif |
2207 | } else { | 2223 | } else { |
2208 | /* legacy mode */ | 2224 | /* legacy mode */ |
2209 | ops->get_msr(ctxt, MSR_STAR, &msr_data); | 2225 | ops->get_msr(ctxt, MSR_STAR, &msr_data); |
2210 | ctxt->_eip = (u32)msr_data; | 2226 | ctxt->_eip = (u32)msr_data; |
2211 | 2227 | ||
2212 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); | 2228 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF); |
2213 | } | 2229 | } |
2214 | 2230 | ||
2215 | return X86EMUL_CONTINUE; | 2231 | return X86EMUL_CONTINUE; |
@@ -2258,7 +2274,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt) | |||
2258 | break; | 2274 | break; |
2259 | } | 2275 | } |
2260 | 2276 | ||
2261 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); | 2277 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF); |
2262 | cs_sel = (u16)msr_data; | 2278 | cs_sel = (u16)msr_data; |
2263 | cs_sel &= ~SELECTOR_RPL_MASK; | 2279 | cs_sel &= ~SELECTOR_RPL_MASK; |
2264 | ss_sel = cs_sel + 8; | 2280 | ss_sel = cs_sel + 8; |
@@ -2964,7 +2980,7 @@ static int em_rdpmc(struct x86_emulate_ctxt *ctxt) | |||
2964 | 2980 | ||
2965 | static int em_mov(struct x86_emulate_ctxt *ctxt) | 2981 | static int em_mov(struct x86_emulate_ctxt *ctxt) |
2966 | { | 2982 | { |
2967 | memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes); | 2983 | memcpy(ctxt->dst.valptr, ctxt->src.valptr, sizeof(ctxt->src.valptr)); |
2968 | return X86EMUL_CONTINUE; | 2984 | return X86EMUL_CONTINUE; |
2969 | } | 2985 | } |
2970 | 2986 | ||
@@ -3221,7 +3237,8 @@ static int em_lidt(struct x86_emulate_ctxt *ctxt) | |||
3221 | 3237 | ||
3222 | static int em_smsw(struct x86_emulate_ctxt *ctxt) | 3238 | static int em_smsw(struct x86_emulate_ctxt *ctxt) |
3223 | { | 3239 | { |
3224 | ctxt->dst.bytes = 2; | 3240 | if (ctxt->dst.type == OP_MEM) |
3241 | ctxt->dst.bytes = 2; | ||
3225 | ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); | 3242 | ctxt->dst.val = ctxt->ops->get_cr(ctxt, 0); |
3226 | return X86EMUL_CONTINUE; | 3243 | return X86EMUL_CONTINUE; |
3227 | } | 3244 | } |
@@ -3496,7 +3513,7 @@ static int check_rdpmc(struct x86_emulate_ctxt *ctxt) | |||
3496 | u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); | 3513 | u64 rcx = reg_read(ctxt, VCPU_REGS_RCX); |
3497 | 3514 | ||
3498 | if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || | 3515 | if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || |
3499 | (rcx > 3)) | 3516 | ctxt->ops->check_pmc(ctxt, rcx)) |
3500 | return emulate_gp(ctxt, 0); | 3517 | return emulate_gp(ctxt, 0); |
3501 | 3518 | ||
3502 | return X86EMUL_CONTINUE; | 3519 | return X86EMUL_CONTINUE; |
@@ -3521,9 +3538,9 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt) | |||
3521 | } | 3538 | } |
3522 | 3539 | ||
3523 | #define D(_y) { .flags = (_y) } | 3540 | #define D(_y) { .flags = (_y) } |
3524 | #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i } | 3541 | #define DI(_y, _i) { .flags = (_y)|Intercept, .intercept = x86_intercept_##_i } |
3525 | #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \ | 3542 | #define DIP(_y, _i, _p) { .flags = (_y)|Intercept|CheckPerm, \ |
3526 | .check_perm = (_p) } | 3543 | .intercept = x86_intercept_##_i, .check_perm = (_p) } |
3527 | #define N D(NotImpl) | 3544 | #define N D(NotImpl) |
3528 | #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } | 3545 | #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } |
3529 | #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } | 3546 | #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) } |
@@ -3532,10 +3549,10 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt) | |||
3532 | #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } | 3549 | #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } |
3533 | #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } | 3550 | #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) } |
3534 | #define II(_f, _e, _i) \ | 3551 | #define II(_f, _e, _i) \ |
3535 | { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i } | 3552 | { .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i } |
3536 | #define IIP(_f, _e, _i, _p) \ | 3553 | #define IIP(_f, _e, _i, _p) \ |
3537 | { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \ | 3554 | { .flags = (_f)|Intercept|CheckPerm, .u.execute = (_e), \ |
3538 | .check_perm = (_p) } | 3555 | .intercept = x86_intercept_##_i, .check_perm = (_p) } |
3539 | #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } | 3556 | #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } |
3540 | 3557 | ||
3541 | #define D2bv(_f) D((_f) | ByteOp), D(_f) | 3558 | #define D2bv(_f) D((_f) | ByteOp), D(_f) |
@@ -3634,8 +3651,8 @@ static const struct opcode group6[] = { | |||
3634 | }; | 3651 | }; |
3635 | 3652 | ||
3636 | static const struct group_dual group7 = { { | 3653 | static const struct group_dual group7 = { { |
3637 | II(Mov | DstMem | Priv, em_sgdt, sgdt), | 3654 | II(Mov | DstMem, em_sgdt, sgdt), |
3638 | II(Mov | DstMem | Priv, em_sidt, sidt), | 3655 | II(Mov | DstMem, em_sidt, sidt), |
3639 | II(SrcMem | Priv, em_lgdt, lgdt), | 3656 | II(SrcMem | Priv, em_lgdt, lgdt), |
3640 | II(SrcMem | Priv, em_lidt, lidt), | 3657 | II(SrcMem | Priv, em_lidt, lidt), |
3641 | II(SrcNone | DstMem | Mov, em_smsw, smsw), N, | 3658 | II(SrcNone | DstMem | Mov, em_smsw, smsw), N, |
@@ -3899,7 +3916,7 @@ static const struct opcode twobyte_table[256] = { | |||
3899 | N, N, | 3916 | N, N, |
3900 | N, N, N, N, N, N, N, N, | 3917 | N, N, N, N, N, N, N, N, |
3901 | /* 0x40 - 0x4F */ | 3918 | /* 0x40 - 0x4F */ |
3902 | X16(D(DstReg | SrcMem | ModRM | Mov)), | 3919 | X16(D(DstReg | SrcMem | ModRM)), |
3903 | /* 0x50 - 0x5F */ | 3920 | /* 0x50 - 0x5F */ |
3904 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, | 3921 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, |
3905 | /* 0x60 - 0x6F */ | 3922 | /* 0x60 - 0x6F */ |
@@ -4061,12 +4078,12 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, | |||
4061 | mem_common: | 4078 | mem_common: |
4062 | *op = ctxt->memop; | 4079 | *op = ctxt->memop; |
4063 | ctxt->memopp = op; | 4080 | ctxt->memopp = op; |
4064 | if ((ctxt->d & BitOp) && op == &ctxt->dst) | 4081 | if (ctxt->d & BitOp) |
4065 | fetch_bit_operand(ctxt); | 4082 | fetch_bit_operand(ctxt); |
4066 | op->orig_val = op->val; | 4083 | op->orig_val = op->val; |
4067 | break; | 4084 | break; |
4068 | case OpMem64: | 4085 | case OpMem64: |
4069 | ctxt->memop.bytes = 8; | 4086 | ctxt->memop.bytes = (ctxt->op_bytes == 8) ? 16 : 8; |
4070 | goto mem_common; | 4087 | goto mem_common; |
4071 | case OpAcc: | 4088 | case OpAcc: |
4072 | op->type = OP_REG; | 4089 | op->type = OP_REG; |
@@ -4150,7 +4167,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, | |||
4150 | op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; | 4167 | op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes; |
4151 | op->addr.mem.ea = | 4168 | op->addr.mem.ea = |
4152 | register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI)); | 4169 | register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI)); |
4153 | op->addr.mem.seg = seg_override(ctxt); | 4170 | op->addr.mem.seg = ctxt->seg_override; |
4154 | op->val = 0; | 4171 | op->val = 0; |
4155 | op->count = 1; | 4172 | op->count = 1; |
4156 | break; | 4173 | break; |
@@ -4161,7 +4178,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op, | |||
4161 | register_address(ctxt, | 4178 | register_address(ctxt, |
4162 | reg_read(ctxt, VCPU_REGS_RBX) + | 4179 | reg_read(ctxt, VCPU_REGS_RBX) + |
4163 | (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); | 4180 | (reg_read(ctxt, VCPU_REGS_RAX) & 0xff)); |
4164 | op->addr.mem.seg = seg_override(ctxt); | 4181 | op->addr.mem.seg = ctxt->seg_override; |
4165 | op->val = 0; | 4182 | op->val = 0; |
4166 | break; | 4183 | break; |
4167 | case OpImmFAddr: | 4184 | case OpImmFAddr: |
@@ -4208,16 +4225,22 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) | |||
4208 | int mode = ctxt->mode; | 4225 | int mode = ctxt->mode; |
4209 | int def_op_bytes, def_ad_bytes, goffset, simd_prefix; | 4226 | int def_op_bytes, def_ad_bytes, goffset, simd_prefix; |
4210 | bool op_prefix = false; | 4227 | bool op_prefix = false; |
4228 | bool has_seg_override = false; | ||
4211 | struct opcode opcode; | 4229 | struct opcode opcode; |
4212 | 4230 | ||
4213 | ctxt->memop.type = OP_NONE; | 4231 | ctxt->memop.type = OP_NONE; |
4214 | ctxt->memopp = NULL; | 4232 | ctxt->memopp = NULL; |
4215 | ctxt->_eip = ctxt->eip; | 4233 | ctxt->_eip = ctxt->eip; |
4216 | ctxt->fetch.start = ctxt->_eip; | 4234 | ctxt->fetch.ptr = ctxt->fetch.data; |
4217 | ctxt->fetch.end = ctxt->fetch.start + insn_len; | 4235 | ctxt->fetch.end = ctxt->fetch.data + insn_len; |
4218 | ctxt->opcode_len = 1; | 4236 | ctxt->opcode_len = 1; |
4219 | if (insn_len > 0) | 4237 | if (insn_len > 0) |
4220 | memcpy(ctxt->fetch.data, insn, insn_len); | 4238 | memcpy(ctxt->fetch.data, insn, insn_len); |
4239 | else { | ||
4240 | rc = __do_insn_fetch_bytes(ctxt, 1); | ||
4241 | if (rc != X86EMUL_CONTINUE) | ||
4242 | return rc; | ||
4243 | } | ||
4221 | 4244 | ||
4222 | switch (mode) { | 4245 | switch (mode) { |
4223 | case X86EMUL_MODE_REAL: | 4246 | case X86EMUL_MODE_REAL: |
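fetch.start is gone: the fetch window is now described by the backing array fetch.data[], a cursor fetch.ptr, and fetch.end marking the last valid byte; and when no instruction bytes were handed in (insn_len == 0), the decoder pre-fetches at least one byte up front so the prefix loop below never runs against an empty buffer. A sketch of the consumer side under that scheme (__do_insn_fetch_bytes is the refill helper this series introduces; the wrapper below is illustrative, not the kernel's exact insn_fetch):

    static int fetch_u8(struct x86_emulate_ctxt *ctxt, u8 *out)
    {
            int rc;

            if (ctxt->fetch.ptr == ctxt->fetch.end) {
                    rc = __do_insn_fetch_bytes(ctxt, 1);   /* refill window */
                    if (rc != X86EMUL_CONTINUE)
                            return rc;
            }
            *out = *ctxt->fetch.ptr++;                     /* consume cursor */
            return X86EMUL_CONTINUE;
    }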
@@ -4261,11 +4284,13 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) | |||
4261 | case 0x2e: /* CS override */ | 4284 | case 0x2e: /* CS override */ |
4262 | case 0x36: /* SS override */ | 4285 | case 0x36: /* SS override */ |
4263 | case 0x3e: /* DS override */ | 4286 | case 0x3e: /* DS override */ |
4264 | set_seg_override(ctxt, (ctxt->b >> 3) & 3); | 4287 | has_seg_override = true; |
4288 | ctxt->seg_override = (ctxt->b >> 3) & 3; | ||
4265 | break; | 4289 | break; |
4266 | case 0x64: /* FS override */ | 4290 | case 0x64: /* FS override */ |
4267 | case 0x65: /* GS override */ | 4291 | case 0x65: /* GS override */ |
4268 | set_seg_override(ctxt, ctxt->b & 7); | 4292 | has_seg_override = true; |
4293 | ctxt->seg_override = ctxt->b & 7; | ||
4269 | break; | 4294 | break; |
4270 | case 0x40 ... 0x4f: /* REX */ | 4295 | case 0x40 ... 0x4f: /* REX */ |
4271 | if (mode != X86EMUL_MODE_PROT64) | 4296 | if (mode != X86EMUL_MODE_PROT64) |
@@ -4314,6 +4339,13 @@ done_prefixes: | |||
4314 | if (ctxt->d & ModRM) | 4339 | if (ctxt->d & ModRM) |
4315 | ctxt->modrm = insn_fetch(u8, ctxt); | 4340 | ctxt->modrm = insn_fetch(u8, ctxt); |
4316 | 4341 | ||
4342 | /* vex-prefix instructions are not implemented */ | ||
4343 | if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) && | ||
4344 | (mode == X86EMUL_MODE_PROT64 || | ||
4345 | (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) { | ||
4346 | ctxt->d = NotImpl; | ||
4347 | } | ||
4348 | |||
4317 | while (ctxt->d & GroupMask) { | 4349 | while (ctxt->d & GroupMask) { |
4318 | switch (ctxt->d & GroupMask) { | 4350 | switch (ctxt->d & GroupMask) { |
4319 | case Group: | 4351 | case Group: |
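Bytes C4/C5 are ambiguous: in 16/32-bit modes they are the legacy LES/LDS opcodes, and only act as VEX prefixes when the following byte could not be a valid LES/LDS ModRM (LES/LDS require a memory operand, so a register-direct mod field rules them out). The check above uses bit 7 of that byte as the discriminator in protected mode, and treats C4/C5 as VEX unconditionally in 64-bit mode where LES/LDS are invalid anyway; either way the instruction is flagged NotImpl rather than misdecoded. Rough shape of the disambiguation (pseudocode, heuristic as in the patch):

    is_vex = (mode == X86EMUL_MODE_PROT64) ||      /* no LES/LDS in 64-bit */
             (protected_mode && (modrm & 0x80));   /* mod field is 1xb     */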
@@ -4356,49 +4388,59 @@ done_prefixes: | |||
4356 | ctxt->d |= opcode.flags; | 4388 | ctxt->d |= opcode.flags; |
4357 | } | 4389 | } |
4358 | 4390 | ||
4359 | ctxt->execute = opcode.u.execute; | ||
4360 | ctxt->check_perm = opcode.check_perm; | ||
4361 | ctxt->intercept = opcode.intercept; | ||
4362 | |||
4363 | /* Unrecognised? */ | 4391 | /* Unrecognised? */ |
4364 | if (ctxt->d == 0 || (ctxt->d & NotImpl)) | 4392 | if (ctxt->d == 0) |
4365 | return EMULATION_FAILED; | 4393 | return EMULATION_FAILED; |
4366 | 4394 | ||
4367 | if (!(ctxt->d & EmulateOnUD) && ctxt->ud) | 4395 | ctxt->execute = opcode.u.execute; |
4368 | return EMULATION_FAILED; | ||
4369 | 4396 | ||
4370 | if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack)) | 4397 | if (unlikely(ctxt->d & |
4371 | ctxt->op_bytes = 8; | 4398 | (NotImpl|EmulateOnUD|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) { |
4399 | /* | ||
4400 | * These are copied unconditionally here, and checked unconditionally | ||
4401 | * in x86_emulate_insn. | ||
4402 | */ | ||
4403 | ctxt->check_perm = opcode.check_perm; | ||
4404 | ctxt->intercept = opcode.intercept; | ||
4405 | |||
4406 | if (ctxt->d & NotImpl) | ||
4407 | return EMULATION_FAILED; | ||
4408 | |||
4409 | if (!(ctxt->d & EmulateOnUD) && ctxt->ud) | ||
4410 | return EMULATION_FAILED; | ||
4372 | 4411 | ||
4373 | if (ctxt->d & Op3264) { | 4412 | if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack)) |
4374 | if (mode == X86EMUL_MODE_PROT64) | ||
4375 | ctxt->op_bytes = 8; | 4413 | ctxt->op_bytes = 8; |
4376 | else | ||
4377 | ctxt->op_bytes = 4; | ||
4378 | } | ||
4379 | 4414 | ||
4380 | if (ctxt->d & Sse) | 4415 | if (ctxt->d & Op3264) { |
4381 | ctxt->op_bytes = 16; | 4416 | if (mode == X86EMUL_MODE_PROT64) |
4382 | else if (ctxt->d & Mmx) | 4417 | ctxt->op_bytes = 8; |
4383 | ctxt->op_bytes = 8; | 4418 | else |
4419 | ctxt->op_bytes = 4; | ||
4420 | } | ||
4421 | |||
4422 | if (ctxt->d & Sse) | ||
4423 | ctxt->op_bytes = 16; | ||
4424 | else if (ctxt->d & Mmx) | ||
4425 | ctxt->op_bytes = 8; | ||
4426 | } | ||
4384 | 4427 | ||
4385 | /* ModRM and SIB bytes. */ | 4428 | /* ModRM and SIB bytes. */ |
4386 | if (ctxt->d & ModRM) { | 4429 | if (ctxt->d & ModRM) { |
4387 | rc = decode_modrm(ctxt, &ctxt->memop); | 4430 | rc = decode_modrm(ctxt, &ctxt->memop); |
4388 | if (!ctxt->has_seg_override) | 4431 | if (!has_seg_override) { |
4389 | set_seg_override(ctxt, ctxt->modrm_seg); | 4432 | has_seg_override = true; |
4433 | ctxt->seg_override = ctxt->modrm_seg; | ||
4434 | } | ||
4390 | } else if (ctxt->d & MemAbs) | 4435 | } else if (ctxt->d & MemAbs) |
4391 | rc = decode_abs(ctxt, &ctxt->memop); | 4436 | rc = decode_abs(ctxt, &ctxt->memop); |
4392 | if (rc != X86EMUL_CONTINUE) | 4437 | if (rc != X86EMUL_CONTINUE) |
4393 | goto done; | 4438 | goto done; |
4394 | 4439 | ||
4395 | if (!ctxt->has_seg_override) | 4440 | if (!has_seg_override) |
4396 | set_seg_override(ctxt, VCPU_SREG_DS); | 4441 | ctxt->seg_override = VCPU_SREG_DS; |
4397 | |||
4398 | ctxt->memop.addr.mem.seg = seg_override(ctxt); | ||
4399 | 4442 | ||
4400 | if (ctxt->memop.type == OP_MEM && ctxt->ad_bytes != 8) | 4443 | ctxt->memop.addr.mem.seg = ctxt->seg_override; |
4401 | ctxt->memop.addr.mem.ea = (u32)ctxt->memop.addr.mem.ea; | ||
4402 | 4444 | ||
4403 | /* | 4445 | /* |
4404 | * Decode and fetch the source operand: register, memory | 4446 | * Decode and fetch the source operand: register, memory |
@@ -4420,7 +4462,7 @@ done_prefixes: | |||
4420 | rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); | 4462 | rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); |
4421 | 4463 | ||
4422 | done: | 4464 | done: |
4423 | if (ctxt->memopp && ctxt->memopp->type == OP_MEM && ctxt->rip_relative) | 4465 | if (ctxt->rip_relative) |
4424 | ctxt->memopp->addr.mem.ea += ctxt->_eip; | 4466 | ctxt->memopp->addr.mem.ea += ctxt->_eip; |
4425 | 4467 | ||
4426 | return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; | 4468 | return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; |
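Three decode-time changes land in the hunk above. First, all of the rare flags (NotImpl, EmulateOnUD, Stack, Op3264, Sse, Mmx, Intercept, CheckPerm) are handled behind a single unlikely() test, so a plain ALU instruction takes none of those branches. Second, has_seg_override becomes a decode-local instead of a ctxt field, with ctxt->seg_override holding only the final choice. Third, the rip-relative fixup at done: no longer re-checks memopp, since rip_relative is only ever set while a memory operand is being decoded. The segment-override precedence reduces to (sketch, not kernel code; explicit_seg stands for the prefix-selected segment):

    seg = has_seg_override ? explicit_seg      /* 2E/36/3E/26/64/65 prefix */
        : (ctxt->d & ModRM) ? ctxt->modrm_seg  /* e.g. SS for RBP/RSP base */
        : VCPU_SREG_DS;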
@@ -4495,6 +4537,16 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *)) | |||
4495 | return X86EMUL_CONTINUE; | 4537 | return X86EMUL_CONTINUE; |
4496 | } | 4538 | } |
4497 | 4539 | ||
4540 | void init_decode_cache(struct x86_emulate_ctxt *ctxt) | ||
4541 | { | ||
4542 | memset(&ctxt->rip_relative, 0, | ||
4543 | (void *)&ctxt->modrm - (void *)&ctxt->rip_relative); | ||
4544 | |||
4545 | ctxt->io_read.pos = 0; | ||
4546 | ctxt->io_read.end = 0; | ||
4547 | ctxt->mem_read.end = 0; | ||
4548 | } | ||
4549 | |||
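init_decode_cache replaces a field-by-field reset with one memset over the byte range [&ctxt->rip_relative, &ctxt->modrm): everything the decoder scribbles on is expected to sit contiguously between those two members of struct x86_emulate_ctxt. That layout is an implicit contract; a build-time guard in this spirit would make it explicit (hypothetical, not in the patch):

    /* the range-memset in init_decode_cache assumes this field ordering */
    BUILD_BUG_ON(offsetof(struct x86_emulate_ctxt, rip_relative) >=
                 offsetof(struct x86_emulate_ctxt, modrm));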
4498 | int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | 4550 | int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) |
4499 | { | 4551 | { |
4500 | const struct x86_emulate_ops *ops = ctxt->ops; | 4552 | const struct x86_emulate_ops *ops = ctxt->ops; |
@@ -4503,12 +4555,6 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
4503 | 4555 | ||
4504 | ctxt->mem_read.pos = 0; | 4556 | ctxt->mem_read.pos = 0; |
4505 | 4557 | ||
4506 | if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || | ||
4507 | (ctxt->d & Undefined)) { | ||
4508 | rc = emulate_ud(ctxt); | ||
4509 | goto done; | ||
4510 | } | ||
4511 | |||
4512 | /* LOCK prefix is allowed only with some instructions */ | 4558 | /* LOCK prefix is allowed only with some instructions */ |
4513 | if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { | 4559 | if (ctxt->lock_prefix && (!(ctxt->d & Lock) || ctxt->dst.type != OP_MEM)) { |
4514 | rc = emulate_ud(ctxt); | 4560 | rc = emulate_ud(ctxt); |
@@ -4520,69 +4566,82 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
4520 | goto done; | 4566 | goto done; |
4521 | } | 4567 | } |
4522 | 4568 | ||
4523 | if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) | 4569 | if (unlikely(ctxt->d & |
4524 | || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { | 4570 | (No64|Undefined|Sse|Mmx|Intercept|CheckPerm|Priv|Prot|String))) { |
4525 | rc = emulate_ud(ctxt); | 4571 | if ((ctxt->mode == X86EMUL_MODE_PROT64 && (ctxt->d & No64)) || |
4526 | goto done; | 4572 | (ctxt->d & Undefined)) { |
4527 | } | 4573 | rc = emulate_ud(ctxt); |
4528 | 4574 | goto done; | |
4529 | if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { | 4575 | } |
4530 | rc = emulate_nm(ctxt); | ||
4531 | goto done; | ||
4532 | } | ||
4533 | 4576 | ||
4534 | if (ctxt->d & Mmx) { | 4577 | if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM))) |
4535 | rc = flush_pending_x87_faults(ctxt); | 4578 | || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { |
4536 | if (rc != X86EMUL_CONTINUE) | 4579 | rc = emulate_ud(ctxt); |
4537 | goto done; | 4580 | goto done; |
4538 | /* | 4581 | } |
4539 | * Now that we know the fpu is exception safe, we can fetch | ||
4540 | * operands from it. | ||
4541 | */ | ||
4542 | fetch_possible_mmx_operand(ctxt, &ctxt->src); | ||
4543 | fetch_possible_mmx_operand(ctxt, &ctxt->src2); | ||
4544 | if (!(ctxt->d & Mov)) | ||
4545 | fetch_possible_mmx_operand(ctxt, &ctxt->dst); | ||
4546 | } | ||
4547 | 4582 | ||
4548 | if (unlikely(ctxt->guest_mode) && ctxt->intercept) { | 4583 | if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { |
4549 | rc = emulator_check_intercept(ctxt, ctxt->intercept, | 4584 | rc = emulate_nm(ctxt); |
4550 | X86_ICPT_PRE_EXCEPT); | ||
4551 | if (rc != X86EMUL_CONTINUE) | ||
4552 | goto done; | 4585 | goto done; |
4553 | } | 4586 | } |
4554 | 4587 | ||
4555 | /* Privileged instruction can be executed only in CPL=0 */ | 4588 | if (ctxt->d & Mmx) { |
4556 | if ((ctxt->d & Priv) && ops->cpl(ctxt)) { | 4589 | rc = flush_pending_x87_faults(ctxt); |
4557 | rc = emulate_gp(ctxt, 0); | 4590 | if (rc != X86EMUL_CONTINUE) |
4558 | goto done; | 4591 | goto done; |
4559 | } | 4592 | /* |
4593 | * Now that we know the fpu is exception safe, we can fetch | ||
4594 | * operands from it. | ||
4595 | */ | ||
4596 | fetch_possible_mmx_operand(ctxt, &ctxt->src); | ||
4597 | fetch_possible_mmx_operand(ctxt, &ctxt->src2); | ||
4598 | if (!(ctxt->d & Mov)) | ||
4599 | fetch_possible_mmx_operand(ctxt, &ctxt->dst); | ||
4600 | } | ||
4560 | 4601 | ||
4561 | /* Instruction can only be executed in protected mode */ | 4602 | if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { |
4562 | if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { | 4603 | rc = emulator_check_intercept(ctxt, ctxt->intercept, |
4563 | rc = emulate_ud(ctxt); | 4604 | X86_ICPT_PRE_EXCEPT); |
4564 | goto done; | 4605 | if (rc != X86EMUL_CONTINUE) |
4565 | } | 4606 | goto done; |
4607 | } | ||
4566 | 4608 | ||
4567 | /* Do instruction specific permission checks */ | 4609 | /* Privileged instruction can be executed only in CPL=0 */ |
4568 | if (ctxt->check_perm) { | 4610 | if ((ctxt->d & Priv) && ops->cpl(ctxt)) { |
4569 | rc = ctxt->check_perm(ctxt); | 4611 | if (ctxt->d & PrivUD) |
4570 | if (rc != X86EMUL_CONTINUE) | 4612 | rc = emulate_ud(ctxt); |
4613 | else | ||
4614 | rc = emulate_gp(ctxt, 0); | ||
4571 | goto done; | 4615 | goto done; |
4572 | } | 4616 | } |
4573 | 4617 | ||
4574 | if (unlikely(ctxt->guest_mode) && ctxt->intercept) { | 4618 | /* Instruction can only be executed in protected mode */ |
4575 | rc = emulator_check_intercept(ctxt, ctxt->intercept, | 4619 | if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) { |
4576 | X86_ICPT_POST_EXCEPT); | 4620 | rc = emulate_ud(ctxt); |
4577 | if (rc != X86EMUL_CONTINUE) | ||
4578 | goto done; | 4621 | goto done; |
4579 | } | 4622 | } |
4580 | 4623 | ||
4581 | if (ctxt->rep_prefix && (ctxt->d & String)) { | 4624 | /* Do instruction specific permission checks */ |
4582 | /* All REP prefixes have the same first termination condition */ | 4625 | if (ctxt->d & CheckPerm) { |
4583 | if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { | 4626 | rc = ctxt->check_perm(ctxt); |
4584 | ctxt->eip = ctxt->_eip; | 4627 | if (rc != X86EMUL_CONTINUE) |
4585 | goto done; | 4628 | goto done; |
4629 | } | ||
4630 | |||
4631 | if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { | ||
4632 | rc = emulator_check_intercept(ctxt, ctxt->intercept, | ||
4633 | X86_ICPT_POST_EXCEPT); | ||
4634 | if (rc != X86EMUL_CONTINUE) | ||
4635 | goto done; | ||
4636 | } | ||
4637 | |||
4638 | if (ctxt->rep_prefix && (ctxt->d & String)) { | ||
4639 | /* All REP prefixes have the same first termination condition */ | ||
4640 | if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) { | ||
4641 | ctxt->eip = ctxt->_eip; | ||
4642 | ctxt->eflags &= ~EFLG_RF; | ||
4643 | goto done; | ||
4644 | } | ||
4586 | } | 4645 | } |
4587 | } | 4646 | } |
4588 | 4647 | ||
@@ -4616,13 +4675,18 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
4616 | 4675 | ||
4617 | special_insn: | 4676 | special_insn: |
4618 | 4677 | ||
4619 | if (unlikely(ctxt->guest_mode) && ctxt->intercept) { | 4678 | if (unlikely(ctxt->guest_mode) && (ctxt->d & Intercept)) { |
4620 | rc = emulator_check_intercept(ctxt, ctxt->intercept, | 4679 | rc = emulator_check_intercept(ctxt, ctxt->intercept, |
4621 | X86_ICPT_POST_MEMACCESS); | 4680 | X86_ICPT_POST_MEMACCESS); |
4622 | if (rc != X86EMUL_CONTINUE) | 4681 | if (rc != X86EMUL_CONTINUE) |
4623 | goto done; | 4682 | goto done; |
4624 | } | 4683 | } |
4625 | 4684 | ||
4685 | if (ctxt->rep_prefix && (ctxt->d & String)) | ||
4686 | ctxt->eflags |= EFLG_RF; | ||
4687 | else | ||
4688 | ctxt->eflags &= ~EFLG_RF; | ||
4689 | |||
4626 | if (ctxt->execute) { | 4690 | if (ctxt->execute) { |
4627 | if (ctxt->d & Fastop) { | 4691 | if (ctxt->d & Fastop) { |
4628 | void (*fop)(struct fastop *) = (void *)ctxt->execute; | 4692 | void (*fop)(struct fastop *) = (void *)ctxt->execute; |
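The EFLG_RF churn in this and the previous hunk implements the architectural resume-flag behavior: RF suppresses instruction-breakpoint #DB at the next instruction boundary. While a REP string instruction is restarted iteration by iteration, RF must read as 1 in the saved EFLAGS so an interrupted-and-resumed REP MOVS does not re-trigger a code breakpoint; once any instruction retires normally, RF is cleared again (the '&= ~EFLG_RF' added near the writeback tail). Assuming EFLG_RF is bit 16 of EFLAGS, the rule is just:

    if (ctxt->rep_prefix && (ctxt->d & String))
            ctxt->eflags |= EFLG_RF;    /* partially-done REP: keep RF set  */
    else
            ctxt->eflags &= ~EFLG_RF;   /* cleared on full retirement       */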
@@ -4657,8 +4721,9 @@ special_insn: | |||
4657 | break; | 4721 | break; |
4658 | case 0x90 ... 0x97: /* nop / xchg reg, rax */ | 4722 | case 0x90 ... 0x97: /* nop / xchg reg, rax */ |
4659 | if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) | 4723 | if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX)) |
4660 | break; | 4724 | ctxt->dst.type = OP_NONE; |
4661 | rc = em_xchg(ctxt); | 4725 | else |
4726 | rc = em_xchg(ctxt); | ||
4662 | break; | 4727 | break; |
4663 | case 0x98: /* cbw/cwde/cdqe */ | 4728 | case 0x98: /* cbw/cwde/cdqe */ |
4664 | switch (ctxt->op_bytes) { | 4729 | switch (ctxt->op_bytes) { |
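The 0x90 fix above matters only in 64-bit mode: a 32-bit register write zero-extends into the full 64-bit register, so sending "xchg eax,eax" (plain opcode 90h, architecturally a NOP) through the normal writeback path would clobber RAX's upper half. Setting dst.type = OP_NONE skips the writeback and preserves it:

    RAX = 0x1234567890abcdef
    90h as true NOP:            RAX = 0x1234567890abcdef   (correct)
    90h via 32-bit writeback:   RAX = 0x0000000090abcdef   (the old bug)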
@@ -4709,17 +4774,17 @@ special_insn: | |||
4709 | goto done; | 4774 | goto done; |
4710 | 4775 | ||
4711 | writeback: | 4776 | writeback: |
4712 | if (!(ctxt->d & NoWrite)) { | ||
4713 | rc = writeback(ctxt, &ctxt->dst); | ||
4714 | if (rc != X86EMUL_CONTINUE) | ||
4715 | goto done; | ||
4716 | } | ||
4717 | if (ctxt->d & SrcWrite) { | 4777 | if (ctxt->d & SrcWrite) { |
4718 | BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); | 4778 | BUG_ON(ctxt->src.type == OP_MEM || ctxt->src.type == OP_MEM_STR); |
4719 | rc = writeback(ctxt, &ctxt->src); | 4779 | rc = writeback(ctxt, &ctxt->src); |
4720 | if (rc != X86EMUL_CONTINUE) | 4780 | if (rc != X86EMUL_CONTINUE) |
4721 | goto done; | 4781 | goto done; |
4722 | } | 4782 | } |
4783 | if (!(ctxt->d & NoWrite)) { | ||
4784 | rc = writeback(ctxt, &ctxt->dst); | ||
4785 | if (rc != X86EMUL_CONTINUE) | ||
4786 | goto done; | ||
4787 | } | ||
4723 | 4788 | ||
4724 | /* | 4789 | /* |
4725 | * restore dst type in case the decoding will be reused | 4790 | * restore dst type in case the decoding will be reused |
@@ -4761,6 +4826,7 @@ writeback: | |||
4761 | } | 4826 | } |
4762 | goto done; /* skip rip writeback */ | 4827 | goto done; /* skip rip writeback */ |
4763 | } | 4828 | } |
4829 | ctxt->eflags &= ~EFLG_RF; | ||
4764 | } | 4830 | } |
4765 | 4831 | ||
4766 | ctxt->eip = ctxt->_eip; | 4832 | ctxt->eip = ctxt->_eip; |
@@ -4793,8 +4859,10 @@ twobyte_insn: | |||
4793 | ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); | 4859 | ops->get_dr(ctxt, ctxt->modrm_reg, &ctxt->dst.val); |
4794 | break; | 4860 | break; |
4795 | case 0x40 ... 0x4f: /* cmov */ | 4861 | case 0x40 ... 0x4f: /* cmov */ |
4796 | ctxt->dst.val = ctxt->dst.orig_val = ctxt->src.val; | 4862 | if (test_cc(ctxt->b, ctxt->eflags)) |
4797 | if (!test_cc(ctxt->b, ctxt->eflags)) | 4863 | ctxt->dst.val = ctxt->src.val; |
4864 | else if (ctxt->mode != X86EMUL_MODE_PROT64 || | ||
4865 | ctxt->op_bytes != 4) | ||
4798 | ctxt->dst.type = OP_NONE; /* no writeback */ | 4866 | ctxt->dst.type = OP_NONE; /* no writeback */ |
4799 | break; | 4867 | break; |
4800 | case 0x80 ... 0x8f: /* jnz rel, etc*/ | 4868 | case 0x80 ... 0x8f: /* jnz rel, etc*/ |
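The cmov rewrite above (paired with dropping the Mov decode flag for 0F 40-4F earlier in this section) handles the same zero-extension rule from the other direction: in 64-bit mode a CMOV with a 32-bit operand writes its destination even when the condition is false, clearing bits 63:32. So for mode == PROT64 && op_bytes == 4 the writeback must always happen, with dst.val still holding the old low 32 bits when the condition fails; only in the remaining cases may the writeback be skipped. For example:

    RBX = 0xffffffffffffffff, ZF = 1
    cmovne %ecx,%ebx  ->  RBX = 0x00000000ffffffff   (not taken, still writes)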
@@ -4818,8 +4886,8 @@ twobyte_insn: | |||
4818 | break; | 4886 | break; |
4819 | case 0xc3: /* movnti */ | 4887 | case 0xc3: /* movnti */ |
4820 | ctxt->dst.bytes = ctxt->op_bytes; | 4888 | ctxt->dst.bytes = ctxt->op_bytes; |
4821 | ctxt->dst.val = (ctxt->op_bytes == 4) ? (u32) ctxt->src.val : | 4889 | ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val : |
4822 | (u64) ctxt->src.val; | 4890 | (u32) ctxt->src.val; |
4823 | break; | 4891 | break; |
4824 | default: | 4892 | default: |
4825 | goto cannot_emulate; | 4893 | goto cannot_emulate; |
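The movnti fix inverts the operand-size test: the old "(op_bytes == 4) ? u32 : u64" treated everything except an explicit 32-bit operand as 64-bit, so a 66-prefixed movnti (op_bytes == 2, an encoding with no architectural 16-bit form) would store a full u64. Keying on op_bytes == 8 makes the 64-bit store the exception:

    op_bytes == 8  ->  store (u64)src   (REX.W form)
    op_bytes == 4  ->  store (u32)src
    op_bytes == 2  ->  store (u32)src   (previously, wrongly, u64)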
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 006911858174..3855103f71fd 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1451,7 +1451,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu) | |||
1451 | vcpu->arch.apic_arb_prio = 0; | 1451 | vcpu->arch.apic_arb_prio = 0; |
1452 | vcpu->arch.apic_attention = 0; | 1452 | vcpu->arch.apic_attention = 0; |
1453 | 1453 | ||
1454 | apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr=" | 1454 | apic_debug("%s: vcpu=%p, id=%d, base_msr=" |
1455 | "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__, | 1455 | "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__, |
1456 | vcpu, kvm_apic_id(apic), | 1456 | vcpu, kvm_apic_id(apic), |
1457 | vcpu->arch.apic_base, apic->base_address); | 1457 | vcpu->arch.apic_base, apic->base_address); |
@@ -1895,7 +1895,7 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) | |||
1895 | /* evaluate pending_events before reading the vector */ | 1895 | /* evaluate pending_events before reading the vector */ |
1896 | smp_rmb(); | 1896 | smp_rmb(); |
1897 | sipi_vector = apic->sipi_vector; | 1897 | sipi_vector = apic->sipi_vector; |
1898 | pr_debug("vcpu %d received sipi with vector # %x\n", | 1898 | apic_debug("vcpu %d received sipi with vector # %x\n", |
1899 | vcpu->vcpu_id, sipi_vector); | 1899 | vcpu->vcpu_id, sipi_vector); |
1900 | kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector); | 1900 | kvm_vcpu_deliver_sipi_vector(vcpu, sipi_vector); |
1901 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; | 1901 | vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; |
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h index 9d2e0ffcb190..5aaf35641768 100644 --- a/arch/x86/kvm/mmutrace.h +++ b/arch/x86/kvm/mmutrace.h | |||
@@ -22,7 +22,7 @@ | |||
22 | __entry->unsync = sp->unsync; | 22 | __entry->unsync = sp->unsync; |
23 | 23 | ||
24 | #define KVM_MMU_PAGE_PRINTK() ({ \ | 24 | #define KVM_MMU_PAGE_PRINTK() ({ \ |
25 | const char *ret = p->buffer + p->len; \ | 25 | const u32 saved_len = p->len; \ |
26 | static const char *access_str[] = { \ | 26 | static const char *access_str[] = { \ |
27 | "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ | 27 | "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ |
28 | }; \ | 28 | }; \ |
@@ -41,7 +41,7 @@ | |||
41 | role.nxe ? "" : "!", \ | 41 | role.nxe ? "" : "!", \ |
42 | __entry->root_count, \ | 42 | __entry->root_count, \ |
43 | __entry->unsync ? "unsync" : "sync", 0); \ | 43 | __entry->unsync ? "unsync" : "sync", 0); \ |
44 | ret; \ | 44 | p->buffer + saved_len; \ |
45 | }) | 45 | }) |
46 | 46 | ||
47 | #define kvm_mmu_trace_pferr_flags \ | 47 | #define kvm_mmu_trace_pferr_flags \ |
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c index cbecaa90399c..3dd6accb64ec 100644 --- a/arch/x86/kvm/pmu.c +++ b/arch/x86/kvm/pmu.c | |||
@@ -428,6 +428,15 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
428 | return 1; | 428 | return 1; |
429 | } | 429 | } |
430 | 430 | ||
431 | int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc) | ||
432 | { | ||
433 | struct kvm_pmu *pmu = &vcpu->arch.pmu; | ||
434 | bool fixed = pmc & (1u << 30); | ||
435 | pmc &= ~(3u << 30); | ||
436 | return (!fixed && pmc >= pmu->nr_arch_gp_counters) || | ||
437 | (fixed && pmc >= pmu->nr_arch_fixed_counters); | ||
438 | } | ||
439 | |||
431 | int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data) | 440 | int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data) |
432 | { | 441 | { |
433 | struct kvm_pmu *pmu = &vcpu->arch.pmu; | 442 | struct kvm_pmu *pmu = &vcpu->arch.pmu; |
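kvm_pmu_check_pmc factors out RDPMC's index validation: ECX bit 30 selects the fixed-counter space and the low bits index into it, so the helper strips bits 31:30 and compares against the matching counter count. Worked example, assuming a vPMU with two fixed and four general-purpose counters:

    ECX = 0x00000002  ->  GP counter 2      (valid,    2 < 4)
    ECX = 0x40000001  ->  fixed counter 1   (valid,    1 < 2)
    ECX = 0x40000002  ->  fixed counter 2   (rejected, 2 >= 2)

A nonzero return flags an out-of-range index so the RDPMC path can fail it with #GP.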
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b5e994ad0135..ddf742768ecf 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -486,14 +486,14 @@ static int is_external_interrupt(u32 info) | |||
486 | return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); | 486 | return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR); |
487 | } | 487 | } |
488 | 488 | ||
489 | static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | 489 | static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu) |
490 | { | 490 | { |
491 | struct vcpu_svm *svm = to_svm(vcpu); | 491 | struct vcpu_svm *svm = to_svm(vcpu); |
492 | u32 ret = 0; | 492 | u32 ret = 0; |
493 | 493 | ||
494 | if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) | 494 | if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) |
495 | ret |= KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS; | 495 | ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS; |
496 | return ret & mask; | 496 | return ret; |
497 | } | 497 | } |
498 | 498 | ||
499 | static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | 499 | static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
@@ -1415,7 +1415,16 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, | |||
1415 | var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; | 1415 | var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1; |
1416 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; | 1416 | var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1; |
1417 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; | 1417 | var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1; |
1418 | var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1; | 1418 | |
1419 | /* | ||
1420 | * AMD CPUs circa 2014 track the G bit for all segments except CS. | ||
1421 | * However, the SVM spec states that the G bit is not observed by the | ||
1422 | * CPU, and some VMware virtual CPUs drop the G bit for all segments. | ||
1423 | * So let's synthesize a legal G bit for all segments, this helps | ||
1424 | * running KVM nested. It also helps cross-vendor migration, because | ||
1425 | * Intel's vmentry has a check on the 'G' bit. | ||
1426 | */ | ||
1427 | var->g = s->limit > 0xfffff; | ||
1419 | 1428 | ||
1420 | /* | 1429 | /* |
1421 | * AMD's VMCB does not have an explicit unusable field, so emulate it | 1430 | * AMD's VMCB does not have an explicit unusable field, so emulate it |
@@ -1424,14 +1433,6 @@ static void svm_get_segment(struct kvm_vcpu *vcpu, | |||
1424 | var->unusable = !var->present || (var->type == 0); | 1433 | var->unusable = !var->present || (var->type == 0); |
1425 | 1434 | ||
1426 | switch (seg) { | 1435 | switch (seg) { |
1427 | case VCPU_SREG_CS: | ||
1428 | /* | ||
1429 | * SVM always stores 0 for the 'G' bit in the CS selector in | ||
1430 | * the VMCB on a VMEXIT. This hurts cross-vendor migration: | ||
1431 | * Intel's VMENTRY has a check on the 'G' bit. | ||
1432 | */ | ||
1433 | var->g = s->limit > 0xfffff; | ||
1434 | break; | ||
1435 | case VCPU_SREG_TR: | 1436 | case VCPU_SREG_TR: |
1436 | /* | 1437 | /* |
1437 | * Work around a bug where the busy flag in the tr selector | 1438 | * Work around a bug where the busy flag in the tr selector |
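The synthesized G bit above works because granularity is recoverable from the limit itself: with G=0 a segment limit is a byte count of at most 2^20 - 1 (0xFFFFF), so any cached limit above that can only have come from a page-granular (G=1) descriptor. "var->g = s->limit > 0xfffff" therefore always yields a value Intel's VM-entry consistency checks will accept, regardless of what the AMD or VMware CPU left in the attribute field:

    limit = 0x000fffff  ->  g = 0   (byte granularity is sufficient)
    limit = 0xffffffff  ->  g = 1   (only expressible with 4 KiB granularity)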
@@ -2116,22 +2117,27 @@ static void nested_svm_unmap(struct page *page) | |||
2116 | 2117 | ||
2117 | static int nested_svm_intercept_ioio(struct vcpu_svm *svm) | 2118 | static int nested_svm_intercept_ioio(struct vcpu_svm *svm) |
2118 | { | 2119 | { |
2119 | unsigned port; | 2120 | unsigned port, size, iopm_len; |
2120 | u8 val, bit; | 2121 | u16 val, mask; |
2122 | u8 start_bit; | ||
2121 | u64 gpa; | 2123 | u64 gpa; |
2122 | 2124 | ||
2123 | if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) | 2125 | if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT))) |
2124 | return NESTED_EXIT_HOST; | 2126 | return NESTED_EXIT_HOST; |
2125 | 2127 | ||
2126 | port = svm->vmcb->control.exit_info_1 >> 16; | 2128 | port = svm->vmcb->control.exit_info_1 >> 16; |
2129 | size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >> | ||
2130 | SVM_IOIO_SIZE_SHIFT; | ||
2127 | gpa = svm->nested.vmcb_iopm + (port / 8); | 2131 | gpa = svm->nested.vmcb_iopm + (port / 8); |
2128 | bit = port % 8; | 2132 | start_bit = port % 8; |
2129 | val = 0; | 2133 | iopm_len = (start_bit + size > 8) ? 2 : 1; |
2134 | mask = (0xf >> (4 - size)) << start_bit; | ||
2135 | val = 0; | ||
2130 | 2136 | ||
2131 | if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, 1)) | 2137 | if (kvm_read_guest(svm->vcpu.kvm, gpa, &val, iopm_len)) |
2132 | val &= (1 << bit); | 2138 | return NESTED_EXIT_DONE; |
2133 | 2139 | ||
2134 | return val ? NESTED_EXIT_DONE : NESTED_EXIT_HOST; | 2140 | return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST; |
2135 | } | 2141 | } |
2136 | 2142 | ||
2137 | static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) | 2143 | static int nested_svm_exit_handled_msr(struct vcpu_svm *svm) |
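The reworked nested_svm_intercept_ioio consults L1's I/O permission bitmap with the access's real width: an N-byte access to port P covers bits P..P+N-1, which may straddle a byte boundary, hence the 1-or-2-byte read (iopm_len) and the mask "(0xf >> (4 - size)) << start_bit". It also fixes the old logic, which masked val only when kvm_read_guest *failed*; a failed read now conservatively reports NESTED_EXIT_DONE. Worked example for a 2-byte access to port 0x3FD:

    start_bit = 0x3fd % 8 = 5,  5 + 2 <= 8  ->  iopm_len = 1
    mask = (0xf >> 2) << 5 = 0x60           ->  test bits 5-6 of that byte

whereas a 2-byte access to port 7 gives start_bit = 7 and 7 + 2 > 8, so two bytes are read and mask = 0x180 spans the boundary.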
@@ -4205,7 +4211,8 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, | |||
4205 | if (info->intercept == x86_intercept_cr_write) | 4211 | if (info->intercept == x86_intercept_cr_write) |
4206 | icpt_info.exit_code += info->modrm_reg; | 4212 | icpt_info.exit_code += info->modrm_reg; |
4207 | 4213 | ||
4208 | if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0) | 4214 | if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 || |
4215 | info->intercept == x86_intercept_clts) | ||
4209 | break; | 4216 | break; |
4210 | 4217 | ||
4211 | intercept = svm->nested.intercept; | 4218 | intercept = svm->nested.intercept; |
@@ -4250,14 +4257,14 @@ static int svm_check_intercept(struct kvm_vcpu *vcpu, | |||
4250 | u64 exit_info; | 4257 | u64 exit_info; |
4251 | u32 bytes; | 4258 | u32 bytes; |
4252 | 4259 | ||
4253 | exit_info = (vcpu->arch.regs[VCPU_REGS_RDX] & 0xffff) << 16; | ||
4254 | |||
4255 | if (info->intercept == x86_intercept_in || | 4260 | if (info->intercept == x86_intercept_in || |
4256 | info->intercept == x86_intercept_ins) { | 4261 | info->intercept == x86_intercept_ins) { |
4257 | exit_info |= SVM_IOIO_TYPE_MASK; | 4262 | exit_info = ((info->src_val & 0xffff) << 16) | |
4258 | bytes = info->src_bytes; | 4263 | SVM_IOIO_TYPE_MASK; |
4259 | } else { | ||
4260 | bytes = info->dst_bytes; | 4264 | bytes = info->dst_bytes; |
4265 | } else { | ||
4266 | exit_info = (info->dst_val & 0xffff) << 16; | ||
4267 | bytes = info->src_bytes; | ||
4261 | } | 4268 | } |
4262 | 4269 | ||
4263 | if (info->intercept == x86_intercept_outs || | 4270 | if (info->intercept == x86_intercept_outs || |
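The svm_check_intercept IN/OUT fix stops deriving the port from RDX: that is only correct for the DX-relative forms, while the immediate forms encode the port in the instruction itself. The emulator already presents the port as the source of IN/INS and the destination of OUT/OUTS, so exit_info_1 is now built from info->src_val or info->dst_val accordingly:

    in  %dx,%al     ->  port taken from DX   (the old RDX read happened to work)
    in  $0x71,%al   ->  port is an immediate (RDX is unrelated; old code was wrong)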
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h index 33574c95220d..e850a7d332be 100644 --- a/arch/x86/kvm/trace.h +++ b/arch/x86/kvm/trace.h | |||
@@ -721,10 +721,10 @@ TRACE_EVENT(kvm_emulate_insn, | |||
721 | ), | 721 | ), |
722 | 722 | ||
723 | TP_fast_assign( | 723 | TP_fast_assign( |
724 | __entry->rip = vcpu->arch.emulate_ctxt.fetch.start; | ||
725 | __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS); | 724 | __entry->csbase = kvm_x86_ops->get_segment_base(vcpu, VCPU_SREG_CS); |
726 | __entry->len = vcpu->arch.emulate_ctxt._eip | 725 | __entry->len = vcpu->arch.emulate_ctxt.fetch.ptr |
727 | - vcpu->arch.emulate_ctxt.fetch.start; | 726 | - vcpu->arch.emulate_ctxt.fetch.data; |
727 | __entry->rip = vcpu->arch.emulate_ctxt._eip - __entry->len; | ||
728 | memcpy(__entry->insn, | 728 | memcpy(__entry->insn, |
729 | vcpu->arch.emulate_ctxt.fetch.data, | 729 | vcpu->arch.emulate_ctxt.fetch.data, |
730 | 15); | 730 | 15); |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 801332edefc3..e618f34bde2d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -383,6 +383,9 @@ struct nested_vmx { | |||
383 | 383 | ||
384 | struct hrtimer preemption_timer; | 384 | struct hrtimer preemption_timer; |
385 | bool preemption_timer_expired; | 385 | bool preemption_timer_expired; |
386 | |||
387 | /* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */ | ||
388 | u64 vmcs01_debugctl; | ||
386 | }; | 389 | }; |
387 | 390 | ||
388 | #define POSTED_INTR_ON 0 | 391 | #define POSTED_INTR_ON 0 |
@@ -740,7 +743,6 @@ static u32 vmx_segment_access_rights(struct kvm_segment *var); | |||
740 | static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu); | 743 | static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu); |
741 | static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx); | 744 | static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx); |
742 | static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx); | 745 | static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx); |
743 | static bool vmx_mpx_supported(void); | ||
744 | 746 | ||
745 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); | 747 | static DEFINE_PER_CPU(struct vmcs *, vmxarea); |
746 | static DEFINE_PER_CPU(struct vmcs *, current_vmcs); | 748 | static DEFINE_PER_CPU(struct vmcs *, current_vmcs); |
@@ -820,7 +822,6 @@ static const u32 vmx_msr_index[] = { | |||
820 | #endif | 822 | #endif |
821 | MSR_EFER, MSR_TSC_AUX, MSR_STAR, | 823 | MSR_EFER, MSR_TSC_AUX, MSR_STAR, |
822 | }; | 824 | }; |
823 | #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index) | ||
824 | 825 | ||
825 | static inline bool is_page_fault(u32 intr_info) | 826 | static inline bool is_page_fault(u32 intr_info) |
826 | { | 827 | { |
@@ -1940,7 +1941,7 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | |||
1940 | vmcs_writel(GUEST_RFLAGS, rflags); | 1941 | vmcs_writel(GUEST_RFLAGS, rflags); |
1941 | } | 1942 | } |
1942 | 1943 | ||
1943 | static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | 1944 | static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu) |
1944 | { | 1945 | { |
1945 | u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); | 1946 | u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO); |
1946 | int ret = 0; | 1947 | int ret = 0; |
@@ -1950,7 +1951,7 @@ static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | |||
1950 | if (interruptibility & GUEST_INTR_STATE_MOV_SS) | 1951 | if (interruptibility & GUEST_INTR_STATE_MOV_SS) |
1951 | ret |= KVM_X86_SHADOW_INT_MOV_SS; | 1952 | ret |= KVM_X86_SHADOW_INT_MOV_SS; |
1952 | 1953 | ||
1953 | return ret & mask; | 1954 | return ret; |
1954 | } | 1955 | } |
1955 | 1956 | ||
1956 | static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) | 1957 | static void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask) |
@@ -2239,10 +2240,13 @@ static inline bool nested_vmx_allowed(struct kvm_vcpu *vcpu) | |||
2239 | * or other means. | 2240 | * or other means. |
2240 | */ | 2241 | */ |
2241 | static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high; | 2242 | static u32 nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high; |
2243 | static u32 nested_vmx_true_procbased_ctls_low; | ||
2242 | static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high; | 2244 | static u32 nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high; |
2243 | static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high; | 2245 | static u32 nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high; |
2244 | static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high; | 2246 | static u32 nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high; |
2247 | static u32 nested_vmx_true_exit_ctls_low; | ||
2245 | static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high; | 2248 | static u32 nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high; |
2249 | static u32 nested_vmx_true_entry_ctls_low; | ||
2246 | static u32 nested_vmx_misc_low, nested_vmx_misc_high; | 2250 | static u32 nested_vmx_misc_low, nested_vmx_misc_high; |
2247 | static u32 nested_vmx_ept_caps; | 2251 | static u32 nested_vmx_ept_caps; |
2248 | static __init void nested_vmx_setup_ctls_msrs(void) | 2252 | static __init void nested_vmx_setup_ctls_msrs(void) |
@@ -2265,21 +2269,13 @@ static __init void nested_vmx_setup_ctls_msrs(void) | |||
2265 | /* pin-based controls */ | 2269 | /* pin-based controls */ |
2266 | rdmsr(MSR_IA32_VMX_PINBASED_CTLS, | 2270 | rdmsr(MSR_IA32_VMX_PINBASED_CTLS, |
2267 | nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high); | 2271 | nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high); |
2268 | /* | ||
2269 | * According to the Intel spec, if bit 55 of VMX_BASIC is off (as it is | ||
2270 | * in our case), bits 1, 2 and 4 (i.e., 0x16) must be 1 in this MSR. | ||
2271 | */ | ||
2272 | nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; | 2272 | nested_vmx_pinbased_ctls_low |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR; |
2273 | nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK | | 2273 | nested_vmx_pinbased_ctls_high &= PIN_BASED_EXT_INTR_MASK | |
2274 | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS; | 2274 | PIN_BASED_NMI_EXITING | PIN_BASED_VIRTUAL_NMIS; |
2275 | nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | | 2275 | nested_vmx_pinbased_ctls_high |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR | |
2276 | PIN_BASED_VMX_PREEMPTION_TIMER; | 2276 | PIN_BASED_VMX_PREEMPTION_TIMER; |
2277 | 2277 | ||
2278 | /* | 2278 | /* exit controls */ |
2279 | * Exit controls | ||
2280 | * If bit 55 of VMX_BASIC is off, bits 0-8 and 10, 11, 13, 14, 16 and | ||
2281 | * 17 must be 1. | ||
2282 | */ | ||
2283 | rdmsr(MSR_IA32_VMX_EXIT_CTLS, | 2279 | rdmsr(MSR_IA32_VMX_EXIT_CTLS, |
2284 | nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high); | 2280 | nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high); |
2285 | nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; | 2281 | nested_vmx_exit_ctls_low = VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR; |
@@ -2296,10 +2292,13 @@ static __init void nested_vmx_setup_ctls_msrs(void) | |||
2296 | if (vmx_mpx_supported()) | 2292 | if (vmx_mpx_supported()) |
2297 | nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; | 2293 | nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; |
2298 | 2294 | ||
2295 | /* We support free control of debug control saving. */ | ||
2296 | nested_vmx_true_exit_ctls_low = nested_vmx_exit_ctls_low & | ||
2297 | ~VM_EXIT_SAVE_DEBUG_CONTROLS; | ||
2298 | |||
2299 | /* entry controls */ | 2299 | /* entry controls */ |
2300 | rdmsr(MSR_IA32_VMX_ENTRY_CTLS, | 2300 | rdmsr(MSR_IA32_VMX_ENTRY_CTLS, |
2301 | nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high); | 2301 | nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high); |
2302 | /* If bit 55 of VMX_BASIC is off, bits 0-8 and 12 must be 1. */ | ||
2303 | nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; | 2302 | nested_vmx_entry_ctls_low = VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR; |
2304 | nested_vmx_entry_ctls_high &= | 2303 | nested_vmx_entry_ctls_high &= |
2305 | #ifdef CONFIG_X86_64 | 2304 | #ifdef CONFIG_X86_64 |
@@ -2311,10 +2310,14 @@ static __init void nested_vmx_setup_ctls_msrs(void) | |||
2311 | if (vmx_mpx_supported()) | 2310 | if (vmx_mpx_supported()) |
2312 | nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; | 2311 | nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; |
2313 | 2312 | ||
2313 | /* We support free control of debug control loading. */ | ||
2314 | nested_vmx_true_entry_ctls_low = nested_vmx_entry_ctls_low & | ||
2315 | ~VM_ENTRY_LOAD_DEBUG_CONTROLS; | ||
2316 | |||
2314 | /* cpu-based controls */ | 2317 | /* cpu-based controls */ |
2315 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, | 2318 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS, |
2316 | nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high); | 2319 | nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high); |
2317 | nested_vmx_procbased_ctls_low = 0; | 2320 | nested_vmx_procbased_ctls_low = CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR; |
2318 | nested_vmx_procbased_ctls_high &= | 2321 | nested_vmx_procbased_ctls_high &= |
2319 | CPU_BASED_VIRTUAL_INTR_PENDING | | 2322 | CPU_BASED_VIRTUAL_INTR_PENDING | |
2320 | CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | | 2323 | CPU_BASED_VIRTUAL_NMI_PENDING | CPU_BASED_USE_TSC_OFFSETING | |
@@ -2335,7 +2338,12 @@ static __init void nested_vmx_setup_ctls_msrs(void) | |||
2335 | * can use it to avoid exits to L1 - even when L0 runs L2 | 2338 | * can use it to avoid exits to L1 - even when L0 runs L2 |
2336 | * without MSR bitmaps. | 2339 | * without MSR bitmaps. |
2337 | */ | 2340 | */ |
2338 | nested_vmx_procbased_ctls_high |= CPU_BASED_USE_MSR_BITMAPS; | 2341 | nested_vmx_procbased_ctls_high |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR | |
2342 | CPU_BASED_USE_MSR_BITMAPS; | ||
2343 | |||
2344 | /* We support free control of CR3 access interception. */ | ||
2345 | nested_vmx_true_procbased_ctls_low = nested_vmx_procbased_ctls_low & | ||
2346 | ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING); | ||
2339 | 2347 | ||
2340 | /* secondary cpu-based controls */ | 2348 | /* secondary cpu-based controls */ |
2341 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, | 2349 | rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2, |
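Advertising VMX_BASIC_TRUE_CTLS (bit 55 of IA32_VMX_BASIC, set in the MSR-read hunk below) obliges the nested interface to also expose the TRUE_{PINBASED,PROCBASED,EXIT,ENTRY}_CTLS MSRs, whose low words report which default-1 control bits a guest VMM may actually clear. Each *_true_*_ctls_low value is the regular allowed-0 mask minus the controls KVM is happy to see off; for VM-entry, assuming VM_ENTRY_LOAD_DEBUG_CONTROLS is the only relaxed bit:

    IA32_VMX_ENTRY_CTLS       low word:  default-1 bits, LOAD_DEBUG_CONTROLS set
    IA32_VMX_TRUE_ENTRY_CTLS  low word:  same, but LOAD_DEBUG_CONTROLS clear
                                         (L1 may leave debug-control loading off)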
@@ -2394,7 +2402,7 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
2394 | * guest, and the VMCS structure we give it - not about the | 2402 | * guest, and the VMCS structure we give it - not about the |
2395 | * VMX support of the underlying hardware. | 2403 | * VMX support of the underlying hardware. |
2396 | */ | 2404 | */ |
2397 | *pdata = VMCS12_REVISION | | 2405 | *pdata = VMCS12_REVISION | VMX_BASIC_TRUE_CTLS | |
2398 | ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | | 2406 | ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) | |
2399 | (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); | 2407 | (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT); |
2400 | break; | 2408 | break; |
@@ -2404,16 +2412,25 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
2404 | nested_vmx_pinbased_ctls_high); | 2412 | nested_vmx_pinbased_ctls_high); |
2405 | break; | 2413 | break; |
2406 | case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: | 2414 | case MSR_IA32_VMX_TRUE_PROCBASED_CTLS: |
2415 | *pdata = vmx_control_msr(nested_vmx_true_procbased_ctls_low, | ||
2416 | nested_vmx_procbased_ctls_high); | ||
2417 | break; | ||
2407 | case MSR_IA32_VMX_PROCBASED_CTLS: | 2418 | case MSR_IA32_VMX_PROCBASED_CTLS: |
2408 | *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low, | 2419 | *pdata = vmx_control_msr(nested_vmx_procbased_ctls_low, |
2409 | nested_vmx_procbased_ctls_high); | 2420 | nested_vmx_procbased_ctls_high); |
2410 | break; | 2421 | break; |
2411 | case MSR_IA32_VMX_TRUE_EXIT_CTLS: | 2422 | case MSR_IA32_VMX_TRUE_EXIT_CTLS: |
2423 | *pdata = vmx_control_msr(nested_vmx_true_exit_ctls_low, | ||
2424 | nested_vmx_exit_ctls_high); | ||
2425 | break; | ||
2412 | case MSR_IA32_VMX_EXIT_CTLS: | 2426 | case MSR_IA32_VMX_EXIT_CTLS: |
2413 | *pdata = vmx_control_msr(nested_vmx_exit_ctls_low, | 2427 | *pdata = vmx_control_msr(nested_vmx_exit_ctls_low, |
2414 | nested_vmx_exit_ctls_high); | 2428 | nested_vmx_exit_ctls_high); |
2415 | break; | 2429 | break; |
2416 | case MSR_IA32_VMX_TRUE_ENTRY_CTLS: | 2430 | case MSR_IA32_VMX_TRUE_ENTRY_CTLS: |
2431 | *pdata = vmx_control_msr(nested_vmx_true_entry_ctls_low, | ||
2432 | nested_vmx_entry_ctls_high); | ||
2433 | break; | ||
2417 | case MSR_IA32_VMX_ENTRY_CTLS: | 2434 | case MSR_IA32_VMX_ENTRY_CTLS: |
2418 | *pdata = vmx_control_msr(nested_vmx_entry_ctls_low, | 2435 | *pdata = vmx_control_msr(nested_vmx_entry_ctls_low, |
2419 | nested_vmx_entry_ctls_high); | 2436 | nested_vmx_entry_ctls_high); |
@@ -2442,7 +2459,7 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
2442 | *pdata = -1ULL; | 2459 | *pdata = -1ULL; |
2443 | break; | 2460 | break; |
2444 | case MSR_IA32_VMX_VMCS_ENUM: | 2461 | case MSR_IA32_VMX_VMCS_ENUM: |
2445 | *pdata = 0x1f; | 2462 | *pdata = 0x2e; /* highest index: VMX_PREEMPTION_TIMER_VALUE */ |
2446 | break; | 2463 | break; |
2447 | case MSR_IA32_VMX_PROCBASED_CTLS2: | 2464 | case MSR_IA32_VMX_PROCBASED_CTLS2: |
2448 | *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low, | 2465 | *pdata = vmx_control_msr(nested_vmx_secondary_ctls_low, |
@@ -3653,7 +3670,7 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu, | |||
3653 | vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); | 3670 | vmcs_write32(sf->ar_bytes, vmx_segment_access_rights(var)); |
3654 | 3671 | ||
3655 | out: | 3672 | out: |
3656 | vmx->emulation_required |= emulation_required(vcpu); | 3673 | vmx->emulation_required = emulation_required(vcpu); |
3657 | } | 3674 | } |
3658 | 3675 | ||
3659 | static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) | 3676 | static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l) |
@@ -4422,7 +4439,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx) | |||
4422 | vmx->vcpu.arch.pat = host_pat; | 4439 | vmx->vcpu.arch.pat = host_pat; |
4423 | } | 4440 | } |
4424 | 4441 | ||
4425 | for (i = 0; i < NR_VMX_MSR; ++i) { | 4442 | for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) { |
4426 | u32 index = vmx_msr_index[i]; | 4443 | u32 index = vmx_msr_index[i]; |
4427 | u32 data_low, data_high; | 4444 | u32 data_low, data_high; |
4428 | int j = vmx->nmsrs; | 4445 | int j = vmx->nmsrs; |
@@ -4873,7 +4890,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) | |||
4873 | if (!(vcpu->guest_debug & | 4890 | if (!(vcpu->guest_debug & |
4874 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { | 4891 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { |
4875 | vcpu->arch.dr6 &= ~15; | 4892 | vcpu->arch.dr6 &= ~15; |
4876 | vcpu->arch.dr6 |= dr6; | 4893 | vcpu->arch.dr6 |= dr6 | DR6_RTM; |
4877 | if (!(dr6 & ~DR6_RESERVED)) /* icebp */ | 4894 | if (!(dr6 & ~DR6_RESERVED)) /* icebp */ |
4878 | skip_emulated_instruction(vcpu); | 4895 | skip_emulated_instruction(vcpu); |
4879 | 4896 | ||
@@ -5039,7 +5056,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) | |||
5039 | reg = (exit_qualification >> 8) & 15; | 5056 | reg = (exit_qualification >> 8) & 15; |
5040 | switch ((exit_qualification >> 4) & 3) { | 5057 | switch ((exit_qualification >> 4) & 3) { |
5041 | case 0: /* mov to cr */ | 5058 | case 0: /* mov to cr */ |
5042 | val = kvm_register_read(vcpu, reg); | 5059 | val = kvm_register_readl(vcpu, reg); |
5043 | trace_kvm_cr_write(cr, val); | 5060 | trace_kvm_cr_write(cr, val); |
5044 | switch (cr) { | 5061 | switch (cr) { |
5045 | case 0: | 5062 | case 0: |
@@ -5056,7 +5073,7 @@ static int handle_cr(struct kvm_vcpu *vcpu) | |||
5056 | return 1; | 5073 | return 1; |
5057 | case 8: { | 5074 | case 8: { |
5058 | u8 cr8_prev = kvm_get_cr8(vcpu); | 5075 | u8 cr8_prev = kvm_get_cr8(vcpu); |
5059 | u8 cr8 = kvm_register_read(vcpu, reg); | 5076 | u8 cr8 = (u8)val; |
5060 | err = kvm_set_cr8(vcpu, cr8); | 5077 | err = kvm_set_cr8(vcpu, cr8); |
5061 | kvm_complete_insn_gp(vcpu, err); | 5078 | kvm_complete_insn_gp(vcpu, err); |
5062 | if (irqchip_in_kernel(vcpu->kvm)) | 5079 | if (irqchip_in_kernel(vcpu->kvm)) |
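The kvm_register_read -> kvm_register_readl conversions throughout this file close a class of bugs where stale upper halves of the 64-bit register cache leak into 32-bit guest state: outside 64-bit mode only the low 32 bits of a GPR are architecturally defined. The helper presumably masks along these lines (sketch; kvm_register_writel would be the symmetric store):

    static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, int reg)
    {
            unsigned long val = kvm_register_read(vcpu, reg);

            return is_64_bit_mode(vcpu) ? val : (u32)val;
    }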
@@ -5132,7 +5149,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) | |||
5132 | return 0; | 5149 | return 0; |
5133 | } else { | 5150 | } else { |
5134 | vcpu->arch.dr7 &= ~DR7_GD; | 5151 | vcpu->arch.dr7 &= ~DR7_GD; |
5135 | vcpu->arch.dr6 |= DR6_BD; | 5152 | vcpu->arch.dr6 |= DR6_BD | DR6_RTM; |
5136 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); | 5153 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); |
5137 | kvm_queue_exception(vcpu, DB_VECTOR); | 5154 | kvm_queue_exception(vcpu, DB_VECTOR); |
5138 | return 1; | 5155 | return 1; |
@@ -5165,7 +5182,7 @@ static int handle_dr(struct kvm_vcpu *vcpu) | |||
5165 | return 1; | 5182 | return 1; |
5166 | kvm_register_write(vcpu, reg, val); | 5183 | kvm_register_write(vcpu, reg, val); |
5167 | } else | 5184 | } else |
5168 | if (kvm_set_dr(vcpu, dr, kvm_register_read(vcpu, reg))) | 5185 | if (kvm_set_dr(vcpu, dr, kvm_register_readl(vcpu, reg))) |
5169 | return 1; | 5186 | return 1; |
5170 | 5187 | ||
5171 | skip_emulated_instruction(vcpu); | 5188 | skip_emulated_instruction(vcpu); |
@@ -5621,7 +5638,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) | |||
5621 | cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | 5638 | cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); |
5622 | intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; | 5639 | intr_window_requested = cpu_exec_ctrl & CPU_BASED_VIRTUAL_INTR_PENDING; |
5623 | 5640 | ||
5624 | while (!guest_state_valid(vcpu) && count-- != 0) { | 5641 | while (vmx->emulation_required && count-- != 0) { |
5625 | if (intr_window_requested && vmx_interrupt_allowed(vcpu)) | 5642 | if (intr_window_requested && vmx_interrupt_allowed(vcpu)) |
5626 | return handle_interrupt_window(&vmx->vcpu); | 5643 | return handle_interrupt_window(&vmx->vcpu); |
5627 | 5644 | ||
@@ -5655,7 +5672,6 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) | |||
5655 | schedule(); | 5672 | schedule(); |
5656 | } | 5673 | } |
5657 | 5674 | ||
5658 | vmx->emulation_required = emulation_required(vcpu); | ||
5659 | out: | 5675 | out: |
5660 | return ret; | 5676 | return ret; |
5661 | } | 5677 | } |
@@ -5754,22 +5770,27 @@ static void nested_free_vmcs02(struct vcpu_vmx *vmx, gpa_t vmptr) | |||
5754 | 5770 | ||
5755 | /* | 5771 | /* |
5756 | * Free all VMCSs saved for this vcpu, except the one pointed by | 5772 | * Free all VMCSs saved for this vcpu, except the one pointed by |
5757 | * vmx->loaded_vmcs. These include the VMCSs in vmcs02_pool (except the one | 5773 | * vmx->loaded_vmcs. We must be running L1, so vmx->loaded_vmcs |
5758 | * currently used, if running L2), and vmcs01 when running L2. | 5774 | * must be &vmx->vmcs01. |
5759 | */ | 5775 | */ |
5760 | static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) | 5776 | static void nested_free_all_saved_vmcss(struct vcpu_vmx *vmx) |
5761 | { | 5777 | { |
5762 | struct vmcs02_list *item, *n; | 5778 | struct vmcs02_list *item, *n; |
5779 | |||
5780 | WARN_ON(vmx->loaded_vmcs != &vmx->vmcs01); | ||
5763 | list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { | 5781 | list_for_each_entry_safe(item, n, &vmx->nested.vmcs02_pool, list) { |
5764 | if (vmx->loaded_vmcs != &item->vmcs02) | 5782 | /* |
5765 | free_loaded_vmcs(&item->vmcs02); | 5783 | * Something will leak if the above WARN triggers. Better than |
5784 | * a use-after-free. | ||
5785 | */ | ||
5786 | if (vmx->loaded_vmcs == &item->vmcs02) | ||
5787 | continue; | ||
5788 | |||
5789 | free_loaded_vmcs(&item->vmcs02); | ||
5766 | list_del(&item->list); | 5790 | list_del(&item->list); |
5767 | kfree(item); | 5791 | kfree(item); |
5792 | vmx->nested.vmcs02_num--; | ||
5768 | } | 5793 | } |
5769 | vmx->nested.vmcs02_num = 0; | ||
5770 | |||
5771 | if (vmx->loaded_vmcs != &vmx->vmcs01) | ||
5772 | free_loaded_vmcs(&vmx->vmcs01); | ||
5773 | } | 5794 | } |
5774 | 5795 | ||
5775 | /* | 5796 | /* |
@@ -5918,7 +5939,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, | |||
5918 | * which replaces physical address width with 32 | 5939 | * which replaces physical address width with 32 |
5919 | * | 5940 | * |
5920 | */ | 5941 | */ |
5921 | if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) { | 5942 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { |
5922 | nested_vmx_failInvalid(vcpu); | 5943 | nested_vmx_failInvalid(vcpu); |
5923 | skip_emulated_instruction(vcpu); | 5944 | skip_emulated_instruction(vcpu); |
5924 | return 1; | 5945 | return 1; |
@@ -5936,7 +5957,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, | |||
5936 | vmx->nested.vmxon_ptr = vmptr; | 5957 | vmx->nested.vmxon_ptr = vmptr; |
5937 | break; | 5958 | break; |
5938 | case EXIT_REASON_VMCLEAR: | 5959 | case EXIT_REASON_VMCLEAR: |
5939 | if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) { | 5960 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { |
5940 | nested_vmx_failValid(vcpu, | 5961 | nested_vmx_failValid(vcpu, |
5941 | VMXERR_VMCLEAR_INVALID_ADDRESS); | 5962 | VMXERR_VMCLEAR_INVALID_ADDRESS); |
5942 | skip_emulated_instruction(vcpu); | 5963 | skip_emulated_instruction(vcpu); |
@@ -5951,7 +5972,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason, | |||
5951 | } | 5972 | } |
5952 | break; | 5973 | break; |
5953 | case EXIT_REASON_VMPTRLD: | 5974 | case EXIT_REASON_VMPTRLD: |
5954 | if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) { | 5975 | if (!PAGE_ALIGNED(vmptr) || (vmptr >> maxphyaddr)) { |
5955 | nested_vmx_failValid(vcpu, | 5976 | nested_vmx_failValid(vcpu, |
5956 | VMXERR_VMPTRLD_INVALID_ADDRESS); | 5977 | VMXERR_VMPTRLD_INVALID_ADDRESS); |
5957 | skip_emulated_instruction(vcpu); | 5978 | skip_emulated_instruction(vcpu); |
@@ -6086,20 +6107,27 @@ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu) | |||
6086 | static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) | 6107 | static inline void nested_release_vmcs12(struct vcpu_vmx *vmx) |
6087 | { | 6108 | { |
6088 | u32 exec_control; | 6109 | u32 exec_control; |
6110 | if (vmx->nested.current_vmptr == -1ull) | ||
6111 | return; | ||
6112 | |||
6113 | /* current_vmptr and current_vmcs12 are always set/reset together */ | ||
6114 | if (WARN_ON(vmx->nested.current_vmcs12 == NULL)) | ||
6115 | return; | ||
6116 | |||
6089 | if (enable_shadow_vmcs) { | 6117 | if (enable_shadow_vmcs) { |
6090 | if (vmx->nested.current_vmcs12 != NULL) { | 6118 | /* copy to memory all shadowed fields in case |
6091 | /* copy to memory all shadowed fields in case | 6119 | they were modified */ |
6092 | they were modified */ | 6120 | copy_shadow_to_vmcs12(vmx); |
6093 | copy_shadow_to_vmcs12(vmx); | 6121 | vmx->nested.sync_shadow_vmcs = false; |
6094 | vmx->nested.sync_shadow_vmcs = false; | 6122 | exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); |
6095 | exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL); | 6123 | exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; |
6096 | exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS; | 6124 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); |
6097 | vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); | 6125 | vmcs_write64(VMCS_LINK_POINTER, -1ull); |
6098 | vmcs_write64(VMCS_LINK_POINTER, -1ull); | ||
6099 | } | ||
6100 | } | 6126 | } |
6101 | kunmap(vmx->nested.current_vmcs12_page); | 6127 | kunmap(vmx->nested.current_vmcs12_page); |
6102 | nested_release_page(vmx->nested.current_vmcs12_page); | 6128 | nested_release_page(vmx->nested.current_vmcs12_page); |
6129 | vmx->nested.current_vmptr = -1ull; | ||
6130 | vmx->nested.current_vmcs12 = NULL; | ||
6103 | } | 6131 | } |
6104 | 6132 | ||
6105 | /* | 6133 | /* |
@@ -6110,12 +6138,9 @@ static void free_nested(struct vcpu_vmx *vmx) | |||
6110 | { | 6138 | { |
6111 | if (!vmx->nested.vmxon) | 6139 | if (!vmx->nested.vmxon) |
6112 | return; | 6140 | return; |
6141 | |||
6113 | vmx->nested.vmxon = false; | 6142 | vmx->nested.vmxon = false; |
6114 | if (vmx->nested.current_vmptr != -1ull) { | 6143 | nested_release_vmcs12(vmx); |
6115 | nested_release_vmcs12(vmx); | ||
6116 | vmx->nested.current_vmptr = -1ull; | ||
6117 | vmx->nested.current_vmcs12 = NULL; | ||
6118 | } | ||
6119 | if (enable_shadow_vmcs) | 6144 | if (enable_shadow_vmcs) |
6120 | free_vmcs(vmx->nested.current_shadow_vmcs); | 6145 | free_vmcs(vmx->nested.current_shadow_vmcs); |
6121 | /* Unpin physical memory we referred to in current vmcs02 */ | 6146 | /* Unpin physical memory we referred to in current vmcs02 */ |
@@ -6152,11 +6177,8 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) | |||
6152 | if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) | 6177 | if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr)) |
6153 | return 1; | 6178 | return 1; |
6154 | 6179 | ||
6155 | if (vmptr == vmx->nested.current_vmptr) { | 6180 | if (vmptr == vmx->nested.current_vmptr) |
6156 | nested_release_vmcs12(vmx); | 6181 | nested_release_vmcs12(vmx); |
6157 | vmx->nested.current_vmptr = -1ull; | ||
6158 | vmx->nested.current_vmcs12 = NULL; | ||
6159 | } | ||
6160 | 6182 | ||
6161 | page = nested_get_page(vcpu, vmptr); | 6183 | page = nested_get_page(vcpu, vmptr); |
6162 | if (page == NULL) { | 6184 | if (page == NULL) { |
@@ -6384,7 +6406,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) | |||
6384 | return 1; | 6406 | return 1; |
6385 | 6407 | ||
6386 | /* Decode instruction info and find the field to read */ | 6408 | /* Decode instruction info and find the field to read */ |
6387 | field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); | 6409 | field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); |
6388 | /* Read the field, zero-extended to a u64 field_value */ | 6410 | /* Read the field, zero-extended to a u64 field_value */ |
6389 | if (!vmcs12_read_any(vcpu, field, &field_value)) { | 6411 | if (!vmcs12_read_any(vcpu, field, &field_value)) { |
6390 | nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); | 6412 | nested_vmx_failValid(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT); |
@@ -6397,7 +6419,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu) | |||
6397 | * on the guest's mode (32 or 64 bit), not on the given field's length. | 6419 | * on the guest's mode (32 or 64 bit), not on the given field's length. |
6398 | */ | 6420 | */ |
6399 | if (vmx_instruction_info & (1u << 10)) { | 6421 | if (vmx_instruction_info & (1u << 10)) { |
6400 | kvm_register_write(vcpu, (((vmx_instruction_info) >> 3) & 0xf), | 6422 | kvm_register_writel(vcpu, (((vmx_instruction_info) >> 3) & 0xf), |
6401 | field_value); | 6423 | field_value); |
6402 | } else { | 6424 | } else { |
6403 | if (get_vmx_mem_address(vcpu, exit_qualification, | 6425 | if (get_vmx_mem_address(vcpu, exit_qualification, |
@@ -6434,21 +6456,21 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu) | |||
6434 | return 1; | 6456 | return 1; |
6435 | 6457 | ||
6436 | if (vmx_instruction_info & (1u << 10)) | 6458 | if (vmx_instruction_info & (1u << 10)) |
6437 | field_value = kvm_register_read(vcpu, | 6459 | field_value = kvm_register_readl(vcpu, |
6438 | (((vmx_instruction_info) >> 3) & 0xf)); | 6460 | (((vmx_instruction_info) >> 3) & 0xf)); |
6439 | else { | 6461 | else { |
6440 | if (get_vmx_mem_address(vcpu, exit_qualification, | 6462 | if (get_vmx_mem_address(vcpu, exit_qualification, |
6441 | vmx_instruction_info, &gva)) | 6463 | vmx_instruction_info, &gva)) |
6442 | return 1; | 6464 | return 1; |
6443 | if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, | 6465 | if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, |
6444 | &field_value, (is_long_mode(vcpu) ? 8 : 4), &e)) { | 6466 | &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) { |
6445 | kvm_inject_page_fault(vcpu, &e); | 6467 | kvm_inject_page_fault(vcpu, &e); |
6446 | return 1; | 6468 | return 1; |
6447 | } | 6469 | } |
6448 | } | 6470 | } |
6449 | 6471 | ||
6450 | 6472 | ||
6451 | field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); | 6473 | field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf)); |
6452 | if (vmcs_field_readonly(field)) { | 6474 | if (vmcs_field_readonly(field)) { |
6453 | nested_vmx_failValid(vcpu, | 6475 | nested_vmx_failValid(vcpu, |
6454 | VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); | 6476 | VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT); |
@@ -6498,9 +6520,8 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu) | |||
6498 | skip_emulated_instruction(vcpu); | 6520 | skip_emulated_instruction(vcpu); |
6499 | return 1; | 6521 | return 1; |
6500 | } | 6522 | } |
6501 | if (vmx->nested.current_vmptr != -1ull) | ||
6502 | nested_release_vmcs12(vmx); | ||
6503 | 6523 | ||
6524 | nested_release_vmcs12(vmx); | ||
6504 | vmx->nested.current_vmptr = vmptr; | 6525 | vmx->nested.current_vmptr = vmptr; |
6505 | vmx->nested.current_vmcs12 = new_vmcs12; | 6526 | vmx->nested.current_vmcs12 = new_vmcs12; |
6506 | vmx->nested.current_vmcs12_page = page; | 6527 | vmx->nested.current_vmcs12_page = page; |
@@ -6571,7 +6592,7 @@ static int handle_invept(struct kvm_vcpu *vcpu) | |||
6571 | } | 6592 | } |
6572 | 6593 | ||
6573 | vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); | 6594 | vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO); |
6574 | type = kvm_register_read(vcpu, (vmx_instruction_info >> 28) & 0xf); | 6595 | type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf); |
6575 | 6596 | ||
6576 | types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; | 6597 | types = (nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6; |
6577 | 6598 | ||
@@ -6751,7 +6772,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu, | |||
6751 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 6772 | unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
6752 | int cr = exit_qualification & 15; | 6773 | int cr = exit_qualification & 15; |
6753 | int reg = (exit_qualification >> 8) & 15; | 6774 | int reg = (exit_qualification >> 8) & 15; |
6754 | unsigned long val = kvm_register_read(vcpu, reg); | 6775 | unsigned long val = kvm_register_readl(vcpu, reg); |
6755 | 6776 | ||
6756 | switch ((exit_qualification >> 4) & 3) { | 6777 | switch ((exit_qualification >> 4) & 3) { |
6757 | case 0: /* mov to cr */ | 6778 | case 0: /* mov to cr */ |
@@ -7112,7 +7133,26 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr) | |||
7112 | if (max_irr == -1) | 7133 | if (max_irr == -1) |
7113 | return; | 7134 | return; |
7114 | 7135 | ||
7115 | vmx_set_rvi(max_irr); | 7136 | /* |
7137 | * If a vmexit is needed, vmx_check_nested_events handles it. | ||
7138 | */ | ||
7139 | if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) | ||
7140 | return; | ||
7141 | |||
7142 | if (!is_guest_mode(vcpu)) { | ||
7143 | vmx_set_rvi(max_irr); | ||
7144 | return; | ||
7145 | } | ||
7146 | |||
7147 | /* | ||
7148 | * Fall back to pre-APICv interrupt injection since L2 | ||
7149 | * is run without virtual interrupt delivery. | ||
7150 | */ | ||
7151 | if (!kvm_event_needs_reinjection(vcpu) && | ||
7152 | vmx_interrupt_allowed(vcpu)) { | ||
7153 | kvm_queue_interrupt(vcpu, max_irr, false); | ||
7154 | vmx_inject_irq(vcpu); | ||
7155 | } | ||
7116 | } | 7156 | } |
7117 | 7157 | ||
7118 | static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) | 7158 | static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap) |
@@ -7520,13 +7560,31 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
7520 | vmx_complete_interrupts(vmx); | 7560 | vmx_complete_interrupts(vmx); |
7521 | } | 7561 | } |
7522 | 7562 | ||
7563 | static void vmx_load_vmcs01(struct kvm_vcpu *vcpu) | ||
7564 | { | ||
7565 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
7566 | int cpu; | ||
7567 | |||
7568 | if (vmx->loaded_vmcs == &vmx->vmcs01) | ||
7569 | return; | ||
7570 | |||
7571 | cpu = get_cpu(); | ||
7572 | vmx->loaded_vmcs = &vmx->vmcs01; | ||
7573 | vmx_vcpu_put(vcpu); | ||
7574 | vmx_vcpu_load(vcpu, cpu); | ||
7575 | vcpu->cpu = cpu; | ||
7576 | put_cpu(); | ||
7577 | } | ||
7578 | |||
7523 | static void vmx_free_vcpu(struct kvm_vcpu *vcpu) | 7579 | static void vmx_free_vcpu(struct kvm_vcpu *vcpu) |
7524 | { | 7580 | { |
7525 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 7581 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
7526 | 7582 | ||
7527 | free_vpid(vmx); | 7583 | free_vpid(vmx); |
7528 | free_loaded_vmcs(vmx->loaded_vmcs); | 7584 | leave_guest_mode(vcpu); |
7585 | vmx_load_vmcs01(vcpu); | ||
7529 | free_nested(vmx); | 7586 | free_nested(vmx); |
7587 | free_loaded_vmcs(vmx->loaded_vmcs); | ||
7530 | kfree(vmx->guest_msrs); | 7588 | kfree(vmx->guest_msrs); |
7531 | kvm_vcpu_uninit(vcpu); | 7589 | kvm_vcpu_uninit(vcpu); |
7532 | kmem_cache_free(kvm_vcpu_cache, vmx); | 7590 | kmem_cache_free(kvm_vcpu_cache, vmx); |
@@ -7548,6 +7606,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id) | |||
7548 | goto free_vcpu; | 7606 | goto free_vcpu; |
7549 | 7607 | ||
7550 | vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); | 7608 | vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL); |
7609 | BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0]) | ||
7610 | > PAGE_SIZE); | ||
7611 | |||
7551 | err = -ENOMEM; | 7612 | err = -ENOMEM; |
7552 | if (!vmx->guest_msrs) { | 7613 | if (!vmx->guest_msrs) { |
7553 | goto uninit_vcpu; | 7614 | goto uninit_vcpu; |
@@ -7836,7 +7897,13 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
7836 | vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); | 7897 | vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base); |
7837 | vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); | 7898 | vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base); |
7838 | 7899 | ||
7839 | vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); | 7900 | if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) { |
7901 | kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); | ||
7902 | vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl); | ||
7903 | } else { | ||
7904 | kvm_set_dr(vcpu, 7, vcpu->arch.dr7); | ||
7905 | vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl); | ||
7906 | } | ||
7840 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | 7907 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, |
7841 | vmcs12->vm_entry_intr_info_field); | 7908 | vmcs12->vm_entry_intr_info_field); |
7842 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, | 7909 | vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE, |
@@ -7846,7 +7913,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
7846 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, | 7913 | vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, |
7847 | vmcs12->guest_interruptibility_info); | 7914 | vmcs12->guest_interruptibility_info); |
7848 | vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); | 7915 | vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs); |
7849 | kvm_set_dr(vcpu, 7, vmcs12->guest_dr7); | ||
7850 | vmx_set_rflags(vcpu, vmcs12->guest_rflags); | 7916 | vmx_set_rflags(vcpu, vmcs12->guest_rflags); |
7851 | vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, | 7917 | vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, |
7852 | vmcs12->guest_pending_dbg_exceptions); | 7918 | vmcs12->guest_pending_dbg_exceptions); |
@@ -8113,14 +8179,14 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
8113 | } | 8179 | } |
8114 | 8180 | ||
8115 | if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) && | 8181 | if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) && |
8116 | !IS_ALIGNED(vmcs12->msr_bitmap, PAGE_SIZE)) { | 8182 | !PAGE_ALIGNED(vmcs12->msr_bitmap)) { |
8117 | /*TODO: Also verify bits beyond physical address width are 0*/ | 8183 | /*TODO: Also verify bits beyond physical address width are 0*/ |
8118 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); | 8184 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); |
8119 | return 1; | 8185 | return 1; |
8120 | } | 8186 | } |
8121 | 8187 | ||
8122 | if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && | 8188 | if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) && |
8123 | !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) { | 8189 | !PAGE_ALIGNED(vmcs12->apic_access_addr)) { |
8124 | /*TODO: Also verify bits beyond physical address width are 0*/ | 8190 | /*TODO: Also verify bits beyond physical address width are 0*/ |
8125 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); | 8191 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); |
8126 | return 1; | 8192 | return 1; |
@@ -8136,15 +8202,18 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
8136 | } | 8202 | } |
8137 | 8203 | ||
8138 | if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, | 8204 | if (!vmx_control_verify(vmcs12->cpu_based_vm_exec_control, |
8139 | nested_vmx_procbased_ctls_low, nested_vmx_procbased_ctls_high) || | 8205 | nested_vmx_true_procbased_ctls_low, |
8206 | nested_vmx_procbased_ctls_high) || | ||
8140 | !vmx_control_verify(vmcs12->secondary_vm_exec_control, | 8207 | !vmx_control_verify(vmcs12->secondary_vm_exec_control, |
8141 | nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) || | 8208 | nested_vmx_secondary_ctls_low, nested_vmx_secondary_ctls_high) || |
8142 | !vmx_control_verify(vmcs12->pin_based_vm_exec_control, | 8209 | !vmx_control_verify(vmcs12->pin_based_vm_exec_control, |
8143 | nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) || | 8210 | nested_vmx_pinbased_ctls_low, nested_vmx_pinbased_ctls_high) || |
8144 | !vmx_control_verify(vmcs12->vm_exit_controls, | 8211 | !vmx_control_verify(vmcs12->vm_exit_controls, |
8145 | nested_vmx_exit_ctls_low, nested_vmx_exit_ctls_high) || | 8212 | nested_vmx_true_exit_ctls_low, |
8213 | nested_vmx_exit_ctls_high) || | ||
8146 | !vmx_control_verify(vmcs12->vm_entry_controls, | 8214 | !vmx_control_verify(vmcs12->vm_entry_controls, |
8147 | nested_vmx_entry_ctls_low, nested_vmx_entry_ctls_high)) | 8215 | nested_vmx_true_entry_ctls_low, |
8216 | nested_vmx_entry_ctls_high)) | ||
8148 | { | 8217 | { |
8149 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); | 8218 | nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); |
8150 | return 1; | 8219 | return 1; |
@@ -8221,6 +8290,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) | |||
8221 | 8290 | ||
8222 | vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); | 8291 | vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET); |
8223 | 8292 | ||
8293 | if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) | ||
8294 | vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); | ||
8295 | |||
8224 | cpu = get_cpu(); | 8296 | cpu = get_cpu(); |
8225 | vmx->loaded_vmcs = vmcs02; | 8297 | vmx->loaded_vmcs = vmcs02; |
8226 | vmx_vcpu_put(vcpu); | 8298 | vmx_vcpu_put(vcpu); |
@@ -8398,7 +8470,6 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |||
8398 | vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); | 8470 | vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); |
8399 | vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); | 8471 | vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12); |
8400 | 8472 | ||
8401 | kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); | ||
8402 | vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); | 8473 | vmcs12->guest_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP); |
8403 | vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); | 8474 | vmcs12->guest_rip = kvm_register_read(vcpu, VCPU_REGS_RIP); |
8404 | vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); | 8475 | vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS); |
@@ -8477,9 +8548,13 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, | |||
8477 | (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | | 8548 | (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) | |
8478 | (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); | 8549 | (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE); |
8479 | 8550 | ||
8551 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS) { | ||
8552 | kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7); | ||
8553 | vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); | ||
8554 | } | ||
8555 | |||
8480 | /* TODO: These cannot have changed unless we have MSR bitmaps and | 8556 | /* TODO: These cannot have changed unless we have MSR bitmaps and |
8481 | * the relevant bit asks not to trap the change */ | 8557 | * the relevant bit asks not to trap the change */ |
8482 | vmcs12->guest_ia32_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL); | ||
8483 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) | 8558 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_PAT) |
8484 | vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); | 8559 | vmcs12->guest_ia32_pat = vmcs_read64(GUEST_IA32_PAT); |
8485 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) | 8560 | if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER) |
@@ -8670,7 +8745,6 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |||
8670 | unsigned long exit_qualification) | 8745 | unsigned long exit_qualification) |
8671 | { | 8746 | { |
8672 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 8747 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
8673 | int cpu; | ||
8674 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); | 8748 | struct vmcs12 *vmcs12 = get_vmcs12(vcpu); |
8675 | 8749 | ||
8676 | /* trying to cancel vmlaunch/vmresume is a bug */ | 8750 | /* trying to cancel vmlaunch/vmresume is a bug */ |
@@ -8695,12 +8769,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, | |||
8695 | vmcs12->vm_exit_intr_error_code, | 8769 | vmcs12->vm_exit_intr_error_code, |
8696 | KVM_ISA_VMX); | 8770 | KVM_ISA_VMX); |
8697 | 8771 | ||
8698 | cpu = get_cpu(); | 8772 | vmx_load_vmcs01(vcpu); |
8699 | vmx->loaded_vmcs = &vmx->vmcs01; | ||
8700 | vmx_vcpu_put(vcpu); | ||
8701 | vmx_vcpu_load(vcpu, cpu); | ||
8702 | vcpu->cpu = cpu; | ||
8703 | put_cpu(); | ||
8704 | 8773 | ||
8705 | vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS)); | 8774 | vm_entry_controls_init(vmx, vmcs_read32(VM_ENTRY_CONTROLS)); |
8706 | vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS)); | 8775 | vm_exit_controls_init(vmx, vmcs_read32(VM_EXIT_CONTROLS)); |
@@ -8890,7 +8959,7 @@ static int __init vmx_init(void) | |||
8890 | 8959 | ||
8891 | rdmsrl_safe(MSR_EFER, &host_efer); | 8960 | rdmsrl_safe(MSR_EFER, &host_efer); |
8892 | 8961 | ||
8893 | for (i = 0; i < NR_VMX_MSR; ++i) | 8962 | for (i = 0; i < ARRAY_SIZE(vmx_msr_index); ++i) |
8894 | kvm_define_shared_msr(i, vmx_msr_index[i]); | 8963 | kvm_define_shared_msr(i, vmx_msr_index[i]); |
8895 | 8964 | ||
8896 | vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); | 8965 | vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index f6449334ec45..b86d329b953a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -87,6 +87,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE); | |||
87 | 87 | ||
88 | static void update_cr8_intercept(struct kvm_vcpu *vcpu); | 88 | static void update_cr8_intercept(struct kvm_vcpu *vcpu); |
89 | static void process_nmi(struct kvm_vcpu *vcpu); | 89 | static void process_nmi(struct kvm_vcpu *vcpu); |
90 | static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags); | ||
90 | 91 | ||
91 | struct kvm_x86_ops *kvm_x86_ops; | 92 | struct kvm_x86_ops *kvm_x86_ops; |
92 | EXPORT_SYMBOL_GPL(kvm_x86_ops); | 93 | EXPORT_SYMBOL_GPL(kvm_x86_ops); |
@@ -211,6 +212,7 @@ static void shared_msr_update(unsigned slot, u32 msr) | |||
211 | 212 | ||
212 | void kvm_define_shared_msr(unsigned slot, u32 msr) | 213 | void kvm_define_shared_msr(unsigned slot, u32 msr) |
213 | { | 214 | { |
215 | BUG_ON(slot >= KVM_NR_SHARED_MSRS); | ||
214 | if (slot >= shared_msrs_global.nr) | 216 | if (slot >= shared_msrs_global.nr) |
215 | shared_msrs_global.nr = slot + 1; | 217 | shared_msrs_global.nr = slot + 1; |
216 | shared_msrs_global.msrs[slot] = msr; | 218 | shared_msrs_global.msrs[slot] = msr; |
@@ -310,6 +312,31 @@ static int exception_class(int vector) | |||
310 | return EXCPT_BENIGN; | 312 | return EXCPT_BENIGN; |
311 | } | 313 | } |
312 | 314 | ||
315 | #define EXCPT_FAULT 0 | ||
316 | #define EXCPT_TRAP 1 | ||
317 | #define EXCPT_ABORT 2 | ||
318 | #define EXCPT_INTERRUPT 3 | ||
319 | |||
320 | static int exception_type(int vector) | ||
321 | { | ||
322 | unsigned int mask; | ||
323 | |||
324 | if (WARN_ON(vector > 31 || vector == NMI_VECTOR)) | ||
325 | return EXCPT_INTERRUPT; | ||
326 | |||
327 | mask = 1 << vector; | ||
328 | |||
329 | /* #DB is trap, as instruction watchpoints are handled elsewhere */ | ||
330 | if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR))) | ||
331 | return EXCPT_TRAP; | ||
332 | |||
333 | if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR))) | ||
334 | return EXCPT_ABORT; | ||
335 | |||
336 | /* Reserved exceptions will result in fault */ | ||
337 | return EXCPT_FAULT; | ||
338 | } | ||
339 | |||
313 | static void kvm_multiple_exception(struct kvm_vcpu *vcpu, | 340 | static void kvm_multiple_exception(struct kvm_vcpu *vcpu, |
314 | unsigned nr, bool has_error, u32 error_code, | 341 | unsigned nr, bool has_error, u32 error_code, |
315 | bool reinject) | 342 | bool reinject) |
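
The exception_type() helper introduced here lets later hunks set RFLAGS.RF only for fault-like exceptions. For reference, a standalone sketch of the same classification; the vector numbers follow the architectural table, and the main() harness is illustrative only:

        /* Minimal model of exception_type(); WARN_ON dropped. */
        #include <stdio.h>

        #define DB_VECTOR   1
        #define NMI_VECTOR  2
        #define BP_VECTOR   3
        #define OF_VECTOR   4
        #define DF_VECTOR   8
        #define MC_VECTOR  18

        enum { EXCPT_FAULT, EXCPT_TRAP, EXCPT_ABORT, EXCPT_INTERRUPT };

        static int exception_type(int vector)
        {
                unsigned int mask;

                if (vector > 31 || vector == NMI_VECTOR)
                        return EXCPT_INTERRUPT;

                mask = 1 << vector;
                /* #DB, #BP and #OF are traps: RIP already points past them */
                if (mask & ((1 << DB_VECTOR) | (1 << BP_VECTOR) | (1 << OF_VECTOR)))
                        return EXCPT_TRAP;
                if (mask & ((1 << DF_VECTOR) | (1 << MC_VECTOR)))
                        return EXCPT_ABORT;
                return EXCPT_FAULT;     /* everything else, reserved included */
        }

        int main(void)
        {
                printf("#BP -> %d (trap), #GP -> %d (fault)\n",
                       exception_type(BP_VECTOR), exception_type(13));
                return 0;
        }
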
@@ -758,6 +785,15 @@ static void kvm_update_dr7(struct kvm_vcpu *vcpu) | |||
758 | vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; | 785 | vcpu->arch.switch_db_regs |= KVM_DEBUGREG_BP_ENABLED; |
759 | } | 786 | } |
760 | 787 | ||
788 | static u64 kvm_dr6_fixed(struct kvm_vcpu *vcpu) | ||
789 | { | ||
790 | u64 fixed = DR6_FIXED_1; | ||
791 | |||
792 | if (!guest_cpuid_has_rtm(vcpu)) | ||
793 | fixed |= DR6_RTM; | ||
794 | return fixed; | ||
795 | } | ||
796 | |||
761 | static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) | 797 | static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) |
762 | { | 798 | { |
763 | switch (dr) { | 799 | switch (dr) { |
@@ -773,7 +809,7 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) | |||
773 | case 6: | 809 | case 6: |
774 | if (val & 0xffffffff00000000ULL) | 810 | if (val & 0xffffffff00000000ULL) |
775 | return -1; /* #GP */ | 811 | return -1; /* #GP */ |
776 | vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; | 812 | vcpu->arch.dr6 = (val & DR6_VOLATILE) | kvm_dr6_fixed(vcpu); |
777 | kvm_update_dr6(vcpu); | 813 | kvm_update_dr6(vcpu); |
778 | break; | 814 | break; |
779 | case 5: | 815 | case 5: |
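
kvm_dr6_fixed() makes DR6 bit 16 (RTM) read as 1 whenever the guest's CPUID lacks RTM, so TSX-aware guests can observe it cleared. A minimal userspace model of the resulting DR6 write; the constant values are quoted from our reading of the x86 KVM headers and should be treated as part of the sketch:

        #include <stdint.h>
        #include <stdio.h>

        #define DR6_FIXED_1  0xfffe0ff0ULL  /* reserved-1 bits, RTM excluded */
        #define DR6_RTM      0x00010000ULL  /* bit 16, clearable only with RTM */
        #define DR6_VOLATILE 0x0001e00fULL  /* guest-writable bits */

        static uint64_t dr6_after_write(uint64_t val, int guest_has_rtm)
        {
                uint64_t fixed = DR6_FIXED_1;

                if (!guest_has_rtm)
                        fixed |= DR6_RTM;
                return (val & DR6_VOLATILE) | fixed;
        }

        int main(void)
        {
                /* no RTM: 0xffff0ff0, i.e. the DR6_INIT value used at reset */
                printf("no RTM: %#llx\n", (unsigned long long)dr6_after_write(0, 0));
                printf("RTM:    %#llx\n", (unsigned long long)dr6_after_write(0, 1));
                return 0;
        }
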
@@ -1215,6 +1251,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
1215 | unsigned long flags; | 1251 | unsigned long flags; |
1216 | s64 usdiff; | 1252 | s64 usdiff; |
1217 | bool matched; | 1253 | bool matched; |
1254 | bool already_matched; | ||
1218 | u64 data = msr->data; | 1255 | u64 data = msr->data; |
1219 | 1256 | ||
1220 | raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); | 1257 | raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags); |
@@ -1279,6 +1316,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
1279 | pr_debug("kvm: adjusted tsc offset by %llu\n", delta); | 1316 | pr_debug("kvm: adjusted tsc offset by %llu\n", delta); |
1280 | } | 1317 | } |
1281 | matched = true; | 1318 | matched = true; |
1319 | already_matched = (vcpu->arch.this_tsc_generation == kvm->arch.cur_tsc_generation); | ||
1282 | } else { | 1320 | } else { |
1283 | /* | 1321 | /* |
1284 | * We split periods of matched TSC writes into generations. | 1322 | * We split periods of matched TSC writes into generations. |
@@ -1294,7 +1332,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
1294 | kvm->arch.cur_tsc_write = data; | 1332 | kvm->arch.cur_tsc_write = data; |
1295 | kvm->arch.cur_tsc_offset = offset; | 1333 | kvm->arch.cur_tsc_offset = offset; |
1296 | matched = false; | 1334 | matched = false; |
1297 | pr_debug("kvm: new tsc generation %u, clock %llu\n", | 1335 | pr_debug("kvm: new tsc generation %llu, clock %llu\n", |
1298 | kvm->arch.cur_tsc_generation, data); | 1336 | kvm->arch.cur_tsc_generation, data); |
1299 | } | 1337 | } |
1300 | 1338 | ||
@@ -1319,10 +1357,11 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr) | |||
1319 | raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); | 1357 | raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags); |
1320 | 1358 | ||
1321 | spin_lock(&kvm->arch.pvclock_gtod_sync_lock); | 1359 | spin_lock(&kvm->arch.pvclock_gtod_sync_lock); |
1322 | if (matched) | 1360 | if (!matched) { |
1323 | kvm->arch.nr_vcpus_matched_tsc++; | ||
1324 | else | ||
1325 | kvm->arch.nr_vcpus_matched_tsc = 0; | 1361 | kvm->arch.nr_vcpus_matched_tsc = 0; |
1362 | } else if (!already_matched) { | ||
1363 | kvm->arch.nr_vcpus_matched_tsc++; | ||
1364 | } | ||
1326 | 1365 | ||
1327 | kvm_track_tsc_matching(vcpu); | 1366 | kvm_track_tsc_matching(vcpu); |
1328 | spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); | 1367 | spin_unlock(&kvm->arch.pvclock_gtod_sync_lock); |
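
The new already_matched flag keeps a vCPU that re-writes a matching TSC inside the same generation from being counted twice in nr_vcpus_matched_tsc. A toy model of the corrected bookkeeping (field and function names are illustrative, not the kernel's):

        #include <stdio.h>

        struct tsc_state {
                unsigned long long cur_gen;
                int nr_matched;
        };

        static void record_tsc_write(struct tsc_state *s,
                                     unsigned long long vcpu_gen, int matched)
        {
                if (!matched)
                        s->nr_matched = 0;      /* new generation starts */
                else if (vcpu_gen != s->cur_gen)
                        s->nr_matched++;        /* first match in this gen */
                /* else: same vCPU matching again, do not double count */
        }

        int main(void)
        {
                struct tsc_state s = { .cur_gen = 1, .nr_matched = 0 };

                record_tsc_write(&s, 0, 1);     /* vCPU from gen 0 matches */
                record_tsc_write(&s, 1, 1);     /* already in gen 1: no bump */
                printf("matched vcpus: %d\n", s.nr_matched);    /* 1 */
                return 0;
        }
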
@@ -2032,6 +2071,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2032 | data &= ~(u64)0x40; /* ignore flush filter disable */ | 2071 | data &= ~(u64)0x40; /* ignore flush filter disable */ |
2033 | data &= ~(u64)0x100; /* ignore ignne emulation enable */ | 2072 | data &= ~(u64)0x100; /* ignore ignne emulation enable */ |
2034 | data &= ~(u64)0x8; /* ignore TLB cache disable */ | 2073 | data &= ~(u64)0x8; /* ignore TLB cache disable */ |
2074 | data &= ~(u64)0x40000; /* ignore MC status write enable */ | ||
2035 | if (data != 0) { | 2075 | if (data != 0) { |
2036 | vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", | 2076 | vcpu_unimpl(vcpu, "unimplemented HWCR wrmsr: 0x%llx\n", |
2037 | data); | 2077 | data); |
@@ -2974,9 +3014,7 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu, | |||
2974 | vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; | 3014 | vcpu->arch.interrupt.pending && !vcpu->arch.interrupt.soft; |
2975 | events->interrupt.nr = vcpu->arch.interrupt.nr; | 3015 | events->interrupt.nr = vcpu->arch.interrupt.nr; |
2976 | events->interrupt.soft = 0; | 3016 | events->interrupt.soft = 0; |
2977 | events->interrupt.shadow = | 3017 | events->interrupt.shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); |
2978 | kvm_x86_ops->get_interrupt_shadow(vcpu, | ||
2979 | KVM_X86_SHADOW_INT_MOV_SS | KVM_X86_SHADOW_INT_STI); | ||
2980 | 3018 | ||
2981 | events->nmi.injected = vcpu->arch.nmi_injected; | 3019 | events->nmi.injected = vcpu->arch.nmi_injected; |
2982 | events->nmi.pending = vcpu->arch.nmi_pending != 0; | 3020 | events->nmi.pending = vcpu->arch.nmi_pending != 0; |
@@ -4082,7 +4120,8 @@ static int kvm_read_guest_virt_helper(gva_t addr, void *val, unsigned int bytes, | |||
4082 | 4120 | ||
4083 | if (gpa == UNMAPPED_GVA) | 4121 | if (gpa == UNMAPPED_GVA) |
4084 | return X86EMUL_PROPAGATE_FAULT; | 4122 | return X86EMUL_PROPAGATE_FAULT; |
4085 | ret = kvm_read_guest(vcpu->kvm, gpa, data, toread); | 4123 | ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, data, |
4124 | offset, toread); | ||
4086 | if (ret < 0) { | 4125 | if (ret < 0) { |
4087 | r = X86EMUL_IO_NEEDED; | 4126 | r = X86EMUL_IO_NEEDED; |
4088 | goto out; | 4127 | goto out; |
@@ -4103,10 +4142,24 @@ static int kvm_fetch_guest_virt(struct x86_emulate_ctxt *ctxt, | |||
4103 | { | 4142 | { |
4104 | struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); | 4143 | struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); |
4105 | u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; | 4144 | u32 access = (kvm_x86_ops->get_cpl(vcpu) == 3) ? PFERR_USER_MASK : 0; |
4145 | unsigned offset; | ||
4146 | int ret; | ||
4106 | 4147 | ||
4107 | return kvm_read_guest_virt_helper(addr, val, bytes, vcpu, | 4148 | /* Inline kvm_read_guest_virt_helper for speed. */ |
4108 | access | PFERR_FETCH_MASK, | 4149 | gpa_t gpa = vcpu->arch.walk_mmu->gva_to_gpa(vcpu, addr, access|PFERR_FETCH_MASK, |
4109 | exception); | 4150 | exception); |
4151 | if (unlikely(gpa == UNMAPPED_GVA)) | ||
4152 | return X86EMUL_PROPAGATE_FAULT; | ||
4153 | |||
4154 | offset = addr & (PAGE_SIZE-1); | ||
4155 | if (WARN_ON(offset + bytes > PAGE_SIZE)) | ||
4156 | bytes = (unsigned)PAGE_SIZE - offset; | ||
4157 | ret = kvm_read_guest_page(vcpu->kvm, gpa >> PAGE_SHIFT, val, | ||
4158 | offset, bytes); | ||
4159 | if (unlikely(ret < 0)) | ||
4160 | return X86EMUL_IO_NEEDED; | ||
4161 | |||
4162 | return X86EMUL_CONTINUE; | ||
4110 | } | 4163 | } |
4111 | 4164 | ||
4112 | int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, | 4165 | int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt, |
@@ -4730,7 +4783,6 @@ static void emulator_set_segment(struct x86_emulate_ctxt *ctxt, u16 selector, | |||
4730 | if (desc->g) | 4783 | if (desc->g) |
4731 | var.limit = (var.limit << 12) | 0xfff; | 4784 | var.limit = (var.limit << 12) | 0xfff; |
4732 | var.type = desc->type; | 4785 | var.type = desc->type; |
4733 | var.present = desc->p; | ||
4734 | var.dpl = desc->dpl; | 4786 | var.dpl = desc->dpl; |
4735 | var.db = desc->d; | 4787 | var.db = desc->d; |
4736 | var.s = desc->s; | 4788 | var.s = desc->s; |
@@ -4762,6 +4814,12 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt, | |||
4762 | return kvm_set_msr(emul_to_vcpu(ctxt), &msr); | 4814 | return kvm_set_msr(emul_to_vcpu(ctxt), &msr); |
4763 | } | 4815 | } |
4764 | 4816 | ||
4817 | static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt, | ||
4818 | u32 pmc) | ||
4819 | { | ||
4820 | return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc); | ||
4821 | } | ||
4822 | |||
4765 | static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, | 4823 | static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt, |
4766 | u32 pmc, u64 *pdata) | 4824 | u32 pmc, u64 *pdata) |
4767 | { | 4825 | { |
@@ -4838,6 +4896,7 @@ static const struct x86_emulate_ops emulate_ops = { | |||
4838 | .set_dr = emulator_set_dr, | 4896 | .set_dr = emulator_set_dr, |
4839 | .set_msr = emulator_set_msr, | 4897 | .set_msr = emulator_set_msr, |
4840 | .get_msr = emulator_get_msr, | 4898 | .get_msr = emulator_get_msr, |
4899 | .check_pmc = emulator_check_pmc, | ||
4841 | .read_pmc = emulator_read_pmc, | 4900 | .read_pmc = emulator_read_pmc, |
4842 | .halt = emulator_halt, | 4901 | .halt = emulator_halt, |
4843 | .wbinvd = emulator_wbinvd, | 4902 | .wbinvd = emulator_wbinvd, |
@@ -4850,7 +4909,7 @@ static const struct x86_emulate_ops emulate_ops = { | |||
4850 | 4909 | ||
4851 | static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) | 4910 | static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) |
4852 | { | 4911 | { |
4853 | u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask); | 4912 | u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu); |
4854 | /* | 4913 | /* |
4855 | * an sti; sti; sequence only disables interrupts for the first | 4914 | * an sti; sti; sequence only disables interrupts for the first |
4856 | * instruction. So, if the last instruction, be it emulated or | 4915 | * instruction. So, if the last instruction, be it emulated or |
@@ -4858,8 +4917,13 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask) | |||
4858 | * means that the last instruction is an sti. We should not | 4917 | * means that the last instruction is an sti. We should not |
4859 | * leave the flag on in this case. The same goes for mov ss | 4918 | * leave the flag on in this case. The same goes for mov ss |
4860 | */ | 4919 | */ |
4861 | if (!(int_shadow & mask)) | 4920 | if (int_shadow & mask) |
4921 | mask = 0; | ||
4922 | if (unlikely(int_shadow || mask)) { | ||
4862 | kvm_x86_ops->set_interrupt_shadow(vcpu, mask); | 4923 | kvm_x86_ops->set_interrupt_shadow(vcpu, mask); |
4924 | if (!mask) | ||
4925 | kvm_make_request(KVM_REQ_EVENT, vcpu); | ||
4926 | } | ||
4863 | } | 4927 | } |
4864 | 4928 | ||
4865 | static void inject_emulated_exception(struct kvm_vcpu *vcpu) | 4929 | static void inject_emulated_exception(struct kvm_vcpu *vcpu) |
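
toggle_interruptibility() now skips the shadow update entirely when both the old and the new shadow state are empty, and requests event re-injection whenever a shadow is dropped. A compact userspace model of that control flow, with globals standing in for the VMCS field and the request bitmap:

        #include <stdbool.h>
        #include <stdio.h>

        static unsigned int hw_shadow;          /* stands in for the VMCS field */
        static bool event_requested;

        static void toggle_interruptibility(unsigned int int_shadow,
                                            unsigned int mask)
        {
                if (int_shadow & mask)          /* sti while already shadowed */
                        mask = 0;
                if (int_shadow || mask) {       /* only touch hardware if needed */
                        hw_shadow = mask;
                        if (!mask)              /* shadow dropped: re-check events */
                                event_requested = true;
                }
        }

        int main(void)
        {
                toggle_interruptibility(0, 0);  /* common case: no VMCS write */
                toggle_interruptibility(1, 1);  /* sti; sti; drops the shadow */
                printf("shadow=%u event=%d\n", hw_shadow, event_requested);
                return 0;
        }
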
@@ -4874,19 +4938,6 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu) | |||
4874 | kvm_queue_exception(vcpu, ctxt->exception.vector); | 4938 | kvm_queue_exception(vcpu, ctxt->exception.vector); |
4875 | } | 4939 | } |
4876 | 4940 | ||
4877 | static void init_decode_cache(struct x86_emulate_ctxt *ctxt) | ||
4878 | { | ||
4879 | memset(&ctxt->opcode_len, 0, | ||
4880 | (void *)&ctxt->_regs - (void *)&ctxt->opcode_len); | ||
4881 | |||
4882 | ctxt->fetch.start = 0; | ||
4883 | ctxt->fetch.end = 0; | ||
4884 | ctxt->io_read.pos = 0; | ||
4885 | ctxt->io_read.end = 0; | ||
4886 | ctxt->mem_read.pos = 0; | ||
4887 | ctxt->mem_read.end = 0; | ||
4888 | } | ||
4889 | |||
4890 | static void init_emulate_ctxt(struct kvm_vcpu *vcpu) | 4941 | static void init_emulate_ctxt(struct kvm_vcpu *vcpu) |
4891 | { | 4942 | { |
4892 | struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; | 4943 | struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt; |
@@ -5085,23 +5136,22 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7, | |||
5085 | return dr6; | 5136 | return dr6; |
5086 | } | 5137 | } |
5087 | 5138 | ||
5088 | static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r) | 5139 | static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r) |
5089 | { | 5140 | { |
5090 | struct kvm_run *kvm_run = vcpu->run; | 5141 | struct kvm_run *kvm_run = vcpu->run; |
5091 | 5142 | ||
5092 | /* | 5143 | /* |
5093 | * Use the "raw" value to see if TF was passed to the processor. | 5144 | * rflags is the old, "raw" value of the flags. The new value has |
5094 | * Note that the new value of the flags has not been saved yet. | 5145 | * not been saved yet. |
5095 | * | 5146 | * |
5096 | * This is correct even for TF set by the guest, because "the | 5147 | * This is correct even for TF set by the guest, because "the |
5097 | * processor will not generate this exception after the instruction | 5148 | * processor will not generate this exception after the instruction |
5098 | * that sets the TF flag". | 5149 | * that sets the TF flag". |
5099 | */ | 5150 | */ |
5100 | unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); | ||
5101 | |||
5102 | if (unlikely(rflags & X86_EFLAGS_TF)) { | 5151 | if (unlikely(rflags & X86_EFLAGS_TF)) { |
5103 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { | 5152 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) { |
5104 | kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1; | 5153 | kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1 | |
5154 | DR6_RTM; | ||
5105 | kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; | 5155 | kvm_run->debug.arch.pc = vcpu->arch.singlestep_rip; |
5106 | kvm_run->debug.arch.exception = DB_VECTOR; | 5156 | kvm_run->debug.arch.exception = DB_VECTOR; |
5107 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | 5157 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
@@ -5114,7 +5164,7 @@ static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r) | |||
5114 | * cleared by the processor". | 5164 | * cleared by the processor". |
5115 | */ | 5165 | */ |
5116 | vcpu->arch.dr6 &= ~15; | 5166 | vcpu->arch.dr6 &= ~15; |
5117 | vcpu->arch.dr6 |= DR6_BS; | 5167 | vcpu->arch.dr6 |= DR6_BS | DR6_RTM; |
5118 | kvm_queue_exception(vcpu, DB_VECTOR); | 5168 | kvm_queue_exception(vcpu, DB_VECTOR); |
5119 | } | 5169 | } |
5120 | } | 5170 | } |
@@ -5133,7 +5183,7 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) | |||
5133 | vcpu->arch.eff_db); | 5183 | vcpu->arch.eff_db); |
5134 | 5184 | ||
5135 | if (dr6 != 0) { | 5185 | if (dr6 != 0) { |
5136 | kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; | 5186 | kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1 | DR6_RTM; |
5137 | kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + | 5187 | kvm_run->debug.arch.pc = kvm_rip_read(vcpu) + |
5138 | get_segment_base(vcpu, VCPU_SREG_CS); | 5188 | get_segment_base(vcpu, VCPU_SREG_CS); |
5139 | 5189 | ||
@@ -5144,14 +5194,15 @@ static bool kvm_vcpu_check_breakpoint(struct kvm_vcpu *vcpu, int *r) | |||
5144 | } | 5194 | } |
5145 | } | 5195 | } |
5146 | 5196 | ||
5147 | if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK)) { | 5197 | if (unlikely(vcpu->arch.dr7 & DR7_BP_EN_MASK) && |
5198 | !(kvm_get_rflags(vcpu) & X86_EFLAGS_RF)) { | ||
5148 | dr6 = kvm_vcpu_check_hw_bp(eip, 0, | 5199 | dr6 = kvm_vcpu_check_hw_bp(eip, 0, |
5149 | vcpu->arch.dr7, | 5200 | vcpu->arch.dr7, |
5150 | vcpu->arch.db); | 5201 | vcpu->arch.db); |
5151 | 5202 | ||
5152 | if (dr6 != 0) { | 5203 | if (dr6 != 0) { |
5153 | vcpu->arch.dr6 &= ~15; | 5204 | vcpu->arch.dr6 &= ~15; |
5154 | vcpu->arch.dr6 |= dr6; | 5205 | vcpu->arch.dr6 |= dr6 | DR6_RTM; |
5155 | kvm_queue_exception(vcpu, DB_VECTOR); | 5206 | kvm_queue_exception(vcpu, DB_VECTOR); |
5156 | *r = EMULATE_DONE; | 5207 | *r = EMULATE_DONE; |
5157 | return true; | 5208 | return true; |
@@ -5215,6 +5266,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, | |||
5215 | 5266 | ||
5216 | if (emulation_type & EMULTYPE_SKIP) { | 5267 | if (emulation_type & EMULTYPE_SKIP) { |
5217 | kvm_rip_write(vcpu, ctxt->_eip); | 5268 | kvm_rip_write(vcpu, ctxt->_eip); |
5269 | if (ctxt->eflags & X86_EFLAGS_RF) | ||
5270 | kvm_set_rflags(vcpu, ctxt->eflags & ~X86_EFLAGS_RF); | ||
5218 | return EMULATE_DONE; | 5271 | return EMULATE_DONE; |
5219 | } | 5272 | } |
5220 | 5273 | ||
@@ -5265,13 +5318,22 @@ restart: | |||
5265 | r = EMULATE_DONE; | 5318 | r = EMULATE_DONE; |
5266 | 5319 | ||
5267 | if (writeback) { | 5320 | if (writeback) { |
5321 | unsigned long rflags = kvm_x86_ops->get_rflags(vcpu); | ||
5268 | toggle_interruptibility(vcpu, ctxt->interruptibility); | 5322 | toggle_interruptibility(vcpu, ctxt->interruptibility); |
5269 | kvm_make_request(KVM_REQ_EVENT, vcpu); | ||
5270 | vcpu->arch.emulate_regs_need_sync_to_vcpu = false; | 5323 | vcpu->arch.emulate_regs_need_sync_to_vcpu = false; |
5271 | kvm_rip_write(vcpu, ctxt->eip); | 5324 | kvm_rip_write(vcpu, ctxt->eip); |
5272 | if (r == EMULATE_DONE) | 5325 | if (r == EMULATE_DONE) |
5273 | kvm_vcpu_check_singlestep(vcpu, &r); | 5326 | kvm_vcpu_check_singlestep(vcpu, rflags, &r); |
5274 | kvm_set_rflags(vcpu, ctxt->eflags); | 5327 | __kvm_set_rflags(vcpu, ctxt->eflags); |
5328 | |||
5329 | /* | ||
5330 | * For STI, interrupts are shadowed; so KVM_REQ_EVENT will | ||
5331 | * do nothing, and it will be requested again as soon as | ||
5332 | * the shadow expires. But we still need to check here, | ||
5333 | * because POPF has no interrupt shadow. | ||
5334 | */ | ||
5335 | if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF)) | ||
5336 | kvm_make_request(KVM_REQ_EVENT, vcpu); | ||
5275 | } else | 5337 | } else |
5276 | vcpu->arch.emulate_regs_need_sync_to_vcpu = true; | 5338 | vcpu->arch.emulate_regs_need_sync_to_vcpu = true; |
5277 | 5339 | ||
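
The writeback path above re-checks for a 0-to-1 transition of EFLAGS.IF because POPF can enable interrupts without leaving an interrupt shadow. The bit test in isolation (EFLAGS.IF is bit 9):

        #include <stdbool.h>
        #include <stdio.h>

        #define X86_EFLAGS_IF (1UL << 9)

        static bool if_newly_set(unsigned long old_rflags, unsigned long new_eflags)
        {
                /* bits set in new_eflags but clear in old_rflags */
                return (new_eflags & ~old_rflags) & X86_EFLAGS_IF;
        }

        int main(void)
        {
                printf("%d\n", if_newly_set(0x2, 0x202));   /* 1: IF was enabled */
                printf("%d\n", if_newly_set(0x202, 0x202)); /* 0: IF unchanged */
                return 0;
        }
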
@@ -5662,7 +5724,6 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) | |||
5662 | u64 param, ingpa, outgpa, ret; | 5724 | u64 param, ingpa, outgpa, ret; |
5663 | uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; | 5725 | uint16_t code, rep_idx, rep_cnt, res = HV_STATUS_SUCCESS, rep_done = 0; |
5664 | bool fast, longmode; | 5726 | bool fast, longmode; |
5665 | int cs_db, cs_l; | ||
5666 | 5727 | ||
5667 | /* | 5728 | /* |
5668 | * hypercall generates UD from non zero cpl and real mode | 5729 | * hypercall generates UD from non zero cpl and real mode |
@@ -5673,8 +5734,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu) | |||
5673 | return 0; | 5734 | return 0; |
5674 | } | 5735 | } |
5675 | 5736 | ||
5676 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | 5737 | longmode = is_64_bit_mode(vcpu); |
5677 | longmode = is_long_mode(vcpu) && cs_l == 1; | ||
5678 | 5738 | ||
5679 | if (!longmode) { | 5739 | if (!longmode) { |
5680 | param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | | 5740 | param = ((u64)kvm_register_read(vcpu, VCPU_REGS_RDX) << 32) | |
@@ -5739,7 +5799,7 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid) | |||
5739 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | 5799 | int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) |
5740 | { | 5800 | { |
5741 | unsigned long nr, a0, a1, a2, a3, ret; | 5801 | unsigned long nr, a0, a1, a2, a3, ret; |
5742 | int r = 1; | 5802 | int op_64_bit, r = 1; |
5743 | 5803 | ||
5744 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) | 5804 | if (kvm_hv_hypercall_enabled(vcpu->kvm)) |
5745 | return kvm_hv_hypercall(vcpu); | 5805 | return kvm_hv_hypercall(vcpu); |
@@ -5752,7 +5812,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | |||
5752 | 5812 | ||
5753 | trace_kvm_hypercall(nr, a0, a1, a2, a3); | 5813 | trace_kvm_hypercall(nr, a0, a1, a2, a3); |
5754 | 5814 | ||
5755 | if (!is_long_mode(vcpu)) { | 5815 | op_64_bit = is_64_bit_mode(vcpu); |
5816 | if (!op_64_bit) { | ||
5756 | nr &= 0xFFFFFFFF; | 5817 | nr &= 0xFFFFFFFF; |
5757 | a0 &= 0xFFFFFFFF; | 5818 | a0 &= 0xFFFFFFFF; |
5758 | a1 &= 0xFFFFFFFF; | 5819 | a1 &= 0xFFFFFFFF; |
@@ -5778,6 +5839,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | |||
5778 | break; | 5839 | break; |
5779 | } | 5840 | } |
5780 | out: | 5841 | out: |
5842 | if (!op_64_bit) | ||
5843 | ret = (u32)ret; | ||
5781 | kvm_register_write(vcpu, VCPU_REGS_RAX, ret); | 5844 | kvm_register_write(vcpu, VCPU_REGS_RAX, ret); |
5782 | ++vcpu->stat.hypercalls; | 5845 | ++vcpu->stat.hypercalls; |
5783 | return r; | 5846 | return r; |
@@ -5856,6 +5919,11 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) | |||
5856 | trace_kvm_inj_exception(vcpu->arch.exception.nr, | 5919 | trace_kvm_inj_exception(vcpu->arch.exception.nr, |
5857 | vcpu->arch.exception.has_error_code, | 5920 | vcpu->arch.exception.has_error_code, |
5858 | vcpu->arch.exception.error_code); | 5921 | vcpu->arch.exception.error_code); |
5922 | |||
5923 | if (exception_type(vcpu->arch.exception.nr) == EXCPT_FAULT) | ||
5924 | __kvm_set_rflags(vcpu, kvm_get_rflags(vcpu) | | ||
5925 | X86_EFLAGS_RF); | ||
5926 | |||
5859 | kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, | 5927 | kvm_x86_ops->queue_exception(vcpu, vcpu->arch.exception.nr, |
5860 | vcpu->arch.exception.has_error_code, | 5928 | vcpu->arch.exception.has_error_code, |
5861 | vcpu->arch.exception.error_code, | 5929 | vcpu->arch.exception.error_code, |
@@ -5887,6 +5955,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win) | |||
5887 | kvm_x86_ops->set_nmi(vcpu); | 5955 | kvm_x86_ops->set_nmi(vcpu); |
5888 | } | 5956 | } |
5889 | } else if (kvm_cpu_has_injectable_intr(vcpu)) { | 5957 | } else if (kvm_cpu_has_injectable_intr(vcpu)) { |
5958 | /* | ||
5959 | * Because interrupts can be injected asynchronously, we are | ||
5960 | * calling check_nested_events again here to avoid a race condition. | ||
5961 | * See https://lkml.org/lkml/2014/7/2/60 for discussion about this | ||
5962 | * proposal and current concerns. Perhaps we should be setting | ||
5963 | * KVM_REQ_EVENT only on certain events and not unconditionally? | ||
5964 | */ | ||
5965 | if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) { | ||
5966 | r = kvm_x86_ops->check_nested_events(vcpu, req_int_win); | ||
5967 | if (r != 0) | ||
5968 | return r; | ||
5969 | } | ||
5890 | if (kvm_x86_ops->interrupt_allowed(vcpu)) { | 5970 | if (kvm_x86_ops->interrupt_allowed(vcpu)) { |
5891 | kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), | 5971 | kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu), |
5892 | false); | 5972 | false); |
@@ -6835,9 +6915,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu) | |||
6835 | atomic_set(&vcpu->arch.nmi_queued, 0); | 6915 | atomic_set(&vcpu->arch.nmi_queued, 0); |
6836 | vcpu->arch.nmi_pending = 0; | 6916 | vcpu->arch.nmi_pending = 0; |
6837 | vcpu->arch.nmi_injected = false; | 6917 | vcpu->arch.nmi_injected = false; |
6918 | kvm_clear_interrupt_queue(vcpu); | ||
6919 | kvm_clear_exception_queue(vcpu); | ||
6838 | 6920 | ||
6839 | memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); | 6921 | memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db)); |
6840 | vcpu->arch.dr6 = DR6_FIXED_1; | 6922 | vcpu->arch.dr6 = DR6_INIT; |
6841 | kvm_update_dr6(vcpu); | 6923 | kvm_update_dr6(vcpu); |
6842 | vcpu->arch.dr7 = DR7_FIXED_1; | 6924 | vcpu->arch.dr7 = DR7_FIXED_1; |
6843 | kvm_update_dr7(vcpu); | 6925 | kvm_update_dr7(vcpu); |
@@ -7393,12 +7475,17 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu) | |||
7393 | } | 7475 | } |
7394 | EXPORT_SYMBOL_GPL(kvm_get_rflags); | 7476 | EXPORT_SYMBOL_GPL(kvm_get_rflags); |
7395 | 7477 | ||
7396 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | 7478 | static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) |
7397 | { | 7479 | { |
7398 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && | 7480 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP && |
7399 | kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) | 7481 | kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip)) |
7400 | rflags |= X86_EFLAGS_TF; | 7482 | rflags |= X86_EFLAGS_TF; |
7401 | kvm_x86_ops->set_rflags(vcpu, rflags); | 7483 | kvm_x86_ops->set_rflags(vcpu, rflags); |
7484 | } | ||
7485 | |||
7486 | void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) | ||
7487 | { | ||
7488 | __kvm_set_rflags(vcpu, rflags); | ||
7402 | kvm_make_request(KVM_REQ_EVENT, vcpu); | 7489 | kvm_make_request(KVM_REQ_EVENT, vcpu); |
7403 | } | 7490 | } |
7404 | EXPORT_SYMBOL_GPL(kvm_set_rflags); | 7491 | EXPORT_SYMBOL_GPL(kvm_set_rflags); |
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 8c97bac9a895..306a1b77581f 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h | |||
@@ -47,6 +47,16 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu) | |||
47 | #endif | 47 | #endif |
48 | } | 48 | } |
49 | 49 | ||
50 | static inline bool is_64_bit_mode(struct kvm_vcpu *vcpu) | ||
51 | { | ||
52 | int cs_db, cs_l; | ||
53 | |||
54 | if (!is_long_mode(vcpu)) | ||
55 | return false; | ||
56 | kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l); | ||
57 | return cs_l; | ||
58 | } | ||
59 | |||
50 | static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) | 60 | static inline bool mmu_is_nested(struct kvm_vcpu *vcpu) |
51 | { | 61 | { |
52 | return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; | 62 | return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu; |
@@ -108,6 +118,23 @@ static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa) | |||
108 | return false; | 118 | return false; |
109 | } | 119 | } |
110 | 120 | ||
121 | static inline unsigned long kvm_register_readl(struct kvm_vcpu *vcpu, | ||
122 | enum kvm_reg reg) | ||
123 | { | ||
124 | unsigned long val = kvm_register_read(vcpu, reg); | ||
125 | |||
126 | return is_64_bit_mode(vcpu) ? val : (u32)val; | ||
127 | } | ||
128 | |||
129 | static inline void kvm_register_writel(struct kvm_vcpu *vcpu, | ||
130 | enum kvm_reg reg, | ||
131 | unsigned long val) | ||
132 | { | ||
133 | if (!is_64_bit_mode(vcpu)) | ||
134 | val = (u32)val; | ||
135 | return kvm_register_write(vcpu, reg, val); | ||
136 | } | ||
137 | |||
111 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); | 138 | void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); |
112 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); | 139 | void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); |
113 | int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); | 140 | int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip); |
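
kvm_register_readl()/kvm_register_writel() truncate register values to 32 bits outside 64-bit mode, which is what the vmx.c hunks earlier in this diff rely on for VMREAD/VMWRITE operand decoding. A userspace model of the truncation:

        #include <inttypes.h>
        #include <stdio.h>

        static uint64_t register_readl(uint64_t raw, int is_64_bit_mode)
        {
                /* outside 64-bit mode only the low 32 bits are architectural */
                return is_64_bit_mode ? raw : (uint32_t)raw;
        }

        int main(void)
        {
                uint64_t raw = 0xffffffff00000001ULL;

                printf("64-bit mode: %#" PRIx64 "\n", register_readl(raw, 1));
                printf("compat mode: %#" PRIx64 "\n", register_readl(raw, 0));
                return 0;
        }
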
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 36642793e315..1dbade870f90 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -577,6 +577,8 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address) | |||
577 | 577 | ||
578 | static const char nx_warning[] = KERN_CRIT | 578 | static const char nx_warning[] = KERN_CRIT |
579 | "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; | 579 | "kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; |
580 | static const char smep_warning[] = KERN_CRIT | ||
581 | "unable to execute userspace code (SMEP?) (uid: %d)\n"; | ||
580 | 582 | ||
581 | static void | 583 | static void |
582 | show_fault_oops(struct pt_regs *regs, unsigned long error_code, | 584 | show_fault_oops(struct pt_regs *regs, unsigned long error_code, |
@@ -597,6 +599,10 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, | |||
597 | 599 | ||
598 | if (pte && pte_present(*pte) && !pte_exec(*pte)) | 600 | if (pte && pte_present(*pte) && !pte_exec(*pte)) |
599 | printk(nx_warning, from_kuid(&init_user_ns, current_uid())); | 601 | printk(nx_warning, from_kuid(&init_user_ns, current_uid())); |
602 | if (pte && pte_present(*pte) && pte_exec(*pte) && | ||
603 | (pgd_flags(*pgd) & _PAGE_USER) && | ||
604 | (read_cr4() & X86_CR4_SMEP)) | ||
605 | printk(smep_warning, from_kuid(&init_user_ns, current_uid())); | ||
600 | } | 606 | } |
601 | 607 | ||
602 | printk(KERN_ALERT "BUG: unable to handle kernel "); | 608 | printk(KERN_ALERT "BUG: unable to handle kernel "); |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index f97130618113..66dba36f2343 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -18,6 +18,13 @@ | |||
18 | #include <asm/dma.h> /* for MAX_DMA_PFN */ | 18 | #include <asm/dma.h> /* for MAX_DMA_PFN */ |
19 | #include <asm/microcode.h> | 19 | #include <asm/microcode.h> |
20 | 20 | ||
21 | /* | ||
22 | * We need to define the tracepoints somewhere, and tlb.c | ||
23 | * is only compiled when SMP=y. | ||
24 | */ | ||
25 | #define CREATE_TRACE_POINTS | ||
26 | #include <trace/events/tlb.h> | ||
27 | |||
21 | #include "mm_internal.h" | 28 | #include "mm_internal.h" |
22 | 29 | ||
23 | static unsigned long __initdata pgt_buf_start; | 30 | static unsigned long __initdata pgt_buf_start; |
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index dd8dda167a24..1fe33987de02 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c | |||
@@ -49,6 +49,7 @@ void leave_mm(int cpu) | |||
49 | if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { | 49 | if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) { |
50 | cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); | 50 | cpumask_clear_cpu(cpu, mm_cpumask(active_mm)); |
51 | load_cr3(swapper_pg_dir); | 51 | load_cr3(swapper_pg_dir); |
52 | trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); | ||
52 | } | 53 | } |
53 | } | 54 | } |
54 | EXPORT_SYMBOL_GPL(leave_mm); | 55 | EXPORT_SYMBOL_GPL(leave_mm); |
@@ -102,20 +103,24 @@ static void flush_tlb_func(void *info) | |||
102 | 103 | ||
103 | if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) | 104 | if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm)) |
104 | return; | 105 | return; |
106 | if (!f->flush_end) | ||
107 | f->flush_end = f->flush_start + PAGE_SIZE; | ||
105 | 108 | ||
106 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); | 109 | count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED); |
107 | if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { | 110 | if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) { |
108 | if (f->flush_end == TLB_FLUSH_ALL) | 111 | if (f->flush_end == TLB_FLUSH_ALL) { |
109 | local_flush_tlb(); | 112 | local_flush_tlb(); |
110 | else if (!f->flush_end) | 113 | trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, TLB_FLUSH_ALL); |
111 | __flush_tlb_single(f->flush_start); | 114 | } else { |
112 | else { | ||
113 | unsigned long addr; | 115 | unsigned long addr; |
116 | unsigned long nr_pages = | ||
117 | (f->flush_end - f->flush_start) / PAGE_SIZE; | ||
114 | addr = f->flush_start; | 118 | addr = f->flush_start; |
115 | while (addr < f->flush_end) { | 119 | while (addr < f->flush_end) { |
116 | __flush_tlb_single(addr); | 120 | __flush_tlb_single(addr); |
117 | addr += PAGE_SIZE; | 121 | addr += PAGE_SIZE; |
118 | } | 122 | } |
123 | trace_tlb_flush(TLB_REMOTE_SHOOTDOWN, nr_pages); | ||
119 | } | 124 | } |
120 | } else | 125 | } else |
121 | leave_mm(smp_processor_id()); | 126 | leave_mm(smp_processor_id()); |
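
Note the parentheses in the nr_pages computation above: without them, C operator precedence would divide only flush_start by PAGE_SIZE. A two-line demonstration (PAGE_SIZE assumed 4 KiB):

        #include <stdio.h>

        #define PAGE_SIZE 4096UL

        int main(void)
        {
                unsigned long start = 8 * PAGE_SIZE, end = 10 * PAGE_SIZE;

                printf("wrong: %lu\n", end - start / PAGE_SIZE);    /* 40952 */
                printf("right: %lu\n", (end - start) / PAGE_SIZE);  /* 2 */
                return 0;
        }
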
@@ -153,46 +158,45 @@ void flush_tlb_current_task(void) | |||
153 | 158 | ||
154 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); | 159 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); |
155 | local_flush_tlb(); | 160 | local_flush_tlb(); |
161 | trace_tlb_flush(TLB_LOCAL_SHOOTDOWN, TLB_FLUSH_ALL); | ||
156 | if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) | 162 | if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) |
157 | flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); | 163 | flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); |
158 | preempt_enable(); | 164 | preempt_enable(); |
159 | } | 165 | } |
160 | 166 | ||
167 | /* | ||
168 | * See Documentation/x86/tlb.txt for details. We choose 33 | ||
169 | * because it is large enough to cover the vast majority (at | ||
170 | * least 95%) of allocations, and is small enough that we are | ||
171 | * confident it will not cause too much overhead. Each single | ||
172 | * flush is about 100 ns, so this caps the maximum overhead at | ||
173 | * _about_ 3,300 ns (33 * ~100 ns). | ||
174 | * | ||
175 | * This is in units of pages. | ||
176 | */ | ||
177 | unsigned long tlb_single_page_flush_ceiling = 33; | ||
178 | |||
161 | void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, | 179 | void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, |
162 | unsigned long end, unsigned long vmflag) | 180 | unsigned long end, unsigned long vmflag) |
163 | { | 181 | { |
164 | unsigned long addr; | 182 | unsigned long addr; |
165 | unsigned act_entries, tlb_entries = 0; | 183 | /* do a global flush by default */ |
166 | unsigned long nr_base_pages; | 184 | unsigned long base_pages_to_flush = TLB_FLUSH_ALL; |
167 | 185 | ||
168 | preempt_disable(); | 186 | preempt_disable(); |
169 | if (current->active_mm != mm) | 187 | if (current->active_mm != mm) |
170 | goto flush_all; | 188 | goto out; |
171 | 189 | ||
172 | if (!current->mm) { | 190 | if (!current->mm) { |
173 | leave_mm(smp_processor_id()); | 191 | leave_mm(smp_processor_id()); |
174 | goto flush_all; | 192 | goto out; |
175 | } | 193 | } |
176 | 194 | ||
177 | if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 | 195 | if ((end != TLB_FLUSH_ALL) && !(vmflag & VM_HUGETLB)) |
178 | || vmflag & VM_HUGETLB) { | 196 | base_pages_to_flush = (end - start) >> PAGE_SHIFT; |
179 | local_flush_tlb(); | ||
180 | goto flush_all; | ||
181 | } | ||
182 | |||
183 | /* In modern CPU, last level tlb used for both data/ins */ | ||
184 | if (vmflag & VM_EXEC) | ||
185 | tlb_entries = tlb_lli_4k[ENTRIES]; | ||
186 | else | ||
187 | tlb_entries = tlb_lld_4k[ENTRIES]; | ||
188 | 197 | ||
189 | /* Assume all of TLB entries was occupied by this task */ | 198 | if (base_pages_to_flush > tlb_single_page_flush_ceiling) { |
190 | act_entries = tlb_entries >> tlb_flushall_shift; | 199 | base_pages_to_flush = TLB_FLUSH_ALL; |
191 | act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm; | ||
192 | nr_base_pages = (end - start) >> PAGE_SHIFT; | ||
193 | |||
194 | /* tlb_flushall_shift is on balance point, details in commit log */ | ||
195 | if (nr_base_pages > act_entries) { | ||
196 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); | 200 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL); |
197 | local_flush_tlb(); | 201 | local_flush_tlb(); |
198 | } else { | 202 | } else { |
@@ -201,17 +205,15 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, | |||
201 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); | 205 | count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE); |
202 | __flush_tlb_single(addr); | 206 | __flush_tlb_single(addr); |
203 | } | 207 | } |
204 | |||
205 | if (cpumask_any_but(mm_cpumask(mm), | ||
206 | smp_processor_id()) < nr_cpu_ids) | ||
207 | flush_tlb_others(mm_cpumask(mm), mm, start, end); | ||
208 | preempt_enable(); | ||
209 | return; | ||
210 | } | 208 | } |
211 | 209 | trace_tlb_flush(TLB_LOCAL_MM_SHOOTDOWN, base_pages_to_flush); | |
212 | flush_all: | 210 | out: |
211 | if (base_pages_to_flush == TLB_FLUSH_ALL) { | ||
212 | start = 0UL; | ||
213 | end = TLB_FLUSH_ALL; | ||
214 | } | ||
213 | if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) | 215 | if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids) |
214 | flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL); | 216 | flush_tlb_others(mm_cpumask(mm), mm, start, end); |
215 | preempt_enable(); | 217 | preempt_enable(); |
216 | } | 218 | } |
217 | 219 | ||
@@ -260,32 +262,26 @@ static void do_kernel_range_flush(void *info) | |||
260 | 262 | ||
261 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | 263 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) |
262 | { | 264 | { |
263 | unsigned act_entries; | ||
264 | struct flush_tlb_info info; | ||
265 | |||
266 | /* In modern CPU, last level tlb used for both data/ins */ | ||
267 | act_entries = tlb_lld_4k[ENTRIES]; | ||
268 | 265 | ||
269 | /* Balance as user space task's flush, a bit conservative */ | 266 | /* Balance as user space task's flush, a bit conservative */ |
270 | if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1 || | 267 | if (end == TLB_FLUSH_ALL || |
271 | (end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) | 268 | (end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE) { |
272 | |||
273 | on_each_cpu(do_flush_tlb_all, NULL, 1); | 269 | on_each_cpu(do_flush_tlb_all, NULL, 1); |
274 | else { | 270 | } else { |
271 | struct flush_tlb_info info; | ||
275 | info.flush_start = start; | 272 | info.flush_start = start; |
276 | info.flush_end = end; | 273 | info.flush_end = end; |
277 | on_each_cpu(do_kernel_range_flush, &info, 1); | 274 | on_each_cpu(do_kernel_range_flush, &info, 1); |
278 | } | 275 | } |
279 | } | 276 | } |
280 | 277 | ||
281 | #ifdef CONFIG_DEBUG_TLBFLUSH | ||
282 | static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, | 278 | static ssize_t tlbflush_read_file(struct file *file, char __user *user_buf, |
283 | size_t count, loff_t *ppos) | 279 | size_t count, loff_t *ppos) |
284 | { | 280 | { |
285 | char buf[32]; | 281 | char buf[32]; |
286 | unsigned int len; | 282 | unsigned int len; |
287 | 283 | ||
288 | len = sprintf(buf, "%hd\n", tlb_flushall_shift); | 284 | len = sprintf(buf, "%ld\n", tlb_single_page_flush_ceiling); |
289 | return simple_read_from_buffer(user_buf, count, ppos, buf, len); | 285 | return simple_read_from_buffer(user_buf, count, ppos, buf, len); |
290 | } | 286 | } |
291 | 287 | ||
@@ -294,20 +290,20 @@ static ssize_t tlbflush_write_file(struct file *file, | |||
294 | { | 290 | { |
295 | char buf[32]; | 291 | char buf[32]; |
296 | ssize_t len; | 292 | ssize_t len; |
297 | s8 shift; | 293 | int ceiling; |
298 | 294 | ||
299 | len = min(count, sizeof(buf) - 1); | 295 | len = min(count, sizeof(buf) - 1); |
300 | if (copy_from_user(buf, user_buf, len)) | 296 | if (copy_from_user(buf, user_buf, len)) |
301 | return -EFAULT; | 297 | return -EFAULT; |
302 | 298 | ||
303 | buf[len] = '\0'; | 299 | buf[len] = '\0'; |
304 | if (kstrtos8(buf, 0, &shift)) | 300 | if (kstrtoint(buf, 0, &ceiling)) |
305 | return -EINVAL; | 301 | return -EINVAL; |
306 | 302 | ||
307 | if (shift < -1 || shift >= BITS_PER_LONG) | 303 | if (ceiling < 0) |
308 | return -EINVAL; | 304 | return -EINVAL; |
309 | 305 | ||
310 | tlb_flushall_shift = shift; | 306 | tlb_single_page_flush_ceiling = ceiling; |
311 | return count; | 307 | return count; |
312 | } | 308 | } |
313 | 309 | ||
@@ -317,11 +313,10 @@ static const struct file_operations fops_tlbflush = { | |||
317 | .llseek = default_llseek, | 313 | .llseek = default_llseek, |
318 | }; | 314 | }; |
319 | 315 | ||
320 | static int __init create_tlb_flushall_shift(void) | 316 | static int __init create_tlb_single_page_flush_ceiling(void) |
321 | { | 317 | { |
322 | debugfs_create_file("tlb_flushall_shift", S_IRUSR | S_IWUSR, | 318 | debugfs_create_file("tlb_single_page_flush_ceiling", S_IRUSR | S_IWUSR, |
323 | arch_debugfs_dir, NULL, &fops_tlbflush); | 319 | arch_debugfs_dir, NULL, &fops_tlbflush); |
324 | return 0; | 320 | return 0; |
325 | } | 321 | } |
326 | late_initcall(create_tlb_flushall_shift); | 322 | late_initcall(create_tlb_single_page_flush_ceiling); |
327 | #endif | ||
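The tlbflush hunks above replace the old shift-based heuristic (tlb_flushall_shift, derived from the size of the last-level TLB) with one absolute tunable: a kernel range spanning more than tlb_single_page_flush_ceiling pages gets a single full TLB flush, anything smaller is flushed page by page, and the debugfs knob now reads and writes that ceiling directly. A minimal standalone sketch of the decision; the default ceiling of 33 pages is an assumption about this kernel era, not something stated in the hunk:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    static unsigned long tlb_single_page_flush_ceiling = 33;

    /* Mirrors the comparison in flush_tlb_kernel_range() above. */
    static const char *flush_strategy(unsigned long start, unsigned long end)
    {
            if ((end - start) > tlb_single_page_flush_ceiling * PAGE_SIZE)
                    return "full flush (do_flush_tlb_all)";
            return "per-page flush (do_kernel_range_flush)";
    }

    int main(void)
    {
            printf("16 pages:  %s\n", flush_strategy(0, 16 * PAGE_SIZE));
            printf("256 pages: %s\n", flush_strategy(0, 256 * PAGE_SIZE));
            return 0;
    }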
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c index b5e60268d93f..c61ea57d1ba1 100644 --- a/arch/x86/pci/fixup.c +++ b/arch/x86/pci/fixup.c | |||
@@ -326,6 +326,27 @@ static void pci_fixup_video(struct pci_dev *pdev) | |||
326 | struct pci_bus *bus; | 326 | struct pci_bus *bus; |
327 | u16 config; | 327 | u16 config; |
328 | 328 | ||
329 | if (!vga_default_device()) { | ||
330 | resource_size_t start, end; | ||
331 | int i; | ||
332 | |||
333 | /* Does firmware framebuffer belong to us? */ | ||
334 | for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { | ||
335 | if (!(pci_resource_flags(pdev, i) & IORESOURCE_MEM)) | ||
336 | continue; | ||
337 | |||
338 | start = pci_resource_start(pdev, i); | ||
339 | end = pci_resource_end(pdev, i); | ||
340 | |||
341 | if (!start || !end) | ||
342 | continue; | ||
343 | |||
344 | if (screen_info.lfb_base >= start && | ||
345 | (screen_info.lfb_base + screen_info.lfb_size) < end) | ||
346 | vga_set_default_device(pdev); | ||
347 | } | ||
348 | } | ||
349 | |||
329 | /* Is VGA routed to us? */ | 350 | /* Is VGA routed to us? */ |
330 | bus = pdev->bus; | 351 | bus = pdev->bus; |
331 | while (bus) { | 352 | while (bus) { |
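The new block in pci_fixup_video() claims default-VGA status for a device when the firmware framebuffer reported in screen_info falls inside one of the device's memory BARs. The containment test, lifted into a standalone predicate with made-up sample addresses:

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long resource_size_t;

    /* Same comparison as the hunk: base inside the BAR, end strictly below. */
    static bool lfb_in_bar(resource_size_t lfb_base, resource_size_t lfb_size,
                           resource_size_t start, resource_size_t end)
    {
            return lfb_base >= start && (lfb_base + lfb_size) < end;
    }

    int main(void)
    {
            /* an 8 MiB framebuffer at the start of a 256 MiB BAR */
            printf("%d\n", lfb_in_bar(0xe0000000ULL, 8ULL << 20,
                                      0xe0000000ULL, 0xefffffffULL));
            return 0;
    }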
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c index a19ed92e74e4..2ae525e0d8ba 100644 --- a/arch/x86/pci/i386.c +++ b/arch/x86/pci/i386.c | |||
@@ -162,6 +162,10 @@ pcibios_align_resource(void *data, const struct resource *res, | |||
162 | return start; | 162 | return start; |
163 | if (start & 0x300) | 163 | if (start & 0x300) |
164 | start = (start + 0x3ff) & ~0x3ff; | 164 | start = (start + 0x3ff) & ~0x3ff; |
165 | } else if (res->flags & IORESOURCE_MEM) { | ||
166 | /* The low 1MB range is reserved for ISA cards */ | ||
167 | if (start < BIOS_END) | ||
168 | start = BIOS_END; | ||
165 | } | 169 | } |
166 | return start; | 170 | return start; |
167 | } | 171 | } |
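The pcibios_align_resource() change keeps freshly assigned memory BARs out of the low 1 MB, which legacy ISA cards and the BIOS may still claim. Reduced to a one-line rule (BIOS_END is 0x100000 on x86):

    #define BIOS_END 0x100000UL

    /* Never place a memory resource below the 1 MB ISA/BIOS window. */
    static unsigned long align_mem_start(unsigned long start)
    {
            return start < BIOS_END ? BIOS_END : start;
    }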
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile index d51045afcaaf..2846aaab5103 100644 --- a/arch/x86/platform/efi/Makefile +++ b/arch/x86/platform/efi/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o | 1 | obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o |
2 | obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o | 2 | obj-$(CONFIG_ACPI_BGRT) += efi-bgrt.o |
3 | obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o | 3 | obj-$(CONFIG_EARLY_PRINTK_EFI) += early_printk.o |
4 | obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o | 4 | obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 87fc96bcc13c..850da94fef30 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -56,13 +56,6 @@ | |||
56 | 56 | ||
57 | #define EFI_DEBUG | 57 | #define EFI_DEBUG |
58 | 58 | ||
59 | #define EFI_MIN_RESERVE 5120 | ||
60 | |||
61 | #define EFI_DUMMY_GUID \ | ||
62 | EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9) | ||
63 | |||
64 | static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 }; | ||
65 | |||
66 | struct efi_memory_map memmap; | 59 | struct efi_memory_map memmap; |
67 | 60 | ||
68 | static struct efi efi_phys __initdata; | 61 | static struct efi efi_phys __initdata; |
@@ -95,139 +88,6 @@ static int __init setup_add_efi_memmap(char *arg) | |||
95 | } | 88 | } |
96 | early_param("add_efi_memmap", setup_add_efi_memmap); | 89 | early_param("add_efi_memmap", setup_add_efi_memmap); |
97 | 90 | ||
98 | static bool efi_no_storage_paranoia; | ||
99 | |||
100 | static int __init setup_storage_paranoia(char *arg) | ||
101 | { | ||
102 | efi_no_storage_paranoia = true; | ||
103 | return 0; | ||
104 | } | ||
105 | early_param("efi_no_storage_paranoia", setup_storage_paranoia); | ||
106 | |||
107 | static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc) | ||
108 | { | ||
109 | unsigned long flags; | ||
110 | efi_status_t status; | ||
111 | |||
112 | spin_lock_irqsave(&rtc_lock, flags); | ||
113 | status = efi_call_virt(get_time, tm, tc); | ||
114 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
115 | return status; | ||
116 | } | ||
117 | |||
118 | static efi_status_t virt_efi_set_time(efi_time_t *tm) | ||
119 | { | ||
120 | unsigned long flags; | ||
121 | efi_status_t status; | ||
122 | |||
123 | spin_lock_irqsave(&rtc_lock, flags); | ||
124 | status = efi_call_virt(set_time, tm); | ||
125 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
126 | return status; | ||
127 | } | ||
128 | |||
129 | static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled, | ||
130 | efi_bool_t *pending, | ||
131 | efi_time_t *tm) | ||
132 | { | ||
133 | unsigned long flags; | ||
134 | efi_status_t status; | ||
135 | |||
136 | spin_lock_irqsave(&rtc_lock, flags); | ||
137 | status = efi_call_virt(get_wakeup_time, enabled, pending, tm); | ||
138 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
139 | return status; | ||
140 | } | ||
141 | |||
142 | static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm) | ||
143 | { | ||
144 | unsigned long flags; | ||
145 | efi_status_t status; | ||
146 | |||
147 | spin_lock_irqsave(&rtc_lock, flags); | ||
148 | status = efi_call_virt(set_wakeup_time, enabled, tm); | ||
149 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
150 | return status; | ||
151 | } | ||
152 | |||
153 | static efi_status_t virt_efi_get_variable(efi_char16_t *name, | ||
154 | efi_guid_t *vendor, | ||
155 | u32 *attr, | ||
156 | unsigned long *data_size, | ||
157 | void *data) | ||
158 | { | ||
159 | return efi_call_virt(get_variable, | ||
160 | name, vendor, attr, | ||
161 | data_size, data); | ||
162 | } | ||
163 | |||
164 | static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, | ||
165 | efi_char16_t *name, | ||
166 | efi_guid_t *vendor) | ||
167 | { | ||
168 | return efi_call_virt(get_next_variable, | ||
169 | name_size, name, vendor); | ||
170 | } | ||
171 | |||
172 | static efi_status_t virt_efi_set_variable(efi_char16_t *name, | ||
173 | efi_guid_t *vendor, | ||
174 | u32 attr, | ||
175 | unsigned long data_size, | ||
176 | void *data) | ||
177 | { | ||
178 | return efi_call_virt(set_variable, | ||
179 | name, vendor, attr, | ||
180 | data_size, data); | ||
181 | } | ||
182 | |||
183 | static efi_status_t virt_efi_query_variable_info(u32 attr, | ||
184 | u64 *storage_space, | ||
185 | u64 *remaining_space, | ||
186 | u64 *max_variable_size) | ||
187 | { | ||
188 | if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) | ||
189 | return EFI_UNSUPPORTED; | ||
190 | |||
191 | return efi_call_virt(query_variable_info, attr, storage_space, | ||
192 | remaining_space, max_variable_size); | ||
193 | } | ||
194 | |||
195 | static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) | ||
196 | { | ||
197 | return efi_call_virt(get_next_high_mono_count, count); | ||
198 | } | ||
199 | |||
200 | static void virt_efi_reset_system(int reset_type, | ||
201 | efi_status_t status, | ||
202 | unsigned long data_size, | ||
203 | efi_char16_t *data) | ||
204 | { | ||
205 | __efi_call_virt(reset_system, reset_type, status, | ||
206 | data_size, data); | ||
207 | } | ||
208 | |||
209 | static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules, | ||
210 | unsigned long count, | ||
211 | unsigned long sg_list) | ||
212 | { | ||
213 | if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) | ||
214 | return EFI_UNSUPPORTED; | ||
215 | |||
216 | return efi_call_virt(update_capsule, capsules, count, sg_list); | ||
217 | } | ||
218 | |||
219 | static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules, | ||
220 | unsigned long count, | ||
221 | u64 *max_size, | ||
222 | int *reset_type) | ||
223 | { | ||
224 | if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) | ||
225 | return EFI_UNSUPPORTED; | ||
226 | |||
227 | return efi_call_virt(query_capsule_caps, capsules, count, max_size, | ||
228 | reset_type); | ||
229 | } | ||
230 | |||
231 | static efi_status_t __init phys_efi_set_virtual_address_map( | 91 | static efi_status_t __init phys_efi_set_virtual_address_map( |
232 | unsigned long memory_map_size, | 92 | unsigned long memory_map_size, |
233 | unsigned long descriptor_size, | 93 | unsigned long descriptor_size, |
@@ -244,42 +104,6 @@ static efi_status_t __init phys_efi_set_virtual_address_map( | |||
244 | return status; | 104 | return status; |
245 | } | 105 | } |
246 | 106 | ||
247 | int efi_set_rtc_mmss(const struct timespec *now) | ||
248 | { | ||
249 | unsigned long nowtime = now->tv_sec; | ||
250 | efi_status_t status; | ||
251 | efi_time_t eft; | ||
252 | efi_time_cap_t cap; | ||
253 | struct rtc_time tm; | ||
254 | |||
255 | status = efi.get_time(&eft, &cap); | ||
256 | if (status != EFI_SUCCESS) { | ||
257 | pr_err("Oops: efitime: can't read time!\n"); | ||
258 | return -1; | ||
259 | } | ||
260 | |||
261 | rtc_time_to_tm(nowtime, &tm); | ||
262 | if (!rtc_valid_tm(&tm)) { | ||
263 | eft.year = tm.tm_year + 1900; | ||
264 | eft.month = tm.tm_mon + 1; | ||
265 | eft.day = tm.tm_mday; | ||
266 | eft.minute = tm.tm_min; | ||
267 | eft.second = tm.tm_sec; | ||
268 | eft.nanosecond = 0; | ||
269 | } else { | ||
270 | pr_err("%s: Invalid EFI RTC value: write of %lx to EFI RTC failed\n", | ||
271 | __func__, nowtime); | ||
272 | return -1; | ||
273 | } | ||
274 | |||
275 | status = efi.set_time(&eft); | ||
276 | if (status != EFI_SUCCESS) { | ||
277 | pr_err("Oops: efitime: can't write time!\n"); | ||
278 | return -1; | ||
279 | } | ||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | void efi_get_time(struct timespec *now) | 107 | void efi_get_time(struct timespec *now) |
284 | { | 108 | { |
285 | efi_status_t status; | 109 | efi_status_t status; |
@@ -350,6 +174,9 @@ int __init efi_memblock_x86_reserve_range(void) | |||
350 | struct efi_info *e = &boot_params.efi_info; | 174 | struct efi_info *e = &boot_params.efi_info; |
351 | unsigned long pmap; | 175 | unsigned long pmap; |
352 | 176 | ||
177 | if (efi_enabled(EFI_PARAVIRT)) | ||
178 | return 0; | ||
179 | |||
353 | #ifdef CONFIG_X86_32 | 180 | #ifdef CONFIG_X86_32 |
354 | /* Can't handle data above 4GB at this time */ | 181 | /* Can't handle data above 4GB at this time */ |
355 | if (e->efi_memmap_hi) { | 182 | if (e->efi_memmap_hi) { |
@@ -392,69 +219,15 @@ static void __init print_efi_memmap(void) | |||
392 | #endif /* EFI_DEBUG */ | 219 | #endif /* EFI_DEBUG */ |
393 | } | 220 | } |
394 | 221 | ||
395 | void __init efi_reserve_boot_services(void) | ||
396 | { | ||
397 | void *p; | ||
398 | |||
399 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
400 | efi_memory_desc_t *md = p; | ||
401 | u64 start = md->phys_addr; | ||
402 | u64 size = md->num_pages << EFI_PAGE_SHIFT; | ||
403 | |||
404 | if (md->type != EFI_BOOT_SERVICES_CODE && | ||
405 | md->type != EFI_BOOT_SERVICES_DATA) | ||
406 | continue; | ||
407 | /* Only reserve where possible: | ||
408 | * - Not within any already allocated areas | ||
409 | * - Not over any memory area (really needed, if above?) | ||
410 | * - Not within any part of the kernel | ||
411 | * - Not the bios reserved area | ||
412 | */ | ||
413 | if ((start + size > __pa_symbol(_text) | ||
414 | && start <= __pa_symbol(_end)) || | ||
415 | !e820_all_mapped(start, start+size, E820_RAM) || | ||
416 | memblock_is_region_reserved(start, size)) { | ||
417 | /* Could not reserve, skip it */ | ||
418 | md->num_pages = 0; | ||
419 | memblock_dbg("Could not reserve boot range [0x%010llx-0x%010llx]\n", | ||
420 | start, start+size-1); | ||
421 | } else | ||
422 | memblock_reserve(start, size); | ||
423 | } | ||
424 | } | ||
425 | |||
426 | void __init efi_unmap_memmap(void) | 222 | void __init efi_unmap_memmap(void) |
427 | { | 223 | { |
428 | clear_bit(EFI_MEMMAP, &efi.flags); | 224 | clear_bit(EFI_MEMMAP, &efi.flags); |
429 | if (memmap.map) { | 225 | if (memmap.map) { |
430 | early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size); | 226 | early_memunmap(memmap.map, memmap.nr_map * memmap.desc_size); |
431 | memmap.map = NULL; | 227 | memmap.map = NULL; |
432 | } | 228 | } |
433 | } | 229 | } |
434 | 230 | ||
435 | void __init efi_free_boot_services(void) | ||
436 | { | ||
437 | void *p; | ||
438 | |||
439 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
440 | efi_memory_desc_t *md = p; | ||
441 | unsigned long long start = md->phys_addr; | ||
442 | unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; | ||
443 | |||
444 | if (md->type != EFI_BOOT_SERVICES_CODE && | ||
445 | md->type != EFI_BOOT_SERVICES_DATA) | ||
446 | continue; | ||
447 | |||
448 | /* Could not reserve boot area */ | ||
449 | if (!size) | ||
450 | continue; | ||
451 | |||
452 | free_bootmem_late(start, size); | ||
453 | } | ||
454 | |||
455 | efi_unmap_memmap(); | ||
456 | } | ||
457 | |||
458 | static int __init efi_systab_init(void *phys) | 231 | static int __init efi_systab_init(void *phys) |
459 | { | 232 | { |
460 | if (efi_enabled(EFI_64BIT)) { | 233 | if (efi_enabled(EFI_64BIT)) { |
@@ -467,12 +240,12 @@ static int __init efi_systab_init(void *phys) | |||
467 | if (!data) | 240 | if (!data) |
468 | return -ENOMEM; | 241 | return -ENOMEM; |
469 | } | 242 | } |
470 | systab64 = early_ioremap((unsigned long)phys, | 243 | systab64 = early_memremap((unsigned long)phys, |
471 | sizeof(*systab64)); | 244 | sizeof(*systab64)); |
472 | if (systab64 == NULL) { | 245 | if (systab64 == NULL) { |
473 | pr_err("Couldn't map the system table!\n"); | 246 | pr_err("Couldn't map the system table!\n"); |
474 | if (data) | 247 | if (data) |
475 | early_iounmap(data, sizeof(*data)); | 248 | early_memunmap(data, sizeof(*data)); |
476 | return -ENOMEM; | 249 | return -ENOMEM; |
477 | } | 250 | } |
478 | 251 | ||
@@ -504,9 +277,9 @@ static int __init efi_systab_init(void *phys) | |||
504 | systab64->tables; | 277 | systab64->tables; |
505 | tmp |= data ? data->tables : systab64->tables; | 278 | tmp |= data ? data->tables : systab64->tables; |
506 | 279 | ||
507 | early_iounmap(systab64, sizeof(*systab64)); | 280 | early_memunmap(systab64, sizeof(*systab64)); |
508 | if (data) | 281 | if (data) |
509 | early_iounmap(data, sizeof(*data)); | 282 | early_memunmap(data, sizeof(*data)); |
510 | #ifdef CONFIG_X86_32 | 283 | #ifdef CONFIG_X86_32 |
511 | if (tmp >> 32) { | 284 | if (tmp >> 32) { |
512 | pr_err("EFI data located above 4GB, disabling EFI.\n"); | 285 | pr_err("EFI data located above 4GB, disabling EFI.\n"); |
@@ -516,7 +289,7 @@ static int __init efi_systab_init(void *phys) | |||
516 | } else { | 289 | } else { |
517 | efi_system_table_32_t *systab32; | 290 | efi_system_table_32_t *systab32; |
518 | 291 | ||
519 | systab32 = early_ioremap((unsigned long)phys, | 292 | systab32 = early_memremap((unsigned long)phys, |
520 | sizeof(*systab32)); | 293 | sizeof(*systab32)); |
521 | if (systab32 == NULL) { | 294 | if (systab32 == NULL) { |
522 | pr_err("Couldn't map the system table!\n"); | 295 | pr_err("Couldn't map the system table!\n"); |
@@ -537,7 +310,7 @@ static int __init efi_systab_init(void *phys) | |||
537 | efi_systab.nr_tables = systab32->nr_tables; | 310 | efi_systab.nr_tables = systab32->nr_tables; |
538 | efi_systab.tables = systab32->tables; | 311 | efi_systab.tables = systab32->tables; |
539 | 312 | ||
540 | early_iounmap(systab32, sizeof(*systab32)); | 313 | early_memunmap(systab32, sizeof(*systab32)); |
541 | } | 314 | } |
542 | 315 | ||
543 | efi.systab = &efi_systab; | 316 | efi.systab = &efi_systab; |
@@ -563,7 +336,7 @@ static int __init efi_runtime_init32(void) | |||
563 | { | 336 | { |
564 | efi_runtime_services_32_t *runtime; | 337 | efi_runtime_services_32_t *runtime; |
565 | 338 | ||
566 | runtime = early_ioremap((unsigned long)efi.systab->runtime, | 339 | runtime = early_memremap((unsigned long)efi.systab->runtime, |
567 | sizeof(efi_runtime_services_32_t)); | 340 | sizeof(efi_runtime_services_32_t)); |
568 | if (!runtime) { | 341 | if (!runtime) { |
569 | pr_err("Could not map the runtime service table!\n"); | 342 | pr_err("Could not map the runtime service table!\n"); |
@@ -578,7 +351,7 @@ static int __init efi_runtime_init32(void) | |||
578 | efi_phys.set_virtual_address_map = | 351 | efi_phys.set_virtual_address_map = |
579 | (efi_set_virtual_address_map_t *) | 352 | (efi_set_virtual_address_map_t *) |
580 | (unsigned long)runtime->set_virtual_address_map; | 353 | (unsigned long)runtime->set_virtual_address_map; |
581 | early_iounmap(runtime, sizeof(efi_runtime_services_32_t)); | 354 | early_memunmap(runtime, sizeof(efi_runtime_services_32_t)); |
582 | 355 | ||
583 | return 0; | 356 | return 0; |
584 | } | 357 | } |
@@ -587,7 +360,7 @@ static int __init efi_runtime_init64(void) | |||
587 | { | 360 | { |
588 | efi_runtime_services_64_t *runtime; | 361 | efi_runtime_services_64_t *runtime; |
589 | 362 | ||
590 | runtime = early_ioremap((unsigned long)efi.systab->runtime, | 363 | runtime = early_memremap((unsigned long)efi.systab->runtime, |
591 | sizeof(efi_runtime_services_64_t)); | 364 | sizeof(efi_runtime_services_64_t)); |
592 | if (!runtime) { | 365 | if (!runtime) { |
593 | pr_err("Could not map the runtime service table!\n"); | 366 | pr_err("Could not map the runtime service table!\n"); |
@@ -602,7 +375,7 @@ static int __init efi_runtime_init64(void) | |||
602 | efi_phys.set_virtual_address_map = | 375 | efi_phys.set_virtual_address_map = |
603 | (efi_set_virtual_address_map_t *) | 376 | (efi_set_virtual_address_map_t *) |
604 | (unsigned long)runtime->set_virtual_address_map; | 377 | (unsigned long)runtime->set_virtual_address_map; |
605 | early_iounmap(runtime, sizeof(efi_runtime_services_64_t)); | 378 | early_memunmap(runtime, sizeof(efi_runtime_services_64_t)); |
606 | 379 | ||
607 | return 0; | 380 | return 0; |
608 | } | 381 | } |
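The repeated early_ioremap()/early_iounmap() to early_memremap()/early_memunmap() conversions in this file reflect that the EFI system, runtime, and memory-map tables live in ordinary RAM: early_memremap() maps them with normal cacheable attributes, while early_ioremap() is meant for MMIO. The resulting access pattern, as a kernel-context sketch (the helper name is invented for illustration):

    /* Map a firmware table from physical RAM, consume it, unmap it. */
    static int __init peek_table(unsigned long phys, size_t len)
    {
            void *va = early_memremap(phys, len);

            if (!va)
                    return -ENOMEM;
            /* ... read fields out of va ... */
            early_memunmap(va, len);
            return 0;
    }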
@@ -616,14 +389,24 @@ static int __init efi_runtime_init(void) | |||
616 | * the runtime services table so that we can grab the physical | 389 | * the runtime services table so that we can grab the physical |
617 | * address of several of the EFI runtime functions, needed to | 390 | * address of several of the EFI runtime functions, needed to |
618 | * set the firmware into virtual mode. | 391 | * set the firmware into virtual mode. |
392 | * | ||
393 | * When EFI_PARAVIRT is in force we cannot map the runtime | ||
394 | * services memory region because we do not have direct access to it. | ||
395 | * However, runtime services are available through proxy functions | ||
396 | * (e.g. in the Xen dom0 EFI implementation they issue a special | ||
397 | * hypercall which executes the relevant EFI function), which is why | ||
398 | * they are always enabled. | ||
619 | */ | 399 | */ |
620 | if (efi_enabled(EFI_64BIT)) | ||
621 | rv = efi_runtime_init64(); | ||
622 | else | ||
623 | rv = efi_runtime_init32(); | ||
624 | 400 | ||
625 | if (rv) | 401 | if (!efi_enabled(EFI_PARAVIRT)) { |
626 | return rv; | 402 | if (efi_enabled(EFI_64BIT)) |
403 | rv = efi_runtime_init64(); | ||
404 | else | ||
405 | rv = efi_runtime_init32(); | ||
406 | |||
407 | if (rv) | ||
408 | return rv; | ||
409 | } | ||
627 | 410 | ||
628 | set_bit(EFI_RUNTIME_SERVICES, &efi.flags); | 411 | set_bit(EFI_RUNTIME_SERVICES, &efi.flags); |
629 | 412 | ||
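EFI_PARAVIRT, tested throughout these hunks, is just one bit in efi.flags; in this era efi_enabled() is a thin test_bit() wrapper, and providers such as the Xen code later in this patch publish capabilities with set_bit(). A kernel-context sketch of the mechanism, with names shortened for illustration:

    struct efi_like {
            unsigned long flags;
    };

    /* Roughly what efi_enabled() does with efi.flags. */
    static inline bool flag_enabled(int feature, struct efi_like *e)
    {
            return test_bit(feature, &e->flags);
    }

    /* A provider publishes a capability the same way the Xen code does:
     * set_bit(EFI_PARAVIRT, &e->flags); */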
@@ -632,8 +415,11 @@ static int __init efi_runtime_init(void) | |||
632 | 415 | ||
633 | static int __init efi_memmap_init(void) | 416 | static int __init efi_memmap_init(void) |
634 | { | 417 | { |
418 | if (efi_enabled(EFI_PARAVIRT)) | ||
419 | return 0; | ||
420 | |||
635 | /* Map the EFI memory map */ | 421 | /* Map the EFI memory map */ |
636 | memmap.map = early_ioremap((unsigned long)memmap.phys_map, | 422 | memmap.map = early_memremap((unsigned long)memmap.phys_map, |
637 | memmap.nr_map * memmap.desc_size); | 423 | memmap.nr_map * memmap.desc_size); |
638 | if (memmap.map == NULL) { | 424 | if (memmap.map == NULL) { |
639 | pr_err("Could not map the memory map!\n"); | 425 | pr_err("Could not map the memory map!\n"); |
@@ -649,62 +435,6 @@ static int __init efi_memmap_init(void) | |||
649 | return 0; | 435 | return 0; |
650 | } | 436 | } |
651 | 437 | ||
652 | /* | ||
653 | * A number of config table entries get remapped to virtual addresses | ||
654 | * after entering EFI virtual mode. However, the kexec kernel requires | ||
655 | * their physical addresses therefore we pass them via setup_data and | ||
656 | * correct those entries to their respective physical addresses here. | ||
657 | * | ||
658 | * Currently only handles smbios which is necessary for some firmware | ||
659 | * implementation. | ||
660 | */ | ||
661 | static int __init efi_reuse_config(u64 tables, int nr_tables) | ||
662 | { | ||
663 | int i, sz, ret = 0; | ||
664 | void *p, *tablep; | ||
665 | struct efi_setup_data *data; | ||
666 | |||
667 | if (!efi_setup) | ||
668 | return 0; | ||
669 | |||
670 | if (!efi_enabled(EFI_64BIT)) | ||
671 | return 0; | ||
672 | |||
673 | data = early_memremap(efi_setup, sizeof(*data)); | ||
674 | if (!data) { | ||
675 | ret = -ENOMEM; | ||
676 | goto out; | ||
677 | } | ||
678 | |||
679 | if (!data->smbios) | ||
680 | goto out_memremap; | ||
681 | |||
682 | sz = sizeof(efi_config_table_64_t); | ||
683 | |||
684 | p = tablep = early_memremap(tables, nr_tables * sz); | ||
685 | if (!p) { | ||
686 | pr_err("Could not map Configuration table!\n"); | ||
687 | ret = -ENOMEM; | ||
688 | goto out_memremap; | ||
689 | } | ||
690 | |||
691 | for (i = 0; i < efi.systab->nr_tables; i++) { | ||
692 | efi_guid_t guid; | ||
693 | |||
694 | guid = ((efi_config_table_64_t *)p)->guid; | ||
695 | |||
696 | if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID)) | ||
697 | ((efi_config_table_64_t *)p)->table = data->smbios; | ||
698 | p += sz; | ||
699 | } | ||
700 | early_iounmap(tablep, nr_tables * sz); | ||
701 | |||
702 | out_memremap: | ||
703 | early_iounmap(data, sizeof(*data)); | ||
704 | out: | ||
705 | return ret; | ||
706 | } | ||
707 | |||
708 | void __init efi_init(void) | 438 | void __init efi_init(void) |
709 | { | 439 | { |
710 | efi_char16_t *c16; | 440 | efi_char16_t *c16; |
@@ -728,8 +458,6 @@ void __init efi_init(void) | |||
728 | if (efi_systab_init(efi_phys.systab)) | 458 | if (efi_systab_init(efi_phys.systab)) |
729 | return; | 459 | return; |
730 | 460 | ||
731 | set_bit(EFI_SYSTEM_TABLES, &efi.flags); | ||
732 | |||
733 | efi.config_table = (unsigned long)efi.systab->tables; | 461 | efi.config_table = (unsigned long)efi.systab->tables; |
734 | efi.fw_vendor = (unsigned long)efi.systab->fw_vendor; | 462 | efi.fw_vendor = (unsigned long)efi.systab->fw_vendor; |
735 | efi.runtime = (unsigned long)efi.systab->runtime; | 463 | efi.runtime = (unsigned long)efi.systab->runtime; |
@@ -737,14 +465,14 @@ void __init efi_init(void) | |||
737 | /* | 465 | /* |
738 | * Show what we know for posterity | 466 | * Show what we know for posterity |
739 | */ | 467 | */ |
740 | c16 = tmp = early_ioremap(efi.systab->fw_vendor, 2); | 468 | c16 = tmp = early_memremap(efi.systab->fw_vendor, 2); |
741 | if (c16) { | 469 | if (c16) { |
742 | for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i) | 470 | for (i = 0; i < sizeof(vendor) - 1 && *c16; ++i) |
743 | vendor[i] = *c16++; | 471 | vendor[i] = *c16++; |
744 | vendor[i] = '\0'; | 472 | vendor[i] = '\0'; |
745 | } else | 473 | } else |
746 | pr_err("Could not map the firmware vendor!\n"); | 474 | pr_err("Could not map the firmware vendor!\n"); |
747 | early_iounmap(tmp, 2); | 475 | early_memunmap(tmp, 2); |
748 | 476 | ||
749 | pr_info("EFI v%u.%.02u by %s\n", | 477 | pr_info("EFI v%u.%.02u by %s\n", |
750 | efi.systab->hdr.revision >> 16, | 478 | efi.systab->hdr.revision >> 16, |
@@ -770,8 +498,6 @@ void __init efi_init(void) | |||
770 | if (efi_memmap_init()) | 498 | if (efi_memmap_init()) |
771 | return; | 499 | return; |
772 | 500 | ||
773 | set_bit(EFI_MEMMAP, &efi.flags); | ||
774 | |||
775 | print_efi_memmap(); | 501 | print_efi_memmap(); |
776 | } | 502 | } |
777 | 503 | ||
@@ -847,22 +573,6 @@ void __init old_map_region(efi_memory_desc_t *md) | |||
847 | (unsigned long long)md->phys_addr); | 573 | (unsigned long long)md->phys_addr); |
848 | } | 574 | } |
849 | 575 | ||
850 | static void native_runtime_setup(void) | ||
851 | { | ||
852 | efi.get_time = virt_efi_get_time; | ||
853 | efi.set_time = virt_efi_set_time; | ||
854 | efi.get_wakeup_time = virt_efi_get_wakeup_time; | ||
855 | efi.set_wakeup_time = virt_efi_set_wakeup_time; | ||
856 | efi.get_variable = virt_efi_get_variable; | ||
857 | efi.get_next_variable = virt_efi_get_next_variable; | ||
858 | efi.set_variable = virt_efi_set_variable; | ||
859 | efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; | ||
860 | efi.reset_system = virt_efi_reset_system; | ||
861 | efi.query_variable_info = virt_efi_query_variable_info; | ||
862 | efi.update_capsule = virt_efi_update_capsule; | ||
863 | efi.query_capsule_caps = virt_efi_query_capsule_caps; | ||
864 | } | ||
865 | |||
866 | /* Merge contiguous regions of the same type and attribute */ | 576 | /* Merge contiguous regions of the same type and attribute */ |
867 | static void __init efi_merge_regions(void) | 577 | static void __init efi_merge_regions(void) |
868 | { | 578 | { |
@@ -1049,7 +759,7 @@ static void __init kexec_enter_virtual_mode(void) | |||
1049 | */ | 759 | */ |
1050 | efi.runtime_version = efi_systab.hdr.revision; | 760 | efi.runtime_version = efi_systab.hdr.revision; |
1051 | 761 | ||
1052 | native_runtime_setup(); | 762 | efi_native_runtime_setup(); |
1053 | 763 | ||
1054 | efi.set_virtual_address_map = NULL; | 764 | efi.set_virtual_address_map = NULL; |
1055 | 765 | ||
@@ -1057,11 +767,7 @@ static void __init kexec_enter_virtual_mode(void) | |||
1057 | runtime_code_page_mkexec(); | 767 | runtime_code_page_mkexec(); |
1058 | 768 | ||
1059 | /* clean DUMMY object */ | 769 | /* clean DUMMY object */ |
1060 | efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | 770 | efi_delete_dummy_variable(); |
1061 | EFI_VARIABLE_NON_VOLATILE | | ||
1062 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
1063 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
1064 | 0, NULL); | ||
1065 | #endif | 771 | #endif |
1066 | } | 772 | } |
1067 | 773 | ||
@@ -1142,7 +848,7 @@ static void __init __efi_enter_virtual_mode(void) | |||
1142 | efi.runtime_version = efi_systab.hdr.revision; | 848 | efi.runtime_version = efi_systab.hdr.revision; |
1143 | 849 | ||
1144 | if (efi_is_native()) | 850 | if (efi_is_native()) |
1145 | native_runtime_setup(); | 851 | efi_native_runtime_setup(); |
1146 | else | 852 | else |
1147 | efi_thunk_runtime_setup(); | 853 | efi_thunk_runtime_setup(); |
1148 | 854 | ||
@@ -1179,15 +885,14 @@ static void __init __efi_enter_virtual_mode(void) | |||
1179 | free_pages((unsigned long)new_memmap, pg_shift); | 885 | free_pages((unsigned long)new_memmap, pg_shift); |
1180 | 886 | ||
1181 | /* clean DUMMY object */ | 887 | /* clean DUMMY object */ |
1182 | efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | 888 | efi_delete_dummy_variable(); |
1183 | EFI_VARIABLE_NON_VOLATILE | | ||
1184 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
1185 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
1186 | 0, NULL); | ||
1187 | } | 889 | } |
1188 | 890 | ||
1189 | void __init efi_enter_virtual_mode(void) | 891 | void __init efi_enter_virtual_mode(void) |
1190 | { | 892 | { |
893 | if (efi_enabled(EFI_PARAVIRT)) | ||
894 | return; | ||
895 | |||
1191 | if (efi_setup) | 896 | if (efi_setup) |
1192 | kexec_enter_virtual_mode(); | 897 | kexec_enter_virtual_mode(); |
1193 | else | 898 | else |
@@ -1220,6 +925,9 @@ u64 efi_mem_attributes(unsigned long phys_addr) | |||
1220 | efi_memory_desc_t *md; | 925 | efi_memory_desc_t *md; |
1221 | void *p; | 926 | void *p; |
1222 | 927 | ||
928 | if (!efi_enabled(EFI_MEMMAP)) | ||
929 | return 0; | ||
930 | |||
1223 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | 931 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { |
1224 | md = p; | 932 | md = p; |
1225 | if ((md->phys_addr <= phys_addr) && | 933 | if ((md->phys_addr <= phys_addr) && |
@@ -1230,86 +938,6 @@ u64 efi_mem_attributes(unsigned long phys_addr) | |||
1230 | return 0; | 938 | return 0; |
1231 | } | 939 | } |
1232 | 940 | ||
1233 | /* | ||
1234 | * Some firmware implementations refuse to boot if there's insufficient space | ||
1235 | * in the variable store. Ensure that we never use more than a safe limit. | ||
1236 | * | ||
1237 | * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable | ||
1238 | * store. | ||
1239 | */ | ||
1240 | efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) | ||
1241 | { | ||
1242 | efi_status_t status; | ||
1243 | u64 storage_size, remaining_size, max_size; | ||
1244 | |||
1245 | if (!(attributes & EFI_VARIABLE_NON_VOLATILE)) | ||
1246 | return 0; | ||
1247 | |||
1248 | status = efi.query_variable_info(attributes, &storage_size, | ||
1249 | &remaining_size, &max_size); | ||
1250 | if (status != EFI_SUCCESS) | ||
1251 | return status; | ||
1252 | |||
1253 | /* | ||
1254 | * We account for that by refusing the write if permitting it would | ||
1255 | * reduce the available space to under 5KB. This figure was provided by | ||
1256 | * Samsung, so should be safe. | ||
1257 | */ | ||
1258 | if ((remaining_size - size < EFI_MIN_RESERVE) && | ||
1259 | !efi_no_storage_paranoia) { | ||
1260 | |||
1261 | /* | ||
1262 | * Triggering garbage collection may require that the firmware | ||
1263 | * generate a real EFI_OUT_OF_RESOURCES error. We can force | ||
1264 | * that by attempting to use more space than is available. | ||
1265 | */ | ||
1266 | unsigned long dummy_size = remaining_size + 1024; | ||
1267 | void *dummy = kzalloc(dummy_size, GFP_ATOMIC); | ||
1268 | |||
1269 | if (!dummy) | ||
1270 | return EFI_OUT_OF_RESOURCES; | ||
1271 | |||
1272 | status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | ||
1273 | EFI_VARIABLE_NON_VOLATILE | | ||
1274 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
1275 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
1276 | dummy_size, dummy); | ||
1277 | |||
1278 | if (status == EFI_SUCCESS) { | ||
1279 | /* | ||
1280 | * This should have failed, so if it didn't make sure | ||
1281 | * that we delete it... | ||
1282 | */ | ||
1283 | efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | ||
1284 | EFI_VARIABLE_NON_VOLATILE | | ||
1285 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
1286 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
1287 | 0, dummy); | ||
1288 | } | ||
1289 | |||
1290 | kfree(dummy); | ||
1291 | |||
1292 | /* | ||
1293 | * The runtime code may now have triggered a garbage collection | ||
1294 | * run, so check the variable info again | ||
1295 | */ | ||
1296 | status = efi.query_variable_info(attributes, &storage_size, | ||
1297 | &remaining_size, &max_size); | ||
1298 | |||
1299 | if (status != EFI_SUCCESS) | ||
1300 | return status; | ||
1301 | |||
1302 | /* | ||
1303 | * There still isn't enough room, so return an error | ||
1304 | */ | ||
1305 | if (remaining_size - size < EFI_MIN_RESERVE) | ||
1306 | return EFI_OUT_OF_RESOURCES; | ||
1307 | } | ||
1308 | |||
1309 | return EFI_SUCCESS; | ||
1310 | } | ||
1311 | EXPORT_SYMBOL_GPL(efi_query_variable_store); | ||
1312 | |||
1313 | static int __init parse_efi_cmdline(char *str) | 941 | static int __init parse_efi_cmdline(char *str) |
1314 | { | 942 | { |
1315 | if (*str == '=') | 943 | if (*str == '=') |
@@ -1321,22 +949,3 @@ static int __init parse_efi_cmdline(char *str) | |||
1321 | return 0; | 949 | return 0; |
1322 | } | 950 | } |
1323 | early_param("efi", parse_efi_cmdline); | 951 | early_param("efi", parse_efi_cmdline); |
1324 | |||
1325 | void __init efi_apply_memmap_quirks(void) | ||
1326 | { | ||
1327 | /* | ||
1328 | * Once setup is done earlier, unmap the EFI memory map on mismatched | ||
1329 | * firmware/kernel architectures since there is no support for runtime | ||
1330 | * services. | ||
1331 | */ | ||
1332 | if (!efi_runtime_supported()) { | ||
1333 | pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); | ||
1334 | efi_unmap_memmap(); | ||
1335 | } | ||
1336 | |||
1337 | /* | ||
1338 | * UV doesn't support the new EFI pagetable mapping yet. | ||
1339 | */ | ||
1340 | if (is_uv_system()) | ||
1341 | set_bit(EFI_OLD_MEMMAP, &efi.flags); | ||
1342 | } | ||
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c new file mode 100644 index 000000000000..1c7380da65ff --- /dev/null +++ b/arch/x86/platform/efi/quirks.c | |||
@@ -0,0 +1,290 @@ | |||
1 | #include <linux/init.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/string.h> | ||
4 | #include <linux/time.h> | ||
5 | #include <linux/types.h> | ||
6 | #include <linux/efi.h> | ||
7 | #include <linux/slab.h> | ||
8 | #include <linux/memblock.h> | ||
9 | #include <linux/bootmem.h> | ||
10 | #include <linux/acpi.h> | ||
11 | #include <asm/efi.h> | ||
12 | #include <asm/uv/uv.h> | ||
13 | |||
14 | #define EFI_MIN_RESERVE 5120 | ||
15 | |||
16 | #define EFI_DUMMY_GUID \ | ||
17 | EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9) | ||
18 | |||
19 | static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 }; | ||
20 | |||
21 | static bool efi_no_storage_paranoia; | ||
22 | |||
23 | /* | ||
24 | * Some firmware implementations refuse to boot if there's insufficient | ||
25 | * space in the variable store. The implementation of garbage collection | ||
26 | * in some FW versions causes stale (deleted) variables to take up space | ||
27 | * longer than intended and space is only freed once the store becomes | ||
28 | * almost completely full. | ||
29 | * | ||
30 | * Enabling this option disables the space checks in | ||
31 | * efi_query_variable_store() and forces garbage collection. | ||
32 | * | ||
33 | * Only enable this option if deleting EFI variables does not free up | ||
34 | * space in your variable store, e.g. if despite deleting variables | ||
35 | * you're unable to create new ones. | ||
36 | */ | ||
37 | static int __init setup_storage_paranoia(char *arg) | ||
38 | { | ||
39 | efi_no_storage_paranoia = true; | ||
40 | return 0; | ||
41 | } | ||
42 | early_param("efi_no_storage_paranoia", setup_storage_paranoia); | ||
43 | |||
44 | /* | ||
45 | * Deleting the dummy variable kicks off garbage collection. | ||
46 | */ | ||
47 | void efi_delete_dummy_variable(void) | ||
48 | { | ||
49 | efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | ||
50 | EFI_VARIABLE_NON_VOLATILE | | ||
51 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
52 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
53 | 0, NULL); | ||
54 | } | ||
55 | |||
56 | /* | ||
57 | * Some firmware implementations refuse to boot if there's insufficient space | ||
58 | * in the variable store. Ensure that we never use more than a safe limit. | ||
59 | * | ||
60 | * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable | ||
61 | * store. | ||
62 | */ | ||
63 | efi_status_t efi_query_variable_store(u32 attributes, unsigned long size) | ||
64 | { | ||
65 | efi_status_t status; | ||
66 | u64 storage_size, remaining_size, max_size; | ||
67 | |||
68 | if (!(attributes & EFI_VARIABLE_NON_VOLATILE)) | ||
69 | return 0; | ||
70 | |||
71 | status = efi.query_variable_info(attributes, &storage_size, | ||
72 | &remaining_size, &max_size); | ||
73 | if (status != EFI_SUCCESS) | ||
74 | return status; | ||
75 | |||
76 | /* | ||
77 | * We account for that by refusing the write if permitting it would | ||
78 | * reduce the available space to under 5KB. This figure was provided by | ||
79 | * Samsung, so should be safe. | ||
80 | */ | ||
81 | if ((remaining_size - size < EFI_MIN_RESERVE) && | ||
82 | !efi_no_storage_paranoia) { | ||
83 | |||
84 | /* | ||
85 | * Triggering garbage collection may require that the firmware | ||
86 | * generate a real EFI_OUT_OF_RESOURCES error. We can force | ||
87 | * that by attempting to use more space than is available. | ||
88 | */ | ||
89 | unsigned long dummy_size = remaining_size + 1024; | ||
90 | void *dummy = kzalloc(dummy_size, GFP_ATOMIC); | ||
91 | |||
92 | if (!dummy) | ||
93 | return EFI_OUT_OF_RESOURCES; | ||
94 | |||
95 | status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, | ||
96 | EFI_VARIABLE_NON_VOLATILE | | ||
97 | EFI_VARIABLE_BOOTSERVICE_ACCESS | | ||
98 | EFI_VARIABLE_RUNTIME_ACCESS, | ||
99 | dummy_size, dummy); | ||
100 | |||
101 | if (status == EFI_SUCCESS) { | ||
102 | /* | ||
103 | * This should have failed, so if it didn't make sure | ||
104 | * that we delete it... | ||
105 | */ | ||
106 | efi_delete_dummy_variable(); | ||
107 | } | ||
108 | |||
109 | kfree(dummy); | ||
110 | |||
111 | /* | ||
112 | * The runtime code may now have triggered a garbage collection | ||
113 | * run, so check the variable info again | ||
114 | */ | ||
115 | status = efi.query_variable_info(attributes, &storage_size, | ||
116 | &remaining_size, &max_size); | ||
117 | |||
118 | if (status != EFI_SUCCESS) | ||
119 | return status; | ||
120 | |||
121 | /* | ||
122 | * There still isn't enough room, so return an error | ||
123 | */ | ||
124 | if (remaining_size - size < EFI_MIN_RESERVE) | ||
125 | return EFI_OUT_OF_RESOURCES; | ||
126 | } | ||
127 | |||
128 | return EFI_SUCCESS; | ||
129 | } | ||
130 | EXPORT_SYMBOL_GPL(efi_query_variable_store); | ||
131 | |||
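efi_query_variable_store() above refuses a non-volatile write that would leave less than EFI_MIN_RESERVE (5 KiB) free, and provokes the firmware's garbage collector by first attempting a deliberately oversized dummy write. The core arithmetic, as a standalone check with made-up numbers:

    #include <stdio.h>

    #define EFI_MIN_RESERVE 5120UL

    /* A write of 'size' bytes is safe if the 5 KiB reserve survives it. */
    static int write_allowed(unsigned long remaining_size, unsigned long size)
    {
            return remaining_size - size >= EFI_MIN_RESERVE;
    }

    int main(void)
    {
            printf("%d\n", write_allowed(16384, 2048)); /* 1: 14336 left */
            printf("%d\n", write_allowed(6144, 2048));  /* 0: 4096 left  */
            return 0;
    }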
132 | /* | ||
133 | * The UEFI specification makes it clear that the operating system is free to do | ||
134 | * whatever it wants with boot services code after ExitBootServices() has been | ||
135 | * called. Ignoring this recommendation, a significant number of EFI | ||
136 | * implementations continue calling into boot services code (SetVirtualAddressMap). | ||
137 | * To work around such buggy implementations we reserve the boot services | ||
138 | * region during EFI init and make sure it stays executable. Then, after | ||
139 | * SetVirtualAddressMap(), it is discarded. | ||
140 | */ | ||
141 | void __init efi_reserve_boot_services(void) | ||
142 | { | ||
143 | void *p; | ||
144 | |||
145 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
146 | efi_memory_desc_t *md = p; | ||
147 | u64 start = md->phys_addr; | ||
148 | u64 size = md->num_pages << EFI_PAGE_SHIFT; | ||
149 | |||
150 | if (md->type != EFI_BOOT_SERVICES_CODE && | ||
151 | md->type != EFI_BOOT_SERVICES_DATA) | ||
152 | continue; | ||
153 | /* Only reserve where possible: | ||
154 | * - Not within any already allocated areas | ||
155 | * - Not over any memory area (really needed, if above?) | ||
156 | * - Not within any part of the kernel | ||
157 | * - Not the bios reserved area | ||
158 | */ | ||
159 | if ((start + size > __pa_symbol(_text) | ||
160 | && start <= __pa_symbol(_end)) || | ||
161 | !e820_all_mapped(start, start+size, E820_RAM) || | ||
162 | memblock_is_region_reserved(start, size)) { | ||
163 | /* Could not reserve, skip it */ | ||
164 | md->num_pages = 0; | ||
165 | memblock_dbg("Could not reserve boot range [0x%010llx-0x%010llx]\n", | ||
166 | start, start+size-1); | ||
167 | } else | ||
168 | memblock_reserve(start, size); | ||
169 | } | ||
170 | } | ||
171 | |||
172 | void __init efi_free_boot_services(void) | ||
173 | { | ||
174 | void *p; | ||
175 | |||
176 | for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { | ||
177 | efi_memory_desc_t *md = p; | ||
178 | unsigned long long start = md->phys_addr; | ||
179 | unsigned long long size = md->num_pages << EFI_PAGE_SHIFT; | ||
180 | |||
181 | if (md->type != EFI_BOOT_SERVICES_CODE && | ||
182 | md->type != EFI_BOOT_SERVICES_DATA) | ||
183 | continue; | ||
184 | |||
185 | /* Could not reserve boot area */ | ||
186 | if (!size) | ||
187 | continue; | ||
188 | |||
189 | free_bootmem_late(start, size); | ||
190 | } | ||
191 | |||
192 | efi_unmap_memmap(); | ||
193 | } | ||
194 | |||
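efi_reserve_boot_services() skips a region, zeroing md->num_pages so that efi_free_boot_services() later ignores it, whenever the region overlaps the kernel image, unmapped RAM, or an existing memblock reservation. The kernel-image test in isolation, with the __pa_symbol() values replaced by plain parameters:

    #include <stdbool.h>

    /* True if [start, start + size) touches the kernel's [text, end]. */
    static bool overlaps_kernel(unsigned long long start,
                                unsigned long long size,
                                unsigned long long text,
                                unsigned long long end)
    {
            return start + size > text && start <= end;
    }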
195 | /* | ||
196 | * A number of config table entries get remapped to virtual addresses | ||
197 | * after entering EFI virtual mode. However, the kexec kernel requires | ||
198 | * their physical addresses therefore we pass them via setup_data and | ||
199 | * correct those entries to their respective physical addresses here. | ||
200 | * | ||
201 | * Currently this only handles SMBIOS, which is necessary for some | ||
202 | * firmware implementations. | ||
203 | */ | ||
204 | int __init efi_reuse_config(u64 tables, int nr_tables) | ||
205 | { | ||
206 | int i, sz, ret = 0; | ||
207 | void *p, *tablep; | ||
208 | struct efi_setup_data *data; | ||
209 | |||
210 | if (!efi_setup) | ||
211 | return 0; | ||
212 | |||
213 | if (!efi_enabled(EFI_64BIT)) | ||
214 | return 0; | ||
215 | |||
216 | data = early_memremap(efi_setup, sizeof(*data)); | ||
217 | if (!data) { | ||
218 | ret = -ENOMEM; | ||
219 | goto out; | ||
220 | } | ||
221 | |||
222 | if (!data->smbios) | ||
223 | goto out_memremap; | ||
224 | |||
225 | sz = sizeof(efi_config_table_64_t); | ||
226 | |||
227 | p = tablep = early_memremap(tables, nr_tables * sz); | ||
228 | if (!p) { | ||
229 | pr_err("Could not map Configuration table!\n"); | ||
230 | ret = -ENOMEM; | ||
231 | goto out_memremap; | ||
232 | } | ||
233 | |||
234 | for (i = 0; i < efi.systab->nr_tables; i++) { | ||
235 | efi_guid_t guid; | ||
236 | |||
237 | guid = ((efi_config_table_64_t *)p)->guid; | ||
238 | |||
239 | if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID)) | ||
240 | ((efi_config_table_64_t *)p)->table = data->smbios; | ||
241 | p += sz; | ||
242 | } | ||
243 | early_memunmap(tablep, nr_tables * sz); | ||
244 | |||
245 | out_memremap: | ||
246 | early_memunmap(data, sizeof(*data)); | ||
247 | out: | ||
248 | return ret; | ||
249 | } | ||
250 | |||
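efi_reuse_config() patches the SMBIOS entry of the EFI configuration table back to its physical address so a kexec'd kernel can locate it. The table is an array of (GUID, pointer) pairs, and the walk reduces to this shape; entries[], nr_tables, and smbios_phys are stand-ins for the early_memremap()'d table and setup_data value:

    /* efi_guidcmp() returns 0 on a match, like memcmp(). */
    for (i = 0; i < nr_tables; i++) {
            efi_config_table_64_t *t = &entries[i];

            if (!efi_guidcmp(t->guid, SMBIOS_TABLE_GUID))
                    t->table = smbios_phys;   /* restore physical address */
    }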
251 | void __init efi_apply_memmap_quirks(void) | ||
252 | { | ||
253 | /* | ||
254 | * Once setup is done earlier, unmap the EFI memory map on mismatched | ||
255 | * firmware/kernel architectures since there is no support for runtime | ||
256 | * services. | ||
257 | */ | ||
258 | if (!efi_runtime_supported()) { | ||
259 | pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); | ||
260 | efi_unmap_memmap(); | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * UV doesn't support the new EFI pagetable mapping yet. | ||
265 | */ | ||
266 | if (is_uv_system()) | ||
267 | set_bit(EFI_OLD_MEMMAP, &efi.flags); | ||
268 | } | ||
269 | |||
270 | /* | ||
271 | * For most modern platforms the preferred method of powering off is via | ||
272 | * ACPI. However, there are some that are known to require the use of | ||
273 | * EFI runtime services and for which ACPI does not work at all. | ||
274 | * | ||
275 | * Using EFI is a last resort, to be used only if no other option | ||
276 | * exists. | ||
277 | */ | ||
278 | bool efi_reboot_required(void) | ||
279 | { | ||
280 | if (!acpi_gbl_reduced_hardware) | ||
281 | return false; | ||
282 | |||
283 | efi_reboot_quirk_mode = EFI_RESET_WARM; | ||
284 | return true; | ||
285 | } | ||
286 | |||
287 | bool efi_poweroff_required(void) | ||
288 | { | ||
289 | return !!acpi_gbl_reduced_hardware; | ||
290 | } | ||
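The two predicates closing quirks.c let ACPI reduced-hardware platforms, where the usual ACPI poweroff and reset paths do not exist, fall back to EFI runtime services; efi_reboot_required() additionally forces a warm reset. A hedged sketch of a consumer, with the runtime helper name invented for illustration:

    /* Called from the machine power-off path; illustrative only. */
    static void machine_power_off_sketch(void)
    {
            if (efi_poweroff_required())
                    efi_runtime_poweroff();   /* hypothetical helper */
            /* ... otherwise fall through to ACPI/legacy methods ... */
    }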
diff --git a/arch/x86/platform/ts5500/ts5500.c b/arch/x86/platform/ts5500/ts5500.c index 9471b9456f25..baf16e72e668 100644 --- a/arch/x86/platform/ts5500/ts5500.c +++ b/arch/x86/platform/ts5500/ts5500.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Technologic Systems TS-5500 Single Board Computer support | 2 | * Technologic Systems TS-5500 Single Board Computer support |
3 | * | 3 | * |
4 | * Copyright (C) 2013 Savoir-faire Linux Inc. | 4 | * Copyright (C) 2013-2014 Savoir-faire Linux Inc. |
5 | * Vivien Didelot <vivien.didelot@savoirfairelinux.com> | 5 | * Vivien Didelot <vivien.didelot@savoirfairelinux.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it under | 7 | * This program is free software; you can redistribute it and/or modify it under |
@@ -15,8 +15,8 @@ | |||
15 | * state or available options. For further information about sysfs entries, see | 15 | * state or available options. For further information about sysfs entries, see |
16 | * Documentation/ABI/testing/sysfs-platform-ts5500. | 16 | * Documentation/ABI/testing/sysfs-platform-ts5500. |
17 | * | 17 | * |
18 | * This code actually supports the TS-5500 platform, but it may be extended to | 18 | * This code may be extended to support similar x86-based platforms. |
19 | * support similar Technologic Systems x86-based platforms, such as the TS-5600. | 19 | * Currently, the TS-5500 and TS-5400 are supported. |
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/delay.h> | 22 | #include <linux/delay.h> |
@@ -32,6 +32,7 @@ | |||
32 | /* Product code register */ | 32 | /* Product code register */ |
33 | #define TS5500_PRODUCT_CODE_ADDR 0x74 | 33 | #define TS5500_PRODUCT_CODE_ADDR 0x74 |
34 | #define TS5500_PRODUCT_CODE 0x60 /* TS-5500 product code */ | 34 | #define TS5500_PRODUCT_CODE 0x60 /* TS-5500 product code */ |
35 | #define TS5400_PRODUCT_CODE 0x40 /* TS-5400 product code */ | ||
35 | 36 | ||
36 | /* SRAM/RS-485/ADC options, and RS-485 RTS/Automatic RS-485 flags register */ | 37 | /* SRAM/RS-485/ADC options, and RS-485 RTS/Automatic RS-485 flags register */ |
37 | #define TS5500_SRAM_RS485_ADC_ADDR 0x75 | 38 | #define TS5500_SRAM_RS485_ADC_ADDR 0x75 |
@@ -66,6 +67,7 @@ | |||
66 | 67 | ||
67 | /** | 68 | /** |
68 | * struct ts5500_sbc - TS-5500 board description | 69 | * struct ts5500_sbc - TS-5500 board description |
70 | * @name: Board model name. | ||
69 | * @id: Board product ID. | 71 | * @id: Board product ID. |
70 | * @sram: Flag for SRAM option. | 72 | * @sram: Flag for SRAM option. |
71 | * @rs485: Flag for RS-485 option. | 73 | * @rs485: Flag for RS-485 option. |
@@ -75,6 +77,7 @@ | |||
75 | * @jumpers: Bitfield for jumpers' state. | 77 | * @jumpers: Bitfield for jumpers' state. |
76 | */ | 78 | */ |
77 | struct ts5500_sbc { | 79 | struct ts5500_sbc { |
80 | const char *name; | ||
78 | int id; | 81 | int id; |
79 | bool sram; | 82 | bool sram; |
80 | bool rs485; | 83 | bool rs485; |
@@ -122,13 +125,16 @@ static int __init ts5500_detect_config(struct ts5500_sbc *sbc) | |||
122 | if (!request_region(TS5500_PRODUCT_CODE_ADDR, 4, "ts5500")) | 125 | if (!request_region(TS5500_PRODUCT_CODE_ADDR, 4, "ts5500")) |
123 | return -EBUSY; | 126 | return -EBUSY; |
124 | 127 | ||
125 | tmp = inb(TS5500_PRODUCT_CODE_ADDR); | 128 | sbc->id = inb(TS5500_PRODUCT_CODE_ADDR); |
126 | if (tmp != TS5500_PRODUCT_CODE) { | 129 | if (sbc->id == TS5500_PRODUCT_CODE) { |
127 | pr_err("This platform is not a TS-5500 (found ID 0x%x)\n", tmp); | 130 | sbc->name = "TS-5500"; |
131 | } else if (sbc->id == TS5400_PRODUCT_CODE) { | ||
132 | sbc->name = "TS-5400"; | ||
133 | } else { | ||
134 | pr_err("ts5500: unknown product code 0x%x\n", sbc->id); | ||
128 | ret = -ENODEV; | 135 | ret = -ENODEV; |
129 | goto cleanup; | 136 | goto cleanup; |
130 | } | 137 | } |
131 | sbc->id = tmp; | ||
132 | 138 | ||
133 | tmp = inb(TS5500_SRAM_RS485_ADC_ADDR); | 139 | tmp = inb(TS5500_SRAM_RS485_ADC_ADDR); |
134 | sbc->sram = tmp & TS5500_SRAM; | 140 | sbc->sram = tmp & TS5500_SRAM; |
@@ -147,48 +153,52 @@ cleanup: | |||
147 | return ret; | 153 | return ret; |
148 | } | 154 | } |
149 | 155 | ||
150 | static ssize_t ts5500_show_id(struct device *dev, | 156 | static ssize_t name_show(struct device *dev, struct device_attribute *attr, |
151 | struct device_attribute *attr, char *buf) | 157 | char *buf) |
152 | { | 158 | { |
153 | struct ts5500_sbc *sbc = dev_get_drvdata(dev); | 159 | struct ts5500_sbc *sbc = dev_get_drvdata(dev); |
154 | 160 | ||
155 | return sprintf(buf, "0x%.2x\n", sbc->id); | 161 | return sprintf(buf, "%s\n", sbc->name); |
156 | } | 162 | } |
163 | static DEVICE_ATTR_RO(name); | ||
157 | 164 | ||
158 | static ssize_t ts5500_show_jumpers(struct device *dev, | 165 | static ssize_t id_show(struct device *dev, struct device_attribute *attr, |
159 | struct device_attribute *attr, | 166 | char *buf) |
160 | char *buf) | ||
161 | { | 167 | { |
162 | struct ts5500_sbc *sbc = dev_get_drvdata(dev); | 168 | struct ts5500_sbc *sbc = dev_get_drvdata(dev); |
163 | 169 | ||
164 | return sprintf(buf, "0x%.2x\n", sbc->jumpers >> 1); | 170 | return sprintf(buf, "0x%.2x\n", sbc->id); |
165 | } | 171 | } |
172 | static DEVICE_ATTR_RO(id); | ||
166 | 173 | ||
167 | #define TS5500_SHOW(field) \ | 174 | static ssize_t jumpers_show(struct device *dev, struct device_attribute *attr, |
168 | static ssize_t ts5500_show_##field(struct device *dev, \ | 175 | char *buf) |
169 | struct device_attribute *attr, \ | 176 | { |
170 | char *buf) \ | 177 | struct ts5500_sbc *sbc = dev_get_drvdata(dev); |
171 | { \ | ||
172 | struct ts5500_sbc *sbc = dev_get_drvdata(dev); \ | ||
173 | return sprintf(buf, "%d\n", sbc->field); \ | ||
174 | } | ||
175 | |||
176 | TS5500_SHOW(sram) | ||
177 | TS5500_SHOW(rs485) | ||
178 | TS5500_SHOW(adc) | ||
179 | TS5500_SHOW(ereset) | ||
180 | TS5500_SHOW(itr) | ||
181 | 178 | ||
182 | static DEVICE_ATTR(id, S_IRUGO, ts5500_show_id, NULL); | 179 | return sprintf(buf, "0x%.2x\n", sbc->jumpers >> 1); |
183 | static DEVICE_ATTR(jumpers, S_IRUGO, ts5500_show_jumpers, NULL); | 180 | } |
184 | static DEVICE_ATTR(sram, S_IRUGO, ts5500_show_sram, NULL); | 181 | static DEVICE_ATTR_RO(jumpers); |
185 | static DEVICE_ATTR(rs485, S_IRUGO, ts5500_show_rs485, NULL); | 182 | |
186 | static DEVICE_ATTR(adc, S_IRUGO, ts5500_show_adc, NULL); | 183 | #define TS5500_ATTR_BOOL(_field) \ |
187 | static DEVICE_ATTR(ereset, S_IRUGO, ts5500_show_ereset, NULL); | 184 | static ssize_t _field##_show(struct device *dev, \ |
188 | static DEVICE_ATTR(itr, S_IRUGO, ts5500_show_itr, NULL); | 185 | struct device_attribute *attr, char *buf) \ |
186 | { \ | ||
187 | struct ts5500_sbc *sbc = dev_get_drvdata(dev); \ | ||
188 | \ | ||
189 | return sprintf(buf, "%d\n", sbc->_field); \ | ||
190 | } \ | ||
191 | static DEVICE_ATTR_RO(_field) | ||
192 | |||
193 | TS5500_ATTR_BOOL(sram); | ||
194 | TS5500_ATTR_BOOL(rs485); | ||
195 | TS5500_ATTR_BOOL(adc); | ||
196 | TS5500_ATTR_BOOL(ereset); | ||
197 | TS5500_ATTR_BOOL(itr); | ||
189 | 198 | ||
190 | static struct attribute *ts5500_attributes[] = { | 199 | static struct attribute *ts5500_attributes[] = { |
191 | &dev_attr_id.attr, | 200 | &dev_attr_id.attr, |
201 | &dev_attr_name.attr, | ||
192 | &dev_attr_jumpers.attr, | 202 | &dev_attr_jumpers.attr, |
193 | &dev_attr_sram.attr, | 203 | &dev_attr_sram.attr, |
194 | &dev_attr_rs485.attr, | 204 | &dev_attr_rs485.attr, |
@@ -311,12 +321,14 @@ static int __init ts5500_init(void) | |||
311 | if (err) | 321 | if (err) |
312 | goto error; | 322 | goto error; |
313 | 323 | ||
314 | ts5500_dio1_pdev.dev.parent = &pdev->dev; | 324 | if (sbc->id == TS5500_PRODUCT_CODE) { |
315 | if (platform_device_register(&ts5500_dio1_pdev)) | 325 | ts5500_dio1_pdev.dev.parent = &pdev->dev; |
316 | dev_warn(&pdev->dev, "DIO1 block registration failed\n"); | 326 | if (platform_device_register(&ts5500_dio1_pdev)) |
317 | ts5500_dio2_pdev.dev.parent = &pdev->dev; | 327 | dev_warn(&pdev->dev, "DIO1 block registration failed\n"); |
318 | if (platform_device_register(&ts5500_dio2_pdev)) | 328 | ts5500_dio2_pdev.dev.parent = &pdev->dev; |
319 | dev_warn(&pdev->dev, "DIO2 block registration failed\n"); | 329 | if (platform_device_register(&ts5500_dio2_pdev)) |
330 | dev_warn(&pdev->dev, "DIO2 block registration failed\n"); | ||
331 | } | ||
320 | 332 | ||
321 | if (led_classdev_register(&pdev->dev, &ts5500_led_cdev)) | 333 | if (led_classdev_register(&pdev->dev, &ts5500_led_cdev)) |
322 | dev_warn(&pdev->dev, "LED registration failed\n"); | 334 | dev_warn(&pdev->dev, "LED registration failed\n"); |
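The ts5500 sysfs rework swaps the open-coded DEVICE_ATTR(..., S_IRUGO, ...) declarations for DEVICE_ATTR_RO(), which derives everything from a naming convention: DEVICE_ATTR_RO(foo) expects a show function literally named foo_show and emits a struct device_attribute named dev_attr_foo. The minimal pattern:

    static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
    {
            return sprintf(buf, "%d\n", 42);   /* value is illustrative */
    }
    static DEVICE_ATTR_RO(foo);

    /* &dev_attr_foo.attr then goes into the attribute array, as above. */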
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c index 424f4c97a44d..6ec7910f59bf 100644 --- a/arch/x86/power/cpu.c +++ b/arch/x86/power/cpu.c | |||
@@ -165,7 +165,7 @@ static void fix_processor_context(void) | |||
165 | * by __save_processor_state() | 165 | * by __save_processor_state() |
166 | * @ctxt - structure to load the registers contents from | 166 | * @ctxt - structure to load the registers contents from |
167 | */ | 167 | */ |
168 | static void __restore_processor_state(struct saved_context *ctxt) | 168 | static void notrace __restore_processor_state(struct saved_context *ctxt) |
169 | { | 169 | { |
170 | if (ctxt->misc_enable_saved) | 170 | if (ctxt->misc_enable_saved) |
171 | wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); | 171 | wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); |
@@ -239,7 +239,7 @@ static void __restore_processor_state(struct saved_context *ctxt) | |||
239 | } | 239 | } |
240 | 240 | ||
241 | /* Needed by apm.c */ | 241 | /* Needed by apm.c */ |
242 | void restore_processor_state(void) | 242 | void notrace restore_processor_state(void) |
243 | { | 243 | { |
244 | __restore_processor_state(&saved_context); | 244 | __restore_processor_state(&saved_context); |
245 | } | 245 | } |
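Marking __restore_processor_state() and restore_processor_state() notrace keeps ftrace's mcount hooks out of code that runs during resume, before the per-CPU state the tracer depends on has been restored. The annotation is a compiler attribute; roughly what include/linux/compiler.h defines in this era:

    #define notrace __attribute__((no_instrument_function))

    /* Safe to call before the tracing infrastructure is usable. */
    static void notrace resume_critical(void)
    {
    }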
diff --git a/arch/x86/um/asm/processor.h b/arch/x86/um/asm/processor.h index 04f82e020f2b..2a206d2b14ab 100644 --- a/arch/x86/um/asm/processor.h +++ b/arch/x86/um/asm/processor.h | |||
@@ -25,7 +25,8 @@ static inline void rep_nop(void) | |||
25 | __asm__ __volatile__("rep;nop": : :"memory"); | 25 | __asm__ __volatile__("rep;nop": : :"memory"); |
26 | } | 26 | } |
27 | 27 | ||
28 | #define cpu_relax() rep_nop() | 28 | #define cpu_relax() rep_nop() |
29 | #define cpu_relax_lowlatency() cpu_relax() | ||
29 | 30 | ||
30 | #include <asm/processor-generic.h> | 31 | #include <asm/processor-generic.h> |
31 | 32 | ||
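cpu_relax_lowlatency(), added here for UML and for alpha/arc near the top of this patch, gives optimistic-spinning locks a relax primitive tuned for wakeup latency rather than power saving; on most architectures, as in these hunks, it simply aliases cpu_relax(). A sketch of the kind of spin loop that calls it (owner_released() is a stand-in, not a kernel API):

    /* Busy-wait for a lock owner to drop the lock. */
    while (!owner_released(lock))
            cpu_relax_lowlatency();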
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 96ab2c09cb68..7322755f337a 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile | |||
@@ -22,3 +22,4 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o | |||
22 | obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o | 22 | obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o |
23 | obj-$(CONFIG_XEN_DOM0) += apic.o vga.o | 23 | obj-$(CONFIG_XEN_DOM0) += apic.o vga.o |
24 | obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o | 24 | obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o |
25 | obj-$(CONFIG_XEN_EFI) += efi.o | ||
diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c new file mode 100644 index 000000000000..a02e09e18f57 --- /dev/null +++ b/arch/x86/xen/efi.c | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2014 Oracle Co., Daniel Kiper | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along | ||
15 | * with this program. If not, see <http://www.gnu.org/licenses/>. | ||
16 | */ | ||
17 | |||
18 | #include <linux/efi.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/string.h> | ||
21 | |||
22 | #include <xen/xen-ops.h> | ||
23 | |||
24 | #include <asm/setup.h> | ||
25 | |||
26 | void __init xen_efi_init(void) | ||
27 | { | ||
28 | efi_system_table_t *efi_systab_xen; | ||
29 | |||
30 | efi_systab_xen = xen_efi_probe(); | ||
31 | |||
32 | if (efi_systab_xen == NULL) | ||
33 | return; | ||
34 | |||
35 | strncpy((char *)&boot_params.efi_info.efi_loader_signature, "Xen", | ||
36 | sizeof(boot_params.efi_info.efi_loader_signature)); | ||
37 | boot_params.efi_info.efi_systab = (__u32)__pa(efi_systab_xen); | ||
38 | boot_params.efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32); | ||
39 | |||
40 | set_bit(EFI_BOOT, &efi.flags); | ||
41 | set_bit(EFI_PARAVIRT, &efi.flags); | ||
42 | set_bit(EFI_64BIT, &efi.flags); | ||
43 | } | ||
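xen_efi_init() above stores the 64-bit physical address of the Xen-provided system table as two 32-bit halves, efi_systab and efi_systab_hi, because struct efi_info predates 64-bit fields. Recombining them on the consumer side looks like this standalone illustration (the address is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t lo = 0x7fe0d000, hi = 0x1;
            uint64_t systab = ((uint64_t)hi << 32) | lo;

            printf("systab at %#llx\n", (unsigned long long)systab);
            return 0;
    }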
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index ffb101e45731..94813515fdd6 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1718,6 +1718,8 @@ asmlinkage __visible void __init xen_start_kernel(void) | |||
1718 | 1718 | ||
1719 | xen_setup_runstate_info(0); | 1719 | xen_setup_runstate_info(0); |
1720 | 1720 | ||
1721 | xen_efi_init(); | ||
1722 | |||
1721 | /* Start the world */ | 1723 | /* Start the world */ |
1722 | #ifdef CONFIG_X86_32 | 1724 | #ifdef CONFIG_X86_32 |
1723 | i386_start_kernel(); | 1725 | i386_start_kernel(); |
diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c index c98583588580..ebfa9b2c871d 100644 --- a/arch/x86/xen/grant-table.c +++ b/arch/x86/xen/grant-table.c | |||
@@ -36,99 +36,133 @@ | |||
36 | 36 | ||
37 | #include <linux/sched.h> | 37 | #include <linux/sched.h> |
38 | #include <linux/mm.h> | 38 | #include <linux/mm.h> |
39 | #include <linux/slab.h> | ||
39 | #include <linux/vmalloc.h> | 40 | #include <linux/vmalloc.h> |
40 | 41 | ||
41 | #include <xen/interface/xen.h> | 42 | #include <xen/interface/xen.h> |
42 | #include <xen/page.h> | 43 | #include <xen/page.h> |
43 | #include <xen/grant_table.h> | 44 | #include <xen/grant_table.h> |
45 | #include <xen/xen.h> | ||
44 | 46 | ||
45 | #include <asm/pgtable.h> | 47 | #include <asm/pgtable.h> |
46 | 48 | ||
47 | static int map_pte_fn(pte_t *pte, struct page *pmd_page, | 49 | static struct gnttab_vm_area { |
48 | unsigned long addr, void *data) | 50 | struct vm_struct *area; |
51 | pte_t **ptes; | ||
52 | } gnttab_shared_vm_area, gnttab_status_vm_area; | ||
53 | |||
54 | int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, | ||
55 | unsigned long max_nr_gframes, | ||
56 | void **__shared) | ||
49 | { | 57 | { |
50 | unsigned long **frames = (unsigned long **)data; | 58 | void *shared = *__shared; |
59 | unsigned long addr; | ||
60 | unsigned long i; | ||
51 | 61 | ||
52 | set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL)); | 62 | if (shared == NULL) |
53 | (*frames)++; | 63 | *__shared = shared = gnttab_shared_vm_area.area->addr; |
54 | return 0; | ||
55 | } | ||
56 | 64 | ||
57 | /* | 65 | addr = (unsigned long)shared; |
58 | * This function is used to map shared frames to store grant status. It is | 66 | |
59 | * different from map_pte_fn above, the frames type here is uint64_t. | 67 | for (i = 0; i < nr_gframes; i++) { |
60 | */ | 68 | set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i], |
61 | static int map_pte_fn_status(pte_t *pte, struct page *pmd_page, | 69 | mfn_pte(frames[i], PAGE_KERNEL)); |
62 | unsigned long addr, void *data) | 70 | addr += PAGE_SIZE; |
63 | { | 71 | } |
64 | uint64_t **frames = (uint64_t **)data; | ||
65 | 72 | ||
66 | set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL)); | ||
67 | (*frames)++; | ||
68 | return 0; | 73 | return 0; |
69 | } | 74 | } |
70 | 75 | ||
71 | static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, | 76 | int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, |
72 | unsigned long addr, void *data) | 77 | unsigned long max_nr_gframes, |
78 | grant_status_t **__shared) | ||
73 | { | 79 | { |
80 | grant_status_t *shared = *__shared; | ||
81 | unsigned long addr; | ||
82 | unsigned long i; | ||
83 | |||
84 | if (shared == NULL) | ||
85 | *__shared = shared = gnttab_status_vm_area.area->addr; | ||
86 | |||
87 | addr = (unsigned long)shared; | ||
88 | |||
89 | for (i = 0; i < nr_gframes; i++) { | ||
90 | set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i], | ||
91 | mfn_pte(frames[i], PAGE_KERNEL)); | ||
92 | addr += PAGE_SIZE; | ||
93 | } | ||
74 | 94 | ||
75 | set_pte_at(&init_mm, addr, pte, __pte(0)); | ||
76 | return 0; | 95 | return 0; |
77 | } | 96 | } |
78 | 97 | ||
79 | int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, | 98 | void arch_gnttab_unmap(void *shared, unsigned long nr_gframes) |
80 | unsigned long max_nr_gframes, | ||
81 | void **__shared) | ||
82 | { | 99 | { |
83 | int rc; | 100 | pte_t **ptes; |
84 | void *shared = *__shared; | 101 | unsigned long addr; |
102 | unsigned long i; | ||
85 | 103 | ||
86 | if (shared == NULL) { | 104 | if (shared == gnttab_status_vm_area.area->addr) |
87 | struct vm_struct *area = | 105 | ptes = gnttab_status_vm_area.ptes; |
88 | alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL); | 106 | else |
89 | BUG_ON(area == NULL); | 107 | ptes = gnttab_shared_vm_area.ptes; |
90 | shared = area->addr; | ||
91 | *__shared = shared; | ||
92 | } | ||
93 | 108 | ||
94 | rc = apply_to_page_range(&init_mm, (unsigned long)shared, | 109 | addr = (unsigned long)shared; |
95 | PAGE_SIZE * nr_gframes, | 110 | |
96 | map_pte_fn, &frames); | 111 | for (i = 0; i < nr_gframes; i++) { |
97 | return rc; | 112 | set_pte_at(&init_mm, addr, ptes[i], __pte(0)); |
113 | addr += PAGE_SIZE; | ||
114 | } | ||
98 | } | 115 | } |
99 | 116 | ||
100 | int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, | 117 | static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames) |
101 | unsigned long max_nr_gframes, | ||
102 | grant_status_t **__shared) | ||
103 | { | 118 | { |
104 | int rc; | 119 | area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL); |
105 | grant_status_t *shared = *__shared; | 120 | if (area->ptes == NULL) |
121 | return -ENOMEM; | ||
106 | 122 | ||
107 | if (shared == NULL) { | 123 | area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes); |
108 | /* No need to pass in PTE as we are going to do it | 124 | if (area->area == NULL) { |
109 | * in apply_to_page_range anyhow. */ | 125 | kfree(area->ptes); |
110 | struct vm_struct *area = | 126 | return -ENOMEM; |
111 | alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL); | ||
112 | BUG_ON(area == NULL); | ||
113 | shared = area->addr; | ||
114 | *__shared = shared; | ||
115 | } | 127 | } |
116 | 128 | ||
117 | rc = apply_to_page_range(&init_mm, (unsigned long)shared, | 129 | return 0; |
118 | PAGE_SIZE * nr_gframes, | ||
119 | map_pte_fn_status, &frames); | ||
120 | return rc; | ||
121 | } | 130 | } |
122 | 131 | ||
123 | void arch_gnttab_unmap(void *shared, unsigned long nr_gframes) | 132 | static void arch_gnttab_vfree(struct gnttab_vm_area *area) |
133 | { | ||
134 | free_vm_area(area->area); | ||
135 | kfree(area->ptes); | ||
136 | } | ||
137 | |||
138 | int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status) | ||
124 | { | 139 | { |
125 | apply_to_page_range(&init_mm, (unsigned long)shared, | 140 | int ret; |
126 | PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL); | 141 | |
142 | if (!xen_pv_domain()) | ||
143 | return 0; | ||
144 | |||
145 | ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared); | ||
146 | if (ret < 0) | ||
147 | return ret; | ||
148 | |||
149 | /* | ||
150 | * Always allocate the space for the status frames in case | ||
151 | * we're migrated to a host with V2 support. | ||
152 | */ | ||
153 | ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status); | ||
154 | if (ret < 0) | ||
155 | goto err; | ||
156 | |||
157 | return 0; | ||
158 | err: | ||
159 | arch_gnttab_vfree(&gnttab_shared_vm_area); | ||
160 | return -ENOMEM; | ||
127 | } | 161 | } |
162 | |||
128 | #ifdef CONFIG_XEN_PVH | 163 | #ifdef CONFIG_XEN_PVH |
129 | #include <xen/balloon.h> | 164 | #include <xen/balloon.h> |
130 | #include <xen/events.h> | 165 | #include <xen/events.h> |
131 | #include <xen/xen.h> | ||
132 | #include <linux/slab.h> | 166 | #include <linux/slab.h> |
133 | static int __init xlated_setup_gnttab_pages(void) | 167 | static int __init xlated_setup_gnttab_pages(void) |
134 | { | 168 | { |
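The grant-table rework above trades apply_to_page_range() walks for PTE pointers captured once by alloc_vm_area() at init time, so mapping and unmapping become direct set_pte_at() loops over the saved PTEs. A hedged sketch of the resulting call order, using the signatures from this hunk; NR_DEMO_FRAMES and demo_frames are hypothetical:

#define NR_DEMO_FRAMES 4

static unsigned long demo_frames[NR_DEMO_FRAMES];

static int demo_gnttab_setup(void)
{
	void *shared = NULL;
	int ret;

	/* Pre-allocate vm areas and PTE arrays for shared and status frames. */
	ret = arch_gnttab_init(NR_DEMO_FRAMES, NR_DEMO_FRAMES);
	if (ret)
		return ret;

	/* Fills the captured PTEs directly; no page-table walk needed. */
	ret = arch_gnttab_map_shared(demo_frames, NR_DEMO_FRAMES,
				     NR_DEMO_FRAMES, &shared);
	if (ret)
		return ret;

	arch_gnttab_unmap(shared, NR_DEMO_FRAMES);	/* clears the same PTEs */
	return 0;
}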
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 97d87659f779..28c7e0be56e4 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -105,6 +105,14 @@ static inline void __init xen_init_apic(void) | |||
105 | } | 105 | } |
106 | #endif | 106 | #endif |
107 | 107 | ||
108 | #ifdef CONFIG_XEN_EFI | ||
109 | extern void xen_efi_init(void); | ||
110 | #else | ||
111 | static inline void __init xen_efi_init(void) | ||
112 | { | ||
113 | } | ||
114 | #endif | ||
115 | |||
108 | /* Declare an asm function, along with symbols needed to make it | 116 | /* Declare an asm function, along with symbols needed to make it |
109 | inlineable */ | 117 | inlineable */ |
110 | #define DECL_ASM(ret, name, ...) \ | 118 | #define DECL_ASM(ret, name, ...) \ |
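The xen-ops.h hunk uses the usual compile-out pattern: with CONFIG_XEN_EFI disabled, xen_efi_init() becomes an empty inline, so the unconditional call added to xen_start_kernel() costs nothing. The skeleton of the pattern, with CONFIG_FEATURE_X and feature_x_init() as hypothetical names:

#ifdef CONFIG_FEATURE_X
extern void feature_x_init(void);
#else
static inline void feature_x_init(void) { }	/* compiles away */
#endif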
diff --git a/arch/xtensa/include/asm/processor.h b/arch/xtensa/include/asm/processor.h index abb59708a3b7..b61bdf0eea25 100644 --- a/arch/xtensa/include/asm/processor.h +++ b/arch/xtensa/include/asm/processor.h | |||
@@ -182,6 +182,7 @@ extern unsigned long get_wchan(struct task_struct *p); | |||
182 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1]) | 182 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->areg[1]) |
183 | 183 | ||
184 | #define cpu_relax() barrier() | 184 | #define cpu_relax() barrier() |
185 | #define cpu_relax_lowlatency() cpu_relax() | ||
185 | 186 | ||
186 | /* Special register access. */ | 187 | /* Special register access. */ |
187 | 188 | ||
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index f9e1ec346e35..8453e6e39895 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S | |||
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow: | |||
376 | beqz a2, 1f # if at start of vector, don't restore | 376 | beqz a2, 1f # if at start of vector, don't restore |
377 | 377 | ||
378 | addi a0, a0, -128 | 378 | addi a0, a0, -128 |
379 | bbsi a0, 8, 1f # don't restore except for overflow 8 and 12 | 379 | bbsi.l a0, 8, 1f # don't restore except for overflow 8 and 12 |
380 | bbsi a0, 7, 2f | 380 | |
381 | /* | ||
382 | * This fixup handler is for the extremely unlikely case where the | ||
383 | * overflow handler's reference thru a0 gets a hardware TLB refill | ||
384 | * that bumps out the (distinct, aliasing) TLB entry that mapped its | ||
385 | * prior references thru a9/a13, and where our reference now thru | ||
386 | * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill). | ||
387 | */ | ||
388 | movi a2, window_overflow_restore_a0_fixup | ||
389 | s32i a2, a3, EXC_TABLE_FIXUP | ||
390 | l32i a2, a3, EXC_TABLE_DOUBLE_SAVE | ||
391 | xsr a3, excsave1 | ||
392 | |||
393 | bbsi.l a0, 7, 2f | ||
381 | 394 | ||
382 | /* | 395 | /* |
383 | * Restore a0 as saved by _WindowOverflow8(). | 396 | * Restore a0 as saved by _WindowOverflow8(). |
384 | * | ||
385 | * FIXME: we really need a fixup handler for this L32E, | ||
386 | * for the extremely unlikely case where the overflow handler's | ||
387 | * reference thru a0 gets a hardware TLB refill that bumps out | ||
388 | * the (distinct, aliasing) TLB entry that mapped its prior | ||
389 | * references thru a9, and where our reference now thru a9 | ||
390 | * gets a 2nd-level miss exception (not hardware TLB refill). | ||
391 | */ | 397 | */ |
392 | 398 | ||
393 | l32e a2, a9, -16 | 399 | l32e a0, a9, -16 |
394 | wsr a2, depc # replace the saved a0 | 400 | wsr a0, depc # replace the saved a0 |
395 | j 1f | 401 | j 3f |
396 | 402 | ||
397 | 2: | 403 | 2: |
398 | /* | 404 | /* |
399 | * Restore a0 as saved by _WindowOverflow12(). | 405 | * Restore a0 as saved by _WindowOverflow12(). |
400 | * | ||
401 | * FIXME: we really need a fixup handler for this L32E, | ||
402 | * for the extremely unlikely case where the overflow handler's | ||
403 | * reference thru a0 gets a hardware TLB refill that bumps out | ||
404 | * the (distinct, aliasing) TLB entry that mapped its prior | ||
405 | * references thru a13, and where our reference now thru a13 | ||
406 | * gets a 2nd-level miss exception (not hardware TLB refill). | ||
407 | */ | 406 | */ |
408 | 407 | ||
409 | l32e a2, a13, -16 | 408 | l32e a0, a13, -16 |
410 | wsr a2, depc # replace the saved a0 | 409 | wsr a0, depc # replace the saved a0 |
410 | 3: | ||
411 | xsr a3, excsave1 | ||
412 | movi a0, 0 | ||
413 | s32i a0, a3, EXC_TABLE_FIXUP | ||
414 | s32i a2, a3, EXC_TABLE_DOUBLE_SAVE | ||
411 | 1: | 415 | 1: |
412 | /* | 416 | /* |
413 | * Restore WindowBase while leaving all address registers restored. | 417 | * Restore WindowBase while leaving all address registers restored. |
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow: | |||
449 | 453 | ||
450 | s32i a0, a2, PT_DEPC | 454 | s32i a0, a2, PT_DEPC |
451 | 455 | ||
456 | _DoubleExceptionVector_handle_exception: | ||
452 | addx4 a0, a0, a3 | 457 | addx4 a0, a0, a3 |
453 | l32i a0, a0, EXC_TABLE_FAST_USER | 458 | l32i a0, a0, EXC_TABLE_FAST_USER |
454 | xsr a3, excsave1 | 459 | xsr a3, excsave1 |
@@ -464,11 +469,120 @@ _DoubleExceptionVector_WindowOverflow: | |||
464 | rotw -3 | 469 | rotw -3 |
465 | j 1b | 470 | j 1b |
466 | 471 | ||
467 | .end literal_prefix | ||
468 | 472 | ||
469 | ENDPROC(_DoubleExceptionVector) | 473 | ENDPROC(_DoubleExceptionVector) |
470 | 474 | ||
471 | /* | 475 | /* |
476 | * Fixup handler for a TLB miss in the double exception handler for window overflow. | ||
477 | * We get here with windowbase set to the window that was being spilled and | ||
478 | * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12 | ||
479 | * (bit set) window. | ||
480 | * | ||
481 | * We do the following here: | ||
482 | * - go to the original window retaining a0 value; | ||
483 | * - set up the exception stack to return to the appropriate a0 restore | ||
484 | * code (we'll need to rotate the window back, and since there's no place | ||
485 | * to save this information, we use a different return address for that); | ||
486 | * - handle the exception; | ||
487 | * - go to the window that was being spilled; | ||
488 | * - set up window_overflow_restore_a0_fixup as a fixup routine; | ||
489 | * - reload a0; | ||
490 | * - restore the original window; | ||
491 | * - reset the default fixup routine; | ||
492 | * - return to user. By the time we get to this fixup handler all information | ||
493 | * about the conditions of the original double exception that happened in | ||
494 | * the window overflow handler is lost, so we just return to userspace to | ||
495 | * retry the overflow from the start. | ||
496 | * | ||
497 | * a0: value of depc, original value in depc | ||
498 | * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE | ||
499 | * a3: exctable, original value in excsave1 | ||
500 | */ | ||
501 | |||
502 | ENTRY(window_overflow_restore_a0_fixup) | ||
503 | |||
504 | rsr a0, ps | ||
505 | extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH | ||
506 | rsr a2, windowbase | ||
507 | sub a0, a2, a0 | ||
508 | extui a0, a0, 0, 3 | ||
509 | l32i a2, a3, EXC_TABLE_DOUBLE_SAVE | ||
510 | xsr a3, excsave1 | ||
511 | |||
512 | _beqi a0, 1, .Lhandle_1 | ||
513 | _beqi a0, 3, .Lhandle_3 | ||
514 | |||
515 | .macro overflow_fixup_handle_exception_pane n | ||
516 | |||
517 | rsr a0, depc | ||
518 | rotw -\n | ||
519 | |||
520 | xsr a3, excsave1 | ||
521 | wsr a2, depc | ||
522 | l32i a2, a3, EXC_TABLE_KSTK | ||
523 | s32i a0, a2, PT_AREG0 | ||
524 | |||
525 | movi a0, .Lrestore_\n | ||
526 | s32i a0, a2, PT_DEPC | ||
527 | rsr a0, exccause | ||
528 | j _DoubleExceptionVector_handle_exception | ||
529 | |||
530 | .endm | ||
531 | |||
532 | overflow_fixup_handle_exception_pane 2 | ||
533 | .Lhandle_1: | ||
534 | overflow_fixup_handle_exception_pane 1 | ||
535 | .Lhandle_3: | ||
536 | overflow_fixup_handle_exception_pane 3 | ||
537 | |||
538 | .macro overflow_fixup_restore_a0_pane n | ||
539 | |||
540 | rotw \n | ||
541 | /* Need to preserve the a0 value here to be able to handle an exception | ||
542 | * that may occur while reloading a0 from the stack. It may occur because | ||
543 | * the TLB miss handler may not be atomic and the pointer to the page | ||
544 | * table may be lost before we get here. There are no free registers, | ||
545 | * so we need to use the EXC_TABLE_DOUBLE_SAVE area. | ||
546 | */ | ||
547 | xsr a3, excsave1 | ||
548 | s32i a2, a3, EXC_TABLE_DOUBLE_SAVE | ||
549 | movi a2, window_overflow_restore_a0_fixup | ||
550 | s32i a2, a3, EXC_TABLE_FIXUP | ||
551 | l32i a2, a3, EXC_TABLE_DOUBLE_SAVE | ||
552 | xsr a3, excsave1 | ||
553 | bbsi.l a0, 7, 1f | ||
554 | l32e a0, a9, -16 | ||
555 | j 2f | ||
556 | 1: | ||
557 | l32e a0, a13, -16 | ||
558 | 2: | ||
559 | rotw -\n | ||
560 | |||
561 | .endm | ||
562 | |||
563 | .Lrestore_2: | ||
564 | overflow_fixup_restore_a0_pane 2 | ||
565 | |||
566 | .Lset_default_fixup: | ||
567 | xsr a3, excsave1 | ||
568 | s32i a2, a3, EXC_TABLE_DOUBLE_SAVE | ||
569 | movi a2, 0 | ||
570 | s32i a2, a3, EXC_TABLE_FIXUP | ||
571 | l32i a2, a3, EXC_TABLE_DOUBLE_SAVE | ||
572 | xsr a3, excsave1 | ||
573 | rfe | ||
574 | |||
575 | .Lrestore_1: | ||
576 | overflow_fixup_restore_a0_pane 1 | ||
577 | j .Lset_default_fixup | ||
578 | .Lrestore_3: | ||
579 | overflow_fixup_restore_a0_pane 3 | ||
580 | j .Lset_default_fixup | ||
581 | |||
582 | ENDPROC(window_overflow_restore_a0_fixup) | ||
583 | |||
584 | .end literal_prefix | ||
585 | /* | ||
472 | * Debug interrupt vector | 586 | * Debug interrupt vector |
473 | * | 587 | * |
474 | * There is not much space here, so simply jump to another handler. | 588 | * There is not much space here, so simply jump to another handler. |
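The fixup entry in the vectors.S hunk above computes which rotation pane holds the spilled window from the difference between the current WINDOWBASE and the owner window saved in PS.OWB, reduced to three bits by the extui. A small C restatement of that computation; the function name and values are illustrative only:

/* Mirrors: rsr/extui PS_OWB; rsr windowbase; sub; extui a0, a0, 0, 3 */
static unsigned int overflow_pane(unsigned int windowbase, unsigned int owb)
{
	return (windowbase - owb) & 7;	/* 1, 2 or 3 selects the rotw pane */
}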
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index ee32c0085dff..d16db6df86f8 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S | |||
@@ -269,13 +269,13 @@ SECTIONS | |||
269 | .UserExceptionVector.literal) | 269 | .UserExceptionVector.literal) |
270 | SECTION_VECTOR (_DoubleExceptionVector_literal, | 270 | SECTION_VECTOR (_DoubleExceptionVector_literal, |
271 | .DoubleExceptionVector.literal, | 271 | .DoubleExceptionVector.literal, |
272 | DOUBLEEXC_VECTOR_VADDR - 16, | 272 | DOUBLEEXC_VECTOR_VADDR - 40, |
273 | SIZEOF(.UserExceptionVector.text), | 273 | SIZEOF(.UserExceptionVector.text), |
274 | .UserExceptionVector.text) | 274 | .UserExceptionVector.text) |
275 | SECTION_VECTOR (_DoubleExceptionVector_text, | 275 | SECTION_VECTOR (_DoubleExceptionVector_text, |
276 | .DoubleExceptionVector.text, | 276 | .DoubleExceptionVector.text, |
277 | DOUBLEEXC_VECTOR_VADDR, | 277 | DOUBLEEXC_VECTOR_VADDR, |
278 | 32, | 278 | 40, |
279 | .DoubleExceptionVector.literal) | 279 | .DoubleExceptionVector.literal) |
280 | 280 | ||
281 | . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; | 281 | . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; |
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 4224256bb215..77ed20209ca5 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c | |||
@@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist) | |||
191 | return -EINVAL; | 191 | return -EINVAL; |
192 | } | 192 | } |
193 | 193 | ||
194 | if (it && start - it->start < bank_sz) { | 194 | if (it && start - it->start <= bank_sz) { |
195 | if (start == it->start) { | 195 | if (start == it->start) { |
196 | if (end - it->start < bank_sz) { | 196 | if (end - it->start < bank_sz) { |
197 | it->start = end; | 197 | it->start = end; |
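The mem_reserve() change is a one-character boundary fix: with <=, a reservation whose start coincides exactly with a bank's end is now matched to that bank instead of falling through. A standalone check of the boundary case, with made-up bank numbers:

#include <assert.h>

int main(void)
{
	unsigned long bank_start = 0x1000, bank_sz = 0x1000;
	unsigned long start = bank_start + bank_sz;	/* exactly at the end */

	assert(!(start - bank_start <  bank_sz));	/* old test: misses */
	assert( (start - bank_start <= bank_sz));	/* new test: matches */
	return 0;
}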