Diffstat (limited to 'arch')
103 files changed, 889 insertions, 872 deletions
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index 6b58c1de7577..400c663b21c2 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c | |||
@@ -282,7 +282,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr, | |||
282 | #else | 282 | #else |
283 | /* if V-P const for loop, PTAG can be written once outside loop */ | 283 | /* if V-P const for loop, PTAG can be written once outside loop */ |
284 | if (full_page_op) | 284 | if (full_page_op) |
285 | write_aux_reg(ARC_REG_DC_PTAG, paddr); | 285 | write_aux_reg(aux_tag, paddr); |
286 | #endif | 286 | #endif |
287 | 287 | ||
288 | while (num_lines-- > 0) { | 288 | while (num_lines-- > 0) { |
@@ -296,7 +296,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr, | |||
296 | write_aux_reg(aux_cmd, vaddr); | 296 | write_aux_reg(aux_cmd, vaddr); |
297 | vaddr += L1_CACHE_BYTES; | 297 | vaddr += L1_CACHE_BYTES; |
298 | #else | 298 | #else |
299 | write_aux_reg(aux, paddr); | 299 | write_aux_reg(aux_cmd, paddr); |
300 | paddr += L1_CACHE_BYTES; | 300 | paddr += L1_CACHE_BYTES; |
301 | #endif | 301 | #endif |
302 | } | 302 | } |
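The two one-line fixes above make the per-line loop use the variables picked earlier in the function (aux_tag for the page-tag register, aux_cmd for the per-line operation) instead of a hard-coded register name and a stale identifier. A minimal, self-contained sketch of the pattern the loop implements; the helper, register names and line size below are stand-ins for the example, not the kernel's definitions:

#include <stdio.h>

#define L1_CACHE_BYTES 64

/* Stand-in for the kernel's write_aux_reg(); just logs the access. */
static void write_aux_reg(const char *reg, unsigned long val)
{
        printf("%s <= 0x%lx\n", reg, val);
}

static void cache_line_loop(unsigned long paddr, int num_lines, int full_page_op)
{
        const char *aux_tag = "DC_PTAG";   /* tag register chosen earlier */
        const char *aux_cmd = "DC_IVDL";   /* per-line command register */

        /* If the physical page is constant for the whole loop, the tag
         * register only needs to be programmed once, outside the loop. */
        if (full_page_op)
                write_aux_reg(aux_tag, paddr);

        while (num_lines-- > 0) {
                if (!full_page_op)
                        write_aux_reg(aux_tag, paddr);
                /* Each line op goes to aux_cmd, the register selected above. */
                write_aux_reg(aux_cmd, paddr);
                paddr += L1_CACHE_BYTES;
        }
}

int main(void)
{
        cache_line_loop(0x80000000UL, 4, 1);
        return 0;
}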
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e25419817791..15949459611f 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -1578,6 +1578,7 @@ config BL_SWITCHER_DUMMY_IF | |||
1578 | 1578 | ||
1579 | choice | 1579 | choice |
1580 | prompt "Memory split" | 1580 | prompt "Memory split" |
1581 | depends on MMU | ||
1581 | default VMSPLIT_3G | 1582 | default VMSPLIT_3G |
1582 | help | 1583 | help |
1583 | Select the desired split between kernel and user memory. | 1584 | Select the desired split between kernel and user memory. |
@@ -1595,6 +1596,7 @@ endchoice | |||
1595 | 1596 | ||
1596 | config PAGE_OFFSET | 1597 | config PAGE_OFFSET |
1597 | hex | 1598 | hex |
1599 | default PHYS_OFFSET if !MMU | ||
1598 | default 0x40000000 if VMSPLIT_1G | 1600 | default 0x40000000 if VMSPLIT_1G |
1599 | default 0x80000000 if VMSPLIT_2G | 1601 | default 0x80000000 if VMSPLIT_2G |
1600 | default 0xC0000000 | 1602 | default 0xC0000000 |
@@ -1903,6 +1905,7 @@ config XEN | |||
1903 | depends on ARM && AEABI && OF | 1905 | depends on ARM && AEABI && OF |
1904 | depends on CPU_V7 && !CPU_V6 | 1906 | depends on CPU_V7 && !CPU_V6 |
1905 | depends on !GENERIC_ATOMIC64 | 1907 | depends on !GENERIC_ATOMIC64 |
1908 | depends on MMU | ||
1906 | select ARM_PSCI | 1909 | select ARM_PSCI |
1907 | select SWIOTLB_XEN | 1910 | select SWIOTLB_XEN |
1908 | select ARCH_DMA_ADDR_T_64BIT | 1911 | select ARCH_DMA_ADDR_T_64BIT |
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index 47279aa96a6a..0714e0334e33 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore | |||
@@ -1,4 +1,5 @@ | |||
1 | ashldi3.S | 1 | ashldi3.S |
2 | bswapsdi2.S | ||
2 | font.c | 3 | font.c |
3 | lib1funcs.S | 4 | lib1funcs.S |
4 | hyp-stub.S | 5 | hyp-stub.S |
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi index e491b82f8d67..792fde1b7f75 100644 --- a/arch/arm/boot/dts/bcm11351.dtsi +++ b/arch/arm/boot/dts/bcm11351.dtsi | |||
@@ -147,7 +147,7 @@ | |||
147 | }; | 147 | }; |
148 | 148 | ||
149 | pinctrl@35004800 { | 149 | pinctrl@35004800 { |
150 | compatible = "brcm,capri-pinctrl"; | 150 | compatible = "brcm,bcm11351-pinctrl"; |
151 | reg = <0x35004800 0x430>; | 151 | reg = <0x35004800 0x430>; |
152 | }; | 152 | }; |
153 | 153 | ||
diff --git a/arch/arm/boot/dts/keystone-clocks.dtsi b/arch/arm/boot/dts/keystone-clocks.dtsi index 2363593e1050..ef58d1c24313 100644 --- a/arch/arm/boot/dts/keystone-clocks.dtsi +++ b/arch/arm/boot/dts/keystone-clocks.dtsi | |||
@@ -612,7 +612,7 @@ clocks { | |||
612 | compatible = "ti,keystone,psc-clock"; | 612 | compatible = "ti,keystone,psc-clock"; |
613 | clocks = <&chipclk13>; | 613 | clocks = <&chipclk13>; |
614 | clock-output-names = "vcp-3"; | 614 | clock-output-names = "vcp-3"; |
615 | reg = <0x0235000a8 0xb00>, <0x02350060 0x400>; | 615 | reg = <0x023500a8 0xb00>, <0x02350060 0x400>; |
616 | reg-names = "control", "domain"; | 616 | reg-names = "control", "domain"; |
617 | domain-id = <24>; | 617 | domain-id = <24>; |
618 | }; | 618 | }; |
diff --git a/arch/arm/boot/dts/omap3-gta04.dts b/arch/arm/boot/dts/omap3-gta04.dts index c551e4af4d83..d3b253bbc885 100644 --- a/arch/arm/boot/dts/omap3-gta04.dts +++ b/arch/arm/boot/dts/omap3-gta04.dts | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | / { | 14 | / { |
15 | model = "OMAP3 GTA04"; | 15 | model = "OMAP3 GTA04"; |
16 | compatible = "ti,omap3-gta04", "ti,omap3"; | 16 | compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3"; |
17 | 17 | ||
18 | cpus { | 18 | cpus { |
19 | cpu@0 { | 19 | cpu@0 { |
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts index 25a2b5f652fd..f2779ac75872 100644 --- a/arch/arm/boot/dts/omap3-igep0020.dts +++ b/arch/arm/boot/dts/omap3-igep0020.dts | |||
@@ -14,7 +14,7 @@ | |||
14 | 14 | ||
15 | / { | 15 | / { |
16 | model = "IGEPv2 (TI OMAP AM/DM37x)"; | 16 | model = "IGEPv2 (TI OMAP AM/DM37x)"; |
17 | compatible = "isee,omap3-igep0020", "ti,omap3"; | 17 | compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3"; |
18 | 18 | ||
19 | leds { | 19 | leds { |
20 | pinctrl-names = "default"; | 20 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts index 145c58cfc8ac..2793749eb1ba 100644 --- a/arch/arm/boot/dts/omap3-igep0030.dts +++ b/arch/arm/boot/dts/omap3-igep0030.dts | |||
@@ -13,7 +13,7 @@ | |||
13 | 13 | ||
14 | / { | 14 | / { |
15 | model = "IGEP COM MODULE (TI OMAP AM/DM37x)"; | 15 | model = "IGEP COM MODULE (TI OMAP AM/DM37x)"; |
16 | compatible = "isee,omap3-igep0030", "ti,omap3"; | 16 | compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3"; |
17 | 17 | ||
18 | leds { | 18 | leds { |
19 | pinctrl-names = "default"; | 19 | pinctrl-names = "default"; |
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 10666ca8aee1..d4d2763f4794 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi | |||
@@ -426,7 +426,7 @@ | |||
426 | }; | 426 | }; |
427 | 427 | ||
428 | rtp: rtp@01c25000 { | 428 | rtp: rtp@01c25000 { |
429 | compatible = "allwinner,sun4i-ts"; | 429 | compatible = "allwinner,sun4i-a10-ts"; |
430 | reg = <0x01c25000 0x100>; | 430 | reg = <0x01c25000 0x100>; |
431 | interrupts = <29>; | 431 | interrupts = <29>; |
432 | }; | 432 | }; |
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 64961595e8d6..79fd412005b0 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi | |||
@@ -383,7 +383,7 @@ | |||
383 | }; | 383 | }; |
384 | 384 | ||
385 | rtp: rtp@01c25000 { | 385 | rtp: rtp@01c25000 { |
386 | compatible = "allwinner,sun4i-ts"; | 386 | compatible = "allwinner,sun4i-a10-ts"; |
387 | reg = <0x01c25000 0x100>; | 387 | reg = <0x01c25000 0x100>; |
388 | interrupts = <29>; | 388 | interrupts = <29>; |
389 | }; | 389 | }; |
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index 320335abfccd..c463fd730c91 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi | |||
@@ -346,7 +346,7 @@ | |||
346 | }; | 346 | }; |
347 | 347 | ||
348 | rtp: rtp@01c25000 { | 348 | rtp: rtp@01c25000 { |
349 | compatible = "allwinner,sun4i-ts"; | 349 | compatible = "allwinner,sun4i-a10-ts"; |
350 | reg = <0x01c25000 0x100>; | 350 | reg = <0x01c25000 0x100>; |
351 | interrupts = <29>; | 351 | interrupts = <29>; |
352 | }; | 352 | }; |
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 9ff09484847b..6f25cf559ad0 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi | |||
@@ -454,7 +454,7 @@ | |||
454 | rtc: rtc@01c20d00 { | 454 | rtc: rtc@01c20d00 { |
455 | compatible = "allwinner,sun7i-a20-rtc"; | 455 | compatible = "allwinner,sun7i-a20-rtc"; |
456 | reg = <0x01c20d00 0x20>; | 456 | reg = <0x01c20d00 0x20>; |
457 | interrupts = <0 24 1>; | 457 | interrupts = <0 24 4>; |
458 | }; | 458 | }; |
459 | 459 | ||
460 | sid: eeprom@01c23800 { | 460 | sid: eeprom@01c23800 { |
@@ -463,7 +463,7 @@ | |||
463 | }; | 463 | }; |
464 | 464 | ||
465 | rtp: rtp@01c25000 { | 465 | rtp: rtp@01c25000 { |
466 | compatible = "allwinner,sun4i-ts"; | 466 | compatible = "allwinner,sun4i-a10-ts"; |
467 | reg = <0x01c25000 0x100>; | 467 | reg = <0x01c25000 0x100>; |
468 | interrupts = <0 29 4>; | 468 | interrupts = <0 29 4>; |
469 | }; | 469 | }; |
@@ -596,10 +596,10 @@ | |||
596 | hstimer@01c60000 { | 596 | hstimer@01c60000 { |
597 | compatible = "allwinner,sun7i-a20-hstimer"; | 597 | compatible = "allwinner,sun7i-a20-hstimer"; |
598 | reg = <0x01c60000 0x1000>; | 598 | reg = <0x01c60000 0x1000>; |
599 | interrupts = <0 81 1>, | 599 | interrupts = <0 81 4>, |
600 | <0 82 1>, | 600 | <0 82 4>, |
601 | <0 83 1>, | 601 | <0 83 4>, |
602 | <0 84 1>; | 602 | <0 84 4>; |
603 | clocks = <&ahb_gates 28>; | 603 | clocks = <&ahb_gates 28>; |
604 | }; | 604 | }; |
605 | 605 | ||
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig index 00fe9e9710fd..27d69b558c5d 100644 --- a/arch/arm/configs/tegra_defconfig +++ b/arch/arm/configs/tegra_defconfig | |||
@@ -204,7 +204,10 @@ CONFIG_MMC_BLOCK_MINORS=16 | |||
204 | CONFIG_MMC_SDHCI=y | 204 | CONFIG_MMC_SDHCI=y |
205 | CONFIG_MMC_SDHCI_PLTFM=y | 205 | CONFIG_MMC_SDHCI_PLTFM=y |
206 | CONFIG_MMC_SDHCI_TEGRA=y | 206 | CONFIG_MMC_SDHCI_TEGRA=y |
207 | CONFIG_NEW_LEDS=y | ||
208 | CONFIG_LEDS_CLASS=y | ||
207 | CONFIG_LEDS_GPIO=y | 209 | CONFIG_LEDS_GPIO=y |
210 | CONFIG_LEDS_TRIGGERS=y | ||
208 | CONFIG_LEDS_TRIGGER_TIMER=y | 211 | CONFIG_LEDS_TRIGGER_TIMER=y |
209 | CONFIG_LEDS_TRIGGER_ONESHOT=y | 212 | CONFIG_LEDS_TRIGGER_ONESHOT=y |
210 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | 213 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y |
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 8756e4bcdba0..4afb376d9c7c 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h | |||
@@ -30,14 +30,15 @@ | |||
30 | */ | 30 | */ |
31 | #define UL(x) _AC(x, UL) | 31 | #define UL(x) _AC(x, UL) |
32 | 32 | ||
33 | /* PAGE_OFFSET - the virtual address of the start of the kernel image */ | ||
34 | #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) | ||
35 | |||
33 | #ifdef CONFIG_MMU | 36 | #ifdef CONFIG_MMU |
34 | 37 | ||
35 | /* | 38 | /* |
36 | * PAGE_OFFSET - the virtual address of the start of the kernel image | ||
37 | * TASK_SIZE - the maximum size of a user space task. | 39 | * TASK_SIZE - the maximum size of a user space task. |
38 | * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area | 40 | * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area |
39 | */ | 41 | */ |
40 | #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) | ||
41 | #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) | 42 | #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) |
42 | #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) | 43 | #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) |
43 | 44 | ||
@@ -104,10 +105,6 @@ | |||
104 | #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) | 105 | #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) |
105 | #endif | 106 | #endif |
106 | 107 | ||
107 | #ifndef PAGE_OFFSET | ||
108 | #define PAGE_OFFSET PLAT_PHYS_OFFSET | ||
109 | #endif | ||
110 | |||
111 | /* | 108 | /* |
112 | * The module can be at any place in ram in nommu mode. | 109 | * The module can be at any place in ram in nommu mode. |
113 | */ | 110 | */ |
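Together with the Kconfig hunk above (default PHYS_OFFSET if !MMU), moving the PAGE_OFFSET definition out of the CONFIG_MMU block gives nommu builds a real PAGE_OFFSET instead of the old PLAT_PHYS_OFFSET fallback, so the linear-map arithmetic degenerates to an identity mapping there. A minimal sketch with invented constants standing in for the Kconfig values:

#include <stdio.h>

#define PHYS_OFFSET 0x80000000UL            /* invented example value */
#ifdef CONFIG_MMU
#define PAGE_OFFSET 0xc0000000UL            /* e.g. VMSPLIT_3G */
#else
#define PAGE_OFFSET PHYS_OFFSET             /* nommu: PAGE_OFFSET == RAM base */
#endif

/* Simplified model of the kernel linear mapping. */
static unsigned long virt_to_phys_demo(unsigned long va)
{
        return va - PAGE_OFFSET + PHYS_OFFSET;
}

int main(void)
{
        /* With !MMU the translation is the identity; with an MMU it applies
         * the usual PAGE_OFFSET shift. */
        printf("0x%lx\n", virt_to_phys_demo(PAGE_OFFSET + 0x1000));
        return 0;
}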
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S index 47cd974e57ea..c96ecacb2021 100644 --- a/arch/arm/kernel/head-common.S +++ b/arch/arm/kernel/head-common.S | |||
@@ -177,6 +177,18 @@ __lookup_processor_type_data: | |||
177 | .long __proc_info_end | 177 | .long __proc_info_end |
178 | .size __lookup_processor_type_data, . - __lookup_processor_type_data | 178 | .size __lookup_processor_type_data, . - __lookup_processor_type_data |
179 | 179 | ||
180 | __error_lpae: | ||
181 | #ifdef CONFIG_DEBUG_LL | ||
182 | adr r0, str_lpae | ||
183 | bl printascii | ||
184 | b __error | ||
185 | str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n" | ||
186 | #else | ||
187 | b __error | ||
188 | #endif | ||
189 | .align | ||
190 | ENDPROC(__error_lpae) | ||
191 | |||
180 | __error_p: | 192 | __error_p: |
181 | #ifdef CONFIG_DEBUG_LL | 193 | #ifdef CONFIG_DEBUG_LL |
182 | adr r0, str_p1 | 194 | adr r0, str_p1 |
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 914616e0bdcd..f5f381d91556 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S | |||
@@ -102,7 +102,7 @@ ENTRY(stext) | |||
102 | and r3, r3, #0xf @ extract VMSA support | 102 | and r3, r3, #0xf @ extract VMSA support |
103 | cmp r3, #5 @ long-descriptor translation table format? | 103 | cmp r3, #5 @ long-descriptor translation table format? |
104 | THUMB( it lo ) @ force fixup-able long branch encoding | 104 | THUMB( it lo ) @ force fixup-able long branch encoding |
105 | blo __error_p @ only classic page table format | 105 | blo __error_lpae @ only classic page table format |
106 | #endif | 106 | #endif |
107 | 107 | ||
108 | #ifndef CONFIG_XIP_KERNEL | 108 | #ifndef CONFIG_XIP_KERNEL |
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 1d8248ea5669..bd18bb8b2770 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c | |||
@@ -878,7 +878,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self, | |||
878 | unsigned long cmd, | 878 | unsigned long cmd, |
879 | void *v) | 879 | void *v) |
880 | { | 880 | { |
881 | if (cmd == CPU_PM_EXIT) { | 881 | if (cmd == CPU_PM_EXIT && |
882 | __hyp_get_vectors() == hyp_default_vectors) { | ||
882 | cpu_init_hyp_mode(NULL); | 883 | cpu_init_hyp_mode(NULL); |
883 | return NOTIFY_OK; | 884 | return NOTIFY_OK; |
884 | } | 885 | } |
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S index ddc15539bad2..0d68d4073068 100644 --- a/arch/arm/kvm/interrupts.S +++ b/arch/arm/kvm/interrupts.S | |||
@@ -220,6 +220,10 @@ after_vfp_restore: | |||
220 | * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are | 220 | * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are |
221 | * passed in r0 and r1. | 221 | * passed in r0 and r1. |
222 | * | 222 | * |
223 | * A function pointer with a value of 0xffffffff has a special meaning, | ||
224 | * and is used to implement __hyp_get_vectors in the same way as in | ||
225 | * arch/arm/kernel/hyp_stub.S. | ||
226 | * | ||
223 | * The calling convention follows the standard AAPCS: | 227 | * The calling convention follows the standard AAPCS: |
224 | * r0 - r3: caller save | 228 | * r0 - r3: caller save |
225 | * r12: caller save | 229 | * r12: caller save |
@@ -363,6 +367,11 @@ hyp_hvc: | |||
363 | host_switch_to_hyp: | 367 | host_switch_to_hyp: |
364 | pop {r0, r1, r2} | 368 | pop {r0, r1, r2} |
365 | 369 | ||
370 | /* Check for __hyp_get_vectors */ | ||
371 | cmp r0, #-1 | ||
372 | mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR | ||
373 | beq 1f | ||
374 | |||
366 | push {lr} | 375 | push {lr} |
367 | mrs lr, SPSR | 376 | mrs lr, SPSR |
368 | push {lr} | 377 | push {lr} |
@@ -378,7 +387,7 @@ THUMB( orr lr, #1) | |||
378 | pop {lr} | 387 | pop {lr} |
379 | msr SPSR_csxf, lr | 388 | msr SPSR_csxf, lr |
380 | pop {lr} | 389 | pop {lr} |
381 | eret | 390 | 1: eret |
382 | 391 | ||
383 | guest_trap: | 392 | guest_trap: |
384 | load_vcpu @ Load VCPU pointer to r0 | 393 | load_vcpu @ Load VCPU pointer to r0 |
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c index 3b05aea56d1f..11ed9152e665 100644 --- a/arch/arm/mach-omap2/cclock3xxx_data.c +++ b/arch/arm/mach-omap2/cclock3xxx_data.c | |||
@@ -433,7 +433,9 @@ static const struct clk_ops dpll4_m5x2_ck_ops = { | |||
433 | .enable = &omap2_dflt_clk_enable, | 433 | .enable = &omap2_dflt_clk_enable, |
434 | .disable = &omap2_dflt_clk_disable, | 434 | .disable = &omap2_dflt_clk_disable, |
435 | .is_enabled = &omap2_dflt_clk_is_enabled, | 435 | .is_enabled = &omap2_dflt_clk_is_enabled, |
436 | .set_rate = &omap3_clkoutx2_set_rate, | ||
436 | .recalc_rate = &omap3_clkoutx2_recalc, | 437 | .recalc_rate = &omap3_clkoutx2_recalc, |
438 | .round_rate = &omap3_clkoutx2_round_rate, | ||
437 | }; | 439 | }; |
438 | 440 | ||
439 | static const struct clk_ops dpll4_m5x2_ck_3630_ops = { | 441 | static const struct clk_ops dpll4_m5x2_ck_3630_ops = { |
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c index 4c158c838d40..01fc710c8181 100644 --- a/arch/arm/mach-omap2/cpuidle44xx.c +++ b/arch/arm/mach-omap2/cpuidle44xx.c | |||
@@ -23,6 +23,8 @@ | |||
23 | #include "prm.h" | 23 | #include "prm.h" |
24 | #include "clockdomain.h" | 24 | #include "clockdomain.h" |
25 | 25 | ||
26 | #define MAX_CPUS 2 | ||
27 | |||
26 | /* Machine specific information */ | 28 | /* Machine specific information */ |
27 | struct idle_statedata { | 29 | struct idle_statedata { |
28 | u32 cpu_state; | 30 | u32 cpu_state; |
@@ -48,11 +50,11 @@ static struct idle_statedata omap4_idle_data[] = { | |||
48 | }, | 50 | }, |
49 | }; | 51 | }; |
50 | 52 | ||
51 | static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS]; | 53 | static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS]; |
52 | static struct clockdomain *cpu_clkdm[NR_CPUS]; | 54 | static struct clockdomain *cpu_clkdm[MAX_CPUS]; |
53 | 55 | ||
54 | static atomic_t abort_barrier; | 56 | static atomic_t abort_barrier; |
55 | static bool cpu_done[NR_CPUS]; | 57 | static bool cpu_done[MAX_CPUS]; |
56 | static struct idle_statedata *state_ptr = &omap4_idle_data[0]; | 58 | static struct idle_statedata *state_ptr = &omap4_idle_data[0]; |
57 | 59 | ||
58 | /* Private functions */ | 60 | /* Private functions */ |
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c index 3185ced807c9..3c418ea54bbe 100644 --- a/arch/arm/mach-omap2/dpll3xxx.c +++ b/arch/arm/mach-omap2/dpll3xxx.c | |||
@@ -623,6 +623,32 @@ void omap3_dpll_deny_idle(struct clk_hw_omap *clk) | |||
623 | 623 | ||
624 | /* Clock control for DPLL outputs */ | 624 | /* Clock control for DPLL outputs */ |
625 | 625 | ||
626 | /* Find the parent DPLL for the given clkoutx2 clock */ | ||
627 | static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw) | ||
628 | { | ||
629 | struct clk_hw_omap *pclk = NULL; | ||
630 | struct clk *parent; | ||
631 | |||
632 | /* Walk up the parents of clk, looking for a DPLL */ | ||
633 | do { | ||
634 | do { | ||
635 | parent = __clk_get_parent(hw->clk); | ||
636 | hw = __clk_get_hw(parent); | ||
637 | } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC)); | ||
638 | if (!hw) | ||
639 | break; | ||
640 | pclk = to_clk_hw_omap(hw); | ||
641 | } while (pclk && !pclk->dpll_data); | ||
642 | |||
643 | /* clk does not have a DPLL as a parent? error in the clock data */ | ||
644 | if (!pclk) { | ||
645 | WARN_ON(1); | ||
646 | return NULL; | ||
647 | } | ||
648 | |||
649 | return pclk; | ||
650 | } | ||
651 | |||
626 | /** | 652 | /** |
627 | * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate | 653 | * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate |
628 | * @clk: DPLL output struct clk | 654 | * @clk: DPLL output struct clk |
@@ -637,27 +663,14 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, | |||
637 | unsigned long rate; | 663 | unsigned long rate; |
638 | u32 v; | 664 | u32 v; |
639 | struct clk_hw_omap *pclk = NULL; | 665 | struct clk_hw_omap *pclk = NULL; |
640 | struct clk *parent; | ||
641 | 666 | ||
642 | if (!parent_rate) | 667 | if (!parent_rate) |
643 | return 0; | 668 | return 0; |
644 | 669 | ||
645 | /* Walk up the parents of clk, looking for a DPLL */ | 670 | pclk = omap3_find_clkoutx2_dpll(hw); |
646 | do { | ||
647 | do { | ||
648 | parent = __clk_get_parent(hw->clk); | ||
649 | hw = __clk_get_hw(parent); | ||
650 | } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC)); | ||
651 | if (!hw) | ||
652 | break; | ||
653 | pclk = to_clk_hw_omap(hw); | ||
654 | } while (pclk && !pclk->dpll_data); | ||
655 | 671 | ||
656 | /* clk does not have a DPLL as a parent? error in the clock data */ | 672 | if (!pclk) |
657 | if (!pclk) { | ||
658 | WARN_ON(1); | ||
659 | return 0; | 673 | return 0; |
660 | } | ||
661 | 674 | ||
662 | dd = pclk->dpll_data; | 675 | dd = pclk->dpll_data; |
663 | 676 | ||
@@ -672,6 +685,55 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, | |||
672 | return rate; | 685 | return rate; |
673 | } | 686 | } |
674 | 687 | ||
688 | int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate, | ||
689 | unsigned long parent_rate) | ||
690 | { | ||
691 | return 0; | ||
692 | } | ||
693 | |||
694 | long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate, | ||
695 | unsigned long *prate) | ||
696 | { | ||
697 | const struct dpll_data *dd; | ||
698 | u32 v; | ||
699 | struct clk_hw_omap *pclk = NULL; | ||
700 | |||
701 | if (!*prate) | ||
702 | return 0; | ||
703 | |||
704 | pclk = omap3_find_clkoutx2_dpll(hw); | ||
705 | |||
706 | if (!pclk) | ||
707 | return 0; | ||
708 | |||
709 | dd = pclk->dpll_data; | ||
710 | |||
711 | /* TYPE J does not have a clkoutx2 */ | ||
712 | if (dd->flags & DPLL_J_TYPE) { | ||
713 | *prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate); | ||
714 | return *prate; | ||
715 | } | ||
716 | |||
717 | WARN_ON(!dd->enable_mask); | ||
718 | |||
719 | v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask; | ||
720 | v >>= __ffs(dd->enable_mask); | ||
721 | |||
722 | /* If in bypass, the rate is fixed to the bypass rate*/ | ||
723 | if (v != OMAP3XXX_EN_DPLL_LOCKED) | ||
724 | return *prate; | ||
725 | |||
726 | if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) { | ||
727 | unsigned long best_parent; | ||
728 | |||
729 | best_parent = (rate / 2); | ||
730 | *prate = __clk_round_rate(__clk_get_parent(hw->clk), | ||
731 | best_parent); | ||
732 | } | ||
733 | |||
734 | return *prate * 2; | ||
735 | } | ||
736 | |||
675 | /* OMAP3/4 non-CORE DPLL clkops */ | 737 | /* OMAP3/4 non-CORE DPLL clkops */ |
676 | const struct clk_hw_omap_ops clkhwops_omap3_dpll = { | 738 | const struct clk_hw_omap_ops clkhwops_omap3_dpll = { |
677 | .allow_idle = omap3_dpll_allow_idle, | 739 | .allow_idle = omap3_dpll_allow_idle, |
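The new omap3_clkoutx2_round_rate() mirrors the recalc logic: when the parent DPLL is locked the x2 output runs at twice the DPLL rate, so a requested rate is rounded by asking the parent for rate/2 and doubling the answer, while in bypass the rate stays pinned to the parent rate. A small arithmetic sketch of that round trip, with a made-up parent that only produces multiples of 10 MHz:

#include <stdio.h>

/* Stand-in for __clk_round_rate() on the parent DPLL: pretend the DPLL
 * can only produce multiples of 10 MHz. */
static unsigned long parent_round_rate_demo(unsigned long rate)
{
        return (rate / 10000000UL) * 10000000UL;
}

/* Sketch of the clkoutx2 rounding: round rate/2 at the parent, double it. */
static unsigned long clkoutx2_round_rate_demo(unsigned long rate)
{
        unsigned long best_parent = rate / 2;

        return parent_round_rate_demo(best_parent) * 2;
}

int main(void)
{
        /* e.g. a 233 MHz request rounds to 2 * 110 MHz = 220 MHz here */
        printf("%lu\n", clkoutx2_round_rate_demo(233000000UL));
        return 0;
}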
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 42d81885c700..1f33f5db10d5 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c | |||
@@ -1947,29 +1947,31 @@ static int _ocp_softreset(struct omap_hwmod *oh) | |||
1947 | goto dis_opt_clks; | 1947 | goto dis_opt_clks; |
1948 | 1948 | ||
1949 | _write_sysconfig(v, oh); | 1949 | _write_sysconfig(v, oh); |
1950 | ret = _clear_softreset(oh, &v); | ||
1951 | if (ret) | ||
1952 | goto dis_opt_clks; | ||
1953 | |||
1954 | _write_sysconfig(v, oh); | ||
1955 | 1950 | ||
1956 | if (oh->class->sysc->srst_udelay) | 1951 | if (oh->class->sysc->srst_udelay) |
1957 | udelay(oh->class->sysc->srst_udelay); | 1952 | udelay(oh->class->sysc->srst_udelay); |
1958 | 1953 | ||
1959 | c = _wait_softreset_complete(oh); | 1954 | c = _wait_softreset_complete(oh); |
1960 | if (c == MAX_MODULE_SOFTRESET_WAIT) | 1955 | if (c == MAX_MODULE_SOFTRESET_WAIT) { |
1961 | pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n", | 1956 | pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n", |
1962 | oh->name, MAX_MODULE_SOFTRESET_WAIT); | 1957 | oh->name, MAX_MODULE_SOFTRESET_WAIT); |
1963 | else | 1958 | ret = -ETIMEDOUT; |
1959 | goto dis_opt_clks; | ||
1960 | } else { | ||
1964 | pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c); | 1961 | pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c); |
1962 | } | ||
1963 | |||
1964 | ret = _clear_softreset(oh, &v); | ||
1965 | if (ret) | ||
1966 | goto dis_opt_clks; | ||
1967 | |||
1968 | _write_sysconfig(v, oh); | ||
1965 | 1969 | ||
1966 | /* | 1970 | /* |
1967 | * XXX add _HWMOD_STATE_WEDGED for modules that don't come back from | 1971 | * XXX add _HWMOD_STATE_WEDGED for modules that don't come back from |
1968 | * _wait_target_ready() or _reset() | 1972 | * _wait_target_ready() or _reset() |
1969 | */ | 1973 | */ |
1970 | 1974 | ||
1971 | ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0; | ||
1972 | |||
1973 | dis_opt_clks: | 1975 | dis_opt_clks: |
1974 | if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) | 1976 | if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET) |
1975 | _disable_optional_clocks(oh); | 1977 | _disable_optional_clocks(oh); |
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 18f333c440db..810c205d668b 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c | |||
@@ -1365,11 +1365,10 @@ static struct omap_hwmod_class_sysconfig dra7xx_spinlock_sysc = { | |||
1365 | .rev_offs = 0x0000, | 1365 | .rev_offs = 0x0000, |
1366 | .sysc_offs = 0x0010, | 1366 | .sysc_offs = 0x0010, |
1367 | .syss_offs = 0x0014, | 1367 | .syss_offs = 0x0014, |
1368 | .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY | | 1368 | .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP | |
1369 | SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE | | 1369 | SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET | |
1370 | SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), | 1370 | SYSS_HAS_RESET_STATUS), |
1371 | .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | | 1371 | .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART), |
1372 | SIDLE_SMART_WKUP), | ||
1373 | .sysc_fields = &omap_hwmod_sysc_type1, | 1372 | .sysc_fields = &omap_hwmod_sysc_type1, |
1374 | }; | 1373 | }; |
1375 | 1374 | ||
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c index 3d5b24dcd9a4..c33e07e2f0d4 100644 --- a/arch/arm/mach-omap2/pdata-quirks.c +++ b/arch/arm/mach-omap2/pdata-quirks.c | |||
@@ -22,6 +22,8 @@ | |||
22 | #include "common-board-devices.h" | 22 | #include "common-board-devices.h" |
23 | #include "dss-common.h" | 23 | #include "dss-common.h" |
24 | #include "control.h" | 24 | #include "control.h" |
25 | #include "omap-secure.h" | ||
26 | #include "soc.h" | ||
25 | 27 | ||
26 | struct pdata_init { | 28 | struct pdata_init { |
27 | const char *compatible; | 29 | const char *compatible; |
@@ -169,6 +171,22 @@ static void __init am3517_evm_legacy_init(void) | |||
169 | omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET); | 171 | omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET); |
170 | omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */ | 172 | omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */ |
171 | } | 173 | } |
174 | |||
175 | static void __init nokia_n900_legacy_init(void) | ||
176 | { | ||
177 | hsmmc2_internal_input_clk(); | ||
178 | |||
179 | if (omap_type() == OMAP2_DEVICE_TYPE_SEC) { | ||
180 | if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) { | ||
181 | pr_info("RX-51: Enabling ARM errata 430973 workaround\n"); | ||
182 | /* set IBE to 1 */ | ||
183 | rx51_secure_update_aux_cr(BIT(6), 0); | ||
184 | } else { | ||
185 | pr_warning("RX-51: Not enabling ARM errata 430973 workaround\n"); | ||
186 | pr_warning("Thumb binaries may crash randomly without this workaround\n"); | ||
187 | } | ||
188 | } | ||
189 | } | ||
172 | #endif /* CONFIG_ARCH_OMAP3 */ | 190 | #endif /* CONFIG_ARCH_OMAP3 */ |
173 | 191 | ||
174 | #ifdef CONFIG_ARCH_OMAP4 | 192 | #ifdef CONFIG_ARCH_OMAP4 |
@@ -239,6 +257,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = { | |||
239 | #endif | 257 | #endif |
240 | #ifdef CONFIG_ARCH_OMAP3 | 258 | #ifdef CONFIG_ARCH_OMAP3 |
241 | OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata), | 259 | OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata), |
260 | OF_DEV_AUXDATA("ti,omap3-padconf", 0x480025a0, "480025a0.pinmux", &pcs_pdata), | ||
242 | OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata), | 261 | OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata), |
243 | /* Only on am3517 */ | 262 | /* Only on am3517 */ |
244 | OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL), | 263 | OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL), |
@@ -259,7 +278,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = { | |||
259 | static struct pdata_init pdata_quirks[] __initdata = { | 278 | static struct pdata_init pdata_quirks[] __initdata = { |
260 | #ifdef CONFIG_ARCH_OMAP3 | 279 | #ifdef CONFIG_ARCH_OMAP3 |
261 | { "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, }, | 280 | { "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, }, |
262 | { "nokia,omap3-n900", hsmmc2_internal_input_clk, }, | 281 | { "nokia,omap3-n900", nokia_n900_legacy_init, }, |
263 | { "nokia,omap3-n9", hsmmc2_internal_input_clk, }, | 282 | { "nokia,omap3-n9", hsmmc2_internal_input_clk, }, |
264 | { "nokia,omap3-n950", hsmmc2_internal_input_clk, }, | 283 | { "nokia,omap3-n950", hsmmc2_internal_input_clk, }, |
265 | { "isee,omap3-igep0020", omap3_igep0020_legacy_init, }, | 284 | { "isee,omap3-igep0020", omap3_igep0020_legacy_init, }, |
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c index 6334b96b4097..280f3c58abe5 100644 --- a/arch/arm/mach-omap2/prminst44xx.c +++ b/arch/arm/mach-omap2/prminst44xx.c | |||
@@ -183,11 +183,11 @@ void omap4_prminst_global_warm_sw_reset(void) | |||
183 | OMAP4_PRM_RSTCTRL_OFFSET); | 183 | OMAP4_PRM_RSTCTRL_OFFSET); |
184 | v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK; | 184 | v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK; |
185 | omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION, | 185 | omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION, |
186 | OMAP4430_PRM_DEVICE_INST, | 186 | dev_inst, |
187 | OMAP4_PRM_RSTCTRL_OFFSET); | 187 | OMAP4_PRM_RSTCTRL_OFFSET); |
188 | 188 | ||
189 | /* OCP barrier */ | 189 | /* OCP barrier */ |
190 | v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, | 190 | v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, |
191 | OMAP4430_PRM_DEVICE_INST, | 191 | dev_inst, |
192 | OMAP4_PRM_RSTCTRL_OFFSET); | 192 | OMAP4_PRM_RSTCTRL_OFFSET); |
193 | } | 193 | } |
diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h index f33679d2d3ee..50e1d850ee2e 100644 --- a/arch/arm/mach-sa1100/include/mach/collie.h +++ b/arch/arm/mach-sa1100/include/mach/collie.h | |||
@@ -13,6 +13,8 @@ | |||
13 | #ifndef __ASM_ARCH_COLLIE_H | 13 | #ifndef __ASM_ARCH_COLLIE_H |
14 | #define __ASM_ARCH_COLLIE_H | 14 | #define __ASM_ARCH_COLLIE_H |
15 | 15 | ||
16 | #include "hardware.h" /* Gives GPIO_MAX */ | ||
17 | |||
16 | extern void locomolcd_power(int on); | 18 | extern void locomolcd_power(int on); |
17 | 19 | ||
18 | #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1) | 20 | #define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1) |
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c index 2b3a56414271..ef69152f9b52 100644 --- a/arch/arm/mm/dump.c +++ b/arch/arm/mm/dump.c | |||
@@ -264,6 +264,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start) | |||
264 | note_page(st, addr, 3, pmd_val(*pmd)); | 264 | note_page(st, addr, 3, pmd_val(*pmd)); |
265 | else | 265 | else |
266 | walk_pte(st, pmd, addr); | 266 | walk_pte(st, pmd, addr); |
267 | |||
268 | if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1])) | ||
269 | note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1])); | ||
267 | } | 270 | } |
268 | } | 271 | } |
269 | 272 | ||
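Background for the hunk above, stated as a sketch rather than the kernel's exact logic: with the 32-bit short-descriptor format a Linux PMD entry covers two hardware sections (SECTION_SIZE < PMD_SIZE), so a dumper that only reports pmd[0] loses the attributes of the second 1 MiB section. The index arithmetic, with illustrative sizes:

#include <stdio.h>

#define SECTION_SIZE (1UL << 20)    /* 1 MiB hardware section */
#define PMD_SIZE     (1UL << 21)    /* 2 MiB per Linux PMD entry */

int main(void)
{
        unsigned long addr = 0xc0000000UL;

        /* Each PMD entry spans PMD_SIZE / SECTION_SIZE sections; report one
         * entry per section so both halves show their own attributes. */
        for (unsigned long off = 0; off < PMD_SIZE; off += SECTION_SIZE)
                printf("section at 0x%lx (pmd[%lu])\n",
                       addr + off, off / SECTION_SIZE);
        return 0;
}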
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h index 13fb0b3efc5f..453a179469a3 100644 --- a/arch/arm64/include/asm/percpu.h +++ b/arch/arm64/include/asm/percpu.h | |||
@@ -16,6 +16,8 @@ | |||
16 | #ifndef __ASM_PERCPU_H | 16 | #ifndef __ASM_PERCPU_H |
17 | #define __ASM_PERCPU_H | 17 | #define __ASM_PERCPU_H |
18 | 18 | ||
19 | #ifdef CONFIG_SMP | ||
20 | |||
19 | static inline void set_my_cpu_offset(unsigned long off) | 21 | static inline void set_my_cpu_offset(unsigned long off) |
20 | { | 22 | { |
21 | asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); | 23 | asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory"); |
@@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void) | |||
36 | } | 38 | } |
37 | #define __my_cpu_offset __my_cpu_offset() | 39 | #define __my_cpu_offset __my_cpu_offset() |
38 | 40 | ||
41 | #else /* !CONFIG_SMP */ | ||
42 | |||
43 | #define set_my_cpu_offset(x) do { } while (0) | ||
44 | |||
45 | #endif /* CONFIG_SMP */ | ||
46 | |||
39 | #include <asm-generic/percpu.h> | 47 | #include <asm-generic/percpu.h> |
40 | 48 | ||
41 | #endif /* __ASM_PERCPU_H */ | 49 | #endif /* __ASM_PERCPU_H */ |
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index b524dcd17243..aa3917c8b623 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h | |||
@@ -136,11 +136,11 @@ extern struct page *empty_zero_page; | |||
136 | /* | 136 | /* |
137 | * The following only work if pte_present(). Undefined behaviour otherwise. | 137 | * The following only work if pte_present(). Undefined behaviour otherwise. |
138 | */ | 138 | */ |
139 | #define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) | 139 | #define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))) |
140 | #define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) | 140 | #define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY)) |
141 | #define pte_young(pte) (pte_val(pte) & PTE_AF) | 141 | #define pte_young(pte) (!!(pte_val(pte) & PTE_AF)) |
142 | #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) | 142 | #define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL)) |
143 | #define pte_write(pte) (pte_val(pte) & PTE_WRITE) | 143 | #define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE)) |
144 | #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) | 144 | #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) |
145 | 145 | ||
146 | #define pte_valid_user(pte) \ | 146 | #define pte_valid_user(pte) \ |
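Without the !!, these helpers return the raw masked bit, which sits high in the 64-bit PTE value and is silently dropped when a caller stores the result in a narrower type or otherwise assumes a 0/1 answer. A minimal sketch of the failure mode, using an invented bit position rather than the real arm64 PTE layout:

#include <stdio.h>
#include <stdint.h>

#define PTE_WRITE   (1ULL << 57)            /* invented high software bit */

#define pte_write_raw(pte)   ((pte) & PTE_WRITE)        /* old style */
#define pte_write_bool(pte)  (!!((pte) & PTE_WRITE))    /* new style */

int main(void)
{
        uint64_t pte = PTE_WRITE;

        /* Storing the raw form in a narrower type drops the high bit,
         * so a writable PTE looks read-only. */
        int raw  = (int)pte_write_raw(pte);   /* truncates to 0 */
        int good = pte_write_bool(pte);       /* 1 */

        printf("raw=%d normalized=%d\n", raw, good);
        return 0;
}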
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c index c3b6c63ea5fb..38f0558f0c0a 100644 --- a/arch/arm64/kernel/stacktrace.c +++ b/arch/arm64/kernel/stacktrace.c | |||
@@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame) | |||
48 | 48 | ||
49 | frame->sp = fp + 0x10; | 49 | frame->sp = fp + 0x10; |
50 | frame->fp = *(unsigned long *)(fp); | 50 | frame->fp = *(unsigned long *)(fp); |
51 | frame->pc = *(unsigned long *)(fp + 8); | 51 | /* |
52 | * -4 here because we care about the PC at time of bl, | ||
53 | * not where the return will go. | ||
54 | */ | ||
55 | frame->pc = *(unsigned long *)(fp + 8) - 4; | ||
52 | 56 | ||
53 | return 0; | 57 | return 0; |
54 | } | 58 | } |
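The value saved at fp + 8 is the return address, i.e. the instruction after the bl; backing up one 4-byte instruction makes the reported PC name the call site itself, which keeps traces pointing at the right source line when the call is the last thing attributed to it. A minimal sketch of the unwind step against a mocked frame record (the addresses are fabricated):

#include <stdio.h>
#include <stdint.h>

struct stackframe {
        unsigned long fp;
        unsigned long sp;
        unsigned long pc;
};

/* Sketch of the frame-pointer unwind step: arm64 frame records are
 * {previous fp, return address}. */
static int unwind_frame_demo(struct stackframe *frame)
{
        unsigned long *record = (unsigned long *)frame->fp;

        frame->sp = frame->fp + 0x10;
        frame->fp = record[0];
        /* record[1] is where execution resumes after the bl; back up one
         * 4-byte instruction so the PC names the call itself. */
        frame->pc = record[1] - 4;
        return 0;
}

int main(void)
{
        unsigned long fake_record[2] = { 0, 0xffff000000081234UL };
        struct stackframe f = { .fp = (unsigned long)fake_record };

        unwind_frame_demo(&f);
        printf("caller pc = 0x%lx\n", f.pc);
        return 0;
}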
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S index 3b47c36e10ff..2c56012cb2d2 100644 --- a/arch/arm64/kvm/hyp.S +++ b/arch/arm64/kvm/hyp.S | |||
@@ -694,6 +694,24 @@ __hyp_panic_str: | |||
694 | 694 | ||
695 | .align 2 | 695 | .align 2 |
696 | 696 | ||
697 | /* | ||
698 | * u64 kvm_call_hyp(void *hypfn, ...); | ||
699 | * | ||
700 | * This is not really a variadic function in the classic C-way and care must | ||
701 | * be taken when calling this to ensure parameters are passed in registers | ||
702 | * only, since the stack will change between the caller and the callee. | ||
703 | * | ||
704 | * Call the function with the first argument containing a pointer to the | ||
705 | * function you wish to call in Hyp mode, and subsequent arguments will be | ||
706 | * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the | ||
707 | * function pointer can be passed). The function being called must be mapped | ||
708 | * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are | ||
709 | * passed in r0 and r1. | ||
710 | * | ||
711 | * A function pointer with a value of 0 has a special meaning, and is | ||
712 | * used to implement __hyp_get_vectors in the same way as in | ||
713 | * arch/arm64/kernel/hyp_stub.S. | ||
714 | */ | ||
697 | ENTRY(kvm_call_hyp) | 715 | ENTRY(kvm_call_hyp) |
698 | hvc #0 | 716 | hvc #0 |
699 | ret | 717 | ret |
@@ -737,7 +755,12 @@ el1_sync: // Guest trapped into EL2 | |||
737 | pop x2, x3 | 755 | pop x2, x3 |
738 | pop x0, x1 | 756 | pop x0, x1 |
739 | 757 | ||
740 | push lr, xzr | 758 | /* Check for __hyp_get_vectors */ |
759 | cbnz x0, 1f | ||
760 | mrs x0, vbar_el2 | ||
761 | b 2f | ||
762 | |||
763 | 1: push lr, xzr | ||
741 | 764 | ||
742 | /* | 765 | /* |
743 | * Compute the function address in EL2, and shuffle the parameters. | 766 | * Compute the function address in EL2, and shuffle the parameters. |
@@ -750,7 +773,7 @@ el1_sync: // Guest trapped into EL2 | |||
750 | blr lr | 773 | blr lr |
751 | 774 | ||
752 | pop lr, xzr | 775 | pop lr, xzr |
753 | eret | 776 | 2: eret |
754 | 777 | ||
755 | el1_trap: | 778 | el1_trap: |
756 | /* | 779 | /* |
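Per the new comment, the EL2 entry code now treats a sentinel "function pointer" (0 here, 0xffffffff in the 32-bit version earlier in this patch) as a request to hand back the current vector base instead of branching to it. A rough C model of that dispatch decision, purely illustrative since the real path is the assembly above:

#include <stdio.h>
#include <stdint.h>

typedef uint64_t (*hyp_fn_t)(uint64_t, uint64_t, uint64_t);

static uint64_t fake_vbar_el2 = 0xffff000000100800ULL;  /* made-up value */

/* Sketch of the EL2 dispatch logic: a sentinel "function pointer"
 * short-circuits to returning the exception vector base. */
static uint64_t hyp_dispatch(hyp_fn_t fn, uint64_t a0, uint64_t a1, uint64_t a2)
{
        if (fn == NULL)                 /* __hyp_get_vectors case */
                return fake_vbar_el2;   /* stands in for "mrs x0, vbar_el2" */
        return fn(a0, a1, a2);          /* normal kvm_call_hyp(fn, ...) */
}

int main(void)
{
        printf("vectors = 0x%llx\n",
               (unsigned long long)hyp_dispatch(NULL, 0, 0, 0));
        return 0;
}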
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h index 09c5a0f5f4d1..86648c083bb4 100644 --- a/arch/c6x/include/asm/cache.h +++ b/arch/c6x/include/asm/cache.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #define _ASM_C6X_CACHE_H | 12 | #define _ASM_C6X_CACHE_H |
13 | 13 | ||
14 | #include <linux/irqflags.h> | 14 | #include <linux/irqflags.h> |
15 | #include <linux/init.h> | ||
15 | 16 | ||
16 | /* | 17 | /* |
17 | * Cache line size | 18 | * Cache line size |
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h index 184066ceb1f6..053c17b36559 100644 --- a/arch/cris/include/asm/bitops.h +++ b/arch/cris/include/asm/bitops.h | |||
@@ -144,7 +144,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
144 | * definition, which doesn't have the same semantics. We don't want to | 144 | * definition, which doesn't have the same semantics. We don't want to |
145 | * use -fno-builtin, so just hide the name ffs. | 145 | * use -fno-builtin, so just hide the name ffs. |
146 | */ | 146 | */ |
147 | #define ffs kernel_ffs | 147 | #define ffs(x) kernel_ffs(x) |
148 | 148 | ||
149 | #include <asm-generic/bitops/fls.h> | 149 | #include <asm-generic/bitops/fls.h> |
150 | #include <asm-generic/bitops/__fls.h> | 150 | #include <asm-generic/bitops/__fls.h> |
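Turning ffs into a function-like macro means it only expands where ffs appears with call syntax, so unrelated identifiers that happen to be named ffs (struct members, locals, prototypes pulled in from elsewhere) are no longer rewritten by the preprocessor. A small host-side demonstration, independent of the kernel headers:

#include <stdio.h>

static int kernel_ffs(int x) { return __builtin_ffs(x); }

/* Function-like form: only expands for ffs(...) call syntax. */
#define ffs(x) kernel_ffs(x)

struct stats {
        int ffs;        /* an object-like "#define ffs kernel_ffs" would have
                         * rewritten this member name too */
};

int main(void)
{
        struct stats s = { .ffs = 3 };

        printf("member=%d ffs(8)=%d\n", s.ffs, ffs(8));
        return 0;
}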
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c index a96bcf83a735..20e8a9b21d75 100644 --- a/arch/ia64/kernel/uncached.c +++ b/arch/ia64/kernel/uncached.c | |||
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid) | |||
98 | /* attempt to allocate a granule's worth of cached memory pages */ | 98 | /* attempt to allocate a granule's worth of cached memory pages */ |
99 | 99 | ||
100 | page = alloc_pages_exact_node(nid, | 100 | page = alloc_pages_exact_node(nid, |
101 | GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, | 101 | GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, |
102 | IA64_GRANULE_SHIFT-PAGE_SHIFT); | 102 | IA64_GRANULE_SHIFT-PAGE_SHIFT); |
103 | if (!page) { | 103 | if (!page) { |
104 | mutex_unlock(&uc_pool->add_chunk_mutex); | 104 | mutex_unlock(&uc_pool->add_chunk_mutex); |
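The swap from GFP_THISNODE to __GFP_THISNODE matters because, in the gfp.h of this era (stated from memory), the composite also carried __GFP_NORETRY and __GFP_NOWARN and was intended for slab internals, whereas a caller like this only wants the allocation pinned to the node. A hedged sketch of that relationship; the bit values are invented and only the composition is the point:

#include <stdio.h>

/* Invented bit values; only the relationship between the macros matters,
 * mirroring the historical definitions as recalled, not <linux/gfp.h>. */
#define __GFP_NOWARN    0x0200u
#define __GFP_NORETRY   0x1000u
#define __GFP_THISNODE  0x40000u
#define GFP_THISNODE    (__GFP_THISNODE | __GFP_NORETRY | __GFP_NOWARN)

int main(void)
{
        /* The composite carries two extra policy bits that a caller asking
         * for "node-local memory, please" usually does not want. */
        printf("GFP_THISNODE   = 0x%x\n", GFP_THISNODE);
        printf("__GFP_THISNODE = 0x%x\n", __GFP_THISNODE);
        return 0;
}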
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index 7cc8c364924d..6fb9e813a910 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild | |||
@@ -1,4 +1,4 @@ | |||
1 | 1 | generic-y += barrier.h | |
2 | generic-y += bitsperlong.h | 2 | generic-y += bitsperlong.h |
3 | generic-y += clkdev.h | 3 | generic-y += clkdev.h |
4 | generic-y += cputime.h | 4 | generic-y += cputime.h |
@@ -6,6 +6,7 @@ generic-y += device.h | |||
6 | generic-y += emergency-restart.h | 6 | generic-y += emergency-restart.h |
7 | generic-y += errno.h | 7 | generic-y += errno.h |
8 | generic-y += exec.h | 8 | generic-y += exec.h |
9 | generic-y += hash.h | ||
9 | generic-y += hw_irq.h | 10 | generic-y += hw_irq.h |
10 | generic-y += ioctl.h | 11 | generic-y += ioctl.h |
11 | generic-y += ipcbuf.h | 12 | generic-y += ipcbuf.h |
@@ -18,6 +19,7 @@ generic-y += local.h | |||
18 | generic-y += mman.h | 19 | generic-y += mman.h |
19 | generic-y += mutex.h | 20 | generic-y += mutex.h |
20 | generic-y += percpu.h | 21 | generic-y += percpu.h |
22 | generic-y += preempt.h | ||
21 | generic-y += resource.h | 23 | generic-y += resource.h |
22 | generic-y += scatterlist.h | 24 | generic-y += scatterlist.h |
23 | generic-y += sections.h | 25 | generic-y += sections.h |
@@ -31,5 +33,3 @@ generic-y += trace_clock.h | |||
31 | generic-y += types.h | 33 | generic-y += types.h |
32 | generic-y += word-at-a-time.h | 34 | generic-y += word-at-a-time.h |
33 | generic-y += xor.h | 35 | generic-y += xor.h |
34 | generic-y += preempt.h | ||
35 | generic-y += hash.h | ||
diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h deleted file mode 100644 index 15c5f77c1614..000000000000 --- a/arch/m68k/include/asm/barrier.h +++ /dev/null | |||
@@ -1,8 +0,0 @@ | |||
1 | #ifndef _M68K_BARRIER_H | ||
2 | #define _M68K_BARRIER_H | ||
3 | |||
4 | #define nop() do { asm volatile ("nop"); barrier(); } while (0) | ||
5 | |||
6 | #include <asm-generic/barrier.h> | ||
7 | |||
8 | #endif /* _M68K_BARRIER_H */ | ||
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 014f288fc813..9d38b73989eb 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h | |||
@@ -4,7 +4,7 @@ | |||
4 | #include <uapi/asm/unistd.h> | 4 | #include <uapi/asm/unistd.h> |
5 | 5 | ||
6 | 6 | ||
7 | #define NR_syscalls 349 | 7 | #define NR_syscalls 351 |
8 | 8 | ||
9 | #define __ARCH_WANT_OLD_READDIR | 9 | #define __ARCH_WANT_OLD_READDIR |
10 | #define __ARCH_WANT_OLD_STAT | 10 | #define __ARCH_WANT_OLD_STAT |
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 625f321001dc..b932dd470041 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h | |||
@@ -354,5 +354,7 @@ | |||
354 | #define __NR_process_vm_writev 346 | 354 | #define __NR_process_vm_writev 346 |
355 | #define __NR_kcmp 347 | 355 | #define __NR_kcmp 347 |
356 | #define __NR_finit_module 348 | 356 | #define __NR_finit_module 348 |
357 | #define __NR_sched_setattr 349 | ||
358 | #define __NR_sched_getattr 350 | ||
357 | 359 | ||
358 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ | 360 | #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ |
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 3f04ea0ab802..b6223dc41d82 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S | |||
@@ -369,4 +369,6 @@ ENTRY(sys_call_table) | |||
369 | .long sys_process_vm_writev | 369 | .long sys_process_vm_writev |
370 | .long sys_kcmp | 370 | .long sys_kcmp |
371 | .long sys_finit_module | 371 | .long sys_finit_module |
372 | .long sys_sched_setattr | ||
373 | .long sys_sched_getattr /* 350 */ | ||
372 | 374 | ||
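The three m68k hunks are the usual lockstep for new system calls: reserve the numbers in the uapi header, raise NR_syscalls from 349 to 351, and append the handlers to the call table in the same order. A hedged user-space sketch of invoking one of the new numbers on an m68k kernel carrying this patch; the struct layout is reproduced from memory and the raw number is used because a libc wrapper may not exist yet:

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_sched_setattr
#define __NR_sched_setattr 349          /* m68k number from the hunk above */
#endif

struct sched_attr_demo {                /* layout recalled from the uapi header */
        uint32_t size;
        uint32_t sched_policy;
        uint64_t sched_flags;
        int32_t  sched_nice;
        uint32_t sched_priority;
        uint64_t sched_runtime;
        uint64_t sched_deadline;
        uint64_t sched_period;
};

int main(void)
{
        struct sched_attr_demo attr = {
                .size = sizeof(attr),
                .sched_policy = 0,      /* SCHED_NORMAL */
                .sched_nice = 5,
        };
        /* pid 0 means the calling task; flags are currently 0. */
        long ret = syscall(__NR_sched_setattr, 0, &attr, 0);

        printf("sched_setattr -> %ld\n", ret);
        return 0;
}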
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index 84fdf6857c31..a613d2c82fd9 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h | |||
@@ -200,10 +200,11 @@ static inline void __user *arch_compat_alloc_user_space(long len) | |||
200 | 200 | ||
201 | /* | 201 | /* |
202 | * We can't access below the stack pointer in the 32bit ABI and | 202 | * We can't access below the stack pointer in the 32bit ABI and |
203 | * can access 288 bytes in the 64bit ABI | 203 | * can access 288 bytes in the 64bit big-endian ABI, |
204 | * or 512 bytes with the new ELFv2 little-endian ABI. | ||
204 | */ | 205 | */ |
205 | if (!is_32bit_task()) | 206 | if (!is_32bit_task()) |
206 | usp -= 288; | 207 | usp -= USER_REDZONE_SIZE; |
207 | 208 | ||
208 | return (void __user *) (usp - len); | 209 | return (void __user *) (usp - len); |
209 | } | 210 | } |
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index 40157e2ca691..ed82142a3251 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h | |||
@@ -816,8 +816,8 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, | |||
816 | int64_t opal_pci_poll(uint64_t phb_id); | 816 | int64_t opal_pci_poll(uint64_t phb_id); |
817 | int64_t opal_return_cpu(void); | 817 | int64_t opal_return_cpu(void); |
818 | 818 | ||
819 | int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val); | 819 | int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val); |
820 | int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val); | 820 | int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val); |
821 | 821 | ||
822 | int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, | 822 | int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type, |
823 | uint32_t addr, uint32_t data, uint32_t sz); | 823 | uint32_t addr, uint32_t data, uint32_t sz); |
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index becc08e6a65c..279b80f3bb29 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h | |||
@@ -28,11 +28,23 @@ | |||
28 | 28 | ||
29 | #ifdef __powerpc64__ | 29 | #ifdef __powerpc64__ |
30 | 30 | ||
31 | /* | ||
32 | * Size of redzone that userspace is allowed to use below the stack | ||
33 | * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in | ||
34 | * the new ELFv2 little-endian ABI, so we allow the larger amount. | ||
35 | * | ||
36 | * For kernel code we allow a 288-byte redzone, in order to conserve | ||
37 | * kernel stack space; gcc currently only uses 288 bytes, and will | ||
38 | * hopefully allow explicit control of the redzone size in future. | ||
39 | */ | ||
40 | #define USER_REDZONE_SIZE 512 | ||
41 | #define KERNEL_REDZONE_SIZE 288 | ||
42 | |||
31 | #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ | 43 | #define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */ |
32 | #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */ | 44 | #define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */ |
33 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265) | 45 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265) |
34 | #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \ | 46 | #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \ |
35 | STACK_FRAME_OVERHEAD + 288) | 47 | STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) |
36 | #define STACK_FRAME_MARKER 12 | 48 | #define STACK_FRAME_MARKER 12 |
37 | 49 | ||
38 | /* Size of dummy stack frame allocated when calling signal handler. */ | 50 | /* Size of dummy stack frame allocated when calling signal handler. */ |
@@ -41,6 +53,8 @@ | |||
41 | 53 | ||
42 | #else /* __powerpc64__ */ | 54 | #else /* __powerpc64__ */ |
43 | 55 | ||
56 | #define USER_REDZONE_SIZE 0 | ||
57 | #define KERNEL_REDZONE_SIZE 0 | ||
44 | #define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */ | 58 | #define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */ |
45 | #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */ | 59 | #define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */ |
46 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) | 60 | #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) |
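The practical consequence of the new constants: user space may scribble up to USER_REDZONE_SIZE bytes below its stack pointer without allocating a frame, so anything the kernel places on a user stack (the signal frame's abigap, arch_compat_alloc_user_space() in the compat.h hunk above) must skip that much, while in-kernel frames keep the smaller 288-byte figure to save stack. A minimal sketch of the allocate-below-the-redzone step with placeholder values; the real code also aligns the result, which is omitted here:

#include <stdio.h>

#define USER_REDZONE_SIZE   512     /* ELFv2 little-endian ABI */
#define KERNEL_REDZONE_SIZE 288     /* what gcc actually uses in-kernel */

/* Sketch of carving space on a user stack without clobbering the redzone;
 * the starting sp is a made-up example value. */
static unsigned long alloc_user_space_demo(unsigned long usp, unsigned long len)
{
        usp -= USER_REDZONE_SIZE;   /* step over what userspace may still use */
        return usp - len;
}

int main(void)
{
        unsigned long usp = 0x7fffffffe000UL;

        printf("new block at 0x%lx\n", alloc_user_space_demo(usp, 128));
        return 0;
}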
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 11c1d069d920..7a13f378ca2c 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c | |||
@@ -98,17 +98,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
98 | size_t csize, unsigned long offset, int userbuf) | 98 | size_t csize, unsigned long offset, int userbuf) |
99 | { | 99 | { |
100 | void *vaddr; | 100 | void *vaddr; |
101 | phys_addr_t paddr; | ||
101 | 102 | ||
102 | if (!csize) | 103 | if (!csize) |
103 | return 0; | 104 | return 0; |
104 | 105 | ||
105 | csize = min_t(size_t, csize, PAGE_SIZE); | 106 | csize = min_t(size_t, csize, PAGE_SIZE); |
107 | paddr = pfn << PAGE_SHIFT; | ||
106 | 108 | ||
107 | if ((min_low_pfn < pfn) && (pfn < max_pfn)) { | 109 | if (memblock_is_region_memory(paddr, csize)) { |
108 | vaddr = __va(pfn << PAGE_SHIFT); | 110 | vaddr = __va(paddr); |
109 | csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); | 111 | csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); |
110 | } else { | 112 | } else { |
111 | vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0); | 113 | vaddr = __ioremap(paddr, PAGE_SIZE, 0); |
112 | csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); | 114 | csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf); |
113 | iounmap(vaddr); | 115 | iounmap(vaddr); |
114 | } | 116 | } |
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 9b27b293a922..b0ded97ee4e1 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c | |||
@@ -74,6 +74,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new) | |||
74 | */ | 74 | */ |
75 | static int test_24bit_addr(unsigned long ip, unsigned long addr) | 75 | static int test_24bit_addr(unsigned long ip, unsigned long addr) |
76 | { | 76 | { |
77 | addr = ppc_function_entry((void *)addr); | ||
77 | 78 | ||
78 | /* use the create_branch to verify that this offset can be branched */ | 79 | /* use the create_branch to verify that this offset can be branched */ |
79 | return create_branch((unsigned int *)ip, addr, 0); | 80 | return create_branch((unsigned int *)ip, addr, 0); |
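On ppc64 with the ELFv1 ABI a function symbol resolves to a function descriptor, not to its first instruction, so the 24-bit branch reachability check has to be made against the descriptor's entry field; that is what routing addr through ppc_function_entry() achieves. A rough stand-alone model of the idea (descriptor layout and addresses are illustrative):

#include <stdio.h>
#include <stdint.h>

/* Rough model of a ppc64 ELFv1 function descriptor: the "address" of a
 * function points at this record, not at its first instruction. */
struct func_desc {
        unsigned long entry;    /* actual text address */
        unsigned long toc;
        unsigned long env;
};

/* Sketch of ppc_function_entry(): dereference the descriptor.  (On ELFv2
 * and 32-bit the symbol address is already the entry point.) */
static unsigned long function_entry_demo(const struct func_desc *fd)
{
        return fd->entry;
}

/* The reachability test only makes sense between text addresses, so the
 * descriptor address must be translated first, as the hunk does. */
static int within_24bit_branch(unsigned long ip, unsigned long addr)
{
        long delta = (long)(addr - ip);

        return delta >= -0x2000000L && delta < 0x2000000L;   /* +/- 32 MiB */
}

int main(void)
{
        struct func_desc fd = { .entry = 0xc000000000040000UL };
        unsigned long ip = 0xc000000000041000UL;

        printf("reachable=%d\n",
               within_24bit_branch(ip, function_entry_demo(&fd)));
        return 0;
}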
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8d4c247f1738..af064d28b365 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -1048,6 +1048,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | |||
1048 | flush_altivec_to_thread(src); | 1048 | flush_altivec_to_thread(src); |
1049 | flush_vsx_to_thread(src); | 1049 | flush_vsx_to_thread(src); |
1050 | flush_spe_to_thread(src); | 1050 | flush_spe_to_thread(src); |
1051 | /* | ||
1052 | * Flush TM state out so we can copy it. __switch_to_tm() does this | ||
1053 | * flush but it removes the checkpointed state from the current CPU and | ||
1054 | * transitions the CPU out of TM mode. Hence we need to call | ||
1055 | * tm_recheckpoint_new_task() (on the same task) to restore the | ||
1056 | * checkpointed state back and the TM mode. | ||
1057 | */ | ||
1058 | __switch_to_tm(src); | ||
1059 | tm_recheckpoint_new_task(src); | ||
1051 | 1060 | ||
1052 | *dst = *src; | 1061 | *dst = *src; |
1053 | 1062 | ||
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S index 1482327cfeba..d88736fbece6 100644 --- a/arch/powerpc/kernel/reloc_64.S +++ b/arch/powerpc/kernel/reloc_64.S | |||
@@ -81,6 +81,7 @@ _GLOBAL(relocate) | |||
81 | 81 | ||
82 | 6: blr | 82 | 6: blr |
83 | 83 | ||
84 | .balign 8 | ||
84 | p_dyn: .llong __dynamic_start - 0b | 85 | p_dyn: .llong __dynamic_start - 0b |
85 | p_rela: .llong __rela_dyn_start - 0b | 86 | p_rela: .llong __rela_dyn_start - 0b |
86 | p_st: .llong _stext - 0b | 87 | p_st: .llong _stext - 0b |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index e35bf773df7a..8d253c29649b 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -65,8 +65,8 @@ struct rt_sigframe { | |||
65 | struct siginfo __user *pinfo; | 65 | struct siginfo __user *pinfo; |
66 | void __user *puc; | 66 | void __user *puc; |
67 | struct siginfo info; | 67 | struct siginfo info; |
68 | /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */ | 68 | /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */ |
69 | char abigap[288]; | 69 | char abigap[USER_REDZONE_SIZE]; |
70 | } __attribute__ ((aligned (16))); | 70 | } __attribute__ ((aligned (16))); |
71 | 71 | ||
72 | static const char fmt32[] = KERN_INFO \ | 72 | static const char fmt32[] = KERN_INFO \ |
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 5ec1e47a0d77..e865d748179b 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c | |||
@@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order) | |||
123 | 123 | ||
124 | area->nid = nid; | 124 | area->nid = nid; |
125 | area->order = order; | 125 | area->order = order; |
126 | area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE, | 126 | area->pages = alloc_pages_exact_node(area->nid, |
127 | GFP_KERNEL|__GFP_THISNODE, | ||
127 | area->order); | 128 | area->order); |
128 | 129 | ||
129 | if (!area->pages) { | 130 | if (!area->pages) { |
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index f51474336460..253fefe3d1a0 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c | |||
@@ -114,6 +114,7 @@ DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get, | |||
114 | ioda_eeh_inbB_dbgfs_set, "0x%llx\n"); | 114 | ioda_eeh_inbB_dbgfs_set, "0x%llx\n"); |
115 | #endif /* CONFIG_DEBUG_FS */ | 115 | #endif /* CONFIG_DEBUG_FS */ |
116 | 116 | ||
117 | |||
117 | /** | 118 | /** |
118 | * ioda_eeh_post_init - Chip dependent post initialization | 119 | * ioda_eeh_post_init - Chip dependent post initialization |
119 | * @hose: PCI controller | 120 | * @hose: PCI controller |
@@ -221,6 +222,22 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option) | |||
221 | return ret; | 222 | return ret; |
222 | } | 223 | } |
223 | 224 | ||
225 | static void ioda_eeh_phb_diag(struct pci_controller *hose) | ||
226 | { | ||
227 | struct pnv_phb *phb = hose->private_data; | ||
228 | long rc; | ||
229 | |||
230 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, | ||
231 | PNV_PCI_DIAG_BUF_SIZE); | ||
232 | if (rc != OPAL_SUCCESS) { | ||
233 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", | ||
234 | __func__, hose->global_number, rc); | ||
235 | return; | ||
236 | } | ||
237 | |||
238 | pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); | ||
239 | } | ||
240 | |||
224 | /** | 241 | /** |
225 | * ioda_eeh_get_state - Retrieve the state of PE | 242 | * ioda_eeh_get_state - Retrieve the state of PE |
226 | * @pe: EEH PE | 243 | * @pe: EEH PE |
@@ -272,6 +289,9 @@ static int ioda_eeh_get_state(struct eeh_pe *pe) | |||
272 | result |= EEH_STATE_DMA_ACTIVE; | 289 | result |= EEH_STATE_DMA_ACTIVE; |
273 | result |= EEH_STATE_MMIO_ENABLED; | 290 | result |= EEH_STATE_MMIO_ENABLED; |
274 | result |= EEH_STATE_DMA_ENABLED; | 291 | result |= EEH_STATE_DMA_ENABLED; |
292 | } else if (!(pe->state & EEH_PE_ISOLATED)) { | ||
293 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
294 | ioda_eeh_phb_diag(hose); | ||
275 | } | 295 | } |
276 | 296 | ||
277 | return result; | 297 | return result; |
@@ -315,6 +335,15 @@ static int ioda_eeh_get_state(struct eeh_pe *pe) | |||
315 | __func__, fstate, hose->global_number, pe_no); | 335 | __func__, fstate, hose->global_number, pe_no); |
316 | } | 336 | } |
317 | 337 | ||
338 | /* Dump PHB diag-data for frozen PE */ | ||
339 | if (result != EEH_STATE_NOT_SUPPORT && | ||
340 | (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) != | ||
341 | (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) && | ||
342 | !(pe->state & EEH_PE_ISOLATED)) { | ||
343 | eeh_pe_state_mark(pe, EEH_PE_ISOLATED); | ||
344 | ioda_eeh_phb_diag(hose); | ||
345 | } | ||
346 | |||
318 | return result; | 347 | return result; |
319 | } | 348 | } |
320 | 349 | ||
@@ -530,42 +559,6 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option) | |||
530 | } | 559 | } |
531 | 560 | ||
532 | /** | 561 | /** |
533 | * ioda_eeh_get_log - Retrieve error log | ||
534 | * @pe: EEH PE | ||
535 | * @severity: Severity level of the log | ||
536 | * @drv_log: buffer to store the log | ||
537 | * @len: space of the log buffer | ||
538 | * | ||
539 | * The function is used to retrieve error log from P7IOC. | ||
540 | */ | ||
541 | static int ioda_eeh_get_log(struct eeh_pe *pe, int severity, | ||
542 | char *drv_log, unsigned long len) | ||
543 | { | ||
544 | s64 ret; | ||
545 | unsigned long flags; | ||
546 | struct pci_controller *hose = pe->phb; | ||
547 | struct pnv_phb *phb = hose->private_data; | ||
548 | |||
549 | spin_lock_irqsave(&phb->lock, flags); | ||
550 | |||
551 | ret = opal_pci_get_phb_diag_data2(phb->opal_id, | ||
552 | phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE); | ||
553 | if (ret) { | ||
554 | spin_unlock_irqrestore(&phb->lock, flags); | ||
555 | pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n", | ||
556 | __func__, hose->global_number, pe->addr, ret); | ||
557 | return -EIO; | ||
558 | } | ||
559 | |||
560 | /* The PHB diag-data is always indicative */ | ||
561 | pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); | ||
562 | |||
563 | spin_unlock_irqrestore(&phb->lock, flags); | ||
564 | |||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | /** | ||
569 | * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE | 562 | * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE |
570 | * @pe: EEH PE | 563 | * @pe: EEH PE |
571 | * | 564 | * |
@@ -646,22 +639,6 @@ static void ioda_eeh_hub_diag(struct pci_controller *hose) | |||
646 | } | 639 | } |
647 | } | 640 | } |
648 | 641 | ||
649 | static void ioda_eeh_phb_diag(struct pci_controller *hose) | ||
650 | { | ||
651 | struct pnv_phb *phb = hose->private_data; | ||
652 | long rc; | ||
653 | |||
654 | rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob, | ||
655 | PNV_PCI_DIAG_BUF_SIZE); | ||
656 | if (rc != OPAL_SUCCESS) { | ||
657 | pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n", | ||
658 | __func__, hose->global_number, rc); | ||
659 | return; | ||
660 | } | ||
661 | |||
662 | pnv_pci_dump_phb_diag_data(hose, phb->diag.blob); | ||
663 | } | ||
664 | |||
665 | static int ioda_eeh_get_phb_pe(struct pci_controller *hose, | 642 | static int ioda_eeh_get_phb_pe(struct pci_controller *hose, |
666 | struct eeh_pe **pe) | 643 | struct eeh_pe **pe) |
667 | { | 644 | { |
@@ -835,6 +812,20 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) | |||
835 | } | 812 | } |
836 | 813 | ||
837 | /* | 814 | /* |
815 | * The EEH core will try to recover from a fenced PHB or | ||
816 | * frozen PE. For a frozen PE, the EEH core enables the IO | ||
817 | * path before collecting logs, but doing so clobbers the | ||
818 | * error state on the PHB. So we have to dump the | ||
819 | * diag-data in advance here. | ||
820 | */ | ||
821 | if ((ret == EEH_NEXT_ERR_FROZEN_PE || | ||
822 | ret == EEH_NEXT_ERR_FENCED_PHB) && | ||
823 | !((*pe)->state & EEH_PE_ISOLATED)) { | ||
824 | eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); | ||
825 | ioda_eeh_phb_diag(hose); | ||
826 | } | ||
827 | |||
828 | /* | ||
838 | * If we have no errors on the specific PHB or only | 829 | * If we have no errors on the specific PHB or only |
839 | * informative error there, we continue poking it. | 830 | * informative error there, we continue poking it. |
840 | * Otherwise, we need actions to be taken by upper | 831 | * Otherwise, we need actions to be taken by upper |
@@ -852,7 +843,6 @@ struct pnv_eeh_ops ioda_eeh_ops = { | |||
852 | .set_option = ioda_eeh_set_option, | 843 | .set_option = ioda_eeh_set_option, |
853 | .get_state = ioda_eeh_get_state, | 844 | .get_state = ioda_eeh_get_state, |
854 | .reset = ioda_eeh_reset, | 845 | .reset = ioda_eeh_reset, |
855 | .get_log = ioda_eeh_get_log, | ||
856 | .configure_bridge = ioda_eeh_configure_bridge, | 846 | .configure_bridge = ioda_eeh_configure_bridge, |
857 | .next_error = ioda_eeh_next_error | 847 | .next_error = ioda_eeh_next_error |
858 | }; | 848 | }; |
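A minimal stand-alone sketch of the one-shot diag-data pattern the hunks above introduce: the PHB diag-data is collected the first time a PE is seen frozen or fenced, before recovery re-enables the IO path and destroys the hardware error state. The struct, its fields and collect_diag_data() below are invented for illustration; only the test-then-mark ordering mirrors the patch.

/*
 * Sketch only: collect diag-data exactly once, on the first sighting of
 * an isolated PE.  Names are made up; the kernel path calls
 * opal_pci_get_phb_diag_data2() and dumps the blob.
 */
#include <stdio.h>

#define PE_ISOLATED	0x1

struct fake_pe {
	unsigned int state;
	int number;
};

static void collect_diag_data(struct fake_pe *pe)
{
	printf("PE#%d: collecting diag-data before recovery touches it\n",
	       pe->number);
}

static void note_error(struct fake_pe *pe)
{
	/* Only the first sighting gets a dump; later calls are no-ops. */
	if (!(pe->state & PE_ISOLATED)) {
		pe->state |= PE_ISOLATED;
		collect_diag_data(pe);
	}
}

int main(void)
{
	struct fake_pe pe = { .state = 0, .number = 3 };

	note_error(&pe);	/* collects the log */
	note_error(&pe);	/* already isolated: no duplicate dump */
	return 0;
}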
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c index 4fbf276ac99e..4cd2ea6c0dbe 100644 --- a/arch/powerpc/platforms/powernv/opal-xscom.c +++ b/arch/powerpc/platforms/powernv/opal-xscom.c | |||
@@ -71,11 +71,11 @@ static int opal_xscom_err_xlate(int64_t rc) | |||
71 | } | 71 | } |
72 | } | 72 | } |
73 | 73 | ||
74 | static u64 opal_scom_unmangle(u64 reg) | 74 | static u64 opal_scom_unmangle(u64 addr) |
75 | { | 75 | { |
76 | /* | 76 | /* |
77 | * XSCOM indirect addresses have the top bit set. Additionally | 77 | * XSCOM indirect addresses have the top bit set. Additionally |
78 | * the reset of the top 3 nibbles is always 0. | 78 | * the rest of the top 3 nibbles is always 0. |
79 | * | 79 | * |
80 | * Because the debugfs interface uses signed offsets and shifts | 80 | * Because the debugfs interface uses signed offsets and shifts |
81 | * the address left by 3, we basically cannot use the top 4 bits | 81 | * the address left by 3, we basically cannot use the top 4 bits |
@@ -86,10 +86,13 @@ static u64 opal_scom_unmangle(u64 reg) | |||
86 | * conversion here. To leave room for further xscom address | 86 | * conversion here. To leave room for further xscom address |
87 | * expansion, we only clear out the top byte | 87 | * expansion, we only clear out the top byte |
88 | * | 88 | * |
89 | * For in-kernel use, we also support the real indirect bit, so | ||
90 | * we test for any of the top 5 bits | ||
91 | * | ||
89 | */ | 92 | */ |
90 | if (reg & (1ull << 59)) | 93 | if (addr & (0x1full << 59)) |
91 | reg = (reg & ~(0xffull << 56)) | (1ull << 63); | 94 | addr = (addr & ~(0xffull << 56)) | (1ull << 63); |
92 | return reg; | 95 | return addr; |
93 | } | 96 | } |
94 | 97 | ||
95 | static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) | 98 | static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) |
@@ -98,8 +101,8 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value) | |||
98 | int64_t rc; | 101 | int64_t rc; |
99 | __be64 v; | 102 | __be64 v; |
100 | 103 | ||
101 | reg = opal_scom_unmangle(reg); | 104 | reg = opal_scom_unmangle(m->addr + reg); |
102 | rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v)); | 105 | rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v)); |
103 | *value = be64_to_cpu(v); | 106 | *value = be64_to_cpu(v); |
104 | return opal_xscom_err_xlate(rc); | 107 | return opal_xscom_err_xlate(rc); |
105 | } | 108 | } |
@@ -109,8 +112,8 @@ static int opal_scom_write(scom_map_t map, u64 reg, u64 value) | |||
109 | struct opal_scom_map *m = map; | 112 | struct opal_scom_map *m = map; |
110 | int64_t rc; | 113 | int64_t rc; |
111 | 114 | ||
112 | reg = opal_scom_unmangle(reg); | 115 | reg = opal_scom_unmangle(m->addr + reg); |
113 | rc = opal_xscom_write(m->chip, m->addr + reg, value); | 116 | rc = opal_xscom_write(m->chip, reg, value); |
114 | return opal_xscom_err_xlate(rc); | 117 | return opal_xscom_err_xlate(rc); |
115 | } | 118 | } |
116 | 119 | ||
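For reference, a stand-alone rendition of the un-mangling logic changed above: any address with one of the top 5 bits set gets its top byte cleared and the real indirect bit (bit 63) set. The helper body repeats the bit manipulation from the patched opal_scom_unmangle(); the sample address in main() is made up. The call-site change is that the map base (m->addr) is now added before un-mangling, so the base offset participates in the top-bit check.

/* Illustrative only; the input value is not a real SCOM offset. */
#include <stdint.h>
#include <stdio.h>

static uint64_t scom_unmangle(uint64_t addr)
{
	/* Any of the top 5 bits set: clear the top byte, set bit 63. */
	if (addr & (0x1full << 59))
		addr = (addr & ~(0xffull << 56)) | (1ull << 63);
	return addr;
}

int main(void)
{
	/* Debugfs-style mangled indirect address: bit 60 set instead of 63. */
	uint64_t mangled = (1ull << 60) | 0x0001000000000012ull;

	printf("mangled:   0x%016llx\n", (unsigned long long)mangled);
	printf("unmangled: 0x%016llx\n",
	       (unsigned long long)scom_unmangle(mangled));
	return 0;
}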
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index 95633d79ef5d..8518817dcdfd 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c | |||
@@ -134,57 +134,72 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose, | |||
134 | pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n", | 134 | pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n", |
135 | hose->global_number, common->version); | 135 | hose->global_number, common->version); |
136 | 136 | ||
137 | pr_info(" brdgCtl: %08x\n", data->brdgCtl); | 137 | if (data->brdgCtl) |
138 | 138 | pr_info(" brdgCtl: %08x\n", | |
139 | pr_info(" portStatusReg: %08x\n", data->portStatusReg); | 139 | data->brdgCtl); |
140 | pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); | 140 | if (data->portStatusReg || data->rootCmplxStatus || |
141 | pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); | 141 | data->busAgentStatus) |
142 | 142 | pr_info(" UtlSts: %08x %08x %08x\n", | |
143 | pr_info(" deviceStatus: %08x\n", data->deviceStatus); | 143 | data->portStatusReg, data->rootCmplxStatus, |
144 | pr_info(" slotStatus: %08x\n", data->slotStatus); | 144 | data->busAgentStatus); |
145 | pr_info(" linkStatus: %08x\n", data->linkStatus); | 145 | if (data->deviceStatus || data->slotStatus || |
146 | pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); | 146 | data->linkStatus || data->devCmdStatus || |
147 | pr_info(" devSecStatus: %08x\n", data->devSecStatus); | 147 | data->devSecStatus) |
148 | 148 | pr_info(" RootSts: %08x %08x %08x %08x %08x\n", | |
149 | pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); | 149 | data->deviceStatus, data->slotStatus, |
150 | pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); | 150 | data->linkStatus, data->devCmdStatus, |
151 | pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); | 151 | data->devSecStatus); |
152 | pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); | 152 | if (data->rootErrorStatus || data->uncorrErrorStatus || |
153 | pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); | 153 | data->corrErrorStatus) |
154 | pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); | 154 | pr_info(" RootErrSts: %08x %08x %08x\n", |
155 | pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); | 155 | data->rootErrorStatus, data->uncorrErrorStatus, |
156 | pr_info(" sourceId: %08x\n", data->sourceId); | 156 | data->corrErrorStatus); |
157 | pr_info(" errorClass: %016llx\n", data->errorClass); | 157 | if (data->tlpHdr1 || data->tlpHdr2 || |
158 | pr_info(" correlator: %016llx\n", data->correlator); | 158 | data->tlpHdr3 || data->tlpHdr4) |
159 | pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr); | 159 | pr_info(" RootErrLog: %08x %08x %08x %08x\n", |
160 | pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr); | 160 | data->tlpHdr1, data->tlpHdr2, |
161 | pr_info(" lemFir: %016llx\n", data->lemFir); | 161 | data->tlpHdr3, data->tlpHdr4); |
162 | pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); | 162 | if (data->sourceId || data->errorClass || |
163 | pr_info(" lemWOF: %016llx\n", data->lemWOF); | 163 | data->correlator) |
164 | pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); | 164 | pr_info(" RootErrLog1: %08x %016llx %016llx\n", |
165 | pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); | 165 | data->sourceId, data->errorClass, |
166 | pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); | 166 | data->correlator); |
167 | pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); | 167 | if (data->p7iocPlssr || data->p7iocCsr) |
168 | pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); | 168 | pr_info(" PhbSts: %016llx %016llx\n", |
169 | pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); | 169 | data->p7iocPlssr, data->p7iocCsr); |
170 | pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); | 170 | if (data->lemFir || data->lemErrorMask || |
171 | pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); | 171 | data->lemWOF) |
172 | pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); | 172 | pr_info(" Lem: %016llx %016llx %016llx\n", |
173 | pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); | 173 | data->lemFir, data->lemErrorMask, |
174 | pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); | 174 | data->lemWOF); |
175 | pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); | 175 | if (data->phbErrorStatus || data->phbFirstErrorStatus || |
176 | pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); | 176 | data->phbErrorLog0 || data->phbErrorLog1) |
177 | pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); | 177 | pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n", |
178 | pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); | 178 | data->phbErrorStatus, data->phbFirstErrorStatus, |
179 | pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); | 179 | data->phbErrorLog0, data->phbErrorLog1); |
180 | if (data->mmioErrorStatus || data->mmioFirstErrorStatus || | ||
181 | data->mmioErrorLog0 || data->mmioErrorLog1) | ||
182 | pr_info(" OutErr: %016llx %016llx %016llx %016llx\n", | ||
183 | data->mmioErrorStatus, data->mmioFirstErrorStatus, | ||
184 | data->mmioErrorLog0, data->mmioErrorLog1); | ||
185 | if (data->dma0ErrorStatus || data->dma0FirstErrorStatus || | ||
186 | data->dma0ErrorLog0 || data->dma0ErrorLog1) | ||
187 | pr_info(" InAErr: %016llx %016llx %016llx %016llx\n", | ||
188 | data->dma0ErrorStatus, data->dma0FirstErrorStatus, | ||
189 | data->dma0ErrorLog0, data->dma0ErrorLog1); | ||
190 | if (data->dma1ErrorStatus || data->dma1FirstErrorStatus || | ||
191 | data->dma1ErrorLog0 || data->dma1ErrorLog1) | ||
192 | pr_info(" InBErr: %016llx %016llx %016llx %016llx\n", | ||
193 | data->dma1ErrorStatus, data->dma1FirstErrorStatus, | ||
194 | data->dma1ErrorLog0, data->dma1ErrorLog1); | ||
180 | 195 | ||
181 | for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { | 196 | for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) { |
182 | if ((data->pestA[i] >> 63) == 0 && | 197 | if ((data->pestA[i] >> 63) == 0 && |
183 | (data->pestB[i] >> 63) == 0) | 198 | (data->pestB[i] >> 63) == 0) |
184 | continue; | 199 | continue; |
185 | 200 | ||
186 | pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); | 201 | pr_info(" PE[%3d] A/B: %016llx %016llx\n", |
187 | pr_info(" PESTB: %016llx\n", data->pestB[i]); | 202 | i, data->pestA[i], data->pestB[i]); |
188 | } | 203 | } |
189 | } | 204 | } |
190 | 205 | ||
@@ -197,62 +212,77 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose, | |||
197 | data = (struct OpalIoPhb3ErrorData*)common; | 212 | data = (struct OpalIoPhb3ErrorData*)common; |
198 | pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n", | 213 | pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n", |
199 | hose->global_number, common->version); | 214 | hose->global_number, common->version); |
200 | 215 | if (data->brdgCtl) | |
201 | pr_info(" brdgCtl: %08x\n", data->brdgCtl); | 216 | pr_info(" brdgCtl: %08x\n", |
202 | 217 | data->brdgCtl); | |
203 | pr_info(" portStatusReg: %08x\n", data->portStatusReg); | 218 | if (data->portStatusReg || data->rootCmplxStatus || |
204 | pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus); | 219 | data->busAgentStatus) |
205 | pr_info(" busAgentStatus: %08x\n", data->busAgentStatus); | 220 | pr_info(" UtlSts: %08x %08x %08x\n", |
206 | 221 | data->portStatusReg, data->rootCmplxStatus, | |
207 | pr_info(" deviceStatus: %08x\n", data->deviceStatus); | 222 | data->busAgentStatus); |
208 | pr_info(" slotStatus: %08x\n", data->slotStatus); | 223 | if (data->deviceStatus || data->slotStatus || |
209 | pr_info(" linkStatus: %08x\n", data->linkStatus); | 224 | data->linkStatus || data->devCmdStatus || |
210 | pr_info(" devCmdStatus: %08x\n", data->devCmdStatus); | 225 | data->devSecStatus) |
211 | pr_info(" devSecStatus: %08x\n", data->devSecStatus); | 226 | pr_info(" RootSts: %08x %08x %08x %08x %08x\n", |
212 | 227 | data->deviceStatus, data->slotStatus, | |
213 | pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus); | 228 | data->linkStatus, data->devCmdStatus, |
214 | pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus); | 229 | data->devSecStatus); |
215 | pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus); | 230 | if (data->rootErrorStatus || data->uncorrErrorStatus || |
216 | pr_info(" tlpHdr1: %08x\n", data->tlpHdr1); | 231 | data->corrErrorStatus) |
217 | pr_info(" tlpHdr2: %08x\n", data->tlpHdr2); | 232 | pr_info(" RootErrSts: %08x %08x %08x\n", |
218 | pr_info(" tlpHdr3: %08x\n", data->tlpHdr3); | 233 | data->rootErrorStatus, data->uncorrErrorStatus, |
219 | pr_info(" tlpHdr4: %08x\n", data->tlpHdr4); | 234 | data->corrErrorStatus); |
220 | pr_info(" sourceId: %08x\n", data->sourceId); | 235 | if (data->tlpHdr1 || data->tlpHdr2 || |
221 | pr_info(" errorClass: %016llx\n", data->errorClass); | 236 | data->tlpHdr3 || data->tlpHdr4) |
222 | pr_info(" correlator: %016llx\n", data->correlator); | 237 | pr_info(" RootErrLog: %08x %08x %08x %08x\n", |
223 | 238 | data->tlpHdr1, data->tlpHdr2, | |
224 | pr_info(" nFir: %016llx\n", data->nFir); | 239 | data->tlpHdr3, data->tlpHdr4); |
225 | pr_info(" nFirMask: %016llx\n", data->nFirMask); | 240 | if (data->sourceId || data->errorClass || |
226 | pr_info(" nFirWOF: %016llx\n", data->nFirWOF); | 241 | data->correlator) |
227 | pr_info(" PhbPlssr: %016llx\n", data->phbPlssr); | 242 | pr_info(" RootErrLog1: %08x %016llx %016llx\n", |
228 | pr_info(" PhbCsr: %016llx\n", data->phbCsr); | 243 | data->sourceId, data->errorClass, |
229 | pr_info(" lemFir: %016llx\n", data->lemFir); | 244 | data->correlator); |
230 | pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask); | 245 | if (data->nFir || data->nFirMask || |
231 | pr_info(" lemWOF: %016llx\n", data->lemWOF); | 246 | data->nFirWOF) |
232 | pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus); | 247 | pr_info(" nFir: %016llx %016llx %016llx\n", |
233 | pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus); | 248 | data->nFir, data->nFirMask, |
234 | pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0); | 249 | data->nFirWOF); |
235 | pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1); | 250 | if (data->phbPlssr || data->phbCsr) |
236 | pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus); | 251 | pr_info(" PhbSts: %016llx %016llx\n", |
237 | pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus); | 252 | data->phbPlssr, data->phbCsr); |
238 | pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0); | 253 | if (data->lemFir || data->lemErrorMask || |
239 | pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1); | 254 | data->lemWOF) |
240 | pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus); | 255 | pr_info(" Lem: %016llx %016llx %016llx\n", |
241 | pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus); | 256 | data->lemFir, data->lemErrorMask, |
242 | pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0); | 257 | data->lemWOF); |
243 | pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1); | 258 | if (data->phbErrorStatus || data->phbFirstErrorStatus || |
244 | pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus); | 259 | data->phbErrorLog0 || data->phbErrorLog1) |
245 | pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus); | 260 | pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n", |
246 | pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0); | 261 | data->phbErrorStatus, data->phbFirstErrorStatus, |
247 | pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1); | 262 | data->phbErrorLog0, data->phbErrorLog1); |
263 | if (data->mmioErrorStatus || data->mmioFirstErrorStatus || | ||
264 | data->mmioErrorLog0 || data->mmioErrorLog1) | ||
265 | pr_info(" OutErr: %016llx %016llx %016llx %016llx\n", | ||
266 | data->mmioErrorStatus, data->mmioFirstErrorStatus, | ||
267 | data->mmioErrorLog0, data->mmioErrorLog1); | ||
268 | if (data->dma0ErrorStatus || data->dma0FirstErrorStatus || | ||
269 | data->dma0ErrorLog0 || data->dma0ErrorLog1) | ||
270 | pr_info(" InAErr: %016llx %016llx %016llx %016llx\n", | ||
271 | data->dma0ErrorStatus, data->dma0FirstErrorStatus, | ||
272 | data->dma0ErrorLog0, data->dma0ErrorLog1); | ||
273 | if (data->dma1ErrorStatus || data->dma1FirstErrorStatus || | ||
274 | data->dma1ErrorLog0 || data->dma1ErrorLog1) | ||
275 | pr_info(" InBErr: %016llx %016llx %016llx %016llx\n", | ||
276 | data->dma1ErrorStatus, data->dma1FirstErrorStatus, | ||
277 | data->dma1ErrorLog0, data->dma1ErrorLog1); | ||
248 | 278 | ||
249 | for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { | 279 | for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { |
250 | if ((data->pestA[i] >> 63) == 0 && | 280 | if ((data->pestA[i] >> 63) == 0 && |
251 | (data->pestB[i] >> 63) == 0) | 281 | (data->pestB[i] >> 63) == 0) |
252 | continue; | 282 | continue; |
253 | 283 | ||
254 | pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]); | 284 | pr_info(" PE[%3d] A/B: %016llx %016llx\n", |
255 | pr_info(" PESTB: %016llx\n", data->pestB[i]); | 285 | i, data->pestA[i], data->pestB[i]); |
256 | } | 286 | } |
257 | } | 287 | } |
258 | 288 | ||
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index 82789e79e539..0ea99e3d4815 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c | |||
@@ -35,12 +35,7 @@ | |||
35 | #include "offline_states.h" | 35 | #include "offline_states.h" |
36 | 36 | ||
37 | /* This version can't take the spinlock, because it never returns */ | 37 | /* This version can't take the spinlock, because it never returns */ |
38 | static struct rtas_args rtas_stop_self_args = { | 38 | static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE; |
39 | .token = RTAS_UNKNOWN_SERVICE, | ||
40 | .nargs = 0, | ||
41 | .nret = 1, | ||
42 | .rets = &rtas_stop_self_args.args[0], | ||
43 | }; | ||
44 | 39 | ||
45 | static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) = | 40 | static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) = |
46 | CPU_STATE_OFFLINE; | 41 | CPU_STATE_OFFLINE; |
@@ -93,15 +88,20 @@ void set_default_offline_state(int cpu) | |||
93 | 88 | ||
94 | static void rtas_stop_self(void) | 89 | static void rtas_stop_self(void) |
95 | { | 90 | { |
96 | struct rtas_args *args = &rtas_stop_self_args; | 91 | struct rtas_args args = { |
92 | .token = cpu_to_be32(rtas_stop_self_token), | ||
93 | .nargs = 0, | ||
94 | .nret = 1, | ||
95 | .rets = &args.args[0], | ||
96 | }; | ||
97 | 97 | ||
98 | local_irq_disable(); | 98 | local_irq_disable(); |
99 | 99 | ||
100 | BUG_ON(args->token == RTAS_UNKNOWN_SERVICE); | 100 | BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE); |
101 | 101 | ||
102 | printk("cpu %u (hwid %u) Ready to die...\n", | 102 | printk("cpu %u (hwid %u) Ready to die...\n", |
103 | smp_processor_id(), hard_smp_processor_id()); | 103 | smp_processor_id(), hard_smp_processor_id()); |
104 | enter_rtas(__pa(args)); | 104 | enter_rtas(__pa(&args)); |
105 | 105 | ||
106 | panic("Alas, I survived.\n"); | 106 | panic("Alas, I survived.\n"); |
107 | } | 107 | } |
@@ -392,10 +392,10 @@ static int __init pseries_cpu_hotplug_init(void) | |||
392 | } | 392 | } |
393 | } | 393 | } |
394 | 394 | ||
395 | rtas_stop_self_args.token = rtas_token("stop-self"); | 395 | rtas_stop_self_token = rtas_token("stop-self"); |
396 | qcss_tok = rtas_token("query-cpu-stopped-state"); | 396 | qcss_tok = rtas_token("query-cpu-stopped-state"); |
397 | 397 | ||
398 | if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE || | 398 | if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE || |
399 | qcss_tok == RTAS_UNKNOWN_SERVICE) { | 399 | qcss_tok == RTAS_UNKNOWN_SERVICE) { |
400 | printk(KERN_INFO "CPU Hotplug not supported by firmware " | 400 | printk(KERN_INFO "CPU Hotplug not supported by firmware " |
401 | "- disabling.\n"); | 401 | "- disabling.\n"); |
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 59c8efce1b99..0248949a756d 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S | |||
@@ -1421,5 +1421,5 @@ ENTRY(sys_sched_setattr_wrapper) | |||
1421 | ENTRY(sys_sched_getattr_wrapper) | 1421 | ENTRY(sys_sched_getattr_wrapper) |
1422 | lgfr %r2,%r2 # pid_t | 1422 | lgfr %r2,%r2 # pid_t |
1423 | llgtr %r3,%r3 # const char __user * | 1423 | llgtr %r3,%r3 # const char __user * |
1424 | llgfr %r3,%r3 # unsigned int | 1424 | llgfr %r4,%r4 # unsigned int |
1425 | jg sys_sched_getattr | 1425 | jg sys_sched_getattr |
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c index 60c11a629d96..f91c03119804 100644 --- a/arch/s390/pci/pci_dma.c +++ b/arch/s390/pci/pci_dma.c | |||
@@ -206,11 +206,13 @@ static void dma_cleanup_tables(struct zpci_dev *zdev) | |||
206 | zdev->dma_table = NULL; | 206 | zdev->dma_table = NULL; |
207 | } | 207 | } |
208 | 208 | ||
209 | static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start, | 209 | static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, |
210 | int size) | 210 | unsigned long start, int size) |
211 | { | 211 | { |
212 | unsigned long boundary_size = 0x1000000; | 212 | unsigned long boundary_size; |
213 | 213 | ||
214 | boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1, | ||
215 | PAGE_SIZE) >> PAGE_SHIFT; | ||
214 | return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages, | 216 | return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages, |
215 | start, size, 0, boundary_size, 0); | 217 | start, size, 0, boundary_size, 0); |
216 | } | 218 | } |
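Worked example of the boundary_size arithmetic above, outside the kernel: the value from dma_get_seg_boundary() is an inclusive byte mask, so adding one, rounding up to a whole page, and shifting by PAGE_SHIFT gives the boundary expressed in pages, which is what iommu_area_alloc() expects. The 0xffffffff mask below is just the common default for a device with no tighter limit; ALIGN() is reproduced here only to show the arithmetic.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1ULL << PAGE_SHIFT)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long long seg_boundary = 0xffffffffULL;	/* inclusive mask */
	unsigned long long boundary_size;

	/* mask + 1 = boundary in bytes; round up to pages, then count pages */
	boundary_size = ALIGN(seg_boundary + 1, PAGE_SIZE) >> PAGE_SHIFT;

	/* 4 GiB boundary / 4 KiB pages = 1048576 pages */
	printf("boundary_size = %llu pages\n", boundary_size);
	return 0;
}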
diff --git a/arch/sh/include/cpu-sh2/cpu/cache.h b/arch/sh/include/cpu-sh2/cpu/cache.h index 673515bc4135..aa1b2b9088a7 100644 --- a/arch/sh/include/cpu-sh2/cpu/cache.h +++ b/arch/sh/include/cpu-sh2/cpu/cache.h | |||
@@ -18,7 +18,7 @@ | |||
18 | #define SH_CACHE_ASSOC 8 | 18 | #define SH_CACHE_ASSOC 8 |
19 | 19 | ||
20 | #if defined(CONFIG_CPU_SUBTYPE_SH7619) | 20 | #if defined(CONFIG_CPU_SUBTYPE_SH7619) |
21 | #define CCR 0xffffffec | 21 | #define SH_CCR 0xffffffec |
22 | 22 | ||
23 | #define CCR_CACHE_CE 0x01 /* Cache enable */ | 23 | #define CCR_CACHE_CE 0x01 /* Cache enable */ |
24 | #define CCR_CACHE_WT 0x02 /* CCR[bit1=1,bit2=1] */ | 24 | #define CCR_CACHE_WT 0x02 /* CCR[bit1=1,bit2=1] */ |
diff --git a/arch/sh/include/cpu-sh2a/cpu/cache.h b/arch/sh/include/cpu-sh2a/cpu/cache.h index defb0baa5a06..b27ce92cb600 100644 --- a/arch/sh/include/cpu-sh2a/cpu/cache.h +++ b/arch/sh/include/cpu-sh2a/cpu/cache.h | |||
@@ -17,8 +17,8 @@ | |||
17 | #define SH_CACHE_COMBINED 4 | 17 | #define SH_CACHE_COMBINED 4 |
18 | #define SH_CACHE_ASSOC 8 | 18 | #define SH_CACHE_ASSOC 8 |
19 | 19 | ||
20 | #define CCR 0xfffc1000 /* CCR1 */ | 20 | #define SH_CCR 0xfffc1000 /* CCR1 */ |
21 | #define CCR2 0xfffc1004 | 21 | #define SH_CCR2 0xfffc1004 |
22 | 22 | ||
23 | /* | 23 | /* |
24 | * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not | 24 | * Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not |
diff --git a/arch/sh/include/cpu-sh3/cpu/cache.h b/arch/sh/include/cpu-sh3/cpu/cache.h index bee2d81c56bf..29700fd88c75 100644 --- a/arch/sh/include/cpu-sh3/cpu/cache.h +++ b/arch/sh/include/cpu-sh3/cpu/cache.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #define SH_CACHE_COMBINED 4 | 17 | #define SH_CACHE_COMBINED 4 |
18 | #define SH_CACHE_ASSOC 8 | 18 | #define SH_CACHE_ASSOC 8 |
19 | 19 | ||
20 | #define CCR 0xffffffec /* Address of Cache Control Register */ | 20 | #define SH_CCR 0xffffffec /* Address of Cache Control Register */ |
21 | 21 | ||
22 | #define CCR_CACHE_CE 0x01 /* Cache Enable */ | 22 | #define CCR_CACHE_CE 0x01 /* Cache Enable */ |
23 | #define CCR_CACHE_WT 0x02 /* Write-Through (for P0,U0,P3) (else writeback) */ | 23 | #define CCR_CACHE_WT 0x02 /* Write-Through (for P0,U0,P3) (else writeback) */ |
diff --git a/arch/sh/include/cpu-sh4/cpu/cache.h b/arch/sh/include/cpu-sh4/cpu/cache.h index 7bfb9e8b069c..92c4cd119b66 100644 --- a/arch/sh/include/cpu-sh4/cpu/cache.h +++ b/arch/sh/include/cpu-sh4/cpu/cache.h | |||
@@ -17,7 +17,7 @@ | |||
17 | #define SH_CACHE_COMBINED 4 | 17 | #define SH_CACHE_COMBINED 4 |
18 | #define SH_CACHE_ASSOC 8 | 18 | #define SH_CACHE_ASSOC 8 |
19 | 19 | ||
20 | #define CCR 0xff00001c /* Address of Cache Control Register */ | 20 | #define SH_CCR 0xff00001c /* Address of Cache Control Register */ |
21 | #define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */ | 21 | #define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */ |
22 | #define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/ | 22 | #define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/ |
23 | #define CCR_CACHE_CB 0x0004 /* Copy-Back (for P1) (else writethrough) */ | 23 | #define CCR_CACHE_CB 0x0004 /* Copy-Back (for P1) (else writethrough) */ |
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index ecf83cd158dc..0d7360d549c1 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c | |||
@@ -112,7 +112,7 @@ static void cache_init(void) | |||
112 | unsigned long ccr, flags; | 112 | unsigned long ccr, flags; |
113 | 113 | ||
114 | jump_to_uncached(); | 114 | jump_to_uncached(); |
115 | ccr = __raw_readl(CCR); | 115 | ccr = __raw_readl(SH_CCR); |
116 | 116 | ||
117 | /* | 117 | /* |
118 | * At this point we don't know whether the cache is enabled or not - a | 118 | * At this point we don't know whether the cache is enabled or not - a |
@@ -189,7 +189,7 @@ static void cache_init(void) | |||
189 | 189 | ||
190 | l2_cache_init(); | 190 | l2_cache_init(); |
191 | 191 | ||
192 | __raw_writel(flags, CCR); | 192 | __raw_writel(flags, SH_CCR); |
193 | back_to_cached(); | 193 | back_to_cached(); |
194 | } | 194 | } |
195 | #else | 195 | #else |
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c index 115725198038..777e50f33c00 100644 --- a/arch/sh/mm/cache-debugfs.c +++ b/arch/sh/mm/cache-debugfs.c | |||
@@ -36,7 +36,7 @@ static int cache_seq_show(struct seq_file *file, void *iter) | |||
36 | */ | 36 | */ |
37 | jump_to_uncached(); | 37 | jump_to_uncached(); |
38 | 38 | ||
39 | ccr = __raw_readl(CCR); | 39 | ccr = __raw_readl(SH_CCR); |
40 | if ((ccr & CCR_CACHE_ENABLE) == 0) { | 40 | if ((ccr & CCR_CACHE_ENABLE) == 0) { |
41 | back_to_cached(); | 41 | back_to_cached(); |
42 | 42 | ||
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c index defcf719f2e8..a74259f2f981 100644 --- a/arch/sh/mm/cache-sh2.c +++ b/arch/sh/mm/cache-sh2.c | |||
@@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size) | |||
63 | local_irq_save(flags); | 63 | local_irq_save(flags); |
64 | jump_to_uncached(); | 64 | jump_to_uncached(); |
65 | 65 | ||
66 | ccr = __raw_readl(CCR); | 66 | ccr = __raw_readl(SH_CCR); |
67 | ccr |= CCR_CACHE_INVALIDATE; | 67 | ccr |= CCR_CACHE_INVALIDATE; |
68 | __raw_writel(ccr, CCR); | 68 | __raw_writel(ccr, SH_CCR); |
69 | 69 | ||
70 | back_to_cached(); | 70 | back_to_cached(); |
71 | local_irq_restore(flags); | 71 | local_irq_restore(flags); |
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c index 949e2d3138a0..ee87d081259b 100644 --- a/arch/sh/mm/cache-sh2a.c +++ b/arch/sh/mm/cache-sh2a.c | |||
@@ -134,7 +134,8 @@ static void sh2a__flush_invalidate_region(void *start, int size) | |||
134 | 134 | ||
135 | /* If there are too many pages then just blow the cache */ | 135 | /* If there are too many pages then just blow the cache */ |
136 | if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) { | 136 | if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) { |
137 | __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR); | 137 | __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE, |
138 | SH_CCR); | ||
138 | } else { | 139 | } else { |
139 | for (v = begin; v < end; v += L1_CACHE_BYTES) | 140 | for (v = begin; v < end; v += L1_CACHE_BYTES) |
140 | sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v); | 141 | sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v); |
@@ -167,7 +168,8 @@ static void sh2a_flush_icache_range(void *args) | |||
167 | /* I-Cache invalidate */ | 168 | /* I-Cache invalidate */ |
168 | /* If there are too many pages then just blow the cache */ | 169 | /* If there are too many pages then just blow the cache */ |
169 | if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { | 170 | if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) { |
170 | __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR); | 171 | __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE, |
172 | SH_CCR); | ||
171 | } else { | 173 | } else { |
172 | for (v = start; v < end; v += L1_CACHE_BYTES) | 174 | for (v = start; v < end; v += L1_CACHE_BYTES) |
173 | sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v); | 175 | sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v); |
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index 0e529285b28d..51d8f7f31d1d 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c | |||
@@ -133,9 +133,9 @@ static void flush_icache_all(void) | |||
133 | jump_to_uncached(); | 133 | jump_to_uncached(); |
134 | 134 | ||
135 | /* Flush I-cache */ | 135 | /* Flush I-cache */ |
136 | ccr = __raw_readl(CCR); | 136 | ccr = __raw_readl(SH_CCR); |
137 | ccr |= CCR_CACHE_ICI; | 137 | ccr |= CCR_CACHE_ICI; |
138 | __raw_writel(ccr, CCR); | 138 | __raw_writel(ccr, SH_CCR); |
139 | 139 | ||
140 | /* | 140 | /* |
141 | * back_to_cached() will take care of the barrier for us, don't add | 141 | * back_to_cached() will take care of the barrier for us, don't add |
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c index c0adbee97b5f..24c58b7dc022 100644 --- a/arch/sh/mm/cache-shx3.c +++ b/arch/sh/mm/cache-shx3.c | |||
@@ -19,7 +19,7 @@ void __init shx3_cache_init(void) | |||
19 | { | 19 | { |
20 | unsigned int ccr; | 20 | unsigned int ccr; |
21 | 21 | ||
22 | ccr = __raw_readl(CCR); | 22 | ccr = __raw_readl(SH_CCR); |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * If we've got cache aliases, resolve them in hardware. | 25 | * If we've got cache aliases, resolve them in hardware. |
@@ -40,5 +40,5 @@ void __init shx3_cache_init(void) | |||
40 | ccr |= CCR_CACHE_IBE; | 40 | ccr |= CCR_CACHE_IBE; |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | writel_uncached(ccr, CCR); | 43 | writel_uncached(ccr, SH_CCR); |
44 | } | 44 | } |
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index 616966a96cba..097c2cdd117f 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c | |||
@@ -285,8 +285,8 @@ void __init cpu_cache_init(void) | |||
285 | { | 285 | { |
286 | unsigned int cache_disabled = 0; | 286 | unsigned int cache_disabled = 0; |
287 | 287 | ||
288 | #ifdef CCR | 288 | #ifdef SH_CCR |
289 | cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE); | 289 | cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE); |
290 | #endif | 290 | #endif |
291 | 291 | ||
292 | compute_alias(&boot_cpu_data.icache); | 292 | compute_alias(&boot_cpu_data.icache); |
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index c026cca5602c..f3aaf231b4e5 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu | |||
@@ -341,10 +341,6 @@ config X86_USE_3DNOW | |||
341 | def_bool y | 341 | def_bool y |
342 | depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML | 342 | depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML |
343 | 343 | ||
344 | config X86_OOSTORE | ||
345 | def_bool y | ||
346 | depends on (MWINCHIP3D || MWINCHIPC6) && MTRR | ||
347 | |||
348 | # | 344 | # |
349 | # P6_NOPs are a relatively minor optimization that require a family >= | 345 | # P6_NOPs are a relatively minor optimization that require a family >= |
350 | # 6 processor, except that it is broken on certain VIA chips. | 346 | # 6 processor, except that it is broken on certain VIA chips. |
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c index 90a21f430117..4dbf967da50d 100644 --- a/arch/x86/boot/compressed/aslr.c +++ b/arch/x86/boot/compressed/aslr.c | |||
@@ -111,7 +111,7 @@ struct mem_vector { | |||
111 | }; | 111 | }; |
112 | 112 | ||
113 | #define MEM_AVOID_MAX 5 | 113 | #define MEM_AVOID_MAX 5 |
114 | struct mem_vector mem_avoid[MEM_AVOID_MAX]; | 114 | static struct mem_vector mem_avoid[MEM_AVOID_MAX]; |
115 | 115 | ||
116 | static bool mem_contains(struct mem_vector *region, struct mem_vector *item) | 116 | static bool mem_contains(struct mem_vector *region, struct mem_vector *item) |
117 | { | 117 | { |
@@ -180,7 +180,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size, | |||
180 | } | 180 | } |
181 | 181 | ||
182 | /* Does this memory vector overlap a known avoided area? */ | 182 | /* Does this memory vector overlap a known avoided area? */ |
183 | bool mem_avoid_overlap(struct mem_vector *img) | 183 | static bool mem_avoid_overlap(struct mem_vector *img) |
184 | { | 184 | { |
185 | int i; | 185 | int i; |
186 | 186 | ||
@@ -192,8 +192,9 @@ bool mem_avoid_overlap(struct mem_vector *img) | |||
192 | return false; | 192 | return false; |
193 | } | 193 | } |
194 | 194 | ||
195 | unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN]; | 195 | static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / |
196 | unsigned long slot_max = 0; | 196 | CONFIG_PHYSICAL_ALIGN]; |
197 | static unsigned long slot_max; | ||
197 | 198 | ||
198 | static void slots_append(unsigned long addr) | 199 | static void slots_append(unsigned long addr) |
199 | { | 200 | { |
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h index 04a48903b2eb..69bbb4845020 100644 --- a/arch/x86/include/asm/barrier.h +++ b/arch/x86/include/asm/barrier.h | |||
@@ -85,11 +85,7 @@ | |||
85 | #else | 85 | #else |
86 | # define smp_rmb() barrier() | 86 | # define smp_rmb() barrier() |
87 | #endif | 87 | #endif |
88 | #ifdef CONFIG_X86_OOSTORE | 88 | #define smp_wmb() barrier() |
89 | # define smp_wmb() wmb() | ||
90 | #else | ||
91 | # define smp_wmb() barrier() | ||
92 | #endif | ||
93 | #define smp_read_barrier_depends() read_barrier_depends() | 89 | #define smp_read_barrier_depends() read_barrier_depends() |
94 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | 90 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) |
95 | #else /* !SMP */ | 91 | #else /* !SMP */ |
@@ -100,7 +96,7 @@ | |||
100 | #define set_mb(var, value) do { var = value; barrier(); } while (0) | 96 | #define set_mb(var, value) do { var = value; barrier(); } while (0) |
101 | #endif /* SMP */ | 97 | #endif /* SMP */ |
102 | 98 | ||
103 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | 99 | #if defined(CONFIG_X86_PPRO_FENCE) |
104 | 100 | ||
105 | /* | 101 | /* |
106 | * For either of these options x86 doesn't have a strong TSO memory | 102 | * For either of these options x86 doesn't have a strong TSO memory |
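With the out-of-order-store option gone, smp_wmb() collapses to a pure compiler barrier: on x86's TSO ordering, stores already become globally visible in program order, so only compiler reordering has to be prevented. A stand-alone sketch of that pattern follows, with barrier() spelled out as the usual GCC memory clobber; the producer/flag example is illustrative and not taken from the patch.

#include <stdio.h>

#define barrier()	asm volatile("" ::: "memory")
#define smp_wmb()	barrier()	/* compiler barrier is enough on TSO x86 */

static int payload;
static int ready;

static void producer(void)
{
	payload = 42;
	smp_wmb();	/* keep the compiler from moving this store past 'ready' */
	ready = 1;	/* a reader on another CPU would test this flag */
}

int main(void)
{
	producer();
	if (ready)
		printf("payload = %d\n", payload);
	return 0;
}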
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index 3d6b9f81cc68..acd86c850414 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h | |||
@@ -134,6 +134,7 @@ extern void efi_setup_page_tables(void); | |||
134 | extern void __init old_map_region(efi_memory_desc_t *md); | 134 | extern void __init old_map_region(efi_memory_desc_t *md); |
135 | extern void __init runtime_code_page_mkexec(void); | 135 | extern void __init runtime_code_page_mkexec(void); |
136 | extern void __init efi_runtime_mkexec(void); | 136 | extern void __init efi_runtime_mkexec(void); |
137 | extern void __init efi_apply_memmap_quirks(void); | ||
137 | 138 | ||
138 | struct efi_setup_data { | 139 | struct efi_setup_data { |
139 | u64 fw_vendor; | 140 | u64 fw_vendor; |
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h index 34f69cb9350a..91d9c69a629e 100644 --- a/arch/x86/include/asm/io.h +++ b/arch/x86/include/asm/io.h | |||
@@ -237,7 +237,7 @@ memcpy_toio(volatile void __iomem *dst, const void *src, size_t count) | |||
237 | 237 | ||
238 | static inline void flush_write_buffers(void) | 238 | static inline void flush_write_buffers(void) |
239 | { | 239 | { |
240 | #if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE) | 240 | #if defined(CONFIG_X86_PPRO_FENCE) |
241 | asm volatile("lock; addl $0,0(%%esp)": : :"memory"); | 241 | asm volatile("lock; addl $0,0(%%esp)": : :"memory"); |
242 | #endif | 242 | #endif |
243 | } | 243 | } |
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index bf156ded74b5..0f62f5482d91 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h | |||
@@ -26,10 +26,9 @@ | |||
26 | # define LOCK_PTR_REG "D" | 26 | # define LOCK_PTR_REG "D" |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | #if defined(CONFIG_X86_32) && \ | 29 | #if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE)) |
30 | (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)) | ||
31 | /* | 30 | /* |
32 | * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock | 31 | * On PPro SMP, we use a locked operation to unlock |
33 | * (PPro errata 66, 92) | 32 | * (PPro errata 66, 92) |
34 | */ | 33 | */ |
35 | # define UNLOCK_LOCK_PREFIX LOCK_PREFIX | 34 | # define UNLOCK_LOCK_PREFIX LOCK_PREFIX |
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 8779edab684e..d8fba5c15fbd 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
@@ -8,236 +8,6 @@ | |||
8 | 8 | ||
9 | #include "cpu.h" | 9 | #include "cpu.h" |
10 | 10 | ||
11 | #ifdef CONFIG_X86_OOSTORE | ||
12 | |||
13 | static u32 power2(u32 x) | ||
14 | { | ||
15 | u32 s = 1; | ||
16 | |||
17 | while (s <= x) | ||
18 | s <<= 1; | ||
19 | |||
20 | return s >>= 1; | ||
21 | } | ||
22 | |||
23 | |||
24 | /* | ||
25 | * Set up an actual MCR | ||
26 | */ | ||
27 | static void centaur_mcr_insert(int reg, u32 base, u32 size, int key) | ||
28 | { | ||
29 | u32 lo, hi; | ||
30 | |||
31 | hi = base & ~0xFFF; | ||
32 | lo = ~(size-1); /* Size is a power of 2 so this makes a mask */ | ||
33 | lo &= ~0xFFF; /* Remove the ctrl value bits */ | ||
34 | lo |= key; /* Attribute we wish to set */ | ||
35 | wrmsr(reg+MSR_IDT_MCR0, lo, hi); | ||
36 | mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */ | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * Figure what we can cover with MCR's | ||
41 | * | ||
42 | * Shortcut: We know you can't put 4Gig of RAM on a winchip | ||
43 | */ | ||
44 | static u32 ramtop(void) | ||
45 | { | ||
46 | u32 clip = 0xFFFFFFFFUL; | ||
47 | u32 top = 0; | ||
48 | int i; | ||
49 | |||
50 | for (i = 0; i < e820.nr_map; i++) { | ||
51 | unsigned long start, end; | ||
52 | |||
53 | if (e820.map[i].addr > 0xFFFFFFFFUL) | ||
54 | continue; | ||
55 | /* | ||
56 | * Don't MCR over reserved space. Ignore the ISA hole | ||
57 | * we frob around that catastrophe already | ||
58 | */ | ||
59 | if (e820.map[i].type == E820_RESERVED) { | ||
60 | if (e820.map[i].addr >= 0x100000UL && | ||
61 | e820.map[i].addr < clip) | ||
62 | clip = e820.map[i].addr; | ||
63 | continue; | ||
64 | } | ||
65 | start = e820.map[i].addr; | ||
66 | end = e820.map[i].addr + e820.map[i].size; | ||
67 | if (start >= end) | ||
68 | continue; | ||
69 | if (end > top) | ||
70 | top = end; | ||
71 | } | ||
72 | /* | ||
73 | * Everything below 'top' should be RAM except for the ISA hole. | ||
74 | * Because of the limited MCR's we want to map NV/ACPI into our | ||
75 | * MCR range for gunk in RAM | ||
76 | * | ||
77 | * Clip might cause us to MCR insufficient RAM but that is an | ||
78 | * acceptable failure mode and should only bite obscure boxes with | ||
79 | * a VESA hole at 15Mb | ||
80 | * | ||
81 | * The second case Clip sometimes kicks in is when the EBDA is marked | ||
82 | * as reserved. Again we fail safe with reasonable results | ||
83 | */ | ||
84 | if (top > clip) | ||
85 | top = clip; | ||
86 | |||
87 | return top; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Compute a set of MCR's to give maximum coverage | ||
92 | */ | ||
93 | static int centaur_mcr_compute(int nr, int key) | ||
94 | { | ||
95 | u32 mem = ramtop(); | ||
96 | u32 root = power2(mem); | ||
97 | u32 base = root; | ||
98 | u32 top = root; | ||
99 | u32 floor = 0; | ||
100 | int ct = 0; | ||
101 | |||
102 | while (ct < nr) { | ||
103 | u32 fspace = 0; | ||
104 | u32 high; | ||
105 | u32 low; | ||
106 | |||
107 | /* | ||
108 | * Find the largest block we will fill going upwards | ||
109 | */ | ||
110 | high = power2(mem-top); | ||
111 | |||
112 | /* | ||
113 | * Find the largest block we will fill going downwards | ||
114 | */ | ||
115 | low = base/2; | ||
116 | |||
117 | /* | ||
118 | * Don't fill below 1Mb going downwards as there | ||
119 | * is an ISA hole in the way. | ||
120 | */ | ||
121 | if (base <= 1024*1024) | ||
122 | low = 0; | ||
123 | |||
124 | /* | ||
125 | * See how much space we could cover by filling below | ||
126 | * the ISA hole | ||
127 | */ | ||
128 | |||
129 | if (floor == 0) | ||
130 | fspace = 512*1024; | ||
131 | else if (floor == 512*1024) | ||
132 | fspace = 128*1024; | ||
133 | |||
134 | /* And forget ROM space */ | ||
135 | |||
136 | /* | ||
137 | * Now install the largest coverage we get | ||
138 | */ | ||
139 | if (fspace > high && fspace > low) { | ||
140 | centaur_mcr_insert(ct, floor, fspace, key); | ||
141 | floor += fspace; | ||
142 | } else if (high > low) { | ||
143 | centaur_mcr_insert(ct, top, high, key); | ||
144 | top += high; | ||
145 | } else if (low > 0) { | ||
146 | base -= low; | ||
147 | centaur_mcr_insert(ct, base, low, key); | ||
148 | } else | ||
149 | break; | ||
150 | ct++; | ||
151 | } | ||
152 | /* | ||
153 | * We loaded ct values. We now need to set the mask. The caller | ||
154 | * must do this bit. | ||
155 | */ | ||
156 | return ct; | ||
157 | } | ||
158 | |||
159 | static void centaur_create_optimal_mcr(void) | ||
160 | { | ||
161 | int used; | ||
162 | int i; | ||
163 | |||
164 | /* | ||
165 | * Allocate up to 6 mcrs to mark as much of ram as possible | ||
166 | * as write combining and weak write ordered. | ||
167 | * | ||
168 | * To experiment with: Linux never uses stack operations for | ||
169 | * mmio spaces so we could globally enable stack operation wc | ||
170 | * | ||
171 | * Load the registers with type 31 - full write combining, all | ||
172 | * writes weakly ordered. | ||
173 | */ | ||
174 | used = centaur_mcr_compute(6, 31); | ||
175 | |||
176 | /* | ||
177 | * Wipe unused MCRs | ||
178 | */ | ||
179 | for (i = used; i < 8; i++) | ||
180 | wrmsr(MSR_IDT_MCR0+i, 0, 0); | ||
181 | } | ||
182 | |||
183 | static void winchip2_create_optimal_mcr(void) | ||
184 | { | ||
185 | u32 lo, hi; | ||
186 | int used; | ||
187 | int i; | ||
188 | |||
189 | /* | ||
190 | * Allocate up to 6 mcrs to mark as much of ram as possible | ||
191 | * as write combining, weak store ordered. | ||
192 | * | ||
193 | * Load the registers with type 25 | ||
194 | * 8 - weak write ordering | ||
195 | * 16 - weak read ordering | ||
196 | * 1 - write combining | ||
197 | */ | ||
198 | used = centaur_mcr_compute(6, 25); | ||
199 | |||
200 | /* | ||
201 | * Mark the registers we are using. | ||
202 | */ | ||
203 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
204 | for (i = 0; i < used; i++) | ||
205 | lo |= 1<<(9+i); | ||
206 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
207 | |||
208 | /* | ||
209 | * Wipe unused MCRs | ||
210 | */ | ||
211 | |||
212 | for (i = used; i < 8; i++) | ||
213 | wrmsr(MSR_IDT_MCR0+i, 0, 0); | ||
214 | } | ||
215 | |||
216 | /* | ||
217 | * Handle the MCR key on the Winchip 2. | ||
218 | */ | ||
219 | static void winchip2_unprotect_mcr(void) | ||
220 | { | ||
221 | u32 lo, hi; | ||
222 | u32 key; | ||
223 | |||
224 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
225 | lo &= ~0x1C0; /* blank bits 8-6 */ | ||
226 | key = (lo>>17) & 7; | ||
227 | lo |= key<<6; /* replace with unlock key */ | ||
228 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
229 | } | ||
230 | |||
231 | static void winchip2_protect_mcr(void) | ||
232 | { | ||
233 | u32 lo, hi; | ||
234 | |||
235 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
236 | lo &= ~0x1C0; /* blank bits 8-6 */ | ||
237 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
238 | } | ||
239 | #endif /* CONFIG_X86_OOSTORE */ | ||
240 | |||
241 | #define ACE_PRESENT (1 << 6) | 11 | #define ACE_PRESENT (1 << 6) |
242 | #define ACE_ENABLED (1 << 7) | 12 | #define ACE_ENABLED (1 << 7) |
243 | #define ACE_FCR (1 << 28) /* MSR_VIA_FCR */ | 13 | #define ACE_FCR (1 << 28) /* MSR_VIA_FCR */ |
@@ -362,20 +132,6 @@ static void init_centaur(struct cpuinfo_x86 *c) | |||
362 | fcr_clr = DPDC; | 132 | fcr_clr = DPDC; |
363 | printk(KERN_NOTICE "Disabling bugged TSC.\n"); | 133 | printk(KERN_NOTICE "Disabling bugged TSC.\n"); |
364 | clear_cpu_cap(c, X86_FEATURE_TSC); | 134 | clear_cpu_cap(c, X86_FEATURE_TSC); |
365 | #ifdef CONFIG_X86_OOSTORE | ||
366 | centaur_create_optimal_mcr(); | ||
367 | /* | ||
368 | * Enable: | ||
369 | * write combining on non-stack, non-string | ||
370 | * write combining on string, all types | ||
371 | * weak write ordering | ||
372 | * | ||
373 | * The C6 original lacks weak read order | ||
374 | * | ||
375 | * Note 0x120 is write only on Winchip 1 | ||
376 | */ | ||
377 | wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0); | ||
378 | #endif | ||
379 | break; | 135 | break; |
380 | case 8: | 136 | case 8: |
381 | switch (c->x86_mask) { | 137 | switch (c->x86_mask) { |
@@ -392,40 +148,12 @@ static void init_centaur(struct cpuinfo_x86 *c) | |||
392 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| | 148 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| |
393 | E2MMX|EAMD3D; | 149 | E2MMX|EAMD3D; |
394 | fcr_clr = DPDC; | 150 | fcr_clr = DPDC; |
395 | #ifdef CONFIG_X86_OOSTORE | ||
396 | winchip2_unprotect_mcr(); | ||
397 | winchip2_create_optimal_mcr(); | ||
398 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
399 | /* | ||
400 | * Enable: | ||
401 | * write combining on non-stack, non-string | ||
402 | * write combining on string, all types | ||
403 | * weak write ordering | ||
404 | */ | ||
405 | lo |= 31; | ||
406 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
407 | winchip2_protect_mcr(); | ||
408 | #endif | ||
409 | break; | 151 | break; |
410 | case 9: | 152 | case 9: |
411 | name = "3"; | 153 | name = "3"; |
412 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| | 154 | fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK| |
413 | E2MMX|EAMD3D; | 155 | E2MMX|EAMD3D; |
414 | fcr_clr = DPDC; | 156 | fcr_clr = DPDC; |
415 | #ifdef CONFIG_X86_OOSTORE | ||
416 | winchip2_unprotect_mcr(); | ||
417 | winchip2_create_optimal_mcr(); | ||
418 | rdmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
419 | /* | ||
420 | * Enable: | ||
421 | * write combining on non-stack, non-string | ||
422 | * write combining on string, all types | ||
423 | * weak write ordering | ||
424 | */ | ||
425 | lo |= 31; | ||
426 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | ||
427 | winchip2_protect_mcr(); | ||
428 | #endif | ||
429 | break; | 157 | break; |
430 | default: | 158 | default: |
431 | name = "??"; | 159 | name = "??"; |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 895604f2e916..79f9f848bee4 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -1192,6 +1192,9 @@ static void x86_pmu_del(struct perf_event *event, int flags) | |||
1192 | for (i = 0; i < cpuc->n_events; i++) { | 1192 | for (i = 0; i < cpuc->n_events; i++) { |
1193 | if (event == cpuc->event_list[i]) { | 1193 | if (event == cpuc->event_list[i]) { |
1194 | 1194 | ||
1195 | if (i >= cpuc->n_events - cpuc->n_added) | ||
1196 | --cpuc->n_added; | ||
1197 | |||
1195 | if (x86_pmu.put_event_constraints) | 1198 | if (x86_pmu.put_event_constraints) |
1196 | x86_pmu.put_event_constraints(cpuc, event); | 1199 | x86_pmu.put_event_constraints(cpuc, event); |
1197 | 1200 | ||
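The index check added above relies on event_list keeping the n_added most recently added (not yet scheduled) events at the tail, so deleting one of them must also drop n_added. A small stand-alone sketch of that bookkeeping; struct fake_cpuc and fake_pmu_del() are invented, and only the `i >= n_events - n_added` test mirrors the patch.

#include <stdio.h>

struct fake_cpuc {
	int n_events;	/* total events on the list */
	int n_added;	/* trailing events not yet scheduled in */
};

static void fake_pmu_del(struct fake_cpuc *cpuc, int i)
{
	/* Events at indices >= n_events - n_added are still uncommitted. */
	if (i >= cpuc->n_events - cpuc->n_added)
		--cpuc->n_added;
	--cpuc->n_events;
}

int main(void)
{
	struct fake_cpuc cpuc = { .n_events = 4, .n_added = 2 };

	fake_pmu_del(&cpuc, 3);	/* index 3 is uncommitted (3 >= 4 - 2) */
	printf("n_events=%d n_added=%d\n", cpuc.n_events, cpuc.n_added);

	fake_pmu_del(&cpuc, 0);	/* index 0 was committed long ago */
	printf("n_events=%d n_added=%d\n", cpuc.n_events, cpuc.n_added);
	return 0;
}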
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index c88f7f4b03ee..047f540cf3f7 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -3334,6 +3334,8 @@ static int __init uncore_type_init(struct intel_uncore_type *type) | |||
3334 | if (!pmus) | 3334 | if (!pmus) |
3335 | return -ENOMEM; | 3335 | return -ENOMEM; |
3336 | 3336 | ||
3337 | type->pmus = pmus; | ||
3338 | |||
3337 | type->unconstrainted = (struct event_constraint) | 3339 | type->unconstrainted = (struct event_constraint) |
3338 | __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, | 3340 | __EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1, |
3339 | 0, type->num_counters, 0, 0); | 3341 | 0, type->num_counters, 0, 0); |
@@ -3369,7 +3371,6 @@ static int __init uncore_type_init(struct intel_uncore_type *type) | |||
3369 | } | 3371 | } |
3370 | 3372 | ||
3371 | type->pmu_group = &uncore_pmu_attr_group; | 3373 | type->pmu_group = &uncore_pmu_attr_group; |
3372 | type->pmus = pmus; | ||
3373 | return 0; | 3374 | return 0; |
3374 | fail: | 3375 | fail: |
3375 | uncore_type_exit(type); | 3376 | uncore_type_exit(type); |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index 81ba27679f18..f36bd42d6f0c 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers) | |||
544 | /* This is global to keep gas from relaxing the jumps */ | 544 | /* This is global to keep gas from relaxing the jumps */ |
545 | ENTRY(early_idt_handler) | 545 | ENTRY(early_idt_handler) |
546 | cld | 546 | cld |
547 | |||
548 | cmpl $2,(%esp) # X86_TRAP_NMI | ||
549 | je is_nmi # Ignore NMI | ||
550 | |||
547 | cmpl $2,%ss:early_recursion_flag | 551 | cmpl $2,%ss:early_recursion_flag |
548 | je hlt_loop | 552 | je hlt_loop |
549 | incl %ss:early_recursion_flag | 553 | incl %ss:early_recursion_flag |
@@ -594,8 +598,9 @@ ex_entry: | |||
594 | pop %edx | 598 | pop %edx |
595 | pop %ecx | 599 | pop %ecx |
596 | pop %eax | 600 | pop %eax |
597 | addl $8,%esp /* drop vector number and error code */ | ||
598 | decl %ss:early_recursion_flag | 601 | decl %ss:early_recursion_flag |
602 | is_nmi: | ||
603 | addl $8,%esp /* drop vector number and error code */ | ||
599 | iret | 604 | iret |
600 | ENDPROC(early_idt_handler) | 605 | ENDPROC(early_idt_handler) |
601 | 606 | ||
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index e1aabdb314c8..a468c0a65c42 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S | |||
@@ -343,6 +343,9 @@ early_idt_handlers: | |||
343 | ENTRY(early_idt_handler) | 343 | ENTRY(early_idt_handler) |
344 | cld | 344 | cld |
345 | 345 | ||
346 | cmpl $2,(%rsp) # X86_TRAP_NMI | ||
347 | je is_nmi # Ignore NMI | ||
348 | |||
346 | cmpl $2,early_recursion_flag(%rip) | 349 | cmpl $2,early_recursion_flag(%rip) |
347 | jz 1f | 350 | jz 1f |
348 | incl early_recursion_flag(%rip) | 351 | incl early_recursion_flag(%rip) |
@@ -405,8 +408,9 @@ ENTRY(early_idt_handler) | |||
405 | popq %rdx | 408 | popq %rdx |
406 | popq %rcx | 409 | popq %rcx |
407 | popq %rax | 410 | popq %rax |
408 | addq $16,%rsp # drop vector number and error code | ||
409 | decl early_recursion_flag(%rip) | 411 | decl early_recursion_flag(%rip) |
412 | is_nmi: | ||
413 | addq $16,%rsp # drop vector number and error code | ||
410 | INTERRUPT_RETURN | 414 | INTERRUPT_RETURN |
411 | ENDPROC(early_idt_handler) | 415 | ENDPROC(early_idt_handler) |
412 | 416 | ||
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index e8368c6dd2a2..d5dd80814419 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin); | |||
86 | 86 | ||
87 | void __kernel_fpu_end(void) | 87 | void __kernel_fpu_end(void) |
88 | { | 88 | { |
89 | if (use_eager_fpu()) | 89 | if (use_eager_fpu()) { |
90 | math_state_restore(); | 90 | /* |
91 | else | 91 | * For eager fpu, most of the time tsk_used_math() is true. |
92 | * Restore the user math as we are done with the kernel usage. | ||
93 | * At a few points, such as during thread exit and signal handling, | ||
94 | * tsk_used_math() is false. Those few places will take proper | ||
95 | * actions, so we don't need to restore the math here. | ||
96 | */ | ||
97 | if (likely(tsk_used_math(current))) | ||
98 | math_state_restore(); | ||
99 | } else { | ||
92 | stts(); | 100 | stts(); |
101 | } | ||
93 | } | 102 | } |
94 | EXPORT_SYMBOL(__kernel_fpu_end); | 103 | EXPORT_SYMBOL(__kernel_fpu_end); |
95 | 104 | ||
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 4eabc160696f..679cef0791cd 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c | |||
@@ -279,5 +279,7 @@ void arch_crash_save_vmcoreinfo(void) | |||
279 | VMCOREINFO_SYMBOL(node_data); | 279 | VMCOREINFO_SYMBOL(node_data); |
280 | VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); | 280 | VMCOREINFO_LENGTH(node_data, MAX_NUMNODES); |
281 | #endif | 281 | #endif |
282 | vmcoreinfo_append_str("KERNELOFFSET=%lx\n", | ||
283 | (unsigned long)&_text - __START_KERNEL); | ||
282 | } | 284 | } |
283 | 285 | ||
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 7c6acd4b8995..ff898bbf579d 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev) | |||
529 | return; | 529 | return; |
530 | 530 | ||
531 | pci_read_config_dword(nb_ht, 0x60, &val); | 531 | pci_read_config_dword(nb_ht, 0x60, &val); |
532 | node = val & 7; | 532 | node = pcibus_to_node(dev->bus) | (val & 7); |
533 | /* | 533 | /* |
534 | * Some hardware may return an invalid node ID, | 534 | * Some hardware may return an invalid node ID, |
535 | * so check it first: | 535 | * so check it first: |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 06853e670354..ce72964b2f46 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -1239,14 +1239,8 @@ void __init setup_arch(char **cmdline_p) | |||
1239 | register_refined_jiffies(CLOCK_TICK_RATE); | 1239 | register_refined_jiffies(CLOCK_TICK_RATE); |
1240 | 1240 | ||
1241 | #ifdef CONFIG_EFI | 1241 | #ifdef CONFIG_EFI |
1242 | /* Once setup is done above, unmap the EFI memory map on | 1242 | if (efi_enabled(EFI_BOOT)) |
1243 | * mismatched firmware/kernel archtectures since there is no | 1243 | efi_apply_memmap_quirks(); |
1244 | * support for runtime services. | ||
1245 | */ | ||
1246 | if (efi_enabled(EFI_BOOT) && !efi_is_native()) { | ||
1247 | pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); | ||
1248 | efi_unmap_memmap(); | ||
1249 | } | ||
1250 | #endif | 1244 | #endif |
1251 | } | 1245 | } |
1252 | 1246 | ||
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index e50425d0f5f7..9b531351a587 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -2672,6 +2672,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write, | |||
2672 | break; | 2672 | break; |
2673 | } | 2673 | } |
2674 | 2674 | ||
2675 | drop_large_spte(vcpu, iterator.sptep); | ||
2675 | if (!is_shadow_present_pte(*iterator.sptep)) { | 2676 | if (!is_shadow_present_pte(*iterator.sptep)) { |
2676 | u64 base_addr = iterator.addr; | 2677 | u64 base_addr = iterator.addr; |
2677 | 2678 | ||
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index e81df8fce027..2de1bc09a8d4 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -3002,10 +3002,8 @@ static int cr8_write_interception(struct vcpu_svm *svm) | |||
3002 | u8 cr8_prev = kvm_get_cr8(&svm->vcpu); | 3002 | u8 cr8_prev = kvm_get_cr8(&svm->vcpu); |
3003 | /* instruction emulation calls kvm_set_cr8() */ | 3003 | /* instruction emulation calls kvm_set_cr8() */ |
3004 | r = cr_interception(svm); | 3004 | r = cr_interception(svm); |
3005 | if (irqchip_in_kernel(svm->vcpu.kvm)) { | 3005 | if (irqchip_in_kernel(svm->vcpu.kvm)) |
3006 | clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); | ||
3007 | return r; | 3006 | return r; |
3008 | } | ||
3009 | if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) | 3007 | if (cr8_prev <= kvm_get_cr8(&svm->vcpu)) |
3010 | return r; | 3008 | return r; |
3011 | kvm_run->exit_reason = KVM_EXIT_SET_TPR; | 3009 | kvm_run->exit_reason = KVM_EXIT_SET_TPR; |
@@ -3567,6 +3565,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) | |||
3567 | if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) | 3565 | if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK)) |
3568 | return; | 3566 | return; |
3569 | 3567 | ||
3568 | clr_cr_intercept(svm, INTERCEPT_CR8_WRITE); | ||
3569 | |||
3570 | if (irr == -1) | 3570 | if (irr == -1) |
3571 | return; | 3571 | return; |
3572 | 3572 | ||
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index a06f101ef64b..392752834751 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -6688,7 +6688,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu) | |||
6688 | else if (is_page_fault(intr_info)) | 6688 | else if (is_page_fault(intr_info)) |
6689 | return enable_ept; | 6689 | return enable_ept; |
6690 | else if (is_no_device(intr_info) && | 6690 | else if (is_no_device(intr_info) && |
6691 | !(nested_read_cr0(vmcs12) & X86_CR0_TS)) | 6691 | !(vmcs12->guest_cr0 & X86_CR0_TS)) |
6692 | return 0; | 6692 | return 0; |
6693 | return vmcs12->exception_bitmap & | 6693 | return vmcs12->exception_bitmap & |
6694 | (1u << (intr_info & INTR_INFO_VECTOR_MASK)); | 6694 | (1u << (intr_info & INTR_INFO_VECTOR_MASK)); |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 39c28f09dfd5..2b8578432d5b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -6186,7 +6186,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu) | |||
6186 | frag->len -= len; | 6186 | frag->len -= len; |
6187 | } | 6187 | } |
6188 | 6188 | ||
6189 | if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) { | 6189 | if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) { |
6190 | vcpu->mmio_needed = 0; | 6190 | vcpu->mmio_needed = 0; |
6191 | 6191 | ||
6192 | /* FIXME: return into emulator if single-stepping. */ | 6192 | /* FIXME: return into emulator if single-stepping. */ |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 6dea040cc3a1..a10c8c792161 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -1020,13 +1020,17 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs) | |||
1020 | * This routine handles page faults. It determines the address, | 1020 | * This routine handles page faults. It determines the address, |
1021 | * and the problem, and then passes it off to one of the appropriate | 1021 | * and the problem, and then passes it off to one of the appropriate |
1022 | * routines. | 1022 | * routines. |
1023 | * | ||
1024 | * This function must be marked noinline because both of its callers, | ||
1025 | * {,trace_}do_page_fault(), are marked notrace. Keeping it an actual function | ||
1026 | * guarantees there's a function trace entry. | ||
1023 | */ | 1027 | */ |
1024 | static void __kprobes | 1028 | static void __kprobes noinline |
1025 | __do_page_fault(struct pt_regs *regs, unsigned long error_code) | 1029 | __do_page_fault(struct pt_regs *regs, unsigned long error_code, |
1030 | unsigned long address) | ||
1026 | { | 1031 | { |
1027 | struct vm_area_struct *vma; | 1032 | struct vm_area_struct *vma; |
1028 | struct task_struct *tsk; | 1033 | struct task_struct *tsk; |
1029 | unsigned long address; | ||
1030 | struct mm_struct *mm; | 1034 | struct mm_struct *mm; |
1031 | int fault; | 1035 | int fault; |
1032 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; | 1036 | unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; |
@@ -1034,9 +1038,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code) | |||
1034 | tsk = current; | 1038 | tsk = current; |
1035 | mm = tsk->mm; | 1039 | mm = tsk->mm; |
1036 | 1040 | ||
1037 | /* Get the faulting address: */ | ||
1038 | address = read_cr2(); | ||
1039 | |||
1040 | /* | 1041 | /* |
1041 | * Detect and handle instructions that would cause a page fault for | 1042 | * Detect and handle instructions that would cause a page fault for |
1042 | * both a tracked kernel page and a userspace page. | 1043 | * both a tracked kernel page and a userspace page. |
@@ -1248,32 +1249,50 @@ good_area: | |||
1248 | up_read(&mm->mmap_sem); | 1249 | up_read(&mm->mmap_sem); |
1249 | } | 1250 | } |
1250 | 1251 | ||
1251 | dotraplinkage void __kprobes | 1252 | dotraplinkage void __kprobes notrace |
1252 | do_page_fault(struct pt_regs *regs, unsigned long error_code) | 1253 | do_page_fault(struct pt_regs *regs, unsigned long error_code) |
1253 | { | 1254 | { |
1255 | unsigned long address = read_cr2(); /* Get the faulting address */ | ||
1254 | enum ctx_state prev_state; | 1256 | enum ctx_state prev_state; |
1255 | 1257 | ||
1258 | /* | ||
1259 | * We must have this function tagged with __kprobes, notrace and call | ||
1260 | * read_cr2() before calling anything else, to avoid calling any kind | ||
1261 | * of tracing machinery before we've observed the CR2 value. | ||
1262 | * | ||
1263 | * exception_{enter,exit}() contain all sorts of tracepoints. | ||
1264 | */ | ||
1265 | |||
1256 | prev_state = exception_enter(); | 1266 | prev_state = exception_enter(); |
1257 | __do_page_fault(regs, error_code); | 1267 | __do_page_fault(regs, error_code, address); |
1258 | exception_exit(prev_state); | 1268 | exception_exit(prev_state); |
1259 | } | 1269 | } |
1260 | 1270 | ||
1261 | static void trace_page_fault_entries(struct pt_regs *regs, | 1271 | #ifdef CONFIG_TRACING |
1272 | static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs, | ||
1262 | unsigned long error_code) | 1273 | unsigned long error_code) |
1263 | { | 1274 | { |
1264 | if (user_mode(regs)) | 1275 | if (user_mode(regs)) |
1265 | trace_page_fault_user(read_cr2(), regs, error_code); | 1276 | trace_page_fault_user(address, regs, error_code); |
1266 | else | 1277 | else |
1267 | trace_page_fault_kernel(read_cr2(), regs, error_code); | 1278 | trace_page_fault_kernel(address, regs, error_code); |
1268 | } | 1279 | } |
1269 | 1280 | ||
1270 | dotraplinkage void __kprobes | 1281 | dotraplinkage void __kprobes notrace |
1271 | trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) | 1282 | trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) |
1272 | { | 1283 | { |
1284 | /* | ||
1285 | * The exception_enter and tracepoint processing could | ||
1286 | * trigger other page faults (user space callchain | ||
1287 | * reading) and destroy the original cr2 value, so read | ||
1288 | * the faulting address now. | ||
1289 | */ | ||
1290 | unsigned long address = read_cr2(); | ||
1273 | enum ctx_state prev_state; | 1291 | enum ctx_state prev_state; |
1274 | 1292 | ||
1275 | prev_state = exception_enter(); | 1293 | prev_state = exception_enter(); |
1276 | trace_page_fault_entries(regs, error_code); | 1294 | trace_page_fault_entries(address, regs, error_code); |
1277 | __do_page_fault(regs, error_code); | 1295 | __do_page_fault(regs, error_code, address); |
1278 | exception_exit(prev_state); | 1296 | exception_exit(prev_state); |
1279 | } | 1297 | } |
1298 | #endif /* CONFIG_TRACING */ | ||
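Because the fault.c hunk interleaves several changes, the resulting entry points are reconstructed here (right-hand column only, with trace_page_fault_entries() omitted); the point is that both read CR2 before exception_enter() or any tracepoint can run:

    dotraplinkage void __kprobes notrace
    do_page_fault(struct pt_regs *regs, unsigned long error_code)
    {
            unsigned long address = read_cr2();     /* Get the faulting address */
            enum ctx_state prev_state;

            /*
             * We must have this function tagged with __kprobes, notrace and call
             * read_cr2() before calling anything else, to avoid calling any kind
             * of tracing machinery before we've observed the CR2 value.
             *
             * exception_{enter,exit}() contain all sorts of tracepoints.
             */
            prev_state = exception_enter();
            __do_page_fault(regs, error_code, address);
            exception_exit(prev_state);
    }

    #ifdef CONFIG_TRACING
    dotraplinkage void __kprobes notrace
    trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
    {
            /*
             * The exception_enter and tracepoint processing could trigger other
             * page faults (user space callchain reading) and destroy the original
             * cr2 value, so read the faulting address now.
             */
            unsigned long address = read_cr2();
            enum ctx_state prev_state;

            prev_state = exception_enter();
            trace_page_fault_entries(address, regs, error_code);
            __do_page_fault(regs, error_code, address);
            exception_exit(prev_state);
    }
    #endif /* CONFIG_TRACING */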
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S index 877b9a1b2152..01495755701b 100644 --- a/arch/x86/net/bpf_jit.S +++ b/arch/x86/net/bpf_jit.S | |||
@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh: | |||
140 | push %r9; \ | 140 | push %r9; \ |
141 | push SKBDATA; \ | 141 | push SKBDATA; \ |
142 | /* rsi already has offset */ \ | 142 | /* rsi already has offset */ \ |
143 | mov $SIZE,%ecx; /* size */ \ | 143 | mov $SIZE,%edx; /* size */ \ |
144 | call bpf_internal_load_pointer_neg_helper; \ | 144 | call bpf_internal_load_pointer_neg_helper; \ |
145 | test %rax,%rax; \ | 145 | test %rax,%rax; \ |
146 | pop SKBDATA; \ | 146 | pop SKBDATA; \ |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 1a201ac7cef8..b97acecf3fd9 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <asm/tlbflush.h> | 52 | #include <asm/tlbflush.h> |
53 | #include <asm/x86_init.h> | 53 | #include <asm/x86_init.h> |
54 | #include <asm/rtc.h> | 54 | #include <asm/rtc.h> |
55 | #include <asm/uv/uv.h> | ||
55 | 56 | ||
56 | #define EFI_DEBUG | 57 | #define EFI_DEBUG |
57 | 58 | ||
@@ -1210,3 +1211,22 @@ static int __init parse_efi_cmdline(char *str) | |||
1210 | return 0; | 1211 | return 0; |
1211 | } | 1212 | } |
1212 | early_param("efi", parse_efi_cmdline); | 1213 | early_param("efi", parse_efi_cmdline); |
1214 | |||
1215 | void __init efi_apply_memmap_quirks(void) | ||
1216 | { | ||
1217 | /* | ||
1218 | * Now that setup is done, unmap the EFI memory map on mismatched | ||
1219 | * firmware/kernel architectures since there is no support for runtime | ||
1220 | * services. | ||
1221 | */ | ||
1222 | if (!efi_is_native()) { | ||
1223 | pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n"); | ||
1224 | efi_unmap_memmap(); | ||
1225 | } | ||
1226 | |||
1227 | /* | ||
1228 | * UV doesn't support the new EFI pagetable mapping yet. | ||
1229 | */ | ||
1230 | if (is_uv_system()) | ||
1231 | set_bit(EFI_OLD_MEMMAP, &x86_efi_facility); | ||
1232 | } | ||
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h index 7d01b8c56c00..cc04e67bfd05 100644 --- a/arch/x86/um/asm/barrier.h +++ b/arch/x86/um/asm/barrier.h | |||
@@ -40,11 +40,7 @@ | |||
40 | #define smp_rmb() barrier() | 40 | #define smp_rmb() barrier() |
41 | #endif /* CONFIG_X86_PPRO_FENCE */ | 41 | #endif /* CONFIG_X86_PPRO_FENCE */ |
42 | 42 | ||
43 | #ifdef CONFIG_X86_OOSTORE | ||
44 | #define smp_wmb() wmb() | ||
45 | #else /* CONFIG_X86_OOSTORE */ | ||
46 | #define smp_wmb() barrier() | 43 | #define smp_wmb() barrier() |
47 | #endif /* CONFIG_X86_OOSTORE */ | ||
48 | 44 | ||
49 | #define smp_read_barrier_depends() read_barrier_depends() | 45 | #define smp_read_barrier_depends() read_barrier_depends() |
50 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) | 46 | #define set_mb(var, value) do { (void)xchg(&var, value); } while (0) |
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index ba56e11cbf77..c87ae7c6e5f9 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig | |||
@@ -20,6 +20,7 @@ config XTENSA | |||
20 | select HAVE_FUNCTION_TRACER | 20 | select HAVE_FUNCTION_TRACER |
21 | select HAVE_IRQ_TIME_ACCOUNTING | 21 | select HAVE_IRQ_TIME_ACCOUNTING |
22 | select HAVE_PERF_EVENTS | 22 | select HAVE_PERF_EVENTS |
23 | select COMMON_CLK | ||
23 | help | 24 | help |
24 | Xtensa processors are 32-bit RISC machines designed by Tensilica | 25 | Xtensa processors are 32-bit RISC machines designed by Tensilica |
25 | primarily for embedded systems. These processors are both | 26 | primarily for embedded systems. These processors are both |
@@ -80,7 +81,6 @@ choice | |||
80 | config XTENSA_VARIANT_FSF | 81 | config XTENSA_VARIANT_FSF |
81 | bool "fsf - default (not generic) configuration" | 82 | bool "fsf - default (not generic) configuration" |
82 | select MMU | 83 | select MMU |
83 | select HAVE_XTENSA_GPIO32 | ||
84 | 84 | ||
85 | config XTENSA_VARIANT_DC232B | 85 | config XTENSA_VARIANT_DC232B |
86 | bool "dc232b - Diamond 232L Standard Core Rev.B (LE)" | 86 | bool "dc232b - Diamond 232L Standard Core Rev.B (LE)" |
@@ -135,7 +135,6 @@ config HAVE_SMP | |||
135 | config SMP | 135 | config SMP |
136 | bool "Enable Symmetric multi-processing support" | 136 | bool "Enable Symmetric multi-processing support" |
137 | depends on HAVE_SMP | 137 | depends on HAVE_SMP |
138 | select USE_GENERIC_SMP_HELPERS | ||
139 | select GENERIC_SMP_IDLE_THREAD | 138 | select GENERIC_SMP_IDLE_THREAD |
140 | help | 139 | help |
141 | Enables SMP Software; allows more than one CPU/CORE | 140 | Enables SMP Software; allows more than one CPU/CORE |
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi index 46b4f5eab421..e7370b11348e 100644 --- a/arch/xtensa/boot/dts/xtfpga.dtsi +++ b/arch/xtensa/boot/dts/xtfpga.dtsi | |||
@@ -35,6 +35,13 @@ | |||
35 | interrupt-controller; | 35 | interrupt-controller; |
36 | }; | 36 | }; |
37 | 37 | ||
38 | clocks { | ||
39 | osc: main-oscillator { | ||
40 | #clock-cells = <0>; | ||
41 | compatible = "fixed-clock"; | ||
42 | }; | ||
43 | }; | ||
44 | |||
38 | serial0: serial@fd050020 { | 45 | serial0: serial@fd050020 { |
39 | device_type = "serial"; | 46 | device_type = "serial"; |
40 | compatible = "ns16550a"; | 47 | compatible = "ns16550a"; |
@@ -42,9 +49,7 @@ | |||
42 | reg = <0xfd050020 0x20>; | 49 | reg = <0xfd050020 0x20>; |
43 | reg-shift = <2>; | 50 | reg-shift = <2>; |
44 | interrupts = <0 1>; /* external irq 0 */ | 51 | interrupts = <0 1>; /* external irq 0 */ |
45 | /* Filled in by platform_setup from FPGA register | 52 | clocks = <&osc>; |
46 | * clock-frequency = <100000000>; | ||
47 | */ | ||
48 | }; | 53 | }; |
49 | 54 | ||
50 | enet0: ethoc@fd030000 { | 55 | enet0: ethoc@fd030000 { |
@@ -52,5 +57,6 @@ | |||
52 | reg = <0xfd030000 0x4000 0xfd800000 0x4000>; | 57 | reg = <0xfd030000 0x4000 0xfd800000 0x4000>; |
53 | interrupts = <1 1>; /* external irq 1 */ | 58 | interrupts = <1 1>; /* external irq 1 */ |
54 | local-mac-address = [00 50 c2 13 6f 00]; | 59 | local-mac-address = [00 50 c2 13 6f 00]; |
60 | clocks = <&osc>; | ||
55 | }; | 61 | }; |
56 | }; | 62 | }; |
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h index 2a042d430c25..74944207167e 100644 --- a/arch/xtensa/include/asm/io.h +++ b/arch/xtensa/include/asm/io.h | |||
@@ -25,7 +25,7 @@ | |||
25 | 25 | ||
26 | #ifdef CONFIG_MMU | 26 | #ifdef CONFIG_MMU |
27 | 27 | ||
28 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF | 28 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) |
29 | extern unsigned long xtensa_kio_paddr; | 29 | extern unsigned long xtensa_kio_paddr; |
30 | 30 | ||
31 | static inline unsigned long xtensa_get_kio_paddr(void) | 31 | static inline unsigned long xtensa_get_kio_paddr(void) |
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h index 8c194f6af45e..677bfcf4ee5d 100644 --- a/arch/xtensa/include/asm/traps.h +++ b/arch/xtensa/include/asm/traps.h | |||
@@ -23,25 +23,37 @@ void secondary_trap_init(void); | |||
23 | 23 | ||
24 | static inline void spill_registers(void) | 24 | static inline void spill_registers(void) |
25 | { | 25 | { |
26 | 26 | #if XCHAL_NUM_AREGS > 16 | |
27 | __asm__ __volatile__ ( | 27 | __asm__ __volatile__ ( |
28 | "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t" | 28 | " call12 1f\n" |
29 | "mov a12, a0\n\t" | 29 | " _j 2f\n" |
30 | "rsr a13, sar\n\t" | 30 | " retw\n" |
31 | "xsr a14, ps\n\t" | 31 | " .align 4\n" |
32 | "movi a0, _spill_registers\n\t" | 32 | "1:\n" |
33 | "rsync\n\t" | 33 | " _entry a1, 48\n" |
34 | "callx0 a0\n\t" | 34 | " addi a12, a0, 3\n" |
35 | "mov a0, a12\n\t" | 35 | #if XCHAL_NUM_AREGS > 32 |
36 | "wsr a13, sar\n\t" | 36 | " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n" |
37 | "wsr a14, ps\n\t" | 37 | " _entry a1, 48\n" |
38 | : : | 38 | " mov a12, a0\n" |
39 | #if defined(CONFIG_FRAME_POINTER) | 39 | " .endr\n" |
40 | : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15", | 40 | #endif |
41 | " _entry a1, 48\n" | ||
42 | #if XCHAL_NUM_AREGS % 12 == 0 | ||
43 | " mov a8, a8\n" | ||
44 | #elif XCHAL_NUM_AREGS % 12 == 4 | ||
45 | " mov a12, a12\n" | ||
46 | #elif XCHAL_NUM_AREGS % 12 == 8 | ||
47 | " mov a4, a4\n" | ||
48 | #endif | ||
49 | " retw\n" | ||
50 | "2:\n" | ||
51 | : : : "a12", "a13", "memory"); | ||
41 | #else | 52 | #else |
42 | : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15", | 53 | __asm__ __volatile__ ( |
54 | " mov a12, a12\n" | ||
55 | : : : "memory"); | ||
43 | #endif | 56 | #endif |
44 | "memory"); | ||
45 | } | 57 | } |
46 | 58 | ||
47 | #endif /* _XTENSA_TRAPS_H */ | 59 | #endif /* _XTENSA_TRAPS_H */ |
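A brief usage sketch for the rewritten inline (the caller below is hypothetical, not from this patch): spill_registers() is meant to be invoked before walking a task's stack, so that every live register window has been written back to its spill area first:

    /* Hypothetical caller, for illustration only. */
    static void example_backtrace(void)
    {
            spill_registers();      /* force live windows out to their stack spill areas */
            /* ... the call frames can now be read from memory ... */
    }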
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h index 5791b45d5a5d..f74ddfbb92ef 100644 --- a/arch/xtensa/include/asm/vectors.h +++ b/arch/xtensa/include/asm/vectors.h | |||
@@ -25,7 +25,7 @@ | |||
25 | #define XCHAL_KIO_DEFAULT_PADDR 0xf0000000 | 25 | #define XCHAL_KIO_DEFAULT_PADDR 0xf0000000 |
26 | #define XCHAL_KIO_SIZE 0x10000000 | 26 | #define XCHAL_KIO_SIZE 0x10000000 |
27 | 27 | ||
28 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF | 28 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) |
29 | #define XCHAL_KIO_PADDR xtensa_get_kio_paddr() | 29 | #define XCHAL_KIO_PADDR xtensa_get_kio_paddr() |
30 | #else | 30 | #else |
31 | #define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR | 31 | #define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR |
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index 51940fec6990..b9395529f02d 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h | |||
@@ -734,7 +734,12 @@ __SYSCALL(332, sys_finit_module, 3) | |||
734 | #define __NR_accept4 333 | 734 | #define __NR_accept4 333 |
735 | __SYSCALL(333, sys_accept4, 4) | 735 | __SYSCALL(333, sys_accept4, 4) |
736 | 736 | ||
737 | #define __NR_syscall_count 334 | 737 | #define __NR_sched_setattr 334 |
738 | __SYSCALL(334, sys_sched_setattr, 2) | ||
739 | #define __NR_sched_getattr 335 | ||
740 | __SYSCALL(335, sys_sched_getattr, 3) | ||
741 | |||
742 | #define __NR_syscall_count 336 | ||
738 | 743 | ||
739 | /* | 744 | /* |
740 | * sysxtensa syscall handler | 745 | * sysxtensa syscall handler |
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index 21dbe6bdb8ed..ef7f4990722b 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S | |||
@@ -1081,196 +1081,53 @@ ENTRY(fast_syscall_spill_registers) | |||
1081 | 1081 | ||
1082 | rsr a0, sar | 1082 | rsr a0, sar |
1083 | s32i a3, a2, PT_AREG3 | 1083 | s32i a3, a2, PT_AREG3 |
1084 | s32i a4, a2, PT_AREG4 | 1084 | s32i a0, a2, PT_SAR |
1085 | s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5 | ||
1086 | 1085 | ||
1087 | /* The spill routine might clobber a7, a11, and a15. */ | 1086 | /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */ |
1088 | 1087 | ||
1088 | s32i a4, a2, PT_AREG4 | ||
1089 | s32i a7, a2, PT_AREG7 | 1089 | s32i a7, a2, PT_AREG7 |
1090 | s32i a8, a2, PT_AREG8 | ||
1090 | s32i a11, a2, PT_AREG11 | 1091 | s32i a11, a2, PT_AREG11 |
1092 | s32i a12, a2, PT_AREG12 | ||
1091 | s32i a15, a2, PT_AREG15 | 1093 | s32i a15, a2, PT_AREG15 |
1092 | 1094 | ||
1093 | call0 _spill_registers # destroys a3, a4, and SAR | ||
1094 | |||
1095 | /* Advance PC, restore registers and SAR, and return from exception. */ | ||
1096 | |||
1097 | l32i a3, a2, PT_AREG5 | ||
1098 | l32i a4, a2, PT_AREG4 | ||
1099 | l32i a0, a2, PT_AREG0 | ||
1100 | wsr a3, sar | ||
1101 | l32i a3, a2, PT_AREG3 | ||
1102 | |||
1103 | /* Restore clobbered registers. */ | ||
1104 | |||
1105 | l32i a7, a2, PT_AREG7 | ||
1106 | l32i a11, a2, PT_AREG11 | ||
1107 | l32i a15, a2, PT_AREG15 | ||
1108 | |||
1109 | movi a2, 0 | ||
1110 | rfe | ||
1111 | |||
1112 | ENDPROC(fast_syscall_spill_registers) | ||
1113 | |||
1114 | /* Fixup handler. | ||
1115 | * | ||
1116 | * We get here if the spill routine causes an exception, e.g. tlb miss. | ||
1117 | * We basically restore WINDOWBASE and WINDOWSTART to the condition when | ||
1118 | * we entered the spill routine and jump to the user exception handler. | ||
1119 | * | ||
1120 | * a0: value of depc, original value in depc | ||
1121 | * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE | ||
1122 | * a3: exctable, original value in excsave1 | ||
1123 | */ | ||
1124 | |||
1125 | ENTRY(fast_syscall_spill_registers_fixup) | ||
1126 | |||
1127 | rsr a2, windowbase # get current windowbase (a2 is saved) | ||
1128 | xsr a0, depc # restore depc and a0 | ||
1129 | ssl a2 # set shift (32 - WB) | ||
1130 | |||
1131 | /* We need to make sure the current registers (a0-a3) are preserved. | ||
1132 | * To do this, we simply set the bit for the current window frame | ||
1133 | * in WS, so that the exception handlers save them to the task stack. | ||
1134 | */ | ||
1135 | |||
1136 | xsr a3, excsave1 # get spill-mask | ||
1137 | slli a3, a3, 1 # shift left by one | ||
1138 | |||
1139 | slli a2, a3, 32-WSBITS | ||
1140 | src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... | ||
1141 | wsr a2, windowstart # set corrected windowstart | ||
1142 | |||
1143 | srli a3, a3, 1 | ||
1144 | rsr a2, excsave1 | ||
1145 | l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 | ||
1146 | xsr a2, excsave1 | ||
1147 | s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 | ||
1148 | l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) | ||
1149 | xsr a2, excsave1 | ||
1150 | |||
1151 | /* Return to the original (user task) WINDOWBASE. | ||
1152 | * We leave the following frame behind: | ||
1153 | * a0, a1, a2 same | ||
1154 | * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) | ||
1155 | * depc: depc (we have to return to that address) | ||
1156 | * excsave_1: exctable | ||
1157 | */ | ||
1158 | |||
1159 | wsr a3, windowbase | ||
1160 | rsync | ||
1161 | |||
1162 | /* We are now in the original frame when we entered _spill_registers: | ||
1163 | * a0: return address | ||
1164 | * a1: used, stack pointer | ||
1165 | * a2: kernel stack pointer | ||
1166 | * a3: available | ||
1167 | * depc: exception address | ||
1168 | * excsave: exctable | ||
1169 | * Note: This frame might be the same as above. | ||
1170 | */ | ||
1171 | |||
1172 | /* Setup stack pointer. */ | ||
1173 | |||
1174 | addi a2, a2, -PT_USER_SIZE | ||
1175 | s32i a0, a2, PT_AREG0 | ||
1176 | |||
1177 | /* Make sure we return to this fixup handler. */ | ||
1178 | |||
1179 | movi a3, fast_syscall_spill_registers_fixup_return | ||
1180 | s32i a3, a2, PT_DEPC # setup depc | ||
1181 | |||
1182 | /* Jump to the exception handler. */ | ||
1183 | |||
1184 | rsr a3, excsave1 | ||
1185 | rsr a0, exccause | ||
1186 | addx4 a0, a0, a3 # find entry in table | ||
1187 | l32i a0, a0, EXC_TABLE_FAST_USER # load handler | ||
1188 | l32i a3, a3, EXC_TABLE_DOUBLE_SAVE | ||
1189 | jx a0 | ||
1190 | |||
1191 | ENDPROC(fast_syscall_spill_registers_fixup) | ||
1192 | |||
1193 | ENTRY(fast_syscall_spill_registers_fixup_return) | ||
1194 | |||
1195 | /* When we return here, all registers have been restored (a2: DEPC) */ | ||
1196 | |||
1197 | wsr a2, depc # exception address | ||
1198 | |||
1199 | /* Restore fixup handler. */ | ||
1200 | |||
1201 | rsr a2, excsave1 | ||
1202 | s32i a3, a2, EXC_TABLE_DOUBLE_SAVE | ||
1203 | movi a3, fast_syscall_spill_registers_fixup | ||
1204 | s32i a3, a2, EXC_TABLE_FIXUP | ||
1205 | rsr a3, windowbase | ||
1206 | s32i a3, a2, EXC_TABLE_PARAM | ||
1207 | l32i a2, a2, EXC_TABLE_KSTK | ||
1208 | |||
1209 | /* Load WB at the time the exception occurred. */ | ||
1210 | |||
1211 | rsr a3, sar # WB is still in SAR | ||
1212 | neg a3, a3 | ||
1213 | wsr a3, windowbase | ||
1214 | rsync | ||
1215 | |||
1216 | rsr a3, excsave1 | ||
1217 | l32i a3, a3, EXC_TABLE_DOUBLE_SAVE | ||
1218 | |||
1219 | rfde | ||
1220 | |||
1221 | ENDPROC(fast_syscall_spill_registers_fixup_return) | ||
1222 | |||
1223 | /* | ||
1224 | * spill all registers. | ||
1225 | * | ||
1226 | * This is not a real function. The following conditions must be met: | ||
1227 | * | ||
1228 | * - must be called with call0. | ||
1229 | * - uses a3, a4 and SAR. | ||
1230 | * - the last 'valid' register of each frame are clobbered. | ||
1231 | * - the caller must have registered a fixup handler | ||
1232 | * (or be inside a critical section) | ||
1233 | * - PS_EXCM must be set (PS_WOE cleared?) | ||
1234 | */ | ||
1235 | |||
1236 | ENTRY(_spill_registers) | ||
1237 | |||
1238 | /* | 1095 | /* |
1239 | * Rotate ws so that the current windowbase is at bit 0. | 1096 | * Rotate ws so that the current windowbase is at bit 0. |
1240 | * Assume ws = xxxwww1yy (www1 current window frame). | 1097 | * Assume ws = xxxwww1yy (www1 current window frame). |
1241 | * Rotate ws right so that a4 = yyxxxwww1. | 1098 | * Rotate ws right so that a4 = yyxxxwww1. |
1242 | */ | 1099 | */ |
1243 | 1100 | ||
1244 | rsr a4, windowbase | 1101 | rsr a0, windowbase |
1245 | rsr a3, windowstart # a3 = xxxwww1yy | 1102 | rsr a3, windowstart # a3 = xxxwww1yy |
1246 | ssr a4 # holds WB | 1103 | ssr a0 # holds WB |
1247 | slli a4, a3, WSBITS | 1104 | slli a0, a3, WSBITS |
1248 | or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy | 1105 | or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy |
1249 | srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 | 1106 | srl a3, a3 # a3 = 00xxxwww1yyxxxwww1 |
1250 | 1107 | ||
1251 | /* We are done if there are no more than the current register frame. */ | 1108 | /* We are done if there are no more than the current register frame. */ |
1252 | 1109 | ||
1253 | extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww | 1110 | extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww |
1254 | movi a4, (1 << (WSBITS-1)) | 1111 | movi a0, (1 << (WSBITS-1)) |
1255 | _beqz a3, .Lnospill # only one active frame? jump | 1112 | _beqz a3, .Lnospill # only one active frame? jump |
1256 | 1113 | ||
1257 | /* We want 1 at the top, so that we return to the current windowbase */ | 1114 | /* We want 1 at the top, so that we return to the current windowbase */ |
1258 | 1115 | ||
1259 | or a3, a3, a4 # 1yyxxxwww | 1116 | or a3, a3, a0 # 1yyxxxwww |
1260 | 1117 | ||
1261 | /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ | 1118 | /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */ |
1262 | 1119 | ||
1263 | wsr a3, windowstart # save shifted windowstart | 1120 | wsr a3, windowstart # save shifted windowstart |
1264 | neg a4, a3 | 1121 | neg a0, a3 |
1265 | and a3, a4, a3 # first bit set from right: 000010000 | 1122 | and a3, a0, a3 # first bit set from right: 000010000 |
1266 | 1123 | ||
1267 | ffs_ws a4, a3 # a4: shifts to skip empty frames | 1124 | ffs_ws a0, a3 # a0: shifts to skip empty frames |
1268 | movi a3, WSBITS | 1125 | movi a3, WSBITS |
1269 | sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right | 1126 | sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right |
1270 | ssr a4 # save in SAR for later. | 1127 | ssr a0 # save in SAR for later. |
1271 | 1128 | ||
1272 | rsr a3, windowbase | 1129 | rsr a3, windowbase |
1273 | add a3, a3, a4 | 1130 | add a3, a3, a0 |
1274 | wsr a3, windowbase | 1131 | wsr a3, windowbase |
1275 | rsync | 1132 | rsync |
1276 | 1133 | ||
@@ -1285,22 +1142,6 @@ ENTRY(_spill_registers) | |||
1285 | * we have to save 4,8. or 12 registers. | 1142 | * we have to save 4,8. or 12 registers. |
1286 | */ | 1143 | */ |
1287 | 1144 | ||
1288 | _bbsi.l a3, 1, .Lc4 | ||
1289 | _bbsi.l a3, 2, .Lc8 | ||
1290 | |||
1291 | /* Special case: we have a call12-frame starting at a4. */ | ||
1292 | |||
1293 | _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first) | ||
1294 | |||
1295 | s32e a4, a1, -16 # a1 is valid with an empty spill area | ||
1296 | l32e a4, a5, -12 | ||
1297 | s32e a8, a4, -48 | ||
1298 | mov a8, a4 | ||
1299 | l32e a4, a1, -16 | ||
1300 | j .Lc12c | ||
1301 | |||
1302 | .Lnospill: | ||
1303 | ret | ||
1304 | 1145 | ||
1305 | .Lloop: _bbsi.l a3, 1, .Lc4 | 1146 | .Lloop: _bbsi.l a3, 1, .Lc4 |
1306 | _bbci.l a3, 2, .Lc12 | 1147 | _bbci.l a3, 2, .Lc12 |
@@ -1314,20 +1155,10 @@ ENTRY(_spill_registers) | |||
1314 | s32e a9, a4, -28 | 1155 | s32e a9, a4, -28 |
1315 | s32e a10, a4, -24 | 1156 | s32e a10, a4, -24 |
1316 | s32e a11, a4, -20 | 1157 | s32e a11, a4, -20 |
1317 | |||
1318 | srli a11, a3, 2 # shift windowbase by 2 | 1158 | srli a11, a3, 2 # shift windowbase by 2 |
1319 | rotw 2 | 1159 | rotw 2 |
1320 | _bnei a3, 1, .Lloop | 1160 | _bnei a3, 1, .Lloop |
1321 | 1161 | j .Lexit | |
1322 | .Lexit: /* Done. Do the final rotation, set WS, and return. */ | ||
1323 | |||
1324 | rotw 1 | ||
1325 | rsr a3, windowbase | ||
1326 | ssl a3 | ||
1327 | movi a3, 1 | ||
1328 | sll a3, a3 | ||
1329 | wsr a3, windowstart | ||
1330 | ret | ||
1331 | 1162 | ||
1332 | .Lc4: s32e a4, a9, -16 | 1163 | .Lc4: s32e a4, a9, -16 |
1333 | s32e a5, a9, -12 | 1164 | s32e a5, a9, -12 |
@@ -1343,11 +1174,11 @@ ENTRY(_spill_registers) | |||
1343 | 1174 | ||
1344 | /* 12-register frame (call12) */ | 1175 | /* 12-register frame (call12) */ |
1345 | 1176 | ||
1346 | l32e a2, a5, -12 | 1177 | l32e a0, a5, -12 |
1347 | s32e a8, a2, -48 | 1178 | s32e a8, a0, -48 |
1348 | mov a8, a2 | 1179 | mov a8, a0 |
1349 | 1180 | ||
1350 | .Lc12c: s32e a9, a8, -44 | 1181 | s32e a9, a8, -44 |
1351 | s32e a10, a8, -40 | 1182 | s32e a10, a8, -40 |
1352 | s32e a11, a8, -36 | 1183 | s32e a11, a8, -36 |
1353 | s32e a12, a8, -32 | 1184 | s32e a12, a8, -32 |
@@ -1367,30 +1198,54 @@ ENTRY(_spill_registers) | |||
1367 | */ | 1198 | */ |
1368 | 1199 | ||
1369 | rotw 1 | 1200 | rotw 1 |
1370 | mov a5, a13 | 1201 | mov a4, a13 |
1371 | rotw -1 | 1202 | rotw -1 |
1372 | 1203 | ||
1373 | s32e a4, a9, -16 | 1204 | s32e a4, a8, -16 |
1374 | s32e a5, a9, -12 | 1205 | s32e a5, a8, -12 |
1375 | s32e a6, a9, -8 | 1206 | s32e a6, a8, -8 |
1376 | s32e a7, a9, -4 | 1207 | s32e a7, a8, -4 |
1377 | 1208 | ||
1378 | rotw 3 | 1209 | rotw 3 |
1379 | 1210 | ||
1380 | _beqi a3, 1, .Lexit | 1211 | _beqi a3, 1, .Lexit |
1381 | j .Lloop | 1212 | j .Lloop |
1382 | 1213 | ||
1383 | .Linvalid_mask: | 1214 | .Lexit: |
1384 | 1215 | ||
1385 | /* We get here because of an unrecoverable error in the window | 1216 | /* Done. Do the final rotation and set WS */ |
1386 | * registers. If we are in user space, we kill the application, | 1217 | |
1387 | * however, this condition is unrecoverable in kernel space. | 1218 | rotw 1 |
1388 | */ | 1219 | rsr a3, windowbase |
1220 | ssl a3 | ||
1221 | movi a3, 1 | ||
1222 | sll a3, a3 | ||
1223 | wsr a3, windowstart | ||
1224 | .Lnospill: | ||
1225 | |||
1226 | /* Advance PC, restore registers and SAR, and return from exception. */ | ||
1227 | |||
1228 | l32i a3, a2, PT_SAR | ||
1229 | l32i a0, a2, PT_AREG0 | ||
1230 | wsr a3, sar | ||
1231 | l32i a3, a2, PT_AREG3 | ||
1389 | 1232 | ||
1390 | rsr a0, ps | 1233 | /* Restore clobbered registers. */ |
1391 | _bbci.l a0, PS_UM_BIT, 1f | ||
1392 | 1234 | ||
1393 | /* User space: Setup a dummy frame and kill application. | 1235 | l32i a4, a2, PT_AREG4 |
1236 | l32i a7, a2, PT_AREG7 | ||
1237 | l32i a8, a2, PT_AREG8 | ||
1238 | l32i a11, a2, PT_AREG11 | ||
1239 | l32i a12, a2, PT_AREG12 | ||
1240 | l32i a15, a2, PT_AREG15 | ||
1241 | |||
1242 | movi a2, 0 | ||
1243 | rfe | ||
1244 | |||
1245 | .Linvalid_mask: | ||
1246 | |||
1247 | /* We get here because of an unrecoverable error in the window | ||
1248 | * registers, so set up a dummy frame and kill the user application. | ||
1394 | * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. | 1249 | * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer. |
1395 | */ | 1250 | */ |
1396 | 1251 | ||
@@ -1414,14 +1269,136 @@ ENTRY(_spill_registers) | |||
1414 | movi a4, do_exit | 1269 | movi a4, do_exit |
1415 | callx4 a4 | 1270 | callx4 a4 |
1416 | 1271 | ||
1417 | 1: /* Kernel space: PANIC! */ | 1272 | /* shouldn't return, so panic */ |
1418 | 1273 | ||
1419 | wsr a0, excsave1 | 1274 | wsr a0, excsave1 |
1420 | movi a0, unrecoverable_exception | 1275 | movi a0, unrecoverable_exception |
1421 | callx0 a0 # should not return | 1276 | callx0 a0 # should not return |
1422 | 1: j 1b | 1277 | 1: j 1b |
1423 | 1278 | ||
1424 | ENDPROC(_spill_registers) | 1279 | |
1280 | ENDPROC(fast_syscall_spill_registers) | ||
1281 | |||
1282 | /* Fixup handler. | ||
1283 | * | ||
1284 | * We get here if the spill routine causes an exception, e.g. tlb miss. | ||
1285 | * We basically restore WINDOWBASE and WINDOWSTART to the condition when | ||
1286 | * we entered the spill routine and jump to the user exception handler. | ||
1287 | * | ||
1288 | * Note that we only need to restore the bits in windowstart that have not | ||
1289 | * been spilled yet by the _spill_register routine. Luckily, a3 contains a | ||
1290 | * rotated windowstart with only those bits set for frames that haven't been | ||
1291 | * spilled yet. Because a3 is rotated such that bit 0 represents the register | ||
1292 | * frame for the current windowbase - 1, we need to rotate a3 left by the | ||
1293 | * value of the current windowbase + 1 and move it to windowstart. | ||
1294 | * | ||
1295 | * a0: value of depc, original value in depc | ||
1296 | * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE | ||
1297 | * a3: exctable, original value in excsave1 | ||
1298 | */ | ||
1299 | |||
1300 | ENTRY(fast_syscall_spill_registers_fixup) | ||
1301 | |||
1302 | rsr a2, windowbase # get current windowbase (a2 is saved) | ||
1303 | xsr a0, depc # restore depc and a0 | ||
1304 | ssl a2 # set shift (32 - WB) | ||
1305 | |||
1306 | /* We need to make sure the current registers (a0-a3) are preserved. | ||
1307 | * To do this, we simply set the bit for the current window frame | ||
1308 | * in WS, so that the exception handlers save them to the task stack. | ||
1309 | * | ||
1310 | * Note: we use a3 to set the windowbase, so we take special care | ||
1311 | * of it, saving it in the original _spill_registers frame across | ||
1312 | * the exception handler call. | ||
1313 | */ | ||
1314 | |||
1315 | xsr a3, excsave1 # get spill-mask | ||
1316 | slli a3, a3, 1 # shift left by one | ||
1317 | addi a3, a3, 1 # set the bit for the current window frame | ||
1318 | |||
1319 | slli a2, a3, 32-WSBITS | ||
1320 | src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy...... | ||
1321 | wsr a2, windowstart # set corrected windowstart | ||
1322 | |||
1323 | srli a3, a3, 1 | ||
1324 | rsr a2, excsave1 | ||
1325 | l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2 | ||
1326 | xsr a2, excsave1 | ||
1327 | s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3 | ||
1328 | l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task) | ||
1329 | xsr a2, excsave1 | ||
1330 | |||
1331 | /* Return to the original (user task) WINDOWBASE. | ||
1332 | * We leave the following frame behind: | ||
1333 | * a0, a1, a2 same | ||
1334 | * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE) | ||
1335 | * depc: depc (we have to return to that address) | ||
1336 | * excsave_1: exctable | ||
1337 | */ | ||
1338 | |||
1339 | wsr a3, windowbase | ||
1340 | rsync | ||
1341 | |||
1342 | /* We are now in the original frame when we entered _spill_registers: | ||
1343 | * a0: return address | ||
1344 | * a1: used, stack pointer | ||
1345 | * a2: kernel stack pointer | ||
1346 | * a3: available | ||
1347 | * depc: exception address | ||
1348 | * excsave: exctable | ||
1349 | * Note: This frame might be the same as above. | ||
1350 | */ | ||
1351 | |||
1352 | /* Setup stack pointer. */ | ||
1353 | |||
1354 | addi a2, a2, -PT_USER_SIZE | ||
1355 | s32i a0, a2, PT_AREG0 | ||
1356 | |||
1357 | /* Make sure we return to this fixup handler. */ | ||
1358 | |||
1359 | movi a3, fast_syscall_spill_registers_fixup_return | ||
1360 | s32i a3, a2, PT_DEPC # setup depc | ||
1361 | |||
1362 | /* Jump to the exception handler. */ | ||
1363 | |||
1364 | rsr a3, excsave1 | ||
1365 | rsr a0, exccause | ||
1366 | addx4 a0, a0, a3 # find entry in table | ||
1367 | l32i a0, a0, EXC_TABLE_FAST_USER # load handler | ||
1368 | l32i a3, a3, EXC_TABLE_DOUBLE_SAVE | ||
1369 | jx a0 | ||
1370 | |||
1371 | ENDPROC(fast_syscall_spill_registers_fixup) | ||
1372 | |||
1373 | ENTRY(fast_syscall_spill_registers_fixup_return) | ||
1374 | |||
1375 | /* When we return here, all registers have been restored (a2: DEPC) */ | ||
1376 | |||
1377 | wsr a2, depc # exception address | ||
1378 | |||
1379 | /* Restore fixup handler. */ | ||
1380 | |||
1381 | rsr a2, excsave1 | ||
1382 | s32i a3, a2, EXC_TABLE_DOUBLE_SAVE | ||
1383 | movi a3, fast_syscall_spill_registers_fixup | ||
1384 | s32i a3, a2, EXC_TABLE_FIXUP | ||
1385 | rsr a3, windowbase | ||
1386 | s32i a3, a2, EXC_TABLE_PARAM | ||
1387 | l32i a2, a2, EXC_TABLE_KSTK | ||
1388 | |||
1389 | /* Load WB at the time the exception occurred. */ | ||
1390 | |||
1391 | rsr a3, sar # WB is still in SAR | ||
1392 | neg a3, a3 | ||
1393 | wsr a3, windowbase | ||
1394 | rsync | ||
1395 | |||
1396 | rsr a3, excsave1 | ||
1397 | l32i a3, a3, EXC_TABLE_DOUBLE_SAVE | ||
1398 | |||
1399 | rfde | ||
1400 | |||
1401 | ENDPROC(fast_syscall_spill_registers_fixup_return) | ||
1425 | 1402 | ||
1426 | #ifdef CONFIG_MMU | 1403 | #ifdef CONFIG_MMU |
1427 | /* | 1404 | /* |
@@ -1794,6 +1771,43 @@ ENTRY(system_call) | |||
1794 | 1771 | ||
1795 | ENDPROC(system_call) | 1772 | ENDPROC(system_call) |
1796 | 1773 | ||
1774 | /* | ||
1775 | * Macro to spill live registers on the kernel stack. | ||
1776 | * | ||
1777 | * Entry condition: ps.woe is set, ps.excm is cleared | ||
1778 | * Exit condition: windowstart has single bit set | ||
1779 | * May clobber: a12, a13 | ||
1780 | */ | ||
1781 | .macro spill_registers_kernel | ||
1782 | |||
1783 | #if XCHAL_NUM_AREGS > 16 | ||
1784 | call12 1f | ||
1785 | _j 2f | ||
1786 | retw | ||
1787 | .align 4 | ||
1788 | 1: | ||
1789 | _entry a1, 48 | ||
1790 | addi a12, a0, 3 | ||
1791 | #if XCHAL_NUM_AREGS > 32 | ||
1792 | .rept (XCHAL_NUM_AREGS - 32) / 12 | ||
1793 | _entry a1, 48 | ||
1794 | mov a12, a0 | ||
1795 | .endr | ||
1796 | #endif | ||
1797 | _entry a1, 48 | ||
1798 | #if XCHAL_NUM_AREGS % 12 == 0 | ||
1799 | mov a8, a8 | ||
1800 | #elif XCHAL_NUM_AREGS % 12 == 4 | ||
1801 | mov a12, a12 | ||
1802 | #elif XCHAL_NUM_AREGS % 12 == 8 | ||
1803 | mov a4, a4 | ||
1804 | #endif | ||
1805 | retw | ||
1806 | 2: | ||
1807 | #else | ||
1808 | mov a12, a12 | ||
1809 | #endif | ||
1810 | .endm | ||
1797 | 1811 | ||
1798 | /* | 1812 | /* |
1799 | * Task switch. | 1813 | * Task switch. |
@@ -1806,21 +1820,20 @@ ENTRY(_switch_to) | |||
1806 | 1820 | ||
1807 | entry a1, 16 | 1821 | entry a1, 16 |
1808 | 1822 | ||
1809 | mov a12, a2 # preserve 'prev' (a2) | 1823 | mov a10, a2 # preserve 'prev' (a2) |
1810 | mov a13, a3 # and 'next' (a3) | 1824 | mov a11, a3 # and 'next' (a3) |
1811 | 1825 | ||
1812 | l32i a4, a2, TASK_THREAD_INFO | 1826 | l32i a4, a2, TASK_THREAD_INFO |
1813 | l32i a5, a3, TASK_THREAD_INFO | 1827 | l32i a5, a3, TASK_THREAD_INFO |
1814 | 1828 | ||
1815 | save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER | 1829 | save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER |
1816 | 1830 | ||
1817 | s32i a0, a12, THREAD_RA # save return address | 1831 | s32i a0, a10, THREAD_RA # save return address |
1818 | s32i a1, a12, THREAD_SP # save stack pointer | 1832 | s32i a1, a10, THREAD_SP # save stack pointer |
1819 | 1833 | ||
1820 | /* Disable ints while we manipulate the stack pointer. */ | 1834 | /* Disable ints while we manipulate the stack pointer. */ |
1821 | 1835 | ||
1822 | movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL | 1836 | rsil a14, LOCKLEVEL |
1823 | xsr a14, ps | ||
1824 | rsr a3, excsave1 | 1837 | rsr a3, excsave1 |
1825 | rsync | 1838 | rsync |
1826 | s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ | 1839 | s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */ |
@@ -1835,7 +1848,7 @@ ENTRY(_switch_to) | |||
1835 | 1848 | ||
1836 | /* Flush register file. */ | 1849 | /* Flush register file. */ |
1837 | 1850 | ||
1838 | call0 _spill_registers # destroys a3, a4, and SAR | 1851 | spill_registers_kernel |
1839 | 1852 | ||
1840 | /* Set kernel stack (and leave critical section) | 1853 | /* Set kernel stack (and leave critical section) |
1841 | * Note: It's safe to set it here. The stack will not be overwritten | 1854 | * Note: It's safe to set it here. The stack will not be overwritten |
@@ -1851,13 +1864,13 @@ ENTRY(_switch_to) | |||
1851 | 1864 | ||
1852 | /* restore context of the task 'next' */ | 1865 | /* restore context of the task 'next' */ |
1853 | 1866 | ||
1854 | l32i a0, a13, THREAD_RA # restore return address | 1867 | l32i a0, a11, THREAD_RA # restore return address |
1855 | l32i a1, a13, THREAD_SP # restore stack pointer | 1868 | l32i a1, a11, THREAD_SP # restore stack pointer |
1856 | 1869 | ||
1857 | load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER | 1870 | load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER |
1858 | 1871 | ||
1859 | wsr a14, ps | 1872 | wsr a14, ps |
1860 | mov a2, a12 # return 'prev' | 1873 | mov a2, a10 # return 'prev' |
1861 | rsync | 1874 | rsync |
1862 | 1875 | ||
1863 | retw | 1876 | retw |
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 7d12af1317f1..84fe931bb60e 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/bootmem.h> | 22 | #include <linux/bootmem.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <linux/percpu.h> | 24 | #include <linux/percpu.h> |
25 | #include <linux/clk-provider.h> | ||
25 | #include <linux/cpu.h> | 26 | #include <linux/cpu.h> |
26 | #include <linux/of_fdt.h> | 27 | #include <linux/of_fdt.h> |
27 | #include <linux/of_platform.h> | 28 | #include <linux/of_platform.h> |
@@ -276,6 +277,7 @@ void __init early_init_devtree(void *params) | |||
276 | 277 | ||
277 | static int __init xtensa_device_probe(void) | 278 | static int __init xtensa_device_probe(void) |
278 | { | 279 | { |
280 | of_clk_init(NULL); | ||
279 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); | 281 | of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); |
280 | return 0; | 282 | return 0; |
281 | } | 283 | } |
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 08b769d3b3a1..2a1823de69cc 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/platform.h> | 30 | #include <asm/platform.h> |
31 | 31 | ||
32 | unsigned long ccount_freq; /* ccount Hz */ | 32 | unsigned long ccount_freq; /* ccount Hz */ |
33 | EXPORT_SYMBOL(ccount_freq); | ||
33 | 34 | ||
34 | static cycle_t ccount_read(struct clocksource *cs) | 35 | static cycle_t ccount_read(struct clocksource *cs) |
35 | { | 36 | { |
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index cb8fd44caabc..f9e1ec346e35 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S | |||
@@ -235,7 +235,7 @@ ENTRY(_DoubleExceptionVector) | |||
235 | 235 | ||
236 | /* Check for overflow/underflow exception, jump if overflow. */ | 236 | /* Check for overflow/underflow exception, jump if overflow. */ |
237 | 237 | ||
238 | _bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow | 238 | bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow |
239 | 239 | ||
240 | /* | 240 | /* |
241 | * Restart window underflow exception. | 241 | * Restart window underflow exception. |
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index 74a60c7e085e..80b33ed51f31 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c | |||
@@ -122,9 +122,7 @@ EXPORT_SYMBOL(insw); | |||
122 | EXPORT_SYMBOL(insl); | 122 | EXPORT_SYMBOL(insl); |
123 | 123 | ||
124 | extern long common_exception_return; | 124 | extern long common_exception_return; |
125 | extern long _spill_registers; | ||
126 | EXPORT_SYMBOL(common_exception_return); | 125 | EXPORT_SYMBOL(common_exception_return); |
127 | EXPORT_SYMBOL(_spill_registers); | ||
128 | 126 | ||
129 | #ifdef CONFIG_FUNCTION_TRACER | 127 | #ifdef CONFIG_FUNCTION_TRACER |
130 | EXPORT_SYMBOL(_mcount); | 128 | EXPORT_SYMBOL(_mcount); |
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 479d7537a32a..aff108df92d3 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c | |||
@@ -90,7 +90,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist) | |||
90 | 90 | ||
91 | 91 | ||
92 | /* | 92 | /* |
93 | * Initialize the bootmem system and give it all the memory we have available. | 93 | * Initialize the bootmem system and give it all low memory we have available. |
94 | */ | 94 | */ |
95 | 95 | ||
96 | void __init bootmem_init(void) | 96 | void __init bootmem_init(void) |
@@ -142,9 +142,14 @@ void __init bootmem_init(void) | |||
142 | 142 | ||
143 | /* Add all remaining memory pieces into the bootmem map */ | 143 | /* Add all remaining memory pieces into the bootmem map */ |
144 | 144 | ||
145 | for (i=0; i<sysmem.nr_banks; i++) | 145 | for (i = 0; i < sysmem.nr_banks; i++) { |
146 | free_bootmem(sysmem.bank[i].start, | 146 | if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) { |
147 | sysmem.bank[i].end - sysmem.bank[i].start); | 147 | unsigned long end = min(max_low_pfn << PAGE_SHIFT, |
148 | sysmem.bank[i].end); | ||
149 | free_bootmem(sysmem.bank[i].start, | ||
150 | end - sysmem.bank[i].start); | ||
151 | } | ||
152 | } | ||
148 | 153 | ||
149 | } | 154 | } |
150 | 155 | ||
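A worked example of the new clamping in bootmem_init(), with hypothetical numbers and PAGE_SHIFT assumed to be 12: given max_low_pfn = 0x20000 (512 MiB) and a bank spanning 0x10000000-0x30000000, only the part below the low-memory boundary is handed to bootmem:

    /* Hypothetical bank, illustrating the loop above. */
    unsigned long low_end = 0x20000UL << 12;            /* max_low_pfn << PAGE_SHIFT = 0x20000000 */
    unsigned long start   = 0x10000000UL;               /* bank start, below max_low_pfn          */
    unsigned long end     = min(low_end, 0x30000000UL); /* bank end clamped to 0x20000000         */
    free_bootmem(start, end - start);                   /* registers 256 MiB of the 512 MiB bank  */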
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 36ec171698b8..861203e958da 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c | |||
@@ -39,7 +39,7 @@ void init_mmu(void) | |||
39 | set_itlbcfg_register(0); | 39 | set_itlbcfg_register(0); |
40 | set_dtlbcfg_register(0); | 40 | set_dtlbcfg_register(0); |
41 | #endif | 41 | #endif |
42 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF | 42 | #if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF) |
43 | /* | 43 | /* |
44 | * Update the IO area mapping in case xtensa_kio_paddr has changed | 44 | * Update the IO area mapping in case xtensa_kio_paddr has changed |
45 | */ | 45 | */ |
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c index 800227862fe8..57fd08b36f51 100644 --- a/arch/xtensa/platforms/xtfpga/setup.c +++ b/arch/xtensa/platforms/xtfpga/setup.c | |||
@@ -135,11 +135,11 @@ static void __init update_local_mac(struct device_node *node) | |||
135 | 135 | ||
136 | static int __init machine_setup(void) | 136 | static int __init machine_setup(void) |
137 | { | 137 | { |
138 | struct device_node *serial; | 138 | struct device_node *clock; |
139 | struct device_node *eth = NULL; | 139 | struct device_node *eth = NULL; |
140 | 140 | ||
141 | for_each_compatible_node(serial, NULL, "ns16550a") | 141 | for_each_node_by_name(clock, "main-oscillator") |
142 | update_clock_frequency(serial); | 142 | update_clock_frequency(clock); |
143 | 143 | ||
144 | if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc"))) | 144 | if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc"))) |
145 | update_local_mac(eth); | 145 | update_local_mac(eth); |
@@ -290,6 +290,7 @@ static int __init xtavnet_init(void) | |||
290 | * knows whether they set it correctly on the DIP switches. | 290 | * knows whether they set it correctly on the DIP switches. |
291 | */ | 291 | */ |
292 | pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr); | 292 | pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr); |
293 | ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR; | ||
293 | 294 | ||
294 | return 0; | 295 | return 0; |
295 | } | 296 | } |
diff --git a/arch/xtensa/variants/fsf/include/variant/tie.h b/arch/xtensa/variants/fsf/include/variant/tie.h index bf4020116df5..244cdea4dee5 100644 --- a/arch/xtensa/variants/fsf/include/variant/tie.h +++ b/arch/xtensa/variants/fsf/include/variant/tie.h | |||
@@ -18,13 +18,6 @@ | |||
18 | #define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */ | 18 | #define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */ |
19 | #define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */ | 19 | #define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */ |
20 | 20 | ||
21 | /* Basic parameters of each coprocessor: */ | ||
22 | #define XCHAL_CP7_NAME "XTIOP" | ||
23 | #define XCHAL_CP7_IDENT XTIOP | ||
24 | #define XCHAL_CP7_SA_SIZE 0 /* size of state save area */ | ||
25 | #define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */ | ||
26 | #define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */ | ||
27 | |||
28 | /* Filler info for unassigned coprocessors, to simplify arrays etc: */ | 21 | /* Filler info for unassigned coprocessors, to simplify arrays etc: */ |
29 | #define XCHAL_NCP_SA_SIZE 0 | 22 | #define XCHAL_NCP_SA_SIZE 0 |
30 | #define XCHAL_NCP_SA_ALIGN 1 | 23 | #define XCHAL_NCP_SA_ALIGN 1 |
@@ -42,6 +35,8 @@ | |||
42 | #define XCHAL_CP5_SA_ALIGN 1 | 35 | #define XCHAL_CP5_SA_ALIGN 1 |
43 | #define XCHAL_CP6_SA_SIZE 0 | 36 | #define XCHAL_CP6_SA_SIZE 0 |
44 | #define XCHAL_CP6_SA_ALIGN 1 | 37 | #define XCHAL_CP6_SA_ALIGN 1 |
38 | #define XCHAL_CP7_SA_SIZE 0 | ||
39 | #define XCHAL_CP7_SA_ALIGN 1 | ||
45 | 40 | ||
46 | /* Save area for non-coprocessor optional and custom (TIE) state: */ | 41 | /* Save area for non-coprocessor optional and custom (TIE) state: */ |
47 | #define XCHAL_NCP_SA_SIZE 0 | 42 | #define XCHAL_NCP_SA_SIZE 0 |