author		Ralf Baechle <ralf@linux-mips.org>	2014-05-28 13:00:14 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2014-05-29 09:08:23 -0400
commit		2e2d663d2dd64ffe9855be0b35aa221c9b8139f2 (patch)
tree		508667aa6fbab564e7875d3953671265d0176e69
parent		5ec79bf919ddb53fd98893b7217897c839aa19cc (diff)
parent		322014531e1fac4674b8eef67e4f80aca1e9f003 (diff)

Merge branch 'wip-mips-pm' of https://github.com/paulburton/linux into mips-for-linux-next

41 files changed, 2257 insertions(+), 218 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index feb96cbb3940..2fe8e60a9ea7 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -50,6 +50,8 @@ config MIPS
 	select CLONE_BACKWARDS
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_CC_STACKPROTECTOR
+	select CPU_PM if CPU_IDLE
+	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 
 menu "Machine selection"
 
@@ -2012,9 +2014,11 @@ config MIPS_CPS
 	depends on SYS_SUPPORTS_MIPS_CPS
 	select MIPS_CM
 	select MIPS_CPC
+	select MIPS_CPS_PM if HOTPLUG_CPU
 	select MIPS_GIC_IPI
 	select SMP
 	select SYNC_R4K if (CEVT_R4K || CSRC_R4K)
+	select SYS_SUPPORTS_HOTPLUG_CPU
 	select SYS_SUPPORTS_SMP
 	select WEAK_ORDERING
 	help
@@ -2024,6 +2028,9 @@ config MIPS_CPS
 	  no external assistance. It is safe to enable this when hardware
 	  support is unavailable.
 
+config MIPS_CPS_PM
+	bool
+
 config MIPS_GIC_IPI
 	bool
 
@@ -2633,12 +2640,16 @@ endmenu
 config MIPS_EXTERNAL_TIMER
 	bool
 
-if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
 menu "CPU Power Management"
+
+if CPU_SUPPORTS_CPUFREQ && MIPS_EXTERNAL_TIMER
 source "drivers/cpufreq/Kconfig"
-endmenu
 endif
 
+source "drivers/cpuidle/Kconfig"
+
+endmenu
+
 source "net/Kconfig"
 
 source "drivers/Kconfig"
diff --git a/arch/mips/configs/maltasmvp_defconfig b/arch/mips/configs/maltasmvp_defconfig
index 10ef3bed5f43..f8a32315bb38 100644
--- a/arch/mips/configs/maltasmvp_defconfig
+++ b/arch/mips/configs/maltasmvp_defconfig
@@ -4,10 +4,9 @@ CONFIG_CPU_MIPS32_R2=y
 CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
 CONFIG_SCHED_SMT=y
-CONFIG_MIPS_CMP=y
+CONFIG_MIPS_CPS=y
 CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
-CONFIG_LOCALVERSION="cmp"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index 2d3002cba102..c83338a39917 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -5,10 +5,9 @@ CONFIG_CPU_MIPS32_3_5_FEATURES=y
 CONFIG_PAGE_SIZE_16KB=y
 CONFIG_MIPS_MT_SMP=y
 CONFIG_SCHED_SMT=y
-CONFIG_MIPS_CMP=y
+CONFIG_MIPS_CPS=y
 CONFIG_NR_CPUS=8
 CONFIG_HZ_100=y
-CONFIG_LOCALVERSION="cmp"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_AUDIT=y
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 69468ded2828..e08381a37f8b 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -113,6 +113,12 @@ unsigned long run_uncached(void *func);
 
 extern void *kmap_coherent(struct page *page, unsigned long addr);
 extern void kunmap_coherent(void);
+extern void *kmap_noncoherent(struct page *page, unsigned long addr);
+
+static inline void kunmap_noncoherent(void)
+{
+	kunmap_coherent();
+}
 
 #define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
 static inline void flush_kernel_dcache_page(struct page *page)
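
The kmap_noncoherent()/kunmap_noncoherent() pair added above mirrors the
existing kmap_coherent() interface, and unmapping goes through the same path,
as the new inline shows. A minimal sketch of the intended usage pattern,
modelled on how pm-cps.c later in this merge maps its ready_count (the helper
name is illustrative):

	/* Sketch: obtain a non-coherent view of the word at 'var'. */
	static u32 *example_map_noncoherent(u32 *var)
	{
		void *nc;

		/* Map the page containing 'var' with a non-coherent CCA */
		nc = kmap_noncoherent(virt_to_page(var), (unsigned long)var);
		/* Re-apply the offset of 'var' within its page */
		return nc + ((unsigned long)var & ~PAGE_MASK);
	}

	/* ...access via the returned pointer, then kunmap_noncoherent() */
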
diff --git a/arch/mips/include/asm/gic.h b/arch/mips/include/asm/gic.h
index 082716690589..10f6a99f92c2 100644
--- a/arch/mips/include/asm/gic.h
+++ b/arch/mips/include/asm/gic.h
@@ -380,6 +380,7 @@ extern unsigned int gic_compare_int (void);
 extern cycle_t gic_read_count(void);
 extern cycle_t gic_read_compare(void);
 extern void gic_write_compare(cycle_t cnt);
+extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
index d192158886b1..d9f932de80e9 100644
--- a/arch/mips/include/asm/idle.h
+++ b/arch/mips/include/asm/idle.h
@@ -1,6 +1,7 @@
 #ifndef __ASM_IDLE_H
 #define __ASM_IDLE_H
 
+#include <linux/cpuidle.h>
 #include <linux/linkage.h>
 
 extern void (*cpu_wait)(void);
@@ -20,4 +21,17 @@ static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
 		addr < (unsigned long)__pastwait;
 }
 
+extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+				   struct cpuidle_driver *drv, int index);
+
+#define MIPS_CPUIDLE_WAIT_STATE {\
+	.enter			= mips_cpuidle_wait_enter,\
+	.exit_latency		= 1,\
+	.target_residency	= 1,\
+	.power_usage		= UINT_MAX,\
+	.flags			= CPUIDLE_FLAG_TIME_VALID,\
+	.name			= "wait",\
+	.desc			= "MIPS wait",\
+}
+
 #endif /* __ASM_IDLE_H */
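
Since MIPS_CPUIDLE_WAIT_STATE expands to a struct cpuidle_state initialiser,
a platform cpuidle driver can use it verbatim as its shallowest state. A
hedged sketch (the driver name and state count are illustrative; any deeper,
platform-specific states would follow the wait state):

	static struct cpuidle_driver example_cpuidle_driver = {
		.name		= "example-idle",	/* illustrative */
		.owner		= THIS_MODULE,
		.states = {
			/* State 0: the plain MIPS wait instruction */
			[0] = MIPS_CPUIDLE_WAIT_STATE,
		},
		.state_count	= 1,
	};
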
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
index 988507e46d42..e139a534e0fd 100644
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -72,7 +72,12 @@ static inline bool mips_cpc_present(void)
 #define MIPS_CPC_COCB_OFS	0x4000
 
 /* Macros to ease the creation of register access functions */
 #define BUILD_CPC_R_(name, off)					\
+static inline u32 *addr_cpc_##name(void)			\
+{								\
+	return (u32 *)(mips_cpc_base + (off));			\
+}								\
+								\
 static inline u32 read_cpc_##name(void)			\
 {								\
 	return __raw_readl(mips_cpc_base + (off));		\
@@ -147,4 +152,31 @@ BUILD_CPC_Cx_RW(other, 0x10)
 #define CPC_Cx_OTHER_CORENUM_SHF	16
 #define CPC_Cx_OTHER_CORENUM_MSK	(_ULCAST_(0xff) << 16)
 
+#ifdef CONFIG_MIPS_CPC
+
+/**
+ * mips_cpc_lock_other - lock access to another core
+ * core: the other core to be accessed
+ *
+ * Call before operating upon a core via the 'other' register region in
+ * order to prevent the region being moved during access. Must be followed
+ * by a call to mips_cpc_unlock_other.
+ */
+extern void mips_cpc_lock_other(unsigned int core);
+
+/**
+ * mips_cpc_unlock_other - unlock access to another core
+ *
+ * Call after operating upon another core via the 'other' register region.
+ * Must be called after mips_cpc_lock_other.
+ */
+extern void mips_cpc_unlock_other(void);
+
+#else /* !CONFIG_MIPS_CPC */
+
+static inline void mips_cpc_lock_other(unsigned int core) { }
+static inline void mips_cpc_unlock_other(void) { }
+
+#endif /* !CONFIG_MIPS_CPC */
+
 #endif /* __MIPS_ASM_MIPS_CPC_H__ */
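
Per the kernel-doc above, every access through the CPC 'other' register
region must be bracketed by the lock/unlock pair so the region cannot be
retargeted to a different core mid-access. A sketch of the calling pattern
(the register write is illustrative and assumes an accessor generated by
the BUILD_CPC_* macros for the 'other' region):

	/* Sketch: perform a CPC operation on a remote core. */
	static void example_cpc_op_on_core(unsigned int core, u32 val)
	{
		mips_cpc_lock_other(core);	/* pin 'other' region to core */
		write_cpc_co_other_reg(val);	/* hypothetical accessor */
		mips_cpc_unlock_other();	/* region may move again */
	}
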
diff --git a/arch/mips/include/asm/mipsmtregs.h b/arch/mips/include/asm/mipsmtregs.h
index 6efa79a27b6a..5f8052ce43bf 100644
--- a/arch/mips/include/asm/mipsmtregs.h
+++ b/arch/mips/include/asm/mipsmtregs.h
@@ -36,6 +36,8 @@
 
 #define read_c0_tcbind()	__read_32bit_c0_register($2, 2)
 
+#define write_c0_tchalt(val)	__write_32bit_c0_register($2, 4, val)
+
 #define read_c0_tccontext()	__read_32bit_c0_register($2, 5)
 #define write_c0_tccontext(val)	__write_32bit_c0_register($2, 5, val)
 
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 0f75aaca201b..2e373da5f8e9 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -27,11 +27,15 @@ do { \
 } while (0)
 
 #ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+
+#define TLBMISS_HANDLER_RESTORE() \
+	write_c0_xcontext((unsigned long) smp_processor_id() << \
+			  SMP_CPUID_REGSHIFT)
+
 #define TLBMISS_HANDLER_SETUP() \
 	do { \
 		TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir); \
-		write_c0_xcontext((unsigned long) smp_processor_id() << \
-				  SMP_CPUID_REGSHIFT); \
+		TLBMISS_HANDLER_RESTORE(); \
 	} while (0)
 
 #else /* !CONFIG_MIPS_PGD_C0_CONTEXT: using pgd_current*/
@@ -43,9 +47,12 @@ do { \
  */
 extern unsigned long pgd_current[];
 
-#define TLBMISS_HANDLER_SETUP() \
+#define TLBMISS_HANDLER_RESTORE() \
 	write_c0_context((unsigned long) smp_processor_id() << \
-		SMP_CPUID_REGSHIFT); \
+		SMP_CPUID_REGSHIFT)
+
+#define TLBMISS_HANDLER_SETUP() \
+	TLBMISS_HANDLER_RESTORE(); \
 	back_to_back_c0_hazard(); \
 	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
 #endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 008324d1c261..539ddd148bbb 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -32,6 +32,8 @@ struct vm_area_struct;
 				 _page_cachable_default)
 #define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
 				 _PAGE_GLOBAL | _page_cachable_default)
+#define PAGE_KERNEL_NC	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
+				 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
 #define PAGE_USERIO	__pgprot(_PAGE_PRESENT | (cpu_has_rixi ? 0 : _PAGE_READ) | _PAGE_WRITE | \
 				 _page_cachable_default)
 #define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
diff --git a/arch/mips/include/asm/pm-cps.h b/arch/mips/include/asm/pm-cps.h
new file mode 100644
index 000000000000..625eda53d571
--- /dev/null
+++ b/arch/mips/include/asm/pm-cps.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_PM_CPS_H__
+#define __MIPS_ASM_PM_CPS_H__
+
+/*
+ * The CM & CPC can only handle coherence & power control on a per-core basis,
+ * thus in an MT system the VPEs within each core are coupled and can only
+ * enter or exit states requiring CM or CPC assistance in unison.
+ */
+#ifdef CONFIG_MIPS_MT
+# define coupled_coherence cpu_has_mipsmt
+#else
+# define coupled_coherence 0
+#endif
+
+/* Enumeration of possible PM states */
+enum cps_pm_state {
+	CPS_PM_NC_WAIT,		/* MIPS wait instruction, non-coherent */
+	CPS_PM_CLOCK_GATED,	/* Core clock gated */
+	CPS_PM_POWER_GATED,	/* Core power gated */
+	CPS_PM_STATE_COUNT,
+};
+
+/**
+ * cps_pm_support_state - determine whether the system supports a PM state
+ * @state: the state to test for support
+ *
+ * Returns true if the system supports the given state, otherwise false.
+ */
+extern bool cps_pm_support_state(enum cps_pm_state state);
+
+/**
+ * cps_pm_enter_state - enter a PM state
+ * @state: the state to enter
+ *
+ * Enter the given PM state. If coupled_coherence is non-zero then it is
+ * expected that this function be called at approximately the same time on
+ * each coupled CPU. Returns 0 on successful entry & exit, otherwise -errno.
+ */
+extern int cps_pm_enter_state(enum cps_pm_state state);
+
+#endif /* __MIPS_ASM_PM_CPS_H__ */
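
Together the two functions give callers a probe-then-enter pattern: test
support once, then call cps_pm_enter_state() on each affected CPU. A minimal
sketch, assuming a context where blocking this CPU is acceptable (such as an
idle or hotplug path):

	#include <linux/errno.h>
	#include <asm/pm-cps.h>

	/* Sketch: enter the deepest CPS state this system supports. */
	static int example_enter_deepest_state(void)
	{
		if (cps_pm_support_state(CPS_PM_POWER_GATED))
			return cps_pm_enter_state(CPS_PM_POWER_GATED);
		if (cps_pm_support_state(CPS_PM_CLOCK_GATED))
			return cps_pm_enter_state(CPS_PM_CLOCK_GATED);
		if (cps_pm_support_state(CPS_PM_NC_WAIT))
			return cps_pm_enter_state(CPS_PM_NC_WAIT);
		return -EINVAL;	/* no low-power state available */
	}
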
diff --git a/arch/mips/include/asm/pm.h b/arch/mips/include/asm/pm.h
new file mode 100644
index 000000000000..7c03469e043f
--- /dev/null
+++ b/arch/mips/include/asm/pm.h
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * PM helper macros for CPU power off (e.g. Suspend-to-RAM).
+ */
+
+#ifndef __ASM_PM_H
+#define __ASM_PM_H
+
+#ifdef __ASSEMBLY__
+
+#include <asm/asm-offsets.h>
+#include <asm/asm.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+
+/* Save CPU state to stack for suspend to RAM */
+.macro SUSPEND_SAVE_REGS
+	subu	sp, PT_SIZE
+	/* Call preserved GPRs */
+	LONG_S	$16, PT_R16(sp)
+	LONG_S	$17, PT_R17(sp)
+	LONG_S	$18, PT_R18(sp)
+	LONG_S	$19, PT_R19(sp)
+	LONG_S	$20, PT_R20(sp)
+	LONG_S	$21, PT_R21(sp)
+	LONG_S	$22, PT_R22(sp)
+	LONG_S	$23, PT_R23(sp)
+	LONG_S	$28, PT_R28(sp)
+	LONG_S	$30, PT_R30(sp)
+	LONG_S	$31, PT_R31(sp)
+	/* A couple of CP0 registers with space in pt_regs */
+	mfc0	k0, CP0_STATUS
+	LONG_S	k0, PT_STATUS(sp)
+.endm
+
+/* Restore CPU state from stack after resume from RAM */
+.macro RESUME_RESTORE_REGS_RETURN
+	.set	push
+	.set	noreorder
+	/* A couple of CP0 registers with space in pt_regs */
+	LONG_L	k0, PT_STATUS(sp)
+	mtc0	k0, CP0_STATUS
+	/* Call preserved GPRs */
+	LONG_L	$16, PT_R16(sp)
+	LONG_L	$17, PT_R17(sp)
+	LONG_L	$18, PT_R18(sp)
+	LONG_L	$19, PT_R19(sp)
+	LONG_L	$20, PT_R20(sp)
+	LONG_L	$21, PT_R21(sp)
+	LONG_L	$22, PT_R22(sp)
+	LONG_L	$23, PT_R23(sp)
+	LONG_L	$28, PT_R28(sp)
+	LONG_L	$30, PT_R30(sp)
+	LONG_L	$31, PT_R31(sp)
+	/* Pop and return */
+	jr	ra
+	addiu	sp, PT_SIZE
+	.set	pop
+.endm
+
+/* Get address of static suspend state into t1 */
+.macro LA_STATIC_SUSPEND
+	la	t1, mips_static_suspend_state
+.endm
+
+/* Save important CPU state for early restoration to global data */
+.macro SUSPEND_SAVE_STATIC
+#ifdef CONFIG_EVA
+	/*
+	 * Segment configuration is saved in global data where it can be easily
+	 * reloaded without depending on the segment configuration.
+	 */
+	mfc0	k0, CP0_PAGEMASK, 2	/* SegCtl0 */
+	LONG_S	k0, SSS_SEGCTL0(t1)
+	mfc0	k0, CP0_PAGEMASK, 3	/* SegCtl1 */
+	LONG_S	k0, SSS_SEGCTL1(t1)
+	mfc0	k0, CP0_PAGEMASK, 4	/* SegCtl2 */
+	LONG_S	k0, SSS_SEGCTL2(t1)
+#endif
+	/* save stack pointer (pointing to GPRs) */
+	LONG_S	sp, SSS_SP(t1)
+.endm
+
+/* Restore important CPU state early from global data */
+.macro RESUME_RESTORE_STATIC
+#ifdef CONFIG_EVA
+	/*
+	 * Segment configuration must be restored prior to any access to
+	 * allocated memory, as it may reside outside of the legacy kernel
+	 * segments.
+	 */
+	LONG_L	k0, SSS_SEGCTL0(t1)
+	mtc0	k0, CP0_PAGEMASK, 2	/* SegCtl0 */
+	LONG_L	k0, SSS_SEGCTL1(t1)
+	mtc0	k0, CP0_PAGEMASK, 3	/* SegCtl1 */
+	LONG_L	k0, SSS_SEGCTL2(t1)
+	mtc0	k0, CP0_PAGEMASK, 4	/* SegCtl2 */
+	tlbw_use_hazard
+#endif
+	/* restore stack pointer (pointing to GPRs) */
+	LONG_L	sp, SSS_SP(t1)
+.endm
+
+/* flush caches to make sure context has reached memory */
+.macro SUSPEND_CACHE_FLUSH
+	.extern	__wback_cache_all
+	.set	push
+	.set	noreorder
+	la	t1, __wback_cache_all
+	LONG_L	t0, 0(t1)
+	jalr	t0
+	nop
+	.set	pop
+.endm
+
+/* Save suspend state and flush data caches to RAM */
+.macro SUSPEND_SAVE
+	SUSPEND_SAVE_REGS
+	LA_STATIC_SUSPEND
+	SUSPEND_SAVE_STATIC
+	SUSPEND_CACHE_FLUSH
+.endm
+
+/* Restore saved state after resume from RAM and return */
+.macro RESUME_RESTORE_RETURN
+	LA_STATIC_SUSPEND
+	RESUME_RESTORE_STATIC
+	RESUME_RESTORE_REGS_RETURN
+.endm
+
+#else /* __ASSEMBLY__ */
+
+/**
+ * struct mips_static_suspend_state - Core saved CPU state across S2R.
+ * @segctl:	CP0 Segment control registers.
+ * @sp:		Stack frame where GP register context is saved.
+ *
+ * This structure contains minimal CPU state that must be saved in static kernel
+ * data in order to be able to restore the rest of the state. This includes
+ * segmentation configuration in the case of EVA being enabled, as they must be
+ * restored prior to any kmalloc'd memory being referenced (even the stack
+ * pointer).
+ */
+struct mips_static_suspend_state {
+#ifdef CONFIG_EVA
+	unsigned long segctl[3];
+#endif
+	unsigned long sp;
+};
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_PM_HELPERS_H */
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
index d60d1a2180d1..a06a08a9afc6 100644
--- a/arch/mips/include/asm/smp-cps.h
+++ b/arch/mips/include/asm/smp-cps.h
@@ -13,17 +13,28 @@
 
 #ifndef __ASSEMBLY__
 
-struct boot_config {
-	unsigned int core;
-	unsigned int vpe;
+struct vpe_boot_config {
 	unsigned long pc;
 	unsigned long sp;
 	unsigned long gp;
 };
 
-extern struct boot_config mips_cps_bootcfg;
+struct core_boot_config {
+	atomic_t vpe_mask;
+	struct vpe_boot_config *vpe_config;
+};
+
+extern struct core_boot_config *mips_cps_core_bootcfg;
 
 extern void mips_cps_core_entry(void);
+extern void mips_cps_core_init(void);
+
+extern struct vpe_boot_config *mips_cps_boot_vpes(void);
+
+extern bool mips_cps_smp_in_use(void);
+
+extern void mips_cps_pm_save(void);
+extern void mips_cps_pm_restore(void);
 
 #else /* __ASSEMBLY__ */
 
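
The old flat boot_config (a single global core/vpe/pc/sp/gp tuple) becomes a
two-level structure here: an array of core_boot_config indexed by core number,
each holding a vpe_config array indexed by VPE. pm-cps.c later in this merge
addresses an entry exactly this way; condensed into a sketch (the helper name
is illustrative):

	/* Sketch: fill in the boot configuration for (core, vpe). */
	static void example_set_vpe_entry(unsigned core, unsigned vpe,
					  unsigned long pc, unsigned long sp,
					  unsigned long gp)
	{
		struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
		struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe];

		vpe_cfg->pc = pc;	/* where the VPE begins execution */
		vpe_cfg->sp = sp;	/* its initial stack pointer */
		vpe_cfg->gp = gp;	/* its initial global pointer */
	}
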
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index efa02acd3dd5..b037334fca22 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -46,6 +46,9 @@ extern int __cpu_logical_map[NR_CPUS];
 
 extern volatile cpumask_t cpu_callin_map;
 
+/* Mask of CPUs which are currently definitely operating coherently */
+extern cpumask_t cpu_coherent_mask;
+
 extern void asmlinkage smp_bootstrap(void);
 
 /*
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index c33a9564fb41..3d803877ad8f 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -74,6 +74,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \
 #define Ip_u1u2(op) \
 void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
 
+#define Ip_u2u1(op) \
+void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b)
+
 #define Ip_u1s2(op) \
 void ISAOPC(op)(u32 **buf, unsigned int a, signed int b)
 
@@ -114,6 +117,7 @@ Ip_u2u1msbu3(_ext);
 Ip_u2u1msbu3(_ins);
 Ip_u1(_j);
 Ip_u1(_jal);
+Ip_u2u1(_jalr);
 Ip_u1(_jr);
 Ip_u2s3u1(_ld);
 Ip_u3u1u2(_ldx);
@@ -137,13 +141,16 @@ Ip_u2u1u3(_sra);
 Ip_u2u1u3(_srl);
 Ip_u3u1u2(_subu);
 Ip_u2s3u1(_sw);
+Ip_u1(_sync);
 Ip_u1(_syscall);
 Ip_0(_tlbp);
 Ip_0(_tlbr);
 Ip_0(_tlbwi);
 Ip_0(_tlbwr);
+Ip_u1(_wait);
 Ip_u3u1u2(_xor);
 Ip_u2u1u3(_xori);
+Ip_u2u1(_yield);
 
 
 /* Handle labels. */
@@ -264,6 +271,8 @@ void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
 		   unsigned int bit, int lid);
 void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
 		   unsigned int bit, int lid);
+void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1,
+		 unsigned int r2, int lid);
 void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
 void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid);
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index fce8367da245..a1ad5516ad1f 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -76,16 +76,17 @@ enum spec2_op {
 enum spec3_op {
 	ext_op, dextm_op, dextu_op, dext_op,
 	ins_op, dinsm_op, dinsu_op, dins_op,
-	lx_op = 0x0a, lwle_op = 0x19,
-	lwre_op = 0x1a, cachee_op = 0x1b,
-	sbe_op = 0x1c, she_op = 0x1d,
-	sce_op = 0x1e, swe_op = 0x1f,
-	bshfl_op = 0x20, swle_op = 0x21,
-	swre_op = 0x22, prefe_op = 0x23,
-	dbshfl_op = 0x24, lbue_op = 0x28,
-	lhue_op = 0x29, lbe_op = 0x2c,
-	lhe_op = 0x2d, lle_op = 0x2e,
-	lwe_op = 0x2f, rdhwr_op = 0x3b
+	yield_op = 0x09, lx_op = 0x0a,
+	lwle_op = 0x19, lwre_op = 0x1a,
+	cachee_op = 0x1b, sbe_op = 0x1c,
+	she_op = 0x1d, sce_op = 0x1e,
+	swe_op = 0x1f, bshfl_op = 0x20,
+	swle_op = 0x21, swre_op = 0x22,
+	prefe_op = 0x23, dbshfl_op = 0x24,
+	lbue_op = 0x28, lhue_op = 0x29,
+	lbe_op = 0x2c, lhe_op = 0x2d,
+	lle_op = 0x2e, lwe_op = 0x2f,
+	rdhwr_op = 0x3b
 };
 
 /*
@@ -127,7 +128,8 @@ enum bcop_op {
 enum cop0_coi_func {
 	tlbr_op = 0x01, tlbwi_op = 0x02,
 	tlbwr_op = 0x06, tlbp_op = 0x08,
-	rfe_op = 0x10, eret_op = 0x18
+	rfe_op = 0x10, eret_op = 0x18,
+	wait_op = 0x20,
 };
 
 /*
@@ -303,7 +305,9 @@ enum mm_32axf_minor_op {
 	mm_tlbwr_op = 0x0cd,
 	mm_jalrs_op = 0x13c,
 	mm_jalrshb_op = 0x17c,
+	mm_sync_op = 0x1ad,
 	mm_syscall_op = 0x22d,
+	mm_wait_op = 0x24d,
 	mm_eret_op = 0x3cd,
 };
 
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 8f8b531bc848..a61d108f4c0e 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -105,6 +105,9 @@ obj-$(CONFIG_JUMP_LABEL)	+= jump_label.o
 obj-$(CONFIG_MIPS_CM)		+= mips-cm.o
 obj-$(CONFIG_MIPS_CPC)		+= mips-cpc.o
 
+obj-$(CONFIG_CPU_PM)		+= pm.o
+obj-$(CONFIG_MIPS_CPS_PM)	+= pm-cps.o
+
 #
 # DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
 # safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 08f897ee9a77..02f075df8f2e 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/kbuild.h>
 #include <linux/suspend.h>
+#include <asm/pm.h>
 #include <asm/ptrace.h>
 #include <asm/processor.h>
 #include <asm/smp-cps.h>
@@ -401,6 +402,20 @@ void output_pbe_defines(void)
 }
 #endif
 
+#ifdef CONFIG_CPU_PM
+void output_pm_defines(void)
+{
+	COMMENT(" PM offsets. ");
+#ifdef CONFIG_EVA
+	OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
+	OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
+	OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
+#endif
+	OFFSET(SSS_SP, mips_static_suspend_state, sp);
+	BLANK();
+}
+#endif
+
 void output_kvm_defines(void)
 {
 	COMMENT(" KVM/MIPS Specfic offsets. ");
@@ -469,10 +484,14 @@ void output_kvm_defines(void)
 void output_cps_defines(void)
 {
 	COMMENT(" MIPS CPS offsets. ");
-	OFFSET(BOOTCFG_CORE, boot_config, core);
-	OFFSET(BOOTCFG_VPE, boot_config, vpe);
-	OFFSET(BOOTCFG_PC, boot_config, pc);
-	OFFSET(BOOTCFG_SP, boot_config, sp);
-	OFFSET(BOOTCFG_GP, boot_config, gp);
+
+	OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
+	OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
+	DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
+
+	OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
+	OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
+	OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
+	DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
 }
 #endif
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
index 594cbbf16d62..6093716980b9 100644
--- a/arch/mips/kernel/cevt-gic.c
+++ b/arch/mips/kernel/cevt-gic.c
@@ -26,7 +26,7 @@ static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
 
 	cnt = gic_read_count();
 	cnt += (u64)delta;
-	gic_write_compare(cnt);
+	gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
 	res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
 	return res;
 }
@@ -73,7 +73,8 @@ int gic_clockevent_init(void)
 	cd = &per_cpu(gic_clockevent_device, cpu);
 
 	cd->name		= "MIPS GIC";
-	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_C3STOP;
 
 	clockevent_set_clock(cd, gic_frequency);
 
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index bff124ae69fa..bc127e22fdab 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -62,9 +62,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 		/* Clear Count/Compare Interrupt */
 		write_c0_compare(read_c0_compare());
 		cd = &per_cpu(mips_clockevent_device, cpu);
-#ifdef CONFIG_CEVT_GIC
-		if (!gic_present)
-#endif
 		cd->event_handler(cd);
 	}
 
@@ -182,7 +179,9 @@ int r4k_clockevent_init(void)
 	cd = &per_cpu(mips_clockevent_device, cpu);
 
 	cd->name		= "MIPS";
-	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_C3STOP |
+				  CLOCK_EVT_FEAT_PERCPU;
 
 	clockevent_set_clock(cd, mips_hpt_frequency);
 
@@ -197,9 +196,6 @@ int r4k_clockevent_init(void)
 	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;
 
-#ifdef CONFIG_CEVT_GIC
-	if (!gic_present)
-#endif
 	clockevents_register_device(cd);
 
 	if (cp0_timer_irq_installed)
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index f7a46db4b161..6f4f739dad96 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -14,19 +14,43 @@
 #include <asm/asmmacro.h>
 #include <asm/cacheops.h>
 #include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/pm.h>
 
 #define GCR_CL_COHERENCE_OFS	0x2008
+#define GCR_CL_ID_OFS		0x2028
+
+.extern mips_cm_base
+
+.set noreorder
+
+	/*
+	 * Set dest to non-zero if the core supports the MT ASE, else zero. If
+	 * MT is not supported then branch to nomt.
+	 */
+	.macro	has_mt	dest, nomt
+	mfc0	\dest, CP0_CONFIG
+	bgez	\dest, \nomt
+	mfc0	\dest, CP0_CONFIG, 1
+	bgez	\dest, \nomt
+	mfc0	\dest, CP0_CONFIG, 2
+	bgez	\dest, \nomt
+	mfc0	\dest, CP0_CONFIG, 3
+	andi	\dest, \dest, MIPS_CONF3_MT
+	beqz	\dest, \nomt
+	.endm
 
 .section .text.cps-vec
 .balign 0x1000
-.set noreorder
 
 LEAF(mips_cps_core_entry)
 	/*
-	 * These first 8 bytes will be patched by cps_smp_setup to load the
-	 * base address of the CM GCRs into register v1.
+	 * These first 12 bytes will be patched by cps_smp_setup to load the
+	 * base address of the CM GCRs into register v1 and the CCA to use into
+	 * register s0.
	 */
 	.quad	0
+	.word	0
 
 	/* Check whether we're here due to an NMI */
 	mfc0	k0, CP0_STATUS
@@ -117,10 +141,11 @@ icache_done:
 	add	a0, a0, t0
 dcache_done:
 
-	/* Set Kseg0 cacheable, coherent, write-back, write-allocate */
+	/* Set Kseg0 CCA to that in s0 */
 	mfc0	t0, CP0_CONFIG
 	ori	t0, 0x7
-	xori	t0, 0x2
+	xori	t0, 0x7
+	or	t0, t0, s0
 	mtc0	t0, CP0_CONFIG
 	ehb
 
@@ -134,21 +159,24 @@ dcache_done:
 	jr	t0
 	nop
 
-1:	/* We're up, cached & coherent */
+	/*
+	 * We're up, cached & coherent. Perform any further required core-level
+	 * initialisation.
+	 */
+1:	jal	mips_cps_core_init
+	nop
 
 	/*
-	 * TODO: We should check the VPE number we intended to boot here, and
-	 *       if non-zero we should start that VPE and stop this one. For
-	 *       the moment this doesn't matter since CPUs are brought up
-	 *       sequentially and in order, but once hotplug is implemented
-	 *       this will need revisiting.
+	 * Boot any other VPEs within this core that should be online, and
+	 * deactivate this VPE if it should be offline.
 	 */
+	jal	mips_cps_boot_vpes
+	nop
 
 	/* Off we go! */
-	la	t0, mips_cps_bootcfg
-	lw	t1, BOOTCFG_PC(t0)
-	lw	gp, BOOTCFG_GP(t0)
-	lw	sp, BOOTCFG_SP(t0)
+	lw	t1, VPEBOOTCFG_PC(v0)
+	lw	gp, VPEBOOTCFG_GP(v0)
+	lw	sp, VPEBOOTCFG_SP(v0)
 	jr	t1
 	nop
 END(mips_cps_core_entry)
@@ -189,3 +217,271 @@ LEAF(excep_ejtag)
 	jr	k0
 	nop
 	END(excep_ejtag)
+
+LEAF(mips_cps_core_init)
+#ifdef CONFIG_MIPS_MT
+	/* Check that the core implements the MT ASE */
+	has_mt	t0, 3f
+	nop
+
+	.set	push
+	.set	mt
+
+	/* Only allow 1 TC per VPE to execute... */
+	dmt
+
+	/* ...and for the moment only 1 VPE */
+	dvpe
+	la	t1, 1f
+	jr.hb	t1
+	nop
+
+	/* Enter VPE configuration state */
+1:	mfc0	t0, CP0_MVPCONTROL
+	ori	t0, t0, MVPCONTROL_VPC
+	mtc0	t0, CP0_MVPCONTROL
+
+	/* Retrieve the number of VPEs within the core */
+	mfc0	t0, CP0_MVPCONF0
+	srl	t0, t0, MVPCONF0_PVPE_SHIFT
+	andi	t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+	addi	t7, t0, 1
+
+	/* If there's only 1, we're done */
+	beqz	t0, 2f
+	nop
+
+	/* Loop through each VPE within this core */
+	li	t5, 1
+
+1:	/* Operate on the appropriate TC */
+	mtc0	t5, CP0_VPECONTROL
+	ehb
+
+	/* Bind TC to VPE (1:1 TC:VPE mapping) */
+	mttc0	t5, CP0_TCBIND
+
+	/* Set exclusive TC, non-active, master */
+	li	t0, VPECONF0_MVP
+	sll	t1, t5, VPECONF0_XTC_SHIFT
+	or	t0, t0, t1
+	mttc0	t0, CP0_VPECONF0
+
+	/* Set TC non-active, non-allocatable */
+	mttc0	zero, CP0_TCSTATUS
+
+	/* Set TC halted */
+	li	t0, TCHALT_H
+	mttc0	t0, CP0_TCHALT
+
+	/* Next VPE */
+	addi	t5, t5, 1
+	slt	t0, t5, t7
+	bnez	t0, 1b
+	nop
+
+	/* Leave VPE configuration state */
+2:	mfc0	t0, CP0_MVPCONTROL
+	xori	t0, t0, MVPCONTROL_VPC
+	mtc0	t0, CP0_MVPCONTROL
+
+3:	.set	pop
+#endif
+	jr	ra
+	nop
+	END(mips_cps_core_init)
+
+LEAF(mips_cps_boot_vpes)
+	/* Retrieve CM base address */
+	la	t0, mips_cm_base
+	lw	t0, 0(t0)
+
+	/* Calculate a pointer to this cores struct core_boot_config */
+	lw	t0, GCR_CL_ID_OFS(t0)
+	li	t1, COREBOOTCFG_SIZE
+	mul	t0, t0, t1
+	la	t1, mips_cps_core_bootcfg
+	lw	t1, 0(t1)
+	addu	t0, t0, t1
+
+	/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
+	has_mt	t6, 1f
+	li	t9, 0
+
+	/* Find the number of VPEs present in the core */
+	mfc0	t1, CP0_MVPCONF0
+	srl	t1, t1, MVPCONF0_PVPE_SHIFT
+	andi	t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
+	addi	t1, t1, 1
+
+	/* Calculate a mask for the VPE ID from EBase.CPUNum */
+	clz	t1, t1
+	li	t2, 31
+	subu	t1, t2, t1
+	li	t2, 1
+	sll	t1, t2, t1
+	addiu	t1, t1, -1
+
+	/* Retrieve the VPE ID from EBase.CPUNum */
+	mfc0	t9, $15, 1
+	and	t9, t9, t1
+
+1:	/* Calculate a pointer to this VPEs struct vpe_boot_config */
+	li	t1, VPEBOOTCFG_SIZE
+	mul	v0, t9, t1
+	lw	t7, COREBOOTCFG_VPECONFIG(t0)
+	addu	v0, v0, t7
+
+#ifdef CONFIG_MIPS_MT
+
+	/* If the core doesn't support MT then return */
+	bnez	t6, 1f
+	nop
+	jr	ra
+	nop
+
+	.set	push
+	.set	mt
+
+1:	/* Enter VPE configuration state */
+	dvpe
+	la	t1, 1f
+	jr.hb	t1
+	nop
+1:	mfc0	t1, CP0_MVPCONTROL
+	ori	t1, t1, MVPCONTROL_VPC
+	mtc0	t1, CP0_MVPCONTROL
+	ehb
+
+	/* Loop through each VPE */
+	lw	t6, COREBOOTCFG_VPEMASK(t0)
+	move	t8, t6
+	li	t5, 0
+
+	/* Check whether the VPE should be running. If not, skip it */
+1:	andi	t0, t6, 1
+	beqz	t0, 2f
+	nop
+
+	/* Operate on the appropriate TC */
+	mfc0	t0, CP0_VPECONTROL
+	ori	t0, t0, VPECONTROL_TARGTC
+	xori	t0, t0, VPECONTROL_TARGTC
+	or	t0, t0, t5
+	mtc0	t0, CP0_VPECONTROL
+	ehb
+
+	/* Skip the VPE if its TC is not halted */
+	mftc0	t0, CP0_TCHALT
+	beqz	t0, 2f
+	nop
+
+	/* Calculate a pointer to the VPEs struct vpe_boot_config */
+	li	t0, VPEBOOTCFG_SIZE
+	mul	t0, t0, t5
+	addu	t0, t0, t7
+
+	/* Set the TC restart PC */
+	lw	t1, VPEBOOTCFG_PC(t0)
+	mttc0	t1, CP0_TCRESTART
+
+	/* Set the TC stack pointer */
+	lw	t1, VPEBOOTCFG_SP(t0)
+	mttgpr	t1, sp
+
+	/* Set the TC global pointer */
+	lw	t1, VPEBOOTCFG_GP(t0)
+	mttgpr	t1, gp
+
+	/* Copy config from this VPE */
+	mfc0	t0, CP0_CONFIG
+	mttc0	t0, CP0_CONFIG
+
+	/* Ensure no software interrupts are pending */
+	mttc0	zero, CP0_CAUSE
+	mttc0	zero, CP0_STATUS
+
+	/* Set TC active, not interrupt exempt */
+	mftc0	t0, CP0_TCSTATUS
+	li	t1, ~TCSTATUS_IXMT
+	and	t0, t0, t1
+	ori	t0, t0, TCSTATUS_A
+	mttc0	t0, CP0_TCSTATUS
+
+	/* Clear the TC halt bit */
+	mttc0	zero, CP0_TCHALT
+
+	/* Set VPE active */
+	mftc0	t0, CP0_VPECONF0
+	ori	t0, t0, VPECONF0_VPA
+	mttc0	t0, CP0_VPECONF0
+
+	/* Next VPE */
+2:	srl	t6, t6, 1
+	addi	t5, t5, 1
+	bnez	t6, 1b
+	nop
+
+	/* Leave VPE configuration state */
+	mfc0	t1, CP0_MVPCONTROL
+	xori	t1, t1, MVPCONTROL_VPC
+	mtc0	t1, CP0_MVPCONTROL
+	ehb
+	evpe
+
+	/* Check whether this VPE is meant to be running */
+	li	t0, 1
+	sll	t0, t0, t9
+	and	t0, t0, t8
+	bnez	t0, 2f
+	nop
+
+	/* This VPE should be offline, halt the TC */
+	li	t0, TCHALT_H
+	mtc0	t0, CP0_TCHALT
+	la	t0, 1f
+1:	jr.hb	t0
+	nop
+
+2:	.set	pop
+
+#endif /* CONFIG_MIPS_MT */
+
+	/* Return */
+	jr	ra
+	nop
+	END(mips_cps_boot_vpes)
+
+#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
+
+	/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
+	.macro	psstate	dest
+	.set	push
+	.set	noat
+	lw	$1, TI_CPU(gp)
+	sll	$1, $1, LONGLOG
+	la	\dest, __per_cpu_offset
+	addu	$1, $1, \dest
+	lw	$1, 0($1)
+	la	\dest, cps_cpu_state
+	addu	\dest, \dest, $1
+	.set	pop
+	.endm
+
+LEAF(mips_cps_pm_save)
+	/* Save CPU state */
+	SUSPEND_SAVE_REGS
+	psstate	t1
+	SUSPEND_SAVE_STATIC
+	jr	v0
+	nop
+	END(mips_cps_pm_save)
+
+LEAF(mips_cps_pm_restore)
+	/* Restore CPU state */
+	psstate	t1
+	RESUME_RESTORE_STATIC
+	RESUME_RESTORE_REGS_RETURN
+	END(mips_cps_pm_restore)
+
+#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index c4ceccfa3828..09ce45980758 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -236,3 +236,14 @@ void arch_cpu_idle(void)
 	else
 		local_irq_enable();
 }
+
+#ifdef CONFIG_CPU_IDLE
+
+int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+			    struct cpuidle_driver *drv, int index)
+{
+	arch_cpu_idle();
+	return index;
+}
+
+#endif
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 8520dad6d4e3..88e4c323382c 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -54,6 +54,21 @@ void gic_write_compare(cycle_t cnt)
 		  (int)(cnt & 0xffffffff));
 }
 
+void gic_write_cpu_compare(cycle_t cnt, int cpu)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
+	GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
+		 (int)(cnt >> 32));
+	GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
+		 (int)(cnt & 0xffffffff));
+
+	local_irq_restore(flags);
+}
+
 cycle_t gic_read_compare(void)
 {
 	unsigned int hi, lo;
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index c9dc67402969..ba473608a347 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -9,12 +9,18 @@
  */
 
 #include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
 
 #include <asm/mips-cm.h>
 #include <asm/mips-cpc.h>
 
 void __iomem *mips_cpc_base;
 
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
+
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+
 phys_t __weak mips_cpc_phys_base(void)
 {
 	u32 cpc_base;
@@ -39,6 +45,10 @@ phys_t __weak mips_cpc_phys_base(void)
 int mips_cpc_probe(void)
 {
 	phys_t addr;
+	unsigned cpu;
+
+	for_each_possible_cpu(cpu)
+		spin_lock_init(&per_cpu(cpc_core_lock, cpu));
 
 	addr = mips_cpc_phys_base();
 	if (!addr)
@@ -50,3 +60,21 @@ int mips_cpc_probe(void)
 
 	return 0;
 }
+
+void mips_cpc_lock_other(unsigned int core)
+{
+	unsigned curr_core;
+	preempt_disable();
+	curr_core = current_cpu_data.core;
+	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
+			  per_cpu(cpc_core_lock_flags, curr_core));
+	write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
+}
+
+void mips_cpc_unlock_other(void)
+{
+	unsigned curr_core = current_cpu_data.core;
+	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
+			       per_cpu(cpc_core_lock_flags, curr_core));
+	preempt_enable();
+}
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c new file mode 100644 index 000000000000..5aa4c6f8cf83 --- /dev/null +++ b/arch/mips/kernel/pm-cps.c | |||
@@ -0,0 +1,716 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Imagination Technologies | ||
3 | * Author: Paul Burton <paul.burton@imgtec.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License as published by the | ||
7 | * Free Software Foundation; either version 2 of the License, or (at your | ||
8 | * option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/percpu.h> | ||
13 | #include <linux/slab.h> | ||
14 | |||
15 | #include <asm/asm-offsets.h> | ||
16 | #include <asm/cacheflush.h> | ||
17 | #include <asm/cacheops.h> | ||
18 | #include <asm/idle.h> | ||
19 | #include <asm/mips-cm.h> | ||
20 | #include <asm/mips-cpc.h> | ||
21 | #include <asm/mipsmtregs.h> | ||
22 | #include <asm/pm.h> | ||
23 | #include <asm/pm-cps.h> | ||
24 | #include <asm/smp-cps.h> | ||
25 | #include <asm/uasm.h> | ||
26 | |||
27 | /* | ||
28 | * cps_nc_entry_fn - type of a generated non-coherent state entry function | ||
29 | * @online: the count of online coupled VPEs | ||
30 | * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count | ||
31 | * | ||
32 | * The code entering & exiting non-coherent states is generated at runtime | ||
33 | * using uasm, in order to ensure that the compiler cannot insert a stray | ||
34 | * memory access at an unfortunate time and to allow the generation of optimal | ||
35 | * core-specific code particularly for cache routines. If coupled_coherence | ||
36 | * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state, | ||
37 | * returns the number of VPEs that were in the wait state at the point this | ||
38 | * VPE left it. Returns garbage if coupled_coherence is zero or this is not | ||
39 | * the entry function for CPS_PM_NC_WAIT. | ||
40 | */ | ||
41 | typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count); | ||
42 | |||
43 | /* | ||
44 | * The entry point of the generated non-coherent idle state entry/exit | ||
45 | * functions. Actually per-core rather than per-CPU. | ||
46 | */ | ||
47 | static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT], | ||
48 | nc_asm_enter); | ||
49 | |||
50 | /* Bitmap indicating which states are supported by the system */ | ||
51 | DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); | ||
52 | |||
53 | /* | ||
54 | * Indicates the number of coupled VPEs ready to operate in a non-coherent | ||
55 | * state. Actually per-core rather than per-CPU. | ||
56 | */ | ||
57 | static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); | ||
58 | static DEFINE_PER_CPU_ALIGNED(void*, ready_count_alloc); | ||
59 | |||
60 | /* Indicates online CPUs coupled with the current CPU */ | ||
61 | static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); | ||
62 | |||
63 | /* | ||
64 | * Used to synchronize entry to deep idle states. Actually per-core rather | ||
65 | * than per-CPU. | ||
66 | */ | ||
67 | static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); | ||
68 | |||
69 | /* Saved CPU state across the CPS_PM_POWER_GATED state */ | ||
70 | DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state); | ||
71 | |||
72 | /* A somewhat arbitrary number of labels & relocs for uasm */ | ||
73 | static struct uasm_label labels[32] __initdata; | ||
74 | static struct uasm_reloc relocs[32] __initdata; | ||
75 | |||
76 | /* CPU dependant sync types */ | ||
77 | static unsigned stype_intervention; | ||
78 | static unsigned stype_memory; | ||
79 | static unsigned stype_ordering; | ||
80 | |||
81 | enum mips_reg { | ||
82 | zero, at, v0, v1, a0, a1, a2, a3, | ||
83 | t0, t1, t2, t3, t4, t5, t6, t7, | ||
84 | s0, s1, s2, s3, s4, s5, s6, s7, | ||
85 | t8, t9, k0, k1, gp, sp, fp, ra, | ||
86 | }; | ||
87 | |||
88 | bool cps_pm_support_state(enum cps_pm_state state) | ||
89 | { | ||
90 | return test_bit(state, state_support); | ||
91 | } | ||
92 | |||
93 | static void coupled_barrier(atomic_t *a, unsigned online) | ||
94 | { | ||
95 | /* | ||
96 | * This function is effectively the same as | ||
97 | * cpuidle_coupled_parallel_barrier, which can't be used here since | ||
98 | * there's no cpuidle device. | ||
99 | */ | ||
100 | |||
101 | if (!coupled_coherence) | ||
102 | return; | ||
103 | |||
104 | smp_mb__before_atomic_inc(); | ||
105 | atomic_inc(a); | ||
106 | |||
107 | while (atomic_read(a) < online) | ||
108 | cpu_relax(); | ||
109 | |||
110 | if (atomic_inc_return(a) == online * 2) { | ||
111 | atomic_set(a, 0); | ||
112 | return; | ||
113 | } | ||
114 | |||
115 | while (atomic_read(a) > online) | ||
116 | cpu_relax(); | ||
117 | } | ||
118 | |||
119 | int cps_pm_enter_state(enum cps_pm_state state) | ||
120 | { | ||
121 | unsigned cpu = smp_processor_id(); | ||
122 | unsigned core = current_cpu_data.core; | ||
123 | unsigned online, left; | ||
124 | cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); | ||
125 | u32 *core_ready_count, *nc_core_ready_count; | ||
126 | void *nc_addr; | ||
127 | cps_nc_entry_fn entry; | ||
128 | struct core_boot_config *core_cfg; | ||
129 | struct vpe_boot_config *vpe_cfg; | ||
130 | |||
131 | /* Check that there is an entry function for this state */ | ||
132 | entry = per_cpu(nc_asm_enter, core)[state]; | ||
133 | if (!entry) | ||
134 | return -EINVAL; | ||
135 | |||
136 | /* Calculate which coupled CPUs (VPEs) are online */ | ||
137 | #ifdef CONFIG_MIPS_MT | ||
138 | if (cpu_online(cpu)) { | ||
139 | cpumask_and(coupled_mask, cpu_online_mask, | ||
140 | &cpu_sibling_map[cpu]); | ||
141 | online = cpumask_weight(coupled_mask); | ||
142 | cpumask_clear_cpu(cpu, coupled_mask); | ||
143 | } else | ||
144 | #endif | ||
145 | { | ||
146 | cpumask_clear(coupled_mask); | ||
147 | online = 1; | ||
148 | } | ||
149 | |||
150 | /* Setup the VPE to run mips_cps_pm_restore when started again */ | ||
151 | if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { | ||
152 | core_cfg = &mips_cps_core_bootcfg[core]; | ||
153 | vpe_cfg = &core_cfg->vpe_config[current_cpu_data.vpe_id]; | ||
154 | vpe_cfg->pc = (unsigned long)mips_cps_pm_restore; | ||
155 | vpe_cfg->gp = (unsigned long)current_thread_info(); | ||
156 | vpe_cfg->sp = 0; | ||
157 | } | ||
158 | |||
159 | /* Indicate that this CPU might not be coherent */ | ||
160 | cpumask_clear_cpu(cpu, &cpu_coherent_mask); | ||
161 | smp_mb__after_clear_bit(); | ||
162 | |||
163 | /* Create a non-coherent mapping of the core ready_count */ | ||
164 | core_ready_count = per_cpu(ready_count, core); | ||
165 | nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), | ||
166 | (unsigned long)core_ready_count); | ||
167 | nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); | ||
168 | nc_core_ready_count = nc_addr; | ||
169 | |||
170 | /* Ensure ready_count is zero-initialised before the assembly runs */ | ||
171 | ACCESS_ONCE(*nc_core_ready_count) = 0; | ||
172 | coupled_barrier(&per_cpu(pm_barrier, core), online); | ||
173 | |||
174 | /* Run the generated entry code */ | ||
175 | left = entry(online, nc_core_ready_count); | ||
176 | |||
177 | /* Remove the non-coherent mapping of ready_count */ | ||
178 | kunmap_noncoherent(); | ||
179 | |||
180 | /* Indicate that this CPU is definitely coherent */ | ||
181 | cpumask_set_cpu(cpu, &cpu_coherent_mask); | ||
182 | |||
183 | /* | ||
184 | * If this VPE is the first to leave the non-coherent wait state then | ||
185 | * it needs to wake up any coupled VPEs still running their wait | ||
186 | * instruction so that they return to cpuidle, which can then complete | ||
187 | * coordination between the coupled VPEs & provide the governor with | ||
188 | * a chance to reflect on the length of time the VPEs were in the | ||
189 | * idle state. | ||
190 | */ | ||
191 | if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online)) | ||
192 | arch_send_call_function_ipi_mask(coupled_mask); | ||
193 | |||
194 | return 0; | ||
195 | } | ||
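cps_pm_enter_state() is the C entry point that the idle and hotplug paths call into. A hedged sketch of how an idle driver might wrap it follows; cps_pm_support_state(), cps_pm_enter_state() and CPS_PM_NC_WAIT are from this patch, but the cpuidle glue around them is assumed, not taken from this series.

#include <linux/cpuidle.h>
#include <asm/pm-cps.h>

/* Illustrative cpuidle enter hook, not part of this series */
static int nc_wait_enter(struct cpuidle_device *dev,
			 struct cpuidle_driver *drv, int index)
{
	int err;

	/* Bail out if code generation for the state failed earlier */
	if (!cps_pm_support_state(CPS_PM_NC_WAIT))
		return -EINVAL;

	err = cps_pm_enter_state(CPS_PM_NC_WAIT);
	return err ? err : index;
}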
196 | |||
197 | static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, | ||
198 | struct uasm_reloc **pr, | ||
199 | const struct cache_desc *cache, | ||
200 | unsigned op, int lbl) | ||
201 | { | ||
202 | unsigned cache_size = cache->ways << cache->waybit; | ||
203 | unsigned i; | ||
204 | const unsigned unroll_lines = 32; | ||
205 | |||
206 | /* If the cache isn't present this function has it easy */ | ||
207 | if (cache->flags & MIPS_CACHE_NOT_PRESENT) | ||
208 | return; | ||
209 | |||
210 | /* Load base address */ | ||
211 | UASM_i_LA(pp, t0, (long)CKSEG0); | ||
212 | |||
213 | /* Calculate end address */ | ||
214 | if (cache_size < 0x8000) | ||
215 | uasm_i_addiu(pp, t1, t0, cache_size); | ||
216 | else | ||
217 | UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size)); | ||
218 | |||
219 | /* Start of cache op loop */ | ||
220 | uasm_build_label(pl, *pp, lbl); | ||
221 | |||
222 | /* Generate the cache ops */ | ||
223 | for (i = 0; i < unroll_lines; i++) | ||
224 | uasm_i_cache(pp, op, i * cache->linesz, t0); | ||
225 | |||
226 | /* Update the base address */ | ||
227 | uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz); | ||
228 | |||
229 | /* Loop if we haven't reached the end address yet */ | ||
230 | uasm_il_bne(pp, pr, t0, t1, lbl); | ||
231 | uasm_i_nop(pp); | ||
232 | } | ||
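The uasm calls above emit a simple index-op loop over the whole cache, unrolled 32 lines per iteration. Rendered as plain C, with cache_op() standing in for the MIPS cache instruction, the generated code is equivalent to:

/* cache_op() stands in for the MIPS cache instruction */
void model_cache_routine(unsigned long base, unsigned cache_size,
			 unsigned linesz, void (*cache_op)(unsigned long))
{
	const unsigned unroll_lines = 32;
	unsigned long addr = base;		/* t0: current address */
	unsigned long end = base + cache_size;	/* t1: end address     */
	unsigned i;

	do {
		/* 32 unrolled cache ops per pass */
		for (i = 0; i < unroll_lines; i++)
			cache_op(addr + i * linesz);
		addr += unroll_lines * linesz;	/* update base address */
	} while (addr != end);			/* bne t0, t1, lbl     */
}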
233 | |||
234 | static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, | ||
235 | struct uasm_reloc **pr, | ||
236 | const struct cpuinfo_mips *cpu_info, | ||
237 | int lbl) | ||
238 | { | ||
239 | unsigned i, fsb_size = 8; | ||
240 | unsigned num_loads = (fsb_size * 3) / 2; | ||
241 | unsigned line_stride = 2; | ||
242 | unsigned line_size = cpu_info->dcache.linesz; | ||
243 | unsigned perf_counter, perf_event; | ||
244 | unsigned revision = cpu_info->processor_id & PRID_REV_MASK; | ||
245 | |||
246 | /* | ||
247 | * Determine whether this CPU requires an FSB flush, and if so which | ||
248 | * performance counter/event reflect stalls due to a full FSB. | ||
249 | */ | ||
250 | switch (__get_cpu_type(cpu_info->cputype)) { | ||
251 | case CPU_INTERAPTIV: | ||
252 | perf_counter = 1; | ||
253 | perf_event = 51; | ||
254 | break; | ||
255 | |||
256 | case CPU_PROAPTIV: | ||
257 | /* Newer proAptiv cores don't require this workaround */ | ||
258 | if (revision >= PRID_REV_ENCODE_332(1, 1, 0)) | ||
259 | return 0; | ||
260 | |||
261 | /* On older ones it's unavailable */ | ||
262 | return -1; | ||
263 | |||
264 | /* CPUs which do not require the workaround */ | ||
265 | case CPU_P5600: | ||
266 | return 0; | ||
267 | |||
268 | default: | ||
269 | WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n"); | ||
270 | return -1; | ||
271 | } | ||
272 | |||
273 | /* | ||
274 | * Ensure that the fill/store buffer (FSB) is not holding the results | ||
275 | * of a prefetch, since if it is then the CPC sequencer may become | ||
276 | * stuck in the D3 (ClrBus) state whilst entering a low power state. | ||
277 | */ | ||
278 | |||
279 | /* Preserve perf counter setup */ | ||
280 | uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ | ||
281 | uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ | ||
282 | |||
283 | /* Setup perf counter to count FSB full pipeline stalls */ | ||
284 | uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf); | ||
285 | uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ | ||
286 | uasm_i_ehb(pp); | ||
287 | uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */ | ||
288 | uasm_i_ehb(pp); | ||
289 | |||
290 | /* Base address for loads */ | ||
291 | UASM_i_LA(pp, t0, (long)CKSEG0); | ||
292 | |||
293 | /* Start of clear loop */ | ||
294 | uasm_build_label(pl, *pp, lbl); | ||
295 | |||
296 | /* Perform some loads to fill the FSB */ | ||
297 | for (i = 0; i < num_loads; i++) | ||
298 | uasm_i_lw(pp, zero, i * line_size * line_stride, t0); | ||
299 | |||
300 | /* | ||
301 | * Invalidate the new D-cache entries so that the cache will need | ||
302 | * refilling (via the FSB) if the loop is executed again. | ||
303 | */ | ||
304 | for (i = 0; i < num_loads; i++) { | ||
305 | uasm_i_cache(pp, Hit_Invalidate_D, | ||
306 | i * line_size * line_stride, t0); | ||
307 | uasm_i_cache(pp, Hit_Writeback_Inv_SD, | ||
308 | i * line_size * line_stride, t0); | ||
309 | } | ||
310 | |||
311 | /* Completion barrier */ | ||
312 | uasm_i_sync(pp, stype_memory); | ||
313 | uasm_i_ehb(pp); | ||
314 | |||
315 | /* Check whether the pipeline stalled due to the FSB being full */ | ||
316 | uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */ | ||
317 | |||
318 | /* Loop if it didn't */ | ||
319 | uasm_il_beqz(pp, pr, t1, lbl); | ||
320 | uasm_i_nop(pp); | ||
321 | |||
322 | /* Restore perf counter 1. The count may well now be wrong... */ | ||
323 | uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ | ||
324 | uasm_i_ehb(pp); | ||
325 | uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ | ||
326 | uasm_i_ehb(pp); | ||
327 | |||
328 | return 0; | ||
329 | } | ||
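The generated sequence repeatedly fills the FSB with loads, invalidates those lines so the next pass must refill through the FSB, and loops until the perf counter records at least one FSB-full stall. A compilable model of that control flow is below; the perf counter is a plain variable and the loads/invalidates are stubs, so only the loop structure mirrors the uasm output.

#include <stdio.h>

static unsigned long perf_cnt;	/* models PerfCntN */
static unsigned passes;

static void fill_fsb(unsigned num_loads) { (void)num_loads; }
static void invalidate_lines(unsigned num_loads) { (void)num_loads; }

static unsigned long read_stall_count(void)
{
	/* Pretend an FSB-full stall is finally observed on pass 3 */
	return ++passes >= 3 ? 1 : 0;
}

int main(void)
{
	const unsigned fsb_size = 8;
	const unsigned num_loads = (fsb_size * 3) / 2;

	perf_cnt = 0;				/* mtc0 zero -> PerfCntN */
	do {
		fill_fsb(num_loads);		/* the lw loop           */
		invalidate_lines(num_loads);	/* the cache op loop     */
		perf_cnt = read_stall_count();	/* mfc0 PerfCntN         */
	} while (!perf_cnt);			/* beqz -> loop again    */

	printf("FSB drained after %u passes\n", passes);
	return 0;
}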
330 | |||
331 | static void __init cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl, | ||
332 | struct uasm_reloc **pr, | ||
333 | unsigned r_addr, int lbl) | ||
334 | { | ||
335 | uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000)); | ||
336 | uasm_build_label(pl, *pp, lbl); | ||
337 | uasm_i_ll(pp, t1, 0, r_addr); | ||
338 | uasm_i_or(pp, t1, t1, t0); | ||
339 | uasm_i_sc(pp, t1, 0, r_addr); | ||
340 | uasm_il_beqz(pp, pr, t1, lbl); | ||
341 | uasm_i_nop(pp); | ||
342 | } | ||
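The ll/or/sc sequence retries until the store-conditional succeeds, atomically setting bit 31 of ready_count. In portable C this is a compare-and-swap loop; a sketch that models the retry-on-failed-sc behaviour rather than the exact instructions:

#include <stdatomic.h>
#include <stdint.h>

void set_top_bit(_Atomic uint32_t *ready_count)
{
	uint32_t old, val;

	do {
		old = atomic_load(ready_count);		/* ll t1, 0(addr)  */
		val = old | 0x80000000u;		/* or t1, t1, t0   */
	} while (!atomic_compare_exchange_weak(ready_count, &old, val));
							/* sc + beqz retry */
}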
343 | |||
344 | static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) | ||
345 | { | ||
346 | struct uasm_label *l = labels; | ||
347 | struct uasm_reloc *r = relocs; | ||
348 | u32 *buf, *p; | ||
349 | const unsigned r_online = a0; | ||
350 | const unsigned r_nc_count = a1; | ||
351 | const unsigned r_pcohctl = t7; | ||
352 | const unsigned max_instrs = 256; | ||
353 | unsigned cpc_cmd; | ||
354 | int err; | ||
355 | enum { | ||
356 | lbl_incready = 1, | ||
357 | lbl_poll_cont, | ||
358 | lbl_secondary_hang, | ||
359 | lbl_disable_coherence, | ||
360 | lbl_flush_fsb, | ||
361 | lbl_invicache, | ||
362 | lbl_flushdcache, | ||
363 | lbl_hang, | ||
364 | lbl_set_cont, | ||
365 | lbl_secondary_cont, | ||
366 | lbl_decready, | ||
367 | }; | ||
368 | |||
369 | /* Allocate a buffer to hold the generated code */ | ||
370 | p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL); | ||
371 | if (!buf) | ||
372 | return NULL; | ||
373 | |||
374 | /* Clear labels & relocs ready for (re)use */ | ||
375 | memset(labels, 0, sizeof(labels)); | ||
376 | memset(relocs, 0, sizeof(relocs)); | ||
377 | |||
378 | if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { | ||
379 | /* | ||
380 | * Save CPU state. Note the non-standard calling convention | ||
381 | * with the return address placed in v0 to avoid clobbering | ||
382 | * the ra register before it is saved. | ||
383 | */ | ||
384 | UASM_i_LA(&p, t0, (long)mips_cps_pm_save); | ||
385 | uasm_i_jalr(&p, v0, t0); | ||
386 | uasm_i_nop(&p); | ||
387 | } | ||
388 | |||
389 | /* | ||
390 | * Load addresses of required CM & CPC registers. This is done early | ||
391 | * because they're needed in both the enable & disable coherence steps | ||
392 | * but in the coupled case the enable step will only run on one VPE. | ||
393 | */ | ||
394 | UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence()); | ||
395 | |||
396 | if (coupled_coherence) { | ||
397 | /* Increment ready_count */ | ||
398 | uasm_i_sync(&p, stype_ordering); | ||
399 | uasm_build_label(&l, p, lbl_incready); | ||
400 | uasm_i_ll(&p, t1, 0, r_nc_count); | ||
401 | uasm_i_addiu(&p, t2, t1, 1); | ||
402 | uasm_i_sc(&p, t2, 0, r_nc_count); | ||
403 | uasm_il_beqz(&p, &r, t2, lbl_incready); | ||
404 | uasm_i_addiu(&p, t1, t1, 1); | ||
405 | |||
406 | /* Ordering barrier */ | ||
407 | uasm_i_sync(&p, stype_ordering); | ||
408 | |||
409 | /* | ||
410 | * If this is the last VPE to become ready for non-coherence | ||
411 | * then it should branch below. | ||
412 | */ | ||
413 | uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence); | ||
414 | uasm_i_nop(&p); | ||
415 | |||
416 | if (state < CPS_PM_POWER_GATED) { | ||
417 | /* | ||
418 | * Otherwise this is not the last VPE to become ready | ||
419 | * for non-coherence. It needs to wait until coherence | ||
420 | * has been disabled before proceeding, which it will do | ||
421 | * by polling for the top bit of ready_count being set. | ||
422 | */ | ||
423 | uasm_i_addiu(&p, t1, zero, -1); | ||
424 | uasm_build_label(&l, p, lbl_poll_cont); | ||
425 | uasm_i_lw(&p, t0, 0, r_nc_count); | ||
426 | uasm_il_bltz(&p, &r, t0, lbl_secondary_cont); | ||
427 | uasm_i_ehb(&p); | ||
428 | uasm_i_yield(&p, zero, t1); | ||
429 | uasm_il_b(&p, &r, lbl_poll_cont); | ||
430 | uasm_i_nop(&p); | ||
431 | } else { | ||
432 | /* | ||
433 | * The core will lose power & this VPE will not continue | ||
434 | * so it can simply halt here. | ||
435 | */ | ||
436 | uasm_i_addiu(&p, t0, zero, TCHALT_H); | ||
437 | uasm_i_mtc0(&p, t0, 2, 4); | ||
438 | uasm_build_label(&l, p, lbl_secondary_hang); | ||
439 | uasm_il_b(&p, &r, lbl_secondary_hang); | ||
440 | uasm_i_nop(&p); | ||
441 | } | ||
442 | } | ||
443 | |||
444 | /* | ||
445 | * This is the point of no return - this VPE will now proceed to | ||
446 | * disable coherence. At this point we *must* be sure that no other | ||
447 | * VPE within the core will interfere with the L1 dcache. | ||
448 | */ | ||
449 | uasm_build_label(&l, p, lbl_disable_coherence); | ||
450 | |||
451 | /* Invalidate the L1 icache */ | ||
452 | cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache, | ||
453 | Index_Invalidate_I, lbl_invicache); | ||
454 | |||
455 | /* Writeback & invalidate the L1 dcache */ | ||
456 | cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache, | ||
457 | Index_Writeback_Inv_D, lbl_flushdcache); | ||
458 | |||
459 | /* Completion barrier */ | ||
460 | uasm_i_sync(&p, stype_memory); | ||
461 | uasm_i_ehb(&p); | ||
462 | |||
463 | /* | ||
464 | * Disable all but self interventions. The load from COHCTL is defined | ||
465 | * by the interAptiv & proAptiv SUMs as ensuring that the operation | ||
466 | * resulting from the preceding store is complete. | ||
467 | */ | ||
468 | uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core); | ||
469 | uasm_i_sw(&p, t0, 0, r_pcohctl); | ||
470 | uasm_i_lw(&p, t0, 0, r_pcohctl); | ||
471 | |||
472 | /* Sync to ensure previous interventions are complete */ | ||
473 | uasm_i_sync(&p, stype_intervention); | ||
474 | uasm_i_ehb(&p); | ||
475 | |||
476 | /* Disable coherence */ | ||
477 | uasm_i_sw(&p, zero, 0, r_pcohctl); | ||
478 | uasm_i_lw(&p, t0, 0, r_pcohctl); | ||
479 | |||
480 | if (state >= CPS_PM_CLOCK_GATED) { | ||
481 | err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], | ||
482 | lbl_flush_fsb); | ||
483 | if (err) | ||
484 | goto out_err; | ||
485 | |||
486 | /* Determine the CPC command to issue */ | ||
487 | switch (state) { | ||
488 | case CPS_PM_CLOCK_GATED: | ||
489 | cpc_cmd = CPC_Cx_CMD_CLOCKOFF; | ||
490 | break; | ||
491 | case CPS_PM_POWER_GATED: | ||
492 | cpc_cmd = CPC_Cx_CMD_PWRDOWN; | ||
493 | break; | ||
494 | default: | ||
495 | BUG(); | ||
496 | goto out_err; | ||
497 | } | ||
498 | |||
499 | /* Issue the CPC command */ | ||
500 | UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd()); | ||
501 | uasm_i_addiu(&p, t1, zero, cpc_cmd); | ||
502 | uasm_i_sw(&p, t1, 0, t0); | ||
503 | |||
504 | if (state == CPS_PM_POWER_GATED) { | ||
505 | /* If anything goes wrong just hang */ | ||
506 | uasm_build_label(&l, p, lbl_hang); | ||
507 | uasm_il_b(&p, &r, lbl_hang); | ||
508 | uasm_i_nop(&p); | ||
509 | |||
510 | /* | ||
511 | * There's no point generating more code; the core is | ||
512 | * powered down & if powered back up will run from the | ||
513 | * reset vector not from here. | ||
514 | */ | ||
515 | goto gen_done; | ||
516 | } | ||
517 | |||
518 | /* Completion barrier */ | ||
519 | uasm_i_sync(&p, stype_memory); | ||
520 | uasm_i_ehb(&p); | ||
521 | } | ||
522 | |||
523 | if (state == CPS_PM_NC_WAIT) { | ||
524 | /* | ||
525 | * At this point it is safe for all VPEs to proceed with | ||
526 | * execution. This VPE will set the top bit of ready_count | ||
527 | * to indicate to the other VPEs that they may continue. | ||
528 | */ | ||
529 | if (coupled_coherence) | ||
530 | cps_gen_set_top_bit(&p, &l, &r, r_nc_count, | ||
531 | lbl_set_cont); | ||
532 | |||
533 | /* | ||
534 | * VPEs which did not disable coherence will continue | ||
535 | * executing, after coherence has been disabled, from this | ||
536 | * point. | ||
537 | */ | ||
538 | uasm_build_label(&l, p, lbl_secondary_cont); | ||
539 | |||
540 | /* Now perform our wait */ | ||
541 | uasm_i_wait(&p, 0); | ||
542 | } | ||
543 | |||
544 | /* | ||
545 | * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs | ||
546 | * will run this. The first will actually re-enable coherence & the | ||
547 | * rest will just be performing a rather unusual nop. | ||
548 | */ | ||
549 | uasm_i_addiu(&p, t0, zero, CM_GCR_Cx_COHERENCE_COHDOMAINEN_MSK); | ||
550 | uasm_i_sw(&p, t0, 0, r_pcohctl); | ||
551 | uasm_i_lw(&p, t0, 0, r_pcohctl); | ||
552 | |||
553 | /* Completion barrier */ | ||
554 | uasm_i_sync(&p, stype_memory); | ||
555 | uasm_i_ehb(&p); | ||
556 | |||
557 | if (coupled_coherence && (state == CPS_PM_NC_WAIT)) { | ||
558 | /* Decrement ready_count */ | ||
559 | uasm_build_label(&l, p, lbl_decready); | ||
560 | uasm_i_sync(&p, stype_ordering); | ||
561 | uasm_i_ll(&p, t1, 0, r_nc_count); | ||
562 | uasm_i_addiu(&p, t2, t1, -1); | ||
563 | uasm_i_sc(&p, t2, 0, r_nc_count); | ||
564 | uasm_il_beqz(&p, &r, t2, lbl_decready); | ||
565 | uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1); | ||
566 | |||
567 | /* Ordering barrier */ | ||
568 | uasm_i_sync(&p, stype_ordering); | ||
569 | } | ||
570 | |||
571 | if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) { | ||
572 | /* | ||
573 | * At this point it is safe for all VPEs to proceed with | ||
574 | * execution. This VPE will set the top bit of ready_count | ||
575 | * to indicate to the other VPEs that they may continue. | ||
576 | */ | ||
577 | cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont); | ||
578 | |||
579 | /* | ||
580 | * This core will be reliant upon another core sending a | ||
581 | * power-up command to the CPC in order to resume operation. | ||
582 | * Thus an arbitrary VPE can't trigger the core leaving the | ||
583 | * idle state and the one that disables coherence might as well | ||
584 | * be the one to re-enable it. The rest will continue from here | ||
585 | * after that has been done. | ||
586 | */ | ||
587 | uasm_build_label(&l, p, lbl_secondary_cont); | ||
588 | |||
589 | /* Ordering barrier */ | ||
590 | uasm_i_sync(&p, stype_ordering); | ||
591 | } | ||
592 | |||
593 | /* The core is coherent, time to return to C code */ | ||
594 | uasm_i_jr(&p, ra); | ||
595 | uasm_i_nop(&p); | ||
596 | |||
597 | gen_done: | ||
598 | /* Ensure the code didn't exceed the resources allocated for it */ | ||
599 | BUG_ON((p - buf) > max_instrs); | ||
600 | BUG_ON((l - labels) > ARRAY_SIZE(labels)); | ||
601 | BUG_ON((r - relocs) > ARRAY_SIZE(relocs)); | ||
602 | |||
603 | /* Patch branch offsets */ | ||
604 | uasm_resolve_relocs(relocs, labels); | ||
605 | |||
606 | /* Flush the icache */ | ||
607 | local_flush_icache_range((unsigned long)buf, (unsigned long)p); | ||
608 | |||
609 | return buf; | ||
610 | out_err: | ||
611 | kfree(buf); | ||
612 | return NULL; | ||
613 | } | ||
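The buffer returned here is consumed by cps_pm_enter_state() above, which casts it to cps_nc_entry_fn and calls straight into it. A sketch of that consumption; the function pointer signature is inferred from the call site, since the real typedef lives in a header not shown in this diff:

#include <stdint.h>

/* Signature inferred from the call in cps_pm_enter_state(); the real
 * typedef is in asm/pm-cps.h, which is not part of this hunk. */
typedef unsigned (*nc_entry_fn)(unsigned online, uint32_t *ready_count);

unsigned run_generated(void *buf, unsigned online, uint32_t *nc_ready_count)
{
	/* buf must already have been icache-flushed, as done above */
	nc_entry_fn entry = (nc_entry_fn)buf;

	return entry(online, nc_ready_count);
}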
614 | |||
615 | static int __init cps_gen_core_entries(unsigned cpu) | ||
616 | { | ||
617 | enum cps_pm_state state; | ||
618 | unsigned core = cpu_data[cpu].core; | ||
619 | unsigned dlinesz = cpu_data[cpu].dcache.linesz; | ||
620 | void *entry_fn, *core_rc; | ||
621 | |||
622 | for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { | ||
623 | if (per_cpu(nc_asm_enter, core)[state]) | ||
624 | continue; | ||
625 | if (!test_bit(state, state_support)) | ||
626 | continue; | ||
627 | |||
628 | entry_fn = cps_gen_entry_code(cpu, state); | ||
629 | if (!entry_fn) { | ||
630 | pr_err("Failed to generate core %u state %u entry\n", | ||
631 | core, state); | ||
632 | clear_bit(state, state_support); | ||
633 | } | ||
634 | |||
635 | per_cpu(nc_asm_enter, core)[state] = entry_fn; | ||
636 | } | ||
637 | |||
638 | if (!per_cpu(ready_count, core)) { | ||
639 | core_rc = kmalloc(dlinesz * 2, GFP_KERNEL); | ||
640 | if (!core_rc) { | ||
641 | pr_err("Failed allocate core %u ready_count\n", core); | ||
642 | return -ENOMEM; | ||
643 | } | ||
644 | per_cpu(ready_count_alloc, core) = core_rc; | ||
645 | |||
646 | /* Ensure ready_count is aligned to a cacheline boundary */ | ||
647 | core_rc += dlinesz - 1; | ||
648 | core_rc = (void *)((unsigned long)core_rc & ~(dlinesz - 1)); | ||
649 | per_cpu(ready_count, core) = core_rc; | ||
650 | } | ||
651 | |||
652 | return 0; | ||
653 | } | ||
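ready_count must sit alone on a cache line so the non-coherent mapping made in cps_pm_enter_state() behaves predictably, hence the over-allocate-and-round-up above. The same trick in isolation, assuming dlinesz is a power of two (as MIPS cache line sizes are):

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

void *alloc_line_aligned(size_t dlinesz, void **to_free)
{
	void *p = malloc(dlinesz * 2);	/* one spare line of slack */

	if (!p)
		return NULL;
	*to_free = p;			/* keep the raw pointer for free() */

	/* Round up to the next dlinesz boundary */
	p = (void *)(((uintptr_t)p + dlinesz - 1) & ~(uintptr_t)(dlinesz - 1));
	assert(((uintptr_t)p & (dlinesz - 1)) == 0);
	return p;
}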
654 | |||
655 | static int __init cps_pm_init(void) | ||
656 | { | ||
657 | unsigned cpu; | ||
658 | int err; | ||
659 | |||
660 | /* Detect appropriate sync types for the system */ | ||
661 | switch (current_cpu_data.cputype) { | ||
662 | case CPU_INTERAPTIV: | ||
663 | case CPU_PROAPTIV: | ||
664 | case CPU_M5150: | ||
665 | case CPU_P5600: | ||
666 | stype_intervention = 0x2; | ||
667 | stype_memory = 0x3; | ||
668 | stype_ordering = 0x10; | ||
669 | break; | ||
670 | |||
671 | default: | ||
672 | pr_warn("Power management is using heavyweight sync 0\n"); | ||
673 | } | ||
674 | |||
675 | /* A CM is required for all non-coherent states */ | ||
676 | if (!mips_cm_present()) { | ||
677 | pr_warn("pm-cps: no CM, non-coherent states unavailable\n"); | ||
678 | goto out; | ||
679 | } | ||
680 | |||
681 | /* | ||
682 | * If interrupts were enabled whilst running a wait instruction on a | ||
683 | * non-coherent core then the VPE may end up processing interrupts | ||
684 | * whilst non-coherent. That would be bad. | ||
685 | */ | ||
686 | if (cpu_wait == r4k_wait_irqoff) | ||
687 | set_bit(CPS_PM_NC_WAIT, state_support); | ||
688 | else | ||
689 | pr_warn("pm-cps: non-coherent wait unavailable\n"); | ||
690 | |||
691 | /* Detect whether a CPC is present */ | ||
692 | if (mips_cpc_present()) { | ||
693 | /* Detect whether clock gating is implemented */ | ||
694 | if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL_MSK) | ||
695 | set_bit(CPS_PM_CLOCK_GATED, state_support); | ||
696 | else | ||
697 | pr_warn("pm-cps: CPC does not support clock gating\n"); | ||
698 | |||
699 | /* Power gating is available with CPS SMP & any CPC */ | ||
700 | if (mips_cps_smp_in_use()) | ||
701 | set_bit(CPS_PM_POWER_GATED, state_support); | ||
702 | else | ||
703 | pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n"); | ||
704 | } else { | ||
705 | pr_warn("pm-cps: no CPC, clock & power gating unavailable\n"); | ||
706 | } | ||
707 | |||
708 | for_each_present_cpu(cpu) { | ||
709 | err = cps_gen_core_entries(cpu); | ||
710 | if (err) | ||
711 | return err; | ||
712 | } | ||
713 | out: | ||
714 | return 0; | ||
715 | } | ||
716 | arch_initcall(cps_pm_init); | ||
diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c new file mode 100644 index 000000000000..fefdf39d3df3 --- /dev/null +++ b/arch/mips/kernel/pm.c | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Imagination Technologies Ltd. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License as published by the | ||
6 | * Free Software Foundation; either version 2 of the License, or (at your | ||
7 | * option) any later version. | ||
8 | * | ||
9 | * CPU PM notifiers for saving/restoring general CPU state. | ||
10 | */ | ||
11 | |||
12 | #include <linux/cpu_pm.h> | ||
13 | #include <linux/init.h> | ||
14 | |||
15 | #include <asm/dsp.h> | ||
16 | #include <asm/fpu.h> | ||
17 | #include <asm/mmu_context.h> | ||
18 | #include <asm/pm.h> | ||
19 | #include <asm/watch.h> | ||
20 | |||
21 | /* Used by PM helper macros in asm/pm.h */ | ||
22 | struct mips_static_suspend_state mips_static_suspend_state; | ||
23 | |||
24 | /** | ||
25 | * mips_cpu_save() - Save general CPU state. | ||
26 | * Ensures that general CPU context is saved, notably FPU and DSP. | ||
27 | */ | ||
28 | static int mips_cpu_save(void) | ||
29 | { | ||
30 | /* Save FPU state */ | ||
31 | lose_fpu(1); | ||
32 | |||
33 | /* Save DSP state */ | ||
34 | save_dsp(current); | ||
35 | |||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | /** | ||
40 | * mips_cpu_restore() - Restore general CPU state. | ||
41 | * Restores important CPU context. | ||
42 | */ | ||
43 | static void mips_cpu_restore(void) | ||
44 | { | ||
45 | unsigned int cpu = smp_processor_id(); | ||
46 | |||
47 | /* Restore ASID */ | ||
48 | if (current->mm) | ||
49 | write_c0_entryhi(cpu_asid(cpu, current->mm)); | ||
50 | |||
51 | /* Restore DSP state */ | ||
52 | restore_dsp(current); | ||
53 | |||
54 | /* Restore UserLocal */ | ||
55 | if (cpu_has_userlocal) | ||
56 | write_c0_userlocal(current_thread_info()->tp_value); | ||
57 | |||
58 | /* Restore watch registers */ | ||
59 | __restore_watch(); | ||
60 | } | ||
61 | |||
62 | /** | ||
63 | * mips_pm_notifier() - Notifier for preserving general CPU context. | ||
64 | * @self: Notifier block. | ||
65 | * @cmd: CPU PM event. | ||
66 | * @v: Private data (unused). | ||
67 | * | ||
68 | * This is called when a CPU power management event occurs, and is used to | ||
69 | * ensure that important CPU context is preserved across a CPU power down. | ||
70 | */ | ||
71 | static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd, | ||
72 | void *v) | ||
73 | { | ||
74 | int ret; | ||
75 | |||
76 | switch (cmd) { | ||
77 | case CPU_PM_ENTER: | ||
78 | ret = mips_cpu_save(); | ||
79 | if (ret) | ||
80 | return NOTIFY_STOP; | ||
81 | break; | ||
82 | case CPU_PM_ENTER_FAILED: | ||
83 | case CPU_PM_EXIT: | ||
84 | mips_cpu_restore(); | ||
85 | break; | ||
86 | } | ||
87 | |||
88 | return NOTIFY_OK; | ||
89 | } | ||
90 | |||
91 | static struct notifier_block mips_pm_notifier_block = { | ||
92 | .notifier_call = mips_pm_notifier, | ||
93 | }; | ||
94 | |||
95 | static int __init mips_pm_init(void) | ||
96 | { | ||
97 | return cpu_pm_register_notifier(&mips_pm_notifier_block); | ||
98 | } | ||
99 | arch_initcall(mips_pm_init); | ||
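These notifiers only fire when something brackets a power transition with the generic cpu_pm calls. A hedged sketch of the invoking side, using the cpu_pm_enter()/cpu_pm_exit() API; the platform function around them is illustrative:

#include <linux/cpu_pm.h>

/* Illustrative platform low-power path, not from this series */
static int platform_enter_lowpower(void)
{
	int err;

	err = cpu_pm_enter();	/* fires CPU_PM_ENTER -> mips_cpu_save() */
	if (err)
		return err;

	/* ... power down & later resume here ... */

	cpu_pm_exit();		/* fires CPU_PM_EXIT -> mips_cpu_restore() */
	return 0;
}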
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c index bb36b4e6b55f..df0598d9bfdd 100644 --- a/arch/mips/kernel/smp-cps.c +++ b/arch/mips/kernel/smp-cps.c | |||
@@ -20,104 +20,43 @@ | |||
20 | #include <asm/mips-cpc.h> | 20 | #include <asm/mips-cpc.h> |
21 | #include <asm/mips_mt.h> | 21 | #include <asm/mips_mt.h> |
22 | #include <asm/mipsregs.h> | 22 | #include <asm/mipsregs.h> |
23 | #include <asm/pm-cps.h> | ||
23 | #include <asm/smp-cps.h> | 24 | #include <asm/smp-cps.h> |
24 | #include <asm/time.h> | 25 | #include <asm/time.h> |
25 | #include <asm/uasm.h> | 26 | #include <asm/uasm.h> |
26 | 27 | ||
27 | static DECLARE_BITMAP(core_power, NR_CPUS); | 28 | static DECLARE_BITMAP(core_power, NR_CPUS); |
28 | 29 | ||
29 | struct boot_config mips_cps_bootcfg; | 30 | struct core_boot_config *mips_cps_core_bootcfg; |
30 | 31 | ||
31 | static void init_core(void) | 32 | static unsigned core_vpe_count(unsigned core) |
32 | { | 33 | { |
33 | unsigned int nvpes, t; | 34 | unsigned cfg; |
34 | u32 mvpconf0, vpeconf0, vpecontrol, tcstatus, tcbind, status; | ||
35 | 35 | ||
36 | if (!cpu_has_mipsmt) | 36 | if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt) |
37 | return; | 37 | return 1; |
38 | |||
39 | /* Enter VPE configuration state */ | ||
40 | dvpe(); | ||
41 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
42 | |||
43 | /* Retrieve the count of VPEs in this core */ | ||
44 | mvpconf0 = read_c0_mvpconf0(); | ||
45 | nvpes = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
46 | smp_num_siblings = nvpes; | ||
47 | |||
48 | for (t = 1; t < nvpes; t++) { | ||
49 | /* Use a 1:1 mapping of TC index to VPE index */ | ||
50 | settc(t); | ||
51 | |||
52 | /* Bind 1 TC to this VPE */ | ||
53 | tcbind = read_tc_c0_tcbind(); | ||
54 | tcbind &= ~TCBIND_CURVPE; | ||
55 | tcbind |= t << TCBIND_CURVPE_SHIFT; | ||
56 | write_tc_c0_tcbind(tcbind); | ||
57 | |||
58 | /* Set exclusive TC, non-active, master */ | ||
59 | vpeconf0 = read_vpe_c0_vpeconf0(); | ||
60 | vpeconf0 &= ~(VPECONF0_XTC | VPECONF0_VPA); | ||
61 | vpeconf0 |= t << VPECONF0_XTC_SHIFT; | ||
62 | vpeconf0 |= VPECONF0_MVP; | ||
63 | write_vpe_c0_vpeconf0(vpeconf0); | ||
64 | |||
65 | /* Declare TC non-active, non-allocatable & interrupt exempt */ | ||
66 | tcstatus = read_tc_c0_tcstatus(); | ||
67 | tcstatus &= ~(TCSTATUS_A | TCSTATUS_DA); | ||
68 | tcstatus |= TCSTATUS_IXMT; | ||
69 | write_tc_c0_tcstatus(tcstatus); | ||
70 | |||
71 | /* Halt the TC */ | ||
72 | write_tc_c0_tchalt(TCHALT_H); | ||
73 | |||
74 | /* Allow only 1 TC to execute */ | ||
75 | vpecontrol = read_vpe_c0_vpecontrol(); | ||
76 | vpecontrol &= ~VPECONTROL_TE; | ||
77 | write_vpe_c0_vpecontrol(vpecontrol); | ||
78 | |||
79 | /* Copy (most of) Status from VPE 0 */ | ||
80 | status = read_c0_status(); | ||
81 | status &= ~(ST0_IM | ST0_IE | ST0_KSU); | ||
82 | status |= ST0_CU0; | ||
83 | write_vpe_c0_status(status); | ||
84 | |||
85 | /* Copy Config from VPE 0 */ | ||
86 | write_vpe_c0_config(read_c0_config()); | ||
87 | write_vpe_c0_config7(read_c0_config7()); | ||
88 | |||
89 | /* Ensure no software interrupts are pending */ | ||
90 | write_vpe_c0_cause(0); | ||
91 | |||
92 | /* Sync Count */ | ||
93 | write_vpe_c0_count(read_c0_count()); | ||
94 | } | ||
95 | 38 | ||
96 | /* Leave VPE configuration state */ | 39 | write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF); |
97 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | 40 | cfg = read_gcr_co_config() & CM_GCR_Cx_CONFIG_PVPE_MSK; |
41 | return (cfg >> CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; | ||
98 | } | 42 | } |
99 | 43 | ||
100 | static void __init cps_smp_setup(void) | 44 | static void __init cps_smp_setup(void) |
101 | { | 45 | { |
102 | unsigned int ncores, nvpes, core_vpes; | 46 | unsigned int ncores, nvpes, core_vpes; |
103 | int c, v; | 47 | int c, v; |
104 | u32 core_cfg, *entry_code; | ||
105 | 48 | ||
106 | /* Detect & record VPE topology */ | 49 | /* Detect & record VPE topology */ |
107 | ncores = mips_cm_numcores(); | 50 | ncores = mips_cm_numcores(); |
108 | pr_info("VPE topology "); | 51 | pr_info("VPE topology "); |
109 | for (c = nvpes = 0; c < ncores; c++) { | 52 | for (c = nvpes = 0; c < ncores; c++) { |
110 | if (cpu_has_mipsmt && config_enabled(CONFIG_MIPS_MT_SMP)) { | 53 | core_vpes = core_vpe_count(c); |
111 | write_gcr_cl_other(c << CM_GCR_Cx_OTHER_CORENUM_SHF); | ||
112 | core_cfg = read_gcr_co_config(); | ||
113 | core_vpes = ((core_cfg & CM_GCR_Cx_CONFIG_PVPE_MSK) >> | ||
114 | CM_GCR_Cx_CONFIG_PVPE_SHF) + 1; | ||
115 | } else { | ||
116 | core_vpes = 1; | ||
117 | } | ||
118 | |||
119 | pr_cont("%c%u", c ? ',' : '{', core_vpes); | 54 | pr_cont("%c%u", c ? ',' : '{', core_vpes); |
120 | 55 | ||
56 | /* Use the number of VPEs in core 0 for smp_num_siblings */ | ||
57 | if (!c) | ||
58 | smp_num_siblings = core_vpes; | ||
59 | |||
121 | for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { | 60 | for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { |
122 | cpu_data[nvpes + v].core = c; | 61 | cpu_data[nvpes + v].core = c; |
123 | #ifdef CONFIG_MIPS_MT_SMP | 62 | #ifdef CONFIG_MIPS_MT_SMP |
@@ -137,19 +76,14 @@ static void __init cps_smp_setup(void) | |||
137 | __cpu_logical_map[v] = v; | 76 | __cpu_logical_map[v] = v; |
138 | } | 77 | } |
139 | 78 | ||
79 | /* Set a coherent default CCA (CWB) */ | ||
80 | change_c0_config(CONF_CM_CMASK, 0x5); | ||
81 | |||
140 | /* Core 0 is powered up (we're running on it) */ | 82 | /* Core 0 is powered up (we're running on it) */ |
141 | bitmap_set(core_power, 0, 1); | 83 | bitmap_set(core_power, 0, 1); |
142 | 84 | ||
143 | /* Disable MT - we only want to run 1 TC per VPE */ | ||
144 | if (cpu_has_mipsmt) | ||
145 | dmt(); | ||
146 | |||
147 | /* Initialise core 0 */ | 85 | /* Initialise core 0 */ |
148 | init_core(); | 86 | mips_cps_core_init(); |
149 | |||
150 | /* Patch the start of mips_cps_core_entry to provide the CM base */ | ||
151 | entry_code = (u32 *)&mips_cps_core_entry; | ||
152 | UASM_i_LA(&entry_code, 3, (long)mips_cm_base); | ||
153 | 87 | ||
154 | /* Make core 0 coherent with everything */ | 88 | /* Make core 0 coherent with everything */ |
155 | write_gcr_cl_coherence(0xff); | 89 | write_gcr_cl_coherence(0xff); |
@@ -157,15 +91,99 @@ static void __init cps_smp_setup(void) | |||
157 | 91 | ||
158 | static void __init cps_prepare_cpus(unsigned int max_cpus) | 92 | static void __init cps_prepare_cpus(unsigned int max_cpus) |
159 | { | 93 | { |
94 | unsigned ncores, core_vpes, c, cca; | ||
95 | bool cca_unsuitable; | ||
96 | u32 *entry_code; | ||
97 | |||
160 | mips_mt_set_cpuoptions(); | 98 | mips_mt_set_cpuoptions(); |
99 | |||
100 | /* Detect whether the CCA is unsuited to multi-core SMP */ | ||
101 | cca = read_c0_config() & CONF_CM_CMASK; | ||
102 | switch (cca) { | ||
103 | case 0x4: /* CWBE */ | ||
104 | case 0x5: /* CWB */ | ||
105 | /* The CCA is coherent, multi-core is fine */ | ||
106 | cca_unsuitable = false; | ||
107 | break; | ||
108 | |||
109 | default: | ||
110 | /* CCA is not coherent, multi-core is not usable */ | ||
111 | cca_unsuitable = true; | ||
112 | } | ||
113 | |||
114 | /* Warn the user if the CCA prevents multi-core */ | ||
115 | ncores = mips_cm_numcores(); | ||
116 | if (cca_unsuitable && ncores > 1) { | ||
117 | pr_warn("Using only one core due to unsuitable CCA 0x%x\n", | ||
118 | cca); | ||
119 | |||
120 | for_each_present_cpu(c) { | ||
121 | if (cpu_data[c].core) | ||
122 | set_cpu_present(c, false); | ||
123 | } | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Patch the start of mips_cps_core_entry to provide: | ||
128 | * | ||
129 | * v0 = CM base address | ||
130 | * s0 = kseg0 CCA | ||
131 | */ | ||
132 | entry_code = (u32 *)&mips_cps_core_entry; | ||
133 | UASM_i_LA(&entry_code, 3, (long)mips_cm_base); | ||
134 | uasm_i_addiu(&entry_code, 16, 0, cca); | ||
135 | dma_cache_wback_inv((unsigned long)&mips_cps_core_entry, | ||
136 | (void *)entry_code - (void *)&mips_cps_core_entry); | ||
137 | |||
138 | /* Allocate core boot configuration structs */ | ||
139 | mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), | ||
140 | GFP_KERNEL); | ||
141 | if (!mips_cps_core_bootcfg) { | ||
142 | pr_err("Failed to allocate boot config for %u cores\n", ncores); | ||
143 | goto err_out; | ||
144 | } | ||
145 | |||
146 | /* Allocate VPE boot configuration structs */ | ||
147 | for (c = 0; c < ncores; c++) { | ||
148 | core_vpes = core_vpe_count(c); | ||
149 | mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes, | ||
150 | sizeof(*mips_cps_core_bootcfg[c].vpe_config), | ||
151 | GFP_KERNEL); | ||
152 | if (!mips_cps_core_bootcfg[c].vpe_config) { | ||
153 | pr_err("Failed to allocate %u VPE boot configs\n", | ||
154 | core_vpes); | ||
155 | goto err_out; | ||
156 | } | ||
157 | } | ||
158 | |||
159 | /* Mark this CPU as booted */ | ||
160 | atomic_set(&mips_cps_core_bootcfg[current_cpu_data.core].vpe_mask, | ||
161 | 1 << cpu_vpe_id(¤t_cpu_data)); | ||
162 | |||
163 | return; | ||
164 | err_out: | ||
165 | /* Clean up allocations */ | ||
166 | if (mips_cps_core_bootcfg) { | ||
167 | for (c = 0; c < ncores; c++) | ||
168 | kfree(mips_cps_core_bootcfg[c].vpe_config); | ||
169 | kfree(mips_cps_core_bootcfg); | ||
170 | mips_cps_core_bootcfg = NULL; | ||
171 | } | ||
172 | |||
173 | /* Effectively disable SMP by declaring CPUs not present */ | ||
174 | for_each_possible_cpu(c) { | ||
175 | if (c == 0) | ||
176 | continue; | ||
177 | set_cpu_present(c, false); | ||
178 | } | ||
161 | } | 179 | } |
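The CCA check reads the low Config bits (CONF_CM_CMASK) to decide whether kseg0 is coherent. A small standalone illustration of the decode; the 0x7 mask and the meaning of 0x4/0x5 follow the switch above, and the Config value used is made up:

#include <stdio.h>

int main(void)
{
	unsigned config = 0x80000485;	/* made-up Config register value */
	unsigned cca = config & 0x7;	/* CONF_CM_CMASK                 */

	printf("kseg0 CCA = 0x%x (%s)\n", cca,
	       (cca == 0x4 || cca == 0x5) ? "coherent" : "unsuitable");
	return 0;
}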
162 | 180 | ||
163 | static void boot_core(struct boot_config *cfg) | 181 | static void boot_core(unsigned core) |
164 | { | 182 | { |
165 | u32 access; | 183 | u32 access; |
166 | 184 | ||
167 | /* Select the appropriate core */ | 185 | /* Select the appropriate core */ |
168 | write_gcr_cl_other(cfg->core << CM_GCR_Cx_OTHER_CORENUM_SHF); | 186 | write_gcr_cl_other(core << CM_GCR_Cx_OTHER_CORENUM_SHF); |
169 | 187 | ||
170 | /* Set its reset vector */ | 188 | /* Set its reset vector */ |
171 | write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); | 189 | write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); |
@@ -175,104 +193,74 @@ static void boot_core(struct boot_config *cfg) | |||
175 | 193 | ||
176 | /* Ensure the core can access the GCRs */ | 194 | /* Ensure the core can access the GCRs */ |
177 | access = read_gcr_access(); | 195 | access = read_gcr_access(); |
178 | access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + cfg->core); | 196 | access |= 1 << (CM_GCR_ACCESS_ACCESSEN_SHF + core); |
179 | write_gcr_access(access); | 197 | write_gcr_access(access); |
180 | 198 | ||
181 | /* Copy cfg */ | ||
182 | mips_cps_bootcfg = *cfg; | ||
183 | |||
184 | if (mips_cpc_present()) { | 199 | if (mips_cpc_present()) { |
185 | /* Select the appropriate core */ | ||
186 | write_cpc_cl_other(cfg->core << CPC_Cx_OTHER_CORENUM_SHF); | ||
187 | |||
188 | /* Reset the core */ | 200 | /* Reset the core */ |
201 | mips_cpc_lock_other(core); | ||
189 | write_cpc_co_cmd(CPC_Cx_CMD_RESET); | 202 | write_cpc_co_cmd(CPC_Cx_CMD_RESET); |
203 | mips_cpc_unlock_other(); | ||
190 | } else { | 204 | } else { |
191 | /* Take the core out of reset */ | 205 | /* Take the core out of reset */ |
192 | write_gcr_co_reset_release(0); | 206 | write_gcr_co_reset_release(0); |
193 | } | 207 | } |
194 | 208 | ||
195 | /* The core is now powered up */ | 209 | /* The core is now powered up */ |
196 | bitmap_set(core_power, cfg->core, 1); | 210 | bitmap_set(core_power, core, 1); |
197 | } | 211 | } |
198 | 212 | ||
199 | static void boot_vpe(void *info) | 213 | static void remote_vpe_boot(void *dummy) |
200 | { | 214 | { |
201 | struct boot_config *cfg = info; | 215 | mips_cps_boot_vpes(); |
202 | u32 tcstatus, vpeconf0; | ||
203 | |||
204 | /* Enter VPE configuration state */ | ||
205 | dvpe(); | ||
206 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
207 | |||
208 | settc(cfg->vpe); | ||
209 | |||
210 | /* Set the TC restart PC */ | ||
211 | write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); | ||
212 | |||
213 | /* Activate the TC, allow interrupts */ | ||
214 | tcstatus = read_tc_c0_tcstatus(); | ||
215 | tcstatus &= ~TCSTATUS_IXMT; | ||
216 | tcstatus |= TCSTATUS_A; | ||
217 | write_tc_c0_tcstatus(tcstatus); | ||
218 | |||
219 | /* Clear the TC halt bit */ | ||
220 | write_tc_c0_tchalt(0); | ||
221 | |||
222 | /* Activate the VPE */ | ||
223 | vpeconf0 = read_vpe_c0_vpeconf0(); | ||
224 | vpeconf0 |= VPECONF0_VPA; | ||
225 | write_vpe_c0_vpeconf0(vpeconf0); | ||
226 | |||
227 | /* Set the stack & global pointer registers */ | ||
228 | write_tc_gpr_sp(cfg->sp); | ||
229 | write_tc_gpr_gp(cfg->gp); | ||
230 | |||
231 | /* Leave VPE configuration state */ | ||
232 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
233 | |||
234 | /* Enable other VPEs to execute */ | ||
235 | evpe(EVPE_ENABLE); | ||
236 | } | 216 | } |
237 | 217 | ||
238 | static void cps_boot_secondary(int cpu, struct task_struct *idle) | 218 | static void cps_boot_secondary(int cpu, struct task_struct *idle) |
239 | { | 219 | { |
240 | struct boot_config cfg; | 220 | unsigned core = cpu_data[cpu].core; |
221 | unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); | ||
222 | struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; | ||
223 | struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; | ||
241 | unsigned int remote; | 224 | unsigned int remote; |
242 | int err; | 225 | int err; |
243 | 226 | ||
244 | cfg.core = cpu_data[cpu].core; | 227 | vpe_cfg->pc = (unsigned long)&smp_bootstrap; |
245 | cfg.vpe = cpu_vpe_id(&cpu_data[cpu]); | 228 | vpe_cfg->sp = __KSTK_TOS(idle); |
246 | cfg.pc = (unsigned long)&smp_bootstrap; | 229 | vpe_cfg->gp = (unsigned long)task_thread_info(idle); |
247 | cfg.sp = __KSTK_TOS(idle); | 230 | |
248 | cfg.gp = (unsigned long)task_thread_info(idle); | 231 | atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask); |
232 | |||
233 | preempt_disable(); | ||
249 | 234 | ||
250 | if (!test_bit(cfg.core, core_power)) { | 235 | if (!test_bit(core, core_power)) { |
251 | /* Boot a VPE on a powered down core */ | 236 | /* Boot a VPE on a powered down core */ |
252 | boot_core(&cfg); | 237 | boot_core(core); |
253 | return; | 238 | goto out; |
254 | } | 239 | } |
255 | 240 | ||
256 | if (cfg.core != current_cpu_data.core) { | 241 | if (core != current_cpu_data.core) { |
257 | /* Boot a VPE on another powered up core */ | 242 | /* Boot a VPE on another powered up core */ |
258 | for (remote = 0; remote < NR_CPUS; remote++) { | 243 | for (remote = 0; remote < NR_CPUS; remote++) { |
259 | if (cpu_data[remote].core != cfg.core) | 244 | if (cpu_data[remote].core != core) |
260 | continue; | 245 | continue; |
261 | if (cpu_online(remote)) | 246 | if (cpu_online(remote)) |
262 | break; | 247 | break; |
263 | } | 248 | } |
264 | BUG_ON(remote >= NR_CPUS); | 249 | BUG_ON(remote >= NR_CPUS); |
265 | 250 | ||
266 | err = smp_call_function_single(remote, boot_vpe, &cfg, 1); | 251 | err = smp_call_function_single(remote, remote_vpe_boot, |
252 | NULL, 1); | ||
267 | if (err) | 253 | if (err) |
268 | panic("Failed to call remote CPU\n"); | 254 | panic("Failed to call remote CPU\n"); |
269 | return; | 255 | goto out; |
270 | } | 256 | } |
271 | 257 | ||
272 | BUG_ON(!cpu_has_mipsmt); | 258 | BUG_ON(!cpu_has_mipsmt); |
273 | 259 | ||
274 | /* Boot a VPE on this core */ | 260 | /* Boot a VPE on this core */ |
275 | boot_vpe(&cfg); | 261 | mips_cps_boot_vpes(); |
262 | out: | ||
263 | preempt_enable(); | ||
276 | } | 264 | } |
277 | 265 | ||
278 | static void cps_init_secondary(void) | 266 | static void cps_init_secondary(void) |
@@ -281,10 +269,6 @@ static void cps_init_secondary(void) | |||
281 | if (cpu_has_mipsmt) | 269 | if (cpu_has_mipsmt) |
282 | dmt(); | 270 | dmt(); |
283 | 271 | ||
284 | /* TODO: revisit this assumption once hotplug is implemented */ | ||
285 | if (cpu_vpe_id(¤t_cpu_data) == 0) | ||
286 | init_core(); | ||
287 | |||
288 | change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | | 272 | change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | |
289 | STATUSF_IP6 | STATUSF_IP7); | 273 | STATUSF_IP6 | STATUSF_IP7); |
290 | } | 274 | } |
@@ -302,6 +286,148 @@ static void cps_smp_finish(void) | |||
302 | local_irq_enable(); | 286 | local_irq_enable(); |
303 | } | 287 | } |
304 | 288 | ||
289 | #ifdef CONFIG_HOTPLUG_CPU | ||
290 | |||
291 | static int cps_cpu_disable(void) | ||
292 | { | ||
293 | unsigned cpu = smp_processor_id(); | ||
294 | struct core_boot_config *core_cfg; | ||
295 | |||
296 | if (!cpu) | ||
297 | return -EBUSY; | ||
298 | |||
299 | if (!cps_pm_support_state(CPS_PM_POWER_GATED)) | ||
300 | return -EINVAL; | ||
301 | |||
302 | core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core]; | ||
303 | atomic_sub(1 << cpu_vpe_id(¤t_cpu_data), &core_cfg->vpe_mask); | ||
304 | smp_mb__after_atomic_dec(); | ||
305 | set_cpu_online(cpu, false); | ||
306 | cpu_clear(cpu, cpu_callin_map); | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | static DECLARE_COMPLETION(cpu_death_chosen); | ||
312 | static unsigned cpu_death_sibling; | ||
313 | static enum { | ||
314 | CPU_DEATH_HALT, | ||
315 | CPU_DEATH_POWER, | ||
316 | } cpu_death; | ||
317 | |||
318 | void play_dead(void) | ||
319 | { | ||
320 | unsigned cpu, core; | ||
321 | |||
322 | local_irq_disable(); | ||
323 | idle_task_exit(); | ||
324 | cpu = smp_processor_id(); | ||
325 | cpu_death = CPU_DEATH_POWER; | ||
326 | |||
327 | if (cpu_has_mipsmt) { | ||
328 | core = cpu_data[cpu].core; | ||
329 | |||
330 | /* Look for another online VPE within the core */ | ||
331 | for_each_online_cpu(cpu_death_sibling) { | ||
332 | if (cpu_data[cpu_death_sibling].core != core) | ||
333 | continue; | ||
334 | |||
335 | /* | ||
336 | * There is an online VPE within the core. Just halt | ||
337 | * this TC and leave the core alone. | ||
338 | */ | ||
339 | cpu_death = CPU_DEATH_HALT; | ||
340 | break; | ||
341 | } | ||
342 | } | ||
343 | |||
344 | /* This CPU has chosen its way out */ | ||
345 | complete(&cpu_death_chosen); | ||
346 | |||
347 | if (cpu_death == CPU_DEATH_HALT) { | ||
348 | /* Halt this TC */ | ||
349 | write_c0_tchalt(TCHALT_H); | ||
350 | instruction_hazard(); | ||
351 | } else { | ||
352 | /* Power down the core */ | ||
353 | cps_pm_enter_state(CPS_PM_POWER_GATED); | ||
354 | } | ||
355 | |||
356 | /* This should never be reached */ | ||
357 | panic("Failed to offline CPU %u", cpu); | ||
358 | } | ||
359 | |||
360 | static void wait_for_sibling_halt(void *ptr_cpu) | ||
361 | { | ||
362 | unsigned cpu = (unsigned)ptr_cpu; | ||
363 | unsigned vpe_id = cpu_data[cpu].vpe_id; | ||
364 | unsigned halted; | ||
365 | unsigned long flags; | ||
366 | |||
367 | do { | ||
368 | local_irq_save(flags); | ||
369 | settc(vpe_id); | ||
370 | halted = read_tc_c0_tchalt(); | ||
371 | local_irq_restore(flags); | ||
372 | } while (!(halted & TCHALT_H)); | ||
373 | } | ||
374 | |||
375 | static void cps_cpu_die(unsigned int cpu) | ||
376 | { | ||
377 | unsigned core = cpu_data[cpu].core; | ||
378 | unsigned stat; | ||
379 | int err; | ||
380 | |||
381 | /* Wait for the cpu to choose its way out */ | ||
382 | if (!wait_for_completion_timeout(&cpu_death_chosen, | ||
383 | msecs_to_jiffies(5000))) { | ||
384 | pr_err("CPU%u: didn't offline\n", cpu); | ||
385 | return; | ||
386 | } | ||
387 | |||
388 | /* | ||
389 | * Now wait for the CPU to actually offline. Without this, the | ||
390 | * offlining may race with one or more of: | ||
391 | * | ||
392 | * - Onlining the CPU again. | ||
393 | * - Powering down the core if another VPE within it is offlined. | ||
394 | * - A sibling VPE entering a non-coherent state. | ||
395 | * | ||
396 | * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing | ||
397 | * with which we could race, so do nothing. | ||
398 | */ | ||
399 | if (cpu_death == CPU_DEATH_POWER) { | ||
400 | /* | ||
401 | * Wait for the core to enter a powered down or clock gated | ||
402 | * state, the latter happening when a JTAG probe is connected, | ||
403 | * in which case the CPC will refuse to power down the core. | ||
404 | */ | ||
405 | do { | ||
406 | mips_cpc_lock_other(core); | ||
407 | stat = read_cpc_co_stat_conf(); | ||
408 | stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK; | ||
409 | mips_cpc_unlock_other(); | ||
410 | } while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 && | ||
411 | stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 && | ||
412 | stat != CPC_Cx_STAT_CONF_SEQSTATE_U2); | ||
413 | |||
414 | /* Indicate the core is powered off */ | ||
415 | bitmap_clear(core_power, core, 1); | ||
416 | } else if (cpu_has_mipsmt) { | ||
417 | /* | ||
418 | * Have a CPU with access to the offlined CPUs registers wait | ||
419 | * for its TC to halt. | ||
420 | */ | ||
421 | err = smp_call_function_single(cpu_death_sibling, | ||
422 | wait_for_sibling_halt, | ||
423 | (void *)cpu, 1); | ||
424 | if (err) | ||
425 | panic("Failed to call remote sibling CPU\n"); | ||
426 | } | ||
427 | } | ||
428 | |||
429 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
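The offline path is a handshake: the dying CPU picks halt or power-down and signals a completion, then the controlling CPU waits on that completion (with a 5s timeout) before polling the CPC state. A userspace model of the handshake, with pthreads standing in for the kernel completion API; the timeout is omitted for brevity:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int death_chosen;
static enum { DEATH_HALT, DEATH_POWER } death;

static void *dying_cpu(void *unused)
{
	(void)unused;

	pthread_mutex_lock(&lock);
	death = DEATH_POWER;	/* no online sibling in this model */
	death_chosen = 1;	/* complete(&cpu_death_chosen)     */
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);

	for (;;)		/* cps_pm_enter_state() never returns */
		;
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, dying_cpu, NULL);

	pthread_mutex_lock(&lock);
	while (!death_chosen)	/* wait_for_completion_timeout() */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);

	printf("dying CPU chose %s\n",
	       death == DEATH_HALT ? "halt" : "power down");
	return 0;		/* exiting kills the spinning thread */
}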
430 | |||
305 | static struct plat_smp_ops cps_smp_ops = { | 431 | static struct plat_smp_ops cps_smp_ops = { |
306 | .smp_setup = cps_smp_setup, | 432 | .smp_setup = cps_smp_setup, |
307 | .prepare_cpus = cps_prepare_cpus, | 433 | .prepare_cpus = cps_prepare_cpus, |
@@ -310,8 +436,18 @@ static struct plat_smp_ops cps_smp_ops = { | |||
310 | .smp_finish = cps_smp_finish, | 436 | .smp_finish = cps_smp_finish, |
311 | .send_ipi_single = gic_send_ipi_single, | 437 | .send_ipi_single = gic_send_ipi_single, |
312 | .send_ipi_mask = gic_send_ipi_mask, | 438 | .send_ipi_mask = gic_send_ipi_mask, |
439 | #ifdef CONFIG_HOTPLUG_CPU | ||
440 | .cpu_disable = cps_cpu_disable, | ||
441 | .cpu_die = cps_cpu_die, | ||
442 | #endif | ||
313 | }; | 443 | }; |
314 | 444 | ||
445 | bool mips_cps_smp_in_use(void) | ||
446 | { | ||
447 | extern struct plat_smp_ops *mp_ops; | ||
448 | return mp_ops == &cps_smp_ops; | ||
449 | } | ||
450 | |||
315 | int register_cps_smp_ops(void) | 451 | int register_cps_smp_ops(void) |
316 | { | 452 | { |
317 | if (!mips_cm_present()) { | 453 | if (!mips_cm_present()) { |
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c index 3bb1f92ab525..3b21a96d1ccb 100644 --- a/arch/mips/kernel/smp-gic.c +++ b/arch/mips/kernel/smp-gic.c | |||
@@ -15,12 +15,14 @@ | |||
15 | #include <linux/printk.h> | 15 | #include <linux/printk.h> |
16 | 16 | ||
17 | #include <asm/gic.h> | 17 | #include <asm/gic.h> |
18 | #include <asm/mips-cpc.h> | ||
18 | #include <asm/smp-ops.h> | 19 | #include <asm/smp-ops.h> |
19 | 20 | ||
20 | void gic_send_ipi_single(int cpu, unsigned int action) | 21 | void gic_send_ipi_single(int cpu, unsigned int action) |
21 | { | 22 | { |
22 | unsigned long flags; | 23 | unsigned long flags; |
23 | unsigned int intr; | 24 | unsigned int intr; |
25 | unsigned int core = cpu_data[cpu].core; | ||
24 | 26 | ||
25 | pr_debug("CPU%d: %s cpu %d action %u status %08x\n", | 27 | pr_debug("CPU%d: %s cpu %d action %u status %08x\n", |
26 | smp_processor_id(), __func__, cpu, action, read_c0_status()); | 28 | smp_processor_id(), __func__, cpu, action, read_c0_status()); |
@@ -41,6 +43,15 @@ void gic_send_ipi_single(int cpu, unsigned int action) | |||
41 | } | 43 | } |
42 | 44 | ||
43 | gic_send_ipi(intr); | 45 | gic_send_ipi(intr); |
46 | |||
47 | if (mips_cpc_present() && (core != current_cpu_data.core)) { | ||
48 | while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) { | ||
49 | mips_cpc_lock_other(core); | ||
50 | write_cpc_co_cmd(CPC_Cx_CMD_PWRUP); | ||
51 | mips_cpc_unlock_other(); | ||
52 | } | ||
53 | } | ||
54 | |||
44 | local_irq_restore(flags); | 55 | local_irq_restore(flags); |
45 | } | 56 | } |
46 | 57 | ||
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index ce7677523b68..9bad52ede903 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -62,6 +62,8 @@ EXPORT_SYMBOL(cpu_sibling_map); | |||
62 | /* representing cpus for which sibling maps can be computed */ | 62 | /* representing cpus for which sibling maps can be computed */ |
63 | static cpumask_t cpu_sibling_setup_map; | 63 | static cpumask_t cpu_sibling_setup_map; |
64 | 64 | ||
65 | cpumask_t cpu_coherent_mask; | ||
66 | |||
65 | static inline void set_cpu_sibling_map(int cpu) | 67 | static inline void set_cpu_sibling_map(int cpu) |
66 | { | 68 | { |
67 | int i; | 69 | int i; |
@@ -114,6 +116,7 @@ asmlinkage void start_secondary(void) | |||
114 | cpu = smp_processor_id(); | 116 | cpu = smp_processor_id(); |
115 | cpu_data[cpu].udelay_val = loops_per_jiffy; | 117 | cpu_data[cpu].udelay_val = loops_per_jiffy; |
116 | 118 | ||
119 | cpu_set(cpu, cpu_coherent_mask); | ||
117 | notify_cpu_starting(cpu); | 120 | notify_cpu_starting(cpu); |
118 | 121 | ||
119 | set_cpu_online(cpu, true); | 122 | set_cpu_online(cpu, true); |
@@ -175,6 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
175 | #ifndef CONFIG_HOTPLUG_CPU | 178 | #ifndef CONFIG_HOTPLUG_CPU |
176 | init_cpu_present(cpu_possible_mask); | 179 | init_cpu_present(cpu_possible_mask); |
177 | #endif | 180 | #endif |
181 | cpumask_copy(&cpu_coherent_mask, cpu_possible_mask); | ||
178 | } | 182 | } |
179 | 183 | ||
180 | /* preload SMP state for boot cpu */ | 184 | /* preload SMP state for boot cpu */ |
@@ -390,3 +394,46 @@ void dump_send_ipi(void (*dump_ipi_callback)(void *)) | |||
390 | } | 394 | } |
391 | EXPORT_SYMBOL(dump_send_ipi); | 395 | EXPORT_SYMBOL(dump_send_ipi); |
392 | #endif | 396 | #endif |
397 | |||
398 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | ||
399 | |||
400 | static DEFINE_PER_CPU(atomic_t, tick_broadcast_count); | ||
401 | static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd); | ||
402 | |||
403 | void tick_broadcast(const struct cpumask *mask) | ||
404 | { | ||
405 | atomic_t *count; | ||
406 | struct call_single_data *csd; | ||
407 | int cpu; | ||
408 | |||
409 | for_each_cpu(cpu, mask) { | ||
410 | count = &per_cpu(tick_broadcast_count, cpu); | ||
411 | csd = &per_cpu(tick_broadcast_csd, cpu); | ||
412 | |||
413 | if (atomic_inc_return(count) == 1) | ||
414 | smp_call_function_single_async(cpu, csd); | ||
415 | } | ||
416 | } | ||
417 | |||
418 | static void tick_broadcast_callee(void *info) | ||
419 | { | ||
420 | int cpu = smp_processor_id(); | ||
421 | tick_receive_broadcast(); | ||
422 | atomic_set(&per_cpu(tick_broadcast_count, cpu), 0); | ||
423 | } | ||
424 | |||
425 | static int __init tick_broadcast_init(void) | ||
426 | { | ||
427 | struct call_single_data *csd; | ||
428 | int cpu; | ||
429 | |||
430 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
431 | csd = &per_cpu(tick_broadcast_csd, cpu); | ||
432 | csd->func = tick_broadcast_callee; | ||
433 | } | ||
434 | |||
435 | return 0; | ||
436 | } | ||
437 | early_initcall(tick_broadcast_init); | ||
438 | |||
439 | #endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */ | ||
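tick_broadcast() uses the per-CPU count so that while a target still has an unprocessed broadcast, further requests are absorbed rather than queueing another call_single_data (which must not be reused while the first is pending). The gate logic in isolation, modeled with C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int broadcast_count;

/* Sender: only the 0 -> 1 transition queues a new csd/IPI */
bool should_send_ipi(void)
{
	return atomic_fetch_add(&broadcast_count, 1) + 1 == 1;
}

/* Target: reopen the gate once the broadcast tick was processed */
void broadcast_handled(void)
{
	atomic_store(&broadcast_count, 0);
}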
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 3a2672907f80..1fd1a0c4f104 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/bug.h> | 15 | #include <linux/bug.h> |
16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
17 | #include <linux/context_tracking.h> | 17 | #include <linux/context_tracking.h> |
18 | #include <linux/cpu_pm.h> | ||
18 | #include <linux/kexec.h> | 19 | #include <linux/kexec.h> |
19 | #include <linux/init.h> | 20 | #include <linux/init.h> |
20 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
@@ -1837,18 +1838,16 @@ static int __init ulri_disable(char *s) | |||
1837 | } | 1838 | } |
1838 | __setup("noulri", ulri_disable); | 1839 | __setup("noulri", ulri_disable); |
1839 | 1840 | ||
1840 | void per_cpu_trap_init(bool is_boot_cpu) | 1841 | /* configure STATUS register */ |
1842 | static void configure_status(void) | ||
1841 | { | 1843 | { |
1842 | unsigned int cpu = smp_processor_id(); | ||
1843 | unsigned int status_set = ST0_CU0; | ||
1844 | unsigned int hwrena = cpu_hwrena_impl_bits; | ||
1845 | |||
1846 | /* | 1844 | /* |
1847 | * Disable coprocessors and select 32-bit or 64-bit addressing | 1845 | * Disable coprocessors and select 32-bit or 64-bit addressing |
1848 | * and the 16/32 or 32/32 FPR register model. Reset the BEV | 1846 | * and the 16/32 or 32/32 FPR register model. Reset the BEV |
1849 | * flag that some firmware may have left set and the TS bit (for | 1847 | * flag that some firmware may have left set and the TS bit (for |
1850 | * IP27). Set XX for ISA IV code to work. | 1848 | * IP27). Set XX for ISA IV code to work. |
1851 | */ | 1849 | */ |
1850 | unsigned int status_set = ST0_CU0; | ||
1852 | #ifdef CONFIG_64BIT | 1851 | #ifdef CONFIG_64BIT |
1853 | status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; | 1852 | status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; |
1854 | #endif | 1853 | #endif |
@@ -1859,6 +1858,12 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
1859 | 1858 | ||
1860 | change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, | 1859 | change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, |
1861 | status_set); | 1860 | status_set); |
1861 | } | ||
1862 | |||
1863 | /* configure HWRENA register */ | ||
1864 | static void configure_hwrena(void) | ||
1865 | { | ||
1866 | unsigned int hwrena = cpu_hwrena_impl_bits; | ||
1862 | 1867 | ||
1863 | if (cpu_has_mips_r2) | 1868 | if (cpu_has_mips_r2) |
1864 | hwrena |= 0x0000000f; | 1869 | hwrena |= 0x0000000f; |
@@ -1868,7 +1873,10 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
1868 | 1873 | ||
1869 | if (hwrena) | 1874 | if (hwrena) |
1870 | write_c0_hwrena(hwrena); | 1875 | write_c0_hwrena(hwrena); |
1876 | } | ||
1871 | 1877 | ||
1878 | static void configure_exception_vector(void) | ||
1879 | { | ||
1872 | if (cpu_has_veic || cpu_has_vint) { | 1880 | if (cpu_has_veic || cpu_has_vint) { |
1873 | unsigned long sr = set_c0_status(ST0_BEV); | 1881 | unsigned long sr = set_c0_status(ST0_BEV); |
1874 | write_c0_ebase(ebase); | 1882 | write_c0_ebase(ebase); |
@@ -1884,6 +1892,16 @@ void per_cpu_trap_init(bool is_boot_cpu) | |||
1884 | } else | 1892 | } else |
1885 | set_c0_cause(CAUSEF_IV); | 1893 | set_c0_cause(CAUSEF_IV); |
1886 | } | 1894 | } |
1895 | } | ||
1896 | |||
1897 | void per_cpu_trap_init(bool is_boot_cpu) | ||
1898 | { | ||
1899 | unsigned int cpu = smp_processor_id(); | ||
1900 | |||
1901 | configure_status(); | ||
1902 | configure_hwrena(); | ||
1903 | |||
1904 | configure_exception_vector(); | ||
1887 | 1905 | ||
1888 | /* | 1906 | /* |
1889 | * Before R2 both interrupt numbers were fixed to 7, so on R2 only: | 1907 | * Before R2 both interrupt numbers were fixed to 7, so on R2 only: |
@@ -2122,3 +2140,32 @@ void __init trap_init(void) | |||
2122 | 2140 | ||
2123 | cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ | 2141 | cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ |
2124 | } | 2142 | } |
2143 | |||
2144 | static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd, | ||
2145 | void *v) | ||
2146 | { | ||
2147 | switch (cmd) { | ||
2148 | case CPU_PM_ENTER_FAILED: | ||
2149 | case CPU_PM_EXIT: | ||
2150 | configure_status(); | ||
2151 | configure_hwrena(); | ||
2152 | configure_exception_vector(); | ||
2153 | |||
2154 | /* Restore register with CPU number for TLB handlers */ | ||
2155 | TLBMISS_HANDLER_RESTORE(); | ||
2156 | |||
2157 | break; | ||
2158 | } | ||
2159 | |||
2160 | return NOTIFY_OK; | ||
2161 | } | ||
2162 | |||
2163 | static struct notifier_block trap_pm_notifier_block = { | ||
2164 | .notifier_call = trap_pm_notifier, | ||
2165 | }; | ||
2166 | |||
2167 | static int __init trap_pm_init(void) | ||
2168 | { | ||
2169 | return cpu_pm_register_notifier(&trap_pm_notifier_block); | ||
2170 | } | ||
2171 | arch_initcall(trap_pm_init); | ||
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 5c2128283ba6..587a14874f98 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -7,6 +7,7 @@ | |||
7 | * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org) | 7 | * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org) |
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
9 | */ | 9 | */ |
10 | #include <linux/cpu_pm.h> | ||
10 | #include <linux/hardirq.h> | 11 | #include <linux/hardirq.h> |
11 | #include <linux/init.h> | 12 | #include <linux/init.h> |
12 | #include <linux/highmem.h> | 13 | #include <linux/highmem.h> |
@@ -1643,3 +1644,26 @@ void r4k_cache_init(void) | |||
1643 | coherency_setup(); | 1644 | coherency_setup(); |
1644 | board_cache_error_setup = r4k_cache_error_setup; | 1645 | board_cache_error_setup = r4k_cache_error_setup; |
1645 | } | 1646 | } |
1647 | |||
1648 | static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd, | ||
1649 | void *v) | ||
1650 | { | ||
1651 | switch (cmd) { | ||
1652 | case CPU_PM_ENTER_FAILED: | ||
1653 | case CPU_PM_EXIT: | ||
1654 | coherency_setup(); | ||
1655 | break; | ||
1656 | } | ||
1657 | |||
1658 | return NOTIFY_OK; | ||
1659 | } | ||
1660 | |||
1661 | static struct notifier_block r4k_cache_pm_notifier_block = { | ||
1662 | .notifier_call = r4k_cache_pm_notifier, | ||
1663 | }; | ||
1664 | |||
1665 | int __init r4k_cache_init_pm(void) | ||
1666 | { | ||
1667 | return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block); | ||
1668 | } | ||
1669 | arch_initcall(r4k_cache_init_pm); | ||
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 609a0cd749ff..6e4413330e36 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -79,7 +79,7 @@ void setup_zero_pages(void) | |||
79 | zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; | 79 | zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; |
80 | } | 80 | } |
81 | 81 | ||
82 | void *kmap_coherent(struct page *page, unsigned long addr) | 82 | static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot) |
83 | { | 83 | { |
84 | enum fixed_addresses idx; | 84 | enum fixed_addresses idx; |
85 | unsigned long vaddr, flags, entrylo; | 85 | unsigned long vaddr, flags, entrylo; |
@@ -93,7 +93,7 @@ void *kmap_coherent(struct page *page, unsigned long addr) | |||
93 | idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); | 93 | idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); |
94 | idx += in_interrupt() ? FIX_N_COLOURS : 0; | 94 | idx += in_interrupt() ? FIX_N_COLOURS : 0; |
95 | vaddr = __fix_to_virt(FIX_CMAP_END - idx); | 95 | vaddr = __fix_to_virt(FIX_CMAP_END - idx); |
96 | pte = mk_pte(page, PAGE_KERNEL); | 96 | pte = mk_pte(page, prot); |
97 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) | 97 | #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) |
98 | entrylo = pte.pte_high; | 98 | entrylo = pte.pte_high; |
99 | #else | 99 | #else |
@@ -117,6 +117,16 @@ void *kmap_coherent(struct page *page, unsigned long addr) | |||
117 | return (void*) vaddr; | 117 | return (void*) vaddr; |
118 | } | 118 | } |
119 | 119 | ||
120 | void *kmap_coherent(struct page *page, unsigned long addr) | ||
121 | { | ||
122 | return __kmap_pgprot(page, addr, PAGE_KERNEL); | ||
123 | } | ||
124 | |||
125 | void *kmap_noncoherent(struct page *page, unsigned long addr) | ||
126 | { | ||
127 | return __kmap_pgprot(page, addr, PAGE_KERNEL_NC); | ||
128 | } | ||
129 | |||
120 | void kunmap_coherent(void) | 130 | void kunmap_coherent(void) |
121 | { | 131 | { |
122 | unsigned int wired; | 132 | unsigned int wired; |
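With kmap_coherent() reduced to a thin wrapper around __kmap_pgprot(), the new kmap_noncoherent() maps a page through the same wired fixmap slot but with PAGE_KERNEL_NC, giving an uncached view of the page. A hypothetical caller might look like the sketch below; the helper name and the memset are illustrative only, and kunmap_coherent() tears down the wired entry for either mapping flavour:

    /* Hypothetical: scrub a page through an uncached alias, e.g. while
     * the CPU is operating non-coherently. kmap_coherent() and
     * kmap_noncoherent() share the wired entry that kunmap_coherent()
     * releases. */
    static void fill_page_uncached(struct page *page, int val)
    {
            void *va = kmap_noncoherent(page, 0);

            memset(va, val, PAGE_SIZE);
            kunmap_coherent();
    }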
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c index 403fa804e4f4..3914e27456f2 100644 --- a/arch/mips/mm/tlb-r4k.c +++ b/arch/mips/mm/tlb-r4k.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Carsten Langgaard, carstenl@mips.com | 8 | * Carsten Langgaard, carstenl@mips.com |
9 | * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved. | 9 | * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved. |
10 | */ | 10 | */ |
11 | #include <linux/cpu_pm.h> | ||
11 | #include <linux/init.h> | 12 | #include <linux/init.h> |
12 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
13 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
@@ -399,7 +400,10 @@ static int __init set_ntlb(char *str) | |||
399 | 400 | ||
400 | __setup("ntlb=", set_ntlb); | 401 | __setup("ntlb=", set_ntlb); |
401 | 402 | ||
402 | void tlb_init(void) | 403 | /* |
404 | * Configure TLB (for init or after a CPU has been powered off). | ||
405 | */ | ||
406 | static void r4k_tlb_configure(void) | ||
403 | { | 407 | { |
404 | /* | 408 | /* |
405 | * You should never change this register: | 409 | * You should never change this register: |
@@ -431,6 +435,11 @@ void tlb_init(void) | |||
431 | local_flush_tlb_all(); | 435 | local_flush_tlb_all(); |
432 | 436 | ||
433 | /* Did I tell you that ARC SUCKS? */ | 437 | /* Did I tell you that ARC SUCKS? */ |
438 | } | ||
439 | |||
440 | void tlb_init(void) | ||
441 | { | ||
442 | r4k_tlb_configure(); | ||
434 | 443 | ||
435 | if (ntlb) { | 444 | if (ntlb) { |
436 | if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) { | 445 | if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) { |
@@ -444,3 +453,26 @@ void tlb_init(void) | |||
444 | 453 | ||
445 | build_tlb_refill_handler(); | 454 | build_tlb_refill_handler(); |
446 | } | 455 | } |
456 | |||
457 | static int r4k_tlb_pm_notifier(struct notifier_block *self, unsigned long cmd, | ||
458 | void *v) | ||
459 | { | ||
460 | switch (cmd) { | ||
461 | case CPU_PM_ENTER_FAILED: | ||
462 | case CPU_PM_EXIT: | ||
463 | r4k_tlb_configure(); | ||
464 | break; | ||
465 | } | ||
466 | |||
467 | return NOTIFY_OK; | ||
468 | } | ||
469 | |||
470 | static struct notifier_block r4k_tlb_pm_notifier_block = { | ||
471 | .notifier_call = r4k_tlb_pm_notifier, | ||
472 | }; | ||
473 | |||
474 | static int __init r4k_tlb_init_pm(void) | ||
475 | { | ||
476 | return cpu_pm_register_notifier(&r4k_tlb_pm_notifier_block); | ||
477 | } | ||
478 | arch_initcall(r4k_tlb_init_pm); | ||
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c index b8d580ca02e5..bcbcf4ae69b7 100644 --- a/arch/mips/mm/uasm-micromips.c +++ b/arch/mips/mm/uasm-micromips.c | |||
@@ -99,10 +99,12 @@ static struct insn insn_table_MM[] = { | |||
99 | { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD }, | 99 | { insn_rotr, M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD }, |
100 | { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD }, | 100 | { insn_subu, M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD }, |
101 | { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, | 101 | { insn_sw, M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, |
102 | { insn_sync, M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS }, | ||
102 | { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 }, | 103 | { insn_tlbp, M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0 }, |
103 | { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 }, | 104 | { insn_tlbr, M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0 }, |
104 | { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 }, | 105 | { insn_tlbwi, M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0 }, |
105 | { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 }, | 106 | { insn_tlbwr, M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0 }, |
107 | { insn_wait, M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM }, | ||
106 | { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD }, | 108 | { insn_xor, M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD }, |
107 | { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, | 109 | { insn_xori, M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM }, |
108 | { insn_dins, 0, 0 }, | 110 | { insn_dins, 0, 0 }, |
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c index 3abd609518c9..4a2fc82fcd4f 100644 --- a/arch/mips/mm/uasm-mips.c +++ b/arch/mips/mm/uasm-mips.c | |||
@@ -82,6 +82,7 @@ static struct insn insn_table[] = { | |||
82 | { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE }, | 82 | { insn_ins, M(spec3_op, 0, 0, 0, 0, ins_op), RS | RT | RD | RE }, |
83 | { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, | 83 | { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, |
84 | { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, | 84 | { insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM }, |
85 | { insn_jalr, M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD }, | ||
85 | { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, | 86 | { insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM }, |
86 | { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, | 87 | { insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS }, |
87 | { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 88 | { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
@@ -106,13 +107,16 @@ static struct insn insn_table[] = { | |||
106 | { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, | 107 | { insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE }, |
107 | { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, | 108 | { insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD }, |
108 | { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, | 109 | { insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, |
110 | { insn_sync, M(spec_op, 0, 0, 0, 0, sync_op), RE }, | ||
109 | { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, | 111 | { insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM}, |
110 | { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, | 112 | { insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 }, |
111 | { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, | 113 | { insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 }, |
112 | { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, | 114 | { insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 }, |
113 | { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, | 115 | { insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 }, |
116 | { insn_wait, M(cop0_op, cop_op, 0, 0, 0, wait_op), SCIMM }, | ||
114 | { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, | 117 | { insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, |
115 | { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, | 118 | { insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD }, |
119 | { insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD }, | ||
116 | { insn_invalid, 0, 0 } | 120 | { insn_invalid, 0, 0 } |
117 | }; | 121 | }; |
118 | 122 | ||
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c index b9d14b6c7f58..55a1fdfb76ef 100644 --- a/arch/mips/mm/uasm.c +++ b/arch/mips/mm/uasm.c | |||
@@ -49,12 +49,12 @@ enum opcode { | |||
49 | insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm, | 49 | insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm, |
50 | insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll, | 50 | insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll, |
51 | insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret, | 51 | insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret, |
52 | insn_ext, insn_ins, insn_j, insn_jal, insn_jr, insn_ld, insn_ldx, | 52 | insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_ld, |
53 | insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0, | 53 | insn_ldx, insn_ll, insn_lld, insn_lui, insn_lw, insn_lwx, insn_mfc0, |
54 | insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, | 54 | insn_mtc0, insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, |
55 | insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw, | 55 | insn_scd, insn_sd, insn_sll, insn_sra, insn_srl, insn_subu, insn_sw, |
56 | insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, | 56 | insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, |
57 | insn_xori, | 57 | insn_wait, insn_xor, insn_xori, insn_yield, |
58 | }; | 58 | }; |
59 | 59 | ||
60 | struct insn { | 60 | struct insn { |
@@ -200,6 +200,13 @@ Ip_u1u2(op) \ | |||
200 | } \ | 200 | } \ |
201 | UASM_EXPORT_SYMBOL(uasm_i##op); | 201 | UASM_EXPORT_SYMBOL(uasm_i##op); |
202 | 202 | ||
203 | #define I_u2u1(op) \ | ||
204 | Ip_u1u2(op) \ | ||
205 | { \ | ||
206 | build_insn(buf, insn##op, b, a); \ | ||
207 | } \ | ||
208 | UASM_EXPORT_SYMBOL(uasm_i##op); | ||
209 | |||
203 | #define I_u1s2(op) \ | 210 | #define I_u1s2(op) \ |
204 | Ip_u1s2(op) \ | 211 | Ip_u1s2(op) \ |
205 | { \ | 212 | { \ |
@@ -250,6 +257,7 @@ I_u2u1msbdu3(_ext) | |||
250 | I_u2u1msbu3(_ins) | 257 | I_u2u1msbu3(_ins) |
251 | I_u1(_j) | 258 | I_u1(_j) |
252 | I_u1(_jal) | 259 | I_u1(_jal) |
260 | I_u2u1(_jalr) | ||
253 | I_u1(_jr) | 261 | I_u1(_jr) |
254 | I_u2s3u1(_ld) | 262 | I_u2s3u1(_ld) |
255 | I_u2s3u1(_ll) | 263 | I_u2s3u1(_ll) |
@@ -270,12 +278,15 @@ I_u2u1u3(_srl) | |||
270 | I_u2u1u3(_rotr) | 278 | I_u2u1u3(_rotr) |
271 | I_u3u1u2(_subu) | 279 | I_u3u1u2(_subu) |
272 | I_u2s3u1(_sw) | 280 | I_u2s3u1(_sw) |
281 | I_u1(_sync) | ||
273 | I_0(_tlbp) | 282 | I_0(_tlbp) |
274 | I_0(_tlbr) | 283 | I_0(_tlbr) |
275 | I_0(_tlbwi) | 284 | I_0(_tlbwi) |
276 | I_0(_tlbwr) | 285 | I_0(_tlbwr) |
286 | I_u1(_wait); | ||
277 | I_u3u1u2(_xor) | 287 | I_u3u1u2(_xor) |
278 | I_u2u1u3(_xori) | 288 | I_u2u1u3(_xori) |
289 | I_u2u1(_yield) | ||
279 | I_u2u1msbu3(_dins); | 290 | I_u2u1msbu3(_dins); |
280 | I_u2u1msb32u3(_dinsm); | 291 | I_u2u1msb32u3(_dinsm); |
281 | I_u1(_syscall); | 292 | I_u1(_syscall); |
@@ -469,6 +480,14 @@ void ISAFUNC(uasm_il_b)(u32 **p, struct uasm_reloc **r, int lid) | |||
469 | } | 480 | } |
470 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); | 481 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_b)); |
471 | 482 | ||
483 | void ISAFUNC(uasm_il_beq)(u32 **p, struct uasm_reloc **r, unsigned int r1, | ||
484 | unsigned int r2, int lid) | ||
485 | { | ||
486 | uasm_r_mips_pc16(r, *p, lid); | ||
487 | ISAFUNC(uasm_i_beq)(p, r1, r2, 0); | ||
488 | } | ||
489 | UASM_EXPORT_SYMBOL(ISAFUNC(uasm_il_beq)); | ||
490 | |||
472 | void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, | 491 | void ISAFUNC(uasm_il_beqz)(u32 **p, struct uasm_reloc **r, unsigned int reg, |
473 | int lid) | 492 | int lid) |
474 | { | 493 | { |
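The uasm additions (sync, wait, jalr, yield, plus the labelled beq helper) give the CPS power-management code what it needs to generate its low-power entry and exit sequences at run time. A minimal sketch of emitting a stub with the new instructions, in ordinary kernel context; the buffer size is arbitrary for this example:

    #include <asm/uasm.h>

    static u32 stub[16];    /* arbitrarily sized scratch buffer */

    static void emit_wait_stub(void)
    {
            u32 *p = stub;

            uasm_i_sync(&p, 0);     /* full ordering barrier (stype 0) */
            uasm_i_wait(&p, 0);     /* idle until an interrupt arrives */
            uasm_i_jr(&p, 31);      /* return through ra ($31) */
            uasm_i_nop(&p);         /* fill the branch delay slot */
    }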
diff --git a/drivers/cpuidle/Kconfig b/drivers/cpuidle/Kconfig index f04e25f6c98d..1b96fb91d32c 100644 --- a/drivers/cpuidle/Kconfig +++ b/drivers/cpuidle/Kconfig | |||
@@ -35,6 +35,11 @@ depends on ARM | |||
35 | source "drivers/cpuidle/Kconfig.arm" | 35 | source "drivers/cpuidle/Kconfig.arm" |
36 | endmenu | 36 | endmenu |
37 | 37 | ||
38 | menu "MIPS CPU Idle Drivers" | ||
39 | depends on MIPS | ||
40 | source "drivers/cpuidle/Kconfig.mips" | ||
41 | endmenu | ||
42 | |||
38 | menu "POWERPC CPU Idle Drivers" | 43 | menu "POWERPC CPU Idle Drivers" |
39 | depends on PPC | 44 | depends on PPC |
40 | source "drivers/cpuidle/Kconfig.powerpc" | 45 | source "drivers/cpuidle/Kconfig.powerpc" |
diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips new file mode 100644 index 000000000000..0e70ee28a5ca --- /dev/null +++ b/drivers/cpuidle/Kconfig.mips | |||
@@ -0,0 +1,17 @@ | |||
1 | # | ||
2 | # MIPS CPU Idle Drivers | ||
3 | # | ||
4 | config MIPS_CPS_CPUIDLE | ||
5 | bool "CPU Idle driver for MIPS CPS platforms" | ||
6 | depends on CPU_IDLE | ||
7 | depends on SYS_SUPPORTS_MIPS_CPS | ||
8 | select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT | ||
9 | select GENERIC_CLOCKEVENTS_BROADCAST if SMP | ||
10 | select MIPS_CPS_PM | ||
11 | default y | ||
12 | help | ||
13 | Select this option to enable processor idle state management | ||
14 | through cpuidle for systems built around the MIPS Coherent | ||
15 | Processing System (CPS) architecture. In order to make use of | ||
16 | the deepest idle states you will need to ensure that you are | ||
17 | also using the CONFIG_MIPS_CPS SMP implementation. | ||
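Because the new option defaults to y, it is picked up automatically once its dependencies are met. A plausible .config fragment for a CPS system (the exact option set varies by platform):

    CONFIG_MIPS_CPS=y
    CONFIG_CPU_IDLE=y
    CONFIG_MIPS_CPS_CPUIDLE=y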
diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index f71ae1b373c5..a7fc96bcf319 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile | |||
@@ -15,6 +15,10 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o | |||
15 | obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o | 15 | obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o |
16 | 16 | ||
17 | ############################################################################### | 17 | ############################################################################### |
18 | # MIPS drivers | ||
19 | obj-$(CONFIG_MIPS_CPS_CPUIDLE) += cpuidle-cps.o | ||
20 | |||
21 | ############################################################################### | ||
18 | # POWERPC drivers | 22 | # POWERPC drivers |
19 | obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o | 23 | obj-$(CONFIG_PSERIES_CPUIDLE) += cpuidle-pseries.o |
20 | obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o | 24 | obj-$(CONFIG_POWERNV_CPUIDLE) += cpuidle-powernv.o |
diff --git a/drivers/cpuidle/cpuidle-cps.c b/drivers/cpuidle/cpuidle-cps.c new file mode 100644 index 000000000000..fc7b62720deb --- /dev/null +++ b/drivers/cpuidle/cpuidle-cps.c | |||
@@ -0,0 +1,186 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2014 Imagination Technologies | ||
3 | * Author: Paul Burton <paul.burton@imgtec.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms of the GNU General Public License as published by the | ||
7 | * Free Software Foundation; either version 2 of the License, or (at your | ||
8 | * option) any later version. | ||
9 | */ | ||
10 | |||
11 | #include <linux/cpu_pm.h> | ||
12 | #include <linux/cpuidle.h> | ||
13 | #include <linux/init.h> | ||
14 | |||
15 | #include <asm/idle.h> | ||
16 | #include <asm/pm-cps.h> | ||
17 | |||
18 | /* Enumeration of the various idle states this driver may enter */ | ||
19 | enum cps_idle_state { | ||
20 | STATE_WAIT = 0, /* MIPS wait instruction, coherent */ | ||
21 | STATE_NC_WAIT, /* MIPS wait instruction, non-coherent */ | ||
22 | STATE_CLOCK_GATED, /* Core clock gated */ | ||
23 | STATE_POWER_GATED, /* Core power gated */ | ||
24 | STATE_COUNT | ||
25 | }; | ||
26 | |||
27 | static int cps_nc_enter(struct cpuidle_device *dev, | ||
28 | struct cpuidle_driver *drv, int index) | ||
29 | { | ||
30 | enum cps_pm_state pm_state; | ||
31 | int err; | ||
32 | |||
33 | /* | ||
34 | * At least one core must remain powered up & clocked in order for the | ||
35 | * system to have any hope of functioning. | ||
36 | * | ||
37 | * TODO: don't treat core 0 specially, just prevent the final core | ||
38 | * TODO: remap interrupt affinity temporarily | ||
39 | */ | ||
40 | if (!cpu_data[dev->cpu].core && (index > STATE_NC_WAIT)) | ||
41 | index = STATE_NC_WAIT; | ||
42 | |||
43 | /* Select the appropriate cps_pm_state */ | ||
44 | switch (index) { | ||
45 | case STATE_NC_WAIT: | ||
46 | pm_state = CPS_PM_NC_WAIT; | ||
47 | break; | ||
48 | case STATE_CLOCK_GATED: | ||
49 | pm_state = CPS_PM_CLOCK_GATED; | ||
50 | break; | ||
51 | case STATE_POWER_GATED: | ||
52 | pm_state = CPS_PM_POWER_GATED; | ||
53 | break; | ||
54 | default: | ||
55 | BUG(); | ||
56 | return -EINVAL; | ||
57 | } | ||
58 | |||
59 | /* Notify listeners the CPU is about to power down */ | ||
60 | if ((pm_state == CPS_PM_POWER_GATED) && cpu_pm_enter()) | ||
61 | return -EINTR; | ||
62 | |||
63 | /* Enter that state */ | ||
64 | err = cps_pm_enter_state(pm_state); | ||
65 | |||
66 | /* Notify listeners the CPU is back up */ | ||
67 | if (pm_state == CPS_PM_POWER_GATED) | ||
68 | cpu_pm_exit(); | ||
69 | |||
70 | return err ?: index; | ||
71 | } | ||
72 | |||
73 | static struct cpuidle_driver cps_driver = { | ||
74 | .name = "cpc_cpuidle", | ||
75 | .owner = THIS_MODULE, | ||
76 | .states = { | ||
77 | [STATE_WAIT] = MIPS_CPUIDLE_WAIT_STATE, | ||
78 | [STATE_NC_WAIT] = { | ||
79 | .enter = cps_nc_enter, | ||
80 | .exit_latency = 200, | ||
81 | .target_residency = 450, | ||
82 | .flags = CPUIDLE_FLAG_TIME_VALID, | ||
83 | .name = "nc-wait", | ||
84 | .desc = "non-coherent MIPS wait", | ||
85 | }, | ||
86 | [STATE_CLOCK_GATED] = { | ||
87 | .enter = cps_nc_enter, | ||
88 | .exit_latency = 300, | ||
89 | .target_residency = 700, | ||
90 | .flags = CPUIDLE_FLAG_TIME_VALID | | ||
91 | CPUIDLE_FLAG_TIMER_STOP, | ||
92 | .name = "clock-gated", | ||
93 | .desc = "core clock gated", | ||
94 | }, | ||
95 | [STATE_POWER_GATED] = { | ||
96 | .enter = cps_nc_enter, | ||
97 | .exit_latency = 600, | ||
98 | .target_residency = 1000, | ||
99 | .flags = CPUIDLE_FLAG_TIME_VALID | | ||
100 | CPUIDLE_FLAG_TIMER_STOP, | ||
101 | .name = "power-gated", | ||
102 | .desc = "core power gated", | ||
103 | }, | ||
104 | }, | ||
105 | .state_count = STATE_COUNT, | ||
106 | .safe_state_index = 0, | ||
107 | }; | ||
108 | |||
109 | static void __init cps_cpuidle_unregister(void) | ||
110 | { | ||
111 | int cpu; | ||
112 | struct cpuidle_device *device; | ||
113 | |||
114 | for_each_possible_cpu(cpu) { | ||
115 | device = &per_cpu(cpuidle_dev, cpu); | ||
116 | cpuidle_unregister_device(device); | ||
117 | } | ||
118 | |||
119 | cpuidle_unregister_driver(&cps_driver); | ||
120 | } | ||
121 | |||
122 | static int __init cps_cpuidle_init(void) | ||
123 | { | ||
124 | int err, cpu, core, i; | ||
125 | struct cpuidle_device *device; | ||
126 | |||
127 | /* Detect supported states */ | ||
128 | if (!cps_pm_support_state(CPS_PM_POWER_GATED)) | ||
129 | cps_driver.state_count = STATE_CLOCK_GATED + 1; | ||
130 | if (!cps_pm_support_state(CPS_PM_CLOCK_GATED)) | ||
131 | cps_driver.state_count = STATE_NC_WAIT + 1; | ||
132 | if (!cps_pm_support_state(CPS_PM_NC_WAIT)) | ||
133 | cps_driver.state_count = STATE_WAIT + 1; | ||
134 | |||
135 | /* Inform the user if some states are unavailable */ | ||
136 | if (cps_driver.state_count < STATE_COUNT) { | ||
137 | pr_info("cpuidle-cps: limited to "); | ||
138 | switch (cps_driver.state_count - 1) { | ||
139 | case STATE_WAIT: | ||
140 | pr_cont("coherent wait\n"); | ||
141 | break; | ||
142 | case STATE_NC_WAIT: | ||
143 | pr_cont("non-coherent wait\n"); | ||
144 | break; | ||
145 | case STATE_CLOCK_GATED: | ||
146 | pr_cont("clock gating\n"); | ||
147 | break; | ||
148 | } | ||
149 | } | ||
150 | |||
151 | /* | ||
152 | * Set the coupled flag on the appropriate states if this system | ||
153 | * requires it. | ||
154 | */ | ||
155 | if (coupled_coherence) | ||
156 | for (i = STATE_NC_WAIT; i < cps_driver.state_count; i++) | ||
157 | cps_driver.states[i].flags |= CPUIDLE_FLAG_COUPLED; | ||
158 | |||
159 | err = cpuidle_register_driver(&cps_driver); | ||
160 | if (err) { | ||
161 | pr_err("Failed to register CPS cpuidle driver\n"); | ||
162 | return err; | ||
163 | } | ||
164 | |||
165 | for_each_possible_cpu(cpu) { | ||
166 | core = cpu_data[cpu].core; | ||
167 | device = &per_cpu(cpuidle_dev, cpu); | ||
168 | device->cpu = cpu; | ||
169 | #ifdef CONFIG_MIPS_MT | ||
170 | cpumask_copy(&device->coupled_cpus, &cpu_sibling_map[cpu]); | ||
171 | #endif | ||
172 | |||
173 | err = cpuidle_register_device(device); | ||
174 | if (err) { | ||
175 | pr_err("Failed to register CPU%d cpuidle device\n", | ||
176 | cpu); | ||
177 | goto err_out; | ||
178 | } | ||
179 | } | ||
180 | |||
181 | return 0; | ||
182 | err_out: | ||
183 | cps_cpuidle_unregister(); | ||
184 | return err; | ||
185 | } | ||
186 | device_initcall(cps_cpuidle_init); | ||
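The exit_latency/target_residency pairs in cps_driver are what the cpuidle governor weighs when choosing a state: roughly, the deepest state whose target residency fits the predicted idle period and whose exit latency respects the current PM QoS bound. A simplified selection loop over this driver's table; real governors such as menu add correction factors and interactivity heuristics on top:

    #include <linux/cpuidle.h>

    /* Simplified: deepest state satisfying both constraints. With the
     * table above, a predicted 800us idle under a 500us latency bound
     * selects "clock-gated" (residency 700, latency 300), while
     * "power-gated" (residency 1000) is skipped. */
    static int pick_state(struct cpuidle_driver *drv,
                          unsigned int predicted_us,
                          unsigned int latency_req_us)
    {
            int i, chosen = 0;

            for (i = 1; i < drv->state_count; i++) {
                    if (drv->states[i].target_residency > predicted_us)
                            continue;
                    if (drv->states[i].exit_latency > latency_req_us)
                            continue;
                    chosen = i;
            }
            return chosen;
    }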
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index b0238cba440b..99cbd7a74e9f 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -84,6 +84,7 @@ struct cpuidle_device { | |||
84 | }; | 84 | }; |
85 | 85 | ||
86 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); | 86 | DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices); |
87 | DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev); | ||
87 | 88 | ||
88 | /** | 89 | /** |
89 | * cpuidle_get_last_residency - retrieves the last state's residency time | 90 | * cpuidle_get_last_residency - retrieves the last state's residency time |
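The DECLARE_PER_CPU added here pairs with a (presumably now non-static) DEFINE_PER_CPU of cpuidle_dev in the cpuidle core, which is what lets cpuidle-cps.c fetch its per-CPU device structs directly:

    /* In exactly one translation unit (assumed: drivers/cpuidle/cpuidle.c): */
    DEFINE_PER_CPU(struct cpuidle_device, cpuidle_dev);

    /* Consumers, as in cps_cpuidle_init() above: */
    struct cpuidle_device *device = &per_cpu(cpuidle_dev, cpu);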