aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorStefan Kristiansson <stefan.kristiansson@saunalahti.fi>2014-05-11 14:49:34 -0400
committerStafford Horne <shorne@gmail.com>2017-11-03 01:01:13 -0400
commit8e6d08e0a15e7d4d4b608b56597350d4cdd77710 (patch)
tree5c060395457f5bdd407c09b85caedcbbf701072c
parent9b54470afd836278a7e6f0f08194e2e2dca4b6eb (diff)
openrisc: initial SMP support
This patch introduces the SMP support for the OpenRISC architecture. The SMP architecture requires cores which have multi-core features which have been introduced a few years back including: - New SPRs SPR_COREID SPR_NUMCORES - Shadow SPRs - Atomic Instructions - Cache Coherency - A wired-in IPI controller This patch adds all of the SMP specific changes to core infrastructure, it looks big but it needs to go all together as it's hard to split this one up. Boot loader spinning of second cpu is not supported yet, it's assumed that Linux is booted straight after cpu reset. The bulk of these changes are trivial changes to refactor to use per cpu data structures throughout. The addition of smp.c and the changes in time.c are the substantive changes. Some specific notes: MM changes ---------- The reason why this is created as an array, and not with DEFINE_PER_CPU is that doing it this way, we'll save a load in the tlb-miss handler (the load from __per_cpu_offset). TLB Flush --------- The SMP implementation of flush_tlb_* works by sending out a function-call IPI to all the non-local cpus by using the generic on_each_cpu() function. Currently, all flush_tlb_* functions will result in a flush_tlb_all(), which has always been the behaviour in the UP case. CPU INFO -------- This creates a per cpu cpuinfo struct and fills it out accordingly for each activated cpu. show_cpuinfo is also updated to reflect new version information in later versions of the spec. SMP API ------- This imitates the arm64 implementation by having a smp_cross_call callback that can be set by set_smp_cross_call to initiate an IPI and a handle_IPI function that is expected to be called from an IPI irqchip driver. Signed-off-by: Stefan Kristiansson <stefan.kristiansson@saunalahti.fi> [shorne@gmail.com: added cpu stop, checkpatch fixes, wrote commit message] Signed-off-by: Stafford Horne <shorne@gmail.com>
-rw-r--r--arch/openrisc/Kconfig17
-rw-r--r--arch/openrisc/include/asm/cpuinfo.h7
-rw-r--r--arch/openrisc/include/asm/mmu_context.h2
-rw-r--r--arch/openrisc/include/asm/pgtable.h2
-rw-r--r--arch/openrisc/include/asm/serial.h2
-rw-r--r--arch/openrisc/include/asm/smp.h26
-rw-r--r--arch/openrisc/include/asm/spr_defs.h14
-rw-r--r--arch/openrisc/include/asm/time.h15
-rw-r--r--arch/openrisc/include/asm/tlbflush.h25
-rw-r--r--arch/openrisc/kernel/Makefile1
-rw-r--r--arch/openrisc/kernel/dma.c14
-rw-r--r--arch/openrisc/kernel/head.S97
-rw-r--r--arch/openrisc/kernel/setup.c165
-rw-r--r--arch/openrisc/kernel/smp.c235
-rw-r--r--arch/openrisc/kernel/time.c51
-rw-r--r--arch/openrisc/lib/delay.c2
-rw-r--r--arch/openrisc/mm/fault.c4
-rw-r--r--arch/openrisc/mm/init.c2
-rw-r--r--arch/openrisc/mm/tlb.c16
19 files changed, 584 insertions, 113 deletions
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 34eb4e90f56c..2b3898ede888 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -21,8 +21,10 @@ config OPENRISC
21 select HAVE_UID16 21 select HAVE_UID16
22 select GENERIC_ATOMIC64 22 select GENERIC_ATOMIC64
23 select GENERIC_CLOCKEVENTS 23 select GENERIC_CLOCKEVENTS
24 select GENERIC_CLOCKEVENTS_BROADCAST
24 select GENERIC_STRNCPY_FROM_USER 25 select GENERIC_STRNCPY_FROM_USER
25 select GENERIC_STRNLEN_USER 26 select GENERIC_STRNLEN_USER
27 select GENERIC_SMP_IDLE_THREAD
26 select MODULES_USE_ELF_RELA 28 select MODULES_USE_ELF_RELA
27 select HAVE_DEBUG_STACKOVERFLOW 29 select HAVE_DEBUG_STACKOVERFLOW
28 select OR1K_PIC 30 select OR1K_PIC
@@ -107,8 +109,19 @@ config OPENRISC_HAVE_INST_DIV
107endmenu 109endmenu
108 110
109config NR_CPUS 111config NR_CPUS
110 int 112 int "Maximum number of CPUs (2-32)"
111 default "1" 113 range 2 32
114 depends on SMP
115 default "2"
116
117config SMP
118 bool "Symmetric Multi-Processing support"
119 help
120 This enables support for systems with more than one CPU. If you have
121 a system with only one CPU, say N. If you have a system with more
122 than one CPU, say Y.
123
124 If you don't know what to do here, say N.
112 125
113source kernel/Kconfig.hz 126source kernel/Kconfig.hz
114source kernel/Kconfig.preempt 127source kernel/Kconfig.preempt
diff --git a/arch/openrisc/include/asm/cpuinfo.h b/arch/openrisc/include/asm/cpuinfo.h
index ec10679d6429..4ea0a33eba6c 100644
--- a/arch/openrisc/include/asm/cpuinfo.h
+++ b/arch/openrisc/include/asm/cpuinfo.h
@@ -19,7 +19,7 @@
19#ifndef __ASM_OPENRISC_CPUINFO_H 19#ifndef __ASM_OPENRISC_CPUINFO_H
20#define __ASM_OPENRISC_CPUINFO_H 20#define __ASM_OPENRISC_CPUINFO_H
21 21
22struct cpuinfo { 22struct cpuinfo_or1k {
23 u32 clock_frequency; 23 u32 clock_frequency;
24 24
25 u32 icache_size; 25 u32 icache_size;
@@ -29,8 +29,11 @@ struct cpuinfo {
29 u32 dcache_size; 29 u32 dcache_size;
30 u32 dcache_block_size; 30 u32 dcache_block_size;
31 u32 dcache_ways; 31 u32 dcache_ways;
32
33 u16 coreid;
32}; 34};
33 35
34extern struct cpuinfo cpuinfo; 36extern struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
37extern void setup_cpuinfo(void);
35 38
36#endif /* __ASM_OPENRISC_CPUINFO_H */ 39#endif /* __ASM_OPENRISC_CPUINFO_H */
diff --git a/arch/openrisc/include/asm/mmu_context.h b/arch/openrisc/include/asm/mmu_context.h
index e94b814d2e3c..c380d8caf84f 100644
--- a/arch/openrisc/include/asm/mmu_context.h
+++ b/arch/openrisc/include/asm/mmu_context.h
@@ -34,7 +34,7 @@ extern void switch_mm(struct mm_struct *prev, struct mm_struct *next,
34 * registers like cr3 on the i386 34 * registers like cr3 on the i386
35 */ 35 */
36 36
37extern volatile pgd_t *current_pgd; /* defined in arch/openrisc/mm/fault.c */ 37extern volatile pgd_t *current_pgd[]; /* defined in arch/openrisc/mm/fault.c */
38 38
39static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) 39static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
40{ 40{
diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h
index 71a6f08de8f2..eff5ba2a5af2 100644
--- a/arch/openrisc/include/asm/pgtable.h
+++ b/arch/openrisc/include/asm/pgtable.h
@@ -94,7 +94,7 @@ extern void paging_init(void);
94 * 64 MB of vmalloc area is comparable to what's available on other arches. 94 * 64 MB of vmalloc area is comparable to what's available on other arches.
95 */ 95 */
96 96
97#define VMALLOC_START (PAGE_OFFSET-0x04000000) 97#define VMALLOC_START (PAGE_OFFSET-0x04000000UL)
98#define VMALLOC_END (PAGE_OFFSET) 98#define VMALLOC_END (PAGE_OFFSET)
99#define VMALLOC_VMADDR(x) ((unsigned long)(x)) 99#define VMALLOC_VMADDR(x) ((unsigned long)(x))
100 100
diff --git a/arch/openrisc/include/asm/serial.h b/arch/openrisc/include/asm/serial.h
index 270a45241639..cb5932f5447a 100644
--- a/arch/openrisc/include/asm/serial.h
+++ b/arch/openrisc/include/asm/serial.h
@@ -29,7 +29,7 @@
29 * it needs to be correct to get the early console working. 29 * it needs to be correct to get the early console working.
30 */ 30 */
31 31
32#define BASE_BAUD (cpuinfo.clock_frequency/16) 32#define BASE_BAUD (cpuinfo_or1k[smp_processor_id()].clock_frequency/16)
33 33
34#endif /* __KERNEL__ */ 34#endif /* __KERNEL__ */
35 35
diff --git a/arch/openrisc/include/asm/smp.h b/arch/openrisc/include/asm/smp.h
new file mode 100644
index 000000000000..e21d2f12b5b6
--- /dev/null
+++ b/arch/openrisc/include/asm/smp.h
@@ -0,0 +1,26 @@
1/*
2 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
3 *
4 * This file is licensed under the terms of the GNU General Public License
5 * version 2. This program is licensed "as is" without any warranty of any
6 * kind, whether express or implied.
7 */
8
9#ifndef __ASM_OPENRISC_SMP_H
10#define __ASM_OPENRISC_SMP_H
11
12#include <asm/spr.h>
13#include <asm/spr_defs.h>
14
15#define raw_smp_processor_id() (current_thread_info()->cpu)
16#define hard_smp_processor_id() mfspr(SPR_COREID)
17
18extern void smp_init_cpus(void);
19
20extern void arch_send_call_function_single_ipi(int cpu);
21extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
22
23extern void set_smp_cross_call(void (*)(const struct cpumask *, unsigned int));
24extern void handle_IPI(unsigned int ipi_msg);
25
26#endif /* __ASM_OPENRISC_SMP_H */
diff --git a/arch/openrisc/include/asm/spr_defs.h b/arch/openrisc/include/asm/spr_defs.h
index 367dac70326a..154b5a1ee579 100644
--- a/arch/openrisc/include/asm/spr_defs.h
+++ b/arch/openrisc/include/asm/spr_defs.h
@@ -51,6 +51,11 @@
51#define SPR_ICCFGR (SPRGROUP_SYS + 6) 51#define SPR_ICCFGR (SPRGROUP_SYS + 6)
52#define SPR_DCFGR (SPRGROUP_SYS + 7) 52#define SPR_DCFGR (SPRGROUP_SYS + 7)
53#define SPR_PCCFGR (SPRGROUP_SYS + 8) 53#define SPR_PCCFGR (SPRGROUP_SYS + 8)
54#define SPR_VR2 (SPRGROUP_SYS + 9)
55#define SPR_AVR (SPRGROUP_SYS + 10)
56#define SPR_EVBAR (SPRGROUP_SYS + 11)
57#define SPR_AECR (SPRGROUP_SYS + 12)
58#define SPR_AESR (SPRGROUP_SYS + 13)
54#define SPR_NPC (SPRGROUP_SYS + 16) /* CZ 21/06/01 */ 59#define SPR_NPC (SPRGROUP_SYS + 16) /* CZ 21/06/01 */
55#define SPR_SR (SPRGROUP_SYS + 17) /* CZ 21/06/01 */ 60#define SPR_SR (SPRGROUP_SYS + 17) /* CZ 21/06/01 */
56#define SPR_PPC (SPRGROUP_SYS + 18) /* CZ 21/06/01 */ 61#define SPR_PPC (SPRGROUP_SYS + 18) /* CZ 21/06/01 */
@@ -61,6 +66,8 @@
61#define SPR_EEAR_LAST (SPRGROUP_SYS + 63) 66#define SPR_EEAR_LAST (SPRGROUP_SYS + 63)
62#define SPR_ESR_BASE (SPRGROUP_SYS + 64) 67#define SPR_ESR_BASE (SPRGROUP_SYS + 64)
63#define SPR_ESR_LAST (SPRGROUP_SYS + 79) 68#define SPR_ESR_LAST (SPRGROUP_SYS + 79)
69#define SPR_COREID (SPRGROUP_SYS + 128)
70#define SPR_NUMCORES (SPRGROUP_SYS + 129)
64#define SPR_GPR_BASE (SPRGROUP_SYS + 1024) 71#define SPR_GPR_BASE (SPRGROUP_SYS + 1024)
65 72
66/* Data MMU group */ 73/* Data MMU group */
@@ -135,12 +142,19 @@
135#define SPR_VR_CFG 0x00ff0000 /* Processor configuration */ 142#define SPR_VR_CFG 0x00ff0000 /* Processor configuration */
136#define SPR_VR_RES 0x0000ffc0 /* Reserved */ 143#define SPR_VR_RES 0x0000ffc0 /* Reserved */
137#define SPR_VR_REV 0x0000003f /* Processor revision */ 144#define SPR_VR_REV 0x0000003f /* Processor revision */
145#define SPR_VR_UVRP 0x00000040 /* Updated Version Registers Present */
138 146
139#define SPR_VR_VER_OFF 24 147#define SPR_VR_VER_OFF 24
140#define SPR_VR_CFG_OFF 16 148#define SPR_VR_CFG_OFF 16
141#define SPR_VR_REV_OFF 0 149#define SPR_VR_REV_OFF 0
142 150
143/* 151/*
152 * Bit definitions for the Version Register 2
153 */
154#define SPR_VR2_CPUID 0xff000000 /* Processor ID */
155#define SPR_VR2_VER 0x00ffffff /* Processor version */
156
157/*
144 * Bit definitions for the Unit Present Register 158 * Bit definitions for the Unit Present Register
145 * 159 *
146 */ 160 */
diff --git a/arch/openrisc/include/asm/time.h b/arch/openrisc/include/asm/time.h
new file mode 100644
index 000000000000..fe83a34a7d68
--- /dev/null
+++ b/arch/openrisc/include/asm/time.h
@@ -0,0 +1,15 @@
1/*
2 * OpenRISC timer API
3 *
4 * Copyright (C) 2017 by Stafford Horne (shorne@gmail.com)
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#ifndef __ASM_OR1K_TIME_H
11#define __ASM_OR1K_TIME_H
12
13extern void openrisc_clockevent_init(void);
14
15#endif /* __ASM_OR1K_TIME_H */
diff --git a/arch/openrisc/include/asm/tlbflush.h b/arch/openrisc/include/asm/tlbflush.h
index 6a2accd6cb67..94227f0eaf6d 100644
--- a/arch/openrisc/include/asm/tlbflush.h
+++ b/arch/openrisc/include/asm/tlbflush.h
@@ -33,13 +33,26 @@
33 * - flush_tlb_page(vma, vmaddr) flushes one page 33 * - flush_tlb_page(vma, vmaddr) flushes one page
34 * - flush_tlb_range(mm, start, end) flushes a range of pages 34 * - flush_tlb_range(mm, start, end) flushes a range of pages
35 */ 35 */
36extern void local_flush_tlb_all(void);
37extern void local_flush_tlb_mm(struct mm_struct *mm);
38extern void local_flush_tlb_page(struct vm_area_struct *vma,
39 unsigned long addr);
40extern void local_flush_tlb_range(struct vm_area_struct *vma,
41 unsigned long start,
42 unsigned long end);
36 43
37void flush_tlb_all(void); 44#ifndef CONFIG_SMP
38void flush_tlb_mm(struct mm_struct *mm); 45#define flush_tlb_all local_flush_tlb_all
39void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); 46#define flush_tlb_mm local_flush_tlb_mm
40void flush_tlb_range(struct vm_area_struct *vma, 47#define flush_tlb_page local_flush_tlb_page
41 unsigned long start, 48#define flush_tlb_range local_flush_tlb_range
42 unsigned long end); 49#else
50extern void flush_tlb_all(void);
51extern void flush_tlb_mm(struct mm_struct *mm);
52extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
53extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
54 unsigned long end);
55#endif
43 56
44static inline void flush_tlb(void) 57static inline void flush_tlb(void)
45{ 58{
diff --git a/arch/openrisc/kernel/Makefile b/arch/openrisc/kernel/Makefile
index ec6d9d37cefd..7d94643c878d 100644
--- a/arch/openrisc/kernel/Makefile
+++ b/arch/openrisc/kernel/Makefile
@@ -8,6 +8,7 @@ obj-y := setup.o or32_ksyms.o process.o dma.o \
8 traps.o time.o irq.o entry.o ptrace.o signal.o \ 8 traps.o time.o irq.o entry.o ptrace.o signal.o \
9 sys_call_table.o 9 sys_call_table.o
10 10
11obj-$(CONFIG_SMP) += smp.o
11obj-$(CONFIG_MODULES) += module.o 12obj-$(CONFIG_MODULES) += module.o
12obj-$(CONFIG_OF) += prom.o 13obj-$(CONFIG_OF) += prom.o
13 14
diff --git a/arch/openrisc/kernel/dma.c b/arch/openrisc/kernel/dma.c
index b10369b7e31b..a945f00011b4 100644
--- a/arch/openrisc/kernel/dma.c
+++ b/arch/openrisc/kernel/dma.c
@@ -32,6 +32,7 @@ page_set_nocache(pte_t *pte, unsigned long addr,
32 unsigned long next, struct mm_walk *walk) 32 unsigned long next, struct mm_walk *walk)
33{ 33{
34 unsigned long cl; 34 unsigned long cl;
35 struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
35 36
36 pte_val(*pte) |= _PAGE_CI; 37 pte_val(*pte) |= _PAGE_CI;
37 38
@@ -42,7 +43,7 @@ page_set_nocache(pte_t *pte, unsigned long addr,
42 flush_tlb_page(NULL, addr); 43 flush_tlb_page(NULL, addr);
43 44
44 /* Flush page out of dcache */ 45 /* Flush page out of dcache */
45 for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size) 46 for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
46 mtspr(SPR_DCBFR, cl); 47 mtspr(SPR_DCBFR, cl);
47 48
48 return 0; 49 return 0;
@@ -140,6 +141,7 @@ or1k_map_page(struct device *dev, struct page *page,
140{ 141{
141 unsigned long cl; 142 unsigned long cl;
142 dma_addr_t addr = page_to_phys(page) + offset; 143 dma_addr_t addr = page_to_phys(page) + offset;
144 struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
143 145
144 if (attrs & DMA_ATTR_SKIP_CPU_SYNC) 146 if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
145 return addr; 147 return addr;
@@ -148,13 +150,13 @@ or1k_map_page(struct device *dev, struct page *page,
148 case DMA_TO_DEVICE: 150 case DMA_TO_DEVICE:
149 /* Flush the dcache for the requested range */ 151 /* Flush the dcache for the requested range */
150 for (cl = addr; cl < addr + size; 152 for (cl = addr; cl < addr + size;
151 cl += cpuinfo.dcache_block_size) 153 cl += cpuinfo->dcache_block_size)
152 mtspr(SPR_DCBFR, cl); 154 mtspr(SPR_DCBFR, cl);
153 break; 155 break;
154 case DMA_FROM_DEVICE: 156 case DMA_FROM_DEVICE:
155 /* Invalidate the dcache for the requested range */ 157 /* Invalidate the dcache for the requested range */
156 for (cl = addr; cl < addr + size; 158 for (cl = addr; cl < addr + size;
157 cl += cpuinfo.dcache_block_size) 159 cl += cpuinfo->dcache_block_size)
158 mtspr(SPR_DCBIR, cl); 160 mtspr(SPR_DCBIR, cl);
159 break; 161 break;
160 default: 162 default:
@@ -213,9 +215,10 @@ or1k_sync_single_for_cpu(struct device *dev,
213{ 215{
214 unsigned long cl; 216 unsigned long cl;
215 dma_addr_t addr = dma_handle; 217 dma_addr_t addr = dma_handle;
218 struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
216 219
217 /* Invalidate the dcache for the requested range */ 220 /* Invalidate the dcache for the requested range */
218 for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size) 221 for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
219 mtspr(SPR_DCBIR, cl); 222 mtspr(SPR_DCBIR, cl);
220} 223}
221 224
@@ -226,9 +229,10 @@ or1k_sync_single_for_device(struct device *dev,
226{ 229{
227 unsigned long cl; 230 unsigned long cl;
228 dma_addr_t addr = dma_handle; 231 dma_addr_t addr = dma_handle;
232 struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
229 233
230 /* Flush the dcache for the requested range */ 234 /* Flush the dcache for the requested range */
231 for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size) 235 for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
232 mtspr(SPR_DCBFR, cl); 236 mtspr(SPR_DCBFR, cl);
233} 237}
234 238
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index 1e49895408f4..a9972dc103f8 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -158,12 +158,38 @@
158 158
159/* =========================================================[ macros ]=== */ 159/* =========================================================[ macros ]=== */
160 160
161 161#ifdef CONFIG_SMP
162#define GET_CURRENT_PGD(reg,t1) \ 162#define GET_CURRENT_PGD(reg,t1) \
163 LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\ 163 LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\
164 l.mfspr t1,r0,SPR_COREID ;\
165 l.slli t1,t1,2 ;\
166 l.add reg,reg,t1 ;\
164 tophys (t1,reg) ;\ 167 tophys (t1,reg) ;\
165 l.lwz reg,0(t1) 168 l.lwz reg,0(t1)
169#else
170#define GET_CURRENT_PGD(reg,t1) \
171 LOAD_SYMBOL_2_GPR(reg,current_pgd) ;\
172 tophys (t1,reg) ;\
173 l.lwz reg,0(t1)
174#endif
166 175
176/* Load r10 from current_thread_info_set - clobbers r1 and r30 */
177#ifdef CONFIG_SMP
178#define GET_CURRENT_THREAD_INFO \
179 LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\
180 tophys (r30,r1) ;\
181 l.mfspr r10,r0,SPR_COREID ;\
182 l.slli r10,r10,2 ;\
183 l.add r30,r30,r10 ;\
184 /* r10: current_thread_info */ ;\
185 l.lwz r10,0(r30)
186#else
187#define GET_CURRENT_THREAD_INFO \
188 LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\
189 tophys (r30,r1) ;\
190 /* r10: current_thread_info */ ;\
191 l.lwz r10,0(r30)
192#endif
167 193
168/* 194/*
169 * DSCR: this is a common hook for handling exceptions. it will save 195 * DSCR: this is a common hook for handling exceptions. it will save
@@ -206,10 +232,7 @@
206 l.bnf 2f /* kernel_mode */ ;\ 232 l.bnf 2f /* kernel_mode */ ;\
207 EXCEPTION_T_STORE_SP /* delay slot */ ;\ 233 EXCEPTION_T_STORE_SP /* delay slot */ ;\
2081: /* user_mode: */ ;\ 2341: /* user_mode: */ ;\
209 LOAD_SYMBOL_2_GPR(r1,current_thread_info_set) ;\ 235 GET_CURRENT_THREAD_INFO ;\
210 tophys (r30,r1) ;\
211 /* r10: current_thread_info */ ;\
212 l.lwz r10,0(r30) ;\
213 tophys (r30,r10) ;\ 236 tophys (r30,r10) ;\
214 l.lwz r1,(TI_KSP)(r30) ;\ 237 l.lwz r1,(TI_KSP)(r30) ;\
215 /* fall through */ ;\ 238 /* fall through */ ;\
@@ -530,6 +553,12 @@ _start:
530 CLEAR_GPR(r30) 553 CLEAR_GPR(r30)
531 CLEAR_GPR(r31) 554 CLEAR_GPR(r31)
532 555
556#ifdef CONFIG_SMP
557 l.mfspr r26,r0,SPR_COREID
558 l.sfeq r26,r0
559 l.bnf secondary_wait
560 l.nop
561#endif
533 /* 562 /*
534 * set up initial ksp and current 563 * set up initial ksp and current
535 */ 564 */
@@ -681,6 +710,64 @@ _flush_tlb:
681 l.jr r9 710 l.jr r9
682 l.nop 711 l.nop
683 712
713#ifdef CONFIG_SMP
714secondary_wait:
715 l.mfspr r25,r0,SPR_COREID
716 l.movhi r3,hi(secondary_release)
717 l.ori r3,r3,lo(secondary_release)
718 tophys(r4, r3)
719 l.lwz r3,0(r4)
720 l.sfeq r25,r3
721 l.bnf secondary_wait
722 l.nop
723 /* fall through to secondary_init */
724
725secondary_init:
726 /*
727 * set up initial ksp and current
728 */
729 LOAD_SYMBOL_2_GPR(r10, secondary_thread_info)
730 tophys (r30,r10)
731 l.lwz r10,0(r30)
732 l.addi r1,r10,THREAD_SIZE
733 tophys (r30,r10)
734 l.sw TI_KSP(r30),r1
735
736 l.jal _ic_enable
737 l.nop
738
739 l.jal _dc_enable
740 l.nop
741
742 l.jal _flush_tlb
743 l.nop
744
745 /*
746 * enable dmmu & immu
747 */
748 l.mfspr r30,r0,SPR_SR
749 l.movhi r28,hi(SPR_SR_DME | SPR_SR_IME)
750 l.ori r28,r28,lo(SPR_SR_DME | SPR_SR_IME)
751 l.or r30,r30,r28
752 /*
753 * This is a bit tricky, we need to switch over from physical addresses
754 * to virtual addresses on the fly.
755 * To do that, we first set up ESR with the IME and DME bits set.
756 * Then EPCR is set to secondary_start and then a l.rfe is issued to
757 * "jump" to that.
758 */
759 l.mtspr r0,r30,SPR_ESR_BASE
760 LOAD_SYMBOL_2_GPR(r30, secondary_start)
761 l.mtspr r0,r30,SPR_EPCR_BASE
762 l.rfe
763
764secondary_start:
765 LOAD_SYMBOL_2_GPR(r30, secondary_start_kernel)
766 l.jr r30
767 l.nop
768
769#endif
770
684/* ========================================[ cache ]=== */ 771/* ========================================[ cache ]=== */
685 772
686 /* alignment here so we don't change memory offsets with 773 /* alignment here so we don't change memory offsets with
diff --git a/arch/openrisc/kernel/setup.c b/arch/openrisc/kernel/setup.c
index dbf5ee95a0d5..9d28ab14d139 100644
--- a/arch/openrisc/kernel/setup.c
+++ b/arch/openrisc/kernel/setup.c
@@ -93,7 +93,7 @@ static void __init setup_memory(void)
93 memblock_dump_all(); 93 memblock_dump_all();
94} 94}
95 95
96struct cpuinfo cpuinfo; 96struct cpuinfo_or1k cpuinfo_or1k[NR_CPUS];
97 97
98static void print_cpuinfo(void) 98static void print_cpuinfo(void)
99{ 99{
@@ -101,12 +101,13 @@ static void print_cpuinfo(void)
101 unsigned long vr = mfspr(SPR_VR); 101 unsigned long vr = mfspr(SPR_VR);
102 unsigned int version; 102 unsigned int version;
103 unsigned int revision; 103 unsigned int revision;
104 struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
104 105
105 version = (vr & SPR_VR_VER) >> 24; 106 version = (vr & SPR_VR_VER) >> 24;
106 revision = (vr & SPR_VR_REV); 107 revision = (vr & SPR_VR_REV);
107 108
108 printk(KERN_INFO "CPU: OpenRISC-%x (revision %d) @%d MHz\n", 109 printk(KERN_INFO "CPU: OpenRISC-%x (revision %d) @%d MHz\n",
109 version, revision, cpuinfo.clock_frequency / 1000000); 110 version, revision, cpuinfo->clock_frequency / 1000000);
110 111
111 if (!(upr & SPR_UPR_UP)) { 112 if (!(upr & SPR_UPR_UP)) {
112 printk(KERN_INFO 113 printk(KERN_INFO
@@ -117,15 +118,15 @@ static void print_cpuinfo(void)
117 if (upr & SPR_UPR_DCP) 118 if (upr & SPR_UPR_DCP)
118 printk(KERN_INFO 119 printk(KERN_INFO
119 "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n", 120 "-- dcache: %4d bytes total, %2d bytes/line, %d way(s)\n",
120 cpuinfo.dcache_size, cpuinfo.dcache_block_size, 121 cpuinfo->dcache_size, cpuinfo->dcache_block_size,
121 cpuinfo.dcache_ways); 122 cpuinfo->dcache_ways);
122 else 123 else
123 printk(KERN_INFO "-- dcache disabled\n"); 124 printk(KERN_INFO "-- dcache disabled\n");
124 if (upr & SPR_UPR_ICP) 125 if (upr & SPR_UPR_ICP)
125 printk(KERN_INFO 126 printk(KERN_INFO
126 "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n", 127 "-- icache: %4d bytes total, %2d bytes/line, %d way(s)\n",
127 cpuinfo.icache_size, cpuinfo.icache_block_size, 128 cpuinfo->icache_size, cpuinfo->icache_block_size,
128 cpuinfo.icache_ways); 129 cpuinfo->icache_ways);
129 else 130 else
130 printk(KERN_INFO "-- icache disabled\n"); 131 printk(KERN_INFO "-- icache disabled\n");
131 132
@@ -153,38 +154,58 @@ static void print_cpuinfo(void)
153 printk(KERN_INFO "-- custom unit(s)\n"); 154 printk(KERN_INFO "-- custom unit(s)\n");
154} 155}
155 156
157static struct device_node *setup_find_cpu_node(int cpu)
158{
159 u32 hwid;
160 struct device_node *cpun;
161 struct device_node *cpus = of_find_node_by_path("/cpus");
162
163 for_each_available_child_of_node(cpus, cpun) {
164 if (of_property_read_u32(cpun, "reg", &hwid))
165 continue;
166 if (hwid == cpu)
167 return cpun;
168 }
169
170 return NULL;
171}
172
156void __init setup_cpuinfo(void) 173void __init setup_cpuinfo(void)
157{ 174{
158 struct device_node *cpu; 175 struct device_node *cpu;
159 unsigned long iccfgr, dccfgr; 176 unsigned long iccfgr, dccfgr;
160 unsigned long cache_set_size; 177 unsigned long cache_set_size;
178 int cpu_id = smp_processor_id();
179 struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu_id];
161 180
162 cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481"); 181 cpu = setup_find_cpu_node(cpu_id);
163 if (!cpu) 182 if (!cpu)
164 panic("No compatible CPU found in device tree...\n"); 183 panic("Couldn't find CPU%d in device tree...\n", cpu_id);
165 184
166 iccfgr = mfspr(SPR_ICCFGR); 185 iccfgr = mfspr(SPR_ICCFGR);
167 cpuinfo.icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW); 186 cpuinfo->icache_ways = 1 << (iccfgr & SPR_ICCFGR_NCW);
168 cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3); 187 cache_set_size = 1 << ((iccfgr & SPR_ICCFGR_NCS) >> 3);
169 cpuinfo.icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7); 188 cpuinfo->icache_block_size = 16 << ((iccfgr & SPR_ICCFGR_CBS) >> 7);
170 cpuinfo.icache_size = 189 cpuinfo->icache_size =
171 cache_set_size * cpuinfo.icache_ways * cpuinfo.icache_block_size; 190 cache_set_size * cpuinfo->icache_ways * cpuinfo->icache_block_size;
172 191
173 dccfgr = mfspr(SPR_DCCFGR); 192 dccfgr = mfspr(SPR_DCCFGR);
174 cpuinfo.dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW); 193 cpuinfo->dcache_ways = 1 << (dccfgr & SPR_DCCFGR_NCW);
175 cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3); 194 cache_set_size = 1 << ((dccfgr & SPR_DCCFGR_NCS) >> 3);
176 cpuinfo.dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7); 195 cpuinfo->dcache_block_size = 16 << ((dccfgr & SPR_DCCFGR_CBS) >> 7);
177 cpuinfo.dcache_size = 196 cpuinfo->dcache_size =
178 cache_set_size * cpuinfo.dcache_ways * cpuinfo.dcache_block_size; 197 cache_set_size * cpuinfo->dcache_ways * cpuinfo->dcache_block_size;
179 198
180 if (of_property_read_u32(cpu, "clock-frequency", 199 if (of_property_read_u32(cpu, "clock-frequency",
181 &cpuinfo.clock_frequency)) { 200 &cpuinfo->clock_frequency)) {
182 printk(KERN_WARNING 201 printk(KERN_WARNING
183 "Device tree missing CPU 'clock-frequency' parameter." 202 "Device tree missing CPU 'clock-frequency' parameter."
184 "Assuming frequency 25MHZ" 203 "Assuming frequency 25MHZ"
185 "This is probably not what you want."); 204 "This is probably not what you want.");
186 } 205 }
187 206
207 cpuinfo->coreid = mfspr(SPR_COREID);
208
188 of_node_put(cpu); 209 of_node_put(cpu);
189 210
190 print_cpuinfo(); 211 print_cpuinfo();
@@ -251,8 +272,8 @@ void __init detect_unit_config(unsigned long upr, unsigned long mask,
251void calibrate_delay(void) 272void calibrate_delay(void)
252{ 273{
253 const int *val; 274 const int *val;
254 struct device_node *cpu = NULL; 275 struct device_node *cpu = setup_find_cpu_node(smp_processor_id());
255 cpu = of_find_compatible_node(NULL, NULL, "opencores,or1200-rtlsvn481"); 276
256 val = of_get_property(cpu, "clock-frequency", NULL); 277 val = of_get_property(cpu, "clock-frequency", NULL);
257 if (!val) 278 if (!val)
258 panic("no cpu 'clock-frequency' parameter in device tree"); 279 panic("no cpu 'clock-frequency' parameter in device tree");
@@ -268,6 +289,10 @@ void __init setup_arch(char **cmdline_p)
268 289
269 setup_cpuinfo(); 290 setup_cpuinfo();
270 291
292#ifdef CONFIG_SMP
293 smp_init_cpus();
294#endif
295
271 /* process 1's initial memory region is the kernel code/data */ 296 /* process 1's initial memory region is the kernel code/data */
272 init_mm.start_code = (unsigned long)_stext; 297 init_mm.start_code = (unsigned long)_stext;
273 init_mm.end_code = (unsigned long)_etext; 298 init_mm.end_code = (unsigned long)_etext;
@@ -302,54 +327,78 @@ void __init setup_arch(char **cmdline_p)
302 327
303static int show_cpuinfo(struct seq_file *m, void *v) 328static int show_cpuinfo(struct seq_file *m, void *v)
304{ 329{
305 unsigned long vr; 330 unsigned int vr, cpucfgr;
306 int version, revision; 331 unsigned int avr;
332 unsigned int version;
333 struct cpuinfo_or1k *cpuinfo = v;
307 334
308 vr = mfspr(SPR_VR); 335 vr = mfspr(SPR_VR);
309 version = (vr & SPR_VR_VER) >> 24; 336 cpucfgr = mfspr(SPR_CPUCFGR);
310 revision = vr & SPR_VR_REV; 337
311 338#ifdef CONFIG_SMP
312 seq_printf(m, 339 seq_printf(m, "processor\t\t: %d\n", cpuinfo->coreid);
313 "cpu\t\t: OpenRISC-%x\n" 340#endif
314 "revision\t: %d\n" 341 if (vr & SPR_VR_UVRP) {
315 "frequency\t: %ld\n" 342 vr = mfspr(SPR_VR2);
316 "dcache size\t: %d bytes\n" 343 version = vr & SPR_VR2_VER;
317 "dcache block size\t: %d bytes\n" 344 avr = mfspr(SPR_AVR);
318 "dcache ways\t: %d\n" 345 seq_printf(m, "cpu architecture\t: "
319 "icache size\t: %d bytes\n" 346 "OpenRISC 1000 (%d.%d-rev%d)\n",
320 "icache block size\t: %d bytes\n" 347 (avr >> 24) & 0xff,
321 "icache ways\t: %d\n" 348 (avr >> 16) & 0xff,
322 "immu\t\t: %d entries, %lu ways\n" 349 (avr >> 8) & 0xff);
323 "dmmu\t\t: %d entries, %lu ways\n" 350 seq_printf(m, "cpu implementation id\t: 0x%x\n",
324 "bogomips\t: %lu.%02lu\n", 351 (vr & SPR_VR2_CPUID) >> 24);
325 version, 352 seq_printf(m, "cpu version\t\t: 0x%x\n", version);
326 revision, 353 } else {
327 loops_per_jiffy * HZ, 354 version = (vr & SPR_VR_VER) >> 24;
328 cpuinfo.dcache_size, 355 seq_printf(m, "cpu\t\t\t: OpenRISC-%x\n", version);
329 cpuinfo.dcache_block_size, 356 seq_printf(m, "revision\t\t: %d\n", vr & SPR_VR_REV);
330 cpuinfo.dcache_ways, 357 }
331 cpuinfo.icache_size, 358 seq_printf(m, "frequency\t\t: %ld\n", loops_per_jiffy * HZ);
332 cpuinfo.icache_block_size, 359 seq_printf(m, "dcache size\t\t: %d bytes\n", cpuinfo->dcache_size);
333 cpuinfo.icache_ways, 360 seq_printf(m, "dcache block size\t: %d bytes\n",
334 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2), 361 cpuinfo->dcache_block_size);
335 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW), 362 seq_printf(m, "dcache ways\t\t: %d\n", cpuinfo->dcache_ways);
336 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2), 363 seq_printf(m, "icache size\t\t: %d bytes\n", cpuinfo->icache_size);
337 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW), 364 seq_printf(m, "icache block size\t: %d bytes\n",
338 (loops_per_jiffy * HZ) / 500000, 365 cpuinfo->icache_block_size);
339 ((loops_per_jiffy * HZ) / 5000) % 100); 366 seq_printf(m, "icache ways\t\t: %d\n", cpuinfo->icache_ways);
367 seq_printf(m, "immu\t\t\t: %d entries, %lu ways\n",
368 1 << ((mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTS) >> 2),
369 1 + (mfspr(SPR_DMMUCFGR) & SPR_DMMUCFGR_NTW));
370 seq_printf(m, "dmmu\t\t\t: %d entries, %lu ways\n",
371 1 << ((mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTS) >> 2),
372 1 + (mfspr(SPR_IMMUCFGR) & SPR_IMMUCFGR_NTW));
373 seq_printf(m, "bogomips\t\t: %lu.%02lu\n",
374 (loops_per_jiffy * HZ) / 500000,
375 ((loops_per_jiffy * HZ) / 5000) % 100);
376
377 seq_puts(m, "features\t\t: ");
378 seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OB32S ? "orbis32" : "");
379 seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OB64S ? "orbis64" : "");
380 seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OF32S ? "orfpx32" : "");
381 seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OF64S ? "orfpx64" : "");
382 seq_printf(m, "%s ", cpucfgr & SPR_CPUCFGR_OV64S ? "orvdx64" : "");
383 seq_puts(m, "\n");
384
385 seq_puts(m, "\n");
386
340 return 0; 387 return 0;
341} 388}
342 389
343static void *c_start(struct seq_file *m, loff_t * pos) 390static void *c_start(struct seq_file *m, loff_t *pos)
344{ 391{
345 /* We only have one CPU... */ 392 *pos = cpumask_next(*pos - 1, cpu_online_mask);
346 return *pos < 1 ? (void *)1 : NULL; 393 if ((*pos) < nr_cpu_ids)
394 return &cpuinfo_or1k[*pos];
395 return NULL;
347} 396}
348 397
349static void *c_next(struct seq_file *m, void *v, loff_t * pos) 398static void *c_next(struct seq_file *m, void *v, loff_t *pos)
350{ 399{
351 ++*pos; 400 (*pos)++;
352 return NULL; 401 return c_start(m, pos);
353} 402}
354 403
355static void c_stop(struct seq_file *m, void *v) 404static void c_stop(struct seq_file *m, void *v)
diff --git a/arch/openrisc/kernel/smp.c b/arch/openrisc/kernel/smp.c
new file mode 100644
index 000000000000..fd724123229a
--- /dev/null
+++ b/arch/openrisc/kernel/smp.c
@@ -0,0 +1,235 @@
1/*
2 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
3 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
4 *
5 * Based on arm64 and arc implementations
6 * Copyright (C) 2013 ARM Ltd.
7 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
8 *
9 * This file is licensed under the terms of the GNU General Public License
10 * version 2. This program is licensed "as is" without any warranty of any
11 * kind, whether express or implied.
12 */
13
14#include <linux/smp.h>
15#include <linux/cpu.h>
16#include <linux/sched.h>
17#include <linux/irq.h>
18#include <asm/cpuinfo.h>
19#include <asm/mmu_context.h>
20#include <asm/tlbflush.h>
21#include <asm/time.h>
22
/* IPI trigger callback, installed by the platform's irqchip driver via
 * set_smp_cross_call(); stays NULL until an IPI controller registers,
 * which __cpu_up() checks for before starting secondaries. */
static void (*smp_cross_call)(const struct cpumask *, unsigned int);

/* Cpu id the boot CPU wants released from its reset spin; -1 (all-ones
 * on this unsigned type) means "nobody released yet".  NOTE(review):
 * presumably polled by the secondary's early startup assembly --
 * confirm against head.S. */
unsigned long secondary_release = -1;
/* thread_info of the idle task for the secondary currently being booted. */
struct thread_info *secondary_thread_info;
27
/* Message types carried in an inter-processor interrupt (see handle_IPI()). */
enum ipi_msg_type {
	IPI_RESCHEDULE,		/* run scheduler_ipi() on the target cpu */
	IPI_CALL_FUNC,		/* run queued smp_call_function() callbacks */
	IPI_CALL_FUNC_SINGLE,	/* run one smp_call_function_single() callback */
};

/* Serialises boot_secondary()'s release of a secondary CPU. */
static DEFINE_SPINLOCK(boot_lock);
35
/*
 * Release one secondary CPU from its post-reset spin by publishing its
 * cpu id in secondary_release.
 *
 * @cpu:  logical id of the secondary to release
 * @idle: idle task chosen for that cpu (unused here; its thread_info was
 *        already published through secondary_thread_info in __cpu_up())
 */
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	/* NOTE(review): presumably the secondary's startup code polls
	 * secondary_release for its own id -- confirm against head.S. */
	secondary_release = cpu;

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}
52
/* Nothing to prepare on the boot CPU before SMP bring-up. */
void __init smp_prepare_boot_cpu(void)
{
}
56
57void __init smp_init_cpus(void)
58{
59 int i;
60
61 for (i = 0; i < NR_CPUS; i++)
62 set_cpu_possible(i, true);
63}
64
65void __init smp_prepare_cpus(unsigned int max_cpus)
66{
67 int i;
68
69 /*
70 * Initialise the present map, which describes the set of CPUs
71 * actually populated at the present time.
72 */
73 for (i = 0; i < max_cpus; i++)
74 set_cpu_present(i, true);
75}
76
/* Called once all CPUs are up; no OpenRISC-specific finalisation needed. */
void __init smp_cpus_done(unsigned int max_cpus)
{
}
80
81static DECLARE_COMPLETION(cpu_running);
82
83int __cpu_up(unsigned int cpu, struct task_struct *idle)
84{
85 if (smp_cross_call == NULL) {
86 pr_warn("CPU%u: failed to start, IPI controller missing",
87 cpu);
88 return -EIO;
89 }
90
91 secondary_thread_info = task_thread_info(idle);
92 current_pgd[cpu] = init_mm.pgd;
93
94 boot_secondary(cpu, idle);
95 if (!wait_for_completion_timeout(&cpu_running,
96 msecs_to_jiffies(1000))) {
97 pr_crit("CPU%u: failed to start\n", cpu);
98 return -EIO;
99 }
100
101 return 0;
102}
103
/*
 * C entry point for a secondary CPU, reached from the assembly startup
 * path.  Adopts init_mm, initialises per-cpu state, marks itself online
 * (which releases the boot CPU waiting in __cpu_up()) and enters the
 * idle loop.  Never returns.  The statement order below is part of the
 * bring-up contract -- do not reorder.
 */
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();
	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	/* Fill this core's cpuinfo slot, then register its per-cpu tick
	 * timer clockevent. */
	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	set_cpu_online(cpu, true);
	/* Wakes the boot CPU out of wait_for_completion_timeout(). */
	complete(&cpu_running);

	local_irq_enable();

	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
136
137void handle_IPI(unsigned int ipi_msg)
138{
139 unsigned int cpu = smp_processor_id();
140
141 switch (ipi_msg) {
142 case IPI_RESCHEDULE:
143 scheduler_ipi();
144 break;
145
146 case IPI_CALL_FUNC:
147 generic_smp_call_function_interrupt();
148 break;
149
150 case IPI_CALL_FUNC_SINGLE:
151 generic_smp_call_function_single_interrupt();
152 break;
153
154 default:
155 WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
156 break;
157 }
158}
159
160void smp_send_reschedule(int cpu)
161{
162 smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
163}
164
/*
 * IPI callback used by smp_send_stop(): take this CPU out of the online
 * mask, mask interrupts and park it as deeply as the hardware allows.
 * Never returns.
 */
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze -- only if the power management unit is present (PMP). */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}
178
179void smp_send_stop(void)
180{
181 smp_call_function(stop_this_cpu, NULL, 0);
182}
183
184/* not supported, yet */
185int setup_profiling_timer(unsigned int multiplier)
186{
187 return -EINVAL;
188}
189
190void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
191{
192 smp_cross_call = fn;
193}
194
195void arch_send_call_function_single_ipi(int cpu)
196{
197 smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
198}
199
200void arch_send_call_function_ipi_mask(const struct cpumask *mask)
201{
202 smp_cross_call(mask, IPI_CALL_FUNC);
203}
204
205/* TLB flush operations - Performed on each CPU */
/* Per-cpu worker run by on_each_cpu() from the flush_tlb_* entry points. */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}
210
211void flush_tlb_all(void)
212{
213 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
214}
215
216/*
217 * FIXME: implement proper functionality instead of flush_tlb_all.
218 * *But*, as things currently stand, the local_flush_tlb_* functions will
219 * all boil down to local_flush_tlb_all anyway.
220 */
221void flush_tlb_mm(struct mm_struct *mm)
222{
223 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
224}
225
226void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
227{
228 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
229}
230
231void flush_tlb_range(struct vm_area_struct *vma,
232 unsigned long start, unsigned long end)
233{
234 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
235}
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c
index 687c11d048d7..ab04eaedbf8d 100644
--- a/arch/openrisc/kernel/time.c
+++ b/arch/openrisc/kernel/time.c
@@ -53,13 +53,32 @@ static int openrisc_timer_set_next_event(unsigned long delta,
53 * timers) we cannot enable the PERIODIC feature. The tick timer can run using 53 * timers) we cannot enable the PERIODIC feature. The tick timer can run using
54 * one-shot events, so no problem. 54 * one-shot events, so no problem.
55 */ 55 */
56DEFINE_PER_CPU(struct clock_event_device, clockevent_openrisc_timer);
56 57
57static struct clock_event_device clockevent_openrisc_timer = { 58void openrisc_clockevent_init(void)
58 .name = "openrisc_timer_clockevent", 59{
59 .features = CLOCK_EVT_FEAT_ONESHOT, 60 unsigned int cpu = smp_processor_id();
60 .rating = 300, 61 struct clock_event_device *evt =
61 .set_next_event = openrisc_timer_set_next_event, 62 &per_cpu(clockevent_openrisc_timer, cpu);
62}; 63 struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[cpu];
64
65 mtspr(SPR_TTMR, SPR_TTMR_CR);
66
67#ifdef CONFIG_SMP
68 evt->broadcast = tick_broadcast;
69#endif
70 evt->name = "openrisc_timer_clockevent",
71 evt->features = CLOCK_EVT_FEAT_ONESHOT,
72 evt->rating = 300,
73 evt->set_next_event = openrisc_timer_set_next_event,
74
75 evt->cpumask = cpumask_of(cpu);
76
77 /* We only have 28 bits */
78 clockevents_config_and_register(evt, cpuinfo->clock_frequency,
79 100, 0x0fffffff);
80
81}
63 82
64static inline void timer_ack(void) 83static inline void timer_ack(void)
65{ 84{
@@ -83,7 +102,9 @@ static inline void timer_ack(void)
83irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs) 102irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs)
84{ 103{
85 struct pt_regs *old_regs = set_irq_regs(regs); 104 struct pt_regs *old_regs = set_irq_regs(regs);
86 struct clock_event_device *evt = &clockevent_openrisc_timer; 105 unsigned int cpu = smp_processor_id();
106 struct clock_event_device *evt =
107 &per_cpu(clockevent_openrisc_timer, cpu);
87 108
88 timer_ack(); 109 timer_ack();
89 110
@@ -99,24 +120,12 @@ irqreturn_t __irq_entry timer_interrupt(struct pt_regs *regs)
99 return IRQ_HANDLED; 120 return IRQ_HANDLED;
100} 121}
101 122
102static __init void openrisc_clockevent_init(void)
103{
104 clockevent_openrisc_timer.cpumask = cpumask_of(0);
105
106 /* We only have 28 bits */
107 clockevents_config_and_register(&clockevent_openrisc_timer,
108 cpuinfo.clock_frequency,
109 100, 0x0fffffff);
110
111}
112
113/** 123/**
114 * Clocksource: Based on OpenRISC timer/counter 124 * Clocksource: Based on OpenRISC timer/counter
115 * 125 *
116 * This sets up the OpenRISC Tick Timer as a clock source. The tick timer 126 * This sets up the OpenRISC Tick Timer as a clock source. The tick timer
117 * is 32 bits wide and runs at the CPU clock frequency. 127 * is 32 bits wide and runs at the CPU clock frequency.
118 */ 128 */
119
120static u64 openrisc_timer_read(struct clocksource *cs) 129static u64 openrisc_timer_read(struct clocksource *cs)
121{ 130{
122 return (u64) mfspr(SPR_TTCR); 131 return (u64) mfspr(SPR_TTCR);
@@ -132,7 +141,9 @@ static struct clocksource openrisc_timer = {
132 141
133static int __init openrisc_timer_init(void) 142static int __init openrisc_timer_init(void)
134{ 143{
135 if (clocksource_register_hz(&openrisc_timer, cpuinfo.clock_frequency)) 144 struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];
145
146 if (clocksource_register_hz(&openrisc_timer, cpuinfo->clock_frequency))
136 panic("failed to register clocksource"); 147 panic("failed to register clocksource");
137 148
138 /* Enable the incrementer: 'continuous' mode with interrupt disabled */ 149 /* Enable the incrementer: 'continuous' mode with interrupt disabled */
diff --git a/arch/openrisc/lib/delay.c b/arch/openrisc/lib/delay.c
index 8b13fdf43ec6..a92bd621aa1f 100644
--- a/arch/openrisc/lib/delay.c
+++ b/arch/openrisc/lib/delay.c
@@ -25,7 +25,7 @@
25 25
26int read_current_timer(unsigned long *timer_value) 26int read_current_timer(unsigned long *timer_value)
27{ 27{
28 *timer_value = mfspr(SPR_TTCR); 28 *timer_value = get_cycles();
29 return 0; 29 return 0;
30} 30}
31 31
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index e310ab499385..d0021dfae20a 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -33,7 +33,7 @@ unsigned long pte_errors; /* updated by do_page_fault() */
33/* __PHX__ :: - check the vmalloc_fault in do_page_fault() 33/* __PHX__ :: - check the vmalloc_fault in do_page_fault()
34 * - also look into include/asm-or32/mmu_context.h 34 * - also look into include/asm-or32/mmu_context.h
35 */ 35 */
36volatile pgd_t *current_pgd; 36volatile pgd_t *current_pgd[NR_CPUS];
37 37
38extern void die(char *, struct pt_regs *, long); 38extern void die(char *, struct pt_regs *, long);
39 39
@@ -319,7 +319,7 @@ vmalloc_fault:
319 319
320 phx_mmu("vmalloc_fault"); 320 phx_mmu("vmalloc_fault");
321*/ 321*/
322 pgd = (pgd_t *)current_pgd + offset; 322 pgd = (pgd_t *)current_pgd[smp_processor_id()] + offset;
323 pgd_k = init_mm.pgd + offset; 323 pgd_k = init_mm.pgd + offset;
324 324
325 /* Since we're two-level, we don't need to do both 325 /* Since we're two-level, we don't need to do both
diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c
index f67d82b9d22f..6972d5d6f23f 100644
--- a/arch/openrisc/mm/init.c
+++ b/arch/openrisc/mm/init.c
@@ -147,7 +147,7 @@ void __init paging_init(void)
147 * (even if it is most probably not used until the next 147 * (even if it is most probably not used until the next
148 * switch_mm) 148 * switch_mm)
149 */ 149 */
150 current_pgd = init_mm.pgd; 150 current_pgd[smp_processor_id()] = init_mm.pgd;
151 151
152 end = (unsigned long)__va(max_low_pfn * PAGE_SIZE); 152 end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
153 153
diff --git a/arch/openrisc/mm/tlb.c b/arch/openrisc/mm/tlb.c
index 683bd4d31c7c..6c253a2e86bc 100644
--- a/arch/openrisc/mm/tlb.c
+++ b/arch/openrisc/mm/tlb.c
@@ -49,7 +49,7 @@
49 * 49 *
50 */ 50 */
51 51
52void flush_tlb_all(void) 52void local_flush_tlb_all(void)
53{ 53{
54 int i; 54 int i;
55 unsigned long num_tlb_sets; 55 unsigned long num_tlb_sets;
@@ -86,7 +86,7 @@ void flush_tlb_all(void)
86#define flush_itlb_page_no_eir(addr) \ 86#define flush_itlb_page_no_eir(addr) \
87 mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0); 87 mtspr_off(SPR_ITLBMR_BASE(0), ITLB_OFFSET(addr), 0);
88 88
89void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) 89void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
90{ 90{
91 if (have_dtlbeir) 91 if (have_dtlbeir)
92 flush_dtlb_page_eir(addr); 92 flush_dtlb_page_eir(addr);
@@ -99,8 +99,8 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
99 flush_itlb_page_no_eir(addr); 99 flush_itlb_page_no_eir(addr);
100} 100}
101 101
102void flush_tlb_range(struct vm_area_struct *vma, 102void local_flush_tlb_range(struct vm_area_struct *vma,
103 unsigned long start, unsigned long end) 103 unsigned long start, unsigned long end)
104{ 104{
105 int addr; 105 int addr;
106 bool dtlbeir; 106 bool dtlbeir;
@@ -129,13 +129,13 @@ void flush_tlb_range(struct vm_area_struct *vma,
129 * This should be changed to loop over mm and call flush_tlb_range. 129 * This should be changed to loop over mm and call flush_tlb_range.
130 */ 130 */
131 131
132void flush_tlb_mm(struct mm_struct *mm) 132void local_flush_tlb_mm(struct mm_struct *mm)
133{ 133{
134 134
135 /* Was seeing bugs with the mm struct passed to us. Scrapped most of 135 /* Was seeing bugs with the mm struct passed to us. Scrapped most of
136 this function. */ 136 this function. */
137 /* Several architectures do this */ 137 /* Several architectures do this */
138 flush_tlb_all(); 138 local_flush_tlb_all();
139} 139}
140 140
141/* called in schedule() just before actually doing the switch_to */ 141/* called in schedule() just before actually doing the switch_to */
@@ -149,14 +149,14 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
149 * might be invalid at points where we still need to dereference 149 * might be invalid at points where we still need to dereference
150 * the pgd. 150 * the pgd.
151 */ 151 */
152 current_pgd = next->pgd; 152 current_pgd[smp_processor_id()] = next->pgd;
153 153
154 /* We don't have context support implemented, so flush all 154 /* We don't have context support implemented, so flush all
155 * entries belonging to previous map 155 * entries belonging to previous map
156 */ 156 */
157 157
158 if (prev != next) 158 if (prev != next)
159 flush_tlb_mm(prev); 159 local_flush_tlb_mm(prev);
160 160
161} 161}
162 162