author     Max Filippov <jcmvbkbc@gmail.com>        2013-10-16 18:42:26 -0400
committer  Chris Zankel <chris@zankel.net>          2014-01-14 13:19:58 -0500
commit     f615136c06a791364f5afa8b8ba965315a6440f1 (patch)
tree       d9ced4cfdfd13438ce23384fbd64006bb74fd8b6
parent     26a8e96a8b37e8070fa9dcb1b7490cf4d4492d50 (diff)
xtensa: add SMP support
This is largely based on SMP code from the xtensa-2.6.29-smp tree by
Piet Delaney, Marc Gauthier, Joe Taylor, Christian Zankel (and possibly
other Tensilica folks).

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
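
The heart of the new arch/xtensa/kernel/smp.c (465 lines in the diffstat below) is one
cross-call pattern: each global flush_tlb_*/flush_cache_* entry point packs its arguments
into a small struct and runs the matching local_* primitive on every online CPU via
on_each_cpu(). A condensed sketch of that pattern, taken from the flush_tlb_range() path
in the hunks below (illustrative only, not a drop-in file; the usual <linux/smp.h> and
<linux/mm_types.h> declarations are assumed):

	/* Argument block handed to the IPI callback on every CPU. */
	struct flush_data {
		struct vm_area_struct *vma;
		unsigned long addr1;
		unsigned long addr2;
	};

	/* Runs on each CPU from IPI context and touches only the local TLB. */
	static void ipi_flush_tlb_range(void *arg)
	{
		struct flush_data *fd = arg;

		local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
	}

	void flush_tlb_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
	{
		struct flush_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		/* Final argument 1: wait until every CPU has finished its flush. */
		on_each_cpu(ipi_flush_tlb_range, &fd, 1);
	}

The flush_tlb_all/mm/page and flush_cache_* wrappers added by the patch follow the same
template, differing only in which local_* helper the callback invokes.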
-rw-r--r--  arch/xtensa/Kconfig                                        |  37
-rw-r--r--  arch/xtensa/include/asm/barrier.h                          |   4
-rw-r--r--  arch/xtensa/include/asm/bitops.h                           |   8
-rw-r--r--  arch/xtensa/include/asm/cacheflush.h                       |  40
-rw-r--r--  arch/xtensa/include/asm/mmu.h                              |  10
-rw-r--r--  arch/xtensa/include/asm/mmu_context.h                      | 100
-rw-r--r--  arch/xtensa/include/asm/ptrace.h                           |   8
-rw-r--r--  arch/xtensa/include/asm/smp.h                              |  29
-rw-r--r--  arch/xtensa/include/asm/timex.h                            |  14
-rw-r--r--  arch/xtensa/include/asm/tlbflush.h                         |  42
-rw-r--r--  arch/xtensa/include/asm/traps.h                            |   1
-rw-r--r--  arch/xtensa/kernel/Makefile                                |   1
-rw-r--r--  arch/xtensa/kernel/head.S                                  | 130
-rw-r--r--  arch/xtensa/kernel/irq.c                                   |  15
-rw-r--r--  arch/xtensa/kernel/mxhead.S                                |  85
-rw-r--r--  arch/xtensa/kernel/setup.c                                 |  66
-rw-r--r--  arch/xtensa/kernel/smp.c                                   | 465
-rw-r--r--  arch/xtensa/kernel/traps.c                                 |  33
-rw-r--r--  arch/xtensa/kernel/vmlinux.lds.S                           |  26
-rw-r--r--  arch/xtensa/mm/cache.c                                     |   7
-rw-r--r--  arch/xtensa/mm/fault.c                                     |   2
-rw-r--r--  arch/xtensa/mm/mmu.c                                       |   4
-rw-r--r--  arch/xtensa/mm/tlb.c                                       |  37
-rw-r--r--  arch/xtensa/platforms/xtfpga/include/platform/hardware.h  |   2
24 files changed, 967 insertions(+), 199 deletions(-)
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index fb140ae3860d..4b09c60b6b30 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -9,7 +9,6 @@ config XTENSA
9 select GENERIC_CLOCKEVENTS 9 select GENERIC_CLOCKEVENTS
10 select VIRT_TO_BUS 10 select VIRT_TO_BUS
11 select GENERIC_IRQ_SHOW 11 select GENERIC_IRQ_SHOW
12 select GENERIC_CPU_DEVICES
13 select GENERIC_SCHED_CLOCK 12 select GENERIC_SCHED_CLOCK
14 select MODULES_USE_ELF_RELA 13 select MODULES_USE_ELF_RELA
15 select GENERIC_PCI_IOMAP 14 select GENERIC_PCI_IOMAP
@@ -65,6 +64,9 @@ config MMU
65config VARIANT_IRQ_SWITCH 64config VARIANT_IRQ_SWITCH
66 def_bool n 65 def_bool n
67 66
67config MAY_HAVE_SMP
68 def_bool n
69
68menu "Processor type and features" 70menu "Processor type and features"
69 71
70choice 72choice
@@ -105,6 +107,39 @@ config XTENSA_UNALIGNED_USER
105 107
106source "kernel/Kconfig.preempt" 108source "kernel/Kconfig.preempt"
107 109
110config HAVE_SMP
111 bool "System Supports SMP (MX)"
112 depends on MAY_HAVE_SMP
113 select XTENSA_MX
114 help
115 This option is used to indicate that the system-on-a-chip (SOC)
116 supports multiprocessing. Multiprocessor support is implemented above
117 the CPU core definition and currently needs to be selected manually.
118
119 Multiprocessor support is implemented with external cache and
120 interrupt controllers.
121
122 The MX interrupt distributor adds Interprocessor Interrupts
123 and causes the IRQ numbers to be increased by 4 for devices
124 like the open cores ethernet driver and the serial interface.
125
126 You still have to select "Enable SMP" to enable SMP on this SOC.
127
128config SMP
129 bool "Enable Symmetric multi-processing support"
130 depends on HAVE_SMP
131 select USE_GENERIC_SMP_HELPERS
132 select GENERIC_SMP_IDLE_THREAD
133 help
134 Enable SMP software; allows more than one CPU/core
135 to be activated during startup.
136
137config NR_CPUS
138 depends on SMP
139 int "Maximum number of CPUs (2-32)"
140 range 2 32
141 default "4"
142
108config MATH_EMULATION 143config MATH_EMULATION
109 bool "Math emulation" 144 bool "Math emulation"
110 help 145 help
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
index ef021677d536..8e5e5c980a7a 100644
--- a/arch/xtensa/include/asm/barrier.h
+++ b/arch/xtensa/include/asm/barrier.h
@@ -17,7 +17,9 @@
17#define wmb() mb() 17#define wmb() mb()
18 18
19#ifdef CONFIG_SMP 19#ifdef CONFIG_SMP
20#error smp_* not defined 20#define smp_mb() mb()
21#define smp_rmb() rmb()
22#define smp_wmb() wmb()
21#else 23#else
22#define smp_mb() barrier() 24#define smp_mb() barrier()
23#define smp_rmb() barrier() 25#define smp_rmb() barrier()
diff --git a/arch/xtensa/include/asm/bitops.h b/arch/xtensa/include/asm/bitops.h
index 84afe58d5d37..7b6873ae84c2 100644
--- a/arch/xtensa/include/asm/bitops.h
+++ b/arch/xtensa/include/asm/bitops.h
@@ -22,12 +22,8 @@
22#include <asm/processor.h> 22#include <asm/processor.h>
23#include <asm/byteorder.h> 23#include <asm/byteorder.h>
24 24
25#ifdef CONFIG_SMP 25#define smp_mb__before_clear_bit() smp_mb()
26# error SMP not supported on this architecture 26#define smp_mb__after_clear_bit() smp_mb()
27#endif
28
29#define smp_mb__before_clear_bit() barrier()
30#define smp_mb__after_clear_bit() barrier()
31 27
32#include <asm-generic/bitops/non-atomic.h> 28#include <asm-generic/bitops/non-atomic.h>
33 29
diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h
index 127cd48883c4..555a98a18453 100644
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -1,18 +1,14 @@
1/* 1/*
2 * include/asm-xtensa/cacheflush.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
6 * for more details. 4 * for more details.
7 * 5 *
8 * (C) 2001 - 2007 Tensilica Inc. 6 * (C) 2001 - 2013 Tensilica Inc.
9 */ 7 */
10 8
11#ifndef _XTENSA_CACHEFLUSH_H 9#ifndef _XTENSA_CACHEFLUSH_H
12#define _XTENSA_CACHEFLUSH_H 10#define _XTENSA_CACHEFLUSH_H
13 11
14#ifdef __KERNEL__
15
16#include <linux/mm.h> 12#include <linux/mm.h>
17#include <asm/processor.h> 13#include <asm/processor.h>
18#include <asm/page.h> 14#include <asm/page.h>
@@ -51,7 +47,6 @@ extern void __invalidate_icache_page(unsigned long);
51extern void __invalidate_icache_range(unsigned long, unsigned long); 47extern void __invalidate_icache_range(unsigned long, unsigned long);
52extern void __invalidate_dcache_range(unsigned long, unsigned long); 48extern void __invalidate_dcache_range(unsigned long, unsigned long);
53 49
54
55#if XCHAL_DCACHE_IS_WRITEBACK 50#if XCHAL_DCACHE_IS_WRITEBACK
56extern void __flush_invalidate_dcache_all(void); 51extern void __flush_invalidate_dcache_all(void);
57extern void __flush_dcache_page(unsigned long); 52extern void __flush_dcache_page(unsigned long);
@@ -87,9 +82,22 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
87 * (see also Documentation/cachetlb.txt) 82 * (see also Documentation/cachetlb.txt)
88 */ 83 */
89 84
90#if (DCACHE_WAY_SIZE > PAGE_SIZE) 85#if (DCACHE_WAY_SIZE > PAGE_SIZE) || defined(CONFIG_SMP)
86
87#ifdef CONFIG_SMP
88void flush_cache_all(void);
89void flush_cache_range(struct vm_area_struct*, ulong, ulong);
90void flush_icache_range(unsigned long start, unsigned long end);
91void flush_cache_page(struct vm_area_struct*,
92 unsigned long, unsigned long);
93#else
94#define flush_cache_all local_flush_cache_all
95#define flush_cache_range local_flush_cache_range
96#define flush_icache_range local_flush_icache_range
97#define flush_cache_page local_flush_cache_page
98#endif
91 99
92#define flush_cache_all() \ 100#define local_flush_cache_all() \
93 do { \ 101 do { \
94 __flush_invalidate_dcache_all(); \ 102 __flush_invalidate_dcache_all(); \
95 __invalidate_icache_all(); \ 103 __invalidate_icache_all(); \
@@ -103,9 +111,11 @@ static inline void __invalidate_icache_page_alias(unsigned long virt,
103 111
104#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 112#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
105extern void flush_dcache_page(struct page*); 113extern void flush_dcache_page(struct page*);
106extern void flush_cache_range(struct vm_area_struct*, ulong, ulong); 114
107extern void flush_cache_page(struct vm_area_struct*, 115void local_flush_cache_range(struct vm_area_struct *vma,
108 unsigned long, unsigned long); 116 unsigned long start, unsigned long end);
117void local_flush_cache_page(struct vm_area_struct *vma,
118 unsigned long address, unsigned long pfn);
109 119
110#else 120#else
111 121
@@ -119,13 +129,14 @@ extern void flush_cache_page(struct vm_area_struct*,
119#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0 129#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
120#define flush_dcache_page(page) do { } while (0) 130#define flush_dcache_page(page) do { } while (0)
121 131
122#define flush_cache_page(vma,addr,pfn) do { } while (0) 132#define flush_icache_range local_flush_icache_range
123#define flush_cache_range(vma,start,end) do { } while (0) 133#define flush_cache_page(vma, addr, pfn) do { } while (0)
134#define flush_cache_range(vma, start, end) do { } while (0)
124 135
125#endif 136#endif
126 137
127/* Ensure consistency between data and instruction cache. */ 138/* Ensure consistency between data and instruction cache. */
128#define flush_icache_range(start,end) \ 139#define local_flush_icache_range(start, end) \
129 do { \ 140 do { \
130 __flush_dcache_range(start, (end) - (start)); \ 141 __flush_dcache_range(start, (end) - (start)); \
131 __invalidate_icache_range(start,(end) - (start)); \ 142 __invalidate_icache_range(start,(end) - (start)); \
@@ -253,5 +264,4 @@ static inline void flush_invalidate_dcache_unaligned(u32 addr, u32 size)
253 } 264 }
254} 265}
255 266
256#endif /* __KERNEL__ */
257#endif /* _XTENSA_CACHEFLUSH_H */ 267#endif /* _XTENSA_CACHEFLUSH_H */
diff --git a/arch/xtensa/include/asm/mmu.h b/arch/xtensa/include/asm/mmu.h
index 8554b2c8b17a..71afe418d0e5 100644
--- a/arch/xtensa/include/asm/mmu.h
+++ b/arch/xtensa/include/asm/mmu.h
@@ -1,11 +1,9 @@
1/* 1/*
2 * include/asm-xtensa/mmu.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
6 * for more details. 4 * for more details.
7 * 5 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc. 6 * Copyright (C) 2001 - 2013 Tensilica Inc.
9 */ 7 */
10 8
11#ifndef _XTENSA_MMU_H 9#ifndef _XTENSA_MMU_H
@@ -15,8 +13,10 @@
15#include <asm-generic/mmu.h> 13#include <asm-generic/mmu.h>
16#else 14#else
17 15
18/* Default "unsigned long" context */ 16typedef struct {
19typedef unsigned long mm_context_t; 17 unsigned long asid[NR_CPUS];
18 unsigned int cpu;
19} mm_context_t;
20 20
21#endif /* CONFIG_MMU */ 21#endif /* CONFIG_MMU */
22#endif /* _XTENSA_MMU_H */ 22#endif /* _XTENSA_MMU_H */
diff --git a/arch/xtensa/include/asm/mmu_context.h b/arch/xtensa/include/asm/mmu_context.h
index 86292c28674f..d33c71a8c9ec 100644
--- a/arch/xtensa/include/asm/mmu_context.h
+++ b/arch/xtensa/include/asm/mmu_context.h
@@ -1,13 +1,11 @@
1/* 1/*
2 * include/asm-xtensa/mmu_context.h
3 *
4 * Switch an MMU context. 2 * Switch an MMU context.
5 * 3 *
6 * This file is subject to the terms and conditions of the GNU General Public 4 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive 5 * License. See the file "COPYING" in the main directory of this archive
8 * for more details. 6 * for more details.
9 * 7 *
10 * Copyright (C) 2001 - 2005 Tensilica Inc. 8 * Copyright (C) 2001 - 2013 Tensilica Inc.
11 */ 9 */
12 10
13#ifndef _XTENSA_MMU_CONTEXT_H 11#ifndef _XTENSA_MMU_CONTEXT_H
@@ -20,22 +18,25 @@
20#include <linux/stringify.h> 18#include <linux/stringify.h>
21#include <linux/sched.h> 19#include <linux/sched.h>
22 20
23#include <variant/core.h> 21#include <asm/vectors.h>
24 22
25#include <asm/pgtable.h> 23#include <asm/pgtable.h>
26#include <asm/cacheflush.h> 24#include <asm/cacheflush.h>
27#include <asm/tlbflush.h> 25#include <asm/tlbflush.h>
28#include <asm-generic/mm_hooks.h> 26#include <asm-generic/mm_hooks.h>
27#include <asm-generic/percpu.h>
29 28
30#if (XCHAL_HAVE_TLBS != 1) 29#if (XCHAL_HAVE_TLBS != 1)
31# error "Linux must have an MMU!" 30# error "Linux must have an MMU!"
32#endif 31#endif
33 32
34extern unsigned long asid_cache; 33DECLARE_PER_CPU(unsigned long, asid_cache);
34#define cpu_asid_cache(cpu) per_cpu(asid_cache, cpu)
35 35
36/* 36/*
37 * NO_CONTEXT is the invalid ASID value that we don't ever assign to 37 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
38 * any user or kernel context. 38 * any user or kernel context. We use the reserved values in the
39 * ASID_INSERT macro below.
39 * 40 *
40 * 0 invalid 41 * 0 invalid
41 * 1 kernel 42 * 1 kernel
@@ -68,64 +69,77 @@ static inline unsigned long get_rasid_register (void)
68 return tmp; 69 return tmp;
69} 70}
70 71
71static inline void 72static inline void get_new_mmu_context(struct mm_struct *mm, unsigned int cpu)
72__get_new_mmu_context(struct mm_struct *mm) 73{
74 unsigned long asid = cpu_asid_cache(cpu);
75 if ((++asid & ASID_MASK) == 0) {
76 /*
77 * Start new asid cycle; continue counting with next
78 * incarnation bits; skipping over 0, 1, 2, 3.
79 */
80 local_flush_tlb_all();
81 asid += ASID_USER_FIRST;
82 }
83 cpu_asid_cache(cpu) = asid;
84 mm->context.asid[cpu] = asid;
85 mm->context.cpu = cpu;
86}
87
88static inline void get_mmu_context(struct mm_struct *mm, unsigned int cpu)
73{ 89{
74 extern void flush_tlb_all(void); 90 /*
75 if (! (++asid_cache & ASID_MASK) ) { 91 * Check if our ASID is of an older version and thus invalid.
76 flush_tlb_all(); /* start new asid cycle */ 92 */
77 asid_cache += ASID_USER_FIRST; 93
94 if (mm) {
95 unsigned long asid = mm->context.asid[cpu];
96
97 if (asid == NO_CONTEXT ||
98 ((asid ^ cpu_asid_cache(cpu)) & ~ASID_MASK))
99 get_new_mmu_context(mm, cpu);
78 } 100 }
79 mm->context = asid_cache;
80} 101}
81 102
82static inline void 103static inline void activate_context(struct mm_struct *mm, unsigned int cpu)
83__load_mmu_context(struct mm_struct *mm)
84{ 104{
85 set_rasid_register(ASID_INSERT(mm->context)); 105 get_mmu_context(mm, cpu);
106 set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
86 invalidate_page_directory(); 107 invalidate_page_directory();
87} 108}
88 109
89/* 110/*
90 * Initialize the context related info for a new mm_struct 111 * Initialize the context related info for a new mm_struct
91 * instance. 112 * instance. Valid cpu values are 0..(NR_CPUS-1), so initializing
113 * to -1 says the process has never run on any core.
92 */ 114 */
93 115
94static inline int 116static inline int init_new_context(struct task_struct *tsk,
95init_new_context(struct task_struct *tsk, struct mm_struct *mm) 117 struct mm_struct *mm)
96{ 118{
97 mm->context = NO_CONTEXT; 119 int cpu;
120 for_each_possible_cpu(cpu) {
121 mm->context.asid[cpu] = NO_CONTEXT;
122 }
123 mm->context.cpu = -1;
98 return 0; 124 return 0;
99} 125}
100 126
101/*
102 * After we have set current->mm to a new value, this activates
103 * the context for the new mm so we see the new mappings.
104 */
105static inline void
106activate_mm(struct mm_struct *prev, struct mm_struct *next)
107{
108 /* Unconditionally get a new ASID. */
109
110 __get_new_mmu_context(next);
111 __load_mmu_context(next);
112}
113
114
115static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, 127static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
116 struct task_struct *tsk) 128 struct task_struct *tsk)
117{ 129{
118 unsigned long asid = asid_cache; 130 unsigned int cpu = smp_processor_id();
119 131 int migrated = next->context.cpu != cpu;
120 /* Check if our ASID is of an older version and thus invalid */ 132 /* Flush the icache if we migrated to a new core. */
121 133 if (migrated) {
122 if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK)) 134 __invalidate_icache_all();
123 __get_new_mmu_context(next); 135 next->context.cpu = cpu;
124 136 }
125 __load_mmu_context(next); 137 if (migrated || prev != next)
138 activate_context(next, cpu);
126} 139}
127 140
128#define deactivate_mm(tsk, mm) do { } while(0) 141#define activate_mm(prev, next) switch_mm((prev), (next), NULL)
142#define deactivate_mm(tsk, mm) do { } while (0)
129 143
130/* 144/*
131 * Destroy context related info for an mm_struct that is about 145 * Destroy context related info for an mm_struct that is about
diff --git a/arch/xtensa/include/asm/ptrace.h b/arch/xtensa/include/asm/ptrace.h
index 81f31bc9dde0..598e752dcbcd 100644
--- a/arch/xtensa/include/asm/ptrace.h
+++ b/arch/xtensa/include/asm/ptrace.h
@@ -59,9 +59,17 @@ struct pt_regs {
59 (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1) 59 (task_stack_page(tsk) + KERNEL_STACK_SIZE - (XCHAL_NUM_AREGS-16)*4) - 1)
60# define user_mode(regs) (((regs)->ps & 0x00000020)!=0) 60# define user_mode(regs) (((regs)->ps & 0x00000020)!=0)
61# define instruction_pointer(regs) ((regs)->pc) 61# define instruction_pointer(regs) ((regs)->pc)
62# define return_pointer(regs) (MAKE_PC_FROM_RA((regs)->areg[0], \
63 (regs)->areg[1]))
62 64
63# ifndef CONFIG_SMP 65# ifndef CONFIG_SMP
64# define profile_pc(regs) instruction_pointer(regs) 66# define profile_pc(regs) instruction_pointer(regs)
67# else
68# define profile_pc(regs) \
69 ({ \
70 in_lock_functions(instruction_pointer(regs)) ? \
71 return_pointer(regs) : instruction_pointer(regs); \
72 })
65# endif 73# endif
66 74
67#define user_stack_pointer(regs) ((regs)->areg[1]) 75#define user_stack_pointer(regs) ((regs)->areg[1])
diff --git a/arch/xtensa/include/asm/smp.h b/arch/xtensa/include/asm/smp.h
index 83c569e3bdbd..30ac58cc70df 100644
--- a/arch/xtensa/include/asm/smp.h
+++ b/arch/xtensa/include/asm/smp.h
@@ -1,27 +1,34 @@
1/* 1/*
2 * include/asm-xtensa/smp.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
6 * for more details. 4 * for more details.
7 * 5 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc. 6 * Copyright (C) 2001 - 2013 Tensilica Inc.
9 */ 7 */
10 8
11#ifndef _XTENSA_SMP_H 9#ifndef _XTENSA_SMP_H
12#define _XTENSA_SMP_H 10#define _XTENSA_SMP_H
13 11
14extern struct xtensa_cpuinfo boot_cpu_data; 12#ifdef CONFIG_SMP
15 13
16#define cpu_data (&boot_cpu_data) 14#define raw_smp_processor_id() (current_thread_info()->cpu)
17#define current_cpu_data boot_cpu_data 15#define cpu_logical_map(cpu) (cpu)
18 16
19struct xtensa_cpuinfo { 17struct start_info {
20 unsigned long *pgd_cache; 18 unsigned long stack;
21 unsigned long *pte_cache;
22 unsigned long pgtable_cache_sz;
23}; 19};
20extern struct start_info start_info;
24 21
25#define cpu_logical_map(cpu) (cpu) 22struct cpumask;
23void arch_send_call_function_ipi_mask(const struct cpumask *mask);
24void arch_send_call_function_single_ipi(int cpu);
25
26void smp_init_cpus(void);
27void secondary_init_irq(void);
28void ipi_init(void);
29struct seq_file;
30void show_ipi_list(struct seq_file *p, int prec);
31
32#endif /* CONFIG_SMP */
26 33
27#endif /* _XTENSA_SMP_H */ 34#endif /* _XTENSA_SMP_H */
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
index 27fa3c170662..ca929e6a38b5 100644
--- a/arch/xtensa/include/asm/timex.h
+++ b/arch/xtensa/include/asm/timex.h
@@ -1,18 +1,14 @@
1/* 1/*
2 * include/asm-xtensa/timex.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
6 * for more details. 4 * for more details.
7 * 5 *
8 * Copyright (C) 2001 - 2008 Tensilica Inc. 6 * Copyright (C) 2001 - 2013 Tensilica Inc.
9 */ 7 */
10 8
11#ifndef _XTENSA_TIMEX_H 9#ifndef _XTENSA_TIMEX_H
12#define _XTENSA_TIMEX_H 10#define _XTENSA_TIMEX_H
13 11
14#ifdef __KERNEL__
15
16#include <asm/processor.h> 12#include <asm/processor.h>
17#include <linux/stringify.h> 13#include <linux/stringify.h>
18 14
@@ -39,14 +35,9 @@ extern unsigned long ccount_freq;
39 35
40typedef unsigned long long cycles_t; 36typedef unsigned long long cycles_t;
41 37
42/*
43 * Only used for SMP.
44 */
45
46extern cycles_t cacheflush_time;
47
48#define get_cycles() (0) 38#define get_cycles() (0)
49 39
40void local_timer_setup(unsigned cpu);
50 41
51/* 42/*
52 * Register access. 43 * Register access.
@@ -81,5 +72,4 @@ static inline void set_linux_timer (unsigned long ccompare)
81 WSR_CCOMPARE(LINUX_TIMER, ccompare); 72 WSR_CCOMPARE(LINUX_TIMER, ccompare);
82} 73}
83 74
84#endif /* __KERNEL__ */
85#endif /* _XTENSA_TIMEX_H */ 75#endif /* _XTENSA_TIMEX_H */
diff --git a/arch/xtensa/include/asm/tlbflush.h b/arch/xtensa/include/asm/tlbflush.h
index 43dd348a5a47..fc34274ce41b 100644
--- a/arch/xtensa/include/asm/tlbflush.h
+++ b/arch/xtensa/include/asm/tlbflush.h
@@ -1,18 +1,14 @@
1/* 1/*
2 * include/asm-xtensa/tlbflush.h
3 *
4 * This file is subject to the terms and conditions of the GNU General Public 2 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive 3 * License. See the file "COPYING" in the main directory of this archive
6 * for more details. 4 * for more details.
7 * 5 *
8 * Copyright (C) 2001 - 2005 Tensilica Inc. 6 * Copyright (C) 2001 - 2013 Tensilica Inc.
9 */ 7 */
10 8
11#ifndef _XTENSA_TLBFLUSH_H 9#ifndef _XTENSA_TLBFLUSH_H
12#define _XTENSA_TLBFLUSH_H 10#define _XTENSA_TLBFLUSH_H
13 11
14#ifdef __KERNEL__
15
16#include <linux/stringify.h> 12#include <linux/stringify.h>
17#include <asm/processor.h> 13#include <asm/processor.h>
18 14
@@ -34,12 +30,37 @@
34 * - flush_tlb_range(mm, start, end) flushes a range of pages 30 * - flush_tlb_range(mm, start, end) flushes a range of pages
35 */ 31 */
36 32
37extern void flush_tlb_all(void); 33void local_flush_tlb_all(void);
38extern void flush_tlb_mm(struct mm_struct*); 34void local_flush_tlb_mm(struct mm_struct *mm);
39extern void flush_tlb_page(struct vm_area_struct*,unsigned long); 35void local_flush_tlb_page(struct vm_area_struct *vma,
40extern void flush_tlb_range(struct vm_area_struct*,unsigned long,unsigned long); 36 unsigned long page);
37void local_flush_tlb_range(struct vm_area_struct *vma,
38 unsigned long start, unsigned long end);
39
40#ifdef CONFIG_SMP
41
42void flush_tlb_all(void);
43void flush_tlb_mm(struct mm_struct *);
44void flush_tlb_page(struct vm_area_struct *, unsigned long);
45void flush_tlb_range(struct vm_area_struct *, unsigned long,
46 unsigned long);
47
48static inline void flush_tlb_kernel_range(unsigned long start,
49 unsigned long end)
50{
51 flush_tlb_all();
52}
53
54#else /* !CONFIG_SMP */
55
56#define flush_tlb_all() local_flush_tlb_all()
57#define flush_tlb_mm(mm) local_flush_tlb_mm(mm)
58#define flush_tlb_page(vma, page) local_flush_tlb_page(vma, page)
59#define flush_tlb_range(vma, vmaddr, end) local_flush_tlb_range(vma, vmaddr, \
60 end)
61#define flush_tlb_kernel_range(start, end) local_flush_tlb_all()
41 62
42#define flush_tlb_kernel_range(start,end) flush_tlb_all() 63#endif /* CONFIG_SMP */
43 64
44/* TLB operations. */ 65/* TLB operations. */
45 66
@@ -187,5 +208,4 @@ static inline unsigned long read_itlb_translation (int way)
187} 208}
188 209
189#endif /* __ASSEMBLY__ */ 210#endif /* __ASSEMBLY__ */
190#endif /* __KERNEL__ */
191#endif /* _XTENSA_TLBFLUSH_H */ 211#endif /* _XTENSA_TLBFLUSH_H */
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 917488a0ab00..8c194f6af45e 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -19,6 +19,7 @@
19 */ 19 */
20extern void * __init trap_set_handler(int cause, void *handler); 20extern void * __init trap_set_handler(int cause, void *handler);
21extern void do_unhandled(struct pt_regs *regs, unsigned long exccause); 21extern void do_unhandled(struct pt_regs *regs, unsigned long exccause);
22void secondary_trap_init(void);
22 23
23static inline void spill_registers(void) 24static inline void spill_registers(void)
24{ 25{
diff --git a/arch/xtensa/kernel/Makefile b/arch/xtensa/kernel/Makefile
index f90265ec1ccc..18d962a8c0c2 100644
--- a/arch/xtensa/kernel/Makefile
+++ b/arch/xtensa/kernel/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_KGDB) += xtensa-stub.o
12obj-$(CONFIG_PCI) += pci.o 12obj-$(CONFIG_PCI) += pci.o
13obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o 13obj-$(CONFIG_MODULES) += xtensa_ksyms.o module.o
14obj-$(CONFIG_FUNCTION_TRACER) += mcount.o 14obj-$(CONFIG_FUNCTION_TRACER) += mcount.o
15obj-$(CONFIG_SMP) += smp.o mxhead.o
15 16
16AFLAGS_head.o += -mtext-section-literals 17AFLAGS_head.o += -mtext-section-literals
17 18
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 7d740ebbe198..74ec62c892bc 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -19,6 +19,7 @@
19#include <asm/page.h> 19#include <asm/page.h>
20#include <asm/cacheasm.h> 20#include <asm/cacheasm.h>
21#include <asm/initialize_mmu.h> 21#include <asm/initialize_mmu.h>
22#include <asm/mxregs.h>
22 23
23#include <linux/init.h> 24#include <linux/init.h>
24#include <linux/linkage.h> 25#include <linux/linkage.h>
@@ -54,7 +55,7 @@ ENTRY(_start)
54 55
55 /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */ 56 /* Preserve the pointer to the boot parameter list in EXCSAVE_1 */
56 wsr a2, excsave1 57 wsr a2, excsave1
57 _j _SetupMMU 58 _j _SetupOCD
58 59
59 .align 4 60 .align 4
60 .literal_position 61 .literal_position
@@ -62,6 +63,23 @@ ENTRY(_start)
62 .word _startup 63 .word _startup
63 64
64 .align 4 65 .align 4
66_SetupOCD:
67 /*
68 * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
69 * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
70 * xt-gdb to single step via DEBUG exceptions received directly
71 * by ocd.
72 */
73 movi a1, 1
74 movi a0, 0
75 wsr a1, windowstart
76 wsr a0, windowbase
77 rsync
78
79 movi a1, LOCKLEVEL
80 wsr a1, ps
81 rsync
82
65 .global _SetupMMU 83 .global _SetupMMU
66_SetupMMU: 84_SetupMMU:
67 Offset = _SetupMMU - _start 85 Offset = _SetupMMU - _start
@@ -90,19 +108,6 @@ ENDPROC(_start)
90 108
91ENTRY(_startup) 109ENTRY(_startup)
92 110
93 /* Disable interrupts and exceptions. */
94
95 movi a0, LOCKLEVEL
96 wsr a0, ps
97
98 /* Start with a fresh windowbase and windowstart. */
99
100 movi a1, 1
101 movi a0, 0
102 wsr a1, windowstart
103 wsr a0, windowbase
104 rsync
105
106 /* Set a0 to 0 for the remaining initialization. */ 111 /* Set a0 to 0 for the remaining initialization. */
107 112
108 movi a0, 0 113 movi a0, 0
@@ -154,17 +159,6 @@ ENTRY(_startup)
154 wsr a0, cpenable 159 wsr a0, cpenable
155#endif 160#endif
156 161
157 /* Set PS.INTLEVEL=LOCKLEVEL, PS.WOE=0, kernel stack, PS.EXCM=0
158 *
159 * Note: PS.EXCM must be cleared before using any loop
160 * instructions; otherwise, they are silently disabled, and
161 * at most one iteration of the loop is executed.
162 */
163
164 movi a1, LOCKLEVEL
165 wsr a1, ps
166 rsync
167
168 /* Initialize the caches. 162 /* Initialize the caches.
169 * a2, a3 are just working registers (clobbered). 163 * a2, a3 are just working registers (clobbered).
170 */ 164 */
@@ -182,6 +176,37 @@ ENTRY(_startup)
182 176
183 isync 177 isync
184 178
179#ifdef CONFIG_HAVE_SMP
180 movi a2, CCON # MX External Register to Configure Cache
181 movi a3, 1
182 wer a3, a2
183#endif
184
185 /* Setup stack and enable window exceptions (keep irqs disabled) */
186
187 movi a1, start_info
188 l32i a1, a1, 0
189
190 movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL
191 # WOE=1, INTLEVEL=LOCKLEVEL, UM=0
192 wsr a2, ps # (enable reg-windows; progmode stack)
193 rsync
194
195 /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
196
197 movi a2, debug_exception
198 wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
199
200#ifdef CONFIG_SMP
201 /*
202 * Notice that with SMP we assume that all cores support the
203 * PRID register.
204 */
205 rsr a2, prid
206 bnez a2, .Lboot_secondary
207
208#endif /* CONFIG_SMP */
209
185 /* Unpack data sections 210 /* Unpack data sections
186 * 211 *
187 * The linker script used to build the Linux kernel image 212 * The linker script used to build the Linux kernel image
@@ -234,24 +259,7 @@ ENTRY(_startup)
234 ___invalidate_icache_all a2 a3 259 ___invalidate_icache_all a2 a3
235 isync 260 isync
236 261
237 /* Setup stack and enable window exceptions (keep irqs disabled) */ 262 movi a6, 0
238
239 movi a1, init_thread_union
240 addi a1, a1, KERNEL_STACK_SIZE
241
242 movi a2, (1 << PS_WOE_BIT) | LOCKLEVEL
243 # WOE=1, INTLEVEL=LOCKLEVEL, UM=0
244 wsr a2, ps # (enable reg-windows; progmode stack)
245 rsync
246
247 /* Set up EXCSAVE[DEBUGLEVEL] to point to the Debug Exception Handler.*/
248
249 movi a2, debug_exception
250 wsr a2, SREG_EXCSAVE + XCHAL_DEBUGLEVEL
251
252 /* Set up EXCSAVE[1] to point to the exc_table. */
253
254 movi a6, exc_table
255 xsr a6, excsave1 263 xsr a6, excsave1
256 264
257 /* init_arch kick-starts the linux kernel */ 265 /* init_arch kick-starts the linux kernel */
@@ -265,9 +273,45 @@ ENTRY(_startup)
265should_never_return: 273should_never_return:
266 j should_never_return 274 j should_never_return
267 275
276#ifdef CONFIG_SMP
277.Lboot_secondary:
278
279 movi a2, cpu_start_ccount
2801:
281 l32i a3, a2, 0
282 beqi a3, 0, 1b
283 movi a3, 0
284 s32i a3, a2, 0
285 memw
2861:
287 l32i a3, a2, 0
288 beqi a3, 0, 1b
289 wsr a3, ccount
290 movi a3, 0
291 s32i a3, a2, 0
292 memw
293
294 movi a6, 0
295 wsr a6, excsave1
296
297 movi a4, secondary_start_kernel
298 callx4 a4
299 j should_never_return
300
301#endif /* CONFIG_SMP */
302
268ENDPROC(_startup) 303ENDPROC(_startup)
269 304
270/* 305/*
306 * DATA section
307 */
308
309 .section ".data.init.refok"
310 .align 4
311ENTRY(start_info)
312 .long init_thread_union + KERNEL_STACK_SIZE
313
314/*
271 * BSS section 315 * BSS section
272 */ 316 */
273 317
diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c
index 7d49730f4056..fad9e0059765 100644
--- a/arch/xtensa/kernel/irq.c
+++ b/arch/xtensa/kernel/irq.c
@@ -4,7 +4,7 @@
4 * Xtensa built-in interrupt controller and some generic functions copied 4 * Xtensa built-in interrupt controller and some generic functions copied
5 * from i386. 5 * from i386.
6 * 6 *
7 * Copyright (C) 2002 - 2006 Tensilica, Inc. 7 * Copyright (C) 2002 - 2013 Tensilica, Inc.
8 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar 8 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
9 * 9 *
10 * 10 *
@@ -19,10 +19,12 @@
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/kernel_stat.h> 20#include <linux/kernel_stat.h>
21#include <linux/irqchip.h> 21#include <linux/irqchip.h>
22#include <linux/irqchip/xtensa-mx.h>
22#include <linux/irqchip/xtensa-pic.h> 23#include <linux/irqchip/xtensa-pic.h>
23#include <linux/irqdomain.h> 24#include <linux/irqdomain.h>
24#include <linux/of.h> 25#include <linux/of.h>
25 26
27#include <asm/mxregs.h>
26#include <asm/uaccess.h> 28#include <asm/uaccess.h>
27#include <asm/platform.h> 29#include <asm/platform.h>
28 30
@@ -55,6 +57,9 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
55 57
56int arch_show_interrupts(struct seq_file *p, int prec) 58int arch_show_interrupts(struct seq_file *p, int prec)
57{ 59{
60#ifdef CONFIG_SMP
61 show_ipi_list(p, prec);
62#endif
58 seq_printf(p, "%*s: ", prec, "ERR"); 63 seq_printf(p, "%*s: ", prec, "ERR");
59 seq_printf(p, "%10u\n", atomic_read(&irq_err_count)); 64 seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
60 return 0; 65 return 0;
@@ -136,7 +141,15 @@ void __init init_IRQ(void)
136#ifdef CONFIG_OF 141#ifdef CONFIG_OF
137 irqchip_init(); 142 irqchip_init();
138#else 143#else
144#ifdef CONFIG_HAVE_SMP
145 xtensa_mx_init_legacy(NULL);
146#else
139 xtensa_pic_init_legacy(NULL); 147 xtensa_pic_init_legacy(NULL);
140#endif 148#endif
149#endif
150
151#ifdef CONFIG_SMP
152 ipi_init();
153#endif
141 variant_init_irq(); 154 variant_init_irq();
142} 155}
diff --git a/arch/xtensa/kernel/mxhead.S b/arch/xtensa/kernel/mxhead.S
new file mode 100644
index 000000000000..77a161a112c5
--- /dev/null
+++ b/arch/xtensa/kernel/mxhead.S
@@ -0,0 +1,85 @@
1/*
2 * Xtensa Secondary Processors startup code.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2001 - 2013 Tensilica Inc.
9 *
10 * Joe Taylor <joe@tensilica.com>
11 * Chris Zankel <chris@zankel.net>
12 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
13 * Pete Delaney <piet@tensilica.com>
14 */
15
16#include <linux/linkage.h>
17
18#include <asm/cacheasm.h>
19#include <asm/initialize_mmu.h>
20#include <asm/mxregs.h>
21#include <asm/regs.h>
22
23
24 .section .SecondaryResetVector.text, "ax"
25
26
27ENTRY(_SecondaryResetVector)
28 _j _SetupOCD
29
30 .begin no-absolute-literals
31 .literal_position
32
33_SetupOCD:
34 /*
35 * Initialize WB, WS, and clear PS.EXCM (to allow loop instructions).
36 * Set Interrupt Level just below XCHAL_DEBUGLEVEL to allow
37 * xt-gdb to single step via DEBUG exceptions received directly
38 * by ocd.
39 */
40 movi a1, 1
41 movi a0, 0
42 wsr a1, windowstart
43 wsr a0, windowbase
44 rsync
45
46 movi a1, LOCKLEVEL
47 wsr a1, ps
48 rsync
49
50_SetupMMU:
51 Offset = _SetupMMU - _SecondaryResetVector
52
53#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
54 initialize_mmu
55#endif
56
57 /*
58 * Start Secondary Processors with NULL pointer to boot params.
59 */
60 movi a2, 0 # a2 == NULL
61 movi a3, _startup
62 jx a3
63
64 .end no-absolute-literals
65
66
67 .section .SecondaryResetVector.remapped_text, "ax"
68 .global _RemappedSecondaryResetVector
69
70 .org 0 # Need to do org before literals
71
72_RemappedSecondaryResetVector:
73 .begin no-absolute-literals
74 .literal_position
75
76 _j _RemappedSetupMMU
77 . = _RemappedSecondaryResetVector + Offset
78
79_RemappedSetupMMU:
80
81#ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
82 initialize_mmu
83#endif
84
85 .end no-absolute-literals
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 3d353140c2d0..dfd8f52c05d8 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -21,6 +21,8 @@
21#include <linux/screen_info.h> 21#include <linux/screen_info.h>
22#include <linux/bootmem.h> 22#include <linux/bootmem.h>
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/percpu.h>
25#include <linux/cpu.h>
24#include <linux/of_fdt.h> 26#include <linux/of_fdt.h>
25#include <linux/of_platform.h> 27#include <linux/of_platform.h>
26 28
@@ -46,6 +48,7 @@
46#include <asm/setup.h> 48#include <asm/setup.h>
47#include <asm/param.h> 49#include <asm/param.h>
48#include <asm/traps.h> 50#include <asm/traps.h>
51#include <asm/smp.h>
49 52
50#include <platform/hardware.h> 53#include <platform/hardware.h>
51 54
@@ -496,6 +499,10 @@ void __init setup_arch(char **cmdline_p)
496 499
497 platform_setup(cmdline_p); 500 platform_setup(cmdline_p);
498 501
502#ifdef CONFIG_SMP
503 smp_init_cpus();
504#endif
505
499 paging_init(); 506 paging_init();
500 zones_init(); 507 zones_init();
501 508
@@ -512,6 +519,21 @@ void __init setup_arch(char **cmdline_p)
512#endif 519#endif
513} 520}
514 521
522static DEFINE_PER_CPU(struct cpu, cpu_data);
523
524static int __init topology_init(void)
525{
526 int i;
527
528 for_each_possible_cpu(i) {
529 struct cpu *cpu = &per_cpu(cpu_data, i);
530 register_cpu(cpu, i);
531 }
532
533 return 0;
534}
535subsys_initcall(topology_init);
536
515void machine_restart(char * cmd) 537void machine_restart(char * cmd)
516{ 538{
517 platform_restart(); 539 platform_restart();
@@ -537,21 +559,27 @@ void machine_power_off(void)
537static int 559static int
538c_show(struct seq_file *f, void *slot) 560c_show(struct seq_file *f, void *slot)
539{ 561{
562 char buf[NR_CPUS * 5];
563
564 cpulist_scnprintf(buf, sizeof(buf), cpu_online_mask);
540 /* high-level stuff */ 565 /* high-level stuff */
541 seq_printf(f,"processor\t: 0\n" 566 seq_printf(f, "CPU count\t: %u\n"
542 "vendor_id\t: Tensilica\n" 567 "CPU list\t: %s\n"
543 "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n" 568 "vendor_id\t: Tensilica\n"
544 "core ID\t\t: " XCHAL_CORE_ID "\n" 569 "model\t\t: Xtensa " XCHAL_HW_VERSION_NAME "\n"
545 "build ID\t: 0x%x\n" 570 "core ID\t\t: " XCHAL_CORE_ID "\n"
546 "byte order\t: %s\n" 571 "build ID\t: 0x%x\n"
547 "cpu MHz\t\t: %lu.%02lu\n" 572 "byte order\t: %s\n"
548 "bogomips\t: %lu.%02lu\n", 573 "cpu MHz\t\t: %lu.%02lu\n"
549 XCHAL_BUILD_UNIQUE_ID, 574 "bogomips\t: %lu.%02lu\n",
550 XCHAL_HAVE_BE ? "big" : "little", 575 num_online_cpus(),
551 ccount_freq/1000000, 576 buf,
552 (ccount_freq/10000) % 100, 577 XCHAL_BUILD_UNIQUE_ID,
553 loops_per_jiffy/(500000/HZ), 578 XCHAL_HAVE_BE ? "big" : "little",
554 (loops_per_jiffy/(5000/HZ)) % 100); 579 ccount_freq/1000000,
580 (ccount_freq/10000) % 100,
581 loops_per_jiffy/(500000/HZ),
582 (loops_per_jiffy/(5000/HZ)) % 100);
555 583
556 seq_printf(f,"flags\t\t: " 584 seq_printf(f,"flags\t\t: "
557#if XCHAL_HAVE_NMI 585#if XCHAL_HAVE_NMI
@@ -663,7 +691,7 @@ c_show(struct seq_file *f, void *slot)
663static void * 691static void *
664c_start(struct seq_file *f, loff_t *pos) 692c_start(struct seq_file *f, loff_t *pos)
665{ 693{
666 return (void *) ((*pos == 0) ? (void *)1 : NULL); 694 return (*pos == 0) ? (void *)1 : NULL;
667} 695}
668 696
669static void * 697static void *
@@ -679,10 +707,10 @@ c_stop(struct seq_file *f, void *v)
679 707
680const struct seq_operations cpuinfo_op = 708const struct seq_operations cpuinfo_op =
681{ 709{
682 start: c_start, 710 .start = c_start,
683 next: c_next, 711 .next = c_next,
684 stop: c_stop, 712 .stop = c_stop,
685 show: c_show 713 .show = c_show,
686}; 714};
687 715
688#endif /* CONFIG_PROC_FS */ 716#endif /* CONFIG_PROC_FS */
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
new file mode 100644
index 000000000000..46bdd142a07d
--- /dev/null
+++ b/arch/xtensa/kernel/smp.c
@@ -0,0 +1,465 @@
1/*
2 * Xtensa SMP support functions.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2008 - 2013 Tensilica Inc.
9 *
10 * Chris Zankel <chris@zankel.net>
11 * Joe Taylor <joe@tensilica.com>
12 * Pete Delaney <piet@tensilica.com>
13 */
14
15#include <linux/cpu.h>
16#include <linux/cpumask.h>
17#include <linux/delay.h>
18#include <linux/init.h>
19#include <linux/interrupt.h>
20#include <linux/irqdomain.h>
21#include <linux/irq.h>
22#include <linux/kdebug.h>
23#include <linux/module.h>
24#include <linux/reboot.h>
25#include <linux/seq_file.h>
26#include <linux/smp.h>
27#include <linux/thread_info.h>
28
29#include <asm/cacheflush.h>
30#include <asm/kdebug.h>
31#include <asm/mmu_context.h>
32#include <asm/mxregs.h>
33#include <asm/platform.h>
34#include <asm/tlbflush.h>
35#include <asm/traps.h>
36
37#ifdef CONFIG_SMP
38# if XCHAL_HAVE_S32C1I == 0
39# error "The S32C1I option is required for SMP."
40# endif
41#endif
42
43/* IPI (Inter Process Interrupt) */
44
45#define IPI_IRQ 0
46
47static irqreturn_t ipi_interrupt(int irq, void *dev_id);
48static struct irqaction ipi_irqaction = {
49 .handler = ipi_interrupt,
50 .flags = IRQF_PERCPU,
51 .name = "ipi",
52};
53
54void ipi_init(void)
55{
56 unsigned irq = irq_create_mapping(NULL, IPI_IRQ);
57 setup_irq(irq, &ipi_irqaction);
58}
59
60static inline unsigned int get_core_count(void)
61{
62 /* Bits 18..21 of SYSCFGID contain the core count minus 1. */
63 unsigned int syscfgid = get_er(SYSCFGID);
64 return ((syscfgid >> 18) & 0xf) + 1;
65}
66
67static inline int get_core_id(void)
68{
69 /* Bits 0...18 of SYSCFGID contain the core id */
70 unsigned int core_id = get_er(SYSCFGID);
71 return core_id & 0x3fff;
72}
73
74void __init smp_prepare_cpus(unsigned int max_cpus)
75{
76 unsigned i;
77
78 for (i = 0; i < max_cpus; ++i)
79 set_cpu_present(i, true);
80}
81
82void __init smp_init_cpus(void)
83{
84 unsigned i;
85 unsigned int ncpus = get_core_count();
86 unsigned int core_id = get_core_id();
87
88 pr_info("%s: Core Count = %d\n", __func__, ncpus);
89 pr_info("%s: Core Id = %d\n", __func__, core_id);
90
91 for (i = 0; i < ncpus; ++i)
92 set_cpu_possible(i, true);
93}
94
95void __init smp_prepare_boot_cpu(void)
96{
97 unsigned int cpu = smp_processor_id();
98 BUG_ON(cpu != 0);
99 cpu_asid_cache(cpu) = ASID_USER_FIRST;
100}
101
102void __init smp_cpus_done(unsigned int max_cpus)
103{
104}
105
106static int boot_secondary_processors = 1; /* Set with xt-gdb via .xt-gdb */
107static DECLARE_COMPLETION(cpu_running);
108
109void __init secondary_start_kernel(void)
110{
111 struct mm_struct *mm = &init_mm;
112 unsigned int cpu = smp_processor_id();
113
114 init_mmu();
115
116#ifdef CONFIG_DEBUG_KERNEL
117 if (boot_secondary_processors == 0) {
118 pr_debug("%s: boot_secondary_processors:%d; Hanging cpu:%d\n",
119 __func__, boot_secondary_processors, cpu);
120 for (;;)
121 __asm__ __volatile__ ("waiti " __stringify(LOCKLEVEL));
122 }
123
124 pr_debug("%s: boot_secondary_processors:%d; Booting cpu:%d\n",
125 __func__, boot_secondary_processors, cpu);
126#endif
127 /* Init EXCSAVE1 */
128
129 secondary_trap_init();
130
131 /* All kernel threads share the same mm context. */
132
133 atomic_inc(&mm->mm_users);
134 atomic_inc(&mm->mm_count);
135 current->active_mm = mm;
136 cpumask_set_cpu(cpu, mm_cpumask(mm));
137 enter_lazy_tlb(mm, current);
138
139 preempt_disable();
140 trace_hardirqs_off();
141
142 calibrate_delay();
143
144 notify_cpu_starting(cpu);
145
146 secondary_init_irq();
147 local_timer_setup(cpu);
148
149 local_irq_enable();
150
151 set_cpu_online(cpu, true);
152 complete(&cpu_running);
153
154 cpu_startup_entry(CPUHP_ONLINE);
155}
156
157static void mx_cpu_start(void *p)
158{
159 unsigned cpu = (unsigned)p;
160 unsigned long run_stall_mask = get_er(MPSCORE);
161
162 set_er(run_stall_mask & ~(1u << cpu), MPSCORE);
163 pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
164 __func__, cpu, run_stall_mask, get_er(MPSCORE));
165}
166
167static void mx_cpu_stop(void *p)
168{
169 unsigned cpu = (unsigned)p;
170 unsigned long run_stall_mask = get_er(MPSCORE);
171
172 set_er(run_stall_mask | (1u << cpu), MPSCORE);
173 pr_debug("%s: cpu: %d, run_stall_mask: %lx ---> %lx\n",
174 __func__, cpu, run_stall_mask, get_er(MPSCORE));
175}
176
177unsigned long cpu_start_ccount;
178
179static int boot_secondary(unsigned int cpu, struct task_struct *ts)
180{
181 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
182 unsigned long ccount;
183 int i;
184
185 smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
186
187 for (i = 0; i < 2; ++i) {
188 do
189 ccount = get_ccount();
190 while (!ccount);
191
192 cpu_start_ccount = ccount;
193
194 while (time_before(jiffies, timeout)) {
195 mb();
196 if (!cpu_start_ccount)
197 break;
198 }
199
200 if (cpu_start_ccount) {
201 smp_call_function_single(0, mx_cpu_stop,
202 (void *)cpu, 1);
203 cpu_start_ccount = 0;
204 return -EIO;
205 }
206 }
207 return 0;
208}
209
210int __cpu_up(unsigned int cpu, struct task_struct *idle)
211{
212 int ret = 0;
213
214 if (cpu_asid_cache(cpu) == 0)
215 cpu_asid_cache(cpu) = ASID_USER_FIRST;
216
217 start_info.stack = (unsigned long)task_pt_regs(idle);
218 wmb();
219
220 pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
221 __func__, cpu, idle, start_info.stack);
222
223 ret = boot_secondary(cpu, idle);
224 if (ret == 0) {
225 wait_for_completion_timeout(&cpu_running,
226 msecs_to_jiffies(1000));
227 if (!cpu_online(cpu))
228 ret = -EIO;
229 }
230
231 if (ret)
232 pr_err("CPU %u failed to boot\n", cpu);
233
234 return ret;
235}
236
237enum ipi_msg_type {
238 IPI_RESCHEDULE = 0,
239 IPI_CALL_FUNC,
240 IPI_CPU_STOP,
241 IPI_MAX
242};
243
244static const struct {
245 const char *short_text;
246 const char *long_text;
247} ipi_text[] = {
248 { .short_text = "RES", .long_text = "Rescheduling interrupts" },
249 { .short_text = "CAL", .long_text = "Function call interrupts" },
250 { .short_text = "DIE", .long_text = "CPU shutdown interrupts" },
251};
252
253struct ipi_data {
254 unsigned long ipi_count[IPI_MAX];
255};
256
257static DEFINE_PER_CPU(struct ipi_data, ipi_data);
258
259static void send_ipi_message(const struct cpumask *callmask,
260 enum ipi_msg_type msg_id)
261{
262 int index;
263 unsigned long mask = 0;
264
265 for_each_cpu(index, callmask)
266 if (index != smp_processor_id())
267 mask |= 1 << index;
268
269 set_er(mask, MIPISET(msg_id));
270}
271
272void arch_send_call_function_ipi_mask(const struct cpumask *mask)
273{
274 send_ipi_message(mask, IPI_CALL_FUNC);
275}
276
277void arch_send_call_function_single_ipi(int cpu)
278{
279 send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
280}
281
282void smp_send_reschedule(int cpu)
283{
284 send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
285}
286
287void smp_send_stop(void)
288{
289 struct cpumask targets;
290
291 cpumask_copy(&targets, cpu_online_mask);
292 cpumask_clear_cpu(smp_processor_id(), &targets);
293 send_ipi_message(&targets, IPI_CPU_STOP);
294}
295
296static void ipi_cpu_stop(unsigned int cpu)
297{
298 set_cpu_online(cpu, false);
299 machine_halt();
300}
301
302irqreturn_t ipi_interrupt(int irq, void *dev_id)
303{
304 unsigned int cpu = smp_processor_id();
305 struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
306 unsigned int msg;
307 unsigned i;
308
309 msg = get_er(MIPICAUSE(cpu));
310 for (i = 0; i < IPI_MAX; i++)
311 if (msg & (1 << i)) {
312 set_er(1 << i, MIPICAUSE(cpu));
313 ++ipi->ipi_count[i];
314 }
315
316 if (msg & (1 << IPI_RESCHEDULE))
317 scheduler_ipi();
318 if (msg & (1 << IPI_CALL_FUNC))
319 generic_smp_call_function_interrupt();
320 if (msg & (1 << IPI_CPU_STOP))
321 ipi_cpu_stop(cpu);
322
323 return IRQ_HANDLED;
324}
325
326void show_ipi_list(struct seq_file *p, int prec)
327{
328 unsigned int cpu;
329 unsigned i;
330
331 for (i = 0; i < IPI_MAX; ++i) {
332 seq_printf(p, "%*s:", prec, ipi_text[i].short_text);
333 for_each_online_cpu(cpu)
334 seq_printf(p, " %10lu",
335 per_cpu(ipi_data, cpu).ipi_count[i]);
336 seq_printf(p, " %s\n", ipi_text[i].long_text);
337 }
338}
339
340int setup_profiling_timer(unsigned int multiplier)
341{
342 pr_debug("setup_profiling_timer %d\n", multiplier);
343 return 0;
344}
345
346/* TLB flush functions */
347
348struct flush_data {
349 struct vm_area_struct *vma;
350 unsigned long addr1;
351 unsigned long addr2;
352};
353
354static void ipi_flush_tlb_all(void *arg)
355{
356 local_flush_tlb_all();
357}
358
359void flush_tlb_all(void)
360{
361 on_each_cpu(ipi_flush_tlb_all, NULL, 1);
362}
363
364static void ipi_flush_tlb_mm(void *arg)
365{
366 local_flush_tlb_mm(arg);
367}
368
369void flush_tlb_mm(struct mm_struct *mm)
370{
371 on_each_cpu(ipi_flush_tlb_mm, mm, 1);
372}
373
374static void ipi_flush_tlb_page(void *arg)
375{
376 struct flush_data *fd = arg;
377 local_flush_tlb_page(fd->vma, fd->addr1);
378}
379
380void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
381{
382 struct flush_data fd = {
383 .vma = vma,
384 .addr1 = addr,
385 };
386 on_each_cpu(ipi_flush_tlb_page, &fd, 1);
387}
388
389static void ipi_flush_tlb_range(void *arg)
390{
391 struct flush_data *fd = arg;
392 local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
393}
394
395void flush_tlb_range(struct vm_area_struct *vma,
396 unsigned long start, unsigned long end)
397{
398 struct flush_data fd = {
399 .vma = vma,
400 .addr1 = start,
401 .addr2 = end,
402 };
403 on_each_cpu(ipi_flush_tlb_range, &fd, 1);
404}
405
406/* Cache flush functions */
407
408static void ipi_flush_cache_all(void *arg)
409{
410 local_flush_cache_all();
411}
412
413void flush_cache_all(void)
414{
415 on_each_cpu(ipi_flush_cache_all, NULL, 1);
416}
417
418static void ipi_flush_cache_page(void *arg)
419{
420 struct flush_data *fd = arg;
421 local_flush_cache_page(fd->vma, fd->addr1, fd->addr2);
422}
423
424void flush_cache_page(struct vm_area_struct *vma,
425 unsigned long address, unsigned long pfn)
426{
427 struct flush_data fd = {
428 .vma = vma,
429 .addr1 = address,
430 .addr2 = pfn,
431 };
432 on_each_cpu(ipi_flush_cache_page, &fd, 1);
433}
434
435static void ipi_flush_cache_range(void *arg)
436{
437 struct flush_data *fd = arg;
438 local_flush_cache_range(fd->vma, fd->addr1, fd->addr2);
439}
440
441void flush_cache_range(struct vm_area_struct *vma,
442 unsigned long start, unsigned long end)
443{
444 struct flush_data fd = {
445 .vma = vma,
446 .addr1 = start,
447 .addr2 = end,
448 };
449 on_each_cpu(ipi_flush_cache_range, &fd, 1);
450}
451
452static void ipi_flush_icache_range(void *arg)
453{
454 struct flush_data *fd = arg;
455 local_flush_icache_range(fd->addr1, fd->addr2);
456}
457
458void flush_icache_range(unsigned long start, unsigned long end)
459{
460 struct flush_data fd = {
461 .addr1 = start,
462 .addr2 = end,
463 };
464 on_each_cpu(ipi_flush_icache_range, &fd, 1);
465}
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 3dbe8648df1f..3c0ff5746fe2 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -157,7 +157,7 @@ COPROCESSOR(7),
157 * 2. it is a temporary memory buffer for the exception handlers. 157 * 2. it is a temporary memory buffer for the exception handlers.
158 */ 158 */
159 159
160unsigned long exc_table[EXC_TABLE_SIZE/4]; 160DEFINE_PER_CPU(unsigned long, exc_table[EXC_TABLE_SIZE/4]);
161 161
162void die(const char*, struct pt_regs*, long); 162void die(const char*, struct pt_regs*, long);
163 163
@@ -313,17 +313,31 @@ do_debug(struct pt_regs *regs)
313} 313}
314 314
315 315
316static void set_handler(int idx, void *handler)
317{
318 unsigned int cpu;
319
320 for_each_possible_cpu(cpu)
321 per_cpu(exc_table, cpu)[idx] = (unsigned long)handler;
322}
323
316/* Set exception C handler - for temporary use when probing exceptions */ 324/* Set exception C handler - for temporary use when probing exceptions */
317 325
318void * __init trap_set_handler(int cause, void *handler) 326void * __init trap_set_handler(int cause, void *handler)
319{ 327{
320 unsigned long *entry = &exc_table[EXC_TABLE_DEFAULT / 4 + cause]; 328 void *previous = (void *)per_cpu(exc_table, 0)[
321 void *previous = (void *)*entry; 329 EXC_TABLE_DEFAULT / 4 + cause];
322 *entry = (unsigned long)handler; 330 set_handler(EXC_TABLE_DEFAULT / 4 + cause, handler);
323 return previous; 331 return previous;
324} 332}
325 333
326 334
335static void __init trap_init_excsave(void)
336{
337 unsigned long excsave1 = (unsigned long)this_cpu_ptr(exc_table);
338 __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (excsave1));
339}
340
327/* 341/*
328 * Initialize dispatch tables. 342 * Initialize dispatch tables.
329 * 343 *
@@ -337,8 +351,6 @@ void * __init trap_set_handler(int cause, void *handler)
337 * See vectors.S for more details. 351 * See vectors.S for more details.
338 */ 352 */
339 353
340#define set_handler(idx,handler) (exc_table[idx] = (unsigned long) (handler))
341
342void __init trap_init(void) 354void __init trap_init(void)
343{ 355{
344 int i; 356 int i;
@@ -368,10 +380,15 @@ void __init trap_init(void)
368 } 380 }
369 381
370 /* Initialize EXCSAVE_1 to hold the address of the exception table. */ 382 /* Initialize EXCSAVE_1 to hold the address of the exception table. */
383 trap_init_excsave();
384}
371 385
372 i = (unsigned long)exc_table; 386#ifdef CONFIG_SMP
373 __asm__ __volatile__("wsr %0, excsave1\n" : : "a" (i)); 387void __init secondary_trap_init(void)
388{
389 trap_init_excsave();
374} 390}
391#endif
375 392
376/* 393/*
377 * This function dumps the current valid window frame and other base registers. 394 * This function dumps the current valid window frame and other base registers.
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 21acd11b5df2..ee32c0085dff 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -165,6 +165,13 @@ SECTIONS
165 .DoubleExceptionVector.text); 165 .DoubleExceptionVector.text);
166 RELOCATE_ENTRY(_DebugInterruptVector_text, 166 RELOCATE_ENTRY(_DebugInterruptVector_text,
167 .DebugInterruptVector.text); 167 .DebugInterruptVector.text);
168#if defined(CONFIG_SMP)
169 RELOCATE_ENTRY(_SecondaryResetVector_literal,
170 .SecondaryResetVector.literal);
171 RELOCATE_ENTRY(_SecondaryResetVector_text,
172 .SecondaryResetVector.text);
173#endif
174
168 175
169 __boot_reloc_table_end = ABSOLUTE(.) ; 176 __boot_reloc_table_end = ABSOLUTE(.) ;
170 177
@@ -272,6 +279,25 @@ SECTIONS
272 .DoubleExceptionVector.literal) 279 .DoubleExceptionVector.literal)
273 280
274 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; 281 . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
282
283#if defined(CONFIG_SMP)
284
285 SECTION_VECTOR (_SecondaryResetVector_literal,
286 .SecondaryResetVector.literal,
287 RESET_VECTOR1_VADDR - 4,
288 SIZEOF(.DoubleExceptionVector.text),
289 .DoubleExceptionVector.text)
290
291 SECTION_VECTOR (_SecondaryResetVector_text,
292 .SecondaryResetVector.text,
293 RESET_VECTOR1_VADDR,
294 4,
295 .SecondaryResetVector.literal)
296
297 . = LOADADDR(.SecondaryResetVector.text)+SIZEOF(.SecondaryResetVector.text);
298
299#endif
300
275 . = ALIGN(PAGE_SIZE); 301 . = ALIGN(PAGE_SIZE);
276 302
277 __init_end = .; 303 __init_end = .;
diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c
index 81edeab82d17..ba4c47f291b1 100644
--- a/arch/xtensa/mm/cache.c
+++ b/arch/xtensa/mm/cache.c
@@ -118,7 +118,7 @@ void flush_dcache_page(struct page *page)
118 * For now, flush the whole cache. FIXME?? 118 * For now, flush the whole cache. FIXME??
119 */ 119 */
120 120
121void flush_cache_range(struct vm_area_struct* vma, 121void local_flush_cache_range(struct vm_area_struct *vma,
122 unsigned long start, unsigned long end) 122 unsigned long start, unsigned long end)
123{ 123{
124 __flush_invalidate_dcache_all(); 124 __flush_invalidate_dcache_all();
@@ -132,7 +132,7 @@ void flush_cache_range(struct vm_area_struct* vma,
132 * alias versions of the cache flush functions. 132 * alias versions of the cache flush functions.
133 */ 133 */
134 134
135void flush_cache_page(struct vm_area_struct* vma, unsigned long address, 135void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
136 unsigned long pfn) 136 unsigned long pfn)
137{ 137{
138 /* Note that we have to use the 'alias' address to avoid multi-hit */ 138 /* Note that we have to use the 'alias' address to avoid multi-hit */
@@ -159,8 +159,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
159 159
160 /* Invalidate old entry in TLBs */ 160 /* Invalidate old entry in TLBs */
161 161
162 invalidate_itlb_mapping(addr); 162 flush_tlb_page(vma, addr);
163 invalidate_dtlb_mapping(addr);
164 163
165#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK 164#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
166 165
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 70fa7bc42b4a..b57c4f91f487 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -21,7 +21,7 @@
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22#include <asm/pgalloc.h> 22#include <asm/pgalloc.h>
23 23
24unsigned long asid_cache = ASID_USER_FIRST; 24DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
25void bad_page_fault(struct pt_regs*, unsigned long, int); 25void bad_page_fault(struct pt_regs*, unsigned long, int);
26 26
27#undef DEBUG_PAGE_FAULT 27#undef DEBUG_PAGE_FAULT
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index c43771c974be..5bb8e3c61d85 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -22,7 +22,7 @@ void __init paging_init(void)
22/* 22/*
23 * Flush the mmu and reset associated register to default values. 23 * Flush the mmu and reset associated register to default values.
24 */ 24 */
25void __init init_mmu(void) 25void init_mmu(void)
26{ 26{
27#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY) 27#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
28 /* 28 /*
@@ -37,7 +37,7 @@ void __init init_mmu(void)
37 set_itlbcfg_register(0); 37 set_itlbcfg_register(0);
38 set_dtlbcfg_register(0); 38 set_dtlbcfg_register(0);
39#endif 39#endif
40 flush_tlb_all(); 40 local_flush_tlb_all();
41 41
42 /* Set rasid register to a known value. */ 42 /* Set rasid register to a known value. */
43 43
diff --git a/arch/xtensa/mm/tlb.c b/arch/xtensa/mm/tlb.c
index ca9d2366bf12..ade623826788 100644
--- a/arch/xtensa/mm/tlb.c
+++ b/arch/xtensa/mm/tlb.c
@@ -48,7 +48,7 @@ static inline void __flush_dtlb_all (void)
48} 48}
49 49
50 50
51void flush_tlb_all (void) 51void local_flush_tlb_all(void)
52{ 52{
53 __flush_itlb_all(); 53 __flush_itlb_all();
54 __flush_dtlb_all(); 54 __flush_dtlb_all();
@@ -60,19 +60,23 @@ void flush_tlb_all (void)
60 * a new context will be assigned to it. 60 * a new context will be assigned to it.
61 */ 61 */
62 62
63void flush_tlb_mm(struct mm_struct *mm) 63void local_flush_tlb_mm(struct mm_struct *mm)
64{ 64{
65 int cpu = smp_processor_id();
66
65 if (mm == current->active_mm) { 67 if (mm == current->active_mm) {
66 unsigned long flags; 68 unsigned long flags;
67 local_irq_save(flags); 69 local_irq_save(flags);
68 __get_new_mmu_context(mm); 70 mm->context.asid[cpu] = NO_CONTEXT;
69 __load_mmu_context(mm); 71 activate_context(mm, cpu);
70 local_irq_restore(flags); 72 local_irq_restore(flags);
73 } else {
74 mm->context.asid[cpu] = NO_CONTEXT;
75 mm->context.cpu = -1;
71 } 76 }
72 else
73 mm->context = 0;
74} 77}
75 78
79
76#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2) 80#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
77#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2) 81#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
78#if _ITLB_ENTRIES > _DTLB_ENTRIES 82#if _ITLB_ENTRIES > _DTLB_ENTRIES
@@ -81,24 +85,26 @@ void flush_tlb_mm(struct mm_struct *mm)
81# define _TLB_ENTRIES _DTLB_ENTRIES 85# define _TLB_ENTRIES _DTLB_ENTRIES
82#endif 86#endif
83 87
84void flush_tlb_range (struct vm_area_struct *vma, 88void local_flush_tlb_range(struct vm_area_struct *vma,
85 unsigned long start, unsigned long end) 89 unsigned long start, unsigned long end)
86{ 90{
91 int cpu = smp_processor_id();
87 struct mm_struct *mm = vma->vm_mm; 92 struct mm_struct *mm = vma->vm_mm;
88 unsigned long flags; 93 unsigned long flags;
89 94
90 if (mm->context == NO_CONTEXT) 95 if (mm->context.asid[cpu] == NO_CONTEXT)
91 return; 96 return;
92 97
93#if 0 98#if 0
94 printk("[tlbrange<%02lx,%08lx,%08lx>]\n", 99 printk("[tlbrange<%02lx,%08lx,%08lx>]\n",
95 (unsigned long)mm->context, start, end); 100 (unsigned long)mm->context.asid[cpu], start, end);
96#endif 101#endif
97 local_irq_save(flags); 102 local_irq_save(flags);
98 103
99 if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) { 104 if (end-start + (PAGE_SIZE-1) <= _TLB_ENTRIES << PAGE_SHIFT) {
100 int oldpid = get_rasid_register(); 105 int oldpid = get_rasid_register();
101 set_rasid_register (ASID_INSERT(mm->context)); 106
107 set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
102 start &= PAGE_MASK; 108 start &= PAGE_MASK;
103 if (vma->vm_flags & VM_EXEC) 109 if (vma->vm_flags & VM_EXEC)
104 while(start < end) { 110 while(start < end) {
@@ -114,24 +120,25 @@ void flush_tlb_range (struct vm_area_struct *vma,
114 120
115 set_rasid_register(oldpid); 121 set_rasid_register(oldpid);
116 } else { 122 } else {
117 flush_tlb_mm(mm); 123 local_flush_tlb_mm(mm);
118 } 124 }
119 local_irq_restore(flags); 125 local_irq_restore(flags);
120} 126}
121 127
122void flush_tlb_page (struct vm_area_struct *vma, unsigned long page) 128void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
123{ 129{
130 int cpu = smp_processor_id();
124 struct mm_struct* mm = vma->vm_mm; 131 struct mm_struct* mm = vma->vm_mm;
125 unsigned long flags; 132 unsigned long flags;
126 int oldpid; 133 int oldpid;
127 134
128 if(mm->context == NO_CONTEXT) 135 if (mm->context.asid[cpu] == NO_CONTEXT)
129 return; 136 return;
130 137
131 local_irq_save(flags); 138 local_irq_save(flags);
132 139
133 oldpid = get_rasid_register(); 140 oldpid = get_rasid_register();
134 set_rasid_register(ASID_INSERT(mm->context)); 141 set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
135 142
136 if (vma->vm_flags & VM_EXEC) 143 if (vma->vm_flags & VM_EXEC)
137 invalidate_itlb_mapping(page); 144 invalidate_itlb_mapping(page);
diff --git a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
index 4b43ff133be0..aeb316b7ff88 100644
--- a/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
+++ b/arch/xtensa/platforms/xtfpga/include/platform/hardware.h
@@ -26,7 +26,7 @@
26 26
27/* Default assignment of LX60 devices to external interrupts. */ 27/* Default assignment of LX60 devices to external interrupts. */
28 28
29#ifdef CONFIG_ARCH_HAS_SMP 29#ifdef CONFIG_XTENSA_MX
30#define DUART16552_INTNUM XCHAL_EXTINT3_NUM 30#define DUART16552_INTNUM XCHAL_EXTINT3_NUM
31#define OETH_IRQ XCHAL_EXTINT4_NUM 31#define OETH_IRQ XCHAL_EXTINT4_NUM
32#else 32#else