path: root/arch/mips/netlogic
author     Linus Torvalds <torvalds@linux-foundation.org>  2012-12-14 17:27:45 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-14 17:27:45 -0500
commit     cebfa85eb86d92bf85d3b041c6b044184517a988 (patch)
tree       be0a374556fe335ce96dfdb296c89537750d5868 /arch/mips/netlogic
parent     d42b3a2906a10b732ea7d7f849d49be79d242ef0 (diff)
parent     241738bd51cb0efe58e6c570223153e970afe3ae (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS updates from Ralf Baechle:
 "The MIPS bits for 3.8. This also includes a bunch of fixes that were
  sitting in the linux-mips.org git tree for a long time. This pull
  request contains updates to several OCTEON drivers and the board
  support code for BCM47XX, BCM63XX, XLP, XLR, XLS, lantiq, Loongson1B,
  updates to the SSB bus support, MIPS kexec code and adds support for
  kdump.

  When pulling this, there are two expected merge conflicts in
  include/linux/bcma/bcma_driver_chipcommon.h which are trivial to
  resolve, just remove the conflict markers and keep both alternatives."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (90 commits)
  MIPS: PMC-Sierra Yosemite: Remove support.
  VIDEO: Newport Fix console crashes
  MIPS: wrppmc: Fix build of PCI code.
  MIPS: IP22/IP28: Fix build of EISA code.
  MIPS: RB532: Fix build of prom code.
  MIPS: PowerTV: Fix build.
  MIPS: IP27: Correct fucked grammar in ops-bridge.c
  MIPS: Highmem: Fix build error if CONFIG_DEBUG_HIGHMEM is disabled
  MIPS: Fix potencial corruption
  MIPS: Fix for warning from FPU emulation code
  MIPS: Handle COP3 Unusable exception as COP1X for FP emulation
  MIPS: Fix poweroff failure when HOTPLUG_CPU configured.
  MIPS: MT: Fix build with CONFIG_UIDGID_STRICT_TYPE_CHECKS=y
  MIPS: Remove unused smvp.h
  MIPS/EDAC: Improve OCTEON EDAC support.
  MIPS: OCTEON: Add definitions for OCTEON memory contoller registers.
  MIPS: OCTEON: Add OCTEON family definitions to octeon-model.h
  ata: pata_octeon_cf: Use correct byte order for DMA in when built little-endian.
  MIPS/OCTEON/ata: Convert pata_octeon_cf.c to use device tree.
  MIPS: Remove usage of CEVT_R4K_LIB config option.
  ...
Diffstat (limited to 'arch/mips/netlogic')
-rw-r--r--  arch/mips/netlogic/Kconfig           |  28
-rw-r--r--  arch/mips/netlogic/common/irq.c      | 165
-rw-r--r--  arch/mips/netlogic/common/smp.c      |  89
-rw-r--r--  arch/mips/netlogic/common/smpboot.S  |   6
-rw-r--r--  arch/mips/netlogic/xlp/nlm_hal.c     |  67
-rw-r--r--  arch/mips/netlogic/xlp/setup.c       |  50
-rw-r--r--  arch/mips/netlogic/xlp/wakeup.c      |  83
-rw-r--r--  arch/mips/netlogic/xlr/Makefile      |   4
-rw-r--r--  arch/mips/netlogic/xlr/fmn-config.c  | 290
-rw-r--r--  arch/mips/netlogic/xlr/fmn.c         | 204
-rw-r--r--  arch/mips/netlogic/xlr/setup.c       |  37
-rw-r--r--  arch/mips/netlogic/xlr/wakeup.c      |  23
12 files changed, 830 insertions, 216 deletions
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
index 8059eb76f8eb..3c05bf9e280a 100644
--- a/arch/mips/netlogic/Kconfig
+++ b/arch/mips/netlogic/Kconfig
@@ -9,6 +9,34 @@ config DT_XLP_EVP
9 This DTB will be used if the firmware does not pass in a DTB 9 This DTB will be used if the firmware does not pass in a DTB
10 pointer to the kernel. The corresponding DTS file is at 10 pointer to the kernel. The corresponding DTS file is at
11 arch/mips/netlogic/dts/xlp_evp.dts 11 arch/mips/netlogic/dts/xlp_evp.dts
12
13config NLM_MULTINODE
14 bool "Support for multi-chip boards"
15 depends on NLM_XLP_BOARD
16 default n
17 help
18 Add support for boards with 2 or 4 XLPs connected over ICI.
19
20if NLM_MULTINODE
21choice
22 prompt "Number of XLPs on the board"
23 default NLM_MULTINODE_2
24 help
25 In the multi-node case, specify the number of SoCs on the board.
26
27config NLM_MULTINODE_2
28 bool "Dual-XLP board"
29 help
30 Support boards with up to two XLPs connected over ICI.
31
32config NLM_MULTINODE_4
33 bool "Quad-XLP board"
34 help
35 Support boards with up to four XLPs connected over ICI.
36
37endchoice
38
39endif
12endif 40endif
13 41
14config NLM_COMMON 42config NLM_COMMON
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index e52bfcbce093..00dcc7a2bc5a 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -36,7 +36,6 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/linkage.h> 37#include <linux/linkage.h>
38#include <linux/interrupt.h> 38#include <linux/interrupt.h>
39#include <linux/spinlock.h>
40#include <linux/mm.h> 39#include <linux/mm.h>
41#include <linux/slab.h> 40#include <linux/slab.h>
42#include <linux/irq.h> 41#include <linux/irq.h>
@@ -59,68 +58,70 @@
59#elif defined(CONFIG_CPU_XLR) 58#elif defined(CONFIG_CPU_XLR)
60#include <asm/netlogic/xlr/iomap.h> 59#include <asm/netlogic/xlr/iomap.h>
61#include <asm/netlogic/xlr/pic.h> 60#include <asm/netlogic/xlr/pic.h>
61#include <asm/netlogic/xlr/fmn.h>
62#else 62#else
63#error "Unknown CPU" 63#error "Unknown CPU"
64#endif 64#endif
65/*
66 * These are the routines that handle all the low level interrupt stuff.
67 * Actions handled here are: initialization of the interrupt map, requesting of
68 * interrupt lines by handlers, dispatching if interrupts to handlers, probing
69 * for interrupt lines
70 */
71 65
72/* Globals */ 66#ifdef CONFIG_SMP
73static uint64_t nlm_irq_mask; 67#define SMP_IRQ_MASK ((1ULL << IRQ_IPI_SMP_FUNCTION) | \
74static DEFINE_SPINLOCK(nlm_pic_lock); 68 (1ULL << IRQ_IPI_SMP_RESCHEDULE))
69#else
70#define SMP_IRQ_MASK 0
71#endif
72#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
73 (1ull << IRQ_FMN))
74
75struct nlm_pic_irq {
76 void (*extra_ack)(struct irq_data *);
77 struct nlm_soc_info *node;
78 int picirq;
79 int irt;
80 int flags;
81};
75 82
76static void xlp_pic_enable(struct irq_data *d) 83static void xlp_pic_enable(struct irq_data *d)
77{ 84{
78 unsigned long flags; 85 unsigned long flags;
79 int irt; 86 struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
80 87
81 irt = nlm_irq_to_irt(d->irq); 88 BUG_ON(!pd);
82 if (irt == -1) 89 spin_lock_irqsave(&pd->node->piclock, flags);
83 return; 90 nlm_pic_enable_irt(pd->node->picbase, pd->irt);
84 spin_lock_irqsave(&nlm_pic_lock, flags); 91 spin_unlock_irqrestore(&pd->node->piclock, flags);
85 nlm_pic_enable_irt(nlm_pic_base, irt);
86 spin_unlock_irqrestore(&nlm_pic_lock, flags);
87} 92}
88 93
89static void xlp_pic_disable(struct irq_data *d) 94static void xlp_pic_disable(struct irq_data *d)
90{ 95{
96 struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
91 unsigned long flags; 97 unsigned long flags;
92 int irt;
93 98
94 irt = nlm_irq_to_irt(d->irq); 99 BUG_ON(!pd);
95 if (irt == -1) 100 spin_lock_irqsave(&pd->node->piclock, flags);
96 return; 101 nlm_pic_disable_irt(pd->node->picbase, pd->irt);
97 spin_lock_irqsave(&nlm_pic_lock, flags); 102 spin_unlock_irqrestore(&pd->node->piclock, flags);
98 nlm_pic_disable_irt(nlm_pic_base, irt);
99 spin_unlock_irqrestore(&nlm_pic_lock, flags);
100} 103}
101 104
102static void xlp_pic_mask_ack(struct irq_data *d) 105static void xlp_pic_mask_ack(struct irq_data *d)
103{ 106{
104 uint64_t mask = 1ull << d->irq; 107 struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
108 uint64_t mask = 1ull << pd->picirq;
105 109
106 write_c0_eirr(mask); /* ack by writing EIRR */ 110 write_c0_eirr(mask); /* ack by writing EIRR */
107} 111}
108 112
109static void xlp_pic_unmask(struct irq_data *d) 113static void xlp_pic_unmask(struct irq_data *d)
110{ 114{
111 void *hd = irq_data_get_irq_handler_data(d); 115 struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
112 int irt;
113 116
114 irt = nlm_irq_to_irt(d->irq); 117 if (!pd)
115 if (irt == -1)
116 return; 118 return;
117 119
118 if (hd) { 120 if (pd->extra_ack)
119 void (*extra_ack)(void *) = hd; 121 pd->extra_ack(d);
120 extra_ack(d); 122
121 }
122 /* Ack is a single write, no need to lock */ 123 /* Ack is a single write, no need to lock */
123 nlm_pic_ack(nlm_pic_base, irt); 124 nlm_pic_ack(pd->node->picbase, pd->irt);
124} 125}
125 126
126static struct irq_chip xlp_pic = { 127static struct irq_chip xlp_pic = {
@@ -174,64 +175,108 @@ struct irq_chip nlm_cpu_intr = {
174 .irq_eoi = cpuintr_ack, 175 .irq_eoi = cpuintr_ack,
175}; 176};
176 177
177void __init init_nlm_common_irqs(void) 178static void __init nlm_init_percpu_irqs(void)
178{ 179{
179 int i, irq, irt; 180 int i;
180 181
181 for (i = 0; i < PIC_IRT_FIRST_IRQ; i++) 182 for (i = 0; i < PIC_IRT_FIRST_IRQ; i++)
182 irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq); 183 irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq);
183
184 for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ ; i++)
185 irq_set_chip_and_handler(i, &xlp_pic, handle_level_irq);
186
187#ifdef CONFIG_SMP 184#ifdef CONFIG_SMP
188 irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr, 185 irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr,
189 nlm_smp_function_ipi_handler); 186 nlm_smp_function_ipi_handler);
190 irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr, 187 irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
191 nlm_smp_resched_ipi_handler); 188 nlm_smp_resched_ipi_handler);
192 nlm_irq_mask |=
193 ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE));
194#endif 189#endif
190}
191
192void nlm_setup_pic_irq(int node, int picirq, int irq, int irt)
193{
194 struct nlm_pic_irq *pic_data;
195 int xirq;
196
197 xirq = nlm_irq_to_xirq(node, irq);
198 pic_data = kzalloc(sizeof(*pic_data), GFP_KERNEL);
199 BUG_ON(pic_data == NULL);
200 pic_data->irt = irt;
201 pic_data->picirq = picirq;
202 pic_data->node = nlm_get_node(node);
203 irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
204 irq_set_handler_data(xirq, pic_data);
205}
206
207void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
208{
209 struct nlm_pic_irq *pic_data;
210 int xirq;
211
212 xirq = nlm_irq_to_xirq(node, irq);
213 pic_data = irq_get_handler_data(xirq);
214 pic_data->extra_ack = xack;
215}
195 216
196 for (irq = PIC_IRT_FIRST_IRQ; irq <= PIC_IRT_LAST_IRQ; irq++) { 217static void nlm_init_node_irqs(int node)
197 irt = nlm_irq_to_irt(irq); 218{
219 int i, irt;
220 uint64_t irqmask;
221 struct nlm_soc_info *nodep;
222
223 pr_info("Init IRQ for node %d\n", node);
224 nodep = nlm_get_node(node);
225 irqmask = PERCPU_IRQ_MASK;
226 for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) {
227 irt = nlm_irq_to_irt(i);
198 if (irt == -1) 228 if (irt == -1)
199 continue; 229 continue;
200 nlm_irq_mask |= (1ULL << irq); 230 nlm_setup_pic_irq(node, i, i, irt);
201 nlm_pic_init_irt(nlm_pic_base, irt, irq, 0); 231 /* set interrupts to first cpu in node */
232 nlm_pic_init_irt(nodep->picbase, irt, i,
233 node * NLM_CPUS_PER_NODE);
234 irqmask |= (1ull << i);
202 } 235 }
203 236 nodep->irqmask = irqmask;
204 nlm_irq_mask |= (1ULL << IRQ_TIMER);
205} 237}
206 238
207void __init arch_init_irq(void) 239void __init arch_init_irq(void)
208{ 240{
209 /* Initialize the irq descriptors */ 241 /* Initialize the irq descriptors */
210 init_nlm_common_irqs(); 242 nlm_init_percpu_irqs();
211 243 nlm_init_node_irqs(0);
212 write_c0_eimr(nlm_irq_mask); 244 write_c0_eimr(nlm_current_node()->irqmask);
245#if defined(CONFIG_CPU_XLR)
246 nlm_setup_fmn_irq();
247#endif
213} 248}
214 249
215void __cpuinit nlm_smp_irq_init(void) 250void nlm_smp_irq_init(int hwcpuid)
216{ 251{
217 /* set interrupt mask for non-zero cpus */ 252 int node, cpu;
218 write_c0_eimr(nlm_irq_mask); 253
254 node = hwcpuid / NLM_CPUS_PER_NODE;
255 cpu = hwcpuid % NLM_CPUS_PER_NODE;
256
257 if (cpu == 0 && node != 0)
258 nlm_init_node_irqs(node);
259 write_c0_eimr(nlm_current_node()->irqmask);
219} 260}
220 261
221asmlinkage void plat_irq_dispatch(void) 262asmlinkage void plat_irq_dispatch(void)
222{ 263{
223 uint64_t eirr; 264 uint64_t eirr;
224 int i; 265 int i, node;
225 266
267 node = nlm_nodeid();
226 eirr = read_c0_eirr() & read_c0_eimr(); 268 eirr = read_c0_eirr() & read_c0_eimr();
227 if (eirr & (1 << IRQ_TIMER)) {
228 do_IRQ(IRQ_TIMER);
229 return;
230 }
231 269
232 i = __ilog2_u64(eirr); 270 i = __ilog2_u64(eirr);
233 if (i == -1) 271 if (i == -1)
234 return; 272 return;
235 273
236 do_IRQ(i); 274 /* per-CPU IRQs don't need translation */
275 if (eirr & PERCPU_IRQ_MASK) {
276 do_IRQ(i);
277 return;
278 }
279
280 /* top level irq handling */
281 do_IRQ(nlm_irq_to_xirq(node, i));
237} 282}
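The irq.c changes above replace the global nlm_pic_base/nlm_irq_mask with per-node state and add nlm_setup_pic_irq()/nlm_set_pic_extra_ack() so platform code can attach an extra acknowledge callback to a PIC interrupt. A minimal, hypothetical sketch of how board code might use the new hook (PIC_PCIE_LINK_0_IRQ is a constant visible in the nlm_hal.c hunk below; my_pcie_link0_ack and board_pcie_irq_fixup are illustrative names, not part of this patch):

/* Hypothetical: hook an extra acknowledge callback for PCIe link 0 on
 * node 0. The PIC irq itself is already set up by nlm_init_node_irqs()
 * in arch_init_irq(), so only the ack callback needs to be installed. */
static void my_pcie_link0_ack(struct irq_data *d)
{
	/* device-specific acknowledge would go here */
}

static void __init board_pcie_irq_fixup(void)
{
	nlm_set_pic_extra_ack(0, PIC_PCIE_LINK_0_IRQ, my_pcie_link0_ack);
}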
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index fab316de57e9..a080d9ee3cd7 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -59,12 +59,17 @@
59 59
60void nlm_send_ipi_single(int logical_cpu, unsigned int action) 60void nlm_send_ipi_single(int logical_cpu, unsigned int action)
61{ 61{
62 int cpu = cpu_logical_map(logical_cpu); 62 int cpu, node;
63 uint64_t picbase;
64
65 cpu = cpu_logical_map(logical_cpu);
66 node = cpu / NLM_CPUS_PER_NODE;
67 picbase = nlm_get_node(node)->picbase;
63 68
64 if (action & SMP_CALL_FUNCTION) 69 if (action & SMP_CALL_FUNCTION)
65 nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_FUNCTION, 0); 70 nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
66 if (action & SMP_RESCHEDULE_YOURSELF) 71 if (action & SMP_RESCHEDULE_YOURSELF)
67 nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_RESCHEDULE, 0); 72 nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
68} 73}
69 74
70void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action) 75void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
@@ -96,11 +101,12 @@ void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
96void nlm_early_init_secondary(int cpu) 101void nlm_early_init_secondary(int cpu)
97{ 102{
98 change_c0_config(CONF_CM_CMASK, 0x3); 103 change_c0_config(CONF_CM_CMASK, 0x3);
99 write_c0_ebase((uint32_t)nlm_common_ebase);
100#ifdef CONFIG_CPU_XLP 104#ifdef CONFIG_CPU_XLP
101 if (hard_smp_processor_id() % 4 == 0) 105 /* mmu init, once per core */
106 if (cpu % NLM_THREADS_PER_CORE == 0)
102 xlp_mmu_init(); 107 xlp_mmu_init();
103#endif 108#endif
109 write_c0_ebase(nlm_current_node()->ebase);
104} 110}
105 111
106/* 112/*
@@ -108,8 +114,12 @@ void nlm_early_init_secondary(int cpu)
108 */ 114 */
109static void __cpuinit nlm_init_secondary(void) 115static void __cpuinit nlm_init_secondary(void)
110{ 116{
111 current_cpu_data.core = hard_smp_processor_id() / 4; 117 int hwtid;
112 nlm_smp_irq_init(); 118
119 hwtid = hard_smp_processor_id();
120 current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
121 nlm_percpu_init(hwtid);
122 nlm_smp_irq_init(hwtid);
113} 123}
114 124
115void nlm_prepare_cpus(unsigned int max_cpus) 125void nlm_prepare_cpus(unsigned int max_cpus)
@@ -120,9 +130,6 @@ void nlm_prepare_cpus(unsigned int max_cpus)
120 130
121void nlm_smp_finish(void) 131void nlm_smp_finish(void)
122{ 132{
123#ifdef notyet
124 nlm_common_msgring_cpu_init();
125#endif
126 local_irq_enable(); 133 local_irq_enable();
127} 134}
128 135
@@ -142,27 +149,27 @@ cpumask_t phys_cpu_present_map;
142 149
143void nlm_boot_secondary(int logical_cpu, struct task_struct *idle) 150void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
144{ 151{
145 unsigned long gp = (unsigned long)task_thread_info(idle); 152 int cpu, node;
146 unsigned long sp = (unsigned long)__KSTK_TOS(idle);
147 int cpu = cpu_logical_map(logical_cpu);
148 153
149 nlm_next_sp = sp; 154 cpu = cpu_logical_map(logical_cpu);
150 nlm_next_gp = gp; 155 node = cpu / NLM_CPUS_PER_NODE;
156 nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
157 nlm_next_gp = (unsigned long)task_thread_info(idle);
151 158
152 /* barrier */ 159 /* barrier for sp/gp store above */
153 __sync(); 160 __sync();
154 nlm_pic_send_ipi(nlm_pic_base, cpu, 1, 1); 161 nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1); /* NMI */
155} 162}
156 163
157void __init nlm_smp_setup(void) 164void __init nlm_smp_setup(void)
158{ 165{
159 unsigned int boot_cpu; 166 unsigned int boot_cpu;
160 int num_cpus, i; 167 int num_cpus, i, ncore;
161 168
162 boot_cpu = hard_smp_processor_id(); 169 boot_cpu = hard_smp_processor_id();
163 cpus_clear(phys_cpu_present_map); 170 cpumask_clear(&phys_cpu_present_map);
164 171
165 cpu_set(boot_cpu, phys_cpu_present_map); 172 cpumask_set_cpu(boot_cpu, &phys_cpu_present_map);
166 __cpu_number_map[boot_cpu] = 0; 173 __cpu_number_map[boot_cpu] = 0;
167 __cpu_logical_map[0] = boot_cpu; 174 __cpu_logical_map[0] = boot_cpu;
168 set_cpu_possible(0, true); 175 set_cpu_possible(0, true);
@@ -174,7 +181,7 @@ void __init nlm_smp_setup(void)
174 * it is only set for ASPs (see smpboot.S) 181 * it is only set for ASPs (see smpboot.S)
175 */ 182 */
176 if (nlm_cpu_ready[i]) { 183 if (nlm_cpu_ready[i]) {
177 cpu_set(i, phys_cpu_present_map); 184 cpumask_set_cpu(i, &phys_cpu_present_map);
178 __cpu_number_map[i] = num_cpus; 185 __cpu_number_map[i] = num_cpus;
179 __cpu_logical_map[num_cpus] = i; 186 __cpu_logical_map[num_cpus] = i;
180 set_cpu_possible(num_cpus, true); 187 set_cpu_possible(num_cpus, true);
@@ -182,20 +189,28 @@ void __init nlm_smp_setup(void)
182 } 189 }
183 } 190 }
184 191
192 /* check with the cores we have woken up */
193 for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
194 ncore += hweight32(nlm_get_node(i)->coremask);
195
185 pr_info("Phys CPU present map: %lx, possible map %lx\n", 196 pr_info("Phys CPU present map: %lx, possible map %lx\n",
186 (unsigned long)phys_cpu_present_map.bits[0], 197 (unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
187 (unsigned long)cpumask_bits(cpu_possible_mask)[0]); 198 (unsigned long)cpumask_bits(cpu_possible_mask)[0]);
188 199
189 pr_info("Detected %i Slave CPU(s)\n", num_cpus); 200 pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
201 nlm_threads_per_core, num_cpus);
190 nlm_set_nmi_handler(nlm_boot_secondary_cpus); 202 nlm_set_nmi_handler(nlm_boot_secondary_cpus);
191} 203}
192 204
193static int nlm_parse_cpumask(u32 cpu_mask) 205static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
194{ 206{
195 uint32_t core0_thr_mask, core_thr_mask; 207 uint32_t core0_thr_mask, core_thr_mask;
196 int threadmode, i; 208 int threadmode, i, j;
197 209
198 core0_thr_mask = cpu_mask & 0xf; 210 core0_thr_mask = 0;
211 for (i = 0; i < NLM_THREADS_PER_CORE; i++)
212 if (cpumask_test_cpu(i, wakeup_mask))
213 core0_thr_mask |= (1 << i);
199 switch (core0_thr_mask) { 214 switch (core0_thr_mask) {
200 case 1: 215 case 1:
201 nlm_threads_per_core = 1; 216 nlm_threads_per_core = 1;
@@ -214,25 +229,23 @@ static int nlm_parse_cpumask(u32 cpu_mask)
214 } 229 }
215 230
216 /* Verify other cores CPU masks */ 231 /* Verify other cores CPU masks */
217 nlm_coremask = 1; 232 for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
218 nlm_cpumask = core0_thr_mask; 233 core_thr_mask = 0;
219 for (i = 1; i < 8; i++) { 234 for (j = 0; j < NLM_THREADS_PER_CORE; j++)
220 core_thr_mask = (cpu_mask >> (i * 4)) & 0xf; 235 if (cpumask_test_cpu(i + j, wakeup_mask))
221 if (core_thr_mask) { 236 core_thr_mask |= (1 << j);
222 if (core_thr_mask != core0_thr_mask) 237 if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
223 goto unsupp; 238 goto unsupp;
224 nlm_coremask |= 1 << i;
225 nlm_cpumask |= core0_thr_mask << (4 * i);
226 }
227 } 239 }
228 return threadmode; 240 return threadmode;
229 241
230unsupp: 242unsupp:
231 panic("Unsupported CPU mask %x\n", cpu_mask); 243 panic("Unsupported CPU mask %lx\n",
244 (unsigned long)cpumask_bits(wakeup_mask)[0]);
232 return 0; 245 return 0;
233} 246}
234 247
235int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask) 248int __cpuinit nlm_wakeup_secondary_cpus(void)
236{ 249{
237 unsigned long reset_vec; 250 unsigned long reset_vec;
238 char *reset_data; 251 char *reset_data;
@@ -244,7 +257,7 @@ int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask)
244 (nlm_reset_entry_end - nlm_reset_entry)); 257 (nlm_reset_entry_end - nlm_reset_entry));
245 258
246 /* verify the mask and setup core config variables */ 259 /* verify the mask and setup core config variables */
247 threadmode = nlm_parse_cpumask(wakeup_mask); 260 threadmode = nlm_parse_cpumask(&nlm_cpumask);
248 261
249 /* Setup CPU init parameters */ 262 /* Setup CPU init parameters */
250 reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS); 263 reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS);
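The smp.c changes derive a node and a per-node cpu from the hardware thread id. As an illustrative summary (not part of the patch), the id arithmetic used here and in the wakeup code can be written as the helpers below; they assume NLM_CPUS_PER_NODE == NLM_CORES_PER_NODE * NLM_THREADS_PER_CORE, which is how xlp/wakeup.c computes cpu numbers:

/* Illustrative only: decompose a hardware thread id the way the code
 * above does (divide by NLM_CPUS_PER_NODE for the node, then split the
 * remainder into core and thread within that node). */
static inline int nlm_hwtid_to_node(int hwtid)
{
	return hwtid / NLM_CPUS_PER_NODE;
}

static inline int nlm_hwtid_to_core(int hwtid)
{
	return (hwtid % NLM_CPUS_PER_NODE) / NLM_THREADS_PER_CORE;
}

static inline int nlm_hwtid_to_thread(int hwtid)
{
	return hwtid % NLM_THREADS_PER_CORE;
}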
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index a13355cc97eb..a0b74874bebe 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -61,7 +61,7 @@
61 li t0, LSU_DEFEATURE 61 li t0, LSU_DEFEATURE
62 mfcr t1, t0 62 mfcr t1, t0
63 63
64 lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */ 64 lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */
65 or t1, t1, t2 65 or t1, t1, t2
66#ifdef XLP_AX_WORKAROUND 66#ifdef XLP_AX_WORKAROUND
67 li t2, ~0xe /* S1RCM */ 67 li t2, ~0xe /* S1RCM */
@@ -186,7 +186,7 @@ EXPORT(nlm_boot_siblings)
186 * jump to the secondary wait function. 186 * jump to the secondary wait function.
187 */ 187 */
188 mfc0 v0, CP0_EBASE, 1 188 mfc0 v0, CP0_EBASE, 1
189 andi v0, 0x7f /* v0 <- node/core */ 189 andi v0, 0x3ff /* v0 <- node/core */
190 190
191 /* Init MMU in the first thread after changing THREAD_MODE 191 /* Init MMU in the first thread after changing THREAD_MODE
192 * register (Ax Errata?) 192 * register (Ax Errata?)
@@ -263,6 +263,8 @@ NESTED(nlm_boot_secondary_cpus, 16, sp)
263 PTR_L gp, 0(t1) 263 PTR_L gp, 0(t1)
264 264
265 /* a0 has the processor id */ 265 /* a0 has the processor id */
266 mfc0 a0, CP0_EBASE, 1
267 andi a0, 0x3ff /* a0 <- node/core */
266 PTR_LA t0, nlm_early_init_secondary 268 PTR_LA t0, nlm_early_init_secondary
267 jalr t0 269 jalr t0
268 nop 270 nop
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index 6c65ac701912..529e74742d9f 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -40,23 +40,23 @@
40#include <asm/mipsregs.h> 40#include <asm/mipsregs.h>
41#include <asm/time.h> 41#include <asm/time.h>
42 42
43#include <asm/netlogic/common.h>
43#include <asm/netlogic/haldefs.h> 44#include <asm/netlogic/haldefs.h>
44#include <asm/netlogic/xlp-hal/iomap.h> 45#include <asm/netlogic/xlp-hal/iomap.h>
45#include <asm/netlogic/xlp-hal/xlp.h> 46#include <asm/netlogic/xlp-hal/xlp.h>
46#include <asm/netlogic/xlp-hal/pic.h> 47#include <asm/netlogic/xlp-hal/pic.h>
47#include <asm/netlogic/xlp-hal/sys.h> 48#include <asm/netlogic/xlp-hal/sys.h>
48 49
49/* These addresses are computed by the nlm_hal_init() */
50uint64_t nlm_io_base;
51uint64_t nlm_sys_base;
52uint64_t nlm_pic_base;
53
54/* Main initialization */ 50/* Main initialization */
55void nlm_hal_init(void) 51void nlm_node_init(int node)
56{ 52{
57 nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE); 53 struct nlm_soc_info *nodep;
58 nlm_sys_base = nlm_get_sys_regbase(0); /* node 0 */ 54
59 nlm_pic_base = nlm_get_pic_regbase(0); /* node 0 */ 55 nodep = nlm_get_node(node);
56 nodep->sysbase = nlm_get_sys_regbase(node);
57 nodep->picbase = nlm_get_pic_regbase(node);
58 nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
59 spin_lock_init(&nodep->piclock);
60} 60}
61 61
62int nlm_irq_to_irt(int irq) 62int nlm_irq_to_irt(int irq)
@@ -100,52 +100,15 @@ int nlm_irq_to_irt(int irq)
100 } 100 }
101} 101}
102 102
103int nlm_irt_to_irq(int irt) 103unsigned int nlm_get_core_frequency(int node, int core)
104{
105 switch (irt) {
106 case PIC_IRT_UART_0_INDEX:
107 return PIC_UART_0_IRQ;
108 case PIC_IRT_UART_1_INDEX:
109 return PIC_UART_1_IRQ;
110 case PIC_IRT_PCIE_LINK_0_INDEX:
111 return PIC_PCIE_LINK_0_IRQ;
112 case PIC_IRT_PCIE_LINK_1_INDEX:
113 return PIC_PCIE_LINK_1_IRQ;
114 case PIC_IRT_PCIE_LINK_2_INDEX:
115 return PIC_PCIE_LINK_2_IRQ;
116 case PIC_IRT_PCIE_LINK_3_INDEX:
117 return PIC_PCIE_LINK_3_IRQ;
118 case PIC_IRT_EHCI_0_INDEX:
119 return PIC_EHCI_0_IRQ;
120 case PIC_IRT_EHCI_1_INDEX:
121 return PIC_EHCI_1_IRQ;
122 case PIC_IRT_OHCI_0_INDEX:
123 return PIC_OHCI_0_IRQ;
124 case PIC_IRT_OHCI_1_INDEX:
125 return PIC_OHCI_1_IRQ;
126 case PIC_IRT_OHCI_2_INDEX:
127 return PIC_OHCI_2_IRQ;
128 case PIC_IRT_OHCI_3_INDEX:
129 return PIC_OHCI_3_IRQ;
130 case PIC_IRT_MMC_INDEX:
131 return PIC_MMC_IRQ;
132 case PIC_IRT_I2C_0_INDEX:
133 return PIC_I2C_0_IRQ;
134 case PIC_IRT_I2C_1_INDEX:
135 return PIC_I2C_1_IRQ;
136 default:
137 return -1;
138 }
139}
140
141unsigned int nlm_get_core_frequency(int core)
142{ 104{
143 unsigned int pll_divf, pll_divr, dfs_div, ext_div; 105 unsigned int pll_divf, pll_divr, dfs_div, ext_div;
144 unsigned int rstval, dfsval, denom; 106 unsigned int rstval, dfsval, denom;
145 uint64_t num; 107 uint64_t num, sysbase;
146 108
147 rstval = nlm_read_sys_reg(nlm_sys_base, SYS_POWER_ON_RESET_CFG); 109 sysbase = nlm_get_node(node)->sysbase;
148 dfsval = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIV_VALUE); 110 rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
111 dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE);
149 pll_divf = ((rstval >> 10) & 0x7f) + 1; 112 pll_divf = ((rstval >> 10) & 0x7f) + 1;
150 pll_divr = ((rstval >> 8) & 0x3) + 1; 113 pll_divr = ((rstval >> 8) & 0x3) + 1;
151 ext_div = ((rstval >> 30) & 0x3) + 1; 114 ext_div = ((rstval >> 30) & 0x3) + 1;
@@ -159,5 +122,5 @@ unsigned int nlm_get_core_frequency(int core)
159 122
160unsigned int nlm_get_cpu_frequency(void) 123unsigned int nlm_get_cpu_frequency(void)
161{ 124{
162 return nlm_get_core_frequency(0); 125 return nlm_get_core_frequency(0, 0);
163} 126}
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index d8997098defd..4894d62043ac 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -52,26 +52,40 @@
52#include <asm/netlogic/xlp-hal/xlp.h> 52#include <asm/netlogic/xlp-hal/xlp.h>
53#include <asm/netlogic/xlp-hal/sys.h> 53#include <asm/netlogic/xlp-hal/sys.h>
54 54
55unsigned long nlm_common_ebase = 0x0; 55uint64_t nlm_io_base;
56 56struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
57/* default to uniprocessor */ 57cpumask_t nlm_cpumask = CPU_MASK_CPU0;
58uint32_t nlm_coremask = 1, nlm_cpumask = 1; 58unsigned int nlm_threads_per_core;
59int nlm_threads_per_core = 1;
60extern u32 __dtb_start[]; 59extern u32 __dtb_start[];
61 60
62static void nlm_linux_exit(void) 61static void nlm_linux_exit(void)
63{ 62{
64 nlm_write_sys_reg(nlm_sys_base, SYS_CHIP_RESET, 1); 63 uint64_t sysbase = nlm_get_node(0)->sysbase;
64
65 nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
65 for ( ; ; ) 66 for ( ; ; )
66 cpu_wait(); 67 cpu_wait();
67} 68}
68 69
69void __init plat_mem_setup(void) 70void __init plat_mem_setup(void)
70{ 71{
72 void *fdtp;
73
71 panic_timeout = 5; 74 panic_timeout = 5;
72 _machine_restart = (void (*)(char *))nlm_linux_exit; 75 _machine_restart = (void (*)(char *))nlm_linux_exit;
73 _machine_halt = nlm_linux_exit; 76 _machine_halt = nlm_linux_exit;
74 pm_power_off = nlm_linux_exit; 77 pm_power_off = nlm_linux_exit;
78
79 /*
80 * If no FDT pointer is passed in, use the built-in FDT.
81 * device_tree_init() does not handle CKSEG0 pointers in
82 * 64-bit, so convert pointer.
83 */
84 fdtp = (void *)(long)fw_arg0;
85 if (!fdtp)
86 fdtp = __dtb_start;
87 fdtp = phys_to_virt(__pa(fdtp));
88 early_init_devtree(fdtp);
75} 89}
76 90
77const char *get_system_type(void) 91const char *get_system_type(void)
@@ -94,27 +108,19 @@ void xlp_mmu_init(void)
94 (13 + (ffz(PM_DEFAULT_MASK >> 13) / 2))); 108 (13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
95} 109}
96 110
97void __init prom_init(void) 111void nlm_percpu_init(int hwcpuid)
98{ 112{
99 void *fdtp; 113}
100 114
115void __init prom_init(void)
116{
117 nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
101 xlp_mmu_init(); 118 xlp_mmu_init();
102 nlm_hal_init(); 119 nlm_node_init(0);
103
104 /*
105 * If no FDT pointer is passed in, use the built-in FDT.
106 * device_tree_init() does not handle CKSEG0 pointers in
107 * 64-bit, so convert pointer.
108 */
109 fdtp = (void *)(long)fw_arg0;
110 if (!fdtp)
111 fdtp = __dtb_start;
112 fdtp = phys_to_virt(__pa(fdtp));
113 early_init_devtree(fdtp);
114 120
115 nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
116#ifdef CONFIG_SMP 121#ifdef CONFIG_SMP
117 nlm_wakeup_secondary_cpus(0xffffffff); 122 cpumask_setall(&nlm_cpumask);
123 nlm_wakeup_secondary_cpus();
118 124
119 /* update TLB size after waking up threads */ 125 /* update TLB size after waking up threads */
120 current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1; 126 current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
index 44d923ff3846..cb9010642ac3 100644
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -51,45 +51,72 @@
51#include <asm/netlogic/xlp-hal/xlp.h> 51#include <asm/netlogic/xlp-hal/xlp.h>
52#include <asm/netlogic/xlp-hal/sys.h> 52#include <asm/netlogic/xlp-hal/sys.h>
53 53
54static void xlp_enable_secondary_cores(void) 54static int xlp_wakeup_core(uint64_t sysbase, int core)
55{ 55{
56 uint32_t core, value, coremask, syscoremask; 56 uint32_t coremask, value;
57 int count; 57 int count;
58 58
59 /* read cores in reset from SYS block */ 59 coremask = (1 << core);
60 syscoremask = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET);
61 60
62 /* update user specified */ 61 /* Enable CPU clock */
63 nlm_coremask = nlm_coremask & (syscoremask | 1); 62 value = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
63 value &= ~coremask;
64 nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, value);
64 65
65 for (core = 1; core < 8; core++) { 66 /* Remove CPU Reset */
66 coremask = 1 << core; 67 value = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
67 if ((nlm_coremask & coremask) == 0) 68 value &= ~coremask;
68 continue; 69 nlm_write_sys_reg(sysbase, SYS_CPU_RESET, value);
69 70
70 /* Enable CPU clock */ 71 /* Poll for CPU to mark itself coherent */
71 value = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL); 72 count = 100000;
72 value &= ~coremask; 73 do {
73 nlm_write_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIS_CTRL, value); 74 value = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
75 } while ((value & coremask) != 0 && --count > 0);
74 76
75 /* Remove CPU Reset */ 77 return count != 0;
76 value = nlm_read_sys_reg(nlm_sys_base, SYS_CPU_RESET); 78}
77 value &= ~coremask; 79
78 nlm_write_sys_reg(nlm_sys_base, SYS_CPU_RESET, value); 80static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
81{
82 struct nlm_soc_info *nodep;
83 uint64_t syspcibase;
84 uint32_t syscoremask;
85 int core, n, cpu;
86
87 for (n = 0; n < NLM_NR_NODES; n++) {
88 syspcibase = nlm_get_sys_pcibase(n);
89 if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
90 break;
91
92 /* read cores in reset from SYS and account for boot cpu */
93 nlm_node_init(n);
94 nodep = nlm_get_node(n);
95 syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
96 if (n == 0)
97 syscoremask |= 1;
98
99 for (core = 0; core < NLM_CORES_PER_NODE; core++) {
100 /* see if the core exists */
101 if ((syscoremask & (1 << core)) == 0)
102 continue;
79 103
80 /* Poll for CPU to mark itself coherent */ 104 /* see if at least the first thread is enabled */
81 count = 100000; 105 cpu = (n * NLM_CORES_PER_NODE + core)
82 do { 106 * NLM_THREADS_PER_CORE;
83 value = nlm_read_sys_reg(nlm_sys_base, 107 if (!cpumask_test_cpu(cpu, wakeup_mask))
84 SYS_CPU_NONCOHERENT_MODE); 108 continue;
85 } while ((value & coremask) != 0 && count-- > 0);
86 109
87 if (count == 0) 110 /* wake up the core */
88 pr_err("Failed to enable core %d\n", core); 111 if (xlp_wakeup_core(nodep->sysbase, core))
112 nodep->coremask |= 1u << core;
113 else
114 pr_err("Failed to enable core %d\n", core);
115 }
89 } 116 }
90} 117}
91 118
92void xlp_wakeup_secondary_cpus(void) 119void xlp_wakeup_secondary_cpus()
93{ 120{
94 /* 121 /*
95 * In case of u-boot, the secondaries are in reset 122 * In case of u-boot, the secondaries are in reset
@@ -98,5 +125,5 @@ void xlp_wakeup_secondary_cpus(void)
98 xlp_boot_core0_siblings(); 125 xlp_boot_core0_siblings();
99 126
100 /* now get other cores out of reset */ 127 /* now get other cores out of reset */
101 xlp_enable_secondary_cores(); 128 xlp_enable_secondary_cores(&nlm_cpumask);
102} 129}
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile
index c287dea87570..05902bc6f080 100644
--- a/arch/mips/netlogic/xlr/Makefile
+++ b/arch/mips/netlogic/xlr/Makefile
@@ -1,2 +1,2 @@
1obj-y += setup.o platform.o platform-flash.o 1obj-y += fmn.o fmn-config.o setup.o platform.o platform-flash.o
2obj-$(CONFIG_SMP) += wakeup.o 2obj-$(CONFIG_SMP) += wakeup.o
diff --git a/arch/mips/netlogic/xlr/fmn-config.c b/arch/mips/netlogic/xlr/fmn-config.c
new file mode 100644
index 000000000000..bed2cffa1008
--- /dev/null
+++ b/arch/mips/netlogic/xlr/fmn-config.c
@@ -0,0 +1,290 @@
1/*
2 * Copyright (c) 2003-2012 Broadcom Corporation
3 * All Rights Reserved
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the Broadcom
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <asm/cpu-info.h>
36#include <linux/irq.h>
37#include <linux/interrupt.h>
38
39#include <asm/mipsregs.h>
40#include <asm/netlogic/xlr/fmn.h>
41#include <asm/netlogic/xlr/xlr.h>
42#include <asm/netlogic/common.h>
43#include <asm/netlogic/haldefs.h>
44
45struct xlr_board_fmn_config xlr_board_fmn_config;
46
47static void __maybe_unused print_credit_config(struct xlr_fmn_info *fmn_info)
48{
49 int bkt;
50
51 pr_info("Bucket size :\n");
52 pr_info("Station\t: Size\n");
53 for (bkt = 0; bkt < 16; bkt++)
54 pr_info(" %d %d %d %d %d %d %d %d\n",
55 xlr_board_fmn_config.bucket_size[(bkt * 8) + 0],
56 xlr_board_fmn_config.bucket_size[(bkt * 8) + 1],
57 xlr_board_fmn_config.bucket_size[(bkt * 8) + 2],
58 xlr_board_fmn_config.bucket_size[(bkt * 8) + 3],
59 xlr_board_fmn_config.bucket_size[(bkt * 8) + 4],
60 xlr_board_fmn_config.bucket_size[(bkt * 8) + 5],
61 xlr_board_fmn_config.bucket_size[(bkt * 8) + 6],
62 xlr_board_fmn_config.bucket_size[(bkt * 8) + 7]);
63 pr_info("\n");
64
65 pr_info("Credits distribution :\n");
66 pr_info("Station\t: Size\n");
67 for (bkt = 0; bkt < 16; bkt++)
68 pr_info(" %d %d %d %d %d %d %d %d\n",
69 fmn_info->credit_config[(bkt * 8) + 0],
70 fmn_info->credit_config[(bkt * 8) + 1],
71 fmn_info->credit_config[(bkt * 8) + 2],
72 fmn_info->credit_config[(bkt * 8) + 3],
73 fmn_info->credit_config[(bkt * 8) + 4],
74 fmn_info->credit_config[(bkt * 8) + 5],
75 fmn_info->credit_config[(bkt * 8) + 6],
76 fmn_info->credit_config[(bkt * 8) + 7]);
77 pr_info("\n");
78}
79
80static void check_credit_distribution(void)
81{
82 struct xlr_board_fmn_config *cfg = &xlr_board_fmn_config;
83 int bkt, n, total_credits, ncores;
84
85 ncores = hweight32(nlm_current_node()->coremask);
86 for (bkt = 0; bkt < 128; bkt++) {
87 total_credits = 0;
88 for (n = 0; n < ncores; n++)
89 total_credits += cfg->cpu[n].credit_config[bkt];
90 total_credits += cfg->gmac[0].credit_config[bkt];
91 total_credits += cfg->gmac[1].credit_config[bkt];
92 total_credits += cfg->dma.credit_config[bkt];
93 total_credits += cfg->cmp.credit_config[bkt];
94 total_credits += cfg->sae.credit_config[bkt];
95 total_credits += cfg->xgmac[0].credit_config[bkt];
96 total_credits += cfg->xgmac[1].credit_config[bkt];
97 if (total_credits > cfg->bucket_size[bkt])
98 pr_err("ERROR: Bucket %d: credits (%d) > size (%d)\n",
99 bkt, total_credits, cfg->bucket_size[bkt]);
100 }
101 pr_info("Credit distribution complete.\n");
102}
103
104/**
105 * Configure bucket size and credits for a device. 'size' is the size of
106 * the buckets for the device. This size is distributed among all the CPUs
107 * so that all of them can send messages to the device.
108 *
109 * The device is also given 'cpu_credits' to send messages to the CPUs
110 *
111 * @dev_info: FMN information structure for the device
112 * @start_stn_id: Starting station id of dev_info
113 * @end_stn_id: End station id of dev_info
114 * @num_buckets: Total number of buckets for dev_info
115 * @cpu_credits: Credits the device gets to send messages to each CPU
116 * @size: Size of each bucket in the device station
117 */
118static void setup_fmn_cc(struct xlr_fmn_info *dev_info, int start_stn_id,
119 int end_stn_id, int num_buckets, int cpu_credits, int size)
120{
121 int i, j, num_core, n, credits_per_cpu;
122 struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
123
124 num_core = hweight32(nlm_current_node()->coremask);
125 dev_info->num_buckets = num_buckets;
126 dev_info->start_stn_id = start_stn_id;
127 dev_info->end_stn_id = end_stn_id;
128
129 n = num_core;
130 if (num_core == 3)
131 n = 4;
132
133 for (i = start_stn_id; i <= end_stn_id; i++) {
134 xlr_board_fmn_config.bucket_size[i] = size;
135
136 /* Dividing device credits equally to cpus */
137 credits_per_cpu = size / n;
138 for (j = 0; j < num_core; j++)
139 cpu[j].credit_config[i] = credits_per_cpu;
140
141 /* credits left to distribute */
142 credits_per_cpu = size - (credits_per_cpu * num_core);
143
144 /* distribute the remaining credits (if any), among cores */
145 for (j = 0; (j < num_core) && (credits_per_cpu >= 4); j++) {
146 cpu[j].credit_config[i] += 4;
147 credits_per_cpu -= 4;
148 }
149 }
150
151 /* Distributing cpu per bucket credits to devices */
152 for (i = 0; i < num_core; i++) {
153 for (j = 0; j < FMN_CORE_NBUCKETS; j++)
154 dev_info->credit_config[(i * 8) + j] = cpu_credits;
155 }
156}
157
158/*
159 * Each core has 256 slots and 8 buckets,
160 * Configure the 8 buckets each with 32 slots
161 */
162static void setup_cpu_fmninfo(struct xlr_fmn_info *cpu, int num_core)
163{
164 int i, j;
165
166 for (i = 0; i < num_core; i++) {
167 cpu[i].start_stn_id = (8 * i);
168 cpu[i].end_stn_id = (8 * i + 8);
169
170 for (j = cpu[i].start_stn_id; j < cpu[i].end_stn_id; j++)
171 xlr_board_fmn_config.bucket_size[j] = 32;
172 }
173}
174
175/**
176 * Set up the FMN details for each device according to the devices
177 * available in each variant of XLR/XLS processor
178 */
179void xlr_board_info_setup(void)
180{
181 struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
182 struct xlr_fmn_info *gmac = xlr_board_fmn_config.gmac;
183 struct xlr_fmn_info *xgmac = xlr_board_fmn_config.xgmac;
184 struct xlr_fmn_info *dma = &xlr_board_fmn_config.dma;
185 struct xlr_fmn_info *cmp = &xlr_board_fmn_config.cmp;
186 struct xlr_fmn_info *sae = &xlr_board_fmn_config.sae;
187 int processor_id, num_core;
188
189 num_core = hweight32(nlm_current_node()->coremask);
190 processor_id = read_c0_prid() & 0xff00;
191
192 setup_cpu_fmninfo(cpu, num_core);
193 switch (processor_id) {
194 case PRID_IMP_NETLOGIC_XLS104:
195 case PRID_IMP_NETLOGIC_XLS108:
196 setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
197 FMN_STNID_GMAC0_TX3, 8, 16, 32);
198 setup_fmn_cc(dma, FMN_STNID_DMA_0,
199 FMN_STNID_DMA_3, 4, 8, 64);
200 setup_fmn_cc(sae, FMN_STNID_SEC0,
201 FMN_STNID_SEC1, 2, 8, 128);
202 break;
203
204 case PRID_IMP_NETLOGIC_XLS204:
205 case PRID_IMP_NETLOGIC_XLS208:
206 setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
207 FMN_STNID_GMAC0_TX3, 8, 16, 32);
208 setup_fmn_cc(dma, FMN_STNID_DMA_0,
209 FMN_STNID_DMA_3, 4, 8, 64);
210 setup_fmn_cc(sae, FMN_STNID_SEC0,
211 FMN_STNID_SEC1, 2, 8, 128);
212 break;
213
214 case PRID_IMP_NETLOGIC_XLS404:
215 case PRID_IMP_NETLOGIC_XLS408:
216 case PRID_IMP_NETLOGIC_XLS404B:
217 case PRID_IMP_NETLOGIC_XLS408B:
218 case PRID_IMP_NETLOGIC_XLS416B:
219 setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
220 FMN_STNID_GMAC0_TX3, 8, 8, 32);
221 setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
222 FMN_STNID_GMAC1_TX3, 8, 8, 32);
223 setup_fmn_cc(dma, FMN_STNID_DMA_0,
224 FMN_STNID_DMA_3, 4, 4, 64);
225 setup_fmn_cc(cmp, FMN_STNID_CMP_0,
226 FMN_STNID_CMP_3, 4, 4, 64);
227 setup_fmn_cc(sae, FMN_STNID_SEC0,
228 FMN_STNID_SEC1, 2, 8, 128);
229 break;
230
231 case PRID_IMP_NETLOGIC_XLS412B:
232 setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
233 FMN_STNID_GMAC0_TX3, 8, 8, 32);
234 setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
235 FMN_STNID_GMAC1_TX3, 8, 8, 32);
236 setup_fmn_cc(dma, FMN_STNID_DMA_0,
237 FMN_STNID_DMA_3, 4, 4, 64);
238 setup_fmn_cc(cmp, FMN_STNID_CMP_0,
239 FMN_STNID_CMP_3, 4, 4, 64);
240 setup_fmn_cc(sae, FMN_STNID_SEC0,
241 FMN_STNID_SEC1, 2, 8, 128);
242 break;
243
244 case PRID_IMP_NETLOGIC_XLR308:
245 case PRID_IMP_NETLOGIC_XLR308C:
246 setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
247 FMN_STNID_GMAC0_TX3, 8, 16, 32);
248 setup_fmn_cc(dma, FMN_STNID_DMA_0,
249 FMN_STNID_DMA_3, 4, 8, 64);
250 setup_fmn_cc(sae, FMN_STNID_SEC0,
251 FMN_STNID_SEC1, 2, 4, 128);
252 break;
253
254 case PRID_IMP_NETLOGIC_XLR532:
255 case PRID_IMP_NETLOGIC_XLR532C:
256 case PRID_IMP_NETLOGIC_XLR516C:
257 case PRID_IMP_NETLOGIC_XLR508C:
258 setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
259 FMN_STNID_GMAC0_TX3, 8, 16, 32);
260 setup_fmn_cc(dma, FMN_STNID_DMA_0,
261 FMN_STNID_DMA_3, 4, 8, 64);
262 setup_fmn_cc(sae, FMN_STNID_SEC0,
263 FMN_STNID_SEC1, 2, 4, 128);
264 break;
265
266 case PRID_IMP_NETLOGIC_XLR732:
267 case PRID_IMP_NETLOGIC_XLR716:
268 setup_fmn_cc(&xgmac[0], FMN_STNID_XMAC0_00_TX,
269 FMN_STNID_XMAC0_15_TX, 8, 0, 32);
270 setup_fmn_cc(&xgmac[1], FMN_STNID_XMAC1_00_TX,
271 FMN_STNID_XMAC1_15_TX, 8, 0, 32);
272 setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
273 FMN_STNID_GMAC0_TX3, 8, 24, 32);
274 setup_fmn_cc(dma, FMN_STNID_DMA_0,
275 FMN_STNID_DMA_3, 4, 4, 64);
276 setup_fmn_cc(sae, FMN_STNID_SEC0,
277 FMN_STNID_SEC1, 2, 4, 128);
278 break;
279 default:
280 pr_err("Unknown CPU with processor ID [%d]\n", processor_id);
281 pr_err("Error: Cannot initialize FMN credits.\n");
282 }
283
284 check_credit_distribution();
285
286#if 0 /* debug */
287 print_credit_config(&cpu[0]);
288 print_credit_config(&gmac[0]);
289#endif
290}
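After xlr_board_info_setup() runs, the per-device station ranges and bucket sizes live in the global xlr_board_fmn_config. A short, hypothetical example of how a driver could read that configuration back (the field names come from the code above; the function itself is illustrative only):

/* Illustrative only: dump the GMAC0 FMN parameters configured above. */
static void example_show_gmac0_fmn(void)
{
	struct xlr_fmn_info *gmac = &xlr_board_fmn_config.gmac[0];
	int stn;

	pr_info("GMAC0 FMN stations %d-%d, %d buckets\n",
		gmac->start_stn_id, gmac->end_stn_id, gmac->num_buckets);
	for (stn = gmac->start_stn_id; stn <= gmac->end_stn_id; stn++)
		pr_info("  bucket %d size %d\n", stn,
			xlr_board_fmn_config.bucket_size[stn]);
}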
diff --git a/arch/mips/netlogic/xlr/fmn.c b/arch/mips/netlogic/xlr/fmn.c
new file mode 100644
index 000000000000..4d74f03de506
--- /dev/null
+++ b/arch/mips/netlogic/xlr/fmn.c
@@ -0,0 +1,204 @@
1/*
2 * Copyright (c) 2003-2012 Broadcom Corporation
3 * All Rights Reserved
4 *
5 * This software is available to you under a choice of one of two
6 * licenses. You may choose to be licensed under the terms of the GNU
7 * General Public License (GPL) Version 2, available from the file
8 * COPYING in the main directory of this source tree, or the Broadcom
9 * license below:
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 *
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in
19 * the documentation and/or other materials provided with the
20 * distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
24 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
29 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
31 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
32 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35#include <linux/kernel.h>
36#include <linux/irqreturn.h>
37#include <linux/irq.h>
38#include <linux/interrupt.h>
39
40#include <asm/mipsregs.h>
41#include <asm/netlogic/interrupt.h>
42#include <asm/netlogic/xlr/fmn.h>
43#include <asm/netlogic/common.h>
44
45#define COP2_CC_INIT_CPU_DEST(dest, conf) \
46do { \
47 nlm_write_c2_cc##dest(0, conf[(dest * 8) + 0]); \
48 nlm_write_c2_cc##dest(1, conf[(dest * 8) + 1]); \
49 nlm_write_c2_cc##dest(2, conf[(dest * 8) + 2]); \
50 nlm_write_c2_cc##dest(3, conf[(dest * 8) + 3]); \
51 nlm_write_c2_cc##dest(4, conf[(dest * 8) + 4]); \
52 nlm_write_c2_cc##dest(5, conf[(dest * 8) + 5]); \
53 nlm_write_c2_cc##dest(6, conf[(dest * 8) + 6]); \
54 nlm_write_c2_cc##dest(7, conf[(dest * 8) + 7]); \
55} while (0)
56
57struct fmn_message_handler {
58 void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *);
59 void *arg;
60} msg_handlers[128];
61
62/*
63 * FMN interrupt handler. We configure the FMN so that any messages in
64 * any of the CPU buckets will trigger an interrupt on the CPU.
65 * The message can be from any device on the FMN (like NAE/SAE/DMA).
66 * The source station id is used to figure out which of the registered
67 * handlers have to be called.
68 */
69static irqreturn_t fmn_message_handler(int irq, void *data)
70{
71 struct fmn_message_handler *hndlr;
72 int bucket, rv;
73 int size = 0, code = 0, src_stnid = 0;
74 struct nlm_fmn_msg msg;
75 uint32_t mflags, bkt_status;
76
77 mflags = nlm_cop2_enable();
78 /* Disable message ring interrupt */
79 nlm_fmn_setup_intr(irq, 0);
80 while (1) {
81 /* 8 bkts per core, [24:31] each bit represents one bucket
82 * Bit is Zero if bucket is not empty */
83 bkt_status = (nlm_read_c2_status() >> 24) & 0xff;
84 if (bkt_status == 0xff)
85 break;
86 for (bucket = 0; bucket < 8; bucket++) {
87 /* Continue on empty bucket */
88 if (bkt_status & (1 << bucket))
89 continue;
90 rv = nlm_fmn_receive(bucket, &size, &code, &src_stnid,
91 &msg);
92 if (rv != 0)
93 continue;
94
95 hndlr = &msg_handlers[src_stnid];
96 if (hndlr->action == NULL)
97 pr_warn("No msgring handler for stnid %d\n",
98 src_stnid);
99 else {
100 nlm_cop2_restore(mflags);
101 hndlr->action(bucket, src_stnid, size, code,
102 &msg, hndlr->arg);
103 mflags = nlm_cop2_enable();
104 }
105 }
106 };
107 /* Enable message ring intr, to any thread in core */
108 nlm_fmn_setup_intr(irq, (1 << nlm_threads_per_core) - 1);
109 nlm_cop2_restore(mflags);
110 return IRQ_HANDLED;
111}
112
113struct irqaction fmn_irqaction = {
114 .handler = fmn_message_handler,
115 .flags = IRQF_PERCPU,
116 .name = "fmn",
117};
118
119void xlr_percpu_fmn_init(void)
120{
121 struct xlr_fmn_info *cpu_fmn_info;
122 int *bucket_sizes;
123 uint32_t flags;
124 int id;
125
126 BUG_ON(nlm_thread_id() != 0);
127 id = nlm_core_id();
128
129 bucket_sizes = xlr_board_fmn_config.bucket_size;
130 cpu_fmn_info = &xlr_board_fmn_config.cpu[id];
131 flags = nlm_cop2_enable();
132
133 /* Setup bucket sizes for the core. */
134 nlm_write_c2_bucksize(0, bucket_sizes[id * 8 + 0]);
135 nlm_write_c2_bucksize(1, bucket_sizes[id * 8 + 1]);
136 nlm_write_c2_bucksize(2, bucket_sizes[id * 8 + 2]);
137 nlm_write_c2_bucksize(3, bucket_sizes[id * 8 + 3]);
138 nlm_write_c2_bucksize(4, bucket_sizes[id * 8 + 4]);
139 nlm_write_c2_bucksize(5, bucket_sizes[id * 8 + 5]);
140 nlm_write_c2_bucksize(6, bucket_sizes[id * 8 + 6]);
141 nlm_write_c2_bucksize(7, bucket_sizes[id * 8 + 7]);
142
143 /*
144 * For sending FMN messages, we need credits on the destination
145 * bucket. Program the credits this core has on the 128 possible
146 * destination buckets.
147 * We cannot use a loop here, because the first argument has
148 * to be a constant integer value.
149 */
150 COP2_CC_INIT_CPU_DEST(0, cpu_fmn_info->credit_config);
151 COP2_CC_INIT_CPU_DEST(1, cpu_fmn_info->credit_config);
152 COP2_CC_INIT_CPU_DEST(2, cpu_fmn_info->credit_config);
153 COP2_CC_INIT_CPU_DEST(3, cpu_fmn_info->credit_config);
154 COP2_CC_INIT_CPU_DEST(4, cpu_fmn_info->credit_config);
155 COP2_CC_INIT_CPU_DEST(5, cpu_fmn_info->credit_config);
156 COP2_CC_INIT_CPU_DEST(6, cpu_fmn_info->credit_config);
157 COP2_CC_INIT_CPU_DEST(7, cpu_fmn_info->credit_config);
158 COP2_CC_INIT_CPU_DEST(8, cpu_fmn_info->credit_config);
159 COP2_CC_INIT_CPU_DEST(9, cpu_fmn_info->credit_config);
160 COP2_CC_INIT_CPU_DEST(10, cpu_fmn_info->credit_config);
161 COP2_CC_INIT_CPU_DEST(11, cpu_fmn_info->credit_config);
162 COP2_CC_INIT_CPU_DEST(12, cpu_fmn_info->credit_config);
163 COP2_CC_INIT_CPU_DEST(13, cpu_fmn_info->credit_config);
164 COP2_CC_INIT_CPU_DEST(14, cpu_fmn_info->credit_config);
165 COP2_CC_INIT_CPU_DEST(15, cpu_fmn_info->credit_config);
166
167 /* enable FMN interrupts on this CPU */
168 nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
169 nlm_cop2_restore(flags);
170}
171
172
173/*
174 * Register a FMN message handler with respect to the source station id
175 * @stnid: source station id
176 * @action: Handler function pointer
177 */
178int nlm_register_fmn_handler(int start_stnid, int end_stnid,
179 void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *),
180 void *arg)
181{
182 int sstnid;
183
184 for (sstnid = start_stnid; sstnid <= end_stnid; sstnid++) {
185 msg_handlers[sstnid].arg = arg;
186 smp_wmb();
187 msg_handlers[sstnid].action = action;
188 }
189 pr_debug("Registered FMN msg handler for stnid %d-%d\n",
190 start_stnid, end_stnid);
191 return 0;
192}
193
194void nlm_setup_fmn_irq(void)
195{
196 uint32_t flags;
197
198 /* setup irq only once */
199 setup_irq(IRQ_FMN, &fmn_irqaction);
200
201 flags = nlm_cop2_enable();
202 nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
203 nlm_cop2_restore(flags);
204}
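A hedged usage sketch for the registration API above: the handler signature matches the hndlr->action(bucket, src_stnid, size, code, &msg, arg) call in fmn_message_handler(), and the station id range reuses the GMAC0 constants seen in fmn-config.c; the handler body and function names are hypothetical:

/* Illustrative only: a driver registering for GMAC0 station ids. */
static void example_gmac_msg(int bucket, int src_stnid, int size, int code,
			     struct nlm_fmn_msg *msg, void *arg)
{
	/* process the received message (e.g. an RX or free-back
	 * descriptor) using the driver context passed in arg */
}

static int example_gmac_fmn_init(void *priv)
{
	return nlm_register_fmn_handler(FMN_STNID_GMAC0, FMN_STNID_GMAC0_TX3,
					example_gmac_msg, priv);
}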
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index 81b1d311834f..4e7f49d3d5a8 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -49,16 +49,15 @@
49#include <asm/netlogic/xlr/iomap.h> 49#include <asm/netlogic/xlr/iomap.h>
50#include <asm/netlogic/xlr/pic.h> 50#include <asm/netlogic/xlr/pic.h>
51#include <asm/netlogic/xlr/gpio.h> 51#include <asm/netlogic/xlr/gpio.h>
52#include <asm/netlogic/xlr/fmn.h>
52 53
53uint64_t nlm_io_base = DEFAULT_NETLOGIC_IO_BASE; 54uint64_t nlm_io_base = DEFAULT_NETLOGIC_IO_BASE;
54uint64_t nlm_pic_base;
55struct psb_info nlm_prom_info; 55struct psb_info nlm_prom_info;
56 56
57unsigned long nlm_common_ebase = 0x0;
58
59/* default to uniprocessor */ 57/* default to uniprocessor */
60uint32_t nlm_coremask = 1, nlm_cpumask = 1; 58unsigned int nlm_threads_per_core = 1;
61int nlm_threads_per_core = 1; 59struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
60cpumask_t nlm_cpumask = CPU_MASK_CPU0;
62 61
63static void __init nlm_early_serial_setup(void) 62static void __init nlm_early_serial_setup(void)
64{ 63{
@@ -113,6 +112,12 @@ void __init prom_free_prom_memory(void)
113 /* Nothing yet */ 112 /* Nothing yet */
114} 113}
115 114
115void nlm_percpu_init(int hwcpuid)
116{
117 if (hwcpuid % 4 == 0)
118 xlr_percpu_fmn_init();
119}
120
116static void __init build_arcs_cmdline(int *argv) 121static void __init build_arcs_cmdline(int *argv)
117{ 122{
118 int i, remain, len; 123 int i, remain, len;
@@ -176,9 +181,19 @@ static void prom_add_memory(void)
176 } 181 }
177} 182}
178 183
184static void nlm_init_node(void)
185{
186 struct nlm_soc_info *nodep;
187
188 nodep = nlm_current_node();
189 nodep->picbase = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
190 nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
191 spin_lock_init(&nodep->piclock);
192}
193
179void __init prom_init(void) 194void __init prom_init(void)
180{ 195{
181 int *argv, *envp; /* passed as 32 bit ptrs */ 196 int i, *argv, *envp; /* passed as 32 bit ptrs */
182 struct psb_info *prom_infop; 197 struct psb_info *prom_infop;
183 198
184 /* truncate to 32 bit and sign extend all args */ 199 /* truncate to 32 bit and sign extend all args */
@@ -187,15 +202,19 @@ void __init prom_init(void)
187 prom_infop = (struct psb_info *)(long)(int)fw_arg3; 202 prom_infop = (struct psb_info *)(long)(int)fw_arg3;
188 203
189 nlm_prom_info = *prom_infop; 204 nlm_prom_info = *prom_infop;
190 nlm_pic_base = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET); 205 nlm_init_node();
191 206
192 nlm_early_serial_setup(); 207 nlm_early_serial_setup();
193 build_arcs_cmdline(argv); 208 build_arcs_cmdline(argv);
194 nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
195 prom_add_memory(); 209 prom_add_memory();
196 210
197#ifdef CONFIG_SMP 211#ifdef CONFIG_SMP
198 nlm_wakeup_secondary_cpus(nlm_prom_info.online_cpu_map); 212 for (i = 0; i < 32; i++)
213 if (nlm_prom_info.online_cpu_map & (1 << i))
214 cpumask_set_cpu(i, &nlm_cpumask);
215 nlm_wakeup_secondary_cpus();
199 register_smp_ops(&nlm_smp_ops); 216 register_smp_ops(&nlm_smp_ops);
200#endif 217#endif
218 xlr_board_info_setup();
219 xlr_percpu_fmn_init();
201} 220}
diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c
index db5d987d4881..3ebf7411d67b 100644
--- a/arch/mips/netlogic/xlr/wakeup.c
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -33,6 +33,7 @@
33 */ 33 */
34 34
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/delay.h>
36#include <linux/threads.h> 37#include <linux/threads.h>
37 38
38#include <asm/asm.h> 39#include <asm/asm.h>
@@ -50,18 +51,34 @@
50 51
51int __cpuinit xlr_wakeup_secondary_cpus(void) 52int __cpuinit xlr_wakeup_secondary_cpus(void)
52{ 53{
53 unsigned int i, boot_cpu; 54 struct nlm_soc_info *nodep;
55 unsigned int i, j, boot_cpu;
54 56
55 /* 57 /*
56 * In case of RMI boot, hit with NMI to get the cores 58 * In case of RMI boot, hit with NMI to get the cores
57 * from bootloader to linux code. 59 * from bootloader to linux code.
58 */ 60 */
61 nodep = nlm_get_node(0);
59 boot_cpu = hard_smp_processor_id(); 62 boot_cpu = hard_smp_processor_id();
60 nlm_set_nmi_handler(nlm_rmiboot_preboot); 63 nlm_set_nmi_handler(nlm_rmiboot_preboot);
61 for (i = 0; i < NR_CPUS; i++) { 64 for (i = 0; i < NR_CPUS; i++) {
62 if (i == boot_cpu || (nlm_cpumask & (1u << i)) == 0) 65 if (i == boot_cpu || !cpumask_test_cpu(i, &nlm_cpumask))
63 continue; 66 continue;
64 nlm_pic_send_ipi(nlm_pic_base, i, 1, 1); /* send NMI */ 67 nlm_pic_send_ipi(nodep->picbase, i, 1, 1); /* send NMI */
68 }
69
70 /* Fill up the coremask early */
71 nodep->coremask = 1;
72 for (i = 1; i < NLM_CORES_PER_NODE; i++) {
73 for (j = 1000000; j > 0; j--) {
74 if (nlm_cpu_ready[i * NLM_THREADS_PER_CORE])
75 break;
76 udelay(10);
77 }
78 if (j != 0)
79 nodep->coremask |= (1u << i);
80 else
81 pr_err("Failed to wakeup core %d\n", i);
65 } 82 }
66 83
67 return 0; 84 return 0;
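The nlm_get_node()/nlm_current_node() accessors used throughout this series are defined in a header that is not part of this diff. Assuming they simply index the nlm_nodes[] array declared in the setup.c changes above, a plausible minimal form would be the following sketch (an assumption, not taken from the patch):

/* Assumed sketch, not from this patch: per-node info accessors built on
 * the nlm_nodes[] array and the nlm_nodeid() helper used in irq.c. */
static inline struct nlm_soc_info *nlm_get_node(int node)
{
	return &nlm_nodes[node];
}

static inline struct nlm_soc_info *nlm_current_node(void)
{
	return nlm_get_node(nlm_nodeid());
}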