author:    Jayachandran C <jchandra@broadcom.com>  2012-10-31 08:01:39 -0400
committer: John Crispin <blogic@openwrt.org>       2012-11-09 05:37:19 -0500
commit:    77ae798f5b736dfdc692b86b393d9699052ac77a (patch)
tree:      040a68a1c544167364e4ca2b78e69179c397e4b2 /arch/mips/netlogic
parent:    2a37b1ae443f20470a789b12a45cbc249c9e50a6 (diff)
MIPS: Netlogic: Support for multi-chip configuration
Up to 4 Netlogic XLP SoCs can be connected over ICI links to form a coherent multi-node system. Each SoC has its own set of on-chip devices, including the PIC. To support this, add a per-SoC structure and use it for the PIC and SYS block addresses instead of using global variables.

Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Patchwork: http://patchwork.linux-mips.org/patch/4469
Signed-off-by: John Crispin <blogic@openwrt.org>
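For reference, here is a minimal sketch of the per-SoC structure this patch introduces, reconstructed only from the fields and accessors used in the diff below (sysbase, picbase, ebase, piclock, coremask, irqmask, nlm_get_node(), nlm_current_node(), nlm_nodes[]); the real definition lives in asm/netlogic/common.h and may differ in field order, types, and the exact form of the accessors.

#include <linux/spinlock.h>
#include <linux/types.h>

/* Sketch only: per-node SoC state as implied by this patch. */
struct nlm_soc_info {
	uint32_t	coremask;	/* cores woken up on this node */
	uint64_t	ebase;		/* exception base for secondary CPUs */
	uint64_t	irqmask;	/* EIMR mask programmed on each CPU */
	uint64_t	sysbase;	/* SYS block base (XLP only) */
	uint64_t	picbase;	/* per-node PIC base */
	spinlock_t	piclock;	/* serializes PIC register updates */
};

extern struct nlm_soc_info nlm_nodes[NLM_NR_NODES];

/* Accessors used throughout the patch (assumed shape). */
#define nlm_get_node(i)		(&nlm_nodes[i])
#define nlm_current_node()	(&nlm_nodes[nlm_nodeid()])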
Diffstat (limited to 'arch/mips/netlogic')
-rw-r--r--  arch/mips/netlogic/common/irq.c  | 55
-rw-r--r--  arch/mips/netlogic/common/smp.c  | 47
-rw-r--r--  arch/mips/netlogic/xlp/nlm_hal.c | 29
-rw-r--r--  arch/mips/netlogic/xlp/setup.c   | 17
-rw-r--r--  arch/mips/netlogic/xlp/wakeup.c  | 22
-rw-r--r--  arch/mips/netlogic/xlr/setup.c   | 20
-rw-r--r--  arch/mips/netlogic/xlr/wakeup.c  | 21
7 files changed, 134 insertions, 77 deletions
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index e52bfcbce093..4d6bd8f6ee29 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -70,33 +70,34 @@
  */
 
 /* Globals */
-static uint64_t nlm_irq_mask;
-static DEFINE_SPINLOCK(nlm_pic_lock);
-
 static void xlp_pic_enable(struct irq_data *d)
 {
 	unsigned long flags;
+	struct nlm_soc_info *nodep;
 	int irt;
 
+	nodep = nlm_current_node();
 	irt = nlm_irq_to_irt(d->irq);
 	if (irt == -1)
 		return;
-	spin_lock_irqsave(&nlm_pic_lock, flags);
-	nlm_pic_enable_irt(nlm_pic_base, irt);
-	spin_unlock_irqrestore(&nlm_pic_lock, flags);
+	spin_lock_irqsave(&nodep->piclock, flags);
+	nlm_pic_enable_irt(nodep->picbase, irt);
+	spin_unlock_irqrestore(&nodep->piclock, flags);
 }
 
 static void xlp_pic_disable(struct irq_data *d)
 {
+	struct nlm_soc_info *nodep;
 	unsigned long flags;
 	int irt;
 
+	nodep = nlm_current_node();
 	irt = nlm_irq_to_irt(d->irq);
 	if (irt == -1)
 		return;
-	spin_lock_irqsave(&nlm_pic_lock, flags);
-	nlm_pic_disable_irt(nlm_pic_base, irt);
-	spin_unlock_irqrestore(&nlm_pic_lock, flags);
+	spin_lock_irqsave(&nodep->piclock, flags);
+	nlm_pic_disable_irt(nodep->picbase, irt);
+	spin_unlock_irqrestore(&nodep->piclock, flags);
 }
 
 static void xlp_pic_mask_ack(struct irq_data *d)
@@ -109,8 +110,10 @@ static void xlp_pic_mask_ack(struct irq_data *d)
 static void xlp_pic_unmask(struct irq_data *d)
 {
 	void *hd = irq_data_get_irq_handler_data(d);
+	struct nlm_soc_info *nodep;
 	int irt;
 
+	nodep = nlm_current_node();
 	irt = nlm_irq_to_irt(d->irq);
 	if (irt == -1)
 		return;
@@ -120,7 +123,7 @@ static void xlp_pic_unmask(struct irq_data *d)
 		extra_ack(d);
 	}
 	/* Ack is a single write, no need to lock */
-	nlm_pic_ack(nlm_pic_base, irt);
+	nlm_pic_ack(nodep->picbase, irt);
 }
 
 static struct irq_chip xlp_pic = {
@@ -177,7 +180,11 @@ struct irq_chip nlm_cpu_intr = {
 void __init init_nlm_common_irqs(void)
 {
 	int i, irq, irt;
+	uint64_t irqmask;
+	struct nlm_soc_info *nodep;
 
+	nodep = nlm_current_node();
+	irqmask = (1ULL << IRQ_TIMER);
 	for (i = 0; i < PIC_IRT_FIRST_IRQ; i++)
 		irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq);
 
@@ -189,7 +196,7 @@ void __init init_nlm_common_irqs(void)
 			nlm_smp_function_ipi_handler);
 	irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
 			nlm_smp_resched_ipi_handler);
-	nlm_irq_mask |=
+	irqmask |=
 		((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE));
 #endif
 
@@ -197,11 +204,11 @@ void __init init_nlm_common_irqs(void)
 		irt = nlm_irq_to_irt(irq);
 		if (irt == -1)
 			continue;
-		nlm_irq_mask |= (1ULL << irq);
-		nlm_pic_init_irt(nlm_pic_base, irt, irq, 0);
+		irqmask |= (1ULL << irq);
+		nlm_pic_init_irt(nodep->picbase, irt, irq, 0);
 	}
 
-	nlm_irq_mask |= (1ULL << IRQ_TIMER);
+	nodep->irqmask = irqmask;
 }
 
 void __init arch_init_irq(void)
@@ -209,29 +216,39 @@ void __init arch_init_irq(void)
 	/* Initialize the irq descriptors */
 	init_nlm_common_irqs();
 
-	write_c0_eimr(nlm_irq_mask);
+	write_c0_eimr(nlm_current_node()->irqmask);
 }
 
 void __cpuinit nlm_smp_irq_init(void)
 {
 	/* set interrupt mask for non-zero cpus */
-	write_c0_eimr(nlm_irq_mask);
+	write_c0_eimr(nlm_current_node()->irqmask);
 }
 
 asmlinkage void plat_irq_dispatch(void)
 {
 	uint64_t eirr;
-	int i;
+	int i, node;
 
+	node = nlm_nodeid();
 	eirr = read_c0_eirr() & read_c0_eimr();
 	if (eirr & (1 << IRQ_TIMER)) {
 		do_IRQ(IRQ_TIMER);
 		return;
 	}
-
+#ifdef CONFIG_SMP
+	if (eirr & IRQ_IPI_SMP_FUNCTION) {
+		do_IRQ(IRQ_IPI_SMP_FUNCTION);
+		return;
+	}
+	if (eirr & IRQ_IPI_SMP_RESCHEDULE) {
+		do_IRQ(IRQ_IPI_SMP_RESCHEDULE);
+		return;
+	}
+#endif
 	i = __ilog2_u64(eirr);
 	if (i == -1)
 		return;
 
-	do_IRQ(i);
+	do_IRQ(nlm_irq_to_xirq(node, i));
 }
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 4fe8992b291c..e40b467f6184 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -59,12 +59,17 @@
 
 void nlm_send_ipi_single(int logical_cpu, unsigned int action)
 {
-	int cpu = cpu_logical_map(logical_cpu);
+	int cpu, node;
+	uint64_t picbase;
+
+	cpu = cpu_logical_map(logical_cpu);
+	node = cpu / NLM_CPUS_PER_NODE;
+	picbase = nlm_get_node(node)->picbase;
 
 	if (action & SMP_CALL_FUNCTION)
-		nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_FUNCTION, 0);
+		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
 	if (action & SMP_RESCHEDULE_YOURSELF)
-		nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
+		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
 }
 
 void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
@@ -96,11 +101,12 @@ void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
 void nlm_early_init_secondary(int cpu)
 {
 	change_c0_config(CONF_CM_CMASK, 0x3);
-	write_c0_ebase((uint32_t)nlm_common_ebase);
 #ifdef CONFIG_CPU_XLP
-	if (cpu % 4 == 0)
+	/* mmu init, once per core */
+	if (cpu % NLM_THREADS_PER_CORE == 0)
 		xlp_mmu_init();
 #endif
+	write_c0_ebase(nlm_current_node()->ebase);
 }
 
 /*
@@ -108,7 +114,7 @@ void nlm_early_init_secondary(int cpu)
  */
 static void __cpuinit nlm_init_secondary(void)
 {
-	current_cpu_data.core = hard_smp_processor_id() / 4;
+	current_cpu_data.core = hard_smp_processor_id() / NLM_THREADS_PER_CORE;
 	nlm_smp_irq_init();
 }
 
@@ -142,22 +148,22 @@ cpumask_t phys_cpu_present_map;
 
 void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 {
-	unsigned long gp = (unsigned long)task_thread_info(idle);
-	unsigned long sp = (unsigned long)__KSTK_TOS(idle);
-	int cpu = cpu_logical_map(logical_cpu);
+	int cpu, node;
 
-	nlm_next_sp = sp;
-	nlm_next_gp = gp;
+	cpu = cpu_logical_map(logical_cpu);
+	node = cpu / NLM_CPUS_PER_NODE;
+	nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
+	nlm_next_gp = (unsigned long)task_thread_info(idle);
 
-	/* barrier */
+	/* barrier for sp/gp store above */
 	__sync();
-	nlm_pic_send_ipi(nlm_pic_base, cpu, 1, 1);
+	nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1);	/* NMI */
 }
 
 void __init nlm_smp_setup(void)
 {
 	unsigned int boot_cpu;
-	int num_cpus, i;
+	int num_cpus, i, ncore;
 
 	boot_cpu = hard_smp_processor_id();
 	cpumask_clear(&phys_cpu_present_map);
@@ -182,11 +188,16 @@ void __init nlm_smp_setup(void)
 		}
 	}
 
+	/* check with the cores we have worken up */
+	for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
+		ncore += hweight32(nlm_get_node(i)->coremask);
+
 	pr_info("Phys CPU present map: %lx, possible map %lx\n",
 		(unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
 		(unsigned long)cpumask_bits(cpu_possible_mask)[0]);
 
-	pr_info("Detected %i Slave CPU(s)\n", num_cpus);
+	pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
+		nlm_threads_per_core, num_cpus);
 	nlm_set_nmi_handler(nlm_boot_secondary_cpus);
 }
 
@@ -196,7 +207,7 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
 	int threadmode, i, j;
 
 	core0_thr_mask = 0;
-	for (i = 0; i < 4; i++)
+	for (i = 0; i < NLM_THREADS_PER_CORE; i++)
 		if (cpumask_test_cpu(i, wakeup_mask))
 			core0_thr_mask |= (1 << i);
 	switch (core0_thr_mask) {
@@ -217,9 +228,9 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
 	}
 
 	/* Verify other cores CPU masks */
-	for (i = 0; i < NR_CPUS; i += 4) {
+	for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
 		core_thr_mask = 0;
-		for (j = 0; j < 4; j++)
+		for (j = 0; j < NLM_THREADS_PER_CORE; j++)
 			if (cpumask_test_cpu(i + j, wakeup_mask))
 				core_thr_mask |= (1 << j);
 		if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index 6c65ac701912..d3a26e740acb 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -40,23 +40,23 @@
 #include <asm/mipsregs.h>
 #include <asm/time.h>
 
+#include <asm/netlogic/common.h>
 #include <asm/netlogic/haldefs.h>
 #include <asm/netlogic/xlp-hal/iomap.h>
 #include <asm/netlogic/xlp-hal/xlp.h>
 #include <asm/netlogic/xlp-hal/pic.h>
 #include <asm/netlogic/xlp-hal/sys.h>
 
-/* These addresses are computed by the nlm_hal_init() */
-uint64_t nlm_io_base;
-uint64_t nlm_sys_base;
-uint64_t nlm_pic_base;
-
 /* Main initialization */
-void nlm_hal_init(void)
+void nlm_node_init(int node)
 {
-	nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
-	nlm_sys_base = nlm_get_sys_regbase(0); /* node 0 */
-	nlm_pic_base = nlm_get_pic_regbase(0); /* node 0 */
+	struct nlm_soc_info *nodep;
+
+	nodep = nlm_get_node(node);
+	nodep->sysbase = nlm_get_sys_regbase(node);
+	nodep->picbase = nlm_get_pic_regbase(node);
+	nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
+	spin_lock_init(&nodep->piclock);
 }
 
 int nlm_irq_to_irt(int irq)
@@ -138,14 +138,15 @@ int nlm_irt_to_irq(int irt)
 	}
 }
 
-unsigned int nlm_get_core_frequency(int core)
+unsigned int nlm_get_core_frequency(int node, int core)
 {
 	unsigned int pll_divf, pll_divr, dfs_div, ext_div;
 	unsigned int rstval, dfsval, denom;
-	uint64_t num;
+	uint64_t num, sysbase;
 
-	rstval = nlm_read_sys_reg(nlm_sys_base, SYS_POWER_ON_RESET_CFG);
-	dfsval = nlm_read_sys_reg(nlm_sys_base, SYS_CORE_DFS_DIV_VALUE);
+	sysbase = nlm_get_node(node)->sysbase;
+	rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
+	dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE);
 	pll_divf = ((rstval >> 10) & 0x7f) + 1;
 	pll_divr = ((rstval >> 8) & 0x3) + 1;
 	ext_div = ((rstval >> 30) & 0x3) + 1;
@@ -159,5 +160,5 @@ unsigned int nlm_get_core_frequency(int core)
 
 unsigned int nlm_get_cpu_frequency(void)
 {
-	return nlm_get_core_frequency(0);
+	return nlm_get_core_frequency(0, 0);
 }
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
index 9f8d360a246e..465b8d60463f 100644
--- a/arch/mips/netlogic/xlp/setup.c
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -52,17 +52,17 @@
 #include <asm/netlogic/xlp-hal/xlp.h>
 #include <asm/netlogic/xlp-hal/sys.h>
 
-unsigned long nlm_common_ebase = 0x0;
-
-/* default to uniprocessor */
-uint32_t nlm_coremask = 1;
+uint64_t nlm_io_base;
+struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 cpumask_t nlm_cpumask = CPU_MASK_CPU0;
-int nlm_threads_per_core = 1;
+unsigned int nlm_threads_per_core;
 extern u32 __dtb_start[];
 
 static void nlm_linux_exit(void)
 {
-	nlm_write_sys_reg(nlm_sys_base, SYS_CHIP_RESET, 1);
+	uint64_t sysbase = nlm_get_node(0)->sysbase;
+
+	nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
 	for ( ; ; )
 		cpu_wait();
 }
@@ -110,10 +110,9 @@ void xlp_mmu_init(void)
 
 void __init prom_init(void)
 {
+	nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
 	xlp_mmu_init();
-	nlm_hal_init();
-
-	nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
+	nlm_node_init(0);
 
 #ifdef CONFIG_SMP
 	cpumask_setall(&nlm_cpumask);
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
index 88ce38d096f0..cb9010642ac3 100644
--- a/arch/mips/netlogic/xlp/wakeup.c
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -79,32 +79,38 @@ static int xlp_wakeup_core(uint64_t sysbase, int core)
 
 static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
 {
-	uint64_t syspcibase, sysbase;
+	struct nlm_soc_info *nodep;
+	uint64_t syspcibase;
 	uint32_t syscoremask;
-	int core, n;
+	int core, n, cpu;
 
-	for (n = 0; n < 4; n++) {
+	for (n = 0; n < NLM_NR_NODES; n++) {
 		syspcibase = nlm_get_sys_pcibase(n);
 		if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
 			break;
 
 		/* read cores in reset from SYS and account for boot cpu */
-		sysbase = nlm_get_sys_regbase(n);
-		syscoremask = nlm_read_sys_reg(sysbase, SYS_CPU_RESET);
+		nlm_node_init(n);
+		nodep = nlm_get_node(n);
+		syscoremask = nlm_read_sys_reg(nodep->sysbase, SYS_CPU_RESET);
 		if (n == 0)
 			syscoremask |= 1;
 
-		for (core = 0; core < 8; core++) {
+		for (core = 0; core < NLM_CORES_PER_NODE; core++) {
 			/* see if the core exists */
 			if ((syscoremask & (1 << core)) == 0)
 				continue;
 
 			/* see if at least the first thread is enabled */
-			if (!cpumask_test_cpu((n * 8 + core) * 4, wakeup_mask))
+			cpu = (n * NLM_CORES_PER_NODE + core)
+						* NLM_THREADS_PER_CORE;
+			if (!cpumask_test_cpu(cpu, wakeup_mask))
 				continue;
 
 			/* wake up the core */
-			if (!xlp_wakeup_core(sysbase, core))
+			if (xlp_wakeup_core(nodep->sysbase, core))
+				nodep->coremask |= 1u << core;
+			else
 				pr_err("Failed to enable core %d\n", core);
 		}
 	}
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index 8fca6807cab8..696d4248b92f 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -51,14 +51,11 @@
 #include <asm/netlogic/xlr/gpio.h>
 
 uint64_t nlm_io_base = DEFAULT_NETLOGIC_IO_BASE;
-uint64_t nlm_pic_base;
 struct psb_info nlm_prom_info;
 
-unsigned long nlm_common_ebase = 0x0;
-
 /* default to uniprocessor */
-uint32_t nlm_coremask = 1;
-int nlm_threads_per_core = 1;
+unsigned int nlm_threads_per_core = 1;
+struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
 cpumask_t nlm_cpumask = CPU_MASK_CPU0;
 
 static void __init nlm_early_serial_setup(void)
@@ -177,6 +174,16 @@ static void prom_add_memory(void)
 	}
 }
 
+static void nlm_init_node(void)
+{
+	struct nlm_soc_info *nodep;
+
+	nodep = nlm_current_node();
+	nodep->picbase = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
+	nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
+	spin_lock_init(&nodep->piclock);
+}
+
 void __init prom_init(void)
 {
 	int i, *argv, *envp;	/* passed as 32 bit ptrs */
@@ -188,11 +195,10 @@ void __init prom_init(void)
 	prom_infop = (struct psb_info *)(long)(int)fw_arg3;
 
 	nlm_prom_info = *prom_infop;
-	nlm_pic_base = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
+	nlm_init_node();
 
 	nlm_early_serial_setup();
 	build_arcs_cmdline(argv);
-	nlm_common_ebase = read_c0_ebase() & (~((1 << 12) - 1));
 	prom_add_memory();
 
 #ifdef CONFIG_SMP
diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c
index 0878924da153..3ebf7411d67b 100644
--- a/arch/mips/netlogic/xlr/wakeup.c
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -33,6 +33,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/delay.h>
 #include <linux/threads.h>
 
 #include <asm/asm.h>
@@ -50,18 +51,34 @@
 
 int __cpuinit xlr_wakeup_secondary_cpus(void)
 {
-	unsigned int i, boot_cpu;
+	struct nlm_soc_info *nodep;
+	unsigned int i, j, boot_cpu;
 
 	/*
 	 * In case of RMI boot, hit with NMI to get the cores
 	 * from bootloader to linux code.
 	 */
+	nodep = nlm_get_node(0);
 	boot_cpu = hard_smp_processor_id();
 	nlm_set_nmi_handler(nlm_rmiboot_preboot);
 	for (i = 0; i < NR_CPUS; i++) {
 		if (i == boot_cpu || !cpumask_test_cpu(i, &nlm_cpumask))
 			continue;
-		nlm_pic_send_ipi(nlm_pic_base, i, 1, 1); /* send NMI */
+		nlm_pic_send_ipi(nodep->picbase, i, 1, 1); /* send NMI */
+	}
+
+	/* Fill up the coremask early */
+	nodep->coremask = 1;
+	for (i = 1; i < NLM_CORES_PER_NODE; i++) {
+		for (j = 1000000; j > 0; j--) {
+			if (nlm_cpu_ready[i * NLM_THREADS_PER_CORE])
+				break;
+			udelay(10);
+		}
+		if (j != 0)
+			nodep->coremask |= (1u << i);
+		else
+			pr_err("Failed to wakeup core %d\n", i);
 	}
 
 	return 0;