author		Jayachandran C <jchandra@broadcom.com>		2012-10-31 08:01:39 -0400
committer	John Crispin <blogic@openwrt.org>		2012-11-09 05:37:19 -0500
commit		77ae798f5b736dfdc692b86b393d9699052ac77a
tree		040a68a1c544167364e4ca2b78e69179c397e4b2
parent		2a37b1ae443f20470a789b12a45cbc249c9e50a6
MIPS: Netlogic: Support for multi-chip configuration
Up to 4 Netlogic XLP SoCs can be connected over ICI links to form a
coherent multi-node system. Each SoC has its own set of on-chip
devices, including the PIC. To support this, add a per-SoC structure
and use it for the PIC and SYS block addresses instead of global
variables.

Signed-off-by: Jayachandran C <jchandra@broadcom.com>
Patchwork: http://patchwork.linux-mips.org/patch/4469
Signed-off-by: John Crispin <blogic@openwrt.org>
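The per-SoC structure itself is defined outside this directory. The sketch
below infers its shape from the fields this diff uses (picbase, piclock,
irqmask, ebase, coremask) plus the SYS block address mentioned above; the
field order, types, and accessor signatures are assumptions, not the
upstream header:

/* Inferred shape of the per-SoC node info; not the upstream definition. */
struct nlm_soc_info {
	unsigned long	coremask;	/* cores enabled on this SoC */
	unsigned long	ebase;		/* exception base for CPUs on this node */
	uint64_t	irqmask;	/* EIMR value programmed on each CPU */
	uint64_t	sysbase;	/* SYS block address (XLP) */
	uint64_t	picbase;	/* PIC block address for this node */
	spinlock_t	piclock;	/* serializes PIC register updates */
};

/* Accessors as used in the diff: info for a given node, and for the
 * node the calling CPU belongs to (hypothetical declarations). */
struct nlm_soc_info *nlm_get_node(int node);
struct nlm_soc_info *nlm_current_node(void);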
Diffstat (limited to 'arch/mips/netlogic/common')
-rw-r--r--	arch/mips/netlogic/common/irq.c		55
-rw-r--r--	arch/mips/netlogic/common/smp.c		47
2 files changed, 65 insertions(+), 37 deletions(-)
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index e52bfcbce093..4d6bd8f6ee29 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -70,33 +70,34 @@
  */
 
 /* Globals */
-static uint64_t nlm_irq_mask;
-static DEFINE_SPINLOCK(nlm_pic_lock);
-
 static void xlp_pic_enable(struct irq_data *d)
 {
 	unsigned long flags;
+	struct nlm_soc_info *nodep;
 	int irt;
 
+	nodep = nlm_current_node();
 	irt = nlm_irq_to_irt(d->irq);
 	if (irt == -1)
 		return;
-	spin_lock_irqsave(&nlm_pic_lock, flags);
-	nlm_pic_enable_irt(nlm_pic_base, irt);
-	spin_unlock_irqrestore(&nlm_pic_lock, flags);
+	spin_lock_irqsave(&nodep->piclock, flags);
+	nlm_pic_enable_irt(nodep->picbase, irt);
+	spin_unlock_irqrestore(&nodep->piclock, flags);
 }
 
 static void xlp_pic_disable(struct irq_data *d)
 {
+	struct nlm_soc_info *nodep;
 	unsigned long flags;
 	int irt;
 
+	nodep = nlm_current_node();
 	irt = nlm_irq_to_irt(d->irq);
 	if (irt == -1)
 		return;
-	spin_lock_irqsave(&nlm_pic_lock, flags);
-	nlm_pic_disable_irt(nlm_pic_base, irt);
-	spin_unlock_irqrestore(&nlm_pic_lock, flags);
+	spin_lock_irqsave(&nodep->piclock, flags);
+	nlm_pic_disable_irt(nodep->picbase, irt);
+	spin_unlock_irqrestore(&nodep->piclock, flags);
 }
 
 static void xlp_pic_mask_ack(struct irq_data *d)
@@ -109,8 +110,10 @@ static void xlp_pic_mask_ack(struct irq_data *d)
 static void xlp_pic_unmask(struct irq_data *d)
 {
 	void *hd = irq_data_get_irq_handler_data(d);
+	struct nlm_soc_info *nodep;
 	int irt;
 
+	nodep = nlm_current_node();
 	irt = nlm_irq_to_irt(d->irq);
 	if (irt == -1)
 		return;
@@ -120,7 +123,7 @@ static void xlp_pic_unmask(struct irq_data *d)
 		extra_ack(d);
 	}
 	/* Ack is a single write, no need to lock */
-	nlm_pic_ack(nlm_pic_base, irt);
+	nlm_pic_ack(nodep->picbase, irt);
 }
 
 static struct irq_chip xlp_pic = {
@@ -177,7 +180,11 @@ struct irq_chip nlm_cpu_intr = {
 void __init init_nlm_common_irqs(void)
 {
 	int i, irq, irt;
+	uint64_t irqmask;
+	struct nlm_soc_info *nodep;
 
+	nodep = nlm_current_node();
+	irqmask = (1ULL << IRQ_TIMER);
 	for (i = 0; i < PIC_IRT_FIRST_IRQ; i++)
 		irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq);
 
@@ -189,7 +196,7 @@ void __init init_nlm_common_irqs(void)
 			nlm_smp_function_ipi_handler);
 	irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
 			nlm_smp_resched_ipi_handler);
-	nlm_irq_mask |=
+	irqmask |=
 	    ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE));
 #endif
 
@@ -197,11 +204,11 @@ void __init init_nlm_common_irqs(void)
 		irt = nlm_irq_to_irt(irq);
 		if (irt == -1)
 			continue;
-		nlm_irq_mask |= (1ULL << irq);
-		nlm_pic_init_irt(nlm_pic_base, irt, irq, 0);
+		irqmask |= (1ULL << irq);
+		nlm_pic_init_irt(nodep->picbase, irt, irq, 0);
 	}
 
-	nlm_irq_mask |= (1ULL << IRQ_TIMER);
+	nodep->irqmask = irqmask;
 }
 
 void __init arch_init_irq(void)
@@ -209,29 +216,39 @@ void __init arch_init_irq(void)
 	/* Initialize the irq descriptors */
 	init_nlm_common_irqs();
 
-	write_c0_eimr(nlm_irq_mask);
+	write_c0_eimr(nlm_current_node()->irqmask);
 }
 
 void __cpuinit nlm_smp_irq_init(void)
 {
 	/* set interrupt mask for non-zero cpus */
-	write_c0_eimr(nlm_irq_mask);
+	write_c0_eimr(nlm_current_node()->irqmask);
 }
 
 asmlinkage void plat_irq_dispatch(void)
 {
 	uint64_t eirr;
-	int i;
+	int i, node;
 
+	node = nlm_nodeid();
 	eirr = read_c0_eirr() & read_c0_eimr();
 	if (eirr & (1 << IRQ_TIMER)) {
 		do_IRQ(IRQ_TIMER);
 		return;
 	}
-
+#ifdef CONFIG_SMP
+	if (eirr & IRQ_IPI_SMP_FUNCTION) {
+		do_IRQ(IRQ_IPI_SMP_FUNCTION);
+		return;
+	}
+	if (eirr & IRQ_IPI_SMP_RESCHEDULE) {
+		do_IRQ(IRQ_IPI_SMP_RESCHEDULE);
+		return;
+	}
+#endif
	i = __ilog2_u64(eirr);
 	if (i == -1)
 		return;
 
-	do_IRQ(i);
+	do_IRQ(nlm_irq_to_xirq(node, i));
 }
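In plat_irq_dispatch() above, the local EIRR bit number is now translated
into a node-extended IRQ before calling do_IRQ(). A minimal sketch of such
a mapping, assuming a flat per-node IRQ space; the NLM_IRQS_PER_NODE name
and value and the exact macro body are illustrative, not the upstream
definitions:

/* Assumed: one 64-bit EIRR's worth of IRQ numbers reserved per node. */
#define NLM_IRQS_PER_NODE	64
/* Hypothetical mapping: node-local IRQ -> globally unique IRQ number. */
#define nlm_irq_to_xirq(node, irq)	((node) * NLM_IRQS_PER_NODE + (irq))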
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index 4fe8992b291c..e40b467f6184 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -59,12 +59,17 @@
 
 void nlm_send_ipi_single(int logical_cpu, unsigned int action)
 {
-	int cpu = cpu_logical_map(logical_cpu);
+	int cpu, node;
+	uint64_t picbase;
+
+	cpu = cpu_logical_map(logical_cpu);
+	node = cpu / NLM_CPUS_PER_NODE;
+	picbase = nlm_get_node(node)->picbase;
 
 	if (action & SMP_CALL_FUNCTION)
-		nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_FUNCTION, 0);
+		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
 	if (action & SMP_RESCHEDULE_YOURSELF)
-		nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
+		nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
 }
 
 void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
@@ -96,11 +101,12 @@ void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
 void nlm_early_init_secondary(int cpu)
 {
 	change_c0_config(CONF_CM_CMASK, 0x3);
-	write_c0_ebase((uint32_t)nlm_common_ebase);
 #ifdef CONFIG_CPU_XLP
-	if (cpu % 4 == 0)
+	/* mmu init, once per core */
+	if (cpu % NLM_THREADS_PER_CORE == 0)
 		xlp_mmu_init();
 #endif
+	write_c0_ebase(nlm_current_node()->ebase);
 }
 
 /*
@@ -108,7 +114,7 @@ void nlm_early_init_secondary(int cpu)
  */
 static void __cpuinit nlm_init_secondary(void)
 {
-	current_cpu_data.core = hard_smp_processor_id() / 4;
+	current_cpu_data.core = hard_smp_processor_id() / NLM_THREADS_PER_CORE;
 	nlm_smp_irq_init();
 }
 
@@ -142,22 +148,22 @@ cpumask_t phys_cpu_present_map;
 
 void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
 {
-	unsigned long gp = (unsigned long)task_thread_info(idle);
-	unsigned long sp = (unsigned long)__KSTK_TOS(idle);
-	int cpu = cpu_logical_map(logical_cpu);
+	int cpu, node;
 
-	nlm_next_sp = sp;
-	nlm_next_gp = gp;
+	cpu = cpu_logical_map(logical_cpu);
+	node = cpu / NLM_CPUS_PER_NODE;
+	nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
+	nlm_next_gp = (unsigned long)task_thread_info(idle);
 
-	/* barrier */
+	/* barrier for sp/gp store above */
 	__sync();
-	nlm_pic_send_ipi(nlm_pic_base, cpu, 1, 1);
+	nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1);  /* NMI */
 }
 
 void __init nlm_smp_setup(void)
 {
 	unsigned int boot_cpu;
-	int num_cpus, i;
+	int num_cpus, i, ncore;
 
 	boot_cpu = hard_smp_processor_id();
 	cpumask_clear(&phys_cpu_present_map);
@@ -182,11 +188,16 @@ void __init nlm_smp_setup(void)
 		}
 	}
 
+	/* check with the cores we have woken up */
+	for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
+		ncore += hweight32(nlm_get_node(i)->coremask);
+
 	pr_info("Phys CPU present map: %lx, possible map %lx\n",
 		(unsigned long)cpumask_bits(&phys_cpu_present_map)[0],
 		(unsigned long)cpumask_bits(cpu_possible_mask)[0]);
 
-	pr_info("Detected %i Slave CPU(s)\n", num_cpus);
+	pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
+		nlm_threads_per_core, num_cpus);
 	nlm_set_nmi_handler(nlm_boot_secondary_cpus);
 }
 
@@ -196,7 +207,7 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
 	int threadmode, i, j;
 
 	core0_thr_mask = 0;
-	for (i = 0; i < 4; i++)
+	for (i = 0; i < NLM_THREADS_PER_CORE; i++)
 		if (cpumask_test_cpu(i, wakeup_mask))
 			core0_thr_mask |= (1 << i);
 	switch (core0_thr_mask) {
@@ -217,9 +228,9 @@ static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
 	}
 
 	/* Verify other cores CPU masks */
-	for (i = 0; i < NR_CPUS; i += 4) {
+	for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
 		core_thr_mask = 0;
-		for (j = 0; j < 4; j++)
+		for (j = 0; j < NLM_THREADS_PER_CORE; j++)
 			if (cpumask_test_cpu(i + j, wakeup_mask))
 				core_thr_mask |= (1 << j);
 		if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
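The recurring cpu-to-node and cpu-to-core arithmetic in smp.c above can be
checked with a standalone example. NLM_THREADS_PER_CORE is 4 (it replaces
the literal 4 in the diff); the NLM_CPUS_PER_NODE value of 32 (8 cores x 4
threads per SoC) is an assumption for illustration:

#include <stdio.h>

#define NLM_THREADS_PER_CORE	4	/* from the diff: replaces literal 4 */
#define NLM_CPUS_PER_NODE	32	/* assumed: 8 cores x 4 threads per SoC */

int main(void)
{
	int cpu = 37;	/* example hardware CPU id */

	/* node = cpu / NLM_CPUS_PER_NODE, as in nlm_send_ipi_single() */
	printf("cpu %d: node %d, core %d, thread %d\n", cpu,
	       cpu / NLM_CPUS_PER_NODE,	/* 37 / 32 -> node 1 */
	       cpu / NLM_THREADS_PER_CORE,	/* 37 / 4  -> global core 9 */
	       cpu % NLM_THREADS_PER_CORE);	/* 37 % 4  -> thread 1 */
	return 0;
}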