path: root/arch/mips/cavium-octeon
author	Michal Marek <mmarek@suse.cz>	2010-10-12 09:09:06 -0400
committer	Michal Marek <mmarek@suse.cz>	2010-10-12 09:09:06 -0400
commit	239060b93bb30a4ad55f1ecaa512464a035cc5ba (patch)
tree	77f79810e57d4fc24356eca0cd6db463e8994128 /arch/mips/cavium-octeon
parent	1408b15b98635a13bad2e2a50b3c2ae2ccdf625b (diff)
parent	e9203c988234aa512bd45ca32b52e21c7bbfc414 (diff)
Merge branch 'kbuild/rc-fixes' into kbuild/kconfig
We need to revert the temporary hack in 71ebc01, hence the merge.
Diffstat (limited to 'arch/mips/cavium-octeon')
-rw-r--r--	arch/mips/cavium-octeon/Makefile	3
-rw-r--r--	arch/mips/cavium-octeon/Platform	11
-rw-r--r--	arch/mips/cavium-octeon/cpu.c	6
-rw-r--r--	arch/mips/cavium-octeon/csrc-octeon.c	63
-rw-r--r--	arch/mips/cavium-octeon/dma-octeon.c	17
-rw-r--r--	arch/mips/cavium-octeon/octeon-irq.c	553
-rw-r--r--	arch/mips/cavium-octeon/octeon_boot.h	16
-rw-r--r--	arch/mips/cavium-octeon/serial.c	4
-rw-r--r--	arch/mips/cavium-octeon/setup.c	48
-rw-r--r--	arch/mips/cavium-octeon/smp.c	170
10 files changed, 535 insertions, 356 deletions
diff --git a/arch/mips/cavium-octeon/Makefile b/arch/mips/cavium-octeon/Makefile
index 3e9876317e61..19eb0434269f 100644
--- a/arch/mips/cavium-octeon/Makefile
+++ b/arch/mips/cavium-octeon/Makefile
@@ -12,7 +12,6 @@
 obj-y := cpu.o setup.o serial.o octeon-platform.o octeon-irq.o csrc-octeon.o
 obj-y += dma-octeon.o flash_setup.o
 obj-y += octeon-memcpy.o
+obj-y += executive/
 
 obj-$(CONFIG_SMP) += smp.o
-
-EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/cavium-octeon/Platform b/arch/mips/cavium-octeon/Platform
new file mode 100644
index 000000000000..1e43ccf1a792
--- /dev/null
+++ b/arch/mips/cavium-octeon/Platform
@@ -0,0 +1,11 @@
+#
+# Cavium Octeon
+#
+platform-$(CONFIG_CPU_CAVIUM_OCTEON)	+= cavium-octeon/
+cflags-$(CONFIG_CPU_CAVIUM_OCTEON)	+= \
+		-I$(srctree)/arch/mips/include/asm/mach-cavium-octeon
+ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
+load-$(CONFIG_CPU_CAVIUM_OCTEON)	+= 0xffffffff84100000
+else
+load-$(CONFIG_CPU_CAVIUM_OCTEON)	+= 0xffffffff81100000
+endif
diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c
index b6df5387e855..c664c8cc2b42 100644
--- a/arch/mips/cavium-octeon/cpu.c
+++ b/arch/mips/cavium-octeon/cpu.c
@@ -41,12 +41,8 @@ static int cnmips_cu2_call(struct notifier_block *nfb, unsigned long action,
 	return NOTIFY_OK;		/* Let default notifier send signals */
 }
 
-static struct notifier_block cnmips_cu2_notifier = {
-	.notifier_call = cnmips_cu2_call,
-};
-
 static int cnmips_cu2_setup(void)
 {
-	return register_cu2_notifier(&cnmips_cu2_notifier);
+	return cu2_notifier(cnmips_cu2_call, 0);
 }
 early_initcall(cnmips_cu2_setup);
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index 0bf4bbe04ae2..b6847c8e0ddd 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -53,7 +53,6 @@ static struct clocksource clocksource_mips = {
 unsigned long long notrace sched_clock(void)
 {
 	/* 64-bit arithmatic can overflow, so use 128-bit.  */
-#if (__GNUC__ < 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ <= 3))
 	u64 t1, t2, t3;
 	unsigned long long rv;
 	u64 mult = clocksource_mips.mult;
@@ -73,13 +72,6 @@ unsigned long long notrace sched_clock(void)
 	    : [cnt] "r" (cnt), [mult] "r" (mult), [shift] "r" (shift)
 	    : "hi", "lo");
 	return rv;
-#else
-	/* GCC > 4.3 do it the easy way. */
-	unsigned int __attribute__((mode(TI))) t;
-	t = read_c0_cvmcount();
-	t = t * clocksource_mips.mult;
-	return (unsigned long long)(t >> clocksource_mips.shift);
-#endif
 }
 
 void __init plat_time_init(void)
@@ -88,3 +80,58 @@ void __init plat_time_init(void)
 	clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
 	clocksource_register(&clocksource_mips);
 }
+
+static u64 octeon_udelay_factor;
+static u64 octeon_ndelay_factor;
+
+void __init octeon_setup_delays(void)
+{
+	octeon_udelay_factor = octeon_get_clock_rate() / 1000000;
+	/*
+	 * For __ndelay we divide by 2^16, so the factor is multiplied
+	 * by the same amount.
+	 */
+	octeon_ndelay_factor = (octeon_udelay_factor * 0x10000ull) / 1000ull;
+
+	preset_lpj = octeon_get_clock_rate() / HZ;
+}
+
+void __udelay(unsigned long us)
+{
+	u64 cur, end, inc;
+
+	cur = read_c0_cvmcount();
+
+	inc = us * octeon_udelay_factor;
+	end = cur + inc;
+
+	while (end > cur)
+		cur = read_c0_cvmcount();
+}
+EXPORT_SYMBOL(__udelay);
+
+void __ndelay(unsigned long ns)
+{
+	u64 cur, end, inc;
+
+	cur = read_c0_cvmcount();
+
+	inc = ((ns * octeon_ndelay_factor) >> 16);
+	end = cur + inc;
+
+	while (end > cur)
+		cur = read_c0_cvmcount();
+}
+EXPORT_SYMBOL(__ndelay);
+
+void __delay(unsigned long loops)
+{
+	u64 cur, end;
+
+	cur = read_c0_cvmcount();
+	end = cur + loops;
+
+	while (end > cur)
+		cur = read_c0_cvmcount();
+}
+EXPORT_SYMBOL(__delay);
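
Aside (not part of the patch): the delay factors added above are fixed-point. octeon_udelay_factor is cycles per microsecond, and octeon_ndelay_factor is cycles per nanosecond scaled by 2^16 so it stays an integer. A minimal standalone C sketch of the same arithmetic, assuming an illustrative 500 MHz clock (the real code reads octeon_get_clock_rate()):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	/* Assumed core clock; purely illustrative. */
    	uint64_t clock_hz = 500000000ull;

    	/* Cycles per microsecond: 500 at 500 MHz. */
    	uint64_t udelay_factor = clock_hz / 1000000;

    	/* Cycles per nanosecond in 16.16 fixed point: 0.5 * 2^16 = 32768. */
    	uint64_t ndelay_factor = (udelay_factor * 0x10000ull) / 1000ull;

    	/* __ndelay(1000) would then spin (1000 * 32768) >> 16 = 500 cycles. */
    	uint64_t cycles = (1000 * ndelay_factor) >> 16;

    	printf("%llu %llu %llu\n",
    	       (unsigned long long)udelay_factor,
    	       (unsigned long long)ndelay_factor,
    	       (unsigned long long)cycles);
    	return 0;
    }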
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index be531ec1f206..d22b5a2d64f4 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -99,13 +99,16 @@ dma_addr_t octeon_map_dma_mem(struct device *dev, void *ptr, size_t size)
 		panic("dma_map_single: "
 		      "Attempt to map illegal memory address 0x%llx\n",
 		      physical);
-	else if ((physical + size >=
-		 (4ull<<30) - (OCTEON_PCI_BAR1_HOLE_SIZE<<20))
-		 && physical < (4ull<<30))
-		pr_warning("dma_map_single: Warning: "
-			   "Mapping memory address that might "
-			   "conflict with devices 0x%llx-0x%llx\n",
-			   physical, physical+size-1);
+	else if (physical >= CVMX_PCIE_BAR1_PHYS_BASE &&
+		 physical + size < (CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_PHYS_SIZE)) {
+		result = physical - CVMX_PCIE_BAR1_PHYS_BASE + CVMX_PCIE_BAR1_RC_BASE;
+
+		if (((result+size-1) & dma_mask) != result+size-1)
+			panic("dma_map_single: Attempt to map address 0x%llx-0x%llx, which can't be accessed according to the dma mask 0x%llx\n",
+			      physical, physical+size-1, dma_mask);
+		goto done;
+	}
+
 	/* The 2nd 256MB is mapped at 256<<20 instead of 0x410000000 */
 	if ((physical >= 0x410000000ull) && physical < 0x420000000ull)
 		result = physical - 0x400000000ull;
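
Aside (not part of the patch): the new branch maps addresses inside the PCIe BAR1 window onto the RC base and panics when the result does not fit the device's DMA mask. A self-contained sketch of that range-and-mask check with invented stand-in constants (the real CVMX_PCIE_BAR1_* values come from the Octeon headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for CVMX_PCIE_BAR1_PHYS_BASE/SIZE/RC_BASE. */
    #define BAR1_PHYS_BASE 0xf0000000ull
    #define BAR1_PHYS_SIZE 0x10000000ull	/* a 256MB window */
    #define BAR1_RC_BASE   0x80000000ull

    static int map_range(uint64_t physical, uint64_t size, uint64_t dma_mask,
    		     uint64_t *result)
    {
    	if (physical >= BAR1_PHYS_BASE &&
    	    physical + size < BAR1_PHYS_BASE + BAR1_PHYS_SIZE) {
    		*result = physical - BAR1_PHYS_BASE + BAR1_RC_BASE;
    		/* The last byte of the mapping must be reachable. */
    		if (((*result + size - 1) & dma_mask) != *result + size - 1)
    			return -1;	/* the kernel code panics here */
    		return 1;		/* mapped via the BAR1 window */
    	}
    	return 0;			/* fall through to the other cases */
    }

    int main(void)
    {
    	uint64_t result;
    	int rc = map_range(0xf0001000ull, 0x1000, 0xffffffffull, &result);

    	printf("rc=%d result=0x%llx\n", rc, (unsigned long long)result);
    	return 0;
    }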
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index c424cd158dc6..ce7500cdf5b7 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -3,15 +3,13 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2008 Cavium Networks
+ * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
  */
 #include <linux/irq.h>
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 
 #include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-pexp-defs.h>
-#include <asm/octeon/cvmx-npi-defs.h>
 
 static DEFINE_RAW_SPINLOCK(octeon_irq_ciu0_lock);
 static DEFINE_RAW_SPINLOCK(octeon_irq_ciu1_lock);
@@ -41,14 +39,14 @@ static void octeon_irq_core_ack(unsigned int irq)
 
 static void octeon_irq_core_eoi(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned int bit = irq - OCTEON_IRQ_SW0;
 	/*
 	 * If an IRQ is being processed while we are disabling it the
 	 * handler will attempt to unmask the interrupt after it has
 	 * been disabled.
 	 */
-	if (desc->status & IRQ_DISABLED)
+	if ((unlikely(desc->status & IRQ_DISABLED)))
 		return;
 	/*
 	 * We don't need to disable IRQs to make these atomic since
@@ -106,6 +104,29 @@ static struct irq_chip octeon_irq_chip_core = {
 
 static void octeon_irq_ciu0_ack(unsigned int irq)
 {
+	switch (irq) {
+	case OCTEON_IRQ_GMX_DRP0:
+	case OCTEON_IRQ_GMX_DRP1:
+	case OCTEON_IRQ_IPD_DRP:
+	case OCTEON_IRQ_KEY_ZERO:
+	case OCTEON_IRQ_TIMER0:
+	case OCTEON_IRQ_TIMER1:
+	case OCTEON_IRQ_TIMER2:
+	case OCTEON_IRQ_TIMER3:
+	{
+		int index = cvmx_get_core_num() * 2;
+		u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+		/*
+		 * CIU timer type interrupts must be acknowledged by
+		 * writing a '1' bit to their sum0 bit.
+		 */
+		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
+		break;
+	}
+	default:
+		break;
+	}
+
 	/*
 	 * In order to avoid any locking accessing the CIU, we
 	 * acknowledge CIU interrupts by disabling all of them.  This
@@ -130,8 +151,54 @@ static void octeon_irq_ciu0_eoi(unsigned int irq)
 	set_c0_status(0x100 << 2);
 }
 
+static int next_coreid_for_irq(struct irq_desc *desc)
+{
+
+#ifdef CONFIG_SMP
+	int coreid;
+	int weight = cpumask_weight(desc->affinity);
+
+	if (weight > 1) {
+		int cpu = smp_processor_id();
+		for (;;) {
+			cpu = cpumask_next(cpu, desc->affinity);
+			if (cpu >= nr_cpu_ids) {
+				cpu = -1;
+				continue;
+			} else if (cpumask_test_cpu(cpu, cpu_online_mask)) {
+				break;
+			}
+		}
+		coreid = octeon_coreid_for_cpu(cpu);
+	} else if (weight == 1) {
+		coreid = octeon_coreid_for_cpu(cpumask_first(desc->affinity));
+	} else {
+		coreid = cvmx_get_core_num();
+	}
+	return coreid;
+#else
+	return cvmx_get_core_num();
+#endif
+}
+
 static void octeon_irq_ciu0_enable(unsigned int irq)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
+	int coreid = next_coreid_for_irq(desc);
+	unsigned long flags;
+	uint64_t en0;
+	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
+
+	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
+	en0 = cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+	en0 |= 1ull << bit;
+	cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
+	cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu0_lock, flags);
+}
+
+static void octeon_irq_ciu0_enable_mbox(unsigned int irq)
+{
 	int coreid = cvmx_get_core_num();
 	unsigned long flags;
 	uint64_t en0;
@@ -167,63 +234,76 @@ static void octeon_irq_ciu0_disable(unsigned int irq)
 }
 
 /*
- * Enable the irq on the current core for chips that have the EN*_W1{S,C}
- * registers.
+ * Enable the irq on the next core in the affinity set for chips that
+ * have the EN*_W1{S,C} registers.
  */
 static void octeon_irq_ciu0_enable_v2(unsigned int irq)
 {
-	int index = cvmx_get_core_num() * 2;
+	int index;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+	if ((desc->status & IRQ_DISABLED) == 0) {
+		index = next_coreid_for_irq(desc) * 2;
+		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
+	}
 }
 
 /*
- * Disable the irq on the current core for chips that have the EN*_W1{S,C}
- * registers.
+ * Enable the irq on the current CPU for chips that
+ * have the EN*_W1{S,C} registers.
  */
-static void octeon_irq_ciu0_ack_v2(unsigned int irq)
+static void octeon_irq_ciu0_enable_mbox_v2(unsigned int irq)
 {
-	int index = cvmx_get_core_num() * 2;
+	int index;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
 
-	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+	index = cvmx_get_core_num() * 2;
+	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 }
 
 /*
- * CIU timer type interrupts must be acknoleged by writing a '1' bit
- * to their sum0 bit.
+ * Disable the irq on the current core for chips that have the EN*_W1{S,C}
+ * registers.
  */
-static void octeon_irq_ciu0_timer_ack(unsigned int irq)
+static void octeon_irq_ciu0_ack_v2(unsigned int irq)
 {
 	int index = cvmx_get_core_num() * 2;
-	uint64_t mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
-	cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
-}
+	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
 
-static void octeon_irq_ciu0_timer_ack_v1(unsigned int irq)
-{
-	octeon_irq_ciu0_timer_ack(irq);
-	octeon_irq_ciu0_ack(irq);
-}
+	switch (irq) {
+	case OCTEON_IRQ_GMX_DRP0:
+	case OCTEON_IRQ_GMX_DRP1:
+	case OCTEON_IRQ_IPD_DRP:
+	case OCTEON_IRQ_KEY_ZERO:
+	case OCTEON_IRQ_TIMER0:
+	case OCTEON_IRQ_TIMER1:
+	case OCTEON_IRQ_TIMER2:
+	case OCTEON_IRQ_TIMER3:
+		/*
+		 * CIU timer type interrupts must be acknowledged by
+		 * writing a '1' bit to their sum0 bit.
+		 */
+		cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
+		break;
+	default:
+		break;
+	}
 
-static void octeon_irq_ciu0_timer_ack_v2(unsigned int irq)
-{
-	octeon_irq_ciu0_timer_ack(irq);
-	octeon_irq_ciu0_ack_v2(irq);
+	cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
 }
 
 /*
  * Enable the irq on the current core for chips that have the EN*_W1{S,C}
  * registers.
  */
-static void octeon_irq_ciu0_eoi_v2(unsigned int irq)
+static void octeon_irq_ciu0_eoi_mbox_v2(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	int index = cvmx_get_core_num() * 2;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
 
-	if ((desc->status & IRQ_DISABLED) == 0)
+	if (likely((desc->status & IRQ_DISABLED) == 0))
 		cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
 }
 
@@ -246,18 +326,30 @@ static void octeon_irq_ciu0_disable_all_v2(unsigned int irq)
 static int octeon_irq_ciu0_set_affinity(unsigned int irq, const struct cpumask *dest)
 {
 	int cpu;
+	struct irq_desc *desc = irq_to_desc(irq);
+	int enable_one = (desc->status & IRQ_DISABLED) == 0;
 	unsigned long flags;
 	int bit = irq - OCTEON_IRQ_WORKQ0;	/* Bit 0-63 of EN0 */
 
+	/*
+	 * For non-v2 CIU, we will allow only single CPU affinity.
+	 * This removes the need to do locking in the .ack/.eoi
+	 * functions.
+	 */
+	if (cpumask_weight(dest) != 1)
+		return -EINVAL;
+
 	raw_spin_lock_irqsave(&octeon_irq_ciu0_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en0 =
 		    cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2));
-		if (cpumask_test_cpu(cpu, dest))
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = 0;
 			en0 |= 1ull << bit;
-		else
+		} else {
 			en0 &= ~(1ull << bit);
+		}
 		cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), en0);
 	}
 	/*
@@ -279,13 +371,18 @@ static int octeon_irq_ciu0_set_affinity_v2(unsigned int irq,
 {
 	int cpu;
 	int index;
+	struct irq_desc *desc = irq_to_desc(irq);
+	int enable_one = (desc->status & IRQ_DISABLED) == 0;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WORKQ0);
+
 	for_each_online_cpu(cpu) {
 		index = octeon_coreid_for_cpu(cpu) * 2;
-		if (cpumask_test_cpu(cpu, dest))
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = 0;
 			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
-		else
+		} else {
 			cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
+		}
 	}
 	return 0;
 }
@@ -298,8 +395,7 @@ static struct irq_chip octeon_irq_chip_ciu0_v2 = {
 	.name = "CIU0",
 	.enable = octeon_irq_ciu0_enable_v2,
 	.disable = octeon_irq_ciu0_disable_all_v2,
-	.ack = octeon_irq_ciu0_ack_v2,
-	.eoi = octeon_irq_ciu0_eoi_v2,
+	.eoi = octeon_irq_ciu0_enable_v2,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
 #endif
@@ -309,36 +405,27 @@ static struct irq_chip octeon_irq_chip_ciu0 = {
 	.name = "CIU0",
 	.enable = octeon_irq_ciu0_enable,
 	.disable = octeon_irq_ciu0_disable,
-	.ack = octeon_irq_ciu0_ack,
 	.eoi = octeon_irq_ciu0_eoi,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu0_set_affinity,
 #endif
 };
 
-static struct irq_chip octeon_irq_chip_ciu0_timer_v2 = {
-	.name = "CIU0-T",
-	.enable = octeon_irq_ciu0_enable_v2,
-	.disable = octeon_irq_ciu0_disable_all_v2,
-	.ack = octeon_irq_ciu0_timer_ack_v2,
-	.eoi = octeon_irq_ciu0_eoi_v2,
-#ifdef CONFIG_SMP
-	.set_affinity = octeon_irq_ciu0_set_affinity_v2,
-#endif
+/* The mbox versions don't do any affinity or round-robin. */
+static struct irq_chip octeon_irq_chip_ciu0_mbox_v2 = {
+	.name = "CIU0-M",
+	.enable = octeon_irq_ciu0_enable_mbox_v2,
+	.disable = octeon_irq_ciu0_disable,
+	.eoi = octeon_irq_ciu0_eoi_mbox_v2,
 };
 
-static struct irq_chip octeon_irq_chip_ciu0_timer = {
-	.name = "CIU0-T",
-	.enable = octeon_irq_ciu0_enable,
+static struct irq_chip octeon_irq_chip_ciu0_mbox = {
+	.name = "CIU0-M",
+	.enable = octeon_irq_ciu0_enable_mbox,
 	.disable = octeon_irq_ciu0_disable,
-	.ack = octeon_irq_ciu0_timer_ack_v1,
 	.eoi = octeon_irq_ciu0_eoi,
-#ifdef CONFIG_SMP
-	.set_affinity = octeon_irq_ciu0_set_affinity,
-#endif
 };
 
-
 static void octeon_irq_ciu1_ack(unsigned int irq)
 {
 	/*
@@ -365,10 +452,30 @@ static void octeon_irq_ciu1_eoi(unsigned int irq)
 
 static void octeon_irq_ciu1_enable(unsigned int irq)
 {
-	int coreid = cvmx_get_core_num();
+	struct irq_desc *desc = irq_to_desc(irq);
+	int coreid = next_coreid_for_irq(desc);
+	unsigned long flags;
+	uint64_t en1;
+	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
+
+	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
+	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+	en1 |= 1ull << bit;
+	cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
+	cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+	raw_spin_unlock_irqrestore(&octeon_irq_ciu1_lock, flags);
+}
+
+/*
+ * Watchdog interrupts are special.  They are associated with a single
+ * core, so we hardwire the affinity to that core.
+ */
+static void octeon_irq_ciu1_wd_enable(unsigned int irq)
+{
 	unsigned long flags;
 	uint64_t en1;
 	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
+	int coreid = bit;
 
 	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 	en1 = cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
@@ -405,36 +512,43 @@ static void octeon_irq_ciu1_disable(unsigned int irq)
  */
 static void octeon_irq_ciu1_enable_v2(unsigned int irq)
 {
-	int index = cvmx_get_core_num() * 2 + 1;
+	int index;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+	if ((desc->status & IRQ_DISABLED) == 0) {
+		index = next_coreid_for_irq(desc) * 2 + 1;
+		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+	}
 }
 
 /*
- * Disable the irq on the current core for chips that have the EN*_W1{S,C}
- * registers.
+ * Watchdog interrupts are special.  They are associated with a single
+ * core, so we hardwire the affinity to that core.
  */
-static void octeon_irq_ciu1_ack_v2(unsigned int irq)
+static void octeon_irq_ciu1_wd_enable_v2(unsigned int irq)
 {
-	int index = cvmx_get_core_num() * 2 + 1;
+	int index;
+	int coreid = irq - OCTEON_IRQ_WDOG0;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+	if ((desc->status & IRQ_DISABLED) == 0) {
+		index = coreid * 2 + 1;
+		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+	}
 }
 
 /*
- * Enable the irq on the current core for chips that have the EN*_W1{S,C}
+ * Disable the irq on the current core for chips that have the EN*_W1{S,C}
  * registers.
  */
-static void octeon_irq_ciu1_eoi_v2(unsigned int irq)
+static void octeon_irq_ciu1_ack_v2(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
 	int index = cvmx_get_core_num() * 2 + 1;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
 
-	if ((desc->status & IRQ_DISABLED) == 0)
-		cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
+	cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
 }
 
 /*
@@ -457,19 +571,30 @@ static int octeon_irq_ciu1_set_affinity(unsigned int irq,
 					    const struct cpumask *dest)
 {
 	int cpu;
+	struct irq_desc *desc = irq_to_desc(irq);
+	int enable_one = (desc->status & IRQ_DISABLED) == 0;
 	unsigned long flags;
 	int bit = irq - OCTEON_IRQ_WDOG0;	/* Bit 0-63 of EN1 */
 
+	/*
+	 * For non-v2 CIU, we will allow only single CPU affinity.
+	 * This removes the need to do locking in the .ack/.eoi
+	 * functions.
+	 */
+	if (cpumask_weight(dest) != 1)
+		return -EINVAL;
+
 	raw_spin_lock_irqsave(&octeon_irq_ciu1_lock, flags);
 	for_each_online_cpu(cpu) {
 		int coreid = octeon_coreid_for_cpu(cpu);
 		uint64_t en1 =
-		    cvmx_read_csr(CVMX_CIU_INTX_EN1
-				  (coreid * 2 + 1));
-		if (cpumask_test_cpu(cpu, dest))
+		    cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1));
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = 0;
 			en1 |= 1ull << bit;
-		else
+		} else {
 			en1 &= ~(1ull << bit);
+		}
 		cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), en1);
 	}
 	/*
@@ -491,13 +616,17 @@ static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
 {
 	int cpu;
 	int index;
+	struct irq_desc *desc = irq_to_desc(irq);
+	int enable_one = (desc->status & IRQ_DISABLED) == 0;
 	u64 mask = 1ull << (irq - OCTEON_IRQ_WDOG0);
 	for_each_online_cpu(cpu) {
 		index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-		if (cpumask_test_cpu(cpu, dest))
+		if (cpumask_test_cpu(cpu, dest) && enable_one) {
+			enable_one = 0;
 			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
-		else
+		} else {
 			cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
+		}
 	}
 	return 0;
 }
@@ -507,11 +636,10 @@ static int octeon_irq_ciu1_set_affinity_v2(unsigned int irq,
  * Newer octeon chips have support for lockless CIU operation.
  */
 static struct irq_chip octeon_irq_chip_ciu1_v2 = {
-	.name = "CIU0",
+	.name = "CIU1",
 	.enable = octeon_irq_ciu1_enable_v2,
 	.disable = octeon_irq_ciu1_disable_all_v2,
-	.ack = octeon_irq_ciu1_ack_v2,
-	.eoi = octeon_irq_ciu1_eoi_v2,
+	.eoi = octeon_irq_ciu1_enable_v2,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu1_set_affinity_v2,
 #endif
@@ -521,103 +649,36 @@ static struct irq_chip octeon_irq_chip_ciu1 = {
 	.name = "CIU1",
 	.enable = octeon_irq_ciu1_enable,
 	.disable = octeon_irq_ciu1_disable,
-	.ack = octeon_irq_ciu1_ack,
 	.eoi = octeon_irq_ciu1_eoi,
 #ifdef CONFIG_SMP
 	.set_affinity = octeon_irq_ciu1_set_affinity,
 #endif
 };
 
-#ifdef CONFIG_PCI_MSI
-
-static DEFINE_RAW_SPINLOCK(octeon_irq_msi_lock);
-
-static void octeon_irq_msi_ack(unsigned int irq)
-{
-	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
-		/* These chips have PCI */
-		cvmx_write_csr(CVMX_NPI_NPI_MSI_RCV,
-			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
-	} else {
-		/*
-		 * These chips have PCIe.  Thankfully the ACK doesn't
-		 * need any locking.
-		 */
-		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_RCV0,
-			       1ull << (irq - OCTEON_IRQ_MSI_BIT0));
-	}
-}
-
-static void octeon_irq_msi_eoi(unsigned int irq)
-{
-	/* Nothing needed */
-}
-
-static void octeon_irq_msi_enable(unsigned int irq)
-{
-	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
-		/*
-		 * Octeon PCI doesn't have the ability to mask/unmask
-		 * MSI interrupts individually.  Instead of
-		 * masking/unmasking them in groups of 16, we simple
-		 * assume MSI devices are well behaved.  MSI
-		 * interrupts are always enable and the ACK is assumed
-		 * to be enough.
-		 */
-	} else {
-		/* These chips have PCIe.  Note that we only support
-		 * the first 64 MSI interrupts.  Unfortunately all the
-		 * MSI enables are in the same register.  We use
-		 * MSI0's lock to control access to them all.
-		 */
-		uint64_t en;
-		unsigned long flags;
-		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
-		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		en |= 1ull << (irq - OCTEON_IRQ_MSI_BIT0);
-		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
-		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
-	}
-}
-
-static void octeon_irq_msi_disable(unsigned int irq)
-{
-	if (!octeon_has_feature(OCTEON_FEATURE_PCIE)) {
-		/* See comment in enable */
-	} else {
-		/*
-		 * These chips have PCIe.  Note that we only support
-		 * the first 64 MSI interrupts.  Unfortunately all the
-		 * MSI enables are in the same register.  We use
-		 * MSI0's lock to control access to them all.
-		 */
-		uint64_t en;
-		unsigned long flags;
-		raw_spin_lock_irqsave(&octeon_irq_msi_lock, flags);
-		en = cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		en &= ~(1ull << (irq - OCTEON_IRQ_MSI_BIT0));
-		cvmx_write_csr(CVMX_PEXP_NPEI_MSI_ENB0, en);
-		cvmx_read_csr(CVMX_PEXP_NPEI_MSI_ENB0);
-		raw_spin_unlock_irqrestore(&octeon_irq_msi_lock, flags);
-	}
-}
+static struct irq_chip octeon_irq_chip_ciu1_wd_v2 = {
+	.name = "CIU1-W",
+	.enable = octeon_irq_ciu1_wd_enable_v2,
+	.disable = octeon_irq_ciu1_disable_all_v2,
+	.eoi = octeon_irq_ciu1_wd_enable_v2,
+};
 
-static struct irq_chip octeon_irq_chip_msi = {
-	.name = "MSI",
-	.enable = octeon_irq_msi_enable,
-	.disable = octeon_irq_msi_disable,
-	.ack = octeon_irq_msi_ack,
-	.eoi = octeon_irq_msi_eoi,
+static struct irq_chip octeon_irq_chip_ciu1_wd = {
+	.name = "CIU1-W",
+	.enable = octeon_irq_ciu1_wd_enable,
+	.disable = octeon_irq_ciu1_disable,
+	.eoi = octeon_irq_ciu1_eoi,
 };
-#endif
+
+static void (*octeon_ciu0_ack)(unsigned int);
+static void (*octeon_ciu1_ack)(unsigned int);
 
 void __init arch_init_irq(void)
 {
-	int irq;
+	unsigned int irq;
 	struct irq_chip *chip0;
-	struct irq_chip *chip0_timer;
+	struct irq_chip *chip0_mbox;
 	struct irq_chip *chip1;
+	struct irq_chip *chip1_wd;
 
 #ifdef CONFIG_SMP
 	/* Set the default affinity to the boot cpu. */
@@ -631,13 +692,19 @@ void __init arch_init_irq(void)
 	if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
 	    OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X)) {
+		octeon_ciu0_ack = octeon_irq_ciu0_ack_v2;
+		octeon_ciu1_ack = octeon_irq_ciu1_ack_v2;
 		chip0 = &octeon_irq_chip_ciu0_v2;
-		chip0_timer = &octeon_irq_chip_ciu0_timer_v2;
+		chip0_mbox = &octeon_irq_chip_ciu0_mbox_v2;
 		chip1 = &octeon_irq_chip_ciu1_v2;
+		chip1_wd = &octeon_irq_chip_ciu1_wd_v2;
 	} else {
+		octeon_ciu0_ack = octeon_irq_ciu0_ack;
+		octeon_ciu1_ack = octeon_irq_ciu1_ack;
 		chip0 = &octeon_irq_chip_ciu0;
-		chip0_timer = &octeon_irq_chip_ciu0_timer;
+		chip0_mbox = &octeon_irq_chip_ciu0_mbox;
 		chip1 = &octeon_irq_chip_ciu1;
+		chip1_wd = &octeon_irq_chip_ciu1_wd;
 	}
 
 	/* 0 - 15 reserved for i8259 master and slave controller. */
@@ -651,34 +718,23 @@ void __init arch_init_irq(void)
 	/* 24 - 87 CIU_INT_SUM0 */
 	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
 		switch (irq) {
-		case OCTEON_IRQ_GMX_DRP0:
-		case OCTEON_IRQ_GMX_DRP1:
-		case OCTEON_IRQ_IPD_DRP:
-		case OCTEON_IRQ_KEY_ZERO:
-		case OCTEON_IRQ_TIMER0:
-		case OCTEON_IRQ_TIMER1:
-		case OCTEON_IRQ_TIMER2:
-		case OCTEON_IRQ_TIMER3:
-			set_irq_chip_and_handler(irq, chip0_timer, handle_percpu_irq);
+		case OCTEON_IRQ_MBOX0:
+		case OCTEON_IRQ_MBOX1:
+			set_irq_chip_and_handler(irq, chip0_mbox, handle_percpu_irq);
 			break;
 		default:
-			set_irq_chip_and_handler(irq, chip0, handle_percpu_irq);
+			set_irq_chip_and_handler(irq, chip0, handle_fasteoi_irq);
 			break;
 		}
 	}
 
 	/* 88 - 151 CIU_INT_SUM1 */
-	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_RESERVED151; irq++) {
-		set_irq_chip_and_handler(irq, chip1, handle_percpu_irq);
-	}
+	for (irq = OCTEON_IRQ_WDOG0; irq <= OCTEON_IRQ_WDOG15; irq++)
+		set_irq_chip_and_handler(irq, chip1_wd, handle_fasteoi_irq);
+
+	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED151; irq++)
+		set_irq_chip_and_handler(irq, chip1, handle_fasteoi_irq);
 
-#ifdef CONFIG_PCI_MSI
-	/* 152 - 215 PCI/PCIe MSI interrupts */
-	for (irq = OCTEON_IRQ_MSI_BIT0; irq <= OCTEON_IRQ_MSI_BIT63; irq++) {
-		set_irq_chip_and_handler(irq, &octeon_irq_chip_msi,
-					 handle_percpu_irq);
-	}
-#endif
 	set_c0_status(0x300 << 2);
 }
 
@@ -693,6 +749,7 @@ asmlinkage void plat_irq_dispatch(void)
 	unsigned long cop0_status;
 	uint64_t ciu_en;
 	uint64_t ciu_sum;
+	unsigned int irq;
 
 	while (1) {
 		cop0_cause = read_c0_cause();
@@ -704,18 +761,24 @@ asmlinkage void plat_irq_dispatch(void)
 			ciu_sum = cvmx_read_csr(ciu_sum0_address);
 			ciu_en = cvmx_read_csr(ciu_en0_address);
 			ciu_sum &= ciu_en;
-			if (likely(ciu_sum))
-				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1);
-			else
+			if (likely(ciu_sum)) {
+				irq = fls64(ciu_sum) + OCTEON_IRQ_WORKQ0 - 1;
+				octeon_ciu0_ack(irq);
+				do_IRQ(irq);
+			} else {
 				spurious_interrupt();
+			}
 		} else if (unlikely(cop0_cause & STATUSF_IP3)) {
 			ciu_sum = cvmx_read_csr(ciu_sum1_address);
 			ciu_en = cvmx_read_csr(ciu_en1_address);
 			ciu_sum &= ciu_en;
-			if (likely(ciu_sum))
-				do_IRQ(fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1);
-			else
+			if (likely(ciu_sum)) {
+				irq = fls64(ciu_sum) + OCTEON_IRQ_WDOG0 - 1;
+				octeon_ciu1_ack(irq);
+				do_IRQ(irq);
+			} else {
 				spurious_interrupt();
+			}
 		} else if (likely(cop0_cause)) {
 			do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
 		} else {
@@ -725,54 +788,84 @@ asmlinkage void plat_irq_dispatch(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static int is_irq_enabled_on_cpu(unsigned int irq, unsigned int cpu)
-{
-	unsigned int isset;
-	int coreid = octeon_coreid_for_cpu(cpu);
-	int bit = (irq < OCTEON_IRQ_WDOG0) ?
-		irq - OCTEON_IRQ_WORKQ0 : irq - OCTEON_IRQ_WDOG0;
-	if (irq < 64) {
-		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN0(coreid * 2)) &
-			(1ull << bit)) >> bit;
-	} else {
-		isset = (cvmx_read_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1)) &
-			(1ull << bit)) >> bit;
-	}
-	return isset;
-}
 
 void fixup_irqs(void)
 {
 	int irq;
+	struct irq_desc *desc;
+	cpumask_t new_affinity;
+	unsigned long flags;
+	int do_set_affinity;
+	int cpu;
+
+	cpu = smp_processor_id();
 
 	for (irq = OCTEON_IRQ_SW0; irq <= OCTEON_IRQ_TIMER; irq++)
 		octeon_irq_core_disable_local(irq);
 
-	for (irq = OCTEON_IRQ_WORKQ0; irq <= OCTEON_IRQ_GPIO15; irq++) {
-		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
-			/* ciu irq migrates to next cpu */
-			octeon_irq_chip_ciu0.disable(irq);
-			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
-		}
-	}
-
-#if 0
-	for (irq = OCTEON_IRQ_MBOX0; irq <= OCTEON_IRQ_MBOX1; irq++)
-		octeon_irq_mailbox_mask(irq);
-#endif
-	for (irq = OCTEON_IRQ_UART0; irq <= OCTEON_IRQ_BOOTDMA; irq++) {
-		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
-			/* ciu irq migrates to next cpu */
-			octeon_irq_chip_ciu0.disable(irq);
-			octeon_irq_ciu0_set_affinity(irq, &cpu_online_map);
-		}
-	}
+	for (irq = OCTEON_IRQ_WORKQ0; irq < OCTEON_IRQ_LAST; irq++) {
+		desc = irq_to_desc(irq);
+		switch (irq) {
+		case OCTEON_IRQ_MBOX0:
+		case OCTEON_IRQ_MBOX1:
+			/* The eoi function will disable them on this CPU. */
+			desc->chip->eoi(irq);
+			break;
+		case OCTEON_IRQ_WDOG0:
+		case OCTEON_IRQ_WDOG1:
+		case OCTEON_IRQ_WDOG2:
+		case OCTEON_IRQ_WDOG3:
+		case OCTEON_IRQ_WDOG4:
+		case OCTEON_IRQ_WDOG5:
+		case OCTEON_IRQ_WDOG6:
+		case OCTEON_IRQ_WDOG7:
+		case OCTEON_IRQ_WDOG8:
+		case OCTEON_IRQ_WDOG9:
+		case OCTEON_IRQ_WDOG10:
+		case OCTEON_IRQ_WDOG11:
+		case OCTEON_IRQ_WDOG12:
+		case OCTEON_IRQ_WDOG13:
+		case OCTEON_IRQ_WDOG14:
+		case OCTEON_IRQ_WDOG15:
+			/*
+			 * These have special per CPU semantics and
+			 * are handled in the watchdog driver.
+			 */
+			break;
+		default:
+			raw_spin_lock_irqsave(&desc->lock, flags);
+			/*
+			 * If this irq has an action, it is in use and
+			 * must be migrated if it has affinity to this
+			 * cpu.
+			 */
+			if (desc->action && cpumask_test_cpu(cpu, desc->affinity)) {
+				if (cpumask_weight(desc->affinity) > 1) {
+					/*
+					 * It has multi CPU affinity,
+					 * just remove this CPU from
+					 * the affinity set.
+					 */
+					cpumask_copy(&new_affinity, desc->affinity);
+					cpumask_clear_cpu(cpu, &new_affinity);
+				} else {
+					/*
+					 * Otherwise, put it on lowest
+					 * numbered online CPU.
+					 */
+					cpumask_clear(&new_affinity);
+					cpumask_set_cpu(cpumask_first(cpu_online_mask), &new_affinity);
+				}
+				do_set_affinity = 1;
+			} else {
+				do_set_affinity = 0;
+			}
+			raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+			if (do_set_affinity)
+				irq_set_affinity(irq, &new_affinity);
 
-	for (irq = OCTEON_IRQ_UART2; irq <= OCTEON_IRQ_RESERVED135; irq++) {
-		if (is_irq_enabled_on_cpu(irq, smp_processor_id())) {
-			/* ciu irq migrates to next cpu */
-			octeon_irq_chip_ciu1.disable(irq);
-			octeon_irq_ciu1_set_affinity(irq, &cpu_online_map);
+			break;
 		}
 	}
 }
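
Aside (not part of the patch): next_coreid_for_irq() above is what round-robins multi-CPU-affinity interrupts — each enable/eoi advances to the next online CPU in the affinity mask, wrapping when it runs past the last CPU id. A minimal userspace C sketch of the same wrap-around walk over a plain bitmask (all names invented; it assumes at least one bit is set below NR_CPUS):

    #include <stdio.h>

    #define NR_CPUS 8

    /* Pick the next set bit in 'affinity' after 'cur', wrapping around. */
    static int next_cpu_round_robin(int cur, unsigned int affinity)
    {
    	int cpu = cur;

    	for (;;) {
    		cpu++;
    		if (cpu >= NR_CPUS) {
    			cpu = -1;	/* wrap: restart before CPU 0 */
    			continue;
    		}
    		if (affinity & (1u << cpu))
    			return cpu;
    	}
    }

    int main(void)
    {
    	unsigned int affinity = 0x0a;	/* CPUs 1 and 3 */
    	int cpu = 1;
    	int i;

    	/* Successive calls alternate 3, 1, 3, 1, ... */
    	for (i = 0; i < 4; i++) {
    		cpu = next_cpu_round_robin(cpu, affinity);
    		printf("%d\n", cpu);
    	}
    	return 0;
    }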
diff --git a/arch/mips/cavium-octeon/octeon_boot.h b/arch/mips/cavium-octeon/octeon_boot.h
index 0f7f84accf9a..428864b2ba41 100644
--- a/arch/mips/cavium-octeon/octeon_boot.h
+++ b/arch/mips/cavium-octeon/octeon_boot.h
@@ -23,14 +23,16 @@
 #include <linux/types.h>
 
 struct boot_init_vector {
-	uint32_t stack_addr;
-	uint32_t code_addr;
+	/* First stage address - in ram instead of flash */
+	uint64_t code_addr;
+	/* Setup code for application, NOT application entry point */
 	uint32_t app_start_func_addr;
+	/* k0 is used for global data - needs to be passed to other cores */
 	uint32_t k0_val;
-	uint32_t flags;
-	uint32_t boot_info_addr;
+	/* Address of boot info block structure */
+	uint64_t boot_info_addr;
+	uint32_t flags;			/* flags */
 	uint32_t pad;
-	uint32_t pad2;
 };
 
 /* similar to bootloader's linux_app_boot_info but without global data */
@@ -40,7 +42,7 @@ struct linux_app_boot_info {
 	uint32_t avail_coremask;
 	uint32_t pci_console_active;
 	uint32_t icache_prefetch_disable;
-	uint32_t InitTLBStart_addr;
+	uint64_t InitTLBStart_addr;
 	uint32_t start_app_addr;
 	uint32_t cur_exception_base;
 	uint32_t no_mark_private_data;
@@ -58,7 +60,7 @@ struct linux_app_boot_info {
 
 #define LINUX_APP_BOOT_BLOCK_NAME "linux-app-boot"
 
-#define LABI_SIGNATURE 0xAABBCCDD
+#define LABI_SIGNATURE 0xAABBCC01
 
 /*  from uboot-headers/octeon_mem_map.h */
 #define EXCEPTION_BASE_INCR	(4 * 1024)
diff --git a/arch/mips/cavium-octeon/serial.c b/arch/mips/cavium-octeon/serial.c
index 83eac37a1ff9..638adab02842 100644
--- a/arch/mips/cavium-octeon/serial.c
+++ b/arch/mips/cavium-octeon/serial.c
@@ -18,11 +18,7 @@
 
 #include <asm/octeon/octeon.h>
 
-#ifdef CONFIG_GDB_CONSOLE
-#define DEBUG_UART 0
-#else
 #define DEBUG_UART 1
-#endif
 
 unsigned int octeon_serial_in(struct uart_port *up, int offset)
 {
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index d1b5ffaf0281..69197cb6c7ea 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -32,6 +32,7 @@
 #include <asm/time.h>
 
 #include <asm/octeon/octeon.h>
+#include <asm/octeon/pci-octeon.h>
 
 #ifdef CONFIG_CAVIUM_DECODE_RSL
 extern void cvmx_interrupt_rsl_decode(void);
@@ -578,9 +579,6 @@ void __init prom_init(void)
 	}
 
 	if (strstr(arcs_cmdline, "console=") == NULL) {
-#ifdef CONFIG_GDB_CONSOLE
-		strcat(arcs_cmdline, " console=gdb");
-#else
 #ifdef CONFIG_CAVIUM_OCTEON_2ND_KERNEL
 		strcat(arcs_cmdline, " console=ttyS0,115200");
 #else
@@ -589,7 +587,6 @@ void __init prom_init(void)
 	else
 		strcat(arcs_cmdline, " console=ttyS0,115200");
 #endif
-#endif
 	}
 
 	if (octeon_is_simulation()) {
@@ -598,13 +595,13 @@ void __init prom_init(void)
 		 * the filesystem.  Also specify the calibration delay
 		 * to avoid calculating it every time.
 		 */
-		strcat(arcs_cmdline, " rw root=1f00"
-		       " lpj=60176 slram=root,0x40000000,+1073741824");
+		strcat(arcs_cmdline, " rw root=1f00 slram=root,0x40000000,+1073741824");
 	}
 
 	mips_hpt_frequency = octeon_get_clock_rate();
 
 	octeon_init_cvmcount();
+	octeon_setup_delays();
 
 	_machine_restart = octeon_restart;
 	_machine_halt = octeon_halt;
@@ -613,6 +610,22 @@ void __init prom_init(void)
 	register_smp_ops(&octeon_smp_ops);
 }
 
+/* Exclude a single page from the regions obtained in plat_mem_setup. */
+static __init void memory_exclude_page(u64 addr, u64 *mem, u64 *size)
+{
+	if (addr > *mem && addr < *mem + *size) {
+		u64 inc = addr - *mem;
+		add_memory_region(*mem, inc, BOOT_MEM_RAM);
+		*mem += inc;
+		*size -= inc;
+	}
+
+	if (addr == *mem && *size > PAGE_SIZE) {
+		*mem += PAGE_SIZE;
+		*size -= PAGE_SIZE;
+	}
+}
+
 void __init plat_mem_setup(void)
 {
 	uint64_t mem_alloc_size;
@@ -663,12 +676,27 @@ void __init plat_mem_setup(void)
 					 CVMX_BOOTMEM_FLAG_NO_LOCKING);
 #endif
 		if (memory >= 0) {
+			u64 size = mem_alloc_size;
+
+			/*
+			 * exclude a page at the beginning and end of
+			 * the 256MB PCIe 'hole' so the kernel will not
+			 * try to allocate multi-page buffers that
+			 * span the discontinuity.
+			 */
+			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE,
+					    &memory, &size);
+			memory_exclude_page(CVMX_PCIE_BAR1_PHYS_BASE +
+					    CVMX_PCIE_BAR1_PHYS_SIZE,
+					    &memory, &size);
+
 			/*
 			 * This function automatically merges address
 			 * regions next to each other if they are
 			 * received in incrementing order.
 			 */
-			add_memory_region(memory, mem_alloc_size, BOOT_MEM_RAM);
+			if (size)
+				add_memory_region(memory, size, BOOT_MEM_RAM);
 			total += mem_alloc_size;
 		} else {
 			break;
@@ -691,7 +719,10 @@ void __init plat_mem_setup(void)
691 "cvmx_bootmem_phy_alloc\n"); 719 "cvmx_bootmem_phy_alloc\n");
692} 720}
693 721
694 722/*
723 * Emit one character to the boot UART. Exported for use by the
724 * watchdog timer.
725 */
695int prom_putchar(char c) 726int prom_putchar(char c)
696{ 727{
697 uint64_t lsrval; 728 uint64_t lsrval;
@@ -705,6 +736,7 @@ int prom_putchar(char c)
 	cvmx_write_csr(CVMX_MIO_UARTX_THR(octeon_uart), c & 0xffull);
 	return 1;
 }
+EXPORT_SYMBOL(prom_putchar);
 
 void prom_free_prom_memory(void)
 {
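
Aside (not part of the patch): memory_exclude_page() above splits a bootmem region so the single page at the given address is skipped. A small sketch of the splitting arithmetic with example numbers (PAGE_SIZE assumed 4 KiB; the add_memory_region() call is replaced by a printf):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ull

    static void exclude_page(uint64_t addr, uint64_t *mem, uint64_t *size)
    {
    	if (addr > *mem && addr < *mem + *size) {
    		uint64_t inc = addr - *mem;
    		/* Keep the part below 'addr'; the kernel registers it as RAM. */
    		printf("region 0x%llx +0x%llx\n",
    		       (unsigned long long)*mem, (unsigned long long)inc);
    		*mem += inc;
    		*size -= inc;
    	}
    	/* Now 'addr' is at the start: drop exactly one page. */
    	if (addr == *mem && *size > PAGE_SIZE) {
    		*mem += PAGE_SIZE;
    		*size -= PAGE_SIZE;
    	}
    }

    int main(void)
    {
    	/* 64MB region at 1GB, excluding the page at 1GB + 32MB. */
    	uint64_t mem = 0x40000000ull, size = 0x4000000ull;

    	exclude_page(0x42000000ull, &mem, &size);
    	/* The remainder resumes one page above the excluded address. */
    	printf("rest 0x%llx +0x%llx\n",
    	       (unsigned long long)mem, (unsigned long long)size);
    	return 0;
    }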
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 6d99b9d8887d..391cefe556b3 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -3,7 +3,7 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2008 Cavium Networks
+ * Copyright (C) 2004-2008, 2009, 2010 Cavium Networks
  */
 #include <linux/cpu.h>
 #include <linux/init.h>
@@ -27,7 +27,8 @@ volatile unsigned long octeon_processor_sp;
 volatile unsigned long octeon_processor_gp;
 
 #ifdef CONFIG_HOTPLUG_CPU
-static unsigned int InitTLBStart_addr;
+uint64_t octeon_bootloader_entry_addr;
+EXPORT_SYMBOL(octeon_bootloader_entry_addr);
 #endif
 
 static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
@@ -80,20 +81,13 @@ static inline void octeon_send_ipi_mask(const struct cpumask *mask,
 static void octeon_smp_hotplug_setup(void)
 {
 #ifdef CONFIG_HOTPLUG_CPU
-	uint32_t labi_signature;
+	struct linux_app_boot_info *labi;
 
-	labi_signature =
-		cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-					LABI_ADDR_IN_BOOTLOADER +
-					offsetof(struct linux_app_boot_info,
-						 labi_signature)));
-	if (labi_signature != LABI_SIGNATURE)
-		pr_err("The bootloader version on this board is incorrect\n");
-	InitTLBStart_addr =
-		cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-					LABI_ADDR_IN_BOOTLOADER +
-					offsetof(struct linux_app_boot_info,
-						 InitTLBStart_addr)));
+	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+	if (labi->labi_signature != LABI_SIGNATURE)
+		panic("The bootloader version on this board is incorrect.");
+
+	octeon_bootloader_entry_addr = labi->InitTLBStart_addr;
 #endif
 }
 
@@ -102,24 +96,47 @@ static void octeon_smp_setup(void)
 	const int coreid = cvmx_get_core_num();
 	int cpus;
 	int id;
-
 	int core_mask = octeon_get_boot_coremask();
+#ifdef CONFIG_HOTPLUG_CPU
+	unsigned int num_cores = cvmx_octeon_num_cores();
+#endif
+
+	/* The present CPUs are initially just the boot cpu (CPU 0). */
+	for (id = 0; id < NR_CPUS; id++) {
+		set_cpu_possible(id, id == 0);
+		set_cpu_present(id, id == 0);
+	}
 
-	cpus_clear(cpu_possible_map);
 	__cpu_number_map[coreid] = 0;
 	__cpu_logical_map[0] = coreid;
-	cpu_set(0, cpu_possible_map);
 
+	/* The present CPUs get the lowest CPU numbers. */
 	cpus = 1;
-	for (id = 0; id < 16; id++) {
+	for (id = 0; id < NR_CPUS; id++) {
 		if ((id != coreid) && (core_mask & (1 << id))) {
-			cpu_set(cpus, cpu_possible_map);
+			set_cpu_possible(cpus, true);
+			set_cpu_present(cpus, true);
 			__cpu_number_map[id] = cpus;
 			__cpu_logical_map[cpus] = id;
 			cpus++;
 		}
 	}
-	cpu_present_map = cpu_possible_map;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	/*
+	 * The possible CPUs are all those present on the chip.  We
+	 * will assign CPU numbers for possible cores as well.  Cores
+	 * are always consecutively numbered from 0.
+	 */
+	for (id = 0; id < num_cores && id < NR_CPUS; id++) {
+		if (!(core_mask & (1 << id))) {
+			set_cpu_possible(cpus, true);
+			__cpu_number_map[id] = cpus;
+			__cpu_logical_map[cpus] = id;
+			cpus++;
+		}
+	}
+#endif
 
 	octeon_smp_hotplug_setup();
 }
@@ -158,18 +175,21 @@ static void octeon_init_secondary(void)
 {
 	const int coreid = cvmx_get_core_num();
 	union cvmx_ciu_intx_sum0 interrupt_enable;
+	unsigned int sr;
 
 #ifdef CONFIG_HOTPLUG_CPU
-	unsigned int cur_exception_base;
+	struct linux_app_boot_info *labi;
 
-	cur_exception_base = cvmx_read64_uint32(
-		CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-			     LABI_ADDR_IN_BOOTLOADER +
-			     offsetof(struct linux_app_boot_info,
-				      cur_exception_base)));
-	/* cur_exception_base is incremented in bootloader after setting */
-	write_c0_ebase((unsigned int)(cur_exception_base - EXCEPTION_BASE_INCR));
+	labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+
+	if (labi->labi_signature != LABI_SIGNATURE)
+		panic("The bootloader version on this board is incorrect.");
 #endif
+
+	sr = set_c0_status(ST0_BEV);
+	write_c0_ebase((u32)ebase);
+	write_c0_status(sr);
+
 	octeon_check_cpu_bist();
 	octeon_init_cvmcount();
 	/*
@@ -276,8 +296,8 @@ static int octeon_cpu_disable(void)
 static void octeon_cpu_die(unsigned int cpu)
 {
 	int coreid = cpu_logical_map(cpu);
-	uint32_t avail_coremask;
-	struct cvmx_bootmem_named_block_desc *block_desc;
+	uint32_t mask, new_mask;
+	const struct cvmx_bootmem_named_block_desc *block_desc;
 
 	while (per_cpu(cpu_state, cpu) != CPU_DEAD)
 		cpu_relax();
@@ -286,52 +306,40 @@ static void octeon_cpu_die(unsigned int cpu)
 	 * This is a bit complicated strategics of getting/settig available
 	 * cores mask, copied from bootloader
 	 */
+
+	mask = 1 << coreid;
 	/* LINUX_APP_BOOT_BLOCK is initialized in bootoct binary */
 	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
 
 	if (!block_desc) {
-		avail_coremask =
-		    cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-						    LABI_ADDR_IN_BOOTLOADER +
-						    offsetof
-						    (struct linux_app_boot_info,
-						     avail_coremask)));
-	} else {		/* alternative, already initialized */
-		avail_coremask =
-		    cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-						    block_desc->base_addr +
-						    AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
-	}
+		struct linux_app_boot_info *labi;
 
-	avail_coremask |= 1 << coreid;
+		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
 
-	/* Setting avail_coremask for bootoct binary */
-	if (!block_desc) {
-		cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-						 LABI_ADDR_IN_BOOTLOADER +
-						 offsetof(struct linux_app_boot_info,
-							  avail_coremask)),
-				    avail_coremask);
-	} else {
-		cvmx_write64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-						 block_desc->base_addr +
-						 AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK),
-				    avail_coremask);
+		labi->avail_coremask |= mask;
+		new_mask = labi->avail_coremask;
+	} else {		/* alternative, already initialized */
+		uint32_t *p = (uint32_t *)PHYS_TO_XKSEG_CACHED(block_desc->base_addr +
+							       AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
+		*p |= mask;
+		new_mask = *p;
 	}
 
-	pr_info("Reset core %d. Available Coremask = %x\n", coreid,
-		avail_coremask);
+	pr_info("Reset core %d. Available Coremask = 0x%x \n", coreid, new_mask);
+	mb();
 	cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
 	cvmx_write_csr(CVMX_CIU_PP_RST, 0);
 }
 
 void play_dead(void)
 {
-	int coreid = cvmx_get_core_num();
+	int cpu = cpu_number_map(cvmx_get_core_num());
 
 	idle_task_exit();
 	octeon_processor_boot = 0xff;
-	per_cpu(cpu_state, coreid) = CPU_DEAD;
+	per_cpu(cpu_state, cpu) = CPU_DEAD;
+
+	mb();
 
 	while (1)	/* core will be reset here */
 		;
@@ -344,29 +352,27 @@ static void start_after_reset(void)
 	kernel_entry(0, 0, 0);	/* set a2 = 0 for secondary core */
 }
 
-int octeon_update_boot_vector(unsigned int cpu)
+static int octeon_update_boot_vector(unsigned int cpu)
 {
 
 	int coreid = cpu_logical_map(cpu);
-	unsigned int avail_coremask;
-	struct cvmx_bootmem_named_block_desc *block_desc;
+	uint32_t avail_coremask;
+	const struct cvmx_bootmem_named_block_desc *block_desc;
 	struct boot_init_vector *boot_vect =
-	    (struct boot_init_vector *) cvmx_phys_to_ptr(0x0 +
-						  BOOTLOADER_BOOT_VECTOR);
+	    (struct boot_init_vector *)PHYS_TO_XKSEG_CACHED(BOOTLOADER_BOOT_VECTOR);
 
 	block_desc = cvmx_bootmem_find_named_block(LINUX_APP_BOOT_BLOCK_NAME);
 
 	if (!block_desc) {
-		avail_coremask =
-		    cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-						    LABI_ADDR_IN_BOOTLOADER +
-						    offsetof(struct linux_app_boot_info,
-							     avail_coremask)));
+		struct linux_app_boot_info *labi;
+
+		labi = (struct linux_app_boot_info *)PHYS_TO_XKSEG_CACHED(LABI_ADDR_IN_BOOTLOADER);
+
+		avail_coremask = labi->avail_coremask;
+		labi->avail_coremask &= ~(1 << coreid);
 	} else {		/* alternative, already initialized */
-		avail_coremask =
-		    cvmx_read64_uint32(CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
-						    block_desc->base_addr +
-						    AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK));
+		avail_coremask = *(uint32_t *)PHYS_TO_XKSEG_CACHED(
+			block_desc->base_addr + AVAIL_COREMASK_OFFSET_IN_LINUX_APP_BOOT_BLOCK);
 	}
 
 	if (!(avail_coremask & (1 << coreid))) {
@@ -377,9 +383,9 @@ int octeon_update_boot_vector(unsigned int cpu)
 
 	boot_vect[coreid].app_start_func_addr =
 	    (uint32_t) (unsigned long) start_after_reset;
-	boot_vect[coreid].code_addr = InitTLBStart_addr;
+	boot_vect[coreid].code_addr = octeon_bootloader_entry_addr;
 
-	CVMX_SYNC;
+	mb();
 
 	cvmx_write_csr(CVMX_CIU_NMI, (1 << coreid) & avail_coremask);
 
@@ -405,17 +411,11 @@ static int __cpuinit octeon_cpu_callback(struct notifier_block *nfb,
 		return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata octeon_cpu_notifier = {
-	.notifier_call = octeon_cpu_callback,
-};
-
 static int __cpuinit register_cavium_notifier(void)
 {
-	register_hotcpu_notifier(&octeon_cpu_notifier);
-
+	hotcpu_notifier(octeon_cpu_callback, 0);
 	return 0;
 }
-
 late_initcall(register_cavium_notifier);
 
 #endif  /* CONFIG_HOTPLUG_CPU */
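
Aside (not part of the patch): the hotplug path above keeps a bootloader-owned avail_coremask — octeon_cpu_die() sets the dying core's bit (the core is parked and available again), and octeon_update_boot_vector() clears it before restarting the core. A toy C sketch of that bookkeeping on a bare bitmask, without the LABI structure:

    #include <stdio.h>

    int main(void)
    {
    	unsigned int avail_coremask = 0;	/* no cores parked yet */
    	int coreid = 3;

    	/* octeon_cpu_die(): core 3 is parked and available again. */
    	avail_coremask |= 1u << coreid;
    	printf("after die:  0x%x\n", avail_coremask);

    	/* octeon_update_boot_vector(): claim core 3 for booting. */
    	if (avail_coremask & (1u << coreid)) {
    		avail_coremask &= ~(1u << coreid);
    		printf("after boot: 0x%x\n", avail_coremask);
    	}
    	return 0;
    }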