Diffstat (limited to 'arch')
 arch/blackfin/mach-bf561/Kconfig                   |   6
 arch/blackfin/mach-bf561/Makefile                  |   1
 arch/blackfin/mach-bf561/atomic.S                  | 919
 arch/blackfin/mach-bf561/include/mach/blackfin.h   |   4
 arch/blackfin/mach-bf561/include/mach/defBF561.h   |   3
 arch/blackfin/mach-bf561/include/mach/mem_map.h    | 120
 arch/blackfin/mach-bf561/include/mach/smp.h        |  22
 arch/blackfin/mach-bf561/secondary.S               | 215
 arch/blackfin/mach-bf561/smp.c                     | 172
 9 files changed, 1459 insertions(+), 3 deletions(-)
diff --git a/arch/blackfin/mach-bf561/Kconfig b/arch/blackfin/mach-bf561/Kconfig
index 3f4895450bea..5d56438cad2f 100644
--- a/arch/blackfin/mach-bf561/Kconfig
+++ b/arch/blackfin/mach-bf561/Kconfig
@@ -4,9 +4,9 @@ source "arch/blackfin/mach-bf561/boards/Kconfig"
4 | 4 | ||
5 | menu "BF561 Specific Configuration" | 5 | menu "BF561 Specific Configuration" |
6 | 6 | ||
7 | comment "Core B Support" | 7 | if (!SMP) |
8 | 8 | ||
9 | menu "Core B Support" | 9 | comment "Core B Support" |
10 | 10 | ||
11 | config BF561_COREB | 11 | config BF561_COREB |
12 | bool "Enable Core B support" | 12 | bool "Enable Core B support" |
@@ -25,7 +25,7 @@ config BF561_COREB_RESET
25 | 0 is set, and will reset PC to 0xff600000 when | 25 | 0 is set, and will reset PC to 0xff600000 when |
26 | COREB_SRAM_INIT is cleared. | 26 | COREB_SRAM_INIT is cleared. |
27 | 27 | ||
28 | endmenu | 28 | endif |
29 | 29 | ||
30 | comment "Interrupt Priority Assignment" | 30 | comment "Interrupt Priority Assignment" |
31 | 31 | ||
diff --git a/arch/blackfin/mach-bf561/Makefile b/arch/blackfin/mach-bf561/Makefile
index f39235a55783..c37f00c0edb5 100644
--- a/arch/blackfin/mach-bf561/Makefile
+++ b/arch/blackfin/mach-bf561/Makefile
@@ -7,3 +7,4 @@ extra-y := head.o
7 | obj-y := ints-priority.o dma.o | 7 | obj-y := ints-priority.o dma.o |
8 | 8 | ||
9 | obj-$(CONFIG_BF561_COREB) += coreb.o | 9 | obj-$(CONFIG_BF561_COREB) += coreb.o |
10 | obj-$(CONFIG_SMP) += smp.o secondary.o atomic.o | ||
diff --git a/arch/blackfin/mach-bf561/atomic.S b/arch/blackfin/mach-bf561/atomic.S
new file mode 100644
index 000000000000..9439bc6bd01f
--- /dev/null
+++ b/arch/blackfin/mach-bf561/atomic.S
@@ -0,0 +1,919 @@
1 | /* | ||
2 | * File: arch/blackfin/mach-bf561/atomic.S | ||
3 | * Author: Philippe Gerum <rpm@xenomai.org> | ||
4 | * | ||
5 | * Copyright 2007 Analog Devices Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, see the file COPYING, or write | ||
19 | * to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #include <linux/linkage.h> | ||
24 | #include <asm/blackfin.h> | ||
25 | #include <asm/cache.h> | ||
26 | #include <asm/asm-offsets.h> | ||
27 | #include <asm/rwlock.h> | ||
28 | #include <asm/cplb.h> | ||
29 | |||
30 | .text | ||
31 | |||
32 | .macro coreslot_loadaddr reg:req | ||
33 | \reg\().l = _corelock; | ||
34 | \reg\().h = _corelock; | ||
35 | .endm | ||
36 | |||
37 | /* | ||
38 | * r0 = address of atomic data to flush and invalidate (32bit). | ||
39 | * | ||
40 | * Disable interrupts and return the old mask. | ||
41 | * We assume that no atomic data can span cachelines. | ||
42 | * | ||
43 | * Clobbers: r2:0, p0 | ||
44 | */ | ||
45 | ENTRY(_get_core_lock) | ||
46 | r1 = -L1_CACHE_BYTES; | ||
47 | r1 = r0 & r1; | ||
48 | cli r0; | ||
49 | coreslot_loadaddr p0; | ||
50 | .Lretry_corelock: | ||
51 | testset (p0); | ||
52 | if cc jump .Ldone_corelock; | ||
53 | SSYNC(r2); | ||
54 | jump .Lretry_corelock | ||
55 | .Ldone_corelock: | ||
56 | p0 = r1; | ||
57 | CSYNC(r2); | ||
58 | flushinv[p0]; | ||
59 | SSYNC(r2); | ||
60 | rts; | ||
61 | ENDPROC(_get_core_lock) | ||
62 | |||
63 | /* | ||
64 | * r0 = address of atomic data in uncacheable memory region (32bit). | ||
65 | * | ||
66 | * Disable interrupts and return the old mask. | ||
67 | * | ||
68 | * Clobbers: r0, p0 | ||
69 | */ | ||
70 | ENTRY(_get_core_lock_noflush) | ||
71 | cli r0; | ||
72 | coreslot_loadaddr p0; | ||
73 | .Lretry_corelock_noflush: | ||
74 | testset (p0); | ||
75 | if cc jump .Ldone_corelock_noflush; | ||
76 | SSYNC(r2); | ||
77 | jump .Lretry_corelock_noflush | ||
78 | .Ldone_corelock_noflush: | ||
79 | rts; | ||
80 | ENDPROC(_get_core_lock_noflush) | ||
81 | |||
82 | /* | ||
83 | * r0 = interrupt mask to restore. | ||
84 | * r1 = address of atomic data to flush and invalidate (32bit). | ||
85 | * | ||
86 | * Interrupts are masked on entry (see _get_core_lock). | ||
87 | * Clobbers: r2:0, p0 | ||
88 | */ | ||
89 | ENTRY(_put_core_lock) | ||
90 | /* Write-through cache assumed, so no flush needed here. */ | ||
91 | coreslot_loadaddr p0; | ||
92 | r1 = 0; | ||
93 | [p0] = r1; | ||
94 | SSYNC(r2); | ||
95 | sti r0; | ||
96 | rts; | ||
97 | ENDPROC(_put_core_lock) | ||
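Everything in this file funnels through the corelock acquired above: TESTSET on an uncached word is the only cross-core atomic primitive the BF561 provides, so each helper masks interrupts, grabs the corelock, operates on the data, and releases. A minimal C model of the protocol (a sketch only -- testset() is a hypothetical intrinsic standing in for the TESTSET instruction; the real helpers are the asm routines above):

	static unsigned char corelock;		/* assumed to live in uncached L2 */

	static inline unsigned long get_core_lock(void)
	{
		unsigned long flags;

		local_irq_save(flags);		/* cli r0 */
		while (!testset(&corelock))	/* spin until we own the slot */
			SSYNC();
		return flags;			/* old IRQ mask */
	}

	static inline void put_core_lock(unsigned long flags)
	{
		corelock = 0;			/* release the slot */
		SSYNC();
		local_irq_restore(flags);	/* sti r0 */
	}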
98 | |||
99 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
100 | |||
101 | ENTRY(___raw_smp_mark_barrier_asm) | ||
102 | [--sp] = rets; | ||
103 | [--sp] = ( r7:5 ); | ||
104 | [--sp] = r0; | ||
105 | [--sp] = p1; | ||
106 | [--sp] = p0; | ||
107 | call _get_core_lock_noflush; | ||
108 | |||
109 | /* | ||
110 | * Calculate current core mask | ||
111 | */ | ||
112 | GET_CPUID(p1, r7); | ||
113 | r6 = 1; | ||
114 | r6 <<= r7; | ||
115 | |||
116 | /* | ||
117 | * Set the bits of the other cores in the barrier mask; don't change the current core's bit. | ||
118 | */ | ||
119 | p1.l = _barrier_mask; | ||
120 | p1.h = _barrier_mask; | ||
121 | r7 = [p1]; | ||
122 | r5 = r7 & r6; | ||
123 | r7 = ~r6; | ||
124 | cc = r5 == 0; | ||
125 | if cc jump 1f; | ||
126 | r7 = r7 | r6; | ||
127 | 1: | ||
128 | [p1] = r7; | ||
129 | SSYNC(r2); | ||
130 | |||
131 | call _put_core_lock; | ||
132 | p0 = [sp++]; | ||
133 | p1 = [sp++]; | ||
134 | r0 = [sp++]; | ||
135 | ( r7:5 ) = [sp++]; | ||
136 | rets = [sp++]; | ||
137 | rts; | ||
138 | ENDPROC(___raw_smp_mark_barrier_asm) | ||
139 | |||
140 | ENTRY(___raw_smp_check_barrier_asm) | ||
141 | [--sp] = rets; | ||
142 | [--sp] = ( r7:5 ); | ||
143 | [--sp] = r0; | ||
144 | [--sp] = p1; | ||
145 | [--sp] = p0; | ||
146 | call _get_core_lock_noflush; | ||
147 | |||
148 | /* | ||
149 | * Calculate current core mask | ||
150 | */ | ||
151 | GET_CPUID(p1, r7); | ||
152 | r6 = 1; | ||
153 | r6 <<= r7; | ||
154 | |||
155 | /* | ||
156 | * Clear current core bit in barrier mask if it is set. | ||
157 | */ | ||
158 | p1.l = _barrier_mask; | ||
159 | p1.h = _barrier_mask; | ||
160 | r7 = [p1]; | ||
161 | r5 = r7 & r6; | ||
162 | cc = r5 == 0; | ||
163 | if cc jump 1f; | ||
164 | r6 = ~r6; | ||
165 | r7 = r7 & r6; | ||
166 | [p1] = r7; | ||
167 | SSYNC(r2); | ||
168 | |||
169 | call _put_core_lock; | ||
170 | |||
171 | /* | ||
172 | * Invalidate the entire D-cache of current core. | ||
173 | */ | ||
174 | sp += -12; | ||
175 | call _resync_core_dcache | ||
176 | sp += 12; | ||
177 | jump 2f; | ||
178 | 1: | ||
179 | call _put_core_lock; | ||
180 | 2: | ||
181 | p0 = [sp++]; | ||
182 | p1 = [sp++]; | ||
183 | r0 = [sp++]; | ||
184 | ( r7:5 ) = [sp++]; | ||
185 | rets = [sp++]; | ||
186 | rts; | ||
187 | ENDPROC(___raw_smp_check_barrier_asm) | ||
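The mark/check pair above backs the SMP memory barriers on the cache-incoherent L1: marking flags the other core's D-cache as potentially stale, and checking clears the current core's flag, resyncing its D-cache when the flag was set. An assumed usage pattern from C (illustrative only; the wrapper names are taken to match the generic Blackfin barrier code):

	/* writer side */
	shared->field = value;
	smp_mark_barrier();	/* flag the other core as stale */

	/* reader side */
	smp_check_barrier();	/* invalidate our D-cache if flagged */
	value = shared->field;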
188 | |||
189 | /* | ||
190 | * r0 = irqflags | ||
191 | * r1 = address of atomic data | ||
192 | * | ||
193 | * Clobbers: r2:0, p1:0 | ||
194 | */ | ||
195 | _start_lock_coherent: | ||
196 | |||
197 | [--sp] = rets; | ||
198 | [--sp] = ( r7:6 ); | ||
199 | r7 = r0; | ||
200 | p1 = r1; | ||
201 | |||
202 | /* | ||
203 | * Determine whether the atomic data was previously | ||
204 | * owned by another CPU (=r6). | ||
205 | */ | ||
206 | GET_CPUID(p0, r2); | ||
207 | r1 = 1; | ||
208 | r1 <<= r2; | ||
209 | r2 = ~r1; | ||
210 | |||
211 | r1 = [p1]; | ||
212 | r1 >>= 28; /* CPU fingerprints are stored in the high nibble. */ | ||
213 | r6 = r1 & r2; | ||
214 | r1 = [p1]; | ||
215 | r1 <<= 4; | ||
216 | r1 >>= 4; | ||
217 | [p1] = r1; | ||
218 | |||
219 | /* | ||
220 | * Release the core lock now, but keep IRQs disabled while we are | ||
221 | * performing the remaining housekeeping chores for the current CPU. | ||
222 | */ | ||
223 | coreslot_loadaddr p0; | ||
224 | r1 = 0; | ||
225 | [p0] = r1; | ||
226 | |||
227 | /* | ||
228 | * If another CPU has owned the same atomic section before us, | ||
229 | * then our D-cached copy of the shared data protected by the | ||
230 | * current spin/write_lock may be obsolete. | ||
231 | */ | ||
232 | cc = r6 == 0; | ||
233 | if cc jump .Lcache_synced | ||
234 | |||
235 | /* | ||
236 | * Invalidate the entire D-cache of the current core. | ||
237 | */ | ||
238 | sp += -12; | ||
239 | call _resync_core_dcache | ||
240 | sp += 12; | ||
241 | |||
242 | .Lcache_synced: | ||
243 | SSYNC(r2); | ||
244 | sti r7; | ||
245 | ( r7:6 ) = [sp++]; | ||
246 | rets = [sp++]; | ||
247 | rts | ||
248 | |||
249 | /* | ||
250 | * r0 = irqflags | ||
251 | * r1 = address of atomic data | ||
252 | * | ||
253 | * Clobbers: r2:0, p1:0 | ||
254 | */ | ||
255 | _end_lock_coherent: | ||
256 | |||
257 | p1 = r1; | ||
258 | GET_CPUID(p0, r2); | ||
259 | r2 += 28; | ||
260 | r1 = 1; | ||
261 | r1 <<= r2; | ||
262 | r2 = [p1]; | ||
263 | r2 = r1 | r2; | ||
264 | [p1] = r2; | ||
265 | r1 = p1; | ||
266 | jump _put_core_lock; | ||
267 | |||
268 | #endif /* __ARCH_SYNC_CORE_DCACHE */ | ||
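For orientation, the lock-word layout that _start_lock_coherent and _end_lock_coherent assume: the top nibble holds one ownership fingerprint bit per core, and the low 28 bits hold the actual lock value. Expressed as illustrative C macros (not part of the patch):

	#define LOCK_VALUE(x)		((x) & 0x0fffffff)	/* low 28 bits */
	#define OWNER_FINGERPRINT(c)	(1u << (28 + (c)))	/* bits 31:28  */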
269 | |||
270 | /* | ||
271 | * r0 = &spinlock->lock | ||
272 | * | ||
273 | * Clobbers: r3:0, p1:0 | ||
274 | */ | ||
275 | ENTRY(___raw_spin_is_locked_asm) | ||
276 | p1 = r0; | ||
277 | [--sp] = rets; | ||
278 | call _get_core_lock; | ||
279 | r3 = [p1]; | ||
280 | cc = bittst( r3, 0 ); | ||
281 | r3 = cc; | ||
282 | r1 = p1; | ||
283 | call _put_core_lock; | ||
284 | rets = [sp++]; | ||
285 | r0 = r3; | ||
286 | rts; | ||
287 | ENDPROC(___raw_spin_is_locked_asm) | ||
288 | |||
289 | /* | ||
290 | * r0 = &spinlock->lock | ||
291 | * | ||
292 | * Clobbers: r3:0, p1:0 | ||
293 | */ | ||
294 | ENTRY(___raw_spin_lock_asm) | ||
295 | p1 = r0; | ||
296 | [--sp] = rets; | ||
297 | .Lretry_spinlock: | ||
298 | call _get_core_lock; | ||
299 | r1 = p1; | ||
300 | r2 = [p1]; | ||
301 | cc = bittst( r2, 0 ); | ||
302 | if cc jump .Lbusy_spinlock | ||
303 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
304 | r3 = p1; | ||
305 | bitset ( r2, 0 ); /* Raise the lock bit. */ | ||
306 | [p1] = r2; | ||
307 | call _start_lock_coherent | ||
308 | #else | ||
309 | r2 = 1; | ||
310 | [p1] = r2; | ||
311 | call _put_core_lock; | ||
312 | #endif | ||
313 | rets = [sp++]; | ||
314 | rts; | ||
315 | |||
316 | .Lbusy_spinlock: | ||
317 | /* We don't touch the atomic area if busy, so that the flush | ||
318 | behaves like a nop in _put_core_lock. */ | ||
319 | call _put_core_lock; | ||
320 | SSYNC(r2); | ||
321 | r0 = p1; | ||
322 | jump .Lretry_spinlock | ||
323 | ENDPROC(___raw_spin_lock_asm) | ||
324 | |||
325 | /* | ||
326 | * r0 = &spinlock->lock | ||
327 | * | ||
328 | * Clobbers: r3:0, p1:0 | ||
329 | */ | ||
330 | ENTRY(___raw_spin_trylock_asm) | ||
331 | p1 = r0; | ||
332 | [--sp] = rets; | ||
333 | call _get_core_lock; | ||
334 | r1 = p1; | ||
335 | r3 = [p1]; | ||
336 | cc = bittst( r3, 0 ); | ||
337 | if cc jump .Lfailed_trylock | ||
338 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
339 | bitset ( r3, 0 ); /* Raise the lock bit. */ | ||
340 | [p1] = r3; | ||
341 | call _start_lock_coherent | ||
342 | #else | ||
343 | r2 = 1; | ||
344 | [p1] = r2; | ||
345 | call _put_core_lock; | ||
346 | #endif | ||
347 | r0 = 1; | ||
348 | rets = [sp++]; | ||
349 | rts; | ||
350 | .Lfailed_trylock: | ||
351 | call _put_core_lock; | ||
352 | r0 = 0; | ||
353 | rets = [sp++]; | ||
354 | rts; | ||
355 | ENDPROC(___raw_spin_trylock_asm) | ||
356 | |||
357 | /* | ||
358 | * r0 = &spinlock->lock | ||
359 | * | ||
360 | * Clobbers: r2:0, p1:0 | ||
361 | */ | ||
362 | ENTRY(___raw_spin_unlock_asm) | ||
363 | p1 = r0; | ||
364 | [--sp] = rets; | ||
365 | call _get_core_lock; | ||
366 | r2 = [p1]; | ||
367 | bitclr ( r2, 0 ); | ||
368 | [p1] = r2; | ||
369 | r1 = p1; | ||
370 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
371 | call _end_lock_coherent | ||
372 | #else | ||
373 | call _put_core_lock; | ||
374 | #endif | ||
375 | rets = [sp++]; | ||
376 | rts; | ||
377 | ENDPROC(___raw_spin_unlock_asm) | ||
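The generic spinlock layer reaches these three routines through thin C wrappers, presumably in asm/spinlock.h; a sketch of the expected glue:

	static inline void __raw_spin_lock(raw_spinlock_t *lock)
	{
		__raw_spin_lock_asm(&lock->lock);
	}

	static inline int __raw_spin_trylock(raw_spinlock_t *lock)
	{
		return __raw_spin_trylock_asm(&lock->lock);
	}

	static inline void __raw_spin_unlock(raw_spinlock_t *lock)
	{
		__raw_spin_unlock_asm(&lock->lock);
	}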
378 | |||
379 | /* | ||
380 | * r0 = &rwlock->lock | ||
381 | * | ||
382 | * Clobbers: r2:0, p1:0 | ||
383 | */ | ||
384 | ENTRY(___raw_read_lock_asm) | ||
385 | p1 = r0; | ||
386 | [--sp] = rets; | ||
387 | call _get_core_lock; | ||
388 | .Lrdlock_try: | ||
389 | r1 = [p1]; | ||
390 | r1 += -1; | ||
391 | [p1] = r1; | ||
392 | cc = r1 < 0; | ||
393 | if cc jump .Lrdlock_failed | ||
394 | r1 = p1; | ||
395 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
396 | call _start_lock_coherent | ||
397 | #else | ||
398 | call _put_core_lock; | ||
399 | #endif | ||
400 | rets = [sp++]; | ||
401 | rts; | ||
402 | |||
403 | .Lrdlock_failed: | ||
404 | r1 += 1; | ||
405 | [p1] = r1; | ||
406 | .Lrdlock_wait: | ||
407 | r1 = p1; | ||
408 | call _put_core_lock; | ||
409 | SSYNC(r2); | ||
410 | r0 = p1; | ||
411 | call _get_core_lock; | ||
412 | r1 = [p1]; | ||
413 | cc = r1 < 2; | ||
414 | if cc jump .Lrdlock_wait; | ||
415 | jump .Lrdlock_try | ||
416 | ENDPROC(___raw_read_lock_asm) | ||
417 | |||
418 | /* | ||
419 | * r0 = &rwlock->lock | ||
420 | * | ||
421 | * Clobbers: r3:0, p1:0 | ||
422 | */ | ||
423 | ENTRY(___raw_read_trylock_asm) | ||
424 | p1 = r0; | ||
425 | [--sp] = rets; | ||
426 | call _get_core_lock; | ||
427 | r1 = [p1]; | ||
428 | cc = r1 <= 0; | ||
429 | if cc jump .Lfailed_tryrdlock; | ||
430 | r1 += -1; | ||
431 | [p1] = r1; | ||
432 | r1 = p1; | ||
433 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
434 | call _start_lock_coherent | ||
435 | #else | ||
436 | call _put_core_lock; | ||
437 | #endif | ||
438 | rets = [sp++]; | ||
439 | r0 = 1; | ||
440 | rts; | ||
441 | .Lfailed_tryrdlock: | ||
442 | r1 = p1; | ||
443 | call _put_core_lock; | ||
444 | rets = [sp++]; | ||
445 | r0 = 0; | ||
446 | rts; | ||
447 | ENDPROC(___raw_read_trylock_asm) | ||
448 | |||
449 | /* | ||
450 | * r0 = &rwlock->lock | ||
451 | * | ||
452 | * Note: processing guarded by a reader lock should not have | ||
453 | * any side effect on the other core's cached data, so we | ||
454 | * just release the core lock and exit (no _end_lock_coherent). | ||
455 | * | ||
456 | * Clobbers: r3:0, p1:0 | ||
457 | */ | ||
458 | ENTRY(___raw_read_unlock_asm) | ||
459 | p1 = r0; | ||
460 | [--sp] = rets; | ||
461 | call _get_core_lock; | ||
462 | r1 = [p1]; | ||
463 | r1 += 1; | ||
464 | [p1] = r1; | ||
465 | r1 = p1; | ||
466 | call _put_core_lock; | ||
467 | rets = [sp++]; | ||
468 | rts; | ||
469 | ENDPROC(___raw_read_unlock_asm) | ||
470 | |||
471 | /* | ||
472 | * r0 = &rwlock->lock | ||
473 | * | ||
474 | * Clobbers: r3:0, p1:0 | ||
475 | */ | ||
476 | ENTRY(___raw_write_lock_asm) | ||
477 | p1 = r0; | ||
478 | r3.l = lo(RW_LOCK_BIAS); | ||
479 | r3.h = hi(RW_LOCK_BIAS); | ||
480 | [--sp] = rets; | ||
481 | call _get_core_lock; | ||
482 | .Lwrlock_try: | ||
483 | r1 = [p1]; | ||
484 | r1 = r1 - r3; | ||
485 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
486 | r2 = r1; | ||
487 | r2 <<= 4; | ||
488 | r2 >>= 4; | ||
489 | cc = r2 == 0; | ||
490 | #else | ||
491 | cc = r1 == 0; | ||
492 | #endif | ||
493 | if !cc jump .Lwrlock_wait | ||
494 | [p1] = r1; | ||
495 | r1 = p1; | ||
496 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
497 | call _start_lock_coherent | ||
498 | #else | ||
499 | call _put_core_lock; | ||
500 | #endif | ||
501 | rets = [sp++]; | ||
502 | rts; | ||
503 | |||
504 | .Lwrlock_wait: | ||
505 | r1 = p1; | ||
506 | call _put_core_lock; | ||
507 | SSYNC(r2); | ||
508 | r0 = p1; | ||
509 | call _get_core_lock; | ||
510 | r1 = [p1]; | ||
511 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
512 | r1 <<= 4; | ||
513 | r1 >>= 4; | ||
514 | #endif | ||
515 | cc = r1 == r3; | ||
516 | if !cc jump .Lwrlock_wait; | ||
517 | jump .Lwrlock_try | ||
518 | ENDPROC(___raw_write_lock_asm) | ||
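A worked example of the bias arithmetic (assuming the usual RW_LOCK_BIAS of 0x01000000 from asm/rwlock.h): an unlocked rwlock holds BIAS. Each reader decrements by 1, so two readers leave BIAS - 2. A writer subtracts the whole BIAS at once and may proceed only when the (fingerprint-masked) result is exactly 0; any reader or writer in flight leaves a nonzero result and sends the writer to the wait loop until the word reads BIAS again.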
519 | |||
520 | /* | ||
521 | * r0 = &rwlock->lock | ||
522 | * | ||
523 | * Clobbers: r3:0, p1:0 | ||
524 | */ | ||
525 | ENTRY(___raw_write_trylock_asm) | ||
526 | p1 = r0; | ||
527 | [--sp] = rets; | ||
528 | call _get_core_lock; | ||
529 | r1 = [p1]; | ||
530 | r2.l = lo(RW_LOCK_BIAS); | ||
531 | r2.h = hi(RW_LOCK_BIAS); | ||
532 | cc = r1 == r2; | ||
533 | if !cc jump .Lfailed_trywrlock; | ||
534 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
535 | r1 >>= 28; | ||
536 | r1 <<= 28; | ||
537 | #else | ||
538 | r1 = 0; | ||
539 | #endif | ||
540 | [p1] = r1; | ||
541 | r1 = p1; | ||
542 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
543 | call _start_lock_coherent | ||
544 | #else | ||
545 | call _put_core_lock; | ||
546 | #endif | ||
547 | rets = [sp++]; | ||
548 | r0 = 1; | ||
549 | rts; | ||
550 | |||
551 | .Lfailed_trywrlock: | ||
552 | r1 = p1; | ||
553 | call _put_core_lock; | ||
554 | rets = [sp++]; | ||
555 | r0 = 0; | ||
556 | rts; | ||
557 | ENDPROC(___raw_write_trylock_asm) | ||
558 | |||
559 | /* | ||
560 | * r0 = &rwlock->lock | ||
561 | * | ||
562 | * Clobbers: r3:0, p1:0 | ||
563 | */ | ||
564 | ENTRY(___raw_write_unlock_asm) | ||
565 | p1 = r0; | ||
566 | r3.l = lo(RW_LOCK_BIAS); | ||
567 | r3.h = hi(RW_LOCK_BIAS); | ||
568 | [--sp] = rets; | ||
569 | call _get_core_lock; | ||
570 | r1 = [p1]; | ||
571 | r1 = r1 + r3; | ||
572 | [p1] = r1; | ||
573 | r1 = p1; | ||
574 | #ifdef __ARCH_SYNC_CORE_DCACHE | ||
575 | call _end_lock_coherent | ||
576 | #else | ||
577 | call _put_core_lock; | ||
578 | #endif | ||
579 | rets = [sp++]; | ||
580 | rts; | ||
581 | ENDPROC(___raw_write_unlock_asm) | ||
582 | |||
583 | /* | ||
584 | * r0 = ptr | ||
585 | * r1 = value | ||
586 | * | ||
587 | * Add a signed value to a 32bit word and return the new value atomically. | ||
588 | * Clobbers: r3:0, p1:0 | ||
589 | */ | ||
590 | ENTRY(___raw_atomic_update_asm) | ||
591 | p1 = r0; | ||
592 | r3 = r1; | ||
593 | [--sp] = rets; | ||
594 | call _get_core_lock; | ||
595 | r2 = [p1]; | ||
596 | r3 = r3 + r2; | ||
597 | [p1] = r3; | ||
598 | r1 = p1; | ||
599 | call _put_core_lock; | ||
600 | r0 = r3; | ||
601 | rets = [sp++]; | ||
602 | rts; | ||
603 | ENDPROC(___raw_atomic_update_asm) | ||
604 | |||
605 | /* | ||
606 | * r0 = ptr | ||
607 | * r1 = mask | ||
608 | * | ||
609 | * Clear the mask bits from a 32bit word and return the old 32bit value | ||
610 | * atomically. | ||
611 | * Clobbers: r3:0, p1:0 | ||
612 | */ | ||
613 | ENTRY(___raw_atomic_clear_asm) | ||
614 | p1 = r0; | ||
615 | r3 = ~r1; | ||
616 | [--sp] = rets; | ||
617 | call _get_core_lock; | ||
618 | r2 = [p1]; | ||
619 | r3 = r2 & r3; | ||
620 | [p1] = r3; | ||
621 | r3 = r2; | ||
622 | r1 = p1; | ||
623 | call _put_core_lock; | ||
624 | r0 = r3; | ||
625 | rets = [sp++]; | ||
626 | rts; | ||
627 | ENDPROC(___raw_atomic_clear_asm) | ||
628 | |||
629 | /* | ||
630 | * r0 = ptr | ||
631 | * r1 = mask | ||
632 | * | ||
633 | * Set the mask bits into a 32bit word and return the old 32bit value | ||
634 | * atomically. | ||
635 | * Clobbers: r3:0, p1:0 | ||
636 | */ | ||
637 | ENTRY(___raw_atomic_set_asm) | ||
638 | p1 = r0; | ||
639 | r3 = r1; | ||
640 | [--sp] = rets; | ||
641 | call _get_core_lock; | ||
642 | r2 = [p1]; | ||
643 | r3 = r2 | r3; | ||
644 | [p1] = r3; | ||
645 | r3 = r2; | ||
646 | r1 = p1; | ||
647 | call _put_core_lock; | ||
648 | r0 = r3; | ||
649 | rets = [sp++]; | ||
650 | rts; | ||
651 | ENDPROC(___raw_atomic_set_asm) | ||
652 | |||
653 | /* | ||
654 | * r0 = ptr | ||
655 | * r1 = mask | ||
656 | * | ||
657 | * XOR the mask bits with a 32bit word and return the old 32bit value | ||
658 | * atomically. | ||
659 | * Clobbers: r3:0, p1:0 | ||
660 | */ | ||
661 | ENTRY(___raw_atomic_xor_asm) | ||
662 | p1 = r0; | ||
663 | r3 = r1; | ||
664 | [--sp] = rets; | ||
665 | call _get_core_lock; | ||
666 | r2 = [p1]; | ||
667 | r3 = r2 ^ r3; | ||
668 | [p1] = r3; | ||
669 | r3 = r2; | ||
670 | r1 = p1; | ||
671 | call _put_core_lock; | ||
672 | r0 = r3; | ||
673 | rets = [sp++]; | ||
674 | rts; | ||
675 | ENDPROC(___raw_atomic_xor_asm) | ||
676 | |||
677 | /* | ||
678 | * r0 = ptr | ||
679 | * r1 = mask | ||
680 | * | ||
681 | * Perform a logical AND between the mask bits and a 32bit word, and | ||
682 | * return the masked value. We need this on this architecture in | ||
683 | * order to invalidate the local cache before testing. | ||
684 | * | ||
685 | * Clobbers: r3:0, p1:0 | ||
686 | */ | ||
687 | ENTRY(___raw_atomic_test_asm) | ||
688 | p1 = r0; | ||
689 | r3 = r1; | ||
690 | r1 = -L1_CACHE_BYTES; | ||
691 | r1 = r0 & r1; | ||
692 | p0 = r1; | ||
693 | flushinv[p0]; | ||
694 | SSYNC(r2); | ||
695 | r0 = [p1]; | ||
696 | r0 = r0 & r3; | ||
697 | rts; | ||
698 | ENDPROC(___raw_atomic_test_asm) | ||
699 | |||
700 | /* | ||
701 | * r0 = ptr | ||
702 | * r1 = value | ||
703 | * | ||
704 | * Swap *ptr with value and return the old 32bit value atomically. | ||
705 | * Clobbers: r3:0, p1:0 | ||
706 | */ | ||
707 | #define __do_xchg(src, dst) \ | ||
708 | p1 = r0; \ | ||
709 | r3 = r1; \ | ||
710 | [--sp] = rets; \ | ||
711 | call _get_core_lock; \ | ||
712 | r2 = src; \ | ||
713 | dst = r3; \ | ||
714 | r3 = r2; \ | ||
715 | r1 = p1; \ | ||
716 | call _put_core_lock; \ | ||
717 | r0 = r3; \ | ||
718 | rets = [sp++]; \ | ||
719 | rts; | ||
720 | |||
721 | ENTRY(___raw_xchg_1_asm) | ||
722 | __do_xchg(b[p1] (z), b[p1]) | ||
723 | ENDPROC(___raw_xchg_1_asm) | ||
724 | |||
725 | ENTRY(___raw_xchg_2_asm) | ||
726 | __do_xchg(w[p1] (z), w[p1]) | ||
727 | ENDPROC(___raw_xchg_2_asm) | ||
728 | |||
729 | ENTRY(___raw_xchg_4_asm) | ||
730 | __do_xchg([p1], [p1]) | ||
731 | ENDPROC(___raw_xchg_4_asm) | ||
732 | |||
733 | /* | ||
734 | * r0 = ptr | ||
735 | * r1 = new | ||
736 | * r2 = old | ||
737 | * | ||
738 | * Swap *ptr with new if *ptr == old and return the previous *ptr | ||
739 | * value atomically. | ||
740 | * | ||
741 | * Clobbers: r3:0, p1:0 | ||
742 | */ | ||
743 | #define __do_cmpxchg(src, dst) \ | ||
744 | [--sp] = rets; \ | ||
745 | [--sp] = r4; \ | ||
746 | p1 = r0; \ | ||
747 | r3 = r1; \ | ||
748 | r4 = r2; \ | ||
749 | call _get_core_lock; \ | ||
750 | r2 = src; \ | ||
751 | cc = r2 == r4; \ | ||
752 | if !cc jump 1f; \ | ||
753 | dst = r3; \ | ||
754 | 1: r3 = r2; \ | ||
755 | r1 = p1; \ | ||
756 | call _put_core_lock; \ | ||
757 | r0 = r3; \ | ||
758 | r4 = [sp++]; \ | ||
759 | rets = [sp++]; \ | ||
760 | rts; | ||
761 | |||
762 | ENTRY(___raw_cmpxchg_1_asm) | ||
763 | __do_cmpxchg(b[p1] (z), b[p1]) | ||
764 | ENDPROC(___raw_cmpxchg_1_asm) | ||
765 | |||
766 | ENTRY(___raw_cmpxchg_2_asm) | ||
767 | __do_cmpxchg(w[p1] (z), w[p1]) | ||
768 | ENDPROC(___raw_cmpxchg_2_asm) | ||
769 | |||
770 | ENTRY(___raw_cmpxchg_4_asm) | ||
771 | __do_cmpxchg([p1], [p1]) | ||
772 | ENDPROC(___raw_cmpxchg_4_asm) | ||
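In C terms, __do_cmpxchg is the classic compare-and-swap run under the corelock, which makes it atomic system-wide since both cores contend on the same slot. A sketch reusing the hypothetical helpers from the _get_core_lock commentary above (note the asm argument order: ptr, new, old):

	static inline u32 raw_cmpxchg_4(volatile u32 *ptr, u32 new, u32 old)
	{
		unsigned long flags = get_core_lock();
		u32 prev = *ptr;

		if (prev == old)
			*ptr = new;
		put_core_lock(flags);
		return prev;		/* caller compares with old */
	}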
773 | |||
774 | /* | ||
775 | * r0 = ptr | ||
776 | * r1 = bitnr | ||
777 | * | ||
778 | * Set a bit in a 32bit word and return the old 32bit value atomically. | ||
779 | * Clobbers: r3:0, p1:0 | ||
780 | */ | ||
781 | ENTRY(___raw_bit_set_asm) | ||
782 | r2 = r1; | ||
783 | r1 = 1; | ||
784 | r1 <<= r2; | ||
785 | jump ___raw_atomic_set_asm | ||
786 | ENDPROC(___raw_bit_set_asm) | ||
787 | |||
788 | /* | ||
789 | * r0 = ptr | ||
790 | * r1 = bitnr | ||
791 | * | ||
792 | * Clear a bit in a 32bit word and return the old 32bit value atomically. | ||
793 | * Clobbers: r3:0, p1:0 | ||
794 | */ | ||
795 | ENTRY(___raw_bit_clear_asm) | ||
796 | r2 = r1; | ||
797 | r1 = 1; | ||
798 | r1 <<= r2; | ||
799 | jump ___raw_atomic_clear_asm | ||
800 | ENDPROC(___raw_bit_clear_asm) | ||
801 | |||
802 | /* | ||
803 | * r0 = ptr | ||
804 | * r1 = bitnr | ||
805 | * | ||
806 | * Toggle a bit in a 32bit word and return the old 32bit value atomically. | ||
807 | * Clobbers: r3:0, p1:0 | ||
808 | */ | ||
809 | ENTRY(___raw_bit_toggle_asm) | ||
810 | r2 = r1; | ||
811 | r1 = 1; | ||
812 | r1 <<= r2; | ||
813 | jump ___raw_atomic_xor_asm | ||
814 | ENDPROC(___raw_bit_toggle_asm) | ||
815 | |||
816 | /* | ||
817 | * r0 = ptr | ||
818 | * r1 = bitnr | ||
819 | * | ||
820 | * Test-and-set a bit in a 32bit word and return the old bit value atomically. | ||
821 | * Clobbers: r3:0, p1:0 | ||
822 | */ | ||
823 | ENTRY(___raw_bit_test_set_asm) | ||
824 | [--sp] = rets; | ||
825 | [--sp] = r1; | ||
826 | call ___raw_bit_set_asm | ||
827 | r1 = [sp++]; | ||
828 | r2 = 1; | ||
829 | r2 <<= r1; | ||
830 | r0 = r0 & r2; | ||
831 | cc = r0 == 0; | ||
832 | if cc jump 1f | ||
833 | r0 = 1; | ||
834 | 1: | ||
835 | rets = [sp++]; | ||
836 | rts; | ||
837 | ENDPROC(___raw_bit_test_set_asm) | ||
838 | |||
839 | /* | ||
840 | * r0 = ptr | ||
841 | * r1 = bitnr | ||
842 | * | ||
843 | * Test-and-clear a bit in a 32bit word and return the old bit value atomically. | ||
844 | * Clobbers: r3:0, p1:0 | ||
845 | */ | ||
846 | ENTRY(___raw_bit_test_clear_asm) | ||
847 | [--sp] = rets; | ||
848 | [--sp] = r1; | ||
849 | call ___raw_bit_clear_asm | ||
850 | r1 = [sp++]; | ||
851 | r2 = 1; | ||
852 | r2 <<= r1; | ||
853 | r0 = r0 & r2; | ||
854 | cc = r0 == 0; | ||
855 | if cc jump 1f | ||
856 | r0 = 1; | ||
857 | 1: | ||
858 | rets = [sp++]; | ||
859 | rts; | ||
860 | ENDPROC(___raw_bit_test_clear_asm) | ||
861 | |||
862 | /* | ||
863 | * r0 = ptr | ||
864 | * r1 = bitnr | ||
865 | * | ||
866 | * Test-and-toggle a bit in a 32bit word, | ||
867 | * and return the old bit value atomically. | ||
868 | * Clobbers: r3:0, p1:0 | ||
869 | */ | ||
870 | ENTRY(___raw_bit_test_toggle_asm) | ||
871 | [--sp] = rets; | ||
872 | [--sp] = r1; | ||
873 | call ___raw_bit_toggle_asm | ||
874 | r1 = [sp++]; | ||
875 | r2 = 1; | ||
876 | r2 <<= r1; | ||
877 | r0 = r0 & r2; | ||
878 | cc = r0 == 0; | ||
879 | if cc jump 1f | ||
880 | r0 = 1; | ||
881 | 1: | ||
882 | rets = [sp++]; | ||
883 | rts; | ||
884 | ENDPROC(___raw_bit_test_toggle_asm) | ||
885 | |||
886 | /* | ||
887 | * r0 = ptr | ||
888 | * r1 = bitnr | ||
889 | * | ||
890 | * Test a bit in a 32bit word and return its value. | ||
891 | * We need this on this architecture in order to invalidate | ||
892 | * the local cache before testing. | ||
893 | * | ||
894 | * Clobbers: r3:0, p1:0 | ||
895 | */ | ||
896 | ENTRY(___raw_bit_test_asm) | ||
897 | r2 = r1; | ||
898 | r1 = 1; | ||
899 | r1 <<= r2; | ||
900 | jump ___raw_atomic_test_asm | ||
901 | ENDPROC(___raw_bit_test_asm) | ||
902 | |||
903 | /* | ||
904 | * r0 = ptr | ||
905 | * | ||
906 | * Fetch and return an uncached 32bit value. | ||
907 | * | ||
908 | * Clobbers: r2:0, p1:0 | ||
909 | */ | ||
910 | ENTRY(___raw_uncached_fetch_asm) | ||
911 | p1 = r0; | ||
912 | r1 = -L1_CACHE_BYTES; | ||
913 | r1 = r0 & r1; | ||
914 | p0 = r1; | ||
915 | flushinv[p0]; | ||
916 | SSYNC(r2); | ||
917 | r0 = [p1]; | ||
918 | rts; | ||
919 | ENDPROC(___raw_uncached_fetch_asm) | ||
diff --git a/arch/blackfin/mach-bf561/include/mach/blackfin.h b/arch/blackfin/mach-bf561/include/mach/blackfin.h
index 0ea8666e6764..f79f6626b7ec 100644
--- a/arch/blackfin/mach-bf561/include/mach/blackfin.h
+++ b/arch/blackfin/mach-bf561/include/mach/blackfin.h
@@ -66,8 +66,12 @@
66 | 66 | ||
67 | #define bfin_read_SIC_IMASK(x) bfin_read32(SICA_IMASK0 + (x << 2)) | 67 | #define bfin_read_SIC_IMASK(x) bfin_read32(SICA_IMASK0 + (x << 2)) |
68 | #define bfin_write_SIC_IMASK(x, val) bfin_write32((SICA_IMASK0 + (x << 2)), val) | 68 | #define bfin_write_SIC_IMASK(x, val) bfin_write32((SICA_IMASK0 + (x << 2)), val) |
69 | #define bfin_read_SICB_IMASK(x) bfin_read32(SICB_IMASK0 + (x << 2)) | ||
70 | #define bfin_write_SICB_IMASK(x, val) bfin_write32((SICB_IMASK0 + (x << 2)), val) | ||
69 | #define bfin_read_SIC_ISR(x) bfin_read32(SICA_ISR0 + (x << 2)) | 71 | #define bfin_read_SIC_ISR(x) bfin_read32(SICA_ISR0 + (x << 2)) |
70 | #define bfin_write_SIC_ISR(x, val) bfin_write32((SICA_ISR0 + (x << 2)), val) | 72 | #define bfin_write_SIC_ISR(x, val) bfin_write32((SICA_ISR0 + (x << 2)), val) |
73 | #define bfin_read_SICB_ISR(x) bfin_read32(SICB_ISR0 + (x << 2)) | ||
74 | #define bfin_write_SICB_ISR(x, val) bfin_write32((SICB_ISR0 + (x << 2)), val) | ||
71 | 75 | ||
72 | #define BFIN_UART_NR_PORTS 1 | 76 | #define BFIN_UART_NR_PORTS 1 |
73 | 77 | ||
diff --git a/arch/blackfin/mach-bf561/include/mach/defBF561.h b/arch/blackfin/mach-bf561/include/mach/defBF561.h
index 4eca2026bb92..d7c509759659 100644
--- a/arch/blackfin/mach-bf561/include/mach/defBF561.h
+++ b/arch/blackfin/mach-bf561/include/mach/defBF561.h
@@ -912,6 +912,9 @@
912 | #define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */ | 912 | #define ACTIVE_PLLDISABLED 0x0004 /* Processor In Active Mode With PLL Disabled */ |
913 | #define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */ | 913 | #define PLL_LOCKED 0x0020 /* PLL_LOCKCNT Has Been Reached */ |
914 | 914 | ||
915 | /* SICA_SYSCR Masks */ | ||
916 | #define COREB_SRAM_INIT 0x0020 | ||
917 | |||
915 | /* SWRST Mask */ | 918 | /* SWRST Mask */ |
916 | #define SYSTEM_RESET 0x0007 /* Initiates a system software reset */ | 919 | #define SYSTEM_RESET 0x0007 /* Initiates a system software reset */ |
917 | #define DOUBLE_FAULT_A 0x0008 /* Core A Double Fault Causes Reset */ | 920 | #define DOUBLE_FAULT_A 0x0008 /* Core A Double Fault Causes Reset */ |
diff --git a/arch/blackfin/mach-bf561/include/mach/mem_map.h b/arch/blackfin/mach-bf561/include/mach/mem_map.h
index f1d4c0637bd2..488c3bda65bf 100644
--- a/arch/blackfin/mach-bf561/include/mach/mem_map.h
+++ b/arch/blackfin/mach-bf561/include/mach/mem_map.h
@@ -85,4 +85,124 @@
85 | #define L1_SCRATCH_START COREA_L1_SCRATCH_START | 85 | #define L1_SCRATCH_START COREA_L1_SCRATCH_START |
86 | #define L1_SCRATCH_LENGTH 0x1000 | 86 | #define L1_SCRATCH_LENGTH 0x1000 |
87 | 87 | ||
88 | #ifndef __ASSEMBLY__ | ||
89 | |||
90 | #ifdef CONFIG_SMP | ||
91 | |||
92 | #define get_l1_scratch_start_cpu(cpu) \ | ||
93 | ({ unsigned long __addr; \ | ||
94 | __addr = (cpu) ? COREB_L1_SCRATCH_START : COREA_L1_SCRATCH_START;\ | ||
95 | __addr; }) | ||
96 | |||
97 | #define get_l1_code_start_cpu(cpu) \ | ||
98 | ({ unsigned long __addr; \ | ||
99 | __addr = (cpu) ? COREB_L1_CODE_START : COREA_L1_CODE_START; \ | ||
100 | __addr; }) | ||
101 | |||
102 | #define get_l1_data_a_start_cpu(cpu) \ | ||
103 | ({ unsigned long __addr; \ | ||
104 | __addr = (cpu) ? COREB_L1_DATA_A_START : COREA_L1_DATA_A_START;\ | ||
105 | __addr; }) | ||
106 | |||
107 | #define get_l1_data_b_start_cpu(cpu) \ | ||
108 | ({ unsigned long __addr; \ | ||
109 | __addr = (cpu) ? COREB_L1_DATA_B_START : COREA_L1_DATA_B_START;\ | ||
110 | __addr; }) | ||
111 | |||
112 | #define get_l1_scratch_start() get_l1_scratch_start_cpu(blackfin_core_id()) | ||
113 | #define get_l1_code_start() get_l1_code_start_cpu(blackfin_core_id()) | ||
114 | #define get_l1_data_a_start() get_l1_data_a_start_cpu(blackfin_core_id()) | ||
115 | #define get_l1_data_b_start() get_l1_data_b_start_cpu(blackfin_core_id()) | ||
116 | |||
117 | #else /* !CONFIG_SMP */ | ||
118 | #define get_l1_scratch_start_cpu(cpu) L1_SCRATCH_START | ||
119 | #define get_l1_code_start_cpu(cpu) L1_CODE_START | ||
120 | #define get_l1_data_a_start_cpu(cpu) L1_DATA_A_START | ||
121 | #define get_l1_data_b_start_cpu(cpu) L1_DATA_B_START | ||
122 | #define get_l1_scratch_start() L1_SCRATCH_START | ||
123 | #define get_l1_code_start() L1_CODE_START | ||
124 | #define get_l1_data_a_start() L1_DATA_A_START | ||
125 | #define get_l1_data_b_start() L1_DATA_B_START | ||
126 | #endif /* !CONFIG_SMP */ | ||
127 | |||
128 | #else /* __ASSEMBLY__ */ | ||
129 | |||
130 | /* | ||
131 | * The following macros both return the address of the PDA for the | ||
132 | * current core. | ||
133 | * | ||
134 | * In its first, safe (and hairy) form, the macro neither clobbers any | ||
135 | * register aside from the output Preg, nor uses the stack, since it | ||
136 | * may be called with an invalid stack pointer, or with the current | ||
137 | * stack space not covered by any CPLB (e.g. early exception handling). | ||
138 | * | ||
139 | * The constraints on the second form are a bit relaxed, and the code | ||
140 | * is allowed to use the specified Dreg for determining the PDA | ||
141 | * address to be returned into Preg. | ||
142 | */ | ||
143 | #ifdef CONFIG_SMP | ||
144 | #define GET_PDA_SAFE(preg) \ | ||
145 | preg.l = lo(DSPID); \ | ||
146 | preg.h = hi(DSPID); \ | ||
147 | preg = [preg]; \ | ||
148 | preg = preg << 2; \ | ||
149 | preg = preg << 2; \ | ||
150 | preg = preg << 2; \ | ||
151 | preg = preg << 2; \ | ||
152 | preg = preg << 2; \ | ||
153 | preg = preg << 2; \ | ||
154 | preg = preg << 2; \ | ||
155 | preg = preg << 2; \ | ||
156 | preg = preg << 2; \ | ||
157 | preg = preg << 2; \ | ||
158 | preg = preg << 2; \ | ||
159 | preg = preg << 2; \ | ||
160 | if cc jump 2f; \ | ||
161 | cc = preg == 0x0; \ | ||
162 | preg.l = _cpu_pda; \ | ||
163 | preg.h = _cpu_pda; \ | ||
164 | if !cc jump 3f; \ | ||
165 | 1: \ | ||
166 | /* preg = 0x0; */ \ | ||
167 | cc = !cc; /* restore cc to 0 */ \ | ||
168 | jump 4f; \ | ||
169 | 2: \ | ||
170 | cc = preg == 0x0; \ | ||
171 | preg.l = _cpu_pda; \ | ||
172 | preg.h = _cpu_pda; \ | ||
173 | if cc jump 4f; \ | ||
174 | /* preg = 0x1000000; */ \ | ||
175 | cc = !cc; /* restore cc to 1 */ \ | ||
176 | 3: \ | ||
177 | preg = [preg]; \ | ||
178 | 4: | ||
179 | |||
180 | #define GET_PDA(preg, dreg) \ | ||
181 | preg.l = lo(DSPID); \ | ||
182 | preg.h = hi(DSPID); \ | ||
183 | dreg = [preg]; \ | ||
184 | preg.l = _cpu_pda; \ | ||
185 | preg.h = _cpu_pda; \ | ||
186 | cc = bittst(dreg, 0); \ | ||
187 | if !cc jump 1f; \ | ||
188 | preg = [preg]; \ | ||
189 | 1: \ | ||
190 | |||
191 | #define GET_CPUID(preg, dreg) \ | ||
192 | preg.l = lo(DSPID); \ | ||
193 | preg.h = hi(DSPID); \ | ||
194 | dreg = [preg]; \ | ||
195 | dreg = ROT dreg BY -1; \ | ||
196 | dreg = CC; | ||
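GET_CPUID rotates bit 0 of DSPID into CC and returns it: 0 on Core A, 1 on Core B. The assumed C-level counterpart (sketch; the kernel's accessor is expected to look roughly like this):

	static inline unsigned int blackfin_core_id(void)
	{
		return bfin_dspid() & 0xff;	/* 0 = Core A, 1 = Core B */
	}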
197 | |||
198 | #else | ||
199 | #define GET_PDA_SAFE(preg) \ | ||
200 | preg.l = _cpu_pda; \ | ||
201 | preg.h = _cpu_pda; | ||
202 | |||
203 | #define GET_PDA(preg, dreg) GET_PDA_SAFE(preg) | ||
204 | #endif /* CONFIG_SMP */ | ||
205 | |||
206 | #endif /* __ASSEMBLY__ */ | ||
207 | |||
88 | #endif /* _MEM_MAP_533_H_ */ | 208 | #endif /* _MEM_MAP_533_H_ */ |
diff --git a/arch/blackfin/mach-bf561/include/mach/smp.h b/arch/blackfin/mach-bf561/include/mach/smp.h
new file mode 100644
index 000000000000..f9e65ebe81b2
--- /dev/null
+++ b/arch/blackfin/mach-bf561/include/mach/smp.h
@@ -0,0 +1,22 @@
1 | #ifndef _MACH_BF561_SMP | ||
2 | #define _MACH_BF561_SMP | ||
3 | |||
4 | struct task_struct; | ||
5 | |||
6 | void platform_init_cpus(void); | ||
7 | |||
8 | void platform_prepare_cpus(unsigned int max_cpus); | ||
9 | |||
10 | int platform_boot_secondary(unsigned int cpu, struct task_struct *idle); | ||
11 | |||
12 | void platform_secondary_init(unsigned int cpu); | ||
13 | |||
14 | void platform_request_ipi(int (*handler)(int, void *)); | ||
15 | |||
16 | void platform_send_ipi(cpumask_t callmap); | ||
17 | |||
18 | void platform_send_ipi_cpu(unsigned int cpu); | ||
19 | |||
20 | void platform_clear_ipi(unsigned int cpu); | ||
21 | |||
22 | #endif /* !_MACH_BF561_SMP */ | ||
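These hooks are the platform half of the Blackfin SMP bring-up. The assumed call flow from the generic code, for orientation:

	setup_arch()              ->  platform_init_cpus()
	smp_prepare_cpus(max)     ->  platform_prepare_cpus(max)
	__cpu_up(cpu)             ->  platform_boot_secondary(cpu, idle)
	secondary_start_kernel()  ->  platform_secondary_init(cpu)
	IPI send / acknowledge    ->  platform_send_ipi*() / platform_clear_ipi()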
diff --git a/arch/blackfin/mach-bf561/secondary.S b/arch/blackfin/mach-bf561/secondary.S
new file mode 100644
index 000000000000..35280f06b7b6
--- /dev/null
+++ b/arch/blackfin/mach-bf561/secondary.S
@@ -0,0 +1,215 @@
1 | /* | ||
2 | * File: arch/blackfin/mach-bf561/secondary.S | ||
3 | * Based on: arch/blackfin/mach-bf561/head.S | ||
4 | * Author: Philippe Gerum <rpm@xenomai.org> | ||
5 | * | ||
6 | * Copyright 2007 Analog Devices Inc. | ||
7 | * | ||
8 | * Description: BF561 coreB bootstrap file | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, see the file COPYING, or write | ||
22 | * to the Free Software Foundation, Inc., | ||
23 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/linkage.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <asm/blackfin.h> | ||
29 | #include <asm/asm-offsets.h> | ||
30 | |||
31 | __INIT | ||
32 | |||
33 | /* Lay the initial stack into the L1 scratch area of Core B */ | ||
34 | #define INITIAL_STACK (COREB_L1_SCRATCH_START + L1_SCRATCH_LENGTH - 12) | ||
35 | |||
36 | ENTRY(_coreb_trampoline_start) | ||
37 | /* Set the SYSCFG register */ | ||
38 | R0 = 0x36; | ||
39 | SYSCFG = R0; /* Enable cycle counter and nesting of interrupts (3rd bit) */ | ||
40 | R0 = 0; | ||
41 | |||
42 | /* Clear out all the data and pointer registers */ | ||
43 | R1 = R0; | ||
44 | R2 = R0; | ||
45 | R3 = R0; | ||
46 | R4 = R0; | ||
47 | R5 = R0; | ||
48 | R6 = R0; | ||
49 | R7 = R0; | ||
50 | |||
51 | P0 = R0; | ||
52 | P1 = R0; | ||
53 | P2 = R0; | ||
54 | P3 = R0; | ||
55 | P4 = R0; | ||
56 | P5 = R0; | ||
57 | |||
58 | LC0 = r0; | ||
59 | LC1 = r0; | ||
60 | L0 = r0; | ||
61 | L1 = r0; | ||
62 | L2 = r0; | ||
63 | L3 = r0; | ||
64 | |||
65 | /* Clear out all the DAG registers */ | ||
66 | B0 = r0; | ||
67 | B1 = r0; | ||
68 | B2 = r0; | ||
69 | B3 = r0; | ||
70 | |||
71 | I0 = r0; | ||
72 | I1 = r0; | ||
73 | I2 = r0; | ||
74 | I3 = r0; | ||
75 | |||
76 | M0 = r0; | ||
77 | M1 = r0; | ||
78 | M2 = r0; | ||
79 | M3 = r0; | ||
80 | |||
81 | /* Turn off the icache */ | ||
82 | p0.l = LO(IMEM_CONTROL); | ||
83 | p0.h = HI(IMEM_CONTROL); | ||
84 | R1 = [p0]; | ||
85 | R0 = ~ENICPLB; | ||
86 | R0 = R0 & R1; | ||
87 | |||
88 | /* Anomaly 05000125 */ | ||
89 | #ifdef ANOMALY_05000125 | ||
90 | CLI R2; | ||
91 | SSYNC; | ||
92 | #endif | ||
93 | [p0] = R0; | ||
94 | SSYNC; | ||
95 | #ifdef ANOMALY_05000125 | ||
96 | STI R2; | ||
97 | #endif | ||
98 | |||
99 | /* Turn off the dcache */ | ||
100 | p0.l = LO(DMEM_CONTROL); | ||
101 | p0.h = HI(DMEM_CONTROL); | ||
102 | R1 = [p0]; | ||
103 | R0 = ~ENDCPLB; | ||
104 | R0 = R0 & R1; | ||
105 | |||
106 | /* Anomaly 05000125 */ | ||
107 | #ifdef ANOMALY_05000125 | ||
108 | CLI R2; | ||
109 | SSYNC; | ||
110 | #endif | ||
111 | [p0] = R0; | ||
112 | SSYNC; | ||
113 | #ifdef ANOMALY_05000125 | ||
114 | STI R2; | ||
115 | #endif | ||
116 | |||
117 | /* in case of double faults, save a few things */ | ||
118 | p0.l = _init_retx_coreb; | ||
119 | p0.h = _init_retx_coreb; | ||
120 | R0 = RETX; | ||
121 | [P0] = R0; | ||
122 | |||
123 | #ifdef CONFIG_DEBUG_DOUBLEFAULT | ||
124 | /* Only save these if we are storing them, | ||
125 | * This happens here, since L1 gets clobbered | ||
126 | * below | ||
127 | */ | ||
128 | GET_PDA(p0, r0); | ||
129 | r7 = [p0 + PDA_RETX]; | ||
130 | p1.l = _init_saved_retx_coreb; | ||
131 | p1.h = _init_saved_retx_coreb; | ||
132 | [p1] = r7; | ||
133 | |||
134 | r7 = [p0 + PDA_DCPLB]; | ||
135 | p1.l = _init_saved_dcplb_fault_addr_coreb; | ||
136 | p1.h = _init_saved_dcplb_fault_addr_coreb; | ||
137 | [p1] = r7; | ||
138 | |||
139 | r7 = [p0 + PDA_ICPLB]; | ||
140 | p1.l = _init_saved_icplb_fault_addr_coreb; | ||
141 | p1.h = _init_saved_icplb_fault_addr_coreb; | ||
142 | [p1] = r7; | ||
143 | |||
144 | r7 = [p0 + PDA_SEQSTAT]; | ||
145 | p1.l = _init_saved_seqstat_coreb; | ||
146 | p1.h = _init_saved_seqstat_coreb; | ||
147 | [p1] = r7; | ||
148 | #endif | ||
149 | |||
150 | /* Initialize stack pointer */ | ||
151 | sp.l = lo(INITIAL_STACK); | ||
152 | sp.h = hi(INITIAL_STACK); | ||
153 | fp = sp; | ||
154 | usp = sp; | ||
155 | |||
156 | /* This section keeps the processor in supervisor mode | ||
157 | * during core B startup. Branches to the idle task. | ||
158 | */ | ||
159 | |||
160 | /* EVT15 = _real_start */ | ||
161 | |||
162 | p0.l = lo(EVT15); | ||
163 | p0.h = hi(EVT15); | ||
164 | p1.l = _coreb_start; | ||
165 | p1.h = _coreb_start; | ||
166 | [p0] = p1; | ||
167 | csync; | ||
168 | |||
169 | p0.l = lo(IMASK); | ||
170 | p0.h = hi(IMASK); | ||
171 | p1.l = IMASK_IVG15; | ||
172 | p1.h = 0x0; | ||
173 | [p0] = p1; | ||
174 | csync; | ||
175 | |||
176 | raise 15; | ||
177 | p0.l = .LWAIT_HERE; | ||
178 | p0.h = .LWAIT_HERE; | ||
179 | reti = p0; | ||
180 | #if defined(ANOMALY_05000281) | ||
181 | nop; nop; nop; | ||
182 | #endif | ||
183 | rti; | ||
184 | |||
185 | .LWAIT_HERE: | ||
186 | jump .LWAIT_HERE; | ||
187 | ENDPROC(_coreb_trampoline_start) | ||
188 | ENTRY(_coreb_trampoline_end) | ||
189 | |||
190 | ENTRY(_coreb_start) | ||
191 | [--sp] = reti; | ||
192 | |||
193 | p0.l = lo(WDOGB_CTL); | ||
194 | p0.h = hi(WDOGB_CTL); | ||
195 | r0 = 0xAD6(z); | ||
196 | w[p0] = r0; /* Clear the watchdog. */ | ||
197 | ssync; | ||
198 | |||
199 | /* | ||
200 | * switch to IDLE stack. | ||
201 | */ | ||
202 | p0.l = _secondary_stack; | ||
203 | p0.h = _secondary_stack; | ||
204 | sp = [p0]; | ||
205 | usp = sp; | ||
206 | fp = sp; | ||
207 | sp += -12; | ||
208 | call _init_pda | ||
209 | sp += 12; | ||
210 | call _secondary_start_kernel; | ||
211 | .L_exit: | ||
212 | jump.s .L_exit; | ||
213 | ENDPROC(_coreb_start) | ||
214 | |||
215 | __FINIT | ||
diff --git a/arch/blackfin/mach-bf561/smp.c b/arch/blackfin/mach-bf561/smp.c
new file mode 100644
index 000000000000..23fd4c13897f
--- /dev/null
+++ b/arch/blackfin/mach-bf561/smp.c
@@ -0,0 +1,172 @@
1 | /* | ||
2 | * File: arch/blackfin/mach-bf561/smp.c | ||
3 | * Author: Philippe Gerum <rpm@xenomai.org> | ||
4 | * | ||
5 | * Copyright 2007 Analog Devices Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, see the file COPYING, or write | ||
19 | * to the Free Software Foundation, Inc., | ||
20 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
21 | */ | ||
22 | |||
23 | #include <linux/init.h> | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/sched.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <asm/smp.h> | ||
28 | #include <asm/dma.h> | ||
29 | |||
30 | #define COREB_SRAM_BASE 0xff600000 | ||
31 | #define COREB_SRAM_SIZE 0x4000 | ||
32 | |||
33 | extern char coreb_trampoline_start, coreb_trampoline_end; | ||
34 | |||
35 | static DEFINE_SPINLOCK(boot_lock); | ||
36 | |||
37 | static cpumask_t cpu_callin_map; | ||
38 | |||
39 | /* | ||
40 | * platform_init_cpus() - Tell the world about how many cores we | ||
41 | * have. This is called while setting up the architecture support | ||
42 | * (setup_arch()), so don't be too demanding here with respect to | ||
43 | * available kernel services. | ||
44 | */ | ||
45 | |||
46 | void __init platform_init_cpus(void) | ||
47 | { | ||
48 | cpu_set(0, cpu_possible_map); /* CoreA */ | ||
49 | cpu_set(1, cpu_possible_map); /* CoreB */ | ||
50 | } | ||
51 | |||
52 | void __init platform_prepare_cpus(unsigned int max_cpus) | ||
53 | { | ||
54 | int len; | ||
55 | |||
56 | len = &coreb_trampoline_end - &coreb_trampoline_start + 1; | ||
57 | BUG_ON(len > COREB_SRAM_SIZE); | ||
58 | |||
59 | dma_memcpy((void *)COREB_SRAM_BASE, &coreb_trampoline_start, len); | ||
60 | |||
61 | /* Both cores ought to be present on a bf561! */ | ||
62 | cpu_set(0, cpu_present_map); /* CoreA */ | ||
63 | cpu_set(1, cpu_present_map); /* CoreB */ | ||
64 | |||
65 | printk(KERN_INFO "CoreB bootstrap code copied to SRAM %p via DMA.\n", (void *)COREB_SRAM_BASE); | ||
66 | } | ||
67 | |||
68 | int __init setup_profiling_timer(unsigned int multiplier) /* not supported */ | ||
69 | { | ||
70 | return -EINVAL; | ||
71 | } | ||
72 | |||
73 | void __cpuinit platform_secondary_init(unsigned int cpu) | ||
74 | { | ||
75 | local_irq_disable(); | ||
76 | |||
77 | /* Clone setup for peripheral interrupt sources from CoreA. */ | ||
78 | bfin_write_SICB_IMASK0(bfin_read_SICA_IMASK0()); | ||
79 | bfin_write_SICB_IMASK1(bfin_read_SICA_IMASK1()); | ||
80 | SSYNC(); | ||
81 | |||
82 | /* Clone setup for IARs from CoreA. */ | ||
83 | bfin_write_SICB_IAR0(bfin_read_SICA_IAR0()); | ||
84 | bfin_write_SICB_IAR1(bfin_read_SICA_IAR1()); | ||
85 | bfin_write_SICB_IAR2(bfin_read_SICA_IAR2()); | ||
86 | bfin_write_SICB_IAR3(bfin_read_SICA_IAR3()); | ||
87 | bfin_write_SICB_IAR4(bfin_read_SICA_IAR4()); | ||
88 | bfin_write_SICB_IAR5(bfin_read_SICA_IAR5()); | ||
89 | bfin_write_SICB_IAR6(bfin_read_SICA_IAR6()); | ||
90 | bfin_write_SICB_IAR7(bfin_read_SICA_IAR7()); | ||
91 | SSYNC(); | ||
92 | |||
93 | local_irq_enable(); | ||
94 | |||
95 | /* Calibrate loops per jiffy value. */ | ||
96 | calibrate_delay(); | ||
97 | |||
98 | /* Store CPU-private information to the cpu_data array. */ | ||
99 | bfin_setup_cpudata(cpu); | ||
100 | |||
101 | /* We are done with local CPU inits, unblock the boot CPU. */ | ||
102 | cpu_set(cpu, cpu_callin_map); | ||
103 | spin_lock(&boot_lock); | ||
104 | spin_unlock(&boot_lock); | ||
105 | } | ||
106 | |||
107 | int __cpuinit platform_boot_secondary(unsigned int cpu, struct task_struct *idle) | ||
108 | { | ||
109 | unsigned long timeout; | ||
110 | |||
111 | /* CoreB already running?! */ | ||
112 | BUG_ON((bfin_read_SICA_SYSCR() & COREB_SRAM_INIT) == 0); | ||
113 | |||
114 | printk(KERN_INFO "Booting Core B.\n"); | ||
115 | |||
116 | spin_lock(&boot_lock); | ||
117 | |||
118 | /* Kick CoreB, which should start execution from COREB_SRAM_BASE. */ | ||
119 | SSYNC(); | ||
120 | bfin_write_SICA_SYSCR(bfin_read_SICA_SYSCR() & ~COREB_SRAM_INIT); | ||
121 | SSYNC(); | ||
122 | |||
123 | timeout = jiffies + 1 * HZ; | ||
124 | while (time_before(jiffies, timeout)) { | ||
125 | if (cpu_isset(cpu, cpu_callin_map)) | ||
126 | break; | ||
127 | udelay(100); | ||
128 | barrier(); | ||
129 | } | ||
130 | |||
131 | spin_unlock(&boot_lock); | ||
132 | |||
133 | return cpu_isset(cpu, cpu_callin_map) ? 0 : -ENOSYS; | ||
134 | } | ||
135 | |||
136 | void __init platform_request_ipi(irq_handler_t handler) | ||
137 | { | ||
138 | int ret; | ||
139 | |||
140 | ret = request_irq(IRQ_SUPPLE_0, handler, IRQF_DISABLED, | ||
141 | "SMP interrupt", handler); | ||
142 | if (ret) | ||
143 | panic("Cannot request supplemental interrupt 0 for IPI service\n"); | ||
144 | } | ||
145 | |||
146 | void platform_send_ipi(cpumask_t callmap) | ||
147 | { | ||
148 | unsigned int cpu; | ||
149 | |||
150 | for_each_cpu_mask(cpu, callmap) { | ||
151 | BUG_ON(cpu >= 2); | ||
152 | SSYNC(); | ||
153 | bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu))); | ||
154 | SSYNC(); | ||
155 | } | ||
156 | } | ||
157 | |||
158 | void platform_send_ipi_cpu(unsigned int cpu) | ||
159 | { | ||
160 | BUG_ON(cpu >= 2); | ||
161 | SSYNC(); | ||
162 | bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (6 + cpu))); | ||
163 | SSYNC(); | ||
164 | } | ||
165 | |||
166 | void platform_clear_ipi(unsigned int cpu) | ||
167 | { | ||
168 | BUG_ON(cpu >= 2); | ||
169 | SSYNC(); | ||
170 | bfin_write_SICB_SYSCR(bfin_read_SICB_SYSCR() | (1 << (10 + cpu))); | ||
171 | SSYNC(); | ||
172 | } | ||
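Taken together, the SICB_SYSCR bit assignment used by these three functions is: bit 6 + cpu raises supplemental interrupt 0 on the target core, and bit 10 + cpu drops the latch again. A typical round trip (sketch; IRQ_SUPPLE_0 is the vector requested in platform_request_ipi()):

	platform_send_ipi_cpu(1);	/* Core A pokes Core B */
	/* ... Core B enters the IRQ_SUPPLE_0 handler ... */
	platform_clear_ipi(1);		/* handler acknowledges */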