aboutsummaryrefslogtreecommitdiffstats
path: root/arch/blackfin/mach-common
diff options
context:
space:
mode:
Diffstat (limited to 'arch/blackfin/mach-common')
-rw-r--r--arch/blackfin/mach-common/Makefile4
-rw-r--r--arch/blackfin/mach-common/cache-c.c24
-rw-r--r--arch/blackfin/mach-common/cache.S6
-rw-r--r--arch/blackfin/mach-common/clocks-init.c93
-rw-r--r--arch/blackfin/mach-common/cpufreq.c6
-rw-r--r--arch/blackfin/mach-common/dpmc_modes.S3
-rw-r--r--arch/blackfin/mach-common/entry.S132
-rw-r--r--arch/blackfin/mach-common/head.S117
-rw-r--r--arch/blackfin/mach-common/interrupt.S80
-rw-r--r--arch/blackfin/mach-common/ints-priority.c541
-rw-r--r--arch/blackfin/mach-common/irqpanic.c12
-rw-r--r--arch/blackfin/mach-common/lock.S4
-rw-r--r--arch/blackfin/mach-common/pm.c20
-rw-r--r--arch/blackfin/mach-common/smp.c476
14 files changed, 1149 insertions, 369 deletions
diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile
index e6ed57c56d4b..1f3228ed713f 100644
--- a/arch/blackfin/mach-common/Makefile
+++ b/arch/blackfin/mach-common/Makefile
@@ -3,10 +3,12 @@
3# 3#
4 4
5obj-y := \ 5obj-y := \
6 cache.o entry.o head.o \ 6 cache.o cache-c.o entry.o head.o \
7 interrupt.o irqpanic.o arch_checks.o ints-priority.o 7 interrupt.o irqpanic.o arch_checks.o ints-priority.o
8 8
9obj-$(CONFIG_BFIN_ICACHE_LOCK) += lock.o 9obj-$(CONFIG_BFIN_ICACHE_LOCK) += lock.o
10obj-$(CONFIG_PM) += pm.o dpmc_modes.o 10obj-$(CONFIG_PM) += pm.o dpmc_modes.o
11obj-$(CONFIG_CPU_FREQ) += cpufreq.o 11obj-$(CONFIG_CPU_FREQ) += cpufreq.o
12obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o 12obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
13obj-$(CONFIG_SMP) += smp.o
14obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o
diff --git a/arch/blackfin/mach-common/cache-c.c b/arch/blackfin/mach-common/cache-c.c
new file mode 100644
index 000000000000..e6ab1f815123
--- /dev/null
+++ b/arch/blackfin/mach-common/cache-c.c
@@ -0,0 +1,24 @@
1/*
2 * Blackfin cache control code (simpler control-style functions)
3 *
4 * Copyright 2004-2008 Analog Devices Inc.
5 *
6 * Enter bugs at http://blackfin.uclinux.org/
7 *
8 * Licensed under the GPL-2 or later.
9 */
10
11#include <asm/blackfin.h>
12
13/* Invalidate the Entire Data cache by
14 * clearing DMC[1:0] bits
15 */
16void blackfin_invalidate_entire_dcache(void)
17{
18 u32 dmem = bfin_read_DMEM_CONTROL();
19 SSYNC();
20 bfin_write_DMEM_CONTROL(dmem & ~0xc);
21 SSYNC();
22 bfin_write_DMEM_CONTROL(dmem);
23 SSYNC();
24}
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index a028e9450419..3c98dacbf289 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -49,13 +49,17 @@
49.ifnb \optflushins 49.ifnb \optflushins
50 \optflushins [P0]; 50 \optflushins [P0];
51.endif 51.endif
52#if ANOMALY_05000443
52.ifb \optnopins 53.ifb \optnopins
532: 542:
54.endif 55.endif
55 \flushins [P0++]; 56 \flushins [P0++];
56.ifnb \optnopins 57.ifnb \optnopins
572: \optnopins; 582: \optnopins;
58.endif 59.endif
60#else
612: \flushins [P0++];
62#endif
59 63
60 RTS; 64 RTS;
61.endm 65.endm
diff --git a/arch/blackfin/mach-common/clocks-init.c b/arch/blackfin/mach-common/clocks-init.c
new file mode 100644
index 000000000000..5d182abefc7b
--- /dev/null
+++ b/arch/blackfin/mach-common/clocks-init.c
@@ -0,0 +1,93 @@
1/*
2 * arch/blackfin/mach-common/clocks-init.c - reprogram clocks / memory
3 *
4 * Copyright 2004-2008 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/linkage.h>
10#include <linux/init.h>
11#include <asm/blackfin.h>
12
13#include <asm/dma.h>
14#include <asm/clocks.h>
15#include <asm/mem_init.h>
16
17#define PLL_CTL_VAL \
18 (((CONFIG_VCO_MULT & 63) << 9) | CLKIN_HALF | \
19 (PLL_BYPASS << 8) | (ANOMALY_05000265 ? 0x8000 : 0))
20
21__attribute__((l1_text))
22static void do_sync(void)
23{
24 __builtin_bfin_ssync();
25}
26
27__attribute__((l1_text))
28void init_clocks(void)
29{
30 /* Kill any active DMAs as they may trigger external memory accesses
31 * in the middle of reprogramming things, and that'll screw us up.
32 * For example, any automatic DMAs left by U-Boot for splash screens.
33 */
34 size_t i;
35 for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
36 struct dma_register *dma = dma_io_base_addr[i];
37 dma->cfg = 0;
38 }
39
40 do_sync();
41
42#ifdef SIC_IWR0
43 bfin_write_SIC_IWR0(IWR_ENABLE(0));
44# ifdef SIC_IWR1
45 /* BF52x system reset does not properly reset SIC_IWR1 which
46 * will screw up the bootrom as it relies on MDMA0/1 waking it
47 * up from IDLE instructions. See this report for more info:
48 * http://blackfin.uclinux.org/gf/tracker/4323
49 */
50 if (ANOMALY_05000435)
51 bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
52 else
53 bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
54# endif
55# ifdef SIC_IWR2
56 bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
57# endif
58#else
59 bfin_write_SIC_IWR(IWR_ENABLE(0));
60#endif
61 do_sync();
62#ifdef EBIU_SDGCTL
63 bfin_write_EBIU_SDGCTL(bfin_read_EBIU_SDGCTL() | SRFS);
64 do_sync();
65#endif
66
67#ifdef CLKBUFOE
68 bfin_write16(VR_CTL, bfin_read_VR_CTL() | CLKBUFOE);
69 do_sync();
70 __asm__ __volatile__("IDLE;");
71#endif
72 bfin_write_PLL_LOCKCNT(0x300);
73 do_sync();
74 bfin_write16(PLL_CTL, PLL_CTL_VAL);
75 __asm__ __volatile__("IDLE;");
76 bfin_write_PLL_DIV(CONFIG_CCLK_ACT_DIV | CONFIG_SCLK_DIV);
77#ifdef EBIU_SDGCTL
78 bfin_write_EBIU_SDRRC(mem_SDRRC);
79 bfin_write_EBIU_SDGCTL(mem_SDGCTL);
80#else
81 bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() & ~(SRREQ));
82 do_sync();
83 bfin_write_EBIU_RSTCTL(bfin_read_EBIU_RSTCTL() | 0x1);
84 bfin_write_EBIU_DDRCTL0(mem_DDRCTL0);
85 bfin_write_EBIU_DDRCTL1(mem_DDRCTL1);
86 bfin_write_EBIU_DDRCTL2(mem_DDRCTL2);
87#ifdef CONFIG_MEM_EBIU_DDRQUE
88 bfin_write_EBIU_DDRQUE(CONFIG_MEM_EBIU_DDRQUE);
89#endif
90#endif
91 do_sync();
92 bfin_read16(0);
93}
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index dda5443b37ed..72e16605ca09 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -104,7 +104,7 @@ static int bfin_target(struct cpufreq_policy *policy,
104 cclk_hz, target_freq, freqs.old); 104 cclk_hz, target_freq, freqs.old);
105 105
106 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); 106 cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
107 local_irq_save(flags); 107 local_irq_save_hw(flags);
108 plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel; 108 plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
109 tscale = dpm_state_table[index].tscale; 109 tscale = dpm_state_table[index].tscale;
110 bfin_write_PLL_DIV(plldiv); 110 bfin_write_PLL_DIV(plldiv);
@@ -112,10 +112,10 @@ static int bfin_target(struct cpufreq_policy *policy,
112 bfin_write_TSCALE(tscale); 112 bfin_write_TSCALE(tscale);
113 cycles = get_cycles(); 113 cycles = get_cycles();
114 SSYNC(); 114 SSYNC();
115 cycles += 10; /* ~10 cycles we loose after get_cycles() */ 115 cycles += 10; /* ~10 cycles we lose after get_cycles() */
116 __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index); 116 __bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
117 __bfin_cycles_mod = index; 117 __bfin_cycles_mod = index;
118 local_irq_restore(flags); 118 local_irq_restore_hw(flags);
119 /* TODO: just test case for cycles clock source, remove later */ 119 /* TODO: just test case for cycles clock source, remove later */
120 pr_debug("cpufreq: done\n"); 120 pr_debug("cpufreq: done\n");
121 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); 121 cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index ad5431e2cd05..4da50bcd9300 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -247,7 +247,8 @@ ENTRY(_unset_dram_srfs)
247ENDPROC(_unset_dram_srfs) 247ENDPROC(_unset_dram_srfs)
248 248
249ENTRY(_set_sic_iwr) 249ENTRY(_set_sic_iwr)
250#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) 250#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) || \
251 defined(CONFIG_BF538) || defined(CONFIG_BF539) || defined(CONFIG_BF51x)
251 P0.H = hi(SIC_IWR0); 252 P0.H = hi(SIC_IWR0);
252 P0.L = lo(SIC_IWR0); 253 P0.L = lo(SIC_IWR0);
253 P1.H = hi(SIC_IWR1); 254 P1.H = hi(SIC_IWR1);
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index bde6dc4e2614..fae774651374 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -36,6 +36,7 @@
36#include <linux/init.h> 36#include <linux/init.h>
37#include <linux/linkage.h> 37#include <linux/linkage.h>
38#include <linux/unistd.h> 38#include <linux/unistd.h>
39#include <linux/threads.h>
39#include <asm/blackfin.h> 40#include <asm/blackfin.h>
40#include <asm/errno.h> 41#include <asm/errno.h>
41#include <asm/fixed_code.h> 42#include <asm/fixed_code.h>
@@ -75,11 +76,11 @@ ENTRY(_ex_workaround_261)
75 * handle it. 76 * handle it.
76 */ 77 */
77 P4 = R7; /* Store EXCAUSE */ 78 P4 = R7; /* Store EXCAUSE */
78 p5.l = _last_cplb_fault_retx; 79
79 p5.h = _last_cplb_fault_retx; 80 GET_PDA(p5, r7);
80 r7 = [p5]; 81 r7 = [p5 + PDA_LFRETX];
81 r6 = retx; 82 r6 = retx;
82 [p5] = r6; 83 [p5 + PDA_LFRETX] = r6;
83 cc = r6 == r7; 84 cc = r6 == r7;
84 if !cc jump _bfin_return_from_exception; 85 if !cc jump _bfin_return_from_exception;
85 /* fall through */ 86 /* fall through */
@@ -111,24 +112,21 @@ ENTRY(_ex_dcplb_viol)
111ENTRY(_ex_dcplb_miss) 112ENTRY(_ex_dcplb_miss)
112ENTRY(_ex_icplb_miss) 113ENTRY(_ex_icplb_miss)
113 (R7:6,P5:4) = [sp++]; 114 (R7:6,P5:4) = [sp++];
114 ASTAT = [sp++]; 115 /* We leave the previously pushed ASTAT on the stack. */
115 SAVE_ALL_SYS 116 SAVE_CONTEXT_CPLB
116#ifdef CONFIG_MPU 117
117 /* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that 118 /* We must load R1 here, _before_ DEBUG_HWTRACE_SAVE, since that
118 * will change the stack pointer. */ 119 * will change the stack pointer. */
119 R0 = SEQSTAT; 120 R0 = SEQSTAT;
120 R1 = SP; 121 R1 = SP;
121#endif 122
122 DEBUG_HWTRACE_SAVE(p5, r7) 123 DEBUG_HWTRACE_SAVE(p5, r7)
123#ifdef CONFIG_MPU 124
124 sp += -12; 125 sp += -12;
125 call _cplb_hdr; 126 call _cplb_hdr;
126 sp += 12; 127 sp += 12;
127 CC = R0 == 0; 128 CC = R0 == 0;
128 IF !CC JUMP _handle_bad_cplb; 129 IF !CC JUMP _handle_bad_cplb;
129#else
130 call __cplb_hdr;
131#endif
132 130
133#ifdef CONFIG_DEBUG_DOUBLEFAULT 131#ifdef CONFIG_DEBUG_DOUBLEFAULT
134 /* While we were processing this, did we double fault? */ 132 /* While we were processing this, did we double fault? */
@@ -142,7 +140,8 @@ ENTRY(_ex_icplb_miss)
142#endif 140#endif
143 141
144 DEBUG_HWTRACE_RESTORE(p5, r7) 142 DEBUG_HWTRACE_RESTORE(p5, r7)
145 RESTORE_ALL_SYS 143 RESTORE_CONTEXT_CPLB
144 ASTAT = [SP++];
146 SP = EX_SCRATCH_REG; 145 SP = EX_SCRATCH_REG;
147 rtx; 146 rtx;
148ENDPROC(_ex_icplb_miss) 147ENDPROC(_ex_icplb_miss)
@@ -297,9 +296,8 @@ ENTRY(_handle_bad_cplb)
297 * the stack to get ready so, we can fall through - we 296 * the stack to get ready so, we can fall through - we
298 * need to make a CPLB exception look like a normal exception 297 * need to make a CPLB exception look like a normal exception
299 */ 298 */
300 299 RESTORE_CONTEXT_CPLB
301 RESTORE_ALL_SYS 300 /* ASTAT is still on the stack, where it is needed. */
302 [--sp] = ASTAT;
303 [--sp] = (R7:6,P5:4); 301 [--sp] = (R7:6,P5:4);
304 302
305ENTRY(_ex_replaceable) 303ENTRY(_ex_replaceable)
@@ -324,7 +322,9 @@ ENTRY(_ex_trap_c)
324 [p4] = p5; 322 [p4] = p5;
325 csync; 323 csync;
326 324
325 GET_PDA(p5, r6);
327#ifndef CONFIG_DEBUG_DOUBLEFAULT 326#ifndef CONFIG_DEBUG_DOUBLEFAULT
327
328 /* 328 /*
329 * Save these registers, as they are only valid in exception context 329 * Save these registers, as they are only valid in exception context
330 * (where we are now - as soon as we defer to IRQ5, they can change) 330 * (where we are now - as soon as we defer to IRQ5, they can change)
@@ -335,29 +335,25 @@ ENTRY(_ex_trap_c)
335 p4.l = lo(DCPLB_FAULT_ADDR); 335 p4.l = lo(DCPLB_FAULT_ADDR);
336 p4.h = hi(DCPLB_FAULT_ADDR); 336 p4.h = hi(DCPLB_FAULT_ADDR);
337 r7 = [p4]; 337 r7 = [p4];
338 p5.h = _saved_dcplb_fault_addr; 338 [p5 + PDA_DCPLB] = r7;
339 p5.l = _saved_dcplb_fault_addr;
340 [p5] = r7;
341 339
342 r7 = [p4 + (ICPLB_FAULT_ADDR - DCPLB_FAULT_ADDR)]; 340 p4.l = lo(ICPLB_FAULT_ADDR);
343 p5.h = _saved_icplb_fault_addr; 341 p4.h = hi(ICPLB_FAULT_ADDR);
344 p5.l = _saved_icplb_fault_addr; 342 r6 = [p4];
345 [p5] = r7; 343 [p5 + PDA_ICPLB] = r6;
346 344
347 r6 = retx; 345 r6 = retx;
348 p4.l = _saved_retx; 346 [p5 + PDA_RETX] = r6;
349 p4.h = _saved_retx;
350 [p4] = r6;
351#endif 347#endif
352 r6 = SYSCFG; 348 r6 = SYSCFG;
353 [p4 + 4] = r6; 349 [p5 + PDA_SYSCFG] = r6;
354 BITCLR(r6, 0); 350 BITCLR(r6, 0);
355 SYSCFG = r6; 351 SYSCFG = r6;
356 352
357 /* Disable all interrupts, but make sure level 5 is enabled so 353 /* Disable all interrupts, but make sure level 5 is enabled so
358 * we can switch to that level. Save the old mask. */ 354 * we can switch to that level. Save the old mask. */
359 cli r6; 355 cli r6;
360 [p4 + 8] = r6; 356 [p5 + PDA_EXIMASK] = r6;
361 357
362 p4.l = lo(SAFE_USER_INSTRUCTION); 358 p4.l = lo(SAFE_USER_INSTRUCTION);
363 p4.h = hi(SAFE_USER_INSTRUCTION); 359 p4.h = hi(SAFE_USER_INSTRUCTION);
@@ -371,9 +367,10 @@ ENTRY(_ex_trap_c)
371ENDPROC(_ex_trap_c) 367ENDPROC(_ex_trap_c)
372 368
373/* We just realized we got an exception, while we were processing a different 369/* We just realized we got an exception, while we were processing a different
374 * exception. This is a unrecoverable event, so crash 370 * exception. This is a unrecoverable event, so crash.
371 * Note: this cannot be ENTRY() as we jump here with "if cc jump" ...
375 */ 372 */
376ENTRY(_double_fault) 373_double_fault:
377 /* Turn caches & protection off, to ensure we don't get any more 374 /* Turn caches & protection off, to ensure we don't get any more
378 * double exceptions 375 * double exceptions
379 */ 376 */
@@ -424,17 +421,16 @@ ENDPROC(_double_fault)
424ENTRY(_exception_to_level5) 421ENTRY(_exception_to_level5)
425 SAVE_ALL_SYS 422 SAVE_ALL_SYS
426 423
427 p4.l = _saved_retx; 424 GET_PDA(p4, r7); /* Fetch current PDA */
428 p4.h = _saved_retx; 425 r6 = [p4 + PDA_RETX];
429 r6 = [p4];
430 [sp + PT_PC] = r6; 426 [sp + PT_PC] = r6;
431 427
432 r6 = [p4 + 4]; 428 r6 = [p4 + PDA_SYSCFG];
433 [sp + PT_SYSCFG] = r6; 429 [sp + PT_SYSCFG] = r6;
434 430
435 /* Restore interrupt mask. We haven't pushed RETI, so this 431 /* Restore interrupt mask. We haven't pushed RETI, so this
436 * doesn't enable interrupts until we return from this handler. */ 432 * doesn't enable interrupts until we return from this handler. */
437 r6 = [p4 + 8]; 433 r6 = [p4 + PDA_EXIMASK];
438 sti r6; 434 sti r6;
439 435
440 /* Restore the hardware error vector. */ 436 /* Restore the hardware error vector. */
@@ -478,8 +474,8 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
478 * scratch register (for want of a better option). 474 * scratch register (for want of a better option).
479 */ 475 */
480 EX_SCRATCH_REG = sp; 476 EX_SCRATCH_REG = sp;
481 sp.l = _exception_stack_top; 477 GET_PDA_SAFE(sp);
482 sp.h = _exception_stack_top; 478 sp = [sp + PDA_EXSTACK]
483 /* Try to deal with syscalls quickly. */ 479 /* Try to deal with syscalls quickly. */
484 [--sp] = ASTAT; 480 [--sp] = ASTAT;
485 [--sp] = (R7:6,P5:4); 481 [--sp] = (R7:6,P5:4);
@@ -501,27 +497,22 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
501 * but they are not very interesting, so don't save them 497 * but they are not very interesting, so don't save them
502 */ 498 */
503 499
500 GET_PDA(p5, r7);
504 p4.l = lo(DCPLB_FAULT_ADDR); 501 p4.l = lo(DCPLB_FAULT_ADDR);
505 p4.h = hi(DCPLB_FAULT_ADDR); 502 p4.h = hi(DCPLB_FAULT_ADDR);
506 r7 = [p4]; 503 r7 = [p4];
507 p5.h = _saved_dcplb_fault_addr; 504 [p5 + PDA_DCPLB] = r7;
508 p5.l = _saved_dcplb_fault_addr;
509 [p5] = r7;
510 505
511 r7 = [p4 + (ICPLB_FAULT_ADDR - DCPLB_FAULT_ADDR)]; 506 p4.l = lo(ICPLB_FAULT_ADDR);
512 p5.h = _saved_icplb_fault_addr; 507 p4.h = hi(ICPLB_FAULT_ADDR);
513 p5.l = _saved_icplb_fault_addr; 508 r7 = [p4];
514 [p5] = r7; 509 [p5 + PDA_ICPLB] = r7;
515 510
516 p4.l = _saved_retx;
517 p4.h = _saved_retx;
518 r6 = retx; 511 r6 = retx;
519 [p4] = r6; 512 [p5 + PDA_RETX] = r6;
520 513
521 r7 = SEQSTAT; /* reason code is in bit 5:0 */ 514 r7 = SEQSTAT; /* reason code is in bit 5:0 */
522 p4.l = _saved_seqstat; 515 [p5 + PDA_SEQSTAT] = r7;
523 p4.h = _saved_seqstat;
524 [p4] = r7;
525#else 516#else
526 r7 = SEQSTAT; /* reason code is in bit 5:0 */ 517 r7 = SEQSTAT; /* reason code is in bit 5:0 */
527#endif 518#endif
@@ -546,11 +537,11 @@ ENTRY(_kernel_execve)
546 p0 = sp; 537 p0 = sp;
547 r3 = SIZEOF_PTREGS / 4; 538 r3 = SIZEOF_PTREGS / 4;
548 r4 = 0(x); 539 r4 = 0(x);
5490: 540.Lclear_regs:
550 [p0++] = r4; 541 [p0++] = r4;
551 r3 += -1; 542 r3 += -1;
552 cc = r3 == 0; 543 cc = r3 == 0;
553 if !cc jump 0b (bp); 544 if !cc jump .Lclear_regs (bp);
554 545
555 p0 = sp; 546 p0 = sp;
556 sp += -16; 547 sp += -16;
@@ -558,7 +549,7 @@ ENTRY(_kernel_execve)
558 call _do_execve; 549 call _do_execve;
559 SP += 16; 550 SP += 16;
560 cc = r0 == 0; 551 cc = r0 == 0;
561 if ! cc jump 1f; 552 if ! cc jump .Lexecve_failed;
562 /* Success. Copy our temporary pt_regs to the top of the kernel 553 /* Success. Copy our temporary pt_regs to the top of the kernel
563 * stack and do a normal exception return. 554 * stack and do a normal exception return.
564 */ 555 */
@@ -574,12 +565,12 @@ ENTRY(_kernel_execve)
574 p0 = fp; 565 p0 = fp;
575 r4 = [p0--]; 566 r4 = [p0--];
576 r3 = SIZEOF_PTREGS / 4; 567 r3 = SIZEOF_PTREGS / 4;
5770: 568.Lcopy_regs:
578 r4 = [p0--]; 569 r4 = [p0--];
579 [p1--] = r4; 570 [p1--] = r4;
580 r3 += -1; 571 r3 += -1;
581 cc = r3 == 0; 572 cc = r3 == 0;
582 if ! cc jump 0b (bp); 573 if ! cc jump .Lcopy_regs (bp);
583 574
584 r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z); 575 r0 = (KERNEL_STACK_SIZE - SIZEOF_PTREGS) (z);
585 p1 = r0; 576 p1 = r0;
@@ -591,7 +582,7 @@ ENTRY(_kernel_execve)
591 582
592 RESTORE_CONTEXT; 583 RESTORE_CONTEXT;
593 rti; 584 rti;
5941: 585.Lexecve_failed:
595 unlink; 586 unlink;
596 rts; 587 rts;
597ENDPROC(_kernel_execve) 588ENDPROC(_kernel_execve)
@@ -925,9 +916,14 @@ _schedule_and_signal_from_int:
925 p1 = rets; 916 p1 = rets;
926 [sp + PT_RESERVED] = p1; 917 [sp + PT_RESERVED] = p1;
927 918
928 p0.l = _irq_flags; 919#ifdef CONFIG_SMP
929 p0.h = _irq_flags; 920 GET_PDA(p0, r0); /* Fetch current PDA (can't migrate to other CPU here) */
921 r0 = [p0 + PDA_IRQFLAGS];
922#else
923 p0.l = _bfin_irq_flags;
924 p0.h = _bfin_irq_flags;
930 r0 = [p0]; 925 r0 = [p0];
926#endif
931 sti r0; 927 sti r0;
932 928
933 r0 = sp; 929 r0 = sp;
@@ -1539,14 +1535,18 @@ ENTRY(_sys_call_table)
1539 .endr 1535 .endr
1540END(_sys_call_table) 1536END(_sys_call_table)
1541 1537
1542_exception_stack: 1538#ifdef CONFIG_EXCEPTION_L1_SCRATCH
1543 .rept 1024 1539/* .section .l1.bss.scratch */
1544 .long 0; 1540.set _exception_stack_top, L1_SCRATCH_START + L1_SCRATCH_LENGTH
1541#else
1542#ifdef CONFIG_SYSCALL_TAB_L1
1543.section .l1.bss
1544#else
1545.bss
1546#endif
1547ENTRY(_exception_stack)
1548 .rept 1024 * NR_CPUS
1549 .long 0
1545 .endr 1550 .endr
1546_exception_stack_top: 1551_exception_stack_top:
1547
1548#if ANOMALY_05000261
1549/* Used by the assembly entry point to work around an anomaly. */
1550_last_cplb_fault_retx:
1551 .long 0;
1552#endif 1552#endif
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index f123a62e2451..e1e42c029e15 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -13,6 +13,7 @@
13#include <asm/blackfin.h> 13#include <asm/blackfin.h>
14#include <asm/thread_info.h> 14#include <asm/thread_info.h>
15#include <asm/trace.h> 15#include <asm/trace.h>
16#include <asm/asm-offsets.h>
16 17
17__INIT 18__INIT
18 19
@@ -111,33 +112,26 @@ ENTRY(__start)
111 * This happens here, since L1 gets clobbered 112 * This happens here, since L1 gets clobbered
112 * below 113 * below
113 */ 114 */
114 p0.l = _saved_retx; 115 GET_PDA(p0, r0);
115 p0.h = _saved_retx; 116 r7 = [p0 + PDA_RETX];
116 p1.l = _init_saved_retx; 117 p1.l = _init_saved_retx;
117 p1.h = _init_saved_retx; 118 p1.h = _init_saved_retx;
118 r0 = [p0]; 119 [p1] = r7;
119 [p1] = r0;
120 120
121 p0.l = _saved_dcplb_fault_addr; 121 r7 = [p0 + PDA_DCPLB];
122 p0.h = _saved_dcplb_fault_addr;
123 p1.l = _init_saved_dcplb_fault_addr; 122 p1.l = _init_saved_dcplb_fault_addr;
124 p1.h = _init_saved_dcplb_fault_addr; 123 p1.h = _init_saved_dcplb_fault_addr;
125 r0 = [p0]; 124 [p1] = r7;
126 [p1] = r0;
127 125
128 p0.l = _saved_icplb_fault_addr; 126 r7 = [p0 + PDA_ICPLB];
129 p0.h = _saved_icplb_fault_addr;
130 p1.l = _init_saved_icplb_fault_addr; 127 p1.l = _init_saved_icplb_fault_addr;
131 p1.h = _init_saved_icplb_fault_addr; 128 p1.h = _init_saved_icplb_fault_addr;
132 r0 = [p0]; 129 [p1] = r7;
133 [p1] = r0;
134 130
135 p0.l = _saved_seqstat; 131 r7 = [p0 + PDA_SEQSTAT];
136 p0.h = _saved_seqstat;
137 p1.l = _init_saved_seqstat; 132 p1.l = _init_saved_seqstat;
138 p1.h = _init_saved_seqstat; 133 p1.h = _init_saved_seqstat;
139 r0 = [p0]; 134 [p1] = r7;
140 [p1] = r0;
141#endif 135#endif
142 136
143 /* Initialize stack pointer */ 137 /* Initialize stack pointer */
@@ -153,7 +147,7 @@ ENTRY(__start)
153 /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */ 147 /* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
154 call _bfin_relocate_l1_mem; 148 call _bfin_relocate_l1_mem;
155#ifdef CONFIG_BFIN_KERNEL_CLOCK 149#ifdef CONFIG_BFIN_KERNEL_CLOCK
156 call _start_dma_code; 150 call _init_clocks;
157#endif 151#endif
158 152
159 /* This section keeps the processor in supervisor mode 153 /* This section keeps the processor in supervisor mode
@@ -170,12 +164,8 @@ ENTRY(__start)
170 [p0] = p1; 164 [p0] = p1;
171 csync; 165 csync;
172 166
173 p0.l = lo(IMASK); 167 r0 = EVT_IVG15 (z);
174 p0.h = hi(IMASK); 168 sti r0;
175 p1.l = IMASK_IVG15;
176 p1.h = 0x0;
177 [p0] = p1;
178 csync;
179 169
180 raise 15; 170 raise 15;
181 p0.l = .LWAIT_HERE; 171 p0.l = .LWAIT_HERE;
@@ -195,6 +185,19 @@ ENDPROC(__start)
195# define WDOG_CTL WDOGA_CTL 185# define WDOG_CTL WDOGA_CTL
196#endif 186#endif
197 187
188ENTRY(__init_clear_bss)
189 r2 = r2 - r1;
190 cc = r2 == 0;
191 if cc jump .L_bss_done;
192 r2 >>= 2;
193 p1 = r1;
194 p2 = r2;
195 lsetup (1f, 1f) lc0 = p2;
1961: [p1++] = r0;
197.L_bss_done:
198 rts;
199ENDPROC(__init_clear_bss)
200
198ENTRY(_real_start) 201ENTRY(_real_start)
199 /* Enable nested interrupts */ 202 /* Enable nested interrupts */
200 [--sp] = reti; 203 [--sp] = reti;
@@ -206,87 +209,34 @@ ENTRY(_real_start)
206 w[p0] = r0; 209 w[p0] = r0;
207 ssync; 210 ssync;
208 211
212 r0 = 0 (x);
213 /* Zero out all of the fun bss regions */
209#if L1_DATA_A_LENGTH > 0 214#if L1_DATA_A_LENGTH > 0
210 r1.l = __sbss_l1; 215 r1.l = __sbss_l1;
211 r1.h = __sbss_l1; 216 r1.h = __sbss_l1;
212 r2.l = __ebss_l1; 217 r2.l = __ebss_l1;
213 r2.h = __ebss_l1; 218 r2.h = __ebss_l1;
214 r0 = 0 (z); 219 call __init_clear_bss
215 r2 = r2 - r1;
216 cc = r2 == 0;
217 if cc jump .L_a_l1_done;
218 r2 >>= 2;
219 p1 = r1;
220 p2 = r2;
221 lsetup (.L_clear_a_l1, .L_clear_a_l1 ) lc0 = p2;
222.L_clear_a_l1:
223 [p1++] = r0;
224.L_a_l1_done:
225#endif 220#endif
226
227#if L1_DATA_B_LENGTH > 0 221#if L1_DATA_B_LENGTH > 0
228 r1.l = __sbss_b_l1; 222 r1.l = __sbss_b_l1;
229 r1.h = __sbss_b_l1; 223 r1.h = __sbss_b_l1;
230 r2.l = __ebss_b_l1; 224 r2.l = __ebss_b_l1;
231 r2.h = __ebss_b_l1; 225 r2.h = __ebss_b_l1;
232 r0 = 0 (z); 226 call __init_clear_bss
233 r2 = r2 - r1;
234 cc = r2 == 0;
235 if cc jump .L_b_l1_done;
236 r2 >>= 2;
237 p1 = r1;
238 p2 = r2;
239 lsetup (.L_clear_b_l1, .L_clear_b_l1 ) lc0 = p2;
240.L_clear_b_l1:
241 [p1++] = r0;
242.L_b_l1_done:
243#endif 227#endif
244
245#if L2_LENGTH > 0 228#if L2_LENGTH > 0
246 r1.l = __sbss_l2; 229 r1.l = __sbss_l2;
247 r1.h = __sbss_l2; 230 r1.h = __sbss_l2;
248 r2.l = __ebss_l2; 231 r2.l = __ebss_l2;
249 r2.h = __ebss_l2; 232 r2.h = __ebss_l2;
250 r0 = 0 (z); 233 call __init_clear_bss
251 r2 = r2 - r1;
252 cc = r2 == 0;
253 if cc jump .L_l2_done;
254 r2 >>= 2;
255 p1 = r1;
256 p2 = r2;
257 lsetup (.L_clear_l2, .L_clear_l2 ) lc0 = p2;
258.L_clear_l2:
259 [p1++] = r0;
260.L_l2_done:
261#endif 234#endif
262
263 /* Zero out the bss region
264 * Note: this will fail if bss is 0 bytes ...
265 */
266 r0 = 0 (z);
267 r1.l = ___bss_start; 235 r1.l = ___bss_start;
268 r1.h = ___bss_start; 236 r1.h = ___bss_start;
269 r2.l = ___bss_stop; 237 r2.l = ___bss_stop;
270 r2.h = ___bss_stop; 238 r2.h = ___bss_stop;
271 r2 = r2 - r1; 239 call __init_clear_bss
272 r2 >>= 2;
273 p1 = r1;
274 p2 = r2;
275 lsetup (.L_clear_bss, .L_clear_bss) lc0 = p2;
276.L_clear_bss:
277 [p1++] = r0;
278
279 /* In case there is a NULL pointer reference,
280 * zero out region before stext
281 */
282 p1 = r0;
283 r2.l = __stext;
284 r2.h = __stext;
285 r2 >>= 2;
286 p2 = r2;
287 lsetup (.L_clear_zero, .L_clear_zero) lc0 = p2;
288.L_clear_zero:
289 [p1++] = r0;
290 240
291 /* Pass the u-boot arguments to the global value command line */ 241 /* Pass the u-boot arguments to the global value command line */
292 R0 = R7; 242 R0 = R7;
@@ -299,6 +249,9 @@ ENTRY(_real_start)
299 sp = sp + p1; 249 sp = sp + p1;
300 usp = sp; 250 usp = sp;
301 fp = sp; 251 fp = sp;
252 sp += -12;
253 call _init_pda
254 sp += 12;
302 jump.l _start_kernel; 255 jump.l _start_kernel;
303ENDPROC(_real_start) 256ENDPROC(_real_start)
304 257
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 4a2ec7a9675a..473df0f7fa78 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -129,8 +129,15 @@ __common_int_entry:
129#endif 129#endif
130 r1 = sp; 130 r1 = sp;
131 SP += -12; 131 SP += -12;
132#ifdef CONFIG_IPIPE
133 call ___ipipe_grab_irq
134 SP += 12;
135 cc = r0 == 0;
136 if cc jump .Lcommon_restore_context;
137#else /* CONFIG_IPIPE */
132 call _do_irq; 138 call _do_irq;
133 SP += 12; 139 SP += 12;
140#endif /* CONFIG_IPIPE */
134 call _return_from_int; 141 call _return_from_int;
135.Lcommon_restore_context: 142.Lcommon_restore_context:
136 RESTORE_CONTEXT 143 RESTORE_CONTEXT
@@ -152,15 +159,6 @@ ENTRY(_evt_ivhw)
1521: 1591:
153#endif 160#endif
154 161
155#ifdef CONFIG_HARDWARE_PM
156 r7 = [sp + PT_SEQSTAT];
157 r7 = r7 >>> 0xe;
158 r6 = 0x1F;
159 r7 = r7 & r6;
160 r5 = 0x12;
161 cc = r7 == r5;
162 if cc jump .Lcall_do_ovf; /* deal with performance counter overflow */
163#endif
164 # We are going to dump something out, so make sure we print IPEND properly 162 # We are going to dump something out, so make sure we print IPEND properly
165 p2.l = lo(IPEND); 163 p2.l = lo(IPEND);
166 p2.h = hi(IPEND); 164 p2.h = hi(IPEND);
@@ -192,17 +190,6 @@ ENTRY(_evt_ivhw)
192.Lcommon_restore_all_sys: 190.Lcommon_restore_all_sys:
193 RESTORE_ALL_SYS 191 RESTORE_ALL_SYS
194 rti; 192 rti;
195
196#ifdef CONFIG_HARDWARE_PM
197.Lcall_do_ovf:
198
199 SP += -12;
200 call _pm_overflow;
201 SP += 12;
202
203 jump .Lcommon_restore_all_sys;
204#endif
205
206ENDPROC(_evt_ivhw) 193ENDPROC(_evt_ivhw)
207 194
208/* Interrupt routine for evt2 (NMI). 195/* Interrupt routine for evt2 (NMI).
@@ -245,3 +232,56 @@ ENTRY(_evt_system_call)
245 call _system_call; 232 call _system_call;
246 jump .Lcommon_restore_context; 233 jump .Lcommon_restore_context;
247ENDPROC(_evt_system_call) 234ENDPROC(_evt_system_call)
235
236#ifdef CONFIG_IPIPE
237ENTRY(___ipipe_call_irqtail)
238 r0.l = 1f;
239 r0.h = 1f;
240 reti = r0;
241 rti;
2421:
243 [--sp] = rets;
244 [--sp] = ( r7:4, p5:3 );
245 p0.l = ___ipipe_irq_tail_hook;
246 p0.h = ___ipipe_irq_tail_hook;
247 p0 = [p0];
248 sp += -12;
249 call (p0);
250 sp += 12;
251 ( r7:4, p5:3 ) = [sp++];
252 rets = [sp++];
253
254 [--sp] = reti;
255 reti = [sp++]; /* IRQs are off. */
256 r0.h = 3f;
257 r0.l = 3f;
258 p0.l = lo(EVT14);
259 p0.h = hi(EVT14);
260 [p0] = r0;
261 csync;
262 r0 = 0x401f;
263 sti r0;
264 raise 14;
265 [--sp] = reti; /* IRQs on. */
2662:
267 jump 2b; /* Likely paranoid. */
2683:
269 sp += 4; /* Discard saved RETI */
270 r0.h = _evt14_softirq;
271 r0.l = _evt14_softirq;
272 p0.l = lo(EVT14);
273 p0.h = hi(EVT14);
274 [p0] = r0;
275 csync;
276 p0.l = _bfin_irq_flags;
277 p0.h = _bfin_irq_flags;
278 r0 = [p0];
279 sti r0;
280#if 0 /* FIXME: this actually raises scheduling latencies */
281 /* Reenable interrupts */
282 [--sp] = reti;
283 r0 = [sp++];
284#endif
285 rts;
286ENDPROC(___ipipe_call_irqtail)
287#endif /* CONFIG_IPIPE */
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 34e8a726ffda..1bba6030dce9 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -1,9 +1,6 @@
1/* 1/*
2 * File: arch/blackfin/mach-common/ints-priority.c 2 * File: arch/blackfin/mach-common/ints-priority.c
3 * Based on:
4 * Author:
5 * 3 *
6 * Created: ?
7 * Description: Set up the interrupt priorities 4 * Description: Set up the interrupt priorities
8 * 5 *
9 * Modified: 6 * Modified:
@@ -37,6 +34,9 @@
37#include <linux/kernel_stat.h> 34#include <linux/kernel_stat.h>
38#include <linux/seq_file.h> 35#include <linux/seq_file.h>
39#include <linux/irq.h> 36#include <linux/irq.h>
37#ifdef CONFIG_IPIPE
38#include <linux/ipipe.h>
39#endif
40#ifdef CONFIG_KGDB 40#ifdef CONFIG_KGDB
41#include <linux/kgdb.h> 41#include <linux/kgdb.h>
42#endif 42#endif
@@ -45,6 +45,8 @@
45#include <asm/gpio.h> 45#include <asm/gpio.h>
46#include <asm/irq_handler.h> 46#include <asm/irq_handler.h>
47 47
48#define SIC_SYSIRQ(irq) (irq - (IRQ_CORETMR + 1))
49
48#ifdef BF537_FAMILY 50#ifdef BF537_FAMILY
49# define BF537_GENERIC_ERROR_INT_DEMUX 51# define BF537_GENERIC_ERROR_INT_DEMUX
50#else 52#else
@@ -58,13 +60,16 @@
58 * - 60 * -
59 */ 61 */
60 62
63#ifndef CONFIG_SMP
61/* Initialize this to an actual value to force it into the .data 64/* Initialize this to an actual value to force it into the .data
62 * section so that we know it is properly initialized at entry into 65 * section so that we know it is properly initialized at entry into
63 * the kernel but before bss is initialized to zero (which is where 66 * the kernel but before bss is initialized to zero (which is where
64 * it would live otherwise). The 0x1f magic represents the IRQs we 67 * it would live otherwise). The 0x1f magic represents the IRQs we
65 * cannot actually mask out in hardware. 68 * cannot actually mask out in hardware.
66 */ 69 */
67unsigned long irq_flags = 0x1f; 70unsigned long bfin_irq_flags = 0x1f;
71EXPORT_SYMBOL(bfin_irq_flags);
72#endif
68 73
69/* The number of spurious interrupts */ 74/* The number of spurious interrupts */
70atomic_t num_spurious; 75atomic_t num_spurious;
@@ -103,12 +108,14 @@ static void __init search_IAR(void)
103 for (irqn = 0; irqn < NR_PERI_INTS; irqn++) { 108 for (irqn = 0; irqn < NR_PERI_INTS; irqn++) {
104 int iar_shift = (irqn & 7) * 4; 109 int iar_shift = (irqn & 7) * 4;
105 if (ivg == (0xf & 110 if (ivg == (0xf &
106#ifndef CONFIG_BF52x 111#if defined(CONFIG_BF52x) || defined(CONFIG_BF538) \
112 || defined(CONFIG_BF539) || defined(CONFIG_BF51x)
107 bfin_read32((unsigned long *)SIC_IAR0 + 113 bfin_read32((unsigned long *)SIC_IAR0 +
108 (irqn >> 3)) >> iar_shift)) { 114 ((irqn % 32) >> 3) + ((irqn / 32) *
115 ((SIC_IAR4 - SIC_IAR0) / 4))) >> iar_shift)) {
109#else 116#else
110 bfin_read32((unsigned long *)SIC_IAR0 + 117 bfin_read32((unsigned long *)SIC_IAR0 +
111 ((irqn%32) >> 3) + ((irqn / 32) * 16)) >> iar_shift)) { 118 (irqn >> 3)) >> iar_shift)) {
112#endif 119#endif
113 ivg_table[irq_pos].irqno = IVG7 + irqn; 120 ivg_table[irq_pos].irqno = IVG7 + irqn;
114 ivg_table[irq_pos].isrflag = 1 << (irqn % 32); 121 ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
@@ -130,25 +137,25 @@ static void bfin_ack_noop(unsigned int irq)
130 137
131static void bfin_core_mask_irq(unsigned int irq) 138static void bfin_core_mask_irq(unsigned int irq)
132{ 139{
133 irq_flags &= ~(1 << irq); 140 bfin_irq_flags &= ~(1 << irq);
134 if (!irqs_disabled()) 141 if (!irqs_disabled_hw())
135 local_irq_enable(); 142 local_irq_enable_hw();
136} 143}
137 144
138static void bfin_core_unmask_irq(unsigned int irq) 145static void bfin_core_unmask_irq(unsigned int irq)
139{ 146{
140 irq_flags |= 1 << irq; 147 bfin_irq_flags |= 1 << irq;
141 /* 148 /*
142 * If interrupts are enabled, IMASK must contain the same value 149 * If interrupts are enabled, IMASK must contain the same value
143 * as irq_flags. Make sure that invariant holds. If interrupts 150 * as bfin_irq_flags. Make sure that invariant holds. If interrupts
144 * are currently disabled we need not do anything; one of the 151 * are currently disabled we need not do anything; one of the
145 * callers will take care of setting IMASK to the proper value 152 * callers will take care of setting IMASK to the proper value
146 * when reenabling interrupts. 153 * when reenabling interrupts.
147 * local_irq_enable just does "STI irq_flags", so it's exactly 154 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
148 * what we need. 155 * what we need.
149 */ 156 */
150 if (!irqs_disabled()) 157 if (!irqs_disabled_hw())
151 local_irq_enable(); 158 local_irq_enable_hw();
152 return; 159 return;
153} 160}
154 161
@@ -163,8 +170,11 @@ static void bfin_internal_mask_irq(unsigned int irq)
163 mask_bit = SIC_SYSIRQ(irq) % 32; 170 mask_bit = SIC_SYSIRQ(irq) % 32;
164 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) & 171 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
165 ~(1 << mask_bit)); 172 ~(1 << mask_bit));
173#ifdef CONFIG_SMP
174 bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
175 ~(1 << mask_bit));
176#endif
166#endif 177#endif
167 SSYNC();
168} 178}
169 179
170static void bfin_internal_unmask_irq(unsigned int irq) 180static void bfin_internal_unmask_irq(unsigned int irq)
@@ -178,14 +188,17 @@ static void bfin_internal_unmask_irq(unsigned int irq)
178 mask_bit = SIC_SYSIRQ(irq) % 32; 188 mask_bit = SIC_SYSIRQ(irq) % 32;
179 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) | 189 bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
180 (1 << mask_bit)); 190 (1 << mask_bit));
191#ifdef CONFIG_SMP
192 bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) |
193 (1 << mask_bit));
194#endif
181#endif 195#endif
182 SSYNC();
183} 196}
184 197
185#ifdef CONFIG_PM 198#ifdef CONFIG_PM
186int bfin_internal_set_wake(unsigned int irq, unsigned int state) 199int bfin_internal_set_wake(unsigned int irq, unsigned int state)
187{ 200{
188 unsigned bank, bit, wakeup = 0; 201 u32 bank, bit, wakeup = 0;
189 unsigned long flags; 202 unsigned long flags;
190 bank = SIC_SYSIRQ(irq) / 32; 203 bank = SIC_SYSIRQ(irq) / 32;
191 bit = SIC_SYSIRQ(irq) % 32; 204 bit = SIC_SYSIRQ(irq) % 32;
@@ -225,7 +238,7 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
225 break; 238 break;
226 } 239 }
227 240
228 local_irq_save(flags); 241 local_irq_save_hw(flags);
229 242
230 if (state) { 243 if (state) {
231 bfin_sic_iwr[bank] |= (1 << bit); 244 bfin_sic_iwr[bank] |= (1 << bit);
@@ -236,7 +249,7 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
236 vr_wakeup &= ~wakeup; 249 vr_wakeup &= ~wakeup;
237 } 250 }
238 251
239 local_irq_restore(flags); 252 local_irq_restore_hw(flags);
240 253
241 return 0; 254 return 0;
242} 255}
@@ -262,6 +275,19 @@ static struct irq_chip bfin_internal_irqchip = {
262#endif 275#endif
263}; 276};
264 277
278static void bfin_handle_irq(unsigned irq)
279{
280#ifdef CONFIG_IPIPE
281 struct pt_regs regs; /* Contents not used. */
282 ipipe_trace_irq_entry(irq);
283 __ipipe_handle_irq(irq, &regs);
284 ipipe_trace_irq_exit(irq);
285#else /* !CONFIG_IPIPE */
286 struct irq_desc *desc = irq_desc + irq;
287 desc->handle_irq(irq, desc);
288#endif /* !CONFIG_IPIPE */
289}
290
265#ifdef BF537_GENERIC_ERROR_INT_DEMUX 291#ifdef BF537_GENERIC_ERROR_INT_DEMUX
266static int error_int_mask; 292static int error_int_mask;
267 293
@@ -292,8 +318,6 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
292{ 318{
293 int irq = 0; 319 int irq = 0;
294 320
295 SSYNC();
296
297#if (defined(CONFIG_BF537) || defined(CONFIG_BF536)) 321#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
298 if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK) 322 if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
299 irq = IRQ_MAC_ERROR; 323 irq = IRQ_MAC_ERROR;
@@ -317,10 +341,9 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
317 irq = IRQ_UART1_ERROR; 341 irq = IRQ_UART1_ERROR;
318 342
319 if (irq) { 343 if (irq) {
320 if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR))) { 344 if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
321 struct irq_desc *desc = irq_desc + irq; 345 bfin_handle_irq(irq);
322 desc->handle_irq(irq, desc); 346 else {
323 } else {
324 347
325 switch (irq) { 348 switch (irq) {
326 case IRQ_PPI_ERROR: 349 case IRQ_PPI_ERROR:
@@ -366,62 +389,57 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
366 389
367static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle) 390static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
368{ 391{
392#ifdef CONFIG_IPIPE
393 _set_irq_handler(irq, handle_edge_irq);
394#else
369 struct irq_desc *desc = irq_desc + irq; 395 struct irq_desc *desc = irq_desc + irq;
370 /* May not call generic set_irq_handler() due to spinlock 396 /* May not call generic set_irq_handler() due to spinlock
371 recursion. */ 397 recursion. */
372 desc->handle_irq = handle; 398 desc->handle_irq = handle;
399#endif
373} 400}
374 401
375#if !defined(CONFIG_BF54x) 402static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
376
377static unsigned short gpio_enabled[gpio_bank(MAX_BLACKFIN_GPIOS)];
378static unsigned short gpio_edge_triggered[gpio_bank(MAX_BLACKFIN_GPIOS)];
379
380extern void bfin_gpio_irq_prepare(unsigned gpio); 403extern void bfin_gpio_irq_prepare(unsigned gpio);
381 404
405#if !defined(CONFIG_BF54x)
406
382static void bfin_gpio_ack_irq(unsigned int irq) 407static void bfin_gpio_ack_irq(unsigned int irq)
383{ 408{
384 u16 gpionr = irq - IRQ_PF0; 409 /* AFAIK ack_irq in case mask_ack is provided
385 410 * get's only called for edge sense irqs
386 if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) { 411 */
387 set_gpio_data(gpionr, 0); 412 set_gpio_data(irq_to_gpio(irq), 0);
388 SSYNC();
389 }
390} 413}
391 414
392static void bfin_gpio_mask_ack_irq(unsigned int irq) 415static void bfin_gpio_mask_ack_irq(unsigned int irq)
393{ 416{
394 u16 gpionr = irq - IRQ_PF0; 417 struct irq_desc *desc = irq_desc + irq;
418 u32 gpionr = irq_to_gpio(irq);
395 419
396 if (gpio_edge_triggered[gpio_bank(gpionr)] & gpio_bit(gpionr)) { 420 if (desc->handle_irq == handle_edge_irq)
397 set_gpio_data(gpionr, 0); 421 set_gpio_data(gpionr, 0);
398 SSYNC();
399 }
400 422
401 set_gpio_maska(gpionr, 0); 423 set_gpio_maska(gpionr, 0);
402 SSYNC();
403} 424}
404 425
405static void bfin_gpio_mask_irq(unsigned int irq) 426static void bfin_gpio_mask_irq(unsigned int irq)
406{ 427{
407 set_gpio_maska(irq - IRQ_PF0, 0); 428 set_gpio_maska(irq_to_gpio(irq), 0);
408 SSYNC();
409} 429}
410 430
411static void bfin_gpio_unmask_irq(unsigned int irq) 431static void bfin_gpio_unmask_irq(unsigned int irq)
412{ 432{
413 set_gpio_maska(irq - IRQ_PF0, 1); 433 set_gpio_maska(irq_to_gpio(irq), 1);
414 SSYNC();
415} 434}
416 435
417static unsigned int bfin_gpio_irq_startup(unsigned int irq) 436static unsigned int bfin_gpio_irq_startup(unsigned int irq)
418{ 437{
419 u16 gpionr = irq - IRQ_PF0; 438 u32 gpionr = irq_to_gpio(irq);
420 439
421 if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) 440 if (__test_and_set_bit(gpionr, gpio_enabled))
422 bfin_gpio_irq_prepare(gpionr); 441 bfin_gpio_irq_prepare(gpionr);
423 442
424 gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
425 bfin_gpio_unmask_irq(irq); 443 bfin_gpio_unmask_irq(irq);
426 444
427 return 0; 445 return 0;
@@ -429,29 +447,39 @@ static unsigned int bfin_gpio_irq_startup(unsigned int irq)
429 447
430static void bfin_gpio_irq_shutdown(unsigned int irq) 448static void bfin_gpio_irq_shutdown(unsigned int irq)
431{ 449{
450 u32 gpionr = irq_to_gpio(irq);
451
432 bfin_gpio_mask_irq(irq); 452 bfin_gpio_mask_irq(irq);
433 gpio_enabled[gpio_bank(irq - IRQ_PF0)] &= ~gpio_bit(irq - IRQ_PF0); 453 __clear_bit(gpionr, gpio_enabled);
454 bfin_gpio_irq_free(gpionr);
434} 455}
435 456
436static int bfin_gpio_irq_type(unsigned int irq, unsigned int type) 457static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
437{ 458{
438 u16 gpionr = irq - IRQ_PF0; 459 int ret;
460 char buf[16];
461 u32 gpionr = irq_to_gpio(irq);
439 462
440 if (type == IRQ_TYPE_PROBE) { 463 if (type == IRQ_TYPE_PROBE) {
441 /* only probe unenabled GPIO interrupt lines */ 464 /* only probe unenabled GPIO interrupt lines */
442 if (gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr)) 465 if (__test_bit(gpionr, gpio_enabled))
443 return 0; 466 return 0;
444 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; 467 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
445 } 468 }
446 469
447 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | 470 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
448 IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { 471 IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
449 if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) 472
473 snprintf(buf, 16, "gpio-irq%d", irq);
474 ret = bfin_gpio_irq_request(gpionr, buf);
475 if (ret)
476 return ret;
477
478 if (__test_and_set_bit(gpionr, gpio_enabled))
450 bfin_gpio_irq_prepare(gpionr); 479 bfin_gpio_irq_prepare(gpionr);
451 480
452 gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
453 } else { 481 } else {
454 gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr); 482 __clear_bit(gpionr, gpio_enabled);
455 return 0; 483 return 0;
456 } 484 }
457 485
@@ -472,17 +500,13 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
472 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { 500 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
473 set_gpio_edge(gpionr, 1); 501 set_gpio_edge(gpionr, 1);
474 set_gpio_inen(gpionr, 1); 502 set_gpio_inen(gpionr, 1);
475 gpio_edge_triggered[gpio_bank(gpionr)] |= gpio_bit(gpionr);
476 set_gpio_data(gpionr, 0); 503 set_gpio_data(gpionr, 0);
477 504
478 } else { 505 } else {
479 set_gpio_edge(gpionr, 0); 506 set_gpio_edge(gpionr, 0);
480 gpio_edge_triggered[gpio_bank(gpionr)] &= ~gpio_bit(gpionr);
481 set_gpio_inen(gpionr, 1); 507 set_gpio_inen(gpionr, 1);
482 } 508 }
483 509
484 SSYNC();
485
486 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) 510 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
487 bfin_set_irq_handler(irq, handle_edge_irq); 511 bfin_set_irq_handler(irq, handle_edge_irq);
488 else 512 else
@@ -505,22 +529,6 @@ int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
505} 529}
506#endif 530#endif
507 531
508static struct irq_chip bfin_gpio_irqchip = {
509 .name = "GPIO",
510 .ack = bfin_gpio_ack_irq,
511 .mask = bfin_gpio_mask_irq,
512 .mask_ack = bfin_gpio_mask_ack_irq,
513 .unmask = bfin_gpio_unmask_irq,
514 .disable = bfin_gpio_mask_irq,
515 .enable = bfin_gpio_unmask_irq,
516 .set_type = bfin_gpio_irq_type,
517 .startup = bfin_gpio_irq_startup,
518 .shutdown = bfin_gpio_irq_shutdown,
519#ifdef CONFIG_PM
520 .set_wake = bfin_gpio_set_wake,
521#endif
522};
523
524static void bfin_demux_gpio_irq(unsigned int inta_irq, 532static void bfin_demux_gpio_irq(unsigned int inta_irq,
525 struct irq_desc *desc) 533 struct irq_desc *desc)
526{ 534{
@@ -537,7 +545,11 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
537 irq = IRQ_PH0; 545 irq = IRQ_PH0;
538 break; 546 break;
539# endif 547# endif
540#elif defined(CONFIG_BF52x) 548#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
549 case IRQ_PORTF_INTA:
550 irq = IRQ_PF0;
551 break;
552#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
541 case IRQ_PORTF_INTA: 553 case IRQ_PORTF_INTA:
542 irq = IRQ_PF0; 554 irq = IRQ_PF0;
543 break; 555 break;
@@ -567,30 +579,22 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
567 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) { 579 for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
568 irq += i; 580 irq += i;
569 581
570 mask = get_gpiop_data(i) & 582 mask = get_gpiop_data(i) & get_gpiop_maska(i);
571 (gpio_enabled[gpio_bank(i)] &
572 get_gpiop_maska(i));
573 583
574 while (mask) { 584 while (mask) {
575 if (mask & 1) { 585 if (mask & 1)
576 desc = irq_desc + irq; 586 bfin_handle_irq(irq);
577 desc->handle_irq(irq, desc);
578 }
579 irq++; 587 irq++;
580 mask >>= 1; 588 mask >>= 1;
581 } 589 }
582 } 590 }
583 } else { 591 } else {
584 gpio = irq_to_gpio(irq); 592 gpio = irq_to_gpio(irq);
585 mask = get_gpiop_data(gpio) & 593 mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
586 (gpio_enabled[gpio_bank(gpio)] &
587 get_gpiop_maska(gpio));
588 594
589 do { 595 do {
590 if (mask & 1) { 596 if (mask & 1)
591 desc = irq_desc + irq; 597 bfin_handle_irq(irq);
592 desc->handle_irq(irq, desc);
593 }
594 irq++; 598 irq++;
595 mask >>= 1; 599 mask >>= 1;
596 } while (mask); 600 } while (mask);
@@ -612,10 +616,6 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
612static unsigned char irq2pint_lut[NR_PINTS]; 616static unsigned char irq2pint_lut[NR_PINTS];
613static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS]; 617static unsigned char pint2irq_lut[NR_PINT_SYS_IRQS * NR_PINT_BITS];
614 618
615static unsigned int gpio_both_edge_triggered[NR_PINT_SYS_IRQS];
616static unsigned short gpio_enabled[gpio_bank(MAX_BLACKFIN_GPIOS)];
617
618
619struct pin_int_t { 619struct pin_int_t {
620 unsigned int mask_set; 620 unsigned int mask_set;
621 unsigned int mask_clear; 621 unsigned int mask_clear;
@@ -636,12 +636,9 @@ static struct pin_int_t *pint[NR_PINT_SYS_IRQS] = {
636 (struct pin_int_t *)PINT3_MASK_SET, 636 (struct pin_int_t *)PINT3_MASK_SET,
637}; 637};
638 638
639extern void bfin_gpio_irq_prepare(unsigned gpio); 639inline unsigned int get_irq_base(u32 bank, u8 bmap)
640
641inline unsigned short get_irq_base(u8 bank, u8 bmap)
642{ 640{
643 641 unsigned int irq_base;
644 u16 irq_base;
645 642
646 if (bank < 2) { /*PA-PB */ 643 if (bank < 2) { /*PA-PB */
647 irq_base = IRQ_PA0 + bmap * 16; 644 irq_base = IRQ_PA0 + bmap * 16;
@@ -650,7 +647,6 @@ inline unsigned short get_irq_base(u8 bank, u8 bmap)
650 } 647 }
651 648
652 return irq_base; 649 return irq_base;
653
654} 650}
655 651
656 /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */ 652 /* Whenever PINTx_ASSIGN is altered init_pint_lut() must be executed! */
@@ -677,20 +673,18 @@ void init_pint_lut(void)
677 673
678 pint2irq_lut[bit_pos] = irq_base - SYS_IRQS; 674 pint2irq_lut[bit_pos] = irq_base - SYS_IRQS;
679 irq2pint_lut[irq_base - SYS_IRQS] = bit_pos; 675 irq2pint_lut[irq_base - SYS_IRQS] = bit_pos;
680
681 } 676 }
682
683 } 677 }
684
685} 678}
686 679
687static void bfin_gpio_ack_irq(unsigned int irq) 680static void bfin_gpio_ack_irq(unsigned int irq)
688{ 681{
689 u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; 682 struct irq_desc *desc = irq_desc + irq;
683 u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
690 u32 pintbit = PINT_BIT(pint_val); 684 u32 pintbit = PINT_BIT(pint_val);
691 u8 bank = PINT_2_BANK(pint_val); 685 u32 bank = PINT_2_BANK(pint_val);
692 686
693 if (unlikely(gpio_both_edge_triggered[bank] & pintbit)) { 687 if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
694 if (pint[bank]->invert_set & pintbit) 688 if (pint[bank]->invert_set & pintbit)
695 pint[bank]->invert_clear = pintbit; 689 pint[bank]->invert_clear = pintbit;
696 else 690 else
@@ -698,16 +692,16 @@ static void bfin_gpio_ack_irq(unsigned int irq)
698 } 692 }
699 pint[bank]->request = pintbit; 693 pint[bank]->request = pintbit;
700 694
701 SSYNC();
702} 695}
703 696
704static void bfin_gpio_mask_ack_irq(unsigned int irq) 697static void bfin_gpio_mask_ack_irq(unsigned int irq)
705{ 698{
706 u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; 699 struct irq_desc *desc = irq_desc + irq;
700 u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
707 u32 pintbit = PINT_BIT(pint_val); 701 u32 pintbit = PINT_BIT(pint_val);
708 u8 bank = PINT_2_BANK(pint_val); 702 u32 bank = PINT_2_BANK(pint_val);
709 703
710 if (unlikely(gpio_both_edge_triggered[bank] & pintbit)) { 704 if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
711 if (pint[bank]->invert_set & pintbit) 705 if (pint[bank]->invert_set & pintbit)
712 pint[bank]->invert_clear = pintbit; 706 pint[bank]->invert_clear = pintbit;
713 else 707 else
@@ -716,32 +710,29 @@ static void bfin_gpio_mask_ack_irq(unsigned int irq)
716 710
717 pint[bank]->request = pintbit; 711 pint[bank]->request = pintbit;
718 pint[bank]->mask_clear = pintbit; 712 pint[bank]->mask_clear = pintbit;
719 SSYNC();
720} 713}
721 714
722static void bfin_gpio_mask_irq(unsigned int irq) 715static void bfin_gpio_mask_irq(unsigned int irq)
723{ 716{
724 u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; 717 u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
725 718
726 pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val); 719 pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
727 SSYNC();
728} 720}
729 721
730static void bfin_gpio_unmask_irq(unsigned int irq) 722static void bfin_gpio_unmask_irq(unsigned int irq)
731{ 723{
732 u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; 724 u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
733 u32 pintbit = PINT_BIT(pint_val); 725 u32 pintbit = PINT_BIT(pint_val);
734 u8 bank = PINT_2_BANK(pint_val); 726 u32 bank = PINT_2_BANK(pint_val);
735 727
736 pint[bank]->request = pintbit; 728 pint[bank]->request = pintbit;
737 pint[bank]->mask_set = pintbit; 729 pint[bank]->mask_set = pintbit;
738 SSYNC();
739} 730}
740 731
741static unsigned int bfin_gpio_irq_startup(unsigned int irq) 732static unsigned int bfin_gpio_irq_startup(unsigned int irq)
742{ 733{
743 u16 gpionr = irq_to_gpio(irq); 734 u32 gpionr = irq_to_gpio(irq);
744 u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; 735 u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
745 736
746 if (pint_val == IRQ_NOT_AVAIL) { 737 if (pint_val == IRQ_NOT_AVAIL) {
747 printk(KERN_ERR 738 printk(KERN_ERR
@@ -750,10 +741,9 @@ static unsigned int bfin_gpio_irq_startup(unsigned int irq)
750 return -ENODEV; 741 return -ENODEV;
751 } 742 }
752 743
753 if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) 744 if (__test_and_set_bit(gpionr, gpio_enabled))
754 bfin_gpio_irq_prepare(gpionr); 745 bfin_gpio_irq_prepare(gpionr);
755 746
756 gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
757 bfin_gpio_unmask_irq(irq); 747 bfin_gpio_unmask_irq(irq);
758 748
759 return 0; 749 return 0;
@@ -761,38 +751,45 @@ static unsigned int bfin_gpio_irq_startup(unsigned int irq)
761 751
762static void bfin_gpio_irq_shutdown(unsigned int irq) 752static void bfin_gpio_irq_shutdown(unsigned int irq)
763{ 753{
764 u16 gpionr = irq_to_gpio(irq); 754 u32 gpionr = irq_to_gpio(irq);
765 755
766 bfin_gpio_mask_irq(irq); 756 bfin_gpio_mask_irq(irq);
767 gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr); 757 __clear_bit(gpionr, gpio_enabled);
758 bfin_gpio_irq_free(gpionr);
768} 759}
769 760
770static int bfin_gpio_irq_type(unsigned int irq, unsigned int type) 761static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
771{ 762{
772 763 int ret;
773 u16 gpionr = irq_to_gpio(irq); 764 char buf[16];
774 u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; 765 u32 gpionr = irq_to_gpio(irq);
766 u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
775 u32 pintbit = PINT_BIT(pint_val); 767 u32 pintbit = PINT_BIT(pint_val);
776 u8 bank = PINT_2_BANK(pint_val); 768 u32 bank = PINT_2_BANK(pint_val);
777 769
778 if (pint_val == IRQ_NOT_AVAIL) 770 if (pint_val == IRQ_NOT_AVAIL)
779 return -ENODEV; 771 return -ENODEV;
780 772
781 if (type == IRQ_TYPE_PROBE) { 773 if (type == IRQ_TYPE_PROBE) {
782 /* only probe unenabled GPIO interrupt lines */ 774 /* only probe unenabled GPIO interrupt lines */
783 if (gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr)) 775 if (__test_bit(gpionr, gpio_enabled))
784 return 0; 776 return 0;
785 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; 777 type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
786 } 778 }
787 779
788 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING | 780 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
789 IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) { 781 IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
790 if (!(gpio_enabled[gpio_bank(gpionr)] & gpio_bit(gpionr))) 782
783 snprintf(buf, 16, "gpio-irq%d", irq);
784 ret = bfin_gpio_irq_request(gpionr, buf);
785 if (ret)
786 return ret;
787
788 if (__test_and_set_bit(gpionr, gpio_enabled))
791 bfin_gpio_irq_prepare(gpionr); 789 bfin_gpio_irq_prepare(gpionr);
792 790
793 gpio_enabled[gpio_bank(gpionr)] |= gpio_bit(gpionr);
794 } else { 791 } else {
795 gpio_enabled[gpio_bank(gpionr)] &= ~gpio_bit(gpionr); 792 __clear_bit(gpionr, gpio_enabled);
796 return 0; 793 return 0;
797 } 794 }
798 795
@@ -803,15 +800,10 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
803 800
804 if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) 801 if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
805 == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { 802 == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
806
807 gpio_both_edge_triggered[bank] |= pintbit;
808
809 if (gpio_get_value(gpionr)) 803 if (gpio_get_value(gpionr))
810 pint[bank]->invert_set = pintbit; 804 pint[bank]->invert_set = pintbit;
811 else 805 else
812 pint[bank]->invert_clear = pintbit; 806 pint[bank]->invert_clear = pintbit;
813 } else {
814 gpio_both_edge_triggered[bank] &= ~pintbit;
815 } 807 }
816 808
817 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) { 809 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
@@ -822,8 +814,6 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
822 bfin_set_irq_handler(irq, handle_level_irq); 814 bfin_set_irq_handler(irq, handle_level_irq);
823 } 815 }
824 816
825 SSYNC();
826
827 return 0; 817 return 0;
828} 818}
829 819
@@ -834,7 +824,7 @@ u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];
834int bfin_gpio_set_wake(unsigned int irq, unsigned int state) 824int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
835{ 825{
836 u32 pint_irq; 826 u32 pint_irq;
837 u8 pint_val = irq2pint_lut[irq - SYS_IRQS]; 827 u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
838 u32 bank = PINT_2_BANK(pint_val); 828 u32 bank = PINT_2_BANK(pint_val);
839 u32 pintbit = PINT_BIT(pint_val); 829 u32 pintbit = PINT_BIT(pint_val);
840 830
@@ -895,26 +885,10 @@ void bfin_pm_restore(void)
895} 885}
896#endif 886#endif
897 887
898static struct irq_chip bfin_gpio_irqchip = {
899 .name = "GPIO",
900 .ack = bfin_gpio_ack_irq,
901 .mask = bfin_gpio_mask_irq,
902 .mask_ack = bfin_gpio_mask_ack_irq,
903 .unmask = bfin_gpio_unmask_irq,
904 .disable = bfin_gpio_mask_irq,
905 .enable = bfin_gpio_unmask_irq,
906 .set_type = bfin_gpio_irq_type,
907 .startup = bfin_gpio_irq_startup,
908 .shutdown = bfin_gpio_irq_shutdown,
909#ifdef CONFIG_PM
910 .set_wake = bfin_gpio_set_wake,
911#endif
912};
913
914static void bfin_demux_gpio_irq(unsigned int inta_irq, 888static void bfin_demux_gpio_irq(unsigned int inta_irq,
915 struct irq_desc *desc) 889 struct irq_desc *desc)
916{ 890{
917 u8 bank, pint_val; 891 u32 bank, pint_val;
918 u32 request, irq; 892 u32 request, irq;
919 893
920 switch (inta_irq) { 894 switch (inta_irq) {
@@ -941,8 +915,7 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
941 while (request) { 915 while (request) {
942 if (request & 1) { 916 if (request & 1) {
943 irq = pint2irq_lut[pint_val] + SYS_IRQS; 917 irq = pint2irq_lut[pint_val] + SYS_IRQS;
944 desc = irq_desc + irq; 918 bfin_handle_irq(irq);
945 desc->handle_irq(irq, desc);
946 } 919 }
947 pint_val++; 920 pint_val++;
948 request >>= 1; 921 request >>= 1;
@@ -951,10 +924,24 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
951} 924}
952#endif 925#endif
953 926
954void __init init_exception_vectors(void) 927static struct irq_chip bfin_gpio_irqchip = {
955{ 928 .name = "GPIO",
956 SSYNC(); 929 .ack = bfin_gpio_ack_irq,
930 .mask = bfin_gpio_mask_irq,
931 .mask_ack = bfin_gpio_mask_ack_irq,
932 .unmask = bfin_gpio_unmask_irq,
933 .disable = bfin_gpio_mask_irq,
934 .enable = bfin_gpio_unmask_irq,
935 .set_type = bfin_gpio_irq_type,
936 .startup = bfin_gpio_irq_startup,
937 .shutdown = bfin_gpio_irq_shutdown,
938#ifdef CONFIG_PM
939 .set_wake = bfin_gpio_set_wake,
940#endif
941};
957 942
943void __cpuinit init_exception_vectors(void)
944{
958 /* cannot program in software: 945 /* cannot program in software:
959 * evt0 - emulation (jtag) 946 * evt0 - emulation (jtag)
960 * evt1 - reset 947 * evt1 - reset
@@ -979,17 +966,23 @@ void __init init_exception_vectors(void)
979 * This function should be called during kernel startup to initialize 966 * This function should be called during kernel startup to initialize
980 * the BFin IRQ handling routines. 967 * the BFin IRQ handling routines.
981 */ 968 */
969
982int __init init_arch_irq(void) 970int __init init_arch_irq(void)
983{ 971{
984 int irq; 972 int irq;
985 unsigned long ilat = 0; 973 unsigned long ilat = 0;
986 /* Disable all the peripheral intrs - page 4-29 HW Ref manual */ 974 /* Disable all the peripheral intrs - page 4-29 HW Ref manual */
987#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) 975#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
976 || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
988 bfin_write_SIC_IMASK0(SIC_UNMASK_ALL); 977 bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
989 bfin_write_SIC_IMASK1(SIC_UNMASK_ALL); 978 bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
990# ifdef CONFIG_BF54x 979# ifdef CONFIG_BF54x
991 bfin_write_SIC_IMASK2(SIC_UNMASK_ALL); 980 bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
992# endif 981# endif
982# ifdef CONFIG_SMP
983 bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
984 bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
985# endif
993#else 986#else
994 bfin_write_SIC_IMASK(SIC_UNMASK_ALL); 987 bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
995#endif 988#endif
@@ -1029,7 +1022,7 @@ int __init init_arch_irq(void)
1029 case IRQ_PINT1: 1022 case IRQ_PINT1:
1030 case IRQ_PINT2: 1023 case IRQ_PINT2:
1031 case IRQ_PINT3: 1024 case IRQ_PINT3:
1032#elif defined(CONFIG_BF52x) 1025#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
1033 case IRQ_PORTF_INTA: 1026 case IRQ_PORTF_INTA:
1034 case IRQ_PORTG_INTA: 1027 case IRQ_PORTG_INTA:
1035 case IRQ_PORTH_INTA: 1028 case IRQ_PORTH_INTA:
@@ -1037,18 +1030,41 @@ int __init init_arch_irq(void)
1037 case IRQ_PROG0_INTA: 1030 case IRQ_PROG0_INTA:
1038 case IRQ_PROG1_INTA: 1031 case IRQ_PROG1_INTA:
1039 case IRQ_PROG2_INTA: 1032 case IRQ_PROG2_INTA:
1033#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
1034 case IRQ_PORTF_INTA:
1040#endif 1035#endif
1036
1041 set_irq_chained_handler(irq, 1037 set_irq_chained_handler(irq,
1042 bfin_demux_gpio_irq); 1038 bfin_demux_gpio_irq);
1043 break; 1039 break;
1044#ifdef BF537_GENERIC_ERROR_INT_DEMUX 1040#ifdef BF537_GENERIC_ERROR_INT_DEMUX
1045 case IRQ_GENERIC_ERROR: 1041 case IRQ_GENERIC_ERROR:
1046 set_irq_handler(irq, bfin_demux_error_irq); 1042 set_irq_chained_handler(irq, bfin_demux_error_irq);
1047 1043 break;
1044#endif
1045#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
1046 case IRQ_TIMER0:
1047 set_irq_handler(irq, handle_percpu_irq);
1048 break;
1049#endif
1050#ifdef CONFIG_SMP
1051 case IRQ_SUPPLE_0:
1052 case IRQ_SUPPLE_1:
1053 set_irq_handler(irq, handle_percpu_irq);
1048 break; 1054 break;
1049#endif 1055#endif
1050 default: 1056 default:
1057#ifdef CONFIG_IPIPE
1058 /*
1059 * We want internal interrupt sources to be masked, because
1060 * ISRs may trigger interrupts recursively (e.g. DMA), but
1061 * interrupts are _not_ masked at CPU level. So let's handle
1062 * them as level interrupts.
1063 */
1064 set_irq_handler(irq, handle_level_irq);
1065#else /* !CONFIG_IPIPE */
1051 set_irq_handler(irq, handle_simple_irq); 1066 set_irq_handler(irq, handle_simple_irq);
1067#endif /* !CONFIG_IPIPE */
1052 break; 1068 break;
1053 } 1069 }
1054 } 1070 }
@@ -1073,7 +1089,7 @@ int __init init_arch_irq(void)
1073 CSYNC(); 1089 CSYNC();
1074 1090
1075 printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n"); 1091 printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
1076 /* IMASK=xxx is equivalent to STI xx or irq_flags=xx, 1092 /* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
1077 * local_irq_enable() 1093 * local_irq_enable()
1078 */ 1094 */
1079 program_IAR(); 1095 program_IAR();
@@ -1081,19 +1097,23 @@ int __init init_arch_irq(void)
1081 search_IAR(); 1097 search_IAR();
1082 1098
1083 /* Enable interrupts IVG7-15 */ 1099 /* Enable interrupts IVG7-15 */
1084 irq_flags = irq_flags | IMASK_IVG15 | 1100 bfin_irq_flags |= IMASK_IVG15 |
1085 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | 1101 IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
1086 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; 1102 IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
1087 1103
1088#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) 1104#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
1105 || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
1089 bfin_write_SIC_IWR0(IWR_DISABLE_ALL); 1106 bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
1090#if defined(CONFIG_BF52x) 1107#if defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
1091 /* BF52x system reset does not properly reset SIC_IWR1 which 1108 /* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
1092 * will screw up the bootrom as it relies on MDMA0/1 waking it 1109 * will screw up the bootrom as it relies on MDMA0/1 waking it
1093 * up from IDLE instructions. See this report for more info: 1110 * up from IDLE instructions. See this report for more info:
1094 * http://blackfin.uclinux.org/gf/tracker/4323 1111 * http://blackfin.uclinux.org/gf/tracker/4323
1095 */ 1112 */
1096 bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11)); 1113 if (ANOMALY_05000435)
1114 bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
1115 else
1116 bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
1097#else 1117#else
1098 bfin_write_SIC_IWR1(IWR_DISABLE_ALL); 1118 bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
1099#endif 1119#endif
@@ -1104,6 +1124,14 @@ int __init init_arch_irq(void)
1104 bfin_write_SIC_IWR(IWR_DISABLE_ALL); 1124 bfin_write_SIC_IWR(IWR_DISABLE_ALL);
1105#endif 1125#endif
1106 1126
1127#ifdef CONFIG_IPIPE
1128 for (irq = 0; irq < NR_IRQS; irq++) {
1129 struct irq_desc *desc = irq_desc + irq;
1130 desc->ic_prio = __ipipe_get_irq_priority(irq);
1131 desc->thr_prio = __ipipe_get_irqthread_priority(irq);
1132 }
1133#endif /* CONFIG_IPIPE */
1134
1107 return 0; 1135 return 0;
1108} 1136}
1109 1137
@@ -1117,11 +1145,20 @@ void do_irq(int vec, struct pt_regs *fp)
1117 } else { 1145 } else {
1118 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst; 1146 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1119 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop; 1147 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1120#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) 1148#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \
1149 || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
1121 unsigned long sic_status[3]; 1150 unsigned long sic_status[3];
1122 1151
1123 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0(); 1152 if (smp_processor_id()) {
1124 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1(); 1153#ifdef CONFIG_SMP
1154 /* This will be optimized out in UP mode. */
1155 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1156 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1157#endif
1158 } else {
1159 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1160 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1161 }
1125#ifdef CONFIG_BF54x 1162#ifdef CONFIG_BF54x
1126 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2(); 1163 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1127#endif 1164#endif
@@ -1150,3 +1187,161 @@ void do_irq(int vec, struct pt_regs *fp)
1150 } 1187 }
1151 asm_do_IRQ(vec, fp); 1188 asm_do_IRQ(vec, fp);
1152} 1189}
1190
1191#ifdef CONFIG_IPIPE
1192
1193int __ipipe_get_irq_priority(unsigned irq)
1194{
1195 int ient, prio;
1196
1197 if (irq <= IRQ_CORETMR)
1198 return irq;
1199
1200 for (ient = 0; ient < NR_PERI_INTS; ient++) {
1201 struct ivgx *ivg = ivg_table + ient;
1202 if (ivg->irqno == irq) {
1203 for (prio = 0; prio <= IVG13-IVG7; prio++) {
1204 if (ivg7_13[prio].ifirst <= ivg &&
1205 ivg7_13[prio].istop > ivg)
1206 return IVG7 + prio;
1207 }
1208 }
1209 }
1210
1211 return IVG15;
1212}
1213
/*
 * Map a Linux IRQ number to the priority of the I-pipe thread that
 * services it.  The result is rescaled to [0..IVG13+1] with 0 being
 * the lowest effective priority — i.e. the scale is inverted with
 * respect to the hardware IVG numbering, where smaller IVG numbers
 * mean higher priority.
 */
int __ipipe_get_irqthread_priority(unsigned irq)
{
	int ient, prio;
	int demux_irq;

	/* The returned priority value is rescaled to [0..IVG13+1]
	 * with 0 being the lowest effective priority level. */

	/* Core events get the top of the rescaled range. */
	if (irq <= IRQ_CORETMR)
		return IVG13 - irq + 1;

	/* GPIO IRQs are given the priority of the demux
	 * interrupt. */
	if (IS_GPIOIRQ(irq)) {
#if defined(CONFIG_BF54x)
		/* BF54x: resolve the PINT block serving this pin via the
		 * irq->pint lookup table. */
		u32 bank = PINT_2_BANK(irq2pint_lut[irq - SYS_IRQS]);
		demux_irq = (bank == 0 ? IRQ_PINT0 :
			     bank == 1 ? IRQ_PINT1 :
			     bank == 2 ? IRQ_PINT2 :
			     IRQ_PINT3);
#elif defined(CONFIG_BF561)
		/* BF561: three port banks of 16 pins each. */
		demux_irq = (irq >= IRQ_PF32 ? IRQ_PROG2_INTA :
			     irq >= IRQ_PF16 ? IRQ_PROG1_INTA :
			     IRQ_PROG0_INTA);
#elif defined(CONFIG_BF52x)
		demux_irq = (irq >= IRQ_PH0 ? IRQ_PORTH_INTA :
			     irq >= IRQ_PG0 ? IRQ_PORTG_INTA :
			     IRQ_PORTF_INTA);
#else
		demux_irq = irq;
#endif
		return IVG13 - PRIO_GPIODEMUX(demux_irq) + 1;
	}

	/* The GPIO demux interrupt is given a lower priority
	 * than the GPIO IRQs, so that its threaded handler
	 * unmasks the interrupt line after the decoded IRQs
	 * have been processed. */
	prio = PRIO_GPIODEMUX(irq);
	/* demux irq? */
	if (prio != -1)
		return IVG13 - prio;

	/* Plain peripheral IRQ: find the IVG level it is routed to. */
	for (ient = 0; ient < NR_PERI_INTS; ient++) {
		struct ivgx *ivg = ivg_table + ient;
		if (ivg->irqno == irq) {
			for (prio = 0; prio <= IVG13-IVG7; prio++) {
				if (ivg7_13[prio].ifirst <= ivg &&
				    ivg7_13[prio].istop > ivg)
					/* NOTE(review): this returns IVG7 - prio
					 * while __ipipe_get_irq_priority returns
					 * IVG7 + prio for the same lookup; the
					 * subtraction may be an intentional
					 * inversion of the scale — confirm. */
					return IVG7 - prio;
			}
		}
	}

	return 0;
}
1270
/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/*
 * I-pipe entry point for hardware interrupts: decode which peripheral
 * source raised the given IVG vector and feed the resulting IRQ into
 * the interrupt pipeline instead of the regular do_irq() path.
 *
 * Returns non-zero when the root domain is not stalled (so the caller
 * may deliver pending root interrupts on return), 0 otherwise.
 */
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
	int irq;

	/* Fast path: the core timer vector needs no SIC decoding. */
	if (likely(vec == EVT_IVTMR_P)) {
		irq = IRQ_CORETMR;
		goto handle_irq;
	}

	SSYNC();

#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
	{
		unsigned long sic_status[3];

		/* Pending-and-unmasked sources, 32 per status word. */
		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
#ifdef CONFIG_BF54x
		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
#endif
		/* Scan this IVG level's routing slots for a pending source;
		 * none found means the interrupt was spurious. */
		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return 0;
			}
			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
				break;
		}
	}
#else
	{
		unsigned long sic_status;

		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();

		for (;; ivg++) {
			if (ivg >= ivg_stop) {
				atomic_inc(&num_spurious);
				return 0;
			} else if (sic_status & ivg->isrflag)
				break;
		}
	}
#endif

	irq = ivg->irqno;

	if (irq == IRQ_SYSTMR) {
		bfin_write_TIMER_STATUS(1);	/* Latch TIMIL0 */
		/* This is basically what we need from the register frame. */
		__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
		__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
		/* Encode "non-root domain was preempted" in bit 4 of the
		 * saved IPEND snapshot for the tick emulation code. */
		if (!ipipe_root_domain_p)
			__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
		else
			__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
	}

handle_irq:

	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, regs);
	ipipe_trace_irq_exit(irq);

	if (ipipe_root_domain_p)
		return !test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));

	return 0;
}
1346
1347#endif /* CONFIG_IPIPE */
diff --git a/arch/blackfin/mach-common/irqpanic.c b/arch/blackfin/mach-common/irqpanic.c
index 606ded9ff4e1..05004df0f78b 100644
--- a/arch/blackfin/mach-common/irqpanic.c
+++ b/arch/blackfin/mach-common/irqpanic.c
@@ -33,8 +33,6 @@
33#include <asm/traps.h> 33#include <asm/traps.h>
34#include <asm/blackfin.h> 34#include <asm/blackfin.h>
35 35
36#include "../oprofile/op_blackfin.h"
37
38#ifdef CONFIG_DEBUG_ICACHE_CHECK 36#ifdef CONFIG_DEBUG_ICACHE_CHECK
39#define L1_ICACHE_START 0xffa10000 37#define L1_ICACHE_START 0xffa10000
40#define L1_ICACHE_END 0xffa13fff 38#define L1_ICACHE_END 0xffa13fff
@@ -134,13 +132,3 @@ asmlinkage void irq_panic(int reason, struct pt_regs *regs)
134#endif 132#endif
135 133
136} 134}
137
138#ifdef CONFIG_HARDWARE_PM
139/*
140 * call the handler of Performance overflow
141 */
142asmlinkage void pm_overflow(int irq, struct pt_regs *regs)
143{
144 pm_overflow_handler(irq, regs);
145}
146#endif
diff --git a/arch/blackfin/mach-common/lock.S b/arch/blackfin/mach-common/lock.S
index 9daf01201e9f..6c5f5f0ea7fe 100644
--- a/arch/blackfin/mach-common/lock.S
+++ b/arch/blackfin/mach-common/lock.S
@@ -160,7 +160,7 @@ ENDPROC(_cache_grab_lock)
160 * R0 - Which way to be locked 160 * R0 - Which way to be locked
161 */ 161 */
162 162
163ENTRY(_cache_lock) 163ENTRY(_bfin_cache_lock)
164 164
165 [--SP]=( R7:0,P5:0 ); 165 [--SP]=( R7:0,P5:0 );
166 166
@@ -184,7 +184,7 @@ ENTRY(_cache_lock)
184 184
185 ( R7:0,P5:0 ) = [SP++]; 185 ( R7:0,P5:0 ) = [SP++];
186 RTS; 186 RTS;
187ENDPROC(_cache_lock) 187ENDPROC(_bfin_cache_lock)
188 188
189/* Invalidate the Entire Instruction cache by 189/* Invalidate the Entire Instruction cache by
190 * disabling IMC bit 190 * disabling IMC bit
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index e28c6af1f415..d3d70fd67c16 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -71,7 +71,7 @@ void bfin_pm_suspend_standby_enter(void)
71 gpio_pm_wakeup_request(CONFIG_PM_WAKEUP_GPIO_NUMBER, WAKEUP_TYPE); 71 gpio_pm_wakeup_request(CONFIG_PM_WAKEUP_GPIO_NUMBER, WAKEUP_TYPE);
72#endif 72#endif
73 73
74 local_irq_save(flags); 74 local_irq_save_hw(flags);
75 bfin_pm_standby_setup(); 75 bfin_pm_standby_setup();
76 76
77#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER 77#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
@@ -82,15 +82,19 @@ void bfin_pm_suspend_standby_enter(void)
82 82
83 bfin_pm_standby_restore(); 83 bfin_pm_standby_restore();
84 84
85#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) 85#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) || \
86 defined(CONFIG_BF538) || defined(CONFIG_BF539) || defined(CONFIG_BF51x)
86 bfin_write_SIC_IWR0(IWR_DISABLE_ALL); 87 bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
87#if defined(CONFIG_BF52x) 88#if defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
88 /* BF52x system reset does not properly reset SIC_IWR1 which 89 /* BF52x system reset does not properly reset SIC_IWR1 which
89 * will screw up the bootrom as it relies on MDMA0/1 waking it 90 * will screw up the bootrom as it relies on MDMA0/1 waking it
90 * up from IDLE instructions. See this report for more info: 91 * up from IDLE instructions. See this report for more info:
91 * http://blackfin.uclinux.org/gf/tracker/4323 92 * http://blackfin.uclinux.org/gf/tracker/4323
92 */ 93 */
93 bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11)); 94 if (ANOMALY_05000435)
95 bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
96 else
97 bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
94#else 98#else
95 bfin_write_SIC_IWR1(IWR_DISABLE_ALL); 99 bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
96#endif 100#endif
@@ -101,7 +105,7 @@ void bfin_pm_suspend_standby_enter(void)
101 bfin_write_SIC_IWR(IWR_DISABLE_ALL); 105 bfin_write_SIC_IWR(IWR_DISABLE_ALL);
102#endif 106#endif
103 107
104 local_irq_restore(flags); 108 local_irq_restore_hw(flags);
105} 109}
106 110
107int bf53x_suspend_l1_mem(unsigned char *memptr) 111int bf53x_suspend_l1_mem(unsigned char *memptr)
@@ -245,12 +249,12 @@ int bfin_pm_suspend_mem_enter(void)
245 wakeup |= GPWE; 249 wakeup |= GPWE;
246#endif 250#endif
247 251
248 local_irq_save(flags); 252 local_irq_save_hw(flags);
249 253
250 ret = blackfin_dma_suspend(); 254 ret = blackfin_dma_suspend();
251 255
252 if (ret) { 256 if (ret) {
253 local_irq_restore(flags); 257 local_irq_restore_hw(flags);
254 kfree(memptr); 258 kfree(memptr);
255 return ret; 259 return ret;
256 } 260 }
@@ -271,7 +275,7 @@ int bfin_pm_suspend_mem_enter(void)
271 bfin_gpio_pm_hibernate_restore(); 275 bfin_gpio_pm_hibernate_restore();
272 blackfin_dma_resume(); 276 blackfin_dma_resume();
273 277
274 local_irq_restore(flags); 278 local_irq_restore_hw(flags);
275 kfree(memptr); 279 kfree(memptr);
276 280
277 return 0; 281 return 0;
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
new file mode 100644
index 000000000000..77c992847094
--- /dev/null
+++ b/arch/blackfin/mach-common/smp.c
@@ -0,0 +1,476 @@
1/*
2 * File: arch/blackfin/kernel/smp.c
3 * Author: Philippe Gerum <rpm@xenomai.org>
4 * IPI management based on arch/arm/kernel/smp.c.
5 *
6 * Copyright 2007 Analog Devices Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, see the file COPYING, or write
20 * to the Free Software Foundation, Inc.,
21 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 */
23
24#include <linux/module.h>
25#include <linux/delay.h>
26#include <linux/init.h>
27#include <linux/spinlock.h>
28#include <linux/sched.h>
29#include <linux/interrupt.h>
30#include <linux/cache.h>
31#include <linux/profile.h>
32#include <linux/errno.h>
33#include <linux/mm.h>
34#include <linux/cpu.h>
35#include <linux/smp.h>
36#include <linux/seq_file.h>
37#include <linux/irq.h>
38#include <asm/atomic.h>
39#include <asm/cacheflush.h>
40#include <asm/mmu_context.h>
41#include <asm/pgtable.h>
42#include <asm/pgalloc.h>
43#include <asm/processor.h>
44#include <asm/ptrace.h>
45#include <asm/cpu.h>
46#include <linux/err.h>
47
/* Software lock slot shared by both cores; placed in L2 SRAM so each
 * core sees the same copy (no D-cache coherence between cores).
 * NOTE(review): its users are not visible in this file — confirm. */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));

/* Fault state recorded for CoreB, reported by secondary_start_kernel()
 * when recovering from a double fault. */
void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

/* IPI message types dispatched by ipi_handler(). */
#define BFIN_IPI_RESCHEDULE 0
#define BFIN_IPI_CALL_FUNC 1
#define BFIN_IPI_CPU_STOP 2

/* Address range handed to remote cores for an I-cache flush IPI. */
struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

/* Stack handed to a booting secondary core (set in __cpu_up()). */
void *secondary_stack;


/* Payload of a BFIN_IPI_CALL_FUNC message. */
struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;	/* CPUs that have not yet run func */
	cpumask_t waitmask;	/* CPUs the sender is still waiting on */
};

/* Shared argument block for smp_icache_flush_range_others(). */
static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

/* One queued IPI; RESCHEDULE/STOP messages leave call_struct unused. */
struct ipi_message {
	struct list_head list;
	unsigned long type;
	struct smp_call_struct call_struct;
};

/* Per-CPU inbox drained by ipi_handler(). */
struct ipi_message_queue {
	struct list_head head;
	spinlock_t lock;
	unsigned long count;	/* number of handler invocations */
};

static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
97
98static void ipi_cpu_stop(unsigned int cpu)
99{
100 spin_lock(&stop_lock);
101 printk(KERN_CRIT "CPU%u: stopping\n", cpu);
102 dump_stack();
103 spin_unlock(&stop_lock);
104
105 cpu_clear(cpu, cpu_online_map);
106
107 local_irq_disable();
108
109 while (1)
110 SSYNC();
111}
112
113static void ipi_flush_icache(void *info)
114{
115 struct blackfin_flush_data *fdata = info;
116
117 /* Invalidate the memory holding the bounds of the flushed region. */
118 blackfin_dcache_invalidate_range((unsigned long)fdata,
119 (unsigned long)fdata + sizeof(*fdata));
120
121 blackfin_icache_flush_range(fdata->start, fdata->end);
122}
123
124static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
125{
126 int wait;
127 void (*func)(void *info);
128 void *info;
129 func = msg->call_struct.func;
130 info = msg->call_struct.info;
131 wait = msg->call_struct.wait;
132 cpu_clear(cpu, msg->call_struct.pending);
133 func(info);
134 if (wait)
135 cpu_clear(cpu, msg->call_struct.waitmask);
136 else
137 kfree(msg);
138}
139
140static irqreturn_t ipi_handler(int irq, void *dev_instance)
141{
142 struct ipi_message *msg, *mg;
143 struct ipi_message_queue *msg_queue;
144 unsigned int cpu = smp_processor_id();
145
146 platform_clear_ipi(cpu);
147
148 msg_queue = &__get_cpu_var(ipi_msg_queue);
149 msg_queue->count++;
150
151 spin_lock(&msg_queue->lock);
152 list_for_each_entry_safe(msg, mg, &msg_queue->head, list) {
153 list_del(&msg->list);
154 switch (msg->type) {
155 case BFIN_IPI_RESCHEDULE:
156 /* That's the easiest one; leave it to
157 * return_from_int. */
158 kfree(msg);
159 break;
160 case BFIN_IPI_CALL_FUNC:
161 ipi_call_function(cpu, msg);
162 break;
163 case BFIN_IPI_CPU_STOP:
164 ipi_cpu_stop(cpu);
165 kfree(msg);
166 break;
167 default:
168 printk(KERN_CRIT "CPU%u: Unknown IPI message \
169 0x%lx\n", cpu, msg->type);
170 kfree(msg);
171 break;
172 }
173 }
174 spin_unlock(&msg_queue->lock);
175 return IRQ_HANDLED;
176}
177
178static void ipi_queue_init(void)
179{
180 unsigned int cpu;
181 struct ipi_message_queue *msg_queue;
182 for_each_possible_cpu(cpu) {
183 msg_queue = &per_cpu(ipi_msg_queue, cpu);
184 INIT_LIST_HEAD(&msg_queue->head);
185 spin_lock_init(&msg_queue->lock);
186 msg_queue->count = 0;
187 }
188}
189
190int smp_call_function(void (*func)(void *info), void *info, int wait)
191{
192 unsigned int cpu;
193 cpumask_t callmap;
194 unsigned long flags;
195 struct ipi_message_queue *msg_queue;
196 struct ipi_message *msg;
197
198 callmap = cpu_online_map;
199 cpu_clear(smp_processor_id(), callmap);
200 if (cpus_empty(callmap))
201 return 0;
202
203 msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
204 INIT_LIST_HEAD(&msg->list);
205 msg->call_struct.func = func;
206 msg->call_struct.info = info;
207 msg->call_struct.wait = wait;
208 msg->call_struct.pending = callmap;
209 msg->call_struct.waitmask = callmap;
210 msg->type = BFIN_IPI_CALL_FUNC;
211
212 for_each_cpu_mask(cpu, callmap) {
213 msg_queue = &per_cpu(ipi_msg_queue, cpu);
214 spin_lock_irqsave(&msg_queue->lock, flags);
215 list_add(&msg->list, &msg_queue->head);
216 spin_unlock_irqrestore(&msg_queue->lock, flags);
217 platform_send_ipi_cpu(cpu);
218 }
219 if (wait) {
220 while (!cpus_empty(msg->call_struct.waitmask))
221 blackfin_dcache_invalidate_range(
222 (unsigned long)(&msg->call_struct.waitmask),
223 (unsigned long)(&msg->call_struct.waitmask));
224 kfree(msg);
225 }
226 return 0;
227}
228EXPORT_SYMBOL_GPL(smp_call_function);
229
230int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
231 int wait)
232{
233 unsigned int cpu = cpuid;
234 cpumask_t callmap;
235 unsigned long flags;
236 struct ipi_message_queue *msg_queue;
237 struct ipi_message *msg;
238
239 if (cpu_is_offline(cpu))
240 return 0;
241 cpus_clear(callmap);
242 cpu_set(cpu, callmap);
243
244 msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
245 INIT_LIST_HEAD(&msg->list);
246 msg->call_struct.func = func;
247 msg->call_struct.info = info;
248 msg->call_struct.wait = wait;
249 msg->call_struct.pending = callmap;
250 msg->call_struct.waitmask = callmap;
251 msg->type = BFIN_IPI_CALL_FUNC;
252
253 msg_queue = &per_cpu(ipi_msg_queue, cpu);
254 spin_lock_irqsave(&msg_queue->lock, flags);
255 list_add(&msg->list, &msg_queue->head);
256 spin_unlock_irqrestore(&msg_queue->lock, flags);
257 platform_send_ipi_cpu(cpu);
258
259 if (wait) {
260 while (!cpus_empty(msg->call_struct.waitmask))
261 blackfin_dcache_invalidate_range(
262 (unsigned long)(&msg->call_struct.waitmask),
263 (unsigned long)(&msg->call_struct.waitmask));
264 kfree(msg);
265 }
266 return 0;
267}
268EXPORT_SYMBOL_GPL(smp_call_function_single);
269
270void smp_send_reschedule(int cpu)
271{
272 unsigned long flags;
273 struct ipi_message_queue *msg_queue;
274 struct ipi_message *msg;
275
276 if (cpu_is_offline(cpu))
277 return;
278
279 msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
280 memset(msg, 0, sizeof(msg));
281 INIT_LIST_HEAD(&msg->list);
282 msg->type = BFIN_IPI_RESCHEDULE;
283
284 msg_queue = &per_cpu(ipi_msg_queue, cpu);
285 spin_lock_irqsave(&msg_queue->lock, flags);
286 list_add(&msg->list, &msg_queue->head);
287 spin_unlock_irqrestore(&msg_queue->lock, flags);
288 platform_send_ipi_cpu(cpu);
289
290 return;
291}
292
293void smp_send_stop(void)
294{
295 unsigned int cpu;
296 cpumask_t callmap;
297 unsigned long flags;
298 struct ipi_message_queue *msg_queue;
299 struct ipi_message *msg;
300
301 callmap = cpu_online_map;
302 cpu_clear(smp_processor_id(), callmap);
303 if (cpus_empty(callmap))
304 return;
305
306 msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
307 memset(msg, 0, sizeof(msg));
308 INIT_LIST_HEAD(&msg->list);
309 msg->type = BFIN_IPI_CPU_STOP;
310
311 for_each_cpu_mask(cpu, callmap) {
312 msg_queue = &per_cpu(ipi_msg_queue, cpu);
313 spin_lock_irqsave(&msg_queue->lock, flags);
314 list_add(&msg->list, &msg_queue->head);
315 spin_unlock_irqrestore(&msg_queue->lock, flags);
316 platform_send_ipi_cpu(cpu);
317 }
318 return;
319}
320
321int __cpuinit __cpu_up(unsigned int cpu)
322{
323 struct task_struct *idle;
324 int ret;
325
326 idle = fork_idle(cpu);
327 if (IS_ERR(idle)) {
328 printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
329 return PTR_ERR(idle);
330 }
331
332 secondary_stack = task_stack_page(idle) + THREAD_SIZE;
333 smp_wmb();
334
335 ret = platform_boot_secondary(cpu, idle);
336
337 if (ret) {
338 cpu_clear(cpu, cpu_present_map);
339 printk(KERN_CRIT "CPU%u: processor failed to boot (%d)\n", cpu, ret);
340 free_task(idle);
341 } else
342 cpu_set(cpu, cpu_online_map);
343
344 secondary_stack = NULL;
345
346 return ret;
347}
348
/*
 * Per-core interrupt/timer setup run on the secondary CPU from
 * secondary_start_kernel(), with interrupts still disabled.
 */
static void __cpuinit setup_secondary(unsigned int cpu)
{
#if !(defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE))
	struct irq_desc *timer_desc;
#endif
	unsigned long ilat;

	/* Mask all core interrupt levels, then write back ILAT to clear
	 * anything already latched (presumably W1C semantics — confirm
	 * against the HRM). */
	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	/* Reserve the PDA space for the secondary CPU. */
	reserve_pda();

	/* Enable interrupt levels IVG7-15. IARs have been already
	 * programmed by the boot CPU. */
	bfin_irq_flags |= IMASK_IVG15 |
		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;

#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
	/* Power down the core timer, just to play safe. */
	bfin_write_TCNTL(0);

	/* system timer0 has been setup by CoreA. */
#else
	/* This core drives its own tick from the core timer. */
	timer_desc = irq_desc + IRQ_CORETMR;
	setup_core_timer();
	timer_desc->chip->enable(IRQ_CORETMR);
#endif
}
383
/*
 * C entry point for the secondary CPU (CoreB), reached from the
 * platform boot code on the stack published by __cpu_up().  Mirrors
 * the relevant pieces of start_kernel() for the second core, then
 * enters the idle loop.  Never returns.
 */
void __cpuinit secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	/* Report double-fault state saved before the core was reset. */
	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx_coreb);
	}

	/*
	 * We want the D-cache to be enabled early, in case the atomic
	 * support code emulates cache coherence (see
	 * __ARCH_SYNC_CORE_DCACHE).
	 */
	init_exception_vectors();

	bfin_setup_caches(cpu);

	local_irq_disable();

	/* Attach the new idle task to the global mm. */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	BUG_ON(current->mm);	/* Can't be, but better be safe than sorry. */

	preempt_disable();

	setup_secondary(cpu);

	/* IRQs on only after per-core interrupt state is programmed. */
	local_irq_enable();

	platform_secondary_init(cpu);

	cpu_idle();
}
428
/*
 * Early SMP hook for the boot CPU.  Nothing to do here: the boot core
 * is fully initialized by the regular boot path.
 */
void __init smp_prepare_boot_cpu(void)
{
}
432
/*
 * Boot-CPU hook run before bringing up secondaries: let the platform
 * code prepare up to @max_cpus cores, initialize the per-CPU IPI
 * inboxes, and install ipi_handler on the IPI interrupt line.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	ipi_queue_init();
	platform_request_ipi(&ipi_handler);
}
439
440void __init smp_cpus_done(unsigned int max_cpus)
441{
442 unsigned long bogosum = 0;
443 unsigned int cpu;
444
445 for_each_online_cpu(cpu)
446 bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
447
448 printk(KERN_INFO "SMP: Total of %d processors activated "
449 "(%lu.%02lu BogoMIPS).\n",
450 num_online_cpus(),
451 bogosum / (500000/HZ),
452 (bogosum / (5000/HZ)) % 100);
453}
454
/*
 * Ask all other online CPUs to flush their I-cache over [start, end).
 * Blocks until the remote flushes complete (wait == 1).
 *
 * NOTE(review): smp_flush_data is one shared static with no locking;
 * concurrent callers would overwrite each other's bounds — confirm
 * callers are serialized.
 */
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 1))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
464
#ifdef __ARCH_SYNC_CORE_DCACHE
/* Shared mask in L2 SRAM so both cores see one copy; its readers are
 * not visible in this file (presumably the D-cache coherence
 * emulation — confirm). */
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

/*
 * Invalidate this core's entire D-cache so it re-reads memory written
 * by the other core, and bump the per-CPU invalidation counter.
 */
void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_dcache();
	++per_cpu(cpu_data, cpu).dcache_invld_count;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif