Diffstat (limited to 'arch/blackfin/mach-common')
 arch/blackfin/mach-common/Makefile        |    1
 arch/blackfin/mach-common/arch_checks.c   |    2
 arch/blackfin/mach-common/cache.S         |   40
 arch/blackfin/mach-common/cpufreq.c       |   12
 arch/blackfin/mach-common/dpmc.c          |   54
 arch/blackfin/mach-common/dpmc_modes.S    |   50
 arch/blackfin/mach-common/entry.S         |  138
 arch/blackfin/mach-common/head.S          |  112
 arch/blackfin/mach-common/interrupt.S     |   23
 arch/blackfin/mach-common/ints-priority.c |  743
 arch/blackfin/mach-common/irqpanic.c      |  106
 arch/blackfin/mach-common/pm.c            |   12
 arch/blackfin/mach-common/smp.c           |  246
 13 files changed, 686 insertions(+), 853 deletions(-)
diff --git a/arch/blackfin/mach-common/Makefile b/arch/blackfin/mach-common/Makefile
index 814cb483853b..ff299f24aba0 100644
--- a/arch/blackfin/mach-common/Makefile
+++ b/arch/blackfin/mach-common/Makefile
@@ -11,4 +11,3 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq.o
 obj-$(CONFIG_CPU_VOLTAGE) += dpmc.o
 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_BFIN_KERNEL_CLOCK) += clocks-init.o
-obj-$(CONFIG_DEBUG_ICACHE_CHECK) += irqpanic.o
diff --git a/arch/blackfin/mach-common/arch_checks.c b/arch/blackfin/mach-common/arch_checks.c
index bceb98126c21..d8643fdd0fcf 100644
--- a/arch/blackfin/mach-common/arch_checks.c
+++ b/arch/blackfin/mach-common/arch_checks.c
@@ -61,6 +61,6 @@
 # error "Anomaly 05000220 does not allow you to use Write Back cache with L2 or External Memory"
 #endif
 
-#if ANOMALY_05000491 && !defined(CONFIG_CACHE_FLUSH_L1)
+#if ANOMALY_05000491 && !defined(CONFIG_ICACHE_FLUSH_L1)
 # error You need IFLUSH in L1 inst while Anomaly 05000491 applies
 #endif
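
The arch_checks.c hunk only renames the Kconfig symbol the anomaly 05000491 guard tests (CONFIG_CACHE_FLUSH_L1 becomes CONFIG_ICACHE_FLUSH_L1, tracking the icache/dcache split in cache.S below). As a rough standalone C sketch of the same build-time-guard pattern — ANOMALY_FOO and CONFIG_FOO_WORKAROUND are made-up names for illustration:

    /* fail the build when a known-bad configuration is selected */
    #define ANOMALY_FOO 1                  /* pretend the erratum applies */

    #if ANOMALY_FOO && !defined(CONFIG_FOO_WORKAROUND)
    # error "this configuration requires CONFIG_FOO_WORKAROUND"
    #endif

    int main(void) { return 0; }           /* compiles only if the guard passes */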
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S
index 790c767ca95a..9f4dd35bfd74 100644
--- a/arch/blackfin/mach-common/cache.S
+++ b/arch/blackfin/mach-common/cache.S
@@ -11,12 +11,6 @@
 #include <asm/cache.h>
 #include <asm/page.h>
 
-#ifdef CONFIG_CACHE_FLUSH_L1
-.section .l1.text
-#else
-.text
-#endif
-
 /* 05000443 - IFLUSH cannot be last instruction in hardware loop */
 #if ANOMALY_05000443
 # define BROK_FLUSH_INST "IFLUSH"
@@ -58,6 +52,8 @@
 1:
 .ifeqs "\flushins", BROK_FLUSH_INST
 	\flushins [P0++];
+	nop;
+	nop;
 2:	nop;
 .else
 2:	\flushins [P0++];
@@ -66,11 +62,43 @@
 	RTS;
 .endm
 
+#ifdef CONFIG_ICACHE_FLUSH_L1
+.section .l1.text
+#else
+.text
+#endif
+
 /* Invalidate all instruction cache lines assocoiated with this memory area */
+#ifdef CONFIG_SMP
+# define _blackfin_icache_flush_range _blackfin_icache_flush_range_l1
+#endif
 ENTRY(_blackfin_icache_flush_range)
 	do_flush IFLUSH
 ENDPROC(_blackfin_icache_flush_range)
 
+#ifdef CONFIG_SMP
+.text
+# undef _blackfin_icache_flush_range
+ENTRY(_blackfin_icache_flush_range)
+	p0.L = LO(DSPID);
+	p0.H = HI(DSPID);
+	r3 = [p0];
+	r3 = r3.b (z);
+	p2 = r3;
+	p0.L = _blackfin_iflush_l1_entry;
+	p0.H = _blackfin_iflush_l1_entry;
+	p0 = p0 + (p2 << 2);
+	p1 = [p0];
+	jump (p1);
+ENDPROC(_blackfin_icache_flush_range)
+#endif
+
+#ifdef CONFIG_DCACHE_FLUSH_L1
+.section .l1.text
+#else
+.text
+#endif
+
 /* Throw away all D-cached data in specified region without any obligation to
  * write them back. Since the Blackfin ISA does not have an "invalidate"
  * instruction, we use flush/invalidate. Perhaps as a speed optimization we
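
The SMP stub added above turns the flush entry point into a trampoline: it reads the core ID from DSPID, indexes _blackfin_iflush_l1_entry by that ID, and jumps to the per-core L1-resident copy of the routine. A rough userspace C sketch of that dispatch shape, with illustrative names (current_core() stands in for reading DSPID):

    #include <stdio.h>

    typedef void (*flush_fn)(void *start, void *end);

    static void flush_core0(void *s, void *e) { (void)s; (void)e; puts("core0 flush"); }
    static void flush_core1(void *s, void *e) { (void)s; (void)e; puts("core1 flush"); }

    /* one L1-resident flush routine per core, indexed by core ID */
    static flush_fn iflush_l1_entry[] = { flush_core0, flush_core1 };

    static unsigned current_core(void) { return 0; }  /* stands in for DSPID */

    static void icache_flush_range(void *start, void *end)
    {
        iflush_l1_entry[current_core()](start, end);  /* mirrors "jump (p1)" */
    }

    int main(void) { icache_flush_range(0, 0); return 0; }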
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 4391d03dc845..85dc6d69f9c0 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -1,7 +1,7 @@
 /*
  * Blackfin core clock scaling
  *
- * Copyright 2008-2009 Analog Devices Inc.
+ * Copyright 2008-2011 Analog Devices Inc.
  *
  * Licensed under the GPL-2 or later.
  */
@@ -16,10 +16,8 @@
 #include <asm/time.h>
 #include <asm/dpmc.h>
 
-#define CPUFREQ_CPU 0
-
 /* this is the table of CCLK frequencies, in Hz */
-/* .index is the entry in the auxillary dpm_state_table[] */
+/* .index is the entry in the auxiliary dpm_state_table[] */
 static struct cpufreq_frequency_table bfin_freq_table[] = {
 	{
 		.frequency = CPUFREQ_TABLE_END,
@@ -46,7 +44,7 @@ static struct bfin_dpm_state {
 
 #if defined(CONFIG_CYCLES_CLOCKSOURCE)
 /*
- * normalized to maximum frequncy offset for CYCLES,
+ * normalized to maximum frequency offset for CYCLES,
  * used in time-ts cycles clock source, but could be used
  * somewhere also.
  */
@@ -134,7 +132,7 @@ static int bfin_target(struct cpufreq_policy *poli,
 
 	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
 	if (cpu == CPUFREQ_CPU) {
-		local_irq_save_hw(flags);
+		flags = hard_local_irq_save();
 		plldiv = (bfin_read_PLL_DIV() & SSEL) |
 				dpm_state_table[index].csel;
 		bfin_write_PLL_DIV(plldiv);
@@ -155,7 +153,7 @@ static int bfin_target(struct cpufreq_policy *poli,
 			loops_per_jiffy = cpufreq_scale(lpj_ref,
 					lpj_ref_freq, freqs.new);
 		}
-		local_irq_restore_hw(flags);
+		hard_local_irq_restore(flags);
 	}
 	/* TODO: just test case for cycles clock source, remove later */
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
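
bfin_target() above rescales loops_per_jiffy with cpufreq_scale() against a reference captured at init. The arithmetic is just a ratio; a minimal sketch (values illustrative, frequencies in kHz):

    #include <stdio.h>

    /* new_lpj = old_lpj * new_freq / old_freq, as cpufreq_scale() computes */
    static unsigned long scale(unsigned long old, unsigned old_f, unsigned new_f)
    {
        return (unsigned long)((unsigned long long)old * new_f / old_f);
    }

    int main(void)
    {
        unsigned long lpj_ref = 500000;    /* calibrated at the reference CCLK */
        unsigned lpj_ref_freq = 500000;    /* 500 MHz */
        printf("lpj at 250 MHz: %lu\n", scale(lpj_ref, lpj_ref_freq, 250000));
        return 0;
    }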
diff --git a/arch/blackfin/mach-common/dpmc.c b/arch/blackfin/mach-common/dpmc.c
index 02c7efd1bcf4..f5685a496c58 100644
--- a/arch/blackfin/mach-common/dpmc.c
+++ b/arch/blackfin/mach-common/dpmc.c
@@ -19,9 +19,6 @@
 
 #define DRIVER_NAME "bfin dpmc"
 
-#define dprintk(msg...) \
-	cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, DRIVER_NAME, msg)
-
 struct bfin_dpmc_platform_data *pdata;
 
 /**
@@ -61,17 +58,64 @@ err_out:
 }
 
 #ifdef CONFIG_CPU_FREQ
+# ifdef CONFIG_SMP
+static void bfin_idle_this_cpu(void *info)
+{
+	unsigned long flags = 0;
+	unsigned long iwr0, iwr1, iwr2;
+	unsigned int cpu = smp_processor_id();
+
+	local_irq_save_hw(flags);
+	bfin_iwr_set_sup0(&iwr0, &iwr1, &iwr2);
+
+	platform_clear_ipi(cpu, IRQ_SUPPLE_0);
+	SSYNC();
+	asm("IDLE;");
+	bfin_iwr_restore(iwr0, iwr1, iwr2);
+
+	local_irq_restore_hw(flags);
+}
+
+static void bfin_idle_cpu(void)
+{
+	smp_call_function(bfin_idle_this_cpu, NULL, 0);
+}
+
+static void bfin_wakeup_cpu(void)
+{
+	unsigned int cpu;
+	unsigned int this_cpu = smp_processor_id();
+	cpumask_t mask;
+
+	cpumask_copy(&mask, cpu_online_mask);
+	cpumask_clear_cpu(this_cpu, &mask);
+	for_each_cpu(cpu, &mask)
+		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
+}
+
+# else
+static void bfin_idle_cpu(void) {}
+static void bfin_wakeup_cpu(void) {}
+# endif
+
 static int
 vreg_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
 {
 	struct cpufreq_freqs *freq = data;
 
+	if (freq->cpu != CPUFREQ_CPU)
+		return 0;
+
 	if (val == CPUFREQ_PRECHANGE && freq->old < freq->new) {
+		bfin_idle_cpu();
 		bfin_set_vlev(bfin_get_vlev(freq->new));
 		udelay(pdata->vr_settling_time); /* Wait until Volatge settled */
-
-	} else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)
+		bfin_wakeup_cpu();
+	} else if (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) {
+		bfin_idle_cpu();
 		bfin_set_vlev(bfin_get_vlev(freq->new));
+		bfin_wakeup_cpu();
+	}
 
 	return 0;
 }
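
The notifier above enforces the usual DVFS ordering: raise the core voltage before a frequency increase (PRECHANGE) and lower it only after a decrease (POSTCHANGE), now with the other core idled around the regulator write so it never runs at a half-settled voltage. A bare sketch of that ordering rule, with stub functions and no hardware access:

    #include <stdio.h>

    enum { PRECHANGE, POSTCHANGE };

    static void set_vlev(unsigned khz) { printf("program VLEV for %u kHz\n", khz); }

    static void notifier(int phase, unsigned old_khz, unsigned new_khz)
    {
        if (phase == PRECHANGE && old_khz < new_khz)
            set_vlev(new_khz);    /* more volts first, then more MHz */
        else if (phase == POSTCHANGE && old_khz > new_khz)
            set_vlev(new_khz);    /* fewer MHz first, then fewer volts */
    }

    int main(void)
    {
        notifier(PRECHANGE, 250000, 500000);   /* speeding up */
        notifier(POSTCHANGE, 500000, 250000);  /* slowing down */
        return 0;
    }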
diff --git a/arch/blackfin/mach-common/dpmc_modes.S b/arch/blackfin/mach-common/dpmc_modes.S
index 5969d86836a5..9cfdd49a3127 100644
--- a/arch/blackfin/mach-common/dpmc_modes.S
+++ b/arch/blackfin/mach-common/dpmc_modes.S
@@ -292,13 +292,7 @@ ENTRY(_do_hibernate)
 #ifdef SIC_IMASK
 	PM_SYS_PUSH(SIC_IMASK)
 #endif
-#ifdef SICA_IMASK0
-	PM_SYS_PUSH(SICA_IMASK0)
-#endif
-#ifdef SICA_IMASK1
-	PM_SYS_PUSH(SICA_IMASK1)
-#endif
-#ifdef SIC_IAR2
+#ifdef SIC_IAR0
 	PM_SYS_PUSH(SIC_IAR0)
 	PM_SYS_PUSH(SIC_IAR1)
 	PM_SYS_PUSH(SIC_IAR2)
@@ -321,17 +315,6 @@ ENTRY(_do_hibernate)
 	PM_SYS_PUSH(SIC_IAR11)
 #endif
 
-#ifdef SICA_IAR0
-	PM_SYS_PUSH(SICA_IAR0)
-	PM_SYS_PUSH(SICA_IAR1)
-	PM_SYS_PUSH(SICA_IAR2)
-	PM_SYS_PUSH(SICA_IAR3)
-	PM_SYS_PUSH(SICA_IAR4)
-	PM_SYS_PUSH(SICA_IAR5)
-	PM_SYS_PUSH(SICA_IAR6)
-	PM_SYS_PUSH(SICA_IAR7)
-#endif
-
 #ifdef SIC_IWR
 	PM_SYS_PUSH(SIC_IWR)
 #endif
@@ -344,12 +327,6 @@ ENTRY(_do_hibernate)
 #ifdef SIC_IWR2
 	PM_SYS_PUSH(SIC_IWR2)
 #endif
-#ifdef SICA_IWR0
-	PM_SYS_PUSH(SICA_IWR0)
-#endif
-#ifdef SICA_IWR1
-	PM_SYS_PUSH(SICA_IWR1)
-#endif
 
 #ifdef PINT0_ASSIGN
 	PM_SYS_PUSH(PINT0_MASK_SET)
@@ -750,12 +727,6 @@ ENTRY(_do_hibernate)
 	PM_SYS_POP(PINT0_MASK_SET)
 #endif
 
-#ifdef SICA_IWR1
-	PM_SYS_POP(SICA_IWR1)
-#endif
-#ifdef SICA_IWR0
-	PM_SYS_POP(SICA_IWR0)
-#endif
 #ifdef SIC_IWR2
 	PM_SYS_POP(SIC_IWR2)
 #endif
@@ -769,17 +740,6 @@ ENTRY(_do_hibernate)
 	PM_SYS_POP(SIC_IWR)
 #endif
 
-#ifdef SICA_IAR0
-	PM_SYS_POP(SICA_IAR7)
-	PM_SYS_POP(SICA_IAR6)
-	PM_SYS_POP(SICA_IAR5)
-	PM_SYS_POP(SICA_IAR4)
-	PM_SYS_POP(SICA_IAR3)
-	PM_SYS_POP(SICA_IAR2)
-	PM_SYS_POP(SICA_IAR1)
-	PM_SYS_POP(SICA_IAR0)
-#endif
-
 #ifdef SIC_IAR8
 	PM_SYS_POP(SIC_IAR11)
 	PM_SYS_POP(SIC_IAR10)
@@ -797,17 +757,11 @@ ENTRY(_do_hibernate)
 #ifdef SIC_IAR3
 	PM_SYS_POP(SIC_IAR3)
 #endif
-#ifdef SIC_IAR2
+#ifdef SIC_IAR0
 	PM_SYS_POP(SIC_IAR2)
 	PM_SYS_POP(SIC_IAR1)
 	PM_SYS_POP(SIC_IAR0)
 #endif
-#ifdef SICA_IMASK1
-	PM_SYS_POP(SICA_IMASK1)
-#endif
-#ifdef SICA_IMASK0
-	PM_SYS_POP(SICA_IMASK0)
-#endif
 #ifdef SIC_IMASK
 	PM_SYS_POP(SIC_IMASK)
 #endif
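
The hibernate path saves MMRs with PM_SYS_PUSH and must restore them with PM_SYS_POP in exactly the reverse order, which is why every SICA_* push deleted above has a matching pop deleted from the restore sequence. A tiny C model of that LIFO invariant:

    #include <assert.h>

    static unsigned long stack[16];
    static int sp;

    static void push(unsigned long v) { stack[sp++] = v; }
    static unsigned long pop(void)    { return stack[--sp]; }

    int main(void)
    {
        push(0x1111);             /* PM_SYS_PUSH(SIC_IMASK)     */
        push(0x2222);             /* PM_SYS_PUSH(SIC_IAR0)      */
        assert(pop() == 0x2222);  /* PM_SYS_POP(SIC_IAR0) first */
        assert(pop() == 0x1111);  /* PM_SYS_POP(SIC_IMASK) last */
        return 0;
    }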
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index af1bffa21dc1..225d311c9701 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -268,7 +268,7 @@ ENTRY(_handle_bad_cplb)
 	/* To get here, we just tried and failed to change a CPLB
 	 * so, handle things in trap_c (C code), by lowering to
 	 * IRQ5, just like we normally do. Since this is not a
-	 * "normal" return path, we have a do alot of stuff to
+	 * "normal" return path, we have a do a lot of stuff to
 	 * the stack to get ready so, we can fall through - we
 	 * need to make a CPLB exception look like a normal exception
 	 */
@@ -615,7 +615,7 @@ ENTRY(_system_call)
 #ifdef CONFIG_IPIPE
 	r0 = sp;
 	SP += -12;
-	call ___ipipe_syscall_root;
+	pseudo_long_call ___ipipe_syscall_root, p0;
 	SP += 12;
 	cc = r0 == 1;
 	if cc jump .Lsyscall_really_exit;
@@ -692,7 +692,7 @@ ENTRY(_system_call)
 	[--sp] = reti;
 	SP += 4; /* don't merge with next insn to keep the pattern obvious */
 	SP += -12;
-	call ___ipipe_sync_root;
+	pseudo_long_call ___ipipe_sync_root, p4;
 	SP += 12;
 	jump .Lresume_userspace_1;
 .Lsyscall_no_irqsync:
@@ -817,7 +817,7 @@ _new_old_task:
 	rets = [sp++];
 
 	/*
-	 * When we come out of resume, r0 carries "old" task, becuase we are
+	 * When we come out of resume, r0 carries "old" task, because we are
 	 * in "new" task.
 	 */
 	rts;
@@ -889,11 +889,80 @@ ENTRY(_ret_from_exception)
 	rts;
 ENDPROC(_ret_from_exception)
 
+#if defined(CONFIG_PREEMPT)
+
+ENTRY(_up_to_irq14)
+#if ANOMALY_05000281 || ANOMALY_05000461
+	r0.l = lo(SAFE_USER_INSTRUCTION);
+	r0.h = hi(SAFE_USER_INSTRUCTION);
+	reti = r0;
+#endif
+
+#ifdef CONFIG_DEBUG_HWERR
+	/* enable irq14 & hwerr interrupt, until we transition to _evt_evt14 */
+	r0 = (EVT_IVG14 | EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#else
+	/* Only enable irq14 interrupt, until we transition to _evt_evt14 */
+	r0 = (EVT_IVG14 | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+#endif
+	sti r0;
+
+	p0.l = lo(EVT14);
+	p0.h = hi(EVT14);
+	p1.l = _evt_up_evt14;
+	p1.h = _evt_up_evt14;
+	[p0] = p1;
+	csync;
+
+	raise 14;
+1:
+	jump 1b;
+ENDPROC(_up_to_irq14)
+
+ENTRY(_evt_up_evt14)
+#ifdef CONFIG_DEBUG_HWERR
+	r0 = (EVT_IVHW | EVT_IRPTEN | EVT_EVX | EVT_NMI | EVT_RST | EVT_EMU);
+	sti r0;
+#else
+	cli r0;
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+	[--sp] = rets;
+	sp += -12;
+	call _trace_hardirqs_off;
+	sp += 12;
+	rets = [sp++];
+#endif
+	[--sp] = RETI;
+	SP += 4;
+
+	/* restore normal evt14 */
+	p0.l = lo(EVT14);
+	p0.h = hi(EVT14);
+	p1.l = _evt_evt14;
+	p1.h = _evt_evt14;
+	[p0] = p1;
+	csync;
+
+	rts;
+ENDPROC(_evt_up_evt14)
+
+#endif
+
 #ifdef CONFIG_IPIPE
 
 _resume_kernel_from_int:
+	r1 = LO(~0x8000) (Z);
+	r1 = r0 & r1;
+	r0 = 1;
+	r0 = r1 - r0;
+	r2 = r1 & r0;
+	cc = r2 == 0;
+	/* Sync the root stage only from the outer interrupt level. */
+	if !cc jump .Lnosync;
 	r0.l = ___ipipe_sync_root;
 	r0.h = ___ipipe_sync_root;
+	[--sp] = reti;
 	[--sp] = rets;
 	[--sp] = ( r7:4, p5:3 );
 	SP += -12;
@@ -901,9 +970,57 @@ _resume_kernel_from_int:
 	SP += 12;
 	( r7:4, p5:3 ) = [sp++];
 	rets = [sp++];
+	reti = [sp++];
+.Lnosync:
 	rts
+
+#elif defined(CONFIG_PREEMPT)
+
+_resume_kernel_from_int:
+	/* check preempt_count */
+	r7 = sp;
+	r4.l = lo(ALIGN_PAGE_MASK);
+	r4.h = hi(ALIGN_PAGE_MASK);
+	r7 = r7 & r4;
+	p5 = r7;
+	r7 = [p5 + TI_PREEMPT];
+	cc = r7 == 0x0;
+	if !cc jump .Lreturn_to_kernel;
+.Lneed_schedule:
+	r7 = [p5 + TI_FLAGS];
+	r4.l = lo(_TIF_WORK_MASK);
+	r4.h = hi(_TIF_WORK_MASK);
+	r7 = r7 & r4;
+	cc = BITTST(r7, TIF_NEED_RESCHED);
+	if !cc jump .Lreturn_to_kernel;
+	/*
+	 * let schedule done at level 15, otherwise sheduled process will run
+	 * at high level and block low level interrupt
+	 */
+	r6 = reti; /* save reti */
+	r5.l = .Lkernel_schedule;
+	r5.h = .Lkernel_schedule;
+	reti = r5;
+	rti;
+.Lkernel_schedule:
+	[--sp] = rets;
+	sp += -12;
+	pseudo_long_call _preempt_schedule_irq, p4;
+	sp += 12;
+	rets = [sp++];
+
+	[--sp] = rets;
+	sp += -12;
+	/* up to irq14 so that reti after restore_all can return to irq15(kernel) */
+	pseudo_long_call _up_to_irq14, p4;
+	sp += 12;
+	rets = [sp++];
+
+	reti = r6; /* restore reti so that origin process can return to interrupted point */
+
+	jump .Lneed_schedule;
 #else
-#define _resume_kernel_from_int	2f
+
+#define _resume_kernel_from_int	.Lreturn_to_kernel
 #endif
 
 ENTRY(_return_from_int)
@@ -913,7 +1030,7 @@ ENTRY(_return_from_int)
 	p2.h = hi(ILAT);
 	r0 = [p2];
 	cc = bittst (r0, EVT_IVG15_P);
-	if cc jump 2f;
+	if cc jump .Lreturn_to_kernel;
 
 	/* if not return to user mode, get out */
 	p2.l = lo(IPEND);
@@ -945,7 +1062,7 @@ ENTRY(_return_from_int)
 	STI r0;
 	raise 15; /* raise evt15 to do signal or reschedule */
 	rti;
-2:
+.Lreturn_to_kernel:
 	rts;
 ENDPROC(_return_from_int)
 
@@ -1631,6 +1748,13 @@ ENTRY(_sys_call_table)
 	.long _sys_fanotify_init
 	.long _sys_fanotify_mark
 	.long _sys_prlimit64
+	.long _sys_cacheflush
+	.long _sys_name_to_handle_at	/* 375 */
+	.long _sys_open_by_handle_at
+	.long _sys_clock_adjtime
+	.long _sys_syncfs
+	.long _sys_setns
+	.long _sys_sendmmsg		/* 380 */
 
 	.rept NR_syscalls-(.-_sys_call_table)/4
 	.long _sys_ni_syscall
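
The CONFIG_PREEMPT return path added to entry.S only reschedules when the preempt count is zero and TIF_NEED_RESCHED is set, looping back to .Lneed_schedule until the flag clears. A rough C rendering of that control flow, with a simplified thread_info and an illustrative flag bit:

    #include <stdio.h>

    #define TIF_NEED_RESCHED (1u << 3)    /* illustrative bit position */

    struct thread_info { int preempt_count; unsigned flags; };

    static void resume_kernel_from_int(struct thread_info *ti)
    {
        if (ti->preempt_count != 0)
            return;                          /* preemption disabled: just return */
        while (ti->flags & TIF_NEED_RESCHED) {
            puts("preempt_schedule_irq()");
            ti->flags &= ~TIF_NEED_RESCHED;  /* the scheduler clears the flag */
        }
    }

    int main(void)
    {
        struct thread_info ti = { 0, TIF_NEED_RESCHED };
        resume_kernel_from_int(&ti);
        return 0;
    }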
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index 4391621d9048..76de5724c1e3 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -31,6 +31,7 @@ ENDPROC(__init_clear_bss)
 ENTRY(__start)
 	/* R0: argument of command line string, passed from uboot, save it */
 	R7 = R0;
+
 	/* Enable Cycle Counter and Nesting Of Interrupts */
 #ifdef CONFIG_BFIN_SCRATCH_REG_CYCLES
 	R0 = SYSCFG_SNEN;
@@ -38,76 +39,49 @@ ENTRY(__start)
 	R0 = SYSCFG_SNEN | SYSCFG_CCEN;
 #endif
 	SYSCFG = R0;
-	R0 = 0;
-
-	/* Clear Out All the data and pointer Registers */
-	R1 = R0;
-	R2 = R0;
-	R3 = R0;
-	R4 = R0;
-	R5 = R0;
-	R6 = R0;
-
-	P0 = R0;
-	P1 = R0;
-	P2 = R0;
-	P3 = R0;
-	P4 = R0;
-	P5 = R0;
-
-	LC0 = r0;
-	LC1 = r0;
-	L0 = r0;
-	L1 = r0;
-	L2 = r0;
-	L3 = r0;
-
-	/* Clear Out All the DAG Registers */
-	B0 = r0;
-	B1 = r0;
-	B2 = r0;
-	B3 = r0;
-
-	I0 = r0;
-	I1 = r0;
-	I2 = r0;
-	I3 = r0;
-
-	M0 = r0;
-	M1 = r0;
-	M2 = r0;
-	M3 = r0;
+
+	/* Optimization register tricks: keep a base value in the
+	 * reserved P registers so we use the load/store with an
+	 * offset syntax.  R0 = [P5 + <constant>];
+	 *   P5 - core MMR base
+	 *   R6 - 0
+	 */
+	r6 = 0;
+	p5.l = 0;
+	p5.h = hi(COREMMR_BASE);
+
+	/* Zero out registers required by Blackfin ABI */
+
+	/* Disable circular buffers */
+	L0 = r6;
+	L1 = r6;
+	L2 = r6;
+	L3 = r6;
+
+	/* Disable hardware loops in case we were started by 'go' */
+	LC0 = r6;
+	LC1 = r6;
 
 	/*
 	 * Clear ITEST_COMMAND and DTEST_COMMAND registers,
 	 * Leaving these as non-zero can confuse the emulator
 	 */
-	p0.L = LO(DTEST_COMMAND);
-	p0.H = HI(DTEST_COMMAND);
-	[p0] = R0;
-	[p0 + (ITEST_COMMAND - DTEST_COMMAND)] = R0;
+	[p5 + (DTEST_COMMAND - COREMMR_BASE)] = r6;
+	[p5 + (ITEST_COMMAND - COREMMR_BASE)] = r6;
 	CSYNC;
 
 	trace_buffer_init(p0,r0);
-	P0 = R1;
-	R0 = R1;
 
 	/* Turn off the icache */
-	p0.l = LO(IMEM_CONTROL);
-	p0.h = HI(IMEM_CONTROL);
-	R1 = [p0];
-	R0 = ~ENICPLB;
-	R0 = R0 & R1;
-	[p0] = R0;
+	r1 = [p5 + (IMEM_CONTROL - COREMMR_BASE)];
+	BITCLR (r1, ENICPLB_P);
+	[p5 + (IMEM_CONTROL - COREMMR_BASE)] = r1;
 	SSYNC;
 
 	/* Turn off the dcache */
-	p0.l = LO(DMEM_CONTROL);
-	p0.h = HI(DMEM_CONTROL);
-	R1 = [p0];
-	R0 = ~ENDCPLB;
-	R0 = R0 & R1;
-	[p0] = R0;
+	r1 = [p5 + (DMEM_CONTROL - COREMMR_BASE)];
+	BITCLR (r1, ENDCPLB_P);
+	[p5 + (DMEM_CONTROL - COREMMR_BASE)] = r1;
 	SSYNC;
 
 	/* in case of double faults, save a few things */
@@ -122,25 +96,25 @@ ENTRY(__start)
 	 * below
 	 */
 	GET_PDA(p0, r0);
-	r6 = [p0 + PDA_DF_RETX];
+	r5 = [p0 + PDA_DF_RETX];
 	p1.l = _init_saved_retx;
 	p1.h = _init_saved_retx;
-	[p1] = r6;
+	[p1] = r5;
 
-	r6 = [p0 + PDA_DF_DCPLB];
+	r5 = [p0 + PDA_DF_DCPLB];
 	p1.l = _init_saved_dcplb_fault_addr;
 	p1.h = _init_saved_dcplb_fault_addr;
-	[p1] = r6;
+	[p1] = r5;
 
-	r6 = [p0 + PDA_DF_ICPLB];
+	r5 = [p0 + PDA_DF_ICPLB];
 	p1.l = _init_saved_icplb_fault_addr;
 	p1.h = _init_saved_icplb_fault_addr;
-	[p1] = r6;
+	[p1] = r5;
 
-	r6 = [p0 + PDA_DF_SEQSTAT];
+	r5 = [p0 + PDA_DF_SEQSTAT];
 	p1.l = _init_saved_seqstat;
 	p1.h = _init_saved_seqstat;
-	[p1] = r6;
+	[p1] = r5;
 #endif
 
 	/* Initialize stack pointer */
@@ -155,7 +129,7 @@ ENTRY(__start)
 	sti r0;
 #endif
 
-	r0 = 0 (x);
+	r0 = r6;
 	/* Zero out all of the fun bss regions */
 #if L1_DATA_A_LENGTH > 0
 	r1.l = __sbss_l1;
@@ -200,7 +174,7 @@ ENTRY(__start)
 	sp.l = lo(KERNEL_CLOCK_STACK);
 	sp.h = hi(KERNEL_CLOCK_STACK);
 	call _init_clocks;
-	sp = usp;	/* usp hasnt been touched, so restore from there */
+	sp = usp;	/* usp hasn't been touched, so restore from there */
 
 	/* This section keeps the processor in supervisor mode
206 /* This section keeps the processor in supervisor mode 180 /* This section keeps the processor in supervisor mode
@@ -210,11 +184,9 @@ ENTRY(__start)
210 184
211 /* EVT15 = _real_start */ 185 /* EVT15 = _real_start */
212 186
213 p0.l = lo(EVT15);
214 p0.h = hi(EVT15);
215 p1.l = _real_start; 187 p1.l = _real_start;
216 p1.h = _real_start; 188 p1.h = _real_start;
217 [p0] = p1; 189 [p5 + (EVT15 - COREMMR_BASE)] = p1;
218 csync; 190 csync;
219 191
220#ifdef CONFIG_EARLY_PRINTK 192#ifdef CONFIG_EARLY_PRINTK
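
The head.S rewrite keeps one base pointer (P5 = core MMR base) and a zero register (R6) so each MMR is touched as base plus a constant offset instead of loading a full 32-bit address per register. A small C analogue of base+offset register access — the fake register block and offsets are illustrative:

    #include <stdio.h>

    static unsigned char regs[0x100];       /* stands in for the core MMR block */
    #define REG(base, off) (*(volatile unsigned *)((base) + (off)))

    int main(void)
    {
        unsigned char *p5 = regs;           /* P5 = COREMMR_BASE */
        unsigned r6 = 0;                    /* R6 = 0 */

        REG(p5, 0x10) = r6;                 /* [p5 + (DTEST_COMMAND - base)] = r6 */
        REG(p5, 0x14) = r6;                 /* [p5 + (ITEST_COMMAND - base)] = r6 */
        printf("cleared: %u %u\n", REG(p5, 0x10), REG(p5, 0x14));
        return 0;
    }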
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index cee62cf4acd4..469ce7282dc8 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -116,7 +116,24 @@ __common_int_entry:
 	cc = r0 == 0;
 	if cc jump .Lcommon_restore_context;
 #else /* CONFIG_IPIPE */
+
+#ifdef CONFIG_PREEMPT
+	r7 = sp;
+	r4.l = lo(ALIGN_PAGE_MASK);
+	r4.h = hi(ALIGN_PAGE_MASK);
+	r7 = r7 & r4;
+	p5 = r7;
+	r7 = [p5 + TI_PREEMPT]; /* get preempt count */
+	r7 += 1; /* increment it */
+	[p5 + TI_PREEMPT] = r7;
+#endif
 	pseudo_long_call _do_irq, p2;
+
+#ifdef CONFIG_PREEMPT
+	r7 += -1;
+	[p5 + TI_PREEMPT] = r7; /* restore preempt count */
+#endif
+
 	SP += 12;
 #endif /* CONFIG_IPIPE */
 	pseudo_long_call _return_from_int, p2;
@@ -257,16 +274,16 @@ ENDPROC(_evt_system_call)
  * level to EVT14 to prepare the caller for a normal interrupt
  * return through RTI.
  *
- * We currently use this facility in two occasions:
+ * We currently use this feature in two occasions:
  *
- * - to branch to __ipipe_irq_tail_hook as requested by a high
+ * - before branching to __ipipe_irq_tail_hook as requested by a high
  *   priority domain after the pipeline delivered an interrupt,
  *   e.g. such as Xenomai, in order to start its rescheduling
  *   procedure, since we may not switch tasks when IRQ levels are
  *   nested on the Blackfin, so we have to fake an interrupt return
  *   so that we may reschedule immediately.
  *
- * - to branch to sync_root_irqs, in order to play any interrupt
+ * - before branching to __ipipe_sync_root(), in order to play any interrupt
  *   pending for the root domain (i.e. the Linux kernel). This lowers
  *   the core priority level enough so that Linux IRQ handlers may
  *   never delay interrupts handled by high priority domains; we defer
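
The interrupt.S hunk brackets _do_irq with a preempt-count increment and decrement so the new CONFIG_PREEMPT return path never reschedules from nested interrupt context. The shape of it in C:

    #include <stdio.h>

    static int preempt_count;               /* stands in for TI_PREEMPT */

    static void do_irq(void)
    {
        printf("in handler, preempt_count=%d\n", preempt_count);
    }

    static void common_int_entry(void)
    {
        preempt_count++;                    /* r7 = [p5 + TI_PREEMPT]; r7 += 1 */
        do_irq();
        preempt_count--;                    /* restore on the way out */
    }

    int main(void) { common_int_entry(); return 0; }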
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 1c8c4c7245c3..1177369f9922 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -15,35 +15,18 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/sched.h>
 #ifdef CONFIG_IPIPE
 #include <linux/ipipe.h>
 #endif
-#ifdef CONFIG_KGDB
-#include <linux/kgdb.h>
-#endif
 #include <asm/traps.h>
 #include <asm/blackfin.h>
 #include <asm/gpio.h>
 #include <asm/irq_handler.h>
 #include <asm/dpmc.h>
-#include <asm/bfin5xx_spi.h>
-#include <asm/bfin_sport.h>
-#include <asm/bfin_can.h>
 
 #define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
 
-#ifdef BF537_FAMILY
-# define BF537_GENERIC_ERROR_INT_DEMUX
-# define SPI_ERR_MASK (BIT_STAT_TXCOL | BIT_STAT_RBSY | BIT_STAT_MODF | BIT_STAT_TXE)	/* SPI_STAT */
-# define SPORT_ERR_MASK (ROVF | RUVF | TOVF | TUVF)	/* SPORT_STAT */
-# define PPI_ERR_MASK (0xFFFF & ~FLD)	/* PPI_STATUS */
-# define EMAC_ERR_MASK (PHYINT | MMCINT | RXFSINT | TXFSINT | WAKEDET | RXDMAERR | TXDMAERR | STMDONE)	/* EMAC_SYSTAT */
-# define UART_ERR_MASK (0x6)	/* UART_IIR */
-# define CAN_ERR_MASK (EWTIF | EWRIF | EPIF | BOIF | WUIF | UIAIF | AAIF | RMLIF | UCEIF | EXTIF | ADIF)	/* CAN_GIF */
-#else
-# undef BF537_GENERIC_ERROR_INT_DEMUX
-#endif
-
 /*
  * NOTES:
  * - we have separated the physical Hardware interrupt from the
@@ -62,22 +45,19 @@ unsigned long bfin_irq_flags = 0x1f;
 EXPORT_SYMBOL(bfin_irq_flags);
 #endif
 
-/* The number of spurious interrupts */
-atomic_t num_spurious;
-
 #ifdef CONFIG_PM
 unsigned long bfin_sic_iwr[3];	/* Up to 3 SIC_IWRx registers */
 unsigned vr_wakeup;
 #endif
 
-struct ivgx {
+static struct ivgx {
 	/* irq number for request_irq, available in mach-bf5xx/irq.h */
 	unsigned int irqno;
 	/* corresponding bit in the SIC_ISR register */
 	unsigned int isrflag;
 } ivg_table[NR_PERI_INTS];
 
-struct ivg_slice {
+static struct ivg_slice {
 	/* position of first irq in ivg_table for given ivg */
 	struct ivgx *ifirst;
 	struct ivgx *istop;
@@ -124,21 +104,21 @@ static void __init search_IAR(void)
  * This is for core internal IRQs
  */
 
-static void bfin_ack_noop(unsigned int irq)
+void bfin_ack_noop(struct irq_data *d)
 {
 	/* Dummy function.  */
 }
 
-static void bfin_core_mask_irq(unsigned int irq)
+static void bfin_core_mask_irq(struct irq_data *d)
 {
-	bfin_irq_flags &= ~(1 << irq);
-	if (!irqs_disabled_hw())
-		local_irq_enable_hw();
+	bfin_irq_flags &= ~(1 << d->irq);
+	if (!hard_irqs_disabled())
+		hard_local_irq_enable();
 }
 
-static void bfin_core_unmask_irq(unsigned int irq)
+static void bfin_core_unmask_irq(struct irq_data *d)
 {
-	bfin_irq_flags |= 1 << irq;
+	bfin_irq_flags |= 1 << d->irq;
 	/*
 	 * If interrupts are enabled, IMASK must contain the same value
 	 * as bfin_irq_flags. Make sure that invariant holds. If interrupts
@@ -148,82 +128,88 @@ static void bfin_core_unmask_irq(unsigned int irq)
 	 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
 	 * what we need.
 	 */
-	if (!irqs_disabled_hw())
-		local_irq_enable_hw();
+	if (!hard_irqs_disabled())
+		hard_local_irq_enable();
 	return;
 }
 
-static void bfin_internal_mask_irq(unsigned int irq)
+void bfin_internal_mask_irq(unsigned int irq)
 {
-	unsigned long flags;
+	unsigned long flags = hard_local_irq_save();
 
-#ifdef CONFIG_BF53x
-	local_irq_save_hw(flags);
-	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
-			     ~(1 << SIC_SYSIRQ(irq)));
-#else
-	unsigned mask_bank, mask_bit;
-	local_irq_save_hw(flags);
-	mask_bank = SIC_SYSIRQ(irq) / 32;
-	mask_bit = SIC_SYSIRQ(irq) % 32;
+#ifdef SIC_IMASK0
+	unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
+	unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
 	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
 			     ~(1 << mask_bit));
-#ifdef CONFIG_SMP
+# ifdef CONFIG_SMP
 	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
 			     ~(1 << mask_bit));
+# endif
+#else
+	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
+			     ~(1 << SIC_SYSIRQ(irq)));
 #endif
-#endif
-	local_irq_restore_hw(flags);
+
+	hard_local_irq_restore(flags);
+}
+
+static void bfin_internal_mask_irq_chip(struct irq_data *d)
+{
+	bfin_internal_mask_irq(d->irq);
 }
 
 #ifdef CONFIG_SMP
 static void bfin_internal_unmask_irq_affinity(unsigned int irq,
 		const struct cpumask *affinity)
 #else
-static void bfin_internal_unmask_irq(unsigned int irq)
+void bfin_internal_unmask_irq(unsigned int irq)
 #endif
 {
-	unsigned long flags;
+	unsigned long flags = hard_local_irq_save();
 
-#ifdef CONFIG_BF53x
-	local_irq_save_hw(flags);
-	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
-			     (1 << SIC_SYSIRQ(irq)));
-#else
-	unsigned mask_bank, mask_bit;
-	local_irq_save_hw(flags);
-	mask_bank = SIC_SYSIRQ(irq) / 32;
-	mask_bit = SIC_SYSIRQ(irq) % 32;
-#ifdef CONFIG_SMP
+#ifdef SIC_IMASK0
+	unsigned mask_bank = SIC_SYSIRQ(irq) / 32;
+	unsigned mask_bit = SIC_SYSIRQ(irq) % 32;
+# ifdef CONFIG_SMP
 	if (cpumask_test_cpu(0, affinity))
-#endif
+# endif
 		bfin_write_SIC_IMASK(mask_bank,
 				     bfin_read_SIC_IMASK(mask_bank) |
 				     (1 << mask_bit));
-#ifdef CONFIG_SMP
+# ifdef CONFIG_SMP
 	if (cpumask_test_cpu(1, affinity))
 		bfin_write_SICB_IMASK(mask_bank,
 				      bfin_read_SICB_IMASK(mask_bank) |
 				      (1 << mask_bit));
+# endif
+#else
+	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
+			     (1 << SIC_SYSIRQ(irq)));
 #endif
-#endif
-	local_irq_restore_hw(flags);
+
+	hard_local_irq_restore(flags);
 }
 
 #ifdef CONFIG_SMP
-static void bfin_internal_unmask_irq(unsigned int irq)
+static void bfin_internal_unmask_irq_chip(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	bfin_internal_unmask_irq_affinity(irq, desc->affinity);
+	bfin_internal_unmask_irq_affinity(d->irq, d->affinity);
 }
 
-static int bfin_internal_set_affinity(unsigned int irq, const struct cpumask *mask)
+static int bfin_internal_set_affinity(struct irq_data *d,
+				      const struct cpumask *mask, bool force)
 {
-	bfin_internal_mask_irq(irq);
-	bfin_internal_unmask_irq_affinity(irq, mask);
+	bfin_internal_mask_irq(d->irq);
+	bfin_internal_unmask_irq_affinity(d->irq, mask);
 
 	return 0;
 }
+#else
+static void bfin_internal_unmask_irq_chip(struct irq_data *d)
+{
+	bfin_internal_unmask_irq(d->irq);
+}
 #endif
 
 #ifdef CONFIG_PM
@@ -264,7 +250,7 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 		break;
 	}
 
-	local_irq_save_hw(flags);
+	flags = hard_local_irq_save();
 
 	if (state) {
 		bfin_sic_iwr[bank] |= (1 << bit);
@@ -275,36 +261,41 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 		vr_wakeup &= ~wakeup;
 	}
 
-	local_irq_restore_hw(flags);
+	hard_local_irq_restore(flags);
 
 	return 0;
 }
+
+static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
+{
+	return bfin_internal_set_wake(d->irq, state);
+}
+#else
+# define bfin_internal_set_wake_chip NULL
 #endif
 
 static struct irq_chip bfin_core_irqchip = {
 	.name = "CORE",
-	.ack = bfin_ack_noop,
-	.mask = bfin_core_mask_irq,
-	.unmask = bfin_core_unmask_irq,
+	.irq_ack = bfin_ack_noop,
+	.irq_mask = bfin_core_mask_irq,
+	.irq_unmask = bfin_core_unmask_irq,
 };
 
 static struct irq_chip bfin_internal_irqchip = {
 	.name = "INTN",
-	.ack = bfin_ack_noop,
-	.mask = bfin_internal_mask_irq,
-	.unmask = bfin_internal_unmask_irq,
-	.mask_ack = bfin_internal_mask_irq,
-	.disable = bfin_internal_mask_irq,
-	.enable = bfin_internal_unmask_irq,
+	.irq_ack = bfin_ack_noop,
+	.irq_mask = bfin_internal_mask_irq_chip,
+	.irq_unmask = bfin_internal_unmask_irq_chip,
+	.irq_mask_ack = bfin_internal_mask_irq_chip,
+	.irq_disable = bfin_internal_mask_irq_chip,
+	.irq_enable = bfin_internal_unmask_irq_chip,
 #ifdef CONFIG_SMP
-	.set_affinity = bfin_internal_set_affinity,
-#endif
-#ifdef CONFIG_PM
-	.set_wake = bfin_internal_set_wake,
+	.irq_set_affinity = bfin_internal_set_affinity,
 #endif
+	.irq_set_wake = bfin_internal_set_wake_chip,
 };
 
-static void bfin_handle_irq(unsigned irq)
+void bfin_handle_irq(unsigned irq)
 {
 #ifdef CONFIG_IPIPE
 	struct pt_regs regs;    /* Contents not used. */
@@ -312,107 +303,10 @@ static void bfin_handle_irq(unsigned irq)
 	__ipipe_handle_irq(irq, &regs);
 	ipipe_trace_irq_exit(irq);
 #else /* !CONFIG_IPIPE */
-	struct irq_desc *desc = irq_desc + irq;
-	desc->handle_irq(irq, desc);
+	generic_handle_irq(irq);
 #endif  /* !CONFIG_IPIPE */
 }
 
-#ifdef BF537_GENERIC_ERROR_INT_DEMUX
-static int error_int_mask;
-
-static void bfin_generic_error_mask_irq(unsigned int irq)
-{
-	error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
-	if (!error_int_mask)
-		bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
-}
-
-static void bfin_generic_error_unmask_irq(unsigned int irq)
-{
-	bfin_internal_unmask_irq(IRQ_GENERIC_ERROR);
-	error_int_mask |= 1L << (irq - IRQ_PPI_ERROR);
-}
-
-static struct irq_chip bfin_generic_error_irqchip = {
-	.name = "ERROR",
-	.ack = bfin_ack_noop,
-	.mask_ack = bfin_generic_error_mask_irq,
-	.mask = bfin_generic_error_mask_irq,
-	.unmask = bfin_generic_error_unmask_irq,
-};
-
-static void bfin_demux_error_irq(unsigned int int_err_irq,
-				 struct irq_desc *inta_desc)
-{
-	int irq = 0;
-
-#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
-	if (bfin_read_EMAC_SYSTAT() & EMAC_ERR_MASK)
-		irq = IRQ_MAC_ERROR;
-	else
-#endif
-	if (bfin_read_SPORT0_STAT() & SPORT_ERR_MASK)
-		irq = IRQ_SPORT0_ERROR;
-	else if (bfin_read_SPORT1_STAT() & SPORT_ERR_MASK)
-		irq = IRQ_SPORT1_ERROR;
-	else if (bfin_read_PPI_STATUS() & PPI_ERR_MASK)
-		irq = IRQ_PPI_ERROR;
-	else if (bfin_read_CAN_GIF() & CAN_ERR_MASK)
-		irq = IRQ_CAN_ERROR;
-	else if (bfin_read_SPI_STAT() & SPI_ERR_MASK)
-		irq = IRQ_SPI_ERROR;
-	else if ((bfin_read_UART0_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
-		irq = IRQ_UART0_ERROR;
-	else if ((bfin_read_UART1_IIR() & UART_ERR_MASK) == UART_ERR_MASK)
-		irq = IRQ_UART1_ERROR;
-
-	if (irq) {
-		if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
-			bfin_handle_irq(irq);
-		else {
-
-			switch (irq) {
-			case IRQ_PPI_ERROR:
-				bfin_write_PPI_STATUS(PPI_ERR_MASK);
-				break;
-#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
-			case IRQ_MAC_ERROR:
-				bfin_write_EMAC_SYSTAT(EMAC_ERR_MASK);
-				break;
-#endif
-			case IRQ_SPORT0_ERROR:
-				bfin_write_SPORT0_STAT(SPORT_ERR_MASK);
-				break;
-
-			case IRQ_SPORT1_ERROR:
-				bfin_write_SPORT1_STAT(SPORT_ERR_MASK);
-				break;
-
-			case IRQ_CAN_ERROR:
-				bfin_write_CAN_GIS(CAN_ERR_MASK);
-				break;
-
-			case IRQ_SPI_ERROR:
-				bfin_write_SPI_STAT(SPI_ERR_MASK);
-				break;
-
-			default:
-				break;
-			}
-
-			pr_debug("IRQ %d:"
-				 " MASKED PERIPHERAL ERROR INTERRUPT ASSERTED\n",
-				 irq);
-		}
-	} else
-		printk(KERN_ERR
-		       "%s : %s : LINE %d :\nIRQ ?: PERIPHERAL ERROR"
-		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
-		       __func__, __FILE__, __LINE__);
-
-}
-#endif /* BF537_GENERIC_ERROR_INT_DEMUX */
-
 #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
 static int mac_stat_int_mask;
 
@@ -448,10 +342,12 @@ static void bfin_mac_status_ack_irq(unsigned int irq)
 	}
 }
 
-static void bfin_mac_status_mask_irq(unsigned int irq)
+static void bfin_mac_status_mask_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
-#ifdef BF537_GENERIC_ERROR_INT_DEMUX
+#ifdef BF537_FAMILY
 	switch (irq) {
 	case IRQ_MAC_PHYINT:
 		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
@@ -466,9 +362,11 @@ static void bfin_mac_status_mask_irq(unsigned int irq)
 	bfin_mac_status_ack_irq(irq);
 }
 
-static void bfin_mac_status_unmask_irq(unsigned int irq)
+static void bfin_mac_status_unmask_irq(struct irq_data *d)
 {
-#ifdef BF537_GENERIC_ERROR_INT_DEMUX
+	unsigned int irq = d->irq;
+
+#ifdef BF537_FAMILY
 	switch (irq) {
 	case IRQ_MAC_PHYINT:
 		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
@@ -484,34 +382,34 @@ static void bfin_mac_status_unmask_irq(unsigned int irq)
 }
 
 #ifdef CONFIG_PM
-int bfin_mac_status_set_wake(unsigned int irq, unsigned int state)
+int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
 {
-#ifdef BF537_GENERIC_ERROR_INT_DEMUX
+#ifdef BF537_FAMILY
 	return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
 #else
 	return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
 #endif
 }
+#else
+# define bfin_mac_status_set_wake NULL
 #endif
 
 static struct irq_chip bfin_mac_status_irqchip = {
 	.name = "MACST",
-	.ack = bfin_ack_noop,
-	.mask_ack = bfin_mac_status_mask_irq,
-	.mask = bfin_mac_status_mask_irq,
-	.unmask = bfin_mac_status_unmask_irq,
-#ifdef CONFIG_PM
-	.set_wake = bfin_mac_status_set_wake,
-#endif
+	.irq_ack = bfin_ack_noop,
+	.irq_mask_ack = bfin_mac_status_mask_irq,
+	.irq_mask = bfin_mac_status_mask_irq,
+	.irq_unmask = bfin_mac_status_unmask_irq,
+	.irq_set_wake = bfin_mac_status_set_wake,
 };
 
-static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
-				      struct irq_desc *inta_desc)
+void bfin_demux_mac_status_irq(unsigned int int_err_irq,
+			       struct irq_desc *inta_desc)
 {
 	int i, irq = 0;
 	u32 status = bfin_read_EMAC_SYSTAT();
 
-	for (i = 0; i < (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
+	for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
 		if (status & (1L << i)) {
 			irq = IRQ_MAC_PHYINT + i;
 			break;
@@ -529,21 +427,18 @@ static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
 	} else
 		printk(KERN_ERR
 		       "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
-		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
-		       __func__, __FILE__, __LINE__);
+		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND"
+		       "(EMAC_SYSTAT=0x%X)\n",
+		       __func__, __FILE__, __LINE__, status);
 }
 #endif
 
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
 #ifdef CONFIG_IPIPE
-	_set_irq_handler(irq, handle_level_irq);
-#else
-	struct irq_desc *desc = irq_desc + irq;
-	/* May not call generic set_irq_handler() due to spinlock
-	   recursion. */
-	desc->handle_irq = handle;
+	handle = handle_level_irq;
 #endif
+	__irq_set_handler_locked(irq, handle);
 }
 
 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
@@ -551,58 +446,59 @@ extern void bfin_gpio_irq_prepare(unsigned gpio);
 
 #if !defined(CONFIG_BF54x)
 
-static void bfin_gpio_ack_irq(unsigned int irq)
+static void bfin_gpio_ack_irq(struct irq_data *d)
 {
 	/* AFAIK ack_irq in case mask_ack is provided
 	 * get's only called for edge sense irqs
 	 */
-	set_gpio_data(irq_to_gpio(irq), 0);
+	set_gpio_data(irq_to_gpio(d->irq), 0);
 }
 
-static void bfin_gpio_mask_ack_irq(unsigned int irq)
+static void bfin_gpio_mask_ack_irq(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	unsigned int irq = d->irq;
 	u32 gpionr = irq_to_gpio(irq);
 
-	if (desc->handle_irq == handle_edge_irq)
+	if (!irqd_is_level_type(d))
 		set_gpio_data(gpionr, 0);
 
 	set_gpio_maska(gpionr, 0);
 }
 
-static void bfin_gpio_mask_irq(unsigned int irq)
+static void bfin_gpio_mask_irq(struct irq_data *d)
 {
-	set_gpio_maska(irq_to_gpio(irq), 0);
+	set_gpio_maska(irq_to_gpio(d->irq), 0);
 }
 
-static void bfin_gpio_unmask_irq(unsigned int irq)
+static void bfin_gpio_unmask_irq(struct irq_data *d)
 {
-	set_gpio_maska(irq_to_gpio(irq), 1);
+	set_gpio_maska(irq_to_gpio(d->irq), 1);
 }
 
-static unsigned int bfin_gpio_irq_startup(unsigned int irq)
+static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
 {
-	u32 gpionr = irq_to_gpio(irq);
+	u32 gpionr = irq_to_gpio(d->irq);
 
 	if (__test_and_set_bit(gpionr, gpio_enabled))
 		bfin_gpio_irq_prepare(gpionr);
 
-	bfin_gpio_unmask_irq(irq);
+	bfin_gpio_unmask_irq(d);
 
 	return 0;
 }
 
-static void bfin_gpio_irq_shutdown(unsigned int irq)
+static void bfin_gpio_irq_shutdown(struct irq_data *d)
 {
-	u32 gpionr = irq_to_gpio(irq);
+	u32 gpionr = irq_to_gpio(d->irq);
 
-	bfin_gpio_mask_irq(irq);
+	bfin_gpio_mask_irq(d);
 	__clear_bit(gpionr, gpio_enabled);
 	bfin_gpio_irq_free(gpionr);
 }
 
-static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
+static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
+	unsigned int irq = d->irq;
 	int ret;
 	char buf[16];
 	u32 gpionr = irq_to_gpio(irq);
@@ -663,29 +559,48 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
 }
 
 #ifdef CONFIG_PM
-int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
+static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
 {
-	return gpio_pm_wakeup_ctrl(irq_to_gpio(irq), state);
+	return gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
 }
+#else
+# define bfin_gpio_set_wake NULL
 #endif
 
-static void bfin_demux_gpio_irq(unsigned int inta_irq,
-				struct irq_desc *desc)
+static void bfin_demux_gpio_block(unsigned int irq)
 {
-	unsigned int i, gpio, mask, irq, search = 0;
+	unsigned int gpio, mask;
+
+	gpio = irq_to_gpio(irq);
+	mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
+
+	while (mask) {
+		if (mask & 1)
+			bfin_handle_irq(irq);
+		irq++;
+		mask >>= 1;
+	}
+}
+
+void bfin_demux_gpio_irq(unsigned int inta_irq,
+			 struct irq_desc *desc)
+{
+	unsigned int irq;
 
 	switch (inta_irq) {
-#if defined(CONFIG_BF53x)
-	case IRQ_PROG_INTA:
-		irq = IRQ_PF0;
-		search = 1;
+#if defined(BF537_FAMILY)
+	case IRQ_PF_INTA_PG_INTA:
+		bfin_demux_gpio_block(IRQ_PF0);
+		irq = IRQ_PG0;
 		break;
-# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
-	case IRQ_MAC_RX:
+	case IRQ_PH_INTA_MAC_RX:
 		irq = IRQ_PH0;
 		break;
-# endif
-#elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
+#elif defined(BF533_FAMILY)
+	case IRQ_PROG_INTA:
+		irq = IRQ_PF0;
+		break;
+#elif defined(BF538_FAMILY)
 	case IRQ_PORTF_INTA:
 		irq = IRQ_PF0;
 		break;
@@ -715,31 +630,7 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
 		return;
 	}
 
-	if (search) {
-		for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) {
-			irq += i;
-
-			mask = get_gpiop_data(i) & get_gpiop_maska(i);
-
-			while (mask) {
-				if (mask & 1)
-					bfin_handle_irq(irq);
-				irq++;
-				mask >>= 1;
-			}
-		}
-	} else {
-		gpio = irq_to_gpio(irq);
-		mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
-
-		do {
-			if (mask & 1)
-				bfin_handle_irq(irq);
-			irq++;
-			mask >>= 1;
-		} while (mask);
-	}
-
+	bfin_demux_gpio_block(irq);
 }
 
 #else				/* CONFIG_BF54x */
@@ -817,14 +708,13 @@ void init_pint_lut(void)
 	}
 }
 
-static void bfin_gpio_ack_irq(unsigned int irq)
+static void bfin_gpio_ack_irq(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_desc + irq;
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
 	u32 bank = PINT_2_BANK(pint_val);
 
-	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
 		if (pint[bank]->invert_set & pintbit)
 			pint[bank]->invert_clear = pintbit;
 		else
@@ -834,14 +724,13 @@ static void bfin_gpio_ack_irq(unsigned int irq)
 
 }
 
-static void bfin_gpio_mask_ack_irq(unsigned int irq)
+static void bfin_gpio_mask_ack_irq(struct irq_data *d)
 {
-	struct irq_desc *desc = irq_desc + irq;
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
 	u32 bank = PINT_2_BANK(pint_val);
 
-	if ((desc->status & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
+	if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) {
 		if (pint[bank]->invert_set & pintbit)
 			pint[bank]->invert_clear = pintbit;
 		else
@@ -852,25 +741,25 @@ static void bfin_gpio_mask_ack_irq(unsigned int irq)
 	pint[bank]->mask_clear = pintbit;
 }
 
-static void bfin_gpio_mask_irq(unsigned int irq)
+static void bfin_gpio_mask_irq(struct irq_data *d)
 {
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 
 	pint[PINT_2_BANK(pint_val)]->mask_clear = PINT_BIT(pint_val);
 }
 
-static void bfin_gpio_unmask_irq(unsigned int irq)
+static void bfin_gpio_unmask_irq(struct irq_data *d)
 {
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 pintbit = PINT_BIT(pint_val);
 	u32 bank = PINT_2_BANK(pint_val);
 
-	pint[bank]->request = pintbit;
 	pint[bank]->mask_set = pintbit;
 }
 
-static unsigned int bfin_gpio_irq_startup(unsigned int irq)
+static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	u32 gpionr = irq_to_gpio(irq);
 	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
 
@@ -884,22 +773,23 @@ static unsigned int bfin_gpio_irq_startup(unsigned int irq)
 	if (__test_and_set_bit(gpionr, gpio_enabled))
 		bfin_gpio_irq_prepare(gpionr);
 
-	bfin_gpio_unmask_irq(irq);
+	bfin_gpio_unmask_irq(d);
 
 	return 0;
 }
 
-static void bfin_gpio_irq_shutdown(unsigned int irq)
+static void bfin_gpio_irq_shutdown(struct irq_data *d)
 {
-	u32 gpionr = irq_to_gpio(irq);
+	u32 gpionr = irq_to_gpio(d->irq);
 
-	bfin_gpio_mask_irq(irq);
+	bfin_gpio_mask_irq(d);
 	__clear_bit(gpionr, gpio_enabled);
 	bfin_gpio_irq_free(gpionr);
 }
 
-static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
+static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
 {
+	unsigned int irq = d->irq;
 	int ret;
 	char buf[16];
 	u32 gpionr = irq_to_gpio(irq);
@@ -958,15 +848,11 @@ static int bfin_gpio_irq_type(unsigned int irq, unsigned int type)
 }
 
 #ifdef CONFIG_PM
-u32 pint_saved_masks[NR_PINT_SYS_IRQS];
-u32 pint_wakeup_masks[NR_PINT_SYS_IRQS];
-
-int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
+static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
 {
 	u32 pint_irq;
-	u32 pint_val = irq2pint_lut[irq - SYS_IRQS];
+	u32 pint_val = irq2pint_lut[d->irq - SYS_IRQS];
 	u32 bank = PINT_2_BANK(pint_val);
-	u32 pintbit = PINT_BIT(pint_val);
 
 	switch (bank) {
 	case 0:
@@ -987,46 +873,14 @@ int bfin_gpio_set_wake(unsigned int irq, unsigned int state)
 
 	bfin_internal_set_wake(pint_irq, state);
 
-	if (state)
-		pint_wakeup_masks[bank] |= pintbit;
-	else
-		pint_wakeup_masks[bank] &= ~pintbit;
-
-	return 0;
-}
-
-u32 bfin_pm_setup(void)
-{
-	u32 val, i;
-
-	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
-		val = pint[i]->mask_clear;
-		pint_saved_masks[i] = val;
-		if (val ^ pint_wakeup_masks[i]) {
-			pint[i]->mask_clear = val;
-			pint[i]->mask_set = pint_wakeup_masks[i];
-		}
-	}
-
 	return 0;
 }
-
-void bfin_pm_restore(void)
-{
-	u32 i, val;
-
-	for (i = 0; i < NR_PINT_SYS_IRQS; i++) {
-		val = pint_saved_masks[i];
-		if (val ^ pint_wakeup_masks[i]) {
-			pint[i]->mask_clear = pint[i]->mask_clear;
-			pint[i]->mask_set = val;
-		}
-	}
-}
+#else
+# define bfin_gpio_set_wake NULL
 #endif
 
-static void bfin_demux_gpio_irq(unsigned int inta_irq,
-				struct irq_desc *desc)
+void bfin_demux_gpio_irq(unsigned int inta_irq,
+			 struct irq_desc *desc)
 {
 	u32 bank, pint_val;
 	u32 request, irq;
@@ -1066,18 +920,16 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
1066 920
1067static struct irq_chip bfin_gpio_irqchip = { 921static struct irq_chip bfin_gpio_irqchip = {
1068 .name = "GPIO", 922 .name = "GPIO",
1069 .ack = bfin_gpio_ack_irq, 923 .irq_ack = bfin_gpio_ack_irq,
1070 .mask = bfin_gpio_mask_irq, 924 .irq_mask = bfin_gpio_mask_irq,
1071 .mask_ack = bfin_gpio_mask_ack_irq, 925 .irq_mask_ack = bfin_gpio_mask_ack_irq,
1072 .unmask = bfin_gpio_unmask_irq, 926 .irq_unmask = bfin_gpio_unmask_irq,
1073 .disable = bfin_gpio_mask_irq, 927 .irq_disable = bfin_gpio_mask_irq,
1074 .enable = bfin_gpio_unmask_irq, 928 .irq_enable = bfin_gpio_unmask_irq,
1075 .set_type = bfin_gpio_irq_type, 929 .irq_set_type = bfin_gpio_irq_type,
1076 .startup = bfin_gpio_irq_startup, 930 .irq_startup = bfin_gpio_irq_startup,
1077 .shutdown = bfin_gpio_irq_shutdown, 931 .irq_shutdown = bfin_gpio_irq_shutdown,
1078#ifdef CONFIG_PM 932 .irq_set_wake = bfin_gpio_set_wake,
1079 .set_wake = bfin_gpio_set_wake,
1080#endif
1081}; 933};
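
This struct is where the hunk's theme is most visible: the genirq core moved from irq-number callbacks (.mask, .unmask, ...) to irq_data based ones (.irq_mask, .irq_unmask, ...). A minimal sketch of the conversion pattern, assuming the callback only needs the Linux irq number back:

    #include <linux/irq.h>

    /* Old style: the chip callback received the irq number directly. */
    static void old_style_mask(unsigned int irq)
    {
        /* per-irq masking work keyed off "irq" */
    }

    /* New style: it receives struct irq_data and recovers the number. */
    static void new_style_mask(struct irq_data *d)
    {
        unsigned int irq = d->irq;    /* same information, new carrier */
        /* identical per-irq masking work */
    }
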
1082 934
1083void __cpuinit init_exception_vectors(void) 935void __cpuinit init_exception_vectors(void)
@@ -1111,12 +963,12 @@ int __init init_arch_irq(void)
1111{ 963{
1112 int irq; 964 int irq;
1113 unsigned long ilat = 0; 965 unsigned long ilat = 0;
966
1114 /* Disable all the peripheral intrs - page 4-29 HW Ref manual */ 967 /* Disable all the peripheral intrs - page 4-29 HW Ref manual */
1115#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561) \ 968#ifdef SIC_IMASK0
1116 || defined(BF538_FAMILY) || defined(CONFIG_BF51x)
1117 bfin_write_SIC_IMASK0(SIC_UNMASK_ALL); 969 bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
1118 bfin_write_SIC_IMASK1(SIC_UNMASK_ALL); 970 bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
1119# ifdef CONFIG_BF54x 971# ifdef SIC_IMASK2
1120 bfin_write_SIC_IMASK2(SIC_UNMASK_ALL); 972 bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
1121# endif 973# endif
1122# ifdef CONFIG_SMP 974# ifdef CONFIG_SMP
@@ -1129,11 +981,6 @@ int __init init_arch_irq(void)
1129 981
1130 local_irq_disable(); 982 local_irq_disable();
1131 983
1132#if (defined(CONFIG_BF537) || defined(CONFIG_BF536))
1133 /* Clear EMAC Interrupt Status bits so we can demux it later */
1134 bfin_write_EMAC_SYSTAT(-1);
1135#endif
1136
1137#ifdef CONFIG_BF54x 984#ifdef CONFIG_BF54x
1138# ifdef CONFIG_PINTx_REASSIGN 985# ifdef CONFIG_PINTx_REASSIGN
1139 pint[0]->assign = CONFIG_PINT0_ASSIGN; 986 pint[0]->assign = CONFIG_PINT0_ASSIGN;
@@ -1147,16 +994,16 @@ int __init init_arch_irq(void)
1147 994
1148 for (irq = 0; irq <= SYS_IRQS; irq++) { 995 for (irq = 0; irq <= SYS_IRQS; irq++) {
1149 if (irq <= IRQ_CORETMR) 996 if (irq <= IRQ_CORETMR)
1150 set_irq_chip(irq, &bfin_core_irqchip); 997 irq_set_chip(irq, &bfin_core_irqchip);
1151 else 998 else
1152 set_irq_chip(irq, &bfin_internal_irqchip); 999 irq_set_chip(irq, &bfin_internal_irqchip);
1153 1000
1154 switch (irq) { 1001 switch (irq) {
1155#if defined(CONFIG_BF53x) 1002#if defined(BF537_FAMILY)
1003 case IRQ_PH_INTA_MAC_RX:
1004 case IRQ_PF_INTA_PG_INTA:
1005#elif defined(BF533_FAMILY)
1156 case IRQ_PROG_INTA: 1006 case IRQ_PROG_INTA:
1157# if defined(BF537_FAMILY) && !(defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
1158 case IRQ_MAC_RX:
1159# endif
1160#elif defined(CONFIG_BF54x) 1007#elif defined(CONFIG_BF54x)
1161 case IRQ_PINT0: 1008 case IRQ_PINT0:
1162 case IRQ_PINT1: 1009 case IRQ_PINT1:
@@ -1170,76 +1017,61 @@ int __init init_arch_irq(void)
1170 case IRQ_PROG0_INTA: 1017 case IRQ_PROG0_INTA:
1171 case IRQ_PROG1_INTA: 1018 case IRQ_PROG1_INTA:
1172 case IRQ_PROG2_INTA: 1019 case IRQ_PROG2_INTA:
1173#elif defined(CONFIG_BF538) || defined(CONFIG_BF539) 1020#elif defined(BF538_FAMILY)
1174 case IRQ_PORTF_INTA: 1021 case IRQ_PORTF_INTA:
1175#endif 1022#endif
1176 set_irq_chained_handler(irq, 1023 irq_set_chained_handler(irq, bfin_demux_gpio_irq);
1177 bfin_demux_gpio_irq);
1178 break; 1024 break;
1179#ifdef BF537_GENERIC_ERROR_INT_DEMUX
1180 case IRQ_GENERIC_ERROR:
1181 set_irq_chained_handler(irq, bfin_demux_error_irq);
1182 break;
1183#endif
1184#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 1025#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1185 case IRQ_MAC_ERROR: 1026 case IRQ_MAC_ERROR:
1186 set_irq_chained_handler(irq, bfin_demux_mac_status_irq); 1027 irq_set_chained_handler(irq,
1028 bfin_demux_mac_status_irq);
1187 break; 1029 break;
1188#endif 1030#endif
1189#ifdef CONFIG_SMP 1031#ifdef CONFIG_SMP
1190 case IRQ_SUPPLE_0: 1032 case IRQ_SUPPLE_0:
1191 case IRQ_SUPPLE_1: 1033 case IRQ_SUPPLE_1:
1192 set_irq_handler(irq, handle_percpu_irq); 1034 irq_set_handler(irq, handle_percpu_irq);
1193 break; 1035 break;
1194#endif 1036#endif
1195 1037
1196#ifdef CONFIG_TICKSOURCE_CORETMR 1038#ifdef CONFIG_TICKSOURCE_CORETMR
1197 case IRQ_CORETMR: 1039 case IRQ_CORETMR:
1198# ifdef CONFIG_SMP 1040# ifdef CONFIG_SMP
1199 set_irq_handler(irq, handle_percpu_irq); 1041 irq_set_handler(irq, handle_percpu_irq);
1200 break;
1201# else 1042# else
1202 set_irq_handler(irq, handle_simple_irq); 1043 irq_set_handler(irq, handle_simple_irq);
1203 break;
1204# endif 1044# endif
1045 break;
1205#endif 1046#endif
1206 1047
1207#ifdef CONFIG_TICKSOURCE_GPTMR0 1048#ifdef CONFIG_TICKSOURCE_GPTMR0
1208 case IRQ_TIMER0: 1049 case IRQ_TIMER0:
1209 set_irq_handler(irq, handle_simple_irq); 1050 irq_set_handler(irq, handle_simple_irq);
1210 break; 1051 break;
1211#endif 1052#endif
1212 1053
1213#ifdef CONFIG_IPIPE
1214 default: 1054 default:
1215 set_irq_handler(irq, handle_level_irq); 1055#ifdef CONFIG_IPIPE
1216 break; 1056 irq_set_handler(irq, handle_level_irq);
1217#else /* !CONFIG_IPIPE */ 1057#else
1218 default: 1058 irq_set_handler(irq, handle_simple_irq);
1219 set_irq_handler(irq, handle_simple_irq); 1059#endif
1220 break; 1060 break;
1221#endif /* !CONFIG_IPIPE */
1222 } 1061 }
1223 } 1062 }
1224 1063
1225#ifdef BF537_GENERIC_ERROR_INT_DEMUX 1064 init_mach_irq();
1226 for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
1227 set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
1228 handle_level_irq);
1229#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1230 set_irq_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
1231#endif
1232#endif
1233 1065
1234#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) 1066#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
1235 for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++) 1067 for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
1236 set_irq_chip_and_handler(irq, &bfin_mac_status_irqchip, 1068 irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
1237 handle_level_irq); 1069 handle_level_irq);
1238#endif 1070#endif
1239 /* if configured as edge, then will be changed to do_edge_IRQ */ 1071 /* if configured as edge, then will be changed to do_edge_IRQ */
1240 for (irq = GPIO_IRQ_BASE; 1072 for (irq = GPIO_IRQ_BASE;
1241 irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++) 1073 irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
1242 set_irq_chip_and_handler(irq, &bfin_gpio_irqchip, 1074 irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
1243 handle_level_irq); 1075 handle_level_irq);
1244 1076
1245 bfin_write_IMASK(0); 1077 bfin_write_IMASK(0);
@@ -1291,53 +1123,54 @@ int __init init_arch_irq(void)
1291#ifdef CONFIG_DO_IRQ_L1 1123#ifdef CONFIG_DO_IRQ_L1
1292__attribute__((l1_text)) 1124__attribute__((l1_text))
1293#endif 1125#endif
1294void do_irq(int vec, struct pt_regs *fp) 1126static int vec_to_irq(int vec)
1295{ 1127{
1296 if (vec == EVT_IVTMR_P) { 1128 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1297 vec = IRQ_CORETMR; 1129 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1298 } else { 1130 unsigned long sic_status[3];
1299 struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
1300 struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
1301#if defined(SIC_ISR0) || defined(SICA_ISR0)
1302 unsigned long sic_status[3];
1303 1131
1304 if (smp_processor_id()) { 1132 if (likely(vec == EVT_IVTMR_P))
1133 return IRQ_CORETMR;
1134
1135#ifdef SIC_ISR
1136 sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1137#else
1138 if (smp_processor_id()) {
1305# ifdef SICB_ISR0 1139# ifdef SICB_ISR0
1306 /* This will be optimized out in UP mode. */ 1140 /* This will be optimized out in UP mode. */
1307 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0(); 1141 sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
1308 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1(); 1142 sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
1309# endif
1310 } else {
1311 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1312 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1313 }
1314# ifdef SIC_ISR2
1315 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1316# endif 1143# endif
1317 for (;; ivg++) { 1144 } else {
1318 if (ivg >= ivg_stop) { 1145 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1319 atomic_inc(&num_spurious); 1146 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1320 return; 1147 }
1321 } 1148#endif
1322 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag) 1149#ifdef SIC_ISR2
1323 break; 1150 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1324 } 1151#endif
1325#else
1326 unsigned long sic_status;
1327
1328 sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1329 1152
1330 for (;; ivg++) { 1153 for (;; ivg++) {
1331 if (ivg >= ivg_stop) { 1154 if (ivg >= ivg_stop)
1332 atomic_inc(&num_spurious); 1155 return -1;
1333 return; 1156#ifdef SIC_ISR
1334 } else if (sic_status & ivg->isrflag) 1157 if (sic_status[0] & ivg->isrflag)
1335 break; 1158#else
1336 } 1159 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1337#endif 1160#endif
1338 vec = ivg->irqno; 1161 return ivg->irqno;
1339 } 1162 }
1340 asm_do_IRQ(vec, fp); 1163}
1164
1165#ifdef CONFIG_DO_IRQ_L1
1166__attribute__((l1_text))
1167#endif
1168void do_irq(int vec, struct pt_regs *fp)
1169{
1170 int irq = vec_to_irq(vec);
1171 if (irq == -1)
1172 return;
1173 asm_do_IRQ(irq, fp);
1341} 1174}
1342 1175
1343#ifdef CONFIG_IPIPE 1176#ifdef CONFIG_IPIPE
@@ -1373,42 +1206,11 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1373 struct ipipe_domain *this_domain = __ipipe_current_domain; 1206 struct ipipe_domain *this_domain = __ipipe_current_domain;
1374 struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop; 1207 struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
1375 struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst; 1208 struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
1376 int irq, s; 1209 int irq, s = 0;
1377 1210
1378 if (likely(vec == EVT_IVTMR_P)) 1211 irq = vec_to_irq(vec);
1379 irq = IRQ_CORETMR; 1212 if (irq == -1)
1380 else { 1213 return 0;
1381#if defined(SIC_ISR0) || defined(SICA_ISR0)
1382 unsigned long sic_status[3];
1383
1384 sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
1385 sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
1386# ifdef SIC_ISR2
1387 sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
1388# endif
1389 for (;; ivg++) {
1390 if (ivg >= ivg_stop) {
1391 atomic_inc(&num_spurious);
1392 return 0;
1393 }
1394 if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
1395 break;
1396 }
1397#else
1398 unsigned long sic_status;
1399
1400 sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
1401
1402 for (;; ivg++) {
1403 if (ivg >= ivg_stop) {
1404 atomic_inc(&num_spurious);
1405 return 0;
1406 } else if (sic_status & ivg->isrflag)
1407 break;
1408 }
1409#endif
1410 irq = ivg->irqno;
1411 }
1412 1214
1413 if (irq == IRQ_SYSTMR) { 1215 if (irq == IRQ_SYSTMR) {
1414#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0) 1216#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
@@ -1423,6 +1225,21 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1423 __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10; 1225 __raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
1424 } 1226 }
1425 1227
1228 /*
1229 * We don't want Linux interrupt handlers to run at the
1230 * current core priority level (i.e. < EVT15), since this
1231 * might delay other interrupts handled by a high priority
1232 * domain. Here is what we do instead:
1233 *
1234 * - we raise the SYNCDEFER bit to prevent
1235 * __ipipe_handle_irq() from syncing the pipeline for the root
1236 * stage for the incoming interrupt. Upon return, that IRQ is
1237 * pending in the interrupt log.
1238 *
1239 * - we raise the TIF_IRQ_SYNC bit for the current thread, so
1240 * that _schedule_and_signal_from_int will eventually sync the
1241 * pipeline from EVT15.
1242 */
1426 if (this_domain == ipipe_root_domain) { 1243 if (this_domain == ipipe_root_domain) {
1427 s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status); 1244 s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
1428 barrier(); 1245 barrier();
@@ -1432,6 +1249,24 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
1432 __ipipe_handle_irq(irq, regs); 1249 __ipipe_handle_irq(irq, regs);
1433 ipipe_trace_irq_exit(irq); 1250 ipipe_trace_irq_exit(irq);
1434 1251
1252 if (user_mode(regs) &&
1253 !ipipe_test_foreign_stack() &&
1254 (current->ipipe_flags & PF_EVTRET) != 0) {
1255 /*
1256 * Testing for user_mode(regs) does NOT fully eliminate
1257 * foreign stack contexts, because of the forged
1258 * interrupt returns we do through
1259 * __ipipe_call_irqtail. In that case, we might have
1260 * preempted a foreign stack context in a high
1261 * priority domain, with a single interrupt level now
1262 * pending after the irqtail unwinding is done. In
1263 * which case user_mode() is now true, and the event
1264 * gets dispatched spuriously.
1265 */
1266 current->ipipe_flags &= ~PF_EVTRET;
1267 __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
1268 }
1269
1435 if (this_domain == ipipe_root_domain) { 1270 if (this_domain == ipipe_root_domain) {
1436 set_thread_flag(TIF_IRQ_SYNC); 1271 set_thread_flag(TIF_IRQ_SYNC);
1437 if (!s) { 1272 if (!s) {
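
The comment introduced above describes a two-step handshake; condensed into a hedged sketch (names taken from this hunk; the tail of the function is elided by the diff):

    /* Root-domain path of __ipipe_grab_irq(), condensed: */
    if (this_domain == ipipe_root_domain) {
        /* defer the pipeline sync; the IRQ stays logged, not delivered */
        s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
        barrier();
    }

    __ipipe_handle_irq(irq, regs);    /* logs the IRQ, root stage not synced */

    if (this_domain == ipipe_root_domain) {
        set_thread_flag(TIF_IRQ_SYNC);    /* sync later, from EVT15 */
        if (!s) {
            /* ... flag cleanup elided by the hunk above ... */
        }
    }
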
diff --git a/arch/blackfin/mach-common/irqpanic.c b/arch/blackfin/mach-common/irqpanic.c
deleted file mode 100644
index c6496249e2bc..000000000000
--- a/arch/blackfin/mach-common/irqpanic.c
+++ /dev/null
@@ -1,106 +0,0 @@
1/*
2 * panic kernel with dump information
3 *
4 * Copyright 2005-2009 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/module.h>
10#include <linux/kernel_stat.h>
11#include <linux/sched.h>
12#include <asm/blackfin.h>
13
14#define L1_ICACHE_START 0xffa10000
15#define L1_ICACHE_END 0xffa13fff
16
17/*
18 * irq_panic - calls panic with string setup
19 */
20__attribute__ ((l1_text))
21asmlinkage void irq_panic(int reason, struct pt_regs *regs)
22{
23 unsigned int cmd, tag, ca, cache_hi, cache_lo, *pa;
24 unsigned short i, j, die;
25 unsigned int bad[10][6];
26
27 /* check entire cache for coherency
28 * Since printk is in cacheable memory,
29 * don't call it until you have checked everything
30 */
31
32 die = 0;
33 i = 0;
34
35 /* check icache */
36
37 for (ca = L1_ICACHE_START; ca <= L1_ICACHE_END && i < 10; ca += 32) {
38
39 /* Grab various address bits for the itest_cmd fields */
40 cmd = (((ca & 0x3000) << 4) | /* ca[13:12] for SBNK[1:0] */
41 ((ca & 0x0c00) << 16) | /* ca[11:10] for WAYSEL[1:0] */
42 ((ca & 0x3f8)) | /* ca[09:03] for SET[4:0] and DW[1:0] */
43 0); /* Access Tag, Read access */
44
45 SSYNC();
46 bfin_write_ITEST_COMMAND(cmd);
47 SSYNC();
48 tag = bfin_read_ITEST_DATA0();
49 SSYNC();
50
51 /* if tag is marked as valid, check it */
52 if (tag & 1) {
53 /* The icache is arranged in 4 groups of 64-bits */
54 for (j = 0; j < 32; j += 8) {
55 cmd = ((((ca + j) & 0x3000) << 4) | /* ca[13:12] for SBNK[1:0] */
56 (((ca + j) & 0x0c00) << 16) | /* ca[11:10] for WAYSEL[1:0] */
57 (((ca + j) & 0x3f8)) | /* ca[09:03] for SET[4:0] and DW[1:0] */
58 4); /* Access Data, Read access */
59
60 SSYNC();
61 bfin_write_ITEST_COMMAND(cmd);
62 SSYNC();
63
64 cache_hi = bfin_read_ITEST_DATA1();
65 cache_lo = bfin_read_ITEST_DATA0();
66
67 pa = ((unsigned int *)((tag & 0xffffcc00) |
68 ((ca + j) & ~(0xffffcc00))));
69
70 /*
71 * Debugging this, enable
72 *
73 * printk("addr: %08x %08x%08x | %08x%08x\n",
74 * ((unsigned int *)((tag & 0xffffcc00) | ((ca+j) & ~(0xffffcc00)))),
75 * cache_hi, cache_lo, *(pa+1), *pa);
76 */
77
78 if (cache_hi != *(pa + 1) || cache_lo != *pa) {
79 /* Since icache is not working, stay out of it, by not printing */
80 die = 1;
81 bad[i][0] = (ca + j);
82 bad[i][1] = cache_hi;
83 bad[i][2] = cache_lo;
84 bad[i][3] = ((tag & 0xffffcc00) |
85 ((ca + j) & ~(0xffffcc00)));
86 bad[i][4] = *(pa + 1);
87 bad[i][5] = *(pa);
88 i++;
89 }
90 }
91 }
92 }
93 if (die) {
94 printk(KERN_EMERG "icache coherency error\n");
95 for (j = 0; j <= i; j++) {
96 printk(KERN_EMERG
97 "cache address : %08x cache value : %08x%08x\n",
98 bad[j][0], bad[j][1], bad[j][2]);
99 printk(KERN_EMERG
100 "physical address: %08x SDRAM value : %08x%08x\n",
101 bad[j][3], bad[j][4], bad[j][5]);
102 }
103 panic("icache coherency error");
104 } else
105 printk(KERN_EMERG "icache checked, and OK\n");
106}
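
For reference, the checker deleted here probed the icache by packing address bits of each cache line into an ITEST command word. A worked example of that packing (the values follow directly from the shifts in the deleted code; the address is arbitrary):

    /* Tag-read command for ca = 0xffa13ec0 (illustrative): */
    unsigned int ca  = 0xffa13ec0;
    unsigned int cmd = ((ca & 0x3000) << 4)      /* 0x00030000: SBNK[1:0]   */
                     | ((ca & 0x0c00) << 16)     /* 0x0c000000: WAYSEL[1:0] */
                     | (ca & 0x3f8);             /* 0x000002c0: SET and DW  */
    /* cmd == 0x0c0302c0; bit 2 clear selects a tag (not data) read */
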
diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
index 09c1fb410748..3c648a077e75 100644
--- a/arch/blackfin/mach-common/pm.c
+++ b/arch/blackfin/mach-common/pm.c
@@ -23,9 +23,6 @@
23 23
24void bfin_pm_suspend_standby_enter(void) 24void bfin_pm_suspend_standby_enter(void)
25{ 25{
26 unsigned long flags;
27
28 local_irq_save_hw(flags);
29 bfin_pm_standby_setup(); 26 bfin_pm_standby_setup();
30 27
31#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER 28#ifdef CONFIG_PM_BFIN_SLEEP_DEEPER
@@ -55,8 +52,6 @@ void bfin_pm_suspend_standby_enter(void)
55#else 52#else
56 bfin_write_SIC_IWR(IWR_DISABLE_ALL); 53 bfin_write_SIC_IWR(IWR_DISABLE_ALL);
57#endif 54#endif
58
59 local_irq_restore_hw(flags);
60} 55}
61 56
62int bf53x_suspend_l1_mem(unsigned char *memptr) 57int bf53x_suspend_l1_mem(unsigned char *memptr)
@@ -127,7 +122,6 @@ static void flushinv_all_dcache(void)
127 122
128int bfin_pm_suspend_mem_enter(void) 123int bfin_pm_suspend_mem_enter(void)
129{ 124{
130 unsigned long flags;
131 int wakeup, ret; 125 int wakeup, ret;
132 126
133 unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH 127 unsigned char *memptr = kmalloc(L1_CODE_LENGTH + L1_DATA_A_LENGTH
@@ -149,12 +143,9 @@ int bfin_pm_suspend_mem_enter(void)
149 wakeup |= GPWE; 143 wakeup |= GPWE;
150#endif 144#endif
151 145
152 local_irq_save_hw(flags);
153
154 ret = blackfin_dma_suspend(); 146 ret = blackfin_dma_suspend();
155 147
156 if (ret) { 148 if (ret) {
157 local_irq_restore_hw(flags);
158 kfree(memptr); 149 kfree(memptr);
159 return ret; 150 return ret;
160 } 151 }
@@ -178,7 +169,6 @@ int bfin_pm_suspend_mem_enter(void)
178 bfin_gpio_pm_hibernate_restore(); 169 bfin_gpio_pm_hibernate_restore();
179 blackfin_dma_resume(); 170 blackfin_dma_resume();
180 171
181 local_irq_restore_hw(flags);
182 kfree(memptr); 172 kfree(memptr);
183 173
184 return 0; 174 return 0;
@@ -233,7 +223,7 @@ static int bfin_pm_enter(suspend_state_t state)
233 return 0; 223 return 0;
234} 224}
235 225
236struct platform_suspend_ops bfin_pm_ops = { 226static const struct platform_suspend_ops bfin_pm_ops = {
237 .enter = bfin_pm_enter, 227 .enter = bfin_pm_enter,
238 .valid = bfin_pm_valid, 228 .valid = bfin_pm_valid,
239}; 229};
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index a17107a700d5..35e7e1eb0188 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -19,11 +19,13 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/cpu.h> 20#include <linux/cpu.h>
21#include <linux/smp.h> 21#include <linux/smp.h>
22#include <linux/cpumask.h>
22#include <linux/seq_file.h> 23#include <linux/seq_file.h>
23#include <linux/irq.h> 24#include <linux/irq.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
25#include <asm/atomic.h> 26#include <asm/atomic.h>
26#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
28#include <asm/irq_handler.h>
27#include <asm/mmu_context.h> 29#include <asm/mmu_context.h>
28#include <asm/pgtable.h> 30#include <asm/pgtable.h>
29#include <asm/pgalloc.h> 31#include <asm/pgalloc.h>
@@ -39,16 +41,14 @@
39 */ 41 */
40struct corelock_slot corelock __attribute__ ((__section__(".l2.bss"))); 42struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));
41 43
44#ifdef CONFIG_ICACHE_FLUSH_L1
45unsigned long blackfin_iflush_l1_entry[NR_CPUS];
46#endif
47
42void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb, 48void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
43 *init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb, 49 *init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
44 *init_saved_dcplb_fault_addr_coreb; 50 *init_saved_dcplb_fault_addr_coreb;
45 51
46cpumask_t cpu_possible_map;
47EXPORT_SYMBOL(cpu_possible_map);
48
49cpumask_t cpu_online_map;
50EXPORT_SYMBOL(cpu_online_map);
51
52#define BFIN_IPI_RESCHEDULE 0 52#define BFIN_IPI_RESCHEDULE 0
53#define BFIN_IPI_CALL_FUNC 1 53#define BFIN_IPI_CALL_FUNC 1
54#define BFIN_IPI_CPU_STOP 2 54#define BFIN_IPI_CPU_STOP 2
@@ -65,8 +65,7 @@ struct smp_call_struct {
65 void (*func)(void *info); 65 void (*func)(void *info);
66 void *info; 66 void *info;
67 int wait; 67 int wait;
68 cpumask_t pending; 68 cpumask_t *waitmask;
69 cpumask_t waitmask;
70}; 69};
71 70
72static struct blackfin_flush_data smp_flush_data; 71static struct blackfin_flush_data smp_flush_data;
@@ -74,15 +73,19 @@ static struct blackfin_flush_data smp_flush_data;
74static DEFINE_SPINLOCK(stop_lock); 73static DEFINE_SPINLOCK(stop_lock);
75 74
76struct ipi_message { 75struct ipi_message {
77 struct list_head list;
78 unsigned long type; 76 unsigned long type;
79 struct smp_call_struct call_struct; 77 struct smp_call_struct call_struct;
80}; 78};
81 79
 80/* A magic number - stress testing shows this is safe for common cases */
81#define BFIN_IPI_MSGQ_LEN 5
82
83/* Simple FIFO buffer, overflow leads to panic */
82struct ipi_message_queue { 84struct ipi_message_queue {
83 struct list_head head;
84 spinlock_t lock; 85 spinlock_t lock;
85 unsigned long count; 86 unsigned long count;
87 unsigned long head; /* head of the queue */
88 struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
86}; 89};
87 90
88static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue); 91static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
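
The list-based queue is replaced by a fixed ring indexed with head and count; the producer slot is always (head + count) % BFIN_IPI_MSGQ_LEN. A hedged sketch of that invariant (locking omitted; callers hold msg_queue->lock, and the diff panics on overflow where this returns an error):

    static int enqueue(struct ipi_message_queue *q, unsigned long type)
    {
        if (q->count == BFIN_IPI_MSGQ_LEN)
            return -1;    /* smp_send_message() panics here instead */
        q->ipi_message[(q->head + q->count) % BFIN_IPI_MSGQ_LEN].type = type;
        q->count++;
        return 0;
    }

    static struct ipi_message *dequeue(struct ipi_message_queue *q)
    {
        struct ipi_message *msg;

        if (!q->count)
            return NULL;
        msg = &q->ipi_message[q->head];
        q->head = (q->head + 1) % BFIN_IPI_MSGQ_LEN;
        q->count--;
        return msg;
    }
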
@@ -94,7 +97,7 @@ static void ipi_cpu_stop(unsigned int cpu)
94 dump_stack(); 97 dump_stack();
95 spin_unlock(&stop_lock); 98 spin_unlock(&stop_lock);
96 99
97 cpu_clear(cpu, cpu_online_map); 100 set_cpu_online(cpu, false);
98 101
99 local_irq_disable(); 102 local_irq_disable();
100 103
@@ -110,6 +113,19 @@ static void ipi_flush_icache(void *info)
110 blackfin_dcache_invalidate_range((unsigned long)fdata, 113 blackfin_dcache_invalidate_range((unsigned long)fdata,
111 (unsigned long)fdata + sizeof(*fdata)); 114 (unsigned long)fdata + sizeof(*fdata));
112 115
116 /* Make sure all write buffers in the data side of the core
117 * are flushed before trying to invalidate the icache. This
118 * needs to be after the data flush and before the icache
119 * flush so that the SSYNC does the right thing in preventing
120 * the instruction prefetcher from hitting things in cached
121 * memory at the wrong time -- it runs much further ahead than
122 * the pipeline.
123 */
124 SSYNC();
125
 126 /* ipi_flush_icache is invoked by generic flush_icache_range,
127 * so call blackfin arch icache flush directly here.
128 */
113 blackfin_icache_flush_range(fdata->start, fdata->end); 129 blackfin_icache_flush_range(fdata->start, fdata->end);
114} 130}
115 131
@@ -121,7 +137,6 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
121 func = msg->call_struct.func; 137 func = msg->call_struct.func;
122 info = msg->call_struct.info; 138 info = msg->call_struct.info;
123 wait = msg->call_struct.wait; 139 wait = msg->call_struct.wait;
124 cpu_clear(cpu, msg->call_struct.pending);
125 func(info); 140 func(info);
126 if (wait) { 141 if (wait) {
127#ifdef __ARCH_SYNC_CORE_DCACHE 142#ifdef __ARCH_SYNC_CORE_DCACHE
@@ -132,51 +147,60 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
132 */ 147 */
133 resync_core_dcache(); 148 resync_core_dcache();
134#endif 149#endif
135 cpu_clear(cpu, msg->call_struct.waitmask); 150 cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
136 } else 151 }
137 kfree(msg);
138} 152}
139 153
140static irqreturn_t ipi_handler(int irq, void *dev_instance) 154/* Use IRQ_SUPPLE_0 to request reschedule.
155 * When returning from interrupt to user space,
156 * there is chance to reschedule */
157static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
158{
159 unsigned int cpu = smp_processor_id();
160
161 platform_clear_ipi(cpu, IRQ_SUPPLE_0);
162 return IRQ_HANDLED;
163}
164
165static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
141{ 166{
142 struct ipi_message *msg; 167 struct ipi_message *msg;
143 struct ipi_message_queue *msg_queue; 168 struct ipi_message_queue *msg_queue;
144 unsigned int cpu = smp_processor_id(); 169 unsigned int cpu = smp_processor_id();
170 unsigned long flags;
145 171
146 platform_clear_ipi(cpu); 172 platform_clear_ipi(cpu, IRQ_SUPPLE_1);
147 173
148 msg_queue = &__get_cpu_var(ipi_msg_queue); 174 msg_queue = &__get_cpu_var(ipi_msg_queue);
149 msg_queue->count++;
150 175
151 spin_lock(&msg_queue->lock); 176 spin_lock_irqsave(&msg_queue->lock, flags);
152 while (!list_empty(&msg_queue->head)) { 177
153 msg = list_entry(msg_queue->head.next, typeof(*msg), list); 178 while (msg_queue->count) {
154 list_del(&msg->list); 179 msg = &msg_queue->ipi_message[msg_queue->head];
155 switch (msg->type) { 180 switch (msg->type) {
156 case BFIN_IPI_RESCHEDULE: 181 case BFIN_IPI_RESCHEDULE:
157 /* That's the easiest one; leave it to 182 scheduler_ipi();
158 * return_from_int. */
159 kfree(msg);
160 break; 183 break;
161 case BFIN_IPI_CALL_FUNC: 184 case BFIN_IPI_CALL_FUNC:
162 spin_unlock(&msg_queue->lock); 185 spin_unlock_irqrestore(&msg_queue->lock, flags);
163 ipi_call_function(cpu, msg); 186 ipi_call_function(cpu, msg);
164 spin_lock(&msg_queue->lock); 187 spin_lock_irqsave(&msg_queue->lock, flags);
165 break; 188 break;
166 case BFIN_IPI_CPU_STOP: 189 case BFIN_IPI_CPU_STOP:
167 spin_unlock(&msg_queue->lock); 190 spin_unlock_irqrestore(&msg_queue->lock, flags);
168 ipi_cpu_stop(cpu); 191 ipi_cpu_stop(cpu);
169 spin_lock(&msg_queue->lock); 192 spin_lock_irqsave(&msg_queue->lock, flags);
170 kfree(msg);
171 break; 193 break;
172 default: 194 default:
173 printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n", 195 printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
174 cpu, msg->type); 196 cpu, msg->type);
175 kfree(msg);
176 break; 197 break;
177 } 198 }
199 msg_queue->head++;
200 msg_queue->head %= BFIN_IPI_MSGQ_LEN;
201 msg_queue->count--;
178 } 202 }
179 spin_unlock(&msg_queue->lock); 203 spin_unlock_irqrestore(&msg_queue->lock, flags);
180 return IRQ_HANDLED; 204 return IRQ_HANDLED;
181} 205}
182 206
@@ -186,48 +210,48 @@ static void ipi_queue_init(void)
186 struct ipi_message_queue *msg_queue; 210 struct ipi_message_queue *msg_queue;
187 for_each_possible_cpu(cpu) { 211 for_each_possible_cpu(cpu) {
188 msg_queue = &per_cpu(ipi_msg_queue, cpu); 212 msg_queue = &per_cpu(ipi_msg_queue, cpu);
189 INIT_LIST_HEAD(&msg_queue->head);
190 spin_lock_init(&msg_queue->lock); 213 spin_lock_init(&msg_queue->lock);
191 msg_queue->count = 0; 214 msg_queue->count = 0;
215 msg_queue->head = 0;
192 } 216 }
193} 217}
194 218
195int smp_call_function(void (*func)(void *info), void *info, int wait) 219static inline void smp_send_message(cpumask_t callmap, unsigned long type,
220 void (*func) (void *info), void *info, int wait)
196{ 221{
197 unsigned int cpu; 222 unsigned int cpu;
198 cpumask_t callmap;
199 unsigned long flags;
200 struct ipi_message_queue *msg_queue; 223 struct ipi_message_queue *msg_queue;
201 struct ipi_message *msg; 224 struct ipi_message *msg;
225 unsigned long flags, next_msg;
226 cpumask_t waitmask; /* waitmask is shared by all cpus */
202 227
203 callmap = cpu_online_map; 228 cpumask_copy(&waitmask, &callmap);
204 cpu_clear(smp_processor_id(), callmap); 229 for_each_cpu(cpu, &callmap) {
205 if (cpus_empty(callmap))
206 return 0;
207
208 msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
209 if (!msg)
210 return -ENOMEM;
211 INIT_LIST_HEAD(&msg->list);
212 msg->call_struct.func = func;
213 msg->call_struct.info = info;
214 msg->call_struct.wait = wait;
215 msg->call_struct.pending = callmap;
216 msg->call_struct.waitmask = callmap;
217 msg->type = BFIN_IPI_CALL_FUNC;
218
219 for_each_cpu_mask(cpu, callmap) {
220 msg_queue = &per_cpu(ipi_msg_queue, cpu); 230 msg_queue = &per_cpu(ipi_msg_queue, cpu);
221 spin_lock_irqsave(&msg_queue->lock, flags); 231 spin_lock_irqsave(&msg_queue->lock, flags);
222 list_add_tail(&msg->list, &msg_queue->head); 232 if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
233 next_msg = (msg_queue->head + msg_queue->count)
234 % BFIN_IPI_MSGQ_LEN;
235 msg = &msg_queue->ipi_message[next_msg];
236 msg->type = type;
237 if (type == BFIN_IPI_CALL_FUNC) {
238 msg->call_struct.func = func;
239 msg->call_struct.info = info;
240 msg->call_struct.wait = wait;
241 msg->call_struct.waitmask = &waitmask;
242 }
243 msg_queue->count++;
244 } else
245 panic("IPI message queue overflow\n");
223 spin_unlock_irqrestore(&msg_queue->lock, flags); 246 spin_unlock_irqrestore(&msg_queue->lock, flags);
224 platform_send_ipi_cpu(cpu); 247 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
225 } 248 }
249
226 if (wait) { 250 if (wait) {
227 while (!cpus_empty(msg->call_struct.waitmask)) 251 while (!cpumask_empty(&waitmask))
228 blackfin_dcache_invalidate_range( 252 blackfin_dcache_invalidate_range(
229 (unsigned long)(&msg->call_struct.waitmask), 253 (unsigned long)(&waitmask),
230 (unsigned long)(&msg->call_struct.waitmask)); 254 (unsigned long)(&waitmask));
231#ifdef __ARCH_SYNC_CORE_DCACHE 255#ifdef __ARCH_SYNC_CORE_DCACHE
232 /* 256 /*
233 * Invalidate D cache in case shared data was changed by 257 * Invalidate D cache in case shared data was changed by
@@ -235,8 +259,21 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
235 */ 259 */
236 resync_core_dcache(); 260 resync_core_dcache();
237#endif 261#endif
238 kfree(msg);
239 } 262 }
263}
264
265int smp_call_function(void (*func)(void *info), void *info, int wait)
266{
267 cpumask_t callmap;
268
269 preempt_disable();
270 cpumask_copy(&callmap, cpu_online_mask);
271 cpumask_clear_cpu(smp_processor_id(), &callmap);
272 if (!cpumask_empty(&callmap))
273 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
274
275 preempt_enable();
276
240 return 0; 277 return 0;
241} 278}
242EXPORT_SYMBOL_GPL(smp_call_function); 279EXPORT_SYMBOL_GPL(smp_call_function);
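
With the rework, smp_call_function() only assembles the callmap and delegates to smp_send_message(), which spins on a stack-local waitmask until every callee checks in. An illustrative caller (my_counter and bump are made-up names):

    static atomic_t my_counter = ATOMIC_INIT(0);

    static void bump(void *info)
    {
        atomic_inc(&my_counter);    /* runs on each other online CPU */
    }

    static void kick_others(void)
    {
        /* wait=1: block until all callees have cleared the waitmask */
        smp_call_function(bump, NULL, 1);
    }
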
@@ -246,100 +283,40 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
246{ 283{
247 unsigned int cpu = cpuid; 284 unsigned int cpu = cpuid;
248 cpumask_t callmap; 285 cpumask_t callmap;
249 unsigned long flags;
250 struct ipi_message_queue *msg_queue;
251 struct ipi_message *msg;
252 286
253 if (cpu_is_offline(cpu)) 287 if (cpu_is_offline(cpu))
254 return 0; 288 return 0;
255 cpus_clear(callmap); 289 cpumask_clear(&callmap);
256 cpu_set(cpu, callmap); 290 cpumask_set_cpu(cpu, &callmap);
257 291
258 msg = kmalloc(sizeof(*msg), GFP_ATOMIC); 292 smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
259 if (!msg)
260 return -ENOMEM;
261 INIT_LIST_HEAD(&msg->list);
262 msg->call_struct.func = func;
263 msg->call_struct.info = info;
264 msg->call_struct.wait = wait;
265 msg->call_struct.pending = callmap;
266 msg->call_struct.waitmask = callmap;
267 msg->type = BFIN_IPI_CALL_FUNC;
268
269 msg_queue = &per_cpu(ipi_msg_queue, cpu);
270 spin_lock_irqsave(&msg_queue->lock, flags);
271 list_add_tail(&msg->list, &msg_queue->head);
272 spin_unlock_irqrestore(&msg_queue->lock, flags);
273 platform_send_ipi_cpu(cpu);
274 293
275 if (wait) {
276 while (!cpus_empty(msg->call_struct.waitmask))
277 blackfin_dcache_invalidate_range(
278 (unsigned long)(&msg->call_struct.waitmask),
279 (unsigned long)(&msg->call_struct.waitmask));
280#ifdef __ARCH_SYNC_CORE_DCACHE
281 /*
282 * Invalidate D cache in case shared data was changed by
283 * other processors to ensure cache coherence.
284 */
285 resync_core_dcache();
286#endif
287 kfree(msg);
288 }
289 return 0; 294 return 0;
290} 295}
291EXPORT_SYMBOL_GPL(smp_call_function_single); 296EXPORT_SYMBOL_GPL(smp_call_function_single);
292 297
293void smp_send_reschedule(int cpu) 298void smp_send_reschedule(int cpu)
294{ 299{
295 unsigned long flags; 300 /* simply trigger an ipi */
296 struct ipi_message_queue *msg_queue;
297 struct ipi_message *msg;
298
299 if (cpu_is_offline(cpu)) 301 if (cpu_is_offline(cpu))
300 return; 302 return;
301 303 platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
302 msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
303 if (!msg)
304 return;
305 INIT_LIST_HEAD(&msg->list);
306 msg->type = BFIN_IPI_RESCHEDULE;
307
308 msg_queue = &per_cpu(ipi_msg_queue, cpu);
309 spin_lock_irqsave(&msg_queue->lock, flags);
310 list_add_tail(&msg->list, &msg_queue->head);
311 spin_unlock_irqrestore(&msg_queue->lock, flags);
312 platform_send_ipi_cpu(cpu);
313 304
314 return; 305 return;
315} 306}
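
Reschedule requests now bypass the message queue entirely: IRQ_SUPPLE_0 is dedicated to them, so the sender just raises the interrupt and ipi_handler_int0() merely acknowledges it, leaving the actual reschedule to the interrupt-return path. A sketch of the resulting two-vector split (both calls appear verbatim elsewhere in this diff):

    platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);    /* bare reschedule kick */
    platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);    /* drain the ipi_msg_queue */
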
316 307
317void smp_send_stop(void) 308void smp_send_stop(void)
318{ 309{
319 unsigned int cpu;
320 cpumask_t callmap; 310 cpumask_t callmap;
321 unsigned long flags;
322 struct ipi_message_queue *msg_queue;
323 struct ipi_message *msg;
324 311
325 callmap = cpu_online_map; 312 preempt_disable();
326 cpu_clear(smp_processor_id(), callmap); 313 cpumask_copy(&callmap, cpu_online_mask);
327 if (cpus_empty(callmap)) 314 cpumask_clear_cpu(smp_processor_id(), &callmap);
328 return; 315 if (!cpumask_empty(&callmap))
316 smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
329 317
330 msg = kzalloc(sizeof(*msg), GFP_ATOMIC); 318 preempt_enable();
331 if (!msg)
332 return;
333 INIT_LIST_HEAD(&msg->list);
334 msg->type = BFIN_IPI_CPU_STOP;
335 319
336 for_each_cpu_mask(cpu, callmap) {
337 msg_queue = &per_cpu(ipi_msg_queue, cpu);
338 spin_lock_irqsave(&msg_queue->lock, flags);
339 list_add_tail(&msg->list, &msg_queue->head);
340 spin_unlock_irqrestore(&msg_queue->lock, flags);
341 platform_send_ipi_cpu(cpu);
342 }
343 return; 320 return;
344} 321}
345 322
@@ -408,8 +385,6 @@ void __cpuinit secondary_start_kernel(void)
408 */ 385 */
409 init_exception_vectors(); 386 init_exception_vectors();
410 387
411 bfin_setup_caches(cpu);
412
413 local_irq_disable(); 388 local_irq_disable();
414 389
415 /* Attach the new idle task to the global mm. */ 390 /* Attach the new idle task to the global mm. */
@@ -428,6 +403,8 @@ void __cpuinit secondary_start_kernel(void)
428 403
429 local_irq_enable(); 404 local_irq_enable();
430 405
406 bfin_setup_caches(cpu);
407
431 /* 408 /*
432 * Calibrate loops per jiffy value. 409 * Calibrate loops per jiffy value.
433 * IRQs need to be enabled here - D-cache can be invalidated 410 * IRQs need to be enabled here - D-cache can be invalidated
@@ -446,7 +423,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
446{ 423{
447 platform_prepare_cpus(max_cpus); 424 platform_prepare_cpus(max_cpus);
448 ipi_queue_init(); 425 ipi_queue_init();
449 platform_request_ipi(&ipi_handler); 426 platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
427 platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
450} 428}
451 429
452void __init smp_cpus_done(unsigned int max_cpus) 430void __init smp_cpus_done(unsigned int max_cpus)