Diffstat (limited to 'arch/blackfin/mach-common')
-rw-r--r--  arch/blackfin/mach-common/cpufreq.c        | 168
-rw-r--r--  arch/blackfin/mach-common/entry.S          |  81
-rw-r--r--  arch/blackfin/mach-common/head.S           |  16
-rw-r--r--  arch/blackfin/mach-common/interrupt.S      |  35
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c  | 208
-rw-r--r--  arch/blackfin/mach-common/smp.c            |  87
6 files changed, 445 insertions, 150 deletions
diff --git a/arch/blackfin/mach-common/cpufreq.c b/arch/blackfin/mach-common/cpufreq.c
index 777582897253..4391d03dc845 100644
--- a/arch/blackfin/mach-common/cpufreq.c
+++ b/arch/blackfin/mach-common/cpufreq.c
@@ -11,10 +11,13 @@
 #include <linux/init.h>
 #include <linux/cpufreq.h>
 #include <linux/fs.h>
+#include <linux/delay.h>
 #include <asm/blackfin.h>
 #include <asm/time.h>
 #include <asm/dpmc.h>
 
+#define CPUFREQ_CPU 0
+
 /* this is the table of CCLK frequencies, in Hz */
 /* .index is the entry in the auxillary dpm_state_table[] */
 static struct cpufreq_frequency_table bfin_freq_table[] = {
@@ -41,64 +44,124 @@ static struct bfin_dpm_state {
 	unsigned int tscale; /* change the divider on the core timer interrupt */
 } dpm_state_table[3];
 
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
 /*
- normalized to maximum frequncy offset for CYCLES,
- used in time-ts cycles clock source, but could be used
- somewhere also.
+ * normalized to maximum frequncy offset for CYCLES,
+ * used in time-ts cycles clock source, but could be used
+ * somewhere also.
  */
 unsigned long long __bfin_cycles_off;
 unsigned int __bfin_cycles_mod;
+#endif
 
 /**************************************************************************/
+static void __init bfin_init_tables(unsigned long cclk, unsigned long sclk)
+{
 
-static unsigned int bfin_getfreq_khz(unsigned int cpu)
+	unsigned long csel, min_cclk;
+	int index;
+
+	/* Anomaly 273 seems to still exist on non-BF54x w/dcache turned on */
+#if ANOMALY_05000273 || ANOMALY_05000274 || \
+	(!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
+	min_cclk = sclk * 2;
+#else
+	min_cclk = sclk;
+#endif
+	csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
+
+	for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
+		bfin_freq_table[index].frequency = cclk >> index;
+		dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
+		dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
+
+		pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
+			 bfin_freq_table[index].frequency,
+			 dpm_state_table[index].csel,
+			 dpm_state_table[index].tscale);
+	}
+	return;
+}
+
+static void bfin_adjust_core_timer(void *info)
 {
-	/* The driver only support single cpu */
-	if (cpu != 0)
-		return -1;
+	unsigned int tscale;
+	unsigned int index = *(unsigned int *)info;
 
-	return get_cclk() / 1000;
+	/* we have to adjust the core timer, because it is using cclk */
+	tscale = dpm_state_table[index].tscale;
+	bfin_write_TSCALE(tscale);
+	return;
 }
 
+static unsigned int bfin_getfreq_khz(unsigned int cpu)
+{
+	/* Both CoreA/B have the same core clock */
+	return get_cclk() / 1000;
+}
 
-static int bfin_target(struct cpufreq_policy *policy,
+static int bfin_target(struct cpufreq_policy *poli,
 			unsigned int target_freq, unsigned int relation)
 {
-	unsigned int index, plldiv, tscale;
+	unsigned int index, plldiv, cpu;
 	unsigned long flags, cclk_hz;
 	struct cpufreq_freqs freqs;
+	static unsigned long lpj_ref;
+	static unsigned int lpj_ref_freq;
+
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
 	cycles_t cycles;
+#endif
 
-	if (cpufreq_frequency_table_target(policy, bfin_freq_table,
-				 target_freq, relation, &index))
-		return -EINVAL;
-
-	cclk_hz = bfin_freq_table[index].frequency;
-
-	freqs.old = bfin_getfreq_khz(0);
-	freqs.new = cclk_hz;
-	freqs.cpu = 0;
-
-	pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
-		 cclk_hz, target_freq, freqs.old);
-
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	local_irq_save_hw(flags);
-	plldiv = (bfin_read_PLL_DIV() & SSEL) | dpm_state_table[index].csel;
-	tscale = dpm_state_table[index].tscale;
-	bfin_write_PLL_DIV(plldiv);
-	/* we have to adjust the core timer, because it is using cclk */
-	bfin_write_TSCALE(tscale);
-	cycles = get_cycles();
-	SSYNC();
-	cycles += 10; /* ~10 cycles we lose after get_cycles() */
-	__bfin_cycles_off += (cycles << __bfin_cycles_mod) - (cycles << index);
-	__bfin_cycles_mod = index;
-	local_irq_restore_hw(flags);
-	/* TODO: just test case for cycles clock source, remove later */
-	pr_debug("cpufreq: done\n");
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	for_each_online_cpu(cpu) {
+		struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+		if (!policy)
+			continue;
+
+		if (cpufreq_frequency_table_target(policy, bfin_freq_table,
+				 target_freq, relation, &index))
+			return -EINVAL;
+
+		cclk_hz = bfin_freq_table[index].frequency;
+
+		freqs.old = bfin_getfreq_khz(0);
+		freqs.new = cclk_hz;
+		freqs.cpu = cpu;
+
+		pr_debug("cpufreq: changing cclk to %lu; target = %u, oldfreq = %u\n",
+			 cclk_hz, target_freq, freqs.old);
+
+		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+		if (cpu == CPUFREQ_CPU) {
+			local_irq_save_hw(flags);
+			plldiv = (bfin_read_PLL_DIV() & SSEL) |
+					dpm_state_table[index].csel;
+			bfin_write_PLL_DIV(plldiv);
+			on_each_cpu(bfin_adjust_core_timer, &index, 1);
+#if defined(CONFIG_CYCLES_CLOCKSOURCE)
+			cycles = get_cycles();
+			SSYNC();
+			cycles += 10; /* ~10 cycles we lose after get_cycles() */
+			__bfin_cycles_off +=
+				(cycles << __bfin_cycles_mod) - (cycles << index);
+			__bfin_cycles_mod = index;
+#endif
+			if (!lpj_ref_freq) {
+				lpj_ref = loops_per_jiffy;
+				lpj_ref_freq = freqs.old;
+			}
+			if (freqs.new != freqs.old) {
+				loops_per_jiffy = cpufreq_scale(lpj_ref,
+						lpj_ref_freq, freqs.new);
+			}
+			local_irq_restore_hw(flags);
+		}
+		/* TODO: just test case for cycles clock source, remove later */
+		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	}
 
+	pr_debug("cpufreq: done\n");
 	return 0;
 }
 
@@ -110,37 +173,16 @@ static int bfin_verify_speed(struct cpufreq_policy *policy)
 static int __init __bfin_cpu_init(struct cpufreq_policy *policy)
 {
 
-	unsigned long cclk, sclk, csel, min_cclk;
-	int index;
-
-	if (policy->cpu != 0)
-		return -EINVAL;
+	unsigned long cclk, sclk;
 
 	cclk = get_cclk() / 1000;
 	sclk = get_sclk() / 1000;
 
-#if ANOMALY_05000273 || ANOMALY_05000274 || \
-	(!defined(CONFIG_BF54x) && defined(CONFIG_BFIN_EXTMEM_DCACHEABLE))
-	min_cclk = sclk * 2;
-#else
-	min_cclk = sclk;
-#endif
-	csel = ((bfin_read_PLL_DIV() & CSEL) >> 4);
-
-	for (index = 0; (cclk >> index) >= min_cclk && csel <= 3; index++, csel++) {
-		bfin_freq_table[index].frequency = cclk >> index;
-		dpm_state_table[index].csel = csel << 4; /* Shift now into PLL_DIV bitpos */
-		dpm_state_table[index].tscale = (TIME_SCALE / (1 << csel)) - 1;
-
-		pr_debug("cpufreq: freq:%d csel:0x%x tscale:%d\n",
-			 bfin_freq_table[index].frequency,
-			 dpm_state_table[index].csel,
-			 dpm_state_table[index].tscale);
-	}
+	if (policy->cpu == CPUFREQ_CPU)
+		bfin_init_tables(cclk, sclk);
 
 	policy->cpuinfo.transition_latency = 50000; /* 50us assumed */
 
-	/*Now ,only support one cpu */
 	policy->cur = cclk;
 	cpufreq_frequency_table_get_attr(bfin_freq_table, policy->cpu);
 	return cpufreq_frequency_table_cpuinfo(policy, bfin_freq_table);
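For readers skimming the cpufreq hunks: the driver now builds one frequency table shared by both cores (each entry halves CCLK through the CSEL divider, so entry N runs at cclk >> N), touches the PLL divider only from CPUFREQ_CPU (core A), rescales every core's timer via on_each_cpu(), and scales loops_per_jiffy in proportion to the new frequency. A minimal user-space sketch of that table construction and scaling arithmetic, with made-up clock values (the kernel reads the real ones from PLL_DIV and uses cpufreq_scale() for the last step):

	#include <stdio.h>

	int main(void)
	{
		unsigned long cclk = 500000;      /* kHz; hypothetical max core clock */
		unsigned long min_cclk = 100000;  /* kHz; hypothetical SCLK floor */
		unsigned long lpj_ref = 995328, lpj_ref_freq = cclk;
		unsigned long new_freq, lpj;
		int index, csel;

		/* CSEL divides CCLK by 1/2/4/8, so table entry N is cclk >> N */
		for (index = 0, csel = 0;
		     (cclk >> index) >= min_cclk && csel <= 3; index++, csel++)
			printf("entry %d: %lu kHz (CSEL=%d)\n",
			       index, cclk >> index, csel);

		/* cpufreq_scale(old, div, mult) is essentially old * mult / div */
		new_freq = cclk >> 1;
		lpj = (unsigned long)((unsigned long long)lpj_ref *
				      new_freq / lpj_ref_freq);
		printf("loops_per_jiffy at %lu kHz: %lu\n", new_freq, lpj);
		return 0;
	}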
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 01b2f58dfb95..a5847f5d67c7 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -405,7 +405,7 @@ ENTRY(_double_fault)
 
 	r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
 	SP += -12;
-	call _double_fault_c;
+	pseudo_long_call _double_fault_c, p5;
 	SP += 12;
 .L_double_fault_panic:
 	JUMP .L_double_fault_panic
@@ -447,7 +447,7 @@ ENTRY(_exception_to_level5)
 
 	r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
 	SP += -12;
-	call _trap_c;
+	pseudo_long_call _trap_c, p4;
 	SP += 12;
 
 	/* If interrupts were off during the exception (IPEND[4] = 1), turn them off
@@ -482,6 +482,8 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
 	[--sp] = ASTAT;
 	[--sp] = (R7:6,P5:4);
 
+	ANOMALY_283_315_WORKAROUND(p5, r7)
+
 #ifdef CONFIG_EXACT_HWERR
 	/* Make sure all pending read/writes complete. This will ensure any
 	 * accesses which could cause hardware errors completes, and signal
@@ -492,8 +494,6 @@ ENTRY(_trap) /* Exception: 4th entry into system event table(supervisor mode)*/
 	ssync;
 #endif
 
-	ANOMALY_283_315_WORKAROUND(p5, r7)
-
 #ifdef CONFIG_DEBUG_DOUBLEFAULT
 	/*
 	 * Save these registers, as they are only valid in exception context
@@ -551,7 +551,7 @@ ENTRY(_kernel_execve)
 	p0 = sp;
 	sp += -16;
 	[sp + 12] = p0;
-	call _do_execve;
+	pseudo_long_call _do_execve, p5;
 	SP += 16;
 	cc = r0 == 0;
 	if ! cc jump .Lexecve_failed;
@@ -626,13 +626,6 @@ ENTRY(_system_call)
 	p0 = [sp + PT_ORIG_P0];
 #endif /* CONFIG_IPIPE */
 
-	/* Check the System Call */
-	r7 = __NR_syscall;
-	/* System call number is passed in P0 */
-	r6 = p0;
-	cc = r6 < r7;
-	if ! cc jump .Lbadsys;
-
 	/* are we tracing syscalls?*/
 	r7 = sp;
 	r6.l = lo(ALIGN_PAGE_MASK);
@@ -642,6 +635,14 @@ ENTRY(_system_call)
 	r7 = [p2+TI_FLAGS];
 	CC = BITTST(r7,TIF_SYSCALL_TRACE);
 	if CC JUMP _sys_trace;
+	CC = BITTST(r7,TIF_SINGLESTEP);
+	if CC JUMP _sys_trace;
+
+	/* Make sure the system call # is valid */
+	p4 = __NR_syscall;
+	/* System call number is passed in P0 */
+	cc = p4 <= p0;
+	if cc jump .Lbadsys;
 
 	/* Execute the appropriate system call */
 
@@ -704,7 +705,7 @@ ENTRY(_system_call)
 	sp += 4;
 
 	SP += -12;
-	call _schedule;
+	pseudo_long_call _schedule, p4;
 	SP += 12;
 
 	jump .Lresume_userspace_1;
@@ -723,7 +724,7 @@ ENTRY(_system_call)
 
 	r0 = sp;
 	SP += -12;
-	call _do_notify_resume;
+	pseudo_long_call _do_notify_resume, p5;
 	SP += 12;
 
 .Lsyscall_really_exit:
@@ -736,11 +737,17 @@ ENDPROC(_system_call)
  * this symbol need not be global anyways, so ...
  */
 _sys_trace:
-	call _syscall_trace;
-
-	/* Execute the appropriate system call */
+	r0 = sp;
+	pseudo_long_call _syscall_trace_enter, p5;
 
+	/* Make sure the system call # is valid */
 	p4 = [SP + PT_P0];
+	p3 = __NR_syscall;
+	cc = p3 <= p4;
+	r0 = -ENOSYS;
+	if cc jump .Lsys_trace_badsys;
+
+	/* Execute the appropriate system call */
 	p5.l = _sys_call_table;
 	p5.h = _sys_call_table;
 	p5 = p5 + (p4 << 2);
@@ -758,9 +765,11 @@ _sys_trace:
 	SP += -12;
 	call (p5);
 	SP += 24;
+.Lsys_trace_badsys:
 	[sp + PT_R0] = r0;
 
-	call _syscall_trace;
+	r0 = sp;
+	pseudo_long_call _syscall_trace_leave, p5;
 	jump .Lresume_userspace;
 ENDPROC(_sys_trace)
 
@@ -966,6 +975,13 @@ ENTRY(_evt_evt14)
 #else
 	cli r0;
 #endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+	[--sp] = rets;
+	sp += -12;
+	call _trace_hardirqs_off;
+	sp += 12;
+	rets = [sp++];
+#endif
 	[--sp] = RETI;
 	SP += 4;
 	rts;
@@ -989,6 +1005,14 @@ ENTRY(_schedule_and_signal_from_int)
 	p1 = rets;
 	[sp + PT_RESERVED] = p1;
 
+#ifdef CONFIG_TRACE_IRQFLAGS
+	/* trace_hardirqs_on() checks if all irqs are disabled. But here IRQ 15
+	 * is turned on, so disable all irqs. */
+	cli r0;
+	sp += -12;
+	call _trace_hardirqs_on;
+	sp += 12;
+#endif
 #ifdef CONFIG_SMP
 	GET_PDA(p0, r0);	/* Fetch current PDA (can't migrate to other CPU here) */
 	r0 = [p0 + PDA_IRQFLAGS];
@@ -1007,7 +1031,8 @@ ENTRY(_schedule_and_signal_from_int)
 
 	r0 = sp;
 	sp += -12;
-	call _finish_atomic_sections;
+
+	pseudo_long_call _finish_atomic_sections, p5;
 	sp += 12;
 	jump.s .Lresume_userspace;
 ENDPROC(_schedule_and_signal_from_int)
@@ -1357,7 +1382,7 @@ ENTRY(_sys_call_table)
 	.long _sys_newuname
 	.long _sys_ni_syscall	/* old sys_modify_ldt */
 	.long _sys_adjtimex
-	.long _sys_ni_syscall	/* 125 */ /* sys_mprotect */
+	.long _sys_mprotect	/* 125 */
 	.long _sys_ni_syscall	/* old sys_sigprocmask */
 	.long _sys_ni_syscall	/* old "creat_module" */
 	.long _sys_init_module
@@ -1376,16 +1401,16 @@ ENTRY(_sys_call_table)
 	.long _sys_getdents
 	.long _sys_ni_syscall	/* sys_select */
 	.long _sys_flock
-	.long _sys_ni_syscall	/* sys_msync */
+	.long _sys_msync
 	.long _sys_readv	/* 145 */
 	.long _sys_writev
 	.long _sys_getsid
 	.long _sys_fdatasync
 	.long _sys_sysctl
-	.long _sys_ni_syscall	/* 150 */ /* sys_mlock */
-	.long _sys_ni_syscall	/* sys_munlock */
-	.long _sys_ni_syscall	/* sys_mlockall */
-	.long _sys_ni_syscall	/* sys_munlockall */
+	.long _sys_mlock	/* 150 */
+	.long _sys_munlock
+	.long _sys_mlockall
+	.long _sys_munlockall
 	.long _sys_sched_setparam
 	.long _sys_sched_getparam /* 155 */
 	.long _sys_sched_setscheduler
@@ -1450,8 +1475,8 @@ ENTRY(_sys_call_table)
 	.long _sys_setfsuid	/* 215 */
 	.long _sys_setfsgid
 	.long _sys_pivot_root
-	.long _sys_ni_syscall	/* sys_mincore */
-	.long _sys_ni_syscall	/* sys_madvise */
+	.long _sys_mincore
+	.long _sys_madvise
 	.long _sys_getdents64	/* 220 */
 	.long _sys_fcntl64
 	.long _sys_ni_syscall	/* reserved for TUX */
@@ -1507,7 +1532,7 @@ ENTRY(_sys_call_table)
 	.long _sys_utimes
 	.long _sys_fadvise64_64
 	.long _sys_ni_syscall	/* vserver */
-	.long _sys_ni_syscall	/* 275, mbind */
+	.long _sys_mbind	/* 275 */
 	.long _sys_ni_syscall	/* get_mempolicy */
 	.long _sys_ni_syscall	/* set_mempolicy */
 	.long _sys_mq_open
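The _system_call/_sys_trace hunks above move the __NR_syscall bounds check after the tracing checks, flip the comparison (cc = p4 <= p0 now rejects out-of-range numbers), and make the traced path return -ENOSYS through .Lsys_trace_badsys instead of indexing past the table. A hedged C sketch of that dispatch pattern; the table size and handler names are illustrative, not the kernel's:

	#include <errno.h>
	#include <stdio.h>

	#define NR_SYSCALLS 378	/* illustrative stand-in for __NR_syscall */

	typedef long (*syscall_fn)(long, long, long);

	static long sys_ni_syscall(long a, long b, long c) { return -ENOSYS; }

	static syscall_fn sys_call_table[NR_SYSCALLS] = {
		[0 ... NR_SYSCALLS - 1] = sys_ni_syscall,  /* GCC range initializer */
	};

	/* Mirrors the assembly: reject nr >= NR_SYSCALLS first, then index the
	 * table; the tracing hooks would bracket this call on the traced path. */
	static long do_syscall(unsigned long nr, long a, long b, long c)
	{
		if (nr >= NR_SYSCALLS)	/* the "cc = p4 <= p0" test */
			return -ENOSYS;	/* .Lbadsys / .Lsys_trace_badsys */
		return sys_call_table[nr](a, b, c);
	}

	int main(void)
	{
		printf("%ld\n", do_syscall(9999, 0, 0, 0));	/* prints -ENOSYS */
		return 0;
	}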
diff --git a/arch/blackfin/mach-common/head.S b/arch/blackfin/mach-common/head.S
index cab0a0031eee..4391621d9048 100644
--- a/arch/blackfin/mach-common/head.S
+++ b/arch/blackfin/mach-common/head.S
@@ -144,8 +144,8 @@ ENTRY(__start)
 #endif
 
 	/* Initialize stack pointer */
-	sp.l = _init_thread_union;
-	sp.h = _init_thread_union;
+	sp.l = _init_thread_union + THREAD_SIZE;
+	sp.h = _init_thread_union + THREAD_SIZE;
 	fp = sp;
 	usp = sp;
 
@@ -186,6 +186,11 @@ ENTRY(__start)
 
 	/* Put The Code for PLL Programming and SDRAM Programming in L1 ISRAM */
 	call _bfin_relocate_l1_mem;
+
+#ifdef CONFIG_ROMKERNEL
+	call _bfin_relocate_xip_data;
+#endif
+
 #ifdef CONFIG_BFIN_KERNEL_CLOCK
 	/* Only use on-chip scratch space for stack when absolutely required
 	 * to avoid Anomaly 05000227 ... we know the init_clocks() func only
@@ -257,12 +262,7 @@ ENTRY(_real_start)
 	R0 = R7;
 	call _cmdline_init;
 
-	/* Load the current thread pointer and stack */
-	p1 = THREAD_SIZE + 4 (z);	/* +4 is for reti loading */
-	sp = sp + p1;
-	usp = sp;
-	fp = sp;
-	sp += -12;
+	sp += -12 + 4;	/* +4 is for reti loading above */
 	call _init_pda
 	sp += 12;
 	jump.l _start_kernel;
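The head.S change points the boot stack at the top of init_thread_union from the first instruction (Blackfin stacks grow downward), so _real_start no longer re-derives the stack base and only reserves the outgoing-argument area plus the 4 bytes consumed by the RETI push. A hedged C analogue of the layout; THREAD_SIZE and the union shape here are illustrative:

	#include <stdint.h>
	#include <stdio.h>

	#define THREAD_SIZE 8192	/* illustrative; fixed by the kernel config */

	/* thread_info lives at the low end of the stack region; the initial SP
	 * must point one past the high end, which is what head.S now loads. */
	union thread_union_sketch {
		char stack[THREAD_SIZE];
	};

	int main(void)
	{
		static union thread_union_sketch init_thread_union;
		uintptr_t base = (uintptr_t)&init_thread_union;
		uintptr_t sp = base + THREAD_SIZE;  /* _init_thread_union + THREAD_SIZE */

		printf("thread_info at %#lx, initial sp at %#lx\n",
		       (unsigned long)base, (unsigned long)sp);
		return 0;
	}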
diff --git a/arch/blackfin/mach-common/interrupt.S b/arch/blackfin/mach-common/interrupt.S
index 8085ff1cce00..cee62cf4acd4 100644
--- a/arch/blackfin/mach-common/interrupt.S
+++ b/arch/blackfin/mach-common/interrupt.S
@@ -88,6 +88,13 @@ __common_int_entry:
 #else
 	cli r1;
 #endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+	[--sp] = r0;
+	sp += -12;
+	call _trace_hardirqs_off;
+	sp += 12;
+	r0 = [sp++];
+#endif
 	[--sp] = RETI;  /* orig_pc */
 	/* Clear all L registers.  */
 	r1 = 0 (x);
@@ -109,10 +116,10 @@ __common_int_entry:
 	cc = r0 == 0;
 	if cc jump .Lcommon_restore_context;
 #else /* CONFIG_IPIPE */
-	call _do_irq;
+	pseudo_long_call _do_irq, p2;
 	SP += 12;
 #endif /* CONFIG_IPIPE */
-	call _return_from_int;
+	pseudo_long_call _return_from_int, p2;
 .Lcommon_restore_context:
 	RESTORE_CONTEXT
 	rti;
@@ -168,7 +175,7 @@ ENTRY(_evt_ivhw)
 
 	r0 = sp;        /* stack frame pt_regs pointer argument ==> r0 */
 	SP += -12;
-	call _trap_c;
+	pseudo_long_call _trap_c, p5;
 	SP += 12;
 
 #ifdef EBIU_ERRMST
@@ -179,7 +186,7 @@ ENTRY(_evt_ivhw)
 	w[p0] = r0.l;
 #endif
 
-	call _ret_from_exception;
+	pseudo_long_call _ret_from_exception, p2;
 
 .Lcommon_restore_all_sys:
 	RESTORE_ALL_SYS
@@ -187,12 +194,28 @@ ENTRY(_evt_ivhw)
 ENDPROC(_evt_ivhw)
 
 /* Interrupt routine for evt2 (NMI).
- * We don't actually use this, so just return.
  * For inner circle type details, please see:
  * http://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:nmi
  */
 ENTRY(_evt_nmi)
+#ifndef CONFIG_NMI_WATCHDOG
 .weak _evt_nmi
+#else
+	/* Not take account of CPLBs, this handler will not return */
+	SAVE_ALL_SYS
+	r0 = sp;
+	r1 = retn;
+	[sp + PT_PC] = r1;
+	trace_buffer_save(p4,r5);
+
+	ANOMALY_283_315_WORKAROUND(p4, r5)
+
+	SP += -12;
+	call _do_nmi;
+	SP += 12;
+1:
+	jump 1b;
+#endif
 	rtn;
ENDPROC(_evt_nmi)
 
@@ -223,7 +246,7 @@ ENTRY(_evt_system_call)
 #ifdef CONFIG_FRAME_POINTER
 	fp = 0;
 #endif
-	call _system_call;
+	pseudo_long_call _system_call, p2;
 	jump .Lcommon_restore_context;
 ENDPROC(_evt_system_call)
 
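The CONFIG_TRACE_IRQFLAGS hunks in entry.S and interrupt.S keep lockdep's view of interrupt state accurate: whenever the low-level code masks interrupts with cli it must call trace_hardirqs_off(), and trace_hardirqs_on() before they come back on, or lockdep's IRQ-state tracking reports false positives. A hedged C sketch of that contract; the wrapper names are hypothetical, not the kernel's:

	/* Hypothetical wrappers illustrating the pairing, not kernel code. */
	extern void trace_hardirqs_off(void);
	extern void trace_hardirqs_on(void);
	extern unsigned long arch_local_irq_save(void);	/* the "cli r0" step */
	extern void arch_local_irq_restore(unsigned long flags);

	static inline unsigned long traced_irq_save(void)
	{
		unsigned long flags = arch_local_irq_save();
		trace_hardirqs_off();	/* what __common_int_entry now calls */
		return flags;
	}

	static inline void traced_irq_restore(unsigned long flags)
	{
		trace_hardirqs_on();	/* paired before interrupts return */
		arch_local_irq_restore(flags);
	}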
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 1873b2c1fede..7ad8878bfa18 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -28,6 +28,7 @@
 #include <asm/dpmc.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/bfin_sport.h>
+#include <asm/bfin_can.h>
 
 #define SIC_SYSIRQ(irq)	(irq - (IRQ_CORETMR + 1))
 
@@ -172,7 +173,12 @@ static void bfin_internal_mask_irq(unsigned int irq)
 	local_irq_restore_hw(flags);
 }
 
+#ifdef CONFIG_SMP
+static void bfin_internal_unmask_irq_affinity(unsigned int irq,
+		const struct cpumask *affinity)
+#else
 static void bfin_internal_unmask_irq(unsigned int irq)
+#endif
 {
 	unsigned long flags;
 
@@ -185,16 +191,38 @@ static void bfin_internal_unmask_irq(unsigned int irq)
 	local_irq_save_hw(flags);
 	mask_bank = SIC_SYSIRQ(irq) / 32;
 	mask_bit = SIC_SYSIRQ(irq) % 32;
-	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) |
-			(1 << mask_bit));
 #ifdef CONFIG_SMP
-	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) |
-			(1 << mask_bit));
+	if (cpumask_test_cpu(0, affinity))
+#endif
+		bfin_write_SIC_IMASK(mask_bank,
+			bfin_read_SIC_IMASK(mask_bank) |
+			(1 << mask_bit));
+#ifdef CONFIG_SMP
+	if (cpumask_test_cpu(1, affinity))
+		bfin_write_SICB_IMASK(mask_bank,
+			bfin_read_SICB_IMASK(mask_bank) |
+			(1 << mask_bit));
 #endif
 #endif
 	local_irq_restore_hw(flags);
 }
 
+#ifdef CONFIG_SMP
+static void bfin_internal_unmask_irq(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	bfin_internal_unmask_irq_affinity(irq, desc->affinity);
+}
+
+static int bfin_internal_set_affinity(unsigned int irq, const struct cpumask *mask)
+{
+	bfin_internal_mask_irq(irq);
+	bfin_internal_unmask_irq_affinity(irq, mask);
+
+	return 0;
+}
+#endif
+
 #ifdef CONFIG_PM
 int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 {
@@ -224,11 +252,6 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 		wakeup |= USBWE;
 		break;
 #endif
-#ifdef IRQ_KEY
-	case IRQ_KEY:
-		wakeup |= KPADWE;
-		break;
-#endif
 #ifdef CONFIG_BF54x
 	case IRQ_CNT:
 		wakeup |= ROTWE;
@@ -270,6 +293,9 @@ static struct irq_chip bfin_internal_irqchip = {
 	.mask_ack = bfin_internal_mask_irq,
 	.disable = bfin_internal_mask_irq,
 	.enable = bfin_internal_unmask_irq,
+#ifdef CONFIG_SMP
+	.set_affinity = bfin_internal_set_affinity,
+#endif
 #ifdef CONFIG_PM
 	.set_wake = bfin_internal_set_wake,
 #endif
@@ -294,7 +320,6 @@ static int error_int_mask;
 static void bfin_generic_error_mask_irq(unsigned int irq)
 {
 	error_int_mask &= ~(1L << (irq - IRQ_PPI_ERROR));
-
 	if (!error_int_mask)
 		bfin_internal_mask_irq(IRQ_GENERIC_ERROR);
 }
@@ -385,6 +410,127 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
 }
 #endif /* BF537_GENERIC_ERROR_INT_DEMUX */
 
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+static int mac_stat_int_mask;
+
+static void bfin_mac_status_ack_irq(unsigned int irq)
+{
+	switch (irq) {
+	case IRQ_MAC_MMCINT:
+		bfin_write_EMAC_MMC_TIRQS(
+			bfin_read_EMAC_MMC_TIRQE() &
+			bfin_read_EMAC_MMC_TIRQS());
+		bfin_write_EMAC_MMC_RIRQS(
+			bfin_read_EMAC_MMC_RIRQE() &
+			bfin_read_EMAC_MMC_RIRQS());
+		break;
+	case IRQ_MAC_RXFSINT:
+		bfin_write_EMAC_RX_STKY(
+			bfin_read_EMAC_RX_IRQE() &
+			bfin_read_EMAC_RX_STKY());
+		break;
+	case IRQ_MAC_TXFSINT:
+		bfin_write_EMAC_TX_STKY(
+			bfin_read_EMAC_TX_IRQE() &
+			bfin_read_EMAC_TX_STKY());
+		break;
+	case IRQ_MAC_WAKEDET:
+		bfin_write_EMAC_WKUP_CTL(
+			bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
+		break;
+	default:
+		/* These bits are W1C */
+		bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
+		break;
+	}
+}
+
+static void bfin_mac_status_mask_irq(unsigned int irq)
+{
+	mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
+#ifdef BF537_GENERIC_ERROR_INT_DEMUX
+	switch (irq) {
+	case IRQ_MAC_PHYINT:
+		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
+		break;
+	default:
+		break;
+	}
+#else
+	if (!mac_stat_int_mask)
+		bfin_internal_mask_irq(IRQ_MAC_ERROR);
+#endif
+	bfin_mac_status_ack_irq(irq);
+}
+
+static void bfin_mac_status_unmask_irq(unsigned int irq)
+{
+#ifdef BF537_GENERIC_ERROR_INT_DEMUX
+	switch (irq) {
+	case IRQ_MAC_PHYINT:
+		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
+		break;
+	default:
+		break;
+	}
+#else
+	if (!mac_stat_int_mask)
+		bfin_internal_unmask_irq(IRQ_MAC_ERROR);
+#endif
+	mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
+}
+
+#ifdef CONFIG_PM
+int bfin_mac_status_set_wake(unsigned int irq, unsigned int state)
+{
+#ifdef BF537_GENERIC_ERROR_INT_DEMUX
+	return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
+#else
+	return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
+#endif
+}
+#endif
+
+static struct irq_chip bfin_mac_status_irqchip = {
+	.name = "MACST",
+	.ack = bfin_ack_noop,
+	.mask_ack = bfin_mac_status_mask_irq,
+	.mask = bfin_mac_status_mask_irq,
+	.unmask = bfin_mac_status_unmask_irq,
+#ifdef CONFIG_PM
+	.set_wake = bfin_mac_status_set_wake,
+#endif
+};
+
+static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
+				      struct irq_desc *inta_desc)
+{
+	int i, irq = 0;
+	u32 status = bfin_read_EMAC_SYSTAT();
+
+	for (i = 0; i < (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
+		if (status & (1L << i)) {
+			irq = IRQ_MAC_PHYINT + i;
+			break;
+		}
+
+	if (irq) {
+		if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
+			bfin_handle_irq(irq);
+		} else {
+			bfin_mac_status_ack_irq(irq);
+			pr_debug("IRQ %d:"
+				 " MASKED MAC ERROR INTERRUPT ASSERTED\n",
+				 irq);
+		}
+	} else
+		printk(KERN_ERR
+		       "%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
+		       " INTERRUPT ASSERTED BUT NO SOURCE FOUND\n",
+		       __func__, __FILE__, __LINE__);
+}
+#endif
+
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
 #ifdef CONFIG_IPIPE
@@ -1031,7 +1177,6 @@ int __init init_arch_irq(void)
 #elif defined(CONFIG_BF538) || defined(CONFIG_BF539)
 		case IRQ_PORTF_INTA:
 #endif
-
 			set_irq_chained_handler(irq,
 						bfin_demux_gpio_irq);
 			break;
@@ -1040,29 +1185,36 @@ int __init init_arch_irq(void)
 			set_irq_chained_handler(irq, bfin_demux_error_irq);
 			break;
 #endif
-
-#ifdef CONFIG_SMP
-#ifdef CONFIG_TICKSOURCE_GPTMR0
-		case IRQ_TIMER0:
-#endif
-#ifdef CONFIG_TICKSOURCE_CORETMR
-		case IRQ_CORETMR:
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+		case IRQ_MAC_ERROR:
+			set_irq_chained_handler(irq, bfin_demux_mac_status_irq);
+			break;
 #endif
+#ifdef CONFIG_SMP
 		case IRQ_SUPPLE_0:
 		case IRQ_SUPPLE_1:
 			set_irq_handler(irq, handle_percpu_irq);
 			break;
 #endif
 
-#ifdef CONFIG_IPIPE
-#ifndef CONFIG_TICKSOURCE_CORETMR
-		case IRQ_TIMER0:
+#ifdef CONFIG_TICKSOURCE_CORETMR
+		case IRQ_CORETMR:
+# ifdef CONFIG_SMP
+			set_irq_handler(irq, handle_percpu_irq);
+			break;
+# else
 			set_irq_handler(irq, handle_simple_irq);
 			break;
+# endif
 #endif
-		case IRQ_CORETMR:
+
+#ifdef CONFIG_TICKSOURCE_GPTMR0
+		case IRQ_TIMER0:
 			set_irq_handler(irq, handle_simple_irq);
 			break;
+#endif
+
+#ifdef CONFIG_IPIPE
 		default:
 			set_irq_handler(irq, handle_level_irq);
 			break;
@@ -1078,14 +1230,22 @@ int __init init_arch_irq(void)
 	for (irq = IRQ_PPI_ERROR; irq <= IRQ_UART1_ERROR; irq++)
 		set_irq_chip_and_handler(irq, &bfin_generic_error_irqchip,
 					 handle_level_irq);
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+	set_irq_chained_handler(IRQ_MAC_ERROR, bfin_demux_mac_status_irq);
+#endif
 #endif
 
+#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
+	for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
+		set_irq_chip_and_handler(irq, &bfin_mac_status_irqchip,
+					 handle_level_irq);
+#endif
 	/* if configured as edge, then will be changed to do_edge_IRQ */
-	for (irq = GPIO_IRQ_BASE; irq < NR_IRQS; irq++)
+	for (irq = GPIO_IRQ_BASE;
+		irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
 		set_irq_chip_and_handler(irq, &bfin_gpio_irqchip,
 					 handle_level_irq);
 
-
 	bfin_write_IMASK(0);
 	CSYNC();
 	ilat = bfin_read_ILAT();
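The MAC-status support added above is the classic chained-IRQ demux pattern: one SIC line fans out to several EMAC status bits, software keeps its own mask (mac_stat_int_mask), and the demux handler dispatches only bits a driver has unmasked while acking the rest so they cannot re-assert. A hedged user-space sketch of the pattern; the register read and all names are stand-ins for the EMAC_SYSTAT flow:

	#include <stdint.h>
	#include <stdio.h>

	#define N_SUB_IRQS 16

	static uint32_t sub_irq_mask;		/* like mac_stat_int_mask */

	static uint32_t read_status_reg(void)	/* stand-in for bfin_read_EMAC_SYSTAT() */
	{
		return 1u << 2;			/* pretend bit 2 is pending */
	}

	static void handle_sub_irq(int irq) { printf("dispatch sub-irq %d\n", irq); }
	static void ack_sub_irq(int irq)    { printf("ack masked sub-irq %d\n", irq); }

	static void demux(void)
	{
		uint32_t status = read_status_reg();
		int i;

		for (i = 0; i < N_SUB_IRQS; i++)
			if (status & (1u << i)) {
				if (sub_irq_mask & (1u << i))
					handle_sub_irq(i);	/* bfin_handle_irq() */
				else
					ack_sub_irq(i);	/* clear so it can't re-assert */
				break;	/* the kernel handler also takes one bit per pass */
			}
	}

	int main(void)
	{
		sub_irq_mask |= 1u << 2;	/* a driver unmasked bit 2 */
		demux();
		return 0;
	}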
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 369e687582b7..7803f22d2ca7 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -122,9 +122,17 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
 	wait = msg->call_struct.wait;
 	cpu_clear(cpu, msg->call_struct.pending);
 	func(info);
-	if (wait)
+	if (wait) {
+#ifdef __ARCH_SYNC_CORE_DCACHE
+		/*
+		 * 'wait' usually means synchronization between CPUs.
+		 * Invalidate D cache in case shared data was changed
+		 * by func() to ensure cache coherence.
+		 */
+		resync_core_dcache();
+#endif
 		cpu_clear(cpu, msg->call_struct.waitmask);
-	else
+	} else
 		kfree(msg);
 }
 
@@ -219,6 +227,13 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
 		blackfin_dcache_invalidate_range(
 			(unsigned long)(&msg->call_struct.waitmask),
 			(unsigned long)(&msg->call_struct.waitmask));
+#ifdef __ARCH_SYNC_CORE_DCACHE
+		/*
+		 * Invalidate D cache in case shared data was changed by
+		 * other processors to ensure cache coherence.
+		 */
+		resync_core_dcache();
+#endif
 		kfree(msg);
 	}
 	return 0;
@@ -261,6 +276,13 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
 		blackfin_dcache_invalidate_range(
 			(unsigned long)(&msg->call_struct.waitmask),
 			(unsigned long)(&msg->call_struct.waitmask));
+#ifdef __ARCH_SYNC_CORE_DCACHE
+		/*
+		 * Invalidate D cache in case shared data was changed by
+		 * other processors to ensure cache coherence.
+		 */
+		resync_core_dcache();
+#endif
 		kfree(msg);
 	}
 	return 0;
@@ -322,8 +344,11 @@ void smp_send_stop(void)
 
 int __cpuinit __cpu_up(unsigned int cpu)
 {
-	struct task_struct *idle;
 	int ret;
+	static struct task_struct *idle;
+
+	if (idle)
+		free_task(idle);
 
 	idle = fork_idle(cpu);
 	if (IS_ERR(idle)) {
@@ -332,7 +357,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 	}
 
 	secondary_stack = task_stack_page(idle) + THREAD_SIZE;
-	smp_wmb();
 
 	ret = platform_boot_secondary(cpu, idle);
 
@@ -343,9 +367,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 static void __cpuinit setup_secondary(unsigned int cpu)
 {
-#if !defined(CONFIG_TICKSOURCE_GPTMR0)
-	struct irq_desc *timer_desc;
-#endif
 	unsigned long ilat;
 
 	bfin_write_IMASK(0);
@@ -360,17 +381,6 @@ static void __cpuinit setup_secondary(unsigned int cpu)
 	bfin_irq_flags |= IMASK_IVG15 |
 	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
 	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;
-
-#if defined(CONFIG_TICKSOURCE_GPTMR0)
-	/* Power down the core timer, just to play safe. */
-	bfin_write_TCNTL(0);
-
-	/* system timer0 has been setup by CoreA. */
-#else
-	timer_desc = irq_desc + IRQ_CORETMR;
-	setup_core_timer();
-	timer_desc->chip->enable(IRQ_CORETMR);
-#endif
 }
 
 void __cpuinit secondary_start_kernel(void)
@@ -405,7 +415,6 @@ void __cpuinit secondary_start_kernel(void)
 	atomic_inc(&mm->mm_users);
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
-	BUG_ON(current->mm);	/* Can't be, but better be safe than sorry. */
 
 	preempt_disable();
 
@@ -413,6 +422,9 @@ void __cpuinit secondary_start_kernel(void)
 
 	platform_secondary_init(cpu);
 
+	/* setup local core timer */
+	bfin_local_timer_setup();
+
 	local_irq_enable();
 
 	/*
@@ -462,25 +474,58 @@ void smp_icache_flush_range_others(unsigned long start, unsigned long end)
 EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);
 
 #ifdef __ARCH_SYNC_CORE_ICACHE
+unsigned long icache_invld_count[NR_CPUS];
 void resync_core_icache(void)
 {
 	unsigned int cpu = get_cpu();
 	blackfin_invalidate_entire_icache();
-	++per_cpu(cpu_data, cpu).icache_invld_count;
+	icache_invld_count[cpu]++;
 	put_cpu();
 }
 EXPORT_SYMBOL(resync_core_icache);
 #endif
 
 #ifdef __ARCH_SYNC_CORE_DCACHE
+unsigned long dcache_invld_count[NR_CPUS];
 unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));
 
 void resync_core_dcache(void)
 {
 	unsigned int cpu = get_cpu();
 	blackfin_invalidate_entire_dcache();
-	++per_cpu(cpu_data, cpu).dcache_invld_count;
+	dcache_invld_count[cpu]++;
 	put_cpu();
 }
 EXPORT_SYMBOL(resync_core_dcache);
 #endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+int __cpuexit __cpu_disable(void)
+{
+	unsigned int cpu = smp_processor_id();
+
+	if (cpu == 0)
+		return -EPERM;
+
+	set_cpu_online(cpu, false);
+	return 0;
+}
+
+static DECLARE_COMPLETION(cpu_killed);
+
+int __cpuexit __cpu_die(unsigned int cpu)
+{
+	return wait_for_completion_timeout(&cpu_killed, 5000);
+}
+
+void cpu_die(void)
+{
+	complete(&cpu_killed);
+
+	atomic_dec(&init_mm.mm_users);
+	atomic_dec(&init_mm.mm_count);
+
+	local_irq_disable();
+	platform_cpu_die();
+}
+#endif
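The hotplug code at the end of smp.c is a two-party handshake: the dying core signals a completion from cpu_die() and parks itself in platform_cpu_die(), while __cpu_die() on the surviving core waits (with a timeout) for that signal. A hedged pthread sketch of the same handshake; the condition variable stands in for the kernel completion and no timeout is modeled:

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t killed = PTHREAD_COND_INITIALIZER;
	static int cpu_is_dead;

	static void *dying_cpu(void *arg)	/* plays the role of cpu_die() */
	{
		pthread_mutex_lock(&lock);
		cpu_is_dead = 1;		/* complete(&cpu_killed) */
		pthread_cond_signal(&killed);
		pthread_mutex_unlock(&lock);
		/* the real core then disables IRQs and spins in platform_cpu_die() */
		return NULL;
	}

	int main(void)			/* plays the role of __cpu_die() */
	{
		pthread_t t;

		pthread_create(&t, NULL, dying_cpu, NULL);

		pthread_mutex_lock(&lock);
		while (!cpu_is_dead)	/* wait_for_completion_timeout() */
			pthread_cond_wait(&killed, &lock);
		pthread_mutex_unlock(&lock);

		pthread_join(t, NULL);
		puts("cpu offlined");
		return 0;
	}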