author    Graf Yang <graf.yang@analog.com>    2008-11-18 04:48:22 -0500
committer Bryan Wu <cooloney@kernel.org>      2008-11-18 04:48:22 -0500
commit    8f65873e47784a390949f0d61e5692dbf2a8253e (patch)
tree      4d9509bf5e52ebac190d79de04b783829d44f49e
parent    b8a989893cbdeb6c97a7b5af5f38fb0e480235f9 (diff)
Blackfin arch: SMP supporting patchset: Blackfin kernel and memory management code
The Blackfin dual-core BF561 processor can support SMP-like features: https://docs.blackfin.uclinux.org/doku.php?id=linux-kernel:smp-like

This patch extends the Blackfin kernel and memory management code with SMP support.

Signed-off-by: Graf Yang <graf.yang@analog.com>
Signed-off-by: Mike Frysinger <vapier.adi@gmail.com>
Signed-off-by: Bryan Wu <cooloney@kernel.org>
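The recurring technique in this patchset is turning per-kernel globals into per-CPU instances so the two BF561 cores never contend on the same data. A minimal sketch of that pattern using only the standard percpu API (the irq_count counter here is hypothetical, not part of the patch):

    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(unsigned long, irq_count);  /* one slot per core */

    static void count_on_this_cpu(void)
    {
        get_cpu_var(irq_count)++;   /* disables preemption around the access */
        put_cpu_var(irq_count);
    }

    static unsigned long count_all_cpus(void)
    {
        unsigned long sum = 0;
        int cpu;

        for_each_online_cpu(cpu)
            sum += per_cpu(irq_count, cpu);  /* read another core's slot */
        return sum;
    }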
-rw-r--r--  arch/blackfin/kernel/asm-offsets.c  |  29
-rw-r--r--  arch/blackfin/kernel/bfin_ksyms.c   |  34
-rw-r--r--  arch/blackfin/kernel/entry.S        |   1
-rw-r--r--  arch/blackfin/kernel/irqchip.c      |  24
-rw-r--r--  arch/blackfin/kernel/kgdb.c         |   4
-rw-r--r--  arch/blackfin/kernel/module.c       |  13
-rw-r--r--  arch/blackfin/kernel/process.c      |  23
-rw-r--r--  arch/blackfin/kernel/ptrace.c       |   8
-rw-r--r--  arch/blackfin/kernel/reboot.c       |  24
-rw-r--r--  arch/blackfin/kernel/setup.c        | 163
-rw-r--r--  arch/blackfin/kernel/time.c         | 114
-rw-r--r--  arch/blackfin/kernel/traps.c        |  56
-rw-r--r--  arch/blackfin/mm/init.c             |  60
-rw-r--r--  arch/blackfin/mm/sram-alloc.c       | 336
14 files changed, 580 insertions(+), 309 deletions(-)
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c
index 9bb85dd5ccb3..b5df9459d6d5 100644
--- a/arch/blackfin/kernel/asm-offsets.c
+++ b/arch/blackfin/kernel/asm-offsets.c
@@ -56,6 +56,9 @@ int main(void)
 	/* offsets into the thread struct */
 	DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
 	DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
+	DEFINE(THREAD_SR, offsetof(struct thread_struct, seqstat));
+	DEFINE(PT_SR, offsetof(struct thread_struct, seqstat));
+	DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
 	DEFINE(THREAD_PC, offsetof(struct thread_struct, pc));
 	DEFINE(KERNEL_STACK_SIZE, THREAD_SIZE);
 
@@ -128,5 +131,31 @@ int main(void)
 	DEFINE(SIGSEGV, SIGSEGV);
 	DEFINE(SIGTRAP, SIGTRAP);
 
+	/* PDA management (in L1 scratchpad) */
+	DEFINE(PDA_SYSCFG, offsetof(struct blackfin_pda, syscfg));
+#ifdef CONFIG_SMP
+	DEFINE(PDA_IRQFLAGS, offsetof(struct blackfin_pda, imask));
+#endif
+	DEFINE(PDA_IPDT, offsetof(struct blackfin_pda, ipdt));
+	DEFINE(PDA_IPDT_SWAPCOUNT, offsetof(struct blackfin_pda, ipdt_swapcount));
+	DEFINE(PDA_DPDT, offsetof(struct blackfin_pda, dpdt));
+	DEFINE(PDA_DPDT_SWAPCOUNT, offsetof(struct blackfin_pda, dpdt_swapcount));
+	DEFINE(PDA_EXIPTR, offsetof(struct blackfin_pda, ex_iptr));
+	DEFINE(PDA_EXOPTR, offsetof(struct blackfin_pda, ex_optr));
+	DEFINE(PDA_EXBUF, offsetof(struct blackfin_pda, ex_buf));
+	DEFINE(PDA_EXIMASK, offsetof(struct blackfin_pda, ex_imask));
+	DEFINE(PDA_EXSTACK, offsetof(struct blackfin_pda, ex_stack));
+#ifdef ANOMALY_05000261
+	DEFINE(PDA_LFRETX, offsetof(struct blackfin_pda, last_cplb_fault_retx));
+#endif
+	DEFINE(PDA_DCPLB, offsetof(struct blackfin_pda, dcplb_fault_addr));
+	DEFINE(PDA_ICPLB, offsetof(struct blackfin_pda, icplb_fault_addr));
+	DEFINE(PDA_RETX, offsetof(struct blackfin_pda, retx));
+	DEFINE(PDA_SEQSTAT, offsetof(struct blackfin_pda, seqstat));
+#ifdef CONFIG_SMP
+	/* Inter-core lock (in L2 SRAM) */
+	DEFINE(SIZEOF_CORELOCK, sizeof(struct corelock_slot));
+#endif
+
 	return 0;
 }
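For context: asm-offsets.c never runs; the build compiles it to assembly and scrapes the DEFINE() markers out of that assembly to generate asm-offsets.h, which is how PDA_RETX and friends become constants usable from entry-code assembly. A sketch of the common kernel idiom (the Blackfin DEFINE() is assumed to follow it):

    #include <linux/stddef.h>	/* offsetof() */

    /* Emit "->SYM value" into the generated .s file; a kbuild sed script
     * rewrites each marker as "#define SYM value" in asm-offsets.h. */
    #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    /* e.g. DEFINE(PDA_RETX, offsetof(struct blackfin_pda, retx)); lets
     * assembly address the retx slot as an offset from the PDA base. */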
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index b66f1d4c8344..763c31531e9e 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -68,3 +68,37 @@ EXPORT_SYMBOL(insw_8);
 EXPORT_SYMBOL(outsl);
 EXPORT_SYMBOL(insl);
 EXPORT_SYMBOL(insl_16);
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(__raw_atomic_update_asm);
+EXPORT_SYMBOL(__raw_atomic_clear_asm);
+EXPORT_SYMBOL(__raw_atomic_set_asm);
+EXPORT_SYMBOL(__raw_atomic_xor_asm);
+EXPORT_SYMBOL(__raw_atomic_test_asm);
+EXPORT_SYMBOL(__raw_xchg_1_asm);
+EXPORT_SYMBOL(__raw_xchg_2_asm);
+EXPORT_SYMBOL(__raw_xchg_4_asm);
+EXPORT_SYMBOL(__raw_cmpxchg_1_asm);
+EXPORT_SYMBOL(__raw_cmpxchg_2_asm);
+EXPORT_SYMBOL(__raw_cmpxchg_4_asm);
+EXPORT_SYMBOL(__raw_spin_is_locked_asm);
+EXPORT_SYMBOL(__raw_spin_lock_asm);
+EXPORT_SYMBOL(__raw_spin_trylock_asm);
+EXPORT_SYMBOL(__raw_spin_unlock_asm);
+EXPORT_SYMBOL(__raw_read_lock_asm);
+EXPORT_SYMBOL(__raw_read_trylock_asm);
+EXPORT_SYMBOL(__raw_read_unlock_asm);
+EXPORT_SYMBOL(__raw_write_lock_asm);
+EXPORT_SYMBOL(__raw_write_trylock_asm);
+EXPORT_SYMBOL(__raw_write_unlock_asm);
+EXPORT_SYMBOL(__raw_bit_set_asm);
+EXPORT_SYMBOL(__raw_bit_clear_asm);
+EXPORT_SYMBOL(__raw_bit_toggle_asm);
+EXPORT_SYMBOL(__raw_bit_test_asm);
+EXPORT_SYMBOL(__raw_bit_test_set_asm);
+EXPORT_SYMBOL(__raw_bit_test_clear_asm);
+EXPORT_SYMBOL(__raw_bit_test_toggle_asm);
+EXPORT_SYMBOL(__raw_uncached_fetch_asm);
+EXPORT_SYMBOL(__raw_smp_mark_barrier_asm);
+EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
+#endif
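These exports matter because on SMP Blackfin the generic spinlock/atomic/bitop wrappers are assumed to expand to the __raw_*_asm helpers above; a loadable module that takes a lock or uses atomics then carries unresolved references to them. A hedged sketch of a hypothetical module that would fail to load without the exports:

    #include <linux/module.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static int __init demo_init(void)
    {
        spin_lock(&demo_lock);      /* would resolve to __raw_spin_lock_asm */
        spin_unlock(&demo_lock);    /* ... and __raw_spin_unlock_asm */
        return 0;
    }
    module_init(demo_init);

    MODULE_LICENSE("GPL");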
diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S
index faea88ebb2ef..c0c3fe811228 100644
--- a/arch/blackfin/kernel/entry.S
+++ b/arch/blackfin/kernel/entry.S
@@ -30,6 +30,7 @@
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/errno.h>
+#include <asm/blackfin.h>
 #include <asm/asm-offsets.h>
 
 #include <asm/context.S>
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 07402f57c9de..9eebb782fd30 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -36,7 +36,7 @@
 #include <linux/irq.h>
 #include <asm/trace.h>
 
-static unsigned long irq_err_count;
+static atomic_t irq_err_count;
 static spinlock_t irq_controller_lock;
 
 /*
@@ -48,7 +48,7 @@ void dummy_mask_unmask_irq(unsigned int irq)
 
 void ack_bad_irq(unsigned int irq)
 {
-	irq_err_count += 1;
+	atomic_inc(&irq_err_count);
 	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
 }
 EXPORT_SYMBOL(ack_bad_irq);
@@ -72,7 +72,7 @@ static struct irq_desc bad_irq_desc = {
 
 int show_interrupts(struct seq_file *p, void *v)
 {
-	int i = *(loff_t *) v;
+	int i = *(loff_t *) v, j;
 	struct irqaction *action;
 	unsigned long flags;
 
@@ -80,19 +80,20 @@ int show_interrupts(struct seq_file *p, void *v)
 		spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
-			goto unlock;
-
-		seq_printf(p, "%3d: %10u ", i, kstat_irqs(i));
+			goto skip;
+		seq_printf(p, "%3d: ", i);
+		for_each_online_cpu(j)
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		seq_printf(p, " %8s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);
 		for (action = action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
+			seq_printf(p, " %s", action->name);
 
 		seq_putc(p, '\n');
- unlock:
+ skip:
 		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_printf(p, "Err: %10lu\n", irq_err_count);
-	}
+	} else if (i == NR_IRQS)
+		seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count));
 	return 0;
 }
 
@@ -101,7 +102,6 @@ int show_interrupts(struct seq_file *p, void *v)
  * come via this function. Instead, they should provide their
  * own 'handler'
  */
-
 #ifdef CONFIG_DO_IRQ_L1
 __attribute__((l1_text))
 #endif
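The irq_err_count conversion is the classic SMP fix: "count += 1" compiles to a load/modify/store sequence, so two cores can read the same value and one increment vanishes. A minimal sketch of the before/after semantics (standalone illustration, not the driver code itself):

    #include <asm/atomic.h>	/* 2008-era location of atomic_t */

    static unsigned long racy_count;		/* lost updates possible on SMP */
    static atomic_t safe_count = ATOMIC_INIT(0);

    static void on_event(void)
    {
        racy_count += 1;		/* load; add; store - interleavable */
        atomic_inc(&safe_count);	/* one indivisible read-modify-write */
    }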
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index b795a207742c..ab4022131a2a 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -363,12 +363,12 @@ void kgdb_passive_cpu_callback(void *info)
 
 void kgdb_roundup_cpus(unsigned long flags)
 {
-	smp_call_function(kgdb_passive_cpu_callback, NULL, 0, 0);
+	smp_call_function(kgdb_passive_cpu_callback, NULL, 0);
 }
 
 void kgdb_roundup_cpu(int cpu, unsigned long flags)
 {
-	smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0, 0);
+	smp_call_function_single(cpu, kgdb_passive_cpu_callback, NULL, 0);
 }
 #endif
 
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index e1bebc80a5bf..2e14cadd4302 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -343,7 +343,13 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
 		pr_debug("location is %x, value is %x type is %d \n",
 			 (unsigned int) location32, value,
 			 ELF32_R_TYPE(rel[i].r_info));
-
+#ifdef CONFIG_SMP
+		if ((unsigned long)location16 >= COREB_L1_DATA_A_START) {
+			printk(KERN_ERR "module %s: cannot relocate in L1: %u (SMP kernel)",
+			       mod->name, ELF32_R_TYPE(rel[i].r_info));
+			return -ENOEXEC;
+		}
+#endif
 		switch (ELF32_R_TYPE(rel[i].r_info)) {
 
 		case R_pcrel24:
@@ -436,6 +442,7 @@ module_finalize(const Elf_Ehdr * hdr,
 {
 	unsigned int i, strindex = 0, symindex = 0;
 	char *secstrings;
+	long err = 0;
 
 	secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
 
@@ -460,8 +467,10 @@ module_finalize(const Elf_Ehdr * hdr,
 		    (strcmp(".rela.l1.text", secstrings + sechdrs[i].sh_name) == 0) ||
 		    ((strcmp(".rela.text", secstrings + sechdrs[i].sh_name) == 0) &&
 		     (hdr->e_flags & (EF_BFIN_CODE_IN_L1|EF_BFIN_CODE_IN_L2))))) {
-			apply_relocate_add((Elf_Shdr *) sechdrs, strtab,
-					   symindex, i, mod);
+			err = apply_relocate_add((Elf_Shdr *) sechdrs, strtab,
+						 symindex, i, mod);
+			if (err < 0)
+				return -ENOEXEC;
 		}
 	}
 	return 0;
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 326e3019cd23..4359ea253010 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -171,6 +171,13 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
 	unsigned long clone_flags;
 	unsigned long newsp;
 
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	if (current->rt.nr_cpus_allowed == num_possible_cpus()) {
+		current->cpus_allowed = cpumask_of_cpu(smp_processor_id());
+		current->rt.nr_cpus_allowed = 1;
+	}
+#endif
+
 	/* syscall2 puts clone_flags in r0 and usp in r1 */
 	clone_flags = regs->r0;
 	newsp = regs->r1;
@@ -338,22 +345,22 @@ int _access_ok(unsigned long addr, unsigned long size)
 	if (addr >= (unsigned long)__init_begin &&
 	    addr + size <= (unsigned long)__init_end)
 		return 1;
-	if (addr >= L1_SCRATCH_START
-	    && addr + size <= L1_SCRATCH_START + L1_SCRATCH_LENGTH)
+	if (addr >= get_l1_scratch_start()
+	    && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
 		return 1;
 #if L1_CODE_LENGTH != 0
-	if (addr >= L1_CODE_START + (_etext_l1 - _stext_l1)
-	    && addr + size <= L1_CODE_START + L1_CODE_LENGTH)
+	if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
+	    && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
 		return 1;
 #endif
 #if L1_DATA_A_LENGTH != 0
-	if (addr >= L1_DATA_A_START + (_ebss_l1 - _sdata_l1)
-	    && addr + size <= L1_DATA_A_START + L1_DATA_A_LENGTH)
+	if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
+	    && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
 		return 1;
 #endif
 #if L1_DATA_B_LENGTH != 0
-	if (addr >= L1_DATA_B_START + (_ebss_b_l1 - _sdata_b_l1)
-	    && addr + size <= L1_DATA_B_START + L1_DATA_B_LENGTH)
+	if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
+	    && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
 		return 1;
 #endif
 #if L2_LENGTH != 0
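The _access_ok() hunk swaps link-time constants (L1_CODE_START etc.) for runtime accessors so the check is valid for whichever core executes it, each core having its own L1. Every branch is the same interval test; a hypothetical helper (in_region() is not in the patch) makes the shape explicit:

    /* Is [addr, addr + size) fully inside [start, start + len)? */
    static inline int in_region(unsigned long addr, unsigned long size,
                                unsigned long start, unsigned long len)
    {
        return addr >= start && addr + size <= start + len;
    }

    /* one branch of _access_ok(), restated:
     *	if (in_region(addr, size, get_l1_scratch_start(), L1_SCRATCH_LENGTH))
     *		return 1;
     */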
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 140bf00e9974..4de44f387dd5 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -220,8 +220,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 				break;
 			pr_debug("ptrace: user address is valid\n");
 
-			if (L1_CODE_LENGTH != 0 && addr >= L1_CODE_START
-			    && addr + sizeof(tmp) <= L1_CODE_START + L1_CODE_LENGTH) {
+			if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
+			    && addr + sizeof(tmp) <= get_l1_code_start() + L1_CODE_LENGTH) {
 				safe_dma_memcpy (&tmp, (const void *)(addr), sizeof(tmp));
 				copied = sizeof(tmp);
 
@@ -300,8 +300,8 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 				break;
 			pr_debug("ptrace: user address is valid\n");
 
-			if (L1_CODE_LENGTH != 0 && addr >= L1_CODE_START
-			    && addr + sizeof(data) <= L1_CODE_START + L1_CODE_LENGTH) {
+			if (L1_CODE_LENGTH != 0 && addr >= get_l1_code_start()
+			    && addr + sizeof(data) <= get_l1_code_start() + L1_CODE_LENGTH) {
 				safe_dma_memcpy ((void *)(addr), &data, sizeof(data));
 				copied = sizeof(data);
 
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index ae97ca407b0d..eeee8cb43360 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -21,7 +21,7 @@
  * the core reset.
  */
 __attribute__((l1_text))
-static void bfin_reset(void)
+static void _bfin_reset(void)
 {
 	/* Wait for completion of "system" events such as cache line
 	 * line fills so that we avoid infinite stalls later on as
@@ -66,6 +66,18 @@ static void bfin_reset(void)
 	}
 }
 
+static void bfin_reset(void)
+{
+	if (ANOMALY_05000353 || ANOMALY_05000386)
+		_bfin_reset();
+	else
+		/* the bootrom checks to see how it was reset and will
+		 * automatically perform a software reset for us when
+		 * it starts executing boot
+		 */
+		asm("raise 1;");
+}
+
 __attribute__((weak))
 void native_machine_restart(char *cmd)
 {
@@ -75,14 +87,10 @@ void machine_restart(char *cmd)
 {
 	native_machine_restart(cmd);
 	local_irq_disable();
-	if (ANOMALY_05000353 || ANOMALY_05000386)
-		bfin_reset();
+	if (smp_processor_id())
+		smp_call_function((void *)bfin_reset, 0, 1);
 	else
-		/* the bootrom checks to see how it was reset and will
-		 * automatically perform a software reset for us when
-		 * it starts executing boot
-		 */
-		asm("raise 1;");
+		bfin_reset();
 }
 
 __attribute__((weak))
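machine_restart() now makes sure the actual reset runs on core A: a non-zero smp_processor_id() means the caller is core B, and smp_call_function() (post-2.6.27 signature: func, info, wait) runs the callback on every *other* online CPU, which on the dual-core BF561 is exactly the boot core. A hedged sketch of the pattern:

    #include <linux/smp.h>

    static void reset_cb(void *unused)
    {
        /* would perform the chip reset; runs on the other core(s) */
    }

    static void reset_on_boot_core(void)
    {
        if (smp_processor_id())			/* we are a secondary core */
            smp_call_function(reset_cb, NULL, 1);	/* wait for it to run */
        else
            reset_cb(NULL);			/* already on core A */
    }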
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 71a9a8c53cea..c644d234a02e 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -26,11 +26,10 @@
 #include <asm/blackfin.h>
 #include <asm/cplbinit.h>
 #include <asm/div64.h>
+#include <asm/cpu.h>
 #include <asm/fixed_code.h>
 #include <asm/early_printk.h>
 
-static DEFINE_PER_CPU(struct cpu, cpu_devices);
-
 u16 _bfin_swrst;
 EXPORT_SYMBOL(_bfin_swrst);
 
@@ -79,29 +78,76 @@ static struct change_member *change_point[2*BFIN_MEMMAP_MAX] __initdata;
 static struct bfin_memmap_entry *overlap_list[BFIN_MEMMAP_MAX] __initdata;
 static struct bfin_memmap_entry new_map[BFIN_MEMMAP_MAX] __initdata;
 
-void __init bfin_cache_init(void)
-{
+DEFINE_PER_CPU(struct blackfin_cpudata, cpu_data);
+
 #if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
-	generate_cplb_tables();
+void __init generate_cplb_tables(void)
+{
+	unsigned int cpu;
+
+	/* Generate per-CPU I&D CPLB tables */
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
+		generate_cplb_tables_cpu(cpu);
+}
 #endif
 
+void __cpuinit bfin_setup_caches(unsigned int cpu)
+{
 #ifdef CONFIG_BFIN_ICACHE
-	bfin_icache_init();
-	printk(KERN_INFO "Instruction Cache Enabled\n");
+#ifdef CONFIG_MPU
+	bfin_icache_init(icplb_tbl[cpu]);
+#else
+	bfin_icache_init(icplb_tables[cpu]);
+#endif
 #endif
 
 #ifdef CONFIG_BFIN_DCACHE
-	bfin_dcache_init();
-	printk(KERN_INFO "Data Cache Enabled"
+#ifdef CONFIG_MPU
+	bfin_dcache_init(dcplb_tbl[cpu]);
+#else
+	bfin_dcache_init(dcplb_tables[cpu]);
+#endif
+#endif
+
+	/*
+	 * In cache coherence emulation mode, we need to have the
+	 * D-cache enabled before running any atomic operation which
+	 * might invove cache invalidation (i.e. spinlock, rwlock).
+	 * So printk's are deferred until then.
+	 */
+#ifdef CONFIG_BFIN_ICACHE
+	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
+#endif
+#ifdef CONFIG_BFIN_DCACHE
+	printk(KERN_INFO "Data Cache Enabled for CPU%u"
 # if defined CONFIG_BFIN_WB
 		" (write-back)"
 # elif defined CONFIG_BFIN_WT
 		" (write-through)"
 # endif
-		"\n");
+		"\n", cpu);
 #endif
 }
 
+void __cpuinit bfin_setup_cpudata(unsigned int cpu)
+{
+	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, cpu);
+
+	cpudata->idle = current;
+	cpudata->loops_per_jiffy = loops_per_jiffy;
+	cpudata->cclk = get_cclk();
+	cpudata->imemctl = bfin_read_IMEM_CONTROL();
+	cpudata->dmemctl = bfin_read_DMEM_CONTROL();
+}
+
+void __init bfin_cache_init(void)
+{
+#if defined(CONFIG_BFIN_DCACHE) || defined(CONFIG_BFIN_ICACHE)
+	generate_cplb_tables();
+#endif
+	bfin_setup_caches(0);
+}
+
 void __init bfin_relocate_l1_mem(void)
 {
 	unsigned long l1_code_length;
@@ -230,7 +276,7 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
 	/* record all known change-points (starting and ending addresses),
 	   omitting those that are for empty memory regions */
 	chgidx = 0;
 	for (i = 0; i < old_nr; i++) {
 		if (map[i].size != 0) {
 			change_point[chgidx]->addr = map[i].addr;
 			change_point[chgidx++]->pentry = &map[i];
@@ -238,13 +284,13 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
 			change_point[chgidx++]->pentry = &map[i];
 		}
 	}
 	chg_nr = chgidx;	/* true number of change-points */
 
 	/* sort change-point list by memory addresses (low -> high) */
 	still_changing = 1;
 	while (still_changing) {
 		still_changing = 0;
 		for (i = 1; i < chg_nr; i++) {
 			/* if <current_addr> > <last_addr>, swap */
 			/* or, if current=<start_addr> & last=<end_addr>, swap */
 			if ((change_point[i]->addr < change_point[i-1]->addr) ||
@@ -261,10 +307,10 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
 	}
 
 	/* create a new memmap, removing overlaps */
 	overlap_entries = 0;	/* number of entries in the overlap table */
 	new_entry = 0;		/* index for creating new memmap entries */
 	last_type = 0;		/* start with undefined memory type */
 	last_addr = 0;		/* start with 0 as last starting address */
 	/* loop through change-points, determining affect on the new memmap */
 	for (chgidx = 0; chgidx < chg_nr; chgidx++) {
 		/* keep track of all overlapping memmap entries */
@@ -286,14 +332,14 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
 			if (overlap_list[i]->type > current_type)
 				current_type = overlap_list[i]->type;
 		/* continue building up new memmap based on this information */
 		if (current_type != last_type) {
 			if (last_type != 0) {
 				new_map[new_entry].size =
 					change_point[chgidx]->addr - last_addr;
 				/* move forward only if the new size was non-zero */
 				if (new_map[new_entry].size != 0)
 					if (++new_entry >= BFIN_MEMMAP_MAX)
 						break;	/* no more space left for new entries */
 			}
 			if (current_type != 0) {
 				new_map[new_entry].addr = change_point[chgidx]->addr;
@@ -303,9 +349,9 @@ static int __init sanitize_memmap(struct bfin_memmap_entry *map, int *pnr_map)
 			last_type = current_type;
 		}
 	}
 	new_nr = new_entry;	/* retain count for new entries */
 
 	/* copy new mapping into original location */
 	memcpy(map, new_map, new_nr*sizeof(struct bfin_memmap_entry));
 	*pnr_map = new_nr;
 
@@ -361,7 +407,6 @@ static __init int parse_memmap(char *arg)
  * - "memmap=XXX[KkmM][@][$]XXX[KkmM]" defines a memory region
  *		@ from <start> to <start>+<mem>, type RAM
  *		$ from <start> to <start>+<mem>, type RESERVED
- *
  */
 static __init void parse_cmdline_early(char *cmdline_p)
 {
@@ -383,12 +428,10 @@ static __init void parse_cmdline_early(char *cmdline_p)
 				if (*to != ' ') {
 					if (*to == '$'
 					    || *(to + 1) == '$')
-						reserved_mem_dcache_on =
-						    1;
+						reserved_mem_dcache_on = 1;
 					if (*to == '#'
 					    || *(to + 1) == '#')
-						reserved_mem_icache_on =
-						    1;
+						reserved_mem_icache_on = 1;
 				}
 			}
 		} else if (!memcmp(to, "earlyprintk=", 12)) {
@@ -417,9 +460,8 @@ static __init void parse_cmdline_early(char *cmdline_p)
  *  [_ramend - DMA_UNCACHED_REGION,
  *  _ramend]: uncached DMA region
  *  [_ramend, physical_mem_end]: memory not managed by kernel
- *
 */
 static __init void memory_setup(void)
 {
 #ifdef CONFIG_MTD_UCLINUX
 	unsigned long mtd_phys = 0;
@@ -436,7 +478,7 @@ static __init void memory_setup(void)
 	memory_end = _ramend - DMA_UNCACHED_REGION;
 
 #ifdef CONFIG_MPU
-	/* Round up to multiple of 4MB. */
+	/* Round up to multiple of 4MB */
 	memory_start = (_ramstart + 0x3fffff) & ~0x3fffff;
 #else
 	memory_start = PAGE_ALIGN(_ramstart);
@@ -616,7 +658,7 @@ static __init void setup_bootmem_allocator(void)
 	end_pfn = memory_end >> PAGE_SHIFT;
 
 	/*
 	 * give all the memory to the bootmap allocator, tell it to put the
 	 * boot mem_map at the start of memory.
 	 */
 	bootmap_size = init_bootmem_node(NODE_DATA(0),
@@ -791,7 +833,11 @@ void __init setup_arch(char **cmdline_p)
 	bfin_write_SWRST(_bfin_swrst | DOUBLE_FAULT);
 #endif
 
+#ifdef CONFIG_SMP
+	if (_bfin_swrst & SWRST_DBL_FAULT_A) {
+#else
 	if (_bfin_swrst & RESET_DOUBLE) {
+#endif
 		printk(KERN_EMERG "Recovering from DOUBLE FAULT event\n");
 #ifdef CONFIG_DEBUG_DOUBLEFAULT
 		/* We assume the crashing kernel, and the current symbol table match */
@@ -835,7 +881,7 @@ void __init setup_arch(char **cmdline_p)
 	printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
 
 	printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
 	       cclk / 1000000, sclk / 1000000);
 
 	if (ANOMALY_05000273 && (cclk >> 1) <= sclk)
 		printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n");
@@ -867,18 +913,21 @@ void __init setup_arch(char **cmdline_p)
 	BUG_ON((char *)&safe_user_instruction - (char *)&fixed_code_start
 	       != SAFE_USER_INSTRUCTION - FIXED_CODE_START);
 
+#ifdef CONFIG_SMP
+	platform_init_cpus();
+#endif
 	init_exception_vectors();
-	bfin_cache_init();
+	bfin_cache_init();	/* Initialize caches for the boot CPU */
 }
 
 static int __init topology_init(void)
 {
-	int cpu;
+	unsigned int cpu;
+	/* Record CPU-private information for the boot processor. */
+	bfin_setup_cpudata(0);
 
 	for_each_possible_cpu(cpu) {
-		struct cpu *c = &per_cpu(cpu_devices, cpu);
-
-		register_cpu(c, cpu);
+		register_cpu(&per_cpu(cpu_data, cpu).cpu, cpu);
 	}
 
 	return 0;
@@ -983,15 +1032,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	char *cpu, *mmu, *fpu, *vendor, *cache;
 	uint32_t revid;
 
-	u_long cclk = 0, sclk = 0;
+	u_long sclk = 0;
 	u_int icache_size = BFIN_ICACHESIZE / 1024, dcache_size = 0, dsup_banks = 0;
+	struct blackfin_cpudata *cpudata = &per_cpu(cpu_data, *(unsigned int *)v);
 
 	cpu = CPU;
 	mmu = "none";
 	fpu = "none";
 	revid = bfin_revid();
 
-	cclk = get_cclk();
 	sclk = get_sclk();
 
 	switch (bfin_read_CHIPID() & CHIPID_MANUFACTURE) {
@@ -1003,10 +1052,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		break;
 	}
 
-	seq_printf(m, "processor\t: %d\n"
-		"vendor_id\t: %s\n",
-		*(unsigned int *)v,
-		vendor);
+	seq_printf(m, "processor\t: %d\n" "vendor_id\t: %s\n",
+		*(unsigned int *)v, vendor);
 
 	if (CPUID == bfin_cpuid())
 		seq_printf(m, "cpu family\t: 0x%04x\n", CPUID);
@@ -1016,7 +1063,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 
 	seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
 		"stepping\t: %d\n",
-		cpu, cclk/1000000, sclk/1000000,
+		cpu, cpudata->cclk/1000000, sclk/1000000,
#ifdef CONFIG_MPU
 		"mpu on",
 #else
@@ -1025,16 +1072,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		revid);
 
 	seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
-		cclk/1000000, cclk%1000000,
+		cpudata->cclk/1000000, cpudata->cclk%1000000,
 		sclk/1000000, sclk%1000000);
 	seq_printf(m, "bogomips\t: %lu.%02lu\n"
 		"Calibration\t: %lu loops\n",
-		(loops_per_jiffy * HZ) / 500000,
-		((loops_per_jiffy * HZ) / 5000) % 100,
-		(loops_per_jiffy * HZ));
+		(cpudata->loops_per_jiffy * HZ) / 500000,
+		((cpudata->loops_per_jiffy * HZ) / 5000) % 100,
+		(cpudata->loops_per_jiffy * HZ));
 
 	/* Check Cache configutation */
-	switch (bfin_read_DMEM_CONTROL() & (1 << DMC0_P | 1 << DMC1_P)) {
+	switch (cpudata->dmemctl & (1 << DMC0_P | 1 << DMC1_P)) {
 	case ACACHE_BSRAM:
 		cache = "dbank-A/B\t: cache/sram";
 		dcache_size = 16;
@@ -1058,10 +1105,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	}
 
 	/* Is it turned on? */
-	if ((bfin_read_DMEM_CONTROL() & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
+	if ((cpudata->dmemctl & (ENDCPLB | DMC_ENABLE)) != (ENDCPLB | DMC_ENABLE))
 		dcache_size = 0;
 
-	if ((bfin_read_IMEM_CONTROL() & (IMC | ENICPLB)) != (IMC | ENICPLB))
+	if ((cpudata->imemctl & (IMC | ENICPLB)) != (IMC | ENICPLB))
 		icache_size = 0;
 
 	seq_printf(m, "cache size\t: %d KB(L1 icache) "
@@ -1086,8 +1133,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		"dcache setup\t: %d Super-banks/%d Sub-banks/%d Ways, %d Lines/Way\n",
 		dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
 		BFIN_DLINES);
+#ifdef __ARCH_SYNC_CORE_DCACHE
+	seq_printf(m,
+		"SMP Dcache Flushes\t: %lu\n\n",
+		per_cpu(cpu_data, *(unsigned int *)v).dcache_invld_count);
+#endif
 #ifdef CONFIG_BFIN_ICACHE_LOCK
-	switch ((bfin_read_IMEM_CONTROL() >> 3) & WAYALL_L) {
+	switch ((cpudata->imemctl >> 3) & WAYALL_L) {
 	case WAY0_L:
 		seq_printf(m, "Way0 Locked-Down\n");
 		break;
@@ -1137,6 +1189,12 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		seq_printf(m, "No Ways are locked\n");
 	}
 #endif
+	if (*(unsigned int *)v != NR_CPUS-1)
+		return 0;
+
+#if L2_LENGTH
+	seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
+#endif
 	seq_printf(m, "board name\t: %s\n", bfin_board_name);
 	seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
 		physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
@@ -1144,6 +1202,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		((int)memory_end - (int)_stext) >> 10,
 		_stext,
 		(void *)memory_end);
+	seq_printf(m, "\n");
 
 	return 0;
 }
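show_cpuinfo() now prints each core's own calibration from cpudata->loops_per_jiffy. The BogoMIPS arithmetic is easy to sanity-check: lpj is delay loops per tick, so lpj * HZ is loops per second, and one BogoMIPS is 500000 loops/s. A worked sketch with assumed numbers (lpj = 997376, HZ = 250):

    #include <linux/seq_file.h>

    /*	lpj * HZ                  = 249344000 loops/s
     *	(lpj * HZ) / 500000       = 498	(integer part)
     *	((lpj * HZ) / 5000) % 100 = 68	(two decimals)
     *	-> "bogomips : 498.68"
     */
    static void print_bogomips(struct seq_file *m, unsigned long lpj)
    {
        seq_printf(m, "bogomips\t: %lu.%02lu\n",
                   (lpj * HZ) / 500000, ((lpj * HZ) / 5000) % 100);
    }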
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index eb2352320454..06de2ce67a9e 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -34,9 +34,11 @@
 #include <linux/interrupt.h>
 #include <linux/time.h>
 #include <linux/irq.h>
+#include <linux/delay.h>
 
 #include <asm/blackfin.h>
 #include <asm/time.h>
+#include <asm/gptimers.h>
 
 /* This is an NTP setting */
 #define TICK_SIZE (tick_nsec / 1000)
@@ -46,11 +48,14 @@ static unsigned long gettimeoffset(void);
 
 static struct irqaction bfin_timer_irq = {
 	.name = "BFIN Timer Tick",
+#ifdef CONFIG_IRQ_PER_CPU
+	.flags = IRQF_DISABLED | IRQF_PERCPU,
+#else
 	.flags = IRQF_DISABLED
+#endif
 };
 
-static void
-time_sched_init(irq_handler_t timer_routine)
+void setup_core_timer(void)
 {
 	u32 tcount;
 
@@ -71,12 +76,41 @@ time_sched_init(irq_handler_t timer_routine)
 	CSYNC();
 
 	bfin_write_TCNTL(7);
+}
+
+#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+void setup_system_timer0(void)
+{
+	/* Power down the core timer, just to play safe. */
+	bfin_write_TCNTL(0);
+
+	disable_gptimers(TIMER0bit);
+	set_gptimer_status(0, TIMER_STATUS_TRUN0);
+	while (get_gptimer_status(0) & TIMER_STATUS_TRUN0)
+		udelay(10);
+
+	set_gptimer_config(0, 0x59); /* IRQ enable, periodic, PWM_OUT, SCLKed, OUT PAD disabled */
+	set_gptimer_period(TIMER0_id, get_sclk() / HZ);
+	set_gptimer_pwidth(TIMER0_id, 1);
+	SSYNC();
+	enable_gptimers(TIMER0bit);
+}
+#endif
 
+static void
+time_sched_init(irqreturn_t(*timer_routine) (int, void *))
+{
+#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+	setup_system_timer0();
+#else
+	setup_core_timer();
+#endif
 	bfin_timer_irq.handler = (irq_handler_t)timer_routine;
-	/* call setup_irq instead of request_irq because request_irq calls
-	 * kmalloc which has not been initialized yet
-	 */
+#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+	setup_irq(IRQ_TIMER0, &bfin_timer_irq);
+#else
 	setup_irq(IRQ_CORETMR, &bfin_timer_irq);
+#endif
 }
 
 /*
@@ -87,17 +121,23 @@ static unsigned long gettimeoffset(void)
 	unsigned long offset;
 	unsigned long clocks_per_jiffy;
 
+#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+	clocks_per_jiffy = bfin_read_TIMER0_PERIOD();
+	offset = bfin_read_TIMER0_COUNTER() / \
+		(((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
+
+	if ((get_gptimer_status(0) & TIMER_STATUS_TIMIL0) && offset < (100000 / HZ / 2))
+		offset += (USEC_PER_SEC / HZ);
+#else
 	clocks_per_jiffy = bfin_read_TPERIOD();
-	offset =
-	    (clocks_per_jiffy -
-	     bfin_read_TCOUNT()) / (((clocks_per_jiffy + 1) * HZ) /
-				    USEC_PER_SEC);
+	offset = (clocks_per_jiffy - bfin_read_TCOUNT()) / \
+		(((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
 
 	/* Check if we just wrapped the counters and maybe missed a tick */
 	if ((bfin_read_ILAT() & (1 << IRQ_CORETMR))
 	    && (offset < (100000 / HZ / 2)))
 		offset += (USEC_PER_SEC / HZ);
-
+#endif
 	return offset;
 }
 
@@ -120,34 +160,38 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
 	static long last_rtc_update;
 
 	write_seqlock(&xtime_lock);
-
-	do_timer(1);
-
-	profile_tick(CPU_PROFILING);
-
-	/*
-	 * If we have an externally synchronized Linux clock, then update
-	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
-	 * called as close as possible to 500 ms before the new second starts.
-	 */
-
-	if (ntp_synced() &&
-	    xtime.tv_sec > last_rtc_update + 660 &&
-	    (xtime.tv_nsec / NSEC_PER_USEC) >=
-	    500000 - ((unsigned)TICK_SIZE) / 2
-	    && (xtime.tv_nsec / NSEC_PER_USEC) <=
-	    500000 + ((unsigned)TICK_SIZE) / 2) {
-		if (set_rtc_mmss(xtime.tv_sec) == 0)
-			last_rtc_update = xtime.tv_sec;
-		else
-			/* Do it again in 60s. */
-			last_rtc_update = xtime.tv_sec - 600;
+#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+	if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) {
+#endif
+		do_timer(1);
+
+		/*
+		 * If we have an externally synchronized Linux clock, then update
+		 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+		 * called as close as possible to 500 ms before the new second starts.
+		 */
+		if (ntp_synced() &&
+		    xtime.tv_sec > last_rtc_update + 660 &&
+		    (xtime.tv_nsec / NSEC_PER_USEC) >=
+		    500000 - ((unsigned)TICK_SIZE) / 2
+		    && (xtime.tv_nsec / NSEC_PER_USEC) <=
+		    500000 + ((unsigned)TICK_SIZE) / 2) {
+			if (set_rtc_mmss(xtime.tv_sec) == 0)
+				last_rtc_update = xtime.tv_sec;
+			else
+				/* Do it again in 60s. */
+				last_rtc_update = xtime.tv_sec - 600;
+		}
+#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+		set_gptimer_status(0, TIMER_STATUS_TIMIL0);
 	}
+#endif
 	write_sequnlock(&xtime_lock);
 
-#ifndef CONFIG_SMP
 	update_process_times(user_mode(get_irq_regs()));
-#endif
+	profile_tick(CPU_PROFILING);
 
 	return IRQ_HANDLED;
 }
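The gptimer tick source is programmed from the system clock: set_gptimer_period(..., get_sclk() / HZ) makes the timer wrap once per jiffy, and gettimeoffset() rescales the running counter to microseconds. A worked sketch with assumed values (SCLK = 125 MHz, HZ = 250):

    /*	period  = get_sclk() / HZ = 125000000 / 250 = 500000 cycles/tick
     *	divisor = ((period + 1) * HZ) / USEC_PER_SEC
     *	        = (500001 * 250) / 1000000 = 125 cycles per microsecond
     *	offset_us = counter / 125
     */
    static unsigned long sketch_offset_us(unsigned long counter, unsigned long period)
    {
        return counter / (((period + 1) * HZ) / USEC_PER_SEC);
    }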
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index bef025b07443..af7cc43630de 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -75,16 +75,6 @@ void __init trap_init(void)
 	CSYNC();
 }
 
-/*
- * Used to save the RETX, SEQSTAT, I/D CPLB FAULT ADDR
- * values across the transition from exception to IRQ5.
- * We put these in L1, so they are going to be in a valid
- * location during exception context
- */
-__attribute__((l1_data))
-unsigned long saved_retx, saved_seqstat,
-	saved_icplb_fault_addr, saved_dcplb_fault_addr;
-
 static void decode_address(char *buf, unsigned long address)
 {
 #ifdef CONFIG_DEBUG_VERBOSE
@@ -211,18 +201,18 @@ asmlinkage void double_fault_c(struct pt_regs *fp)
 	printk(KERN_EMERG "\n" KERN_EMERG "Double Fault\n");
 #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
 	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) {
+		unsigned int cpu = smp_processor_id();
 		char buf[150];
-		decode_address(buf, saved_retx);
+		decode_address(buf, cpu_pda[cpu].retx);
 		printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n",
-			(int)saved_seqstat & SEQSTAT_EXCAUSE, buf);
-		decode_address(buf, saved_dcplb_fault_addr);
+			(unsigned int)cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE, buf);
+		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
 		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf);
-		decode_address(buf, saved_icplb_fault_addr);
+		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
 		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf);
 
 		decode_address(buf, fp->retx);
-		printk(KERN_NOTICE "The instruction at %s caused a double exception\n",
-			buf);
+		printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf);
 	} else
 #endif
 	{
@@ -240,6 +230,9 @@ asmlinkage void trap_c(struct pt_regs *fp)
 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
 	int j;
 #endif
+#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
+	unsigned int cpu = smp_processor_id();
+#endif
 	int sig = 0;
 	siginfo_t info;
 	unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;
@@ -417,7 +410,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
 		info.si_code = ILL_CPLB_MULHIT;
 		sig = SIGSEGV;
 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
-		if (saved_dcplb_fault_addr < FIXED_CODE_START)
+		if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
 			verbose_printk(KERN_NOTICE "NULL pointer access\n");
 		else
 #endif
@@ -471,7 +464,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
 		info.si_code = ILL_CPLB_MULHIT;
 		sig = SIGSEGV;
 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
-		if (saved_icplb_fault_addr < FIXED_CODE_START)
+		if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
 			verbose_printk(KERN_NOTICE "Jump to NULL address\n");
 		else
 #endif
@@ -960,6 +953,7 @@ void dump_bfin_process(struct pt_regs *fp)
 	else
 		verbose_printk(KERN_NOTICE "COMM= invalid\n");
 
+	printk(KERN_NOTICE "CPU = %d\n", current_thread_info()->cpu);
 	if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
 		verbose_printk(KERN_NOTICE "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
 			KERN_NOTICE " BSS = 0x%p-0x%p USER-STACK = 0x%p\n"
@@ -1053,6 +1047,7 @@ void show_regs(struct pt_regs *fp)
 	struct irqaction *action;
 	unsigned int i;
 	unsigned long flags;
+	unsigned int cpu = smp_processor_id();
 
 	verbose_printk(KERN_NOTICE "\n" KERN_NOTICE "SEQUENCER STATUS:\t\t%s\n", print_tainted());
 	verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx SYSCFG: %04lx\n",
@@ -1112,9 +1107,9 @@ unlock:
 
 	if (((long)fp->seqstat & SEQSTAT_EXCAUSE) &&
 	    (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) {
-		decode_address(buf, saved_dcplb_fault_addr);
+		decode_address(buf, cpu_pda[cpu].dcplb_fault_addr);
 		verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf);
-		decode_address(buf, saved_icplb_fault_addr);
+		decode_address(buf, cpu_pda[cpu].icplb_fault_addr);
 		verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf);
 	}
 
@@ -1153,20 +1148,21 @@ unlock:
 asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text));
 #endif
 
-asmlinkage int sys_bfin_spinlock(int *spinlock)
+static DEFINE_SPINLOCK(bfin_spinlock_lock);
+
+asmlinkage int sys_bfin_spinlock(int *p)
 {
-	int ret = 0;
-	int tmp = 0;
+	int ret, tmp = 0;
 
-	local_irq_disable();
-	ret = get_user(tmp, spinlock);
-	if (ret == 0) {
-		if (tmp)
+	spin_lock(&bfin_spinlock_lock);	/* This would also hold kernel preemption. */
+	ret = get_user(tmp, p);
+	if (likely(ret == 0)) {
+		if (unlikely(tmp))
 			ret = 1;
-		tmp = 1;
-		put_user(tmp, spinlock);
+		else
+			put_user(1, p);
 	}
-	local_irq_enable();
+	spin_unlock(&bfin_spinlock_lock);
 	return ret;
 }
 
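The sys_bfin_spinlock() rewrite preserves its userspace contract: an atomic test-and-set on a word in user memory that returns the previous value (0 = acquired, 1 = already held). Disabling local IRQs was enough against one core, but not against the other, hence the kernel spinlock. A sketch of the semantics (wrapping user access in a spinlock is assumed safe here only because nommu Blackfin get_user/put_user cannot fault and sleep):

    static DEFINE_SPINLOCK(ts_lock);

    static int test_and_set_user_word(int __user *p)
    {
        int old = 0, ret;

        spin_lock(&ts_lock);		/* serializes both cores */
        ret = get_user(old, p);
        if (ret == 0 && old == 0)
            ret = put_user(1, p);	/* acquire: 0 -> 1 */
        spin_unlock(&ts_lock);
        return ret ? ret : old;		/* previous value, or -EFAULT */
    }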
diff --git a/arch/blackfin/mm/init.c b/arch/blackfin/mm/init.c
index bc240abb8745..57d306b9c56d 100644
--- a/arch/blackfin/mm/init.c
+++ b/arch/blackfin/mm/init.c
@@ -31,7 +31,8 @@
 #include <linux/bootmem.h>
 #include <linux/uaccess.h>
 #include <asm/bfin-global.h>
-#include <asm/l1layout.h>
+#include <asm/pda.h>
+#include <asm/cplbinit.h>
 #include "blackfin_sram.h"
 
 /*
@@ -53,6 +54,11 @@ static unsigned long empty_bad_page;
 
 unsigned long empty_zero_page;
 
+extern unsigned long exception_stack[NR_CPUS][1024];
+
+struct blackfin_pda cpu_pda[NR_CPUS];
+EXPORT_SYMBOL(cpu_pda);
+
 /*
  * paging_init() continues the virtual memory environment setup which
  * was begun by the code in arch/head.S.
@@ -98,6 +104,42 @@ void __init paging_init(void)
 	}
 }
 
+asmlinkage void init_pda(void)
+{
+	unsigned int cpu = raw_smp_processor_id();
+
+	/* Initialize the PDA fields holding references to other parts
+	   of the memory. The content of such memory is still
+	   undefined at the time of the call, we are only setting up
+	   valid pointers to it. */
+	memset(&cpu_pda[cpu], 0, sizeof(cpu_pda[cpu]));
+
+	cpu_pda[0].next = &cpu_pda[1];
+	cpu_pda[1].next = &cpu_pda[0];
+
+	cpu_pda[cpu].ex_stack = exception_stack[cpu + 1];
+
+#ifdef CONFIG_MPU
+#else
+	cpu_pda[cpu].ipdt = ipdt_tables[cpu];
+	cpu_pda[cpu].dpdt = dpdt_tables[cpu];
+#ifdef CONFIG_CPLB_INFO
+	cpu_pda[cpu].ipdt_swapcount = ipdt_swapcount_tables[cpu];
+	cpu_pda[cpu].dpdt_swapcount = dpdt_swapcount_tables[cpu];
+#endif
+#endif
+
+#ifdef CONFIG_SMP
+	cpu_pda[cpu].imask = 0x1f;
+#endif
+}
+
+void __cpuinit reserve_pda(void)
+{
+	printk(KERN_INFO "PDA for CPU%u reserved at %p\n", smp_processor_id(),
+					&cpu_pda[smp_processor_id()]);
+}
+
 void __init mem_init(void)
 {
 	unsigned int codek = 0, datak = 0, initk = 0;
@@ -141,21 +183,13 @@ void __init mem_init(void)
 
 static int __init sram_init(void)
 {
-	unsigned long tmp;
-
 	/* Initialize the blackfin L1 Memory. */
 	bfin_sram_init();
 
-	/* Allocate this once; never free it. We assume this gives us a
-	   pointer to the start of L1 scratchpad memory; panic if it
-	   doesn't. */
-	tmp = (unsigned long)l1sram_alloc(sizeof(struct l1_scratch_task_info));
-	if (tmp != (unsigned long)L1_SCRATCH_TASK_INFO) {
-		printk(KERN_EMERG "mem_init(): Did not get the right address from l1sram_alloc: %08lx != %08lx\n",
-		       tmp, (unsigned long)L1_SCRATCH_TASK_INFO);
-		panic("No L1, time to give up\n");
-	}
-
+	/* Reserve the PDA space for the boot CPU right after we
+	 * initialized the scratch memory allocator.
+	 */
+	reserve_pda();
 	return 0;
 }
 pure_initcall(sram_init);
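The PDA replaces the old fixed L1 scratchpad struct: one statically sized slot per core, indexed by CPU id, so low-level exception code can stash RETX/SEQSTAT/CPLB fault addresses without any locking. A small sketch of the access pattern (my_pda() is a hypothetical helper; field names are those of struct blackfin_pda above):

    static inline struct blackfin_pda *my_pda(void)
    {
        return &cpu_pda[raw_smp_processor_id()];
    }

    /* e.g. an exception path can record its fault state race-free,
     * because no other core ever writes this core's slot:
     *	my_pda()->dcplb_fault_addr = fault_addr;
     *	my_pda()->seqstat = fp->seqstat;
     */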
diff --git a/arch/blackfin/mm/sram-alloc.c b/arch/blackfin/mm/sram-alloc.c
index cc6f336e7313..8f82b4c92d07 100644
--- a/arch/blackfin/mm/sram-alloc.c
+++ b/arch/blackfin/mm/sram-alloc.c
@@ -41,8 +41,10 @@
41#include <asm/blackfin.h> 41#include <asm/blackfin.h>
42#include "blackfin_sram.h" 42#include "blackfin_sram.h"
43 43
44static spinlock_t l1sram_lock, l1_data_sram_lock, l1_inst_sram_lock; 44static DEFINE_PER_CPU(spinlock_t, l1sram_lock) ____cacheline_aligned_in_smp;
45static spinlock_t l2_sram_lock; 45static DEFINE_PER_CPU(spinlock_t, l1_data_sram_lock) ____cacheline_aligned_in_smp;
46static DEFINE_PER_CPU(spinlock_t, l1_inst_sram_lock) ____cacheline_aligned_in_smp;
47static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
46 48
47/* the data structure for L1 scratchpad and DATA SRAM */ 49/* the data structure for L1 scratchpad and DATA SRAM */
48struct sram_piece { 50struct sram_piece {
@@ -52,18 +54,22 @@ struct sram_piece {
52 struct sram_piece *next; 54 struct sram_piece *next;
53}; 55};
54 56
55static struct sram_piece free_l1_ssram_head, used_l1_ssram_head; 57static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
58static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);
56 59
57#if L1_DATA_A_LENGTH != 0 60#if L1_DATA_A_LENGTH != 0
58static struct sram_piece free_l1_data_A_sram_head, used_l1_data_A_sram_head; 61static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
62static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
59#endif 63#endif
60 64
61#if L1_DATA_B_LENGTH != 0 65#if L1_DATA_B_LENGTH != 0
62static struct sram_piece free_l1_data_B_sram_head, used_l1_data_B_sram_head; 66static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
67static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
63#endif 68#endif
64 69
65#if L1_CODE_LENGTH != 0 70#if L1_CODE_LENGTH != 0
66static struct sram_piece free_l1_inst_sram_head, used_l1_inst_sram_head; 71static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
72static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
67#endif 73#endif
68 74
69#if L2_LENGTH != 0 75#if L2_LENGTH != 0
@@ -75,102 +81,115 @@ static struct kmem_cache *sram_piece_cache;
75/* L1 Scratchpad SRAM initialization function */ 81/* L1 Scratchpad SRAM initialization function */
76static void __init l1sram_init(void) 82static void __init l1sram_init(void)
77{ 83{
78 free_l1_ssram_head.next = 84 unsigned int cpu;
79 kmem_cache_alloc(sram_piece_cache, GFP_KERNEL); 85 for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
80 if (!free_l1_ssram_head.next) { 86 per_cpu(free_l1_ssram_head, cpu).next =
81 printk(KERN_INFO "Failed to initialize Scratchpad data SRAM\n"); 87 kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
82 return; 88 if (!per_cpu(free_l1_ssram_head, cpu).next) {
89 printk(KERN_INFO "Fail to initialize Scratchpad data SRAM.\n");
90 return;
91 }
92
93 per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu);
94 per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH;
95 per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
96 per_cpu(free_l1_ssram_head, cpu).next->next = NULL;
97
98 per_cpu(used_l1_ssram_head, cpu).next = NULL;
99
100 /* mutex initialize */
101 spin_lock_init(&per_cpu(l1sram_lock, cpu));
102 printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
103 L1_SCRATCH_LENGTH >> 10);
83 } 104 }
84
85 free_l1_ssram_head.next->paddr = (void *)L1_SCRATCH_START;
86 free_l1_ssram_head.next->size = L1_SCRATCH_LENGTH;
87 free_l1_ssram_head.next->pid = 0;
88 free_l1_ssram_head.next->next = NULL;
89
90 used_l1_ssram_head.next = NULL;
91
92 /* mutex initialize */
93 spin_lock_init(&l1sram_lock);
94
95 printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
96 L1_SCRATCH_LENGTH >> 10);
97} 105}
98 106
 static void __init l1_data_sram_init(void)
 {
+	unsigned int cpu;
 #if L1_DATA_A_LENGTH != 0
-	free_l1_data_A_sram_head.next =
-		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
-	if (!free_l1_data_A_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L1 Data A SRAM\n");
-		return;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_data_A_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
+			printk(KERN_INFO "Fail to initialize L1 Data A SRAM.\n");
+			return;
+		}
+
+		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
+			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
+		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
+			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
+		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
+			L1_DATA_A_LENGTH >> 10,
+			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
 	}
-
-	free_l1_data_A_sram_head.next->paddr =
-		(void *)L1_DATA_A_START + (_ebss_l1 - _sdata_l1);
-	free_l1_data_A_sram_head.next->size =
-		L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
-	free_l1_data_A_sram_head.next->pid = 0;
-	free_l1_data_A_sram_head.next->next = NULL;
-
-	used_l1_data_A_sram_head.next = NULL;
-
-	printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
-		L1_DATA_A_LENGTH >> 10,
-		free_l1_data_A_sram_head.next->size >> 10);
 #endif
 #if L1_DATA_B_LENGTH != 0
-	free_l1_data_B_sram_head.next =
-		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
-	if (!free_l1_data_B_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L1 Data B SRAM\n");
-		return;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_data_B_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
+			printk(KERN_INFO "Fail to initialize L1 Data B SRAM.\n");
+			return;
+		}
+
+		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
+			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
+		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
+			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
+		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
+			L1_DATA_B_LENGTH >> 10,
+			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
 	}
-
-	free_l1_data_B_sram_head.next->paddr =
-		(void *)L1_DATA_B_START + (_ebss_b_l1 - _sdata_b_l1);
-	free_l1_data_B_sram_head.next->size =
-		L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
-	free_l1_data_B_sram_head.next->pid = 0;
-	free_l1_data_B_sram_head.next->next = NULL;
-
-	used_l1_data_B_sram_head.next = NULL;
-
-	printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
-		L1_DATA_B_LENGTH >> 10,
-		free_l1_data_B_sram_head.next->size >> 10);
 #endif
 
-	/* mutex initialize */
-	spin_lock_init(&l1_data_sram_lock);
+#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
+		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
+#endif
 }
 
 static void __init l1_inst_sram_init(void)
 {
 #if L1_CODE_LENGTH != 0
-	free_l1_inst_sram_head.next =
-		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
-	if (!free_l1_inst_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
-		return;
+	unsigned int cpu;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		per_cpu(free_l1_inst_sram_head, cpu).next =
+			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
+		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
+			printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
+			return;
+		}
+
+		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
+			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
+		per_cpu(free_l1_inst_sram_head, cpu).next->size =
+			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
+		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
+		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;
+
+		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;
+
+		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
+			L1_CODE_LENGTH >> 10,
+			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);
+
+		/* mutex initialize */
+		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
 	}
-
-	free_l1_inst_sram_head.next->paddr =
-		(void *)L1_CODE_START + (_etext_l1 - _stext_l1);
-	free_l1_inst_sram_head.next->size =
-		L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
-	free_l1_inst_sram_head.next->pid = 0;
-	free_l1_inst_sram_head.next->next = NULL;
-
-	used_l1_inst_sram_head.next = NULL;
-
-	printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
-		L1_CODE_LENGTH >> 10,
-		free_l1_inst_sram_head.next->size >> 10);
 #endif
-
-	/* mutex initialize */
-	spin_lock_init(&l1_inst_sram_lock);
 }
 
 static void __init l2_sram_init(void)
@@ -179,7 +198,7 @@ static void __init l2_sram_init(void)
 	free_l2_sram_head.next =
 		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
 	if (!free_l2_sram_head.next) {
-		printk(KERN_INFO "Failed to initialize L2 SRAM\n");
+		printk(KERN_INFO "Fail to initialize L2 SRAM.\n");
 		return;
 	}
 
@@ -200,6 +219,7 @@ static void __init l2_sram_init(void)
 	/* mutex initialize */
 	spin_lock_init(&l2_sram_lock);
 }
+
 void __init bfin_sram_init(void)
 {
 	sram_piece_cache = kmem_cache_create("sram_piece_cache",
@@ -353,20 +373,20 @@ int sram_free(const void *addr)
 {
 
 #if L1_CODE_LENGTH != 0
-	if (addr >= (void *)L1_CODE_START
-	    && addr < (void *)(L1_CODE_START + L1_CODE_LENGTH))
+	if (addr >= (void *)get_l1_code_start()
+	    && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
 		return l1_inst_sram_free(addr);
 	else
 #endif
 #if L1_DATA_A_LENGTH != 0
-	if (addr >= (void *)L1_DATA_A_START
-	    && addr < (void *)(L1_DATA_A_START + L1_DATA_A_LENGTH))
+	if (addr >= (void *)get_l1_data_a_start()
+	    && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
 		return l1_data_A_sram_free(addr);
 	else
 #endif
 #if L1_DATA_B_LENGTH != 0
-	if (addr >= (void *)L1_DATA_B_START
-	    && addr < (void *)(L1_DATA_B_START + L1_DATA_B_LENGTH))
+	if (addr >= (void *)get_l1_data_b_start()
+	    && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
 		return l1_data_B_sram_free(addr);
 	else
 #endif
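(Every allocator and free path below uses the same bracket: get_cpu() pins
the task to the current core by disabling preemption, that core's own lock
is then taken with interrupts off, and put_cpu() re-enables preemption
afterwards. A sketch of the bracket, with demo_lock again a stand-in for
the real per-CPU locks:

	#include <linux/percpu.h>
	#include <linux/smp.h>
	#include <linux/spinlock.h>

	static DEFINE_PER_CPU(spinlock_t, demo_lock);

	static void demo_critical_section(void)
	{
		unsigned long flags;
		unsigned int cpu;

		cpu = get_cpu();	/* disables preemption */
		spin_lock_irqsave(&per_cpu(demo_lock, cpu), flags);
		/* ... operate on this CPU's free/used lists ... */
		spin_unlock_irqrestore(&per_cpu(demo_lock, cpu), flags);
		put_cpu();		/* re-enables preemption */
	}

Without the bracket the task could migrate between looking up "cpu" and
taking the lock, locking one core's list while touching another's.)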
@@ -384,17 +404,20 @@ void *l1_data_A_sram_alloc(size_t size)
 {
 	unsigned long flags;
 	void *addr = NULL;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
 
 #if L1_DATA_A_LENGTH != 0
-	addr = _sram_alloc(size, &free_l1_data_A_sram_head,
-			&used_l1_data_A_sram_head);
+	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu));
 #endif
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
 		(long unsigned int)addr, size);
@@ -407,19 +430,22 @@ int l1_data_A_sram_free(const void *addr)
 {
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
 
 #if L1_DATA_A_LENGTH != 0
-	ret = _sram_free(addr, &free_l1_data_A_sram_head,
-			&used_l1_data_A_sram_head);
+	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu));
 #else
 	ret = -1;
 #endif
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 }
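(A caller-side sketch of the pair above; demo_use_l1_data() is illustrative
only, and the prototypes are assumed to come from the Blackfin SRAM
headers:

	#include <linux/errno.h>

	static int demo_use_l1_data(void)
	{
		void *buf = l1_data_A_sram_alloc(256);

		if (!buf)
			return -ENOMEM;
		/* ... keep hot data or DMA descriptors in L1 ... */
		return l1_data_A_sram_free(buf);
	}

Since the buffer comes from the L1 of whichever core the caller ran on, it
should be used and freed from that same core.)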
@@ -430,15 +456,18 @@ void *l1_data_B_sram_alloc(size_t size)
 #if L1_DATA_B_LENGTH != 0
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
 
-	addr = _sram_alloc(size, &free_l1_data_B_sram_head,
-			&used_l1_data_B_sram_head);
+	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
 		(long unsigned int)addr, size);
@@ -455,15 +484,18 @@ int l1_data_B_sram_free(const void *addr)
 #if L1_DATA_B_LENGTH != 0
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_data_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
 
-	ret = _sram_free(addr, &free_l1_data_B_sram_head,
-			&used_l1_data_B_sram_head);
+	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_data_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 #else
@@ -509,15 +541,18 @@ void *l1_inst_sram_alloc(size_t size)
 #if L1_CODE_LENGTH != 0
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_inst_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
 
-	addr = _sram_alloc(size, &free_l1_inst_sram_head,
-			&used_l1_inst_sram_head);
+	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_inst_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
+	put_cpu();
 
 	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
 		(long unsigned int)addr, size);
@@ -534,15 +569,18 @@ int l1_inst_sram_free(const void *addr)
 #if L1_CODE_LENGTH != 0
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1_inst_sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
 
-	ret = _sram_free(addr, &free_l1_inst_sram_head,
-			&used_l1_inst_sram_head);
+	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1_inst_sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 #else
@@ -556,15 +594,18 @@ void *l1sram_alloc(size_t size)
 {
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
 
-	addr = _sram_alloc(size, &free_l1_ssram_head,
-			&used_l1_ssram_head);
+	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+	put_cpu();
 
 	return addr;
 }
@@ -574,15 +615,18 @@ void *l1sram_alloc_max(size_t *psize)
 {
 	unsigned long flags;
 	void *addr;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
 
-	addr = _sram_alloc_max(&free_l1_ssram_head,
-			&used_l1_ssram_head, psize);
+	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu), psize);
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+	put_cpu();
 
 	return addr;
 }
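(l1sram_alloc_max() hands back the largest free scratchpad block and
reports its size through the out parameter; a short illustrative caller:

	#include <linux/kernel.h>

	static void demo_grab_scratchpad(void)
	{
		size_t size = 0;
		void *p = l1sram_alloc_max(&size);

		if (p)
			printk(KERN_INFO "largest scratchpad block: %zu bytes\n", size);
	}

As with the other allocators, the block comes from the current core's
scratchpad.)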
@@ -592,15 +636,18 @@ int l1sram_free(const void *addr)
 {
 	unsigned long flags;
 	int ret;
+	unsigned int cpu;
 
+	cpu = get_cpu();
 	/* add mutex operation */
-	spin_lock_irqsave(&l1sram_lock, flags);
+	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
 
-	ret = _sram_free(addr, &free_l1_ssram_head,
-			&used_l1_ssram_head);
+	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
+			&per_cpu(used_l1_ssram_head, cpu));
 
 	/* add mutex operation */
-	spin_unlock_irqrestore(&l1sram_lock, flags);
+	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
+	put_cpu();
 
 	return ret;
 }
@@ -761,33 +808,36 @@ static int sram_proc_read(char *buf, char **start, off_t offset, int count,
 		int *eof, void *data)
 {
 	int len = 0;
+	unsigned int cpu;
 
-	if (_sram_proc_read(buf, &len, count, "Scratchpad",
-			&free_l1_ssram_head, &used_l1_ssram_head))
-		goto not_done;
+	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
+		if (_sram_proc_read(buf, &len, count, "Scratchpad",
+			&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
+			goto not_done;
 #if L1_DATA_A_LENGTH != 0
 	if (_sram_proc_read(buf, &len, count, "L1 Data A",
-			&free_l1_data_A_sram_head,
-			&used_l1_data_A_sram_head))
+			&per_cpu(free_l1_data_A_sram_head, cpu),
+			&per_cpu(used_l1_data_A_sram_head, cpu)))
 		goto not_done;
 #endif
 #if L1_DATA_B_LENGTH != 0
 	if (_sram_proc_read(buf, &len, count, "L1 Data B",
-			&free_l1_data_B_sram_head,
-			&used_l1_data_B_sram_head))
+			&per_cpu(free_l1_data_B_sram_head, cpu),
+			&per_cpu(used_l1_data_B_sram_head, cpu)))
 		goto not_done;
 #endif
 #if L1_CODE_LENGTH != 0
 	if (_sram_proc_read(buf, &len, count, "L1 Instruction",
-			&free_l1_inst_sram_head, &used_l1_inst_sram_head))
-		goto not_done;
+			&per_cpu(free_l1_inst_sram_head, cpu),
+			&per_cpu(used_l1_inst_sram_head, cpu)))
+			goto not_done;
 #endif
+	}
 #if L2_LENGTH != 0
-	if (_sram_proc_read(buf, &len, count, "L2",
-			&free_l2_sram_head, &used_l2_sram_head))
+	if (_sram_proc_read(buf, &len, count, "L2", &free_l2_sram_head,
+			&used_l2_sram_head))
 		goto not_done;
 #endif
-
 	*eof = 1;
  not_done:
 	return len;