Diffstat (limited to 'arch/blackfin/kernel')
 arch/blackfin/kernel/Makefile            |   1
 arch/blackfin/kernel/cplb-mpu/cplbinit.c |  10
 arch/blackfin/kernel/cplb-mpu/cplbmgr.c  |  36
 arch/blackfin/kernel/ipipe.c             |   7
 arch/blackfin/kernel/irqchip.c           | 114
 arch/blackfin/kernel/kgdb.c              | 297
 arch/blackfin/kernel/mcount.S            |  70
 arch/blackfin/kernel/process.c           | 151
 arch/blackfin/kernel/setup.c             | 122
 arch/blackfin/kernel/traps.c             |  60
 10 files changed, 406 insertions(+), 462 deletions(-)
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 3731088e181b..141d9281e4b0 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -20,7 +20,6 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
 CFLAGS_REMOVE_ftrace.o = -pg
 
 obj-$(CONFIG_IPIPE) += ipipe.o
-obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
 obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
 obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
 obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index c006a44527bf..36193eed9a1f 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -46,13 +46,13 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 
 	printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n");
 
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
 	i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
 #endif
 
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
 	d_cache = CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
+#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
 	d_cache |= CPLB_L1_AOW | CPLB_WT;
 #endif
 #endif
@@ -91,9 +91,9 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 	/* Cover L2 memory */
 #if L2_LENGTH > 0
 	dcplb_tbl[cpu][i_d].addr = L2_START;
-	dcplb_tbl[cpu][i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB;
+	dcplb_tbl[cpu][i_d++].data = L2_DMEMORY;
 	icplb_tbl[cpu][i_i].addr = L2_START;
-	icplb_tbl[cpu][i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB;
+	icplb_tbl[cpu][i_i++].data = L2_IMEMORY;
 #endif
 
 	first_mask_dcplb = i_d;
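
Editor's note on the two hunks above: a CPLB entry is an address plus a bit-packed "data" word, and the renamed CONFIG_BFIN_EXTMEM_* options only change which bits get OR'd into that word. A minimal standalone sketch of the composition pattern follows (illustrative flag values, not the real asm/cplb.h bit assignments; EXTMEM_DCACHEABLE and EXTMEM_WRITETHROUGH are hypothetical stand-ins for the Kconfig symbols):

    #include <stdio.h>

    /* Illustrative bits only; the real values live in asm/cplb.h. */
    #define CPLB_VALID    0x00000001
    #define CPLB_SUPV_WR  0x00000010
    #define CPLB_DIRTY    0x00000080
    #define CPLB_L1_CHBL  0x00001000  /* cacheable in L1 */
    #define CPLB_WT       0x00004000  /* write-through instead of write-back */
    #define CPLB_L1_AOW   0x00008000  /* allocate cache line on writes */
    #define PAGE_SIZE_4KB 0x00010000

    int main(void)
    {
        unsigned long d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;

    #ifdef EXTMEM_DCACHEABLE        /* stand-in for CONFIG_BFIN_EXTMEM_DCACHEABLE */
        d_data |= CPLB_L1_CHBL;
    # ifdef EXTMEM_WRITETHROUGH     /* stand-in for CONFIG_BFIN_EXTMEM_WRITETHROUGH */
        d_data |= CPLB_L1_AOW | CPLB_WT;
    # endif
    #endif
        printf("d_data = %#lx\n", d_data);
        return 0;
    }

Built with e.g. cc -DEXTMEM_DCACHEABLE -DEXTMEM_WRITETHROUGH, it prints the same kind of flag word the hunk above installs for external memory.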
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 784923e52a9a..bcdfe9b0b71f 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -150,15 +150,19 @@ static noinline int dcplb_miss(unsigned int cpu)
 	nr_dcplb_miss[cpu]++;
 
 	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
 	if (bfin_addr_dcacheable(addr)) {
 		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
-#ifdef CONFIG_BFIN_WT
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
 		d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+# endif
 	}
 #endif
-	if (addr >= physical_mem_end) {
+
+	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+		addr = L2_START;
+		d_data = L2_DMEMORY;
+	} else if (addr >= physical_mem_end) {
 		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
 		    && (status & FAULT_USERSUPV)) {
 			addr &= ~0x3fffff;
@@ -235,7 +239,7 @@ static noinline int icplb_miss(unsigned int cpu)
 
 	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
 
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
 	/*
 	 * Normal RAM, and possibly the reserved memory area, are
 	 * cacheable.
@@ -245,7 +249,10 @@ static noinline int icplb_miss(unsigned int cpu)
 		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
 #endif
 
-	if (addr >= physical_mem_end) {
+	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+		addr = L2_START;
+		i_data = L2_IMEMORY;
+	} else if (addr >= physical_mem_end) {
 		if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
 		    && (status & FAULT_USERSUPV)) {
 			addr &= ~(1 * 1024 * 1024 - 1);
@@ -365,13 +372,18 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
 	local_irq_save_hw(flags);
 	current_rwx_mask[cpu] = masks;
 
-	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
-	d_data |= CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
-	d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+		addr = L2_START;
+		d_data = L2_DMEMORY;
+	} else {
+		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+		d_data |= CPLB_L1_CHBL;
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+		d_data |= CPLB_L1_AOW | CPLB_WT;
+# endif
 #endif
+	}
 
 	disable_dcplb();
 	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
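
The test `L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH` that appears three times above is a half-open range check, with the leading `L2_LENGTH &&` letting the compiler drop the branch entirely on parts without L2 SRAM. A hedged sketch of the resulting miss-handler ladder (placeholder constants, not real part values):

    #include <stdbool.h>
    #include <stdio.h>

    #define L2_START  0xFEB00000UL  /* placeholder base address */
    #define L2_LENGTH 0x20000UL     /* placeholder size; 0 on parts without L2 */

    /* Half-open [start, start + len) check, as in the hunks above. */
    static bool in_l2(unsigned long addr)
    {
        return L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH;
    }

    int main(void)
    {
        unsigned long fault = 0xFEB01234UL;

        if (in_l2(fault))       /* one fixed entry covers all of L2 */
            printf("install L2 entry at %#lx\n", (unsigned long)L2_START);
        else                    /* otherwise fall back to a 4KB page */
            printf("install 4KB page at %#lx\n", fault & ~0xFFFUL);
        return 0;
    }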
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index d8cde1fc5cb9..b8d22034b9a6 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__ipipe_freq_scale);
 
 atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
 
-unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
 EXPORT_SYMBOL(__ipipe_irq_lvmask);
 
 static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
@@ -342,8 +342,3 @@ void ___ipipe_sync_pipeline(unsigned long syncmask)
 }
 
 EXPORT_SYMBOL(show_stack);
-
-#ifdef CONFIG_IPIPE_TRACE_MCOUNT
-void notrace _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 6e31e935bb31..4b5fd36187d9 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -38,38 +38,15 @@
 #include <asm/pda.h>
 
 static atomic_t irq_err_count;
-static spinlock_t irq_controller_lock;
-
-/*
- * Dummy mask/unmask handler
- */
-void dummy_mask_unmask_irq(unsigned int irq)
-{
-}
-
 void ack_bad_irq(unsigned int irq)
 {
 	atomic_inc(&irq_err_count);
 	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
 }
 
-static struct irq_chip bad_chip = {
-	.ack = dummy_mask_unmask_irq,
-	.mask = dummy_mask_unmask_irq,
-	.unmask = dummy_mask_unmask_irq,
-};
-
-static int bad_stats;
 static struct irq_desc bad_irq_desc = {
-	.status = IRQ_DISABLED,
-	.chip = &bad_chip,
 	.handle_irq = handle_bad_irq,
-	.depth = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-	.kstat_irqs = &bad_stats,
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
 };
 
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -77,6 +54,7 @@ static struct irq_desc bad_irq_desc = {
 #error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
 #endif
 
+#ifdef CONFIG_PROC_FS
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
@@ -108,50 +86,29 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 	return 0;
 }
-
-/*
- * do_IRQ handles all hardware IRQs. Decoded IRQs should not
- * come via this function. Instead, they should provide their
- * own 'handler'
- */
-#ifdef CONFIG_DO_IRQ_L1
-__attribute__((l1_text))
-#endif
-asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
-{
-	struct pt_regs *old_regs;
-	struct irq_desc *desc = irq_desc + irq;
-#ifndef CONFIG_IPIPE
-	unsigned short pending, other_ints;
 #endif
-	old_regs = set_irq_regs(regs);
 
-	/*
-	 * Some hardware gives randomly wrong interrupts. Rather
-	 * than crashing, do something sensible.
-	 */
-	if (irq >= NR_IRQS)
-		desc = &bad_irq_desc;
-
-	irq_enter();
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
+static void check_stack_overflow(int irq)
+{
 	/* Debugging check for stack overflow: is there less than STACK_WARN free? */
-	{
-		long sp;
-
-		sp = __get_SP() & (THREAD_SIZE-1);
+	long sp = __get_SP() & (THREAD_SIZE - 1);
 
 	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
 		dump_stack();
-		printk(KERN_EMERG "%s: possible stack overflow while handling irq %i "
-			" only %ld bytes free\n",
-			__func__, irq, sp - sizeof(struct thread_info));
-		}
+		pr_emerg("irq%i: possible stack overflow only %ld bytes free\n",
+			irq, sp - sizeof(struct thread_info));
 	}
+}
+#else
+static inline void check_stack_overflow(int irq) { }
 #endif
-	generic_handle_irq(irq);
 
 #ifndef CONFIG_IPIPE
+static void maybe_lower_to_irq14(void)
+{
+	unsigned short pending, other_ints;
+
 	/*
 	 * If we're the only interrupt running (ignoring IRQ15 which
 	 * is for syscalls), lower our priority to IRQ14 so that
@@ -165,7 +122,38 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	other_ints = pending & (pending - 1);
 	if (other_ints == 0)
 		lower_to_irq14();
-#endif /* !CONFIG_IPIPE */
+}
+#else
+static inline void maybe_lower_to_irq14(void) { }
+#endif
+
+/*
+ * do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'
+ */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+
+	check_stack_overflow(irq);
+
+	/*
+	 * Some hardware gives randomly wrong interrupts. Rather
+	 * than crashing, do something sensible.
+	 */
+	if (irq >= NR_IRQS)
+		handle_bad_irq(irq, &bad_irq_desc);
+	else
+		generic_handle_irq(irq);
+
+	maybe_lower_to_irq14();
+
 	irq_exit();
 
 	set_irq_regs(old_regs);
@@ -173,14 +161,6 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 void __init init_IRQ(void)
 {
-	struct irq_desc *desc;
-	int irq;
-
-	spin_lock_init(&irq_controller_lock);
-	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
-		*desc = bad_irq_desc;
-	}
-
 	init_arch_irq();
 
 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
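
The `pending & (pending - 1)` line that moves into maybe_lower_to_irq14() above is the classic clear-lowest-set-bit trick: subtracting 1 flips the lowest set bit and every bit below it, so the AND is zero exactly when at most one bit was set, i.e. when the current interrupt is the only one pending. A standalone demonstration:

    #include <stdio.h>

    static const char *verdict(unsigned short pending)
    {
        /* x & (x - 1) clears the lowest set bit; zero means at most
         * one interrupt was pending */
        unsigned short other_ints = pending & (pending - 1);
        return other_ints == 0 ? "lower to IRQ14" : "stay at current IRQ";
    }

    int main(void)
    {
        printf("pending=0x40: %s\n", verdict(0x40));  /* one bit set */
        printf("pending=0x44: %s\n", verdict(0x44));  /* two bits set */
        return 0;
    }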
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index da28f796ad78..cce79d05b90b 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -34,15 +34,6 @@ int gdb_bfin_vector = -1;
 #error change the definition of slavecpulocks
 #endif
 
-#define IN_MEM(addr, size, l1_addr, l1_size) \
-({ \
-	unsigned long __addr = (unsigned long)(addr); \
-	(l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
-})
-#define ASYNC_BANK_SIZE \
-	(ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
-	 ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
-
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
 	gdb_regs[BFIN_R0] = regs->r0;
@@ -463,41 +454,88 @@ static int hex(char ch)
 
 static int validate_memory_access_address(unsigned long addr, int size)
 {
-	int cpu = raw_smp_processor_id();
-
-	if (size < 0)
+	if (size < 0 || addr == 0)
 		return -EFAULT;
-	if (addr >= 0x1000 && (addr + size) <= physical_mem_end)
-		return 0;
-	if (addr >= SYSMMR_BASE)
-		return 0;
-	if (IN_MEM(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
-		return 0;
-	if (cpu == 0) {
-		if (IN_MEM(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, L1_CODE_START, L1_CODE_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
-			return 0;
-#ifdef CONFIG_SMP
-	} else if (cpu == 1) {
-		if (IN_MEM(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+	return bfin_mem_access_type(addr, size);
+}
+
+static int bfin_probe_kernel_read(char *dst, char *src, int size)
+{
+	unsigned long lsrc = (unsigned long)src;
+	int mem_type;
+
+	mem_type = validate_memory_access_address(lsrc, size);
+	if (mem_type < 0)
+		return mem_type;
+
+	if (lsrc >= SYSMMR_BASE) {
+		if (size == 2 && lsrc % 2 == 0) {
+			u16 mmr = bfin_read16(src);
+			memcpy(dst, &mmr, sizeof(mmr));
 			return 0;
-		if (IN_MEM(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+		} else if (size == 4 && lsrc % 4 == 0) {
+			u32 mmr = bfin_read32(src);
+			memcpy(dst, &mmr, sizeof(mmr));
 			return 0;
-		if (IN_MEM(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+		}
+	} else {
+		switch (mem_type) {
+		case BFIN_MEM_ACCESS_CORE:
+		case BFIN_MEM_ACCESS_CORE_ONLY:
+			return probe_kernel_read(dst, src, size);
+		/* XXX: should support IDMA here with SMP */
+		case BFIN_MEM_ACCESS_DMA:
+			if (dma_memcpy(dst, src, size))
+				return 0;
+			break;
+		case BFIN_MEM_ACCESS_ITEST:
+			if (isram_memcpy(dst, src, size))
+				return 0;
+			break;
+		}
+	}
+
+	return -EFAULT;
+}
+
+static int bfin_probe_kernel_write(char *dst, char *src, int size)
+{
+	unsigned long ldst = (unsigned long)dst;
+	int mem_type;
+
+	mem_type = validate_memory_access_address(ldst, size);
+	if (mem_type < 0)
+		return mem_type;
+
+	if (ldst >= SYSMMR_BASE) {
+		if (size == 2 && ldst % 2 == 0) {
+			u16 mmr;
+			memcpy(&mmr, src, sizeof(mmr));
+			bfin_write16(dst, mmr);
 			return 0;
-		if (IN_MEM(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
+		} else if (size == 4 && ldst % 4 == 0) {
+			u32 mmr;
+			memcpy(&mmr, src, sizeof(mmr));
+			bfin_write32(dst, mmr);
 			return 0;
-#endif
+		}
+	} else {
+		switch (mem_type) {
+		case BFIN_MEM_ACCESS_CORE:
+		case BFIN_MEM_ACCESS_CORE_ONLY:
+			return probe_kernel_write(dst, src, size);
+		/* XXX: should support IDMA here with SMP */
+		case BFIN_MEM_ACCESS_DMA:
+			if (dma_memcpy(dst, src, size))
+				return 0;
+			break;
+		case BFIN_MEM_ACCESS_ITEST:
+			if (isram_memcpy(dst, src, size))
+				return 0;
+			break;
+		}
 	}
 
-	if (IN_MEM(addr, size, L2_START, L2_LENGTH))
-		return 0;
-
 	return -EFAULT;
 }
 
@@ -509,14 +547,6 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
 {
 	char *tmp;
 	int err;
-	unsigned char *pch;
-	unsigned short mmr16;
-	unsigned long mmr32;
-	int cpu = raw_smp_processor_id();
-
-	err = validate_memory_access_address((unsigned long)mem, count);
-	if (err)
-		return err;
 
 	/*
 	 * We use the upper half of buf as an intermediate buffer for the
@@ -524,44 +554,7 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
 	 */
 	tmp = buf + count;
 
-	if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
-		switch (count) {
-		case 2:
-			if ((unsigned int)mem % 2 == 0) {
-				mmr16 = *(unsigned short *)mem;
-				pch = (unsigned char *)&mmr16;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				tmp -= 2;
-			} else
-				err = -EFAULT;
-			break;
-		case 4:
-			if ((unsigned int)mem % 4 == 0) {
-				mmr32 = *(unsigned long *)mem;
-				pch = (unsigned char *)&mmr32;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				tmp -= 4;
-			} else
-				err = -EFAULT;
-			break;
-		default:
-			err = -EFAULT;
-		}
-	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-	) {
-		/* access L1 instruction SRAM*/
-		if (dma_memcpy(tmp, mem, count) == NULL)
-			err = -EFAULT;
-	} else
-		err = probe_kernel_read(tmp, mem, count);
-
+	err = bfin_probe_kernel_read(tmp, mem, count);
 	if (!err) {
 		while (count > 0) {
 			buf = pack_hex_byte(buf, *tmp);
@@ -582,13 +575,8 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
  */
 int kgdb_ebin2mem(char *buf, char *mem, int count)
 {
-	char *tmp_old;
-	char *tmp_new;
-	unsigned short *mmr16;
-	unsigned long *mmr32;
-	int err;
+	char *tmp_old, *tmp_new;
 	int size;
-	int cpu = raw_smp_processor_id();
 
 	tmp_old = tmp_new = buf;
 
@@ -601,41 +589,7 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
 		tmp_old++;
 	}
 
-	err = validate_memory_access_address((unsigned long)mem, size);
-	if (err)
-		return err;
-
-	if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
-		switch (size) {
-		case 2:
-			if ((unsigned int)mem % 2 == 0) {
-				mmr16 = (unsigned short *)buf;
-				*(unsigned short *)mem = *mmr16;
-			} else
-				err = -EFAULT;
-			break;
-		case 4:
-			if ((unsigned int)mem % 4 == 0) {
-				mmr32 = (unsigned long *)buf;
-				*(unsigned long *)mem = *mmr32;
-			} else
-				err = -EFAULT;
-			break;
-		default:
-			err = -EFAULT;
-		}
-	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-	) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy(mem, buf, size) == NULL)
-			err = -EFAULT;
-	} else
-		err = probe_kernel_write(mem, buf, size);
-
-	return err;
+	return bfin_probe_kernel_write(mem, buf, count);
 }
 
 /*
@@ -645,16 +599,7 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
  */
 int kgdb_hex2mem(char *buf, char *mem, int count)
 {
-	char *tmp_raw;
-	char *tmp_hex;
-	unsigned short *mmr16;
-	unsigned long *mmr32;
-	int err;
-	int cpu = raw_smp_processor_id();
-
-	err = validate_memory_access_address((unsigned long)mem, count);
-	if (err)
-		return err;
+	char *tmp_raw, *tmp_hex;
 
 	/*
 	 * We use the upper half of buf as an intermediate buffer for the
@@ -669,39 +614,18 @@ int kgdb_hex2mem(char *buf, char *mem, int count)
 		*tmp_raw |= hex(*tmp_hex--) << 4;
 	}
 
-	if ((unsigned int)mem >= SYSMMR_BASE) { /*access MMR registers*/
-		switch (count) {
-		case 2:
-			if ((unsigned int)mem % 2 == 0) {
-				mmr16 = (unsigned short *)tmp_raw;
-				*(unsigned short *)mem = *mmr16;
-			} else
-				err = -EFAULT;
-			break;
-		case 4:
-			if ((unsigned int)mem % 4 == 0) {
-				mmr32 = (unsigned long *)tmp_raw;
-				*(unsigned long *)mem = *mmr32;
-			} else
-				err = -EFAULT;
-			break;
-		default:
-			err = -EFAULT;
-		}
-	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-		|| (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-	) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy(mem, tmp_raw, count) == NULL)
-			err = -EFAULT;
-	} else
-		err = probe_kernel_write(mem, tmp_raw, count);
-
-	return err;
+	return bfin_probe_kernel_write(mem, tmp_raw, count);
 }
 
+#define IN_MEM(addr, size, l1_addr, l1_size) \
+({ \
+	unsigned long __addr = (unsigned long)(addr); \
+	(l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
+})
+#define ASYNC_BANK_SIZE \
+	(ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
+	 ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
+
 int kgdb_validate_break_address(unsigned long addr)
 {
 	int cpu = raw_smp_processor_id();
@@ -724,46 +648,17 @@ int kgdb_validate_break_address(unsigned long addr)
 
 int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
 {
-	int err;
-	int cpu = raw_smp_processor_id();
-
-	if ((cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-	    || (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-	) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy(saved_instr, (void *)addr, BREAK_INSTR_SIZE)
-		    == NULL)
-			return -EFAULT;
-
-		if (dma_memcpy((void *)addr, arch_kgdb_ops.gdb_bpt_instr,
-		    BREAK_INSTR_SIZE) == NULL)
-			return -EFAULT;
-
-		return 0;
-	} else {
-		err = probe_kernel_read(saved_instr, (char *)addr,
-			BREAK_INSTR_SIZE);
-		if (err)
-			return err;
-
-		return probe_kernel_write((char *)addr,
-			arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
-	}
+	int err = bfin_probe_kernel_read(saved_instr, (char *)addr,
+					 BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+	return bfin_probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+				       BREAK_INSTR_SIZE);
 }
 
 int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
 {
-	if (IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL)
-			return -EFAULT;
-
-		return 0;
-	} else
-		return probe_kernel_write((char *)addr,
-			(char *)bundle, BREAK_INSTR_SIZE);
+	return bfin_probe_kernel_write((char *)addr, bundle, BREAK_INSTR_SIZE);
 }
 
 int kgdb_arch_init(void)
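
One detail in bfin_probe_kernel_read/write above deserves a gloss: MMRs tolerate only a single access of exactly the right width and alignment, while gdb's buffer may be byte-aligned, hence the bounce through a local u16/u32 plus memcpy. The same pattern in portable form (a sketch with a plain pointer load standing in for bfin_read16, not the kernel helper):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* One aligned 16-bit load, then a byte-wise copy into a destination
     * buffer of any alignment. */
    static void read16_to_buf(const volatile uint16_t *reg, char *dst)
    {
        uint16_t mmr = *reg;            /* the single sized access */
        memcpy(dst, &mmr, sizeof(mmr)); /* alignment-safe copy-out */
    }

    int main(void)
    {
        uint16_t fake_mmr = 0xBEEF;
        char buf[4];

        read16_to_buf(&fake_mmr, buf + 1);      /* unaligned on purpose */
        printf("%02x %02x\n", (unsigned char)buf[1], (unsigned char)buf[2]);
        return 0;
    }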
diff --git a/arch/blackfin/kernel/mcount.S b/arch/blackfin/kernel/mcount.S
deleted file mode 100644
index edcfb3865f46..000000000000
--- a/arch/blackfin/kernel/mcount.S
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * linux/arch/blackfin/mcount.S
- *
- * Copyright (C) 2006 Analog Devices Inc.
- *
- * 2007/04/12 Save index, length, modify and base registers. --rpm
- */
-
-#include <linux/linkage.h>
-#include <asm/blackfin.h>
-
-.text
-
-.align 4	/* just in case */
-
-ENTRY(__mcount)
-	[--sp] = i0;
-	[--sp] = i1;
-	[--sp] = i2;
-	[--sp] = i3;
-	[--sp] = l0;
-	[--sp] = l1;
-	[--sp] = l2;
-	[--sp] = l3;
-	[--sp] = m0;
-	[--sp] = m1;
-	[--sp] = m2;
-	[--sp] = m3;
-	[--sp] = b0;
-	[--sp] = b1;
-	[--sp] = b2;
-	[--sp] = b3;
-	[--sp] = ( r7:0, p5:0 );
-	[--sp] = ASTAT;
-
-	p1.L = _ipipe_trace_enable;
-	p1.H = _ipipe_trace_enable;
-	r7 = [p1];
-	CC = r7 == 0;
-	if CC jump out;
-	link 0x10;
-	r0 = 0x0;
-	[sp + 0xc] = r0;	/* v */
-	r0 = 0x0;		/* type: IPIPE_TRACE_FN */
-	r1 = rets;
-	p0 = [fp];		/* p0: Prior FP */
-	r2 = [p0 + 4];		/* r2: Prior RETS */
-	call ___ipipe_trace;
-	unlink;
-out:
-	ASTAT = [sp++];
-	( r7:0, p5:0 ) = [sp++];
-	b3 = [sp++];
-	b2 = [sp++];
-	b1 = [sp++];
-	b0 = [sp++];
-	m3 = [sp++];
-	m2 = [sp++];
-	m1 = [sp++];
-	m0 = [sp++];
-	l3 = [sp++];
-	l2 = [sp++];
-	l1 = [sp++];
-	l0 = [sp++];
-	i3 = [sp++];
-	i2 = [sp++];
-	i1 = [sp++];
-	i0 = [sp++];
-	rts;
-ENDPROC(__mcount)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 3e1d86e456f6..79cad0ac5892 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -344,6 +344,87 @@ void finish_atomic_sections (struct pt_regs *regs)
 	}
 }
 
+static inline
+int in_mem(unsigned long addr, unsigned long size,
+	   unsigned long start, unsigned long end)
+{
+	return addr >= start && addr + size <= end;
+}
+static inline
+int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
+		     unsigned long const_addr, unsigned long const_size)
+{
+	return const_size &&
+	       in_mem(addr, size, const_addr + off, const_addr + const_size);
+}
+static inline
+int in_mem_const(unsigned long addr, unsigned long size,
+		 unsigned long const_addr, unsigned long const_size)
+{
+	return in_mem_const_off(addr, size, 0, const_addr, const_size);
+}
+#define IN_ASYNC(bnum, bctlnum) \
+({ \
+	(bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \
+	bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \
+	BFIN_MEM_ACCESS_CORE; \
+})
+
+int bfin_mem_access_type(unsigned long addr, unsigned long size)
+{
+	int cpu = raw_smp_processor_id();
+
+	/* Check that things do not wrap around */
+	if (addr > ULONG_MAX - size)
+		return -EFAULT;
+
+	if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
+		return BFIN_MEM_ACCESS_CORE;
+
+	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
+		return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+	if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+		return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+	if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
+		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+	if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
+		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#ifdef COREB_L1_CODE_START
+	if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
+		return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
+	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
+		return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
+	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
+		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
+		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
+#endif
+	if (in_mem_const(addr, size, L2_START, L2_LENGTH))
+		return BFIN_MEM_ACCESS_CORE;
+
+	if (addr >= SYSMMR_BASE)
+		return BFIN_MEM_ACCESS_CORE_ONLY;
+
+	/* We can't read EBIU banks that aren't enabled or we end up hanging
+	 * on the access to the async space.
+	 */
+	if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE))
+		return IN_ASYNC(0, 0);
+	if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE))
+		return IN_ASYNC(1, 0);
+	if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE))
+		return IN_ASYNC(2, 1);
+	if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE))
+		return IN_ASYNC(3, 1);
+
+	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+		return BFIN_MEM_ACCESS_CORE;
+	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+		return BFIN_MEM_ACCESS_DMA;
+
+	return -EFAULT;
+}
+
 #if defined(CONFIG_ACCESS_CHECK)
 #ifdef CONFIG_ACCESS_OK_L1
 __attribute__((l1_text))
@@ -353,51 +434,61 @@ int _access_ok(unsigned long addr, unsigned long size)
 {
 	if (size == 0)
 		return 1;
-	if (addr > (addr + size))
+	/* Check that things do not wrap around */
+	if (addr > ULONG_MAX - size)
 		return 0;
 	if (segment_eq(get_fs(), KERNEL_DS))
 		return 1;
 #ifdef CONFIG_MTD_UCLINUX
-	if (addr >= memory_start && (addr + size) <= memory_end)
-		return 1;
-	if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
+	if (1)
+#else
+	if (0)
+#endif
+	{
+		if (in_mem(addr, size, memory_start, memory_end))
+			return 1;
+		if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
+			return 1;
+# ifndef CONFIG_ROMFS_ON_MTD
+		if (0)
+# endif
+		/* For XIP, allow user space to use pointers within the ROMFS. */
+		if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
+			return 1;
+	} else {
+		if (in_mem(addr, size, memory_start, physical_mem_end))
+			return 1;
+	}
+
+	if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
 		return 1;
 
-#ifdef CONFIG_ROMFS_ON_MTD
-	/* For XIP, allow user space to use pointers within the ROMFS. */
-	if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
+	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
 		return 1;
-#endif
-#else
-	if (addr >= memory_start && (addr + size) <= physical_mem_end)
+	if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
 		return 1;
-#endif
-	if (addr >= (unsigned long)__init_begin &&
-	    addr + size <= (unsigned long)__init_end)
+	if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
 		return 1;
-	if (addr >= get_l1_scratch_start()
-	    && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
+	if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
 		return 1;
-#if L1_CODE_LENGTH != 0
-	if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
-	    && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
+#ifdef COREB_L1_CODE_START
+	if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
 		return 1;
-#endif
-#if L1_DATA_A_LENGTH != 0
-	if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
-	    && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
+	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
 		return 1;
-#endif
-#if L1_DATA_B_LENGTH != 0
-	if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
-	    && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
+	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
 		return 1;
-#endif
-#if L2_LENGTH != 0
-	if (addr >= L2_START + (_ebss_l2 - _stext_l2)
-	    && addr + size <= L2_START + L2_LENGTH)
+	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
 		return 1;
 #endif
+	if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
+		return 1;
+
+	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
+		return 1;
+	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
+		return 1;
+
 	return 0;
 }
 EXPORT_SYMBOL(_access_ok);
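
Both hunks above swap the old wrap test `addr > (addr + size)` for `addr > ULONG_MAX - size`; the two agree under unsigned arithmetic, but the new form rejects the overflow without ever computing the wrapped sum, which lets in_mem() use `addr + size` safely afterwards. A compilable illustration:

    #include <limits.h>
    #include <stdio.h>

    /* Same shape as the in_mem() helper added above; only safe to call
     * once the wrap check has passed. */
    static int in_mem(unsigned long addr, unsigned long size,
                      unsigned long start, unsigned long end)
    {
        return addr >= start && addr + size <= end;
    }

    int main(void)
    {
        unsigned long addr = ULONG_MAX - 2, size = 8;

        if (addr > ULONG_MAX - size)    /* reject before addr + size wraps */
            puts("rejected: range wraps around the address space");
        else
            printf("in range: %d\n", in_mem(addr, size, 0x1000, 0x2000));
        return 0;
    }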
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 6454babdfaff..298f023bcc09 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -117,15 +117,49 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
 	 */
 #ifdef CONFIG_BFIN_ICACHE
 	printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
+	printk(KERN_INFO "  External memory:"
+# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
+	       " cacheable"
+# else
+	       " uncacheable"
+# endif
+	       " in instruction cache\n");
+	if (L2_LENGTH)
+		printk(KERN_INFO "  L2 SRAM        :"
+# ifdef CONFIG_BFIN_L2_ICACHEABLE
+		       " cacheable"
+# else
+		       " uncacheable"
+# endif
+		       " in instruction cache\n");
+
+#else
+	printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
 #endif
+
 #ifdef CONFIG_BFIN_DCACHE
-	printk(KERN_INFO "Data Cache Enabled for CPU%u"
-# if defined CONFIG_BFIN_WB
-	       " (write-back)"
-# elif defined CONFIG_BFIN_WT
-	       " (write-through)"
+	printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
+	printk(KERN_INFO "  External memory:"
+# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
+	       " cacheable (write-back)"
+# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
+	       " cacheable (write-through)"
+# else
+	       " uncacheable"
 # endif
-	       "\n", cpu);
+	       " in data cache\n");
+	if (L2_LENGTH)
+		printk(KERN_INFO "  L2 SRAM        :"
+# if defined CONFIG_BFIN_L2_WRITEBACK
+		       " cacheable (write-back)"
+# elif defined CONFIG_BFIN_L2_WRITETHROUGH
+		       " cacheable (write-through)"
+# else
+		       " uncacheable"
+# endif
+		       " in data cache\n");
+#else
+	printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
 #endif
 }
 
@@ -443,9 +477,11 @@ static __init void parse_cmdline_early(char *cmdline_p)
 		} else if (!memcmp(to, "clkin_hz=", 9)) {
 			to += 9;
 			early_init_clkin_hz(to);
+#ifdef CONFIG_EARLY_PRINTK
 		} else if (!memcmp(to, "earlyprintk=", 12)) {
 			to += 12;
 			setup_early_printk(to);
+#endif
 		} else if (!memcmp(to, "memmap=", 7)) {
 			to += 7;
 			parse_memmap(to);
@@ -516,7 +552,7 @@ static __init void memory_setup(void)
 		    && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1)
 			mtd_size =
 			    PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
-# if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+# if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
 	/* Due to a Hardware Anomaly we need to limit the size of usable
 	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
 	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -544,7 +580,7 @@ static __init void memory_setup(void)
 	dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
 #endif /* CONFIG_MTD_UCLINUX */
 
-#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263)
+#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
 	/* Due to a Hardware Anomaly we need to limit the size of usable
 	 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
 	 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -764,6 +800,11 @@ void __init setup_arch(char **cmdline_p)
 {
 	unsigned long sclk, cclk;
 
+	/* Check to make sure we are running on the right processor */
+	if (unlikely(CPUID != bfin_cpuid()))
+		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
+			CPU, bfin_cpuid(), bfin_revid());
+
 #ifdef CONFIG_DUMMY_CONSOLE
 	conswitchp = &dummy_con;
 #endif
@@ -778,14 +819,17 @@ void __init setup_arch(char **cmdline_p)
 	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
 	boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';
 
-	/* setup memory defaults from the user config */
-	physical_mem_end = 0;
-	_ramend = get_mem_size() * 1024 * 1024;
-
 	memset(&bfin_memmap, 0, sizeof(bfin_memmap));
 
+	/* If the user does not specify things on the command line, use
+	 * what the bootloader set things up as
+	 */
+	physical_mem_end = 0;
 	parse_cmdline_early(&command_line[0]);
 
+	if (_ramend == 0)
+		_ramend = get_mem_size() * 1024 * 1024;
+
 	if (physical_mem_end == 0)
 		physical_mem_end = _ramend;
 
@@ -837,7 +881,8 @@ void __init setup_arch(char **cmdline_p)
 	defined(CONFIG_BF538) || defined(CONFIG_BF539)
 	_bfin_swrst = bfin_read_SWRST();
 #else
-	_bfin_swrst = bfin_read_SYSCR();
+	/* Clear boot mode field */
+	_bfin_swrst = bfin_read_SYSCR() & ~0xf;
 #endif
 
 #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
@@ -875,10 +920,7 @@ void __init setup_arch(char **cmdline_p)
 	else
 		printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());
 
-	if (unlikely(CPUID != bfin_cpuid()))
-		printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
-		       CPU, bfin_cpuid(), bfin_revid());
-	else {
+	if (likely(CPUID == bfin_cpuid())) {
 		if (bfin_revid() != bfin_compiled_revid()) {
 			if (bfin_compiled_revid() == -1)
 				printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
@@ -1157,16 +1199,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 		icache_size = 0;
 
 	seq_printf(m, "cache size\t: %d KB(L1 icache) "
-		"%d KB(L1 dcache%s) %d KB(L2 cache)\n",
-		icache_size, dcache_size,
-#if defined CONFIG_BFIN_WB
-		"-wb"
-#elif defined CONFIG_BFIN_WT
-		"-wt"
-#endif
-		"", 0);
-
+		"%d KB(L1 dcache) %d KB(L2 cache)\n",
+		icache_size, dcache_size, 0);
 	seq_printf(m, "%s\n", cache);
+	seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
+		"cacheable"
+#else
+		"uncacheable"
+#endif
+		" in instruction cache\n");
+	seq_printf(m, "external memory\t: "
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
+		"cacheable (write-back)"
+#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
+		"cacheable (write-through)"
+#else
+		"uncacheable"
+#endif
+		" in data cache\n");
 
 	if (icache_size)
 		seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
@@ -1239,8 +1290,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	if (cpu_num != num_possible_cpus() - 1)
 		return 0;
 
-	if (L2_LENGTH)
+	if (L2_LENGTH) {
 		seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
+		seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_ICACHEABLE)
+			"cacheable"
+#else
+			"uncacheable"
+#endif
+			" in instruction cache\n");
+		seq_printf(m, "L2 SRAM\t\t: "
+#if defined(CONFIG_BFIN_L2_WRITEBACK)
+			"cacheable (write-back)"
+#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
+			"cacheable (write-through)"
+#else
+			"uncacheable"
+#endif
+			" in data cache\n");
+	}
 	seq_printf(m, "board name\t: %s\n", bfin_board_name);
 	seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
 		 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
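
All the banner changes above lean on one C property: adjacent string literals are concatenated at compile time, so the preprocessor keeps exactly one of the " cacheable"/" uncacheable" fragments and the compiler still sees a single format string, giving one printk/seq_printf per banner with no runtime branching. A minimal illustration (EXTMEM_WRITEBACK is a hypothetical stand-in for CONFIG_BFIN_EXTMEM_WRITEBACK):

    #include <stdio.h>

    #define EXTMEM_WRITEBACK        /* pretend Kconfig chose write-back */

    int main(void)
    {
        /* The surviving literals are glued into one string at compile time. */
        printf("  External memory:"
    #if defined EXTMEM_WRITEBACK
               " cacheable (write-back)"
    #elif defined EXTMEM_WRITETHROUGH
               " cacheable (write-through)"
    #else
               " uncacheable"
    #endif
               " in data cache\n");
        return 0;
    }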
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index d279552fe9b0..8eeb457ce5d5 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -37,6 +37,7 @@
 #include <asm/traps.h>
 #include <asm/cacheflush.h>
 #include <asm/cplb.h>
+#include <asm/dma.h>
 #include <asm/blackfin.h>
 #include <asm/irq_handler.h>
 #include <linux/irq.h>
@@ -636,57 +637,30 @@ asmlinkage void trap_c(struct pt_regs *fp)
  */
 static bool get_instruction(unsigned short *val, unsigned short *address)
 {
-
-	unsigned long addr;
-
-	addr = (unsigned long)address;
+	unsigned long addr = (unsigned long)address;
 
 	/* Check for odd addresses */
 	if (addr & 0x1)
 		return false;
 
-	/* Check that things do not wrap around */
-	if (addr > (addr + 2))
+	/* MMR region will never have instructions */
+	if (addr >= SYSMMR_BASE)
 		return false;
 
-	/*
-	 * Since we are in exception context, we need to do a little address checking
-	 * We need to make sure we are only accessing valid memory, and
-	 * we don't read something in the async space that can hang forever
-	 */
-	if ((addr >= FIXED_CODE_START && (addr + 2) <= physical_mem_end) ||
-#if L2_LENGTH != 0
-	    (addr >= L2_START && (addr + 2) <= (L2_START + L2_LENGTH)) ||
-#endif
-	    (addr >= BOOT_ROM_START && (addr + 2) <= (BOOT_ROM_START + BOOT_ROM_LENGTH)) ||
-#if L1_DATA_A_LENGTH != 0
-	    (addr >= L1_DATA_A_START && (addr + 2) <= (L1_DATA_A_START + L1_DATA_A_LENGTH)) ||
-#endif
-#if L1_DATA_B_LENGTH != 0
-	    (addr >= L1_DATA_B_START && (addr + 2) <= (L1_DATA_B_START + L1_DATA_B_LENGTH)) ||
-#endif
-	    (addr >= L1_SCRATCH_START && (addr + 2) <= (L1_SCRATCH_START + L1_SCRATCH_LENGTH)) ||
-	    (!(bfin_read_EBIU_AMBCTL0() & B0RDYEN) &&
-	     addr >= ASYNC_BANK0_BASE && (addr + 2) <= (ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)) ||
-	    (!(bfin_read_EBIU_AMBCTL0() & B1RDYEN) &&
-	     addr >= ASYNC_BANK1_BASE && (addr + 2) <= (ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)) ||
-	    (!(bfin_read_EBIU_AMBCTL1() & B2RDYEN) &&
-	     addr >= ASYNC_BANK2_BASE && (addr + 2) <= (ASYNC_BANK2_BASE + ASYNC_BANK1_SIZE)) ||
-	    (!(bfin_read_EBIU_AMBCTL1() & B3RDYEN) &&
-	     addr >= ASYNC_BANK3_BASE && (addr + 2) <= (ASYNC_BANK3_BASE + ASYNC_BANK1_SIZE))) {
-		*val = *address;
-		return true;
+	switch (bfin_mem_access_type(addr, 2)) {
+	case BFIN_MEM_ACCESS_CORE:
+	case BFIN_MEM_ACCESS_CORE_ONLY:
+		*val = *address;
+		return true;
+	case BFIN_MEM_ACCESS_DMA:
+		dma_memcpy(val, address, 2);
+		return true;
+	case BFIN_MEM_ACCESS_ITEST:
+		isram_memcpy(val, address, 2);
+		return true;
+	default: /* invalid access */
+		return false;
 	}
-
-#if L1_CODE_LENGTH != 0
-	if (addr >= L1_CODE_START && (addr + 2) <= (L1_CODE_START + L1_CODE_LENGTH)) {
-		isram_memcpy(val, address, 2);
-		return true;
-	}
-#endif
-
-
-	return false;
 }
 
 /*
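
After this change get_instruction() no longer hand-rolls the address windows; it asks bfin_mem_access_type() how the address may be touched and dispatches on the answer. The shape of that dispatch, reduced to a standalone sketch (the classifier and the 2-byte fetch helpers here are stand-ins for the real bfin_mem_access_type/dma_memcpy/isram_memcpy):

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    enum access { ACCESS_INVALID = -1, ACCESS_CORE, ACCESS_DMA, ACCESS_ITEST };

    /* Stand-in classifier; the kernel asks bfin_mem_access_type(addr, 2). */
    static enum access classify(unsigned long addr)
    {
        return addr < 0x1000 ? ACCESS_INVALID : ACCESS_CORE;
    }

    static bool get_insn(unsigned short *val, unsigned short *address)
    {
        unsigned long addr = (unsigned long)address;

        if (addr & 0x1)                 /* opcodes are 2-byte aligned */
            return false;

        switch (classify(addr)) {
        case ACCESS_CORE:               /* a plain load is safe */
            *val = *address;
            return true;
        case ACCESS_DMA:                /* kernel: dma_memcpy() */
        case ACCESS_ITEST:              /* kernel: isram_memcpy() */
            memcpy(val, address, 2);    /* placeholder for those helpers */
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        unsigned short insn = 0, word = 0x3028;

        printf("ok=%d insn=%#x\n", get_insn(&insn, &word), insn);
        return 0;
    }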