path: root/arch/blackfin/kernel
author		Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:21 -0400
committer	Dan Williams <dan.j.williams@intel.com>	2009-09-08 20:55:21 -0400
commit		bbb20089a3275a19e475dbc21320c3742e3ca423 (patch)
tree		216fdc1cbef450ca688135c5b8969169482d9a48 /arch/blackfin/kernel
parent		3e48e656903e9fd8bc805c6a2c4264d7808d315b (diff)
parent		657a77fa7284d8ae28dfa48f1dc5d919bf5b2843 (diff)
Merge branch 'dmaengine' into async-tx-next
Conflicts:
	crypto/async_tx/async_xor.c
	drivers/dma/ioat/dma_v2.h
	drivers/dma/ioat/pci.c
	drivers/md/raid5.c
Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--	arch/blackfin/kernel/Makefile	6
-rw-r--r--	arch/blackfin/kernel/bfin_dma_5xx.c	90
-rw-r--r--	arch/blackfin/kernel/bfin_gpio.c	18
-rw-r--r--	arch/blackfin/kernel/bfin_ksyms.c	7
-rw-r--r--	arch/blackfin/kernel/cplb-mpu/cacheinit.c	9
-rw-r--r--	arch/blackfin/kernel/cplb-mpu/cplbinit.c	12
-rw-r--r--	arch/blackfin/kernel/cplb-mpu/cplbmgr.c	38
-rw-r--r--	arch/blackfin/kernel/cplb-nompu/cacheinit.c	9
-rw-r--r--	arch/blackfin/kernel/cplb-nompu/cplbmgr.c	54
-rw-r--r--	arch/blackfin/kernel/early_printk.c	24
-rw-r--r--	arch/blackfin/kernel/ftrace-entry.S	140
-rw-r--r--	arch/blackfin/kernel/ftrace.c	42
-rw-r--r--	arch/blackfin/kernel/gptimers.c	30
-rw-r--r--	arch/blackfin/kernel/init_task.c	4
-rw-r--r--	arch/blackfin/kernel/ipipe.c	58
-rw-r--r--	arch/blackfin/kernel/irqchip.c	112
-rw-r--r--	arch/blackfin/kernel/kgdb.c	303
-rw-r--r--	arch/blackfin/kernel/mcount.S	70
-rw-r--r--	arch/blackfin/kernel/module.c	22
-rw-r--r--	arch/blackfin/kernel/process.c	177
-rw-r--r--	arch/blackfin/kernel/setup.c	217
-rw-r--r--	arch/blackfin/kernel/stacktrace.c	53
-rw-r--r--	arch/blackfin/kernel/sys_bfin.c	5
-rw-r--r--	arch/blackfin/kernel/time-ts.c	222
-rw-r--r--	arch/blackfin/kernel/time.c	53
-rw-r--r--	arch/blackfin/kernel/traps.c	240
-rw-r--r--	arch/blackfin/kernel/vmlinux.lds.S	22
27 files changed, 1269 insertions, 768 deletions
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index fd4d4328a0f2..141d9281e4b0 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -15,14 +15,18 @@ else
     obj-y += time.o
 endif
 
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace-entry.o
+obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+CFLAGS_REMOVE_ftrace.o = -pg
+
 obj-$(CONFIG_IPIPE) += ipipe.o
-obj-$(CONFIG_IPIPE_TRACE_MCOUNT) += mcount.o
 obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o
 obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
 
 # the kgdb test puts code into L2 and without linker
 # relaxation, we need to force long calls to/from it
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 8531693fb48d..e0bf8cc06907 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -20,6 +20,11 @@
 #include <asm/dma.h>
 #include <asm/uaccess.h>
 
+/*
+ * To make sure we work around 05000119 - we always check DMA_DONE bit,
+ * never the DMA_RUN bit
+ */
+
 struct dma_channel dma_ch[MAX_DMA_CHANNELS];
 EXPORT_SYMBOL(dma_ch);
 
@@ -232,6 +237,87 @@ void blackfin_dma_resume(void)
 void __init blackfin_dma_early_init(void)
 {
 	bfin_write_MDMA_S0_CONFIG(0);
+	bfin_write_MDMA_S1_CONFIG(0);
+}
+
+void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size)
+{
+	unsigned long dst = (unsigned long)pdst;
+	unsigned long src = (unsigned long)psrc;
+	struct dma_register *dst_ch, *src_ch;
+
+	/* We assume that everything is 4 byte aligned, so include
+	 * a basic sanity check
+	 */
+	BUG_ON(dst % 4);
+	BUG_ON(src % 4);
+	BUG_ON(size % 4);
+
+	/* Force a sync in case a previous config reset on this channel
+	 * occurred.  This is needed so subsequent writes to DMA registers
+	 * are not spuriously lost/corrupted.
+	 */
+	__builtin_bfin_ssync();
+
+	src_ch = 0;
+	/* Find an available memDMA channel */
+	while (1) {
+		if (!src_ch || src_ch == (struct dma_register *)MDMA_S1_NEXT_DESC_PTR) {
+			dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR;
+			src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR;
+		} else {
+			dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR;
+			src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR;
+		}
+
+		if (!bfin_read16(&src_ch->cfg)) {
+			break;
+		} else {
+			if (bfin_read16(&src_ch->irq_status) & DMA_DONE)
+				bfin_write16(&src_ch->cfg, 0);
+		}
+
+	}
+
+	/* Destination */
+	bfin_write32(&dst_ch->start_addr, dst);
+	bfin_write16(&dst_ch->x_count, size >> 2);
+	bfin_write16(&dst_ch->x_modify, 1 << 2);
+	bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR);
+
+	/* Source */
+	bfin_write32(&src_ch->start_addr, src);
+	bfin_write16(&src_ch->x_count, size >> 2);
+	bfin_write16(&src_ch->x_modify, 1 << 2);
+	bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR);
+
+	/* Enable */
+	bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32);
+	bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32);
+
+	/* Since we are atomic now, don't use the workaround ssync */
+	__builtin_bfin_ssync();
+}
+
+void __init early_dma_memcpy_done(void)
+{
+	while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) ||
+	       (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE)))
+		continue;
+
+	bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR);
+	/*
+	 * Now that DMA is done, we would normally flush cache, but
+	 * i/d cache isn't running this early, so we don't bother,
+	 * and just clear out the DMA channel for next time
+	 */
+	bfin_write_MDMA_S0_CONFIG(0);
+	bfin_write_MDMA_S1_CONFIG(0);
+	bfin_write_MDMA_D0_CONFIG(0);
+	bfin_write_MDMA_D1_CONFIG(0);
+
+	__builtin_bfin_ssync();
 }
 
 /**
@@ -367,10 +453,10 @@ void *dma_memcpy(void *pdst, const void *psrc, size_t size)
 	unsigned long src = (unsigned long)psrc;
 	size_t bulk, rest;
 
-	if (bfin_addr_dcachable(src))
+	if (bfin_addr_dcacheable(src))
 		blackfin_dcache_flush_range(src, src + size);
 
-	if (bfin_addr_dcachable(dst))
+	if (bfin_addr_dcacheable(dst))
 		blackfin_dcache_invalidate_range(dst, dst + size);
 
 	bulk = size & ~0xffff;
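
A minimal usage sketch of the early-boot pair added above (hedged: dst1/src1/len1 and friends are hypothetical names; every pointer and length must be 4-byte aligned per the BUG_ON checks):

	/* queue up to two copies, one per MDMA stream pair */
	early_dma_memcpy(dst1, src1, len1);
	early_dma_memcpy(dst2, src2, len2);
	/* spin until both MDMA0/MDMA1 report DMA_DONE, then reset the channels */
	early_dma_memcpy_done();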
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index a0678da40532..beffa00a93c3 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -313,15 +313,6 @@ inline void portmux_setup(unsigned short per)
 # define portmux_setup(...)  do { } while (0)
 #endif
 
-static int __init bfin_gpio_init(void)
-{
-	printk(KERN_INFO "Blackfin GPIO Controller\n");
-
-	return 0;
-}
-arch_initcall(bfin_gpio_init);
-
-
 #ifndef CONFIG_BF54x
 /***********************************************************
 *
@@ -1021,15 +1012,6 @@ int bfin_gpio_irq_request(unsigned gpio, const char *label)
 
 	local_irq_save_hw(flags);
 
-	if (unlikely(reserved_gpio_irq_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
-		if (system_state == SYSTEM_BOOTING)
-			dump_stack();
-		printk(KERN_ERR
-		       "bfin-gpio: GPIO %d is already reserved as gpio-irq !\n",
-		       gpio);
-		local_irq_restore_hw(flags);
-		return -EBUSY;
-	}
 	if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
 		if (system_state == SYSTEM_BOOTING)
 			dump_stack();
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c
index 01f917d58b59..ed8392c117ea 100644
--- a/arch/blackfin/kernel/bfin_ksyms.c
+++ b/arch/blackfin/kernel/bfin_ksyms.c
@@ -10,13 +10,13 @@
 #include <linux/uaccess.h>
 
 #include <asm/cacheflush.h>
+#include <asm/io.h>
 
 /* Allow people to have their own Blackfin exception handler in a module */
 EXPORT_SYMBOL(bfin_return_from_exception);
 
 /* All the Blackfin cache functions: mach-common/cache.S */
 EXPORT_SYMBOL(blackfin_dcache_invalidate_range);
-EXPORT_SYMBOL(blackfin_icache_dcache_flush_range);
 EXPORT_SYMBOL(blackfin_icache_flush_range);
 EXPORT_SYMBOL(blackfin_dcache_flush_range);
 EXPORT_SYMBOL(blackfin_dflush_page);
@@ -104,3 +104,8 @@ EXPORT_SYMBOL(__raw_smp_mark_barrier_asm);
 EXPORT_SYMBOL(__raw_smp_check_barrier_asm);
 #endif
 #endif
+
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
diff --git a/arch/blackfin/kernel/cplb-mpu/cacheinit.c b/arch/blackfin/kernel/cplb-mpu/cacheinit.c
index c6ff947f9d37..d5a86c3017f7 100644
--- a/arch/blackfin/kernel/cplb-mpu/cacheinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cacheinit.c
@@ -55,7 +55,14 @@ void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
 	}
 
 	ctrl = bfin_read_DMEM_CONTROL();
-	ctrl |= DMEM_CNTR;
+
+	/*
+	 * Anomaly notes:
+	 *  05000287 - We implement workaround #2 - Change the DMEM_CONTROL
+	 *  register, so that the port preferences for DAG0 and DAG1 are set
+	 *  to port B
+	 */
+	ctrl |= DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0);
 	bfin_write_DMEM_CONTROL(ctrl);
 	SSYNC();
 }
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index 3e329a6ce041..36193eed9a1f 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -46,13 +46,13 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 
 	printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n");
 
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
 	i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
 #endif
 
-#ifdef CONFIG_BFIN_DCACHE
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
 	d_cache = CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
+#ifdef CONFIG_BFIN_EXTMEM_WRITETROUGH
 	d_cache |= CPLB_L1_AOW | CPLB_WT;
 #endif
 #endif
@@ -64,7 +64,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 	dcplb_tbl[cpu][i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB;
 
 	icplb_tbl[cpu][i_i].addr = 0;
-	icplb_tbl[cpu][i_i++].data = i_cache | CPLB_USER_RD | PAGE_SIZE_1KB;
+	icplb_tbl[cpu][i_i++].data = CPLB_VALID | i_cache | CPLB_USER_RD | PAGE_SIZE_1KB;
 
 	/* Cover kernel memory with 4M pages. */
 	addr = 0;
@@ -91,9 +91,9 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 	/* Cover L2 memory */
 #if L2_LENGTH > 0
 	dcplb_tbl[cpu][i_d].addr = L2_START;
-	dcplb_tbl[cpu][i_d++].data = L2_DMEMORY | PAGE_SIZE_1MB;
+	dcplb_tbl[cpu][i_d++].data = L2_DMEMORY;
 	icplb_tbl[cpu][i_i].addr = L2_START;
-	icplb_tbl[cpu][i_i++].data = L2_IMEMORY | PAGE_SIZE_1MB;
+	icplb_tbl[cpu][i_i++].data = L2_IMEMORY;
 #endif
 
 	first_mask_dcplb = i_d;
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 87463ce87f5a..bcdfe9b0b71f 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -150,15 +150,19 @@ static noinline int dcplb_miss(unsigned int cpu)
 	nr_dcplb_miss[cpu]++;
 
 	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
-	if (bfin_addr_dcachable(addr)) {
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+	if (bfin_addr_dcacheable(addr)) {
 		d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
-#ifdef CONFIG_BFIN_WT
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
 		d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+# endif
 	}
 #endif
-	if (addr >= physical_mem_end) {
+
+	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+		addr = L2_START;
+		d_data = L2_DMEMORY;
+	} else if (addr >= physical_mem_end) {
 		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
 		    && (status & FAULT_USERSUPV)) {
 			addr &= ~0x3fffff;
@@ -235,7 +239,7 @@ static noinline int icplb_miss(unsigned int cpu)
 
 	i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB;
 
-#ifdef CONFIG_BFIN_ICACHE
+#ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
 	/*
 	 * Normal RAM, and possibly the reserved memory area, are
 	 * cacheable.
@@ -245,7 +249,10 @@ static noinline int icplb_miss(unsigned int cpu)
 		i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND;
 #endif
 
-	if (addr >= physical_mem_end) {
+	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+		addr = L2_START;
+		i_data = L2_IMEMORY;
+	} else if (addr >= physical_mem_end) {
 		if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
 		    && (status & FAULT_USERSUPV)) {
 			addr &= ~(1 * 1024 * 1024 - 1);
@@ -365,13 +372,18 @@ void set_mask_dcplbs(unsigned long *masks, unsigned int cpu)
 	local_irq_save_hw(flags);
 	current_rwx_mask[cpu] = masks;
 
-	d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
-#ifdef CONFIG_BFIN_DCACHE
-	d_data |= CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_WT
-	d_data |= CPLB_L1_AOW | CPLB_WT;
-#endif
+	if (L2_LENGTH && addr >= L2_START && addr < L2_START + L2_LENGTH) {
+		addr = L2_START;
+		d_data = L2_DMEMORY;
+	} else {
+		d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB;
+#ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
+		d_data |= CPLB_L1_CHBL;
+# ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
+		d_data |= CPLB_L1_AOW | CPLB_WT;
+# endif
 #endif
+	}
 
 	disable_dcplb();
 	for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
diff --git a/arch/blackfin/kernel/cplb-nompu/cacheinit.c b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
index c6ff947f9d37..d5a86c3017f7 100644
--- a/arch/blackfin/kernel/cplb-nompu/cacheinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cacheinit.c
@@ -55,7 +55,14 @@ void __cpuinit bfin_dcache_init(struct cplb_entry *dcplb_tbl)
 	}
 
 	ctrl = bfin_read_DMEM_CONTROL();
-	ctrl |= DMEM_CNTR;
+
+	/*
+	 * Anomaly notes:
+	 *  05000287 - We implement workaround #2 - Change the DMEM_CONTROL
+	 *  register, so that the port preferences for DAG0 and DAG1 are set
+	 *  to port B
+	 */
+	ctrl |= DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0);
 	bfin_write_DMEM_CONTROL(ctrl);
 	SSYNC();
 }
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
index 8cbb47c7b663..12b030842fdb 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbmgr.c
@@ -28,6 +28,7 @@
 #include <asm/cplbinit.h>
 #include <asm/cplb.h>
 #include <asm/mmu_context.h>
+#include <asm/traps.h>
 
 /*
  * WARNING
@@ -100,28 +101,6 @@ static inline void write_icplb_data(int cpu, int idx, unsigned long data,
 #endif
 }
 
-/*
- * Given the contents of the status register, return the index of the
- * CPLB that caused the fault.
- */
-static inline int faulting_cplb_index(int status)
-{
-	int signbits = __builtin_bfin_norm_fr1x32(status & 0xFFFF);
-	return 30 - signbits;
-}
-
-/*
- * Given the contents of the status register and the DCPLB_DATA contents,
- * return true if a write access should be permitted.
- */
-static inline int write_permitted(int status, unsigned long data)
-{
-	if (status & FAULT_USERSUPV)
-		return !!(data & CPLB_SUPV_WR);
-	else
-		return !!(data & CPLB_USER_WR);
-}
-
 /* Counters to implement round-robin replacement. */
 static int icplb_rr_index[NR_CPUS] PDT_ATTR;
 static int dcplb_rr_index[NR_CPUS] PDT_ATTR;
@@ -245,43 +224,16 @@ MGR_ATTR static int dcplb_miss(int cpu)
 	return CPLB_RELOADED;
 }
 
-MGR_ATTR static noinline int dcplb_protection_fault(int cpu)
-{
-	int status = bfin_read_DCPLB_STATUS();
-
-	nr_dcplb_prot[cpu]++;
-
-	if (likely(status & FAULT_RW)) {
-		int idx = faulting_cplb_index(status);
-		unsigned long regaddr = DCPLB_DATA0 + idx * 4;
-		unsigned long data = bfin_read32(regaddr);
-
-		/* Check if fault is to dirty a clean page */
-		if (!(data & CPLB_WT) && !(data & CPLB_DIRTY) &&
-		    write_permitted(status, data)) {
-
-			dcplb_tbl[cpu][idx].data = data;
-			bfin_write32(regaddr, data);
-			return CPLB_RELOADED;
-		}
-	}
-
-	return CPLB_PROT_VIOL;
-}
-
 MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
 {
 	int cause = seqstat & 0x3f;
 	unsigned int cpu = smp_processor_id();
 	switch (cause) {
-	case 0x2C:
+	case VEC_CPLB_I_M:
 		return icplb_miss(cpu);
-	case 0x26:
+	case VEC_CPLB_M:
 		return dcplb_miss(cpu);
 	default:
-		if (unlikely(cause == 0x23))
-			return dcplb_protection_fault(cpu);
-
 		return CPLB_UNKNOWN_ERR;
 	}
 }
diff --git a/arch/blackfin/kernel/early_printk.c b/arch/blackfin/kernel/early_printk.c
index c8ad051742e2..2ab56811841c 100644
--- a/arch/blackfin/kernel/early_printk.c
+++ b/arch/blackfin/kernel/early_printk.c
@@ -178,25 +178,15 @@ int __init setup_early_printk(char *buf)
 
 asmlinkage void __init init_early_exception_vectors(void)
 {
+	u32 evt;
 	SSYNC();
 
 	/* cannot program in software:
 	 * evt0 - emulation (jtag)
 	 * evt1 - reset
 	 */
-	bfin_write_EVT2(early_trap);
-	bfin_write_EVT3(early_trap);
-	bfin_write_EVT5(early_trap);
-	bfin_write_EVT6(early_trap);
-	bfin_write_EVT7(early_trap);
-	bfin_write_EVT8(early_trap);
-	bfin_write_EVT9(early_trap);
-	bfin_write_EVT10(early_trap);
-	bfin_write_EVT11(early_trap);
-	bfin_write_EVT12(early_trap);
-	bfin_write_EVT13(early_trap);
-	bfin_write_EVT14(early_trap);
-	bfin_write_EVT15(early_trap);
+	for (evt = EVT2; evt <= EVT15; evt += 4)
+		bfin_write32(evt, early_trap);
 	CSYNC();
 
 	/* Set all the return from interrupt, exception, NMI to a known place
@@ -212,11 +202,15 @@ asmlinkage void __init init_early_exception_vectors(void)
 asmlinkage void __init early_trap_c(struct pt_regs *fp, void *retaddr)
 {
 	/* This can happen before the uart is initialized, so initialize
-	 * the UART now
+	 * the UART now (but only if we are running on the processor we think
+	 * we are compiled for - otherwise we write to MMRs that don't exist,
+	 * and cause other problems.  Nothing comes out the UART, but it does
+	 * end up in the __buf_log.
 	 */
-	if (likely(early_console == NULL))
+	if (likely(early_console == NULL) && CPUID == bfin_cpuid())
 		setup_early_printk(DEFAULT_EARLY_PORT);
 
+	printk(KERN_EMERG "Early panic\n");
 	dump_bfin_mem(fp);
 	show_regs(fp);
 	dump_bfin_trace_buffer();
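
The rewritten vector setup leans on one layout assumption worth spelling out: the EVT2..EVT15 event-vector registers form a contiguous bank of 32-bit MMRs spaced 4 bytes apart, so a loop over raw addresses replaces thirteen bfin_write_EVTx() calls. A sketch of the equivalence (note the loop also writes the reserved EVT4 slot, which the old explicit code skipped):

	u32 evt;
	for (evt = EVT2; evt <= EVT15; evt += 4)	/* EVTn lives at EVT2 + 4 * (n - 2) */
		bfin_write32(evt, early_trap);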
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
new file mode 100644
index 000000000000..6980b7a0615d
--- /dev/null
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -0,0 +1,140 @@
+/*
+ * mcount and friends -- ftrace stuff
+ *
+ * Copyright (C) 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/linkage.h>
+#include <asm/ftrace.h>
+
+.text
+
+/* GCC will have called us before setting up the function prologue, so we
+ * can clobber the normal scratch registers, but we need to make sure to
+ * save/restore the registers used for argument passing (R0-R2) in case
+ * the profiled function is using them.  With data registers, R3 is the
+ * only one we can blow away.  With pointer registers, we have P0-P2.
+ *
+ * Upon entry, the RETS will point to the top of the current profiled
+ * function.  And since GCC setup the frame for us, the previous function
+ * will be waiting there.  mmmm pie.
+ */
+ENTRY(__mcount)
+	/* save third function arg early so we can do testing below */
+	[--sp] = r2;
+
+	/* load the function pointer to the tracer */
+	p0.l = _ftrace_trace_function;
+	p0.h = _ftrace_trace_function;
+	r3 = [p0];
+
+	/* optional micro optimization: don't call the stub tracer */
+	r2.l = _ftrace_stub;
+	r2.h = _ftrace_stub;
+	cc = r2 == r3;
+	if ! cc jump .Ldo_trace;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* if the ftrace_graph_return function pointer is not set to
+	 * the ftrace_stub entry, call prepare_ftrace_return().
+	 */
+	p0.l = _ftrace_graph_return;
+	p0.h = _ftrace_graph_return;
+	r3 = [p0];
+	cc = r2 == r3;
+	if ! cc jump _ftrace_graph_caller;
+
+	/* similarly, if the ftrace_graph_entry function pointer is not
+	 * set to the ftrace_graph_entry_stub entry, ...
+	 */
+	p0.l = _ftrace_graph_entry;
+	p0.h = _ftrace_graph_entry;
+	r2.l = _ftrace_graph_entry_stub;
+	r2.h = _ftrace_graph_entry_stub;
+	r3 = [p0];
+	cc = r2 == r3;
+	if ! cc jump _ftrace_graph_caller;
+#endif
+
+	r2 = [sp++];
+	rts;
+
+.Ldo_trace:
+
+	/* save first/second function arg and the return register */
+	[--sp] = r0;
+	[--sp] = r1;
+	[--sp] = rets;
+
+	/* setup the tracer function */
+	p0 = r3;
+
+	/* tracer(ulong frompc, ulong selfpc):
+	 *  frompc: the pc that did the call to ...
+	 *  selfpc: ... this location
+	 * the selfpc itself will need adjusting for the mcount call
+	 */
+	r1 = rets;
+	r0 = [fp + 4];
+	r1 += -MCOUNT_INSN_SIZE;
+
+	/* call the tracer */
+	call (p0);
+
+	/* restore state and get out of dodge */
+.Lfinish_trace:
+	rets = [sp++];
+	r1 = [sp++];
+	r0 = [sp++];
+	r2 = [sp++];
+
+.globl _ftrace_stub
+_ftrace_stub:
+	rts;
+ENDPROC(__mcount)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/* The prepare_ftrace_return() function is similar to the trace function
+ * except it takes a pointer to the location of the frompc.  This is so
+ * the prepare_ftrace_return() can hijack it temporarily for probing
+ * purposes.
+ */
+ENTRY(_ftrace_graph_caller)
+	/* save first/second function arg and the return register */
+	[--sp] = r0;
+	[--sp] = r1;
+	[--sp] = rets;
+
+	r0 = fp;
+	r1 = rets;
+	r0 += 4;
+	r1 += -MCOUNT_INSN_SIZE;
+	call _prepare_ftrace_return;
+
+	jump .Lfinish_trace;
+ENDPROC(_ftrace_graph_caller)
+
+/* Undo the rewrite caused by ftrace_graph_caller().  The common function
+ * ftrace_return_to_handler() will return the original rets so we can
+ * restore it and be on our way.
+ */
+ENTRY(_return_to_handler)
+	/* make sure original return values are saved */
+	[--sp] = p0;
+	[--sp] = r0;
+	[--sp] = r1;
+
+	/* get original return address */
+	call _ftrace_return_to_handler;
+	rets = r0;
+
+	/* anomaly 05000371 - make sure we have at least three instructions
+	 * between rets setting and the return
+	 */
+	r1 = [sp++];
+	r0 = [sp++];
+	p0 = [sp++];
+	rts;
+ENDPROC(_return_to_handler)
+#endif
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c
new file mode 100644
index 000000000000..905bfc40a00b
--- /dev/null
+++ b/arch/blackfin/kernel/ftrace.c
@@ -0,0 +1,42 @@
+/*
+ * ftrace graph code
+ *
+ * Copyright (C) 2009 Analog Devices Inc.
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/ftrace.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+	struct ftrace_graph_ent trace;
+	unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+	if (unlikely(atomic_read(&current->tracing_graph_pause)))
+		return;
+
+	if (ftrace_push_return_trace(*parent, self_addr, &trace.depth) == -EBUSY)
+		return;
+
+	trace.func = self_addr;
+
+	/* Only trace if the calling function expects to */
+	if (!ftrace_graph_entry(&trace)) {
+		current->curr_ret_stack--;
+		return;
+	}
+
+	/* all is well in the world ! hijack RETS ... */
+	*parent = return_hooker;
+}
+
+#endif
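
A hedged sketch of how this hook cooperates with the assembly above (the frame layout is an assumption for illustration; fp/rets stand in for the hardware registers): _ftrace_graph_caller passes the address of the caller's saved RETS slot so prepare_ftrace_return() can rewrite it in place:

	/* roughly what _ftrace_graph_caller does, expressed in C terms */
	unsigned long *rets_slot = (unsigned long *)(fp + 4);	/* caller's saved RETS */
	prepare_ftrace_return(rets_slot, rets - MCOUNT_INSN_SIZE);
	/* the traced function then "returns" into return_to_handler, which calls
	 * ftrace_return_to_handler() to recover and restore the real address */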
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index 3a3e9615b002..7281a91d26b5 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -189,10 +189,10 @@ void set_gptimer_status(int group, uint32_t value)
 }
 EXPORT_SYMBOL(set_gptimer_status);
 
-uint16_t get_gptimer_intr(int timer_id)
+int get_gptimer_intr(int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
-	return (group_regs[BFIN_TIMER_OCTET(timer_id)]->status & timil_mask[timer_id]) ? 1 : 0;
+	return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & timil_mask[timer_id]);
 }
 EXPORT_SYMBOL(get_gptimer_intr);
 
@@ -203,10 +203,10 @@ void clear_gptimer_intr(int timer_id)
 }
 EXPORT_SYMBOL(clear_gptimer_intr);
 
-uint16_t get_gptimer_over(int timer_id)
+int get_gptimer_over(int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
-	return (group_regs[BFIN_TIMER_OCTET(timer_id)]->status & tovf_mask[timer_id]) ? 1 : 0;
+	return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & tovf_mask[timer_id]);
 }
 EXPORT_SYMBOL(get_gptimer_over);
 
@@ -217,6 +217,13 @@ void clear_gptimer_over(int timer_id)
 }
 EXPORT_SYMBOL(clear_gptimer_over);
 
+int get_gptimer_run(int timer_id)
+{
+	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
+	return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & trun_mask[timer_id]);
+}
+EXPORT_SYMBOL(get_gptimer_run);
+
 void set_gptimer_config(int timer_id, uint16_t config)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
@@ -244,7 +251,7 @@ void enable_gptimers(uint16_t mask)
 }
 EXPORT_SYMBOL(enable_gptimers);
 
-void disable_gptimers(uint16_t mask)
+static void _disable_gptimers(uint16_t mask)
 {
 	int i;
 	uint16_t m = mask;
@@ -253,6 +260,12 @@ void disable_gptimers(uint16_t mask)
 		group_regs[i]->disable = m & 0xFF;
 		m >>= 8;
 	}
+}
+
+void disable_gptimers(uint16_t mask)
+{
+	int i;
+	_disable_gptimers(mask);
 	for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
 		if (mask & (1 << i))
 			group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i];
@@ -260,6 +273,13 @@ void disable_gptimers(uint16_t mask)
 }
 EXPORT_SYMBOL(disable_gptimers);
 
+void disable_gptimers_sync(uint16_t mask)
+{
+	_disable_gptimers(mask);
+	SSYNC();
+}
+EXPORT_SYMBOL(disable_gptimers_sync);
+
 void set_gptimer_pulse_hi(int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
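
A minimal usage sketch of the gptimers API touched above (hedged: assumes the TIMER0_id/TIMER0bit constants and the PWM config bits from asm/gptimers.h; the period/width values are arbitrary):

	set_gptimer_config(TIMER0_id, PWM_OUT | PERIOD_CNT | PULSE_HI);
	set_gptimer_period(TIMER0_id, 1000);
	set_gptimer_pwidth(TIMER0_id, 500);
	enable_gptimers(TIMER0bit);
	/* ... */
	disable_gptimers(TIMER0bit);		/* also acks the latched TRUN status */
	/* or, where the status writeback is undesirable, disable and SSYNC only: */
	disable_gptimers_sync(TIMER0bit);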
diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c
index 2c228c020978..c26c34de9f3c 100644
--- a/arch/blackfin/kernel/init_task.c
+++ b/arch/blackfin/kernel/init_task.c
@@ -35,10 +35,6 @@
 
 static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
 static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-
-struct mm_struct init_mm = INIT_MM(init_mm);
-EXPORT_SYMBOL(init_mm);
-
 /*
  * Initial task structure.
  *
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index a5de8d45424c..b8d22034b9a6 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(__ipipe_freq_scale);
 
 atomic_t __ipipe_irq_lvdepth[IVG15 + 1];
 
-unsigned long __ipipe_irq_lvmask = __all_masked_irq_flags;
+unsigned long __ipipe_irq_lvmask = bfin_no_irqs;
 EXPORT_SYMBOL(__ipipe_irq_lvmask);
 
 static void __ipipe_ack_irq(unsigned irq, struct irq_desc *desc)
@@ -99,7 +99,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * interrupt.
 	 */
 	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
-	this_domain = ipipe_current_domain;
+	this_domain = __ipipe_current_domain;
 
 	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
 		head = &this_domain->p_link;
@@ -167,7 +167,7 @@ int __ipipe_check_root(void)
 void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	int prio = desc->ic_prio;
+	int prio = __ipipe_get_irq_priority(irq);
 
 	desc->depth = 0;
 	if (ipd != &ipipe_root &&
@@ -178,8 +178,7 @@ EXPORT_SYMBOL(__ipipe_enable_irqdesc);
 
 void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	int prio = desc->ic_prio;
+	int prio = __ipipe_get_irq_priority(irq);
 
 	if (ipd != &ipipe_root &&
 	    atomic_dec_and_test(&__ipipe_irq_lvdepth[prio]))
@@ -213,7 +212,9 @@ void __ipipe_unstall_root_raw(void)
 
 int __ipipe_syscall_root(struct pt_regs *regs)
 {
+	struct ipipe_percpu_domain_data *p;
 	unsigned long flags;
+	int ret;
 
 	/*
 	 * We need to run the IRQ tail hook whenever we don't
@@ -232,29 +233,31 @@ int __ipipe_syscall_root(struct pt_regs *regs)
 	/*
 	 * This routine either returns:
 	 * 0 -- if the syscall is to be passed to Linux;
-	 * 1 -- if the syscall should not be passed to Linux, and no
+	 * >0 -- if the syscall should not be passed to Linux, and no
 	 * tail work should be performed;
-	 * -1 -- if the syscall should not be passed to Linux but the
+	 * <0 -- if the syscall should not be passed to Linux but the
 	 * tail work has to be performed (for handling signals etc).
 	 */
 
-	if (__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL) &&
-	    __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs) > 0) {
-		if (ipipe_root_domain_p && !in_atomic()) {
-			/*
-			 * Sync pending VIRQs before _TIF_NEED_RESCHED
-			 * is tested.
-			 */
-			local_irq_save_hw(flags);
-			if ((ipipe_root_cpudom_var(irqpend_himask) & IPIPE_IRQMASK_VIRT) != 0)
-				__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
-			local_irq_restore_hw(flags);
-			return -1;
-		}
+	if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+		return 0;
+
+	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
+
+	local_irq_save_hw(flags);
+
+	if (!__ipipe_root_domain_p) {
+		local_irq_restore_hw(flags);
 		return 1;
 	}
 
-	return 0;
+	p = ipipe_root_cpudom_ptr();
+	if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
+		__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+
+	local_irq_restore_hw(flags);
+
+	return -ret;
 }
 
 unsigned long ipipe_critical_enter(void (*syncfn) (void))
@@ -310,12 +313,16 @@ int ipipe_trigger_irq(unsigned irq)
 
 asmlinkage void __ipipe_sync_root(void)
 {
+	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
 	unsigned long flags;
 
 	BUG_ON(irqs_disabled());
 
 	local_irq_save_hw(flags);
 
+	if (irq_tail_hook)
+		irq_tail_hook();
+
 	clear_thread_flag(TIF_IRQ_SYNC);
 
 	if (ipipe_root_cpudom_var(irqpend_himask) != 0)
@@ -326,9 +333,7 @@ asmlinkage void __ipipe_sync_root(void)
 
 void ___ipipe_sync_pipeline(unsigned long syncmask)
 {
-	struct ipipe_domain *ipd = ipipe_current_domain;
-
-	if (ipd == ipipe_root_domain) {
+	if (__ipipe_root_domain_p) {
 		if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
 			return;
 	}
@@ -337,8 +342,3 @@ void ___ipipe_sync_pipeline(unsigned long syncmask)
 }
 
 EXPORT_SYMBOL(show_stack);
-
-#ifdef CONFIG_IPIPE_TRACE_MCOUNT
-void notrace _mcount(void);
-EXPORT_SYMBOL(_mcount);
-#endif /* CONFIG_IPIPE_TRACE_MCOUNT */
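
A hedged restatement of the return convention the rewritten __ipipe_syscall_root() implements (values per the updated comment block; the caller shown is illustrative):

	int ret = __ipipe_syscall_root(regs);
	if (ret == 0)
		/* pass the syscall on to Linux */;
	else if (ret > 0)
		/* handled by another domain: skip Linux and the tail work */;
	else	/* ret < 0 */
		/* handled, but still run the signal/reschedule tail work */;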
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 401bd32aa499..4b5fd36187d9 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -38,36 +38,15 @@
 #include <asm/pda.h>
 
 static atomic_t irq_err_count;
-static spinlock_t irq_controller_lock;
-
-/*
- * Dummy mask/unmask handler
- */
-void dummy_mask_unmask_irq(unsigned int irq)
-{
-}
-
 void ack_bad_irq(unsigned int irq)
 {
 	atomic_inc(&irq_err_count);
 	printk(KERN_ERR "IRQ: spurious interrupt %d\n", irq);
 }
 
-static struct irq_chip bad_chip = {
-	.ack = dummy_mask_unmask_irq,
-	.mask = dummy_mask_unmask_irq,
-	.unmask = dummy_mask_unmask_irq,
-};
-
 static struct irq_desc bad_irq_desc = {
-	.status = IRQ_DISABLED,
-	.chip = &bad_chip,
 	.handle_irq = handle_bad_irq,
-	.depth = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
 };
 
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -75,6 +54,7 @@ static struct irq_desc bad_irq_desc = {
 #error "Blackfin architecture does not support CONFIG_CPUMASK_OFFSTACK."
 #endif
 
+#ifdef CONFIG_PROC_FS
 int show_interrupts(struct seq_file *p, void *v)
 {
 	int i = *(loff_t *) v, j;
@@ -106,50 +86,29 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 	return 0;
 }
-
-/*
- * do_IRQ handles all hardware IRQs. Decoded IRQs should not
- * come via this function. Instead, they should provide their
- * own 'handler'
- */
-#ifdef CONFIG_DO_IRQ_L1
-__attribute__((l1_text))
-#endif
-asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
-{
-	struct pt_regs *old_regs;
-	struct irq_desc *desc = irq_desc + irq;
-#ifndef CONFIG_IPIPE
-	unsigned short pending, other_ints;
 #endif
-	old_regs = set_irq_regs(regs);
 
-	/*
-	 * Some hardware gives randomly wrong interrupts. Rather
-	 * than crashing, do something sensible.
-	 */
-	if (irq >= NR_IRQS)
-		desc = &bad_irq_desc;
-
-	irq_enter();
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
+static void check_stack_overflow(int irq)
+{
 	/* Debugging check for stack overflow: is there less than STACK_WARN free? */
-	{
-		long sp;
-
-		sp = __get_SP() & (THREAD_SIZE-1);
+	long sp = __get_SP() & (THREAD_SIZE - 1);
 
 	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
 		dump_stack();
-		printk(KERN_EMERG "%s: possible stack overflow while handling irq %i "
-			" only %ld bytes free\n",
-			__func__, irq, sp - sizeof(struct thread_info));
+		pr_emerg("irq%i: possible stack overflow only %ld bytes free\n",
+			irq, sp - sizeof(struct thread_info));
 	}
-	}
+}
+#else
+static inline void check_stack_overflow(int irq) { }
 #endif
-	generic_handle_irq(irq);
 
 #ifndef CONFIG_IPIPE
+static void maybe_lower_to_irq14(void)
+{
+	unsigned short pending, other_ints;
+
 	/*
 	 * If we're the only interrupt running (ignoring IRQ15 which
 	 * is for syscalls), lower our priority to IRQ14 so that
@@ -163,7 +122,38 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	other_ints = pending & (pending - 1);
 	if (other_ints == 0)
 		lower_to_irq14();
-#endif /* !CONFIG_IPIPE */
+}
+#else
+static inline void maybe_lower_to_irq14(void) { }
+#endif
+
+/*
+ * do_IRQ handles all hardware IRQs. Decoded IRQs should not
+ * come via this function. Instead, they should provide their
+ * own 'handler'
+ */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+
+	irq_enter();
+
+	check_stack_overflow(irq);
+
+	/*
+	 * Some hardware gives randomly wrong interrupts. Rather
+	 * than crashing, do something sensible.
+	 */
+	if (irq >= NR_IRQS)
+		handle_bad_irq(irq, &bad_irq_desc);
+	else
+		generic_handle_irq(irq);
+
+	maybe_lower_to_irq14();
+
 	irq_exit();
 
 	set_irq_regs(old_regs);
@@ -171,14 +161,6 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 
 void __init init_IRQ(void)
 {
-	struct irq_desc *desc;
-	int irq;
-
-	spin_lock_init(&irq_controller_lock);
-	for (irq = 0, desc = irq_desc; irq < NR_IRQS; irq++, desc++) {
-		*desc = bad_irq_desc;
-	}
-
 	init_arch_irq();
 
 #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND
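
The refactored check relies on kernel stacks being THREAD_SIZE-aligned, so masking the stack pointer yields the offset from the stack base, where struct thread_info sits; a sketch of the arithmetic (names as in the patch):

	long sp = __get_SP() & (THREAD_SIZE - 1);	/* bytes above the stack base */
	/* usable stack ends at thread_info, so warn when within STACK_WARN of it */
	if (unlikely(sp < sizeof(struct thread_info) + STACK_WARN))
		dump_stack();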
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index b163f6d3330d..cce79d05b90b 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -34,15 +34,6 @@ int gdb_bfin_vector = -1;
 #error change the definition of slavecpulocks
 #endif
 
-#define IN_MEM(addr, size, l1_addr, l1_size) \
-({ \
-	unsigned long __addr = (unsigned long)(addr); \
-	(l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
-})
-#define ASYNC_BANK_SIZE \
-	(ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
-	 ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
-
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
 	gdb_regs[BFIN_R0] = regs->r0;
@@ -463,42 +454,89 @@ static int hex(char ch)
 
 static int validate_memory_access_address(unsigned long addr, int size)
 {
-	int cpu = raw_smp_processor_id();
-
-	if (size < 0)
-		return EFAULT;
-	if (addr >= 0x1000 && (addr + size) <= physical_mem_end)
-		return 0;
-	if (addr >= SYSMMR_BASE)
-		return 0;
-	if (IN_MEM(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK_SIZE))
-		return 0;
-	if (cpu == 0) {
-		if (IN_MEM(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, L1_CODE_START, L1_CODE_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
-			return 0;
-#ifdef CONFIG_SMP
-	} else if (cpu == 1) {
-		if (IN_MEM(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
-			return 0;
-		if (IN_MEM(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
-			return 0;
-#endif
+	if (size < 0 || addr == 0)
+		return -EFAULT;
+	return bfin_mem_access_type(addr, size);
+}
+
+static int bfin_probe_kernel_read(char *dst, char *src, int size)
+{
+	unsigned long lsrc = (unsigned long)src;
+	int mem_type;
+
+	mem_type = validate_memory_access_address(lsrc, size);
+	if (mem_type < 0)
+		return mem_type;
+
+	if (lsrc >= SYSMMR_BASE) {
+		if (size == 2 && lsrc % 2 == 0) {
+			u16 mmr = bfin_read16(src);
+			memcpy(dst, &mmr, sizeof(mmr));
+			return 0;
+		} else if (size == 4 && lsrc % 4 == 0) {
+			u32 mmr = bfin_read32(src);
+			memcpy(dst, &mmr, sizeof(mmr));
+			return 0;
+		}
+	} else {
+		switch (mem_type) {
+		case BFIN_MEM_ACCESS_CORE:
+		case BFIN_MEM_ACCESS_CORE_ONLY:
+			return probe_kernel_read(dst, src, size);
+		/* XXX: should support IDMA here with SMP */
+		case BFIN_MEM_ACCESS_DMA:
+			if (dma_memcpy(dst, src, size))
+				return 0;
+			break;
+		case BFIN_MEM_ACCESS_ITEST:
+			if (isram_memcpy(dst, src, size))
+				return 0;
+			break;
+		}
+	}
+
+	return -EFAULT;
+}
+
+static int bfin_probe_kernel_write(char *dst, char *src, int size)
+{
+	unsigned long ldst = (unsigned long)dst;
+	int mem_type;
+
+	mem_type = validate_memory_access_address(ldst, size);
+	if (mem_type < 0)
+		return mem_type;
+
+	if (ldst >= SYSMMR_BASE) {
+		if (size == 2 && ldst % 2 == 0) {
+			u16 mmr;
+			memcpy(&mmr, src, sizeof(mmr));
+			bfin_write16(dst, mmr);
+			return 0;
+		} else if (size == 4 && ldst % 4 == 0) {
+			u32 mmr;
+			memcpy(&mmr, src, sizeof(mmr));
+			bfin_write32(dst, mmr);
+			return 0;
+		}
+	} else {
+		switch (mem_type) {
+		case BFIN_MEM_ACCESS_CORE:
+		case BFIN_MEM_ACCESS_CORE_ONLY:
+			return probe_kernel_write(dst, src, size);
+		/* XXX: should support IDMA here with SMP */
+		case BFIN_MEM_ACCESS_DMA:
+			if (dma_memcpy(dst, src, size))
+				return 0;
+			break;
+		case BFIN_MEM_ACCESS_ITEST:
+			if (isram_memcpy(dst, src, size))
+				return 0;
+			break;
+		}
 	}
 
-	if (IN_MEM(addr, size, L2_START, L2_LENGTH))
-		return 0;
-
-	return EFAULT;
+	return -EFAULT;
 }
 
 /*
@@ -508,14 +546,7 @@ static int validate_memory_access_address(unsigned long addr, int size)
 int kgdb_mem2hex(char *mem, char *buf, int count)
 {
 	char *tmp;
-	int err = 0;
-	unsigned char *pch;
-	unsigned short mmr16;
-	unsigned long mmr32;
-	int cpu = raw_smp_processor_id();
-
-	if (validate_memory_access_address((unsigned long)mem, count))
-		return EFAULT;
+	int err;
 
 	/*
 	 * We use the upper half of buf as an intermediate buffer for the
@@ -523,44 +554,7 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
 	 */
 	tmp = buf + count;
 
-	if ((unsigned int)mem >= SYSMMR_BASE) {	/*access MMR registers*/
-		switch (count) {
-		case 2:
-			if ((unsigned int)mem % 2 == 0) {
-				mmr16 = *(unsigned short *)mem;
-				pch = (unsigned char *)&mmr16;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				tmp -= 2;
-			} else
-				err = EFAULT;
-			break;
-		case 4:
-			if ((unsigned int)mem % 4 == 0) {
-				mmr32 = *(unsigned long *)mem;
-				pch = (unsigned char *)&mmr32;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				*tmp++ = *pch++;
-				tmp -= 4;
-			} else
-				err = EFAULT;
-			break;
-		default:
-			err = EFAULT;
-		}
-	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-	    || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-	    ) {
-		/* access L1 instruction SRAM*/
-		if (dma_memcpy(tmp, mem, count) == NULL)
-			err = EFAULT;
-	} else
-		err = probe_kernel_read(tmp, mem, count);
-
+	err = bfin_probe_kernel_read(tmp, mem, count);
 	if (!err) {
 		while (count > 0) {
 			buf = pack_hex_byte(buf, *tmp);
@@ -581,60 +575,21 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
  */
 int kgdb_ebin2mem(char *buf, char *mem, int count)
 {
-	char *tmp_old;
-	char *tmp_new;
-	unsigned short *mmr16;
-	unsigned long *mmr32;
-	int err = 0;
-	int size = 0;
-	int cpu = raw_smp_processor_id();
+	char *tmp_old, *tmp_new;
+	int size;
 
 	tmp_old = tmp_new = buf;
 
-	while (count-- > 0) {
+	for (size = 0; size < count; ++size) {
 		if (*tmp_old == 0x7d)
 			*tmp_new = *(++tmp_old) ^ 0x20;
 		else
 			*tmp_new = *tmp_old;
 		tmp_new++;
 		tmp_old++;
-		size++;
 	}
 
-	if (validate_memory_access_address((unsigned long)mem, size))
-		return EFAULT;
-
-	if ((unsigned int)mem >= SYSMMR_BASE) {	/*access MMR registers*/
-		switch (size) {
-		case 2:
-			if ((unsigned int)mem % 2 == 0) {
-				mmr16 = (unsigned short *)buf;
-				*(unsigned short *)mem = *mmr16;
-			} else
-				return EFAULT;
-			break;
-		case 4:
-			if ((unsigned int)mem % 4 == 0) {
-				mmr32 = (unsigned long *)buf;
-				*(unsigned long *)mem = *mmr32;
-			} else
-				return EFAULT;
-			break;
-		default:
-			return EFAULT;
-		}
-	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-	    || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-	    ) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy(mem, buf, size) == NULL)
-			err = EFAULT;
-	} else
-		err = probe_kernel_write(mem, buf, size);
-
-	return err;
+	return bfin_probe_kernel_write(mem, buf, count);
 }
 
 /*
@@ -644,14 +599,7 @@ int kgdb_ebin2mem(char *buf, char *mem, int count)
  */
 int kgdb_hex2mem(char *buf, char *mem, int count)
 {
-	char *tmp_raw;
-	char *tmp_hex;
-	unsigned short *mmr16;
-	unsigned long *mmr32;
-	int cpu = raw_smp_processor_id();
-
-	if (validate_memory_access_address((unsigned long)mem, count))
-		return EFAULT;
+	char *tmp_raw, *tmp_hex;
 
 	/*
 	 * We use the upper half of buf as an intermediate buffer for the
@@ -666,38 +614,18 @@ int kgdb_hex2mem(char *buf, char *mem, int count)
 		*tmp_raw |= hex(*tmp_hex--) << 4;
 	}
 
-	if ((unsigned int)mem >= SYSMMR_BASE) {	/*access MMR registers*/
-		switch (count) {
-		case 2:
-			if ((unsigned int)mem % 2 == 0) {
-				mmr16 = (unsigned short *)tmp_raw;
-				*(unsigned short *)mem = *mmr16;
-			} else
-				return EFAULT;
-			break;
-		case 4:
-			if ((unsigned int)mem % 4 == 0) {
-				mmr32 = (unsigned long *)tmp_raw;
-				*(unsigned long *)mem = *mmr32;
-			} else
-				return EFAULT;
-			break;
-		default:
-			return EFAULT;
-		}
-	} else if ((cpu == 0 && IN_MEM(mem, count, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-	    || (cpu == 1 && IN_MEM(mem, count, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-	    ) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy(mem, tmp_raw, count) == NULL)
-			return EFAULT;
-	} else
-		return probe_kernel_write(mem, tmp_raw, count);
-	return 0;
+	return bfin_probe_kernel_write(mem, tmp_raw, count);
 }
 
+#define IN_MEM(addr, size, l1_addr, l1_size) \
+({ \
+	unsigned long __addr = (unsigned long)(addr); \
+	(l1_size && __addr >= l1_addr && __addr + (size) <= l1_addr + l1_size); \
+})
+#define ASYNC_BANK_SIZE \
+	(ASYNC_BANK0_SIZE + ASYNC_BANK1_SIZE + \
+	 ASYNC_BANK2_SIZE + ASYNC_BANK3_SIZE)
+
 int kgdb_validate_break_address(unsigned long addr)
 {
 	int cpu = raw_smp_processor_id();
@@ -715,51 +643,22 @@ int kgdb_validate_break_address(unsigned long addr)
 	if (IN_MEM(addr, BREAK_INSTR_SIZE, L2_START, L2_LENGTH))
 		return 0;
 
-	return EFAULT;
+	return -EFAULT;
 }
 
 int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
 {
-	int err;
-	int cpu = raw_smp_processor_id();
-
-	if ((cpu == 0 && IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH))
-#ifdef CONFIG_SMP
-	    || (cpu == 1 && IN_MEM(addr, BREAK_INSTR_SIZE, COREB_L1_CODE_START, L1_CODE_LENGTH))
-#endif
-	    ) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy(saved_instr, (void *)addr, BREAK_INSTR_SIZE)
-		    == NULL)
-			return -EFAULT;
-
-		if (dma_memcpy((void *)addr, arch_kgdb_ops.gdb_bpt_instr,
-		    BREAK_INSTR_SIZE) == NULL)
-			return -EFAULT;
-
-		return 0;
-	} else {
-		err = probe_kernel_read(saved_instr, (char *)addr,
-					BREAK_INSTR_SIZE);
-		if (err)
-			return err;
-
-		return probe_kernel_write((char *)addr,
-			arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
-	}
+	int err = bfin_probe_kernel_read(saved_instr, (char *)addr,
+					 BREAK_INSTR_SIZE);
+	if (err)
+		return err;
+	return bfin_probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
+				       BREAK_INSTR_SIZE);
 }
 
 int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
 {
-	if (IN_MEM(addr, BREAK_INSTR_SIZE, L1_CODE_START, L1_CODE_LENGTH)) {
-		/* access L1 instruction SRAM */
-		if (dma_memcpy((void *)addr, bundle, BREAK_INSTR_SIZE) == NULL)
-			return -EFAULT;
-
-		return 0;
-	} else
-		return probe_kernel_write((char *)addr,
-			(char *)bundle, BREAK_INSTR_SIZE);
+	return bfin_probe_kernel_write((char *)addr, bundle, BREAK_INSTR_SIZE);
 }
 
 int kgdb_arch_init(void)
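
All the KGDB entry points above now funnel through one classifier; a hedged restatement of the dispatch (bfin_mem_access_type() and the BFIN_MEM_ACCESS_* constants come from the arch headers; this table is an illustration of the patch's logic, not new behavior):

	switch (bfin_mem_access_type(addr, size)) {
	case BFIN_MEM_ACCESS_CORE:	/* plain core load/store works */
	case BFIN_MEM_ACCESS_CORE_ONLY:
		return probe_kernel_read(dst, src, size);
	case BFIN_MEM_ACCESS_DMA:	/* e.g. L1 instruction SRAM */
		return dma_memcpy(dst, src, size) ? 0 : -EFAULT;
	case BFIN_MEM_ACCESS_ITEST:	/* on-chip ITEST copy path */
		return isram_memcpy(dst, src, size) ? 0 : -EFAULT;
	default:
		return -EFAULT;	/* MMRs instead get size-checked 16/32-bit I/O */
	}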
diff --git a/arch/blackfin/kernel/mcount.S b/arch/blackfin/kernel/mcount.S
deleted file mode 100644
index edcfb3865f46..000000000000
--- a/arch/blackfin/kernel/mcount.S
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * linux/arch/blackfin/mcount.S
- *
- * Copyright (C) 2006 Analog Devices Inc.
- *
- * 2007/04/12 Save index, length, modify and base registers. --rpm
- */
-
-#include <linux/linkage.h>
-#include <asm/blackfin.h>
-
-.text
-
-.align 4	/* just in case */
-
-ENTRY(__mcount)
-	[--sp] = i0;
-	[--sp] = i1;
-	[--sp] = i2;
-	[--sp] = i3;
-	[--sp] = l0;
-	[--sp] = l1;
-	[--sp] = l2;
-	[--sp] = l3;
-	[--sp] = m0;
-	[--sp] = m1;
-	[--sp] = m2;
-	[--sp] = m3;
-	[--sp] = b0;
-	[--sp] = b1;
-	[--sp] = b2;
-	[--sp] = b3;
-	[--sp] = ( r7:0, p5:0 );
-	[--sp] = ASTAT;
-
-	p1.L = _ipipe_trace_enable;
-	p1.H = _ipipe_trace_enable;
-	r7 = [p1];
-	CC = r7 == 0;
-	if CC jump out;
-	link 0x10;
-	r0 = 0x0;
-	[sp + 0xc] = r0;	/* v */
-	r0 = 0x0;	/* type: IPIPE_TRACE_FN */
-	r1 = rets;
-	p0 = [fp];	/* p0: Prior FP */
-	r2 = [p0 + 4];	/* r2: Prior RETS */
-	call ___ipipe_trace;
-	unlink;
-out:
-	ASTAT = [sp++];
-	( r7:0, p5:0 ) = [sp++];
-	b3 = [sp++];
-	b2 = [sp++];
-	b1 = [sp++];
-	b0 = [sp++];
-	m3 = [sp++];
-	m2 = [sp++];
-	m1 = [sp++];
-	m0 = [sp++];
-	l3 = [sp++];
-	l2 = [sp++];
-	l1 = [sp++];
-	l0 = [sp++];
-	i3 = [sp++];
-	i2 = [sp++];
-	i1 = [sp++];
-	i0 = [sp++];
-	rts;
-ENDPROC(__mcount)
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index 1bd7f2d018a8..d5aee3626688 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -201,8 +201,8 @@ apply_relocate(Elf_Shdr * sechdrs, const char *strtab,
201/* Arithmetic relocations are handled. */ 201/* Arithmetic relocations are handled. */
202/* We do not expect LSETUP to be split and hence is not */ 202/* We do not expect LSETUP to be split and hence is not */
203/* handled. */ 203/* handled. */
204/* R_byte and R_byte2 are also not handled as the gas */ 204/* R_BFIN_BYTE and R_BFIN_BYTE2 are also not handled as the */
205/* does not generate it. */ 205/* gas does not generate them. */
206/*************************************************************************/ 206/*************************************************************************/
207int 207int
208apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab, 208apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
@@ -243,8 +243,8 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
243#endif 243#endif
244 switch (ELF32_R_TYPE(rel[i].r_info)) { 244 switch (ELF32_R_TYPE(rel[i].r_info)) {
245 245
246 case R_pcrel24: 246 case R_BFIN_PCREL24:
247 case R_pcrel24_jump_l: 247 case R_BFIN_PCREL24_JUMP_L:
248 /* Add the value, subtract its position */ 248 /* Add the value, subtract its position */
249 location16 = 249 location16 =
250 (uint16_t *) (sechdrs[sechdrs[relsec].sh_info]. 250 (uint16_t *) (sechdrs[sechdrs[relsec].sh_info].
@@ -266,18 +266,18 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
266 (*location16 & 0xff00) | (value >> 16 & 0x00ff); 266 (*location16 & 0xff00) | (value >> 16 & 0x00ff);
267 *(location16 + 1) = value & 0xffff; 267 *(location16 + 1) = value & 0xffff;
268 break; 268 break;
269 case R_pcrel12_jump: 269 case R_BFIN_PCREL12_JUMP:
270 case R_pcrel12_jump_s: 270 case R_BFIN_PCREL12_JUMP_S:
271 value -= (uint32_t) location32; 271 value -= (uint32_t) location32;
272 value >>= 1; 272 value >>= 1;
273 *location16 = (value & 0xfff); 273 *location16 = (value & 0xfff);
274 break; 274 break;
275 case R_pcrel10: 275 case R_BFIN_PCREL10:
276 value -= (uint32_t) location32; 276 value -= (uint32_t) location32;
277 value >>= 1; 277 value >>= 1;
278 *location16 = (value & 0x3ff); 278 *location16 = (value & 0x3ff);
279 break; 279 break;
280 case R_luimm16: 280 case R_BFIN_LUIMM16:
281 pr_debug("before %x after %x\n", *location16, 281 pr_debug("before %x after %x\n", *location16,
282 (value & 0xffff)); 282 (value & 0xffff));
283 tmp = (value & 0xffff); 283 tmp = (value & 0xffff);
@@ -286,7 +286,7 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
286 } else 286 } else
287 *location16 = tmp; 287 *location16 = tmp;
288 break; 288 break;
289 case R_huimm16: 289 case R_BFIN_HUIMM16:
290 pr_debug("before %x after %x\n", *location16, 290 pr_debug("before %x after %x\n", *location16,
291 ((value >> 16) & 0xffff)); 291 ((value >> 16) & 0xffff));
292 tmp = ((value >> 16) & 0xffff); 292 tmp = ((value >> 16) & 0xffff);
@@ -295,10 +295,10 @@ apply_relocate_add(Elf_Shdr * sechdrs, const char *strtab,
295 } else 295 } else
296 *location16 = tmp; 296 *location16 = tmp;
297 break; 297 break;
298 case R_rimm16: 298 case R_BFIN_RIMM16:
299 *location16 = (value & 0xffff); 299 *location16 = (value & 0xffff);
300 break; 300 break;
301 case R_byte4_data: 301 case R_BFIN_BYTE4_DATA:
302 pr_debug("before %x after %x\n", *location32, value); 302 pr_debug("before %x after %x\n", *location32, value);
303 *location32 = value; 303 *location32 = value;
304 break; 304 break;
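The renamed R_BFIN_PCREL24* cases split a 24-bit, halfword-granular PC offset across two 16-bit instruction words, exactly as the masks above show. A standalone illustration of that arithmetic (all addresses and the opcode byte are made up; only the shifting and masking follow the code above):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint16_t insn[2] = { 0xe3ff, 0xffff };  /* pcrel24 call, offset placeholder */
		uint32_t location = 0x1000;             /* hypothetical insn address */
		uint32_t value = 0x2468;                /* hypothetical relocated target */

		value -= location;  /* make it PC-relative */
		value >>= 1;        /* offsets count 16-bit halfwords */
		insn[0] = (insn[0] & 0xff00) | ((value >> 16) & 0x00ff);
		insn[1] = value & 0xffff;

		printf("%04x %04x\n", insn[0], insn[1]);  /* prints: e300 0a34 */
		return 0;
	}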
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index e040e03335ea..79cad0ac5892 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -160,6 +160,29 @@ pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
160} 160}
161EXPORT_SYMBOL(kernel_thread); 161EXPORT_SYMBOL(kernel_thread);
162 162
163/*
164 * Do necessary setup to start up a newly executed thread.
165 *
166 * pass the data segment into user programs if it exists;
167 * it can't hurt anything as far as I can tell
168 */
169void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
170{
171 set_fs(USER_DS);
172 regs->pc = new_ip;
173 if (current->mm)
174 regs->p5 = current->mm->start_data;
175#ifdef CONFIG_SMP
176 task_thread_info(current)->l1_task_info.stack_start =
177 (void *)current->mm->context.stack_start;
178 task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
179 memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info,
180 sizeof(*L1_SCRATCH_TASK_INFO));
181#endif
182 wrusp(new_sp);
183}
184EXPORT_SYMBOL_GPL(start_thread);
185
163void flush_thread(void) 186void flush_thread(void)
164{ 187{
165} 188}
@@ -321,57 +344,151 @@ void finish_atomic_sections (struct pt_regs *regs)
321 } 344 }
322} 345}
323 346
347static inline
348int in_mem(unsigned long addr, unsigned long size,
349 unsigned long start, unsigned long end)
350{
351 return addr >= start && addr + size <= end;
352}
353static inline
354int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
355 unsigned long const_addr, unsigned long const_size)
356{
357 return const_size &&
358 in_mem(addr, size, const_addr + off, const_addr + const_size);
359}
360static inline
361int in_mem_const(unsigned long addr, unsigned long size,
362 unsigned long const_addr, unsigned long const_size)
363{
364 return in_mem_const_off(addr, size, 0, const_addr, const_size);
365}
366#define IN_ASYNC(bnum, bctlnum) \
367({ \
368 (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \
369 bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \
370 BFIN_MEM_ACCESS_CORE; \
371})
372
373int bfin_mem_access_type(unsigned long addr, unsigned long size)
374{
375 int cpu = raw_smp_processor_id();
376
377 /* Check that things do not wrap around */
378 if (addr > ULONG_MAX - size)
379 return -EFAULT;
380
381 if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
382 return BFIN_MEM_ACCESS_CORE;
383
384 if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
385 return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
386 if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
387 return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
388 if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
389 return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
390 if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
391 return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
392#ifdef COREB_L1_CODE_START
393 if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
394 return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
395 if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
396 return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
397 if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
398 return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
399 if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
400 return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
401#endif
402 if (in_mem_const(addr, size, L2_START, L2_LENGTH))
403 return BFIN_MEM_ACCESS_CORE;
404
405 if (addr >= SYSMMR_BASE)
406 return BFIN_MEM_ACCESS_CORE_ONLY;
407
408 /* We can't read EBIU banks that aren't enabled or we end up hanging
409 * on the access to the async space.
410 */
411 if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE))
412 return IN_ASYNC(0, 0);
413 if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE))
414 return IN_ASYNC(1, 0);
415 if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE))
416 return IN_ASYNC(2, 1);
417 if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE))
418 return IN_ASYNC(3, 1);
419
420 if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
421 return BFIN_MEM_ACCESS_CORE;
422 if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
423 return BFIN_MEM_ACCESS_DMA;
424
425 return -EFAULT;
426}
427
324#if defined(CONFIG_ACCESS_CHECK) 428#if defined(CONFIG_ACCESS_CHECK)
429#ifdef CONFIG_ACCESS_OK_L1
430__attribute__((l1_text))
431#endif
325/* Return 1 if access to memory range is OK, 0 otherwise */ 432/* Return 1 if access to memory range is OK, 0 otherwise */
326int _access_ok(unsigned long addr, unsigned long size) 433int _access_ok(unsigned long addr, unsigned long size)
327{ 434{
328 if (size == 0) 435 if (size == 0)
329 return 1; 436 return 1;
330 if (addr > (addr + size)) 437 /* Check that things do not wrap around */
438 if (addr > ULONG_MAX - size)
331 return 0; 439 return 0;
332 if (segment_eq(get_fs(), KERNEL_DS)) 440 if (segment_eq(get_fs(), KERNEL_DS))
333 return 1; 441 return 1;
334#ifdef CONFIG_MTD_UCLINUX 442#ifdef CONFIG_MTD_UCLINUX
335 if (addr >= memory_start && (addr + size) <= memory_end) 443 if (1)
336 return 1; 444#else
337 if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end) 445 if (0)
446#endif
447 {
448 if (in_mem(addr, size, memory_start, memory_end))
449 return 1;
450 if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
451 return 1;
452# ifndef CONFIG_ROMFS_ON_MTD
453 if (0)
454# endif
455 /* For XIP, allow user space to use pointers within the ROMFS. */
456 if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
457 return 1;
458 } else {
459 if (in_mem(addr, size, memory_start, physical_mem_end))
460 return 1;
461 }
462
463 if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
338 return 1; 464 return 1;
339 465
340#ifdef CONFIG_ROMFS_ON_MTD 466 if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
341 /* For XIP, allow user space to use pointers within the ROMFS. */
342 if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
343 return 1; 467 return 1;
344#endif 468 if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
345#else
346 if (addr >= memory_start && (addr + size) <= physical_mem_end)
347 return 1; 469 return 1;
348#endif 470 if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
349 if (addr >= (unsigned long)__init_begin &&
350 addr + size <= (unsigned long)__init_end)
351 return 1; 471 return 1;
352 if (addr >= get_l1_scratch_start() 472 if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
353 && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
354 return 1; 473 return 1;
355#if L1_CODE_LENGTH != 0 474#ifdef COREB_L1_CODE_START
356 if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1) 475 if (in_mem_const(addr, size, COREB_L1_CODE_START, L1_CODE_LENGTH))
357 && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
358 return 1; 476 return 1;
359#endif 477 if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
360#if L1_DATA_A_LENGTH != 0
361 if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
362 && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
363 return 1; 478 return 1;
364#endif 479 if (in_mem_const(addr, size, COREB_L1_DATA_A_START, L1_DATA_A_LENGTH))
365#if L1_DATA_B_LENGTH != 0
366 if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
367 && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
368 return 1; 480 return 1;
369#endif 481 if (in_mem_const(addr, size, COREB_L1_DATA_B_START, L1_DATA_B_LENGTH))
370#if L2_LENGTH != 0
371 if (addr >= L2_START + (_ebss_l2 - _stext_l2)
372 && addr + size <= L2_START + L2_LENGTH)
373 return 1; 482 return 1;
374#endif 483#endif
484 if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
485 return 1;
486
487 if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
488 return 1;
489 if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
490 return 1;
491
375 return 0; 492 return 0;
376} 493}
377EXPORT_SYMBOL(_access_ok); 494EXPORT_SYMBOL(_access_ok);
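bfin_mem_access_type() gives the rest of the kernel a single oracle for how a given address range may be touched, which is what lets callers like the kgdb hooks stay short. A hedged sketch of a caller (the helper name and buffer handling are illustrative, not from the patch):

	/* Illustrative caller: choose core access vs. DMA per region type. */
	static int copy_from_kernel_region(void *dst, unsigned long src, size_t len)
	{
		int type = bfin_mem_access_type(src, len);

		if (type < 0)
			return type;  /* -EFAULT: hole, or a disabled async bank */
		if (type == BFIN_MEM_ACCESS_DMA || type == BFIN_MEM_ACCESS_IDMA)
			return dma_memcpy(dst, (void *)src, len) ? 0 : -EFAULT;
		memcpy(dst, (void *)src, len);  /* plain core access is safe */
		return 0;
	}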
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index a58687bdee6a..298f023bcc09 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -18,9 +18,12 @@
18#include <linux/tty.h> 18#include <linux/tty.h>
19#include <linux/pfn.h> 19#include <linux/pfn.h>
20 20
21#ifdef CONFIG_MTD_UCLINUX
22#include <linux/mtd/map.h>
21#include <linux/ext2_fs.h> 23#include <linux/ext2_fs.h>
22#include <linux/cramfs_fs.h> 24#include <linux/cramfs_fs.h>
23#include <linux/romfs_fs.h> 25#include <linux/romfs_fs.h>
26#endif
24 27
25#include <asm/cplb.h> 28#include <asm/cplb.h>
26#include <asm/cacheflush.h> 29#include <asm/cacheflush.h>
@@ -45,6 +48,7 @@ EXPORT_SYMBOL(_ramend);
45EXPORT_SYMBOL(reserved_mem_dcache_on); 48EXPORT_SYMBOL(reserved_mem_dcache_on);
46 49
47#ifdef CONFIG_MTD_UCLINUX 50#ifdef CONFIG_MTD_UCLINUX
51extern struct map_info uclinux_ram_map;
48unsigned long memory_mtd_end, memory_mtd_start, mtd_size; 52unsigned long memory_mtd_end, memory_mtd_start, mtd_size;
49unsigned long _ebss; 53unsigned long _ebss;
50EXPORT_SYMBOL(memory_mtd_end); 54EXPORT_SYMBOL(memory_mtd_end);
@@ -113,15 +117,49 @@ void __cpuinit bfin_setup_caches(unsigned int cpu)
113 */ 117 */
114#ifdef CONFIG_BFIN_ICACHE 118#ifdef CONFIG_BFIN_ICACHE
115 printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu); 119 printk(KERN_INFO "Instruction Cache Enabled for CPU%u\n", cpu);
120 printk(KERN_INFO " External memory:"
121# ifdef CONFIG_BFIN_EXTMEM_ICACHEABLE
122 " cacheable"
123# else
124 " uncacheable"
125# endif
126 " in instruction cache\n");
127 if (L2_LENGTH)
128 printk(KERN_INFO " L2 SRAM :"
129# ifdef CONFIG_BFIN_L2_ICACHEABLE
130 " cacheable"
131# else
132 " uncacheable"
133# endif
134 " in instruction cache\n");
135
136#else
137 printk(KERN_INFO "Instruction Cache Disabled for CPU%u\n", cpu);
116#endif 138#endif
139
117#ifdef CONFIG_BFIN_DCACHE 140#ifdef CONFIG_BFIN_DCACHE
118 printk(KERN_INFO "Data Cache Enabled for CPU%u" 141 printk(KERN_INFO "Data Cache Enabled for CPU%u\n", cpu);
119# if defined CONFIG_BFIN_WB 142 printk(KERN_INFO " External memory:"
120 " (write-back)" 143# if defined CONFIG_BFIN_EXTMEM_WRITEBACK
121# elif defined CONFIG_BFIN_WT 144 " cacheable (write-back)"
122 " (write-through)" 145# elif defined CONFIG_BFIN_EXTMEM_WRITETHROUGH
146 " cacheable (write-through)"
147# else
148 " uncacheable"
149# endif
150 " in data cache\n");
151 if (L2_LENGTH)
152 printk(KERN_INFO " L2 SRAM :"
153# if defined CONFIG_BFIN_L2_WRITEBACK
154 " cacheable (write-back)"
155# elif defined CONFIG_BFIN_L2_WRITETHROUGH
156 " cacheable (write-through)"
157# else
158 " uncacheable"
123# endif 159# endif
124 "\n", cpu); 160 " in data cache\n");
161#else
162 printk(KERN_INFO "Data Cache Disabled for CPU%u\n", cpu);
125#endif 163#endif
126} 164}
127 165
@@ -150,40 +188,45 @@ void __init bfin_relocate_l1_mem(void)
150 unsigned long l1_data_b_length; 188 unsigned long l1_data_b_length;
151 unsigned long l2_length; 189 unsigned long l2_length;
152 190
191 /*
192 * due to the ALIGN(4) in the arch/blackfin/kernel/vmlinux.lds.S
193 * we know that everything about l1 text/data is nice and aligned,
194 * so copy by 4 byte chunks, and don't worry about overlapping
195 * src/dest.
196 *
197 * We can't use the dma_memcpy functions, since they can call
198 * scheduler functions which might be in L1 :( and core writes
199 * into L1 instruction SRAM cause bad access errors, so we are stuck:
200 * we are required to use DMA, but can't use the common dma
201 * functions. We can't use memcpy either - since that might
202 * itself be in the relocated L1
203 */
204
153 blackfin_dma_early_init(); 205 blackfin_dma_early_init();
154 206
207 /* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
155 l1_code_length = _etext_l1 - _stext_l1; 208 l1_code_length = _etext_l1 - _stext_l1;
156 if (l1_code_length > L1_CODE_LENGTH) 209 if (l1_code_length)
157 panic("L1 Instruction SRAM Overflow\n"); 210 early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
158 /* cannot complain as printk is not available as yet.
159 * But we can continue booting and complain later!
160 */
161
162 /* Copy _stext_l1 to _etext_l1 to L1 instruction SRAM */
163 dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
164 211
212 /* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
165 l1_data_a_length = _sbss_l1 - _sdata_l1; 213 l1_data_a_length = _sbss_l1 - _sdata_l1;
166 if (l1_data_a_length > L1_DATA_A_LENGTH) 214 if (l1_data_a_length)
167 panic("L1 Data SRAM Bank A Overflow\n"); 215 early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
168
169 /* Copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */
170 dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
171 216
217 /* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
172 l1_data_b_length = _sbss_b_l1 - _sdata_b_l1; 218 l1_data_b_length = _sbss_b_l1 - _sdata_b_l1;
173 if (l1_data_b_length > L1_DATA_B_LENGTH) 219 if (l1_data_b_length)
174 panic("L1 Data SRAM Bank B Overflow\n"); 220 early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
175
176 /* Copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */
177 dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
178 l1_data_a_length, l1_data_b_length); 221 l1_data_a_length, l1_data_b_length);
179 222
223 early_dma_memcpy_done();
224
225 /* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */
180 if (L2_LENGTH != 0) { 226 if (L2_LENGTH != 0) {
181 l2_length = _sbss_l2 - _stext_l2; 227 l2_length = _sbss_l2 - _stext_l2;
182 if (l2_length > L2_LENGTH) 228 if (l2_length)
183 panic("L2 SRAM Overflow\n"); 229 memcpy(_stext_l2, _l2_lma_start, l2_length);
184
185 /* Copy _stext_l2 to _edata_l2 to L2 SRAM */
186 dma_memcpy(_stext_l2, _l2_lma_start, l2_length);
187 } 230 }
188} 231}
189 232
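The three early_dma_memcpy() calls rely on the boot image packing the L1 sections back to back at _l1_lma_start, so each source address is simply the running sum of the preceding lengths. Spelled out (this restates the copies above, nothing new):

	unsigned long code_len   = _etext_l1 - _stext_l1;
	unsigned long data_a_len = _sbss_l1  - _sdata_l1;

	void *code_src   = _l1_lma_start;                          /* L1 text   */
	void *data_a_src = _l1_lma_start + code_len;               /* L1 data A */
	void *data_b_src = _l1_lma_start + code_len + data_a_len;  /* L1 data B */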
@@ -434,9 +477,11 @@ static __init void parse_cmdline_early(char *cmdline_p)
434 } else if (!memcmp(to, "clkin_hz=", 9)) { 477 } else if (!memcmp(to, "clkin_hz=", 9)) {
435 to += 9; 478 to += 9;
436 early_init_clkin_hz(to); 479 early_init_clkin_hz(to);
480#ifdef CONFIG_EARLY_PRINTK
437 } else if (!memcmp(to, "earlyprintk=", 12)) { 481 } else if (!memcmp(to, "earlyprintk=", 12)) {
438 to += 12; 482 to += 12;
439 setup_early_printk(to); 483 setup_early_printk(to);
484#endif
440 } else if (!memcmp(to, "memmap=", 7)) { 485 } else if (!memcmp(to, "memmap=", 7)) {
441 to += 7; 486 to += 7;
442 parse_memmap(to); 487 parse_memmap(to);
@@ -472,7 +517,7 @@ static __init void memory_setup(void)
472 517
473 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { 518 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
474 console_init(); 519 console_init();
475 panic("DMA region exceeds memory limit: %lu.\n", 520 panic("DMA region exceeds memory limit: %lu.",
476 _ramend - _ramstart); 521 _ramend - _ramstart);
477 } 522 }
478 memory_end = _ramend - DMA_UNCACHED_REGION; 523 memory_end = _ramend - DMA_UNCACHED_REGION;
@@ -507,7 +552,7 @@ static __init void memory_setup(void)
507 && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1) 552 && ((unsigned long *)mtd_phys)[1] == ROMSB_WORD1)
508 mtd_size = 553 mtd_size =
509 PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2])); 554 PAGE_ALIGN(be32_to_cpu(((unsigned long *)mtd_phys)[2]));
510# if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263) 555# if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
511 /* Due to a Hardware Anomaly we need to limit the size of usable 556 /* Due to a Hardware Anomaly we need to limit the size of usable
512 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on 557 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
513 * 05000263 - Hardware loop corrupted when taking an ICPLB exception 558 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -526,17 +571,16 @@ static __init void memory_setup(void)
526 571
527 if (mtd_size == 0) { 572 if (mtd_size == 0) {
528 console_init(); 573 console_init();
529 panic("Don't boot kernel without rootfs attached.\n"); 574 panic("Don't boot kernel without rootfs attached.");
530 } 575 }
531 576
532 /* Relocate MTD image to the top of memory after the uncached memory area */ 577 /* Relocate MTD image to the top of memory after the uncached memory area */
533 dma_memcpy((char *)memory_end, _end, mtd_size); 578 uclinux_ram_map.phys = memory_mtd_start = memory_end;
534 579 uclinux_ram_map.size = mtd_size;
535 memory_mtd_start = memory_end; 580 dma_memcpy((void *)uclinux_ram_map.phys, _end, uclinux_ram_map.size);
536 _ebss = memory_mtd_start; /* define _ebss for compatible */
537#endif /* CONFIG_MTD_UCLINUX */ 581#endif /* CONFIG_MTD_UCLINUX */
538 582
539#if (defined(CONFIG_BFIN_ICACHE) && ANOMALY_05000263) 583#if (defined(CONFIG_BFIN_EXTMEM_ICACHEABLE) && ANOMALY_05000263)
540 /* Due to a Hardware Anomaly we need to limit the size of usable 584 /* Due to a Hardware Anomaly we need to limit the size of usable
541 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on 585 * instruction memory to max 60MB, 56 if HUNT_FOR_ZERO is on
542 * 05000263 - Hardware loop corrupted when taking an ICPLB exception 586 * 05000263 - Hardware loop corrupted when taking an ICPLB exception
@@ -756,6 +800,11 @@ void __init setup_arch(char **cmdline_p)
756{ 800{
757 unsigned long sclk, cclk; 801 unsigned long sclk, cclk;
758 802
803 /* Check to make sure we are running on the right processor */
804 if (unlikely(CPUID != bfin_cpuid()))
805 printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
806 CPU, bfin_cpuid(), bfin_revid());
807
759#ifdef CONFIG_DUMMY_CONSOLE 808#ifdef CONFIG_DUMMY_CONSOLE
760 conswitchp = &dummy_con; 809 conswitchp = &dummy_con;
761#endif 810#endif
@@ -770,14 +819,17 @@ void __init setup_arch(char **cmdline_p)
770 memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE); 819 memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
771 boot_command_line[COMMAND_LINE_SIZE - 1] = '\0'; 820 boot_command_line[COMMAND_LINE_SIZE - 1] = '\0';
772 821
773 /* setup memory defaults from the user config */
774 physical_mem_end = 0;
775 _ramend = get_mem_size() * 1024 * 1024;
776
777 memset(&bfin_memmap, 0, sizeof(bfin_memmap)); 822 memset(&bfin_memmap, 0, sizeof(bfin_memmap));
778 823
824 /* If the user does not specify things on the command line, fall
825 * back to what the bootloader set up
826 */
827 physical_mem_end = 0;
779 parse_cmdline_early(&command_line[0]); 828 parse_cmdline_early(&command_line[0]);
780 829
830 if (_ramend == 0)
831 _ramend = get_mem_size() * 1024 * 1024;
832
781 if (physical_mem_end == 0) 833 if (physical_mem_end == 0)
782 physical_mem_end = _ramend; 834 physical_mem_end = _ramend;
783 835
@@ -796,10 +848,8 @@ void __init setup_arch(char **cmdline_p)
796 cclk = get_cclk(); 848 cclk = get_cclk();
797 sclk = get_sclk(); 849 sclk = get_sclk();
798 850
799#if !defined(CONFIG_BFIN_KERNEL_CLOCK) 851 if ((ANOMALY_05000273 || ANOMALY_05000274) && (cclk >> 1) < sclk)
800 if (ANOMALY_05000273 && cclk == sclk) 852 panic("ANOMALY 05000273 or 05000274: CCLK must be >= 2*SCLK");
801 panic("ANOMALY 05000273, SCLK can not be same as CCLK");
802#endif
803 853
804#ifdef BF561_FAMILY 854#ifdef BF561_FAMILY
805 if (ANOMALY_05000266) { 855 if (ANOMALY_05000266) {
@@ -831,7 +881,8 @@ void __init setup_arch(char **cmdline_p)
831 defined(CONFIG_BF538) || defined(CONFIG_BF539) 881 defined(CONFIG_BF538) || defined(CONFIG_BF539)
832 _bfin_swrst = bfin_read_SWRST(); 882 _bfin_swrst = bfin_read_SWRST();
833#else 883#else
834 _bfin_swrst = bfin_read_SYSCR(); 884 /* Clear boot mode field */
885 _bfin_swrst = bfin_read_SYSCR() & ~0xf;
835#endif 886#endif
836 887
837#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT 888#ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT
@@ -869,10 +920,7 @@ void __init setup_arch(char **cmdline_p)
869 else 920 else
870 printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid()); 921 printk(KERN_INFO "Compiled for ADSP-%s Rev 0.%d\n", CPU, bfin_compiled_revid());
871 922
872 if (unlikely(CPUID != bfin_cpuid())) 923 if (likely(CPUID == bfin_cpuid())) {
873 printk(KERN_ERR "ERROR: Not running on ADSP-%s: unknown CPUID 0x%04x Rev 0.%d\n",
874 CPU, bfin_cpuid(), bfin_revid());
875 else {
876 if (bfin_revid() != bfin_compiled_revid()) { 924 if (bfin_revid() != bfin_compiled_revid()) {
877 if (bfin_compiled_revid() == -1) 925 if (bfin_compiled_revid() == -1)
878 printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n", 926 printk(KERN_ERR "Warning: Compiled for Rev none, but running on Rev %d\n",
@@ -881,7 +929,7 @@ void __init setup_arch(char **cmdline_p)
881 printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n", 929 printk(KERN_ERR "Warning: Compiled for Rev %d, but running on Rev %d\n",
882 bfin_compiled_revid(), bfin_revid()); 930 bfin_compiled_revid(), bfin_revid());
883 if (bfin_compiled_revid() > bfin_revid()) 931 if (bfin_compiled_revid() > bfin_revid())
884 panic("Error: you are missing anomaly workarounds for this rev\n"); 932 panic("Error: you are missing anomaly workarounds for this rev");
885 } 933 }
886 } 934 }
887 if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX) 935 if (bfin_revid() < CONFIG_BF_REV_MIN || bfin_revid() > CONFIG_BF_REV_MAX)
@@ -891,16 +939,13 @@ void __init setup_arch(char **cmdline_p)
891 939
892 /* We can't run on BF548-0.1 due to ANOMALY 05000448 */ 940 /* We can't run on BF548-0.1 due to ANOMALY 05000448 */
893 if (bfin_cpuid() == 0x27de && bfin_revid() == 1) 941 if (bfin_cpuid() == 0x27de && bfin_revid() == 1)
894 panic("You can't run on this processor due to 05000448\n"); 942 panic("You can't run on this processor due to 05000448");
895 943
896 printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n"); 944 printk(KERN_INFO "Blackfin Linux support by http://blackfin.uclinux.org/\n");
897 945
898 printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n", 946 printk(KERN_INFO "Processor Speed: %lu MHz core clock and %lu MHz System Clock\n",
899 cclk / 1000000, sclk / 1000000); 947 cclk / 1000000, sclk / 1000000);
900 948
901 if (ANOMALY_05000273 && (cclk >> 1) <= sclk)
902 printk("\n\n\nANOMALY_05000273: CCLK must be >= 2*SCLK !!!\n\n\n");
903
904 setup_bootmem_allocator(); 949 setup_bootmem_allocator();
905 950
906 paging_init(); 951 paging_init();
@@ -1095,7 +1140,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1095 CPUID, bfin_cpuid()); 1140 CPUID, bfin_cpuid());
1096 1141
1097 seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n" 1142 seq_printf(m, "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n"
1098 "stepping\t: %d\n", 1143 "stepping\t: %d ",
1099 cpu, cclk/1000000, sclk/1000000, 1144 cpu, cclk/1000000, sclk/1000000,
1100#ifdef CONFIG_MPU 1145#ifdef CONFIG_MPU
1101 "mpu on", 1146 "mpu on",
@@ -1104,7 +1149,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1104#endif 1149#endif
1105 revid); 1150 revid);
1106 1151
1107 seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", 1152 if (bfin_revid() != bfin_compiled_revid()) {
1153 if (bfin_compiled_revid() == -1)
1154 seq_printf(m, "(Compiled for Rev none)");
1155 else if (bfin_compiled_revid() == 0xffff)
1156 seq_printf(m, "(Compiled for Rev any)");
1157 else
1158 seq_printf(m, "(Compiled for Rev %d)", bfin_compiled_revid());
1159 }
1160
1161 seq_printf(m, "\ncpu MHz\t\t: %lu.%03lu/%lu.%03lu\n",
1108 cclk/1000000, cclk%1000000, 1162 cclk/1000000, cclk%1000000,
1109 sclk/1000000, sclk%1000000); 1163 sclk/1000000, sclk%1000000);
1110 seq_printf(m, "bogomips\t: %lu.%02lu\n" 1164 seq_printf(m, "bogomips\t: %lu.%02lu\n"
@@ -1145,16 +1199,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1145 icache_size = 0; 1199 icache_size = 0;
1146 1200
1147 seq_printf(m, "cache size\t: %d KB(L1 icache) " 1201 seq_printf(m, "cache size\t: %d KB(L1 icache) "
1148 "%d KB(L1 dcache%s) %d KB(L2 cache)\n", 1202 "%d KB(L1 dcache) %d KB(L2 cache)\n",
1149 icache_size, dcache_size, 1203 icache_size, dcache_size, 0);
1150#if defined CONFIG_BFIN_WB
1151 "-wb"
1152#elif defined CONFIG_BFIN_WT
1153 "-wt"
1154#endif
1155 "", 0);
1156
1157 seq_printf(m, "%s\n", cache); 1204 seq_printf(m, "%s\n", cache);
1205 seq_printf(m, "external memory\t: "
1206#if defined(CONFIG_BFIN_EXTMEM_ICACHEABLE)
1207 "cacheable"
1208#else
1209 "uncacheable"
1210#endif
1211 " in instruction cache\n");
1212 seq_printf(m, "external memory\t: "
1213#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK)
1214 "cacheable (write-back)"
1215#elif defined(CONFIG_BFIN_EXTMEM_WRITETHROUGH)
1216 "cacheable (write-through)"
1217#else
1218 "uncacheable"
1219#endif
1220 " in data cache\n");
1158 1221
1159 if (icache_size) 1222 if (icache_size)
1160 seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n", 1223 seq_printf(m, "icache setup\t: %d Sub-banks/%d Ways, %d Lines/Way\n",
@@ -1169,6 +1232,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1169#ifdef __ARCH_SYNC_CORE_DCACHE 1232#ifdef __ARCH_SYNC_CORE_DCACHE
1170 seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); 1233 seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count);
1171#endif 1234#endif
1235#ifdef __ARCH_SYNC_CORE_ICACHE
1236 seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count);
1237#endif
1172#ifdef CONFIG_BFIN_ICACHE_LOCK 1238#ifdef CONFIG_BFIN_ICACHE_LOCK
1173 switch ((cpudata->imemctl >> 3) & WAYALL_L) { 1239 switch ((cpudata->imemctl >> 3) & WAYALL_L) {
1174 case WAY0_L: 1240 case WAY0_L:
@@ -1224,8 +1290,25 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1224 if (cpu_num != num_possible_cpus() - 1) 1290 if (cpu_num != num_possible_cpus() - 1)
1225 return 0; 1291 return 0;
1226 1292
1227 if (L2_LENGTH) 1293 if (L2_LENGTH) {
1228 seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400); 1294 seq_printf(m, "L2 SRAM\t\t: %dKB\n", L2_LENGTH/0x400);
1295 seq_printf(m, "L2 SRAM\t\t: "
1296#if defined(CONFIG_BFIN_L2_ICACHEABLE)
1297 "cacheable"
1298#else
1299 "uncacheable"
1300#endif
1301 " in instruction cache\n");
1302 seq_printf(m, "L2 SRAM\t\t: "
1303#if defined(CONFIG_BFIN_L2_WRITEBACK)
1304 "cacheable (write-back)"
1305#elif defined(CONFIG_BFIN_L2_WRITETHROUGH)
1306 "cacheable (write-through)"
1307#else
1308 "uncacheable"
1309#endif
1310 " in data cache\n");
1311 }
1229 seq_printf(m, "board name\t: %s\n", bfin_board_name); 1312 seq_printf(m, "board name\t: %s\n", bfin_board_name);
1230 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", 1313 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
1231 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); 1314 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
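Assembled from the seq_printf() format strings above, the reworked /proc/cpuinfo rows come out roughly as follows (the clock values and revision are made-up examples):

	stepping	: 2 (Compiled for Rev any)
	cpu MHz		: 500.000/100.000
	...
	external memory	: cacheable in instruction cache
	external memory	: cacheable (write-back) in data cache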
diff --git a/arch/blackfin/kernel/stacktrace.c b/arch/blackfin/kernel/stacktrace.c
new file mode 100644
index 000000000000..30301e1eace5
--- /dev/null
+++ b/arch/blackfin/kernel/stacktrace.c
@@ -0,0 +1,53 @@
1/*
2 * Blackfin stacktrace code (mostly copied from avr32)
3 *
4 * Copyright 2009 Analog Devices Inc.
5 * Licensed under the GPL-2 or later.
6 */
7
8#include <linux/sched.h>
9#include <linux/stacktrace.h>
10#include <linux/thread_info.h>
11#include <linux/module.h>
12
13register unsigned long current_frame_pointer asm("FP");
14
15struct stackframe {
16 unsigned long fp;
17 unsigned long rets;
18};
19
20/*
21 * Save stack-backtrace addresses into a stack_trace buffer.
22 */
23void save_stack_trace(struct stack_trace *trace)
24{
25 unsigned long low, high;
26 unsigned long fp;
27 struct stackframe *frame;
28 int skip = trace->skip;
29
30 low = (unsigned long)task_stack_page(current);
31 high = low + THREAD_SIZE;
32 fp = current_frame_pointer;
33
34 while (fp >= low && fp <= (high - sizeof(*frame))) {
35 frame = (struct stackframe *)fp;
36
37 if (skip) {
38 skip--;
39 } else {
40 trace->entries[trace->nr_entries++] = frame->rets;
41 if (trace->nr_entries >= trace->max_entries)
42 break;
43 }
44
45 /*
46 * The next frame must be at a higher address than the
47 * current frame.
48 */
49 low = fp + sizeof(*frame);
50 fp = frame->fp;
51 }
52}
53EXPORT_SYMBOL_GPL(save_stack_trace);
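With this file in place, the generic CONFIG_STACKTRACE interface works on Blackfin. A typical caller, illustrative only, using the stack_trace API of this kernel era (<linux/stacktrace.h>):

	static void dump_current_backtrace(void)
	{
		unsigned long entries[16];
		struct stack_trace trace = {
			.entries     = entries,
			.max_entries = ARRAY_SIZE(entries),
			.skip        = 1,  /* drop this function's own frame */
		};

		save_stack_trace(&trace);
		print_stack_trace(&trace, 0);
	}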
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
index fce49d7cf001..a8f1329c15a4 100644
--- a/arch/blackfin/kernel/sys_bfin.c
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -78,11 +78,6 @@ asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
78 return do_mmap2(addr, len, prot, flags, fd, pgoff); 78 return do_mmap2(addr, len, prot, flags, fd, pgoff);
79} 79}
80 80
81asmlinkage int sys_getpagesize(void)
82{
83 return PAGE_SIZE;
84}
85
86asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags) 81asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags)
87{ 82{
88 return sram_alloc_with_lsl(size, flags); 83 return sram_alloc_with_lsl(size, flags);
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 27646121280a..0791eba40d9f 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -20,8 +20,9 @@
20 20
21#include <asm/blackfin.h> 21#include <asm/blackfin.h>
22#include <asm/time.h> 22#include <asm/time.h>
23#include <asm/gptimers.h>
23 24
24#ifdef CONFIG_CYCLES_CLOCKSOURCE 25#if defined(CONFIG_CYCLES_CLOCKSOURCE)
25 26
26/* Accelerators for sched_clock() 27/* Accelerators for sched_clock()
27 * convert from cycles(64bits) => nanoseconds (64bits) 28 * convert from cycles(64bits) => nanoseconds (64bits)
@@ -58,15 +59,15 @@ static inline unsigned long long cycles_2_ns(cycle_t cyc)
58 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; 59 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
59} 60}
60 61
61static cycle_t read_cycles(struct clocksource *cs) 62static cycle_t bfin_read_cycles(struct clocksource *cs)
62{ 63{
63 return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); 64 return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
64} 65}
65 66
66static struct clocksource clocksource_bfin = { 67static struct clocksource bfin_cs_cycles = {
67 .name = "bfin_cycles", 68 .name = "bfin_cs_cycles",
68 .rating = 350, 69 .rating = 350,
69 .read = read_cycles, 70 .read = bfin_read_cycles,
70 .mask = CLOCKSOURCE_MASK(64), 71 .mask = CLOCKSOURCE_MASK(64),
71 .shift = 22, 72 .shift = 22,
72 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 73 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
@@ -74,53 +75,198 @@ static struct clocksource clocksource_bfin = {
74 75
75unsigned long long sched_clock(void) 76unsigned long long sched_clock(void)
76{ 77{
77 return cycles_2_ns(read_cycles(&clocksource_bfin)); 78 return cycles_2_ns(bfin_read_cycles(&bfin_cs_cycles));
78} 79}
79 80
80static int __init bfin_clocksource_init(void) 81static int __init bfin_cs_cycles_init(void)
81{ 82{
82 set_cyc2ns_scale(get_cclk() / 1000); 83 set_cyc2ns_scale(get_cclk() / 1000);
83 84
84 clocksource_bfin.mult = clocksource_hz2mult(get_cclk(), clocksource_bfin.shift); 85 bfin_cs_cycles.mult = \
86 clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);
85 87
86 if (clocksource_register(&clocksource_bfin)) 88 if (clocksource_register(&bfin_cs_cycles))
87 panic("failed to register clocksource"); 89 panic("failed to register clocksource");
88 90
89 return 0; 91 return 0;
90} 92}
93#else
94# define bfin_cs_cycles_init()
95#endif
96
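The mult/shift pair registered above implements the usual fixed-point conversion ns = (cycles * mult) >> shift. A worked example with an assumed 500 MHz CCLK and the shift of 22 used here:

	/* clocksource_hz2mult(500000000, 22) ~= (10^9 << 22) / (5 * 10^8)
	 * = 0x800000, so one cycle converts to (0x800000 >> 22) = 2 ns,
	 * as expected for a 500 MHz clock. Same form as cycles_2_ns().
	 */
	static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
	{
		return (cyc * mult) >> shift;
	}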
97#ifdef CONFIG_GPTMR0_CLOCKSOURCE
98
99void __init setup_gptimer0(void)
100{
101 disable_gptimers(TIMER0bit);
102
103 set_gptimer_config(TIMER0_id, \
104 TIMER_OUT_DIS | TIMER_PERIOD_CNT | TIMER_MODE_PWM);
105 set_gptimer_period(TIMER0_id, -1);
106 set_gptimer_pwidth(TIMER0_id, -2);
107 SSYNC();
108 enable_gptimers(TIMER0bit);
109}
110
111static cycle_t bfin_read_gptimer0(void)
112{
113 return bfin_read_TIMER0_COUNTER();
114}
115
116static struct clocksource bfin_cs_gptimer0 = {
117 .name = "bfin_cs_gptimer0",
118 .rating = 400,
119 .read = bfin_read_gptimer0,
120 .mask = CLOCKSOURCE_MASK(32),
121 .shift = 22,
122 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
123};
124
125static int __init bfin_cs_gptimer0_init(void)
126{
127 setup_gptimer0();
91 128
129 bfin_cs_gptimer0.mult = \
130 clocksource_hz2mult(get_sclk(), bfin_cs_gptimer0.shift);
131
132 if (clocksource_register(&bfin_cs_gptimer0))
133 panic("failed to register clocksource");
134
135 return 0;
136}
92#else 137#else
93# define bfin_clocksource_init() 138# define bfin_cs_gptimer0_init()
94#endif 139#endif
95 140
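Note what setup_gptimer0() above programs: a period of -1 (the full 32-bit range) and a pulse width of -2, i.e. a free-running PWM-mode counter that never needs reprogramming, which is why bfin_read_gptimer0() can simply sample TIMER0_COUNTER. The same two calls with explicit constants (equivalent to the code above):

	set_gptimer_period(TIMER0_id, 0xFFFFFFFF);  /* -1: maximum 32-bit period */
	set_gptimer_pwidth(TIMER0_id, 0xFFFFFFFE);  /* -2: width stays below the period */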
141#ifdef CONFIG_CORE_TIMER_IRQ_L1
142__attribute__((l1_text))
143#endif
144irqreturn_t timer_interrupt(int irq, void *dev_id);
145
146static int bfin_timer_set_next_event(unsigned long, \
147 struct clock_event_device *);
148
149static void bfin_timer_set_mode(enum clock_event_mode, \
150 struct clock_event_device *);
151
152static struct clock_event_device clockevent_bfin = {
153#if defined(CONFIG_TICKSOURCE_GPTMR0)
154 .name = "bfin_gptimer0",
155 .rating = 300,
156 .irq = IRQ_TIMER0,
157#else
158 .name = "bfin_core_timer",
159 .rating = 350,
160 .irq = IRQ_CORETMR,
161#endif
162 .shift = 32,
163 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
164 .set_next_event = bfin_timer_set_next_event,
165 .set_mode = bfin_timer_set_mode,
166};
167
168static struct irqaction bfin_timer_irq = {
169#if defined(CONFIG_TICKSOURCE_GPTMR0)
170 .name = "Blackfin GPTimer0",
171#else
172 .name = "Blackfin CoreTimer",
173#endif
174 .flags = IRQF_DISABLED | IRQF_TIMER | \
175 IRQF_IRQPOLL | IRQF_PERCPU,
176 .handler = timer_interrupt,
177 .dev_id = &clockevent_bfin,
178};
179
180#if defined(CONFIG_TICKSOURCE_GPTMR0)
96static int bfin_timer_set_next_event(unsigned long cycles, 181static int bfin_timer_set_next_event(unsigned long cycles,
97 struct clock_event_device *evt) 182 struct clock_event_device *evt)
98{ 183{
184 disable_gptimers(TIMER0bit);
185
186 /* it starts counting three SCLK cycles after the TIMENx bit is set */
187 set_gptimer_pwidth(TIMER0_id, cycles - 3);
188 enable_gptimers(TIMER0bit);
189 return 0;
190}
191
192static void bfin_timer_set_mode(enum clock_event_mode mode,
193 struct clock_event_device *evt)
194{
195 switch (mode) {
196 case CLOCK_EVT_MODE_PERIODIC: {
197 set_gptimer_config(TIMER0_id, \
198 TIMER_OUT_DIS | TIMER_IRQ_ENA | \
199 TIMER_PERIOD_CNT | TIMER_MODE_PWM);
200 set_gptimer_period(TIMER0_id, get_sclk() / HZ);
201 set_gptimer_pwidth(TIMER0_id, get_sclk() / HZ - 1);
202 enable_gptimers(TIMER0bit);
203 break;
204 }
205 case CLOCK_EVT_MODE_ONESHOT:
206 disable_gptimers(TIMER0bit);
207 set_gptimer_config(TIMER0_id, \
208 TIMER_OUT_DIS | TIMER_IRQ_ENA | TIMER_MODE_PWM);
209 set_gptimer_period(TIMER0_id, 0);
210 break;
211 case CLOCK_EVT_MODE_UNUSED:
212 case CLOCK_EVT_MODE_SHUTDOWN:
213 disable_gptimers(TIMER0bit);
214 break;
215 case CLOCK_EVT_MODE_RESUME:
216 break;
217 }
218}
219
220static void bfin_timer_ack(void)
221{
222 set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
223}
224
225static void __init bfin_timer_init(void)
226{
227 disable_gptimers(TIMER0bit);
228}
229
230static unsigned long __init bfin_clockevent_check(void)
231{
232 setup_irq(IRQ_TIMER0, &bfin_timer_irq);
233 return get_sclk();
234}
235
236#else /* CONFIG_TICKSOURCE_CORETMR */
237
238static int bfin_timer_set_next_event(unsigned long cycles,
239 struct clock_event_device *evt)
240{
241 bfin_write_TCNTL(TMPWR);
242 CSYNC();
99 bfin_write_TCOUNT(cycles); 243 bfin_write_TCOUNT(cycles);
100 CSYNC(); 244 CSYNC();
245 bfin_write_TCNTL(TMPWR | TMREN);
101 return 0; 246 return 0;
102} 247}
103 248
104static void bfin_timer_set_mode(enum clock_event_mode mode, 249static void bfin_timer_set_mode(enum clock_event_mode mode,
105 struct clock_event_device *evt) 250 struct clock_event_device *evt)
106{ 251{
107 switch (mode) { 252 switch (mode) {
108 case CLOCK_EVT_MODE_PERIODIC: { 253 case CLOCK_EVT_MODE_PERIODIC: {
109 unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1); 254 unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1);
110 bfin_write_TCNTL(TMPWR); 255 bfin_write_TCNTL(TMPWR);
111 bfin_write_TSCALE(TIME_SCALE - 1);
112 CSYNC(); 256 CSYNC();
257 bfin_write_TSCALE(TIME_SCALE - 1);
113 bfin_write_TPERIOD(tcount); 258 bfin_write_TPERIOD(tcount);
114 bfin_write_TCOUNT(tcount); 259 bfin_write_TCOUNT(tcount);
115 bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
116 CSYNC(); 260 CSYNC();
261 bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD);
117 break; 262 break;
118 } 263 }
119 case CLOCK_EVT_MODE_ONESHOT: 264 case CLOCK_EVT_MODE_ONESHOT:
265 bfin_write_TCNTL(TMPWR);
266 CSYNC();
120 bfin_write_TSCALE(TIME_SCALE - 1); 267 bfin_write_TSCALE(TIME_SCALE - 1);
268 bfin_write_TPERIOD(0);
121 bfin_write_TCOUNT(0); 269 bfin_write_TCOUNT(0);
122 bfin_write_TCNTL(TMPWR | TMREN);
123 CSYNC();
124 break; 270 break;
125 case CLOCK_EVT_MODE_UNUSED: 271 case CLOCK_EVT_MODE_UNUSED:
126 case CLOCK_EVT_MODE_SHUTDOWN: 272 case CLOCK_EVT_MODE_SHUTDOWN:
@@ -132,6 +278,10 @@ static void bfin_timer_set_mode(enum clock_event_mode mode,
132 } 278 }
133} 279}
134 280
281static void bfin_timer_ack(void)
282{
283}
284
135static void __init bfin_timer_init(void) 285static void __init bfin_timer_init(void)
136{ 286{
137 /* power up the timer, but don't enable it just yet */ 287 /* power up the timer, but don't enable it just yet */
@@ -145,38 +295,32 @@ static void __init bfin_timer_init(void)
145 bfin_write_TPERIOD(0); 295 bfin_write_TPERIOD(0);
146 bfin_write_TCOUNT(0); 296 bfin_write_TCOUNT(0);
147 297
148 /* now enable the timer */
149 CSYNC(); 298 CSYNC();
150} 299}
151 300
301static unsigned long __init bfin_clockevent_check(void)
302{
303 setup_irq(IRQ_CORETMR, &bfin_timer_irq);
304 return get_cclk() / TIME_SCALE;
305}
306
307void __init setup_core_timer(void)
308{
309 bfin_timer_init();
310 bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL);
311}
312#endif /* CONFIG_TICKSOURCE_GPTMR0 */
313
152/* 314/*
153 * timer_interrupt() needs to keep up the real-time clock, 315 * timer_interrupt() needs to keep up the real-time clock,
154 * as well as call the "do_timer()" routine every clocktick 316 * as well as call the "do_timer()" routine every clocktick
155 */ 317 */
156#ifdef CONFIG_CORE_TIMER_IRQ_L1
157__attribute__((l1_text))
158#endif
159irqreturn_t timer_interrupt(int irq, void *dev_id);
160
161static struct clock_event_device clockevent_bfin = {
162 .name = "bfin_core_timer",
163 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
164 .shift = 32,
165 .set_next_event = bfin_timer_set_next_event,
166 .set_mode = bfin_timer_set_mode,
167};
168
169static struct irqaction bfin_timer_irq = {
170 .name = "Blackfin Core Timer",
171 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
172 .handler = timer_interrupt,
173 .dev_id = &clockevent_bfin,
174};
175
176irqreturn_t timer_interrupt(int irq, void *dev_id) 318irqreturn_t timer_interrupt(int irq, void *dev_id)
177{ 319{
178 struct clock_event_device *evt = dev_id; 320 struct clock_event_device *evt = dev_id;
321 smp_mb();
179 evt->event_handler(evt); 322 evt->event_handler(evt);
323 bfin_timer_ack();
180 return IRQ_HANDLED; 324 return IRQ_HANDLED;
181} 325}
182 326
@@ -184,9 +328,8 @@ static int __init bfin_clockevent_init(void)
184{ 328{
185 unsigned long timer_clk; 329 unsigned long timer_clk;
186 330
187 timer_clk = get_cclk() / TIME_SCALE; 331 timer_clk = bfin_clockevent_check();
188 332
189 setup_irq(IRQ_CORETMR, &bfin_timer_irq);
190 bfin_timer_init(); 333 bfin_timer_init();
191 334
192 clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); 335 clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift);
@@ -218,6 +361,7 @@ void __init time_init(void)
218 xtime.tv_nsec = 0; 361 xtime.tv_nsec = 0;
219 set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); 362 set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
220 363
221 bfin_clocksource_init(); 364 bfin_cs_cycles_init();
365 bfin_cs_gptimer0_init();
222 bfin_clockevent_init(); 366 bfin_clockevent_init();
223} 367}
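bfin_clockevent_init() converts the tick clock returned by bfin_clockevent_check() with div_sc(), so deltas become cycles = (ns * mult) >> 32, and the gptimer flavour of set_next_event() then knocks 3 SCLKs off the programmed width for the documented start-up latency. A worked sketch with an assumed 100 MHz SCLK:

	/* div_sc(100000000, NSEC_PER_SEC, 32) ~= 2^32 / 10 = 0x1999999a,
	 * so a 10 ms tick (10000000 ns) maps to about 1000000 timer
	 * cycles, and the gptimer path programs pwidth = 1000000 - 3.
	 */
	static inline u64 ns_to_cycles(u64 ns, u32 mult)
	{
		return (ns * mult) >> 32;
	}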
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 1bbacfbd4c5d..adb54aa7d7c8 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -24,14 +24,10 @@
24 24
25static struct irqaction bfin_timer_irq = { 25static struct irqaction bfin_timer_irq = {
26 .name = "Blackfin Timer Tick", 26 .name = "Blackfin Timer Tick",
27#ifdef CONFIG_IRQ_PER_CPU
28 .flags = IRQF_DISABLED | IRQF_PERCPU,
29#else
30 .flags = IRQF_DISABLED 27 .flags = IRQF_DISABLED
31#endif
32}; 28};
33 29
34#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE) 30#if defined(CONFIG_IPIPE)
35void __init setup_system_timer0(void) 31void __init setup_system_timer0(void)
36{ 32{
37 /* Power down the core timer, just to play safe. */ 33 /* Power down the core timer, just to play safe. */
@@ -74,7 +70,7 @@ void __init setup_core_timer(void)
74static void __init 70static void __init
75time_sched_init(irqreturn_t(*timer_routine) (int, void *)) 71time_sched_init(irqreturn_t(*timer_routine) (int, void *))
76{ 72{
77#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE) 73#if defined(CONFIG_IPIPE)
78 setup_system_timer0(); 74 setup_system_timer0();
79 bfin_timer_irq.handler = timer_routine; 75 bfin_timer_irq.handler = timer_routine;
80 setup_irq(IRQ_TIMER0, &bfin_timer_irq); 76 setup_irq(IRQ_TIMER0, &bfin_timer_irq);
@@ -94,7 +90,7 @@ static unsigned long gettimeoffset(void)
94 unsigned long offset; 90 unsigned long offset;
95 unsigned long clocks_per_jiffy; 91 unsigned long clocks_per_jiffy;
96 92
97#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE) 93#if defined(CONFIG_IPIPE)
98 clocks_per_jiffy = bfin_read_TIMER0_PERIOD(); 94 clocks_per_jiffy = bfin_read_TIMER0_PERIOD();
99 offset = bfin_read_TIMER0_COUNTER() / \ 95 offset = bfin_read_TIMER0_COUNTER() / \
100 (((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC); 96 (((clocks_per_jiffy + 1) * HZ) / USEC_PER_SEC);
@@ -133,36 +129,25 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
133 static long last_rtc_update; 129 static long last_rtc_update;
134 130
135 write_seqlock(&xtime_lock); 131 write_seqlock(&xtime_lock);
136#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE) 132 do_timer(1);
133
137 /* 134 /*
138 * TIMIL0 is latched in __ipipe_grab_irq() when the I-Pipe is 135 * If we have an externally synchronized Linux clock, then update
139 * enabled. 136 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
137 * called as close as possible to 500 ms before the new second starts.
140 */ 138 */
141 if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) { 139 if (ntp_synced() &&
142#endif 140 xtime.tv_sec > last_rtc_update + 660 &&
143 do_timer(1); 141 (xtime.tv_nsec / NSEC_PER_USEC) >=
144 142 500000 - ((unsigned)TICK_SIZE) / 2
145 /* 143 && (xtime.tv_nsec / NSEC_PER_USEC) <=
146 * If we have an externally synchronized Linux clock, then update 144 500000 + ((unsigned)TICK_SIZE) / 2) {
147 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be 145 if (set_rtc_mmss(xtime.tv_sec) == 0)
148 * called as close as possible to 500 ms before the new second starts. 146 last_rtc_update = xtime.tv_sec;
149 */ 147 else
150 if (ntp_synced() && 148 /* Do it again in 60s. */
151 xtime.tv_sec > last_rtc_update + 660 && 149 last_rtc_update = xtime.tv_sec - 600;
152 (xtime.tv_nsec / NSEC_PER_USEC) >=
153 500000 - ((unsigned)TICK_SIZE) / 2
154 && (xtime.tv_nsec / NSEC_PER_USEC) <=
155 500000 + ((unsigned)TICK_SIZE) / 2) {
156 if (set_rtc_mmss(xtime.tv_sec) == 0)
157 last_rtc_update = xtime.tv_sec;
158 else
159 /* Do it again in 60s. */
160 last_rtc_update = xtime.tv_sec - 600;
161 }
162#if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
163 set_gptimer_status(0, TIMER_STATUS_TIMIL0);
164 } 150 }
165#endif
166 write_sequnlock(&xtime_lock); 151 write_sequnlock(&xtime_lock);
167 152
168#ifdef CONFIG_IPIPE 153#ifdef CONFIG_IPIPE
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index ffe7fb53eccb..8eeb457ce5d5 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -27,6 +27,7 @@
27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 27 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
28 */ 28 */
29 29
30#include <linux/bug.h>
30#include <linux/uaccess.h> 31#include <linux/uaccess.h>
31#include <linux/interrupt.h> 32#include <linux/interrupt.h>
32#include <linux/module.h> 33#include <linux/module.h>
@@ -36,6 +37,7 @@
36#include <asm/traps.h> 37#include <asm/traps.h>
37#include <asm/cacheflush.h> 38#include <asm/cacheflush.h>
38#include <asm/cplb.h> 39#include <asm/cplb.h>
40#include <asm/dma.h>
39#include <asm/blackfin.h> 41#include <asm/blackfin.h>
40#include <asm/irq_handler.h> 42#include <asm/irq_handler.h>
41#include <linux/irq.h> 43#include <linux/irq.h>
@@ -68,6 +70,13 @@
68 ({ if (0) printk(fmt, ##arg); 0; }) 70 ({ if (0) printk(fmt, ##arg); 0; })
69#endif 71#endif
70 72
73#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
74u32 last_seqstat;
75#ifdef CONFIG_DEBUG_MMRS_MODULE
76EXPORT_SYMBOL(last_seqstat);
77#endif
78#endif
79
71/* Initiate the event table handler */ 80/* Initiate the event table handler */
72void __init trap_init(void) 81void __init trap_init(void)
73{ 82{
@@ -79,7 +88,6 @@ void __init trap_init(void)
79static void decode_address(char *buf, unsigned long address) 88static void decode_address(char *buf, unsigned long address)
80{ 89{
81#ifdef CONFIG_DEBUG_VERBOSE 90#ifdef CONFIG_DEBUG_VERBOSE
82 struct vm_list_struct *vml;
83 struct task_struct *p; 91 struct task_struct *p;
84 struct mm_struct *mm; 92 struct mm_struct *mm;
85 unsigned long flags, offset; 93 unsigned long flags, offset;
@@ -196,6 +204,11 @@ done:
196 204
197asmlinkage void double_fault_c(struct pt_regs *fp) 205asmlinkage void double_fault_c(struct pt_regs *fp)
198{ 206{
207#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
208 int j;
209 trace_buffer_save(j);
210#endif
211
199 console_verbose(); 212 console_verbose();
200 oops_in_progress = 1; 213 oops_in_progress = 1;
201#ifdef CONFIG_DEBUG_VERBOSE 214#ifdef CONFIG_DEBUG_VERBOSE
@@ -220,10 +233,16 @@ asmlinkage void double_fault_c(struct pt_regs *fp)
220 dump_bfin_process(fp); 233 dump_bfin_process(fp);
221 dump_bfin_mem(fp); 234 dump_bfin_mem(fp);
222 show_regs(fp); 235 show_regs(fp);
236 dump_bfin_trace_buffer();
223 } 237 }
224#endif 238#endif
225 panic("Double Fault - unrecoverable event\n"); 239 panic("Double Fault - unrecoverable event");
240
241}
226 242
243static int kernel_mode_regs(struct pt_regs *regs)
244{
245 return regs->ipend & 0xffc0;
227} 246}
228 247
229asmlinkage void trap_c(struct pt_regs *fp) 248asmlinkage void trap_c(struct pt_regs *fp)
@@ -234,37 +253,24 @@ asmlinkage void trap_c(struct pt_regs *fp)
234#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO 253#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
235 unsigned int cpu = smp_processor_id(); 254 unsigned int cpu = smp_processor_id();
236#endif 255#endif
256 const char *strerror = NULL;
237 int sig = 0; 257 int sig = 0;
238 siginfo_t info; 258 siginfo_t info;
239 unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE; 259 unsigned long trapnr = fp->seqstat & SEQSTAT_EXCAUSE;
240 260
241 trace_buffer_save(j); 261 trace_buffer_save(j);
262#if defined(CONFIG_DEBUG_MMRS) || defined(CONFIG_DEBUG_MMRS_MODULE)
263 last_seqstat = (u32)fp->seqstat;
264#endif
242 265
243 /* Important - be very careful dereferencing pointers - will lead to 266 /* Important - be very careful dereferencing pointers - will lead to
244 * double faults if the stack has become corrupt 267 * double faults if the stack has become corrupt
245 */ 268 */
246 269
247 /* If the fault was caused by a kernel thread, or interrupt handler 270#ifndef CONFIG_KGDB
248 * we will kernel panic, so the system reboots. 271 /* IPEND is skipped if KGDB isn't enabled (see entry code) */
249 * If KGDB is enabled, don't set this for kernel breakpoints 272 fp->ipend = bfin_read_IPEND();
250 */
251
252 /* TODO: check to see if we are in some sort of deferred HWERR
253 * that we should be able to recover from, not kernel panic
254 */
255 if ((bfin_read_IPEND() & 0xFFC0) && (trapnr != VEC_STEP)
256#ifdef CONFIG_KGDB
257 && (trapnr != VEC_EXCPT02)
258#endif 273#endif
259 ){
260 console_verbose();
261 oops_in_progress = 1;
262 } else if (current) {
263 if (current->mm == NULL) {
264 console_verbose();
265 oops_in_progress = 1;
266 }
267 }
268 274
269 /* trap_c() will be called for exceptions. During exceptions 275 /* trap_c() will be called for exceptions. During exceptions
270 * processing, the pc value should be set with retx value. 276 * processing, the pc value should be set with retx value.
@@ -292,15 +298,15 @@ asmlinkage void trap_c(struct pt_regs *fp)
292 sig = SIGTRAP; 298 sig = SIGTRAP;
293 CHK_DEBUGGER_TRAP_MAYBE(); 299 CHK_DEBUGGER_TRAP_MAYBE();
294 /* Check if this is a breakpoint in kernel space */ 300 /* Check if this is a breakpoint in kernel space */
295 if (fp->ipend & 0xffc0) 301 if (kernel_mode_regs(fp))
296 return; 302 goto traps_done;
297 else 303 else
298 break; 304 break;
299 /* 0x03 - User Defined, userspace stack overflow */ 305 /* 0x03 - User Defined, userspace stack overflow */
300 case VEC_EXCPT03: 306 case VEC_EXCPT03:
301 info.si_code = SEGV_STACKFLOW; 307 info.si_code = SEGV_STACKFLOW;
302 sig = SIGSEGV; 308 sig = SIGSEGV;
303 verbose_printk(KERN_NOTICE EXC_0x03(KERN_NOTICE)); 309 strerror = KERN_NOTICE EXC_0x03(KERN_NOTICE);
304 CHK_DEBUGGER_TRAP_MAYBE(); 310 CHK_DEBUGGER_TRAP_MAYBE();
305 break; 311 break;
306 /* 0x02 - KGDB initial connection and break signal trap */ 312 /* 0x02 - KGDB initial connection and break signal trap */
@@ -309,7 +315,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
309 info.si_code = TRAP_ILLTRAP; 315 info.si_code = TRAP_ILLTRAP;
310 sig = SIGTRAP; 316 sig = SIGTRAP;
311 CHK_DEBUGGER_TRAP(); 317 CHK_DEBUGGER_TRAP();
312 return; 318 goto traps_done;
313#endif 319#endif
314 /* 0x04 - User Defined */ 320 /* 0x04 - User Defined */
315 /* 0x05 - User Defined */ 321 /* 0x05 - User Defined */
@@ -329,7 +335,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
329 case VEC_EXCPT04 ... VEC_EXCPT15: 335 case VEC_EXCPT04 ... VEC_EXCPT15:
330 info.si_code = ILL_ILLPARAOP; 336 info.si_code = ILL_ILLPARAOP;
331 sig = SIGILL; 337 sig = SIGILL;
332 verbose_printk(KERN_NOTICE EXC_0x04(KERN_NOTICE)); 338 strerror = KERN_NOTICE EXC_0x04(KERN_NOTICE);
333 CHK_DEBUGGER_TRAP_MAYBE(); 339 CHK_DEBUGGER_TRAP_MAYBE();
334 break; 340 break;
335 /* 0x10 HW Single step, handled here */ 341 /* 0x10 HW Single step, handled here */
@@ -338,15 +344,15 @@ asmlinkage void trap_c(struct pt_regs *fp)
338 sig = SIGTRAP; 344 sig = SIGTRAP;
339 CHK_DEBUGGER_TRAP_MAYBE(); 345 CHK_DEBUGGER_TRAP_MAYBE();
340 /* Check if this is a single step in kernel space */ 346 /* Check if this is a single step in kernel space */
341 if (fp->ipend & 0xffc0) 347 if (kernel_mode_regs(fp))
342 return; 348 goto traps_done;
343 else 349 else
344 break; 350 break;
345 /* 0x11 - Trace Buffer Full, handled here */ 351 /* 0x11 - Trace Buffer Full, handled here */
346 case VEC_OVFLOW: 352 case VEC_OVFLOW:
347 info.si_code = TRAP_TRACEFLOW; 353 info.si_code = TRAP_TRACEFLOW;
348 sig = SIGTRAP; 354 sig = SIGTRAP;
349 verbose_printk(KERN_NOTICE EXC_0x11(KERN_NOTICE)); 355 strerror = KERN_NOTICE EXC_0x11(KERN_NOTICE);
350 CHK_DEBUGGER_TRAP_MAYBE(); 356 CHK_DEBUGGER_TRAP_MAYBE();
351 break; 357 break;
352 /* 0x12 - Reserved, Caught by default */ 358 /* 0x12 - Reserved, Caught by default */
@@ -366,37 +372,54 @@ asmlinkage void trap_c(struct pt_regs *fp)
366 /* 0x20 - Reserved, Caught by default */ 372 /* 0x20 - Reserved, Caught by default */
367 /* 0x21 - Undefined Instruction, handled here */ 373 /* 0x21 - Undefined Instruction, handled here */
368 case VEC_UNDEF_I: 374 case VEC_UNDEF_I:
375#ifdef CONFIG_BUG
376 if (kernel_mode_regs(fp)) {
377 switch (report_bug(fp->pc, fp)) {
378 case BUG_TRAP_TYPE_NONE:
379 break;
380 case BUG_TRAP_TYPE_WARN:
381 dump_bfin_trace_buffer();
382 fp->pc += 2;
383 goto traps_done;
384 case BUG_TRAP_TYPE_BUG:
385 /* call to panic() will dump trace, and it is
386 * off at this point, so it won't be clobbered
387 */
388 panic("BUG()");
389 }
390 }
391#endif
369 info.si_code = ILL_ILLOPC; 392 info.si_code = ILL_ILLOPC;
370 sig = SIGILL; 393 sig = SIGILL;
371 verbose_printk(KERN_NOTICE EXC_0x21(KERN_NOTICE)); 394 strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE);
372 CHK_DEBUGGER_TRAP_MAYBE(); 395 CHK_DEBUGGER_TRAP_MAYBE();
373 break; 396 break;
374 /* 0x22 - Illegal Instruction Combination, handled here */ 397 /* 0x22 - Illegal Instruction Combination, handled here */
375 case VEC_ILGAL_I: 398 case VEC_ILGAL_I:
376 info.si_code = ILL_ILLPARAOP; 399 info.si_code = ILL_ILLPARAOP;
377 sig = SIGILL; 400 sig = SIGILL;
378 verbose_printk(KERN_NOTICE EXC_0x22(KERN_NOTICE)); 401 strerror = KERN_NOTICE EXC_0x22(KERN_NOTICE);
379 CHK_DEBUGGER_TRAP_MAYBE(); 402 CHK_DEBUGGER_TRAP_MAYBE();
380 break; 403 break;
381 /* 0x23 - Data CPLB protection violation, handled here */ 404 /* 0x23 - Data CPLB protection violation, handled here */
382 case VEC_CPLB_VL: 405 case VEC_CPLB_VL:
383 info.si_code = ILL_CPLB_VI; 406 info.si_code = ILL_CPLB_VI;
384 sig = SIGBUS; 407 sig = SIGBUS;
385 verbose_printk(KERN_NOTICE EXC_0x23(KERN_NOTICE)); 408 strerror = KERN_NOTICE EXC_0x23(KERN_NOTICE);
386 CHK_DEBUGGER_TRAP_MAYBE(); 409 CHK_DEBUGGER_TRAP_MAYBE();
387 break; 410 break;
388 /* 0x24 - Data access misaligned, handled here */ 411 /* 0x24 - Data access misaligned, handled here */
389 case VEC_MISALI_D: 412 case VEC_MISALI_D:
390 info.si_code = BUS_ADRALN; 413 info.si_code = BUS_ADRALN;
391 sig = SIGBUS; 414 sig = SIGBUS;
392 verbose_printk(KERN_NOTICE EXC_0x24(KERN_NOTICE)); 415 strerror = KERN_NOTICE EXC_0x24(KERN_NOTICE);
393 CHK_DEBUGGER_TRAP_MAYBE(); 416 CHK_DEBUGGER_TRAP_MAYBE();
394 break; 417 break;
395 /* 0x25 - Unrecoverable Event, handled here */ 418 /* 0x25 - Unrecoverable Event, handled here */
396 case VEC_UNCOV: 419 case VEC_UNCOV:
397 info.si_code = ILL_ILLEXCPT; 420 info.si_code = ILL_ILLEXCPT;
398 sig = SIGILL; 421 sig = SIGILL;
399 verbose_printk(KERN_NOTICE EXC_0x25(KERN_NOTICE)); 422 strerror = KERN_NOTICE EXC_0x25(KERN_NOTICE);
400 CHK_DEBUGGER_TRAP_MAYBE(); 423 CHK_DEBUGGER_TRAP_MAYBE();
401 break; 424 break;
402 /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr, 425 /* 0x26 - Data CPLB Miss, normal case is handled in _cplb_hdr,
@@ -404,7 +427,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
 	case VEC_CPLB_M:
 		info.si_code = BUS_ADRALN;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE EXC_0x26(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x26(KERN_NOTICE);
 		break;
 	/* 0x27 - Data CPLB Multiple Hits - Linux Trap Zero, handled here */
 	case VEC_CPLB_MHIT:
@@ -412,10 +435,10 @@ asmlinkage void trap_c(struct pt_regs *fp)
 		sig = SIGSEGV;
 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
 		if (cpu_pda[cpu].dcplb_fault_addr < FIXED_CODE_START)
-			verbose_printk(KERN_NOTICE "NULL pointer access\n");
+			strerror = KERN_NOTICE "NULL pointer access\n";
 		else
 #endif
-			verbose_printk(KERN_NOTICE EXC_0x27(KERN_NOTICE));
+			strerror = KERN_NOTICE EXC_0x27(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x28 - Emulation Watchpoint, handled here */
@@ -425,8 +448,8 @@ asmlinkage void trap_c(struct pt_regs *fp)
 		pr_debug(EXC_0x28(KERN_DEBUG));
 		CHK_DEBUGGER_TRAP_MAYBE();
 		/* Check if this is a watchpoint in kernel space */
-		if (fp->ipend & 0xffc0)
-			return;
+		if (kernel_mode_regs(fp))
+			goto traps_done;
 		else
 			break;
 #ifdef CONFIG_BF535
@@ -434,7 +457,7 @@ asmlinkage void trap_c(struct pt_regs *fp)
 	case VEC_ISTRU_VL:	/* ADSP-BF535 only (MH) */
 		info.si_code = BUS_OPFETCH;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE "BF535: VEC_ISTRU_VL\n");
+		strerror = KERN_NOTICE "BF535: VEC_ISTRU_VL\n";
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 #else
@@ -444,21 +467,21 @@ asmlinkage void trap_c(struct pt_regs *fp)
 	case VEC_MISALI_I:
 		info.si_code = BUS_ADRALN;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE EXC_0x2A(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x2A(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x2B - Instruction CPLB protection violation, handled here */
 	case VEC_CPLB_I_VL:
 		info.si_code = ILL_CPLB_VI;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE EXC_0x2B(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x2B(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x2C - Instruction CPLB miss, handled in _cplb_hdr */
 	case VEC_CPLB_I_M:
 		info.si_code = ILL_CPLB_MISS;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE EXC_0x2C(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x2C(KERN_NOTICE);
 		break;
 	/* 0x2D - Instruction CPLB Multiple Hits, handled here */
 	case VEC_CPLB_I_MHIT:
@@ -466,17 +489,17 @@ asmlinkage void trap_c(struct pt_regs *fp)
 		sig = SIGSEGV;
 #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
 		if (cpu_pda[cpu].icplb_fault_addr < FIXED_CODE_START)
-			verbose_printk(KERN_NOTICE "Jump to NULL address\n");
+			strerror = KERN_NOTICE "Jump to NULL address\n";
 		else
 #endif
-			verbose_printk(KERN_NOTICE EXC_0x2D(KERN_NOTICE));
+			strerror = KERN_NOTICE EXC_0x2D(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x2E - Illegal use of Supervisor Resource, handled here */
 	case VEC_ILL_RES:
 		info.si_code = ILL_PRVOPC;
 		sig = SIGILL;
-		verbose_printk(KERN_NOTICE EXC_0x2E(KERN_NOTICE));
+		strerror = KERN_NOTICE EXC_0x2E(KERN_NOTICE);
 		CHK_DEBUGGER_TRAP_MAYBE();
 		break;
 	/* 0x2F - Reserved, Caught by default */
@@ -504,17 +527,17 @@ asmlinkage void trap_c(struct pt_regs *fp)
 	case (SEQSTAT_HWERRCAUSE_SYSTEM_MMR):
 		info.si_code = BUS_ADRALN;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE HWC_x2(KERN_NOTICE));
+		strerror = KERN_NOTICE HWC_x2(KERN_NOTICE);
 		break;
 	/* External Memory Addressing Error */
 	case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
 		info.si_code = BUS_ADRERR;
 		sig = SIGBUS;
-		verbose_printk(KERN_NOTICE HWC_x3(KERN_NOTICE));
+		strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
 		break;
 	/* Performance Monitor Overflow */
 	case (SEQSTAT_HWERRCAUSE_PERF_FLOW):
-		verbose_printk(KERN_NOTICE HWC_x12(KERN_NOTICE));
+		strerror = KERN_NOTICE HWC_x12(KERN_NOTICE);
 		break;
 	/* RAISE 5 instruction */
 	case (SEQSTAT_HWERRCAUSE_RAISE_5):
@@ -531,7 +554,6 @@ asmlinkage void trap_c(struct pt_regs *fp)
 	 * if we get here we hit a reserved one, so panic
 	 */
 	default:
-		oops_in_progress = 1;
 		info.si_code = ILL_ILLPARAOP;
 		sig = SIGILL;
 		verbose_printk(KERN_EMERG "Caught Unhandled Exception, code = %08lx\n",
@@ -542,6 +564,16 @@ asmlinkage void trap_c(struct pt_regs *fp)
 
 	BUG_ON(sig == 0);
 
+	/* If the fault was caused by a kernel thread, or interrupt handler
+	 * we will kernel panic, so the system reboots.
+	 */
+	if (kernel_mode_regs(fp) || (current && !current->mm)) {
+		console_verbose();
+		oops_in_progress = 1;
+		if (strerror)
+			verbose_printk(strerror);
+	}
+
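
This block is the payoff for the verbose_printk()-to-strerror conversions above: each case now only records its message, and the text is printed, right after console_verbose() and oops_in_progress are set, only when the fault came from kernel context (an interrupt handler, or a kernel thread with no mm). Note that oops_in_progress was previously set just in the default case; hoisting it here covers every fatal path, and ordinary userspace faults now deliver their signal without flooding the console. The strerror variable itself is presumably declared by an earlier hunk of this patch, above this excerpt.
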
 	if (sig != SIGTRAP) {
 		dump_bfin_process(fp);
 		dump_bfin_mem(fp);
@@ -588,8 +620,11 @@ asmlinkage void trap_c(struct pt_regs *fp)
 		force_sig_info(sig, &info, current);
 	}
 
+	if (ANOMALY_05000461 && trapnr == VEC_HWERR && !access_ok(VERIFY_READ, fp->pc, 8))
+		fp->pc = SAFE_USER_INSTRUCTION;
+
+ traps_done:
 	trace_buffer_restore(j);
-	return;
 }
 
 /* Typical exception handling routines */
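
Two related cleanups land here. The new traps_done: label is why the earlier early returns became goto traps_done: every exit path now funnels through trace_buffer_restore(j), which the old early returns skipped, leaving the hardware trace buffer disabled after a kernel-mode single step or watchpoint. The ANOMALY_05000461 line is a silicon-erratum workaround (per the anomaly list, a hardware error can be raised falsely when the return address points at invalid memory): if the user PC is not readable, it is parked at SAFE_USER_INSTRUCTION so the return to userspace does not immediately re-fault.
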
@@ -602,57 +637,30 @@ asmlinkage void trap_c(struct pt_regs *fp)
  */
 static bool get_instruction(unsigned short *val, unsigned short *address)
 {
-
-	unsigned long addr;
-
-	addr = (unsigned long)address;
+	unsigned long addr = (unsigned long)address;
 
 	/* Check for odd addresses */
 	if (addr & 0x1)
 		return false;
 
-	/* Check that things do not wrap around */
-	if (addr > (addr + 2))
+	/* MMR region will never have instructions */
+	if (addr >= SYSMMR_BASE)
 		return false;
 
-	/*
-	 * Since we are in exception context, we need to do a little address checking
-	 * We need to make sure we are only accessing valid memory, and
-	 * we don't read something in the async space that can hang forever
-	 */
-	if ((addr >= FIXED_CODE_START && (addr + 2) <= physical_mem_end) ||
-#if L2_LENGTH != 0
-	    (addr >= L2_START && (addr + 2) <= (L2_START + L2_LENGTH)) ||
-#endif
-	    (addr >= BOOT_ROM_START && (addr + 2) <= (BOOT_ROM_START + BOOT_ROM_LENGTH)) ||
-#if L1_DATA_A_LENGTH != 0
-	    (addr >= L1_DATA_A_START && (addr + 2) <= (L1_DATA_A_START + L1_DATA_A_LENGTH)) ||
-#endif
-#if L1_DATA_B_LENGTH != 0
-	    (addr >= L1_DATA_B_START && (addr + 2) <= (L1_DATA_B_START + L1_DATA_B_LENGTH)) ||
-#endif
-	    (addr >= L1_SCRATCH_START && (addr + 2) <= (L1_SCRATCH_START + L1_SCRATCH_LENGTH)) ||
-	    (!(bfin_read_EBIU_AMBCTL0() & B0RDYEN) &&
-	     addr >= ASYNC_BANK0_BASE && (addr + 2) <= (ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)) ||
-	    (!(bfin_read_EBIU_AMBCTL0() & B1RDYEN) &&
-	     addr >= ASYNC_BANK1_BASE && (addr + 2) <= (ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)) ||
-	    (!(bfin_read_EBIU_AMBCTL1() & B2RDYEN) &&
-	     addr >= ASYNC_BANK2_BASE && (addr + 2) <= (ASYNC_BANK2_BASE + ASYNC_BANK1_SIZE)) ||
-	    (!(bfin_read_EBIU_AMBCTL1() & B3RDYEN) &&
-	     addr >= ASYNC_BANK3_BASE && (addr + 2) <= (ASYNC_BANK3_BASE + ASYNC_BANK1_SIZE))) {
-		*val = *address;
-		return true;
-	}
+	switch (bfin_mem_access_type(addr, 2)) {
+	case BFIN_MEM_ACCESS_CORE:
+	case BFIN_MEM_ACCESS_CORE_ONLY:
+		*val = *address;
+		return true;
+	case BFIN_MEM_ACCESS_DMA:
+		dma_memcpy(val, address, 2);
+		return true;
+	case BFIN_MEM_ACCESS_ITEST:
+		isram_memcpy(val, address, 2);
+		return true;
+	default: /* invalid access */
+		return false;
 	}
-
-#if L1_CODE_LENGTH != 0
-	if (addr >= L1_CODE_START && (addr + 2) <= (L1_CODE_START + L1_CODE_LENGTH)) {
-		isram_memcpy(val, address, 2);
-		return true;
-	}
-#endif
-
-
-	return false;
 }
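
The rewrite collapses the sprawling open-coded memory-map test into one bfin_mem_access_type() call that reports how (and whether) a given range may be read from exception context: directly by the core, via dma_memcpy() for DMA-only regions, or via isram_memcpy() for L1 instruction SRAM that the core cannot load from. Besides being shorter, it quietly fixes a copy-paste bug in the old code, which bounded the async bank 2 and bank 3 ranges with ASYNC_BANK1_SIZE, and it rejects MMR space up front instead of relying on the range list being exhaustive.
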
 
 /*
@@ -774,6 +782,18 @@ void dump_bfin_trace_buffer(void)
 }
 EXPORT_SYMBOL(dump_bfin_trace_buffer);
 
+#ifdef CONFIG_BUG
+int is_valid_bugaddr(unsigned long addr)
+{
+	unsigned short opcode;
+
+	if (!get_instruction(&opcode, (unsigned short *)addr))
+		return 0;
+
+	return opcode == BFIN_BUG_OPCODE;
+}
+#endif
+
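
is_valid_bugaddr() is the arch hook that the generic report_bug() consults before treating an undefined-instruction trap as a BUG/WARN site: it must confirm the trapping address really holds the Blackfin BUG opcode. Implementing it on top of get_instruction() means the probe itself cannot fault or hang on an unmapped or unreadable address, which is exactly the situation an exception handler has to assume.
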
 /*
  * Checks to see if the address pointed to is either a
  * 16-bit CALL instruction, or a 32-bit CALL instruction
@@ -832,6 +852,11 @@ void show_stack(struct task_struct *task, unsigned long *stack)
 	decode_address(buf, (unsigned int)stack);
 	printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf);
 
+	if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) {
+		printk(KERN_NOTICE "Invalid stack pointer\n");
+		return;
+	}
+
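
A sanity check before walking the stack: during an oops, show_stack() can be handed a wild pointer, and the loop below dereferences every word between stack and endstack. Validating the whole range up front turns what used to be a potential recursive fault inside the dump into a single "Invalid stack pointer" line.
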
 	/* First thing is to look for a frame pointer */
 	for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) {
 		if (*addr & 0x1)
@@ -1066,6 +1091,29 @@ void show_regs(struct pt_regs *fp)
 	unsigned int cpu = smp_processor_id();
 	unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
 
+	verbose_printk(KERN_NOTICE "\n");
+	if (CPUID != bfin_cpuid())
+		verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), "
+			"but running on:0x%04x (Rev %d)\n",
+			CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid());
+
+	verbose_printk(KERN_NOTICE "ADSP-%s-0.%d",
+		CPU, bfin_compiled_revid());
+
+	if (bfin_compiled_revid() != bfin_revid())
+		verbose_printk("(Detected 0.%d)", bfin_revid());
+
+	verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n",
+		get_cclk()/1000000, get_sclk()/1000000,
+#ifdef CONFIG_MPU
+		"mpu on"
+#else
+		"mpu off"
+#endif
+	);
+
+	verbose_printk(KERN_NOTICE "%s", linux_banner);
+
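
show_regs() now opens every dump with the identification banner normally seen only at boot: CPU family, compiled-for versus detected silicon revision, CCLK/SCLK rates, MPU state, and linux_banner. That makes a pasted register dump self-describing, and the revision-mismatch warning flags the common support case of a kernel built for one silicon rev running on another.
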
 	verbose_printk(KERN_NOTICE "\n" KERN_NOTICE "SEQUENCER STATUS:\t\t%s\n", print_tainted());
 	verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx SYSCFG: %04lx\n",
 		(long)fp->seqstat, fp->ipend, fp->syscfg);
@@ -1246,5 +1294,5 @@ void panic_cplb_error(int cplb_panic, struct pt_regs *fp)
 	dump_bfin_mem(fp);
 	show_regs(fp);
 	dump_stack();
-	panic("Unrecoverable event\n");
+	panic("Unrecoverable event");
 }
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 27952ae047d8..6ac307ca0d80 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -50,8 +50,11 @@ SECTIONS
 		_text = .;
 		__stext = .;
 		TEXT_TEXT
+#ifndef CONFIG_SCHEDULE_L1
 		SCHED_TEXT
+#endif
 		LOCK_TEXT
+		IRQENTRY_TEXT
 		KPROBES_TEXT
 		*(.text.*)
 		*(.fixup)
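
Two independent additions here: IRQENTRY_TEXT collects the .irqentry.text section (interrupt-entry code annotated for the irqsoff and function-graph tracers) into the main text segment, and SCHED_TEXT becomes conditional, so that with CONFIG_SCHEDULE_L1 the scheduler hot path is emitted into on-chip L1 instruction SRAM instead; the matching #ifdef appears in the .l1.text hunk below.
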
@@ -164,6 +167,20 @@ SECTIONS
 	}
 	PERCPU(4)
 	SECURITY_INIT
+
+	/* we have to discard exit text and such at runtime, not link time, to
+	 * handle embedded cross-section references (alt instructions, bug
+	 * table, eh_frame, etc...)
+	 */
+	.exit.text :
+	{
+		EXIT_TEXT
+	}
+	.exit.data :
+	{
+		EXIT_DATA
+	}
+
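
Keeping .exit.text/.exit.data in the link and reclaiming them at runtime (their removal from /DISCARD/ is the last hunk of this diff) matters because other kept sections can legitimately point into exit code, and a link-time discard would leave those references dangling. A hedged illustration of one such cross-section reference (hypothetical driver code; the WARN_ON() plants a __bug_table entry whose address field points into .exit.text):

	#include <linux/module.h>

	static bool still_busy;	/* hypothetical state flag */

	static void __exit example_exit(void)
	{
		/* compiles to a BUG opcode in .exit.text plus a __bug_table
		 * entry referencing it; discarding .exit.text at link time
		 * would leave that table entry pointing at nothing */
		WARN_ON(still_busy);
	}
	module_exit(example_exit);
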
 	.init.ramfs :
 	{
 		. = ALIGN(4);
@@ -180,6 +197,9 @@ SECTIONS
 		. = ALIGN(4);
 		__stext_l1 = .;
 		*(.l1.text)
+#ifdef CONFIG_SCHEDULE_L1
+		SCHED_TEXT
+#endif
 		. = ALIGN(4);
 		__etext_l1 = .;
 	}
@@ -259,8 +279,6 @@ SECTIONS
 
 	/DISCARD/ :
 	{
-		EXIT_TEXT
-		EXIT_DATA
 		*(.exitcall.exit)
 	}
 }