diff options
Diffstat (limited to 'arch/blackfin/kernel')
28 files changed, 2338 insertions, 1369 deletions
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile index a8ddbc8ed5af..30d0d1f01dc7 100644 --- a/arch/blackfin/kernel/Makefile +++ b/arch/blackfin/kernel/Makefile | |||
@@ -7,7 +7,8 @@ extra-y := init_task.o vmlinux.lds | |||
7 | obj-y := \ | 7 | obj-y := \ |
8 | entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \ | 8 | entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \ |
9 | sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \ | 9 | sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \ |
10 | fixed_code.o reboot.o bfin_gpio.o bfin_dma_5xx.o | 10 | fixed_code.o reboot.o bfin_gpio.o bfin_dma_5xx.o \ |
11 | exception.o dumpstack.o | ||
11 | 12 | ||
12 | ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y) | 13 | ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y) |
13 | obj-y += time-ts.o | 14 | obj-y += time-ts.o |
@@ -25,9 +26,12 @@ obj-$(CONFIG_CPLB_INFO) += cplbinfo.o | |||
25 | obj-$(CONFIG_MODULES) += module.o | 26 | obj-$(CONFIG_MODULES) += module.o |
26 | obj-$(CONFIG_KGDB) += kgdb.o | 27 | obj-$(CONFIG_KGDB) += kgdb.o |
27 | obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o | 28 | obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o |
29 | obj-$(CONFIG_NMI_WATCHDOG) += nmi.o | ||
28 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | 30 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o |
29 | obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o | 31 | obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o |
30 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 32 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
33 | obj-$(CONFIG_DEBUG_VERBOSE) += trace.o | ||
34 | obj-$(CONFIG_BFIN_PSEUDODBG_INSNS) += pseudodbg.o | ||
31 | 35 | ||
32 | # the kgdb test puts code into L2 and without linker | 36 | # the kgdb test puts code into L2 and without linker |
33 | # relaxation, we need to force long calls to/from it | 37 | # relaxation, we need to force long calls to/from it |
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c index 924c00286bab..26403d1c9e65 100644 --- a/arch/blackfin/kernel/bfin_dma_5xx.c +++ b/arch/blackfin/kernel/bfin_dma_5xx.c | |||
@@ -91,7 +91,7 @@ late_initcall(proc_dma_init); | |||
91 | */ | 91 | */ |
92 | int request_dma(unsigned int channel, const char *device_id) | 92 | int request_dma(unsigned int channel, const char *device_id) |
93 | { | 93 | { |
94 | pr_debug("request_dma() : BEGIN \n"); | 94 | pr_debug("request_dma() : BEGIN\n"); |
95 | 95 | ||
96 | if (device_id == NULL) | 96 | if (device_id == NULL) |
97 | printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel); | 97 | printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel); |
@@ -107,7 +107,7 @@ int request_dma(unsigned int channel, const char *device_id) | |||
107 | #endif | 107 | #endif |
108 | 108 | ||
109 | if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) { | 109 | if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) { |
110 | pr_debug("DMA CHANNEL IN USE \n"); | 110 | pr_debug("DMA CHANNEL IN USE\n"); |
111 | return -EBUSY; | 111 | return -EBUSY; |
112 | } | 112 | } |
113 | 113 | ||
@@ -131,7 +131,7 @@ int request_dma(unsigned int channel, const char *device_id) | |||
131 | * you have to request DMA, before doing any operations on | 131 | * you have to request DMA, before doing any operations on |
132 | * descriptor/channel | 132 | * descriptor/channel |
133 | */ | 133 | */ |
134 | pr_debug("request_dma() : END \n"); | 134 | pr_debug("request_dma() : END\n"); |
135 | return 0; | 135 | return 0; |
136 | } | 136 | } |
137 | EXPORT_SYMBOL(request_dma); | 137 | EXPORT_SYMBOL(request_dma); |
@@ -171,7 +171,7 @@ static void clear_dma_buffer(unsigned int channel) | |||
171 | 171 | ||
172 | void free_dma(unsigned int channel) | 172 | void free_dma(unsigned int channel) |
173 | { | 173 | { |
174 | pr_debug("freedma() : BEGIN \n"); | 174 | pr_debug("freedma() : BEGIN\n"); |
175 | BUG_ON(channel >= MAX_DMA_CHANNELS || | 175 | BUG_ON(channel >= MAX_DMA_CHANNELS || |
176 | !atomic_read(&dma_ch[channel].chan_status)); | 176 | !atomic_read(&dma_ch[channel].chan_status)); |
177 | 177 | ||
@@ -185,7 +185,7 @@ void free_dma(unsigned int channel) | |||
185 | /* Clear the DMA Variable in the Channel */ | 185 | /* Clear the DMA Variable in the Channel */ |
186 | atomic_set(&dma_ch[channel].chan_status, 0); | 186 | atomic_set(&dma_ch[channel].chan_status, 0); |
187 | 187 | ||
188 | pr_debug("freedma() : END \n"); | 188 | pr_debug("freedma() : END\n"); |
189 | } | 189 | } |
190 | EXPORT_SYMBOL(free_dma); | 190 | EXPORT_SYMBOL(free_dma); |
191 | 191 | ||
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c index a174596cc009..42833ee2b308 100644 --- a/arch/blackfin/kernel/bfin_gpio.c +++ b/arch/blackfin/kernel/bfin_gpio.c | |||
@@ -475,9 +475,7 @@ GET_GPIO_P(maskb) | |||
475 | 475 | ||
476 | 476 | ||
477 | #ifdef CONFIG_PM | 477 | #ifdef CONFIG_PM |
478 | |||
479 | static unsigned short wakeup_map[GPIO_BANK_NUM]; | 478 | static unsigned short wakeup_map[GPIO_BANK_NUM]; |
480 | static unsigned char wakeup_flags_map[MAX_BLACKFIN_GPIOS]; | ||
481 | 479 | ||
482 | static const unsigned int sic_iwr_irqs[] = { | 480 | static const unsigned int sic_iwr_irqs[] = { |
483 | #if defined(BF533_FAMILY) | 481 | #if defined(BF533_FAMILY) |
@@ -514,112 +512,26 @@ static const unsigned int sic_iwr_irqs[] = { | |||
514 | ************************************************************* | 512 | ************************************************************* |
515 | * MODIFICATION HISTORY : | 513 | * MODIFICATION HISTORY : |
516 | **************************************************************/ | 514 | **************************************************************/ |
517 | int gpio_pm_wakeup_request(unsigned gpio, unsigned char type) | 515 | int gpio_pm_wakeup_ctrl(unsigned gpio, unsigned ctrl) |
518 | { | ||
519 | unsigned long flags; | ||
520 | |||
521 | if ((check_gpio(gpio) < 0) || !type) | ||
522 | return -EINVAL; | ||
523 | |||
524 | local_irq_save_hw(flags); | ||
525 | wakeup_map[gpio_bank(gpio)] |= gpio_bit(gpio); | ||
526 | wakeup_flags_map[gpio] = type; | ||
527 | local_irq_restore_hw(flags); | ||
528 | |||
529 | return 0; | ||
530 | } | ||
531 | EXPORT_SYMBOL(gpio_pm_wakeup_request); | ||
532 | |||
533 | void gpio_pm_wakeup_free(unsigned gpio) | ||
534 | { | 516 | { |
535 | unsigned long flags; | 517 | unsigned long flags; |
536 | 518 | ||
537 | if (check_gpio(gpio) < 0) | 519 | if (check_gpio(gpio) < 0) |
538 | return; | 520 | return -EINVAL; |
539 | 521 | ||
540 | local_irq_save_hw(flags); | 522 | local_irq_save_hw(flags); |
541 | 523 | if (ctrl) | |
542 | wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); | 524 | wakeup_map[gpio_bank(gpio)] |= gpio_bit(gpio); |
543 | |||
544 | local_irq_restore_hw(flags); | ||
545 | } | ||
546 | EXPORT_SYMBOL(gpio_pm_wakeup_free); | ||
547 | |||
548 | static int bfin_gpio_wakeup_type(unsigned gpio, unsigned char type) | ||
549 | { | ||
550 | port_setup(gpio, GPIO_USAGE); | ||
551 | set_gpio_dir(gpio, 0); | ||
552 | set_gpio_inen(gpio, 1); | ||
553 | |||
554 | if (type & (PM_WAKE_RISING | PM_WAKE_FALLING)) | ||
555 | set_gpio_edge(gpio, 1); | ||
556 | else | ||
557 | set_gpio_edge(gpio, 0); | ||
558 | |||
559 | if ((type & (PM_WAKE_BOTH_EDGES)) == (PM_WAKE_BOTH_EDGES)) | ||
560 | set_gpio_both(gpio, 1); | ||
561 | else | 525 | else |
562 | set_gpio_both(gpio, 0); | 526 | wakeup_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); |
563 | |||
564 | if ((type & (PM_WAKE_FALLING | PM_WAKE_LOW))) | ||
565 | set_gpio_polar(gpio, 1); | ||
566 | else | ||
567 | set_gpio_polar(gpio, 0); | ||
568 | |||
569 | SSYNC(); | ||
570 | |||
571 | return 0; | ||
572 | } | ||
573 | 527 | ||
574 | u32 bfin_pm_standby_setup(void) | 528 | set_gpio_maskb(gpio, ctrl); |
575 | { | 529 | local_irq_restore_hw(flags); |
576 | u16 bank, mask, i, gpio; | ||
577 | |||
578 | for (i = 0; i < MAX_BLACKFIN_GPIOS; i += GPIO_BANKSIZE) { | ||
579 | mask = wakeup_map[gpio_bank(i)]; | ||
580 | bank = gpio_bank(i); | ||
581 | |||
582 | gpio_bank_saved[bank].maskb = gpio_array[bank]->maskb; | ||
583 | gpio_array[bank]->maskb = 0; | ||
584 | |||
585 | if (mask) { | ||
586 | #if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x) | ||
587 | gpio_bank_saved[bank].fer = *port_fer[bank]; | ||
588 | #endif | ||
589 | gpio_bank_saved[bank].inen = gpio_array[bank]->inen; | ||
590 | gpio_bank_saved[bank].polar = gpio_array[bank]->polar; | ||
591 | gpio_bank_saved[bank].dir = gpio_array[bank]->dir; | ||
592 | gpio_bank_saved[bank].edge = gpio_array[bank]->edge; | ||
593 | gpio_bank_saved[bank].both = gpio_array[bank]->both; | ||
594 | gpio_bank_saved[bank].reserved = | ||
595 | reserved_gpio_map[bank]; | ||
596 | |||
597 | gpio = i; | ||
598 | |||
599 | while (mask) { | ||
600 | if ((mask & 1) && (wakeup_flags_map[gpio] != | ||
601 | PM_WAKE_IGNORE)) { | ||
602 | reserved_gpio_map[gpio_bank(gpio)] |= | ||
603 | gpio_bit(gpio); | ||
604 | bfin_gpio_wakeup_type(gpio, | ||
605 | wakeup_flags_map[gpio]); | ||
606 | set_gpio_data(gpio, 0); /*Clear*/ | ||
607 | } | ||
608 | gpio++; | ||
609 | mask >>= 1; | ||
610 | } | ||
611 | |||
612 | bfin_internal_set_wake(sic_iwr_irqs[bank], 1); | ||
613 | gpio_array[bank]->maskb_set = wakeup_map[gpio_bank(i)]; | ||
614 | } | ||
615 | } | ||
616 | |||
617 | AWA_DUMMY_READ(maskb_set); | ||
618 | 530 | ||
619 | return 0; | 531 | return 0; |
620 | } | 532 | } |
621 | 533 | ||
622 | void bfin_pm_standby_restore(void) | 534 | int bfin_pm_standby_ctrl(unsigned ctrl) |
623 | { | 535 | { |
624 | u16 bank, mask, i; | 536 | u16 bank, mask, i; |
625 | 537 | ||
@@ -627,24 +539,10 @@ void bfin_pm_standby_restore(void) | |||
627 | mask = wakeup_map[gpio_bank(i)]; | 539 | mask = wakeup_map[gpio_bank(i)]; |
628 | bank = gpio_bank(i); | 540 | bank = gpio_bank(i); |
629 | 541 | ||
630 | if (mask) { | 542 | if (mask) |
631 | #if defined(CONFIG_BF52x) || defined(BF537_FAMILY) || defined(CONFIG_BF51x) | 543 | bfin_internal_set_wake(sic_iwr_irqs[bank], ctrl); |
632 | *port_fer[bank] = gpio_bank_saved[bank].fer; | ||
633 | #endif | ||
634 | gpio_array[bank]->inen = gpio_bank_saved[bank].inen; | ||
635 | gpio_array[bank]->dir = gpio_bank_saved[bank].dir; | ||
636 | gpio_array[bank]->polar = gpio_bank_saved[bank].polar; | ||
637 | gpio_array[bank]->edge = gpio_bank_saved[bank].edge; | ||
638 | gpio_array[bank]->both = gpio_bank_saved[bank].both; | ||
639 | |||
640 | reserved_gpio_map[bank] = | ||
641 | gpio_bank_saved[bank].reserved; | ||
642 | bfin_internal_set_wake(sic_iwr_irqs[bank], 0); | ||
643 | } | ||
644 | |||
645 | gpio_array[bank]->maskb = gpio_bank_saved[bank].maskb; | ||
646 | } | 544 | } |
647 | AWA_DUMMY_READ(maskb); | 545 | return 0; |
648 | } | 546 | } |
649 | 547 | ||
650 | void bfin_gpio_pm_hibernate_suspend(void) | 548 | void bfin_gpio_pm_hibernate_suspend(void) |
@@ -708,16 +606,11 @@ void bfin_gpio_pm_hibernate_restore(void) | |||
708 | #else /* CONFIG_BF54x */ | 606 | #else /* CONFIG_BF54x */ |
709 | #ifdef CONFIG_PM | 607 | #ifdef CONFIG_PM |
710 | 608 | ||
711 | u32 bfin_pm_standby_setup(void) | 609 | int bfin_pm_standby_ctrl(unsigned ctrl) |
712 | { | 610 | { |
713 | return 0; | 611 | return 0; |
714 | } | 612 | } |
715 | 613 | ||
716 | void bfin_pm_standby_restore(void) | ||
717 | { | ||
718 | |||
719 | } | ||
720 | |||
721 | void bfin_gpio_pm_hibernate_suspend(void) | 614 | void bfin_gpio_pm_hibernate_suspend(void) |
722 | { | 615 | { |
723 | int i, bank; | 616 | int i, bank; |
@@ -1289,44 +1182,50 @@ __initcall(gpio_register_proc); | |||
1289 | #endif | 1182 | #endif |
1290 | 1183 | ||
1291 | #ifdef CONFIG_GPIOLIB | 1184 | #ifdef CONFIG_GPIOLIB |
1292 | int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio) | 1185 | static int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio) |
1293 | { | 1186 | { |
1294 | return bfin_gpio_direction_input(gpio); | 1187 | return bfin_gpio_direction_input(gpio); |
1295 | } | 1188 | } |
1296 | 1189 | ||
1297 | int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level) | 1190 | static int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level) |
1298 | { | 1191 | { |
1299 | return bfin_gpio_direction_output(gpio, level); | 1192 | return bfin_gpio_direction_output(gpio, level); |
1300 | } | 1193 | } |
1301 | 1194 | ||
1302 | int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio) | 1195 | static int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio) |
1303 | { | 1196 | { |
1304 | return bfin_gpio_get_value(gpio); | 1197 | return bfin_gpio_get_value(gpio); |
1305 | } | 1198 | } |
1306 | 1199 | ||
1307 | void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value) | 1200 | static void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value) |
1308 | { | 1201 | { |
1309 | return bfin_gpio_set_value(gpio, value); | 1202 | return bfin_gpio_set_value(gpio, value); |
1310 | } | 1203 | } |
1311 | 1204 | ||
1312 | int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio) | 1205 | static int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio) |
1313 | { | 1206 | { |
1314 | return bfin_gpio_request(gpio, chip->label); | 1207 | return bfin_gpio_request(gpio, chip->label); |
1315 | } | 1208 | } |
1316 | 1209 | ||
1317 | void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio) | 1210 | static void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio) |
1318 | { | 1211 | { |
1319 | return bfin_gpio_free(gpio); | 1212 | return bfin_gpio_free(gpio); |
1320 | } | 1213 | } |
1321 | 1214 | ||
1215 | static int bfin_gpiolib_gpio_to_irq(struct gpio_chip *chip, unsigned gpio) | ||
1216 | { | ||
1217 | return gpio + GPIO_IRQ_BASE; | ||
1218 | } | ||
1219 | |||
1322 | static struct gpio_chip bfin_chip = { | 1220 | static struct gpio_chip bfin_chip = { |
1323 | .label = "Blackfin-GPIOlib", | 1221 | .label = "BFIN-GPIO", |
1324 | .direction_input = bfin_gpiolib_direction_input, | 1222 | .direction_input = bfin_gpiolib_direction_input, |
1325 | .get = bfin_gpiolib_get_value, | 1223 | .get = bfin_gpiolib_get_value, |
1326 | .direction_output = bfin_gpiolib_direction_output, | 1224 | .direction_output = bfin_gpiolib_direction_output, |
1327 | .set = bfin_gpiolib_set_value, | 1225 | .set = bfin_gpiolib_set_value, |
1328 | .request = bfin_gpiolib_gpio_request, | 1226 | .request = bfin_gpiolib_gpio_request, |
1329 | .free = bfin_gpiolib_gpio_free, | 1227 | .free = bfin_gpiolib_gpio_free, |
1228 | .to_irq = bfin_gpiolib_gpio_to_irq, | ||
1330 | .base = 0, | 1229 | .base = 0, |
1331 | .ngpio = MAX_BLACKFIN_GPIOS, | 1230 | .ngpio = MAX_BLACKFIN_GPIOS, |
1332 | }; | 1231 | }; |
diff --git a/arch/blackfin/kernel/bfin_ksyms.c b/arch/blackfin/kernel/bfin_ksyms.c index ed8392c117ea..2c264b51566a 100644 --- a/arch/blackfin/kernel/bfin_ksyms.c +++ b/arch/blackfin/kernel/bfin_ksyms.c | |||
@@ -33,6 +33,18 @@ EXPORT_SYMBOL(memmove); | |||
33 | EXPORT_SYMBOL(memchr); | 33 | EXPORT_SYMBOL(memchr); |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Because string functions are both inline and exported functions and | ||
37 | * folder arch/blackfin/lib is configured as a library path in Makefile, | ||
38 | * symbols exported in folder lib is not linked into built-in.o but | ||
39 | * inlined only. In order to export string symbols to kernel module | ||
40 | * properly, they should be exported here. | ||
41 | */ | ||
42 | EXPORT_SYMBOL(strcpy); | ||
43 | EXPORT_SYMBOL(strncpy); | ||
44 | EXPORT_SYMBOL(strcmp); | ||
45 | EXPORT_SYMBOL(strncmp); | ||
46 | |||
47 | /* | ||
36 | * libgcc functions - functions that are used internally by the | 48 | * libgcc functions - functions that are used internally by the |
37 | * compiler... (prototypes are not correct though, but that | 49 | * compiler... (prototypes are not correct though, but that |
38 | * doesn't really matter since they're not versioned). | 50 | * doesn't really matter since they're not versioned). |
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c index 8d42b9e50dfa..30fd6417f069 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c | |||
@@ -64,6 +64,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) | |||
64 | icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0); | 64 | icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0); |
65 | } | 65 | } |
66 | 66 | ||
67 | #ifdef CONFIG_ROMKERNEL | ||
68 | /* Cover kernel XIP flash area */ | ||
69 | addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1); | ||
70 | dcplb_tbl[cpu][i_d].addr = addr; | ||
71 | dcplb_tbl[cpu][i_d++].data = d_data | CPLB_USER_RD; | ||
72 | icplb_tbl[cpu][i_i].addr = addr; | ||
73 | icplb_tbl[cpu][i_i++].data = i_data | CPLB_USER_RD; | ||
74 | #endif | ||
75 | |||
67 | /* Cover L1 memory. One 4M area for code and data each is enough. */ | 76 | /* Cover L1 memory. One 4M area for code and data each is enough. */ |
68 | #if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0 | 77 | #if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0 |
69 | dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu); | 78 | dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu); |
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c index 930c01c06813..87b25b1b30ed 100644 --- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c +++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c | |||
@@ -31,6 +31,12 @@ int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS]; | |||
31 | int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS]; | 31 | int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS]; |
32 | int nr_cplb_flush[NR_CPUS]; | 32 | int nr_cplb_flush[NR_CPUS]; |
33 | 33 | ||
34 | #ifdef CONFIG_EXCPT_IRQ_SYSC_L1 | ||
35 | #define MGR_ATTR __attribute__((l1_text)) | ||
36 | #else | ||
37 | #define MGR_ATTR | ||
38 | #endif | ||
39 | |||
34 | /* | 40 | /* |
35 | * Given the contents of the status register, return the index of the | 41 | * Given the contents of the status register, return the index of the |
36 | * CPLB that caused the fault. | 42 | * CPLB that caused the fault. |
@@ -59,7 +65,7 @@ static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS]; | |||
59 | /* | 65 | /* |
60 | * Find an ICPLB entry to be evicted and return its index. | 66 | * Find an ICPLB entry to be evicted and return its index. |
61 | */ | 67 | */ |
62 | static int evict_one_icplb(unsigned int cpu) | 68 | MGR_ATTR static int evict_one_icplb(unsigned int cpu) |
63 | { | 69 | { |
64 | int i; | 70 | int i; |
65 | for (i = first_switched_icplb; i < MAX_CPLBS; i++) | 71 | for (i = first_switched_icplb; i < MAX_CPLBS; i++) |
@@ -74,7 +80,7 @@ static int evict_one_icplb(unsigned int cpu) | |||
74 | return i; | 80 | return i; |
75 | } | 81 | } |
76 | 82 | ||
77 | static int evict_one_dcplb(unsigned int cpu) | 83 | MGR_ATTR static int evict_one_dcplb(unsigned int cpu) |
78 | { | 84 | { |
79 | int i; | 85 | int i; |
80 | for (i = first_switched_dcplb; i < MAX_CPLBS; i++) | 86 | for (i = first_switched_dcplb; i < MAX_CPLBS; i++) |
@@ -89,7 +95,7 @@ static int evict_one_dcplb(unsigned int cpu) | |||
89 | return i; | 95 | return i; |
90 | } | 96 | } |
91 | 97 | ||
92 | static noinline int dcplb_miss(unsigned int cpu) | 98 | MGR_ATTR static noinline int dcplb_miss(unsigned int cpu) |
93 | { | 99 | { |
94 | unsigned long addr = bfin_read_DCPLB_FAULT_ADDR(); | 100 | unsigned long addr = bfin_read_DCPLB_FAULT_ADDR(); |
95 | int status = bfin_read_DCPLB_STATUS(); | 101 | int status = bfin_read_DCPLB_STATUS(); |
@@ -114,10 +120,15 @@ static noinline int dcplb_miss(unsigned int cpu) | |||
114 | d_data = L2_DMEMORY; | 120 | d_data = L2_DMEMORY; |
115 | } else if (addr >= physical_mem_end) { | 121 | } else if (addr >= physical_mem_end) { |
116 | if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) { | 122 | if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) { |
117 | addr &= ~(4 * 1024 * 1024 - 1); | 123 | mask = current_rwx_mask[cpu]; |
118 | d_data &= ~PAGE_SIZE_4KB; | 124 | if (mask) { |
119 | d_data |= PAGE_SIZE_4MB; | 125 | int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT; |
120 | d_data |= CPLB_USER_RD | CPLB_USER_WR; | 126 | int idx = page >> 5; |
127 | int bit = 1 << (page & 31); | ||
128 | |||
129 | if (mask[idx] & bit) | ||
130 | d_data |= CPLB_USER_RD; | ||
131 | } | ||
121 | } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH | 132 | } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH |
122 | && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) { | 133 | && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) { |
123 | addr &= ~(1 * 1024 * 1024 - 1); | 134 | addr &= ~(1 * 1024 * 1024 - 1); |
@@ -126,7 +137,9 @@ static noinline int dcplb_miss(unsigned int cpu) | |||
126 | } else | 137 | } else |
127 | return CPLB_PROT_VIOL; | 138 | return CPLB_PROT_VIOL; |
128 | } else if (addr >= _ramend) { | 139 | } else if (addr >= _ramend) { |
129 | d_data |= CPLB_USER_RD | CPLB_USER_WR; | 140 | d_data |= CPLB_USER_RD | CPLB_USER_WR; |
141 | if (reserved_mem_dcache_on) | ||
142 | d_data |= CPLB_L1_CHBL; | ||
130 | } else { | 143 | } else { |
131 | mask = current_rwx_mask[cpu]; | 144 | mask = current_rwx_mask[cpu]; |
132 | if (mask) { | 145 | if (mask) { |
@@ -156,7 +169,7 @@ static noinline int dcplb_miss(unsigned int cpu) | |||
156 | return 0; | 169 | return 0; |
157 | } | 170 | } |
158 | 171 | ||
159 | static noinline int icplb_miss(unsigned int cpu) | 172 | MGR_ATTR static noinline int icplb_miss(unsigned int cpu) |
160 | { | 173 | { |
161 | unsigned long addr = bfin_read_ICPLB_FAULT_ADDR(); | 174 | unsigned long addr = bfin_read_ICPLB_FAULT_ADDR(); |
162 | int status = bfin_read_ICPLB_STATUS(); | 175 | int status = bfin_read_ICPLB_STATUS(); |
@@ -204,10 +217,19 @@ static noinline int icplb_miss(unsigned int cpu) | |||
204 | i_data = L2_IMEMORY; | 217 | i_data = L2_IMEMORY; |
205 | } else if (addr >= physical_mem_end) { | 218 | } else if (addr >= physical_mem_end) { |
206 | if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) { | 219 | if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) { |
207 | addr &= ~(4 * 1024 * 1024 - 1); | 220 | if (!(status & FAULT_USERSUPV)) { |
208 | i_data &= ~PAGE_SIZE_4KB; | 221 | unsigned long *mask = current_rwx_mask[cpu]; |
209 | i_data |= PAGE_SIZE_4MB; | 222 | |
210 | i_data |= CPLB_USER_RD; | 223 | if (mask) { |
224 | int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT; | ||
225 | int idx = page >> 5; | ||
226 | int bit = 1 << (page & 31); | ||
227 | |||
228 | mask += 2 * page_mask_nelts; | ||
229 | if (mask[idx] & bit) | ||
230 | i_data |= CPLB_USER_RD; | ||
231 | } | ||
232 | } | ||
211 | } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH | 233 | } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH |
212 | && (status & FAULT_USERSUPV)) { | 234 | && (status & FAULT_USERSUPV)) { |
213 | addr &= ~(1 * 1024 * 1024 - 1); | 235 | addr &= ~(1 * 1024 * 1024 - 1); |
@@ -217,6 +239,8 @@ static noinline int icplb_miss(unsigned int cpu) | |||
217 | return CPLB_PROT_VIOL; | 239 | return CPLB_PROT_VIOL; |
218 | } else if (addr >= _ramend) { | 240 | } else if (addr >= _ramend) { |
219 | i_data |= CPLB_USER_RD; | 241 | i_data |= CPLB_USER_RD; |
242 | if (reserved_mem_icache_on) | ||
243 | i_data |= CPLB_L1_CHBL; | ||
220 | } else { | 244 | } else { |
221 | /* | 245 | /* |
222 | * Two cases to distinguish - a supervisor access must | 246 | * Two cases to distinguish - a supervisor access must |
@@ -251,7 +275,7 @@ static noinline int icplb_miss(unsigned int cpu) | |||
251 | return 0; | 275 | return 0; |
252 | } | 276 | } |
253 | 277 | ||
254 | static noinline int dcplb_protection_fault(unsigned int cpu) | 278 | MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu) |
255 | { | 279 | { |
256 | int status = bfin_read_DCPLB_STATUS(); | 280 | int status = bfin_read_DCPLB_STATUS(); |
257 | 281 | ||
@@ -271,7 +295,7 @@ static noinline int dcplb_protection_fault(unsigned int cpu) | |||
271 | return CPLB_PROT_VIOL; | 295 | return CPLB_PROT_VIOL; |
272 | } | 296 | } |
273 | 297 | ||
274 | int cplb_hdr(int seqstat, struct pt_regs *regs) | 298 | MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs) |
275 | { | 299 | { |
276 | int cause = seqstat & 0x3f; | 300 | int cause = seqstat & 0x3f; |
277 | unsigned int cpu = raw_smp_processor_id(); | 301 | unsigned int cpu = raw_smp_processor_id(); |
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c index 282a7919821b..bfe75af4e8bd 100644 --- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c +++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c | |||
@@ -56,6 +56,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu) | |||
56 | i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; | 56 | i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; |
57 | } | 57 | } |
58 | 58 | ||
59 | #ifdef CONFIG_ROMKERNEL | ||
60 | /* Cover kernel XIP flash area */ | ||
61 | addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1); | ||
62 | d_tbl[i_d].addr = addr; | ||
63 | d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB; | ||
64 | i_tbl[i_i].addr = addr; | ||
65 | i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; | ||
66 | #endif | ||
67 | |||
59 | /* Cover L1 memory. One 4M area for code and data each is enough. */ | 68 | /* Cover L1 memory. One 4M area for code and data each is enough. */ |
60 | if (cpu == 0) { | 69 | if (cpu == 0) { |
61 | if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) { | 70 | if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) { |
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c index e937f323d82c..04ddcfeb7981 100644 --- a/arch/blackfin/kernel/dma-mapping.c +++ b/arch/blackfin/kernel/dma-mapping.c | |||
@@ -116,7 +116,7 @@ EXPORT_SYMBOL(dma_free_coherent); | |||
116 | void __dma_sync(dma_addr_t addr, size_t size, | 116 | void __dma_sync(dma_addr_t addr, size_t size, |
117 | enum dma_data_direction dir) | 117 | enum dma_data_direction dir) |
118 | { | 118 | { |
119 | _dma_sync(addr, size, dir); | 119 | __dma_sync_inline(addr, size, dir); |
120 | } | 120 | } |
121 | EXPORT_SYMBOL(__dma_sync); | 121 | EXPORT_SYMBOL(__dma_sync); |
122 | 122 | ||
diff --git a/arch/blackfin/kernel/dumpstack.c b/arch/blackfin/kernel/dumpstack.c new file mode 100644 index 000000000000..5cfbaa298211 --- /dev/null +++ b/arch/blackfin/kernel/dumpstack.c | |||
@@ -0,0 +1,174 @@ | |||
1 | /* Provide basic stack dumping functions | ||
2 | * | ||
3 | * Copyright 2004-2009 Analog Devices Inc. | ||
4 | * | ||
5 | * Licensed under the GPL-2 or later | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/thread_info.h> | ||
10 | #include <linux/mm.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <asm/trace.h> | ||
14 | |||
15 | /* | ||
16 | * Checks to see if the address pointed to is either a | ||
17 | * 16-bit CALL instruction, or a 32-bit CALL instruction | ||
18 | */ | ||
19 | static bool is_bfin_call(unsigned short *addr) | ||
20 | { | ||
21 | unsigned int opcode; | ||
22 | |||
23 | if (!get_instruction(&opcode, addr)) | ||
24 | return false; | ||
25 | |||
26 | if ((opcode >= 0x0060 && opcode <= 0x0067) || | ||
27 | (opcode >= 0x0070 && opcode <= 0x0077) || | ||
28 | (opcode >= 0xE3000000 && opcode <= 0xE3FFFFFF)) | ||
29 | return true; | ||
30 | |||
31 | return false; | ||
32 | |||
33 | } | ||
34 | |||
35 | void show_stack(struct task_struct *task, unsigned long *stack) | ||
36 | { | ||
37 | #ifdef CONFIG_PRINTK | ||
38 | unsigned int *addr, *endstack, *fp = 0, *frame; | ||
39 | unsigned short *ins_addr; | ||
40 | char buf[150]; | ||
41 | unsigned int i, j, ret_addr, frame_no = 0; | ||
42 | |||
43 | /* | ||
44 | * If we have been passed a specific stack, use that one otherwise | ||
45 | * if we have been passed a task structure, use that, otherwise | ||
46 | * use the stack of where the variable "stack" exists | ||
47 | */ | ||
48 | |||
49 | if (stack == NULL) { | ||
50 | if (task) { | ||
51 | /* We know this is a kernel stack, so this is the start/end */ | ||
52 | stack = (unsigned long *)task->thread.ksp; | ||
53 | endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE); | ||
54 | } else { | ||
55 | /* print out the existing stack info */ | ||
56 | stack = (unsigned long *)&stack; | ||
57 | endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack); | ||
58 | } | ||
59 | } else | ||
60 | endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack); | ||
61 | |||
62 | printk(KERN_NOTICE "Stack info:\n"); | ||
63 | decode_address(buf, (unsigned int)stack); | ||
64 | printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf); | ||
65 | |||
66 | if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) { | ||
67 | printk(KERN_NOTICE "Invalid stack pointer\n"); | ||
68 | return; | ||
69 | } | ||
70 | |||
71 | /* First thing is to look for a frame pointer */ | ||
72 | for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) { | ||
73 | if (*addr & 0x1) | ||
74 | continue; | ||
75 | ins_addr = (unsigned short *)*addr; | ||
76 | ins_addr--; | ||
77 | if (is_bfin_call(ins_addr)) | ||
78 | fp = addr - 1; | ||
79 | |||
80 | if (fp) { | ||
81 | /* Let's check to see if it is a frame pointer */ | ||
82 | while (fp >= (addr - 1) && fp < endstack | ||
83 | && fp && ((unsigned int) fp & 0x3) == 0) | ||
84 | fp = (unsigned int *)*fp; | ||
85 | if (fp == 0 || fp == endstack) { | ||
86 | fp = addr - 1; | ||
87 | break; | ||
88 | } | ||
89 | fp = 0; | ||
90 | } | ||
91 | } | ||
92 | if (fp) { | ||
93 | frame = fp; | ||
94 | printk(KERN_NOTICE " FP: (0x%p)\n", fp); | ||
95 | } else | ||
96 | frame = 0; | ||
97 | |||
98 | /* | ||
99 | * Now that we think we know where things are, we | ||
100 | * walk the stack again, this time printing things out | ||
101 | * incase there is no frame pointer, we still look for | ||
102 | * valid return addresses | ||
103 | */ | ||
104 | |||
105 | /* First time print out data, next time, print out symbols */ | ||
106 | for (j = 0; j <= 1; j++) { | ||
107 | if (j) | ||
108 | printk(KERN_NOTICE "Return addresses in stack:\n"); | ||
109 | else | ||
110 | printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack); | ||
111 | |||
112 | fp = frame; | ||
113 | frame_no = 0; | ||
114 | |||
115 | for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0; | ||
116 | addr < endstack; addr++, i++) { | ||
117 | |||
118 | ret_addr = 0; | ||
119 | if (!j && i % 8 == 0) | ||
120 | printk(KERN_NOTICE "%p:", addr); | ||
121 | |||
122 | /* if it is an odd address, or zero, just skip it */ | ||
123 | if (*addr & 0x1 || !*addr) | ||
124 | goto print; | ||
125 | |||
126 | ins_addr = (unsigned short *)*addr; | ||
127 | |||
128 | /* Go back one instruction, and see if it is a CALL */ | ||
129 | ins_addr--; | ||
130 | ret_addr = is_bfin_call(ins_addr); | ||
131 | print: | ||
132 | if (!j && stack == (unsigned long *)addr) | ||
133 | printk("[%08x]", *addr); | ||
134 | else if (ret_addr) | ||
135 | if (j) { | ||
136 | decode_address(buf, (unsigned int)*addr); | ||
137 | if (frame == addr) { | ||
138 | printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf); | ||
139 | continue; | ||
140 | } | ||
141 | printk(KERN_NOTICE " address : %s\n", buf); | ||
142 | } else | ||
143 | printk("<%08x>", *addr); | ||
144 | else if (fp == addr) { | ||
145 | if (j) | ||
146 | frame = addr+1; | ||
147 | else | ||
148 | printk("(%08x)", *addr); | ||
149 | |||
150 | fp = (unsigned int *)*addr; | ||
151 | frame_no++; | ||
152 | |||
153 | } else if (!j) | ||
154 | printk(" %08x ", *addr); | ||
155 | } | ||
156 | if (!j) | ||
157 | printk("\n"); | ||
158 | } | ||
159 | #endif | ||
160 | } | ||
161 | EXPORT_SYMBOL(show_stack); | ||
162 | |||
163 | void dump_stack(void) | ||
164 | { | ||
165 | unsigned long stack; | ||
166 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | ||
167 | int tflags; | ||
168 | #endif | ||
169 | trace_buffer_save(tflags); | ||
170 | dump_bfin_trace_buffer(); | ||
171 | show_stack(current, &stack); | ||
172 | trace_buffer_restore(tflags); | ||
173 | } | ||
174 | EXPORT_SYMBOL(dump_stack); | ||
diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S index f27dc2292e1b..686478f5f66b 100644 --- a/arch/blackfin/kernel/entry.S +++ b/arch/blackfin/kernel/entry.S | |||
@@ -44,7 +44,7 @@ ENTRY(_ret_from_fork) | |||
44 | sti r4; | 44 | sti r4; |
45 | #endif /* CONFIG_IPIPE */ | 45 | #endif /* CONFIG_IPIPE */ |
46 | SP += -12; | 46 | SP += -12; |
47 | call _schedule_tail; | 47 | pseudo_long_call _schedule_tail, p5; |
48 | SP += 12; | 48 | SP += 12; |
49 | r0 = [sp + PT_IPEND]; | 49 | r0 = [sp + PT_IPEND]; |
50 | cc = bittst(r0,1); | 50 | cc = bittst(r0,1); |
@@ -79,7 +79,7 @@ ENTRY(_sys_vfork) | |||
79 | r0 += 24; | 79 | r0 += 24; |
80 | [--sp] = rets; | 80 | [--sp] = rets; |
81 | SP += -12; | 81 | SP += -12; |
82 | call _bfin_vfork; | 82 | pseudo_long_call _bfin_vfork, p2; |
83 | SP += 12; | 83 | SP += 12; |
84 | rets = [sp++]; | 84 | rets = [sp++]; |
85 | rts; | 85 | rts; |
@@ -90,7 +90,7 @@ ENTRY(_sys_clone) | |||
90 | r0 += 24; | 90 | r0 += 24; |
91 | [--sp] = rets; | 91 | [--sp] = rets; |
92 | SP += -12; | 92 | SP += -12; |
93 | call _bfin_clone; | 93 | pseudo_long_call _bfin_clone, p2; |
94 | SP += 12; | 94 | SP += 12; |
95 | rets = [sp++]; | 95 | rets = [sp++]; |
96 | rts; | 96 | rts; |
@@ -101,7 +101,7 @@ ENTRY(_sys_rt_sigreturn) | |||
101 | r0 += 24; | 101 | r0 += 24; |
102 | [--sp] = rets; | 102 | [--sp] = rets; |
103 | SP += -12; | 103 | SP += -12; |
104 | call _do_rt_sigreturn; | 104 | pseudo_long_call _do_rt_sigreturn, p2; |
105 | SP += 12; | 105 | SP += 12; |
106 | rets = [sp++]; | 106 | rets = [sp++]; |
107 | rts; | 107 | rts; |
diff --git a/arch/blackfin/kernel/exception.c b/arch/blackfin/kernel/exception.c new file mode 100644 index 000000000000..9208b5fd5186 --- /dev/null +++ b/arch/blackfin/kernel/exception.c | |||
@@ -0,0 +1,45 @@ | |||
1 | /* Basic functions for adding/removing custom exception handlers | ||
2 | * | ||
3 | * Copyright 2004-2009 Analog Devices Inc. | ||
4 | * | ||
5 | * Licensed under the GPL-2 or later | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <asm/irq_handler.h> | ||
10 | |||
11 | int bfin_request_exception(unsigned int exception, void (*handler)(void)) | ||
12 | { | ||
13 | void (*curr_handler)(void); | ||
14 | |||
15 | if (exception > 0x3F) | ||
16 | return -EINVAL; | ||
17 | |||
18 | curr_handler = ex_table[exception]; | ||
19 | |||
20 | if (curr_handler != ex_replaceable) | ||
21 | return -EBUSY; | ||
22 | |||
23 | ex_table[exception] = handler; | ||
24 | |||
25 | return 0; | ||
26 | } | ||
27 | EXPORT_SYMBOL(bfin_request_exception); | ||
28 | |||
29 | int bfin_free_exception(unsigned int exception, void (*handler)(void)) | ||
30 | { | ||
31 | void (*curr_handler)(void); | ||
32 | |||
33 | if (exception > 0x3F) | ||
34 | return -EINVAL; | ||
35 | |||
36 | curr_handler = ex_table[exception]; | ||
37 | |||
38 | if (curr_handler != handler) | ||
39 | return -EBUSY; | ||
40 | |||
41 | ex_table[exception] = ex_replaceable; | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | EXPORT_SYMBOL(bfin_free_exception); | ||
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S index 76dd4fbcd17a..d66446b572c0 100644 --- a/arch/blackfin/kernel/ftrace-entry.S +++ b/arch/blackfin/kernel/ftrace-entry.S | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * mcount and friends -- ftrace stuff | 2 | * mcount and friends -- ftrace stuff |
3 | * | 3 | * |
4 | * Copyright (C) 2009 Analog Devices Inc. | 4 | * Copyright (C) 2009-2010 Analog Devices Inc. |
5 | * Licensed under the GPL-2 or later. | 5 | * Licensed under the GPL-2 or later. |
6 | */ | 6 | */ |
7 | 7 | ||
@@ -21,6 +21,15 @@ | |||
21 | * function will be waiting there. mmmm pie. | 21 | * function will be waiting there. mmmm pie. |
22 | */ | 22 | */ |
23 | ENTRY(__mcount) | 23 | ENTRY(__mcount) |
24 | #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST | ||
25 | /* optional micro optimization: return if stopped */ | ||
26 | p1.l = _function_trace_stop; | ||
27 | p1.h = _function_trace_stop; | ||
28 | r3 = [p1]; | ||
29 | cc = r3 == 0; | ||
30 | if ! cc jump _ftrace_stub (bp); | ||
31 | #endif | ||
32 | |||
24 | /* save third function arg early so we can do testing below */ | 33 | /* save third function arg early so we can do testing below */ |
25 | [--sp] = r2; | 34 | [--sp] = r2; |
26 | 35 | ||
@@ -106,9 +115,12 @@ ENTRY(_ftrace_graph_caller) | |||
106 | [--sp] = r1; | 115 | [--sp] = r1; |
107 | [--sp] = rets; | 116 | [--sp] = rets; |
108 | 117 | ||
109 | /* prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) */ | 118 | /* prepare_ftrace_return(parent, self_addr, frame_pointer) */ |
110 | r0 = sp; | 119 | r0 = sp; /* unsigned long *parent */ |
111 | r1 = rets; | 120 | r1 = rets; /* unsigned long self_addr */ |
121 | #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST | ||
122 | r2 = fp; /* unsigned long frame_pointer */ | ||
123 | #endif | ||
112 | r0 += 16; /* skip the 4 local regs on stack */ | 124 | r0 += 16; /* skip the 4 local regs on stack */ |
113 | r1 += -MCOUNT_INSN_SIZE; | 125 | r1 += -MCOUNT_INSN_SIZE; |
114 | call _prepare_ftrace_return; | 126 | call _prepare_ftrace_return; |
@@ -127,6 +139,9 @@ ENTRY(_return_to_handler) | |||
127 | [--sp] = r1; | 139 | [--sp] = r1; |
128 | 140 | ||
129 | /* get original return address */ | 141 | /* get original return address */ |
142 | #ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST | ||
143 | r0 = fp; /* Blackfin is sane, so omit this */ | ||
144 | #endif | ||
130 | call _ftrace_return_to_handler; | 145 | call _ftrace_return_to_handler; |
131 | rets = r0; | 146 | rets = r0; |
132 | 147 | ||
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c index f2c85ac6f2da..a61d948ea925 100644 --- a/arch/blackfin/kernel/ftrace.c +++ b/arch/blackfin/kernel/ftrace.c | |||
@@ -16,7 +16,8 @@ | |||
16 | * Hook the return address and push it in the stack of return addrs | 16 | * Hook the return address and push it in the stack of return addrs |
17 | * in current thread info. | 17 | * in current thread info. |
18 | */ | 18 | */ |
19 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | 19 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, |
20 | unsigned long frame_pointer) | ||
20 | { | 21 | { |
21 | struct ftrace_graph_ent trace; | 22 | struct ftrace_graph_ent trace; |
22 | unsigned long return_hooker = (unsigned long)&return_to_handler; | 23 | unsigned long return_hooker = (unsigned long)&return_to_handler; |
@@ -24,7 +25,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
24 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | 25 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) |
25 | return; | 26 | return; |
26 | 27 | ||
27 | if (ftrace_push_return_trace(*parent, self_addr, &trace.depth, 0) == -EBUSY) | 28 | if (ftrace_push_return_trace(*parent, self_addr, &trace.depth, |
29 | frame_pointer) == -EBUSY) | ||
28 | return; | 30 | return; |
29 | 31 | ||
30 | trace.func = self_addr; | 32 | trace.func = self_addr; |
diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c index 118c5b9dedac..d3970e8acd1a 100644 --- a/arch/blackfin/kernel/init_task.c +++ b/arch/blackfin/kernel/init_task.c | |||
@@ -28,5 +28,5 @@ EXPORT_SYMBOL(init_task); | |||
28 | * "init_task" linker map entry. | 28 | * "init_task" linker map entry. |
29 | */ | 29 | */ |
30 | union thread_union init_thread_union | 30 | union thread_union init_thread_union |
31 | __attribute__ ((__section__(".init_task.data"))) = { | 31 | __init_task_data = { |
32 | INIT_THREAD_INFO(init_task)}; | 32 | INIT_THREAD_INFO(init_task)}; |
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c index a77307a4473b..1a496cd71ba2 100644 --- a/arch/blackfin/kernel/ipipe.c +++ b/arch/blackfin/kernel/ipipe.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/interrupt.h> | 27 | #include <linux/interrupt.h> |
28 | #include <linux/percpu.h> | 28 | #include <linux/percpu.h> |
29 | #include <linux/bitops.h> | 29 | #include <linux/bitops.h> |
30 | #include <linux/slab.h> | ||
31 | #include <linux/errno.h> | 30 | #include <linux/errno.h> |
32 | #include <linux/kthread.h> | 31 | #include <linux/kthread.h> |
33 | #include <linux/unistd.h> | 32 | #include <linux/unistd.h> |
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c index 34c7c3ed2c9c..08bc44ea6883 100644 --- a/arch/blackfin/kernel/kgdb.c +++ b/arch/blackfin/kernel/kgdb.c | |||
@@ -66,7 +66,7 @@ void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
66 | gdb_regs[BFIN_RETN] = regs->retn; | 66 | gdb_regs[BFIN_RETN] = regs->retn; |
67 | gdb_regs[BFIN_RETE] = regs->rete; | 67 | gdb_regs[BFIN_RETE] = regs->rete; |
68 | gdb_regs[BFIN_PC] = regs->pc; | 68 | gdb_regs[BFIN_PC] = regs->pc; |
69 | gdb_regs[BFIN_CC] = 0; | 69 | gdb_regs[BFIN_CC] = (regs->astat >> 5) & 1; |
70 | gdb_regs[BFIN_EXTRA1] = 0; | 70 | gdb_regs[BFIN_EXTRA1] = 0; |
71 | gdb_regs[BFIN_EXTRA2] = 0; | 71 | gdb_regs[BFIN_EXTRA2] = 0; |
72 | gdb_regs[BFIN_EXTRA3] = 0; | 72 | gdb_regs[BFIN_EXTRA3] = 0; |
@@ -145,7 +145,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | |||
145 | #endif | 145 | #endif |
146 | } | 146 | } |
147 | 147 | ||
148 | struct hw_breakpoint { | 148 | static struct hw_breakpoint { |
149 | unsigned int occupied:1; | 149 | unsigned int occupied:1; |
150 | unsigned int skip:1; | 150 | unsigned int skip:1; |
151 | unsigned int enabled:1; | 151 | unsigned int enabled:1; |
@@ -155,7 +155,7 @@ struct hw_breakpoint { | |||
155 | unsigned int addr; | 155 | unsigned int addr; |
156 | } breakinfo[HW_WATCHPOINT_NUM]; | 156 | } breakinfo[HW_WATCHPOINT_NUM]; |
157 | 157 | ||
158 | int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) | 158 | static int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) |
159 | { | 159 | { |
160 | int breakno; | 160 | int breakno; |
161 | int bfin_type; | 161 | int bfin_type; |
@@ -202,7 +202,7 @@ int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) | |||
202 | return -ENOSPC; | 202 | return -ENOSPC; |
203 | } | 203 | } |
204 | 204 | ||
205 | int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) | 205 | static int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) |
206 | { | 206 | { |
207 | int breakno; | 207 | int breakno; |
208 | int bfin_type; | 208 | int bfin_type; |
@@ -230,7 +230,7 @@ int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) | |||
230 | return 0; | 230 | return 0; |
231 | } | 231 | } |
232 | 232 | ||
233 | void bfin_remove_all_hw_break(void) | 233 | static void bfin_remove_all_hw_break(void) |
234 | { | 234 | { |
235 | int breakno; | 235 | int breakno; |
236 | 236 | ||
@@ -242,7 +242,7 @@ void bfin_remove_all_hw_break(void) | |||
242 | breakinfo[breakno].type = TYPE_DATA_WATCHPOINT; | 242 | breakinfo[breakno].type = TYPE_DATA_WATCHPOINT; |
243 | } | 243 | } |
244 | 244 | ||
245 | void bfin_correct_hw_break(void) | 245 | static void bfin_correct_hw_break(void) |
246 | { | 246 | { |
247 | int breakno; | 247 | int breakno; |
248 | unsigned int wpiactl = 0; | 248 | unsigned int wpiactl = 0; |
@@ -439,6 +439,11 @@ int kgdb_validate_break_address(unsigned long addr) | |||
439 | return -EFAULT; | 439 | return -EFAULT; |
440 | } | 440 | } |
441 | 441 | ||
442 | void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) | ||
443 | { | ||
444 | regs->retx = ip; | ||
445 | } | ||
446 | |||
442 | int kgdb_arch_init(void) | 447 | int kgdb_arch_init(void) |
443 | { | 448 | { |
444 | kgdb_single_step = 0; | 449 | kgdb_single_step = 0; |
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c new file mode 100644 index 000000000000..0b5f72f17fd0 --- /dev/null +++ b/arch/blackfin/kernel/nmi.c | |||
@@ -0,0 +1,299 @@ | |||
1 | /* | ||
2 | * Blackfin nmi_watchdog Driver | ||
3 | * | ||
4 | * Originally based on bfin_wdt.c | ||
5 | * Copyright 2010-2010 Analog Devices Inc. | ||
6 | * Graff Yang <graf.yang@analog.com> | ||
7 | * | ||
8 | * Enter bugs at http://blackfin.uclinux.org/ | ||
9 | * | ||
10 | * Licensed under the GPL-2 or later. | ||
11 | */ | ||
12 | |||
13 | #include <linux/bitops.h> | ||
14 | #include <linux/hardirq.h> | ||
15 | #include <linux/sysdev.h> | ||
16 | #include <linux/pm.h> | ||
17 | #include <linux/nmi.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/timer.h> | ||
20 | #include <asm/blackfin.h> | ||
21 | #include <asm/atomic.h> | ||
22 | #include <asm/cacheflush.h> | ||
23 | #include <asm/bfin_watchdog.h> | ||
24 | |||
25 | #define DRV_NAME "nmi-wdt" | ||
26 | |||
27 | #define NMI_WDT_TIMEOUT 5 /* 5 seconds */ | ||
28 | #define NMI_CHECK_TIMEOUT (4 * HZ) /* 4 seconds in jiffies */ | ||
29 | static int nmi_wdt_cpu = 1; | ||
30 | |||
31 | static unsigned int timeout = NMI_WDT_TIMEOUT; | ||
32 | static int nmi_active; | ||
33 | |||
34 | static unsigned short wdoga_ctl; | ||
35 | static unsigned int wdoga_cnt; | ||
36 | static struct corelock_slot saved_corelock; | ||
37 | static atomic_t nmi_touched[NR_CPUS]; | ||
38 | static struct timer_list ntimer; | ||
39 | |||
40 | enum { | ||
41 | COREA_ENTER_NMI = 0, | ||
42 | COREA_EXIT_NMI, | ||
43 | COREB_EXIT_NMI, | ||
44 | |||
45 | NMI_EVENT_NR, | ||
46 | }; | ||
47 | static unsigned long nmi_event __attribute__ ((__section__(".l2.bss"))); | ||
48 | |||
49 | /* we are in nmi, non-atomic bit ops is safe */ | ||
50 | static inline void set_nmi_event(int event) | ||
51 | { | ||
52 | __set_bit(event, &nmi_event); | ||
53 | } | ||
54 | |||
55 | static inline void wait_nmi_event(int event) | ||
56 | { | ||
57 | while (!test_bit(event, &nmi_event)) | ||
58 | barrier(); | ||
59 | __clear_bit(event, &nmi_event); | ||
60 | } | ||
61 | |||
62 | static inline void send_corea_nmi(void) | ||
63 | { | ||
64 | wdoga_ctl = bfin_read_WDOGA_CTL(); | ||
65 | wdoga_cnt = bfin_read_WDOGA_CNT(); | ||
66 | |||
67 | bfin_write_WDOGA_CTL(WDEN_DISABLE); | ||
68 | bfin_write_WDOGA_CNT(0); | ||
69 | bfin_write_WDOGA_CTL(WDEN_ENABLE | ICTL_NMI); | ||
70 | } | ||
71 | |||
72 | static inline void restore_corea_nmi(void) | ||
73 | { | ||
74 | bfin_write_WDOGA_CTL(WDEN_DISABLE); | ||
75 | bfin_write_WDOGA_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE); | ||
76 | |||
77 | bfin_write_WDOGA_CNT(wdoga_cnt); | ||
78 | bfin_write_WDOGA_CTL(wdoga_ctl); | ||
79 | } | ||
80 | |||
81 | static inline void save_corelock(void) | ||
82 | { | ||
83 | saved_corelock = corelock; | ||
84 | corelock.lock = 0; | ||
85 | } | ||
86 | |||
87 | static inline void restore_corelock(void) | ||
88 | { | ||
89 | corelock = saved_corelock; | ||
90 | } | ||
91 | |||
92 | |||
93 | static inline void nmi_wdt_keepalive(void) | ||
94 | { | ||
95 | bfin_write_WDOGB_STAT(0); | ||
96 | } | ||
97 | |||
98 | static inline void nmi_wdt_stop(void) | ||
99 | { | ||
100 | bfin_write_WDOGB_CTL(WDEN_DISABLE); | ||
101 | } | ||
102 | |||
103 | /* before calling this function, you must stop the WDT */ | ||
104 | static inline void nmi_wdt_clear(void) | ||
105 | { | ||
106 | /* clear TRO bit, disable event generation */ | ||
107 | bfin_write_WDOGB_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE); | ||
108 | } | ||
109 | |||
110 | static inline void nmi_wdt_start(void) | ||
111 | { | ||
112 | bfin_write_WDOGB_CTL(WDEN_ENABLE | ICTL_NMI); | ||
113 | } | ||
114 | |||
115 | static inline int nmi_wdt_running(void) | ||
116 | { | ||
117 | return ((bfin_read_WDOGB_CTL() & WDEN_MASK) != WDEN_DISABLE); | ||
118 | } | ||
119 | |||
120 | static inline int nmi_wdt_set_timeout(unsigned long t) | ||
121 | { | ||
122 | u32 cnt, max_t, sclk; | ||
123 | int run; | ||
124 | |||
125 | sclk = get_sclk(); | ||
126 | max_t = -1 / sclk; | ||
127 | cnt = t * sclk; | ||
128 | if (t > max_t) { | ||
129 | pr_warning("NMI: timeout value is too large\n"); | ||
130 | return -EINVAL; | ||
131 | } | ||
132 | |||
133 | run = nmi_wdt_running(); | ||
134 | nmi_wdt_stop(); | ||
135 | bfin_write_WDOGB_CNT(cnt); | ||
136 | if (run) | ||
137 | nmi_wdt_start(); | ||
138 | |||
139 | timeout = t; | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | int check_nmi_wdt_touched(void) | ||
145 | { | ||
146 | unsigned int this_cpu = smp_processor_id(); | ||
147 | unsigned int cpu; | ||
148 | |||
149 | cpumask_t mask = cpu_online_map; | ||
150 | |||
151 | if (!atomic_read(&nmi_touched[this_cpu])) | ||
152 | return 0; | ||
153 | |||
154 | atomic_set(&nmi_touched[this_cpu], 0); | ||
155 | |||
156 | cpu_clear(this_cpu, mask); | ||
157 | for_each_cpu_mask(cpu, mask) { | ||
158 | invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]), | ||
159 | (unsigned long)(&nmi_touched[cpu])); | ||
160 | if (!atomic_read(&nmi_touched[cpu])) | ||
161 | return 0; | ||
162 | atomic_set(&nmi_touched[cpu], 0); | ||
163 | } | ||
164 | |||
165 | return 1; | ||
166 | } | ||
167 | |||
168 | static void nmi_wdt_timer(unsigned long data) | ||
169 | { | ||
170 | if (check_nmi_wdt_touched()) | ||
171 | nmi_wdt_keepalive(); | ||
172 | |||
173 | mod_timer(&ntimer, jiffies + NMI_CHECK_TIMEOUT); | ||
174 | } | ||
175 | |||
176 | static int __init init_nmi_wdt(void) | ||
177 | { | ||
178 | nmi_wdt_set_timeout(timeout); | ||
179 | nmi_wdt_start(); | ||
180 | nmi_active = true; | ||
181 | |||
182 | init_timer(&ntimer); | ||
183 | ntimer.function = nmi_wdt_timer; | ||
184 | ntimer.expires = jiffies + NMI_CHECK_TIMEOUT; | ||
185 | add_timer(&ntimer); | ||
186 | |||
187 | pr_info("nmi_wdt: initialized: timeout=%d sec\n", timeout); | ||
188 | return 0; | ||
189 | } | ||
190 | device_initcall(init_nmi_wdt); | ||
191 | |||
192 | void touch_nmi_watchdog(void) | ||
193 | { | ||
194 | atomic_set(&nmi_touched[smp_processor_id()], 1); | ||
195 | } | ||
196 | |||
197 | /* Suspend/resume support */ | ||
198 | #ifdef CONFIG_PM | ||
199 | static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state) | ||
200 | { | ||
201 | nmi_wdt_stop(); | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static int nmi_wdt_resume(struct sys_device *dev) | ||
206 | { | ||
207 | if (nmi_active) | ||
208 | nmi_wdt_start(); | ||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static struct sysdev_class nmi_sysclass = { | ||
213 | .name = DRV_NAME, | ||
214 | .resume = nmi_wdt_resume, | ||
215 | .suspend = nmi_wdt_suspend, | ||
216 | }; | ||
217 | |||
218 | static struct sys_device device_nmi_wdt = { | ||
219 | .id = 0, | ||
220 | .cls = &nmi_sysclass, | ||
221 | }; | ||
222 | |||
223 | static int __init init_nmi_wdt_sysfs(void) | ||
224 | { | ||
225 | int error; | ||
226 | |||
227 | if (!nmi_active) | ||
228 | return 0; | ||
229 | |||
230 | error = sysdev_class_register(&nmi_sysclass); | ||
231 | if (!error) | ||
232 | error = sysdev_register(&device_nmi_wdt); | ||
233 | return error; | ||
234 | } | ||
235 | late_initcall(init_nmi_wdt_sysfs); | ||
236 | |||
237 | #endif /* CONFIG_PM */ | ||
238 | |||
239 | |||
240 | asmlinkage notrace void do_nmi(struct pt_regs *fp) | ||
241 | { | ||
242 | unsigned int cpu = smp_processor_id(); | ||
243 | nmi_enter(); | ||
244 | |||
245 | cpu_pda[cpu].__nmi_count += 1; | ||
246 | |||
247 | if (cpu == nmi_wdt_cpu) { | ||
248 | /* CoreB goes here first */ | ||
249 | |||
250 | /* reload the WDOG_STAT */ | ||
251 | nmi_wdt_keepalive(); | ||
252 | |||
253 | /* clear nmi interrupt for CoreB */ | ||
254 | nmi_wdt_stop(); | ||
255 | nmi_wdt_clear(); | ||
256 | |||
257 | /* trigger NMI interrupt of CoreA */ | ||
258 | send_corea_nmi(); | ||
259 | |||
260 | /* wait for CoreA to enter NMI */ | ||
261 | wait_nmi_event(COREA_ENTER_NMI); | ||
262 | |||
263 | /* recover WDOGA's settings */ | ||
264 | restore_corea_nmi(); | ||
265 | |||
266 | save_corelock(); | ||
267 | |||
268 | /* corelock is saved/cleared, CoreA is dumping messages */ | ||
269 | |||
270 | wait_nmi_event(COREA_EXIT_NMI); | ||
271 | } else { | ||
272 | /* OK, CoreA entered NMI */ | ||
273 | set_nmi_event(COREA_ENTER_NMI); | ||
274 | } | ||
275 | |||
276 | pr_emerg("\nNMI Watchdog detected LOCKUP, dump for CPU %d\n", cpu); | ||
277 | dump_bfin_process(fp); | ||
278 | dump_bfin_mem(fp); | ||
279 | show_regs(fp); | ||
280 | dump_bfin_trace_buffer(); | ||
281 | show_stack(current, (unsigned long *)fp); | ||
282 | |||
283 | if (cpu == nmi_wdt_cpu) { | ||
284 | pr_emerg("This fault is not recoverable, sorry!\n"); | ||
285 | |||
286 | /* CoreA dump finished, restore the corelock */ | ||
287 | restore_corelock(); | ||
288 | |||
289 | set_nmi_event(COREB_EXIT_NMI); | ||
290 | } else { | ||
291 | /* CoreB dump finished, notify CoreA that we are done */ | ||
292 | set_nmi_event(COREA_EXIT_NMI); | ||
293 | |||
294 | /* synchronize with CoreA */ | ||
295 | wait_nmi_event(COREB_EXIT_NMI); | ||
296 | } | ||
297 | |||
298 | nmi_exit(); | ||
299 | } | ||
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index b56b0e485e0b..93ec07da2e51 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/unistd.h> | 11 | #include <linux/unistd.h> |
12 | #include <linux/user.h> | 12 | #include <linux/user.h> |
13 | #include <linux/uaccess.h> | 13 | #include <linux/uaccess.h> |
14 | #include <linux/slab.h> | ||
14 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
15 | #include <linux/tick.h> | 16 | #include <linux/tick.h> |
16 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
@@ -98,13 +99,6 @@ void cpu_idle(void) | |||
98 | } | 99 | } |
99 | } | 100 | } |
100 | 101 | ||
101 | /* Fill in the fpu structure for a core dump. */ | ||
102 | |||
103 | int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpregs) | ||
104 | { | ||
105 | return 1; | ||
106 | } | ||
107 | |||
108 | /* | 102 | /* |
109 | * This gets run with P1 containing the | 103 | * This gets run with P1 containing the |
110 | * function to call, and R1 containing | 104 | * function to call, and R1 containing |
diff --git a/arch/blackfin/kernel/pseudodbg.c b/arch/blackfin/kernel/pseudodbg.c new file mode 100644 index 000000000000..db85bc94334e --- /dev/null +++ b/arch/blackfin/kernel/pseudodbg.c | |||
@@ -0,0 +1,191 @@ | |||
1 | /* The fake debug assert instructions | ||
2 | * | ||
3 | * Copyright 2010 Analog Devices Inc. | ||
4 | * | ||
5 | * Licensed under the GPL-2 or later | ||
6 | */ | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/ptrace.h> | ||
11 | |||
12 | const char * const greg_names[] = { | ||
13 | "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", | ||
14 | "P0", "P1", "P2", "P3", "P4", "P5", "SP", "FP", | ||
15 | "I0", "I1", "I2", "I3", "M0", "M1", "M2", "M3", | ||
16 | "B0", "B1", "B2", "B3", "L0", "L1", "L2", "L3", | ||
17 | "A0.X", "A0.W", "A1.X", "A1.W", "<res>", "<res>", "ASTAT", "RETS", | ||
18 | "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", | ||
19 | "LC0", "LT0", "LB0", "LC1", "LT1", "LB1", "CYCLES", "CYCLES2", | ||
20 | "USP", "SEQSTAT", "SYSCFG", "RETI", "RETX", "RETN", "RETE", "EMUDAT", | ||
21 | }; | ||
22 | |||
23 | static const char *get_allreg_name(int grp, int reg) | ||
24 | { | ||
25 | return greg_names[(grp << 3) | reg]; | ||
26 | } | ||
27 | |||
28 | /* | ||
29 | * Unfortunately, the pt_regs structure is not laid out the same way as the | ||
30 | * hardware register file, so we need to do some fix ups. | ||
31 | * | ||
32 | * CYCLES is not stored in the pt_regs structure - so, we just read it from | ||
33 | * the hardware. | ||
34 | * | ||
35 | * Don't support: | ||
36 | * - All reserved registers | ||
37 | * - All in group 7 are (supervisors only) | ||
38 | */ | ||
39 | |||
40 | static bool fix_up_reg(struct pt_regs *fp, long *value, int grp, int reg) | ||
41 | { | ||
42 | long *val = &fp->r0; | ||
43 | unsigned long tmp; | ||
44 | |||
45 | /* Only do Dregs and Pregs for now */ | ||
46 | if (grp == 5 || | ||
47 | (grp == 4 && (reg == 4 || reg == 5)) || | ||
48 | (grp == 7)) | ||
49 | return false; | ||
50 | |||
51 | if (grp == 0 || (grp == 1 && reg < 6)) | ||
52 | val -= (reg + 8 * grp); | ||
53 | else if (grp == 1 && reg == 6) | ||
54 | val = &fp->usp; | ||
55 | else if (grp == 1 && reg == 7) | ||
56 | val = &fp->fp; | ||
57 | else if (grp == 2) { | ||
58 | val = &fp->i0; | ||
59 | val -= reg; | ||
60 | } else if (grp == 3 && reg >= 4) { | ||
61 | val = &fp->l0; | ||
62 | val -= (reg - 4); | ||
63 | } else if (grp == 3 && reg < 4) { | ||
64 | val = &fp->b0; | ||
65 | val -= reg; | ||
66 | } else if (grp == 4 && reg < 4) { | ||
67 | val = &fp->a0x; | ||
68 | val -= reg; | ||
69 | } else if (grp == 4 && reg == 6) | ||
70 | val = &fp->astat; | ||
71 | else if (grp == 4 && reg == 7) | ||
72 | val = &fp->rets; | ||
73 | else if (grp == 6 && reg < 6) { | ||
74 | val = &fp->lc0; | ||
75 | val -= reg; | ||
76 | } else if (grp == 6 && reg == 6) { | ||
77 | __asm__ __volatile__("%0 = cycles;\n" : "=d"(tmp)); | ||
78 | val = &tmp; | ||
79 | } else if (grp == 6 && reg == 7) { | ||
80 | __asm__ __volatile__("%0 = cycles2;\n" : "=d"(tmp)); | ||
81 | val = &tmp; | ||
82 | } | ||
83 | |||
84 | *value = *val; | ||
85 | return true; | ||
86 | |||
87 | } | ||
88 | |||
89 | #define PseudoDbg_Assert_opcode 0xf0000000 | ||
90 | #define PseudoDbg_Assert_expected_bits 0 | ||
91 | #define PseudoDbg_Assert_expected_mask 0xffff | ||
92 | #define PseudoDbg_Assert_regtest_bits 16 | ||
93 | #define PseudoDbg_Assert_regtest_mask 0x7 | ||
94 | #define PseudoDbg_Assert_grp_bits 19 | ||
95 | #define PseudoDbg_Assert_grp_mask 0x7 | ||
96 | #define PseudoDbg_Assert_dbgop_bits 22 | ||
97 | #define PseudoDbg_Assert_dbgop_mask 0x3 | ||
98 | #define PseudoDbg_Assert_dontcare_bits 24 | ||
99 | #define PseudoDbg_Assert_dontcare_mask 0x7 | ||
100 | #define PseudoDbg_Assert_code_bits 27 | ||
101 | #define PseudoDbg_Assert_code_mask 0x1f | ||
102 | |||
103 | /* | ||
104 | * DBGA - debug assert | ||
105 | */ | ||
106 | bool execute_pseudodbg_assert(struct pt_regs *fp, unsigned int opcode) | ||
107 | { | ||
108 | int expected = ((opcode >> PseudoDbg_Assert_expected_bits) & PseudoDbg_Assert_expected_mask); | ||
109 | int dbgop = ((opcode >> (PseudoDbg_Assert_dbgop_bits)) & PseudoDbg_Assert_dbgop_mask); | ||
110 | int grp = ((opcode >> (PseudoDbg_Assert_grp_bits)) & PseudoDbg_Assert_grp_mask); | ||
111 | int regtest = ((opcode >> (PseudoDbg_Assert_regtest_bits)) & PseudoDbg_Assert_regtest_mask); | ||
112 | long value; | ||
113 | |||
114 | if ((opcode & 0xFF000000) != PseudoDbg_Assert_opcode) | ||
115 | return false; | ||
116 | |||
117 | if (!fix_up_reg(fp, &value, grp, regtest)) | ||
118 | return false; | ||
119 | |||
120 | if (dbgop == 0 || dbgop == 2) { | ||
121 | /* DBGA ( regs_lo , uimm16 ) */ | ||
122 | /* DBGAL ( regs , uimm16 ) */ | ||
123 | if (expected != (value & 0xFFFF)) { | ||
124 | pr_notice("DBGA (%s.L,0x%x) failure, got 0x%x\n", | ||
125 | get_allreg_name(grp, regtest), | ||
126 | expected, (unsigned int)(value & 0xFFFF)); | ||
127 | return false; | ||
128 | } | ||
129 | |||
130 | } else if (dbgop == 1 || dbgop == 3) { | ||
131 | /* DBGA ( regs_hi , uimm16 ) */ | ||
132 | /* DBGAH ( regs , uimm16 ) */ | ||
133 | if (expected != ((value >> 16) & 0xFFFF)) { | ||
134 | pr_notice("DBGA (%s.H,0x%x) failure, got 0x%x\n", | ||
135 | get_allreg_name(grp, regtest), | ||
136 | expected, (unsigned int)((value >> 16) & 0xFFFF)); | ||
137 | return false; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | fp->pc += 4; | ||
142 | return true; | ||
143 | } | ||
144 | |||
145 | #define PseudoDbg_opcode 0xf8000000 | ||
146 | #define PseudoDbg_reg_bits 0 | ||
147 | #define PseudoDbg_reg_mask 0x7 | ||
148 | #define PseudoDbg_grp_bits 3 | ||
149 | #define PseudoDbg_grp_mask 0x7 | ||
150 | #define PseudoDbg_fn_bits 6 | ||
151 | #define PseudoDbg_fn_mask 0x3 | ||
152 | #define PseudoDbg_code_bits 8 | ||
153 | #define PseudoDbg_code_mask 0xff | ||
154 | |||
155 | /* | ||
156 | * DBG - debug (dump a register value out) | ||
157 | */ | ||
158 | bool execute_pseudodbg(struct pt_regs *fp, unsigned int opcode) | ||
159 | { | ||
160 | int grp, fn, reg; | ||
161 | long value, value1; | ||
162 | |||
163 | if ((opcode & 0xFF000000) != PseudoDbg_opcode) | ||
164 | return false; | ||
165 | |||
166 | opcode >>= 16; | ||
167 | grp = ((opcode >> PseudoDbg_grp_bits) & PseudoDbg_reg_mask); | ||
168 | fn = ((opcode >> PseudoDbg_fn_bits) & PseudoDbg_fn_mask); | ||
169 | reg = ((opcode >> PseudoDbg_reg_bits) & PseudoDbg_reg_mask); | ||
170 | |||
171 | if (fn == 3 && (reg == 0 || reg == 1)) { | ||
172 | if (!fix_up_reg(fp, &value, 4, 2 * reg)) | ||
173 | return false; | ||
174 | if (!fix_up_reg(fp, &value1, 4, 2 * reg + 1)) | ||
175 | return false; | ||
176 | |||
177 | pr_notice("DBG A%i = %02lx%08lx\n", reg, value & 0xFF, value1); | ||
178 | fp->pc += 2; | ||
179 | return true; | ||
180 | |||
181 | } else if (fn == 0) { | ||
182 | if (!fix_up_reg(fp, &value, grp, reg)) | ||
183 | return false; | ||
184 | |||
185 | pr_notice("DBG %s = %08lx\n", get_allreg_name(grp, reg), value); | ||
186 | fp->pc += 2; | ||
187 | return true; | ||
188 | } | ||
189 | |||
190 | return false; | ||
191 | } | ||
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c index 65567dc4b9f5..6ec77685df52 100644 --- a/arch/blackfin/kernel/ptrace.c +++ b/arch/blackfin/kernel/ptrace.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds | 2 | * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds |
3 | * these modifications are Copyright 2004-2009 Analog Devices Inc. | 3 | * these modifications are Copyright 2004-2010 Analog Devices Inc. |
4 | * | 4 | * |
5 | * Licensed under the GPL-2 | 5 | * Licensed under the GPL-2 |
6 | */ | 6 | */ |
@@ -9,10 +9,13 @@ | |||
9 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
10 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
11 | #include <linux/smp.h> | 11 | #include <linux/smp.h> |
12 | #include <linux/elf.h> | ||
12 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
13 | #include <linux/ptrace.h> | 14 | #include <linux/ptrace.h> |
14 | #include <linux/user.h> | 15 | #include <linux/user.h> |
16 | #include <linux/regset.h> | ||
15 | #include <linux/signal.h> | 17 | #include <linux/signal.h> |
18 | #include <linux/tracehook.h> | ||
16 | #include <linux/uaccess.h> | 19 | #include <linux/uaccess.h> |
17 | 20 | ||
18 | #include <asm/page.h> | 21 | #include <asm/page.h> |
@@ -25,90 +28,57 @@ | |||
25 | #include <asm/cacheflush.h> | 28 | #include <asm/cacheflush.h> |
26 | #include <asm/mem_map.h> | 29 | #include <asm/mem_map.h> |
27 | 30 | ||
28 | #define TEXT_OFFSET 0 | ||
29 | /* | 31 | /* |
30 | * does not yet catch signals sent when the child dies. | 32 | * does not yet catch signals sent when the child dies. |
31 | * in exit.c or in signal.c. | 33 | * in exit.c or in signal.c. |
32 | */ | 34 | */ |
33 | 35 | ||
34 | /* determines which bits in the SYSCFG reg the user has access to. */ | ||
35 | /* 1 = access 0 = no access */ | ||
36 | #define SYSCFG_MASK 0x0007 /* SYSCFG reg */ | ||
37 | /* sets the trace bits. */ | ||
38 | #define TRACE_BITS 0x0001 | ||
39 | |||
40 | /* Find the stack offset for a register, relative to thread.esp0. */ | ||
41 | #define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg) | ||
42 | |||
43 | /* | ||
44 | * Get the address of the live pt_regs for the specified task. | ||
45 | * These are saved onto the top kernel stack when the process | ||
46 | * is not running. | ||
47 | * | ||
48 | * Note: if a user thread is execve'd from kernel space, the | ||
49 | * kernel stack will not be empty on entry to the kernel, so | ||
50 | * ptracing these tasks will fail. | ||
51 | */ | ||
52 | static inline struct pt_regs *get_user_regs(struct task_struct *task) | ||
53 | { | ||
54 | return (struct pt_regs *) | ||
55 | ((unsigned long)task_stack_page(task) + | ||
56 | (THREAD_SIZE - sizeof(struct pt_regs))); | ||
57 | } | ||
58 | |||
59 | /* | ||
60 | * Get all user integer registers. | ||
61 | */ | ||
62 | static inline int ptrace_getregs(struct task_struct *tsk, void __user *uregs) | ||
63 | { | ||
64 | struct pt_regs regs; | ||
65 | memcpy(®s, get_user_regs(tsk), sizeof(regs)); | ||
66 | regs.usp = tsk->thread.usp; | ||
67 | return copy_to_user(uregs, ®s, sizeof(struct pt_regs)) ? -EFAULT : 0; | ||
68 | } | ||
69 | |||
70 | /* Mapping from PT_xxx to the stack offset at which the register is | ||
71 | * saved. Notice that usp has no stack-slot and needs to be treated | ||
72 | * specially (see get_reg/put_reg below). | ||
73 | */ | ||
74 | |||
75 | /* | 36 | /* |
76 | * Get contents of register REGNO in task TASK. | 37 | * Get contents of register REGNO in task TASK. |
77 | */ | 38 | */ |
78 | static inline long get_reg(struct task_struct *task, int regno) | 39 | static inline long |
40 | get_reg(struct task_struct *task, long regno, unsigned long __user *datap) | ||
79 | { | 41 | { |
80 | unsigned char *reg_ptr; | 42 | long tmp; |
43 | struct pt_regs *regs = task_pt_regs(task); | ||
81 | 44 | ||
82 | struct pt_regs *regs = | 45 | if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0) |
83 | (struct pt_regs *)((unsigned long)task_stack_page(task) + | 46 | return -EIO; |
84 | (THREAD_SIZE - sizeof(struct pt_regs))); | ||
85 | reg_ptr = (char *)regs; | ||
86 | 47 | ||
87 | switch (regno) { | 48 | switch (regno) { |
49 | case PT_TEXT_ADDR: | ||
50 | tmp = task->mm->start_code; | ||
51 | break; | ||
52 | case PT_TEXT_END_ADDR: | ||
53 | tmp = task->mm->end_code; | ||
54 | break; | ||
55 | case PT_DATA_ADDR: | ||
56 | tmp = task->mm->start_data; | ||
57 | break; | ||
88 | case PT_USP: | 58 | case PT_USP: |
89 | return task->thread.usp; | 59 | tmp = task->thread.usp; |
60 | break; | ||
90 | default: | 61 | default: |
91 | if (regno <= 216) | 62 | if (regno < sizeof(*regs)) { |
92 | return *(long *)(reg_ptr + regno); | 63 | void *reg_ptr = regs; |
64 | tmp = *(long *)(reg_ptr + regno); | ||
65 | } else | ||
66 | return -EIO; | ||
93 | } | 67 | } |
94 | /* slight mystery ... never seems to come here but kernel misbehaves without this code! */ | ||
95 | 68 | ||
96 | printk(KERN_WARNING "Request to get for unknown register %d\n", regno); | 69 | return put_user(tmp, datap); |
97 | return 0; | ||
98 | } | 70 | } |
99 | 71 | ||
100 | /* | 72 | /* |
101 | * Write contents of register REGNO in task TASK. | 73 | * Write contents of register REGNO in task TASK. |
102 | */ | 74 | */ |
103 | static inline int | 75 | static inline int |
104 | put_reg(struct task_struct *task, int regno, unsigned long data) | 76 | put_reg(struct task_struct *task, long regno, unsigned long data) |
105 | { | 77 | { |
106 | char *reg_ptr; | 78 | struct pt_regs *regs = task_pt_regs(task); |
107 | 79 | ||
108 | struct pt_regs *regs = | 80 | if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0) |
109 | (struct pt_regs *)((unsigned long)task_stack_page(task) + | 81 | return -EIO; |
110 | (THREAD_SIZE - sizeof(struct pt_regs))); | ||
111 | reg_ptr = (char *)regs; | ||
112 | 82 | ||
113 | switch (regno) { | 83 | switch (regno) { |
114 | case PT_PC: | 84 | case PT_PC: |
@@ -125,10 +95,18 @@ put_reg(struct task_struct *task, int regno, unsigned long data) | |||
125 | regs->usp = data; | 95 | regs->usp = data; |
126 | task->thread.usp = data; | 96 | task->thread.usp = data; |
127 | break; | 97 | break; |
98 | case PT_SYSCFG: /* don't let userspace screw with this */ | ||
99 | if ((data & ~1) != 0x6) | ||
100 | pr_warning("ptrace: ignore syscfg write of %#lx\n", data); | ||
101 | break; /* regs->syscfg = data; break; */ | ||
128 | default: | 102 | default: |
129 | if (regno <= 216) | 103 | if (regno < sizeof(*regs)) { |
130 | *(long *)(reg_ptr + regno) = data; | 104 | void *reg_offset = regs; |
105 | *(long *)(reg_offset + regno) = data; | ||
106 | } | ||
107 | /* Ignore writes to pseudo registers */ | ||
131 | } | 108 | } |
109 | |||
132 | return 0; | 110 | return 0; |
133 | } | 111 | } |
134 | 112 | ||
@@ -160,24 +138,98 @@ static inline int is_user_addr_valid(struct task_struct *child, | |||
160 | return -EIO; | 138 | return -EIO; |
161 | } | 139 | } |
162 | 140 | ||
163 | void ptrace_enable(struct task_struct *child) | 141 | /* |
142 | * retrieve the contents of Blackfin userspace general registers | ||
143 | */ | ||
144 | static int genregs_get(struct task_struct *target, | ||
145 | const struct user_regset *regset, | ||
146 | unsigned int pos, unsigned int count, | ||
147 | void *kbuf, void __user *ubuf) | ||
148 | { | ||
149 | struct pt_regs *regs = task_pt_regs(target); | ||
150 | int ret; | ||
151 | |||
152 | /* This sucks ... */ | ||
153 | regs->usp = target->thread.usp; | ||
154 | |||
155 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | ||
156 | regs, 0, sizeof(*regs)); | ||
157 | if (ret < 0) | ||
158 | return ret; | ||
159 | |||
160 | return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf, | ||
161 | sizeof(*regs), -1); | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * update the contents of the Blackfin userspace general registers | ||
166 | */ | ||
167 | static int genregs_set(struct task_struct *target, | ||
168 | const struct user_regset *regset, | ||
169 | unsigned int pos, unsigned int count, | ||
170 | const void *kbuf, const void __user *ubuf) | ||
164 | { | 171 | { |
165 | unsigned long tmp; | 172 | struct pt_regs *regs = task_pt_regs(target); |
166 | tmp = get_reg(child, PT_SYSCFG) | (TRACE_BITS); | 173 | int ret; |
167 | put_reg(child, PT_SYSCFG, tmp); | 174 | |
175 | /* Don't let people set SYSCFG (it's at the end of pt_regs) */ | ||
176 | ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
177 | regs, 0, PT_SYSCFG); | ||
178 | if (ret < 0) | ||
179 | return ret; | ||
180 | |||
181 | /* This sucks ... */ | ||
182 | target->thread.usp = regs->usp; | ||
183 | /* regs->retx = regs->pc; */ | ||
184 | |||
185 | return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, | ||
186 | PT_SYSCFG, -1); | ||
168 | } | 187 | } |
169 | 188 | ||
170 | /* | 189 | /* |
171 | * Called by kernel/ptrace.c when detaching.. | 190 | * Define the register sets available on the Blackfin under Linux |
172 | * | ||
173 | * Make sure the single step bit is not set. | ||
174 | */ | 191 | */ |
175 | void ptrace_disable(struct task_struct *child) | 192 | enum bfin_regset { |
193 | REGSET_GENERAL, | ||
194 | }; | ||
195 | |||
196 | static const struct user_regset bfin_regsets[] = { | ||
197 | [REGSET_GENERAL] = { | ||
198 | .core_note_type = NT_PRSTATUS, | ||
199 | .n = sizeof(struct pt_regs) / sizeof(long), | ||
200 | .size = sizeof(long), | ||
201 | .align = sizeof(long), | ||
202 | .get = genregs_get, | ||
203 | .set = genregs_set, | ||
204 | }, | ||
205 | }; | ||
206 | |||
207 | static const struct user_regset_view user_bfin_native_view = { | ||
208 | .name = "Blackfin", | ||
209 | .e_machine = EM_BLACKFIN, | ||
210 | .regsets = bfin_regsets, | ||
211 | .n = ARRAY_SIZE(bfin_regsets), | ||
212 | }; | ||
213 | |||
214 | const struct user_regset_view *task_user_regset_view(struct task_struct *task) | ||
215 | { | ||
216 | return &user_bfin_native_view; | ||
217 | } | ||
218 | |||
219 | void user_enable_single_step(struct task_struct *child) | ||
220 | { | ||
221 | struct pt_regs *regs = task_pt_regs(child); | ||
222 | regs->syscfg |= SYSCFG_SSSTEP; | ||
223 | |||
224 | set_tsk_thread_flag(child, TIF_SINGLESTEP); | ||
225 | } | ||
226 | |||
227 | void user_disable_single_step(struct task_struct *child) | ||
176 | { | 228 | { |
177 | unsigned long tmp; | 229 | struct pt_regs *regs = task_pt_regs(child); |
178 | /* make sure the single step bit is not set. */ | 230 | regs->syscfg &= ~SYSCFG_SSSTEP; |
179 | tmp = get_reg(child, PT_SYSCFG) & ~TRACE_BITS; | 231 | |
180 | put_reg(child, PT_SYSCFG, tmp); | 232 | clear_tsk_thread_flag(child, TIF_SINGLESTEP); |
181 | } | 233 | } |
182 | 234 | ||
183 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) | 235 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) |
@@ -240,62 +292,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
240 | break; | 292 | break; |
241 | } | 293 | } |
242 | 294 | ||
243 | /* read the word at location addr in the USER area. */ | ||
244 | case PTRACE_PEEKUSR: | ||
245 | { | ||
246 | unsigned long tmp; | ||
247 | ret = -EIO; | ||
248 | tmp = 0; | ||
249 | if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) { | ||
250 | printk(KERN_WARNING "ptrace error : PEEKUSR : temporarily returning " | ||
251 | "0 - %x sizeof(pt_regs) is %lx\n", | ||
252 | (int)addr, sizeof(struct pt_regs)); | ||
253 | break; | ||
254 | } | ||
255 | if (addr == sizeof(struct pt_regs)) { | ||
256 | /* PT_TEXT_ADDR */ | ||
257 | tmp = child->mm->start_code + TEXT_OFFSET; | ||
258 | } else if (addr == (sizeof(struct pt_regs) + 4)) { | ||
259 | /* PT_TEXT_END_ADDR */ | ||
260 | tmp = child->mm->end_code; | ||
261 | } else if (addr == (sizeof(struct pt_regs) + 8)) { | ||
262 | /* PT_DATA_ADDR */ | ||
263 | tmp = child->mm->start_data; | ||
264 | #ifdef CONFIG_BINFMT_ELF_FDPIC | ||
265 | } else if (addr == (sizeof(struct pt_regs) + 12)) { | ||
266 | goto case_PTRACE_GETFDPIC_EXEC; | ||
267 | } else if (addr == (sizeof(struct pt_regs) + 16)) { | ||
268 | goto case_PTRACE_GETFDPIC_INTERP; | ||
269 | #endif | ||
270 | } else { | ||
271 | tmp = get_reg(child, addr); | ||
272 | } | ||
273 | ret = put_user(tmp, datap); | ||
274 | break; | ||
275 | } | ||
276 | |||
277 | #ifdef CONFIG_BINFMT_ELF_FDPIC | ||
278 | case PTRACE_GETFDPIC: { | ||
279 | unsigned long tmp = 0; | ||
280 | |||
281 | switch (addr) { | ||
282 | case_PTRACE_GETFDPIC_EXEC: | ||
283 | case PTRACE_GETFDPIC_EXEC: | ||
284 | tmp = child->mm->context.exec_fdpic_loadmap; | ||
285 | break; | ||
286 | case_PTRACE_GETFDPIC_INTERP: | ||
287 | case PTRACE_GETFDPIC_INTERP: | ||
288 | tmp = child->mm->context.interp_fdpic_loadmap; | ||
289 | break; | ||
290 | default: | ||
291 | break; | ||
292 | } | ||
293 | |||
294 | ret = put_user(tmp, datap); | ||
295 | break; | ||
296 | } | ||
297 | #endif | ||
298 | |||
299 | /* when I and D space are separate, this will have to be fixed. */ | 295 | /* when I and D space are separate, this will have to be fixed. */ |
300 | case PTRACE_POKEDATA: | 296 | case PTRACE_POKEDATA: |
301 | pr_debug("ptrace: PTRACE_PEEKDATA\n"); | 297 | pr_debug("ptrace: PTRACE_PEEKDATA\n"); |
@@ -336,79 +332,44 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
336 | break; | 332 | break; |
337 | } | 333 | } |
338 | 334 | ||
339 | case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ | 335 | case PTRACE_PEEKUSR: |
340 | ret = -EIO; | 336 | switch (addr) { |
341 | if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) { | 337 | #ifdef CONFIG_BINFMT_ELF_FDPIC /* backwards compat */ |
342 | printk(KERN_WARNING "ptrace error : POKEUSR: temporarily returning 0\n"); | 338 | case PT_FDPIC_EXEC: |
343 | break; | 339 | request = PTRACE_GETFDPIC; |
344 | } | 340 | addr = PTRACE_GETFDPIC_EXEC; |
345 | 341 | goto case_default; | |
346 | if (addr >= (sizeof(struct pt_regs))) { | 342 | case PT_FDPIC_INTERP: |
347 | ret = 0; | 343 | request = PTRACE_GETFDPIC; |
348 | break; | 344 | addr = PTRACE_GETFDPIC_INTERP; |
349 | } | 345 | goto case_default; |
350 | if (addr == PT_SYSCFG) { | 346 | #endif |
351 | data &= SYSCFG_MASK; | 347 | default: |
352 | data |= get_reg(child, PT_SYSCFG); | 348 | ret = get_reg(child, addr, datap); |
353 | } | 349 | } |
354 | ret = put_reg(child, addr, data); | 350 | pr_debug("ptrace: PEEKUSR reg %li with %#lx = %i\n", addr, data, ret); |
355 | break; | ||
356 | |||
357 | case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ | ||
358 | case PTRACE_CONT: /* restart after signal. */ | ||
359 | pr_debug("ptrace: syscall/cont\n"); | ||
360 | |||
361 | ret = -EIO; | ||
362 | if (!valid_signal(data)) | ||
363 | break; | ||
364 | if (request == PTRACE_SYSCALL) | ||
365 | set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
366 | else | ||
367 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
368 | child->exit_code = data; | ||
369 | ptrace_disable(child); | ||
370 | pr_debug("ptrace: before wake_up_process\n"); | ||
371 | wake_up_process(child); | ||
372 | ret = 0; | ||
373 | break; | ||
374 | |||
375 | /* | ||
376 | * make the child exit. Best I can do is send it a sigkill. | ||
377 | * perhaps it should be put in the status that it wants to | ||
378 | * exit. | ||
379 | */ | ||
380 | case PTRACE_KILL: | ||
381 | ret = 0; | ||
382 | if (child->exit_state == EXIT_ZOMBIE) /* already dead */ | ||
383 | break; | ||
384 | child->exit_code = SIGKILL; | ||
385 | ptrace_disable(child); | ||
386 | wake_up_process(child); | ||
387 | break; | 351 | break; |
388 | 352 | ||
389 | case PTRACE_SINGLESTEP: /* set the trap flag. */ | 353 | case PTRACE_POKEUSR: |
390 | pr_debug("ptrace: single step\n"); | 354 | ret = put_reg(child, addr, data); |
391 | ret = -EIO; | 355 | pr_debug("ptrace: POKEUSR reg %li with %li = %i\n", addr, data, ret); |
392 | if (!valid_signal(data)) | ||
393 | break; | ||
394 | clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); | ||
395 | ptrace_enable(child); | ||
396 | child->exit_code = data; | ||
397 | wake_up_process(child); | ||
398 | ret = 0; | ||
399 | break; | 356 | break; |
400 | 357 | ||
401 | case PTRACE_GETREGS: | 358 | case PTRACE_GETREGS: |
402 | /* Get all gp regs from the child. */ | 359 | pr_debug("ptrace: PTRACE_GETREGS\n"); |
403 | ret = ptrace_getregs(child, datap); | 360 | return copy_regset_to_user(child, &user_bfin_native_view, |
404 | break; | 361 | REGSET_GENERAL, |
362 | 0, sizeof(struct pt_regs), | ||
363 | (void __user *)data); | ||
405 | 364 | ||
406 | case PTRACE_SETREGS: | 365 | case PTRACE_SETREGS: |
407 | printk(KERN_WARNING "ptrace: SETREGS: **** NOT IMPLEMENTED ***\n"); | 366 | pr_debug("ptrace: PTRACE_SETREGS\n"); |
408 | /* Set all gp regs in the child. */ | 367 | return copy_regset_from_user(child, &user_bfin_native_view, |
409 | ret = 0; | 368 | REGSET_GENERAL, |
410 | break; | 369 | 0, sizeof(struct pt_regs), |
370 | (const void __user *)data); | ||
411 | 371 | ||
372 | case_default: | ||
412 | default: | 373 | default: |
413 | ret = ptrace_request(child, request, addr, data); | 374 | ret = ptrace_request(child, request, addr, data); |
414 | break; | 375 | break; |
@@ -417,27 +378,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
417 | return ret; | 378 | return ret; |
418 | } | 379 | } |
419 | 380 | ||
420 | asmlinkage void syscall_trace(void) | 381 | asmlinkage int syscall_trace_enter(struct pt_regs *regs) |
421 | { | 382 | { |
422 | if (!test_thread_flag(TIF_SYSCALL_TRACE)) | 383 | int ret = 0; |
423 | return; | 384 | |
424 | 385 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | |
425 | if (!(current->ptrace & PT_PTRACED)) | 386 | ret = tracehook_report_syscall_entry(regs); |
426 | return; | 387 | |
427 | 388 | return ret; | |
428 | /* the 0x80 provides a way for the tracing parent to distinguish | 389 | } |
429 | * between a syscall stop and SIGTRAP delivery | 390 | |
430 | */ | 391 | asmlinkage void syscall_trace_leave(struct pt_regs *regs) |
431 | ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) | 392 | { |
432 | ? 0x80 : 0)); | 393 | int step; |
433 | 394 | ||
434 | /* | 395 | step = test_thread_flag(TIF_SINGLESTEP); |
435 | * this isn't the same as continuing with a signal, but it will do | 396 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) |
436 | * for normal use. strace only continues with a signal if the | 397 | tracehook_report_syscall_exit(regs, step); |
437 | * stopping signal is not SIGTRAP. -brl | ||
438 | */ | ||
439 | if (current->exit_code) { | ||
440 | send_sig(current->exit_code, current, 1); | ||
441 | current->exit_code = 0; | ||
442 | } | ||
443 | } | 398 | } |
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c index 95448ae9c43a..d37a397f43f5 100644 --- a/arch/blackfin/kernel/setup.c +++ b/arch/blackfin/kernel/setup.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright 2004-2009 Analog Devices Inc. | 2 | * Copyright 2004-2010 Analog Devices Inc. |
3 | * | 3 | * |
4 | * Licensed under the GPL-2 or later. | 4 | * Licensed under the GPL-2 or later. |
5 | */ | 5 | */ |
@@ -220,6 +220,16 @@ void __init bfin_relocate_l1_mem(void) | |||
220 | memcpy(_stext_l2, _l2_lma, l2_len); | 220 | memcpy(_stext_l2, _l2_lma, l2_len); |
221 | } | 221 | } |
222 | 222 | ||
223 | #ifdef CONFIG_ROMKERNEL | ||
224 | void __init bfin_relocate_xip_data(void) | ||
225 | { | ||
226 | early_shadow_stamp(); | ||
227 | |||
228 | memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info)); | ||
229 | memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len); | ||
230 | } | ||
231 | #endif | ||
232 | |||
223 | /* add_memory_region to memmap */ | 233 | /* add_memory_region to memmap */ |
224 | static void __init add_memory_region(unsigned long long start, | 234 | static void __init add_memory_region(unsigned long long start, |
225 | unsigned long long size, int type) | 235 | unsigned long long size, int type) |
@@ -504,7 +514,7 @@ static __init void memory_setup(void) | |||
504 | #endif | 514 | #endif |
505 | unsigned long max_mem; | 515 | unsigned long max_mem; |
506 | 516 | ||
507 | _rambase = (unsigned long)_stext; | 517 | _rambase = CONFIG_BOOT_LOAD; |
508 | _ramstart = (unsigned long)_end; | 518 | _ramstart = (unsigned long)_end; |
509 | 519 | ||
510 | if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { | 520 | if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { |
@@ -597,7 +607,12 @@ static __init void memory_setup(void) | |||
597 | } | 607 | } |
598 | 608 | ||
599 | #ifdef CONFIG_MPU | 609 | #ifdef CONFIG_MPU |
610 | #if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM) | ||
611 | page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE - | ||
612 | ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32; | ||
613 | #else | ||
600 | page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32; | 614 | page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32; |
615 | #endif | ||
601 | page_mask_order = get_order(3 * page_mask_nelts * sizeof(long)); | 616 | page_mask_order = get_order(3 * page_mask_nelts * sizeof(long)); |
602 | #endif | 617 | #endif |
603 | 618 | ||
@@ -630,7 +645,7 @@ static __init void memory_setup(void) | |||
630 | __bss_start, __bss_stop, | 645 | __bss_start, __bss_stop, |
631 | _sdata, _edata, | 646 | _sdata, _edata, |
632 | (void *)&init_thread_union, | 647 | (void *)&init_thread_union, |
633 | (void *)((int)(&init_thread_union) + 0x2000), | 648 | (void *)((int)(&init_thread_union) + THREAD_SIZE), |
634 | __init_begin, __init_end, | 649 | __init_begin, __init_end, |
635 | (void *)_ramstart, (void *)memory_end | 650 | (void *)_ramstart, (void *)memory_end |
636 | #ifdef CONFIG_MTD_UCLINUX | 651 | #ifdef CONFIG_MTD_UCLINUX |
@@ -792,10 +807,17 @@ static inline int __init get_mem_size(void) | |||
792 | BUG(); | 807 | BUG(); |
793 | } | 808 | } |
794 | 809 | ||
810 | __attribute__((weak)) | ||
811 | void __init native_machine_early_platform_add_devices(void) | ||
812 | { | ||
813 | } | ||
814 | |||
795 | void __init setup_arch(char **cmdline_p) | 815 | void __init setup_arch(char **cmdline_p) |
796 | { | 816 | { |
797 | unsigned long sclk, cclk; | 817 | unsigned long sclk, cclk; |
798 | 818 | ||
819 | native_machine_early_platform_add_devices(); | ||
820 | |||
799 | enable_shadow_console(); | 821 | enable_shadow_console(); |
800 | 822 | ||
801 | /* Check to make sure we are running on the right processor */ | 823 | /* Check to make sure we are running on the right processor */ |
@@ -903,7 +925,7 @@ void __init setup_arch(char **cmdline_p) | |||
903 | else if (_bfin_swrst & RESET_SOFTWARE) | 925 | else if (_bfin_swrst & RESET_SOFTWARE) |
904 | printk(KERN_NOTICE "Reset caused by Software reset\n"); | 926 | printk(KERN_NOTICE "Reset caused by Software reset\n"); |
905 | 927 | ||
906 | printk(KERN_INFO "Blackfin support (C) 2004-2009 Analog Devices, Inc.\n"); | 928 | printk(KERN_INFO "Blackfin support (C) 2004-2010 Analog Devices, Inc.\n"); |
907 | if (bfin_compiled_revid() == 0xffff) | 929 | if (bfin_compiled_revid() == 0xffff) |
908 | printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid()); | 930 | printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid()); |
909 | else if (bfin_compiled_revid() == -1) | 931 | else if (bfin_compiled_revid() == -1) |
@@ -1217,10 +1239,10 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1217 | dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, | 1239 | dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, |
1218 | BFIN_DLINES); | 1240 | BFIN_DLINES); |
1219 | #ifdef __ARCH_SYNC_CORE_DCACHE | 1241 | #ifdef __ARCH_SYNC_CORE_DCACHE |
1220 | seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); | 1242 | seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", dcache_invld_count[cpu_num]); |
1221 | #endif | 1243 | #endif |
1222 | #ifdef __ARCH_SYNC_CORE_ICACHE | 1244 | #ifdef __ARCH_SYNC_CORE_ICACHE |
1223 | seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count); | 1245 | seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", icache_invld_count[cpu_num]); |
1224 | #endif | 1246 | #endif |
1225 | 1247 | ||
1226 | if (cpu_num != num_possible_cpus() - 1) | 1248 | if (cpu_num != num_possible_cpus() - 1) |
@@ -1249,8 +1271,8 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
1249 | seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", | 1271 | seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", |
1250 | physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); | 1272 | physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); |
1251 | seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n", | 1273 | seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n", |
1252 | ((int)memory_end - (int)_stext) >> 10, | 1274 | ((int)memory_end - (int)_rambase) >> 10, |
1253 | _stext, | 1275 | (void *)_rambase, |
1254 | (void *)memory_end); | 1276 | (void *)memory_end); |
1255 | seq_printf(m, "\n"); | 1277 | seq_printf(m, "\n"); |
1256 | 1278 | ||
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index e0fd63e9e38a..d536f35d1f43 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright 2004-2009 Analog Devices Inc. | 2 | * Copyright 2004-2010 Analog Devices Inc. |
3 | * | 3 | * |
4 | * Licensed under the GPL-2 or later | 4 | * Licensed under the GPL-2 or later |
5 | */ | 5 | */ |
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | #include <asm/ucontext.h> | 18 | #include <asm/ucontext.h> |
19 | #include <asm/fixed_code.h> | 19 | #include <asm/fixed_code.h> |
20 | #include <asm/syscall.h> | ||
20 | 21 | ||
21 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 22 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
22 | 23 | ||
@@ -50,6 +51,9 @@ rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *p | |||
50 | unsigned long usp = 0; | 51 | unsigned long usp = 0; |
51 | int err = 0; | 52 | int err = 0; |
52 | 53 | ||
54 | /* Always make any pending restarted system calls return -EINTR */ | ||
55 | current_thread_info()->restart_block.fn = do_no_restart_syscall; | ||
56 | |||
53 | #define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x) | 57 | #define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x) |
54 | 58 | ||
55 | /* restore passed registers */ | 59 | /* restore passed registers */ |
@@ -206,16 +210,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, | |||
206 | regs->r1 = (unsigned long)(&frame->info); | 210 | regs->r1 = (unsigned long)(&frame->info); |
207 | regs->r2 = (unsigned long)(&frame->uc); | 211 | regs->r2 = (unsigned long)(&frame->uc); |
208 | 212 | ||
209 | /* | ||
210 | * Clear the trace flag when entering the signal handler, but | ||
211 | * notify any tracer that was single-stepping it. The tracer | ||
212 | * may want to single-step inside the handler too. | ||
213 | */ | ||
214 | if (regs->syscfg & TRACE_BITS) { | ||
215 | regs->syscfg &= ~TRACE_BITS; | ||
216 | ptrace_notify(SIGTRAP); | ||
217 | } | ||
218 | |||
219 | return 0; | 213 | return 0; |
220 | 214 | ||
221 | give_sigsegv: | 215 | give_sigsegv: |
@@ -247,6 +241,11 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler) | |||
247 | regs->r0 = regs->orig_r0; | 241 | regs->r0 = regs->orig_r0; |
248 | regs->pc -= 2; | 242 | regs->pc -= 2; |
249 | break; | 243 | break; |
244 | |||
245 | case -ERESTART_RESTARTBLOCK: | ||
246 | regs->p0 = __NR_restart_syscall; | ||
247 | regs->pc -= 2; | ||
248 | break; | ||
250 | } | 249 | } |
251 | } | 250 | } |
252 | 251 | ||
@@ -315,6 +314,9 @@ asmlinkage void do_signal(struct pt_regs *regs) | |||
315 | * clear the TIF_RESTORE_SIGMASK flag */ | 314 | * clear the TIF_RESTORE_SIGMASK flag */ |
316 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) | 315 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) |
317 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 316 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
317 | |||
318 | tracehook_signal_handler(signr, &info, &ka, regs, | ||
319 | test_thread_flag(TIF_SINGLESTEP)); | ||
318 | } | 320 | } |
319 | 321 | ||
320 | return; | 322 | return; |
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c index 2e7f8e10bf87..bdc1e2f0da32 100644 --- a/arch/blackfin/kernel/sys_bfin.c +++ b/arch/blackfin/kernel/sys_bfin.c | |||
@@ -47,3 +47,26 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, | |||
47 | } | 47 | } |
48 | EXPORT_SYMBOL(get_fb_unmapped_area); | 48 | EXPORT_SYMBOL(get_fb_unmapped_area); |
49 | #endif | 49 | #endif |
50 | |||
51 | /* Needed for legacy userspace atomic emulation */ | ||
52 | static DEFINE_SPINLOCK(bfin_spinlock_lock); | ||
53 | |||
54 | #ifdef CONFIG_SYS_BFIN_SPINLOCK_L1 | ||
55 | __attribute__((l1_text)) | ||
56 | #endif | ||
57 | asmlinkage int sys_bfin_spinlock(int *p) | ||
58 | { | ||
59 | int ret, tmp = 0; | ||
60 | |||
61 | spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. */ | ||
62 | ret = get_user(tmp, p); | ||
63 | if (likely(ret == 0)) { | ||
64 | if (unlikely(tmp)) | ||
65 | ret = 1; | ||
66 | else | ||
67 | put_user(1, p); | ||
68 | } | ||
69 | spin_unlock(&bfin_spinlock_lock); | ||
70 | |||
71 | return ret; | ||
72 | } | ||
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c index 17c38c5b5b22..8c9a43daf80f 100644 --- a/arch/blackfin/kernel/time-ts.c +++ b/arch/blackfin/kernel/time-ts.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <asm/blackfin.h> | 21 | #include <asm/blackfin.h> |
22 | #include <asm/time.h> | 22 | #include <asm/time.h> |
23 | #include <asm/gptimers.h> | 23 | #include <asm/gptimers.h> |
24 | #include <asm/nmi.h> | ||
24 | 25 | ||
25 | /* Accelerators for sched_clock() | 26 | /* Accelerators for sched_clock() |
26 | * convert from cycles(64bits) => nanoseconds (64bits) | 27 | * convert from cycles(64bits) => nanoseconds (64bits) |
@@ -50,7 +51,11 @@ | |||
50 | 51 | ||
51 | static notrace cycle_t bfin_read_cycles(struct clocksource *cs) | 52 | static notrace cycle_t bfin_read_cycles(struct clocksource *cs) |
52 | { | 53 | { |
54 | #ifdef CONFIG_CPU_FREQ | ||
53 | return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); | 55 | return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); |
56 | #else | ||
57 | return get_cycles(); | ||
58 | #endif | ||
54 | } | 59 | } |
55 | 60 | ||
56 | static struct clocksource bfin_cs_cycles = { | 61 | static struct clocksource bfin_cs_cycles = { |
@@ -132,7 +137,6 @@ static int __init bfin_cs_gptimer0_init(void) | |||
132 | # define bfin_cs_gptimer0_init() | 137 | # define bfin_cs_gptimer0_init() |
133 | #endif | 138 | #endif |
134 | 139 | ||
135 | |||
136 | #if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE) | 140 | #if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE) |
137 | /* prefer to use cycles since it has higher rating */ | 141 | /* prefer to use cycles since it has higher rating */ |
138 | notrace unsigned long long sched_clock(void) | 142 | notrace unsigned long long sched_clock(void) |
@@ -145,47 +149,8 @@ notrace unsigned long long sched_clock(void) | |||
145 | } | 149 | } |
146 | #endif | 150 | #endif |
147 | 151 | ||
148 | #ifdef CONFIG_CORE_TIMER_IRQ_L1 | ||
149 | __attribute__((l1_text)) | ||
150 | #endif | ||
151 | irqreturn_t timer_interrupt(int irq, void *dev_id); | ||
152 | |||
153 | static int bfin_timer_set_next_event(unsigned long, \ | ||
154 | struct clock_event_device *); | ||
155 | |||
156 | static void bfin_timer_set_mode(enum clock_event_mode, \ | ||
157 | struct clock_event_device *); | ||
158 | |||
159 | static struct clock_event_device clockevent_bfin = { | ||
160 | #if defined(CONFIG_TICKSOURCE_GPTMR0) | ||
161 | .name = "bfin_gptimer0", | ||
162 | .rating = 300, | ||
163 | .irq = IRQ_TIMER0, | ||
164 | #else | ||
165 | .name = "bfin_core_timer", | ||
166 | .rating = 350, | ||
167 | .irq = IRQ_CORETMR, | ||
168 | #endif | ||
169 | .shift = 32, | ||
170 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | ||
171 | .set_next_event = bfin_timer_set_next_event, | ||
172 | .set_mode = bfin_timer_set_mode, | ||
173 | }; | ||
174 | |||
175 | static struct irqaction bfin_timer_irq = { | ||
176 | #if defined(CONFIG_TICKSOURCE_GPTMR0) | 152 | #if defined(CONFIG_TICKSOURCE_GPTMR0) |
177 | .name = "Blackfin GPTimer0", | 153 | static int bfin_gptmr0_set_next_event(unsigned long cycles, |
178 | #else | ||
179 | .name = "Blackfin CoreTimer", | ||
180 | #endif | ||
181 | .flags = IRQF_DISABLED | IRQF_TIMER | \ | ||
182 | IRQF_IRQPOLL | IRQF_PERCPU, | ||
183 | .handler = timer_interrupt, | ||
184 | .dev_id = &clockevent_bfin, | ||
185 | }; | ||
186 | |||
187 | #if defined(CONFIG_TICKSOURCE_GPTMR0) | ||
188 | static int bfin_timer_set_next_event(unsigned long cycles, | ||
189 | struct clock_event_device *evt) | 154 | struct clock_event_device *evt) |
190 | { | 155 | { |
191 | disable_gptimers(TIMER0bit); | 156 | disable_gptimers(TIMER0bit); |
@@ -196,7 +161,7 @@ static int bfin_timer_set_next_event(unsigned long cycles, | |||
196 | return 0; | 161 | return 0; |
197 | } | 162 | } |
198 | 163 | ||
199 | static void bfin_timer_set_mode(enum clock_event_mode mode, | 164 | static void bfin_gptmr0_set_mode(enum clock_event_mode mode, |
200 | struct clock_event_device *evt) | 165 | struct clock_event_device *evt) |
201 | { | 166 | { |
202 | switch (mode) { | 167 | switch (mode) { |
@@ -224,25 +189,65 @@ static void bfin_timer_set_mode(enum clock_event_mode mode, | |||
224 | } | 189 | } |
225 | } | 190 | } |
226 | 191 | ||
227 | static void bfin_timer_ack(void) | 192 | static void bfin_gptmr0_ack(void) |
228 | { | 193 | { |
229 | set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0); | 194 | set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0); |
230 | } | 195 | } |
231 | 196 | ||
232 | static void __init bfin_timer_init(void) | 197 | static void __init bfin_gptmr0_init(void) |
233 | { | 198 | { |
234 | disable_gptimers(TIMER0bit); | 199 | disable_gptimers(TIMER0bit); |
235 | } | 200 | } |
236 | 201 | ||
237 | static unsigned long __init bfin_clockevent_check(void) | 202 | #ifdef CONFIG_CORE_TIMER_IRQ_L1 |
203 | __attribute__((l1_text)) | ||
204 | #endif | ||
205 | irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id) | ||
238 | { | 206 | { |
239 | setup_irq(IRQ_TIMER0, &bfin_timer_irq); | 207 | struct clock_event_device *evt = dev_id; |
240 | return get_sclk(); | 208 | smp_mb(); |
209 | evt->event_handler(evt); | ||
210 | bfin_gptmr0_ack(); | ||
211 | return IRQ_HANDLED; | ||
241 | } | 212 | } |
242 | 213 | ||
243 | #else /* CONFIG_TICKSOURCE_CORETMR */ | 214 | static struct irqaction gptmr0_irq = { |
215 | .name = "Blackfin GPTimer0", | ||
216 | .flags = IRQF_DISABLED | IRQF_TIMER | \ | ||
217 | IRQF_IRQPOLL | IRQF_PERCPU, | ||
218 | .handler = bfin_gptmr0_interrupt, | ||
219 | }; | ||
220 | |||
221 | static struct clock_event_device clockevent_gptmr0 = { | ||
222 | .name = "bfin_gptimer0", | ||
223 | .rating = 300, | ||
224 | .irq = IRQ_TIMER0, | ||
225 | .shift = 32, | ||
226 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | ||
227 | .set_next_event = bfin_gptmr0_set_next_event, | ||
228 | .set_mode = bfin_gptmr0_set_mode, | ||
229 | }; | ||
244 | 230 | ||
245 | static int bfin_timer_set_next_event(unsigned long cycles, | 231 | static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt) |
232 | { | ||
233 | unsigned long clock_tick; | ||
234 | |||
235 | clock_tick = get_sclk(); | ||
236 | evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift); | ||
237 | evt->max_delta_ns = clockevent_delta2ns(-1, evt); | ||
238 | evt->min_delta_ns = clockevent_delta2ns(100, evt); | ||
239 | |||
240 | evt->cpumask = cpumask_of(0); | ||
241 | |||
242 | clockevents_register_device(evt); | ||
243 | } | ||
244 | #endif /* CONFIG_TICKSOURCE_GPTMR0 */ | ||
245 | |||
246 | #if defined(CONFIG_TICKSOURCE_CORETMR) | ||
247 | /* per-cpu local core timer */ | ||
248 | static DEFINE_PER_CPU(struct clock_event_device, coretmr_events); | ||
249 | |||
250 | static int bfin_coretmr_set_next_event(unsigned long cycles, | ||
246 | struct clock_event_device *evt) | 251 | struct clock_event_device *evt) |
247 | { | 252 | { |
248 | bfin_write_TCNTL(TMPWR); | 253 | bfin_write_TCNTL(TMPWR); |
@@ -253,7 +258,7 @@ static int bfin_timer_set_next_event(unsigned long cycles, | |||
253 | return 0; | 258 | return 0; |
254 | } | 259 | } |
255 | 260 | ||
256 | static void bfin_timer_set_mode(enum clock_event_mode mode, | 261 | static void bfin_coretmr_set_mode(enum clock_event_mode mode, |
257 | struct clock_event_device *evt) | 262 | struct clock_event_device *evt) |
258 | { | 263 | { |
259 | switch (mode) { | 264 | switch (mode) { |
@@ -285,19 +290,13 @@ static void bfin_timer_set_mode(enum clock_event_mode mode, | |||
285 | } | 290 | } |
286 | } | 291 | } |
287 | 292 | ||
288 | static void bfin_timer_ack(void) | 293 | void bfin_coretmr_init(void) |
289 | { | ||
290 | } | ||
291 | |||
292 | static void __init bfin_timer_init(void) | ||
293 | { | 294 | { |
294 | /* power up the timer, but don't enable it just yet */ | 295 | /* power up the timer, but don't enable it just yet */ |
295 | bfin_write_TCNTL(TMPWR); | 296 | bfin_write_TCNTL(TMPWR); |
296 | CSYNC(); | 297 | CSYNC(); |
297 | 298 | ||
298 | /* | 299 | /* the TSCALE prescaler counter. */ |
299 | * the TSCALE prescaler counter. | ||
300 | */ | ||
301 | bfin_write_TSCALE(TIME_SCALE - 1); | 300 | bfin_write_TSCALE(TIME_SCALE - 1); |
302 | bfin_write_TPERIOD(0); | 301 | bfin_write_TPERIOD(0); |
303 | bfin_write_TCOUNT(0); | 302 | bfin_write_TCOUNT(0); |
@@ -305,52 +304,64 @@ static void __init bfin_timer_init(void) | |||
305 | CSYNC(); | 304 | CSYNC(); |
306 | } | 305 | } |
307 | 306 | ||
308 | static unsigned long __init bfin_clockevent_check(void) | 307 | #ifdef CONFIG_CORE_TIMER_IRQ_L1 |
308 | __attribute__((l1_text)) | ||
309 | #endif | ||
310 | irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id) | ||
309 | { | 311 | { |
310 | setup_irq(IRQ_CORETMR, &bfin_timer_irq); | 312 | int cpu = smp_processor_id(); |
311 | return get_cclk() / TIME_SCALE; | 313 | struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); |
312 | } | ||
313 | 314 | ||
314 | void __init setup_core_timer(void) | ||
315 | { | ||
316 | bfin_timer_init(); | ||
317 | bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL); | ||
318 | } | ||
319 | #endif /* CONFIG_TICKSOURCE_GPTMR0 */ | ||
320 | |||
321 | /* | ||
322 | * timer_interrupt() needs to keep up the real-time clock, | ||
323 | * as well as call the "do_timer()" routine every clocktick | ||
324 | */ | ||
325 | irqreturn_t timer_interrupt(int irq, void *dev_id) | ||
326 | { | ||
327 | struct clock_event_device *evt = dev_id; | ||
328 | smp_mb(); | 315 | smp_mb(); |
329 | evt->event_handler(evt); | 316 | evt->event_handler(evt); |
330 | bfin_timer_ack(); | 317 | |
318 | touch_nmi_watchdog(); | ||
319 | |||
331 | return IRQ_HANDLED; | 320 | return IRQ_HANDLED; |
332 | } | 321 | } |
333 | 322 | ||
334 | static int __init bfin_clockevent_init(void) | 323 | static struct irqaction coretmr_irq = { |
335 | { | 324 | .name = "Blackfin CoreTimer", |
336 | unsigned long timer_clk; | 325 | .flags = IRQF_DISABLED | IRQF_TIMER | \ |
337 | 326 | IRQF_IRQPOLL | IRQF_PERCPU, | |
338 | timer_clk = bfin_clockevent_check(); | 327 | .handler = bfin_coretmr_interrupt, |
328 | }; | ||
339 | 329 | ||
340 | bfin_timer_init(); | 330 | void bfin_coretmr_clockevent_init(void) |
331 | { | ||
332 | unsigned long clock_tick; | ||
333 | unsigned int cpu = smp_processor_id(); | ||
334 | struct clock_event_device *evt = &per_cpu(coretmr_events, cpu); | ||
335 | |||
336 | evt->name = "bfin_core_timer"; | ||
337 | evt->rating = 350; | ||
338 | evt->irq = -1; | ||
339 | evt->shift = 32; | ||
340 | evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; | ||
341 | evt->set_next_event = bfin_coretmr_set_next_event; | ||
342 | evt->set_mode = bfin_coretmr_set_mode; | ||
343 | |||
344 | clock_tick = get_cclk() / TIME_SCALE; | ||
345 | evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift); | ||
346 | evt->max_delta_ns = clockevent_delta2ns(-1, evt); | ||
347 | evt->min_delta_ns = clockevent_delta2ns(100, evt); | ||
348 | |||
349 | evt->cpumask = cpumask_of(cpu); | ||
350 | |||
351 | clockevents_register_device(evt); | ||
352 | } | ||
353 | #endif /* CONFIG_TICKSOURCE_CORETMR */ | ||
341 | 354 | ||
342 | clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); | ||
343 | clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin); | ||
344 | clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin); | ||
345 | clockevent_bfin.cpumask = cpumask_of(0); | ||
346 | clockevents_register_device(&clockevent_bfin); | ||
347 | 355 | ||
348 | return 0; | 356 | void read_persistent_clock(struct timespec *ts) |
357 | { | ||
358 | time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */ | ||
359 | ts->tv_sec = secs_since_1970; | ||
360 | ts->tv_nsec = 0; | ||
349 | } | 361 | } |
350 | 362 | ||
351 | void __init time_init(void) | 363 | void __init time_init(void) |
352 | { | 364 | { |
353 | time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */ | ||
354 | 365 | ||
355 | #ifdef CONFIG_RTC_DRV_BFIN | 366 | #ifdef CONFIG_RTC_DRV_BFIN |
356 | /* [#2663] hack to filter junk RTC values that would cause | 367 | /* [#2663] hack to filter junk RTC values that would cause |
@@ -363,12 +374,23 @@ void __init time_init(void) | |||
363 | } | 374 | } |
364 | #endif | 375 | #endif |
365 | 376 | ||
366 | /* Initialize xtime. From now on, xtime is updated with timer interrupts */ | ||
367 | xtime.tv_sec = secs_since_1970; | ||
368 | xtime.tv_nsec = 0; | ||
369 | set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); | ||
370 | |||
371 | bfin_cs_cycles_init(); | 377 | bfin_cs_cycles_init(); |
372 | bfin_cs_gptimer0_init(); | 378 | bfin_cs_gptimer0_init(); |
373 | bfin_clockevent_init(); | 379 | |
380 | #if defined(CONFIG_TICKSOURCE_CORETMR) | ||
381 | bfin_coretmr_init(); | ||
382 | setup_irq(IRQ_CORETMR, &coretmr_irq); | ||
383 | bfin_coretmr_clockevent_init(); | ||
384 | #endif | ||
385 | |||
386 | #if defined(CONFIG_TICKSOURCE_GPTMR0) | ||
387 | bfin_gptmr0_init(); | ||
388 | setup_irq(IRQ_TIMER0, &gptmr0_irq); | ||
389 | gptmr0_irq.dev_id = &clockevent_gptmr0; | ||
390 | bfin_gptmr0_clockevent_init(&clockevent_gptmr0); | ||
391 | #endif | ||
392 | |||
393 | #if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0) | ||
394 | # error at least one clock event device is required | ||
395 | #endif | ||
374 | } | 396 | } |
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c index 13c1ee3e6408..c9113619029f 100644 --- a/arch/blackfin/kernel/time.c +++ b/arch/blackfin/kernel/time.c | |||
@@ -112,11 +112,6 @@ u32 arch_gettimeoffset(void) | |||
112 | } | 112 | } |
113 | #endif | 113 | #endif |
114 | 114 | ||
115 | static inline int set_rtc_mmss(unsigned long nowtime) | ||
116 | { | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | /* | 115 | /* |
121 | * timer_interrupt() needs to keep up the real-time clock, | 116 | * timer_interrupt() needs to keep up the real-time clock, |
122 | * as well as call the "do_timer()" routine every clocktick | 117 | * as well as call the "do_timer()" routine every clocktick |
@@ -126,29 +121,8 @@ __attribute__((l1_text)) | |||
126 | #endif | 121 | #endif |
127 | irqreturn_t timer_interrupt(int irq, void *dummy) | 122 | irqreturn_t timer_interrupt(int irq, void *dummy) |
128 | { | 123 | { |
129 | /* last time the cmos clock got updated */ | ||
130 | static long last_rtc_update; | ||
131 | |||
132 | write_seqlock(&xtime_lock); | 124 | write_seqlock(&xtime_lock); |
133 | do_timer(1); | 125 | do_timer(1); |
134 | |||
135 | /* | ||
136 | * If we have an externally synchronized Linux clock, then update | ||
137 | * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be | ||
138 | * called as close as possible to 500 ms before the new second starts. | ||
139 | */ | ||
140 | if (ntp_synced() && | ||
141 | xtime.tv_sec > last_rtc_update + 660 && | ||
142 | (xtime.tv_nsec / NSEC_PER_USEC) >= | ||
143 | 500000 - ((unsigned)TICK_SIZE) / 2 | ||
144 | && (xtime.tv_nsec / NSEC_PER_USEC) <= | ||
145 | 500000 + ((unsigned)TICK_SIZE) / 2) { | ||
146 | if (set_rtc_mmss(xtime.tv_sec) == 0) | ||
147 | last_rtc_update = xtime.tv_sec; | ||
148 | else | ||
149 | /* Do it again in 60s. */ | ||
150 | last_rtc_update = xtime.tv_sec - 600; | ||
151 | } | ||
152 | write_sequnlock(&xtime_lock); | 126 | write_sequnlock(&xtime_lock); |
153 | 127 | ||
154 | #ifdef CONFIG_IPIPE | 128 | #ifdef CONFIG_IPIPE |
@@ -161,10 +135,15 @@ irqreturn_t timer_interrupt(int irq, void *dummy) | |||
161 | return IRQ_HANDLED; | 135 | return IRQ_HANDLED; |
162 | } | 136 | } |
163 | 137 | ||
164 | void __init time_init(void) | 138 | void read_persistent_clock(struct timespec *ts) |
165 | { | 139 | { |
166 | time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */ | 140 | time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */ |
141 | ts->tv_sec = secs_since_1970; | ||
142 | ts->tv_nsec = 0; | ||
143 | } | ||
167 | 144 | ||
145 | void __init time_init(void) | ||
146 | { | ||
168 | #ifdef CONFIG_RTC_DRV_BFIN | 147 | #ifdef CONFIG_RTC_DRV_BFIN |
169 | /* [#2663] hack to filter junk RTC values that would cause | 148 | /* [#2663] hack to filter junk RTC values that would cause |
170 | * userspace to have to deal with time values greater than | 149 | * userspace to have to deal with time values greater than |
@@ -176,11 +155,5 @@ void __init time_init(void) | |||
176 | } | 155 | } |
177 | #endif | 156 | #endif |
178 | 157 | ||
179 | /* Initialize xtime. From now on, xtime is updated with timer interrupts */ | ||
180 | xtime.tv_sec = secs_since_1970; | ||
181 | xtime.tv_nsec = 0; | ||
182 | |||
183 | wall_to_monotonic.tv_sec = -xtime.tv_sec; | ||
184 | |||
185 | time_sched_init(timer_interrupt); | 158 | time_sched_init(timer_interrupt); |
186 | } | 159 | } |
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c new file mode 100644 index 000000000000..59fcdf6b0138 --- /dev/null +++ b/arch/blackfin/kernel/trace.c | |||
@@ -0,0 +1,981 @@ | |||
1 | /* provide some functions which dump the trace buffer, in a nice way for people | ||
2 | * to read it, and understand what is going on | ||
3 | * | ||
4 | * Copyright 2004-2010 Analog Devices Inc. | ||
5 | * | ||
6 | * Licensed under the GPL-2 or later | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/hardirq.h> | ||
11 | #include <linux/thread_info.h> | ||
12 | #include <linux/mm.h> | ||
13 | #include <linux/uaccess.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/kallsyms.h> | ||
16 | #include <linux/err.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <asm/dma.h> | ||
19 | #include <asm/trace.h> | ||
20 | #include <asm/fixed_code.h> | ||
21 | #include <asm/traps.h> | ||
22 | #include <asm/irq_handler.h> | ||
23 | |||
24 | void decode_address(char *buf, unsigned long address) | ||
25 | { | ||
26 | struct task_struct *p; | ||
27 | struct mm_struct *mm; | ||
28 | unsigned long flags, offset; | ||
29 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); | ||
30 | struct rb_node *n; | ||
31 | |||
32 | #ifdef CONFIG_KALLSYMS | ||
33 | unsigned long symsize; | ||
34 | const char *symname; | ||
35 | char *modname; | ||
36 | char *delim = ":"; | ||
37 | char namebuf[128]; | ||
38 | #endif | ||
39 | |||
40 | buf += sprintf(buf, "<0x%08lx> ", address); | ||
41 | |||
42 | #ifdef CONFIG_KALLSYMS | ||
43 | /* look up the address and see if we are in kernel space */ | ||
44 | symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); | ||
45 | |||
46 | if (symname) { | ||
47 | /* yeah! kernel space! */ | ||
48 | if (!modname) | ||
49 | modname = delim = ""; | ||
50 | sprintf(buf, "{ %s%s%s%s + 0x%lx }", | ||
51 | delim, modname, delim, symname, | ||
52 | (unsigned long)offset); | ||
53 | return; | ||
54 | } | ||
55 | #endif | ||
56 | |||
57 | if (address >= FIXED_CODE_START && address < FIXED_CODE_END) { | ||
58 | /* Problem in fixed code section? */ | ||
59 | strcat(buf, "/* Maybe fixed code section */"); | ||
60 | return; | ||
61 | |||
62 | } else if (address < CONFIG_BOOT_LOAD) { | ||
63 | /* Problem somewhere before the kernel start address */ | ||
64 | strcat(buf, "/* Maybe null pointer? */"); | ||
65 | return; | ||
66 | |||
67 | } else if (address >= COREMMR_BASE) { | ||
68 | strcat(buf, "/* core mmrs */"); | ||
69 | return; | ||
70 | |||
71 | } else if (address >= SYSMMR_BASE) { | ||
72 | strcat(buf, "/* system mmrs */"); | ||
73 | return; | ||
74 | |||
75 | } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) { | ||
76 | strcat(buf, "/* on-chip L1 ROM */"); | ||
77 | return; | ||
78 | |||
79 | } else if (address >= L1_SCRATCH_START && address < L1_SCRATCH_START + L1_SCRATCH_LENGTH) { | ||
80 | strcat(buf, "/* on-chip scratchpad */"); | ||
81 | return; | ||
82 | |||
83 | } else if (address >= physical_mem_end && address < ASYNC_BANK0_BASE) { | ||
84 | strcat(buf, "/* unconnected memory */"); | ||
85 | return; | ||
86 | |||
87 | } else if (address >= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE && address < BOOT_ROM_START) { | ||
88 | strcat(buf, "/* reserved memory */"); | ||
89 | return; | ||
90 | |||
91 | } else if (address >= L1_DATA_A_START && address < L1_DATA_A_START + L1_DATA_A_LENGTH) { | ||
92 | strcat(buf, "/* on-chip Data Bank A */"); | ||
93 | return; | ||
94 | |||
95 | } else if (address >= L1_DATA_B_START && address < L1_DATA_B_START + L1_DATA_B_LENGTH) { | ||
96 | strcat(buf, "/* on-chip Data Bank B */"); | ||
97 | return; | ||
98 | } | ||
99 | |||
100 | /* | ||
101 | * Don't walk any of the vmas if we are oopsing, it has been known | ||
102 | * to cause problems - corrupt vmas (kernel crashes) cause double faults | ||
103 | */ | ||
104 | if (oops_in_progress) { | ||
105 | strcat(buf, "/* kernel dynamic memory (maybe user-space) */"); | ||
106 | return; | ||
107 | } | ||
108 | |||
109 | /* looks like we're off in user-land, so let's walk all the | ||
110 | * mappings of all our processes and see if we can't be a whee | ||
111 | * bit more specific | ||
112 | */ | ||
113 | write_lock_irqsave(&tasklist_lock, flags); | ||
114 | for_each_process(p) { | ||
115 | mm = (in_atomic ? p->mm : get_task_mm(p)); | ||
116 | if (!mm) | ||
117 | continue; | ||
118 | |||
119 | if (!down_read_trylock(&mm->mmap_sem)) { | ||
120 | if (!in_atomic) | ||
121 | mmput(mm); | ||
122 | continue; | ||
123 | } | ||
124 | |||
125 | for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { | ||
126 | struct vm_area_struct *vma; | ||
127 | |||
128 | vma = rb_entry(n, struct vm_area_struct, vm_rb); | ||
129 | |||
130 | if (address >= vma->vm_start && address < vma->vm_end) { | ||
131 | char _tmpbuf[256]; | ||
132 | char *name = p->comm; | ||
133 | struct file *file = vma->vm_file; | ||
134 | |||
135 | if (file) { | ||
136 | char *d_name = d_path(&file->f_path, _tmpbuf, | ||
137 | sizeof(_tmpbuf)); | ||
138 | if (!IS_ERR(d_name)) | ||
139 | name = d_name; | ||
140 | } | ||
141 | |||
142 | /* FLAT does not have its text aligned to the start of | ||
143 | * the map while FDPIC ELF does ... | ||
144 | */ | ||
145 | |||
146 | /* before we can check flat/fdpic, we need to | ||
147 | * make sure current is valid | ||
148 | */ | ||
149 | if ((unsigned long)current >= FIXED_CODE_START && | ||
150 | !((unsigned long)current & 0x3)) { | ||
151 | if (current->mm && | ||
152 | (address > current->mm->start_code) && | ||
153 | (address < current->mm->end_code)) | ||
154 | offset = address - current->mm->start_code; | ||
155 | else | ||
156 | offset = (address - vma->vm_start) + | ||
157 | (vma->vm_pgoff << PAGE_SHIFT); | ||
158 | |||
159 | sprintf(buf, "[ %s + 0x%lx ]", name, offset); | ||
160 | } else | ||
161 | sprintf(buf, "[ %s vma:0x%lx-0x%lx]", | ||
162 | name, vma->vm_start, vma->vm_end); | ||
163 | |||
164 | up_read(&mm->mmap_sem); | ||
165 | if (!in_atomic) | ||
166 | mmput(mm); | ||
167 | |||
168 | if (buf[0] == '\0') | ||
169 | sprintf(buf, "[ %s ] dynamic memory", name); | ||
170 | |||
171 | goto done; | ||
172 | } | ||
173 | } | ||
174 | |||
175 | up_read(&mm->mmap_sem); | ||
176 | if (!in_atomic) | ||
177 | mmput(mm); | ||
178 | } | ||
179 | |||
180 | /* | ||
181 | * we were unable to find this address anywhere, | ||
182 | * or some MMs were skipped because they were in use. | ||
183 | */ | ||
184 | sprintf(buf, "/* kernel dynamic memory */"); | ||
185 | |||
186 | done: | ||
187 | write_unlock_irqrestore(&tasklist_lock, flags); | ||
188 | } | ||
189 | |||
190 | #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1) | ||
191 | |||
192 | /* | ||
193 | * Similar to get_user, do some address checking, then dereference | ||
194 | * Return true on success, false on bad address | ||
195 | */ | ||
196 | bool get_mem16(unsigned short *val, unsigned short *address) | ||
197 | { | ||
198 | unsigned long addr = (unsigned long)address; | ||
199 | |||
200 | /* Check for odd addresses */ | ||
201 | if (addr & 0x1) | ||
202 | return false; | ||
203 | |||
204 | switch (bfin_mem_access_type(addr, 2)) { | ||
205 | case BFIN_MEM_ACCESS_CORE: | ||
206 | case BFIN_MEM_ACCESS_CORE_ONLY: | ||
207 | *val = *address; | ||
208 | return true; | ||
209 | case BFIN_MEM_ACCESS_DMA: | ||
210 | dma_memcpy(val, address, 2); | ||
211 | return true; | ||
212 | case BFIN_MEM_ACCESS_ITEST: | ||
213 | isram_memcpy(val, address, 2); | ||
214 | return true; | ||
215 | default: /* invalid access */ | ||
216 | return false; | ||
217 | } | ||
218 | } | ||
219 | |||
220 | bool get_instruction(unsigned int *val, unsigned short *address) | ||
221 | { | ||
222 | unsigned long addr = (unsigned long)address; | ||
223 | unsigned short opcode0, opcode1; | ||
224 | |||
225 | /* Check for odd addresses */ | ||
226 | if (addr & 0x1) | ||
227 | return false; | ||
228 | |||
229 | /* MMR region will never have instructions */ | ||
230 | if (addr >= SYSMMR_BASE) | ||
231 | return false; | ||
232 | |||
233 | /* Scratchpad will never have instructions */ | ||
234 | if (addr >= L1_SCRATCH_START && addr < L1_SCRATCH_START + L1_SCRATCH_LENGTH) | ||
235 | return false; | ||
236 | |||
237 | /* Data banks will never have instructions */ | ||
238 | if (addr >= BOOT_ROM_START + BOOT_ROM_LENGTH && addr < L1_CODE_START) | ||
239 | return false; | ||
240 | |||
241 | if (!get_mem16(&opcode0, address)) | ||
242 | return false; | ||
243 | |||
244 | /* was this a 32-bit instruction? If so, get the next 16 bits */ | ||
245 | if ((opcode0 & 0xc000) == 0xc000) { | ||
246 | if (!get_mem16(&opcode1, address + 1)) | ||
247 | return false; | ||
248 | *val = (opcode0 << 16) + opcode1; | ||
249 | } else | ||
250 | *val = opcode0; | ||
251 | |||
252 | return true; | ||
253 | } | ||
254 | |||
255 | #if defined(CONFIG_DEBUG_BFIN_HWTRACE_ON) | ||
256 | /* | ||
257 | * decode the instruction if we are printing out the trace, as it | ||
258 | * makes things easier to follow, without running it through objdump | ||
259 | * Decode the change of flow, and the common load/store instructions | ||
260 | * which are the main cause for faults, and discontinuities in the trace | ||
261 | * buffer. | ||
262 | */ | ||
263 | |||
264 | #define ProgCtrl_opcode 0x0000 | ||
265 | #define ProgCtrl_poprnd_bits 0 | ||
266 | #define ProgCtrl_poprnd_mask 0xf | ||
267 | #define ProgCtrl_prgfunc_bits 4 | ||
268 | #define ProgCtrl_prgfunc_mask 0xf | ||
269 | #define ProgCtrl_code_bits 8 | ||
270 | #define ProgCtrl_code_mask 0xff | ||
271 | |||
272 | static void decode_ProgCtrl_0(unsigned int opcode) | ||
273 | { | ||
274 | int poprnd = ((opcode >> ProgCtrl_poprnd_bits) & ProgCtrl_poprnd_mask); | ||
275 | int prgfunc = ((opcode >> ProgCtrl_prgfunc_bits) & ProgCtrl_prgfunc_mask); | ||
276 | |||
277 | if (prgfunc == 0 && poprnd == 0) | ||
278 | pr_cont("NOP"); | ||
279 | else if (prgfunc == 1 && poprnd == 0) | ||
280 | pr_cont("RTS"); | ||
281 | else if (prgfunc == 1 && poprnd == 1) | ||
282 | pr_cont("RTI"); | ||
283 | else if (prgfunc == 1 && poprnd == 2) | ||
284 | pr_cont("RTX"); | ||
285 | else if (prgfunc == 1 && poprnd == 3) | ||
286 | pr_cont("RTN"); | ||
287 | else if (prgfunc == 1 && poprnd == 4) | ||
288 | pr_cont("RTE"); | ||
289 | else if (prgfunc == 2 && poprnd == 0) | ||
290 | pr_cont("IDLE"); | ||
291 | else if (prgfunc == 2 && poprnd == 3) | ||
292 | pr_cont("CSYNC"); | ||
293 | else if (prgfunc == 2 && poprnd == 4) | ||
294 | pr_cont("SSYNC"); | ||
295 | else if (prgfunc == 2 && poprnd == 5) | ||
296 | pr_cont("EMUEXCPT"); | ||
297 | else if (prgfunc == 3) | ||
298 | pr_cont("CLI R%i", poprnd); | ||
299 | else if (prgfunc == 4) | ||
300 | pr_cont("STI R%i", poprnd); | ||
301 | else if (prgfunc == 5) | ||
302 | pr_cont("JUMP (P%i)", poprnd); | ||
303 | else if (prgfunc == 6) | ||
304 | pr_cont("CALL (P%i)", poprnd); | ||
305 | else if (prgfunc == 7) | ||
306 | pr_cont("CALL (PC + P%i)", poprnd); | ||
307 | else if (prgfunc == 8) | ||
308 | pr_cont("JUMP (PC + P%i", poprnd); | ||
309 | else if (prgfunc == 9) | ||
310 | pr_cont("RAISE %i", poprnd); | ||
311 | else if (prgfunc == 10) | ||
312 | pr_cont("EXCPT %i", poprnd); | ||
313 | else | ||
314 | pr_cont("0x%04x", opcode); | ||
315 | |||
316 | } | ||
317 | |||
318 | #define BRCC_opcode 0x1000 | ||
319 | #define BRCC_offset_bits 0 | ||
320 | #define BRCC_offset_mask 0x3ff | ||
321 | #define BRCC_B_bits 10 | ||
322 | #define BRCC_B_mask 0x1 | ||
323 | #define BRCC_T_bits 11 | ||
324 | #define BRCC_T_mask 0x1 | ||
325 | #define BRCC_code_bits 12 | ||
326 | #define BRCC_code_mask 0xf | ||
327 | |||
328 | static void decode_BRCC_0(unsigned int opcode) | ||
329 | { | ||
330 | int B = ((opcode >> BRCC_B_bits) & BRCC_B_mask); | ||
331 | int T = ((opcode >> BRCC_T_bits) & BRCC_T_mask); | ||
332 | |||
333 | pr_cont("IF %sCC JUMP pcrel %s", T ? "" : "!", B ? "(BP)" : ""); | ||
334 | } | ||
335 | |||
336 | #define CALLa_opcode 0xe2000000 | ||
337 | #define CALLa_addr_bits 0 | ||
338 | #define CALLa_addr_mask 0xffffff | ||
339 | #define CALLa_S_bits 24 | ||
340 | #define CALLa_S_mask 0x1 | ||
341 | #define CALLa_code_bits 25 | ||
342 | #define CALLa_code_mask 0x7f | ||
343 | |||
344 | static void decode_CALLa_0(unsigned int opcode) | ||
345 | { | ||
346 | int S = ((opcode >> (CALLa_S_bits - 16)) & CALLa_S_mask); | ||
347 | |||
348 | if (S) | ||
349 | pr_cont("CALL pcrel"); | ||
350 | else | ||
351 | pr_cont("JUMP.L"); | ||
352 | } | ||
353 | |||
354 | #define LoopSetup_opcode 0xe0800000 | ||
355 | #define LoopSetup_eoffset_bits 0 | ||
356 | #define LoopSetup_eoffset_mask 0x3ff | ||
357 | #define LoopSetup_dontcare_bits 10 | ||
358 | #define LoopSetup_dontcare_mask 0x3 | ||
359 | #define LoopSetup_reg_bits 12 | ||
360 | #define LoopSetup_reg_mask 0xf | ||
361 | #define LoopSetup_soffset_bits 16 | ||
362 | #define LoopSetup_soffset_mask 0xf | ||
363 | #define LoopSetup_c_bits 20 | ||
364 | #define LoopSetup_c_mask 0x1 | ||
365 | #define LoopSetup_rop_bits 21 | ||
366 | #define LoopSetup_rop_mask 0x3 | ||
367 | #define LoopSetup_code_bits 23 | ||
368 | #define LoopSetup_code_mask 0x1ff | ||
369 | |||
370 | static void decode_LoopSetup_0(unsigned int opcode) | ||
371 | { | ||
372 | int c = ((opcode >> LoopSetup_c_bits) & LoopSetup_c_mask); | ||
373 | int reg = ((opcode >> LoopSetup_reg_bits) & LoopSetup_reg_mask); | ||
374 | int rop = ((opcode >> LoopSetup_rop_bits) & LoopSetup_rop_mask); | ||
375 | |||
376 | pr_cont("LSETUP <> LC%i", c); | ||
377 | if ((rop & 1) == 1) | ||
378 | pr_cont("= P%i", reg); | ||
379 | if ((rop & 2) == 2) | ||
380 | pr_cont(" >> 0x1"); | ||
381 | } | ||
382 | |||
383 | #define DspLDST_opcode 0x9c00 | ||
384 | #define DspLDST_reg_bits 0 | ||
385 | #define DspLDST_reg_mask 0x7 | ||
386 | #define DspLDST_i_bits 3 | ||
387 | #define DspLDST_i_mask 0x3 | ||
388 | #define DspLDST_m_bits 5 | ||
389 | #define DspLDST_m_mask 0x3 | ||
390 | #define DspLDST_aop_bits 7 | ||
391 | #define DspLDST_aop_mask 0x3 | ||
392 | #define DspLDST_W_bits 9 | ||
393 | #define DspLDST_W_mask 0x1 | ||
394 | #define DspLDST_code_bits 10 | ||
395 | #define DspLDST_code_mask 0x3f | ||
396 | |||
397 | static void decode_dspLDST_0(unsigned int opcode) | ||
398 | { | ||
399 | int i = ((opcode >> DspLDST_i_bits) & DspLDST_i_mask); | ||
400 | int m = ((opcode >> DspLDST_m_bits) & DspLDST_m_mask); | ||
401 | int W = ((opcode >> DspLDST_W_bits) & DspLDST_W_mask); | ||
402 | int aop = ((opcode >> DspLDST_aop_bits) & DspLDST_aop_mask); | ||
403 | int reg = ((opcode >> DspLDST_reg_bits) & DspLDST_reg_mask); | ||
404 | |||
405 | if (W == 0) { | ||
406 | pr_cont("R%i", reg); | ||
407 | switch (m) { | ||
408 | case 0: | ||
409 | pr_cont(" = "); | ||
410 | break; | ||
411 | case 1: | ||
412 | pr_cont(".L = "); | ||
413 | break; | ||
414 | case 2: | ||
415 | pr_cont(".W = "); | ||
416 | break; | ||
417 | } | ||
418 | } | ||
419 | |||
420 | pr_cont("[ I%i", i); | ||
421 | |||
422 | switch (aop) { | ||
423 | case 0: | ||
424 | pr_cont("++ ]"); | ||
425 | break; | ||
426 | case 1: | ||
427 | pr_cont("-- ]"); | ||
428 | break; | ||
429 | } | ||
430 | |||
431 | if (W == 1) { | ||
432 | pr_cont(" = R%i", reg); | ||
433 | switch (m) { | ||
434 | case 1: | ||
435 | pr_cont(".L = "); | ||
436 | break; | ||
437 | case 2: | ||
438 | pr_cont(".W = "); | ||
439 | break; | ||
440 | } | ||
441 | } | ||
442 | } | ||
443 | |||
444 | #define LDST_opcode 0x9000 | ||
445 | #define LDST_reg_bits 0 | ||
446 | #define LDST_reg_mask 0x7 | ||
447 | #define LDST_ptr_bits 3 | ||
448 | #define LDST_ptr_mask 0x7 | ||
449 | #define LDST_Z_bits 6 | ||
450 | #define LDST_Z_mask 0x1 | ||
451 | #define LDST_aop_bits 7 | ||
452 | #define LDST_aop_mask 0x3 | ||
453 | #define LDST_W_bits 9 | ||
454 | #define LDST_W_mask 0x1 | ||
455 | #define LDST_sz_bits 10 | ||
456 | #define LDST_sz_mask 0x3 | ||
457 | #define LDST_code_bits 12 | ||
458 | #define LDST_code_mask 0xf | ||
459 | |||
460 | static void decode_LDST_0(unsigned int opcode) | ||
461 | { | ||
462 | int Z = ((opcode >> LDST_Z_bits) & LDST_Z_mask); | ||
463 | int W = ((opcode >> LDST_W_bits) & LDST_W_mask); | ||
464 | int sz = ((opcode >> LDST_sz_bits) & LDST_sz_mask); | ||
465 | int aop = ((opcode >> LDST_aop_bits) & LDST_aop_mask); | ||
466 | int reg = ((opcode >> LDST_reg_bits) & LDST_reg_mask); | ||
467 | int ptr = ((opcode >> LDST_ptr_bits) & LDST_ptr_mask); | ||
468 | |||
469 | if (W == 0) | ||
470 | pr_cont("%s%i = ", (sz == 0 && Z == 1) ? "P" : "R", reg); | ||
471 | |||
472 | switch (sz) { | ||
473 | case 1: | ||
474 | pr_cont("W"); | ||
475 | break; | ||
476 | case 2: | ||
477 | pr_cont("B"); | ||
478 | break; | ||
479 | } | ||
480 | |||
481 | pr_cont("[P%i", ptr); | ||
482 | |||
483 | switch (aop) { | ||
484 | case 0: | ||
485 | pr_cont("++"); | ||
486 | break; | ||
487 | case 1: | ||
488 | pr_cont("--"); | ||
489 | break; | ||
490 | } | ||
491 | pr_cont("]"); | ||
492 | |||
493 | if (W == 1) | ||
494 | pr_cont(" = %s%i ", (sz == 0 && Z == 1) ? "P" : "R", reg); | ||
495 | |||
496 | if (sz) { | ||
497 | if (Z) | ||
498 | pr_cont(" (X)"); | ||
499 | else | ||
500 | pr_cont(" (Z)"); | ||
501 | } | ||
502 | } | ||
503 | |||
504 | #define LDSTii_opcode 0xa000 | ||
505 | #define LDSTii_reg_bit 0 | ||
506 | #define LDSTii_reg_mask 0x7 | ||
507 | #define LDSTii_ptr_bit 3 | ||
508 | #define LDSTii_ptr_mask 0x7 | ||
509 | #define LDSTii_offset_bit 6 | ||
510 | #define LDSTii_offset_mask 0xf | ||
511 | #define LDSTii_op_bit 10 | ||
512 | #define LDSTii_op_mask 0x3 | ||
513 | #define LDSTii_W_bit 12 | ||
514 | #define LDSTii_W_mask 0x1 | ||
515 | #define LDSTii_code_bit 13 | ||
516 | #define LDSTii_code_mask 0x7 | ||
517 | |||
518 | static void decode_LDSTii_0(unsigned int opcode) | ||
519 | { | ||
520 | int reg = ((opcode >> LDSTii_reg_bit) & LDSTii_reg_mask); | ||
521 | int ptr = ((opcode >> LDSTii_ptr_bit) & LDSTii_ptr_mask); | ||
522 | int offset = ((opcode >> LDSTii_offset_bit) & LDSTii_offset_mask); | ||
523 | int op = ((opcode >> LDSTii_op_bit) & LDSTii_op_mask); | ||
524 | int W = ((opcode >> LDSTii_W_bit) & LDSTii_W_mask); | ||
525 | |||
526 | if (W == 0) { | ||
527 | pr_cont("%s%i = %s[P%i + %i]", op == 3 ? "R" : "P", reg, | ||
528 | op == 1 || op == 2 ? "" : "W", ptr, offset); | ||
529 | if (op == 2) | ||
530 | pr_cont("(Z)"); | ||
531 | if (op == 3) | ||
532 | pr_cont("(X)"); | ||
533 | } else { | ||
534 | pr_cont("%s[P%i + %i] = %s%i", op == 0 ? "" : "W", ptr, | ||
535 | offset, op == 3 ? "P" : "R", reg); | ||
536 | } | ||
537 | } | ||
538 | |||
539 | #define LDSTidxI_opcode 0xe4000000 | ||
540 | #define LDSTidxI_offset_bits 0 | ||
541 | #define LDSTidxI_offset_mask 0xffff | ||
542 | #define LDSTidxI_reg_bits 16 | ||
543 | #define LDSTidxI_reg_mask 0x7 | ||
544 | #define LDSTidxI_ptr_bits 19 | ||
545 | #define LDSTidxI_ptr_mask 0x7 | ||
546 | #define LDSTidxI_sz_bits 22 | ||
547 | #define LDSTidxI_sz_mask 0x3 | ||
548 | #define LDSTidxI_Z_bits 24 | ||
549 | #define LDSTidxI_Z_mask 0x1 | ||
550 | #define LDSTidxI_W_bits 25 | ||
551 | #define LDSTidxI_W_mask 0x1 | ||
552 | #define LDSTidxI_code_bits 26 | ||
553 | #define LDSTidxI_code_mask 0x3f | ||
554 | |||
555 | static void decode_LDSTidxI_0(unsigned int opcode) | ||
556 | { | ||
557 | int Z = ((opcode >> LDSTidxI_Z_bits) & LDSTidxI_Z_mask); | ||
558 | int W = ((opcode >> LDSTidxI_W_bits) & LDSTidxI_W_mask); | ||
559 | int sz = ((opcode >> LDSTidxI_sz_bits) & LDSTidxI_sz_mask); | ||
560 | int reg = ((opcode >> LDSTidxI_reg_bits) & LDSTidxI_reg_mask); | ||
561 | int ptr = ((opcode >> LDSTidxI_ptr_bits) & LDSTidxI_ptr_mask); | ||
562 | int offset = ((opcode >> LDSTidxI_offset_bits) & LDSTidxI_offset_mask); | ||
563 | |||
564 | if (W == 0) | ||
565 | pr_cont("%s%i = ", sz == 0 && Z == 1 ? "P" : "R", reg); | ||
566 | |||
567 | if (sz == 1) | ||
568 | pr_cont("W"); | ||
569 | if (sz == 2) | ||
570 | pr_cont("B"); | ||
571 | |||
572 | pr_cont("[P%i + %s0x%x]", ptr, offset & 0x20 ? "-" : "", | ||
573 | (offset & 0x1f) << 2); | ||
574 | |||
575 | if (W == 0 && sz != 0) { | ||
576 | if (Z) | ||
577 | pr_cont("(X)"); | ||
578 | else | ||
579 | pr_cont("(Z)"); | ||
580 | } | ||
581 | |||
582 | if (W == 1) | ||
583 | pr_cont("= %s%i", (sz == 0 && Z == 1) ? "P" : "R", reg); | ||
584 | |||
585 | } | ||
586 | |||
587 | static void decode_opcode(unsigned int opcode) | ||
588 | { | ||
589 | #ifdef CONFIG_BUG | ||
590 | if (opcode == BFIN_BUG_OPCODE) | ||
591 | pr_cont("BUG"); | ||
592 | else | ||
593 | #endif | ||
594 | if ((opcode & 0xffffff00) == ProgCtrl_opcode) | ||
595 | decode_ProgCtrl_0(opcode); | ||
596 | else if ((opcode & 0xfffff000) == BRCC_opcode) | ||
597 | decode_BRCC_0(opcode); | ||
598 | else if ((opcode & 0xfffff000) == 0x2000) | ||
599 | pr_cont("JUMP.S"); | ||
600 | else if ((opcode & 0xfe000000) == CALLa_opcode) | ||
601 | decode_CALLa_0(opcode); | ||
602 | else if ((opcode & 0xff8000C0) == LoopSetup_opcode) | ||
603 | decode_LoopSetup_0(opcode); | ||
604 | else if ((opcode & 0xfffffc00) == DspLDST_opcode) | ||
605 | decode_dspLDST_0(opcode); | ||
606 | else if ((opcode & 0xfffff000) == LDST_opcode) | ||
607 | decode_LDST_0(opcode); | ||
608 | else if ((opcode & 0xffffe000) == LDSTii_opcode) | ||
609 | decode_LDSTii_0(opcode); | ||
610 | else if ((opcode & 0xfc000000) == LDSTidxI_opcode) | ||
611 | decode_LDSTidxI_0(opcode); | ||
612 | else if (opcode & 0xffff0000) | ||
613 | pr_cont("0x%08x", opcode); | ||
614 | else | ||
615 | pr_cont("0x%04x", opcode); | ||
616 | } | ||
617 | |||
618 | #define BIT_MULTI_INS 0x08000000 | ||
619 | static void decode_instruction(unsigned short *address) | ||
620 | { | ||
621 | unsigned int opcode; | ||
622 | |||
623 | if (!get_instruction(&opcode, address)) | ||
624 | return; | ||
625 | |||
626 | decode_opcode(opcode); | ||
627 | |||
628 | /* If things are a 32-bit instruction, it has the possibility of being | ||
629 | * a multi-issue instruction (a 32-bit, and 2 16 bit instrucitions) | ||
630 | * This test collidates with the unlink instruction, so disallow that | ||
631 | */ | ||
632 | if ((opcode & 0xc0000000) == 0xc0000000 && | ||
633 | (opcode & BIT_MULTI_INS) && | ||
634 | (opcode & 0xe8000000) != 0xe8000000) { | ||
635 | pr_cont(" || "); | ||
636 | if (!get_instruction(&opcode, address + 2)) | ||
637 | return; | ||
638 | decode_opcode(opcode); | ||
639 | pr_cont(" || "); | ||
640 | if (!get_instruction(&opcode, address + 3)) | ||
641 | return; | ||
642 | decode_opcode(opcode); | ||
643 | } | ||
644 | } | ||
645 | #endif | ||
646 | |||
647 | void dump_bfin_trace_buffer(void) | ||
648 | { | ||
649 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | ||
650 | int tflags, i = 0, fault = 0; | ||
651 | char buf[150]; | ||
652 | unsigned short *addr; | ||
653 | unsigned int cpu = raw_smp_processor_id(); | ||
654 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
655 | int j, index; | ||
656 | #endif | ||
657 | |||
658 | trace_buffer_save(tflags); | ||
659 | |||
660 | pr_notice("Hardware Trace:\n"); | ||
661 | |||
662 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
663 | pr_notice("WARNING: Expanded trace turned on - can not trace exceptions\n"); | ||
664 | #endif | ||
665 | |||
666 | if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) { | ||
667 | for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) { | ||
668 | addr = (unsigned short *)bfin_read_TBUF(); | ||
669 | decode_address(buf, (unsigned long)addr); | ||
670 | pr_notice("%4i Target : %s\n", i, buf); | ||
671 | /* Normally, the faulting instruction doesn't go into | ||
672 | * the trace buffer, (since it doesn't commit), so | ||
673 | * we print out the fault address here | ||
674 | */ | ||
675 | if (!fault && addr == ((unsigned short *)evt_ivhw)) { | ||
676 | addr = (unsigned short *)bfin_read_TBUF(); | ||
677 | decode_address(buf, (unsigned long)addr); | ||
678 | pr_notice(" FAULT : %s ", buf); | ||
679 | decode_instruction(addr); | ||
680 | pr_cont("\n"); | ||
681 | fault = 1; | ||
682 | continue; | ||
683 | } | ||
684 | if (!fault && addr == (unsigned short *)trap && | ||
685 | (cpu_pda[cpu].seqstat & SEQSTAT_EXCAUSE) > VEC_EXCPT15) { | ||
686 | decode_address(buf, cpu_pda[cpu].icplb_fault_addr); | ||
687 | pr_notice(" FAULT : %s ", buf); | ||
688 | decode_instruction((unsigned short *)cpu_pda[cpu].icplb_fault_addr); | ||
689 | pr_cont("\n"); | ||
690 | fault = 1; | ||
691 | } | ||
692 | addr = (unsigned short *)bfin_read_TBUF(); | ||
693 | decode_address(buf, (unsigned long)addr); | ||
694 | pr_notice(" Source : %s ", buf); | ||
695 | decode_instruction(addr); | ||
696 | pr_cont("\n"); | ||
697 | } | ||
698 | } | ||
699 | |||
700 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
701 | if (trace_buff_offset) | ||
702 | index = trace_buff_offset / 4; | ||
703 | else | ||
704 | index = EXPAND_LEN; | ||
705 | |||
706 | j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128; | ||
707 | while (j) { | ||
708 | decode_address(buf, software_trace_buff[index]); | ||
709 | pr_notice("%4i Target : %s\n", i, buf); | ||
710 | index -= 1; | ||
711 | if (index < 0) | ||
712 | index = EXPAND_LEN; | ||
713 | decode_address(buf, software_trace_buff[index]); | ||
714 | pr_notice(" Source : %s ", buf); | ||
715 | decode_instruction((unsigned short *)software_trace_buff[index]); | ||
716 | pr_cont("\n"); | ||
717 | index -= 1; | ||
718 | if (index < 0) | ||
719 | index = EXPAND_LEN; | ||
720 | j--; | ||
721 | i++; | ||
722 | } | ||
723 | #endif | ||
724 | |||
725 | trace_buffer_restore(tflags); | ||
726 | #endif | ||
727 | } | ||
728 | EXPORT_SYMBOL(dump_bfin_trace_buffer); | ||
729 | |||
730 | void dump_bfin_process(struct pt_regs *fp) | ||
731 | { | ||
732 | /* We should be able to look at fp->ipend, but we don't push it on the | ||
733 | * stack all the time, so do this until we fix that */ | ||
734 | unsigned int context = bfin_read_IPEND(); | ||
735 | |||
736 | if (oops_in_progress) | ||
737 | pr_emerg("Kernel OOPS in progress\n"); | ||
738 | |||
739 | if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) | ||
740 | pr_notice("HW Error context\n"); | ||
741 | else if (context & 0x0020) | ||
742 | pr_notice("Deferred Exception context\n"); | ||
743 | else if (context & 0x3FC0) | ||
744 | pr_notice("Interrupt context\n"); | ||
745 | else if (context & 0x4000) | ||
746 | pr_notice("Deferred Interrupt context\n"); | ||
747 | else if (context & 0x8000) | ||
748 | pr_notice("Kernel process context\n"); | ||
749 | |||
750 | /* Because we are crashing, and pointers could be bad, we check things | ||
751 | * pretty closely before we use them | ||
752 | */ | ||
753 | if ((unsigned long)current >= FIXED_CODE_START && | ||
754 | !((unsigned long)current & 0x3) && current->pid) { | ||
755 | pr_notice("CURRENT PROCESS:\n"); | ||
756 | if (current->comm >= (char *)FIXED_CODE_START) | ||
757 | pr_notice("COMM=%s PID=%d", | ||
758 | current->comm, current->pid); | ||
759 | else | ||
760 | pr_notice("COMM= invalid"); | ||
761 | |||
762 | pr_cont(" CPU=%d\n", current_thread_info()->cpu); | ||
763 | if (!((unsigned long)current->mm & 0x3) && | ||
764 | (unsigned long)current->mm >= FIXED_CODE_START) { | ||
765 | pr_notice("TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n", | ||
766 | (void *)current->mm->start_code, | ||
767 | (void *)current->mm->end_code, | ||
768 | (void *)current->mm->start_data, | ||
769 | (void *)current->mm->end_data); | ||
770 | pr_notice(" BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n", | ||
771 | (void *)current->mm->end_data, | ||
772 | (void *)current->mm->brk, | ||
773 | (void *)current->mm->start_stack); | ||
774 | } else | ||
775 | pr_notice("invalid mm\n"); | ||
776 | } else | ||
777 | pr_notice("No Valid process in current context\n"); | ||
778 | } | ||
779 | |||
780 | void dump_bfin_mem(struct pt_regs *fp) | ||
781 | { | ||
782 | unsigned short *addr, *erraddr, val = 0, err = 0; | ||
783 | char sti = 0, buf[6]; | ||
784 | |||
785 | erraddr = (void *)fp->pc; | ||
786 | |||
787 | pr_notice("return address: [0x%p]; contents of:", erraddr); | ||
788 | |||
789 | for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10; | ||
790 | addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10; | ||
791 | addr++) { | ||
792 | if (!((unsigned long)addr & 0xF)) | ||
793 | pr_notice("0x%p: ", addr); | ||
794 | |||
795 | if (!get_mem16(&val, addr)) { | ||
796 | val = 0; | ||
797 | sprintf(buf, "????"); | ||
798 | } else | ||
799 | sprintf(buf, "%04x", val); | ||
800 | |||
801 | if (addr == erraddr) { | ||
802 | pr_cont("[%s]", buf); | ||
803 | err = val; | ||
804 | } else | ||
805 | pr_cont(" %s ", buf); | ||
806 | |||
807 | /* Do any previous instructions turn on interrupts? */ | ||
808 | if (addr <= erraddr && /* in the past */ | ||
809 | ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */ | ||
810 | val == 0x017b)) /* [SP++] = RETI */ | ||
811 | sti = 1; | ||
812 | } | ||
813 | |||
814 | pr_cont("\n"); | ||
815 | |||
816 | /* Hardware error interrupts can be deferred */ | ||
817 | if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR && | ||
818 | oops_in_progress)){ | ||
819 | pr_notice("Looks like this was a deferred error - sorry\n"); | ||
820 | #ifndef CONFIG_DEBUG_HWERR | ||
821 | pr_notice("The remaining message may be meaningless\n"); | ||
822 | pr_notice("You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n"); | ||
823 | #else | ||
824 | /* If we are handling only one peripheral interrupt | ||
825 | * and current mm and pid are valid, and the last error | ||
826 | * was in that user space process's text area | ||
827 | * print it out - because that is where the problem exists | ||
828 | */ | ||
829 | if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) && | ||
830 | (current->pid && current->mm)) { | ||
831 | /* And the last RETI points to the current userspace context */ | ||
832 | if ((fp + 1)->pc >= current->mm->start_code && | ||
833 | (fp + 1)->pc <= current->mm->end_code) { | ||
834 | pr_notice("It might be better to look around here :\n"); | ||
835 | pr_notice("-------------------------------------------\n"); | ||
836 | show_regs(fp + 1); | ||
837 | pr_notice("-------------------------------------------\n"); | ||
838 | } | ||
839 | } | ||
840 | #endif | ||
841 | } | ||
842 | } | ||
843 | |||
844 | void show_regs(struct pt_regs *fp) | ||
845 | { | ||
846 | char buf[150]; | ||
847 | struct irqaction *action; | ||
848 | unsigned int i; | ||
849 | unsigned long flags = 0; | ||
850 | unsigned int cpu = raw_smp_processor_id(); | ||
851 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); | ||
852 | |||
853 | pr_notice("\n"); | ||
854 | if (CPUID != bfin_cpuid()) | ||
855 | pr_notice("Compiled for cpu family 0x%04x (Rev %d), " | ||
856 | "but running on:0x%04x (Rev %d)\n", | ||
857 | CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid()); | ||
858 | |||
859 | pr_notice("ADSP-%s-0.%d", | ||
860 | CPU, bfin_compiled_revid()); | ||
861 | |||
862 | if (bfin_compiled_revid() != bfin_revid()) | ||
863 | pr_cont("(Detected 0.%d)", bfin_revid()); | ||
864 | |||
865 | pr_cont(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n", | ||
866 | get_cclk()/1000000, get_sclk()/1000000, | ||
867 | #ifdef CONFIG_MPU | ||
868 | "mpu on" | ||
869 | #else | ||
870 | "mpu off" | ||
871 | #endif | ||
872 | ); | ||
873 | |||
874 | pr_notice("%s", linux_banner); | ||
875 | |||
876 | pr_notice("\nSEQUENCER STATUS:\t\t%s\n", print_tainted()); | ||
877 | pr_notice(" SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n", | ||
878 | (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg); | ||
879 | if (fp->ipend & EVT_IRPTEN) | ||
880 | pr_notice(" Global Interrupts Disabled (IPEND[4])\n"); | ||
881 | if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 | | ||
882 | EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR))) | ||
883 | pr_notice(" Peripheral interrupts masked off\n"); | ||
884 | if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14))) | ||
885 | pr_notice(" Kernel interrupts masked off\n"); | ||
886 | if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) { | ||
887 | pr_notice(" HWERRCAUSE: 0x%lx\n", | ||
888 | (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14); | ||
889 | #ifdef EBIU_ERRMST | ||
890 | /* If the error was from the EBIU, print it out */ | ||
891 | if (bfin_read_EBIU_ERRMST() & CORE_ERROR) { | ||
892 | pr_notice(" EBIU Error Reason : 0x%04x\n", | ||
893 | bfin_read_EBIU_ERRMST()); | ||
894 | pr_notice(" EBIU Error Address : 0x%08x\n", | ||
895 | bfin_read_EBIU_ERRADD()); | ||
896 | } | ||
897 | #endif | ||
898 | } | ||
899 | pr_notice(" EXCAUSE : 0x%lx\n", | ||
900 | fp->seqstat & SEQSTAT_EXCAUSE); | ||
901 | for (i = 2; i <= 15 ; i++) { | ||
902 | if (fp->ipend & (1 << i)) { | ||
903 | if (i != 4) { | ||
904 | decode_address(buf, bfin_read32(EVT0 + 4*i)); | ||
905 | pr_notice(" physical IVG%i asserted : %s\n", i, buf); | ||
906 | } else | ||
907 | pr_notice(" interrupts disabled\n"); | ||
908 | } | ||
909 | } | ||
910 | |||
911 | /* if no interrupts are going off, don't print this out */ | ||
912 | if (fp->ipend & ~0x3F) { | ||
913 | for (i = 0; i < (NR_IRQS - 1); i++) { | ||
914 | if (!in_atomic) | ||
915 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
916 | |||
917 | action = irq_desc[i].action; | ||
918 | if (!action) | ||
919 | goto unlock; | ||
920 | |||
921 | decode_address(buf, (unsigned int)action->handler); | ||
922 | pr_notice(" logical irq %3d mapped : %s", i, buf); | ||
923 | for (action = action->next; action; action = action->next) { | ||
924 | decode_address(buf, (unsigned int)action->handler); | ||
925 | pr_cont(", %s", buf); | ||
926 | } | ||
927 | pr_cont("\n"); | ||
928 | unlock: | ||
929 | if (!in_atomic) | ||
930 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
931 | } | ||
932 | } | ||
933 | |||
934 | decode_address(buf, fp->rete); | ||
935 | pr_notice(" RETE: %s\n", buf); | ||
936 | decode_address(buf, fp->retn); | ||
937 | pr_notice(" RETN: %s\n", buf); | ||
938 | decode_address(buf, fp->retx); | ||
939 | pr_notice(" RETX: %s\n", buf); | ||
940 | decode_address(buf, fp->rets); | ||
941 | pr_notice(" RETS: %s\n", buf); | ||
942 | decode_address(buf, fp->pc); | ||
943 | pr_notice(" PC : %s\n", buf); | ||
944 | |||
945 | if (((long)fp->seqstat & SEQSTAT_EXCAUSE) && | ||
946 | (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) { | ||
947 | decode_address(buf, cpu_pda[cpu].dcplb_fault_addr); | ||
948 | pr_notice("DCPLB_FAULT_ADDR: %s\n", buf); | ||
949 | decode_address(buf, cpu_pda[cpu].icplb_fault_addr); | ||
950 | pr_notice("ICPLB_FAULT_ADDR: %s\n", buf); | ||
951 | } | ||
952 | |||
953 | pr_notice("PROCESSOR STATE:\n"); | ||
954 | pr_notice(" R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", | ||
955 | fp->r0, fp->r1, fp->r2, fp->r3); | ||
956 | pr_notice(" R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", | ||
957 | fp->r4, fp->r5, fp->r6, fp->r7); | ||
958 | pr_notice(" P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n", | ||
959 | fp->p0, fp->p1, fp->p2, fp->p3); | ||
960 | pr_notice(" P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n", | ||
961 | fp->p4, fp->p5, fp->fp, (long)fp); | ||
962 | pr_notice(" LB0: %08lx LT0: %08lx LC0: %08lx\n", | ||
963 | fp->lb0, fp->lt0, fp->lc0); | ||
964 | pr_notice(" LB1: %08lx LT1: %08lx LC1: %08lx\n", | ||
965 | fp->lb1, fp->lt1, fp->lc1); | ||
966 | pr_notice(" B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n", | ||
967 | fp->b0, fp->l0, fp->m0, fp->i0); | ||
968 | pr_notice(" B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n", | ||
969 | fp->b1, fp->l1, fp->m1, fp->i1); | ||
970 | pr_notice(" B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n", | ||
971 | fp->b2, fp->l2, fp->m2, fp->i2); | ||
972 | pr_notice(" B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n", | ||
973 | fp->b3, fp->l3, fp->m3, fp->i3); | ||
974 | pr_notice("A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n", | ||
975 | fp->a0w, fp->a0x, fp->a1w, fp->a1x); | ||
976 | |||
977 | pr_notice("USP : %08lx ASTAT: %08lx\n", | ||
978 | rdusp(), fp->astat); | ||
979 | |||
980 | pr_notice("\n"); | ||
981 | } | ||
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index d3cbcd6bd985..59c1df75e4de 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c | |||
@@ -1,25 +1,22 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright 2004-2009 Analog Devices Inc. | 2 | * Main exception handling logic. |
3 | * | ||
4 | * Copyright 2004-2010 Analog Devices Inc. | ||
3 | * | 5 | * |
4 | * Licensed under the GPL-2 or later | 6 | * Licensed under the GPL-2 or later |
5 | */ | 7 | */ |
6 | 8 | ||
7 | #include <linux/bug.h> | 9 | #include <linux/bug.h> |
8 | #include <linux/uaccess.h> | 10 | #include <linux/uaccess.h> |
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/module.h> | 11 | #include <linux/module.h> |
11 | #include <linux/kallsyms.h> | ||
12 | #include <linux/fs.h> | ||
13 | #include <linux/rbtree.h> | ||
14 | #include <asm/traps.h> | 12 | #include <asm/traps.h> |
15 | #include <asm/cacheflush.h> | ||
16 | #include <asm/cplb.h> | 13 | #include <asm/cplb.h> |
17 | #include <asm/dma.h> | ||
18 | #include <asm/blackfin.h> | 14 | #include <asm/blackfin.h> |
19 | #include <asm/irq_handler.h> | 15 | #include <asm/irq_handler.h> |
20 | #include <linux/irq.h> | 16 | #include <linux/irq.h> |
21 | #include <asm/trace.h> | 17 | #include <asm/trace.h> |
22 | #include <asm/fixed_code.h> | 18 | #include <asm/fixed_code.h> |
19 | #include <asm/pseudo_instructions.h> | ||
23 | 20 | ||
24 | #ifdef CONFIG_KGDB | 21 | #ifdef CONFIG_KGDB |
25 | # include <linux/kgdb.h> | 22 | # include <linux/kgdb.h> |
@@ -62,182 +59,6 @@ void __init trap_init(void) | |||
62 | CSYNC(); | 59 | CSYNC(); |
63 | } | 60 | } |
64 | 61 | ||
65 | static void decode_address(char *buf, unsigned long address) | ||
66 | { | ||
67 | #ifdef CONFIG_DEBUG_VERBOSE | ||
68 | struct task_struct *p; | ||
69 | struct mm_struct *mm; | ||
70 | unsigned long flags, offset; | ||
71 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); | ||
72 | struct rb_node *n; | ||
73 | |||
74 | #ifdef CONFIG_KALLSYMS | ||
75 | unsigned long symsize; | ||
76 | const char *symname; | ||
77 | char *modname; | ||
78 | char *delim = ":"; | ||
79 | char namebuf[128]; | ||
80 | #endif | ||
81 | |||
82 | buf += sprintf(buf, "<0x%08lx> ", address); | ||
83 | |||
84 | #ifdef CONFIG_KALLSYMS | ||
85 | /* look up the address and see if we are in kernel space */ | ||
86 | symname = kallsyms_lookup(address, &symsize, &offset, &modname, namebuf); | ||
87 | |||
88 | if (symname) { | ||
89 | /* yeah! kernel space! */ | ||
90 | if (!modname) | ||
91 | modname = delim = ""; | ||
92 | sprintf(buf, "{ %s%s%s%s + 0x%lx }", | ||
93 | delim, modname, delim, symname, | ||
94 | (unsigned long)offset); | ||
95 | return; | ||
96 | } | ||
97 | #endif | ||
98 | |||
99 | if (address >= FIXED_CODE_START && address < FIXED_CODE_END) { | ||
100 | /* Problem in fixed code section? */ | ||
101 | strcat(buf, "/* Maybe fixed code section */"); | ||
102 | return; | ||
103 | |||
104 | } else if (address < CONFIG_BOOT_LOAD) { | ||
105 | /* Problem somewhere before the kernel start address */ | ||
106 | strcat(buf, "/* Maybe null pointer? */"); | ||
107 | return; | ||
108 | |||
109 | } else if (address >= COREMMR_BASE) { | ||
110 | strcat(buf, "/* core mmrs */"); | ||
111 | return; | ||
112 | |||
113 | } else if (address >= SYSMMR_BASE) { | ||
114 | strcat(buf, "/* system mmrs */"); | ||
115 | return; | ||
116 | |||
117 | } else if (address >= L1_ROM_START && address < L1_ROM_START + L1_ROM_LENGTH) { | ||
118 | strcat(buf, "/* on-chip L1 ROM */"); | ||
119 | return; | ||
120 | } | ||
121 | |||
122 | /* | ||
123 | * Don't walk any of the vmas if we are oopsing, it has been known | ||
124 | * to cause problems - corrupt vmas (kernel crashes) cause double faults | ||
125 | */ | ||
126 | if (oops_in_progress) { | ||
127 | strcat(buf, "/* kernel dynamic memory (maybe user-space) */"); | ||
128 | return; | ||
129 | } | ||
130 | |||
131 | /* looks like we're off in user-land, so let's walk all the | ||
132 | * mappings of all our processes and see if we can't be a whee | ||
133 | * bit more specific | ||
134 | */ | ||
135 | write_lock_irqsave(&tasklist_lock, flags); | ||
136 | for_each_process(p) { | ||
137 | mm = (in_atomic ? p->mm : get_task_mm(p)); | ||
138 | if (!mm) | ||
139 | continue; | ||
140 | |||
141 | for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { | ||
142 | struct vm_area_struct *vma; | ||
143 | |||
144 | vma = rb_entry(n, struct vm_area_struct, vm_rb); | ||
145 | |||
146 | if (address >= vma->vm_start && address < vma->vm_end) { | ||
147 | char _tmpbuf[256]; | ||
148 | char *name = p->comm; | ||
149 | struct file *file = vma->vm_file; | ||
150 | |||
151 | if (file) { | ||
152 | char *d_name = d_path(&file->f_path, _tmpbuf, | ||
153 | sizeof(_tmpbuf)); | ||
154 | if (!IS_ERR(d_name)) | ||
155 | name = d_name; | ||
156 | } | ||
157 | |||
158 | /* FLAT does not have its text aligned to the start of | ||
159 | * the map while FDPIC ELF does ... | ||
160 | */ | ||
161 | |||
162 | /* before we can check flat/fdpic, we need to | ||
163 | * make sure current is valid | ||
164 | */ | ||
165 | if ((unsigned long)current >= FIXED_CODE_START && | ||
166 | !((unsigned long)current & 0x3)) { | ||
167 | if (current->mm && | ||
168 | (address > current->mm->start_code) && | ||
169 | (address < current->mm->end_code)) | ||
170 | offset = address - current->mm->start_code; | ||
171 | else | ||
172 | offset = (address - vma->vm_start) + | ||
173 | (vma->vm_pgoff << PAGE_SHIFT); | ||
174 | |||
175 | sprintf(buf, "[ %s + 0x%lx ]", name, offset); | ||
176 | } else | ||
177 | sprintf(buf, "[ %s vma:0x%lx-0x%lx]", | ||
178 | name, vma->vm_start, vma->vm_end); | ||
179 | |||
180 | if (!in_atomic) | ||
181 | mmput(mm); | ||
182 | |||
183 | if (buf[0] == '\0') | ||
184 | sprintf(buf, "[ %s ] dynamic memory", name); | ||
185 | |||
186 | goto done; | ||
187 | } | ||
188 | } | ||
189 | if (!in_atomic) | ||
190 | mmput(mm); | ||
191 | } | ||
192 | |||
193 | /* we were unable to find this address anywhere */ | ||
194 | sprintf(buf, "/* kernel dynamic memory */"); | ||
195 | |||
196 | done: | ||
197 | write_unlock_irqrestore(&tasklist_lock, flags); | ||
198 | #else | ||
199 | sprintf(buf, " "); | ||
200 | #endif | ||
201 | } | ||
202 | |||
203 | asmlinkage void double_fault_c(struct pt_regs *fp) | ||
204 | { | ||
205 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | ||
206 | int j; | ||
207 | trace_buffer_save(j); | ||
208 | #endif | ||
209 | |||
210 | console_verbose(); | ||
211 | oops_in_progress = 1; | ||
212 | #ifdef CONFIG_DEBUG_VERBOSE | ||
213 | printk(KERN_EMERG "Double Fault\n"); | ||
214 | #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT | ||
215 | if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) { | ||
216 | unsigned int cpu = raw_smp_processor_id(); | ||
217 | char buf[150]; | ||
218 | decode_address(buf, cpu_pda[cpu].retx_doublefault); | ||
219 | printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n", | ||
220 | (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf); | ||
221 | decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr); | ||
222 | printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf); | ||
223 | decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr); | ||
224 | printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf); | ||
225 | |||
226 | decode_address(buf, fp->retx); | ||
227 | printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf); | ||
228 | } else | ||
229 | #endif | ||
230 | { | ||
231 | dump_bfin_process(fp); | ||
232 | dump_bfin_mem(fp); | ||
233 | show_regs(fp); | ||
234 | dump_bfin_trace_buffer(); | ||
235 | } | ||
236 | #endif | ||
237 | panic("Double Fault - unrecoverable event"); | ||
238 | |||
239 | } | ||
240 | |||
241 | static int kernel_mode_regs(struct pt_regs *regs) | 62 | static int kernel_mode_regs(struct pt_regs *regs) |
242 | { | 63 | { |
243 | return regs->ipend & 0xffc0; | 64 | return regs->ipend & 0xffc0; |
@@ -248,9 +69,10 @@ asmlinkage notrace void trap_c(struct pt_regs *fp) | |||
248 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | 69 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON |
249 | int j; | 70 | int j; |
250 | #endif | 71 | #endif |
251 | #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO | 72 | #ifdef CONFIG_BFIN_PSEUDODBG_INSNS |
252 | unsigned int cpu = raw_smp_processor_id(); | 73 | int opcode; |
253 | #endif | 74 | #endif |
75 | unsigned int cpu = raw_smp_processor_id(); | ||
254 | const char *strerror = NULL; | 76 | const char *strerror = NULL; |
255 | int sig = 0; | 77 | int sig = 0; |
256 | siginfo_t info; | 78 | siginfo_t info; |
@@ -382,6 +204,19 @@ asmlinkage notrace void trap_c(struct pt_regs *fp) | |||
382 | } | 204 | } |
383 | } | 205 | } |
384 | #endif | 206 | #endif |
207 | #ifdef CONFIG_BFIN_PSEUDODBG_INSNS | ||
208 | /* | ||
209 | * Support for the fake instructions, if the instruction fails, | ||
210 | * then just execute a illegal opcode failure (like normal). | ||
211 | * Don't support these instructions inside the kernel | ||
212 | */ | ||
213 | if (!kernel_mode_regs(fp) && get_instruction(&opcode, (unsigned short *)fp->pc)) { | ||
214 | if (execute_pseudodbg_assert(fp, opcode)) | ||
215 | goto traps_done; | ||
216 | if (execute_pseudodbg(fp, opcode)) | ||
217 | goto traps_done; | ||
218 | } | ||
219 | #endif | ||
385 | info.si_code = ILL_ILLOPC; | 220 | info.si_code = ILL_ILLOPC; |
386 | sig = SIGILL; | 221 | sig = SIGILL; |
387 | strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE); | 222 | strerror = KERN_NOTICE EXC_0x21(KERN_NOTICE); |
@@ -639,7 +474,17 @@ asmlinkage notrace void trap_c(struct pt_regs *fp) | |||
639 | { | 474 | { |
640 | info.si_signo = sig; | 475 | info.si_signo = sig; |
641 | info.si_errno = 0; | 476 | info.si_errno = 0; |
642 | info.si_addr = (void __user *)fp->pc; | 477 | switch (trapnr) { |
478 | case VEC_CPLB_VL: | ||
479 | case VEC_MISALI_D: | ||
480 | case VEC_CPLB_M: | ||
481 | case VEC_CPLB_MHIT: | ||
482 | info.si_addr = (void __user *)cpu_pda[cpu].dcplb_fault_addr; | ||
483 | break; | ||
484 | default: | ||
485 | info.si_addr = (void __user *)fp->pc; | ||
486 | break; | ||
487 | } | ||
643 | force_sig_info(sig, &info, current); | 488 | force_sig_info(sig, &info, current); |
644 | } | 489 | } |
645 | 490 | ||
@@ -652,659 +497,44 @@ asmlinkage notrace void trap_c(struct pt_regs *fp) | |||
652 | trace_buffer_restore(j); | 497 | trace_buffer_restore(j); |
653 | } | 498 | } |
654 | 499 | ||
655 | /* Typical exception handling routines */ | 500 | asmlinkage void double_fault_c(struct pt_regs *fp) |
656 | |||
657 | #define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1) | ||
658 | |||
659 | /* | ||
660 | * Similar to get_user, do some address checking, then dereference | ||
661 | * Return true on success, false on bad address | ||
662 | */ | ||
663 | static bool get_instruction(unsigned short *val, unsigned short *address) | ||
664 | { | ||
665 | unsigned long addr = (unsigned long)address; | ||
666 | |||
667 | /* Check for odd addresses */ | ||
668 | if (addr & 0x1) | ||
669 | return false; | ||
670 | |||
671 | /* MMR region will never have instructions */ | ||
672 | if (addr >= SYSMMR_BASE) | ||
673 | return false; | ||
674 | |||
675 | switch (bfin_mem_access_type(addr, 2)) { | ||
676 | case BFIN_MEM_ACCESS_CORE: | ||
677 | case BFIN_MEM_ACCESS_CORE_ONLY: | ||
678 | *val = *address; | ||
679 | return true; | ||
680 | case BFIN_MEM_ACCESS_DMA: | ||
681 | dma_memcpy(val, address, 2); | ||
682 | return true; | ||
683 | case BFIN_MEM_ACCESS_ITEST: | ||
684 | isram_memcpy(val, address, 2); | ||
685 | return true; | ||
686 | default: /* invalid access */ | ||
687 | return false; | ||
688 | } | ||
689 | } | ||
690 | |||
691 | /* | ||
692 | * decode the instruction if we are printing out the trace, as it | ||
693 | * makes things easier to follow, without running it through objdump | ||
694 | * These are the normal instructions which cause change of flow, which | ||
695 | * would be at the source of the trace buffer | ||
696 | */ | ||
697 | #if defined(CONFIG_DEBUG_VERBOSE) && defined(CONFIG_DEBUG_BFIN_HWTRACE_ON) | ||
698 | static void decode_instruction(unsigned short *address) | ||
699 | { | ||
700 | unsigned short opcode; | ||
701 | |||
702 | if (get_instruction(&opcode, address)) { | ||
703 | if (opcode == 0x0010) | ||
704 | verbose_printk("RTS"); | ||
705 | else if (opcode == 0x0011) | ||
706 | verbose_printk("RTI"); | ||
707 | else if (opcode == 0x0012) | ||
708 | verbose_printk("RTX"); | ||
709 | else if (opcode == 0x0013) | ||
710 | verbose_printk("RTN"); | ||
711 | else if (opcode == 0x0014) | ||
712 | verbose_printk("RTE"); | ||
713 | else if (opcode == 0x0025) | ||
714 | verbose_printk("EMUEXCPT"); | ||
715 | else if (opcode == 0x0040 && opcode <= 0x0047) | ||
716 | verbose_printk("STI R%i", opcode & 7); | ||
717 | else if (opcode >= 0x0050 && opcode <= 0x0057) | ||
718 | verbose_printk("JUMP (P%i)", opcode & 7); | ||
719 | else if (opcode >= 0x0060 && opcode <= 0x0067) | ||
720 | verbose_printk("CALL (P%i)", opcode & 7); | ||
721 | else if (opcode >= 0x0070 && opcode <= 0x0077) | ||
722 | verbose_printk("CALL (PC+P%i)", opcode & 7); | ||
723 | else if (opcode >= 0x0080 && opcode <= 0x0087) | ||
724 | verbose_printk("JUMP (PC+P%i)", opcode & 7); | ||
725 | else if (opcode >= 0x0090 && opcode <= 0x009F) | ||
726 | verbose_printk("RAISE 0x%x", opcode & 0xF); | ||
727 | else if (opcode >= 0x00A0 && opcode <= 0x00AF) | ||
728 | verbose_printk("EXCPT 0x%x", opcode & 0xF); | ||
729 | else if ((opcode >= 0x1000 && opcode <= 0x13FF) || (opcode >= 0x1800 && opcode <= 0x1BFF)) | ||
730 | verbose_printk("IF !CC JUMP"); | ||
731 | else if ((opcode >= 0x1400 && opcode <= 0x17ff) || (opcode >= 0x1c00 && opcode <= 0x1fff)) | ||
732 | verbose_printk("IF CC JUMP"); | ||
733 | else if (opcode >= 0x2000 && opcode <= 0x2fff) | ||
734 | verbose_printk("JUMP.S"); | ||
735 | else if (opcode >= 0xe080 && opcode <= 0xe0ff) | ||
736 | verbose_printk("LSETUP"); | ||
737 | else if (opcode >= 0xe200 && opcode <= 0xe2ff) | ||
738 | verbose_printk("JUMP.L"); | ||
739 | else if (opcode >= 0xe300 && opcode <= 0xe3ff) | ||
740 | verbose_printk("CALL pcrel"); | ||
741 | else | ||
742 | verbose_printk("0x%04x", opcode); | ||
743 | } | ||
744 | |||
745 | } | ||
746 | #endif | ||
747 | |||
748 | void dump_bfin_trace_buffer(void) | ||
749 | { | ||
750 | #ifdef CONFIG_DEBUG_VERBOSE | ||
751 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | ||
752 | int tflags, i = 0; | ||
753 | char buf[150]; | ||
754 | unsigned short *addr; | ||
755 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
756 | int j, index; | ||
757 | #endif | ||
758 | |||
759 | trace_buffer_save(tflags); | ||
760 | |||
761 | printk(KERN_NOTICE "Hardware Trace:\n"); | ||
762 | |||
763 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
764 | printk(KERN_NOTICE "WARNING: Expanded trace turned on - can not trace exceptions\n"); | ||
765 | #endif | ||
766 | |||
767 | if (likely(bfin_read_TBUFSTAT() & TBUFCNT)) { | ||
768 | for (; bfin_read_TBUFSTAT() & TBUFCNT; i++) { | ||
769 | decode_address(buf, (unsigned long)bfin_read_TBUF()); | ||
770 | printk(KERN_NOTICE "%4i Target : %s\n", i, buf); | ||
771 | addr = (unsigned short *)bfin_read_TBUF(); | ||
772 | decode_address(buf, (unsigned long)addr); | ||
773 | printk(KERN_NOTICE " Source : %s ", buf); | ||
774 | decode_instruction(addr); | ||
775 | printk("\n"); | ||
776 | } | ||
777 | } | ||
778 | |||
779 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_EXPAND | ||
780 | if (trace_buff_offset) | ||
781 | index = trace_buff_offset / 4; | ||
782 | else | ||
783 | index = EXPAND_LEN; | ||
784 | |||
785 | j = (1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 128; | ||
786 | while (j) { | ||
787 | decode_address(buf, software_trace_buff[index]); | ||
788 | printk(KERN_NOTICE "%4i Target : %s\n", i, buf); | ||
789 | index -= 1; | ||
790 | if (index < 0 ) | ||
791 | index = EXPAND_LEN; | ||
792 | decode_address(buf, software_trace_buff[index]); | ||
793 | printk(KERN_NOTICE " Source : %s ", buf); | ||
794 | decode_instruction((unsigned short *)software_trace_buff[index]); | ||
795 | printk("\n"); | ||
796 | index -= 1; | ||
797 | if (index < 0) | ||
798 | index = EXPAND_LEN; | ||
799 | j--; | ||
800 | i++; | ||
801 | } | ||
802 | #endif | ||
803 | |||
804 | trace_buffer_restore(tflags); | ||
805 | #endif | ||
806 | #endif | ||
807 | } | ||
808 | EXPORT_SYMBOL(dump_bfin_trace_buffer); | ||
809 | |||
810 | #ifdef CONFIG_BUG | ||
811 | int is_valid_bugaddr(unsigned long addr) | ||
812 | { | ||
813 | unsigned short opcode; | ||
814 | |||
815 | if (!get_instruction(&opcode, (unsigned short *)addr)) | ||
816 | return 0; | ||
817 | |||
818 | return opcode == BFIN_BUG_OPCODE; | ||
819 | } | ||
820 | #endif | ||
821 | |||
822 | /* | ||
823 | * Checks to see if the address pointed to is either a | ||
824 | * 16-bit CALL instruction, or a 32-bit CALL instruction | ||
825 | */ | ||
826 | static bool is_bfin_call(unsigned short *addr) | ||
827 | { | ||
828 | unsigned short opcode = 0, *ins_addr; | ||
829 | ins_addr = (unsigned short *)addr; | ||
830 | |||
831 | if (!get_instruction(&opcode, ins_addr)) | ||
832 | return false; | ||
833 | |||
834 | if ((opcode >= 0x0060 && opcode <= 0x0067) || | ||
835 | (opcode >= 0x0070 && opcode <= 0x0077)) | ||
836 | return true; | ||
837 | |||
838 | ins_addr--; | ||
839 | if (!get_instruction(&opcode, ins_addr)) | ||
840 | return false; | ||
841 | |||
842 | if (opcode >= 0xE300 && opcode <= 0xE3FF) | ||
843 | return true; | ||
844 | |||
845 | return false; | ||
846 | |||
847 | } | ||
848 | |||
849 | void show_stack(struct task_struct *task, unsigned long *stack) | ||
850 | { | ||
851 | #ifdef CONFIG_PRINTK | ||
852 | unsigned int *addr, *endstack, *fp = 0, *frame; | ||
853 | unsigned short *ins_addr; | ||
854 | char buf[150]; | ||
855 | unsigned int i, j, ret_addr, frame_no = 0; | ||
856 | |||
857 | /* | ||
858 | * If we have been passed a specific stack, use that one otherwise | ||
859 | * if we have been passed a task structure, use that, otherwise | ||
860 | * use the stack of where the variable "stack" exists | ||
861 | */ | ||
862 | |||
863 | if (stack == NULL) { | ||
864 | if (task) { | ||
865 | /* We know this is a kernel stack, so this is the start/end */ | ||
866 | stack = (unsigned long *)task->thread.ksp; | ||
867 | endstack = (unsigned int *)(((unsigned int)(stack) & ~(THREAD_SIZE - 1)) + THREAD_SIZE); | ||
868 | } else { | ||
869 | /* print out the existing stack info */ | ||
870 | stack = (unsigned long *)&stack; | ||
871 | endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack); | ||
872 | } | ||
873 | } else | ||
874 | endstack = (unsigned int *)PAGE_ALIGN((unsigned int)stack); | ||
875 | |||
876 | printk(KERN_NOTICE "Stack info:\n"); | ||
877 | decode_address(buf, (unsigned int)stack); | ||
878 | printk(KERN_NOTICE " SP: [0x%p] %s\n", stack, buf); | ||
879 | |||
880 | if (!access_ok(VERIFY_READ, stack, (unsigned int)endstack - (unsigned int)stack)) { | ||
881 | printk(KERN_NOTICE "Invalid stack pointer\n"); | ||
882 | return; | ||
883 | } | ||
884 | |||
885 | /* First thing is to look for a frame pointer */ | ||
886 | for (addr = (unsigned int *)((unsigned int)stack & ~0xF); addr < endstack; addr++) { | ||
887 | if (*addr & 0x1) | ||
888 | continue; | ||
889 | ins_addr = (unsigned short *)*addr; | ||
890 | ins_addr--; | ||
891 | if (is_bfin_call(ins_addr)) | ||
892 | fp = addr - 1; | ||
893 | |||
894 | if (fp) { | ||
895 | /* Let's check to see if it is a frame pointer */ | ||
896 | while (fp >= (addr - 1) && fp < endstack | ||
897 | && fp && ((unsigned int) fp & 0x3) == 0) | ||
898 | fp = (unsigned int *)*fp; | ||
899 | if (fp == 0 || fp == endstack) { | ||
900 | fp = addr - 1; | ||
901 | break; | ||
902 | } | ||
903 | fp = 0; | ||
904 | } | ||
905 | } | ||
906 | if (fp) { | ||
907 | frame = fp; | ||
908 | printk(KERN_NOTICE " FP: (0x%p)\n", fp); | ||
909 | } else | ||
910 | frame = 0; | ||
911 | |||
912 | /* | ||
913 | * Now that we think we know where things are, we | ||
914 | * walk the stack again, this time printing things out | ||
915 | * incase there is no frame pointer, we still look for | ||
916 | * valid return addresses | ||
917 | */ | ||
918 | |||
919 | /* First time print out data, next time, print out symbols */ | ||
920 | for (j = 0; j <= 1; j++) { | ||
921 | if (j) | ||
922 | printk(KERN_NOTICE "Return addresses in stack:\n"); | ||
923 | else | ||
924 | printk(KERN_NOTICE " Memory from 0x%08lx to %p", ((long unsigned int)stack & ~0xF), endstack); | ||
925 | |||
926 | fp = frame; | ||
927 | frame_no = 0; | ||
928 | |||
929 | for (addr = (unsigned int *)((unsigned int)stack & ~0xF), i = 0; | ||
930 | addr < endstack; addr++, i++) { | ||
931 | |||
932 | ret_addr = 0; | ||
933 | if (!j && i % 8 == 0) | ||
934 | printk(KERN_NOTICE "%p:",addr); | ||
935 | |||
936 | /* if it is an odd address, or zero, just skip it */ | ||
937 | if (*addr & 0x1 || !*addr) | ||
938 | goto print; | ||
939 | |||
940 | ins_addr = (unsigned short *)*addr; | ||
941 | |||
942 | /* Go back one instruction, and see if it is a CALL */ | ||
943 | ins_addr--; | ||
944 | ret_addr = is_bfin_call(ins_addr); | ||
945 | print: | ||
946 | if (!j && stack == (unsigned long *)addr) | ||
947 | printk("[%08x]", *addr); | ||
948 | else if (ret_addr) | ||
949 | if (j) { | ||
950 | decode_address(buf, (unsigned int)*addr); | ||
951 | if (frame == addr) { | ||
952 | printk(KERN_NOTICE " frame %2i : %s\n", frame_no, buf); | ||
953 | continue; | ||
954 | } | ||
955 | printk(KERN_NOTICE " address : %s\n", buf); | ||
956 | } else | ||
957 | printk("<%08x>", *addr); | ||
958 | else if (fp == addr) { | ||
959 | if (j) | ||
960 | frame = addr+1; | ||
961 | else | ||
962 | printk("(%08x)", *addr); | ||
963 | |||
964 | fp = (unsigned int *)*addr; | ||
965 | frame_no++; | ||
966 | |||
967 | } else if (!j) | ||
968 | printk(" %08x ", *addr); | ||
969 | } | ||
970 | if (!j) | ||
971 | printk("\n"); | ||
972 | } | ||
973 | #endif | ||
974 | } | ||
975 | EXPORT_SYMBOL(show_stack); | ||
976 | |||
977 | void dump_stack(void) | ||
978 | { | 501 | { |
979 | unsigned long stack; | ||
980 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON | 502 | #ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON |
981 | int tflags; | 503 | int j; |
504 | trace_buffer_save(j); | ||
982 | #endif | 505 | #endif |
983 | trace_buffer_save(tflags); | ||
984 | dump_bfin_trace_buffer(); | ||
985 | show_stack(current, &stack); | ||
986 | trace_buffer_restore(tflags); | ||
987 | } | ||
988 | EXPORT_SYMBOL(dump_stack); | ||
989 | 506 | ||
990 | void dump_bfin_process(struct pt_regs *fp) | 507 | console_verbose(); |
991 | { | 508 | oops_in_progress = 1; |
992 | #ifdef CONFIG_DEBUG_VERBOSE | 509 | #ifdef CONFIG_DEBUG_VERBOSE |
993 | /* We should be able to look at fp->ipend, but we don't push it on the | 510 | printk(KERN_EMERG "Double Fault\n"); |
994 | * stack all the time, so do this until we fix that */ | 511 | #ifdef CONFIG_DEBUG_DOUBLEFAULT_PRINT |
995 | unsigned int context = bfin_read_IPEND(); | 512 | if (((long)fp->seqstat & SEQSTAT_EXCAUSE) == VEC_UNCOV) { |
996 | 513 | unsigned int cpu = raw_smp_processor_id(); | |
997 | if (oops_in_progress) | 514 | char buf[150]; |
998 | verbose_printk(KERN_EMERG "Kernel OOPS in progress\n"); | 515 | decode_address(buf, cpu_pda[cpu].retx_doublefault); |
999 | 516 | printk(KERN_EMERG "While handling exception (EXCAUSE = 0x%x) at %s:\n", | |
1000 | if (context & 0x0020 && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) | 517 | (unsigned int)cpu_pda[cpu].seqstat_doublefault & SEQSTAT_EXCAUSE, buf); |
1001 | verbose_printk(KERN_NOTICE "HW Error context\n"); | 518 | decode_address(buf, cpu_pda[cpu].dcplb_doublefault_addr); |
1002 | else if (context & 0x0020) | 519 | printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %s\n", buf); |
1003 | verbose_printk(KERN_NOTICE "Deferred Exception context\n"); | 520 | decode_address(buf, cpu_pda[cpu].icplb_doublefault_addr); |
1004 | else if (context & 0x3FC0) | 521 | printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %s\n", buf); |
1005 | verbose_printk(KERN_NOTICE "Interrupt context\n"); | ||
1006 | else if (context & 0x4000) | ||
1007 | verbose_printk(KERN_NOTICE "Deferred Interrupt context\n"); | ||
1008 | else if (context & 0x8000) | ||
1009 | verbose_printk(KERN_NOTICE "Kernel process context\n"); | ||
1010 | |||
1011 | /* Because we are crashing, and pointers could be bad, we check things | ||
1012 | * pretty closely before we use them | ||
1013 | */ | ||
1014 | if ((unsigned long)current >= FIXED_CODE_START && | ||
1015 | !((unsigned long)current & 0x3) && current->pid) { | ||
1016 | verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n"); | ||
1017 | if (current->comm >= (char *)FIXED_CODE_START) | ||
1018 | verbose_printk(KERN_NOTICE "COMM=%s PID=%d", | ||
1019 | current->comm, current->pid); | ||
1020 | else | ||
1021 | verbose_printk(KERN_NOTICE "COMM= invalid"); | ||
1022 | 522 | ||
1023 | printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu); | 523 | decode_address(buf, fp->retx); |
1024 | if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START) | 524 | printk(KERN_NOTICE "The instruction at %s caused a double exception\n", buf); |
1025 | verbose_printk(KERN_NOTICE | ||
1026 | "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n" | ||
1027 | " BSS = 0x%p-0x%p USER-STACK = 0x%p\n\n", | ||
1028 | (void *)current->mm->start_code, | ||
1029 | (void *)current->mm->end_code, | ||
1030 | (void *)current->mm->start_data, | ||
1031 | (void *)current->mm->end_data, | ||
1032 | (void *)current->mm->end_data, | ||
1033 | (void *)current->mm->brk, | ||
1034 | (void *)current->mm->start_stack); | ||
1035 | else | ||
1036 | verbose_printk(KERN_NOTICE "invalid mm\n"); | ||
1037 | } else | 525 | } else |
1038 | verbose_printk(KERN_NOTICE | ||
1039 | "No Valid process in current context\n"); | ||
1040 | #endif | ||
1041 | } | ||
1042 | |||
1043 | void dump_bfin_mem(struct pt_regs *fp) | ||
1044 | { | ||
1045 | #ifdef CONFIG_DEBUG_VERBOSE | ||
1046 | unsigned short *addr, *erraddr, val = 0, err = 0; | ||
1047 | char sti = 0, buf[6]; | ||
1048 | |||
1049 | erraddr = (void *)fp->pc; | ||
1050 | |||
1051 | verbose_printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr); | ||
1052 | |||
1053 | for (addr = (unsigned short *)((unsigned long)erraddr & ~0xF) - 0x10; | ||
1054 | addr < (unsigned short *)((unsigned long)erraddr & ~0xF) + 0x10; | ||
1055 | addr++) { | ||
1056 | if (!((unsigned long)addr & 0xF)) | ||
1057 | verbose_printk(KERN_NOTICE "0x%p: ", addr); | ||
1058 | |||
1059 | if (!get_instruction(&val, addr)) { | ||
1060 | val = 0; | ||
1061 | sprintf(buf, "????"); | ||
1062 | } else | ||
1063 | sprintf(buf, "%04x", val); | ||
1064 | |||
1065 | if (addr == erraddr) { | ||
1066 | verbose_printk("[%s]", buf); | ||
1067 | err = val; | ||
1068 | } else | ||
1069 | verbose_printk(" %s ", buf); | ||
1070 | |||
1071 | /* Do any previous instructions turn on interrupts? */ | ||
1072 | if (addr <= erraddr && /* in the past */ | ||
1073 | ((val >= 0x0040 && val <= 0x0047) || /* STI instruction */ | ||
1074 | val == 0x017b)) /* [SP++] = RETI */ | ||
1075 | sti = 1; | ||
1076 | } | ||
1077 | |||
1078 | verbose_printk("\n"); | ||
1079 | |||
1080 | /* Hardware error interrupts can be deferred */ | ||
1081 | if (unlikely(sti && (fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR && | ||
1082 | oops_in_progress)){ | ||
1083 | verbose_printk(KERN_NOTICE "Looks like this was a deferred error - sorry\n"); | ||
1084 | #ifndef CONFIG_DEBUG_HWERR | ||
1085 | verbose_printk(KERN_NOTICE | ||
1086 | "The remaining message may be meaningless\n" | ||
1087 | "You should enable CONFIG_DEBUG_HWERR to get a better idea where it came from\n"); | ||
1088 | #else | ||
1089 | /* If we are handling only one peripheral interrupt | ||
1090 | * and current mm and pid are valid, and the last error | ||
1091 | * was in that user space process's text area | ||
1092 | * print it out - because that is where the problem exists | ||
1093 | */ | ||
1094 | if ((!(((fp)->ipend & ~0x30) & (((fp)->ipend & ~0x30) - 1))) && | ||
1095 | (current->pid && current->mm)) { | ||
1096 | /* And the last RETI points to the current userspace context */ | ||
1097 | if ((fp + 1)->pc >= current->mm->start_code && | ||
1098 | (fp + 1)->pc <= current->mm->end_code) { | ||
1099 | verbose_printk(KERN_NOTICE "It might be better to look around here : \n"); | ||
1100 | verbose_printk(KERN_NOTICE "-------------------------------------------\n"); | ||
1101 | show_regs(fp + 1); | ||
1102 | verbose_printk(KERN_NOTICE "-------------------------------------------\n"); | ||
1103 | } | ||
1104 | } | ||
1105 | #endif | ||
1106 | } | ||
1107 | #endif | ||
1108 | } | ||
1109 | |||
1110 | void show_regs(struct pt_regs *fp) | ||
1111 | { | ||
1112 | #ifdef CONFIG_DEBUG_VERBOSE | ||
1113 | char buf [150]; | ||
1114 | struct irqaction *action; | ||
1115 | unsigned int i; | ||
1116 | unsigned long flags = 0; | ||
1117 | unsigned int cpu = raw_smp_processor_id(); | ||
1118 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); | ||
1119 | |||
1120 | verbose_printk(KERN_NOTICE "\n"); | ||
1121 | if (CPUID != bfin_cpuid()) | ||
1122 | verbose_printk(KERN_NOTICE "Compiled for cpu family 0x%04x (Rev %d), " | ||
1123 | "but running on:0x%04x (Rev %d)\n", | ||
1124 | CPUID, bfin_compiled_revid(), bfin_cpuid(), bfin_revid()); | ||
1125 | |||
1126 | verbose_printk(KERN_NOTICE "ADSP-%s-0.%d", | ||
1127 | CPU, bfin_compiled_revid()); | ||
1128 | |||
1129 | if (bfin_compiled_revid() != bfin_revid()) | ||
1130 | verbose_printk("(Detected 0.%d)", bfin_revid()); | ||
1131 | |||
1132 | verbose_printk(" %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n", | ||
1133 | get_cclk()/1000000, get_sclk()/1000000, | ||
1134 | #ifdef CONFIG_MPU | ||
1135 | "mpu on" | ||
1136 | #else | ||
1137 | "mpu off" | ||
1138 | #endif | ||
1139 | ); | ||
1140 | |||
1141 | verbose_printk(KERN_NOTICE "%s", linux_banner); | ||
1142 | |||
1143 | verbose_printk(KERN_NOTICE "\nSEQUENCER STATUS:\t\t%s\n", print_tainted()); | ||
1144 | verbose_printk(KERN_NOTICE " SEQSTAT: %08lx IPEND: %04lx IMASK: %04lx SYSCFG: %04lx\n", | ||
1145 | (long)fp->seqstat, fp->ipend, cpu_pda[raw_smp_processor_id()].ex_imask, fp->syscfg); | ||
1146 | if (fp->ipend & EVT_IRPTEN) | ||
1147 | verbose_printk(KERN_NOTICE " Global Interrupts Disabled (IPEND[4])\n"); | ||
1148 | if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG13 | EVT_IVG12 | EVT_IVG11 | | ||
1149 | EVT_IVG10 | EVT_IVG9 | EVT_IVG8 | EVT_IVG7 | EVT_IVTMR))) | ||
1150 | verbose_printk(KERN_NOTICE " Peripheral interrupts masked off\n"); | ||
1151 | if (!(cpu_pda[raw_smp_processor_id()].ex_imask & (EVT_IVG15 | EVT_IVG14))) | ||
1152 | verbose_printk(KERN_NOTICE " Kernel interrupts masked off\n"); | ||
1153 | if ((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR) { | ||
1154 | verbose_printk(KERN_NOTICE " HWERRCAUSE: 0x%lx\n", | ||
1155 | (fp->seqstat & SEQSTAT_HWERRCAUSE) >> 14); | ||
1156 | #ifdef EBIU_ERRMST | ||
1157 | /* If the error was from the EBIU, print it out */ | ||
1158 | if (bfin_read_EBIU_ERRMST() & CORE_ERROR) { | ||
1159 | verbose_printk(KERN_NOTICE " EBIU Error Reason : 0x%04x\n", | ||
1160 | bfin_read_EBIU_ERRMST()); | ||
1161 | verbose_printk(KERN_NOTICE " EBIU Error Address : 0x%08x\n", | ||
1162 | bfin_read_EBIU_ERRADD()); | ||
1163 | } | ||
1164 | #endif | 526 | #endif |
527 | { | ||
528 | dump_bfin_process(fp); | ||
529 | dump_bfin_mem(fp); | ||
530 | show_regs(fp); | ||
531 | dump_bfin_trace_buffer(); | ||
1165 | } | 532 | } |
1166 | verbose_printk(KERN_NOTICE " EXCAUSE : 0x%lx\n", | ||
1167 | fp->seqstat & SEQSTAT_EXCAUSE); | ||
1168 | for (i = 2; i <= 15 ; i++) { | ||
1169 | if (fp->ipend & (1 << i)) { | ||
1170 | if (i != 4) { | ||
1171 | decode_address(buf, bfin_read32(EVT0 + 4*i)); | ||
1172 | verbose_printk(KERN_NOTICE " physical IVG%i asserted : %s\n", i, buf); | ||
1173 | } else | ||
1174 | verbose_printk(KERN_NOTICE " interrupts disabled\n"); | ||
1175 | } | ||
1176 | } | ||
1177 | |||
1178 | /* if no interrupts are going off, don't print this out */ | ||
1179 | if (fp->ipend & ~0x3F) { | ||
1180 | for (i = 0; i < (NR_IRQS - 1); i++) { | ||
1181 | if (!in_atomic) | ||
1182 | raw_spin_lock_irqsave(&irq_desc[i].lock, flags); | ||
1183 | |||
1184 | action = irq_desc[i].action; | ||
1185 | if (!action) | ||
1186 | goto unlock; | ||
1187 | |||
1188 | decode_address(buf, (unsigned int)action->handler); | ||
1189 | verbose_printk(KERN_NOTICE " logical irq %3d mapped : %s", i, buf); | ||
1190 | for (action = action->next; action; action = action->next) { | ||
1191 | decode_address(buf, (unsigned int)action->handler); | ||
1192 | verbose_printk(", %s", buf); | ||
1193 | } | ||
1194 | verbose_printk("\n"); | ||
1195 | unlock: | ||
1196 | if (!in_atomic) | ||
1197 | raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); | ||
1198 | } | ||
1199 | } | ||
1200 | |||
1201 | decode_address(buf, fp->rete); | ||
1202 | verbose_printk(KERN_NOTICE " RETE: %s\n", buf); | ||
1203 | decode_address(buf, fp->retn); | ||
1204 | verbose_printk(KERN_NOTICE " RETN: %s\n", buf); | ||
1205 | decode_address(buf, fp->retx); | ||
1206 | verbose_printk(KERN_NOTICE " RETX: %s\n", buf); | ||
1207 | decode_address(buf, fp->rets); | ||
1208 | verbose_printk(KERN_NOTICE " RETS: %s\n", buf); | ||
1209 | decode_address(buf, fp->pc); | ||
1210 | verbose_printk(KERN_NOTICE " PC : %s\n", buf); | ||
1211 | |||
1212 | if (((long)fp->seqstat & SEQSTAT_EXCAUSE) && | ||
1213 | (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) { | ||
1214 | decode_address(buf, cpu_pda[cpu].dcplb_fault_addr); | ||
1215 | verbose_printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf); | ||
1216 | decode_address(buf, cpu_pda[cpu].icplb_fault_addr); | ||
1217 | verbose_printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf); | ||
1218 | } | ||
1219 | |||
1220 | verbose_printk(KERN_NOTICE "PROCESSOR STATE:\n"); | ||
1221 | verbose_printk(KERN_NOTICE " R0 : %08lx R1 : %08lx R2 : %08lx R3 : %08lx\n", | ||
1222 | fp->r0, fp->r1, fp->r2, fp->r3); | ||
1223 | verbose_printk(KERN_NOTICE " R4 : %08lx R5 : %08lx R6 : %08lx R7 : %08lx\n", | ||
1224 | fp->r4, fp->r5, fp->r6, fp->r7); | ||
1225 | verbose_printk(KERN_NOTICE " P0 : %08lx P1 : %08lx P2 : %08lx P3 : %08lx\n", | ||
1226 | fp->p0, fp->p1, fp->p2, fp->p3); | ||
1227 | verbose_printk(KERN_NOTICE " P4 : %08lx P5 : %08lx FP : %08lx SP : %08lx\n", | ||
1228 | fp->p4, fp->p5, fp->fp, (long)fp); | ||
1229 | verbose_printk(KERN_NOTICE " LB0: %08lx LT0: %08lx LC0: %08lx\n", | ||
1230 | fp->lb0, fp->lt0, fp->lc0); | ||
1231 | verbose_printk(KERN_NOTICE " LB1: %08lx LT1: %08lx LC1: %08lx\n", | ||
1232 | fp->lb1, fp->lt1, fp->lc1); | ||
1233 | verbose_printk(KERN_NOTICE " B0 : %08lx L0 : %08lx M0 : %08lx I0 : %08lx\n", | ||
1234 | fp->b0, fp->l0, fp->m0, fp->i0); | ||
1235 | verbose_printk(KERN_NOTICE " B1 : %08lx L1 : %08lx M1 : %08lx I1 : %08lx\n", | ||
1236 | fp->b1, fp->l1, fp->m1, fp->i1); | ||
1237 | verbose_printk(KERN_NOTICE " B2 : %08lx L2 : %08lx M2 : %08lx I2 : %08lx\n", | ||
1238 | fp->b2, fp->l2, fp->m2, fp->i2); | ||
1239 | verbose_printk(KERN_NOTICE " B3 : %08lx L3 : %08lx M3 : %08lx I3 : %08lx\n", | ||
1240 | fp->b3, fp->l3, fp->m3, fp->i3); | ||
1241 | verbose_printk(KERN_NOTICE "A0.w: %08lx A0.x: %08lx A1.w: %08lx A1.x: %08lx\n", | ||
1242 | fp->a0w, fp->a0x, fp->a1w, fp->a1x); | ||
1243 | |||
1244 | verbose_printk(KERN_NOTICE "USP : %08lx ASTAT: %08lx\n", | ||
1245 | rdusp(), fp->astat); | ||
1246 | |||
1247 | verbose_printk(KERN_NOTICE "\n"); | ||
1248 | #endif | 533 | #endif |
1249 | } | 534 | panic("Double Fault - unrecoverable event"); |
1250 | |||
1251 | #ifdef CONFIG_SYS_BFIN_SPINLOCK_L1 | ||
1252 | asmlinkage int sys_bfin_spinlock(int *spinlock)__attribute__((l1_text)); | ||
1253 | #endif | ||
1254 | |||
1255 | static DEFINE_SPINLOCK(bfin_spinlock_lock); | ||
1256 | |||
1257 | asmlinkage int sys_bfin_spinlock(int *p) | ||
1258 | { | ||
1259 | int ret, tmp = 0; | ||
1260 | |||
1261 | spin_lock(&bfin_spinlock_lock); /* This would also hold kernel preemption. */ | ||
1262 | ret = get_user(tmp, p); | ||
1263 | if (likely(ret == 0)) { | ||
1264 | if (unlikely(tmp)) | ||
1265 | ret = 1; | ||
1266 | else | ||
1267 | put_user(1, p); | ||
1268 | } | ||
1269 | spin_unlock(&bfin_spinlock_lock); | ||
1270 | return ret; | ||
1271 | } | ||
1272 | |||
1273 | int bfin_request_exception(unsigned int exception, void (*handler)(void)) | ||
1274 | { | ||
1275 | void (*curr_handler)(void); | ||
1276 | |||
1277 | if (exception > 0x3F) | ||
1278 | return -EINVAL; | ||
1279 | |||
1280 | curr_handler = ex_table[exception]; | ||
1281 | |||
1282 | if (curr_handler != ex_replaceable) | ||
1283 | return -EBUSY; | ||
1284 | |||
1285 | ex_table[exception] = handler; | ||
1286 | 535 | ||
1287 | return 0; | ||
1288 | } | 536 | } |
1289 | EXPORT_SYMBOL(bfin_request_exception); | ||
1290 | |||
1291 | int bfin_free_exception(unsigned int exception, void (*handler)(void)) | ||
1292 | { | ||
1293 | void (*curr_handler)(void); | ||
1294 | |||
1295 | if (exception > 0x3F) | ||
1296 | return -EINVAL; | ||
1297 | |||
1298 | curr_handler = ex_table[exception]; | ||
1299 | 537 | ||
1300 | if (curr_handler != handler) | ||
1301 | return -EBUSY; | ||
1302 | |||
1303 | ex_table[exception] = ex_replaceable; | ||
1304 | |||
1305 | return 0; | ||
1306 | } | ||
1307 | EXPORT_SYMBOL(bfin_free_exception); | ||
1308 | 538 | ||
1309 | void panic_cplb_error(int cplb_panic, struct pt_regs *fp) | 539 | void panic_cplb_error(int cplb_panic, struct pt_regs *fp) |
1310 | { | 540 | { |
@@ -1329,3 +559,23 @@ void panic_cplb_error(int cplb_panic, struct pt_regs *fp) | |||
1329 | dump_stack(); | 559 | dump_stack(); |
1330 | panic("Unrecoverable event"); | 560 | panic("Unrecoverable event"); |
1331 | } | 561 | } |
562 | |||
563 | #ifdef CONFIG_BUG | ||
564 | int is_valid_bugaddr(unsigned long addr) | ||
565 | { | ||
566 | unsigned int opcode; | ||
567 | |||
568 | if (!get_instruction(&opcode, (unsigned short *)addr)) | ||
569 | return 0; | ||
570 | |||
571 | return opcode == BFIN_BUG_OPCODE; | ||
572 | } | ||
573 | #endif | ||
574 | |||
575 | /* stub this out */ | ||
576 | #ifndef CONFIG_DEBUG_VERBOSE | ||
577 | void show_regs(struct pt_regs *fp) | ||
578 | { | ||
579 | |||
580 | } | ||
581 | #endif | ||
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index 66799e763dc9..984c78172397 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S | |||
@@ -15,7 +15,12 @@ _jiffies = _jiffies_64; | |||
15 | 15 | ||
16 | SECTIONS | 16 | SECTIONS |
17 | { | 17 | { |
18 | #ifdef CONFIG_RAMKERNEL | ||
18 | . = CONFIG_BOOT_LOAD; | 19 | . = CONFIG_BOOT_LOAD; |
20 | #else | ||
21 | . = CONFIG_ROM_BASE; | ||
22 | #endif | ||
23 | |||
19 | /* Neither the text, ro_data or bss section need to be aligned | 24 | /* Neither the text, ro_data or bss section need to be aligned |
20 | * So pack them back to back | 25 | * So pack them back to back |
21 | */ | 26 | */ |
@@ -31,6 +36,12 @@ SECTIONS | |||
31 | LOCK_TEXT | 36 | LOCK_TEXT |
32 | IRQENTRY_TEXT | 37 | IRQENTRY_TEXT |
33 | KPROBES_TEXT | 38 | KPROBES_TEXT |
39 | #ifdef CONFIG_ROMKERNEL | ||
40 | __sinittext = .; | ||
41 | INIT_TEXT | ||
42 | __einittext = .; | ||
43 | EXIT_TEXT | ||
44 | #endif | ||
34 | *(.text.*) | 45 | *(.text.*) |
35 | *(.fixup) | 46 | *(.fixup) |
36 | 47 | ||
@@ -50,8 +61,14 @@ SECTIONS | |||
50 | 61 | ||
51 | /* Just in case the first read only is a 32-bit access */ | 62 | /* Just in case the first read only is a 32-bit access */ |
52 | RO_DATA(4) | 63 | RO_DATA(4) |
64 | __rodata_end = .; | ||
53 | 65 | ||
66 | #ifdef CONFIG_ROMKERNEL | ||
67 | . = CONFIG_BOOT_LOAD; | ||
68 | .bss : AT(__rodata_end) | ||
69 | #else | ||
54 | .bss : | 70 | .bss : |
71 | #endif | ||
55 | { | 72 | { |
56 | . = ALIGN(4); | 73 | . = ALIGN(4); |
57 | ___bss_start = .; | 74 | ___bss_start = .; |
@@ -67,7 +84,11 @@ SECTIONS | |||
67 | ___bss_stop = .; | 84 | ___bss_stop = .; |
68 | } | 85 | } |
69 | 86 | ||
87 | #if defined(CONFIG_ROMKERNEL) | ||
88 | .data : AT(LOADADDR(.bss) + SIZEOF(.bss)) | ||
89 | #else | ||
70 | .data : | 90 | .data : |
91 | #endif | ||
71 | { | 92 | { |
72 | __sdata = .; | 93 | __sdata = .; |
73 | /* This gets done first, so the glob doesn't suck it in */ | 94 | /* This gets done first, so the glob doesn't suck it in */ |
@@ -94,6 +115,8 @@ SECTIONS | |||
94 | 115 | ||
95 | __edata = .; | 116 | __edata = .; |
96 | } | 117 | } |
118 | __data_lma = LOADADDR(.data); | ||
119 | __data_len = SIZEOF(.data); | ||
97 | 120 | ||
98 | /* The init section should be last, so when we free it, it goes into | 121 | /* The init section should be last, so when we free it, it goes into |
99 | * the general memory pool, and (hopefully) will decrease fragmentation | 122 | * the general memory pool, and (hopefully) will decrease fragmentation |
@@ -103,25 +126,58 @@ SECTIONS | |||
103 | . = ALIGN(PAGE_SIZE); | 126 | . = ALIGN(PAGE_SIZE); |
104 | ___init_begin = .; | 127 | ___init_begin = .; |
105 | 128 | ||
129 | #ifdef CONFIG_RAMKERNEL | ||
106 | INIT_TEXT_SECTION(PAGE_SIZE) | 130 | INIT_TEXT_SECTION(PAGE_SIZE) |
107 | . = ALIGN(16); | ||
108 | INIT_DATA_SECTION(16) | ||
109 | PERCPU(4) | ||
110 | 131 | ||
111 | /* we have to discard exit text and such at runtime, not link time, to | 132 | /* We have to discard exit text and such at runtime, not link time, to |
112 | * handle embedded cross-section references (alt instructions, bug | 133 | * handle embedded cross-section references (alt instructions, bug |
113 | * table, eh_frame, etc...) | 134 | * table, eh_frame, etc...). We need all of our .text up front and |
135 | * .data after it for PCREL call issues. | ||
114 | */ | 136 | */ |
115 | .exit.text : | 137 | .exit.text : |
116 | { | 138 | { |
117 | EXIT_TEXT | 139 | EXIT_TEXT |
118 | } | 140 | } |
141 | |||
142 | . = ALIGN(16); | ||
143 | INIT_DATA_SECTION(16) | ||
144 | PERCPU(4) | ||
145 | |||
119 | .exit.data : | 146 | .exit.data : |
120 | { | 147 | { |
121 | EXIT_DATA | 148 | EXIT_DATA |
122 | } | 149 | } |
123 | 150 | ||
124 | .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) | 151 | .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) |
152 | #else | ||
153 | .init.data : AT(__data_lma + __data_len) | ||
154 | { | ||
155 | __sinitdata = .; | ||
156 | INIT_DATA | ||
157 | INIT_SETUP(16) | ||
158 | INIT_CALLS | ||
159 | CON_INITCALL | ||
160 | SECURITY_INITCALL | ||
161 | INIT_RAM_FS | ||
162 | |||
163 | . = ALIGN(4); | ||
164 | ___per_cpu_load = .; | ||
165 | ___per_cpu_start = .; | ||
166 | *(.data.percpu.first) | ||
167 | *(.data.percpu.page_aligned) | ||
168 | *(.data.percpu) | ||
169 | *(.data.percpu.shared_aligned) | ||
170 | ___per_cpu_end = .; | ||
171 | |||
172 | EXIT_DATA | ||
173 | __einitdata = .; | ||
174 | } | ||
175 | __init_data_lma = LOADADDR(.init.data); | ||
176 | __init_data_len = SIZEOF(.init.data); | ||
177 | __init_data_end = .; | ||
178 | |||
179 | .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len) | ||
180 | #endif | ||
125 | { | 181 | { |
126 | . = ALIGN(4); | 182 | . = ALIGN(4); |
127 | __stext_l1 = .; | 183 | __stext_l1 = .; |
@@ -202,7 +258,11 @@ SECTIONS | |||
202 | /* Force trailing alignment of our init section so that when we | 258 | /* Force trailing alignment of our init section so that when we |
203 | * free our init memory, we don't leave behind a partial page. | 259 | * free our init memory, we don't leave behind a partial page. |
204 | */ | 260 | */ |
261 | #ifdef CONFIG_RAMKERNEL | ||
205 | . = __l2_lma + __l2_len; | 262 | . = __l2_lma + __l2_len; |
263 | #else | ||
264 | . = __init_data_end; | ||
265 | #endif | ||
206 | . = ALIGN(PAGE_SIZE); | 266 | . = ALIGN(PAGE_SIZE); |
207 | ___init_end = .; | 267 | ___init_end = .; |
208 | 268 | ||