Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--  arch/blackfin/kernel/Makefile              |   8
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c        |  13
-rw-r--r--  arch/blackfin/kernel/bfin_gpio.c           | 169
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinfo.c   |   8
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinit.c   |   4
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbmgr.c    | 128
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinfo.c |  15
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinit.c |  31
-rw-r--r--  arch/blackfin/kernel/dma-mapping.c         |   4
-rw-r--r--  arch/blackfin/kernel/gptimers.c            |  24
-rw-r--r--  arch/blackfin/kernel/process.c             |  69
-rw-r--r--  arch/blackfin/kernel/ptrace.c              |   7
-rw-r--r--  arch/blackfin/kernel/reboot.c              |  69
-rw-r--r--  arch/blackfin/kernel/setup.c               | 131
-rw-r--r--  arch/blackfin/kernel/signal.c              |  24
-rw-r--r--  arch/blackfin/kernel/sys_bfin.c            |   2
-rw-r--r--  arch/blackfin/kernel/time-ts.c             | 219
-rw-r--r--  arch/blackfin/kernel/time.c                |  19
-rw-r--r--  arch/blackfin/kernel/traps.c               |  94
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S         |  54
20 files changed, 731 insertions, 361 deletions
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index 318b9b692a48..6140cd69c782 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -6,9 +6,15 @@ extra-y := init_task.o vmlinux.lds | |||
6 | 6 | ||
7 | obj-y := \ | 7 | obj-y := \ |
8 | entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \ | 8 | entry.o process.o bfin_ksyms.o ptrace.o setup.o signal.o \ |
9 | sys_bfin.o time.o traps.o irqchip.o dma-mapping.o flat.o \ | 9 | sys_bfin.o traps.o irqchip.o dma-mapping.o flat.o \ |
10 | fixed_code.o reboot.o bfin_gpio.o | 10 | fixed_code.o reboot.o bfin_gpio.o |
11 | 11 | ||
12 | ifeq ($(CONFIG_GENERIC_CLOCKEVENTS),y) | ||
13 | obj-y += time-ts.o | ||
14 | else | ||
15 | obj-y += time.o | ||
16 | endif | ||
17 | |||
12 | obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o | 18 | obj-$(CONFIG_BFIN_GPTIMERS) += gptimers.o |
13 | obj-$(CONFIG_MODULES) += module.o | 19 | obj-$(CONFIG_MODULES) += module.o |
14 | obj-$(CONFIG_BFIN_DMA_5XX) += bfin_dma_5xx.o | 20 | obj-$(CONFIG_BFIN_DMA_5XX) += bfin_dma_5xx.o |
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 8fd5d22cec34..fd5448d6107c 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -67,7 +67,7 @@ static int __init blackfin_dma_init(void) | |||
67 | 67 | ||
68 | for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++) { | 68 | for (i = 0; i < MAX_BLACKFIN_DMA_CHANNEL; i++) { |
69 | dma_ch[i].chan_status = DMA_CHANNEL_FREE; | 69 | dma_ch[i].chan_status = DMA_CHANNEL_FREE; |
70 | dma_ch[i].regs = base_addr[i]; | 70 | dma_ch[i].regs = dma_io_base_addr[i]; |
71 | mutex_init(&(dma_ch[i].dmalock)); | 71 | mutex_init(&(dma_ch[i].dmalock)); |
72 | } | 72 | } |
73 | /* Mark MEMDMA Channel 0 as requested since we're using it internally */ | 73 | /* Mark MEMDMA Channel 0 as requested since we're using it internally */ |
@@ -106,12 +106,15 @@ int request_dma(unsigned int channel, char *device_id) | |||
106 | 106 | ||
107 | #ifdef CONFIG_BF54x | 107 | #ifdef CONFIG_BF54x |
108 | if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) { | 108 | if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) { |
109 | if (strncmp(device_id, "BFIN_UART", 9) == 0) | 109 | if (strncmp(device_id, "BFIN_UART", 9) == 0) { |
110 | dma_ch[channel].regs->peripheral_map &= 0x0FFF; | ||
110 | dma_ch[channel].regs->peripheral_map |= | 111 | dma_ch[channel].regs->peripheral_map |= |
111 | (channel - CH_UART2_RX + 0xC); | 112 | ((channel - CH_UART2_RX + 0xC)<<12); |
112 | else | 113 | } else { |
114 | dma_ch[channel].regs->peripheral_map &= 0x0FFF; | ||
113 | dma_ch[channel].regs->peripheral_map |= | 115 | dma_ch[channel].regs->peripheral_map |= |
114 | (channel - CH_UART2_RX + 0x6); | 116 | ((channel - CH_UART2_RX + 0x6)<<12); |
117 | } | ||
115 | } | 118 | } |
116 | #endif | 119 | #endif |
117 | 120 | ||
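The BF54x hunk above rewrites the UART DMA mapping so the new value lands in the peripheral-map field instead of the low bits. A minimal userspace sketch of that register update follows; the field layout (PMAP in bits 15:12) and the register value in main() are assumptions for illustration, not the kernel's definitions.

```c
/* Sketch: clear the PMAP field of a 16-bit peripheral-map register,
 * then install a new value shifted into bits 15:12, as the hunk
 * above now does. The old code OR'd an unshifted value into the
 * low bits and never touched PMAP at all.
 */
#include <stdint.h>
#include <stdio.h>

#define PMAP_SHIFT 12
#define PMAP_MASK  0xF000u

static uint16_t set_pmap(uint16_t reg, uint16_t pmap_val)
{
	reg &= (uint16_t)~PMAP_MASK;               /* keep bits 11:0 */
	reg |= (uint16_t)(pmap_val << PMAP_SHIFT); /* install new PMAP */
	return reg;
}

int main(void)
{
	uint16_t reg = 0x8001;                  /* arbitrary starting value */
	printf("0x%04x\n", set_pmap(reg, 0xC)); /* -> 0xc001 */
	return 0;
}
```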
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index 08788f7bbfba..7e8eaf4a31bb 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -95,14 +95,14 @@ enum { | |||
95 | AWA_data_clear = SYSCR, | 95 | AWA_data_clear = SYSCR, |
96 | AWA_data_set = SYSCR, | 96 | AWA_data_set = SYSCR, |
97 | AWA_toggle = SYSCR, | 97 | AWA_toggle = SYSCR, |
98 | AWA_maska = UART_SCR, | 98 | AWA_maska = BFIN_UART_SCR, |
99 | AWA_maska_clear = UART_SCR, | 99 | AWA_maska_clear = BFIN_UART_SCR, |
100 | AWA_maska_set = UART_SCR, | 100 | AWA_maska_set = BFIN_UART_SCR, |
101 | AWA_maska_toggle = UART_SCR, | 101 | AWA_maska_toggle = BFIN_UART_SCR, |
102 | AWA_maskb = UART_GCTL, | 102 | AWA_maskb = BFIN_UART_GCTL, |
103 | AWA_maskb_clear = UART_GCTL, | 103 | AWA_maskb_clear = BFIN_UART_GCTL, |
104 | AWA_maskb_set = UART_GCTL, | 104 | AWA_maskb_set = BFIN_UART_GCTL, |
105 | AWA_maskb_toggle = UART_GCTL, | 105 | AWA_maskb_toggle = BFIN_UART_GCTL, |
106 | AWA_dir = SPORT1_STAT, | 106 | AWA_dir = SPORT1_STAT, |
107 | AWA_polar = SPORT1_STAT, | 107 | AWA_polar = SPORT1_STAT, |
108 | AWA_edge = SPORT1_STAT, | 108 | AWA_edge = SPORT1_STAT, |
@@ -348,11 +348,10 @@ static void portmux_setup(unsigned short per, unsigned short function) | |||
348 | offset = port_mux_lut[y].offset; | 348 | offset = port_mux_lut[y].offset; |
349 | muxreg = bfin_read_PORT_MUX(); | 349 | muxreg = bfin_read_PORT_MUX(); |
350 | 350 | ||
351 | if (offset != 1) { | 351 | if (offset != 1) |
352 | muxreg &= ~(1 << offset); | 352 | muxreg &= ~(1 << offset); |
353 | } else { | 353 | else |
354 | muxreg &= ~(3 << 1); | 354 | muxreg &= ~(3 << 1); |
355 | } | ||
356 | 355 | ||
357 | muxreg |= (function << offset); | 356 | muxreg |= (function << offset); |
358 | bfin_write_PORT_MUX(muxreg); | 357 | bfin_write_PORT_MUX(muxreg); |
@@ -396,39 +395,11 @@ inline void portmux_setup(unsigned short portno, unsigned short function) | |||
396 | # define portmux_setup(...) do { } while (0) | 395 | # define portmux_setup(...) do { } while (0) |
397 | #endif | 396 | #endif |
398 | 397 | ||
399 | #ifndef BF548_FAMILY | ||
400 | static void default_gpio(unsigned gpio) | ||
401 | { | ||
402 | unsigned short bank, bitmask; | ||
403 | unsigned long flags; | ||
404 | |||
405 | bank = gpio_bank(gpio); | ||
406 | bitmask = gpio_bit(gpio); | ||
407 | |||
408 | local_irq_save(flags); | ||
409 | |||
410 | gpio_bankb[bank]->maska_clear = bitmask; | ||
411 | gpio_bankb[bank]->maskb_clear = bitmask; | ||
412 | SSYNC(); | ||
413 | gpio_bankb[bank]->inen &= ~bitmask; | ||
414 | gpio_bankb[bank]->dir &= ~bitmask; | ||
415 | gpio_bankb[bank]->polar &= ~bitmask; | ||
416 | gpio_bankb[bank]->both &= ~bitmask; | ||
417 | gpio_bankb[bank]->edge &= ~bitmask; | ||
418 | AWA_DUMMY_READ(edge); | ||
419 | local_irq_restore(flags); | ||
420 | } | ||
421 | #else | ||
422 | # define default_gpio(...) do { } while (0) | ||
423 | #endif | ||
424 | |||
425 | static int __init bfin_gpio_init(void) | 398 | static int __init bfin_gpio_init(void) |
426 | { | 399 | { |
427 | |||
428 | printk(KERN_INFO "Blackfin GPIO Controller\n"); | 400 | printk(KERN_INFO "Blackfin GPIO Controller\n"); |
429 | 401 | ||
430 | return 0; | 402 | return 0; |
431 | |||
432 | } | 403 | } |
433 | arch_initcall(bfin_gpio_init); | 404 | arch_initcall(bfin_gpio_init); |
434 | 405 | ||
@@ -821,10 +792,10 @@ int peripheral_request(unsigned short per, const char *label) | |||
821 | local_irq_save(flags); | 792 | local_irq_save(flags); |
822 | 793 | ||
823 | if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) { | 794 | if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) { |
795 | dump_stack(); | ||
824 | printk(KERN_ERR | 796 | printk(KERN_ERR |
825 | "%s: Peripheral %d is already reserved as GPIO by %s !\n", | 797 | "%s: Peripheral %d is already reserved as GPIO by %s !\n", |
826 | __FUNCTION__, ident, get_label(ident)); | 798 | __func__, ident, get_label(ident)); |
827 | dump_stack(); | ||
828 | local_irq_restore(flags); | 799 | local_irq_restore(flags); |
829 | return -EBUSY; | 800 | return -EBUSY; |
830 | } | 801 | } |
@@ -833,31 +804,31 @@ int peripheral_request(unsigned short per, const char *label) | |||
833 | 804 | ||
834 | u16 funct = get_portmux(ident); | 805 | u16 funct = get_portmux(ident); |
835 | 806 | ||
836 | /* | 807 | /* |
837 | * Pin functions like AMC address strobes my | 808 | * Pin functions like AMC address strobes my |
838 | * be requested and used by several drivers | 809 | * be requested and used by several drivers |
839 | */ | 810 | */ |
840 | 811 | ||
841 | if (!((per & P_MAYSHARE) && (funct == P_FUNCT2MUX(per)))) { | 812 | if (!((per & P_MAYSHARE) && (funct == P_FUNCT2MUX(per)))) { |
842 | 813 | ||
843 | /* | 814 | /* |
844 | * Allow that the identical pin function can | 815 | * Allow that the identical pin function can |
845 | * be requested from the same driver twice | 816 | * be requested from the same driver twice |
846 | */ | 817 | */ |
847 | 818 | ||
848 | if (cmp_label(ident, label) == 0) | 819 | if (cmp_label(ident, label) == 0) |
849 | goto anyway; | 820 | goto anyway; |
850 | 821 | ||
822 | dump_stack(); | ||
851 | printk(KERN_ERR | 823 | printk(KERN_ERR |
852 | "%s: Peripheral %d function %d is already reserved by %s !\n", | 824 | "%s: Peripheral %d function %d is already reserved by %s !\n", |
853 | __FUNCTION__, ident, P_FUNCT2MUX(per), get_label(ident)); | 825 | __func__, ident, P_FUNCT2MUX(per), get_label(ident)); |
854 | dump_stack(); | ||
855 | local_irq_restore(flags); | 826 | local_irq_restore(flags); |
856 | return -EBUSY; | 827 | return -EBUSY; |
857 | } | 828 | } |
858 | } | 829 | } |
859 | 830 | ||
860 | anyway: | 831 | anyway: |
861 | reserved_peri_map[gpio_bank(ident)] |= gpio_bit(ident); | 832 | reserved_peri_map[gpio_bank(ident)] |= gpio_bit(ident); |
862 | 833 | ||
863 | portmux_setup(ident, P_FUNCT2MUX(per)); | 834 | portmux_setup(ident, P_FUNCT2MUX(per)); |
@@ -890,47 +861,47 @@ int peripheral_request(unsigned short per, const char *label) | |||
890 | 861 | ||
891 | if (!check_gpio(ident)) { | 862 | if (!check_gpio(ident)) { |
892 | 863 | ||
893 | if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) { | 864 | if (unlikely(reserved_gpio_map[gpio_bank(ident)] & gpio_bit(ident))) { |
894 | printk(KERN_ERR | 865 | dump_stack(); |
895 | "%s: Peripheral %d is already reserved as GPIO by %s !\n", | 866 | printk(KERN_ERR |
896 | __FUNCTION__, ident, get_label(ident)); | 867 | "%s: Peripheral %d is already reserved as GPIO by %s !\n", |
897 | dump_stack(); | 868 | __func__, ident, get_label(ident)); |
898 | local_irq_restore(flags); | 869 | local_irq_restore(flags); |
899 | return -EBUSY; | 870 | return -EBUSY; |
900 | } | 871 | } |
901 | 872 | ||
902 | } | 873 | } |
903 | 874 | ||
904 | if (unlikely(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident))) { | 875 | if (unlikely(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident))) { |
905 | 876 | ||
906 | /* | 877 | /* |
907 | * Pin functions like AMC address strobes my | 878 | * Pin functions like AMC address strobes my |
908 | * be requested and used by several drivers | 879 | * be requested and used by several drivers |
909 | */ | 880 | */ |
910 | 881 | ||
911 | if (!(per & P_MAYSHARE)) { | 882 | if (!(per & P_MAYSHARE)) { |
912 | 883 | ||
913 | /* | 884 | /* |
914 | * Allow that the identical pin function can | 885 | * Allow that the identical pin function can |
915 | * be requested from the same driver twice | 886 | * be requested from the same driver twice |
916 | */ | 887 | */ |
917 | 888 | ||
918 | if (cmp_label(ident, label) == 0) | 889 | if (cmp_label(ident, label) == 0) |
919 | goto anyway; | 890 | goto anyway; |
920 | 891 | ||
892 | dump_stack(); | ||
921 | printk(KERN_ERR | 893 | printk(KERN_ERR |
922 | "%s: Peripheral %d function %d is already" | 894 | "%s: Peripheral %d function %d is already" |
923 | " reserved by %s !\n", | 895 | " reserved by %s !\n", |
924 | __FUNCTION__, ident, P_FUNCT2MUX(per), | 896 | __func__, ident, P_FUNCT2MUX(per), |
925 | get_label(ident)); | 897 | get_label(ident)); |
926 | dump_stack(); | ||
927 | local_irq_restore(flags); | 898 | local_irq_restore(flags); |
928 | return -EBUSY; | 899 | return -EBUSY; |
929 | } | 900 | } |
930 | 901 | ||
931 | } | 902 | } |
932 | 903 | ||
933 | anyway: | 904 | anyway: |
934 | portmux_setup(per, P_FUNCT2MUX(per)); | 905 | portmux_setup(per, P_FUNCT2MUX(per)); |
935 | 906 | ||
936 | port_setup(ident, PERIPHERAL_USAGE); | 907 | port_setup(ident, PERIPHERAL_USAGE); |
@@ -944,7 +915,7 @@ anyway: | |||
944 | EXPORT_SYMBOL(peripheral_request); | 915 | EXPORT_SYMBOL(peripheral_request); |
945 | #endif | 916 | #endif |
946 | 917 | ||
947 | int peripheral_request_list(unsigned short per[], const char *label) | 918 | int peripheral_request_list(const unsigned short per[], const char *label) |
948 | { | 919 | { |
949 | u16 cnt; | 920 | u16 cnt; |
950 | int ret; | 921 | int ret; |
@@ -954,10 +925,10 @@ int peripheral_request_list(unsigned short per[], const char *label) | |||
954 | ret = peripheral_request(per[cnt], label); | 925 | ret = peripheral_request(per[cnt], label); |
955 | 926 | ||
956 | if (ret < 0) { | 927 | if (ret < 0) { |
957 | for ( ; cnt > 0; cnt--) { | 928 | for ( ; cnt > 0; cnt--) |
958 | peripheral_free(per[cnt - 1]); | 929 | peripheral_free(per[cnt - 1]); |
959 | } | 930 | |
960 | return ret; | 931 | return ret; |
961 | } | 932 | } |
962 | } | 933 | } |
963 | 934 | ||
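peripheral_request_list() keeps its all-or-nothing behaviour while dropping the braces: on the first failure everything already claimed is released in reverse order. A standalone sketch of that rollback pattern is below; request_one()/free_one() are stand-ins, not the real kernel helpers.

```c
/* Sketch of the "roll back on first failure" pattern used by
 * peripheral_request_list() above. The list is 0-terminated,
 * like the kernel API.
 */
#include <stdio.h>

static int request_one(unsigned short id) { return id == 42 ? -1 : 0; }
static void free_one(unsigned short id)   { printf("freed %u\n", id); }

static int request_list(const unsigned short ids[])
{
	unsigned short cnt;
	int ret;

	for (cnt = 0; ids[cnt] != 0; cnt++) {
		ret = request_one(ids[cnt]);
		if (ret < 0) {
			for (; cnt > 0; cnt--)   /* undo what we already got */
				free_one(ids[cnt - 1]);
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	const unsigned short ids[] = { 7, 8, 42, 0 };
	return request_list(ids) ? 1 : 0;
}
```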
@@ -981,15 +952,13 @@ void peripheral_free(unsigned short per) | |||
981 | 952 | ||
982 | local_irq_save(flags); | 953 | local_irq_save(flags); |
983 | 954 | ||
984 | if (unlikely(!(reserved_peri_map[gpio_bank(ident)] | 955 | if (unlikely(!(reserved_peri_map[gpio_bank(ident)] & gpio_bit(ident)))) { |
985 | & gpio_bit(ident)))) { | ||
986 | local_irq_restore(flags); | 956 | local_irq_restore(flags); |
987 | return; | 957 | return; |
988 | } | 958 | } |
989 | 959 | ||
990 | if (!(per & P_MAYSHARE)) { | 960 | if (!(per & P_MAYSHARE)) |
991 | port_setup(ident, GPIO_USAGE); | 961 | port_setup(ident, GPIO_USAGE); |
992 | } | ||
993 | 962 | ||
994 | reserved_peri_map[gpio_bank(ident)] &= ~gpio_bit(ident); | 963 | reserved_peri_map[gpio_bank(ident)] &= ~gpio_bit(ident); |
995 | 964 | ||
@@ -999,14 +968,11 @@ void peripheral_free(unsigned short per) | |||
999 | } | 968 | } |
1000 | EXPORT_SYMBOL(peripheral_free); | 969 | EXPORT_SYMBOL(peripheral_free); |
1001 | 970 | ||
1002 | void peripheral_free_list(unsigned short per[]) | 971 | void peripheral_free_list(const unsigned short per[]) |
1003 | { | 972 | { |
1004 | u16 cnt; | 973 | u16 cnt; |
1005 | 974 | for (cnt = 0; per[cnt] != 0; cnt++) | |
1006 | for (cnt = 0; per[cnt] != 0; cnt++) { | ||
1007 | peripheral_free(per[cnt]); | 975 | peripheral_free(per[cnt]); |
1008 | } | ||
1009 | |||
1010 | } | 976 | } |
1011 | EXPORT_SYMBOL(peripheral_free_list); | 977 | EXPORT_SYMBOL(peripheral_free_list); |
1012 | 978 | ||
@@ -1046,17 +1012,17 @@ int gpio_request(unsigned gpio, const char *label) | |||
1046 | } | 1012 | } |
1047 | 1013 | ||
1048 | if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))) { | 1014 | if (unlikely(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))) { |
1015 | dump_stack(); | ||
1049 | printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n", | 1016 | printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n", |
1050 | gpio, get_label(gpio)); | 1017 | gpio, get_label(gpio)); |
1051 | dump_stack(); | ||
1052 | local_irq_restore(flags); | 1018 | local_irq_restore(flags); |
1053 | return -EBUSY; | 1019 | return -EBUSY; |
1054 | } | 1020 | } |
1055 | if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) { | 1021 | if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) { |
1022 | dump_stack(); | ||
1056 | printk(KERN_ERR | 1023 | printk(KERN_ERR |
1057 | "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", | 1024 | "bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n", |
1058 | gpio, get_label(gpio)); | 1025 | gpio, get_label(gpio)); |
1059 | dump_stack(); | ||
1060 | local_irq_restore(flags); | 1026 | local_irq_restore(flags); |
1061 | return -EBUSY; | 1027 | return -EBUSY; |
1062 | } | 1028 | } |
@@ -1082,14 +1048,12 @@ void gpio_free(unsigned gpio) | |||
1082 | local_irq_save(flags); | 1048 | local_irq_save(flags); |
1083 | 1049 | ||
1084 | if (unlikely(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) { | 1050 | if (unlikely(!(reserved_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) { |
1085 | gpio_error(gpio); | ||
1086 | dump_stack(); | 1051 | dump_stack(); |
1052 | gpio_error(gpio); | ||
1087 | local_irq_restore(flags); | 1053 | local_irq_restore(flags); |
1088 | return; | 1054 | return; |
1089 | } | 1055 | } |
1090 | 1056 | ||
1091 | default_gpio(gpio); | ||
1092 | |||
1093 | reserved_gpio_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); | 1057 | reserved_gpio_map[gpio_bank(gpio)] &= ~gpio_bit(gpio); |
1094 | 1058 | ||
1095 | set_label(gpio, "free"); | 1059 | set_label(gpio, "free"); |
@@ -1152,6 +1116,18 @@ int gpio_get_value(unsigned gpio) | |||
1152 | } | 1116 | } |
1153 | EXPORT_SYMBOL(gpio_get_value); | 1117 | EXPORT_SYMBOL(gpio_get_value); |
1154 | 1118 | ||
1119 | void bfin_gpio_irq_prepare(unsigned gpio) | ||
1120 | { | ||
1121 | unsigned long flags; | ||
1122 | |||
1123 | port_setup(gpio, GPIO_USAGE); | ||
1124 | |||
1125 | local_irq_save(flags); | ||
1126 | gpio_array[gpio_bank(gpio)]->port_dir_clear = gpio_bit(gpio); | ||
1127 | gpio_array[gpio_bank(gpio)]->port_inen |= gpio_bit(gpio); | ||
1128 | local_irq_restore(flags); | ||
1129 | } | ||
1130 | |||
1155 | #else | 1131 | #else |
1156 | 1132 | ||
1157 | int gpio_direction_input(unsigned gpio) | 1133 | int gpio_direction_input(unsigned gpio) |
@@ -1218,6 +1194,11 @@ void bfin_gpio_reset_spi0_ssel1(void) | |||
1218 | udelay(1); | 1194 | udelay(1); |
1219 | } | 1195 | } |
1220 | 1196 | ||
1197 | void bfin_gpio_irq_prepare(unsigned gpio) | ||
1198 | { | ||
1199 | port_setup(gpio, GPIO_USAGE); | ||
1200 | } | ||
1201 | |||
1221 | #endif /*BF548_FAMILY */ | 1202 | #endif /*BF548_FAMILY */ |
1222 | 1203 | ||
1223 | #if defined(CONFIG_PROC_FS) | 1204 | #if defined(CONFIG_PROC_FS) |
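The new bfin_gpio_irq_prepare() on BF54x parts hands the pin to GPIO mode, clears its direction bit and enables its input buffer. A simplified sketch of those bit operations follows; the struct is a made-up stand-in for the real port register block, not the kernel's layout.

```c
/* Sketch: prepare a GPIO for interrupt use by forcing it to be an
 * input. dir_clear models a write-1-to-clear direction register,
 * inen models the per-pin input-enable register.
 */
#include <stdint.h>

struct fake_port {
	uint16_t dir_clear;	/* write 1 to make the pin an input */
	uint16_t inen;		/* input enable, one bit per pin */
};

static void irq_prepare(struct fake_port *port, unsigned pin)
{
	uint16_t bit = 1u << (pin & 15);

	port->dir_clear = bit;	/* direction -> input */
	port->inen |= bit;	/* enable the input buffer */
}

int main(void)
{
	struct fake_port port = { 0, 0 };

	irq_prepare(&port, 3);
	return port.inen == (1u << 3) ? 0 : 1;
}
```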
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinfo.c b/arch/blackfin/kernel/cplb-mpu/cplbinfo.c
index bd072299f7f2..822beefa3a4b 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinfo.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinfo.c
@@ -39,14 +39,6 @@ | |||
39 | #include <asm/cplbinit.h> | 39 | #include <asm/cplbinit.h> |
40 | #include <asm/blackfin.h> | 40 | #include <asm/blackfin.h> |
41 | 41 | ||
42 | #define CPLB_I 1 | ||
43 | #define CPLB_D 2 | ||
44 | |||
45 | #define SYNC_SYS SSYNC() | ||
46 | #define SYNC_CORE CSYNC() | ||
47 | |||
48 | #define CPLB_BIT_PAGESIZE 0x30000 | ||
49 | |||
50 | static char page_size_string_table[][4] = { "1K", "4K", "1M", "4M" }; | 42 | static char page_size_string_table[][4] = { "1K", "4K", "1M", "4M" }; |
51 | 43 | ||
52 | static char *cplb_print_entry(char *buf, struct cplb_entry *tbl, int switched) | 44 | static char *cplb_print_entry(char *buf, struct cplb_entry *tbl, int switched) |
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index dc6e8a7a8bda..48060105346a 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -43,13 +43,15 @@ void __init generate_cpl_tables(void) | |||
43 | unsigned long d_data, i_data; | 43 | unsigned long d_data, i_data; |
44 | unsigned long d_cache = 0, i_cache = 0; | 44 | unsigned long d_cache = 0, i_cache = 0; |
45 | 45 | ||
46 | printk(KERN_INFO "MPU: setting up cplb tables with memory protection\n"); | ||
47 | |||
46 | #ifdef CONFIG_BFIN_ICACHE | 48 | #ifdef CONFIG_BFIN_ICACHE |
47 | i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; | 49 | i_cache = CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; |
48 | #endif | 50 | #endif |
49 | 51 | ||
50 | #ifdef CONFIG_BFIN_DCACHE | 52 | #ifdef CONFIG_BFIN_DCACHE |
51 | d_cache = CPLB_L1_CHBL; | 53 | d_cache = CPLB_L1_CHBL; |
52 | #ifdef CONFIG_BLKFIN_WT | 54 | #ifdef CONFIG_BFIN_WT |
53 | d_cache |= CPLB_L1_AOW | CPLB_WT; | 55 | d_cache |= CPLB_L1_AOW | CPLB_WT; |
54 | #endif | 56 | #endif |
55 | #endif | 57 | #endif |
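The renamed CONFIG_BFIN_WT ifdef selects write-through rather than the default write-back policy when composing the data cache attributes. A small sketch of that flag composition follows; the X_CPLB_* constants are placeholders, not the real CPLB_* values.

```c
/* Sketch: build the data-CPLB attribute word depending on whether
 * the data cache is enabled and whether write-through is selected
 * (the CONFIG_BFIN_WT choice in the hunk above).
 */
#include <stdio.h>

#define X_CPLB_L1_CHBL (1u << 12)	/* cacheable in L1 (placeholder) */
#define X_CPLB_L1_AOW  (1u << 15)	/* allocate on write (placeholder) */
#define X_CPLB_WT      (1u << 14)	/* write-through (placeholder) */

static unsigned long dcache_flags(int dcache_on, int write_through)
{
	unsigned long d = 0;

	if (dcache_on) {
		d |= X_CPLB_L1_CHBL;
		if (write_through)	/* CONFIG_BFIN_WT */
			d |= X_CPLB_L1_AOW | X_CPLB_WT;
	}
	return d;
}

int main(void)
{
	printf("%#lx\n", dcache_flags(1, 1));
	return 0;
}
```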
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index c426a22f9907..99f2831e2964 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -24,8 +24,6 @@ | |||
24 | #include <asm/cplbinit.h> | 24 | #include <asm/cplbinit.h> |
25 | #include <asm/mmu_context.h> | 25 | #include <asm/mmu_context.h> |
26 | 26 | ||
27 | #ifdef CONFIG_BFIN_ICACHE | ||
28 | |||
29 | #define FAULT_RW (1 << 16) | 27 | #define FAULT_RW (1 << 16) |
30 | #define FAULT_USERSUPV (1 << 17) | 28 | #define FAULT_USERSUPV (1 << 17) |
31 | 29 | ||
@@ -143,30 +141,48 @@ static noinline int dcplb_miss(void) | |||
143 | unsigned long d_data; | 141 | unsigned long d_data; |
144 | 142 | ||
145 | nr_dcplb_miss++; | 143 | nr_dcplb_miss++; |
146 | if (addr >= _ramend) | ||
147 | return CPLB_PROT_VIOL; | ||
148 | 144 | ||
149 | d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; | 145 | d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; |
150 | #ifdef CONFIG_BFIN_DCACHE | 146 | #ifdef CONFIG_BFIN_DCACHE |
151 | d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; | 147 | if (addr < _ramend - DMA_UNCACHED_REGION || |
152 | #ifdef CONFIG_BLKFIN_WT | 148 | (reserved_mem_dcache_on && addr >= _ramend && |
153 | d_data |= CPLB_L1_AOW | CPLB_WT; | 149 | addr < physical_mem_end)) { |
154 | #endif | 150 | d_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; |
151 | #ifdef CONFIG_BFIN_WT | ||
152 | d_data |= CPLB_L1_AOW | CPLB_WT; | ||
155 | #endif | 153 | #endif |
156 | mask = current_rwx_mask; | ||
157 | if (mask) { | ||
158 | int page = addr >> PAGE_SHIFT; | ||
159 | int offs = page >> 5; | ||
160 | int bit = 1 << (page & 31); | ||
161 | |||
162 | if (mask[offs] & bit) | ||
163 | d_data |= CPLB_USER_RD; | ||
164 | |||
165 | mask += page_mask_nelts; | ||
166 | if (mask[offs] & bit) | ||
167 | d_data |= CPLB_USER_WR; | ||
168 | } | 154 | } |
155 | #endif | ||
156 | if (addr >= physical_mem_end) { | ||
157 | if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE | ||
158 | && (status & FAULT_USERSUPV)) { | ||
159 | addr &= ~0x3fffff; | ||
160 | d_data &= ~PAGE_SIZE_4KB; | ||
161 | d_data |= PAGE_SIZE_4MB; | ||
162 | } else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH | ||
163 | && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) { | ||
164 | addr &= ~(1 * 1024 * 1024 - 1); | ||
165 | d_data &= ~PAGE_SIZE_4KB; | ||
166 | d_data |= PAGE_SIZE_1MB; | ||
167 | } else | ||
168 | return CPLB_PROT_VIOL; | ||
169 | } else if (addr >= _ramend) { | ||
170 | d_data |= CPLB_USER_RD | CPLB_USER_WR; | ||
171 | } else { | ||
172 | mask = current_rwx_mask; | ||
173 | if (mask) { | ||
174 | int page = addr >> PAGE_SHIFT; | ||
175 | int offs = page >> 5; | ||
176 | int bit = 1 << (page & 31); | ||
177 | |||
178 | if (mask[offs] & bit) | ||
179 | d_data |= CPLB_USER_RD; | ||
169 | 180 | ||
181 | mask += page_mask_nelts; | ||
182 | if (mask[offs] & bit) | ||
183 | d_data |= CPLB_USER_WR; | ||
184 | } | ||
185 | } | ||
170 | idx = evict_one_dcplb(); | 186 | idx = evict_one_dcplb(); |
171 | 187 | ||
172 | addr &= PAGE_MASK; | 188 | addr &= PAGE_MASK; |
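For addresses inside RAM, the rewritten dcplb_miss() still consults the per-process rwx bitmaps: consecutive bitmaps of page_mask_nelts 32-bit words, one bit per page, read permissions first and write permissions after. A userspace sketch of that lookup follows; PAGE_MASK_NELTS and the example values are assumptions.

```c
/* Sketch: index back-to-back permission bitmaps by page number,
 * exactly as the page/offs/bit arithmetic in the hunk above does.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_MASK_NELTS 8	/* illustrative bitmap length in words */

static int page_allowed(const unsigned long *mask, unsigned long addr,
			int which_map /* 0 = read, 1 = write */)
{
	int page = addr >> PAGE_SHIFT;
	int offs = page >> 5;		/* which 32-bit word */
	int bit  = 1 << (page & 31);	/* which bit inside it */

	return (mask[which_map * PAGE_MASK_NELTS + offs] & bit) != 0;
}

int main(void)
{
	unsigned long mask[2 * PAGE_MASK_NELTS] = { 0 };

	mask[0] = 1 << 3;	/* page 3 readable, not writable */
	printf("%d %d\n", page_allowed(mask, 3 << PAGE_SHIFT, 0),
			  page_allowed(mask, 3 << PAGE_SHIFT, 1));
	return 0;
}
```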
@@ -189,12 +205,14 @@ static noinline int icplb_miss(void) | |||
189 | unsigned long i_data; | 205 | unsigned long i_data; |
190 | 206 | ||
191 | nr_icplb_miss++; | 207 | nr_icplb_miss++; |
192 | if (status & FAULT_USERSUPV) | ||
193 | nr_icplb_supv_miss++; | ||
194 | 208 | ||
195 | if (addr >= _ramend) | 209 | /* If inside the uncached DMA region, fault. */ |
210 | if (addr >= _ramend - DMA_UNCACHED_REGION && addr < _ramend) | ||
196 | return CPLB_PROT_VIOL; | 211 | return CPLB_PROT_VIOL; |
197 | 212 | ||
213 | if (status & FAULT_USERSUPV) | ||
214 | nr_icplb_supv_miss++; | ||
215 | |||
198 | /* | 216 | /* |
199 | * First, try to find a CPLB that matches this address. If we | 217 | * First, try to find a CPLB that matches this address. If we |
200 | * find one, then the fact that we're in the miss handler means | 218 | * find one, then the fact that we're in the miss handler means |
@@ -211,30 +229,48 @@ static noinline int icplb_miss(void) | |||
211 | } | 229 | } |
212 | 230 | ||
213 | i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB; | 231 | i_data = CPLB_VALID | CPLB_PORTPRIO | PAGE_SIZE_4KB; |
214 | #ifdef CONFIG_BFIN_ICACHE | ||
215 | i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; | ||
216 | #endif | ||
217 | 232 | ||
233 | #ifdef CONFIG_BFIN_ICACHE | ||
218 | /* | 234 | /* |
219 | * Two cases to distinguish - a supervisor access must necessarily | 235 | * Normal RAM, and possibly the reserved memory area, are |
220 | * be for a module page; we grant it unconditionally (could do better | 236 | * cacheable. |
221 | * here in the future). Otherwise, check the x bitmap of the current | ||
222 | * process. | ||
223 | */ | 237 | */ |
224 | if (!(status & FAULT_USERSUPV)) { | 238 | if (addr < _ramend || |
225 | unsigned long *mask = current_rwx_mask; | 239 | (addr < physical_mem_end && reserved_mem_icache_on)) |
226 | 240 | i_data |= CPLB_L1_CHBL | ANOMALY_05000158_WORKAROUND; | |
227 | if (mask) { | 241 | #endif |
228 | int page = addr >> PAGE_SHIFT; | ||
229 | int offs = page >> 5; | ||
230 | int bit = 1 << (page & 31); | ||
231 | 242 | ||
232 | mask += 2 * page_mask_nelts; | 243 | if (addr >= physical_mem_end) { |
233 | if (mask[offs] & bit) | 244 | if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH |
234 | i_data |= CPLB_USER_RD; | 245 | && (status & FAULT_USERSUPV)) { |
246 | addr &= ~(1 * 1024 * 1024 - 1); | ||
247 | i_data &= ~PAGE_SIZE_4KB; | ||
248 | i_data |= PAGE_SIZE_1MB; | ||
249 | } else | ||
250 | return CPLB_PROT_VIOL; | ||
251 | } else if (addr >= _ramend) { | ||
252 | i_data |= CPLB_USER_RD; | ||
253 | } else { | ||
254 | /* | ||
255 | * Two cases to distinguish - a supervisor access must | ||
256 | * necessarily be for a module page; we grant it | ||
257 | * unconditionally (could do better here in the future). | ||
258 | * Otherwise, check the x bitmap of the current process. | ||
259 | */ | ||
260 | if (!(status & FAULT_USERSUPV)) { | ||
261 | unsigned long *mask = current_rwx_mask; | ||
262 | |||
263 | if (mask) { | ||
264 | int page = addr >> PAGE_SHIFT; | ||
265 | int offs = page >> 5; | ||
266 | int bit = 1 << (page & 31); | ||
267 | |||
268 | mask += 2 * page_mask_nelts; | ||
269 | if (mask[offs] & bit) | ||
270 | i_data |= CPLB_USER_RD; | ||
271 | } | ||
235 | } | 272 | } |
236 | } | 273 | } |
237 | |||
238 | idx = evict_one_icplb(); | 274 | idx = evict_one_icplb(); |
239 | addr &= PAGE_MASK; | 275 | addr &= PAGE_MASK; |
240 | icplb_tbl[idx].addr = addr; | 276 | icplb_tbl[idx].addr = addr; |
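For addresses above physical memory, both miss handlers now pick a larger CPLB page (4 MB for the async banks, 1 MB for the boot ROM) and round the fault address down to that boundary. The arithmetic is sketched below; the example addresses are made up for illustration.

```c
/* Sketch: align a fault address to the CPLB page size chosen for
 * the region, as the rewritten miss handlers above do.
 */
#include <stdio.h>

#define SZ_1M (1024UL * 1024)
#define SZ_4M (4 * SZ_1M)

static unsigned long cplb_base(unsigned long addr, unsigned long page_size)
{
	return addr & ~(page_size - 1);
}

int main(void)
{
	/* address inside an async bank -> 4 MB page base */
	printf("%#lx\n", cplb_base(0x20301234UL, SZ_4M)); /* 0x20000000 */
	/* address inside the boot ROM -> 1 MB page base */
	printf("%#lx\n", cplb_base(0xef0001f0UL, SZ_1M)); /* 0xef000000 */
	return 0;
}
```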
@@ -250,7 +286,6 @@ static noinline int icplb_miss(void) | |||
250 | 286 | ||
251 | static noinline int dcplb_protection_fault(void) | 287 | static noinline int dcplb_protection_fault(void) |
252 | { | 288 | { |
253 | unsigned long addr = bfin_read_DCPLB_FAULT_ADDR(); | ||
254 | int status = bfin_read_DCPLB_STATUS(); | 289 | int status = bfin_read_DCPLB_STATUS(); |
255 | 290 | ||
256 | nr_dcplb_prot++; | 291 | nr_dcplb_prot++; |
@@ -280,8 +315,7 @@ int cplb_hdr(int seqstat, struct pt_regs *regs) | |||
280 | case 0x26: | 315 | case 0x26: |
281 | return dcplb_miss(); | 316 | return dcplb_miss(); |
282 | default: | 317 | default: |
283 | return 1; | 318 | return 1; |
284 | panic_cplb_error(seqstat, regs); | ||
285 | } | 319 | } |
286 | } | 320 | } |
287 | 321 | ||
@@ -299,7 +333,7 @@ void flush_switched_cplbs(void) | |||
299 | enable_icplb(); | 333 | enable_icplb(); |
300 | 334 | ||
301 | disable_dcplb(); | 335 | disable_dcplb(); |
302 | for (i = first_mask_dcplb; i < MAX_CPLBS; i++) { | 336 | for (i = first_switched_dcplb; i < MAX_CPLBS; i++) { |
303 | dcplb_tbl[i].data = 0; | 337 | dcplb_tbl[i].data = 0; |
304 | bfin_write32(DCPLB_DATA0 + i * 4, 0); | 338 | bfin_write32(DCPLB_DATA0 + i * 4, 0); |
305 | } | 339 | } |
@@ -319,7 +353,7 @@ void set_mask_dcplbs(unsigned long *masks) | |||
319 | d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; | 353 | d_data = CPLB_SUPV_WR | CPLB_VALID | CPLB_DIRTY | PAGE_SIZE_4KB; |
320 | #ifdef CONFIG_BFIN_DCACHE | 354 | #ifdef CONFIG_BFIN_DCACHE |
321 | d_data |= CPLB_L1_CHBL; | 355 | d_data |= CPLB_L1_CHBL; |
322 | #ifdef CONFIG_BLKFIN_WT | 356 | #ifdef CONFIG_BFIN_WT |
323 | d_data |= CPLB_L1_AOW | CPLB_WT; | 357 | d_data |= CPLB_L1_AOW | CPLB_WT; |
324 | #endif | 358 | #endif |
325 | #endif | 359 | #endif |
@@ -334,5 +368,3 @@ void set_mask_dcplbs(unsigned long *masks) | |||
334 | } | 368 | } |
335 | enable_dcplb(); | 369 | enable_dcplb(); |
336 | } | 370 | } |
337 | |||
338 | #endif | ||
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinfo.c b/arch/blackfin/kernel/cplb-nompu/cplbinfo.c
index a4f0b428a34d..1e74f0b97996 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinfo.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinfo.c
@@ -33,9 +33,7 @@ | |||
33 | #include <linux/proc_fs.h> | 33 | #include <linux/proc_fs.h> |
34 | #include <linux/uaccess.h> | 34 | #include <linux/uaccess.h> |
35 | 35 | ||
36 | #include <asm/current.h> | 36 | #include <asm/cplbinit.h> |
37 | #include <asm/system.h> | ||
38 | #include <asm/cplb.h> | ||
39 | #include <asm/blackfin.h> | 37 | #include <asm/blackfin.h> |
40 | 38 | ||
41 | #define CPLB_I 1 | 39 | #define CPLB_I 1 |
@@ -174,16 +172,6 @@ static int cplbinfo_read_proc(char *page, char **start, off_t off, | |||
174 | return len; | 172 | return len; |
175 | } | 173 | } |
176 | 174 | ||
177 | static int cplbinfo_write_proc(struct file *file, const char __user *buffer, | ||
178 | unsigned long count, void *data) | ||
179 | { | ||
180 | printk(KERN_INFO "Reset the CPLB swap in/out counts.\n"); | ||
181 | memset(ipdt_swapcount_table, 0, MAX_SWITCH_I_CPLBS * sizeof(unsigned long)); | ||
182 | memset(dpdt_swapcount_table, 0, MAX_SWITCH_D_CPLBS * sizeof(unsigned long)); | ||
183 | |||
184 | return count; | ||
185 | } | ||
186 | |||
187 | static int __init cplbinfo_init(void) | 175 | static int __init cplbinfo_init(void) |
188 | { | 176 | { |
189 | struct proc_dir_entry *entry; | 177 | struct proc_dir_entry *entry; |
@@ -193,7 +181,6 @@ static int __init cplbinfo_init(void) | |||
193 | return -ENOMEM; | 181 | return -ENOMEM; |
194 | 182 | ||
195 | entry->read_proc = cplbinfo_read_proc; | 183 | entry->read_proc = cplbinfo_read_proc; |
196 | entry->write_proc = cplbinfo_write_proc; | ||
197 | entry->data = NULL; | 184 | entry->data = NULL; |
198 | 185 | ||
199 | return 0; | 186 | return 0; |
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index 6320bc45fbba..917325bfbd84 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -26,6 +26,35 @@ | |||
26 | #include <asm/cplb.h> | 26 | #include <asm/cplb.h> |
27 | #include <asm/cplbinit.h> | 27 | #include <asm/cplbinit.h> |
28 | 28 | ||
29 | #ifdef CONFIG_MAX_MEM_SIZE | ||
30 | # define CPLB_MEM CONFIG_MAX_MEM_SIZE | ||
31 | #else | ||
32 | # define CPLB_MEM CONFIG_MEM_SIZE | ||
33 | #endif | ||
34 | |||
35 | /* | ||
36 | * Number of required data CPLB switchtable entries | ||
37 | * MEMSIZE / 4 (we mostly install 4M page size CPLBs | ||
38 | * approx 16 for smaller 1MB page size CPLBs for allignment purposes | ||
39 | * 1 for L1 Data Memory | ||
40 | * possibly 1 for L2 Data Memory | ||
41 | * 1 for CONFIG_DEBUG_HUNT_FOR_ZERO | ||
42 | * 1 for ASYNC Memory | ||
43 | */ | ||
44 | #define MAX_SWITCH_D_CPLBS (((CPLB_MEM / 4) + 16 + 1 + 1 + 1 \ | ||
45 | + ASYNC_MEMORY_CPLB_COVERAGE) * 2) | ||
46 | |||
47 | /* | ||
48 | * Number of required instruction CPLB switchtable entries | ||
49 | * MEMSIZE / 4 (we mostly install 4M page size CPLBs | ||
50 | * approx 12 for smaller 1MB page size CPLBs for allignment purposes | ||
51 | * 1 for L1 Instruction Memory | ||
52 | * possibly 1 for L2 Instruction Memory | ||
53 | * 1 for CONFIG_DEBUG_HUNT_FOR_ZERO | ||
54 | */ | ||
55 | #define MAX_SWITCH_I_CPLBS (((CPLB_MEM / 4) + 12 + 1 + 1 + 1) * 2) | ||
56 | |||
57 | |||
29 | u_long icplb_table[MAX_CPLBS + 1]; | 58 | u_long icplb_table[MAX_CPLBS + 1]; |
30 | u_long dcplb_table[MAX_CPLBS + 1]; | 59 | u_long dcplb_table[MAX_CPLBS + 1]; |
31 | 60 | ||
@@ -295,6 +324,8 @@ void __init generate_cpl_tables(void) | |||
295 | struct cplb_tab *t_d = NULL; | 324 | struct cplb_tab *t_d = NULL; |
296 | struct s_cplb cplb; | 325 | struct s_cplb cplb; |
297 | 326 | ||
327 | printk(KERN_INFO "NOMPU: setting up cplb tables for global access\n"); | ||
328 | |||
298 | cplb.init_i.size = MAX_CPLBS; | 329 | cplb.init_i.size = MAX_CPLBS; |
299 | cplb.init_d.size = MAX_CPLBS; | 330 | cplb.init_d.size = MAX_CPLBS; |
300 | cplb.switch_i.size = MAX_SWITCH_I_CPLBS; | 331 | cplb.switch_i.size = MAX_SWITCH_I_CPLBS; |
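The MAX_SWITCH_*_CPLBS macros moved into this file simply count table entries: one per 4 MB page of SDRAM, a handful of fixed extras, doubled because each entry stores an address word and a data word. A quick arithmetic sketch follows; the 64 MB memory size and the async coverage count are example inputs.

```c
/* Sketch: reproduce the switch-table sizing arithmetic from the
 * comments above for a given memory size in MB.
 */
#include <stdio.h>

static int max_switch_d_cplbs(int mem_mb, int async_coverage)
{
	return ((mem_mb / 4) + 16 + 1 + 1 + 1 + async_coverage) * 2;
}

static int max_switch_i_cplbs(int mem_mb)
{
	return ((mem_mb / 4) + 12 + 1 + 1 + 1) * 2;
}

int main(void)
{
	printf("D: %d entries\n", max_switch_d_cplbs(64, 1)); /* 72 */
	printf("I: %d entries\n", max_switch_i_cplbs(64));    /* 62 */
	return 0;
}
```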
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index d6b61d56b656..2f62a9f4058a 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -59,7 +59,7 @@ void dma_alloc_init(unsigned long start, unsigned long end) | |||
59 | memset((void *)dma_base, 0, DMA_UNCACHED_REGION); | 59 | memset((void *)dma_base, 0, DMA_UNCACHED_REGION); |
60 | dma_initialized = 1; | 60 | dma_initialized = 1; |
61 | 61 | ||
62 | printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __FUNCTION__, | 62 | printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __func__, |
63 | dma_page, dma_pages, dma_base); | 63 | dma_page, dma_pages, dma_base); |
64 | } | 64 | } |
65 | 65 | ||
@@ -100,7 +100,7 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages) | |||
100 | int i; | 100 | int i; |
101 | 101 | ||
102 | if ((page + pages) > dma_pages) { | 102 | if ((page + pages) > dma_pages) { |
103 | printk(KERN_ERR "%s: freeing outside range.\n", __FUNCTION__); | 103 | printk(KERN_ERR "%s: freeing outside range.\n", __func__); |
104 | BUG(); | 104 | BUG(); |
105 | } | 105 | } |
106 | 106 | ||
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index 1904d8b53328..e698554895a7 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -52,12 +52,14 @@ static volatile GPTIMER_timer_regs *const timer_regs[MAX_BLACKFIN_GPTIMERS] = | |||
52 | (GPTIMER_timer_regs *)TIMER5_CONFIG, | 52 | (GPTIMER_timer_regs *)TIMER5_CONFIG, |
53 | (GPTIMER_timer_regs *)TIMER6_CONFIG, | 53 | (GPTIMER_timer_regs *)TIMER6_CONFIG, |
54 | (GPTIMER_timer_regs *)TIMER7_CONFIG, | 54 | (GPTIMER_timer_regs *)TIMER7_CONFIG, |
55 | #endif | 55 | # if (MAX_BLACKFIN_GPTIMERS > 8) |
56 | #if (MAX_BLACKFIN_GPTIMERS > 8) | ||
57 | (GPTIMER_timer_regs *)TIMER8_CONFIG, | 56 | (GPTIMER_timer_regs *)TIMER8_CONFIG, |
58 | (GPTIMER_timer_regs *)TIMER9_CONFIG, | 57 | (GPTIMER_timer_regs *)TIMER9_CONFIG, |
59 | (GPTIMER_timer_regs *)TIMER10_CONFIG, | 58 | (GPTIMER_timer_regs *)TIMER10_CONFIG, |
59 | # if (MAX_BLACKFIN_GPTIMERS > 11) | ||
60 | (GPTIMER_timer_regs *)TIMER11_CONFIG, | 60 | (GPTIMER_timer_regs *)TIMER11_CONFIG, |
61 | # endif | ||
62 | # endif | ||
61 | #endif | 63 | #endif |
62 | }; | 64 | }; |
63 | 65 | ||
@@ -80,12 +82,14 @@ static uint32_t const trun_mask[MAX_BLACKFIN_GPTIMERS] = | |||
80 | TIMER_STATUS_TRUN5, | 82 | TIMER_STATUS_TRUN5, |
81 | TIMER_STATUS_TRUN6, | 83 | TIMER_STATUS_TRUN6, |
82 | TIMER_STATUS_TRUN7, | 84 | TIMER_STATUS_TRUN7, |
83 | #endif | 85 | # if (MAX_BLACKFIN_GPTIMERS > 8) |
84 | #if (MAX_BLACKFIN_GPTIMERS > 8) | ||
85 | TIMER_STATUS_TRUN8, | 86 | TIMER_STATUS_TRUN8, |
86 | TIMER_STATUS_TRUN9, | 87 | TIMER_STATUS_TRUN9, |
87 | TIMER_STATUS_TRUN10, | 88 | TIMER_STATUS_TRUN10, |
89 | # if (MAX_BLACKFIN_GPTIMERS > 11) | ||
88 | TIMER_STATUS_TRUN11, | 90 | TIMER_STATUS_TRUN11, |
91 | # endif | ||
92 | # endif | ||
89 | #endif | 93 | #endif |
90 | }; | 94 | }; |
91 | 95 | ||
@@ -100,12 +104,14 @@ static uint32_t const tovf_mask[MAX_BLACKFIN_GPTIMERS] = | |||
100 | TIMER_STATUS_TOVF5, | 104 | TIMER_STATUS_TOVF5, |
101 | TIMER_STATUS_TOVF6, | 105 | TIMER_STATUS_TOVF6, |
102 | TIMER_STATUS_TOVF7, | 106 | TIMER_STATUS_TOVF7, |
103 | #endif | 107 | # if (MAX_BLACKFIN_GPTIMERS > 8) |
104 | #if (MAX_BLACKFIN_GPTIMERS > 8) | ||
105 | TIMER_STATUS_TOVF8, | 108 | TIMER_STATUS_TOVF8, |
106 | TIMER_STATUS_TOVF9, | 109 | TIMER_STATUS_TOVF9, |
107 | TIMER_STATUS_TOVF10, | 110 | TIMER_STATUS_TOVF10, |
111 | # if (MAX_BLACKFIN_GPTIMERS > 11) | ||
108 | TIMER_STATUS_TOVF11, | 112 | TIMER_STATUS_TOVF11, |
113 | # endif | ||
114 | # endif | ||
109 | #endif | 115 | #endif |
110 | }; | 116 | }; |
111 | 117 | ||
@@ -120,12 +126,14 @@ static uint32_t const timil_mask[MAX_BLACKFIN_GPTIMERS] = | |||
120 | TIMER_STATUS_TIMIL5, | 126 | TIMER_STATUS_TIMIL5, |
121 | TIMER_STATUS_TIMIL6, | 127 | TIMER_STATUS_TIMIL6, |
122 | TIMER_STATUS_TIMIL7, | 128 | TIMER_STATUS_TIMIL7, |
123 | #endif | 129 | # if (MAX_BLACKFIN_GPTIMERS > 8) |
124 | #if (MAX_BLACKFIN_GPTIMERS > 8) | ||
125 | TIMER_STATUS_TIMIL8, | 130 | TIMER_STATUS_TIMIL8, |
126 | TIMER_STATUS_TIMIL9, | 131 | TIMER_STATUS_TIMIL9, |
127 | TIMER_STATUS_TIMIL10, | 132 | TIMER_STATUS_TIMIL10, |
133 | # if (MAX_BLACKFIN_GPTIMERS > 11) | ||
128 | TIMER_STATUS_TIMIL11, | 134 | TIMER_STATUS_TIMIL11, |
135 | # endif | ||
136 | # endif | ||
129 | #endif | 137 | #endif |
130 | }; | 138 | }; |
131 | 139 | ||
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 6b8459c66163..be9fdd00d7cb 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -32,6 +32,8 @@ | |||
32 | #include <linux/unistd.h> | 32 | #include <linux/unistd.h> |
33 | #include <linux/user.h> | 33 | #include <linux/user.h> |
34 | #include <linux/uaccess.h> | 34 | #include <linux/uaccess.h> |
35 | #include <linux/sched.h> | ||
36 | #include <linux/tick.h> | ||
35 | #include <linux/fs.h> | 37 | #include <linux/fs.h> |
36 | #include <linux/err.h> | 38 | #include <linux/err.h> |
37 | 39 | ||
@@ -69,33 +71,44 @@ EXPORT_SYMBOL(pm_power_off); | |||
69 | * The idle loop on BFIN | 71 | * The idle loop on BFIN |
70 | */ | 72 | */ |
71 | #ifdef CONFIG_IDLE_L1 | 73 | #ifdef CONFIG_IDLE_L1 |
72 | void default_idle(void)__attribute__((l1_text)); | 74 | static void default_idle(void)__attribute__((l1_text)); |
73 | void cpu_idle(void)__attribute__((l1_text)); | 75 | void cpu_idle(void)__attribute__((l1_text)); |
74 | #endif | 76 | #endif |
75 | 77 | ||
76 | void default_idle(void) | 78 | /* |
79 | * This is our default idle handler. We need to disable | ||
80 | * interrupts here to ensure we don't miss a wakeup call. | ||
81 | */ | ||
82 | static void default_idle(void) | ||
77 | { | 83 | { |
78 | while (!need_resched()) { | 84 | local_irq_disable(); |
79 | local_irq_disable(); | 85 | if (!need_resched()) |
80 | if (likely(!need_resched())) | 86 | idle_with_irq_disabled(); |
81 | idle_with_irq_disabled(); | ||
82 | local_irq_enable(); | ||
83 | } | ||
84 | } | ||
85 | 87 | ||
86 | void (*idle)(void) = default_idle; | 88 | local_irq_enable(); |
89 | } | ||
87 | 90 | ||
88 | /* | 91 | /* |
89 | * The idle thread. There's no useful work to be | 92 | * The idle thread. We try to conserve power, while trying to keep |
90 | * done, so just try to conserve power and have a | 93 | * overall latency low. The architecture specific idle is passed |
91 | * low exit latency (ie sit in a loop waiting for | 94 | * a value to indicate the level of "idleness" of the system. |
92 | * somebody to say that they'd like to reschedule) | ||
93 | */ | 95 | */ |
94 | void cpu_idle(void) | 96 | void cpu_idle(void) |
95 | { | 97 | { |
96 | /* endless idle loop with no priority at all */ | 98 | /* endless idle loop with no priority at all */ |
97 | while (1) { | 99 | while (1) { |
98 | idle(); | 100 | void (*idle)(void) = pm_idle; |
101 | |||
102 | #ifdef CONFIG_HOTPLUG_CPU | ||
103 | if (cpu_is_offline(smp_processor_id())) | ||
104 | cpu_die(); | ||
105 | #endif | ||
106 | if (!idle) | ||
107 | idle = default_idle; | ||
108 | tick_nohz_stop_sched_tick(); | ||
109 | while (!need_resched()) | ||
110 | idle(); | ||
111 | tick_nohz_restart_sched_tick(); | ||
99 | preempt_enable_no_resched(); | 112 | preempt_enable_no_resched(); |
100 | schedule(); | 113 | schedule(); |
101 | preempt_disable(); | 114 | preempt_disable(); |
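The reworked idle loop picks a platform-installed pm_idle handler if present, falls back to default_idle, and brackets the inner loop with the NO_HZ tick stop/restart calls. A very rough, userspace-only sketch of that control flow follows; every function here is a stub standing in for the kernel primitive of the same name, not the real API.

```c
/* Sketch of the cpu_idle()/default_idle() control flow above,
 * with stubbed-out kernel primitives.
 */
#include <stdio.h>

static int resched_pending;

static int  need_resched(void)                 { return resched_pending; }
static void tick_nohz_stop_sched_tick(void)    { puts("tick stopped"); }
static void tick_nohz_restart_sched_tick(void) { puts("tick restarted"); }
static void schedule(void)                     { puts("schedule()"); resched_pending = 0; }

static void default_idle(void)
{
	/* the real handler disables IRQs, re-checks need_resched(),
	 * then sleeps with interrupts disabled */
	puts("idle with irq disabled");
	resched_pending = 1;	/* pretend a wakeup arrived */
}

static void (*pm_idle)(void);	/* optional platform hook, may stay NULL */

static void cpu_idle_once(void)
{
	void (*idle)(void) = pm_idle;

	if (!idle)
		idle = default_idle;
	tick_nohz_stop_sched_tick();
	while (!need_resched())
		idle();
	tick_nohz_restart_sched_tick();
	schedule();
}

int main(void)
{
	cpu_idle_once();
	return 0;
}
```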
@@ -189,7 +202,7 @@ copy_thread(int nr, unsigned long clone_flags, | |||
189 | * sys_execve() executes a new program. | 202 | * sys_execve() executes a new program. |
190 | */ | 203 | */ |
191 | 204 | ||
192 | asmlinkage int sys_execve(char *name, char **argv, char **envp) | 205 | asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp) |
193 | { | 206 | { |
194 | int error; | 207 | int error; |
195 | char *filename; | 208 | char *filename; |
@@ -232,23 +245,25 @@ unsigned long get_wchan(struct task_struct *p) | |||
232 | 245 | ||
233 | void finish_atomic_sections (struct pt_regs *regs) | 246 | void finish_atomic_sections (struct pt_regs *regs) |
234 | { | 247 | { |
248 | int __user *up0 = (int __user *)®s->p0; | ||
249 | |||
235 | if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END) | 250 | if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END) |
236 | return; | 251 | return; |
237 | 252 | ||
238 | switch (regs->pc) { | 253 | switch (regs->pc) { |
239 | case ATOMIC_XCHG32 + 2: | 254 | case ATOMIC_XCHG32 + 2: |
240 | put_user(regs->r1, (int *)regs->p0); | 255 | put_user(regs->r1, up0); |
241 | regs->pc += 2; | 256 | regs->pc += 2; |
242 | break; | 257 | break; |
243 | 258 | ||
244 | case ATOMIC_CAS32 + 2: | 259 | case ATOMIC_CAS32 + 2: |
245 | case ATOMIC_CAS32 + 4: | 260 | case ATOMIC_CAS32 + 4: |
246 | if (regs->r0 == regs->r1) | 261 | if (regs->r0 == regs->r1) |
247 | put_user(regs->r2, (int *)regs->p0); | 262 | put_user(regs->r2, up0); |
248 | regs->pc = ATOMIC_CAS32 + 8; | 263 | regs->pc = ATOMIC_CAS32 + 8; |
249 | break; | 264 | break; |
250 | case ATOMIC_CAS32 + 6: | 265 | case ATOMIC_CAS32 + 6: |
251 | put_user(regs->r2, (int *)regs->p0); | 266 | put_user(regs->r2, up0); |
252 | regs->pc += 2; | 267 | regs->pc += 2; |
253 | break; | 268 | break; |
254 | 269 | ||
@@ -256,7 +271,7 @@ void finish_atomic_sections (struct pt_regs *regs) | |||
256 | regs->r0 = regs->r1 + regs->r0; | 271 | regs->r0 = regs->r1 + regs->r0; |
257 | /* fall through */ | 272 | /* fall through */ |
258 | case ATOMIC_ADD32 + 4: | 273 | case ATOMIC_ADD32 + 4: |
259 | put_user(regs->r0, (int *)regs->p0); | 274 | put_user(regs->r0, up0); |
260 | regs->pc = ATOMIC_ADD32 + 6; | 275 | regs->pc = ATOMIC_ADD32 + 6; |
261 | break; | 276 | break; |
262 | 277 | ||
@@ -264,7 +279,7 @@ void finish_atomic_sections (struct pt_regs *regs) | |||
264 | regs->r0 = regs->r1 - regs->r0; | 279 | regs->r0 = regs->r1 - regs->r0; |
265 | /* fall through */ | 280 | /* fall through */ |
266 | case ATOMIC_SUB32 + 4: | 281 | case ATOMIC_SUB32 + 4: |
267 | put_user(regs->r0, (int *)regs->p0); | 282 | put_user(regs->r0, up0); |
268 | regs->pc = ATOMIC_SUB32 + 6; | 283 | regs->pc = ATOMIC_SUB32 + 6; |
269 | break; | 284 | break; |
270 | 285 | ||
@@ -272,7 +287,7 @@ void finish_atomic_sections (struct pt_regs *regs) | |||
272 | regs->r0 = regs->r1 | regs->r0; | 287 | regs->r0 = regs->r1 | regs->r0; |
273 | /* fall through */ | 288 | /* fall through */ |
274 | case ATOMIC_IOR32 + 4: | 289 | case ATOMIC_IOR32 + 4: |
275 | put_user(regs->r0, (int *)regs->p0); | 290 | put_user(regs->r0, up0); |
276 | regs->pc = ATOMIC_IOR32 + 6; | 291 | regs->pc = ATOMIC_IOR32 + 6; |
277 | break; | 292 | break; |
278 | 293 | ||
@@ -280,7 +295,7 @@ void finish_atomic_sections (struct pt_regs *regs) | |||
280 | regs->r0 = regs->r1 & regs->r0; | 295 | regs->r0 = regs->r1 & regs->r0; |
281 | /* fall through */ | 296 | /* fall through */ |
282 | case ATOMIC_AND32 + 4: | 297 | case ATOMIC_AND32 + 4: |
283 | put_user(regs->r0, (int *)regs->p0); | 298 | put_user(regs->r0, up0); |
284 | regs->pc = ATOMIC_AND32 + 6; | 299 | regs->pc = ATOMIC_AND32 + 6; |
285 | break; | 300 | break; |
286 | 301 | ||
@@ -288,7 +303,7 @@ void finish_atomic_sections (struct pt_regs *regs) | |||
288 | regs->r0 = regs->r1 ^ regs->r0; | 303 | regs->r0 = regs->r1 ^ regs->r0; |
289 | /* fall through */ | 304 | /* fall through */ |
290 | case ATOMIC_XOR32 + 4: | 305 | case ATOMIC_XOR32 + 4: |
291 | put_user(regs->r0, (int *)regs->p0); | 306 | put_user(regs->r0, up0); |
292 | regs->pc = ATOMIC_XOR32 + 6; | 307 | regs->pc = ATOMIC_XOR32 + 6; |
293 | break; | 308 | break; |
294 | } | 309 | } |
@@ -309,6 +324,12 @@ int _access_ok(unsigned long addr, unsigned long size) | |||
309 | return 1; | 324 | return 1; |
310 | if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end) | 325 | if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end) |
311 | return 1; | 326 | return 1; |
327 | |||
328 | #ifdef CONFIG_ROMFS_MTD_FS | ||
329 | /* For XIP, allow user space to use pointers within the ROMFS. */ | ||
330 | if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end) | ||
331 | return 1; | ||
332 | #endif | ||
312 | #else | 333 | #else |
313 | if (addr >= memory_start && (addr + size) <= physical_mem_end) | 334 | if (addr >= memory_start && (addr + size) <= physical_mem_end) |
314 | return 1; | 335 | return 1; |
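The _access_ok() addition lets user pointers that fall inside the MTD-backed ROMFS image pass validation when executing in place. A minimal range-check sketch is below; the region bounds are made-up numbers, not the real memory_mtd_* values.

```c
/* Sketch: the extra "inside the ROMFS image" range check added to
 * _access_ok() above.
 */
#include <stdio.h>

static unsigned long memory_mtd_start = 0x02000000;
static unsigned long memory_mtd_end   = 0x02400000;

static int in_romfs(unsigned long addr, unsigned long size)
{
	return addr >= memory_mtd_start && (addr + size) <= memory_mtd_end;
}

int main(void)
{
	printf("%d\n", in_romfs(0x02100000, 4096));	/* 1: inside the image */
	printf("%d\n", in_romfs(0x02400000, 1));	/* 0: past the end */
	return 0;
}
```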
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 85caf9b711a1..b4f062c172c6 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -193,6 +193,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
193 | { | 193 | { |
194 | int ret; | 194 | int ret; |
195 | int add = 0; | 195 | int add = 0; |
196 | unsigned long __user *datap = (unsigned long __user *)data; | ||
196 | 197 | ||
197 | switch (request) { | 198 | switch (request) { |
198 | /* when I and D space are separate, these will need to be fixed. */ | 199 | /* when I and D space are separate, these will need to be fixed. */ |
@@ -229,7 +230,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
229 | pr_debug("ptrace: copied size %d [0x%08lx]\n", copied, tmp); | 230 | pr_debug("ptrace: copied size %d [0x%08lx]\n", copied, tmp); |
230 | if (copied != sizeof(tmp)) | 231 | if (copied != sizeof(tmp)) |
231 | break; | 232 | break; |
232 | ret = put_user(tmp, (unsigned long *)data); | 233 | ret = put_user(tmp, datap); |
233 | break; | 234 | break; |
234 | } | 235 | } |
235 | 236 | ||
@@ -263,7 +264,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
263 | } else { | 264 | } else { |
264 | tmp = get_reg(child, addr); | 265 | tmp = get_reg(child, addr); |
265 | } | 266 | } |
266 | ret = put_user(tmp, (unsigned long *)data); | 267 | ret = put_user(tmp, datap); |
267 | break; | 268 | break; |
268 | } | 269 | } |
269 | 270 | ||
@@ -389,7 +390,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
389 | { | 390 | { |
390 | 391 | ||
391 | /* Get all gp regs from the child. */ | 392 | /* Get all gp regs from the child. */ |
392 | ret = ptrace_getregs(child, (void __user *)data); | 393 | ret = ptrace_getregs(child, datap); |
393 | break; | 394 | break; |
394 | } | 395 | } |
395 | 396 | ||
diff --git a/arch/blackfin/kernel/reboot.c b/arch/blackfin/kernel/reboot.c
index 483f93dfc1b5..367e2dc09881 100644
--- a/arch/blackfin/kernel/reboot.c
+++ b/arch/blackfin/kernel/reboot.c
@@ -11,45 +11,56 @@ | |||
11 | #include <asm/reboot.h> | 11 | #include <asm/reboot.h> |
12 | #include <asm/system.h> | 12 | #include <asm/system.h> |
13 | 13 | ||
14 | #if defined(BF537_FAMILY) || defined(BF533_FAMILY) || defined(BF527_FAMILY) | 14 | /* A system soft reset makes external memory unusable so force |
15 | #define SYSCR_VAL 0x0 | 15 | * this function into L1. We use the compiler ssync here rather |
16 | #elif defined(BF561_FAMILY) | 16 | * than SSYNC() because it's safe (no interrupts and such) and |
17 | #define SYSCR_VAL 0x20 | 17 | * we save some L1. We do not need to force sanity in the SYSCR |
18 | #elif defined(BF548_FAMILY) | 18 | * register as the BMODE selection bit is cleared by the soft |
19 | #define SYSCR_VAL 0x10 | 19 | * reset while the Core B bit (on dual core parts) is cleared by |
20 | #endif | 20 | * the core reset. |
21 | |||
22 | /* | ||
23 | * Delay min 5 SCLK cycles using worst case CCLK/SCLK ratio (15) | ||
24 | */ | ||
25 | #define SWRST_DELAY (5 * 15) | ||
26 | |||
27 | /* A system soft reset makes external memory unusable | ||
28 | * so force this function into L1. | ||
29 | */ | 21 | */ |
30 | __attribute__((l1_text)) | 22 | __attribute__((l1_text)) |
31 | void bfin_reset(void) | 23 | void bfin_reset(void) |
32 | { | 24 | { |
33 | /* force BMODE and disable Core B (as needed) */ | 25 | /* Wait for completion of "system" events such as cache line |
34 | bfin_write_SYSCR(SYSCR_VAL); | 26 | * line fills so that we avoid infinite stalls later on as |
35 | 27 | * much as possible. This code is in L1, so it won't trigger | |
36 | /* we use asm ssync here because it's save and we save some L1 */ | 28 | * any such event after this point in time. |
37 | asm("ssync;"); | 29 | */ |
30 | __builtin_bfin_ssync(); | ||
38 | 31 | ||
39 | while (1) { | 32 | while (1) { |
40 | /* initiate system soft reset with magic 0x7 */ | 33 | /* Initiate System software reset. */ |
41 | bfin_write_SWRST(0x7); | 34 | bfin_write_SWRST(0x7); |
42 | 35 | ||
43 | /* Wait for System reset to actually reset, needs to be 5 SCLKs, */ | 36 | /* Due to the way reset is handled in the hardware, we need |
44 | /* Assume CCLK / SCLK ratio is worst case (15), and use 5*15 */ | 37 | * to delay for 7 SCLKS. The only reliable way to do this is |
45 | 38 | * to calculate the CCLK/SCLK ratio and multiply 7. For now, | |
46 | asm("LSETUP(.Lfoo,.Lfoo) LC0 = %0\n .Lfoo: NOP;\n" | 39 | * we'll assume worse case which is a 1:15 ratio. |
47 | : : "a" (SWRST_DELAY) : "LC0", "LT0", "LB0"); | 40 | */ |
41 | asm( | ||
42 | "LSETUP (1f, 1f) LC0 = %0\n" | ||
43 | "1: nop;" | ||
44 | : | ||
45 | : "a" (15 * 7) | ||
46 | : "LC0", "LB0", "LT0" | ||
47 | ); | ||
48 | 48 | ||
49 | /* clear system soft reset */ | 49 | /* Clear System software reset */ |
50 | bfin_write_SWRST(0); | 50 | bfin_write_SWRST(0); |
51 | asm("ssync;"); | 51 | |
52 | /* issue core reset */ | 52 | /* Wait for the SWRST write to complete. Cannot rely on SSYNC |
53 | * though as the System state is all reset now. | ||
54 | */ | ||
55 | asm( | ||
56 | "LSETUP (1f, 1f) LC1 = %0\n" | ||
57 | "1: nop;" | ||
58 | : | ||
59 | : "a" (15 * 1) | ||
60 | : "LC1", "LB1", "LT1" | ||
61 | ); | ||
62 | |||
63 | /* Issue core reset */ | ||
53 | asm("raise 1"); | 64 | asm("raise 1"); |
54 | } | 65 | } |
55 | } | 66 | } |
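The rewritten bfin_reset() has to hold the soft reset for a number of SCLK cycles, but its nop loop runs at CCLK, so the loop count is the SCLK count scaled by the worst-case CCLK:SCLK ratio. The arithmetic is sketched below, assuming the same 15:1 worst case the comment uses.

```c
/* Sketch: compute the nop-loop counts used around the SWRST writes
 * in bfin_reset() above (7 SCLKs after asserting, 1 after clearing).
 */
#include <stdio.h>

static unsigned int swrst_loop_count(unsigned int sclk_cycles,
				     unsigned int worst_cclk_per_sclk)
{
	return sclk_cycles * worst_cclk_per_sclk;
}

int main(void)
{
	printf("%u\n", swrst_loop_count(7, 15));	/* 105 core-clock nops */
	printf("%u\n", swrst_loop_count(1, 15));	/* 15 core-clock nops */
	return 0;
}
```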
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index 2255c289a714..8efea004aecb 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -35,6 +35,7 @@ u16 _bfin_swrst; | |||
35 | EXPORT_SYMBOL(_bfin_swrst); | 35 | EXPORT_SYMBOL(_bfin_swrst); |
36 | 36 | ||
37 | unsigned long memory_start, memory_end, physical_mem_end; | 37 | unsigned long memory_start, memory_end, physical_mem_end; |
38 | unsigned long _rambase, _ramstart, _ramend; | ||
38 | unsigned long reserved_mem_dcache_on; | 39 | unsigned long reserved_mem_dcache_on; |
39 | unsigned long reserved_mem_icache_on; | 40 | unsigned long reserved_mem_icache_on; |
40 | EXPORT_SYMBOL(memory_start); | 41 | EXPORT_SYMBOL(memory_start); |
@@ -106,7 +107,7 @@ void __init bf53x_relocate_l1_mem(void) | |||
106 | 107 | ||
107 | l1_code_length = _etext_l1 - _stext_l1; | 108 | l1_code_length = _etext_l1 - _stext_l1; |
108 | if (l1_code_length > L1_CODE_LENGTH) | 109 | if (l1_code_length > L1_CODE_LENGTH) |
109 | l1_code_length = L1_CODE_LENGTH; | 110 | panic("L1 Instruction SRAM Overflow\n"); |
110 | /* cannot complain as printk is not available as yet. | 111 | /* cannot complain as printk is not available as yet. |
111 | * But we can continue booting and complain later! | 112 | * But we can continue booting and complain later! |
112 | */ | 113 | */ |
@@ -116,19 +117,18 @@ void __init bf53x_relocate_l1_mem(void) | |||
116 | 117 | ||
117 | l1_data_a_length = _ebss_l1 - _sdata_l1; | 118 | l1_data_a_length = _ebss_l1 - _sdata_l1; |
118 | if (l1_data_a_length > L1_DATA_A_LENGTH) | 119 | if (l1_data_a_length > L1_DATA_A_LENGTH) |
119 | l1_data_a_length = L1_DATA_A_LENGTH; | 120 | panic("L1 Data SRAM Bank A Overflow\n"); |
120 | 121 | ||
121 | /* Copy _sdata_l1 to _ebss_l1 to L1 data bank A SRAM */ | 122 | /* Copy _sdata_l1 to _ebss_l1 to L1 data bank A SRAM */ |
122 | dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length); | 123 | dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length); |
123 | 124 | ||
124 | l1_data_b_length = _ebss_b_l1 - _sdata_b_l1; | 125 | l1_data_b_length = _ebss_b_l1 - _sdata_b_l1; |
125 | if (l1_data_b_length > L1_DATA_B_LENGTH) | 126 | if (l1_data_b_length > L1_DATA_B_LENGTH) |
126 | l1_data_b_length = L1_DATA_B_LENGTH; | 127 | panic("L1 Data SRAM Bank B Overflow\n"); |
127 | 128 | ||
128 | /* Copy _sdata_b_l1 to _ebss_b_l1 to L1 data bank B SRAM */ | 129 | /* Copy _sdata_b_l1 to _ebss_b_l1 to L1 data bank B SRAM */ |
129 | dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length + | 130 | dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length + |
130 | l1_data_a_length, l1_data_b_length); | 131 | l1_data_a_length, l1_data_b_length); |
131 | |||
132 | } | 132 | } |
133 | 133 | ||
134 | /* add_memory_region to memmap */ | 134 | /* add_memory_region to memmap */ |
@@ -547,11 +547,38 @@ static __init void memory_setup(void) | |||
547 | ); | 547 | ); |
548 | } | 548 | } |
549 | 549 | ||
550 | /* | ||
551 | * Find the lowest, highest page frame number we have available | ||
552 | */ | ||
553 | void __init find_min_max_pfn(void) | ||
554 | { | ||
555 | int i; | ||
556 | |||
557 | max_pfn = 0; | ||
558 | min_low_pfn = memory_end; | ||
559 | |||
560 | for (i = 0; i < bfin_memmap.nr_map; i++) { | ||
561 | unsigned long start, end; | ||
562 | /* RAM? */ | ||
563 | if (bfin_memmap.map[i].type != BFIN_MEMMAP_RAM) | ||
564 | continue; | ||
565 | start = PFN_UP(bfin_memmap.map[i].addr); | ||
566 | end = PFN_DOWN(bfin_memmap.map[i].addr + | ||
567 | bfin_memmap.map[i].size); | ||
568 | if (start >= end) | ||
569 | continue; | ||
570 | if (end > max_pfn) | ||
571 | max_pfn = end; | ||
572 | if (start < min_low_pfn) | ||
573 | min_low_pfn = start; | ||
574 | } | ||
575 | } | ||
576 | |||
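The new find_min_max_pfn() walks the memory map, skips non-RAM entries, rounds each region's start up and end down to page frames, and tracks the overall lowest and highest frame. A userspace sketch of the same scan is below; the struct layout and the example map are simplified stand-ins for bfin_memmap.

```c
/* Sketch: compute min/max page frame numbers over a memory map,
 * mirroring the PFN_UP/PFN_DOWN rounding in the hunk above.
 */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

struct region { unsigned long addr, size; int is_ram; };

static void find_min_max_pfn(const struct region *map, int n,
			     unsigned long *min_pfn, unsigned long *max_pfn)
{
	int i;

	*min_pfn = ~0UL;
	*max_pfn = 0;
	for (i = 0; i < n; i++) {
		unsigned long start, end;

		if (!map[i].is_ram)
			continue;
		start = PFN_UP(map[i].addr);
		end   = PFN_DOWN(map[i].addr + map[i].size);
		if (start >= end)
			continue;
		if (end > *max_pfn)
			*max_pfn = end;
		if (start < *min_pfn)
			*min_pfn = start;
	}
}

int main(void)
{
	const struct region map[] = {
		{ 0x00000000, 0x2000000, 1 },	/* 32 MB of RAM */
		{ 0x20000000, 0x0100000, 0 },	/* async flash, not RAM */
	};
	unsigned long lo, hi;

	find_min_max_pfn(map, 2, &lo, &hi);
	printf("pfn %lu..%lu\n", lo, hi);	/* 0..8192 */
	return 0;
}
```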
550 | static __init void setup_bootmem_allocator(void) | 577 | static __init void setup_bootmem_allocator(void) |
551 | { | 578 | { |
552 | int bootmap_size; | 579 | int bootmap_size; |
553 | int i; | 580 | int i; |
554 | unsigned long min_pfn, max_pfn; | 581 | unsigned long start_pfn, end_pfn; |
555 | unsigned long curr_pfn, last_pfn, size; | 582 | unsigned long curr_pfn, last_pfn, size; |
556 | 583 | ||
557 | /* mark memory between memory_start and memory_end usable */ | 584 | /* mark memory between memory_start and memory_end usable */ |
@@ -561,8 +588,19 @@ static __init void setup_bootmem_allocator(void) | |||
561 | sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map); | 588 | sanitize_memmap(bfin_memmap.map, &bfin_memmap.nr_map); |
562 | print_memory_map("boot memmap"); | 589 | print_memory_map("boot memmap"); |
563 | 590 | ||
564 | min_pfn = PAGE_OFFSET >> PAGE_SHIFT; | 591 | /* intialize globals in linux/bootmem.h */ |
565 | max_pfn = memory_end >> PAGE_SHIFT; | 592 | find_min_max_pfn(); |
593 | /* pfn of the last usable page frame */ | ||
594 | if (max_pfn > memory_end >> PAGE_SHIFT) | ||
595 | max_pfn = memory_end >> PAGE_SHIFT; | ||
596 | /* pfn of last page frame directly mapped by kernel */ | ||
597 | max_low_pfn = max_pfn; | ||
598 | /* pfn of the first usable page frame after kernel image*/ | ||
599 | if (min_low_pfn < memory_start >> PAGE_SHIFT) | ||
600 | min_low_pfn = memory_start >> PAGE_SHIFT; | ||
601 | |||
602 | start_pfn = PAGE_OFFSET >> PAGE_SHIFT; | ||
603 | end_pfn = memory_end >> PAGE_SHIFT; | ||
566 | 604 | ||
567 | /* | 605 | /* |
568 | * give all the memory to the bootmap allocator, tell it to put the | 606 | * give all the memory to the bootmap allocator, tell it to put the |
@@ -570,7 +608,7 @@ static __init void setup_bootmem_allocator(void) | |||
570 | */ | 608 | */ |
571 | bootmap_size = init_bootmem_node(NODE_DATA(0), | 609 | bootmap_size = init_bootmem_node(NODE_DATA(0), |
572 | memory_start >> PAGE_SHIFT, /* map goes here */ | 610 | memory_start >> PAGE_SHIFT, /* map goes here */ |
573 | min_pfn, max_pfn); | 611 | start_pfn, end_pfn); |
574 | 612 | ||
575 | /* register the memmap regions with the bootmem allocator */ | 613 | /* register the memmap regions with the bootmem allocator */ |
576 | for (i = 0; i < bfin_memmap.nr_map; i++) { | 614 | for (i = 0; i < bfin_memmap.nr_map; i++) { |
@@ -583,7 +621,7 @@ static __init void setup_bootmem_allocator(void) | |||
583 | * We are rounding up the start address of usable memory: | 621 | * We are rounding up the start address of usable memory: |
584 | */ | 622 | */ |
585 | curr_pfn = PFN_UP(bfin_memmap.map[i].addr); | 623 | curr_pfn = PFN_UP(bfin_memmap.map[i].addr); |
586 | if (curr_pfn >= max_pfn) | 624 | if (curr_pfn >= end_pfn) |
587 | continue; | 625 | continue; |
588 | /* | 626 | /* |
589 | * ... and at the end of the usable range downwards: | 627 | * ... and at the end of the usable range downwards: |
@@ -591,8 +629,8 @@ static __init void setup_bootmem_allocator(void) | |||
591 | last_pfn = PFN_DOWN(bfin_memmap.map[i].addr + | 629 | last_pfn = PFN_DOWN(bfin_memmap.map[i].addr + |
592 | bfin_memmap.map[i].size); | 630 | bfin_memmap.map[i].size); |
593 | 631 | ||
594 | if (last_pfn > max_pfn) | 632 | if (last_pfn > end_pfn) |
595 | last_pfn = max_pfn; | 633 | last_pfn = end_pfn; |
596 | 634 | ||
597 | /* | 635 | /* |
598 | * .. finally, did all the rounding and playing | 636 | * .. finally, did all the rounding and playing |
@@ -611,9 +649,59 @@ static __init void setup_bootmem_allocator(void) | |||
611 | BOOTMEM_DEFAULT); | 649 | BOOTMEM_DEFAULT); |
612 | } | 650 | } |
613 | 651 | ||
652 | #define EBSZ_TO_MEG(ebsz) \ | ||
653 | ({ \ | ||
654 | int meg = 0; \ | ||
655 | switch (ebsz & 0xf) { \ | ||
656 | case 0x1: meg = 16; break; \ | ||
657 | case 0x3: meg = 32; break; \ | ||
658 | case 0x5: meg = 64; break; \ | ||
659 | case 0x7: meg = 128; break; \ | ||
660 | case 0x9: meg = 256; break; \ | ||
661 | case 0xb: meg = 512; break; \ | ||
662 | } \ | ||
663 | meg; \ | ||
664 | }) | ||
665 | static inline int __init get_mem_size(void) | ||
666 | { | ||
667 | #ifdef CONFIG_MEM_SIZE | ||
668 | return CONFIG_MEM_SIZE; | ||
669 | #else | ||
670 | # if defined(EBIU_SDBCTL) | ||
671 | # if defined(BF561_FAMILY) | ||
672 | int ret = 0; | ||
673 | u32 sdbctl = bfin_read_EBIU_SDBCTL(); | ||
674 | ret += EBSZ_TO_MEG(sdbctl >> 0); | ||
675 | ret += EBSZ_TO_MEG(sdbctl >> 8); | ||
676 | ret += EBSZ_TO_MEG(sdbctl >> 16); | ||
677 | ret += EBSZ_TO_MEG(sdbctl >> 24); | ||
678 | return ret; | ||
679 | # else | ||
680 | return EBSZ_TO_MEG(bfin_read_EBIU_SDBCTL()); | ||
681 | # endif | ||
682 | # elif defined(EBIU_DDRCTL1) | ||
683 | u32 ddrctl = bfin_read_EBIU_DDRCTL1(); | ||
684 | int ret = 0; | ||
685 | switch (ddrctl & 0xc0000) { | ||
686 | case DEVSZ_64: ret = 64 / 8; break; | ||
687 | case DEVSZ_128: ret = 128 / 8; break; | ||
688 | case DEVSZ_256: ret = 256 / 8; break; | ||
689 | case DEVSZ_512: ret = 512 / 8; break; | ||
690 | } | ||
691 | switch (ddrctl & 0x30000) { | ||
692 | case DEVWD_4: ret *= 2; /* fall through */ | ||
693 | case DEVWD_8: ret *= 2; /* fall through */ | ||
694 | case DEVWD_16: break; | ||
695 | } | ||
696 | return ret; | ||
697 | # endif | ||
698 | #endif | ||
699 | BUG(); | ||
700 | } | ||
701 | |||
614 | void __init setup_arch(char **cmdline_p) | 702 | void __init setup_arch(char **cmdline_p) |
615 | { | 703 | { |
616 | unsigned long l1_length, sclk, cclk; | 704 | unsigned long sclk, cclk; |
617 | 705 | ||
618 | #ifdef CONFIG_DUMMY_CONSOLE | 706 | #ifdef CONFIG_DUMMY_CONSOLE |
619 | conswitchp = &dummy_con; | 707 | conswitchp = &dummy_con; |
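get_mem_size() above sizes external memory from the memory-controller registers instead of CONFIG_MEM_SIZE. A standalone sketch of the EBSZ_TO_MEG() decode for a BF561-style EBIU_SDBCTL, where the low nibble of each byte encodes one SDRAM bank's size; the register value used here is hypothetical, the field encoding is the one from the macro itself:

    #include <stdio.h>

    #define EBSZ_TO_MEG(ebsz) \
    ({ \
    	int meg = 0; \
    	switch ((ebsz) & 0xf) { \
    	case 0x1: meg = 16;  break; \
    	case 0x3: meg = 32;  break; \
    	case 0x5: meg = 64;  break; \
    	case 0x7: meg = 128; break; \
    	case 0x9: meg = 256; break; \
    	case 0xb: meg = 512; break; \
    	} \
    	meg; \
    })

    int main(void)
    {
    	unsigned int sdbctl = 0x00000505;	/* hypothetical: banks 0 and 1 = 64M, banks 2 and 3 empty */
    	int mb = EBSZ_TO_MEG(sdbctl >> 0) + EBSZ_TO_MEG(sdbctl >> 8) +
    		 EBSZ_TO_MEG(sdbctl >> 16) + EBSZ_TO_MEG(sdbctl >> 24);

    	printf("%d MiB of SDRAM\n", mb);	/* prints 128 */
    	return 0;
    }

The statement-expression form of the macro is a GCC extension, the same one the kernel relies on.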
@@ -631,7 +719,7 @@ void __init setup_arch(char **cmdline_p) | |||
631 | 719 | ||
632 | /* setup memory defaults from the user config */ | 720 | /* setup memory defaults from the user config */ |
633 | physical_mem_end = 0; | 721 | physical_mem_end = 0; |
634 | _ramend = CONFIG_MEM_SIZE * 1024 * 1024; | 722 | _ramend = get_mem_size() * 1024 * 1024; |
635 | 723 | ||
636 | memset(&bfin_memmap, 0, sizeof(bfin_memmap)); | 724 | memset(&bfin_memmap, 0, sizeof(bfin_memmap)); |
637 | 725 | ||
@@ -712,15 +800,6 @@ void __init setup_arch(char **cmdline_p) | |||
712 | 800 | ||
713 | paging_init(); | 801 | paging_init(); |
714 | 802 | ||
715 | /* check the size of the l1 area */ | ||
716 | l1_length = _etext_l1 - _stext_l1; | ||
717 | if (l1_length > L1_CODE_LENGTH) | ||
718 | panic("L1 code memory overflow\n"); | ||
719 | |||
720 | l1_length = _ebss_l1 - _sdata_l1; | ||
721 | if (l1_length > L1_DATA_A_LENGTH) | ||
722 | panic("L1 data memory overflow\n"); | ||
723 | |||
724 | /* Copy atomic sequences to their fixed location, and sanity check that | 803 | /* Copy atomic sequences to their fixed location, and sanity check that |
725 | these locations are the ones that we advertise to userspace. */ | 804 | these locations are the ones that we advertise to userspace. */ |
726 | memcpy((void *)FIXED_CODE_START, &fixed_code_start, | 805 | memcpy((void *)FIXED_CODE_START, &fixed_code_start, |
@@ -859,12 +938,17 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
859 | seq_printf(m, "processor\t: %d\n" | 938 | seq_printf(m, "processor\t: %d\n" |
860 | "vendor_id\t: %s\n" | 939 | "vendor_id\t: %s\n" |
861 | "cpu family\t: 0x%x\n" | 940 | "cpu family\t: 0x%x\n" |
862 | "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK)\n" | 941 | "model name\t: ADSP-%s %lu(MHz CCLK) %lu(MHz SCLK) (%s)\n" |
863 | "stepping\t: %d\n", | 942 | "stepping\t: %d\n", |
864 | 0, | 943 | 0, |
865 | vendor, | 944 | vendor, |
866 | (bfin_read_CHIPID() & CHIPID_FAMILY), | 945 | (bfin_read_CHIPID() & CHIPID_FAMILY), |
867 | cpu, cclk/1000000, sclk/1000000, | 946 | cpu, cclk/1000000, sclk/1000000, |
947 | #ifdef CONFIG_MPU | ||
948 | "mpu on", | ||
949 | #else | ||
950 | "mpu off", | ||
951 | #endif | ||
868 | revid); | 952 | revid); |
869 | 953 | ||
870 | seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", | 954 | seq_printf(m, "cpu MHz\t\t: %lu.%03lu/%lu.%03lu\n", |
@@ -973,7 +1057,6 @@ static int show_cpuinfo(struct seq_file *m, void *v) | |||
973 | seq_printf(m, "No Ways are locked\n"); | 1057 | seq_printf(m, "No Ways are locked\n"); |
974 | } | 1058 | } |
975 | #endif | 1059 | #endif |
976 | |||
977 | seq_printf(m, "board name\t: %s\n", bfin_board_name); | 1060 | seq_printf(m, "board name\t: %s\n", bfin_board_name); |
978 | seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", | 1061 | seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", |
979 | physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); | 1062 | physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); |
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index 5564c9588aa8..d1fa24401dc6 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c | |||
@@ -38,6 +38,7 @@ | |||
38 | 38 | ||
39 | #include <asm/cacheflush.h> | 39 | #include <asm/cacheflush.h> |
40 | #include <asm/ucontext.h> | 40 | #include <asm/ucontext.h> |
41 | #include <asm/fixed_code.h> | ||
41 | 42 | ||
42 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) | 43 | #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) |
43 | 44 | ||
@@ -50,18 +51,20 @@ struct rt_sigframe { | |||
50 | int sig; | 51 | int sig; |
51 | struct siginfo *pinfo; | 52 | struct siginfo *pinfo; |
52 | void *puc; | 53 | void *puc; |
54 | /* This is no longer needed by the kernel, but unfortunately userspace | ||
55 | * code expects it to be there. */ | ||
53 | char retcode[8]; | 56 | char retcode[8]; |
54 | struct siginfo info; | 57 | struct siginfo info; |
55 | struct ucontext uc; | 58 | struct ucontext uc; |
56 | }; | 59 | }; |
57 | 60 | ||
58 | asmlinkage int sys_sigaltstack(const stack_t * uss, stack_t * uoss) | 61 | asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) |
59 | { | 62 | { |
60 | return do_sigaltstack(uss, uoss, rdusp()); | 63 | return do_sigaltstack(uss, uoss, rdusp()); |
61 | } | 64 | } |
62 | 65 | ||
63 | static inline int | 66 | static inline int |
64 | rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext *sc, int *pr0) | 67 | rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *pr0) |
65 | { | 68 | { |
66 | unsigned long usp = 0; | 69 | unsigned long usp = 0; |
67 | int err = 0; | 70 | int err = 0; |
@@ -159,11 +162,6 @@ static inline int rt_setup_sigcontext(struct sigcontext *sc, struct pt_regs *reg | |||
159 | return err; | 162 | return err; |
160 | } | 163 | } |
161 | 164 | ||
162 | static inline void push_cache(unsigned long vaddr, unsigned int len) | ||
163 | { | ||
164 | flush_icache_range(vaddr, vaddr + len); | ||
165 | } | ||
166 | |||
167 | static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | 165 | static inline void *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, |
168 | size_t frame_size) | 166 | size_t frame_size) |
169 | { | 167 | { |
@@ -209,19 +207,9 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, | |||
209 | err |= rt_setup_sigcontext(&frame->uc.uc_mcontext, regs); | 207 | err |= rt_setup_sigcontext(&frame->uc.uc_mcontext, regs); |
210 | err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); | 208 | err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); |
211 | 209 | ||
212 | /* Set up to return from userspace. */ | ||
213 | err |= __put_user(0x28, &(frame->retcode[0])); | ||
214 | err |= __put_user(0xe1, &(frame->retcode[1])); | ||
215 | err |= __put_user(0xad, &(frame->retcode[2])); | ||
216 | err |= __put_user(0x00, &(frame->retcode[3])); | ||
217 | err |= __put_user(0xa0, &(frame->retcode[4])); | ||
218 | err |= __put_user(0x00, &(frame->retcode[5])); | ||
219 | |||
220 | if (err) | 210 | if (err) |
221 | goto give_sigsegv; | 211 | goto give_sigsegv; |
222 | 212 | ||
223 | push_cache((unsigned long)&frame->retcode, sizeof(frame->retcode)); | ||
224 | |||
225 | /* Set up registers for signal handler */ | 213 | /* Set up registers for signal handler */ |
226 | wrusp((unsigned long)frame); | 214 | wrusp((unsigned long)frame); |
227 | if (get_personality & FDPIC_FUNCPTRS) { | 215 | if (get_personality & FDPIC_FUNCPTRS) { |
@@ -231,7 +219,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, | |||
231 | __get_user(regs->p3, &funcptr->GOT); | 219 | __get_user(regs->p3, &funcptr->GOT); |
232 | } else | 220 | } else |
233 | regs->pc = (unsigned long)ka->sa.sa_handler; | 221 | regs->pc = (unsigned long)ka->sa.sa_handler; |
234 | regs->rets = (unsigned long)(frame->retcode); | 222 | regs->rets = SIGRETURN_STUB; |
235 | 223 | ||
236 | regs->r0 = frame->sig; | 224 | regs->r0 = frame->sig; |
237 | regs->r1 = (unsigned long)(&frame->info); | 225 | regs->r1 = (unsigned long)(&frame->info); |
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c index abcd14817d0e..efb7b25a2633 100644 --- a/arch/blackfin/kernel/sys_bfin.c +++ b/arch/blackfin/kernel/sys_bfin.c | |||
@@ -49,7 +49,7 @@ | |||
49 | * sys_pipe() is the normal C calling standard for creating | 49 | * sys_pipe() is the normal C calling standard for creating |
50 | * a pipe. It's not the way unix traditionally does this, though. | 50 | * a pipe. It's not the way unix traditionally does this, though. |
51 | */ | 51 | */ |
52 | asmlinkage int sys_pipe(unsigned long *fildes) | 52 | asmlinkage int sys_pipe(unsigned long __user *fildes) |
53 | { | 53 | { |
54 | int fd[2]; | 54 | int fd[2]; |
55 | int error; | 55 | int error; |
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c new file mode 100644 index 000000000000..4482c47c09e5 --- /dev/null +++ b/arch/blackfin/kernel/time-ts.c | |||
@@ -0,0 +1,219 @@ | |||
1 | /* | ||
2 | * linux/arch/blackfin/kernel/time-ts.c | ||
3 | * | ||
4 | * Based on arm clockevents implementation and old bfin time tick. | ||
5 | * | ||
6 | * Copyright(C) 2008, GeoTechnologies, Vitja Makarov | ||
7 | * | ||
8 | * This code is licensed under the GPL version 2. For details see | ||
9 | * kernel-base/COPYING. | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/profile.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/time.h> | ||
15 | #include <linux/timex.h> | ||
16 | #include <linux/irq.h> | ||
17 | #include <linux/clocksource.h> | ||
18 | #include <linux/clockchips.h> | ||
19 | #include <linux/cpufreq.h> | ||
20 | |||
21 | #include <asm/blackfin.h> | ||
22 | #include <asm/time.h> | ||
23 | |||
24 | #ifdef CONFIG_CYCLES_CLOCKSOURCE | ||
25 | |||
26 | /* Accelerators for sched_clock() | ||
27 | * convert from cycles(64bits) => nanoseconds (64bits) | ||
28 | * basic equation: | ||
29 | * ns = cycles / (freq / ns_per_sec) | ||
30 | * ns = cycles * (ns_per_sec / freq) | ||
31 | * ns = cycles * (10^9 / (cpu_khz * 10^3)) | ||
32 | * ns = cycles * (10^6 / cpu_khz) | ||
33 | * | ||
34 | * Then we use scaling math (suggested by george@mvista.com) to get: | ||
35 | * ns = cycles * (10^6 * SC / cpu_khz) / SC | ||
36 | * ns = cycles * cyc2ns_scale / SC | ||
37 | * | ||
38 | * And since SC is a constant power of two, we can convert the div | ||
39 | * into a shift. | ||
40 | * | ||
41 | * We can use khz divisor instead of mhz to keep a better precision, since | ||
42 | * cyc2ns_scale is limited to 10^6 * 2^10, which fits in 32 bits. | ||
43 | * (mathieu.desnoyers@polymtl.ca) | ||
44 | * | ||
45 | * -johnstul@us.ibm.com "math is hard, lets go shopping!" | ||
46 | */ | ||
47 | |||
48 | static unsigned long cyc2ns_scale; | ||
49 | #define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ | ||
50 | |||
51 | static inline void set_cyc2ns_scale(unsigned long cpu_khz) | ||
52 | { | ||
53 | cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz; | ||
54 | } | ||
55 | |||
56 | static inline unsigned long long cycles_2_ns(cycle_t cyc) | ||
57 | { | ||
58 | return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR; | ||
59 | } | ||
60 | |||
61 | static cycle_t read_cycles(void) | ||
62 | { | ||
63 | return get_cycles(); | ||
64 | } | ||
65 | |||
66 | unsigned long long sched_clock(void) | ||
67 | { | ||
68 | return cycles_2_ns(read_cycles()); | ||
69 | } | ||
70 | |||
71 | static struct clocksource clocksource_bfin = { | ||
72 | .name = "bfin_cycles", | ||
73 | .rating = 350, | ||
74 | .read = read_cycles, | ||
75 | .mask = CLOCKSOURCE_MASK(64), | ||
76 | .shift = 22, | ||
77 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
78 | }; | ||
79 | |||
80 | static int __init bfin_clocksource_init(void) | ||
81 | { | ||
82 | set_cyc2ns_scale(get_cclk() / 1000); | ||
83 | |||
84 | clocksource_bfin.mult = clocksource_hz2mult(get_cclk(), clocksource_bfin.shift); | ||
85 | |||
86 | if (clocksource_register(&clocksource_bfin)) | ||
87 | panic("failed to register clocksource"); | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | #else | ||
93 | # define bfin_clocksource_init() | ||
94 | #endif | ||
95 | |||
96 | static int bfin_timer_set_next_event(unsigned long cycles, | ||
97 | struct clock_event_device *evt) | ||
98 | { | ||
99 | bfin_write_TCOUNT(cycles); | ||
100 | CSYNC(); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static void bfin_timer_set_mode(enum clock_event_mode mode, | ||
105 | struct clock_event_device *evt) | ||
106 | { | ||
107 | switch (mode) { | ||
108 | case CLOCK_EVT_MODE_PERIODIC: { | ||
109 | unsigned long tcount = ((get_cclk() / (HZ * TIME_SCALE)) - 1); | ||
110 | bfin_write_TCNTL(TMPWR); | ||
111 | bfin_write_TSCALE(TIME_SCALE - 1); | ||
112 | CSYNC(); | ||
113 | bfin_write_TPERIOD(tcount); | ||
114 | bfin_write_TCOUNT(tcount); | ||
115 | bfin_write_TCNTL(TMPWR | TMREN | TAUTORLD); | ||
116 | CSYNC(); | ||
117 | break; | ||
118 | } | ||
119 | case CLOCK_EVT_MODE_ONESHOT: | ||
120 | bfin_write_TSCALE(0); | ||
121 | bfin_write_TCOUNT(0); | ||
122 | bfin_write_TCNTL(TMPWR | TMREN); | ||
123 | CSYNC(); | ||
124 | break; | ||
125 | case CLOCK_EVT_MODE_UNUSED: | ||
126 | case CLOCK_EVT_MODE_SHUTDOWN: | ||
127 | bfin_write_TCNTL(0); | ||
128 | CSYNC(); | ||
129 | break; | ||
130 | case CLOCK_EVT_MODE_RESUME: | ||
131 | break; | ||
132 | } | ||
133 | } | ||
134 | |||
135 | static void __init bfin_timer_init(void) | ||
136 | { | ||
137 | /* power up the timer, but don't enable it just yet */ | ||
138 | bfin_write_TCNTL(TMPWR); | ||
139 | CSYNC(); | ||
140 | |||
141 | /* | ||
142 | * the TSCALE prescaler: TCOUNT decrements once every TSCALE + 1 CCLK cycles. | ||
143 | */ | ||
144 | bfin_write_TSCALE(TIME_SCALE - 1); | ||
145 | bfin_write_TPERIOD(0); | ||
146 | bfin_write_TCOUNT(0); | ||
147 | |||
148 | /* sync the MMR writes; the timer itself is enabled later by bfin_timer_set_mode() */ | ||
149 | CSYNC(); | ||
150 | } | ||
151 | |||
152 | /* | ||
153 | * timer_interrupt() needs to keep up the real-time clock, | ||
154 | * as well as call the "do_timer()" routine every clocktick | ||
155 | */ | ||
156 | #ifdef CONFIG_CORE_TIMER_IRQ_L1 | ||
157 | __attribute__((l1_text)) | ||
158 | #endif | ||
159 | irqreturn_t timer_interrupt(int irq, void *dev_id); | ||
160 | |||
161 | static struct clock_event_device clockevent_bfin = { | ||
162 | .name = "bfin_core_timer", | ||
163 | .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, | ||
164 | .shift = 32, | ||
165 | .cpumask = CPU_MASK_CPU0, | ||
166 | .set_next_event = bfin_timer_set_next_event, | ||
167 | .set_mode = bfin_timer_set_mode, | ||
168 | }; | ||
169 | |||
170 | static struct irqaction bfin_timer_irq = { | ||
171 | .name = "Blackfin Core Timer", | ||
172 | .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, | ||
173 | .handler = timer_interrupt, | ||
174 | .dev_id = &clockevent_bfin, | ||
175 | }; | ||
176 | |||
177 | irqreturn_t timer_interrupt(int irq, void *dev_id) | ||
178 | { | ||
179 | struct clock_event_device *evt = dev_id; | ||
180 | evt->event_handler(evt); | ||
181 | return IRQ_HANDLED; | ||
182 | } | ||
183 | |||
184 | static int __init bfin_clockevent_init(void) | ||
185 | { | ||
186 | setup_irq(IRQ_CORETMR, &bfin_timer_irq); | ||
187 | bfin_timer_init(); | ||
188 | |||
189 | clockevent_bfin.mult = div_sc(get_cclk(), NSEC_PER_SEC, clockevent_bfin.shift); | ||
190 | clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin); | ||
191 | clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin); | ||
192 | clockevents_register_device(&clockevent_bfin); | ||
193 | |||
194 | return 0; | ||
195 | } | ||
196 | |||
197 | void __init time_init(void) | ||
198 | { | ||
199 | time_t secs_since_1970 = (365 * 37 + 9) * 24 * 60 * 60; /* 1 Jan 2007 */ | ||
200 | |||
201 | #ifdef CONFIG_RTC_DRV_BFIN | ||
202 | /* [#2663] hack to filter junk RTC values that would cause | ||
203 | * userspace to have to deal with time values greater than | ||
204 | * 2^31 seconds (which uClibc cannot cope with yet) | ||
205 | */ | ||
206 | if ((bfin_read_RTC_STAT() & 0xC0000000) == 0xC0000000) { | ||
207 | printk(KERN_NOTICE "bfin-rtc: invalid date; resetting\n"); | ||
208 | bfin_write_RTC_STAT(0); | ||
209 | } | ||
210 | #endif | ||
211 | |||
212 | /* Initialize xtime. From now on, xtime is updated with timer interrupts */ | ||
213 | xtime.tv_sec = secs_since_1970; | ||
214 | xtime.tv_nsec = 0; | ||
215 | set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); | ||
216 | |||
217 | bfin_clocksource_init(); | ||
218 | bfin_clockevent_init(); | ||
219 | } | ||
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c index 715b3945e4c7..eb2352320454 100644 --- a/arch/blackfin/kernel/time.c +++ b/arch/blackfin/kernel/time.c | |||
@@ -6,9 +6,10 @@ | |||
6 | * Created: | 6 | * Created: |
7 | * Description: This file contains the bfin-specific time handling details. | 7 | * Description: This file contains the bfin-specific time handling details. |
8 | * Most of the stuff is located in the machine specific files. | 8 | * Most of the stuff is located in the machine specific files. |
9 | * FIXME: (This file is subject for removal) | ||
9 | * | 10 | * |
10 | * Modified: | 11 | * Modified: |
11 | * Copyright 2004-2006 Analog Devices Inc. | 12 | * Copyright 2004-2008 Analog Devices Inc. |
12 | * | 13 | * |
13 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ | 14 | * Bugs: Enter bugs at http://blackfin.uclinux.org/ |
14 | * | 15 | * |
@@ -35,6 +36,7 @@ | |||
35 | #include <linux/irq.h> | 36 | #include <linux/irq.h> |
36 | 37 | ||
37 | #include <asm/blackfin.h> | 38 | #include <asm/blackfin.h> |
39 | #include <asm/time.h> | ||
38 | 40 | ||
39 | /* This is an NTP setting */ | 41 | /* This is an NTP setting */ |
40 | #define TICK_SIZE (tick_nsec / 1000) | 42 | #define TICK_SIZE (tick_nsec / 1000) |
@@ -47,21 +49,6 @@ static struct irqaction bfin_timer_irq = { | |||
47 | .flags = IRQF_DISABLED | 49 | .flags = IRQF_DISABLED |
48 | }; | 50 | }; |
49 | 51 | ||
50 | /* | ||
51 | * The way that the Blackfin core timer works is: | ||
52 | * - CCLK is divided by a programmable 8-bit pre-scaler (TSCALE) | ||
53 | * - Every time TSCALE ticks, a 32bit is counted down (TCOUNT) | ||
54 | * | ||
55 | * If you take the fastest clock (1ns, or 1GHz to make the math work easier) | ||
56 | * 10ms is 10,000,000 clock ticks, which fits easy into a 32-bit counter | ||
57 | * (32 bit counter is 4,294,967,296ns or 4.2 seconds) so, we don't need | ||
58 | * to use TSCALE, and program it to zero (which is pass CCLK through). | ||
59 | * If you feel like using it, try to keep HZ * TIMESCALE to some | ||
60 | * value that divides easy (like power of 2). | ||
61 | */ | ||
62 | |||
63 | #define TIME_SCALE 1 | ||
64 | |||
65 | static void | 52 | static void |
66 | time_sched_init(irq_handler_t timer_routine) | 53 | time_sched_init(irq_handler_t timer_routine) |
67 | { | 54 | { |
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index 56a67ab698c7..5b847070dae5 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c | |||
@@ -67,6 +67,8 @@ void __init trap_init(void) | |||
67 | CSYNC(); | 67 | CSYNC(); |
68 | } | 68 | } |
69 | 69 | ||
70 | void *saved_icplb_fault_addr, *saved_dcplb_fault_addr; | ||
71 | |||
70 | int kstack_depth_to_print = 48; | 72 | int kstack_depth_to_print = 48; |
71 | 73 | ||
72 | static void decode_address(char *buf, unsigned long address) | 74 | static void decode_address(char *buf, unsigned long address) |
@@ -75,7 +77,7 @@ static void decode_address(char *buf, unsigned long address) | |||
75 | struct task_struct *p; | 77 | struct task_struct *p; |
76 | struct mm_struct *mm; | 78 | struct mm_struct *mm; |
77 | unsigned long flags, offset; | 79 | unsigned long flags, offset; |
78 | unsigned int in_exception = bfin_read_IPEND() & 0x10; | 80 | unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic(); |
79 | 81 | ||
80 | #ifdef CONFIG_KALLSYMS | 82 | #ifdef CONFIG_KALLSYMS |
81 | unsigned long symsize; | 83 | unsigned long symsize; |
@@ -117,7 +119,7 @@ static void decode_address(char *buf, unsigned long address) | |||
117 | */ | 119 | */ |
118 | write_lock_irqsave(&tasklist_lock, flags); | 120 | write_lock_irqsave(&tasklist_lock, flags); |
119 | for_each_process(p) { | 121 | for_each_process(p) { |
120 | mm = (in_exception ? p->mm : get_task_mm(p)); | 122 | mm = (in_atomic ? p->mm : get_task_mm(p)); |
121 | if (!mm) | 123 | if (!mm) |
122 | continue; | 124 | continue; |
123 | 125 | ||
@@ -137,23 +139,36 @@ static void decode_address(char *buf, unsigned long address) | |||
137 | /* FLAT does not have its text aligned to the start of | 139 | /* FLAT does not have its text aligned to the start of |
138 | * the map while FDPIC ELF does ... | 140 | * the map while FDPIC ELF does ... |
139 | */ | 141 | */ |
140 | if (current->mm && | 142 | |
141 | (address > current->mm->start_code) && | 143 | /* before we can check flat/fdpic, we need to |
142 | (address < current->mm->end_code)) | 144 | * make sure current is valid |
143 | offset = address - current->mm->start_code; | 145 | */ |
144 | else | 146 | if ((unsigned long)current >= FIXED_CODE_START && |
145 | offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT); | 147 | !((unsigned long)current & 0x3)) { |
146 | 148 | if (current->mm && | |
147 | sprintf(buf, "<0x%p> [ %s + 0x%lx ]", | 149 | (address > current->mm->start_code) && |
148 | (void *)address, name, offset); | 150 | (address < current->mm->end_code)) |
149 | if (!in_exception) | 151 | offset = address - current->mm->start_code; |
152 | else | ||
153 | offset = (address - vma->vm_start) + | ||
154 | (vma->vm_pgoff << PAGE_SHIFT); | ||
155 | |||
156 | sprintf(buf, "<0x%p> [ %s + 0x%lx ]", | ||
157 | (void *)address, name, offset); | ||
158 | } else | ||
159 | sprintf(buf, "<0x%p> [ %s vma:0x%lx-0x%lx]", | ||
160 | (void *)address, name, | ||
161 | vma->vm_start, vma->vm_end); | ||
162 | |||
163 | if (!in_atomic) | ||
150 | mmput(mm); | 164 | mmput(mm); |
165 | |||
151 | goto done; | 166 | goto done; |
152 | } | 167 | } |
153 | 168 | ||
154 | vml = vml->next; | 169 | vml = vml->next; |
155 | } | 170 | } |
156 | if (!in_exception) | 171 | if (!in_atomic) |
157 | mmput(mm); | 172 | mmput(mm); |
158 | } | 173 | } |
159 | 174 | ||
@@ -506,7 +521,7 @@ asmlinkage void trap_c(struct pt_regs *fp) | |||
506 | 521 | ||
507 | info.si_signo = sig; | 522 | info.si_signo = sig; |
508 | info.si_errno = 0; | 523 | info.si_errno = 0; |
509 | info.si_addr = (void *)fp->pc; | 524 | info.si_addr = (void __user *)fp->pc; |
510 | force_sig_info(sig, &info, current); | 525 | force_sig_info(sig, &info, current); |
511 | 526 | ||
512 | trace_buffer_restore(j); | 527 | trace_buffer_restore(j); |
@@ -655,21 +670,31 @@ void dump_bfin_process(struct pt_regs *fp) | |||
655 | else if (context & 0x8000) | 670 | else if (context & 0x8000) |
656 | printk(KERN_NOTICE "Kernel process context\n"); | 671 | printk(KERN_NOTICE "Kernel process context\n"); |
657 | 672 | ||
658 | if (current->pid && current->mm) { | 673 | /* Because we are crashing, and pointers could be bad, we check things |
674 | * pretty closely before we use them | ||
675 | */ | ||
676 | if ((unsigned long)current >= FIXED_CODE_START && | ||
677 | !((unsigned long)current & 0x3) && current->pid) { | ||
659 | printk(KERN_NOTICE "CURRENT PROCESS:\n"); | 678 | printk(KERN_NOTICE "CURRENT PROCESS:\n"); |
660 | printk(KERN_NOTICE "COMM=%s PID=%d\n", | 679 | if (current->comm >= (char *)FIXED_CODE_START) |
661 | current->comm, current->pid); | 680 | printk(KERN_NOTICE "COMM=%s PID=%d\n", |
662 | 681 | current->comm, current->pid); | |
663 | printk(KERN_NOTICE "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n" | 682 | else |
664 | KERN_NOTICE "BSS = 0x%p-0x%p USER-STACK = 0x%p\n" | 683 | printk(KERN_NOTICE "COMM= invalid\n"); |
665 | KERN_NOTICE "\n", | 684 | |
666 | (void *)current->mm->start_code, | 685 | if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START) |
667 | (void *)current->mm->end_code, | 686 | printk(KERN_NOTICE "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n" |
668 | (void *)current->mm->start_data, | 687 | KERN_NOTICE " BSS = 0x%p-0x%p USER-STACK = 0x%p\n" |
669 | (void *)current->mm->end_data, | 688 | KERN_NOTICE "\n", |
670 | (void *)current->mm->end_data, | 689 | (void *)current->mm->start_code, |
671 | (void *)current->mm->brk, | 690 | (void *)current->mm->end_code, |
672 | (void *)current->mm->start_stack); | 691 | (void *)current->mm->start_data, |
692 | (void *)current->mm->end_data, | ||
693 | (void *)current->mm->end_data, | ||
694 | (void *)current->mm->brk, | ||
695 | (void *)current->mm->start_stack); | ||
696 | else | ||
697 | printk(KERN_NOTICE "invalid mm\n"); | ||
673 | } else | 698 | } else |
674 | printk(KERN_NOTICE "\n" KERN_NOTICE | 699 | printk(KERN_NOTICE "\n" KERN_NOTICE |
675 | "No Valid process in current context\n"); | 700 | "No Valid process in current context\n"); |
@@ -680,10 +705,7 @@ void dump_bfin_mem(struct pt_regs *fp) | |||
680 | unsigned short *addr, *erraddr, val = 0, err = 0; | 705 | unsigned short *addr, *erraddr, val = 0, err = 0; |
681 | char sti = 0, buf[6]; | 706 | char sti = 0, buf[6]; |
682 | 707 | ||
683 | if (unlikely((fp->seqstat & SEQSTAT_EXCAUSE) == VEC_HWERR)) | 708 | erraddr = (void *)fp->pc; |
684 | erraddr = (void *)fp->pc; | ||
685 | else | ||
686 | erraddr = (void *)fp->retx; | ||
687 | 709 | ||
688 | printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr); | 710 | printk(KERN_NOTICE "return address: [0x%p]; contents of:", erraddr); |
689 | 711 | ||
@@ -807,9 +829,9 @@ unlock: | |||
807 | 829 | ||
808 | if (((long)fp->seqstat & SEQSTAT_EXCAUSE) && | 830 | if (((long)fp->seqstat & SEQSTAT_EXCAUSE) && |
809 | (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) { | 831 | (((long)fp->seqstat & SEQSTAT_EXCAUSE) != VEC_HWERR)) { |
810 | decode_address(buf, bfin_read_DCPLB_FAULT_ADDR()); | 832 | decode_address(buf, saved_dcplb_fault_addr); |
811 | printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf); | 833 | printk(KERN_NOTICE "DCPLB_FAULT_ADDR: %s\n", buf); |
812 | decode_address(buf, bfin_read_ICPLB_FAULT_ADDR()); | 834 | decode_address(buf, saved_icplb_fault_addr); |
813 | printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf); | 835 | printk(KERN_NOTICE "ICPLB_FAULT_ADDR: %s\n", buf); |
814 | } | 836 | } |
815 | 837 | ||
@@ -917,8 +939,8 @@ void panic_cplb_error(int cplb_panic, struct pt_regs *fp) | |||
917 | 939 | ||
918 | oops_in_progress = 1; | 940 | oops_in_progress = 1; |
919 | 941 | ||
920 | printk(KERN_EMERG "DCPLB_FAULT_ADDR=%p\n", (void *)bfin_read_DCPLB_FAULT_ADDR()); | 942 | printk(KERN_EMERG "DCPLB_FAULT_ADDR=%p\n", saved_dcplb_fault_addr); |
921 | printk(KERN_EMERG "ICPLB_FAULT_ADDR=%p\n", (void *)bfin_read_ICPLB_FAULT_ADDR()); | 943 | printk(KERN_EMERG "ICPLB_FAULT_ADDR=%p\n", saved_icplb_fault_addr); |
922 | dump_bfin_process(fp); | 944 | dump_bfin_process(fp); |
923 | dump_bfin_mem(fp); | 945 | dump_bfin_mem(fp); |
924 | show_regs(fp); | 946 | show_regs(fp); |
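Both crash paths in traps.c above now validate a pointer (current, then current->mm) before dereferencing it: the address must be word aligned and must lie at or above the fixed-code region. A standalone sketch of that test; the helper name is invented and the FIXED_CODE_START value is only illustrative (the real one comes from asm/fixed_code.h):

    #include <stdbool.h>
    #include <stdio.h>

    #define FIXED_CODE_START 0x400UL	/* illustrative placeholder value */

    static bool bfin_ptr_plausible(const void *p)
    {
    	unsigned long addr = (unsigned long)p;

    	/* must sit above the fixed-code area and be 32-bit aligned */
    	return addr >= FIXED_CODE_START && !(addr & 0x3);
    }

    int main(void)
    {
    	int x;

    	/* prints "1 0": a stack variable passes, a bogus misaligned pointer does not */
    	printf("%d %d\n", bfin_ptr_plausible(&x), bfin_ptr_plausible((void *)0x2));
    	return 0;
    }

Since these dumps run while the machine is already dying, rejecting implausible pointers up front is what keeps the oops output itself from faulting.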
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S index cb01a9de2680..3ecc64cab3be 100644 --- a/arch/blackfin/kernel/vmlinux.lds.S +++ b/arch/blackfin/kernel/vmlinux.lds.S | |||
@@ -56,6 +56,10 @@ SECTIONS | |||
56 | *(.text.*) | 56 | *(.text.*) |
57 | *(.fixup) | 57 | *(.fixup) |
58 | 58 | ||
59 | #if !L1_CODE_LENGTH | ||
60 | *(.l1.text) | ||
61 | #endif | ||
62 | |||
59 | . = ALIGN(16); | 63 | . = ALIGN(16); |
60 | ___start___ex_table = .; | 64 | ___start___ex_table = .; |
61 | *(__ex_table) | 65 | *(__ex_table) |
@@ -73,6 +77,12 @@ SECTIONS | |||
73 | ___bss_start = .; | 77 | ___bss_start = .; |
74 | *(.bss .bss.*) | 78 | *(.bss .bss.*) |
75 | *(COMMON) | 79 | *(COMMON) |
80 | #if !L1_DATA_A_LENGTH | ||
81 | *(.l1.bss) | ||
82 | #endif | ||
83 | #if !L1_DATA_B_LENGTH | ||
84 | *(.l1.bss.B) | ||
85 | #endif | ||
76 | ___bss_stop = .; | 86 | ___bss_stop = .; |
77 | } | 87 | } |
78 | 88 | ||
@@ -83,6 +93,15 @@ SECTIONS | |||
83 | . = ALIGN(32); | 93 | . = ALIGN(32); |
84 | *(.data.cacheline_aligned) | 94 | *(.data.cacheline_aligned) |
85 | 95 | ||
96 | #if !L1_DATA_A_LENGTH | ||
97 | . = ALIGN(32); | ||
98 | *(.data_l1.cacheline_aligned) | ||
99 | *(.l1.data) | ||
100 | #endif | ||
101 | #if !L1_DATA_B_LENGTH | ||
102 | *(.l1.data.B) | ||
103 | #endif | ||
104 | |||
86 | DATA_DATA | 105 | DATA_DATA |
87 | *(.data.*) | 106 | *(.data.*) |
88 | CONSTRUCTORS | 107 | CONSTRUCTORS |
@@ -147,64 +166,43 @@ SECTIONS | |||
147 | 166 | ||
148 | __l1_lma_start = .; | 167 | __l1_lma_start = .; |
149 | 168 | ||
150 | #if L1_CODE_LENGTH | ||
151 | # define LDS_L1_CODE *(.l1.text) | ||
152 | #else | ||
153 | # define LDS_L1_CODE | ||
154 | #endif | ||
155 | .text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs)) | 169 | .text_l1 L1_CODE_START : AT(LOADADDR(.init.ramfs) + SIZEOF(.init.ramfs)) |
156 | { | 170 | { |
157 | . = ALIGN(4); | 171 | . = ALIGN(4); |
158 | __stext_l1 = .; | 172 | __stext_l1 = .; |
159 | LDS_L1_CODE | 173 | *(.l1.text) |
160 | . = ALIGN(4); | 174 | . = ALIGN(4); |
161 | __etext_l1 = .; | 175 | __etext_l1 = .; |
162 | } | 176 | } |
163 | 177 | ||
164 | #if L1_DATA_A_LENGTH | ||
165 | # define LDS_L1_A_DATA *(.l1.data) | ||
166 | # define LDS_L1_A_BSS *(.l1.bss) | ||
167 | # define LDS_L1_A_CACHE *(.data_l1.cacheline_aligned) | ||
168 | #else | ||
169 | # define LDS_L1_A_DATA | ||
170 | # define LDS_L1_A_BSS | ||
171 | # define LDS_L1_A_CACHE | ||
172 | #endif | ||
173 | .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1)) | 178 | .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1)) |
174 | { | 179 | { |
175 | . = ALIGN(4); | 180 | . = ALIGN(4); |
176 | __sdata_l1 = .; | 181 | __sdata_l1 = .; |
177 | LDS_L1_A_DATA | 182 | *(.l1.data) |
178 | __edata_l1 = .; | 183 | __edata_l1 = .; |
179 | 184 | ||
180 | . = ALIGN(4); | 185 | . = ALIGN(4); |
181 | __sbss_l1 = .; | 186 | __sbss_l1 = .; |
182 | LDS_L1_A_BSS | 187 | *(.l1.bss) |
183 | 188 | ||
184 | . = ALIGN(32); | 189 | . = ALIGN(32); |
185 | LDS_L1_A_CACHE | 190 | *(.data_l1.cacheline_aligned) |
186 | 191 | ||
187 | . = ALIGN(4); | 192 | . = ALIGN(4); |
188 | __ebss_l1 = .; | 193 | __ebss_l1 = .; |
189 | } | 194 | } |
190 | 195 | ||
191 | #if L1_DATA_B_LENGTH | ||
192 | # define LDS_L1_B_DATA *(.l1.data.B) | ||
193 | # define LDS_L1_B_BSS *(.l1.bss.B) | ||
194 | #else | ||
195 | # define LDS_L1_B_DATA | ||
196 | # define LDS_L1_B_BSS | ||
197 | #endif | ||
198 | .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1)) | 196 | .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1)) |
199 | { | 197 | { |
200 | . = ALIGN(4); | 198 | . = ALIGN(4); |
201 | __sdata_b_l1 = .; | 199 | __sdata_b_l1 = .; |
202 | LDS_L1_B_DATA | 200 | *(.l1.data.B) |
203 | __edata_b_l1 = .; | 201 | __edata_b_l1 = .; |
204 | 202 | ||
205 | . = ALIGN(4); | 203 | . = ALIGN(4); |
206 | __sbss_b_l1 = .; | 204 | __sbss_b_l1 = .; |
207 | LDS_L1_B_BSS | 205 | *(.l1.bss.B) |
208 | 206 | ||
209 | . = ALIGN(4); | 207 | . = ALIGN(4); |
210 | __ebss_b_l1 = .; | 208 | __ebss_b_l1 = .; |
@@ -223,8 +221,6 @@ SECTIONS | |||
223 | 221 | ||
224 | DWARF_DEBUG | 222 | DWARF_DEBUG |
225 | 223 | ||
226 | NOTES | ||
227 | |||
228 | /DISCARD/ : | 224 | /DISCARD/ : |
229 | { | 225 | { |
230 | EXIT_TEXT | 226 | EXIT_TEXT |
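The linker-script change above drops the LDS_L1_* conditionals: the .l1.* input sections are always collected by the .text_l1/.data_l1/.data_b_l1 output sections, with the new fallback rules earlier in the script folding them into the regular .text/.bss/.data on parts whose L1_*_LENGTH is zero. Code opts into those sections through the Blackfin toolchain attributes, the same way time-ts.c marks its timer interrupt; a sketch (the function and variable names are made up, and it only builds with a Blackfin gcc that knows the l1_text/l1_data attributes):

    /* placed in .l1.text -> .text_l1 when the part has L1 instruction SRAM,
     * otherwise swept back into the ordinary .text output section */
    void __attribute__((l1_text)) zero_buf(short *buf, int len)
    {
    	int i;

    	/* the body is only an illustration -- any hot inner loop would do */
    	for (i = 0; i < len; i++)
    		buf[i] = 0;
    }

    /* variables can be pinned the same way; .l1.data feeds .data_l1 above,
     * or falls back into the normal data/bss rules when L1_DATA_A_LENGTH is zero */
    int __attribute__((l1_data)) zero_buf_calls;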