author    Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>  2010-05-30 19:16:45 -0400
commit    ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree      644b88f8a71896307d71438e9b3af49126ffb22b /arch/blackfin/kernel
parent    43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent    3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)

Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--  arch/blackfin/kernel/Makefile                1
-rw-r--r--  arch/blackfin/kernel/bfin_dma_5xx.c         62
-rw-r--r--  arch/blackfin/kernel/bfin_gpio.c           119
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbinit.c    11
-rw-r--r--  arch/blackfin/kernel/cplb-mpu/cplbmgr.c     55
-rw-r--r--  arch/blackfin/kernel/cplb-nompu/cplbinit.c  40
-rw-r--r--  arch/blackfin/kernel/dma-mapping.c          68
-rw-r--r--  arch/blackfin/kernel/entry.S                 8
-rw-r--r--  arch/blackfin/kernel/ftrace-entry.S         23
-rw-r--r--  arch/blackfin/kernel/ftrace.c                6
-rw-r--r--  arch/blackfin/kernel/gptimers.c             32
-rw-r--r--  arch/blackfin/kernel/init_task.c             2
-rw-r--r--  arch/blackfin/kernel/ipipe.c                68
-rw-r--r--  arch/blackfin/kernel/irqchip.c               6
-rw-r--r--  arch/blackfin/kernel/kgdb.c                234
-rw-r--r--  arch/blackfin/kernel/kgdb_test.c            67
-rw-r--r--  arch/blackfin/kernel/nmi.c                 299
-rw-r--r--  arch/blackfin/kernel/process.c             109
-rw-r--r--  arch/blackfin/kernel/ptrace.c              373
-rw-r--r--  arch/blackfin/kernel/setup.c                80
-rw-r--r--  arch/blackfin/kernel/signal.c               42
-rw-r--r--  arch/blackfin/kernel/sys_bfin.c             33
-rw-r--r--  arch/blackfin/kernel/time-ts.c             242
-rw-r--r--  arch/blackfin/kernel/time.c                  8
-rw-r--r--  arch/blackfin/kernel/traps.c                83
-rw-r--r--  arch/blackfin/kernel/vmlinux.lds.S         100
26 files changed, 1290 insertions, 881 deletions
diff --git a/arch/blackfin/kernel/Makefile b/arch/blackfin/kernel/Makefile
index a8ddbc8ed5af..346a421f1562 100644
--- a/arch/blackfin/kernel/Makefile
+++ b/arch/blackfin/kernel/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_CPLB_INFO) += cplbinfo.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KGDB) += kgdb.o
 obj-$(CONFIG_KGDB_TESTS) += kgdb_test.o
+obj-$(CONFIG_NMI_WATCHDOG) += nmi.o
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 obj-$(CONFIG_EARLY_PRINTK) += shadow_console.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 3946aff4f414..26403d1c9e65 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -37,9 +37,8 @@ static int __init blackfin_dma_init(void)
 	printk(KERN_INFO "Blackfin DMA Controller\n");
 
 	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
-		dma_ch[i].chan_status = DMA_CHANNEL_FREE;
+		atomic_set(&dma_ch[i].chan_status, 0);
 		dma_ch[i].regs = dma_io_base_addr[i];
-		mutex_init(&(dma_ch[i].dmalock));
 	}
 	/* Mark MEMDMA Channel 0 as requested since we're using it internally */
 	request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy");
@@ -60,7 +59,7 @@ static int proc_dma_show(struct seq_file *m, void *v)
 	int i;
 
 	for (i = 0; i < MAX_DMA_CHANNELS; ++i)
-		if (dma_ch[i].chan_status != DMA_CHANNEL_FREE)
+		if (dma_channel_active(i))
 			seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id);
 
 	return 0;
@@ -92,7 +91,7 @@ late_initcall(proc_dma_init);
  */
 int request_dma(unsigned int channel, const char *device_id)
 {
-	pr_debug("request_dma() : BEGIN \n");
+	pr_debug("request_dma() : BEGIN\n");
 
 	if (device_id == NULL)
 		printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel);
@@ -107,20 +106,11 @@ int request_dma(unsigned int channel, const char *device_id)
 	}
 #endif
 
-	mutex_lock(&(dma_ch[channel].dmalock));
-
-	if ((dma_ch[channel].chan_status == DMA_CHANNEL_REQUESTED)
-	    || (dma_ch[channel].chan_status == DMA_CHANNEL_ENABLED)) {
-		mutex_unlock(&(dma_ch[channel].dmalock));
-		pr_debug("DMA CHANNEL IN USE \n");
+	if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) {
+		pr_debug("DMA CHANNEL IN USE\n");
 		return -EBUSY;
-	} else {
-		dma_ch[channel].chan_status = DMA_CHANNEL_REQUESTED;
-		pr_debug("DMA CHANNEL IS ALLOCATED \n");
 	}
 
-	mutex_unlock(&(dma_ch[channel].dmalock));
-
 #ifdef CONFIG_BF54x
 	if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
 		unsigned int per_map;
@@ -141,28 +131,27 @@ int request_dma(unsigned int channel, const char *device_id)
 	 * you have to request DMA, before doing any operations on
 	 * descriptor/channel
 	 */
-	pr_debug("request_dma() : END \n");
+	pr_debug("request_dma() : END\n");
 	return 0;
 }
 EXPORT_SYMBOL(request_dma);
 
 int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data)
 {
-	BUG_ON(channel >= MAX_DMA_CHANNELS ||
-			dma_ch[channel].chan_status == DMA_CHANNEL_FREE);
+	int ret;
+	unsigned int irq;
 
-	if (callback != NULL) {
-		int ret;
-		unsigned int irq = channel2irq(channel);
+	BUG_ON(channel >= MAX_DMA_CHANNELS || !callback ||
+		!atomic_read(&dma_ch[channel].chan_status));
 
-		ret = request_irq(irq, callback, IRQF_DISABLED,
-			dma_ch[channel].device_id, data);
-		if (ret)
-			return ret;
+	irq = channel2irq(channel);
+	ret = request_irq(irq, callback, 0, dma_ch[channel].device_id, data);
+	if (ret)
+		return ret;
+
+	dma_ch[channel].irq = irq;
+	dma_ch[channel].data = data;
 
-		dma_ch[channel].irq = irq;
-		dma_ch[channel].data = data;
-	}
 	return 0;
 }
 EXPORT_SYMBOL(set_dma_callback);
@@ -182,9 +171,9 @@ static void clear_dma_buffer(unsigned int channel)
 
 void free_dma(unsigned int channel)
 {
-	pr_debug("freedma() : BEGIN \n");
+	pr_debug("freedma() : BEGIN\n");
 	BUG_ON(channel >= MAX_DMA_CHANNELS ||
-			dma_ch[channel].chan_status == DMA_CHANNEL_FREE);
+			!atomic_read(&dma_ch[channel].chan_status));
 
 	/* Halt the DMA */
 	disable_dma(channel);
@@ -194,11 +183,9 @@ void free_dma(unsigned int channel)
 	free_irq(dma_ch[channel].irq, dma_ch[channel].data);
 
 	/* Clear the DMA Variable in the Channel */
-	mutex_lock(&(dma_ch[channel].dmalock));
-	dma_ch[channel].chan_status = DMA_CHANNEL_FREE;
-	mutex_unlock(&(dma_ch[channel].dmalock));
+	atomic_set(&dma_ch[channel].chan_status, 0);
 
-	pr_debug("freedma() : END \n");
+	pr_debug("freedma() : END\n");
 }
 EXPORT_SYMBOL(free_dma);
 
@@ -210,13 +197,14 @@ int blackfin_dma_suspend(void)
 {
 	int i;
 
-	for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i) {
-		if (dma_ch[i].chan_status == DMA_CHANNEL_ENABLED) {
+	for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
+		if (dma_ch[i].regs->cfg & DMAEN) {
 			printk(KERN_ERR "DMA Channel %d failed to suspend\n", i);
 			return -EBUSY;
 		}
 
-		dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
+		if (i < MAX_DMA_SUSPEND_CHANNELS)
+			dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map;
 	}
 
 	return 0;
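
The notable change in bfin_dma_5xx.c is dropping the per-channel mutex in
favor of a single atomic compare-and-swap on chan_status, which also makes
request_dma() callable from non-sleeping contexts. A minimal userspace sketch
of the same claim/release pattern, using C11 atomics in place of the kernel's
atomic_t (channel count and struct layout are illustrative only):

    #include <stdatomic.h>
    #include <stdio.h>

    #define MAX_CHANNELS 8

    struct dma_channel {
        atomic_int chan_status;             /* 0 = free, 1 = in use */
    };

    static struct dma_channel dma_ch[MAX_CHANNELS];

    static int request_channel(unsigned int channel)
    {
        int expected = 0;

        /* Atomically move 0 -> 1; fails if another caller won the race. */
        if (!atomic_compare_exchange_strong(&dma_ch[channel].chan_status,
                                            &expected, 1))
            return -1;                      /* -EBUSY in the kernel version */
        return 0;
    }

    static void free_channel(unsigned int channel)
    {
        /* A plain atomic store releases the channel, as in free_dma(). */
        atomic_store(&dma_ch[channel].chan_status, 0);
    }

    int main(void)
    {
        printf("first claim:  %d\n", request_channel(3));   /* 0  */
        printf("second claim: %d\n", request_channel(3));   /* -1 */
        free_channel(3);
        printf("after free:   %d\n", request_channel(3));   /* 0  */
        return 0;
    }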
diff --git a/arch/blackfin/kernel/bfin_gpio.c b/arch/blackfin/kernel/bfin_gpio.c
index 22705eeff34f..e35e20f00d9b 100644
--- a/arch/blackfin/kernel/bfin_gpio.c
+++ b/arch/blackfin/kernel/bfin_gpio.c
@@ -100,6 +100,12 @@ u8 pmux_offset[][16] = {
 };
 # endif
 
+#elif defined(BF538_FAMILY)
+static unsigned short * const port_fer[] = {
+	(unsigned short *) PORTCIO_FER,
+	(unsigned short *) PORTDIO_FER,
+	(unsigned short *) PORTEIO_FER,
+};
 #endif
 
 static unsigned short reserved_gpio_map[GPIO_BANK_NUM];
@@ -163,6 +169,27 @@ static int cmp_label(unsigned short ident, const char *label)
 
 static void port_setup(unsigned gpio, unsigned short usage)
 {
+#if defined(BF538_FAMILY)
+	/*
+	 * BF538/9 Port C,D and E are special.
+	 * Inverted PORT_FER polarity on CDE and no PORF_FER on F
+	 * Regular PORT F GPIOs are handled here, CDE are exclusively
+	 * managed by GPIOLIB
+	 */
+
+	if (gpio < MAX_BLACKFIN_GPIOS || gpio >= MAX_RESOURCES)
+		return;
+
+	gpio -= MAX_BLACKFIN_GPIOS;
+
+	if (usage == GPIO_USAGE)
+		*port_fer[gpio_bank(gpio)] |= gpio_bit(gpio);
+	else
+		*port_fer[gpio_bank(gpio)] &= ~gpio_bit(gpio);
+	SSYNC();
+	return;
+#endif
+
 	if (check_gpio(gpio))
 		return;
 
@@ -762,6 +789,8 @@ int peripheral_request(unsigned short per, const char *label)
 	if (!(per & P_DEFINED))
 		return -ENODEV;
 
+	BUG_ON(ident >= MAX_RESOURCES);
+
 	local_irq_save_hw(flags);
 
 	/* If a pin can be muxed as either GPIO or peripheral, make
@@ -979,6 +1008,76 @@ void bfin_gpio_free(unsigned gpio)
 }
 EXPORT_SYMBOL(bfin_gpio_free);
 
+#ifdef BFIN_SPECIAL_GPIO_BANKS
+static unsigned short reserved_special_gpio_map[gpio_bank(MAX_RESOURCES)];
+
+int bfin_special_gpio_request(unsigned gpio, const char *label)
+{
+	unsigned long flags;
+
+	local_irq_save_hw(flags);
+
+	/*
+	 * Allow that the identical GPIO can
+	 * be requested from the same driver twice
+	 * Do nothing and return -
+	 */
+
+	if (cmp_label(gpio, label) == 0) {
+		local_irq_restore_hw(flags);
+		return 0;
+	}
+
+	if (unlikely(reserved_special_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
+		local_irq_restore_hw(flags);
+		printk(KERN_ERR "bfin-gpio: GPIO %d is already reserved by %s !\n",
+			gpio, get_label(gpio));
+
+		return -EBUSY;
+	}
+	if (unlikely(reserved_peri_map[gpio_bank(gpio)] & gpio_bit(gpio))) {
+		local_irq_restore_hw(flags);
+		printk(KERN_ERR
+			"bfin-gpio: GPIO %d is already reserved as Peripheral by %s !\n",
+			gpio, get_label(gpio));
+
+		return -EBUSY;
+	}
+
+	reserved_special_gpio_map[gpio_bank(gpio)] |= gpio_bit(gpio);
+	reserved_peri_map[gpio_bank(gpio)] |= gpio_bit(gpio);
+
+	set_label(gpio, label);
+	local_irq_restore_hw(flags);
+	port_setup(gpio, GPIO_USAGE);
+
+	return 0;
+}
+EXPORT_SYMBOL(bfin_special_gpio_request);
+
+void bfin_special_gpio_free(unsigned gpio)
+{
+	unsigned long flags;
+
+	might_sleep();
+
+	local_irq_save_hw(flags);
+
+	if (unlikely(!(reserved_special_gpio_map[gpio_bank(gpio)] & gpio_bit(gpio)))) {
+		gpio_error(gpio);
+		local_irq_restore_hw(flags);
+		return;
+	}
+
+	reserved_special_gpio_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
+	reserved_peri_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
+	set_label(gpio, "free");
+	local_irq_restore_hw(flags);
+}
+EXPORT_SYMBOL(bfin_special_gpio_free);
+#endif
+
+
 int bfin_gpio_irq_request(unsigned gpio, const char *label)
 {
 	unsigned long flags;
@@ -1190,44 +1289,50 @@ __initcall(gpio_register_proc);
 #endif
 
 #ifdef CONFIG_GPIOLIB
-int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio)
+static int bfin_gpiolib_direction_input(struct gpio_chip *chip, unsigned gpio)
 {
 	return bfin_gpio_direction_input(gpio);
 }
 
-int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
+static int bfin_gpiolib_direction_output(struct gpio_chip *chip, unsigned gpio, int level)
 {
 	return bfin_gpio_direction_output(gpio, level);
 }
 
-int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio)
+static int bfin_gpiolib_get_value(struct gpio_chip *chip, unsigned gpio)
 {
 	return bfin_gpio_get_value(gpio);
 }
 
-void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value)
+static void bfin_gpiolib_set_value(struct gpio_chip *chip, unsigned gpio, int value)
 {
 	return bfin_gpio_set_value(gpio, value);
 }
 
-int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio)
+static int bfin_gpiolib_gpio_request(struct gpio_chip *chip, unsigned gpio)
 {
 	return bfin_gpio_request(gpio, chip->label);
 }
 
-void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio)
+static void bfin_gpiolib_gpio_free(struct gpio_chip *chip, unsigned gpio)
 {
 	return bfin_gpio_free(gpio);
 }
 
+static int bfin_gpiolib_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
+{
+	return gpio + GPIO_IRQ_BASE;
+}
+
 static struct gpio_chip bfin_chip = {
-	.label			= "Blackfin-GPIOlib",
+	.label			= "BFIN-GPIO",
 	.direction_input	= bfin_gpiolib_direction_input,
 	.get			= bfin_gpiolib_get_value,
 	.direction_output	= bfin_gpiolib_direction_output,
 	.set			= bfin_gpiolib_set_value,
 	.request		= bfin_gpiolib_gpio_request,
 	.free			= bfin_gpiolib_gpio_free,
+	.to_irq			= bfin_gpiolib_gpio_to_irq,
 	.base			= 0,
 	.ngpio			= MAX_BLACKFIN_GPIOS,
 };
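
The new bfin_special_gpio_request()/bfin_special_gpio_free() pair reuses the
driver's reserved-bitmap bookkeeping: one bit per pin, grouped into 16-bit
banks. A standalone sketch of that reserve/release logic; gpio_bank(),
gpio_bit() and the sizes below are simplified stand-ins for the real macros:

    #include <stdio.h>

    #define GPIO_BANKSIZE 16
    #define MAX_RESOURCES 64                /* illustrative */

    static unsigned short reserved_map[MAX_RESOURCES / GPIO_BANKSIZE];

    static unsigned gpio_bank(unsigned gpio) { return gpio / GPIO_BANKSIZE; }
    static unsigned gpio_bit(unsigned gpio) { return 1u << (gpio % GPIO_BANKSIZE); }

    static int reserve(unsigned gpio)
    {
        if (reserved_map[gpio_bank(gpio)] & gpio_bit(gpio))
            return -1;                      /* -EBUSY: already reserved */
        reserved_map[gpio_bank(gpio)] |= gpio_bit(gpio);
        return 0;
    }

    static void unreserve(unsigned gpio)
    {
        reserved_map[gpio_bank(gpio)] &= ~gpio_bit(gpio);
    }

    int main(void)
    {
        int a = reserve(42);
        int b = reserve(42);

        printf("%d %d\n", a, b);            /* 0 -1 */
        unreserve(42);
        printf("%d\n", reserve(42));        /* 0 */
        return 0;
    }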
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbinit.c b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
index b52c1f8c4bc0..30fd6417f069 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbinit.c
@@ -64,6 +64,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 		icplb_tbl[cpu][i_i++].data = i_data | (addr == 0 ? CPLB_USER_RD : 0);
 	}
 
+#ifdef CONFIG_ROMKERNEL
+	/* Cover kernel XIP flash area */
+	addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
+	dcplb_tbl[cpu][i_d].addr = addr;
+	dcplb_tbl[cpu][i_d++].data = d_data | CPLB_USER_RD;
+	icplb_tbl[cpu][i_i].addr = addr;
+	icplb_tbl[cpu][i_i++].data = i_data | CPLB_USER_RD;
+#endif
+
 	/* Cover L1 memory. One 4M area for code and data each is enough. */
 #if L1_DATA_A_LENGTH > 0 || L1_DATA_B_LENGTH > 0
 	dcplb_tbl[cpu][i_d].addr = get_l1_data_a_start_cpu(cpu);
@@ -92,6 +101,6 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 	icplb_tbl[cpu][i_i++].data = 0;
 }
 
-void generate_cplb_tables_all(void)
+void __init generate_cplb_tables_all(void)
 {
 }
diff --git a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
index 69e0e530d70f..87b25b1b30ed 100644
--- a/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
+++ b/arch/blackfin/kernel/cplb-mpu/cplbmgr.c
@@ -31,6 +31,12 @@ int nr_dcplb_miss[NR_CPUS], nr_icplb_miss[NR_CPUS];
 int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
 int nr_cplb_flush[NR_CPUS];
 
+#ifdef CONFIG_EXCPT_IRQ_SYSC_L1
+#define MGR_ATTR __attribute__((l1_text))
+#else
+#define MGR_ATTR
+#endif
+
 /*
  * Given the contents of the status register, return the index of the
  * CPLB that caused the fault.
@@ -59,7 +65,7 @@ static int icplb_rr_index[NR_CPUS], dcplb_rr_index[NR_CPUS];
 /*
  * Find an ICPLB entry to be evicted and return its index.
  */
-static int evict_one_icplb(unsigned int cpu)
+MGR_ATTR static int evict_one_icplb(unsigned int cpu)
 {
 	int i;
 	for (i = first_switched_icplb; i < MAX_CPLBS; i++)
@@ -74,7 +80,7 @@ static int evict_one_icplb(unsigned int cpu)
 	return i;
 }
 
-static int evict_one_dcplb(unsigned int cpu)
+MGR_ATTR static int evict_one_dcplb(unsigned int cpu)
 {
 	int i;
 	for (i = first_switched_dcplb; i < MAX_CPLBS; i++)
@@ -89,7 +95,7 @@ static int evict_one_dcplb(unsigned int cpu)
 	return i;
 }
 
-static noinline int dcplb_miss(unsigned int cpu)
+MGR_ATTR static noinline int dcplb_miss(unsigned int cpu)
 {
 	unsigned long addr = bfin_read_DCPLB_FAULT_ADDR();
 	int status = bfin_read_DCPLB_STATUS();
@@ -113,11 +119,16 @@ static noinline int dcplb_miss(unsigned int cpu)
 		addr = L2_START;
 		d_data = L2_DMEMORY;
 	} else if (addr >= physical_mem_end) {
-		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE
-		    && (status & FAULT_USERSUPV)) {
-			addr &= ~0x3fffff;
-			d_data &= ~PAGE_SIZE_4KB;
-			d_data |= PAGE_SIZE_4MB;
+		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
+			mask = current_rwx_mask[cpu];
+			if (mask) {
+				int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
+				int idx = page >> 5;
+				int bit = 1 << (page & 31);
+
+				if (mask[idx] & bit)
+					d_data |= CPLB_USER_RD;
+			}
 		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
 			   && (status & (FAULT_RW | FAULT_USERSUPV)) == FAULT_USERSUPV) {
 			addr &= ~(1 * 1024 * 1024 - 1);
@@ -126,7 +137,9 @@ static noinline int dcplb_miss(unsigned int cpu)
 		} else
 			return CPLB_PROT_VIOL;
 	} else if (addr >= _ramend) {
 		d_data |= CPLB_USER_RD | CPLB_USER_WR;
+		if (reserved_mem_dcache_on)
+			d_data |= CPLB_L1_CHBL;
 	} else {
 		mask = current_rwx_mask[cpu];
 		if (mask) {
@@ -156,7 +169,7 @@ static noinline int dcplb_miss(unsigned int cpu)
 	return 0;
 }
 
-static noinline int icplb_miss(unsigned int cpu)
+MGR_ATTR static noinline int icplb_miss(unsigned int cpu)
 {
 	unsigned long addr = bfin_read_ICPLB_FAULT_ADDR();
 	int status = bfin_read_ICPLB_STATUS();
@@ -203,7 +216,21 @@ static noinline int icplb_miss(unsigned int cpu)
 		addr = L2_START;
 		i_data = L2_IMEMORY;
 	} else if (addr >= physical_mem_end) {
-		if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
+		if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
+			if (!(status & FAULT_USERSUPV)) {
+				unsigned long *mask = current_rwx_mask[cpu];
+
+				if (mask) {
+					int page = (addr - (ASYNC_BANK0_BASE - _ramend)) >> PAGE_SHIFT;
+					int idx = page >> 5;
+					int bit = 1 << (page & 31);
+
+					mask += 2 * page_mask_nelts;
+					if (mask[idx] & bit)
+						i_data |= CPLB_USER_RD;
+				}
+			}
+		} else if (addr >= BOOT_ROM_START && addr < BOOT_ROM_START + BOOT_ROM_LENGTH
 		    && (status & FAULT_USERSUPV)) {
 			addr &= ~(1 * 1024 * 1024 - 1);
 			i_data &= ~PAGE_SIZE_4KB;
@@ -212,6 +239,8 @@ static noinline int icplb_miss(unsigned int cpu)
 			return CPLB_PROT_VIOL;
 	} else if (addr >= _ramend) {
 		i_data |= CPLB_USER_RD;
+		if (reserved_mem_icache_on)
+			i_data |= CPLB_L1_CHBL;
 	} else {
 		/*
 		 * Two cases to distinguish - a supervisor access must
@@ -246,7 +275,7 @@ static noinline int icplb_miss(unsigned int cpu)
 	return 0;
 }
 
-static noinline int dcplb_protection_fault(unsigned int cpu)
+MGR_ATTR static noinline int dcplb_protection_fault(unsigned int cpu)
 {
 	int status = bfin_read_DCPLB_STATUS();
 
@@ -266,7 +295,7 @@ static noinline int dcplb_protection_fault(unsigned int cpu)
 	return CPLB_PROT_VIOL;
 }
 
-int cplb_hdr(int seqstat, struct pt_regs *regs)
+MGR_ATTR int cplb_hdr(int seqstat, struct pt_regs *regs)
 {
 	int cause = seqstat & 0x3f;
 	unsigned int cpu = raw_smp_processor_id();
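
Both new async-bank branches do the same bitmap lookup: turn the faulting
address into a page number, then split that page number into a 32-bit word
index (page >> 5) and a bit position (page & 31). A small standalone demo of
the arithmetic (the PAGE_SHIFT value and region base are illustrative):

    #include <stdio.h>

    #define PAGE_SHIFT 12                   /* 4kB pages, as on Blackfin */

    static int page_readable(const unsigned int *mask, unsigned long addr,
                             unsigned long region_base)
    {
        int page = (addr - region_base) >> PAGE_SHIFT;  /* page in region  */
        int idx  = page >> 5;                           /* 32-bit word     */
        int bit  = 1 << (page & 31);                    /* bit within word */

        return (mask[idx] & bit) != 0;
    }

    int main(void)
    {
        unsigned int mask[4] = { 0 };

        mask[1] |= 1 << 3;      /* mark page 35 (= 32 + 3) readable */
        printf("%d\n", page_readable(mask, 35UL << PAGE_SHIFT, 0)); /* 1 */
        printf("%d\n", page_readable(mask, 36UL << PAGE_SHIFT, 0)); /* 0 */
        return 0;
    }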
diff --git a/arch/blackfin/kernel/cplb-nompu/cplbinit.c b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
index fd9a2f31e686..bfe75af4e8bd 100644
--- a/arch/blackfin/kernel/cplb-nompu/cplbinit.c
+++ b/arch/blackfin/kernel/cplb-nompu/cplbinit.c
@@ -56,6 +56,15 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 		i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
 	}
 
+#ifdef CONFIG_ROMKERNEL
+	/* Cover kernel XIP flash area */
+	addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1);
+	d_tbl[i_d].addr = addr;
+	d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB;
+	i_tbl[i_i].addr = addr;
+	i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB;
+#endif
+
 	/* Cover L1 memory. One 4M area for code and data each is enough. */
 	if (cpu == 0) {
 		if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) {
@@ -89,15 +98,25 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 
 void __init generate_cplb_tables_all(void)
 {
+	unsigned long uncached_end;
 	int i_d, i_i;
 
 	i_d = 0;
 	/* Normal RAM, including MTD FS. */
 #ifdef CONFIG_MTD_UCLINUX
-	dcplb_bounds[i_d].eaddr = memory_mtd_start + mtd_size;
+	uncached_end = memory_mtd_start + mtd_size;
 #else
-	dcplb_bounds[i_d].eaddr = memory_end;
+	uncached_end = memory_end;
 #endif
+	/*
+	 * if DMA uncached is less than 1MB, mark the 1MB chunk as uncached
+	 * so that we don't have to use 4kB pages and cause CPLB thrashing
+	 */
+	if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION ||
+	    ((_ramend - uncached_end) >= 1 * 1024 * 1024))
+		dcplb_bounds[i_d].eaddr = uncached_end;
+	else
+		dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024);
 	dcplb_bounds[i_d++].data = SDRAM_DGENERIC;
 	/* DMA uncached region. */
 	if (DMA_UNCACHED_REGION) {
@@ -135,18 +154,15 @@ void __init generate_cplb_tables_all(void)
 
 	i_i = 0;
 	/* Normal RAM, including MTD FS. */
-#ifdef CONFIG_MTD_UCLINUX
-	icplb_bounds[i_i].eaddr = memory_mtd_start + mtd_size;
-#else
-	icplb_bounds[i_i].eaddr = memory_end;
-#endif
+	icplb_bounds[i_i].eaddr = uncached_end;
 	icplb_bounds[i_i++].data = SDRAM_IGENERIC;
-	/* DMA uncached region. */
-	if (DMA_UNCACHED_REGION) {
-		icplb_bounds[i_i].eaddr = _ramend;
-		icplb_bounds[i_i++].data = 0;
-	}
 	if (_ramend != physical_mem_end) {
+		/* DMA uncached region. */
+		if (DMA_UNCACHED_REGION) {
+			/* Normally this hole is caught by the async below. */
+			icplb_bounds[i_i].eaddr = _ramend;
+			icplb_bounds[i_i++].data = 0;
+		}
 		/* Reserved memory. */
 		icplb_bounds[i_i].eaddr = physical_mem_end;
 		icplb_bounds[i_i++].data = (reserved_mem_icache_on ?
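
The reworked dcplb_bounds setup keeps the cached/uncached split 1MB-coarse
when the DMA-uncached gap would otherwise be small, trading a little cached
RAM for large CPLB pages instead of 4kB ones (and the thrashing they cause).
A standalone sketch of just that decision; the sizes are made up, and the
mask expression is reproduced verbatim from the committed code:

    #include <stdio.h>

    #define MB (1024UL * 1024)

    static unsigned long cached_bound(unsigned long uncached_end,
                                      unsigned long ramend,
                                      unsigned long dma_uncached)
    {
        if (dma_uncached >= 1 * MB || !dma_uncached ||
            (ramend - uncached_end) >= 1 * MB)
            return uncached_end;
        /* committed code masks with ~(1 * 1024 * 1024), reproduced as-is */
        return uncached_end & ~(1 * MB);
    }

    int main(void)
    {
        unsigned long ramend = 64 * MB;                   /* 64MB of RAM */
        unsigned long uncached_end = ramend - 512 * 1024; /* 512kB gap   */

        printf("bound = 0x%lx\n",
               cached_bound(uncached_end, ramend, 512 * 1024));
        return 0;
    }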
diff --git a/arch/blackfin/kernel/dma-mapping.c b/arch/blackfin/kernel/dma-mapping.c
index e74e74d7733f..04ddcfeb7981 100644
--- a/arch/blackfin/kernel/dma-mapping.c
+++ b/arch/blackfin/kernel/dma-mapping.c
@@ -7,30 +7,25 @@
  */
 
 #include <linux/types.h>
-#include <linux/mm.h>
+#include <linux/gfp.h>
 #include <linux/string.h>
-#include <linux/bootmem.h>
 #include <linux/spinlock.h>
-#include <linux/device.h>
 #include <linux/dma-mapping.h>
-#include <linux/io.h>
 #include <linux/scatterlist.h>
-#include <asm/cacheflush.h>
-#include <asm/bfin-global.h>
 
 static spinlock_t dma_page_lock;
-static unsigned int *dma_page;
+static unsigned long *dma_page;
 static unsigned int dma_pages;
 static unsigned long dma_base;
 static unsigned long dma_size;
 static unsigned int dma_initialized;
 
-void dma_alloc_init(unsigned long start, unsigned long end)
+static void dma_alloc_init(unsigned long start, unsigned long end)
 {
 	spin_lock_init(&dma_page_lock);
 	dma_initialized = 0;
 
-	dma_page = (unsigned int *)__get_free_page(GFP_KERNEL);
+	dma_page = (unsigned long *)__get_free_page(GFP_KERNEL);
 	memset(dma_page, 0, PAGE_SIZE);
 	dma_base = PAGE_ALIGN(start);
 	dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start);
@@ -58,10 +53,11 @@ static unsigned long __alloc_dma_pages(unsigned int pages)
 	spin_lock_irqsave(&dma_page_lock, flags);
 
 	for (i = 0; i < dma_pages;) {
-		if (dma_page[i++] == 0) {
+		if (test_bit(i++, dma_page) == 0) {
 			if (++count == pages) {
 				while (count--)
-					dma_page[--i] = 1;
+					__set_bit(--i, dma_page);
+
 				ret = dma_base + (i << PAGE_SHIFT);
 				break;
 			}
@@ -84,14 +80,14 @@ static void __free_dma_pages(unsigned long addr, unsigned int pages)
 	}
 
 	spin_lock_irqsave(&dma_page_lock, flags);
-	for (i = page; i < page + pages; i++) {
-		dma_page[i] = 0;
-	}
+	for (i = page; i < page + pages; i++)
+		__clear_bit(i, dma_page);
+
 	spin_unlock_irqrestore(&dma_page_lock, flags);
 }
 
 void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t * dma_handle, gfp_t gfp)
+		dma_addr_t *dma_handle, gfp_t gfp)
 {
 	void *ret;
 
@@ -115,21 +111,14 @@ dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 EXPORT_SYMBOL(dma_free_coherent);
 
 /*
- * Dummy functions defined for some existing drivers
+ * Streaming DMA mappings
  */
-
-dma_addr_t
-dma_map_single(struct device *dev, void *ptr, size_t size,
-	       enum dma_data_direction direction)
+void __dma_sync(dma_addr_t addr, size_t size,
+		enum dma_data_direction dir)
 {
-	BUG_ON(direction == DMA_NONE);
-
-	invalidate_dcache_range((unsigned long)ptr,
-			(unsigned long)ptr + size);
-
-	return (dma_addr_t) ptr;
+	__dma_sync_inline(addr, size, dir);
 }
-EXPORT_SYMBOL(dma_map_single);
+EXPORT_SYMBOL(__dma_sync);
 
 int
 dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
@@ -137,30 +126,23 @@ dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 {
 	int i;
 
-	BUG_ON(direction == DMA_NONE);
-
 	for (i = 0; i < nents; i++, sg++) {
 		sg->dma_address = (dma_addr_t) sg_virt(sg);
-
-		invalidate_dcache_range(sg_dma_address(sg),
-					sg_dma_address(sg) +
-					sg_dma_len(sg));
+		__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
 	}
 
 	return nents;
 }
 EXPORT_SYMBOL(dma_map_sg);
 
-void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction direction)
+void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+		int nelems, enum dma_data_direction direction)
 {
-	BUG_ON(direction == DMA_NONE);
-}
-EXPORT_SYMBOL(dma_unmap_single);
+	int i;
 
-void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
-		int nhwentries, enum dma_data_direction direction)
-{
-	BUG_ON(direction == DMA_NONE);
-}
-EXPORT_SYMBOL(dma_unmap_sg);
+	for (i = 0; i < nelems; i++, sg++) {
+		sg->dma_address = (dma_addr_t) sg_virt(sg);
+		__dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction);
+	}
+}
+EXPORT_SYMBOL(dma_sync_sg_for_device);
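
__alloc_dma_pages() now treats dma_page as a genuine bitmap through
test_bit()/__set_bit() rather than one int per page, an 8x density win on
32-bit. A self-contained model of its first-fit scan for a run of free
pages; the bit helpers stand in for the kernel bitops, and the run-counter
reset on an occupied page (context outside the visible hunk) is assumed:

    #include <limits.h>
    #include <stdio.h>

    #define DMA_PAGES 32
    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long bitmap[DMA_PAGES / BITS_PER_LONG + 1];

    static int test_bit_(int nr, const unsigned long *map)
    {
        return (map[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1;
    }

    static void set_bit_(int nr, unsigned long *map)
    {
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
    }

    /* Return the first index of a free run of 'pages' pages, or -1. */
    static int alloc_pages_(unsigned int pages)
    {
        unsigned int i, count = 0;

        for (i = 0; i < DMA_PAGES;) {
            if (test_bit_(i++, bitmap) == 0) {
                if (++count == pages) {
                    while (count--)
                        set_bit_(--i, bitmap);  /* claim the whole run */
                    return i;
                }
            } else
                count = 0;                      /* run broken, start over */
        }
        return -1;
    }

    int main(void)
    {
        int a = alloc_pages_(4);
        int b = alloc_pages_(4);

        printf("%d %d\n", a, b);                /* 0 4 */
        return 0;
    }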
diff --git a/arch/blackfin/kernel/entry.S b/arch/blackfin/kernel/entry.S
index f27dc2292e1b..686478f5f66b 100644
--- a/arch/blackfin/kernel/entry.S
+++ b/arch/blackfin/kernel/entry.S
@@ -44,7 +44,7 @@ ENTRY(_ret_from_fork)
 	sti r4;
 #endif /* CONFIG_IPIPE */
 	SP += -12;
-	call _schedule_tail;
+	pseudo_long_call _schedule_tail, p5;
 	SP += 12;
 	r0 = [sp + PT_IPEND];
 	cc = bittst(r0,1);
@@ -79,7 +79,7 @@ ENTRY(_sys_vfork)
 	r0 += 24;
 	[--sp] = rets;
 	SP += -12;
-	call _bfin_vfork;
+	pseudo_long_call _bfin_vfork, p2;
 	SP += 12;
 	rets = [sp++];
 	rts;
@@ -90,7 +90,7 @@ ENTRY(_sys_clone)
 	r0 += 24;
 	[--sp] = rets;
 	SP += -12;
-	call _bfin_clone;
+	pseudo_long_call _bfin_clone, p2;
 	SP += 12;
 	rets = [sp++];
 	rts;
@@ -101,7 +101,7 @@ ENTRY(_sys_rt_sigreturn)
 	r0 += 24;
 	[--sp] = rets;
 	SP += -12;
-	call _do_rt_sigreturn;
+	pseudo_long_call _do_rt_sigreturn, p2;
 	SP += 12;
 	rets = [sp++];
 	rts;
diff --git a/arch/blackfin/kernel/ftrace-entry.S b/arch/blackfin/kernel/ftrace-entry.S
index 76dd4fbcd17a..d66446b572c0 100644
--- a/arch/blackfin/kernel/ftrace-entry.S
+++ b/arch/blackfin/kernel/ftrace-entry.S
@@ -1,7 +1,7 @@
 /*
  * mcount and friends -- ftrace stuff
  *
- * Copyright (C) 2009 Analog Devices Inc.
+ * Copyright (C) 2009-2010 Analog Devices Inc.
  * Licensed under the GPL-2 or later.
  */
 
@@ -21,6 +21,15 @@
  * function will be waiting there. mmmm pie.
  */
 ENTRY(__mcount)
+#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
+	/* optional micro optimization: return if stopped */
+	p1.l = _function_trace_stop;
+	p1.h = _function_trace_stop;
+	r3 = [p1];
+	cc = r3 == 0;
+	if ! cc jump _ftrace_stub (bp);
+#endif
+
 	/* save third function arg early so we can do testing below */
 	[--sp] = r2;
 
@@ -106,9 +115,12 @@ ENTRY(_ftrace_graph_caller)
 	[--sp] = r1;
 	[--sp] = rets;
 
-	/* prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) */
-	r0 = sp;
-	r1 = rets;
+	/* prepare_ftrace_return(parent, self_addr, frame_pointer) */
+	r0 = sp;	/* unsigned long *parent */
+	r1 = rets;	/* unsigned long self_addr */
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+	r2 = fp;	/* unsigned long frame_pointer */
+#endif
 	r0 += 16;	/* skip the 4 local regs on stack */
 	r1 += -MCOUNT_INSN_SIZE;
 	call _prepare_ftrace_return;
@@ -127,6 +139,9 @@ ENTRY(_return_to_handler)
 	[--sp] = r1;
 
 	/* get original return address */
+#ifdef CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST
+	r0 = fp;	/* Blackfin is sane, so omit this */
+#endif
 	call _ftrace_return_to_handler;
 	rets = r0;
 
diff --git a/arch/blackfin/kernel/ftrace.c b/arch/blackfin/kernel/ftrace.c
index f2c85ac6f2da..a61d948ea925 100644
--- a/arch/blackfin/kernel/ftrace.c
+++ b/arch/blackfin/kernel/ftrace.c
@@ -16,7 +16,8 @@
  * Hook the return address and push it in the stack of return addrs
  * in current thread info.
  */
-void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+			   unsigned long frame_pointer)
 {
 	struct ftrace_graph_ent trace;
 	unsigned long return_hooker = (unsigned long)&return_to_handler;
@@ -24,7 +25,8 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
 	if (unlikely(atomic_read(&current->tracing_graph_pause)))
 		return;
 
-	if (ftrace_push_return_trace(*parent, self_addr, &trace.depth, 0) == -EBUSY)
+	if (ftrace_push_return_trace(*parent, self_addr, &trace.depth,
+				     frame_pointer) == -EBUSY)
 		return;
 
 	trace.func = self_addr;
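
prepare_ftrace_return() now threads a frame pointer through to
ftrace_push_return_trace(). The graph tracer keeps a per-task shadow stack
of hooked return sites; with CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST the frame
pointer saved at entry can be compared against the one seen at exit to catch
a corrupted unwind. A toy model of that idea, not the kernel implementation:

    #include <stdio.h>

    struct ret_entry {
        unsigned long ret;  /* original return address */
        unsigned long fp;   /* frame pointer at hook time */
    };

    static struct ret_entry stack[16];
    static int depth;

    static int push_return(unsigned long ret, unsigned long fp)
    {
        if (depth == 16)
            return -1;      /* -EBUSY in the kernel */
        stack[depth].ret = ret;
        stack[depth].fp = fp;
        depth++;
        return 0;
    }

    static unsigned long pop_return(unsigned long fp)
    {
        depth--;
        if (stack[depth].fp != fp) {
            printf("fp mismatch: bad unwind!\n");
            return 0;
        }
        return stack[depth].ret;
    }

    int main(void)
    {
        push_return(0xdeadbeef, 0x1000);
        printf("ret = 0x%lx\n", pop_return(0x1000)); /* 0xdeadbeef */
        return 0;
    }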
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index 7281a91d26b5..cdbe075de1dc 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -137,7 +137,7 @@ static uint32_t const timil_mask[MAX_BLACKFIN_GPTIMERS] =
 #endif
 };
 
-void set_gptimer_pwidth(int timer_id, uint32_t value)
+void set_gptimer_pwidth(unsigned int timer_id, uint32_t value)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	timer_regs[timer_id]->width = value;
@@ -145,14 +145,14 @@ void set_gptimer_pwidth(int timer_id, uint32_t value)
 }
 EXPORT_SYMBOL(set_gptimer_pwidth);
 
-uint32_t get_gptimer_pwidth(int timer_id)
+uint32_t get_gptimer_pwidth(unsigned int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	return timer_regs[timer_id]->width;
 }
 EXPORT_SYMBOL(get_gptimer_pwidth);
 
-void set_gptimer_period(int timer_id, uint32_t period)
+void set_gptimer_period(unsigned int timer_id, uint32_t period)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	timer_regs[timer_id]->period = period;
@@ -160,28 +160,28 @@ void set_gptimer_period(int timer_id, uint32_t period)
 }
 EXPORT_SYMBOL(set_gptimer_period);
 
-uint32_t get_gptimer_period(int timer_id)
+uint32_t get_gptimer_period(unsigned int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	return timer_regs[timer_id]->period;
 }
 EXPORT_SYMBOL(get_gptimer_period);
 
-uint32_t get_gptimer_count(int timer_id)
+uint32_t get_gptimer_count(unsigned int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	return timer_regs[timer_id]->counter;
 }
 EXPORT_SYMBOL(get_gptimer_count);
 
-uint32_t get_gptimer_status(int group)
+uint32_t get_gptimer_status(unsigned int group)
 {
 	tassert(group < BFIN_TIMER_NUM_GROUP);
 	return group_regs[group]->status;
 }
 EXPORT_SYMBOL(get_gptimer_status);
 
-void set_gptimer_status(int group, uint32_t value)
+void set_gptimer_status(unsigned int group, uint32_t value)
 {
 	tassert(group < BFIN_TIMER_NUM_GROUP);
 	group_regs[group]->status = value;
@@ -189,42 +189,42 @@ void set_gptimer_status(int group, uint32_t value)
 }
 EXPORT_SYMBOL(set_gptimer_status);
 
-int get_gptimer_intr(int timer_id)
+int get_gptimer_intr(unsigned int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & timil_mask[timer_id]);
 }
 EXPORT_SYMBOL(get_gptimer_intr);
 
-void clear_gptimer_intr(int timer_id)
+void clear_gptimer_intr(unsigned int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	group_regs[BFIN_TIMER_OCTET(timer_id)]->status = timil_mask[timer_id];
 }
 EXPORT_SYMBOL(clear_gptimer_intr);
 
-int get_gptimer_over(int timer_id)
+int get_gptimer_over(unsigned int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & tovf_mask[timer_id]);
 }
 EXPORT_SYMBOL(get_gptimer_over);
 
-void clear_gptimer_over(int timer_id)
+void clear_gptimer_over(unsigned int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	group_regs[BFIN_TIMER_OCTET(timer_id)]->status = tovf_mask[timer_id];
 }
 EXPORT_SYMBOL(clear_gptimer_over);
 
-int get_gptimer_run(int timer_id)
+int get_gptimer_run(unsigned int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	return !!(group_regs[BFIN_TIMER_OCTET(timer_id)]->status & trun_mask[timer_id]);
 }
 EXPORT_SYMBOL(get_gptimer_run);
 
-void set_gptimer_config(int timer_id, uint16_t config)
+void set_gptimer_config(unsigned int timer_id, uint16_t config)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	timer_regs[timer_id]->config = config;
@@ -232,7 +232,7 @@ void set_gptimer_config(int timer_id, uint16_t config)
 }
 EXPORT_SYMBOL(set_gptimer_config);
 
-uint16_t get_gptimer_config(int timer_id)
+uint16_t get_gptimer_config(unsigned int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	return timer_regs[timer_id]->config;
@@ -280,7 +280,7 @@ void disable_gptimers_sync(uint16_t mask)
 }
 EXPORT_SYMBOL(disable_gptimers_sync);
 
-void set_gptimer_pulse_hi(int timer_id)
+void set_gptimer_pulse_hi(unsigned int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	timer_regs[timer_id]->config |= TIMER_PULSE_HI;
@@ -288,7 +288,7 @@ void set_gptimer_pulse_hi(int timer_id)
 }
 EXPORT_SYMBOL(set_gptimer_pulse_hi);
 
-void clear_gptimer_pulse_hi(int timer_id)
+void clear_gptimer_pulse_hi(unsigned int timer_id)
 {
 	tassert(timer_id < MAX_BLACKFIN_GPTIMERS);
 	timer_regs[timer_id]->config &= ~TIMER_PULSE_HI;
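
Every accessor in gptimers.c switches its timer_id/group argument from int
to unsigned int: the single bounds assertion then also rejects negative
caller values, which wrap to huge unsigned numbers and fail the same '<'
test. A tiny illustration (MAX_TIMERS is made up):

    #include <assert.h>
    #include <stdio.h>

    #define MAX_TIMERS 8

    static void use_timer(unsigned int timer_id)
    {
        /* one comparison covers both "too big" and "negative" callers */
        assert(timer_id < MAX_TIMERS);
        printf("timer %u ok\n", timer_id);
    }

    int main(void)
    {
        use_timer(3);       /* fine */
        /* use_timer(-1) would wrap to 4294967295 and trip the assert */
        return 0;
    }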
diff --git a/arch/blackfin/kernel/init_task.c b/arch/blackfin/kernel/init_task.c
index 118c5b9dedac..d3970e8acd1a 100644
--- a/arch/blackfin/kernel/init_task.c
+++ b/arch/blackfin/kernel/init_task.c
@@ -28,5 +28,5 @@ EXPORT_SYMBOL(init_task);
28 * "init_task" linker map entry. 28 * "init_task" linker map entry.
29 */ 29 */
30union thread_union init_thread_union 30union thread_union init_thread_union
31 __attribute__ ((__section__(".init_task.data"))) = { 31 __init_task_data = {
32INIT_THREAD_INFO(init_task)}; 32INIT_THREAD_INFO(init_task)};
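
__init_task_data replaces the open-coded section attribute with the common
helper from linux/init_task.h. Its definition in kernels of this era was
roughly the following; treat the exact section name as an assumption:

    /* assumed definition, cf. include/linux/init_task.h circa 2.6.33 */
    #define __init_task_data __attribute__((__section__(".data.init_task")))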
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 5d7382396dc0..1a496cd71ba2 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -27,7 +27,6 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <linux/bitops.h>
-#include <linux/slab.h>
 #include <linux/errno.h>
 #include <linux/kthread.h>
 #include <linux/unistd.h>
@@ -335,3 +334,70 @@ void __ipipe_enable_root_irqs_hw(void)
 	__clear_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
 	bfin_sti(bfin_irq_flags);
 }
+
+/*
+ * We could use standard atomic bitops in the following root status
+ * manipulation routines, but let's prepare for SMP support in the
+ * same move, preventing CPU migration as required.
+ */
+void __ipipe_stall_root(void)
+{
+	unsigned long *p, flags;
+
+	local_irq_save_hw(flags);
+	p = &__ipipe_root_status;
+	__set_bit(IPIPE_STALL_FLAG, p);
+	local_irq_restore_hw(flags);
+}
+EXPORT_SYMBOL(__ipipe_stall_root);
+
+unsigned long __ipipe_test_and_stall_root(void)
+{
+	unsigned long *p, flags;
+	int x;
+
+	local_irq_save_hw(flags);
+	p = &__ipipe_root_status;
+	x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
+	local_irq_restore_hw(flags);
+
+	return x;
+}
+EXPORT_SYMBOL(__ipipe_test_and_stall_root);
+
+unsigned long __ipipe_test_root(void)
+{
+	const unsigned long *p;
+	unsigned long flags;
+	int x;
+
+	local_irq_save_hw_smp(flags);
+	p = &__ipipe_root_status;
+	x = test_bit(IPIPE_STALL_FLAG, p);
+	local_irq_restore_hw_smp(flags);
+
+	return x;
+}
+EXPORT_SYMBOL(__ipipe_test_root);
+
+void __ipipe_lock_root(void)
+{
+	unsigned long *p, flags;
+
+	local_irq_save_hw(flags);
+	p = &__ipipe_root_status;
+	__set_bit(IPIPE_SYNCDEFER_FLAG, p);
+	local_irq_restore_hw(flags);
+}
+EXPORT_SYMBOL(__ipipe_lock_root);
+
+void __ipipe_unlock_root(void)
+{
+	unsigned long *p, flags;
+
+	local_irq_save_hw(flags);
+	p = &__ipipe_root_status;
+	__clear_bit(IPIPE_SYNCDEFER_FLAG, p);
+	local_irq_restore_hw(flags);
+}
+EXPORT_SYMBOL(__ipipe_unlock_root);
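
All five new root-status helpers follow one shape: mask hardware interrupts,
flip or test a single flag bit in __ipipe_root_status with a non-atomic
bitop, then unmask. A plain-C model of the stall/test-and-stall pair (flag
bit and storage are illustrative; the kernel does this under
local_irq_save_hw rather than in plain code):

    #include <stdio.h>

    #define STALL_FLAG 0    /* bit number, illustrative */

    static unsigned long root_status;

    static void stall_root(void)
    {
        /* kernel: local_irq_save_hw(flags); __set_bit(...); restore */
        root_status |= 1UL << STALL_FLAG;
    }

    static int test_and_stall_root(void)
    {
        int was = (root_status >> STALL_FLAG) & 1;

        root_status |= 1UL << STALL_FLAG;
        return was;
    }

    int main(void)
    {
        printf("%d\n", test_and_stall_root());  /* 0: was clear       */
        printf("%d\n", test_and_stall_root());  /* 1: already stalled */
        stall_root();
        return 0;
    }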
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index db9f9c91f11f..64cff54a8a58 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq)
 
 static struct irq_desc bad_irq_desc = {
 	.handle_irq = handle_bad_irq,
-	.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
+	.lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
 };
 
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	unsigned long flags;
 
 	if (i < NR_IRQS) {
-		spin_lock_irqsave(&irq_desc[i].lock, flags);
+		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
 		action = irq_desc[i].action;
 		if (!action)
 			goto skip;
@@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v)
 
 		seq_putc(p, '\n');
  skip:
-		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
 	} else if (i == NR_IRQS) {
 		seq_printf(p, "NMI: ");
 		for_each_online_cpu(j)
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index cce79d05b90b..2c501ceb1e55 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -6,33 +6,9 @@
6 * Licensed under the GPL-2 or later. 6 * Licensed under the GPL-2 or later.
7 */ 7 */
8 8
9#include <linux/string.h>
10#include <linux/kernel.h>
11#include <linux/sched.h>
12#include <linux/smp.h>
13#include <linux/spinlock.h>
14#include <linux/delay.h>
15#include <linux/ptrace.h> /* for linux pt_regs struct */ 9#include <linux/ptrace.h> /* for linux pt_regs struct */
16#include <linux/kgdb.h> 10#include <linux/kgdb.h>
17#include <linux/console.h>
18#include <linux/init.h>
19#include <linux/errno.h>
20#include <linux/irq.h>
21#include <linux/uaccess.h> 11#include <linux/uaccess.h>
22#include <asm/system.h>
23#include <asm/traps.h>
24#include <asm/blackfin.h>
25#include <asm/dma.h>
26
27/* Put the error code here just in case the user cares. */
28int gdb_bfin_errcode;
29/* Likewise, the vector number here (since GDB only gets the signal
30 number through the usual means, and that's not very specific). */
31int gdb_bfin_vector = -1;
32
33#if KGDB_MAX_NO_CPUS != 8
34#error change the definition of slavecpulocks
35#endif
36 12
37void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) 13void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
38{ 14{
@@ -157,7 +133,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
157 regs->lb1 = gdb_regs[BFIN_LB1]; 133 regs->lb1 = gdb_regs[BFIN_LB1];
158 regs->usp = gdb_regs[BFIN_USP]; 134 regs->usp = gdb_regs[BFIN_USP];
159 regs->syscfg = gdb_regs[BFIN_SYSCFG]; 135 regs->syscfg = gdb_regs[BFIN_SYSCFG];
160 regs->retx = gdb_regs[BFIN_PC]; 136 regs->retx = gdb_regs[BFIN_RETX];
161 regs->retn = gdb_regs[BFIN_RETN]; 137 regs->retn = gdb_regs[BFIN_RETN];
162 regs->rete = gdb_regs[BFIN_RETE]; 138 regs->rete = gdb_regs[BFIN_RETE];
163 regs->pc = gdb_regs[BFIN_PC]; 139 regs->pc = gdb_regs[BFIN_PC];
@@ -169,7 +145,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
169#endif 145#endif
170} 146}
171 147
172struct hw_breakpoint { 148static struct hw_breakpoint {
173 unsigned int occupied:1; 149 unsigned int occupied:1;
174 unsigned int skip:1; 150 unsigned int skip:1;
175 unsigned int enabled:1; 151 unsigned int enabled:1;
@@ -179,7 +155,7 @@ struct hw_breakpoint {
179 unsigned int addr; 155 unsigned int addr;
180} breakinfo[HW_WATCHPOINT_NUM]; 156} breakinfo[HW_WATCHPOINT_NUM];
181 157
182int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type) 158static int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
183{ 159{
184 int breakno; 160 int breakno;
185 int bfin_type; 161 int bfin_type;
@@ -226,7 +202,7 @@ int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
226 return -ENOSPC; 202 return -ENOSPC;
227} 203}
228 204
229int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type) 205static int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
230{ 206{
231 int breakno; 207 int breakno;
232 int bfin_type; 208 int bfin_type;
@@ -254,7 +230,7 @@ int bfin_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
254 return 0; 230 return 0;
255} 231}
256 232
257void bfin_remove_all_hw_break(void) 233static void bfin_remove_all_hw_break(void)
258{ 234{
259 int breakno; 235 int breakno;
260 236
@@ -266,7 +242,7 @@ void bfin_remove_all_hw_break(void)
266 breakinfo[breakno].type = TYPE_DATA_WATCHPOINT; 242 breakinfo[breakno].type = TYPE_DATA_WATCHPOINT;
267} 243}
268 244
269void bfin_correct_hw_break(void) 245static void bfin_correct_hw_break(void)
270{ 246{
271 int breakno; 247 int breakno;
272 unsigned int wpiactl = 0; 248 unsigned int wpiactl = 0;
@@ -369,13 +345,6 @@ void kgdb_roundup_cpu(int cpu, unsigned long flags)
369} 345}
370#endif 346#endif
371 347
372void kgdb_post_primary_code(struct pt_regs *regs, int eVector, int err_code)
373{
374 /* Master processor is completely in the debugger */
375 gdb_bfin_vector = eVector;
376 gdb_bfin_errcode = err_code;
377}
378
379int kgdb_arch_handle_exception(int vector, int signo, 348int kgdb_arch_handle_exception(int vector, int signo,
380 int err_code, char *remcom_in_buffer, 349 int err_code, char *remcom_in_buffer,
381 char *remcom_out_buffer, 350 char *remcom_out_buffer,
@@ -441,182 +410,6 @@ struct kgdb_arch arch_kgdb_ops = {
441 .correct_hw_break = bfin_correct_hw_break, 410 .correct_hw_break = bfin_correct_hw_break,
442}; 411};
443 412
444static int hex(char ch)
445{
446 if ((ch >= 'a') && (ch <= 'f'))
447 return ch - 'a' + 10;
448 if ((ch >= '0') && (ch <= '9'))
449 return ch - '0';
450 if ((ch >= 'A') && (ch <= 'F'))
451 return ch - 'A' + 10;
452 return -1;
453}
454
455static int validate_memory_access_address(unsigned long addr, int size)
456{
457 if (size < 0 || addr == 0)
458 return -EFAULT;
459 return bfin_mem_access_type(addr, size);
460}
461
462static int bfin_probe_kernel_read(char *dst, char *src, int size)
463{
464 unsigned long lsrc = (unsigned long)src;
465 int mem_type;
466
467 mem_type = validate_memory_access_address(lsrc, size);
468 if (mem_type < 0)
469 return mem_type;
470
471 if (lsrc >= SYSMMR_BASE) {
472 if (size == 2 && lsrc % 2 == 0) {
473 u16 mmr = bfin_read16(src);
474 memcpy(dst, &mmr, sizeof(mmr));
475 return 0;
476 } else if (size == 4 && lsrc % 4 == 0) {
477 u32 mmr = bfin_read32(src);
478 memcpy(dst, &mmr, sizeof(mmr));
479 return 0;
480 }
481 } else {
482 switch (mem_type) {
483 case BFIN_MEM_ACCESS_CORE:
484 case BFIN_MEM_ACCESS_CORE_ONLY:
485 return probe_kernel_read(dst, src, size);
486 /* XXX: should support IDMA here with SMP */
487 case BFIN_MEM_ACCESS_DMA:
488 if (dma_memcpy(dst, src, size))
489 return 0;
490 break;
491 case BFIN_MEM_ACCESS_ITEST:
492 if (isram_memcpy(dst, src, size))
493 return 0;
494 break;
495 }
496 }
497
498 return -EFAULT;
499}
500
501static int bfin_probe_kernel_write(char *dst, char *src, int size)
502{
503 unsigned long ldst = (unsigned long)dst;
504 int mem_type;
505
506 mem_type = validate_memory_access_address(ldst, size);
507 if (mem_type < 0)
508 return mem_type;
509
510 if (ldst >= SYSMMR_BASE) {
511 if (size == 2 && ldst % 2 == 0) {
512 u16 mmr;
513 memcpy(&mmr, src, sizeof(mmr));
514 bfin_write16(dst, mmr);
515 return 0;
516 } else if (size == 4 && ldst % 4 == 0) {
517 u32 mmr;
518 memcpy(&mmr, src, sizeof(mmr));
519 bfin_write32(dst, mmr);
520 return 0;
521 }
522 } else {
523 switch (mem_type) {
524 case BFIN_MEM_ACCESS_CORE:
525 case BFIN_MEM_ACCESS_CORE_ONLY:
526 return probe_kernel_write(dst, src, size);
527 /* XXX: should support IDMA here with SMP */
528 case BFIN_MEM_ACCESS_DMA:
529 if (dma_memcpy(dst, src, size))
530 return 0;
531 break;
532 case BFIN_MEM_ACCESS_ITEST:
533 if (isram_memcpy(dst, src, size))
534 return 0;
535 break;
536 }
537 }
538
539 return -EFAULT;
540}
541
542/*
543 * Convert the memory pointed to by mem into hex, placing result in buf.
544 * Return a pointer to the last char put in buf (null). May return an error.
545 */
546int kgdb_mem2hex(char *mem, char *buf, int count)
547{
548 char *tmp;
549 int err;
550
551 /*
552 * We use the upper half of buf as an intermediate buffer for the
553 * raw memory copy. Hex conversion will work against this one.
554 */
555 tmp = buf + count;
556
557 err = bfin_probe_kernel_read(tmp, mem, count);
558 if (!err) {
559 while (count > 0) {
560 buf = pack_hex_byte(buf, *tmp);
561 tmp++;
562 count--;
563 }
564
565 *buf = 0;
566 }
567
568 return err;
569}
570
571/*
572 * Copy the binary array pointed to by buf into mem. Fix $, #, and
573 * 0x7d escaped with 0x7d. Return a pointer to the character after
574 * the last byte written.
575 */
576int kgdb_ebin2mem(char *buf, char *mem, int count)
577{
578 char *tmp_old, *tmp_new;
579 int size;
580
581 tmp_old = tmp_new = buf;
582
583 for (size = 0; size < count; ++size) {
584 if (*tmp_old == 0x7d)
585 *tmp_new = *(++tmp_old) ^ 0x20;
586 else
587 *tmp_new = *tmp_old;
588 tmp_new++;
589 tmp_old++;
590 }
591
592 return bfin_probe_kernel_write(mem, buf, count);
593}
594
595/*
596 * Convert the hex array pointed to by buf into binary to be placed in mem.
597 * Return a pointer to the character AFTER the last byte written.
598 * May return an error.
599 */
600int kgdb_hex2mem(char *buf, char *mem, int count)
601{
602 char *tmp_raw, *tmp_hex;
603
604 /*
605 * We use the upper half of buf as an intermediate buffer for the
606 * raw memory that is converted from hex.
607 */
608 tmp_raw = buf + count * 2;
609
610 tmp_hex = tmp_raw - 1;
611 while (tmp_hex >= buf) {
612 tmp_raw--;
613 *tmp_raw = hex(*tmp_hex--);
614 *tmp_raw |= hex(*tmp_hex--) << 4;
615 }
616
617 return bfin_probe_kernel_write(mem, tmp_raw, count);
618}
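
Working back to front is what makes this conversion safe in place: the raw byte for the final hex pair overwrites that pair itself, and every earlier write lands on hex characters that were already consumed, leaving the count raw bytes in the upper half of buf. A user-space sketch, with a local hex() standing in for the kgdb helper:

#include <stdio.h>

static int hex(char ch)
{
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	if (ch >= 'A' && ch <= 'F')
		return ch - 'A' + 10;
	return -1;
}

int main(void)
{
	char buf[] = "12ab";
	int count = 2;
	char *raw = buf + count * 2;
	char *hx = raw - 1;

	while (hx >= buf) {
		raw--;
		*raw = hex(*hx--);
		*raw |= hex(*hx--) << 4;
	}
	/* raw == buf + count and holds the bytes 0x12 0xab */
	printf("%02x %02x\n", (unsigned char)raw[0], (unsigned char)raw[1]);
	return 0;
}
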
619
620#define IN_MEM(addr, size, l1_addr, l1_size) \ 413#define IN_MEM(addr, size, l1_addr, l1_size) \
621({ \ 414({ \
622 unsigned long __addr = (unsigned long)(addr); \ 415 unsigned long __addr = (unsigned long)(addr); \
@@ -646,21 +439,6 @@ int kgdb_validate_break_address(unsigned long addr)
646 return -EFAULT; 439 return -EFAULT;
647} 440}
648 441
649int kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
650{
651 int err = bfin_probe_kernel_read(saved_instr, (char *)addr,
652 BREAK_INSTR_SIZE);
653 if (err)
654 return err;
655 return bfin_probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
656 BREAK_INSTR_SIZE);
657}
658
659int kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
660{
661 return bfin_probe_kernel_write((char *)addr, bundle, BREAK_INSTR_SIZE);
662}
663
664int kgdb_arch_init(void) 442int kgdb_arch_init(void)
665{ 443{
666 kgdb_single_step = 0; 444 kgdb_single_step = 0;
diff --git a/arch/blackfin/kernel/kgdb_test.c b/arch/blackfin/kernel/kgdb_test.c
index 59fc42dc5d6a..9a4b07594389 100644
--- a/arch/blackfin/kernel/kgdb_test.c
+++ b/arch/blackfin/kernel/kgdb_test.c
@@ -17,8 +17,9 @@
17 17
18#include <asm/blackfin.h> 18#include <asm/blackfin.h>
19 19
20/* Symbols are here for kgdb test to poke directly */
20static char cmdline[256]; 21static char cmdline[256];
21static unsigned long len; 22static size_t len;
22 23
23#ifndef CONFIG_SMP 24#ifndef CONFIG_SMP
24static int num1 __attribute__((l1_data)); 25static int num1 __attribute__((l1_data));
@@ -27,11 +28,10 @@ void kgdb_l1_test(void) __attribute__((l1_text));
27 28
28void kgdb_l1_test(void) 29void kgdb_l1_test(void)
29{ 30{
30 printk(KERN_ALERT "L1(before change) : data variable addr = 0x%p, data value is %d\n", &num1, num1); 31 pr_alert("L1(before change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
31 printk(KERN_ALERT "L1 : code function addr = 0x%p\n", kgdb_l1_test); 32 pr_alert("L1 : code function addr = 0x%p\n", kgdb_l1_test);
32 num1 = num1 + 10 ; 33 num1 = num1 + 10;
33 printk(KERN_ALERT "L1(after change) : data variable addr = 0x%p, data value is %d\n", &num1, num1); 34 pr_alert("L1(after change) : data variable addr = 0x%p, data value is %d\n", &num1, num1);
34 return ;
35} 35}
36#endif 36#endif
37 37
@@ -42,11 +42,10 @@ void kgdb_l2_test(void) __attribute__((l2));
42 42
43void kgdb_l2_test(void) 43void kgdb_l2_test(void)
44{ 44{
45 printk(KERN_ALERT "L2(before change) : data variable addr = 0x%p, data value is %d\n", &num2, num2); 45 pr_alert("L2(before change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
46 printk(KERN_ALERT "L2 : code function addr = 0x%p\n", kgdb_l2_test); 46 pr_alert("L2 : code function addr = 0x%p\n", kgdb_l2_test);
47 num2 = num2 + 20 ; 47 num2 = num2 + 20;
48 printk(KERN_ALERT "L2(after change) : data variable addr = 0x%p, data value is %d\n", &num2, num2); 48 pr_alert("L2(after change) : data variable addr = 0x%p, data value is %d\n", &num2, num2);
49 return ;
50} 49}
51 50
52#endif 51#endif
@@ -54,12 +53,14 @@ void kgdb_l2_test(void)
54 53
55int kgdb_test(char *name, int len, int count, int z) 54int kgdb_test(char *name, int len, int count, int z)
56{ 55{
57 printk(KERN_ALERT "kgdb name(%d): %s, %d, %d\n", len, name, count, z); 56 pr_alert("kgdb name(%d): %s, %d, %d\n", len, name, count, z);
58 count = z; 57 count = z;
59 return count; 58 return count;
60} 59}
61 60
62static int test_proc_output(char *buf) 61static ssize_t
62kgdb_test_proc_read(struct file *file, char __user *buf,
63 size_t count, loff_t *ppos)
63{ 64{
64 kgdb_test("hello world!", 12, 0x55, 0x10); 65 kgdb_test("hello world!", 12, 0x55, 0x10);
65#ifndef CONFIG_SMP 66#ifndef CONFIG_SMP
@@ -72,49 +73,31 @@ static int test_proc_output(char *buf)
72 return 0; 73 return 0;
73} 74}
74 75
75static int test_read_proc(char *page, char **start, off_t off, 76static ssize_t
76 int count, int *eof, void *data) 77kgdb_test_proc_write(struct file *file, const char __user *buffer,
78 size_t count, loff_t *pos)
77{ 79{
78 int len; 80 len = min_t(size_t, 255, count);
79
80 len = test_proc_output(page);
81 if (len <= off+count)
82 *eof = 1;
83 *start = page + off;
84 len -= off;
85 if (len > count)
86 len = count;
87 if (len < 0)
88 len = 0;
89 return len;
90}
91
92static int test_write_proc(struct file *file, const char *buffer,
93 unsigned long count, void *data)
94{
95 if (count >= 256)
96 len = 255;
97 else
98 len = count;
99
100 memcpy(cmdline, buffer, len); 81 memcpy(cmdline, buffer, len);
101 cmdline[len] = 0; 82 cmdline[len] = 0;
102 83
103 return len; 84 return len;
104} 85}
105 86
87static const struct file_operations kgdb_test_proc_fops = {
88 .owner = THIS_MODULE,
89 .read = kgdb_test_proc_read,
90 .write = kgdb_test_proc_write,
91};
92
106static int __init kgdbtest_init(void) 93static int __init kgdbtest_init(void)
107{ 94{
108 struct proc_dir_entry *entry; 95 struct proc_dir_entry *entry;
109 96
110 entry = create_proc_entry("kgdbtest", 0, NULL); 97 entry = proc_create("kgdbtest", 0, NULL, &kgdb_test_proc_fops);
111 if (entry == NULL) 98 if (entry == NULL)
112 return -ENOMEM; 99 return -ENOMEM;
113 100
114 entry->read_proc = test_read_proc;
115 entry->write_proc = test_write_proc;
116 entry->data = NULL;
117
118 return 0; 101 return 0;
119} 102}
120 103
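
The conversion above swaps the old read_proc/write_proc hooks for a proc_create() file_operations, but the user-visible node behaves the same. A hypothetical user-space poke at it (error handling mostly elided; a read triggers kgdb_test() and the L1/L2 pokes and returns 0 bytes):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	int fd = open("/proc/kgdbtest", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	write(fd, "hello", 5);		/* lands in the static cmdline[] */
	read(fd, buf, sizeof(buf));	/* fires the test hooks */
	close(fd);
	return 0;
}
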
diff --git a/arch/blackfin/kernel/nmi.c b/arch/blackfin/kernel/nmi.c
new file mode 100644
index 000000000000..0b5f72f17fd0
--- /dev/null
+++ b/arch/blackfin/kernel/nmi.c
@@ -0,0 +1,299 @@
1/*
2 * Blackfin nmi_watchdog Driver
3 *
4 * Originally based on bfin_wdt.c
5 * Copyright 2010-2010 Analog Devices Inc.
6 * Graff Yang <graf.yang@analog.com>
7 *
8 * Enter bugs at http://blackfin.uclinux.org/
9 *
10 * Licensed under the GPL-2 or later.
11 */
12
13#include <linux/bitops.h>
14#include <linux/hardirq.h>
15#include <linux/sysdev.h>
16#include <linux/pm.h>
17#include <linux/nmi.h>
18#include <linux/smp.h>
19#include <linux/timer.h>
20#include <asm/blackfin.h>
21#include <asm/atomic.h>
22#include <asm/cacheflush.h>
23#include <asm/bfin_watchdog.h>
24
25#define DRV_NAME "nmi-wdt"
26
27#define NMI_WDT_TIMEOUT 5 /* 5 seconds */
28#define NMI_CHECK_TIMEOUT (4 * HZ) /* 4 seconds in jiffies */
29static int nmi_wdt_cpu = 1;
30
31static unsigned int timeout = NMI_WDT_TIMEOUT;
32static int nmi_active;
33
34static unsigned short wdoga_ctl;
35static unsigned int wdoga_cnt;
36static struct corelock_slot saved_corelock;
37static atomic_t nmi_touched[NR_CPUS];
38static struct timer_list ntimer;
39
40enum {
41 COREA_ENTER_NMI = 0,
42 COREA_EXIT_NMI,
43 COREB_EXIT_NMI,
44
45 NMI_EVENT_NR,
46};
47static unsigned long nmi_event __attribute__ ((__section__(".l2.bss")));
48
49/* we are in an NMI, so non-atomic bit ops are safe */
50static inline void set_nmi_event(int event)
51{
52 __set_bit(event, &nmi_event);
53}
54
55static inline void wait_nmi_event(int event)
56{
57 while (!test_bit(event, &nmi_event))
58 barrier();
59 __clear_bit(event, &nmi_event);
60}
61
62static inline void send_corea_nmi(void)
63{
64 wdoga_ctl = bfin_read_WDOGA_CTL();
65 wdoga_cnt = bfin_read_WDOGA_CNT();
66
67 bfin_write_WDOGA_CTL(WDEN_DISABLE);
68 bfin_write_WDOGA_CNT(0);
69 bfin_write_WDOGA_CTL(WDEN_ENABLE | ICTL_NMI);
70}
71
72static inline void restore_corea_nmi(void)
73{
74 bfin_write_WDOGA_CTL(WDEN_DISABLE);
75 bfin_write_WDOGA_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
76
77 bfin_write_WDOGA_CNT(wdoga_cnt);
78 bfin_write_WDOGA_CTL(wdoga_ctl);
79}
80
81static inline void save_corelock(void)
82{
83 saved_corelock = corelock;
84 corelock.lock = 0;
85}
86
87static inline void restore_corelock(void)
88{
89 corelock = saved_corelock;
90}
91
92
93static inline void nmi_wdt_keepalive(void)
94{
95 bfin_write_WDOGB_STAT(0);
96}
97
98static inline void nmi_wdt_stop(void)
99{
100 bfin_write_WDOGB_CTL(WDEN_DISABLE);
101}
102
103/* before calling this function, you must stop the WDT */
104static inline void nmi_wdt_clear(void)
105{
106 /* clear TRO bit, disable event generation */
107 bfin_write_WDOGB_CTL(WDOG_EXPIRED | WDEN_DISABLE | ICTL_NONE);
108}
109
110static inline void nmi_wdt_start(void)
111{
112 bfin_write_WDOGB_CTL(WDEN_ENABLE | ICTL_NMI);
113}
114
115static inline int nmi_wdt_running(void)
116{
117 return ((bfin_read_WDOGB_CTL() & WDEN_MASK) != WDEN_DISABLE);
118}
119
120static inline int nmi_wdt_set_timeout(unsigned long t)
121{
122 u32 cnt, max_t, sclk;
123 int run;
124
125 sclk = get_sclk();
126 max_t = -1 / sclk; /* u32 math: 0xFFFFFFFF / sclk */
127 cnt = t * sclk;
128 if (t > max_t) {
129 pr_warning("NMI: timeout value is too large\n");
130 return -EINVAL;
131 }
132
133 run = nmi_wdt_running();
134 nmi_wdt_stop();
135 bfin_write_WDOGB_CNT(cnt);
136 if (run)
137 nmi_wdt_start();
138
139 timeout = t;
140
141 return 0;
142}
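
WDOGB counts at SCLK, so the conversion is cnt = t * sclk, and max_t = -1 / sclk is the u32 idiom for 0xFFFFFFFF / sclk, the longest period the 32-bit count register can represent. A worked example, assuming an SCLK of 125 MHz (board-dependent):

#include <stdio.h>

int main(void)
{
	unsigned int sclk = 125000000;	/* watchdog ticks per second */
	unsigned int max_t = (unsigned int)-1 / sclk;
	unsigned long t = 5;		/* NMI_WDT_TIMEOUT */
	unsigned int cnt = t * sclk;

	printf("max timeout = %u sec\n", max_t);	/* 34 */
	printf("WDOGB_CNT for %lu sec = %u\n", t, cnt);	/* 625000000 */
	return 0;
}
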
143
144int check_nmi_wdt_touched(void)
145{
146 unsigned int this_cpu = smp_processor_id();
147 unsigned int cpu;
148
149 cpumask_t mask = cpu_online_map;
150
151 if (!atomic_read(&nmi_touched[this_cpu]))
152 return 0;
153
154 atomic_set(&nmi_touched[this_cpu], 0);
155
156 cpu_clear(this_cpu, mask);
157 for_each_cpu_mask(cpu, mask) {
158 invalidate_dcache_range((unsigned long)(&nmi_touched[cpu]),
159 (unsigned long)(&nmi_touched[cpu]));
160 if (!atomic_read(&nmi_touched[cpu]))
161 return 0;
162 atomic_set(&nmi_touched[cpu], 0);
163 }
164
165 return 1;
166}
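
The checker only reports success once every online CPU has touched its flag since the last pass, and the explicit invalidate_dcache_range() is needed because Blackfin SMP cores are not cache coherent, so the other core's store would otherwise never be observed. A portable sketch of the same idea using C11 atomics in place of the invalidate (the kernel version differs in detail, e.g. it leaves the other CPUs' flags untouched on failure):

#include <stdatomic.h>
#include <stdbool.h>

#define NR_CPUS 2

static atomic_int touched[NR_CPUS];

void touch_watchdog(int cpu)
{
	atomic_store(&touched[cpu], 1);
}

bool all_cpus_touched(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (!atomic_exchange(&touched[cpu], 0))
			return false;	/* that CPU may be locked up */
	return true;			/* safe to pet the hardware watchdog */
}
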
167
168static void nmi_wdt_timer(unsigned long data)
169{
170 if (check_nmi_wdt_touched())
171 nmi_wdt_keepalive();
172
173 mod_timer(&ntimer, jiffies + NMI_CHECK_TIMEOUT);
174}
175
176static int __init init_nmi_wdt(void)
177{
178 nmi_wdt_set_timeout(timeout);
179 nmi_wdt_start();
180 nmi_active = true;
181
182 init_timer(&ntimer);
183 ntimer.function = nmi_wdt_timer;
184 ntimer.expires = jiffies + NMI_CHECK_TIMEOUT;
185 add_timer(&ntimer);
186
187 pr_info("nmi_wdt: initialized: timeout=%d sec\n", timeout);
188 return 0;
189}
190device_initcall(init_nmi_wdt);
191
192void touch_nmi_watchdog(void)
193{
194 atomic_set(&nmi_touched[smp_processor_id()], 1);
195}
196
197/* Suspend/resume support */
198#ifdef CONFIG_PM
199static int nmi_wdt_suspend(struct sys_device *dev, pm_message_t state)
200{
201 nmi_wdt_stop();
202 return 0;
203}
204
205static int nmi_wdt_resume(struct sys_device *dev)
206{
207 if (nmi_active)
208 nmi_wdt_start();
209 return 0;
210}
211
212static struct sysdev_class nmi_sysclass = {
213 .name = DRV_NAME,
214 .resume = nmi_wdt_resume,
215 .suspend = nmi_wdt_suspend,
216};
217
218static struct sys_device device_nmi_wdt = {
219 .id = 0,
220 .cls = &nmi_sysclass,
221};
222
223static int __init init_nmi_wdt_sysfs(void)
224{
225 int error;
226
227 if (!nmi_active)
228 return 0;
229
230 error = sysdev_class_register(&nmi_sysclass);
231 if (!error)
232 error = sysdev_register(&device_nmi_wdt);
233 return error;
234}
235late_initcall(init_nmi_wdt_sysfs);
236
237#endif /* CONFIG_PM */
238
239
240asmlinkage notrace void do_nmi(struct pt_regs *fp)
241{
242 unsigned int cpu = smp_processor_id();
243 nmi_enter();
244
245 cpu_pda[cpu].__nmi_count += 1;
246
247 if (cpu == nmi_wdt_cpu) {
248 /* CoreB goes here first */
249
250 /* reload the WDOG_STAT */
251 nmi_wdt_keepalive();
252
253 /* clear nmi interrupt for CoreB */
254 nmi_wdt_stop();
255 nmi_wdt_clear();
256
257 /* trigger NMI interrupt of CoreA */
258 send_corea_nmi();
259
260 /* waiting for CoreA to enter NMI */
261 wait_nmi_event(COREA_ENTER_NMI);
262
263 /* recover WDOGA's settings */
264 restore_corea_nmi();
265
266 save_corelock();
267
268 /* corelock is saved/cleared, CoreA is dumping messages */
269
270 wait_nmi_event(COREA_EXIT_NMI);
271 } else {
272 /* OK, CoreA entered NMI */
273 set_nmi_event(COREA_ENTER_NMI);
274 }
275
276 pr_emerg("\nNMI Watchdog detected LOCKUP, dump for CPU %d\n", cpu);
277 dump_bfin_process(fp);
278 dump_bfin_mem(fp);
279 show_regs(fp);
280 dump_bfin_trace_buffer();
281 show_stack(current, (unsigned long *)fp);
282
283 if (cpu == nmi_wdt_cpu) {
284 pr_emerg("This fault is not recoverable, sorry!\n");
285
286 /* CoreB dump finished, restore the corelock */
287 restore_corelock();
288
289 set_nmi_event(COREB_EXIT_NMI);
290 } else {
291 /* CoreA dump finished, notify CoreB that we are done */
292 set_nmi_event(COREA_EXIT_NMI);
293
294 /* synchronize with CoreB */
295 wait_nmi_event(COREB_EXIT_NMI);
296 }
297
298 nmi_exit();
299}
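
The handshake above runs as follows: CoreB takes the watchdog NMI, forwards an NMI to CoreA through WDOGA, and blocks on COREA_ENTER_NMI; CoreA sets that event, dumps its state, sets COREA_EXIT_NMI and blocks on COREB_EXIT_NMI; CoreB then dumps and releases CoreA. The event primitives reduce to plain bit operations on a word both cores can see (here placed in .l2.bss); a sketch:

static volatile unsigned long event_word;	/* shared between cores */

void set_event(int bit)
{
	event_word |= 1UL << bit;
}

void wait_event_bit(int bit)
{
	while (!(event_word & (1UL << bit)))
		;			/* spin: the other core is still busy */
	event_word &= ~(1UL << bit);	/* consume the event */
}
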
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 5cc7e2e9e415..93ec07da2e51 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -11,6 +11,7 @@
11#include <linux/unistd.h> 11#include <linux/unistd.h>
12#include <linux/user.h> 12#include <linux/user.h>
13#include <linux/uaccess.h> 13#include <linux/uaccess.h>
14#include <linux/slab.h>
14#include <linux/sched.h> 15#include <linux/sched.h>
15#include <linux/tick.h> 16#include <linux/tick.h>
16#include <linux/fs.h> 17#include <linux/fs.h>
@@ -98,13 +99,6 @@ void cpu_idle(void)
98 } 99 }
99} 100}
100 101
101/* Fill in the fpu structure for a core dump. */
102
103int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpregs)
104{
105 return 1;
106}
107
108/* 102/*
109 * This gets run with P1 containing the 103 * This gets run with P1 containing the
110 * function to call, and R1 containing 104 * function to call, and R1 containing
@@ -215,22 +209,18 @@ copy_thread(unsigned long clone_flags,
215/* 209/*
216 * sys_execve() executes a new program. 210 * sys_execve() executes a new program.
217 */ 211 */
218
219asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp) 212asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
220{ 213{
221 int error; 214 int error;
222 char *filename; 215 char *filename;
223 struct pt_regs *regs = (struct pt_regs *)((&name) + 6); 216 struct pt_regs *regs = (struct pt_regs *)((&name) + 6);
224 217
225 lock_kernel();
226 filename = getname(name); 218 filename = getname(name);
227 error = PTR_ERR(filename); 219 error = PTR_ERR(filename);
228 if (IS_ERR(filename)) 220 if (IS_ERR(filename))
229 goto out; 221 return error;
230 error = do_execve(filename, argv, envp, regs); 222 error = do_execve(filename, argv, envp, regs);
231 putname(filename); 223 putname(filename);
232 out:
233 unlock_kernel();
234 return error; 224 return error;
235} 225}
236 226
@@ -262,9 +252,12 @@ void finish_atomic_sections (struct pt_regs *regs)
262 int __user *up0 = (int __user *)regs->p0; 252 int __user *up0 = (int __user *)regs->p0;
263 253
264 switch (regs->pc) { 254 switch (regs->pc) {
255 default:
256 /* not in middle of an atomic step, so resume like normal */
257 return;
258
265 case ATOMIC_XCHG32 + 2: 259 case ATOMIC_XCHG32 + 2:
266 put_user(regs->r1, up0); 260 put_user(regs->r1, up0);
267 regs->pc = ATOMIC_XCHG32 + 4;
268 break; 261 break;
269 262
270 case ATOMIC_CAS32 + 2: 263 case ATOMIC_CAS32 + 2:
@@ -272,7 +265,6 @@ void finish_atomic_sections (struct pt_regs *regs)
272 if (regs->r0 == regs->r1) 265 if (regs->r0 == regs->r1)
273 case ATOMIC_CAS32 + 6: 266 case ATOMIC_CAS32 + 6:
274 put_user(regs->r2, up0); 267 put_user(regs->r2, up0);
275 regs->pc = ATOMIC_CAS32 + 8;
276 break; 268 break;
277 269
278 case ATOMIC_ADD32 + 2: 270 case ATOMIC_ADD32 + 2:
@@ -280,7 +272,6 @@ void finish_atomic_sections (struct pt_regs *regs)
280 /* fall through */ 272 /* fall through */
281 case ATOMIC_ADD32 + 4: 273 case ATOMIC_ADD32 + 4:
282 put_user(regs->r0, up0); 274 put_user(regs->r0, up0);
283 regs->pc = ATOMIC_ADD32 + 6;
284 break; 275 break;
285 276
286 case ATOMIC_SUB32 + 2: 277 case ATOMIC_SUB32 + 2:
@@ -288,7 +279,6 @@ void finish_atomic_sections (struct pt_regs *regs)
288 /* fall through */ 279 /* fall through */
289 case ATOMIC_SUB32 + 4: 280 case ATOMIC_SUB32 + 4:
290 put_user(regs->r0, up0); 281 put_user(regs->r0, up0);
291 regs->pc = ATOMIC_SUB32 + 6;
292 break; 282 break;
293 283
294 case ATOMIC_IOR32 + 2: 284 case ATOMIC_IOR32 + 2:
@@ -296,7 +286,6 @@ void finish_atomic_sections (struct pt_regs *regs)
296 /* fall through */ 286 /* fall through */
297 case ATOMIC_IOR32 + 4: 287 case ATOMIC_IOR32 + 4:
298 put_user(regs->r0, up0); 288 put_user(regs->r0, up0);
299 regs->pc = ATOMIC_IOR32 + 6;
300 break; 289 break;
301 290
302 case ATOMIC_AND32 + 2: 291 case ATOMIC_AND32 + 2:
@@ -304,7 +293,6 @@ void finish_atomic_sections (struct pt_regs *regs)
304 /* fall through */ 293 /* fall through */
305 case ATOMIC_AND32 + 4: 294 case ATOMIC_AND32 + 4:
306 put_user(regs->r0, up0); 295 put_user(regs->r0, up0);
307 regs->pc = ATOMIC_AND32 + 6;
308 break; 296 break;
309 297
310 case ATOMIC_XOR32 + 2: 298 case ATOMIC_XOR32 + 2:
@@ -312,9 +300,15 @@ void finish_atomic_sections (struct pt_regs *regs)
312 /* fall through */ 300 /* fall through */
313 case ATOMIC_XOR32 + 4: 301 case ATOMIC_XOR32 + 4:
314 put_user(regs->r0, up0); 302 put_user(regs->r0, up0);
315 regs->pc = ATOMIC_XOR32 + 6;
316 break; 303 break;
317 } 304 }
305
306 /*
307 * We've finished the atomic section, and the only thing left for
308 * userspace is to do a RTS, so we might as well handle that too
309 * since we need to update the PC anyways.
310 */
311 regs->pc = regs->rets;
318} 312}
319 313
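
For context: the kernel-assisted atomic sequences live at fixed userspace addresses (see fixed_code.h), and when an interrupt lands mid-sequence this function completes the pending side effect. Since the only user instruction left to run would be the RTS back to the caller, loading the PC from RETS finishes the sequence in one step. An illustrative sketch of the fixup idea, using a toy register layout rather than the real pt_regs and fixed-code addresses:

struct fake_regs {
	unsigned long pc, rets, r0, p0;
};

/* seq_base/seq_len describe one fixed-code sequence, e.g. ATOMIC_ADD32 */
void finish_atomic_sketch(struct fake_regs *regs,
			  unsigned long seq_base, unsigned long seq_len)
{
	if (regs->pc < seq_base || regs->pc >= seq_base + seq_len)
		return;				/* not mid-sequence */

	*(unsigned long *)regs->p0 = regs->r0;	/* complete the pending store */
	regs->pc = regs->rets;			/* emulate the trailing RTS */
}
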
320static inline 314static inline
@@ -336,12 +330,58 @@ int in_mem_const(unsigned long addr, unsigned long size,
336{ 330{
337 return in_mem_const_off(addr, size, 0, const_addr, const_size); 331 return in_mem_const_off(addr, size, 0, const_addr, const_size);
338} 332}
339#define IN_ASYNC(bnum, bctlnum) \ 333#define ASYNC_ENABLED(bnum, bctlnum) \
340({ \ 334({ \
341 (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? -EFAULT : \ 335 (bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
342 bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? -EFAULT : \ 336 bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
343 BFIN_MEM_ACCESS_CORE; \ 337 1; \
344}) 338})
339/*
340 * We can't read EBIU banks that aren't enabled or we end up hanging
341 * on the access to the async space. Make sure we validate accesses
342 * that cross async banks too.
343 * 0 - found, but unusable
344 * 1 - found & usable
345 * 2 - not found
346 */
347static
348int in_async(unsigned long addr, unsigned long size)
349{
350 if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
351 if (!ASYNC_ENABLED(0, 0))
352 return 0;
353 if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
354 return 1;
355 size -= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE - addr;
356 addr = ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE;
357 }
358 if (addr >= ASYNC_BANK1_BASE && addr < ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE) {
359 if (!ASYNC_ENABLED(1, 0))
360 return 0;
361 if (addr + size <= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)
362 return 1;
363 size -= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE - addr;
364 addr = ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE;
365 }
366 if (addr >= ASYNC_BANK2_BASE && addr < ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE) {
367 if (!ASYNC_ENABLED(2, 1))
368 return 0;
369 if (addr + size <= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE)
370 return 1;
371 size -= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE - addr;
372 addr = ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE;
373 }
374 if (addr >= ASYNC_BANK3_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
375 if (!ASYNC_ENABLED(3, 1))
376 return 0;
377 if (addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
378 return 1;
379 return 0;
380 }
381
382 /* not within async bounds */
383 return 2;
384}
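
The walk above clips the access at each bank boundary and revalidates the remainder, so a read spanning bank 0 into bank 1 is only usable when both banks are enabled. A simplified, table-driven sketch of the same walk, assuming ascending contiguous banks (unlike the code above, a spill past the last bank reports "not found" here):

#include <stdbool.h>

struct bank { unsigned long base, size; bool enabled; };

/* 0 = found but unusable, 1 = found and usable, 2 = not in any bank */
int in_banks(const struct bank *b, int nr,
	     unsigned long addr, unsigned long size)
{
	int i;

	for (i = 0; i < nr; i++) {
		unsigned long end = b[i].base + b[i].size;

		if (addr < b[i].base || addr >= end)
			continue;
		if (!b[i].enabled)
			return 0;
		if (addr + size <= end)
			return 1;
		/* spills into the next bank: validate the remainder too */
		size -= end - addr;
		addr = end;
	}
	return 2;
}
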
345 385
346int bfin_mem_access_type(unsigned long addr, unsigned long size) 386int bfin_mem_access_type(unsigned long addr, unsigned long size)
347{ 387{
@@ -378,17 +418,11 @@ int bfin_mem_access_type(unsigned long addr, unsigned long size)
378 if (addr >= SYSMMR_BASE) 418 if (addr >= SYSMMR_BASE)
379 return BFIN_MEM_ACCESS_CORE_ONLY; 419 return BFIN_MEM_ACCESS_CORE_ONLY;
380 420
381 /* We can't read EBIU banks that aren't enabled or we end up hanging 421 switch (in_async(addr, size)) {
382 * on the access to the async space. 422 case 0: return -EFAULT;
383 */ 423 case 1: return BFIN_MEM_ACCESS_CORE;
384 if (in_mem_const(addr, size, ASYNC_BANK0_BASE, ASYNC_BANK0_SIZE)) 424 case 2: /* fall through */;
385 return IN_ASYNC(0, 0); 425 }
386 if (in_mem_const(addr, size, ASYNC_BANK1_BASE, ASYNC_BANK1_SIZE))
387 return IN_ASYNC(1, 0);
388 if (in_mem_const(addr, size, ASYNC_BANK2_BASE, ASYNC_BANK2_SIZE))
389 return IN_ASYNC(2, 1);
390 if (in_mem_const(addr, size, ASYNC_BANK3_BASE, ASYNC_BANK3_SIZE))
391 return IN_ASYNC(3, 1);
392 426
393 if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH)) 427 if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
394 return BFIN_MEM_ACCESS_CORE; 428 return BFIN_MEM_ACCESS_CORE;
@@ -405,6 +439,8 @@ __attribute__((l1_text))
405/* Return 1 if access to memory range is OK, 0 otherwise */ 439/* Return 1 if access to memory range is OK, 0 otherwise */
406int _access_ok(unsigned long addr, unsigned long size) 440int _access_ok(unsigned long addr, unsigned long size)
407{ 441{
442 int aret;
443
408 if (size == 0) 444 if (size == 0)
409 return 1; 445 return 1;
410 /* Check that things do not wrap around */ 446 /* Check that things do not wrap around */
@@ -454,6 +490,11 @@ int _access_ok(unsigned long addr, unsigned long size)
454 if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH)) 490 if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
455 return 1; 491 return 1;
456#endif 492#endif
493
494 aret = in_async(addr, size);
495 if (aret < 2)
496 return aret;
497
457 if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH)) 498 if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
458 return 1; 499 return 1;
459 500
diff --git a/arch/blackfin/kernel/ptrace.c b/arch/blackfin/kernel/ptrace.c
index 56b0ba12175f..43eb969405d1 100644
--- a/arch/blackfin/kernel/ptrace.c
+++ b/arch/blackfin/kernel/ptrace.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds 2 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
3 * these modifications are Copyright 2004-2009 Analog Devices Inc. 3 * these modifications are Copyright 2004-2010 Analog Devices Inc.
4 * 4 *
5 * Licensed under the GPL-2 5 * Licensed under the GPL-2
6 */ 6 */
@@ -9,10 +9,13 @@
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/mm.h> 10#include <linux/mm.h>
11#include <linux/smp.h> 11#include <linux/smp.h>
12#include <linux/elf.h>
12#include <linux/errno.h> 13#include <linux/errno.h>
13#include <linux/ptrace.h> 14#include <linux/ptrace.h>
14#include <linux/user.h> 15#include <linux/user.h>
16#include <linux/regset.h>
15#include <linux/signal.h> 17#include <linux/signal.h>
18#include <linux/tracehook.h>
16#include <linux/uaccess.h> 19#include <linux/uaccess.h>
17 20
18#include <asm/page.h> 21#include <asm/page.h>
@@ -25,90 +28,57 @@
25#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
26#include <asm/mem_map.h> 29#include <asm/mem_map.h>
27 30
28#define TEXT_OFFSET 0
29/* 31/*
30 * does not yet catch signals sent when the child dies. 32 * does not yet catch signals sent when the child dies.
31 * in exit.c or in signal.c. 33 * in exit.c or in signal.c.
32 */ 34 */
33 35
34/* determines which bits in the SYSCFG reg the user has access to. */
35/* 1 = access 0 = no access */
36#define SYSCFG_MASK 0x0007 /* SYSCFG reg */
37/* sets the trace bits. */
38#define TRACE_BITS 0x0001
39
40/* Find the stack offset for a register, relative to thread.esp0. */
41#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
42
43/*
44 * Get the address of the live pt_regs for the specified task.
45 * These are saved onto the top kernel stack when the process
46 * is not running.
47 *
48 * Note: if a user thread is execve'd from kernel space, the
49 * kernel stack will not be empty on entry to the kernel, so
50 * ptracing these tasks will fail.
51 */
52static inline struct pt_regs *get_user_regs(struct task_struct *task)
53{
54 return (struct pt_regs *)
55 ((unsigned long)task_stack_page(task) +
56 (THREAD_SIZE - sizeof(struct pt_regs)));
57}
58
59/*
60 * Get all user integer registers.
61 */
62static inline int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
63{
64 struct pt_regs regs;
65 memcpy(&regs, get_user_regs(tsk), sizeof(regs));
66 regs.usp = tsk->thread.usp;
67 return copy_to_user(uregs, &regs, sizeof(struct pt_regs)) ? -EFAULT : 0;
68}
69
70/* Mapping from PT_xxx to the stack offset at which the register is
71 * saved. Notice that usp has no stack-slot and needs to be treated
72 * specially (see get_reg/put_reg below).
73 */
74
75/* 36/*
76 * Get contents of register REGNO in task TASK. 37 * Get contents of register REGNO in task TASK.
77 */ 38 */
78static inline long get_reg(struct task_struct *task, int regno) 39static inline long
40get_reg(struct task_struct *task, long regno, unsigned long __user *datap)
79{ 41{
80 unsigned char *reg_ptr; 42 long tmp;
43 struct pt_regs *regs = task_pt_regs(task);
81 44
82 struct pt_regs *regs = 45 if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
83 (struct pt_regs *)((unsigned long)task_stack_page(task) + 46 return -EIO;
84 (THREAD_SIZE - sizeof(struct pt_regs)));
85 reg_ptr = (char *)regs;
86 47
87 switch (regno) { 48 switch (regno) {
49 case PT_TEXT_ADDR:
50 tmp = task->mm->start_code;
51 break;
52 case PT_TEXT_END_ADDR:
53 tmp = task->mm->end_code;
54 break;
55 case PT_DATA_ADDR:
56 tmp = task->mm->start_data;
57 break;
88 case PT_USP: 58 case PT_USP:
89 return task->thread.usp; 59 tmp = task->thread.usp;
60 break;
90 default: 61 default:
91 if (regno <= 216) 62 if (regno < sizeof(*regs)) {
92 return *(long *)(reg_ptr + regno); 63 void *reg_ptr = regs;
64 tmp = *(long *)(reg_ptr + regno);
65 } else
66 return -EIO;
93 } 67 }
94 /* slight mystery ... never seems to come here but kernel misbehaves without this code! */
95 68
96 printk(KERN_WARNING "Request to get for unknown register %d\n", regno); 69 return put_user(tmp, datap);
97 return 0;
98} 70}
99 71
100/* 72/*
101 * Write contents of register REGNO in task TASK. 73 * Write contents of register REGNO in task TASK.
102 */ 74 */
103static inline int 75static inline int
104put_reg(struct task_struct *task, int regno, unsigned long data) 76put_reg(struct task_struct *task, long regno, unsigned long data)
105{ 77{
106 char *reg_ptr; 78 struct pt_regs *regs = task_pt_regs(task);
107 79
108 struct pt_regs *regs = 80 if (regno & 3 || regno > PT_LAST_PSEUDO || regno < 0)
109 (struct pt_regs *)((unsigned long)task_stack_page(task) + 81 return -EIO;
110 (THREAD_SIZE - sizeof(struct pt_regs)));
111 reg_ptr = (char *)regs;
112 82
113 switch (regno) { 83 switch (regno) {
114 case PT_PC: 84 case PT_PC:
@@ -125,10 +95,18 @@ put_reg(struct task_struct *task, int regno, unsigned long data)
125 regs->usp = data; 95 regs->usp = data;
126 task->thread.usp = data; 96 task->thread.usp = data;
127 break; 97 break;
98 case PT_SYSCFG: /* don't let userspace screw with this */
99 if ((data & ~1) != 0x6)
100 pr_warning("ptrace: ignore syscfg write of %#lx\n", data);
101 break; /* regs->syscfg = data; break; */
128 default: 102 default:
129 if (regno <= 216) 103 if (regno < sizeof(*regs)) {
130 *(long *)(reg_ptr + regno) = data; 104 void *reg_offset = regs;
105 *(long *)(reg_offset + regno) = data;
106 }
107 /* Ignore writes to pseudo registers */
131 } 108 }
109
132 return 0; 110 return 0;
133} 111}
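
In this scheme regno is a byte offset into pt_regs, which is why both helpers reject misaligned offsets (regno & 3) and bound ordinary accesses by sizeof(*regs), while pseudo registers such as PT_TEXT_ADDR are assigned offsets past the end of the struct. A toy sketch of offset-addressed register access:

#include <stddef.h>
#include <stdio.h>

struct toy_regs { long r0, r1, pc, usp; };

static long reg_by_offset(struct toy_regs *regs, long regno)
{
	if (regno < 0 || regno & (long)(sizeof(long) - 1) ||
	    regno >= (long)sizeof(*regs))
		return -1;	/* misaligned or out of range */
	return *(long *)((char *)regs + regno);
}

int main(void)
{
	struct toy_regs regs = { .pc = 0x1000 };

	printf("pc = %#lx\n", (unsigned long)
	       reg_by_offset(&regs, offsetof(struct toy_regs, pc)));
	return 0;
}
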
134 112
@@ -160,24 +138,98 @@ static inline int is_user_addr_valid(struct task_struct *child,
160 return -EIO; 138 return -EIO;
161} 139}
162 140
163void ptrace_enable(struct task_struct *child) 141/*
142 * retrieve the contents of Blackfin userspace general registers
143 */
144static int genregs_get(struct task_struct *target,
145 const struct user_regset *regset,
146 unsigned int pos, unsigned int count,
147 void *kbuf, void __user *ubuf)
148{
149 struct pt_regs *regs = task_pt_regs(target);
150 int ret;
151
152 /* This sucks ... */
153 regs->usp = target->thread.usp;
154
155 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
156 regs, 0, sizeof(*regs));
157 if (ret < 0)
158 return ret;
159
160 return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
161 sizeof(*regs), -1);
162}
163
164/*
165 * update the contents of the Blackfin userspace general registers
166 */
167static int genregs_set(struct task_struct *target,
168 const struct user_regset *regset,
169 unsigned int pos, unsigned int count,
170 const void *kbuf, const void __user *ubuf)
164{ 171{
165 unsigned long tmp; 172 struct pt_regs *regs = task_pt_regs(target);
166 tmp = get_reg(child, PT_SYSCFG) | (TRACE_BITS); 173 int ret;
167 put_reg(child, PT_SYSCFG, tmp); 174
175 /* Don't let people set SYSCFG (it's at the end of pt_regs) */
176 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
177 regs, 0, PT_SYSCFG);
178 if (ret < 0)
179 return ret;
180
181 /* This sucks ... */
182 target->thread.usp = regs->usp;
183 /* regs->retx = regs->pc; */
184
185 return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
186 PT_SYSCFG, -1);
168} 187}
169 188
170/* 189/*
171 * Called by kernel/ptrace.c when detaching.. 190 * Define the register sets available on the Blackfin under Linux
172 *
173 * Make sure the single step bit is not set.
174 */ 191 */
175void ptrace_disable(struct task_struct *child) 192enum bfin_regset {
193 REGSET_GENERAL,
194};
195
196static const struct user_regset bfin_regsets[] = {
197 [REGSET_GENERAL] = {
198 .core_note_type = NT_PRSTATUS,
199 .n = sizeof(struct pt_regs) / sizeof(long),
200 .size = sizeof(long),
201 .align = sizeof(long),
202 .get = genregs_get,
203 .set = genregs_set,
204 },
205};
206
207static const struct user_regset_view user_bfin_native_view = {
208 .name = "Blackfin",
209 .e_machine = EM_BLACKFIN,
210 .regsets = bfin_regsets,
211 .n = ARRAY_SIZE(bfin_regsets),
212};
213
214const struct user_regset_view *task_user_regset_view(struct task_struct *task)
215{
216 return &user_bfin_native_view;
217}
218
219void user_enable_single_step(struct task_struct *child)
220{
221 struct pt_regs *regs = task_pt_regs(child);
222 regs->syscfg |= SYSCFG_SSSTEP;
223
224 set_tsk_thread_flag(child, TIF_SINGLESTEP);
225}
226
227void user_disable_single_step(struct task_struct *child)
176{ 228{
177 unsigned long tmp; 229 struct pt_regs *regs = task_pt_regs(child);
178 /* make sure the single step bit is not set. */ 230 regs->syscfg &= ~SYSCFG_SSSTEP;
179 tmp = get_reg(child, PT_SYSCFG) & ~TRACE_BITS; 231
180 put_reg(child, PT_SYSCFG, tmp); 232 clear_tsk_thread_flag(child, TIF_SINGLESTEP);
181} 233}
182 234
183long arch_ptrace(struct task_struct *child, long request, long addr, long data) 235long arch_ptrace(struct task_struct *child, long request, long addr, long data)
@@ -240,40 +292,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
240 break; 292 break;
241 } 293 }
242 294
243 /* read the word at location addr in the USER area. */
244 case PTRACE_PEEKUSR:
245 {
246 unsigned long tmp;
247 ret = -EIO;
248 tmp = 0;
249 if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) {
250 printk(KERN_WARNING "ptrace error : PEEKUSR : temporarily returning "
251 "0 - %x sizeof(pt_regs) is %lx\n",
252 (int)addr, sizeof(struct pt_regs));
253 break;
254 }
255 if (addr == sizeof(struct pt_regs)) {
256 /* PT_TEXT_ADDR */
257 tmp = child->mm->start_code + TEXT_OFFSET;
258 } else if (addr == (sizeof(struct pt_regs) + 4)) {
259 /* PT_TEXT_END_ADDR */
260 tmp = child->mm->end_code;
261 } else if (addr == (sizeof(struct pt_regs) + 8)) {
262 /* PT_DATA_ADDR */
263 tmp = child->mm->start_data;
264#ifdef CONFIG_BINFMT_ELF_FDPIC
265 } else if (addr == (sizeof(struct pt_regs) + 12)) {
266 goto case_PTRACE_GETFDPIC_EXEC;
267 } else if (addr == (sizeof(struct pt_regs) + 16)) {
268 goto case_PTRACE_GETFDPIC_INTERP;
269#endif
270 } else {
271 tmp = get_reg(child, addr);
272 }
273 ret = put_user(tmp, datap);
274 break;
275 }
276
277#ifdef CONFIG_BINFMT_ELF_FDPIC 295#ifdef CONFIG_BINFMT_ELF_FDPIC
278 case PTRACE_GETFDPIC: { 296 case PTRACE_GETFDPIC: {
279 unsigned long tmp = 0; 297 unsigned long tmp = 0;
@@ -316,19 +334,6 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
316 case BFIN_MEM_ACCESS_CORE_ONLY: 334 case BFIN_MEM_ACCESS_CORE_ONLY:
317 copied = access_process_vm(child, addr, &data, 335 copied = access_process_vm(child, addr, &data,
318 to_copy, 1); 336 to_copy, 1);
319 if (copied)
320 break;
321
322 /* hrm, why didn't that work ... maybe no mapping */
323 if (addr >= FIXED_CODE_START &&
324 addr + to_copy <= FIXED_CODE_END) {
325 copy_to_user_page(0, 0, 0, paddr, &data, to_copy);
326 copied = to_copy;
327 } else if (addr >= BOOT_ROM_START) {
328 memcpy(paddr, &data, to_copy);
329 copied = to_copy;
330 }
331
332 break; 337 break;
333 case BFIN_MEM_ACCESS_DMA: 338 case BFIN_MEM_ACCESS_DMA:
334 if (safe_dma_memcpy(paddr, &data, to_copy)) 339 if (safe_dma_memcpy(paddr, &data, to_copy))
@@ -349,78 +354,36 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
349 break; 354 break;
350 } 355 }
351 356
352 case PTRACE_POKEUSR: /* write the word at location addr in the USER area */ 357 case PTRACE_PEEKUSR:
353 ret = -EIO; 358 switch (addr) {
354 if ((addr & 3) || (addr > (sizeof(struct pt_regs) + 16))) { 359#ifdef CONFIG_BINFMT_ELF_FDPIC /* backwards compat */
355 printk(KERN_WARNING "ptrace error : POKEUSR: temporarily returning 0\n"); 360 case PT_FDPIC_EXEC: goto case_PTRACE_GETFDPIC_EXEC;
356 break; 361 case PT_FDPIC_INTERP: goto case_PTRACE_GETFDPIC_INTERP;
357 } 362#endif
358 363 default:
359 if (addr >= (sizeof(struct pt_regs))) { 364 ret = get_reg(child, addr, datap);
360 ret = 0;
361 break;
362 }
363 if (addr == PT_SYSCFG) {
364 data &= SYSCFG_MASK;
365 data |= get_reg(child, PT_SYSCFG);
366 } 365 }
367 ret = put_reg(child, addr, data); 366 pr_debug("ptrace: PEEKUSR reg %li with %#lx = %i\n", addr, data, ret);
368 break;
369
370 case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
371 case PTRACE_CONT: /* restart after signal. */
372 pr_debug("ptrace: syscall/cont\n");
373
374 ret = -EIO;
375 if (!valid_signal(data))
376 break;
377 if (request == PTRACE_SYSCALL)
378 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
379 else
380 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
381 child->exit_code = data;
382 ptrace_disable(child);
383 pr_debug("ptrace: before wake_up_process\n");
384 wake_up_process(child);
385 ret = 0;
386 break;
387
388 /*
389 * make the child exit. Best I can do is send it a sigkill.
390 * perhaps it should be put in the status that it wants to
391 * exit.
392 */
393 case PTRACE_KILL:
394 ret = 0;
395 if (child->exit_state == EXIT_ZOMBIE) /* already dead */
396 break;
397 child->exit_code = SIGKILL;
398 ptrace_disable(child);
399 wake_up_process(child);
400 break; 367 break;
401 368
402 case PTRACE_SINGLESTEP: /* set the trap flag. */ 369 case PTRACE_POKEUSR:
403 pr_debug("ptrace: single step\n"); 370 ret = put_reg(child, addr, data);
404 ret = -EIO; 371 pr_debug("ptrace: POKEUSR reg %li with %li = %i\n", addr, data, ret);
405 if (!valid_signal(data))
406 break;
407 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
408 ptrace_enable(child);
409 child->exit_code = data;
410 wake_up_process(child);
411 ret = 0;
412 break; 372 break;
413 373
414 case PTRACE_GETREGS: 374 case PTRACE_GETREGS:
415 /* Get all gp regs from the child. */ 375 pr_debug("ptrace: PTRACE_GETREGS\n");
416 ret = ptrace_getregs(child, datap); 376 return copy_regset_to_user(child, &user_bfin_native_view,
417 break; 377 REGSET_GENERAL,
378 0, sizeof(struct pt_regs),
379 (void __user *)data);
418 380
419 case PTRACE_SETREGS: 381 case PTRACE_SETREGS:
420 printk(KERN_WARNING "ptrace: SETREGS: **** NOT IMPLEMENTED ***\n"); 382 pr_debug("ptrace: PTRACE_SETREGS\n");
421 /* Set all gp regs in the child. */ 383 return copy_regset_from_user(child, &user_bfin_native_view,
422 ret = 0; 384 REGSET_GENERAL,
423 break; 385 0, sizeof(struct pt_regs),
386 (const void __user *)data);
424 387
425 default: 388 default:
426 ret = ptrace_request(child, request, addr, data); 389 ret = ptrace_request(child, request, addr, data);
@@ -430,27 +393,21 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
430 return ret; 393 return ret;
431} 394}
432 395
433asmlinkage void syscall_trace(void) 396asmlinkage int syscall_trace_enter(struct pt_regs *regs)
434{ 397{
435 if (!test_thread_flag(TIF_SYSCALL_TRACE)) 398 int ret = 0;
436 return; 399
437 400 if (test_thread_flag(TIF_SYSCALL_TRACE))
438 if (!(current->ptrace & PT_PTRACED)) 401 ret = tracehook_report_syscall_entry(regs);
439 return; 402
440 403 return ret;
441 /* the 0x80 provides a way for the tracing parent to distinguish 404}
442 * between a syscall stop and SIGTRAP delivery 405
443 */ 406asmlinkage void syscall_trace_leave(struct pt_regs *regs)
444 ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) 407{
445 ? 0x80 : 0)); 408 int step;
446 409
447 /* 410 step = test_thread_flag(TIF_SINGLESTEP);
448 * this isn't the same as continuing with a signal, but it will do 411 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
449 * for normal use. strace only continues with a signal if the 412 tracehook_report_syscall_exit(regs, step);
450 * stopping signal is not SIGTRAP. -brl
451 */
452 if (current->exit_code) {
453 send_sig(current->exit_code, current, 1);
454 current->exit_code = 0;
455 }
456} 413}
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index c202a44d1416..8e2efceb364b 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -178,10 +178,10 @@ void __init bfin_cache_init(void)
178 178
179void __init bfin_relocate_l1_mem(void) 179void __init bfin_relocate_l1_mem(void)
180{ 180{
181 unsigned long l1_code_length; 181 unsigned long text_l1_len = (unsigned long)_text_l1_len;
182 unsigned long l1_data_a_length; 182 unsigned long data_l1_len = (unsigned long)_data_l1_len;
183 unsigned long l1_data_b_length; 183 unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
184 unsigned long l2_length; 184 unsigned long l2_len = (unsigned long)_l2_len;
185 185
186 early_shadow_stamp(); 186 early_shadow_stamp();
187 187
@@ -201,31 +201,34 @@ void __init bfin_relocate_l1_mem(void)
201 201
202 blackfin_dma_early_init(); 202 blackfin_dma_early_init();
203 203
204 /* if necessary, copy _stext_l1 to _etext_l1 to L1 instruction SRAM */ 204 /* if necessary, copy L1 text to L1 instruction SRAM */
205 l1_code_length = _etext_l1 - _stext_l1; 205 if (L1_CODE_LENGTH && text_l1_len)
206 if (l1_code_length) 206 early_dma_memcpy(_stext_l1, _text_l1_lma, text_l1_len);
207 early_dma_memcpy(_stext_l1, _l1_lma_start, l1_code_length);
208 207
209 /* if necessary, copy _sdata_l1 to _sbss_l1 to L1 data bank A SRAM */ 208 /* if necessary, copy L1 data to L1 data bank A SRAM */
210 l1_data_a_length = _sbss_l1 - _sdata_l1; 209 if (L1_DATA_A_LENGTH && data_l1_len)
211 if (l1_data_a_length) 210 early_dma_memcpy(_sdata_l1, _data_l1_lma, data_l1_len);
212 early_dma_memcpy(_sdata_l1, _l1_lma_start + l1_code_length, l1_data_a_length);
213 211
214 /* if necessary, copy _sdata_b_l1 to _sbss_b_l1 to L1 data bank B SRAM */ 212 /* if necessary, copy L1 data B to L1 data bank B SRAM */
215 l1_data_b_length = _sbss_b_l1 - _sdata_b_l1; 213 if (L1_DATA_B_LENGTH && data_b_l1_len)
216 if (l1_data_b_length) 214 early_dma_memcpy(_sdata_b_l1, _data_b_l1_lma, data_b_l1_len);
217 early_dma_memcpy(_sdata_b_l1, _l1_lma_start + l1_code_length +
218 l1_data_a_length, l1_data_b_length);
219 215
220 early_dma_memcpy_done(); 216 early_dma_memcpy_done();
221 217
222 /* if necessary, copy _stext_l2 to _edata_l2 to L2 SRAM */ 218 /* if necessary, copy L2 text/data to L2 SRAM */
223 if (L2_LENGTH != 0) { 219 if (L2_LENGTH && l2_len)
224 l2_length = _sbss_l2 - _stext_l2; 220 memcpy(_stext_l2, _l2_lma, l2_len);
225 if (l2_length) 221}
226 memcpy(_stext_l2, _l2_lma_start, l2_length); 222
227 } 223#ifdef CONFIG_ROMKERNEL
224void __init bfin_relocate_xip_data(void)
225{
226 early_shadow_stamp();
227
228 memcpy(_sdata, _data_lma, (unsigned long)_data_len - THREAD_SIZE + sizeof(struct thread_info));
229 memcpy(_sinitdata, _init_data_lma, (unsigned long)_init_data_len);
228} 230}
231#endif
229 232
230/* add_memory_region to memmap */ 233/* add_memory_region to memmap */
231static void __init add_memory_region(unsigned long long start, 234static void __init add_memory_region(unsigned long long start,
@@ -511,7 +514,7 @@ static __init void memory_setup(void)
511#endif 514#endif
512 unsigned long max_mem; 515 unsigned long max_mem;
513 516
514 _rambase = (unsigned long)_stext; 517 _rambase = CONFIG_BOOT_LOAD;
515 _ramstart = (unsigned long)_end; 518 _ramstart = (unsigned long)_end;
516 519
517 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) { 520 if (DMA_UNCACHED_REGION > (_ramend - _ramstart)) {
@@ -604,13 +607,13 @@ static __init void memory_setup(void)
604 } 607 }
605 608
606#ifdef CONFIG_MPU 609#ifdef CONFIG_MPU
610#if defined(CONFIG_ROMFS_ON_MTD) && defined(CONFIG_MTD_ROM)
611 page_mask_nelts = (((_ramend + ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE -
612 ASYNC_BANK0_BASE) >> PAGE_SHIFT) + 31) / 32;
613#else
607 page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32; 614 page_mask_nelts = ((_ramend >> PAGE_SHIFT) + 31) / 32;
608 page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
609#endif 615#endif
610 616 page_mask_order = get_order(3 * page_mask_nelts * sizeof(long));
611#if !defined(CONFIG_MTD_UCLINUX)
612 /*In case there is no valid CPLB behind memory_end make sure we don't get to close*/
613 memory_end -= SIZE_4K;
614#endif 617#endif
615 618
616 init_mm.start_code = (unsigned long)_stext; 619 init_mm.start_code = (unsigned long)_stext;
@@ -642,7 +645,7 @@ static __init void memory_setup(void)
642 __bss_start, __bss_stop, 645 __bss_start, __bss_stop,
643 _sdata, _edata, 646 _sdata, _edata,
644 (void *)&init_thread_union, 647 (void *)&init_thread_union,
645 (void *)((int)(&init_thread_union) + 0x2000), 648 (void *)((int)(&init_thread_union) + THREAD_SIZE),
646 __init_begin, __init_end, 649 __init_begin, __init_end,
647 (void *)_ramstart, (void *)memory_end 650 (void *)_ramstart, (void *)memory_end
648#ifdef CONFIG_MTD_UCLINUX 651#ifdef CONFIG_MTD_UCLINUX
@@ -804,10 +807,17 @@ static inline int __init get_mem_size(void)
804 BUG(); 807 BUG();
805} 808}
806 809
810__attribute__((weak))
811void __init native_machine_early_platform_add_devices(void)
812{
813}
814
807void __init setup_arch(char **cmdline_p) 815void __init setup_arch(char **cmdline_p)
808{ 816{
809 unsigned long sclk, cclk; 817 unsigned long sclk, cclk;
810 818
819 native_machine_early_platform_add_devices();
820
811 enable_shadow_console(); 821 enable_shadow_console();
812 822
813 /* Check to make sure we are running on the right processor */ 823 /* Check to make sure we are running on the right processor */
@@ -917,7 +927,7 @@ void __init setup_arch(char **cmdline_p)
917 927
918 printk(KERN_INFO "Blackfin support (C) 2004-2009 Analog Devices, Inc.\n"); 928 printk(KERN_INFO "Blackfin support (C) 2004-2009 Analog Devices, Inc.\n");
919 if (bfin_compiled_revid() == 0xffff) 929 if (bfin_compiled_revid() == 0xffff)
920 printk(KERN_INFO "Compiled for ADSP-%s Rev any\n", CPU); 930 printk(KERN_INFO "Compiled for ADSP-%s Rev any, running on 0.%d\n", CPU, bfin_revid());
921 else if (bfin_compiled_revid() == -1) 931 else if (bfin_compiled_revid() == -1)
922 printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU); 932 printk(KERN_INFO "Compiled for ADSP-%s Rev none\n", CPU);
923 else 933 else
@@ -1229,10 +1239,10 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1229 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS, 1239 dsup_banks, BFIN_DSUBBANKS, BFIN_DWAYS,
1230 BFIN_DLINES); 1240 BFIN_DLINES);
1231#ifdef __ARCH_SYNC_CORE_DCACHE 1241#ifdef __ARCH_SYNC_CORE_DCACHE
1232 seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", cpudata->dcache_invld_count); 1242 seq_printf(m, "SMP Dcache Flushes\t: %lu\n\n", dcache_invld_count[cpu_num]);
1233#endif 1243#endif
1234#ifdef __ARCH_SYNC_CORE_ICACHE 1244#ifdef __ARCH_SYNC_CORE_ICACHE
1235 seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", cpudata->icache_invld_count); 1245 seq_printf(m, "SMP Icache Flushes\t: %lu\n\n", icache_invld_count[cpu_num]);
1236#endif 1246#endif
1237 1247
1238 if (cpu_num != num_possible_cpus() - 1) 1248 if (cpu_num != num_possible_cpus() - 1)
@@ -1261,8 +1271,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1261 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n", 1271 seq_printf(m, "board memory\t: %ld kB (0x%p -> 0x%p)\n",
1262 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end); 1272 physical_mem_end >> 10, (void *)0, (void *)physical_mem_end);
1263 seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n", 1273 seq_printf(m, "kernel memory\t: %d kB (0x%p -> 0x%p)\n",
1264 ((int)memory_end - (int)_stext) >> 10, 1274 ((int)memory_end - (int)_rambase) >> 10,
1265 _stext, 1275 (void *)_rambase,
1266 (void *)memory_end); 1276 (void *)memory_end);
1267 seq_printf(m, "\n"); 1277 seq_printf(m, "\n");
1268 1278
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c
index 9d90c18fab23..d536f35d1f43 100644
--- a/arch/blackfin/kernel/signal.c
+++ b/arch/blackfin/kernel/signal.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright 2004-2009 Analog Devices Inc. 2 * Copyright 2004-2010 Analog Devices Inc.
3 * 3 *
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later
5 */ 5 */
@@ -12,10 +12,12 @@
12#include <linux/binfmts.h> 12#include <linux/binfmts.h>
13#include <linux/freezer.h> 13#include <linux/freezer.h>
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <linux/tracehook.h>
15 16
16#include <asm/cacheflush.h> 17#include <asm/cacheflush.h>
17#include <asm/ucontext.h> 18#include <asm/ucontext.h>
18#include <asm/fixed_code.h> 19#include <asm/fixed_code.h>
20#include <asm/syscall.h>
19 21
20#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 22#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
21 23
@@ -49,6 +51,9 @@ rt_restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *p
49 unsigned long usp = 0; 51 unsigned long usp = 0;
50 int err = 0; 52 int err = 0;
51 53
54 /* Always make any pending restarted system calls return -EINTR */
55 current_thread_info()->restart_block.fn = do_no_restart_syscall;
56
52#define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x) 57#define RESTORE(x) err |= __get_user(regs->x, &sc->sc_##x)
53 58
54 /* restore passed registers */ 59 /* restore passed registers */
@@ -205,16 +210,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
205 regs->r1 = (unsigned long)(&frame->info); 210 regs->r1 = (unsigned long)(&frame->info);
206 regs->r2 = (unsigned long)(&frame->uc); 211 regs->r2 = (unsigned long)(&frame->uc);
207 212
208 /*
209 * Clear the trace flag when entering the signal handler, but
210 * notify any tracer that was single-stepping it. The tracer
211 * may want to single-step inside the handler too.
212 */
213 if (regs->syscfg & TRACE_BITS) {
214 regs->syscfg &= ~TRACE_BITS;
215 ptrace_notify(SIGTRAP);
216 }
217
218 return 0; 213 return 0;
219 214
220 give_sigsegv: 215 give_sigsegv:
@@ -246,6 +241,11 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
246 regs->r0 = regs->orig_r0; 241 regs->r0 = regs->orig_r0;
247 regs->pc -= 2; 242 regs->pc -= 2;
248 break; 243 break;
244
245 case -ERESTART_RESTARTBLOCK:
246 regs->p0 = __NR_restart_syscall;
247 regs->pc -= 2;
248 break;
249 } 249 }
250} 250}
251 251
@@ -314,6 +314,9 @@ asmlinkage void do_signal(struct pt_regs *regs)
314 * clear the TIF_RESTORE_SIGMASK flag */ 314 * clear the TIF_RESTORE_SIGMASK flag */
315 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 315 if (test_thread_flag(TIF_RESTORE_SIGMASK))
316 clear_thread_flag(TIF_RESTORE_SIGMASK); 316 clear_thread_flag(TIF_RESTORE_SIGMASK);
317
318 tracehook_signal_handler(signr, &info, &ka, regs,
319 test_thread_flag(TIF_SINGLESTEP));
317 } 320 }
318 321
319 return; 322 return;
@@ -332,3 +335,20 @@ asmlinkage void do_signal(struct pt_regs *regs)
332 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); 335 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
333 } 336 }
334} 337}
338
339/*
340 * notification of userspace execution resumption
341 */
342asmlinkage void do_notify_resume(struct pt_regs *regs)
343{
344 if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_RESTORE_SIGMASK))
345 do_signal(regs);
346
347 if (test_thread_flag(TIF_NOTIFY_RESUME)) {
348 clear_thread_flag(TIF_NOTIFY_RESUME);
349 tracehook_notify_resume(regs);
350 if (current->replacement_session_keyring)
351 key_replace_session_keyring();
352 }
353}
354
diff --git a/arch/blackfin/kernel/sys_bfin.c b/arch/blackfin/kernel/sys_bfin.c
index afcef129d4e8..2e7f8e10bf87 100644
--- a/arch/blackfin/kernel/sys_bfin.c
+++ b/arch/blackfin/kernel/sys_bfin.c
@@ -22,39 +22,6 @@
22#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
23#include <asm/dma.h> 23#include <asm/dma.h>
24 24
25/* common code for old and new mmaps */
26static inline long
27do_mmap2(unsigned long addr, unsigned long len,
28 unsigned long prot, unsigned long flags,
29 unsigned long fd, unsigned long pgoff)
30{
31 int error = -EBADF;
32 struct file *file = NULL;
33
34 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
35 if (!(flags & MAP_ANONYMOUS)) {
36 file = fget(fd);
37 if (!file)
38 goto out;
39 }
40
41 down_write(&current->mm->mmap_sem);
42 error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
43 up_write(&current->mm->mmap_sem);
44
45 if (file)
46 fput(file);
47 out:
48 return error;
49}
50
51asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
52 unsigned long prot, unsigned long flags,
53 unsigned long fd, unsigned long pgoff)
54{
55 return do_mmap2(addr, len, prot, flags, fd, pgoff);
56}
57
58asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags) 25asmlinkage void *sys_sram_alloc(size_t size, unsigned long flags)
59{ 26{
60 return sram_alloc_with_lsl(size, flags); 27 return sram_alloc_with_lsl(size, flags);
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 359cfb1815ca..cb7a01d4f009 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -21,8 +21,7 @@
21#include <asm/blackfin.h> 21#include <asm/blackfin.h>
22#include <asm/time.h> 22#include <asm/time.h>
23#include <asm/gptimers.h> 23#include <asm/gptimers.h>
24 24#include <asm/nmi.h>
25#if defined(CONFIG_CYCLES_CLOCKSOURCE)
26 25
27/* Accelerators for sched_clock() 26/* Accelerators for sched_clock()
28 * convert from cycles(64bits) => nanoseconds (64bits) 27 * convert from cycles(64bits) => nanoseconds (64bits)
@@ -46,22 +45,17 @@
46 * -johnstul@us.ibm.com "math is hard, lets go shopping!" 45 * -johnstul@us.ibm.com "math is hard, lets go shopping!"
47 */ 46 */
48 47
49static unsigned long cyc2ns_scale;
50#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */ 48#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
51 49
52static inline void set_cyc2ns_scale(unsigned long cpu_khz) 50#if defined(CONFIG_CYCLES_CLOCKSOURCE)
53{
54 cyc2ns_scale = (1000000 << CYC2NS_SCALE_FACTOR) / cpu_khz;
55}
56
57static inline unsigned long long cycles_2_ns(cycle_t cyc)
58{
59 return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
60}
61 51
62static cycle_t bfin_read_cycles(struct clocksource *cs) 52static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
63{ 53{
54#ifdef CONFIG_CPU_FREQ
64 return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod); 55 return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
56#else
57 return get_cycles();
58#endif
65} 59}
66 60
67static struct clocksource bfin_cs_cycles = { 61static struct clocksource bfin_cs_cycles = {
@@ -69,19 +63,18 @@ static struct clocksource bfin_cs_cycles = {
69 .rating = 400, 63 .rating = 400,
70 .read = bfin_read_cycles, 64 .read = bfin_read_cycles,
71 .mask = CLOCKSOURCE_MASK(64), 65 .mask = CLOCKSOURCE_MASK(64),
72 .shift = 22, 66 .shift = CYC2NS_SCALE_FACTOR,
73 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 67 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
74}; 68};
75 69
76unsigned long long sched_clock(void) 70static inline unsigned long long bfin_cs_cycles_sched_clock(void)
77{ 71{
78 return cycles_2_ns(bfin_read_cycles(&bfin_cs_cycles)); 72 return clocksource_cyc2ns(bfin_read_cycles(&bfin_cs_cycles),
73 bfin_cs_cycles.mult, bfin_cs_cycles.shift);
79} 74}
80 75
81static int __init bfin_cs_cycles_init(void) 76static int __init bfin_cs_cycles_init(void)
82{ 77{
83 set_cyc2ns_scale(get_cclk() / 1000);
84
85 bfin_cs_cycles.mult = \ 78 bfin_cs_cycles.mult = \
86 clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift); 79 clocksource_hz2mult(get_cclk(), bfin_cs_cycles.shift);
87 80
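
clocksource_hz2mult() picks mult so that ns = (cycles * mult) >> shift, i.e. mult = (NSEC_PER_SEC << shift) / freq, which is exactly the conversion clocksource_cyc2ns() performs in the sched_clock paths above. A worked example, assuming a CCLK of 500 MHz (board-dependent) and the shift of 10 chosen here:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t shift = 10;		/* CYC2NS_SCALE_FACTOR */
	uint64_t cclk = 500000000;	/* cycles per second */
	uint32_t mult = (uint32_t)((1000000000ULL << shift) / cclk);
	uint64_t cycles = 500;

	printf("mult = %u\n", mult);	/* 2048 */
	printf("%llu cycles = %llu ns\n", (unsigned long long)cycles,
	       (unsigned long long)((cycles * mult) >> shift));	/* 1000 */
	return 0;
}
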
@@ -108,7 +101,7 @@ void __init setup_gptimer0(void)
108 enable_gptimers(TIMER0bit); 101 enable_gptimers(TIMER0bit);
109} 102}
110 103
111static cycle_t bfin_read_gptimer0(void) 104static cycle_t bfin_read_gptimer0(struct clocksource *cs)
112{ 105{
113 return bfin_read_TIMER0_COUNTER(); 106 return bfin_read_TIMER0_COUNTER();
114} 107}
@@ -118,10 +111,16 @@ static struct clocksource bfin_cs_gptimer0 = {
118 .rating = 350, 111 .rating = 350,
119 .read = bfin_read_gptimer0, 112 .read = bfin_read_gptimer0,
120 .mask = CLOCKSOURCE_MASK(32), 113 .mask = CLOCKSOURCE_MASK(32),
121 .shift = 22, 114 .shift = CYC2NS_SCALE_FACTOR,
122 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 115 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
123}; 116};
124 117
118static inline unsigned long long bfin_cs_gptimer0_sched_clock(void)
119{
120 return clocksource_cyc2ns(bfin_read_TIMER0_COUNTER(),
121 bfin_cs_gptimer0.mult, bfin_cs_gptimer0.shift);
122}
123
125static int __init bfin_cs_gptimer0_init(void) 124static int __init bfin_cs_gptimer0_init(void)
126{ 125{
127 setup_gptimer0(); 126 setup_gptimer0();
@@ -138,47 +137,20 @@ static int __init bfin_cs_gptimer0_init(void)
138# define bfin_cs_gptimer0_init() 137# define bfin_cs_gptimer0_init()
139#endif 138#endif
140 139
141#ifdef CONFIG_CORE_TIMER_IRQ_L1 140#if defined(CONFIG_GPTMR0_CLOCKSOURCE) || defined(CONFIG_CYCLES_CLOCKSOURCE)
142__attribute__((l1_text)) 141/* prefer to use cycles since it has higher rating */
143#endif 142notrace unsigned long long sched_clock(void)
144irqreturn_t timer_interrupt(int irq, void *dev_id); 143{
145 144#if defined(CONFIG_CYCLES_CLOCKSOURCE)
146static int bfin_timer_set_next_event(unsigned long, \ 145 return bfin_cs_cycles_sched_clock();
147 struct clock_event_device *);
148
149static void bfin_timer_set_mode(enum clock_event_mode, \
150 struct clock_event_device *);
151
152static struct clock_event_device clockevent_bfin = {
153#if defined(CONFIG_TICKSOURCE_GPTMR0)
154 .name = "bfin_gptimer0",
155 .rating = 300,
156 .irq = IRQ_TIMER0,
157#else 146#else
158 .name = "bfin_core_timer", 147 return bfin_cs_gptimer0_sched_clock();
159 .rating = 350,
160 .irq = IRQ_CORETMR,
161#endif 148#endif
162 .shift = 32, 149}
163 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
164 .set_next_event = bfin_timer_set_next_event,
165 .set_mode = bfin_timer_set_mode,
166};
167
168static struct irqaction bfin_timer_irq = {
169#if defined(CONFIG_TICKSOURCE_GPTMR0)
170 .name = "Blackfin GPTimer0",
171#else
172 .name = "Blackfin CoreTimer",
173#endif 150#endif
174 .flags = IRQF_DISABLED | IRQF_TIMER | \
175 IRQF_IRQPOLL | IRQF_PERCPU,
176 .handler = timer_interrupt,
177 .dev_id = &clockevent_bfin,
178};
179 151
180#if defined(CONFIG_TICKSOURCE_GPTMR0) 152#if defined(CONFIG_TICKSOURCE_GPTMR0)
181static int bfin_timer_set_next_event(unsigned long cycles, 153static int bfin_gptmr0_set_next_event(unsigned long cycles,
182 struct clock_event_device *evt) 154 struct clock_event_device *evt)
183{ 155{
184 disable_gptimers(TIMER0bit); 156 disable_gptimers(TIMER0bit);
@@ -189,7 +161,7 @@ static int bfin_timer_set_next_event(unsigned long cycles,
189 return 0; 161 return 0;
190} 162}
191 163
192static void bfin_timer_set_mode(enum clock_event_mode mode, 164static void bfin_gptmr0_set_mode(enum clock_event_mode mode,
193 struct clock_event_device *evt) 165 struct clock_event_device *evt)
194{ 166{
195 switch (mode) { 167 switch (mode) {
@@ -217,25 +189,65 @@ static void bfin_timer_set_mode(enum clock_event_mode mode,
217 } 189 }
218} 190}
219 191
220static void bfin_timer_ack(void) 192static void bfin_gptmr0_ack(void)
221{ 193{
222 set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0); 194 set_gptimer_status(TIMER_GROUP1, TIMER_STATUS_TIMIL0);
223} 195}
224 196
225static void __init bfin_timer_init(void) 197static void __init bfin_gptmr0_init(void)
226{ 198{
227 disable_gptimers(TIMER0bit); 199 disable_gptimers(TIMER0bit);
228} 200}
229 201
230static unsigned long __init bfin_clockevent_check(void) 202#ifdef CONFIG_CORE_TIMER_IRQ_L1
203__attribute__((l1_text))
204#endif
205irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
231{ 206{
232 setup_irq(IRQ_TIMER0, &bfin_timer_irq); 207 struct clock_event_device *evt = dev_id;
233 return get_sclk(); 208 smp_mb();
209 evt->event_handler(evt);
210 bfin_gptmr0_ack();
211 return IRQ_HANDLED;
234} 212}
235 213
236#else /* CONFIG_TICKSOURCE_CORETMR */ 214static struct irqaction gptmr0_irq = {
215 .name = "Blackfin GPTimer0",
216 .flags = IRQF_DISABLED | IRQF_TIMER | \
217 IRQF_IRQPOLL | IRQF_PERCPU,
218 .handler = bfin_gptmr0_interrupt,
219};
237 220
238static int bfin_timer_set_next_event(unsigned long cycles, 221static struct clock_event_device clockevent_gptmr0 = {
222 .name = "bfin_gptimer0",
223 .rating = 300,
224 .irq = IRQ_TIMER0,
225 .shift = 32,
226 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
227 .set_next_event = bfin_gptmr0_set_next_event,
228 .set_mode = bfin_gptmr0_set_mode,
229};
230
231static void __init bfin_gptmr0_clockevent_init(struct clock_event_device *evt)
232{
233 unsigned long clock_tick;
234
235 clock_tick = get_sclk();
236 evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
237 evt->max_delta_ns = clockevent_delta2ns(-1, evt);
238 evt->min_delta_ns = clockevent_delta2ns(100, evt);
239
240 evt->cpumask = cpumask_of(0);
241
242 clockevents_register_device(evt);
243}
244#endif /* CONFIG_TICKSOURCE_GPTMR0 */
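
For a clock event device the fixed-point runs the other way: div_sc() above computes mult = (rate << shift) / NSEC_PER_SEC, and the core turns a requested delta in nanoseconds into timer cycles as (ns * mult) >> shift; clockevent_delta2ns() is the inverse, which is why max_delta_ns is derived from the largest programmable latch, -1. A standalone sketch, assuming an illustrative 133 MHz SCLK:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t nsec_per_sec = 1000000000;
	const uint32_t sclk = 133000000;	/* assumed system clock */
	const int shift = 32;
	uint32_t mult = (uint32_t)(((uint64_t)sclk << shift) / nsec_per_sec);
	uint64_t tick_ns = 4000000;		/* one 4 ms tick (HZ=250) */
	uint64_t cycles = (tick_ns * mult) >> shift;
	printf("mult=%u cycles=%llu\n", (unsigned)mult,
	       (unsigned long long)cycles);
	return 0;	/* cycles ~= 532000, i.e. 4 ms at 133 MHz */
}
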
245
246#if defined(CONFIG_TICKSOURCE_CORETMR)
247/* per-cpu local core timer */
248static DEFINE_PER_CPU(struct clock_event_device, coretmr_events);
249
250static int bfin_coretmr_set_next_event(unsigned long cycles,
239 struct clock_event_device *evt) 251 struct clock_event_device *evt)
240{ 252{
241 bfin_write_TCNTL(TMPWR); 253 bfin_write_TCNTL(TMPWR);
@@ -246,7 +258,7 @@ static int bfin_timer_set_next_event(unsigned long cycles,
246 return 0; 258 return 0;
247} 259}
248 260
249static void bfin_timer_set_mode(enum clock_event_mode mode, 261static void bfin_coretmr_set_mode(enum clock_event_mode mode,
250 struct clock_event_device *evt) 262 struct clock_event_device *evt)
251{ 263{
252 switch (mode) { 264 switch (mode) {
@@ -278,19 +290,13 @@ static void bfin_timer_set_mode(enum clock_event_mode mode,
278 } 290 }
279} 291}
280 292
281static void bfin_timer_ack(void) 293void bfin_coretmr_init(void)
282{
283}
284
285static void __init bfin_timer_init(void)
286{ 294{
287 /* power up the timer, but don't enable it just yet */ 295 /* power up the timer, but don't enable it just yet */
288 bfin_write_TCNTL(TMPWR); 296 bfin_write_TCNTL(TMPWR);
289 CSYNC(); 297 CSYNC();
290 298
291 /* 299 /* the TSCALE prescaler counter. */
292 * the TSCALE prescaler counter.
293 */
294 bfin_write_TSCALE(TIME_SCALE - 1); 300 bfin_write_TSCALE(TIME_SCALE - 1);
295 bfin_write_TPERIOD(0); 301 bfin_write_TPERIOD(0);
296 bfin_write_TCOUNT(0); 302 bfin_write_TCOUNT(0);
@@ -298,48 +304,54 @@ static void __init bfin_timer_init(void)
298 CSYNC(); 304 CSYNC();
299} 305}
300 306
301static unsigned long __init bfin_clockevent_check(void) 307#ifdef CONFIG_CORE_TIMER_IRQ_L1
302{ 308__attribute__((l1_text))
303 setup_irq(IRQ_CORETMR, &bfin_timer_irq); 309#endif
304 return get_cclk() / TIME_SCALE; 310irqreturn_t bfin_coretmr_interrupt(int irq, void *dev_id)
305}
306
307void __init setup_core_timer(void)
308{ 311{
309 bfin_timer_init(); 312 int cpu = smp_processor_id();
310 bfin_timer_set_mode(CLOCK_EVT_MODE_PERIODIC, NULL); 313 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
311}
312#endif /* CONFIG_TICKSOURCE_GPTMR0 */
313 314
314/*
315 * timer_interrupt() needs to keep up the real-time clock,
316 * as well as call the "do_timer()" routine every clocktick
317 */
318irqreturn_t timer_interrupt(int irq, void *dev_id)
319{
320 struct clock_event_device *evt = dev_id;
321 smp_mb(); 315 smp_mb();
322 evt->event_handler(evt); 316 evt->event_handler(evt);
323 bfin_timer_ack();
324 return IRQ_HANDLED;
325}
326 317
327static int __init bfin_clockevent_init(void) 318 touch_nmi_watchdog();
328{
329 unsigned long timer_clk;
330
331 timer_clk = bfin_clockevent_check();
332 319
333 bfin_timer_init(); 320 return IRQ_HANDLED;
321}
334 322
335 clockevent_bfin.mult = div_sc(timer_clk, NSEC_PER_SEC, clockevent_bfin.shift); 323static struct irqaction coretmr_irq = {
336 clockevent_bfin.max_delta_ns = clockevent_delta2ns(-1, &clockevent_bfin); 324 .name = "Blackfin CoreTimer",
337 clockevent_bfin.min_delta_ns = clockevent_delta2ns(100, &clockevent_bfin); 325 .flags = IRQF_DISABLED | IRQF_TIMER | \
338 clockevent_bfin.cpumask = cpumask_of(0); 326 IRQF_IRQPOLL | IRQF_PERCPU,
339 clockevents_register_device(&clockevent_bfin); 327 .handler = bfin_coretmr_interrupt,
328};
340 329
341 return 0; 330void bfin_coretmr_clockevent_init(void)
331{
332 unsigned long clock_tick;
333 unsigned int cpu = smp_processor_id();
334 struct clock_event_device *evt = &per_cpu(coretmr_events, cpu);
335
336 evt->name = "bfin_core_timer";
337 evt->rating = 350;
338 evt->irq = -1;
339 evt->shift = 32;
340 evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
341 evt->set_next_event = bfin_coretmr_set_next_event;
342 evt->set_mode = bfin_coretmr_set_mode;
343
344 clock_tick = get_cclk() / TIME_SCALE;
345 evt->mult = div_sc(clock_tick, NSEC_PER_SEC, evt->shift);
346 evt->max_delta_ns = clockevent_delta2ns(-1, evt);
347 evt->min_delta_ns = clockevent_delta2ns(100, evt);
348
349 evt->cpumask = cpumask_of(cpu);
350
351 clockevents_register_device(evt);
342} 352}
353#endif /* CONFIG_TICKSOURCE_CORETMR */
354
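
Because coretmr_events is per-cpu and the handler looks its device up via smp_processor_id(), each core can register its own tick device. A minimal sketch of a secondary core's bringup using the two functions defined above; the wrapper name and call site are illustrative, and the boot CPU's single setup_irq() in time_init() below covers all cores since the irqaction is IRQF_PERCPU:

void secondary_core_timer_bringup(void)
{
	bfin_coretmr_init();		/* power up this core's timer */
	bfin_coretmr_clockevent_init();	/* register its per-cpu clockevent */
}
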
343 355
344void __init time_init(void) 356void __init time_init(void)
345{ 357{
@@ -363,5 +375,21 @@ void __init time_init(void)
363 375
364 bfin_cs_cycles_init(); 376 bfin_cs_cycles_init();
365 bfin_cs_gptimer0_init(); 377 bfin_cs_gptimer0_init();
366 bfin_clockevent_init(); 378
379#if defined(CONFIG_TICKSOURCE_CORETMR)
380 bfin_coretmr_init();
381 setup_irq(IRQ_CORETMR, &coretmr_irq);
382 bfin_coretmr_clockevent_init();
383#endif
384
385#if defined(CONFIG_TICKSOURCE_GPTMR0)
386 bfin_gptmr0_init();
387 setup_irq(IRQ_TIMER0, &gptmr0_irq);
388 gptmr0_irq.dev_id = &clockevent_gptmr0;
389 bfin_gptmr0_clockevent_init(&clockevent_gptmr0);
390#endif
391
392#if !defined(CONFIG_TICKSOURCE_CORETMR) && !defined(CONFIG_TICKSOURCE_GPTMR0)
393# error at least one clock event device is required
394#endif
367} 395}
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index bd3b53da295e..13c1ee3e6408 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -184,11 +184,3 @@ void __init time_init(void)
184 184
185 time_sched_init(timer_interrupt); 185 time_sched_init(timer_interrupt);
186} 186}
187
188/*
189 * Scheduler clock - returns current time in nanosec units.
190 */
191unsigned long long sched_clock(void)
192{
193 return (unsigned long long)jiffies *(NSEC_PER_SEC / HZ);
194}
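
The deleted fallback above quantized sched_clock() to whole jiffies; the hardware-backed versions in time-ts.c resolve individual timer cycles instead. A quick standalone check of the old granularity, assuming HZ=250:

#include <stdio.h>

int main(void)
{
	const unsigned long long nsec_per_sec = 1000000000ULL;
	const unsigned int hz = 250;	/* assumed CONFIG_HZ */
	unsigned long long jiffies = 12345;
	unsigned long long ns = jiffies * (nsec_per_sec / hz);
	printf("%llu ns, granularity %llu ns\n", ns, nsec_per_sec / hz);
	return 0;	/* granularity is 4,000,000 ns per tick */
}
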
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 6b7325d634af..ba70c4bc2699 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -119,6 +119,15 @@ static void decode_address(char *buf, unsigned long address)
119 return; 119 return;
120 } 120 }
121 121
122 /*
 123 * Don't walk any of the vmas if we are oopsing; it has been known
 124 * to cause problems - corrupt vmas (from kernel crashes) cause double faults
125 */
126 if (oops_in_progress) {
127 strcat(buf, "/* kernel dynamic memory (maybe user-space) */");
128 return;
129 }
130
122 /* looks like we're off in user-land, so let's walk all the 131 /* looks like we're off in user-land, so let's walk all the
123 * mappings of all our processes and see if we can't be a whee 132 * mappings of all our processes and see if we can't be a whee
124 * bit more specific 133 * bit more specific
@@ -129,6 +138,12 @@ static void decode_address(char *buf, unsigned long address)
129 if (!mm) 138 if (!mm)
130 continue; 139 continue;
131 140
141 if (!down_read_trylock(&mm->mmap_sem)) {
142 if (!in_atomic)
143 mmput(mm);
144 continue;
145 }
146
132 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { 147 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
133 struct vm_area_struct *vma; 148 struct vm_area_struct *vma;
134 149
@@ -168,6 +183,7 @@ static void decode_address(char *buf, unsigned long address)
168 sprintf(buf, "[ %s vma:0x%lx-0x%lx]", 183 sprintf(buf, "[ %s vma:0x%lx-0x%lx]",
169 name, vma->vm_start, vma->vm_end); 184 name, vma->vm_start, vma->vm_end);
170 185
186 up_read(&mm->mmap_sem);
171 if (!in_atomic) 187 if (!in_atomic)
172 mmput(mm); 188 mmput(mm);
173 189
@@ -177,11 +193,16 @@ static void decode_address(char *buf, unsigned long address)
177 goto done; 193 goto done;
178 } 194 }
179 } 195 }
196
197 up_read(&mm->mmap_sem);
180 if (!in_atomic) 198 if (!in_atomic)
181 mmput(mm); 199 mmput(mm);
182 } 200 }
183 201
184 /* we were unable to find this address anywhere */ 202 /*
203 * we were unable to find this address anywhere,
204 * or some MMs were skipped because they were in use.
205 */
185 sprintf(buf, "/* kernel dynamic memory */"); 206 sprintf(buf, "/* kernel dynamic memory */");
186 207
187done: 208done:
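
Pieced together from the hunks above, the non-blocking shape of the loop (kernel context assumed, no new API): decode_address() can run from atomic or oops context, so it must never sleep on mmap_sem, and any mm it cannot lock immediately is simply skipped.

	if (!down_read_trylock(&mm->mmap_sem)) {
		if (!in_atomic)
			mmput(mm);
		continue;	/* mm busy: move on to the next task */
	}
	/* ... walk the vma rbtree under the lock ... */
	up_read(&mm->mmap_sem);
	if (!in_atomic)
		mmput(mm);
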
@@ -239,9 +260,7 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
239#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON 260#ifdef CONFIG_DEBUG_BFIN_HWTRACE_ON
240 int j; 261 int j;
241#endif 262#endif
242#ifdef CONFIG_DEBUG_HUNT_FOR_ZERO
243 unsigned int cpu = raw_smp_processor_id(); 263 unsigned int cpu = raw_smp_processor_id();
244#endif
245 const char *strerror = NULL; 264 const char *strerror = NULL;
246 int sig = 0; 265 int sig = 0;
247 siginfo_t info; 266 siginfo_t info;
@@ -515,6 +534,36 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
515 break; 534 break;
516 /* External Memory Addressing Error */ 535 /* External Memory Addressing Error */
517 case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR): 536 case (SEQSTAT_HWERRCAUSE_EXTERN_ADDR):
537 if (ANOMALY_05000310) {
538 static unsigned long anomaly_rets;
539
540 if ((fp->pc >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
541 (fp->pc < (L1_CODE_START + L1_CODE_LENGTH))) {
542 /*
543 * A false hardware error will happen while fetching at
544 * the L1 instruction SRAM boundary. Ignore it.
545 */
546 anomaly_rets = fp->rets;
547 goto traps_done;
548 } else if (fp->rets == anomaly_rets) {
549 /*
550 * While boundary code returns to a function, at the ret
551 * point, a new false hardware error might occur too based
552 * on tests. Ignore it too.
553 */
554 goto traps_done;
555 } else if ((fp->rets >= (L1_CODE_START + L1_CODE_LENGTH - 512)) &&
556 (fp->rets < (L1_CODE_START + L1_CODE_LENGTH))) {
557 /*
 558 * If the boundary code calls a function, a new false hardware
 559 * error may happen at the entry point (observed in tests).
560 * Ignore it too.
561 */
562 goto traps_done;
563 } else
564 anomaly_rets = 0;
565 }
566
518 info.si_code = BUS_ADRERR; 567 info.si_code = BUS_ADRERR;
519 sig = SIGBUS; 568 sig = SIGBUS;
520 strerror = KERN_NOTICE HWC_x3(KERN_NOTICE); 569 strerror = KERN_NOTICE HWC_x3(KERN_NOTICE);
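
The same 512-byte window test appears three times in the hunk above (once for pc, twice for rets). A hypothetical helper that names the predicate, shown only to make the ranges explicit:

static inline bool near_l1_code_end(unsigned long addr)
{
	/* last 512 bytes of L1 instruction SRAM, where anomaly
	 * 05000310 can raise spurious hardware errors
	 */
	return addr >= (L1_CODE_START + L1_CODE_LENGTH - 512) &&
	       addr <  (L1_CODE_START + L1_CODE_LENGTH);
}
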
@@ -600,7 +649,17 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
600 { 649 {
601 info.si_signo = sig; 650 info.si_signo = sig;
602 info.si_errno = 0; 651 info.si_errno = 0;
603 info.si_addr = (void __user *)fp->pc; 652 switch (trapnr) {
653 case VEC_CPLB_VL:
654 case VEC_MISALI_D:
655 case VEC_CPLB_M:
656 case VEC_CPLB_MHIT:
657 info.si_addr = (void __user *)cpu_pda[cpu].dcplb_fault_addr;
658 break;
659 default:
660 info.si_addr = (void __user *)fp->pc;
661 break;
662 }
604 force_sig_info(sig, &info, current); 663 force_sig_info(sig, &info, current);
605 } 664 }
606 665
@@ -619,7 +678,7 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
619 678
620/* 679/*
621 * Similar to get_user, do some address checking, then dereference 680 * Similar to get_user, do some address checking, then dereference
622 * Return true on sucess, false on bad address 681 * Return true on success, false on bad address
623 */ 682 */
624static bool get_instruction(unsigned short *val, unsigned short *address) 683static bool get_instruction(unsigned short *val, unsigned short *address)
625{ 684{
@@ -673,7 +732,7 @@ static void decode_instruction(unsigned short *address)
673 verbose_printk("RTE"); 732 verbose_printk("RTE");
674 else if (opcode == 0x0025) 733 else if (opcode == 0x0025)
675 verbose_printk("EMUEXCPT"); 734 verbose_printk("EMUEXCPT");
676 else if (opcode == 0x0040 && opcode <= 0x0047) 735 else if (opcode >= 0x0040 && opcode <= 0x0047)
677 verbose_printk("STI R%i", opcode & 7); 736 verbose_printk("STI R%i", opcode & 7);
678 else if (opcode >= 0x0050 && opcode <= 0x0057) 737 else if (opcode >= 0x0050 && opcode <= 0x0057)
679 verbose_printk("JUMP (P%i)", opcode & 7); 738 verbose_printk("JUMP (P%i)", opcode & 7);
@@ -976,12 +1035,12 @@ void dump_bfin_process(struct pt_regs *fp)
976 !((unsigned long)current & 0x3) && current->pid) { 1035 !((unsigned long)current & 0x3) && current->pid) {
977 verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n"); 1036 verbose_printk(KERN_NOTICE "CURRENT PROCESS:\n");
978 if (current->comm >= (char *)FIXED_CODE_START) 1037 if (current->comm >= (char *)FIXED_CODE_START)
979 verbose_printk(KERN_NOTICE "COMM=%s PID=%d\n", 1038 verbose_printk(KERN_NOTICE "COMM=%s PID=%d",
980 current->comm, current->pid); 1039 current->comm, current->pid);
981 else 1040 else
982 verbose_printk(KERN_NOTICE "COMM= invalid\n"); 1041 verbose_printk(KERN_NOTICE "COMM= invalid");
983 1042
984 printk(KERN_NOTICE "CPU = %d\n", current_thread_info()->cpu); 1043 printk(KERN_CONT " CPU=%d\n", current_thread_info()->cpu);
985 if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START) 1044 if (!((unsigned long)current->mm & 0x3) && (unsigned long)current->mm >= FIXED_CODE_START)
986 verbose_printk(KERN_NOTICE 1045 verbose_printk(KERN_NOTICE
987 "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n" 1046 "TEXT = 0x%p-0x%p DATA = 0x%p-0x%p\n"
@@ -1057,7 +1116,7 @@ void dump_bfin_mem(struct pt_regs *fp)
1057 /* And the last RETI points to the current userspace context */ 1116 /* And the last RETI points to the current userspace context */
1058 if ((fp + 1)->pc >= current->mm->start_code && 1117 if ((fp + 1)->pc >= current->mm->start_code &&
1059 (fp + 1)->pc <= current->mm->end_code) { 1118 (fp + 1)->pc <= current->mm->end_code) {
1060 verbose_printk(KERN_NOTICE "It might be better to look around here : \n"); 1119 verbose_printk(KERN_NOTICE "It might be better to look around here :\n");
1061 verbose_printk(KERN_NOTICE "-------------------------------------------\n"); 1120 verbose_printk(KERN_NOTICE "-------------------------------------------\n");
1062 show_regs(fp + 1); 1121 show_regs(fp + 1);
1063 verbose_printk(KERN_NOTICE "-------------------------------------------\n"); 1122 verbose_printk(KERN_NOTICE "-------------------------------------------\n");
@@ -1140,7 +1199,7 @@ void show_regs(struct pt_regs *fp)
1140 if (fp->ipend & ~0x3F) { 1199 if (fp->ipend & ~0x3F) {
1141 for (i = 0; i < (NR_IRQS - 1); i++) { 1200 for (i = 0; i < (NR_IRQS - 1); i++) {
1142 if (!in_atomic) 1201 if (!in_atomic)
1143 spin_lock_irqsave(&irq_desc[i].lock, flags); 1202 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
1144 1203
1145 action = irq_desc[i].action; 1204 action = irq_desc[i].action;
1146 if (!action) 1205 if (!action)
@@ -1155,7 +1214,7 @@ void show_regs(struct pt_regs *fp)
1155 verbose_printk("\n"); 1214 verbose_printk("\n");
1156unlock: 1215unlock:
1157 if (!in_atomic) 1216 if (!in_atomic)
1158 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 1217 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
1159 } 1218 }
1160 } 1219 }
1161 1220
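
The spin_lock to raw_spin_lock switch above follows the upstream change that made irq_desc's lock a raw_spinlock_t: a raw lock keeps spinning even under PREEMPT_RT, which is required around the IRQ descriptor. A fragment of the resulting pattern (kernel context assumed, action being show_regs()'s local):

	unsigned long flags;

	raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
	action = irq_desc[i].action;	/* sample the handler list safely */
	raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
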
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 10e12539000e..984c78172397 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -4,8 +4,6 @@
4 * Licensed under the GPL-2 or later 4 * Licensed under the GPL-2 or later
5 */ 5 */
6 6
7#define VMLINUX_SYMBOL(_sym_) _##_sym_
8
9#include <asm-generic/vmlinux.lds.h> 7#include <asm-generic/vmlinux.lds.h>
10#include <asm/mem_map.h> 8#include <asm/mem_map.h>
11#include <asm/page.h> 9#include <asm/page.h>
@@ -17,7 +15,12 @@ _jiffies = _jiffies_64;
17 15
18SECTIONS 16SECTIONS
19{ 17{
18#ifdef CONFIG_RAMKERNEL
20 . = CONFIG_BOOT_LOAD; 19 . = CONFIG_BOOT_LOAD;
20#else
21 . = CONFIG_ROM_BASE;
22#endif
23
21 /* Neither the text, ro_data or bss section need to be aligned 24 /* Neither the text, ro_data or bss section need to be aligned
22 * So pack them back to back 25 * So pack them back to back
23 */ 26 */
@@ -33,6 +36,12 @@ SECTIONS
33 LOCK_TEXT 36 LOCK_TEXT
34 IRQENTRY_TEXT 37 IRQENTRY_TEXT
35 KPROBES_TEXT 38 KPROBES_TEXT
39#ifdef CONFIG_ROMKERNEL
40 __sinittext = .;
41 INIT_TEXT
42 __einittext = .;
43 EXIT_TEXT
44#endif
36 *(.text.*) 45 *(.text.*)
37 *(.fixup) 46 *(.fixup)
38 47
@@ -52,8 +61,14 @@ SECTIONS
52 61
53 /* Just in case the first read only is a 32-bit access */ 62 /* Just in case the first read only is a 32-bit access */
54 RO_DATA(4) 63 RO_DATA(4)
64 __rodata_end = .;
55 65
66#ifdef CONFIG_ROMKERNEL
67 . = CONFIG_BOOT_LOAD;
68 .bss : AT(__rodata_end)
69#else
56 .bss : 70 .bss :
71#endif
57 { 72 {
58 . = ALIGN(4); 73 . = ALIGN(4);
59 ___bss_start = .; 74 ___bss_start = .;
@@ -69,7 +84,11 @@ SECTIONS
69 ___bss_stop = .; 84 ___bss_stop = .;
70 } 85 }
71 86
87#if defined(CONFIG_ROMKERNEL)
88 .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
89#else
72 .data : 90 .data :
91#endif
73 { 92 {
74 __sdata = .; 93 __sdata = .;
75 /* This gets done first, so the glob doesn't suck it in */ 94 /* This gets done first, so the glob doesn't suck it in */
@@ -96,6 +115,8 @@ SECTIONS
96 115
97 __edata = .; 116 __edata = .;
98 } 117 }
118 __data_lma = LOADADDR(.data);
119 __data_len = SIZEOF(.data);
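
For a ROM kernel, __data_lma/__data_len tell early boot where .data sits in flash and how much to copy to its RAM run address. A sketch of that copy as C; the function name is illustrative and the real copy happens in early startup code, and the C names carry one underscore fewer because the Blackfin toolchain prepends another to match the script's symbols:

extern char _sdata[];			/* .data VMA, defined in this script */
extern char _data_lma[], _data_len[];	/* exported just above */

static void relocate_data_from_rom(void)
{
	memcpy(_sdata, _data_lma, (unsigned long)_data_len);
}
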
99 120
100 /* The init section should be last, so when we free it, it goes into 121 /* The init section should be last, so when we free it, it goes into
101 * the general memory pool, and (hopefully) will decrease fragmentation 122 * the general memory pool, and (hopefully) will decrease fragmentation
@@ -105,27 +126,58 @@ SECTIONS
105 . = ALIGN(PAGE_SIZE); 126 . = ALIGN(PAGE_SIZE);
106 ___init_begin = .; 127 ___init_begin = .;
107 128
129#ifdef CONFIG_RAMKERNEL
108 INIT_TEXT_SECTION(PAGE_SIZE) 130 INIT_TEXT_SECTION(PAGE_SIZE)
109 . = ALIGN(16);
110 INIT_DATA_SECTION(16)
111 PERCPU(4)
112 131
113 /* we have to discard exit text and such at runtime, not link time, to 132 /* We have to discard exit text and such at runtime, not link time, to
114 * handle embedded cross-section references (alt instructions, bug 133 * handle embedded cross-section references (alt instructions, bug
115 * table, eh_frame, etc...) 134 * table, eh_frame, etc...). We need all of our .text up front and
135 * .data after it for PCREL call issues.
116 */ 136 */
117 .exit.text : 137 .exit.text :
118 { 138 {
119 EXIT_TEXT 139 EXIT_TEXT
120 } 140 }
141
142 . = ALIGN(16);
143 INIT_DATA_SECTION(16)
144 PERCPU(4)
145
121 .exit.data : 146 .exit.data :
122 { 147 {
123 EXIT_DATA 148 EXIT_DATA
124 } 149 }
125 150
126 __l1_lma_start = .;
127
128 .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data)) 151 .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
152#else
153 .init.data : AT(__data_lma + __data_len)
154 {
155 __sinitdata = .;
156 INIT_DATA
157 INIT_SETUP(16)
158 INIT_CALLS
159 CON_INITCALL
160 SECURITY_INITCALL
161 INIT_RAM_FS
162
163 . = ALIGN(4);
164 ___per_cpu_load = .;
165 ___per_cpu_start = .;
166 *(.data.percpu.first)
167 *(.data.percpu.page_aligned)
168 *(.data.percpu)
169 *(.data.percpu.shared_aligned)
170 ___per_cpu_end = .;
171
172 EXIT_DATA
173 __einitdata = .;
174 }
175 __init_data_lma = LOADADDR(.init.data);
176 __init_data_len = SIZEOF(.init.data);
177 __init_data_end = .;
178
179 .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
180#endif
129 { 181 {
130 . = ALIGN(4); 182 . = ALIGN(4);
131 __stext_l1 = .; 183 __stext_l1 = .;
@@ -136,9 +188,11 @@ SECTIONS
136 . = ALIGN(4); 188 . = ALIGN(4);
137 __etext_l1 = .; 189 __etext_l1 = .;
138 } 190 }
139 ASSERT (SIZEOF(.text_l1) <= L1_CODE_LENGTH, "L1 text overflow!") 191 __text_l1_lma = LOADADDR(.text_l1);
192 __text_l1_len = SIZEOF(.text_l1);
193 ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")
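
Exporting the LMA and length as symbols (a pattern repeated below for the L1 data and L2 sections) lets C code relocate each section without re-deriving addresses from its neighbours. A sketch of the intended use, assuming the arch's early_dma_memcpy() helper and the same one-underscore-fewer C naming as above:

extern char _stext_l1[];			/* L1 text VMA */
extern char _text_l1_lma[], _text_l1_len[];	/* exported just above */

early_dma_memcpy(_stext_l1, _text_l1_lma, (unsigned long)_text_l1_len);
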
140 194
141 .data_l1 L1_DATA_A_START : AT(LOADADDR(.text_l1) + SIZEOF(.text_l1)) 195 .data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
142 { 196 {
143 . = ALIGN(4); 197 . = ALIGN(4);
144 __sdata_l1 = .; 198 __sdata_l1 = .;
@@ -154,9 +208,11 @@ SECTIONS
154 . = ALIGN(4); 208 . = ALIGN(4);
155 __ebss_l1 = .; 209 __ebss_l1 = .;
156 } 210 }
157 ASSERT (SIZEOF(.data_l1) <= L1_DATA_A_LENGTH, "L1 data A overflow!") 211 __data_l1_lma = LOADADDR(.data_l1);
212 __data_l1_len = SIZEOF(.data_l1);
213 ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")
158 214
159 .data_b_l1 L1_DATA_B_START : AT(LOADADDR(.data_l1) + SIZEOF(.data_l1)) 215 .data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
160 { 216 {
161 . = ALIGN(4); 217 . = ALIGN(4);
162 __sdata_b_l1 = .; 218 __sdata_b_l1 = .;
@@ -169,11 +225,11 @@ SECTIONS
169 . = ALIGN(4); 225 . = ALIGN(4);
170 __ebss_b_l1 = .; 226 __ebss_b_l1 = .;
171 } 227 }
172 ASSERT (SIZEOF(.data_b_l1) <= L1_DATA_B_LENGTH, "L1 data B overflow!") 228 __data_b_l1_lma = LOADADDR(.data_b_l1);
173 229 __data_b_l1_len = SIZEOF(.data_b_l1);
174 __l2_lma_start = LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1); 230 ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")
175 231
176 .text_data_l2 L2_START : AT(LOADADDR(.data_b_l1) + SIZEOF(.data_b_l1)) 232 .text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
177 { 233 {
178 . = ALIGN(4); 234 . = ALIGN(4);
179 __stext_l2 = .; 235 __stext_l2 = .;
@@ -195,12 +251,18 @@ SECTIONS
195 . = ALIGN(4); 251 . = ALIGN(4);
196 __ebss_l2 = .; 252 __ebss_l2 = .;
197 } 253 }
198 ASSERT (SIZEOF(.text_data_l2) <= L2_LENGTH, "L2 overflow!") 254 __l2_lma = LOADADDR(.text_data_l2);
255 __l2_len = SIZEOF(.text_data_l2);
256 ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")
199 257
200 /* Force trailing alignment of our init section so that when we 258 /* Force trailing alignment of our init section so that when we
201 * free our init memory, we don't leave behind a partial page. 259 * free our init memory, we don't leave behind a partial page.
202 */ 260 */
203 . = LOADADDR(.text_data_l2) + SIZEOF(.text_data_l2); 261#ifdef CONFIG_RAMKERNEL
262 . = __l2_lma + __l2_len;
263#else
264 . = __init_data_end;
265#endif
204 . = ALIGN(PAGE_SIZE); 266 . = ALIGN(PAGE_SIZE);
205 ___init_end = .; 267 ___init_end = .;
206 268