| field | value | |
|---|---|---|
| author | Thomas Gleixner <tglx@linutronix.de> | 2011-05-14 06:06:36 -0400 |
| committer | Thomas Gleixner <tglx@linutronix.de> | 2011-05-14 06:06:36 -0400 |
| commit | a18f22a968de17b29f2310cdb7ba69163e65ec15 (patch) | |
| tree | a7d56d88fad5e444d7661484109758a2f436129e | /arch/blackfin/kernel |
| parent | a1c57e0fec53defe745e64417eacdbd3618c3e66 (diff) | |
| parent | 798778b8653f64b7b2162ac70eca10367cff6ce8 (diff) | |
Merge branch 'consolidate-clksrc-i8253' of master.kernel.org:~rmk/linux-2.6-arm into timers/clocksource
Conflicts:
arch/ia64/kernel/cyclone.c
arch/mips/kernel/i8253.c
arch/x86/kernel/i8253.c
Reason: Resolve conflicts so further cleanups do not conflict further
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/blackfin/kernel')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | arch/blackfin/kernel/bfin_dma_5xx.c | 32 |
| -rw-r--r-- | arch/blackfin/kernel/gptimers.c | 2 |
| -rw-r--r-- | arch/blackfin/kernel/ipipe.c | 84 |
| -rw-r--r-- | arch/blackfin/kernel/irqchip.c | 10 |
| -rw-r--r-- | arch/blackfin/kernel/kgdb.c | 6 |
| -rw-r--r-- | arch/blackfin/kernel/module.c | 45 |
| -rw-r--r-- | arch/blackfin/kernel/setup.c | 37 |
| -rw-r--r-- | arch/blackfin/kernel/time-ts.c | 8 |
| -rw-r--r-- | arch/blackfin/kernel/time.c | 6 |
| -rw-r--r-- | arch/blackfin/kernel/trace.c | 7 |
| -rw-r--r-- | arch/blackfin/kernel/traps.c | 2 |
| -rw-r--r-- | arch/blackfin/kernel/vmlinux.lds.S | 3 |

12 files changed, 137 insertions, 105 deletions
```diff
diff --git a/arch/blackfin/kernel/bfin_dma_5xx.c b/arch/blackfin/kernel/bfin_dma_5xx.c
index 1e485dfdc9f2..6ce8dce753c9 100644
--- a/arch/blackfin/kernel/bfin_dma_5xx.c
+++ b/arch/blackfin/kernel/bfin_dma_5xx.c
@@ -84,6 +84,24 @@ static int __init proc_dma_init(void)
 late_initcall(proc_dma_init);
 #endif
 
+static void set_dma_peripheral_map(unsigned int channel, const char *device_id)
+{
+#ifdef CONFIG_BF54x
+        unsigned int per_map;
+
+        switch (channel) {
+        case CH_UART2_RX: per_map = 0xC << 12; break;
+        case CH_UART2_TX: per_map = 0xD << 12; break;
+        case CH_UART3_RX: per_map = 0xE << 12; break;
+        case CH_UART3_TX: per_map = 0xF << 12; break;
+        default:          return;
+        }
+
+        if (strncmp(device_id, "BFIN_UART", 9) == 0)
+                dma_ch[channel].regs->peripheral_map = per_map;
+#endif
+}
+
 /**
  * request_dma - request a DMA channel
  *
@@ -111,19 +129,7 @@ int request_dma(unsigned int channel, const char *device_id)
                 return -EBUSY;
         }
 
-#ifdef CONFIG_BF54x
-        if (channel >= CH_UART2_RX && channel <= CH_UART3_TX) {
-                unsigned int per_map;
-                per_map = dma_ch[channel].regs->peripheral_map & 0xFFF;
-                if (strncmp(device_id, "BFIN_UART", 9) == 0)
-                        dma_ch[channel].regs->peripheral_map = per_map |
-                                ((channel - CH_UART2_RX + 0xC)<<12);
-                else
-                        dma_ch[channel].regs->peripheral_map = per_map |
-                                ((channel - CH_UART2_RX + 0x6)<<12);
-        }
-#endif
-
+        set_dma_peripheral_map(channel, device_id);
         dma_ch[channel].device_id = device_id;
         dma_ch[channel].irq = 0;
 
```
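The hunk above folds the open-coded BF54x UART peripheral-map fixup into a small switch-based helper. As a rough standalone sketch of that lookup-helper pattern (the `FAKE_UART*` channel numbers and `uart_peripheral_map()` are made-up stand-ins, not the real Blackfin definitions):

```c
#include <stdio.h>
#include <string.h>

/* Hypothetical channel numbers standing in for CH_UART2_RX..CH_UART3_TX. */
enum { FAKE_UART2_RX = 18, FAKE_UART2_TX, FAKE_UART3_RX, FAKE_UART3_TX };

/* Map a DMA channel to its peripheral-map value, or 0 if the channel has no
 * alternate mapping.  Mirrors the switch-based helper added in the patch. */
static unsigned int uart_peripheral_map(unsigned int channel)
{
        switch (channel) {
        case FAKE_UART2_RX: return 0xC << 12;
        case FAKE_UART2_TX: return 0xD << 12;
        case FAKE_UART3_RX: return 0xE << 12;
        case FAKE_UART3_TX: return 0xF << 12;
        default:            return 0;
        }
}

int main(void)
{
        const char *device_id = "BFIN_UART2";
        unsigned int channel = FAKE_UART2_TX;
        unsigned int per_map = uart_peripheral_map(channel);

        /* Only UART requesters get the alternate mapping, as in the patch. */
        if (per_map && strncmp(device_id, "BFIN_UART", 9) == 0)
                printf("channel %u -> peripheral_map %#x\n", channel, per_map);
        return 0;
}
```

Keeping the mapping in one switch makes the intent (channel to peripheral-map value) visible at a glance instead of deriving it from channel arithmetic at the call site.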
```diff
diff --git a/arch/blackfin/kernel/gptimers.c b/arch/blackfin/kernel/gptimers.c
index cdbe075de1dc..8b81dc04488a 100644
--- a/arch/blackfin/kernel/gptimers.c
+++ b/arch/blackfin/kernel/gptimers.c
@@ -268,7 +268,7 @@ void disable_gptimers(uint16_t mask)
         _disable_gptimers(mask);
         for (i = 0; i < MAX_BLACKFIN_GPTIMERS; ++i)
                 if (mask & (1 << i))
-                        group_regs[BFIN_TIMER_OCTET(i)]->status |= trun_mask[i];
+                        group_regs[BFIN_TIMER_OCTET(i)]->status = trun_mask[i];
         SSYNC();
 }
 EXPORT_SYMBOL(disable_gptimers);
```
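The single-character change above (`|=` to `=`) matters because the gptimer status register is write-1-to-clear: a read-modify-write writes back every currently latched bit and so clears status that other code still wants to see. A standalone sketch of the pitfall, with a hypothetical `fake_status` register standing in for the real hardware:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical W1C status register: writing a 1 to a bit clears that bit,
 * writing 0 leaves it alone (mimics latched timer status bits). */
static uint16_t fake_status = 0x0007;   /* three events currently latched */

static void w1c_write(uint16_t val)
{
        fake_status &= ~val;            /* "hardware" clears the bits written as 1 */
}

int main(void)
{
        uint16_t trun_mask = 0x0001;    /* we only want to clear timer 0 */

        /* Buggy pattern: status |= mask.  The read returns 0x0007, so the
         * write-back of 0x0007 clears *all* latched bits, not just ours. */
        w1c_write(fake_status | trun_mask);
        printf("after |= style write: %#x\n", fake_status);    /* 0 */

        fake_status = 0x0007;           /* re-latch for the second run */

        /* Correct pattern: write only the bits you intend to clear. */
        w1c_write(trun_mask);
        printf("after plain write:    %#x\n", fake_status);    /* 0x6 */
        return 0;
}
```

Running it shows the `|=` style wiping all three latched bits while the plain write clears only the requested one.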
```diff
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 3b1da4aff2a1..f37019c847c9 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -154,7 +154,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
          * pending for it.
          */
         if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
-            ipipe_head_cpudom_var(irqpend_himask) == 0)
+            !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
                 goto out;
 
         __ipipe_walk_pipeline(head);
@@ -185,25 +185,21 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 }
 EXPORT_SYMBOL(__ipipe_disable_irqdesc);
 
-int __ipipe_syscall_root(struct pt_regs *regs)
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
 {
         struct ipipe_percpu_domain_data *p;
-        unsigned long flags;
+        void (*hook)(void);
         int ret;
 
+        WARN_ON_ONCE(irqs_disabled_hw());
+
         /*
-         * We need to run the IRQ tail hook whenever we don't
-         * propagate a syscall to higher domains, because we know that
-         * important operations might be pending there (e.g. Xenomai
-         * deferred rescheduling).
+         * We need to run the IRQ tail hook each time we intercept a
+         * syscall, because we know that important operations might be
+         * pending there (e.g. Xenomai deferred rescheduling).
          */
-
-        if (regs->orig_p0 < NR_syscalls) {
-                void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
-                hook();
-                if ((current->flags & PF_EVNOTIFY) == 0)
-                        return 0;
-        }
+        hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+        hook();
 
         /*
          * This routine either returns:
@@ -214,51 +210,47 @@ int __ipipe_syscall_root(struct pt_regs *regs)
          * tail work has to be performed (for handling signals etc).
          */
 
-        if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+        if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+            !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
                 return 0;
 
         ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
 
-        flags = hard_local_irq_save();
+        hard_local_irq_disable();
 
-        if (!__ipipe_root_domain_p) {
-                hard_local_irq_restore(flags);
-                return 1;
+        /*
+         * This is the end of the syscall path, so we may
+         * safely assume a valid Linux task stack here.
+         */
+        if (current->ipipe_flags & PF_EVTRET) {
+                current->ipipe_flags &= ~PF_EVTRET;
+                __ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
         }
 
-        p = ipipe_root_cpudom_ptr();
-        if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
-                __ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+        if (!__ipipe_root_domain_p)
+                ret = -1;
+        else {
+                p = ipipe_root_cpudom_ptr();
+                if (__ipipe_ipending_p(p))
+                        __ipipe_sync_pipeline();
+        }
 
-        hard_local_irq_restore(flags);
+        hard_local_irq_enable();
 
         return -ret;
 }
 
-unsigned long ipipe_critical_enter(void (*syncfn) (void))
-{
-        unsigned long flags;
-
-        flags = hard_local_irq_save();
-
-        return flags;
-}
-
-void ipipe_critical_exit(unsigned long flags)
-{
-        hard_local_irq_restore(flags);
-}
-
 static void __ipipe_no_irqtail(void)
 {
 }
 
 int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
 {
-        info->ncpus = num_online_cpus();
-        info->cpufreq = ipipe_cpu_freq();
-        info->archdep.tmirq = IPIPE_TIMER_IRQ;
-        info->archdep.tmfreq = info->cpufreq;
+        info->sys_nr_cpus = num_online_cpus();
+        info->sys_cpu_freq = ipipe_cpu_freq();
+        info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+        info->sys_hrtimer_freq = __ipipe_core_clock;
+        info->sys_hrclock_freq = __ipipe_core_clock;
 
         return 0;
 }
@@ -289,6 +281,7 @@ int ipipe_trigger_irq(unsigned irq)
 asmlinkage void __ipipe_sync_root(void)
 {
         void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+        struct ipipe_percpu_domain_data *p;
         unsigned long flags;
 
         BUG_ON(irqs_disabled());
@@ -300,19 +293,20 @@ asmlinkage void __ipipe_sync_root(void)
 
         clear_thread_flag(TIF_IRQ_SYNC);
 
-        if (ipipe_root_cpudom_var(irqpend_himask) != 0)
-                __ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
+        p = ipipe_root_cpudom_ptr();
+        if (__ipipe_ipending_p(p))
+                __ipipe_sync_pipeline();
 
         hard_local_irq_restore(flags);
 }
 
-void ___ipipe_sync_pipeline(unsigned long syncmask)
+void ___ipipe_sync_pipeline(void)
 {
         if (__ipipe_root_domain_p &&
             test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
                 return;
 
-        __ipipe_sync_stage(syncmask);
+        __ipipe_sync_stage();
 }
 
 void __ipipe_disable_root_irqs_hw(void)
```
```diff
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 64cff54a8a58..1696d34f51c2 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -39,21 +39,23 @@ int show_interrupts(struct seq_file *p, void *v)
         unsigned long flags;
 
         if (i < NR_IRQS) {
-                raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-                action = irq_desc[i].action;
+                struct irq_desc *desc = irq_to_desc(i);
+
+                raw_spin_lock_irqsave(&desc->lock, flags);
+                action = desc->action;
                 if (!action)
                         goto skip;
                 seq_printf(p, "%3d: ", i);
                 for_each_online_cpu(j)
                         seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-                seq_printf(p, " %8s", irq_desc[i].chip->name);
+                seq_printf(p, " %8s", irq_desc_get_chip(desc)->name);
                 seq_printf(p, " %s", action->name);
                 for (action = action->next; action; action = action->next)
                         seq_printf(p, " %s", action->name);
 
                 seq_putc(p, '\n');
  skip:
-                raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+                raw_spin_unlock_irqrestore(&desc->lock, flags);
         } else if (i == NR_IRQS) {
                 seq_printf(p, "NMI: ");
                 for_each_online_cpu(j)
```
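The show_interrupts() hunk above moves from indexing the global `irq_desc[]` array to the `irq_to_desc()` / `irq_desc_get_chip()` accessors used with sparse IRQ support. A loose standalone sketch of why an accessor that can return NULL beats raw indexing when descriptors may not exist for every number (the `fake_*` names are illustrative only, not the kernel API):

```c
#include <stdio.h>

/* Toy descriptor, standing in for struct irq_desc. */
struct fake_desc {
        const char *chip_name;
};

/* With sparse IRQs only some numbers have a descriptor; the accessor hides
 * the storage (array, radix tree, ...) and may return NULL. */
static struct fake_desc *table[8];

static struct fake_desc *fake_irq_to_desc(unsigned int irq)
{
        return irq < 8 ? table[irq] : NULL;
}

int main(void)
{
        static struct fake_desc timer = { .chip_name = "toy-intc" };
        table[3] = &timer;              /* only IRQ 3 exists in this sketch */

        for (unsigned int irq = 0; irq < 8; irq++) {
                struct fake_desc *desc = fake_irq_to_desc(irq);

                if (!desc)              /* dereferencing without this check would crash */
                        continue;
                printf("irq %u handled by %s\n", irq, desc->chip_name);
        }
        return 0;
}
```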
```diff
diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
index eb92592fd80c..9b80b152435e 100644
--- a/arch/blackfin/kernel/kgdb.c
+++ b/arch/blackfin/kernel/kgdb.c
@@ -181,7 +181,7 @@ static int bfin_set_hw_break(unsigned long addr, int len, enum kgdb_bptype type)
                 return -ENOSPC;
         }
 
-        /* Becasue hardware data watchpoint impelemented in current
+        /* Because hardware data watchpoint impelemented in current
          * Blackfin can not trigger an exception event as the hardware
          * instrction watchpoint does, we ignaore all data watch point here.
          * They can be turned on easily after future blackfin design
@@ -422,11 +422,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
 
 struct kgdb_arch arch_kgdb_ops = {
         .gdb_bpt_instr = {0xa1},
-#ifdef CONFIG_SMP
-        .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
-#else
         .flags = KGDB_HW_BREAKPOINT,
-#endif
         .set_hw_breakpoint = bfin_set_hw_break,
         .remove_hw_breakpoint = bfin_remove_hw_break,
         .disable_hw_break = bfin_disable_hw_debug,
```
```diff
diff --git a/arch/blackfin/kernel/module.c b/arch/blackfin/kernel/module.c
index a6dfa6b71e63..35e350cad9d9 100644
--- a/arch/blackfin/kernel/module.c
+++ b/arch/blackfin/kernel/module.c
@@ -4,7 +4,7 @@
  * Licensed under the GPL-2 or later
  */
 
-#define pr_fmt(fmt) "module %s: " fmt
+#define pr_fmt(fmt) "module %s: " fmt, mod->name
 
 #include <linux/moduleloader.h>
 #include <linux/elf.h>
@@ -57,8 +57,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
                         dest = l1_inst_sram_alloc(s->sh_size);
                         mod->arch.text_l1 = dest;
                         if (dest == NULL) {
-                                pr_err("L1 inst memory allocation failed\n",
-                                        mod->name);
+                                pr_err("L1 inst memory allocation failed\n");
                                 return -1;
                         }
                         dma_memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -70,8 +69,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
                         dest = l1_data_sram_alloc(s->sh_size);
                         mod->arch.data_a_l1 = dest;
                         if (dest == NULL) {
-                                pr_err("L1 data memory allocation failed\n",
-                                        mod->name);
+                                pr_err("L1 data memory allocation failed\n");
                                 return -1;
                         }
                         memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -83,8 +81,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
                         dest = l1_data_sram_zalloc(s->sh_size);
                         mod->arch.bss_a_l1 = dest;
                         if (dest == NULL) {
-                                pr_err("L1 data memory allocation failed\n",
-                                        mod->name);
+                                pr_err("L1 data memory allocation failed\n");
                                 return -1;
                         }
 
@@ -93,8 +90,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
                         dest = l1_data_B_sram_alloc(s->sh_size);
                         mod->arch.data_b_l1 = dest;
                         if (dest == NULL) {
-                                pr_err("L1 data memory allocation failed\n",
-                                        mod->name);
+                                pr_err("L1 data memory allocation failed\n");
                                 return -1;
                         }
                         memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -104,8 +100,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
                         dest = l1_data_B_sram_alloc(s->sh_size);
                         mod->arch.bss_b_l1 = dest;
                         if (dest == NULL) {
-                                pr_err("L1 data memory allocation failed\n",
-                                        mod->name);
+                                pr_err("L1 data memory allocation failed\n");
                                 return -1;
                         }
                         memset(dest, 0, s->sh_size);
@@ -117,8 +112,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
                         dest = l2_sram_alloc(s->sh_size);
                         mod->arch.text_l2 = dest;
                         if (dest == NULL) {
-                                pr_err("L2 SRAM allocation failed\n",
-                                        mod->name);
+                                pr_err("L2 SRAM allocation failed\n");
                                 return -1;
                         }
                         memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -130,8 +124,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
                         dest = l2_sram_alloc(s->sh_size);
                         mod->arch.data_l2 = dest;
                         if (dest == NULL) {
-                                pr_err("L2 SRAM allocation failed\n",
-                                        mod->name);
+                                pr_err("L2 SRAM allocation failed\n");
                                 return -1;
                         }
                         memcpy(dest, (void *)s->sh_addr, s->sh_size);
@@ -143,8 +136,7 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
                         dest = l2_sram_zalloc(s->sh_size);
                         mod->arch.bss_l2 = dest;
                         if (dest == NULL) {
-                                pr_err("L2 SRAM allocation failed\n",
-                                        mod->name);
+                                pr_err("L2 SRAM allocation failed\n");
                                 return -1;
                         }
 
@@ -160,9 +152,9 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 
 int
 apply_relocate(Elf_Shdr * sechdrs, const char *strtab,
-               unsigned int symindex, unsigned int relsec, struct module *me)
+               unsigned int symindex, unsigned int relsec, struct module *mod)
 {
-        pr_err(".rel unsupported\n", me->name);
+        pr_err(".rel unsupported\n");
         return -ENOEXEC;
 }
 
@@ -186,7 +178,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
         Elf32_Sym *sym;
         unsigned long location, value, size;
 
-        pr_debug("applying relocate section %u to %u\n", mod->name,
+        pr_debug("applying relocate section %u to %u\n",
                 relsec, sechdrs[relsec].sh_info);
 
         for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
@@ -203,14 +195,14 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
 
 #ifdef CONFIG_SMP
                 if (location >= COREB_L1_DATA_A_START) {
-                        pr_err("cannot relocate in L1: %u (SMP kernel)",
-                                mod->name, ELF32_R_TYPE(rel[i].r_info));
+                        pr_err("cannot relocate in L1: %u (SMP kernel)\n",
+                                ELF32_R_TYPE(rel[i].r_info));
                         return -ENOEXEC;
                 }
 #endif
 
                 pr_debug("location is %lx, value is %lx type is %d\n",
-                        mod->name, location, value, ELF32_R_TYPE(rel[i].r_info));
+                        location, value, ELF32_R_TYPE(rel[i].r_info));
 
                 switch (ELF32_R_TYPE(rel[i].r_info)) {
 
@@ -230,11 +222,11 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                 case R_BFIN_PCREL12_JUMP_S:
                 case R_BFIN_PCREL10:
                         pr_err("unsupported relocation: %u (no -mlong-calls?)\n",
-                                mod->name, ELF32_R_TYPE(rel[i].r_info));
+                                ELF32_R_TYPE(rel[i].r_info));
                         return -ENOEXEC;
 
                 default:
-                        pr_err("unknown relocation: %u\n", mod->name,
+                        pr_err("unknown relocation: %u\n",
                                 ELF32_R_TYPE(rel[i].r_info));
                         return -ENOEXEC;
                 }
@@ -251,8 +243,7 @@ apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
                         isram_memcpy((void *)location, &value, size);
                         break;
                 default:
-                        pr_err("invalid relocation for %#lx\n",
-                                mod->name, location);
+                        pr_err("invalid relocation for %#lx\n", location);
                         return -ENOEXEC;
                 }
         }
```
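Most of the module.c churn above follows from the first hunk: `pr_fmt()` now appends `mod->name` as a format argument, so every `pr_err()`/`pr_debug()` call site drops the explicit `mod->name` it used to pass (previously to format strings that had no conversion for it). A minimal userspace mimic of that trick, assuming a `printf`-backed `pr_err()` rather than the kernel's printk:

```c
#include <stdio.h>

/* Stand-in for the kernel's pr_err(): every message goes through pr_fmt()
 * first, so a prefix and its argument can be injected in one place.
 * ##__VA_ARGS__ is the GNU extension the kernel also relies on. */
#define pr_fmt(fmt) "module %s: " fmt, mod->name
#define pr_err(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

struct fake_module {
        const char *name;
};

int main(void)
{
        struct fake_module m = { .name = "demo_mod" };
        struct fake_module *mod = &m;   /* pr_fmt() expects 'mod' in scope */

        /* Call sites no longer pass mod->name themselves; the macro does it. */
        pr_err("L1 inst memory allocation failed\n");
        pr_err("unknown relocation: %u\n", 42u);
        return 0;
}
```

Because the macro references `mod`, a variable of that name must be in scope at each call site, which is also why apply_relocate()'s parameter is renamed from `me` to `mod` in the hunk above.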
```diff
diff --git a/arch/blackfin/kernel/setup.c b/arch/blackfin/kernel/setup.c
index ac71dc15cbdb..805c6132c779 100644
--- a/arch/blackfin/kernel/setup.c
+++ b/arch/blackfin/kernel/setup.c
@@ -215,11 +215,48 @@ void __init bfin_relocate_l1_mem(void)
 
         early_dma_memcpy_done();
 
+#if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
+        blackfin_iflush_l1_entry[0] = (unsigned long)blackfin_icache_flush_range_l1;
+#endif
+
         /* if necessary, copy L2 text/data to L2 SRAM */
         if (L2_LENGTH && l2_len)
                 memcpy(_stext_l2, _l2_lma, l2_len);
 }
 
+#ifdef CONFIG_SMP
+void __init bfin_relocate_coreb_l1_mem(void)
+{
+        unsigned long text_l1_len = (unsigned long)_text_l1_len;
+        unsigned long data_l1_len = (unsigned long)_data_l1_len;
+        unsigned long data_b_l1_len = (unsigned long)_data_b_l1_len;
+
+        blackfin_dma_early_init();
+
+        /* if necessary, copy L1 text to L1 instruction SRAM */
+        if (L1_CODE_LENGTH && text_l1_len)
+                early_dma_memcpy((void *)COREB_L1_CODE_START, _text_l1_lma,
+                                 text_l1_len);
+
+        /* if necessary, copy L1 data to L1 data bank A SRAM */
+        if (L1_DATA_A_LENGTH && data_l1_len)
+                early_dma_memcpy((void *)COREB_L1_DATA_A_START, _data_l1_lma,
+                                 data_l1_len);
+
+        /* if necessary, copy L1 data B to L1 data bank B SRAM */
+        if (L1_DATA_B_LENGTH && data_b_l1_len)
+                early_dma_memcpy((void *)COREB_L1_DATA_B_START, _data_b_l1_lma,
+                                 data_b_l1_len);
+
+        early_dma_memcpy_done();
+
+#ifdef CONFIG_ICACHE_FLUSH_L1
+        blackfin_iflush_l1_entry[1] = (unsigned long)blackfin_icache_flush_range_l1 -
+                (unsigned long)_stext_l1 + COREB_L1_CODE_START;
+#endif
+}
+#endif
+
 #ifdef CONFIG_ROMKERNEL
 void __init bfin_relocate_xip_data(void)
 {
```
```diff
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index 4a013714500b..9e9b60d969dc 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -175,8 +175,14 @@ irqreturn_t bfin_gptmr0_interrupt(int irq, void *dev_id)
 {
         struct clock_event_device *evt = dev_id;
         smp_mb();
-        evt->event_handler(evt);
+        /*
+         * We want to ACK before we handle so that we can handle smaller timer
+         * intervals. This way if the timer expires again while we're handling
+         * things, we're more likely to see that 2nd int rather than swallowing
+         * it by ACKing the int at the end of this handler.
+         */
         bfin_gptmr0_ack();
+        evt->event_handler(evt);
         return IRQ_HANDLED;
 }
 
```
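The comment added in the hunk above documents the ordering change: acknowledge the timer interrupt first, then run the event handler, so a re-trigger that fires while the handler runs is not swallowed by a late ACK. A toy model of that reasoning (plain C with a hypothetical `pending` latch, not the Blackfin timer hardware):

```c
#include <stdbool.h>
#include <stdio.h>

/* Toy model of a latched timer interrupt: 'pending' is set by "hardware"
 * and cleared by the ACK write.  Names are illustrative only. */
static bool pending;

static void ack(void)          { pending = false; }

static void handle_event(void)
{
        /* While we are busy here the timer may fire again. */
        pending = true;         /* simulate a re-trigger during handling */
}

int main(void)
{
        /* ACK-first ordering (what the patch switches to): the re-trigger
         * that arrives inside handle_event() is still latched afterwards,
         * so a second interrupt will be taken instead of being lost. */
        pending = true;
        ack();
        handle_event();
        printf("ack-first:    re-trigger %s\n", pending ? "seen" : "lost");

        /* Handle-first ordering (the old code): the ACK at the end also
         * wipes out the event that arrived while handling, losing a tick. */
        pending = true;
        handle_event();
        ack();
        printf("handle-first: re-trigger %s\n", pending ? "seen" : "lost");
        return 0;
}
```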
```diff
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index c9113619029f..8d73724c0092 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -114,16 +114,14 @@ u32 arch_gettimeoffset(void)
 
 /*
  * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * as well as call the "xtime_update()" routine every clocktick
 */
 #ifdef CONFIG_CORE_TIMER_IRQ_L1
 __attribute__((l1_text))
 #endif
 irqreturn_t timer_interrupt(int irq, void *dummy)
 {
-        write_seqlock(&xtime_lock);
-        do_timer(1);
-        write_sequnlock(&xtime_lock);
+        xtime_update(1);
 
 #ifdef CONFIG_IPIPE
         update_root_process_times(get_irq_regs());
```
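The timer_interrupt() hunk above replaces the open-coded `write_seqlock(&xtime_lock); do_timer(1); write_sequnlock(&xtime_lock);` sequence with the core helper `xtime_update(1)`, which performs the equivalent locking internally. A rough sketch of the "move the locking into one helper" refactor, using a pthread mutex as a stand-in for the kernel's seqlock and made-up `fake_*` names:

```c
#include <pthread.h>
#include <stdio.h>

/* Stand-ins for xtime_lock and jiffies; the real kernel uses a seqlock. */
static pthread_mutex_t fake_xtime_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long fake_jiffies;

static void fake_do_timer(unsigned long ticks)
{
        fake_jiffies += ticks;
}

/* What xtime_update() amounts to: the locking lives in one place, so every
 * architecture's timer_interrupt() just makes this single call. */
static void fake_xtime_update(unsigned long ticks)
{
        pthread_mutex_lock(&fake_xtime_lock);
        fake_do_timer(ticks);
        pthread_mutex_unlock(&fake_xtime_lock);
}

int main(void)
{
        /* Old style: callers open-coded lock/do_timer/unlock (three lines);
         * new style: one call, no way to forget the unlock. */
        fake_xtime_update(1);
        printf("jiffies = %lu\n", fake_jiffies);
        return 0;
}
```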
```diff
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
index 05b550891ce5..050db44fe919 100644
--- a/arch/blackfin/kernel/trace.c
+++ b/arch/blackfin/kernel/trace.c
@@ -912,10 +912,11 @@ void show_regs(struct pt_regs *fp)
         /* if no interrupts are going off, don't print this out */
         if (fp->ipend & ~0x3F) {
                 for (i = 0; i < (NR_IRQS - 1); i++) {
+                        struct irq_desc *desc = irq_to_desc(i);
                         if (!in_atomic)
-                                raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
+                                raw_spin_lock_irqsave(&desc->lock, flags);
 
-                        action = irq_desc[i].action;
+                        action = desc->action;
                         if (!action)
                                 goto unlock;
 
@@ -928,7 +929,7 @@ void show_regs(struct pt_regs *fp)
                         pr_cont("\n");
 unlock:
                         if (!in_atomic)
-                                raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+                                raw_spin_unlock_irqrestore(&desc->lock, flags);
                 }
         }
 
```
```diff
diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c
index 59c1df75e4de..655f25d139a7 100644
--- a/arch/blackfin/kernel/traps.c
+++ b/arch/blackfin/kernel/traps.c
@@ -98,7 +98,7 @@ asmlinkage notrace void trap_c(struct pt_regs *fp)
         /* send the appropriate signal to the user program */
         switch (trapnr) {
 
-        /* This table works in conjuction with the one in ./mach-common/entry.S
+        /* This table works in conjunction with the one in ./mach-common/entry.S
          * Some exceptions are handled there (in assembly, in exception space)
          * Some are handled here, (in C, in interrupt space)
          * Some, like CPLB, are handled in both, where the normal path is
```
```diff
diff --git a/arch/blackfin/kernel/vmlinux.lds.S b/arch/blackfin/kernel/vmlinux.lds.S
index 4122678529c0..854fa49f1c3e 100644
--- a/arch/blackfin/kernel/vmlinux.lds.S
+++ b/arch/blackfin/kernel/vmlinux.lds.S
@@ -136,7 +136,7 @@ SECTIONS
 
         . = ALIGN(16);
         INIT_DATA_SECTION(16)
-        PERCPU(4)
+        PERCPU(32, PAGE_SIZE)
 
         .exit.data :
         {
@@ -176,6 +176,7 @@ SECTIONS
         {
                 . = ALIGN(4);
                 __stext_l1 = .;
+                *(.l1.text.head)
                 *(.l1.text)
 #ifdef CONFIG_SCHEDULE_L1
                 SCHED_TEXT
```