Diffstat (limited to 'arch/blackfin/kernel/ipipe.c')
-rw-r--r--	arch/blackfin/kernel/ipipe.c	| 113
1 file changed, 54 insertions(+), 59 deletions(-)
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 1a496cd71ba2..486426f8a0d7 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -33,6 +33,7 @@
 #include <linux/io.h>
 #include <asm/system.h>
 #include <asm/atomic.h>
+#include <asm/irq_handler.h>
 
 DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
 
@@ -154,7 +155,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * pending for it.
 	 */
 	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
-	    ipipe_head_cpudom_var(irqpend_himask) == 0)
+	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
 		goto out;
 
 	__ipipe_walk_pipeline(head);
@@ -185,25 +186,21 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 }
 EXPORT_SYMBOL(__ipipe_disable_irqdesc);
 
-int __ipipe_syscall_root(struct pt_regs *regs)
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
 {
 	struct ipipe_percpu_domain_data *p;
-	unsigned long flags;
+	void (*hook)(void);
 	int ret;
 
+	WARN_ON_ONCE(irqs_disabled_hw());
+
 	/*
-	 * We need to run the IRQ tail hook whenever we don't
-	 * propagate a syscall to higher domains, because we know that
-	 * important operations might be pending there (e.g. Xenomai
-	 * deferred rescheduling).
+	 * We need to run the IRQ tail hook each time we intercept a
+	 * syscall, because we know that important operations might be
+	 * pending there (e.g. Xenomai deferred rescheduling).
 	 */
-
-	if (regs->orig_p0 < NR_syscalls) {
-		void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
-		hook();
-		if ((current->flags & PF_EVNOTIFY) == 0)
-			return 0;
-	}
+	hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+	hook();
 
 	/*
 	 * This routine either returns:
@@ -214,51 +211,47 @@ int __ipipe_syscall_root(struct pt_regs *regs)
 	 * tail work has to be performed (for handling signals etc).
 	 */
 
-	if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+	if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
 		return 0;
 
 	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
 
-	local_irq_save_hw(flags);
+	hard_local_irq_disable();
 
-	if (!__ipipe_root_domain_p) {
-		local_irq_restore_hw(flags);
-		return 1;
+	/*
+	 * This is the end of the syscall path, so we may
+	 * safely assume a valid Linux task stack here.
+	 */
+	if (current->ipipe_flags & PF_EVTRET) {
+		current->ipipe_flags &= ~PF_EVTRET;
+		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
 	}
 
-	p = ipipe_root_cpudom_ptr();
-	if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
-		__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+	if (!__ipipe_root_domain_p)
+		ret = -1;
+	else {
+		p = ipipe_root_cpudom_ptr();
+		if (__ipipe_ipending_p(p))
+			__ipipe_sync_pipeline();
+	}
 
-	local_irq_restore_hw(flags);
+	hard_local_irq_enable();
 
 	return -ret;
 }
 
-unsigned long ipipe_critical_enter(void (*syncfn) (void))
-{
-	unsigned long flags;
-
-	local_irq_save_hw(flags);
-
-	return flags;
-}
-
-void ipipe_critical_exit(unsigned long flags)
-{
-	local_irq_restore_hw(flags);
-}
-
 static void __ipipe_no_irqtail(void)
 {
 }
 
 int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
 {
-	info->ncpus = num_online_cpus();
-	info->cpufreq = ipipe_cpu_freq();
-	info->archdep.tmirq = IPIPE_TIMER_IRQ;
-	info->archdep.tmfreq = info->cpufreq;
+	info->sys_nr_cpus = num_online_cpus();
+	info->sys_cpu_freq = ipipe_cpu_freq();
+	info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+	info->sys_hrtimer_freq = __ipipe_core_clock;
+	info->sys_hrclock_freq = __ipipe_core_clock;
 
 	return 0;
 }
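Note on the ipipe_get_sysinfo() hunk above: the fields move from ncpus/cpufreq/archdep.* to the flat sys_* layout. A minimal sketch of how a client domain might read the renamed fields follows; the helper name and its printout are illustrative assumptions, only ipipe_get_sysinfo() and the sys_* field names come from this patch.

	/*
	 * Illustrative only: query the renamed ipipe_sysinfo fields.
	 * Error handling and field types are assumptions.
	 */
	static int example_show_ipipe_sysinfo(void)
	{
		struct ipipe_sysinfo info;

		if (ipipe_get_sysinfo(&info) != 0)
			return -ENODEV;

		pr_info("I-pipe: %d CPU(s), hrtimer IRQ %d, hrclock %llu Hz\n",
			(int)info.sys_nr_cpus,
			(int)info.sys_hrtimer_irq,
			(unsigned long long)info.sys_hrclock_freq);
		return 0;
	}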
@@ -279,9 +272,9 @@ int ipipe_trigger_irq(unsigned irq)
 		return -EINVAL;
 #endif
 
-	local_irq_save_hw(flags);
+	flags = hard_local_irq_save();
 	__ipipe_handle_irq(irq, NULL);
-	local_irq_restore_hw(flags);
+	hard_local_irq_restore(flags);
 
 	return 1;
 }
@@ -289,30 +282,32 @@ int ipipe_trigger_irq(unsigned irq)
 asmlinkage void __ipipe_sync_root(void)
 {
 	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+	struct ipipe_percpu_domain_data *p;
 	unsigned long flags;
 
 	BUG_ON(irqs_disabled());
 
-	local_irq_save_hw(flags);
+	flags = hard_local_irq_save();
 
 	if (irq_tail_hook)
 		irq_tail_hook();
 
 	clear_thread_flag(TIF_IRQ_SYNC);
 
-	if (ipipe_root_cpudom_var(irqpend_himask) != 0)
-		__ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
+	p = ipipe_root_cpudom_ptr();
+	if (__ipipe_ipending_p(p))
+		__ipipe_sync_pipeline();
 
-	local_irq_restore_hw(flags);
+	hard_local_irq_restore(flags);
 }
 
-void ___ipipe_sync_pipeline(unsigned long syncmask)
+void ___ipipe_sync_pipeline(void)
 {
 	if (__ipipe_root_domain_p &&
 	    test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
 		return;
 
-	__ipipe_sync_stage(syncmask);
+	__ipipe_sync_stage();
 }
 
 void __ipipe_disable_root_irqs_hw(void)
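The ___ipipe_sync_pipeline() rework above drops the syncmask argument and bails out while the root stage has IPIPE_SYNCDEFER_FLAG set. A hedged sketch of how root-domain code might pair the __ipipe_lock_root()/__ipipe_unlock_root() helpers (changed further below) with a final sync; the wrapper function is hypothetical and not part of this commit.

	/*
	 * Hypothetical root-domain section: while IPIPE_SYNCDEFER_FLAG is
	 * set via __ipipe_lock_root(), ___ipipe_sync_pipeline() returns
	 * early for the root domain, so pending root IRQs are only played
	 * back after unlocking.
	 */
	static void example_root_section(void)
	{
		__ipipe_lock_root();
		/* ... work during which pipeline syncing stays deferred ... */
		__ipipe_unlock_root();

		if (__ipipe_ipending_p(ipipe_root_cpudom_ptr()))
			__ipipe_sync_pipeline();	/* catch up on deferred IRQs */
	}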
@@ -344,10 +339,10 @@ void __ipipe_stall_root(void)
 {
 	unsigned long *p, flags;
 
-	local_irq_save_hw(flags);
+	flags = hard_local_irq_save();
 	p = &__ipipe_root_status;
 	__set_bit(IPIPE_STALL_FLAG, p);
-	local_irq_restore_hw(flags);
+	hard_local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__ipipe_stall_root);
 
@@ -356,10 +351,10 @@ unsigned long __ipipe_test_and_stall_root(void)
 	unsigned long *p, flags;
 	int x;
 
-	local_irq_save_hw(flags);
+	flags = hard_local_irq_save();
 	p = &__ipipe_root_status;
 	x = __test_and_set_bit(IPIPE_STALL_FLAG, p);
-	local_irq_restore_hw(flags);
+	hard_local_irq_restore(flags);
 
 	return x;
 }
@@ -371,10 +366,10 @@ unsigned long __ipipe_test_root(void)
 	unsigned long flags;
 	int x;
 
-	local_irq_save_hw_smp(flags);
+	flags = hard_local_irq_save_smp();
 	p = &__ipipe_root_status;
 	x = test_bit(IPIPE_STALL_FLAG, p);
-	local_irq_restore_hw_smp(flags);
+	hard_local_irq_restore_smp(flags);
 
 	return x;
 }
@@ -384,10 +379,10 @@ void __ipipe_lock_root(void)
 {
 	unsigned long *p, flags;
 
-	local_irq_save_hw(flags);
+	flags = hard_local_irq_save();
 	p = &__ipipe_root_status;
 	__set_bit(IPIPE_SYNCDEFER_FLAG, p);
-	local_irq_restore_hw(flags);
+	hard_local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__ipipe_lock_root);
 
@@ -395,9 +390,9 @@ void __ipipe_unlock_root(void)
 {
 	unsigned long *p, flags;
 
-	local_irq_save_hw(flags);
+	flags = hard_local_irq_save();
 	p = &__ipipe_root_status;
 	__clear_bit(IPIPE_SYNCDEFER_FLAG, p);
-	local_irq_restore_hw(flags);
+	hard_local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__ipipe_unlock_root);
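The recurring mechanical change in the hunks above is the hard-IRQ helper convention: the old local_irq_*_hw() macros wrote into a flags variable passed by name, while the new hard_local_irq_*() helpers return and accept the flags word. A minimal sketch of the new convention, using the root status word touched above; the wrapper function name is an assumption for illustration only.

	/*
	 * Illustration of the calling convention introduced by this patch:
	 * hard_local_irq_save() returns the saved hardware IRQ state instead
	 * of storing it through a macro argument.  Only the helper names and
	 * __ipipe_root_status come from the patch; the wrapper is hypothetical.
	 */
	static inline void example_set_root_status_bit(int nr)
	{
		unsigned long flags;

		flags = hard_local_irq_save();	/* mask hard IRQs, keep state */
		__set_bit(nr, &__ipipe_root_status);
		hard_local_irq_restore(flags);	/* restore previous state */
	}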