author     Philippe Gerum <rpm@xenomai.org>      2011-03-17 02:12:48 -0400
committer  Mike Frysinger <vapier@gentoo.org>    2011-03-18 04:01:10 -0400
commit     5b5da4c4b843e0d84244472b72fe1e7500f5681f
tree       b15424c4909ca7b4f69f28bd8576ba065e9030fa /arch/blackfin/mach-common
parent     8944b5a258d73abd1f86bb360c27bb8c3bed5daa
Blackfin/ipipe: upgrade to I-pipe mainline
This patch introduces Blackfin-specific bits to support the current
tip of the interrupt pipeline development, mainly:
- 2/3-level interrupt maps (sparse IRQs)
- generic virq handling
- sysinfo v2 format for ipipe_get_sysinfo()
Signed-off-by: Philippe Gerum <rpm@xenomai.org>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
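
As a rough illustration of the "generic virq handling" item above, the sketch below shows how a virtual IRQ is typically allocated, bound to a handler on the root (Linux) domain, and triggered with the legacy I-pipe API. This is not part of the patch: the calls (ipipe_alloc_virq(), ipipe_virtualize_irq(), ipipe_trigger_irq()) and flag names are recalled from I-pipe trees of that era and should be checked against the kernel this commit targets.

#include <linux/ipipe.h>

static unsigned my_virq;	/* hypothetical virq, for illustration only */

/* Invoked when the pipeline syncs the root stage for this virq. */
static void my_virq_handler(unsigned irq, void *cookie)
{
	/* deferred Linux-side work goes here */
}

static int __init my_virq_setup(void)
{
	my_virq = ipipe_alloc_virq();
	if (!my_virq)
		return -EBUSY;

	/* Attach the handler to the root domain and enable the virq. */
	ipipe_virtualize_irq(ipipe_root_domain, my_virq, my_virq_handler,
			     NULL, NULL, IPIPE_HANDLE_MASK | IPIPE_ENABLE_MASK);
	return 0;
}

/* A high-priority domain may then post it with ipipe_trigger_irq(my_virq). */
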
Diffstat (limited to 'arch/blackfin/mach-common')
 -rw-r--r--   arch/blackfin/mach-common/ints-priority.c | 41
 1 file changed, 37 insertions, 4 deletions
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 8e9d3cc30885..6cd52395a999 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -15,6 +15,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/sched.h>
 #ifdef CONFIG_IPIPE
 #include <linux/ipipe.h>
 #endif
@@ -556,10 +557,9 @@ static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
 #ifdef CONFIG_IPIPE
-	_set_irq_handler(irq, handle_level_irq);
-#else
-	__set_irq_handler_unlocked(irq, handle);
+	handle = handle_level_irq;
 #endif
+	__set_irq_handler_unlocked(irq, handle);
 }
 
 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
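
For context, this is bfin_set_irq_handler() as it reads once the hunk above is applied (reconstructed from the new side of the diff): under CONFIG_IPIPE the requested flow handler is simply overridden with handle_level_irq before the common, unlocked installation path runs.

static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
	/* force level-type flow handling when the pipeline is enabled */
	handle = handle_level_irq;
#endif
	__set_irq_handler_unlocked(irq, handle);
}
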
@@ -1392,7 +1392,7 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 	struct ipipe_domain *this_domain = __ipipe_current_domain;
 	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
 	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
-	int irq, s;
+	int irq, s = 0;
 
 	if (likely(vec == EVT_IVTMR_P))
 		irq = IRQ_CORETMR;
@@ -1442,6 +1442,21 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 		__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
 	}
 
+	/*
+	 * We don't want Linux interrupt handlers to run at the
+	 * current core priority level (i.e. < EVT15), since this
+	 * might delay other interrupts handled by a high priority
+	 * domain. Here is what we do instead:
+	 *
+	 * - we raise the SYNCDEFER bit to prevent
+	 * __ipipe_handle_irq() to sync the pipeline for the root
+	 * stage for the incoming interrupt. Upon return, that IRQ is
+	 * pending in the interrupt log.
+	 *
+	 * - we raise the TIF_IRQ_SYNC bit for the current thread, so
+	 * that _schedule_and_signal_from_int will eventually sync the
+	 * pipeline from EVT15.
+	 */
 	if (this_domain == ipipe_root_domain) {
 		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
 		barrier();
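
Taken together with the root-domain epilogue that appears as context in the next hunk, the deferral described by the new comment amounts to the sequence sketched below. This is a simplified outline of the control flow in __ipipe_grab_irq(), not a verbatim copy of the final code; in particular, the clearing of IPIPE_SYNCDEFER_FLAG in the !s branch is assumed from the surrounding context rather than shown in this diff.

	if (this_domain == ipipe_root_domain) {
		/* Record whether SYNCDEFER was already set, so that only the
		 * outermost interrupt frame is responsible for clearing it. */
		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
		barrier();
	}

	/* With SYNCDEFER raised, __ipipe_handle_irq() only logs the IRQ for
	 * the root stage instead of running Linux handlers right here,
	 * below the EVT15 priority level. */
	__ipipe_handle_irq(irq, regs);

	if (this_domain == ipipe_root_domain) {
		/* Have _schedule_and_signal_from_int sync the pipeline later,
		 * from EVT15, where running Linux handlers is safe. */
		set_thread_flag(TIF_IRQ_SYNC);
		if (!s)
			__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status); /* assumed */
	}
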
@@ -1451,6 +1466,24 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 	__ipipe_handle_irq(irq, regs);
 	ipipe_trace_irq_exit(irq);
 
+	if (user_mode(regs) &&
+	    !ipipe_test_foreign_stack() &&
+	    (current->ipipe_flags & PF_EVTRET) != 0) {
+		/*
+		 * Testing for user_regs() does NOT fully eliminate
+		 * foreign stack contexts, because of the forged
+		 * interrupt returns we do through
+		 * __ipipe_call_irqtail. In that case, we might have
+		 * preempted a foreign stack context in a high
+		 * priority domain, with a single interrupt level now
+		 * pending after the irqtail unwinding is done. In
+		 * which case user_mode() is now true, and the event
+		 * gets dispatched spuriously.
+		 */
+		current->ipipe_flags &= ~PF_EVTRET;
+		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
+	}
+
 	if (this_domain == ipipe_root_domain) {
 		set_thread_flag(TIF_IRQ_SYNC);
 		if (!s) {