author		Philippe Gerum <rpm@xenomai.org>	2011-03-17 02:12:48 -0400
committer	Mike Frysinger <vapier@gentoo.org>	2011-03-18 04:01:10 -0400
commit		5b5da4c4b843e0d84244472b72fe1e7500f5681f (patch)
tree		b15424c4909ca7b4f69f28bd8576ba065e9030fa /arch/blackfin
parent		8944b5a258d73abd1f86bb360c27bb8c3bed5daa (diff)
Blackfin/ipipe: upgrade to I-pipe mainline
This patch introduces Blackfin-specific bits to support the current
tip of the interrupt pipeline development, mainly:
- 2/3-level interrupt maps (sparse IRQs)
- generic virq handling
- sysinfo v2 format for ipipe_get_sysinfo()
Signed-off-by: Philippe Gerum <rpm@xenomai.org>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
Diffstat (limited to 'arch/blackfin')
-rw-r--r--	arch/blackfin/include/asm/ipipe.h		95
-rw-r--r--	arch/blackfin/include/asm/ipipe_base.h		11
-rw-r--r--	arch/blackfin/kernel/ipipe.c			84
-rw-r--r--	arch/blackfin/mach-common/ints-priority.c	41
4 files changed, 117 insertions(+), 114 deletions(-)
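The "sysinfo v2 format" item in the commit message refers to the flat sys_* fields that replace the old ncpus/cpufreq/archdep layout of struct ipipe_sysinfo (see the ipipe.h and ipipe_get_sysinfo() hunks below). As a rough illustration of what a caller sees after this change; the example_probe_timer() wrapper and its printout are hypothetical, only the structure fields and ipipe_get_sysinfo() come from the patch:

/* Illustrative only: not part of the patch. */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ipipe.h>

static int example_probe_timer(void)
{
	struct ipipe_sysinfo info;

	if (ipipe_get_sysinfo(&info))
		return -ENODEV;

	/* The v2 fields replace the old ncpus/cpufreq/archdep block. */
	pr_info("hrtimer irq %d, hrtimer %llu Hz, hrclock %llu Hz, cpu %llu Hz\n",
		info.sys_hrtimer_irq,
		(unsigned long long)info.sys_hrtimer_freq,
		(unsigned long long)info.sys_hrclock_freq,
		(unsigned long long)info.sys_cpu_freq);

	return 0;
}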
diff --git a/arch/blackfin/include/asm/ipipe.h b/arch/blackfin/include/asm/ipipe.h
index 40f94a704c02..9e0cc0e2534f 100644
--- a/arch/blackfin/include/asm/ipipe.h
+++ b/arch/blackfin/include/asm/ipipe.h
@@ -34,11 +34,12 @@
 #include <asm/bitops.h>
 #include <asm/atomic.h>
 #include <asm/traps.h>
+#include <asm/bitsperlong.h>
 
-#define IPIPE_ARCH_STRING	"1.12-00"
+#define IPIPE_ARCH_STRING	"1.16-01"
 #define IPIPE_MAJOR_NUMBER	1
-#define IPIPE_MINOR_NUMBER	12
-#define IPIPE_PATCH_NUMBER	0
+#define IPIPE_MINOR_NUMBER	16
+#define IPIPE_PATCH_NUMBER	1
 
 #ifdef CONFIG_SMP
 #error "I-pipe/blackfin: SMP not implemented"
@@ -55,25 +56,19 @@ do { \
 #define task_hijacked(p)						\
 	({								\
 		int __x__ = __ipipe_root_domain_p;			\
-		__clear_bit(IPIPE_SYNC_FLAG, &ipipe_root_cpudom_var(status)); \
 		if (__x__)						\
 			hard_local_irq_enable();			\
 		!__x__;							\
 	})
 
 struct ipipe_domain;
 
 struct ipipe_sysinfo {
-
-	int ncpus;		/* Number of CPUs on board */
-	u64 cpufreq;		/* CPU frequency (in Hz) */
-
-	/* Arch-dependent block */
-
-	struct {
-		unsigned tmirq;	/* Timer tick IRQ */
-		u64 tmfreq;	/* Timer frequency */
-	} archdep;
+	int sys_nr_cpus;	/* Number of CPUs on board */
+	int sys_hrtimer_irq;	/* hrtimer device IRQ */
+	u64 sys_hrtimer_freq;	/* hrtimer device frequency */
+	u64 sys_hrclock_freq;	/* hrclock device frequency */
+	u64 sys_cpu_freq;	/* CPU frequency (Hz) */
 };
 
 #define ipipe_read_tsc(t)	\
@@ -115,9 +110,19 @@ void __ipipe_enable_irqdesc(struct ipipe_domain *ipd,
 void __ipipe_disable_irqdesc(struct ipipe_domain *ipd,
 			     unsigned irq);
 
-#define __ipipe_enable_irq(irq)		(irq_desc[irq].chip->unmask(irq))
+#define __ipipe_enable_irq(irq)						\
+	do {								\
+		struct irq_desc *desc = irq_to_desc(irq);		\
+		struct irq_chip *chip = get_irq_desc_chip(desc);	\
+		chip->irq_unmask(&desc->irq_data);			\
+	} while (0)
 
-#define __ipipe_disable_irq(irq)	(irq_desc[irq].chip->mask(irq))
+#define __ipipe_disable_irq(irq)					\
+	do {								\
+		struct irq_desc *desc = irq_to_desc(irq);		\
+		struct irq_chip *chip = get_irq_desc_chip(desc);	\
+		chip->irq_mask(&desc->irq_data);			\
+	} while (0)
 
 static inline int __ipipe_check_tickdev(const char *devname)
 {
@@ -128,12 +133,11 @@ void __ipipe_enable_pipeline(void);
 
 #define __ipipe_hook_critical_ipi(ipd) do { } while (0)
 
-#define __ipipe_sync_pipeline ___ipipe_sync_pipeline
-void ___ipipe_sync_pipeline(unsigned long syncmask);
+void ___ipipe_sync_pipeline(void);
 
 void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs);
 
-int __ipipe_get_irq_priority(unsigned irq);
+int __ipipe_get_irq_priority(unsigned int irq);
 
 void __ipipe_serial_debug(const char *fmt, ...);
 
@@ -152,7 +156,10 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
 	return ffs(ul) - 1;
 }
 
-#define __ipipe_run_irqtail()  /* Must be a macro */			\
+#define __ipipe_do_root_xirq(ipd, irq)					\
+	((ipd)->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)))
+
+#define __ipipe_run_irqtail(irq)  /* Must be a macro */			\
 	do {								\
 		unsigned long __pending;				\
 		CSYNC();						\
@@ -164,42 +171,8 @@ static inline unsigned long __ipipe_ffnz(unsigned long ul)
 		}							\
 	} while (0)
 
-#define __ipipe_run_isr(ipd, irq)					\
-	do {								\
-		if (!__ipipe_pipeline_head_p(ipd))			\
-			hard_local_irq_enable();			\
-		if (ipd == ipipe_root_domain) {				\
-			if (unlikely(ipipe_virtual_irq_p(irq))) {	\
-				irq_enter();				\
-				ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-				irq_exit();				\
-			} else						\
-				ipd->irqs[irq].handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs)); \
-		} else {						\
-			__clear_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
-			ipd->irqs[irq].handler(irq, ipd->irqs[irq].cookie); \
-			/* Attempt to exit the outer interrupt level before \
-			 * starting the deferred IRQ processing. */	\
-			__ipipe_run_irqtail();				\
-			__set_bit(IPIPE_SYNC_FLAG, &ipipe_cpudom_var(ipd, status)); \
-		}							\
-		hard_local_irq_disable();				\
-	} while (0)
-
 #define __ipipe_syscall_watched_p(p, sc)	\
-	(((p)->flags & PF_EVNOTIFY) || (unsigned long)sc >= NR_syscalls)
-
-void ipipe_init_irq_threads(void);
-
-int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
-
-#ifdef CONFIG_TICKSOURCE_CORETMR
-#define IRQ_SYSTMR		IRQ_CORETMR
-#define IRQ_PRIOTMR		IRQ_CORETMR
-#else
-#define IRQ_SYSTMR		IRQ_TIMER0
-#define IRQ_PRIOTMR		CONFIG_IRQ_TIMER0
-#endif
+	(ipipe_notifier_enabled_p(p) || (unsigned long)sc >= NR_syscalls)
 
 #ifdef CONFIG_BF561
 #define bfin_write_TIMER_DISABLE(val)	bfin_write_TMRS8_DISABLE(val)
@@ -219,11 +192,11 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 
 #define task_hijacked(p)		0
 #define ipipe_trap_notify(t, r)		0
+#define __ipipe_root_tick_p(regs)	1
 
-#define ipipe_init_irq_threads()		do { } while (0)
-#define ipipe_start_irq_thread(irq, desc)	0
+#endif /* !CONFIG_IPIPE */
 
-#ifndef CONFIG_TICKSOURCE_GPTMR0
+#ifdef CONFIG_TICKSOURCE_CORETMR
 #define IRQ_SYSTMR		IRQ_CORETMR
 #define IRQ_PRIOTMR		IRQ_CORETMR
 #else
@@ -231,10 +204,6 @@ int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc);
 #define IRQ_PRIOTMR		CONFIG_IRQ_TIMER0
 #endif
 
-#define __ipipe_root_tick_p(regs)	1
-
-#endif /* !CONFIG_IPIPE */
-
 #define ipipe_update_tick_evtdev(evtdev)	do { } while (0)
 
 #endif /* !__ASM_BLACKFIN_IPIPE_H */
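The __ipipe_enable_irq()/__ipipe_disable_irq() rework above moves from the old irq_desc[irq].chip->unmask(irq) style to the irq_data-based genirq accessors used elsewhere in this patch. A minimal sketch of the same access pattern written as a plain helper, assuming a valid external interrupt number; the helper name is made up for illustration:

/* Illustrative only: not part of the patch. */
#include <linux/irq.h>

static void example_mask_unmask(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = get_irq_desc_chip(desc);

	/* Formerly irq_desc[irq].chip->mask(irq) / ->unmask(irq). */
	chip->irq_mask(&desc->irq_data);
	chip->irq_unmask(&desc->irq_data);
}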
diff --git a/arch/blackfin/include/asm/ipipe_base.h b/arch/blackfin/include/asm/ipipe_base.h
index 00409201d9ed..84a4ffd36747 100644
--- a/arch/blackfin/include/asm/ipipe_base.h
+++ b/arch/blackfin/include/asm/ipipe_base.h
@@ -24,8 +24,10 @@
 
 #ifdef CONFIG_IPIPE
 
+#include <asm/bitsperlong.h>
+#include <mach/irq.h>
+
 #define IPIPE_NR_XIRQS		NR_IRQS
-#define IPIPE_IRQ_ISHIFT	5	/* 2^5 for 32bits arch. */
 
 /* Blackfin-specific, per-cpu pipeline status */
 #define IPIPE_SYNCDEFER_FLAG	15
@@ -42,11 +44,14 @@
 #define IPIPE_EVENT_INIT	(IPIPE_FIRST_EVENT + 4)
 #define IPIPE_EVENT_EXIT	(IPIPE_FIRST_EVENT + 5)
 #define IPIPE_EVENT_CLEANUP	(IPIPE_FIRST_EVENT + 6)
-#define IPIPE_LAST_EVENT	IPIPE_EVENT_CLEANUP
+#define IPIPE_EVENT_RETURN	(IPIPE_FIRST_EVENT + 7)
+#define IPIPE_LAST_EVENT	IPIPE_EVENT_RETURN
 #define IPIPE_NR_EVENTS		(IPIPE_LAST_EVENT + 1)
 
 #define IPIPE_TIMER_IRQ		IRQ_CORETMR
 
+#define __IPIPE_FEATURE_SYSINFO_V2	1
+
 #ifndef __ASSEMBLY__
 
 extern unsigned long __ipipe_root_status; /* Alias to ipipe_root_cpudom_var(status) */
@@ -63,6 +68,8 @@ void __ipipe_unlock_root(void);
 
 #endif /* !__ASSEMBLY__ */
 
+#define __IPIPE_FEATURE_SYSINFO_V2	1
+
 #endif /* CONFIG_IPIPE */
 
 #endif /* !__ASM_BLACKFIN_IPIPE_BASE_H */
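Since __IPIPE_FEATURE_SYSINFO_V2 is now exported by this header, out-of-tree client code can feature-test it to build against either sysinfo layout. A minimal sketch, assuming the client already holds a filled-in struct ipipe_sysinfo; the helper name is hypothetical, while both field layouts come from the patch:

/* Illustrative only: not part of the patch. */
#include <linux/types.h>
#include <linux/ipipe.h>

static u64 example_timer_freq(const struct ipipe_sysinfo *info)
{
#ifdef __IPIPE_FEATURE_SYSINFO_V2
	return info->sys_hrtimer_freq;	/* new flat v2 field */
#else
	return info->archdep.tmfreq;	/* pre-v2 arch-dependent block */
#endif
}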
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 3b1da4aff2a1..f37019c847c9 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -154,7 +154,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * pending for it.
 	 */
 	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
-	    ipipe_head_cpudom_var(irqpend_himask) == 0)
+	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
 		goto out;
 
 	__ipipe_walk_pipeline(head);
@@ -185,25 +185,21 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 }
 EXPORT_SYMBOL(__ipipe_disable_irqdesc);
 
-int __ipipe_syscall_root(struct pt_regs *regs)
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
 {
 	struct ipipe_percpu_domain_data *p;
-	unsigned long flags;
+	void (*hook)(void);
 	int ret;
 
+	WARN_ON_ONCE(irqs_disabled_hw());
+
 	/*
-	 * We need to run the IRQ tail hook whenever we don't
-	 * propagate a syscall to higher domains, because we know that
-	 * important operations might be pending there (e.g. Xenomai
-	 * deferred rescheduling).
+	 * We need to run the IRQ tail hook each time we intercept a
+	 * syscall, because we know that important operations might be
+	 * pending there (e.g. Xenomai deferred rescheduling).
 	 */
-
-	if (regs->orig_p0 < NR_syscalls) {
-		void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
-		hook();
-		if ((current->flags & PF_EVNOTIFY) == 0)
-			return 0;
-	}
+	hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+	hook();
 
 	/*
 	 * This routine either returns:
@@ -214,51 +210,47 @@ int __ipipe_syscall_root(struct pt_regs *regs)
 	 * tail work has to be performed (for handling signals etc).
 	 */
 
-	if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+	if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
 		return 0;
 
 	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
 
-	flags = hard_local_irq_save();
+	hard_local_irq_disable();
 
-	if (!__ipipe_root_domain_p) {
-		hard_local_irq_restore(flags);
-		return 1;
+	/*
+	 * This is the end of the syscall path, so we may
+	 * safely assume a valid Linux task stack here.
+	 */
+	if (current->ipipe_flags & PF_EVTRET) {
+		current->ipipe_flags &= ~PF_EVTRET;
+		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
 	}
 
-	p = ipipe_root_cpudom_ptr();
-	if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
-		__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+	if (!__ipipe_root_domain_p)
+		ret = -1;
+	else {
+		p = ipipe_root_cpudom_ptr();
+		if (__ipipe_ipending_p(p))
+			__ipipe_sync_pipeline();
+	}
 
-	hard_local_irq_restore(flags);
+	hard_local_irq_enable();
 
 	return -ret;
 }
 
-unsigned long ipipe_critical_enter(void (*syncfn) (void))
-{
-	unsigned long flags;
-
-	flags = hard_local_irq_save();
-
-	return flags;
-}
-
-void ipipe_critical_exit(unsigned long flags)
-{
-	hard_local_irq_restore(flags);
-}
-
 static void __ipipe_no_irqtail(void)
 {
 }
 
 int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
 {
-	info->ncpus = num_online_cpus();
-	info->cpufreq = ipipe_cpu_freq();
-	info->archdep.tmirq = IPIPE_TIMER_IRQ;
-	info->archdep.tmfreq = info->cpufreq;
+	info->sys_nr_cpus = num_online_cpus();
+	info->sys_cpu_freq = ipipe_cpu_freq();
+	info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+	info->sys_hrtimer_freq = __ipipe_core_clock;
+	info->sys_hrclock_freq = __ipipe_core_clock;
 
 	return 0;
 }
@@ -289,6 +281,7 @@ int ipipe_trigger_irq(unsigned irq)
 asmlinkage void __ipipe_sync_root(void)
 {
 	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+	struct ipipe_percpu_domain_data *p;
 	unsigned long flags;
 
 	BUG_ON(irqs_disabled());
@@ -300,19 +293,20 @@ asmlinkage void __ipipe_sync_root(void)
 
 	clear_thread_flag(TIF_IRQ_SYNC);
 
-	if (ipipe_root_cpudom_var(irqpend_himask) != 0)
-		__ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
+	p = ipipe_root_cpudom_ptr();
+	if (__ipipe_ipending_p(p))
+		__ipipe_sync_pipeline();
 
 	hard_local_irq_restore(flags);
 }
 
-void ___ipipe_sync_pipeline(unsigned long syncmask)
+void ___ipipe_sync_pipeline(void)
 {
 	if (__ipipe_root_domain_p &&
 	    test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
 		return;
 
-	__ipipe_sync_stage(syncmask);
+	__ipipe_sync_stage();
 }
 
 void __ipipe_disable_root_irqs_hw(void)
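The PF_EVTRET handling above dispatches the new IPIPE_EVENT_RETURN notification when a marked task heads back to user space. How a client domain subscribes to that event is not part of this patch; the sketch below assumes the generic I-pipe ipipe_catch_event() interface and its usual three-argument handler signature, and both the handler body and the example_domain pointer are placeholders:

/* Illustrative only: not part of the patch. */
#include <linux/ipipe.h>

static struct ipipe_domain *example_domain;	/* hypothetical client domain */

static int example_return_handler(unsigned int event,
				  struct ipipe_domain *from, void *data)
{
	/* Invoked on return to user space for tasks flagged PF_EVTRET. */
	return 0;
}

static void example_subscribe_return_event(void)
{
	ipipe_catch_event(example_domain, IPIPE_EVENT_RETURN,
			  example_return_handler);
}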
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 8e9d3cc30885..6cd52395a999 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -15,6 +15,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/sched.h>
 #ifdef CONFIG_IPIPE
 #include <linux/ipipe.h>
 #endif
@@ -556,10 +557,9 @@ static void bfin_demux_mac_status_irq(unsigned int int_err_irq,
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
 #ifdef CONFIG_IPIPE
-	_set_irq_handler(irq, handle_level_irq);
-#else
-	__set_irq_handler_unlocked(irq, handle);
+	handle = handle_level_irq;
 #endif
+	__set_irq_handler_unlocked(irq, handle);
 }
 
 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
@@ -1392,7 +1392,7 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 	struct ipipe_domain *this_domain = __ipipe_current_domain;
 	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
 	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
-	int irq, s;
+	int irq, s = 0;
 
 	if (likely(vec == EVT_IVTMR_P))
 		irq = IRQ_CORETMR;
@@ -1442,6 +1442,21 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 		__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
 	}
 
+	/*
+	 * We don't want Linux interrupt handlers to run at the
+	 * current core priority level (i.e. < EVT15), since this
+	 * might delay other interrupts handled by a high priority
+	 * domain. Here is what we do instead:
+	 *
+	 * - we raise the SYNCDEFER bit to prevent
+	 *   __ipipe_handle_irq() to sync the pipeline for the root
+	 *   stage for the incoming interrupt. Upon return, that IRQ is
+	 *   pending in the interrupt log.
+	 *
+	 * - we raise the TIF_IRQ_SYNC bit for the current thread, so
+	 *   that _schedule_and_signal_from_int will eventually sync the
+	 *   pipeline from EVT15.
+	 */
 	if (this_domain == ipipe_root_domain) {
 		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
 		barrier();
@@ -1451,6 +1466,24 @@ asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
 	__ipipe_handle_irq(irq, regs);
 	ipipe_trace_irq_exit(irq);
 
+	if (user_mode(regs) &&
+	    !ipipe_test_foreign_stack() &&
+	    (current->ipipe_flags & PF_EVTRET) != 0) {
+		/*
+		 * Testing for user_regs() does NOT fully eliminate
+		 * foreign stack contexts, because of the forged
+		 * interrupt returns we do through
+		 * __ipipe_call_irqtail. In that case, we might have
+		 * preempted a foreign stack context in a high
+		 * priority domain, with a single interrupt level now
+		 * pending after the irqtail unwinding is done. In
+		 * which case user_mode() is now true, and the event
+		 * gets dispatched spuriously.
+		 */
+		current->ipipe_flags &= ~PF_EVTRET;
+		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
+	}
+
 	if (this_domain == ipipe_root_domain) {
 		set_thread_flag(TIF_IRQ_SYNC);
 		if (!s) {