author     Philippe Gerum <rpm@xenomai.org>      2011-03-17 02:12:48 -0400
committer  Mike Frysinger <vapier@gentoo.org>    2011-03-18 04:01:10 -0400
commit     5b5da4c4b843e0d84244472b72fe1e7500f5681f
tree       b15424c4909ca7b4f69f28bd8576ba065e9030fa /arch/blackfin/kernel/ipipe.c
parent     8944b5a258d73abd1f86bb360c27bb8c3bed5daa
Blackfin/ipipe: upgrade to I-pipe mainline
This patch introduces Blackfin-specific bits to support the current
tip of the interrupt pipeline development, mainly:
- 2/3-level interrupt maps (sparse IRQs)
- generic virq handling
- sysinfo v2 format for ipipe_get_sysinfo()
Signed-off-by: Philippe Gerum <rpm@xenomai.org>
Signed-off-by: Mike Frysinger <vapier@gentoo.org>
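
Editor's note: the first item above is what drives most of the mechanical churn in the diff below. Callers no longer test a per-domain irqpend_himask word directly; they ask the opaque predicate __ipipe_ipending_p(), which hides the new multi-level pending map. The standalone C sketch below only illustrates the general shape of such a hierarchical pending map; every name in it (pending_map, NR_IRQS_SKETCH, set_pending(), pending_p(), next_pending()) is invented for the example, and the real I-pipe per-domain layout (and its 3-level sparse-IRQ variant) differs in detail.

#include <stdio.h>

#define BITS_PER_WORD	(8 * sizeof(unsigned long))
#define NR_IRQS_SKETCH	256
#define NR_WORDS	(NR_IRQS_SKETCH / BITS_PER_WORD)

/* Two-level pending map: one summary bit per low-level word. */
struct pending_map {
	unsigned long himap;		/* one bit per lomap[] word */
	unsigned long lomap[NR_WORDS];	/* one bit per IRQ */
};

static void set_pending(struct pending_map *p, unsigned irq)
{
	p->lomap[irq / BITS_PER_WORD] |= 1UL << (irq % BITS_PER_WORD);
	p->himap |= 1UL << (irq / BITS_PER_WORD);
}

/* Analogue of the __ipipe_ipending_p() test used in the patch:
 * "anything pending?" reduces to a single top-level word test. */
static int pending_p(const struct pending_map *p)
{
	return p->himap != 0;
}

/* Find one pending IRQ: pick a set bit in the top level, then in the
 * low-level word it points to (lowest bit first in this sketch). */
static int next_pending(const struct pending_map *p)
{
	unsigned long hi, lo;

	if (!pending_p(p))
		return -1;
	hi = (unsigned long)__builtin_ctzl(p->himap);
	lo = (unsigned long)__builtin_ctzl(p->lomap[hi]);
	return (int)(hi * BITS_PER_WORD + lo);
}

int main(void)
{
	struct pending_map map = { 0 };

	set_pending(&map, 7);
	set_pending(&map, 130);
	printf("pending? %d, first pending IRQ: %d\n",
	       pending_p(&map), next_pending(&map));
	return 0;
}

With a layout like this, the old "himask == 0" comparisons no longer make sense as an external test, which is why they turn into __ipipe_ipending_p() calls throughout this file.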
Diffstat (limited to 'arch/blackfin/kernel/ipipe.c')
-rw-r--r--   arch/blackfin/kernel/ipipe.c   84
1 file changed, 39 insertions(+), 45 deletions(-)
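
Editor's note: the last hunk of the diff switches ipipe_get_sysinfo() from the old ncpus/cpufreq/archdep.* fields to the flat v2 sys_* fields. The userspace mock below only restates which fields the Blackfin implementation now fills and from where; the struct name, field types and numeric values are placeholders chosen for this sketch, and the real struct ipipe_sysinfo comes from the I-pipe headers.

#include <stdio.h>

/* Sketch of the v2 record, reduced to the fields this patch fills. */
struct ipipe_sysinfo_v2_sketch {
	int sys_nr_cpus;		/* was info->ncpus */
	unsigned long sys_cpu_freq;	/* was info->cpufreq */
	unsigned int sys_hrtimer_irq;	/* was info->archdep.tmirq */
	unsigned long sys_hrtimer_freq;	/* was info->archdep.tmfreq */
	unsigned long sys_hrclock_freq;	/* new in the v2 format */
};

/* Mirrors what the patched ipipe_get_sysinfo() does on Blackfin:
 * both timer frequencies are taken from the core clock. */
static int mock_get_sysinfo(struct ipipe_sysinfo_v2_sketch *info)
{
	unsigned long core_clock = 500000000UL;	/* stand-in for __ipipe_core_clock */

	info->sys_nr_cpus = 1;			/* stand-in for num_online_cpus() */
	info->sys_cpu_freq = core_clock;	/* stand-in for ipipe_cpu_freq() */
	info->sys_hrtimer_irq = 6;		/* stand-in for IPIPE_TIMER_IRQ */
	info->sys_hrtimer_freq = core_clock;
	info->sys_hrclock_freq = core_clock;
	return 0;
}

int main(void)
{
	struct ipipe_sysinfo_v2_sketch info;

	mock_get_sysinfo(&info);
	printf("cpus=%d cpu_freq=%lu hrtimer_irq=%u hrtimer_freq=%lu hrclock_freq=%lu\n",
	       info.sys_nr_cpus, info.sys_cpu_freq, info.sys_hrtimer_irq,
	       info.sys_hrtimer_freq, info.sys_hrclock_freq);
	return 0;
}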
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 3b1da4aff2a1..f37019c847c9 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -154,7 +154,7 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * pending for it.
 	 */
 	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
-	    ipipe_head_cpudom_var(irqpend_himask) == 0)
+	    !__ipipe_ipending_p(ipipe_head_cpudom_ptr()))
 		goto out;
 
 	__ipipe_walk_pipeline(head);
@@ -185,25 +185,21 @@ void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 }
 EXPORT_SYMBOL(__ipipe_disable_irqdesc);
 
-int __ipipe_syscall_root(struct pt_regs *regs)
+asmlinkage int __ipipe_syscall_root(struct pt_regs *regs)
 {
 	struct ipipe_percpu_domain_data *p;
-	unsigned long flags;
+	void (*hook)(void);
 	int ret;
 
+	WARN_ON_ONCE(irqs_disabled_hw());
+
 	/*
-	 * We need to run the IRQ tail hook whenever we don't
-	 * propagate a syscall to higher domains, because we know that
-	 * important operations might be pending there (e.g. Xenomai
-	 * deferred rescheduling).
+	 * We need to run the IRQ tail hook each time we intercept a
+	 * syscall, because we know that important operations might be
+	 * pending there (e.g. Xenomai deferred rescheduling).
 	 */
-
-	if (regs->orig_p0 < NR_syscalls) {
-		void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
-		hook();
-		if ((current->flags & PF_EVNOTIFY) == 0)
-			return 0;
-	}
+	hook = (__typeof__(hook))__ipipe_irq_tail_hook;
+	hook();
 
 	/*
 	 * This routine either returns:
@@ -214,51 +210,47 @@ int __ipipe_syscall_root(struct pt_regs *regs)
 	 * tail work has to be performed (for handling signals etc).
 	 */
 
-	if (!__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
+	if (!__ipipe_syscall_watched_p(current, regs->orig_p0) ||
+	    !__ipipe_event_monitored_p(IPIPE_EVENT_SYSCALL))
 		return 0;
 
 	ret = __ipipe_dispatch_event(IPIPE_EVENT_SYSCALL, regs);
 
-	flags = hard_local_irq_save();
+	hard_local_irq_disable();
 
-	if (!__ipipe_root_domain_p) {
-		hard_local_irq_restore(flags);
-		return 1;
+	/*
+	 * This is the end of the syscall path, so we may
+	 * safely assume a valid Linux task stack here.
+	 */
+	if (current->ipipe_flags & PF_EVTRET) {
+		current->ipipe_flags &= ~PF_EVTRET;
+		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
 	}
 
-	p = ipipe_root_cpudom_ptr();
-	if ((p->irqpend_himask & IPIPE_IRQMASK_VIRT) != 0)
-		__ipipe_sync_pipeline(IPIPE_IRQMASK_VIRT);
+	if (!__ipipe_root_domain_p)
+		ret = -1;
+	else {
+		p = ipipe_root_cpudom_ptr();
+		if (__ipipe_ipending_p(p))
+			__ipipe_sync_pipeline();
+	}
 
-	hard_local_irq_restore(flags);
+	hard_local_irq_enable();
 
 	return -ret;
 }
 
-unsigned long ipipe_critical_enter(void (*syncfn) (void))
-{
-	unsigned long flags;
-
-	flags = hard_local_irq_save();
-
-	return flags;
-}
-
-void ipipe_critical_exit(unsigned long flags)
-{
-	hard_local_irq_restore(flags);
-}
-
 static void __ipipe_no_irqtail(void)
 {
 }
 
 int ipipe_get_sysinfo(struct ipipe_sysinfo *info)
 {
-	info->ncpus = num_online_cpus();
-	info->cpufreq = ipipe_cpu_freq();
-	info->archdep.tmirq = IPIPE_TIMER_IRQ;
-	info->archdep.tmfreq = info->cpufreq;
+	info->sys_nr_cpus = num_online_cpus();
+	info->sys_cpu_freq = ipipe_cpu_freq();
+	info->sys_hrtimer_irq = IPIPE_TIMER_IRQ;
+	info->sys_hrtimer_freq = __ipipe_core_clock;
+	info->sys_hrclock_freq = __ipipe_core_clock;
 
 	return 0;
 }
@@ -289,6 +281,7 @@ int ipipe_trigger_irq(unsigned irq)
 asmlinkage void __ipipe_sync_root(void)
 {
 	void (*irq_tail_hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
+	struct ipipe_percpu_domain_data *p;
 	unsigned long flags;
 
 	BUG_ON(irqs_disabled());
@@ -300,19 +293,20 @@ asmlinkage void __ipipe_sync_root(void)
 
 	clear_thread_flag(TIF_IRQ_SYNC);
 
-	if (ipipe_root_cpudom_var(irqpend_himask) != 0)
-		__ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
+	p = ipipe_root_cpudom_ptr();
+	if (__ipipe_ipending_p(p))
+		__ipipe_sync_pipeline();
 
 	hard_local_irq_restore(flags);
 }
 
-void ___ipipe_sync_pipeline(unsigned long syncmask)
+void ___ipipe_sync_pipeline(void)
 {
 	if (__ipipe_root_domain_p &&
 	    test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
 		return;
 
-	__ipipe_sync_stage(syncmask);
+	__ipipe_sync_stage();
 }
 
 void __ipipe_disable_root_irqs_hw(void)