Diffstat (limited to 'arch/blackfin/kernel')
-rw-r--r--  arch/blackfin/kernel/ipipe.c    176
-rw-r--r--  arch/blackfin/kernel/irqchip.c   14
-rw-r--r--  arch/blackfin/kernel/time.c       5
3 files changed, 59 insertions(+), 136 deletions(-)
diff --git a/arch/blackfin/kernel/ipipe.c b/arch/blackfin/kernel/ipipe.c
index 339be5a3ae6a..a5de8d45424c 100644
--- a/arch/blackfin/kernel/ipipe.c
+++ b/arch/blackfin/kernel/ipipe.c
@@ -35,14 +35,8 @@
 #include <asm/atomic.h>
 #include <asm/io.h>
 
-static int create_irq_threads;
-
 DEFINE_PER_CPU(struct pt_regs, __ipipe_tick_regs);
 
-static DEFINE_PER_CPU(unsigned long, pending_irqthread_mask);
-
-static DEFINE_PER_CPU(int [IVG13 + 1], pending_irq_count);
-
 asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs);
 
 static void __ipipe_no_irqtail(void);
@@ -93,6 +87,7 @@ void __ipipe_enable_pipeline(void)
  */
 void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 {
+	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
 	struct ipipe_domain *this_domain, *next_domain;
 	struct list_head *head, *pos;
 	int m_ack, s = -1;
@@ -104,7 +99,6 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * interrupt.
 	 */
 	m_ack = (regs == NULL || irq == IRQ_SYSTMR || irq == IRQ_CORETMR);
-
 	this_domain = ipipe_current_domain;
 
 	if (unlikely(test_bit(IPIPE_STICKY_FLAG, &this_domain->irqs[irq].control)))
@@ -114,49 +108,28 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 		next_domain = list_entry(head, struct ipipe_domain, p_link);
 		if (likely(test_bit(IPIPE_WIRED_FLAG, &next_domain->irqs[irq].control))) {
 			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL)
-				next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
-			if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
-				s = __test_and_set_bit(IPIPE_STALL_FLAG,
-						       &ipipe_root_cpudom_var(status));
+				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
+			if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
+				s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
 			__ipipe_dispatch_wired(next_domain, irq);
-			goto finalize;
-			return;
+			goto out;
 		}
 	}
 
 	/* Ack the interrupt. */
 
 	pos = head;
-
 	while (pos != &__ipipe_pipeline) {
 		next_domain = list_entry(pos, struct ipipe_domain, p_link);
-		/*
-		 * For each domain handling the incoming IRQ, mark it
-		 * as pending in its log.
-		 */
 		if (test_bit(IPIPE_HANDLE_FLAG, &next_domain->irqs[irq].control)) {
-			/*
-			 * Domains that handle this IRQ are polled for
-			 * acknowledging it by decreasing priority
-			 * order. The interrupt must be made pending
-			 * _first_ in the domain's status flags before
-			 * the PIC is unlocked.
-			 */
 			__ipipe_set_irq_pending(next_domain, irq);
-
 			if (!m_ack && next_domain->irqs[irq].acknowledge != NULL) {
-				next_domain->irqs[irq].acknowledge(irq, irq_desc + irq);
+				next_domain->irqs[irq].acknowledge(irq, irq_to_desc(irq));
 				m_ack = 1;
 			}
 		}
-
-		/*
-		 * If the domain does not want the IRQ to be passed
-		 * down the interrupt pipe, exit the loop now.
-		 */
 		if (!test_bit(IPIPE_PASS_FLAG, &next_domain->irqs[irq].control))
 			break;
-
 		pos = next_domain->p_link.next;
 	}
 
@@ -166,18 +139,24 @@ void __ipipe_handle_irq(unsigned irq, struct pt_regs *regs)
 	 * immediately to the current domain if the interrupt has been
 	 * marked as 'sticky'. This search does not go beyond the
 	 * current domain in the pipeline. We also enforce the
-	 * additional root stage lock (blackfin-specific). */
+	 * additional root stage lock (blackfin-specific).
+	 */
+	if (test_bit(IPIPE_SYNCDEFER_FLAG, &p->status))
+		s = __test_and_set_bit(IPIPE_STALL_FLAG, &p->status);
 
-	if (test_bit(IPIPE_ROOTLOCK_FLAG, &ipipe_root_domain->flags))
-		s = __test_and_set_bit(IPIPE_STALL_FLAG,
-				       &ipipe_root_cpudom_var(status));
-finalize:
+	/*
+	 * If the interrupt preempted the head domain, then do not
+	 * even try to walk the pipeline, unless an interrupt is
+	 * pending for it.
+	 */
+	if (test_bit(IPIPE_AHEAD_FLAG, &this_domain->flags) &&
+	    ipipe_head_cpudom_var(irqpend_himask) == 0)
+		goto out;
 
 	__ipipe_walk_pipeline(head);
-
+out:
 	if (!s)
-		__clear_bit(IPIPE_STALL_FLAG,
-			    &ipipe_root_cpudom_var(status));
+		__clear_bit(IPIPE_STALL_FLAG, &p->status);
 }
 
 int __ipipe_check_root(void)
@@ -187,7 +166,7 @@ int __ipipe_check_root(void)
 
 void __ipipe_enable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	int prio = desc->ic_prio;
 
 	desc->depth = 0;
@@ -199,7 +178,7 @@ EXPORT_SYMBOL(__ipipe_enable_irqdesc);
 
 void __ipipe_disable_irqdesc(struct ipipe_domain *ipd, unsigned irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	int prio = desc->ic_prio;
 
 	if (ipd != &ipipe_root &&
@@ -236,15 +215,18 @@ int __ipipe_syscall_root(struct pt_regs *regs)
 {
 	unsigned long flags;
 
-	/* We need to run the IRQ tail hook whenever we don't
+	/*
+	 * We need to run the IRQ tail hook whenever we don't
 	 * propagate a syscall to higher domains, because we know that
 	 * important operations might be pending there (e.g. Xenomai
-	 * deferred rescheduling). */
+	 * deferred rescheduling).
+	 */
 
-	if (!__ipipe_syscall_watched_p(current, regs->orig_p0)) {
+	if (regs->orig_p0 < NR_syscalls) {
 		void (*hook)(void) = (void (*)(void))__ipipe_irq_tail_hook;
 		hook();
-		return 0;
+		if ((current->flags & PF_EVNOTIFY) == 0)
+			return 0;
 	}
 
 	/*
@@ -312,112 +294,46 @@ int ipipe_trigger_irq(unsigned irq)
 {
 	unsigned long flags;
 
+#ifdef CONFIG_IPIPE_DEBUG
 	if (irq >= IPIPE_NR_IRQS ||
 	    (ipipe_virtual_irq_p(irq)
 	     && !test_bit(irq - IPIPE_VIRQ_BASE, &__ipipe_virtual_irq_map)))
 		return -EINVAL;
+#endif
 
 	local_irq_save_hw(flags);
-
 	__ipipe_handle_irq(irq, NULL);
-
 	local_irq_restore_hw(flags);
 
 	return 1;
 }
 
-/* Move Linux IRQ to threads. */
-
-static int do_irqd(void *__desc)
+asmlinkage void __ipipe_sync_root(void)
 {
-	struct irq_desc *desc = __desc;
-	unsigned irq = desc - irq_desc;
-	int thrprio = desc->thr_prio;
-	int thrmask = 1 << thrprio;
-	int cpu = smp_processor_id();
-	cpumask_t cpumask;
-
-	sigfillset(&current->blocked);
-	current->flags |= PF_NOFREEZE;
-	cpumask = cpumask_of_cpu(cpu);
-	set_cpus_allowed(current, cpumask);
-	ipipe_setscheduler_root(current, SCHED_FIFO, 50 + thrprio);
-
-	while (!kthread_should_stop()) {
-		local_irq_disable();
-		if (!(desc->status & IRQ_SCHEDULED)) {
-			set_current_state(TASK_INTERRUPTIBLE);
-resched:
-			local_irq_enable();
-			schedule();
-			local_irq_disable();
-		}
-		__set_current_state(TASK_RUNNING);
-		/*
-		 * If higher priority interrupt servers are ready to
-		 * run, reschedule immediately. We need this for the
-		 * GPIO demux IRQ handler to unmask the interrupt line
-		 * _last_, after all GPIO IRQs have run.
-		 */
-		if (per_cpu(pending_irqthread_mask, cpu) & ~(thrmask|(thrmask-1)))
-			goto resched;
-		if (--per_cpu(pending_irq_count[thrprio], cpu) == 0)
-			per_cpu(pending_irqthread_mask, cpu) &= ~thrmask;
-		desc->status &= ~IRQ_SCHEDULED;
-		desc->thr_handler(irq, &__raw_get_cpu_var(__ipipe_tick_regs));
-		local_irq_enable();
-	}
-	__set_current_state(TASK_RUNNING);
-	return 0;
-}
+	unsigned long flags;
 
-static void kick_irqd(unsigned irq, void *cookie)
-{
-	struct irq_desc *desc = irq_desc + irq;
-	int thrprio = desc->thr_prio;
-	int thrmask = 1 << thrprio;
-	int cpu = smp_processor_id();
-
-	if (!(desc->status & IRQ_SCHEDULED)) {
-		desc->status |= IRQ_SCHEDULED;
-		per_cpu(pending_irqthread_mask, cpu) |= thrmask;
-		++per_cpu(pending_irq_count[thrprio], cpu);
-		wake_up_process(desc->thread);
-	}
-}
+	BUG_ON(irqs_disabled());
 
-int ipipe_start_irq_thread(unsigned irq, struct irq_desc *desc)
-{
-	if (desc->thread || !create_irq_threads)
-		return 0;
-
-	desc->thread = kthread_create(do_irqd, desc, "IRQ %d", irq);
-	if (desc->thread == NULL) {
-		printk(KERN_ERR "irqd: could not create IRQ thread %d!\n", irq);
-		return -ENOMEM;
-	}
+	local_irq_save_hw(flags);
 
-	wake_up_process(desc->thread);
+	clear_thread_flag(TIF_IRQ_SYNC);
 
-	desc->thr_handler = ipipe_root_domain->irqs[irq].handler;
-	ipipe_root_domain->irqs[irq].handler = &kick_irqd;
+	if (ipipe_root_cpudom_var(irqpend_himask) != 0)
+		__ipipe_sync_pipeline(IPIPE_IRQMASK_ANY);
 
-	return 0;
+	local_irq_restore_hw(flags);
 }
 
-void __init ipipe_init_irq_threads(void)
+void ___ipipe_sync_pipeline(unsigned long syncmask)
 {
-	unsigned irq;
-	struct irq_desc *desc;
-
-	create_irq_threads = 1;
+	struct ipipe_domain *ipd = ipipe_current_domain;
 
-	for (irq = 0; irq < NR_IRQS; irq++) {
-		desc = irq_desc + irq;
-		if (desc->action != NULL ||
-		    (desc->status & IRQ_NOREQUEST) != 0)
-			ipipe_start_irq_thread(irq, desc);
+	if (ipd == ipipe_root_domain) {
+		if (test_bit(IPIPE_SYNCDEFER_FLAG, &ipipe_root_cpudom_var(status)))
+			return;
 	}
+
+	__ipipe_sync_stage(syncmask);
 }
 
 EXPORT_SYMBOL(show_stack);
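
The s = -1 / __test_and_set_bit() / __clear_bit() sequence in
__ipipe_handle_irq() above implements a conditional stall: the handler
records whether IPIPE_STALL_FLAG was already set before it ran, and only
clears the flag on exit if it was the one that set it. Below is a minimal
userspace C sketch of that idiom; the names and the simplified bit helper
are illustrative, not the kernel's:

#include <stdio.h>

#define STALL_BIT 0

static unsigned long status;

/* Mimics __test_and_set_bit(): set the bit, return its old value. */
static int test_and_set_bit_demo(int nr, unsigned long *addr)
{
	int old = (*addr >> nr) & 1;

	*addr |= 1UL << nr;
	return old;
}

static void handle_irq_demo(int defer)
{
	int s = -1;	/* -1: we never touched the flag */

	if (defer)
		s = test_and_set_bit_demo(STALL_BIT, &status);

	/* ... dispatch the interrupt / walk the pipeline here ... */

	if (!s)		/* clear only if it was clear when we came in */
		status &= ~(1UL << STALL_BIT);

	printf("defer=%d -> status=%#lx\n", defer, status);
}

int main(void)
{
	handle_irq_demo(0);	/* flag untouched throughout */
	handle_irq_demo(1);	/* set on entry, cleared on exit */
	return 0;
}

With s initialized to -1, the final test is false both when the flag was
never touched and when it was already set by an outer context, so nesting
is preserved.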
diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c
index 75724eee6494..7fd126564846 100644
--- a/arch/blackfin/kernel/irqchip.c
+++ b/arch/blackfin/kernel/irqchip.c
@@ -144,11 +144,15 @@ asmlinkage void asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 #endif
 	generic_handle_irq(irq);
 
-#ifndef CONFIG_IPIPE /* Useless and bugous over the I-pipe: IRQs are threaded. */
-	/* If we're the only interrupt running (ignoring IRQ15 which is for
-	   syscalls), lower our priority to IRQ14 so that softirqs run at
-	   that level. If there's another, lower-level interrupt, irq_exit
-	   will defer softirqs to that. */
+#ifndef CONFIG_IPIPE
+	/*
+	 * If we're the only interrupt running (ignoring IRQ15 which
+	 * is for syscalls), lower our priority to IRQ14 so that
+	 * softirqs run at that level. If there's another,
+	 * lower-level interrupt, irq_exit will defer softirqs to
+	 * that. If the interrupt pipeline is enabled, we are already
+	 * running at IRQ14 priority, so we don't need this code.
+	 */
 	CSYNC();
 	pending = bfin_read_IPEND() & ~0x8000;
 	other_ints = pending & (pending - 1);
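
The other_ints computation above uses the classic clear-lowest-set-bit
trick: pending & (pending - 1) is zero exactly when at most one bit is
set, so a non-zero result means some other interrupt level is active
besides our own. A standalone C demonstration with assumed IPEND
snapshots (not kernel code):

#include <stdio.h>

int main(void)
{
	/* Hypothetical IPEND values, bit 15 (IRQ15) already masked off. */
	unsigned int samples[] = { 0x0040, 0x0050, 0x2040 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int pending = samples[i];
		unsigned int other_ints = pending & (pending - 1);

		printf("IPEND=%#06x -> other active interrupts: %s\n",
		       pending, other_ints ? "yes" : "no");
	}
	return 0;
}

For 0x0040 the result is 0 (we are the only active level); for 0x0050
and 0x2040 it is non-zero, so irq_exit() would defer softirqs instead.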
diff --git a/arch/blackfin/kernel/time.c b/arch/blackfin/kernel/time.c
index 172b4c588467..1bbacfbd4c5d 100644
--- a/arch/blackfin/kernel/time.c
+++ b/arch/blackfin/kernel/time.c
@@ -134,7 +134,10 @@ irqreturn_t timer_interrupt(int irq, void *dummy)
 
 	write_seqlock(&xtime_lock);
 #if defined(CONFIG_TICK_SOURCE_SYSTMR0) && !defined(CONFIG_IPIPE)
-/* FIXME: Here TIMIL0 is not set when IPIPE enabled, why? */
+	/*
+	 * TIMIL0 is latched in __ipipe_grab_irq() when the I-Pipe is
+	 * enabled.
+	 */
 	if (get_gptimer_status(0) & TIMER_STATUS_TIMIL0) {
 #endif
 	do_timer(1);