Diffstat (limited to 'arch/blackfin/mach-common/ints-priority.c')
-rw-r--r--  arch/blackfin/mach-common/ints-priority.c | 235
1 file changed, 212 insertions(+), 23 deletions(-)
diff --git a/arch/blackfin/mach-common/ints-priority.c b/arch/blackfin/mach-common/ints-priority.c
index 7c1db775751b..1bba6030dce9 100644
--- a/arch/blackfin/mach-common/ints-priority.c
+++ b/arch/blackfin/mach-common/ints-priority.c
@@ -34,6 +34,9 @@
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#ifdef CONFIG_IPIPE
+#include <linux/ipipe.h>
+#endif
 #ifdef CONFIG_KGDB
 #include <linux/kgdb.h>
 #endif
@@ -135,8 +138,8 @@ static void bfin_ack_noop(unsigned int irq)
 static void bfin_core_mask_irq(unsigned int irq)
 {
 	bfin_irq_flags &= ~(1 << irq);
-	if (!irqs_disabled())
-		local_irq_enable();
+	if (!irqs_disabled_hw())
+		local_irq_enable_hw();
 }
 
 static void bfin_core_unmask_irq(unsigned int irq)
@@ -151,8 +154,8 @@ static void bfin_core_unmask_irq(unsigned int irq)
 	 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
 	 * what we need.
 	 */
-	if (!irqs_disabled())
-		local_irq_enable();
+	if (!irqs_disabled_hw())
+		local_irq_enable_hw();
 	return;
 }
 
@@ -235,7 +238,7 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 		break;
 	}
 
-	local_irq_save(flags);
+	local_irq_save_hw(flags);
 
 	if (state) {
 		bfin_sic_iwr[bank] |= (1 << bit);
@@ -246,7 +249,7 @@ int bfin_internal_set_wake(unsigned int irq, unsigned int state)
 		vr_wakeup &= ~wakeup;
 	}
 
-	local_irq_restore(flags);
+	local_irq_restore_hw(flags);
 
 	return 0;
 }
@@ -272,6 +275,19 @@ static struct irq_chip bfin_internal_irqchip = {
 #endif
 };
 
+static void bfin_handle_irq(unsigned irq)
+{
+#ifdef CONFIG_IPIPE
+	struct pt_regs regs;    /* Contents not used. */
+	ipipe_trace_irq_entry(irq);
+	__ipipe_handle_irq(irq, &regs);
+	ipipe_trace_irq_exit(irq);
+#else /* !CONFIG_IPIPE */
+	struct irq_desc *desc = irq_desc + irq;
+	desc->handle_irq(irq, desc);
+#endif /* !CONFIG_IPIPE */
+}
+
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 static int error_int_mask;
 
@@ -325,10 +341,9 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
 		irq = IRQ_UART1_ERROR;
 
 	if (irq) {
-		if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR))) {
-			struct irq_desc *desc = irq_desc + irq;
-			desc->handle_irq(irq, desc);
-		} else {
+		if (error_int_mask & (1L << (irq - IRQ_PPI_ERROR)))
+			bfin_handle_irq(irq);
+		else {
 
 			switch (irq) {
 			case IRQ_PPI_ERROR:
@@ -374,10 +389,14 @@ static void bfin_demux_error_irq(unsigned int int_err_irq,
 
 static inline void bfin_set_irq_handler(unsigned irq, irq_flow_handler_t handle)
 {
+#ifdef CONFIG_IPIPE
+	_set_irq_handler(irq, handle_edge_irq);
+#else
 	struct irq_desc *desc = irq_desc + irq;
 	/* May not call generic set_irq_handler() due to spinlock
 	   recursion. */
 	desc->handle_irq = handle;
+#endif
 }
 
 static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);
@@ -563,10 +582,8 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
 			mask = get_gpiop_data(i) & get_gpiop_maska(i);
 
 			while (mask) {
-				if (mask & 1) {
-					desc = irq_desc + irq;
-					desc->handle_irq(irq, desc);
-				}
+				if (mask & 1)
+					bfin_handle_irq(irq);
 				irq++;
 				mask >>= 1;
 			}
@@ -576,10 +593,8 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
 		mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
 
 		do {
-			if (mask & 1) {
-				desc = irq_desc + irq;
-				desc->handle_irq(irq, desc);
-			}
+			if (mask & 1)
+				bfin_handle_irq(irq);
 			irq++;
 			mask >>= 1;
 		} while (mask);
@@ -900,8 +915,7 @@ static void bfin_demux_gpio_irq(unsigned int inta_irq,
 	while (request) {
 		if (request & 1) {
 			irq = pint2irq_lut[pint_val] + SYS_IRQS;
-			desc = irq_desc + irq;
-			desc->handle_irq(irq, desc);
+			bfin_handle_irq(irq);
 		}
 		pint_val++;
 		request >>= 1;
@@ -1025,11 +1039,10 @@ int __init init_arch_irq(void)
 			break;
 #ifdef BF537_GENERIC_ERROR_INT_DEMUX
 		case IRQ_GENERIC_ERROR:
-			set_irq_handler(irq, bfin_demux_error_irq);
-
+			set_irq_chained_handler(irq, bfin_demux_error_irq);
 			break;
 #endif
-#ifdef CONFIG_TICK_SOURCE_SYSTMR0
+#if defined(CONFIG_TICK_SOURCE_SYSTMR0) || defined(CONFIG_IPIPE)
 		case IRQ_TIMER0:
 			set_irq_handler(irq, handle_percpu_irq);
 			break;
@@ -1041,7 +1054,17 @@ int __init init_arch_irq(void)
 			break;
 #endif
 		default:
+#ifdef CONFIG_IPIPE
+			/*
+			 * We want internal interrupt sources to be masked, because
+			 * ISRs may trigger interrupts recursively (e.g. DMA), but
+			 * interrupts are _not_ masked at CPU level. So let's handle
+			 * them as level interrupts.
+			 */
+			set_irq_handler(irq, handle_level_irq);
+#else /* !CONFIG_IPIPE */
 			set_irq_handler(irq, handle_simple_irq);
+#endif /* !CONFIG_IPIPE */
 			break;
 		}
 	}
@@ -1101,6 +1124,14 @@ int __init init_arch_irq(void)
 	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
 #endif
 
+#ifdef CONFIG_IPIPE
+	for (irq = 0; irq < NR_IRQS; irq++) {
+		struct irq_desc *desc = irq_desc + irq;
+		desc->ic_prio = __ipipe_get_irq_priority(irq);
+		desc->thr_prio = __ipipe_get_irqthread_priority(irq);
+	}
+#endif /* CONFIG_IPIPE */
+
 	return 0;
 }
 
@@ -1156,3 +1187,161 @@ void do_irq(int vec, struct pt_regs *fp)
 	}
 	asm_do_IRQ(vec, fp);
 }
+
+#ifdef CONFIG_IPIPE
+
+int __ipipe_get_irq_priority(unsigned irq)
+{
+	int ient, prio;
+
+	if (irq <= IRQ_CORETMR)
+		return irq;
+
+	for (ient = 0; ient < NR_PERI_INTS; ient++) {
+		struct ivgx *ivg = ivg_table + ient;
+		if (ivg->irqno == irq) {
+			for (prio = 0; prio <= IVG13-IVG7; prio++) {
+				if (ivg7_13[prio].ifirst <= ivg &&
+				    ivg7_13[prio].istop > ivg)
+					return IVG7 + prio;
+			}
+		}
+	}
+
+	return IVG15;
+}
+
+int __ipipe_get_irqthread_priority(unsigned irq)
+{
+	int ient, prio;
+	int demux_irq;
+
+	/* The returned priority value is rescaled to [0..IVG13+1]
+	 * with 0 being the lowest effective priority level. */
+
+	if (irq <= IRQ_CORETMR)
+		return IVG13 - irq + 1;
+
+	/* GPIO IRQs are given the priority of the demux
+	 * interrupt. */
+	if (IS_GPIOIRQ(irq)) {
+#if defined(CONFIG_BF54x)
+		u32 bank = PINT_2_BANK(irq2pint_lut[irq - SYS_IRQS]);
+		demux_irq = (bank == 0 ? IRQ_PINT0 :
+			     bank == 1 ? IRQ_PINT1 :
+			     bank == 2 ? IRQ_PINT2 :
+			     IRQ_PINT3);
+#elif defined(CONFIG_BF561)
+		demux_irq = (irq >= IRQ_PF32 ? IRQ_PROG2_INTA :
+			     irq >= IRQ_PF16 ? IRQ_PROG1_INTA :
+			     IRQ_PROG0_INTA);
+#elif defined(CONFIG_BF52x)
+		demux_irq = (irq >= IRQ_PH0 ? IRQ_PORTH_INTA :
+			     irq >= IRQ_PG0 ? IRQ_PORTG_INTA :
+			     IRQ_PORTF_INTA);
+#else
+		demux_irq = irq;
+#endif
+		return IVG13 - PRIO_GPIODEMUX(demux_irq) + 1;
+	}
+
+	/* The GPIO demux interrupt is given a lower priority
+	 * than the GPIO IRQs, so that its threaded handler
+	 * unmasks the interrupt line after the decoded IRQs
+	 * have been processed. */
+	prio = PRIO_GPIODEMUX(irq);
+	/* demux irq? */
+	if (prio != -1)
+		return IVG13 - prio;
+
+	for (ient = 0; ient < NR_PERI_INTS; ient++) {
+		struct ivgx *ivg = ivg_table + ient;
+		if (ivg->irqno == irq) {
+			for (prio = 0; prio <= IVG13-IVG7; prio++) {
+				if (ivg7_13[prio].ifirst <= ivg &&
+				    ivg7_13[prio].istop > ivg)
+					return IVG7 - prio;
+			}
+		}
+	}
+
+	return 0;
+}
+
+/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
+#ifdef CONFIG_DO_IRQ_L1
+__attribute__((l1_text))
+#endif
+asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
+{
+	struct ivgx *ivg_stop = ivg7_13[vec-IVG7].istop;
+	struct ivgx *ivg = ivg7_13[vec-IVG7].ifirst;
+	int irq;
+
+	if (likely(vec == EVT_IVTMR_P)) {
+		irq = IRQ_CORETMR;
+		goto handle_irq;
+	}
+
+	SSYNC();
+
+#if defined(CONFIG_BF54x) || defined(CONFIG_BF52x) || defined(CONFIG_BF561)
+	{
+		unsigned long sic_status[3];
+
+		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
+		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
+#ifdef CONFIG_BF54x
+		sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
+#endif
+		for (;; ivg++) {
+			if (ivg >= ivg_stop) {
+				atomic_inc(&num_spurious);
+				return 0;
+			}
+			if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
+				break;
+		}
+	}
+#else
+	{
+		unsigned long sic_status;
+
+		sic_status = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
+
+		for (;; ivg++) {
+			if (ivg >= ivg_stop) {
+				atomic_inc(&num_spurious);
+				return 0;
+			} else if (sic_status & ivg->isrflag)
+				break;
+		}
+	}
+#endif
+
+	irq = ivg->irqno;
+
+	if (irq == IRQ_SYSTMR) {
+		bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
+		/* This is basically what we need from the register frame. */
+		__raw_get_cpu_var(__ipipe_tick_regs).ipend = regs->ipend;
+		__raw_get_cpu_var(__ipipe_tick_regs).pc = regs->pc;
+		if (!ipipe_root_domain_p)
+			__raw_get_cpu_var(__ipipe_tick_regs).ipend |= 0x10;
+		else
+			__raw_get_cpu_var(__ipipe_tick_regs).ipend &= ~0x10;
+	}
+
+handle_irq:
+
+	ipipe_trace_irq_entry(irq);
+	__ipipe_handle_irq(irq, regs);
+	ipipe_trace_irq_exit(irq);
+
+	if (ipipe_root_domain_p)
+		return !test_bit(IPIPE_STALL_FLAG, &ipipe_root_cpudom_var(status));
+
+	return 0;
+}
+
+#endif /* CONFIG_IPIPE */