author	Thomas Gleixner <tglx@linutronix.de>	2011-03-23 17:09:04 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2011-03-25 13:45:18 -0400
commit	930cd54b3bd78e52991a89b39b5ef58355ad2b6d (patch)
tree	48699521917d84863846f3e7638c4178dfa9fbf5 /arch/mips
parent	2a2b2212986a4072d11e521a63672e3219173437 (diff)
MIPS: SMTC: Cleanup the hook mess and use irq_data
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
To: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/2194/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
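The helpers introduced by the diff below are kernel code and only build in-tree. As a rough, self-contained illustration of the cleanup pattern this patch applies (replacing a do { ... return; } while (0) hook macro that hides control flow inside the caller with a static inline helper whose return value tells the caller whether the interrupt was forwarded), here is a minimal userspace sketch. All names here (mock_*, fake_*) are hypothetical stand-ins for illustration, not part of the patch or of the kernel API.

/*
 * Userspace sketch only: mimics the shape of the converted hook, not the
 * actual SMTC code.  fake_irq_affinity[] stands in for the per-IRQ
 * affinity mask; mock_* names are invented for illustration.
 */
#include <stdio.h>

static int fake_current_cpu = 0;
static int fake_irq_affinity[4] = { 0, 1, 0, 1 };	/* target CPU per IRQ */

/* Returns nonzero when the IRQ was forwarded to another CPU. */
static inline int mock_handle_on_other_cpu(unsigned int irq)
{
	if (fake_irq_affinity[irq] == fake_current_cpu)
		return 0;				/* handle locally */
	printf("irq %u forwarded to cpu %d\n", irq, fake_irq_affinity[irq]);
	return 1;
}

/* The dispatch path keeps its control flow explicit, as do_IRQ() does now. */
static void mock_do_IRQ(unsigned int irq)
{
	if (!mock_handle_on_other_cpu(irq))
		printf("irq %u handled locally on cpu %d\n", irq, fake_current_cpu);
}

int main(void)
{
	mock_do_IRQ(0);		/* affinity matches: handled locally */
	mock_do_IRQ(1);		/* affinity differs: forwarded */
	return 0;
}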
Diffstat (limited to 'arch/mips')
-rw-r--r--	arch/mips/include/asm/irq.h	60
-rw-r--r--	arch/mips/kernel/irq.c	6
-rw-r--r--	arch/mips/kernel/smtc.c	12
3 files changed, 39 insertions(+), 39 deletions(-)
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index bdc8b8c1832..0ec01294b06 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -57,7 +57,7 @@ static inline void smtc_im_ack_irq(unsigned int irq)
 
 extern int plat_set_irq_affinity(struct irq_data *d,
 				 const struct cpumask *affinity, bool force);
-extern void smtc_forward_irq(unsigned int irq);
+extern void smtc_forward_irq(struct irq_data *d);
 
 /*
  * IRQ affinity hook invoked at the beginning of interrupt dispatch
@@ -70,51 +70,53 @@ extern void smtc_forward_irq(unsigned int irq);
  * cpumask implementations, this version is optimistically assuming
  * that cpumask.h macro overhead is reasonable during interrupt dispatch.
  */
-#define IRQ_AFFINITY_HOOK(irq)						\
-do {									\
-    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
-	smtc_forward_irq(irq);						\
-	irq_exit();							\
-	return;								\
-    }									\
-} while (0)
+static inline int handle_on_other_cpu(unsigned int irq)
+{
+	struct irq_data *d = irq_get_irq_data(irq);
+
+	if (cpumask_test_cpu(smp_processor_id(), d->affinity))
+		return 0;
+	smtc_forward_irq(d);
+	return 1;
+}
 
 #else /* Not doing SMTC affinity */
 
-#define IRQ_AFFINITY_HOOK(irq) do { } while (0)
+static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
 
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 
+static inline void smtc_im_backstop(unsigned int irq)
+{
+	if (irq_hwmask[irq] & 0x0000ff00)
+		write_c0_tccontext(read_c0_tccontext() &
+				   ~(irq_hwmask[irq] & 0x0000ff00));
+}
+
 /*
  * Clear interrupt mask handling "backstop" if irq_hwmask
  * entry so indicates.  This implies that the ack() or end()
  * functions will take over re-enabling the low-level mask.
  * Otherwise it will be done on return from exception.
  */
-#define __DO_IRQ_SMTC_HOOK(irq)						\
-do {									\
-	IRQ_AFFINITY_HOOK(irq);						\
-	if (irq_hwmask[irq] & 0x0000ff00)				\
-		write_c0_tccontext(read_c0_tccontext() &		\
-				   ~(irq_hwmask[irq] & 0x0000ff00));	\
-} while (0)
-
-#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq)				\
-do {									\
-	if (irq_hwmask[irq] & 0x0000ff00)				\
-		write_c0_tccontext(read_c0_tccontext() &		\
-				   ~(irq_hwmask[irq] & 0x0000ff00));	\
-} while (0)
+static inline int smtc_handle_on_other_cpu(unsigned int irq)
+{
+	int ret = handle_on_other_cpu(irq);
+
+	if (!ret)
+		smtc_im_backstop(irq);
+	return ret;
+}
 
 #else
 
-#define __DO_IRQ_SMTC_HOOK(irq)						\
-do {									\
-	IRQ_AFFINITY_HOOK(irq);						\
-} while (0)
-#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0)
+static inline void smtc_im_backstop(unsigned int irq) { }
+static inline int smtc_handle_on_other_cpu(unsigned int irq)
+{
+	return handle_on_other_cpu(irq);
+}
 
 #endif
 
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 4f93db58a79..779b78799ad 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -183,8 +183,8 @@ void __irq_entry do_IRQ(unsigned int irq)
 {
 	irq_enter();
 	check_stack_overflow();
-	__DO_IRQ_SMTC_HOOK(irq);
-	generic_handle_irq(irq);
+	if (!smtc_handle_on_other_cpu(irq))
+		generic_handle_irq(irq);
 	irq_exit();
 }
 
@@ -197,7 +197,7 @@ void __irq_entry do_IRQ(unsigned int irq)
 void __irq_entry do_IRQ_no_affinity(unsigned int irq)
 {
 	irq_enter();
-	__NO_AFFINITY_IRQ_SMTC_HOOK(irq);
+	smtc_im_backstop(irq);
 	generic_handle_irq(irq);
 	irq_exit();
 }
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 14c64235a24..f7e2c7807d7 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -677,9 +677,9 @@ void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 	 */
 }
 
-void smtc_forward_irq(unsigned int irq)
+void smtc_forward_irq(struct irq_data *d)
 {
-	struct irq_data *d = irq_get_irq_data(irq);
+	unsigned int irq = d->irq;
 	int target;
 
 	/*
@@ -708,12 +708,10 @@ void smtc_forward_irq(unsigned int irq)
 	 */
 
 	/* If no one is eligible, service locally */
-	if (target >= NR_CPUS) {
+	if (target >= NR_CPUS)
 		do_IRQ_no_affinity(irq);
-		return;
-	}
-
-	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+	else
+		smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
 }
 
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */