Diffstat (limited to 'arch/mips/include/asm/irq.h')
 arch/mips/include/asm/irq.h | 64 +++++++++++++++++++++++++++++++++---------------------------------
 1 file changed, 33 insertions(+), 31 deletions(-)
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index b003ed52ed1..0ec01294b06 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -55,9 +55,9 @@ static inline void smtc_im_ack_irq(unsigned int irq)
 #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
 #include <linux/cpumask.h>
 
-extern int plat_set_irq_affinity(unsigned int irq,
-                                 const struct cpumask *affinity);
-extern void smtc_forward_irq(unsigned int irq);
+extern int plat_set_irq_affinity(struct irq_data *d,
+                                 const struct cpumask *affinity, bool force);
+extern void smtc_forward_irq(struct irq_data *d);
 
 /*
  * IRQ affinity hook invoked at the beginning of interrupt dispatch
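
The prototype change above tracks the genirq switch from raw IRQ numbers to struct irq_data: an irq_chip's .irq_set_affinity callback receives (struct irq_data *, const struct cpumask *, bool force), where force distinguishes kernel-forced affinity changes from user requests. Giving plat_set_irq_affinity() the same signature lets a platform wire it straight into its irq_chip. A minimal illustrative sketch follows; the chip below is hypothetical and not part of this patch:

#include <linux/irq.h>

/* Hypothetical platform PIC wiring, for illustration only. */
static struct irq_chip example_pic = {
        .name                   = "example-pic",
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
        /*
         * With the new prototype, the SMTC-aware affinity hook plugs
         * directly into the generic irq_chip callback slot, with no
         * wrapper needed to translate irq_data back to an IRQ number.
         */
        .irq_set_affinity       = plat_set_irq_affinity,
#endif
};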
@@ -70,51 +70,53 @@ extern void smtc_forward_irq(unsigned int irq);
  * cpumask implementations, this version is optimistically assuming
  * that cpumask.h macro overhead is reasonable during interrupt dispatch.
  */
-#define IRQ_AFFINITY_HOOK(irq)                                          \
-do {                                                                    \
-    if (!cpumask_test_cpu(smp_processor_id(), irq_desc[irq].affinity)) {\
-        smtc_forward_irq(irq);                                          \
-        irq_exit();                                                     \
-        return;                                                         \
-    }                                                                   \
-} while (0)
+static inline int handle_on_other_cpu(unsigned int irq)
+{
+        struct irq_data *d = irq_get_irq_data(irq);
+
+        if (cpumask_test_cpu(smp_processor_id(), d->affinity))
+                return 0;
+        smtc_forward_irq(d);
+        return 1;
+}
 
 #else /* Not doing SMTC affinity */
 
-#define IRQ_AFFINITY_HOOK(irq) do { } while (0)
+static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
 
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 
+static inline void smtc_im_backstop(unsigned int irq)
+{
+        if (irq_hwmask[irq] & 0x0000ff00)
+                write_c0_tccontext(read_c0_tccontext() &
+                                   ~(irq_hwmask[irq] & 0x0000ff00));
+}
+
 /*
  * Clear interrupt mask handling "backstop" if irq_hwmask
  * entry so indicates. This implies that the ack() or end()
  * functions will take over re-enabling the low-level mask.
  * Otherwise it will be done on return from exception.
  */
-#define __DO_IRQ_SMTC_HOOK(irq)                                         \
-do {                                                                    \
-        IRQ_AFFINITY_HOOK(irq);                                         \
-        if (irq_hwmask[irq] & 0x0000ff00)                               \
-                write_c0_tccontext(read_c0_tccontext() &                \
-                                   ~(irq_hwmask[irq] & 0x0000ff00));    \
-} while (0)
-
-#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq)                                \
-do {                                                                    \
-        if (irq_hwmask[irq] & 0x0000ff00)                               \
-                write_c0_tccontext(read_c0_tccontext() &                \
-                                   ~(irq_hwmask[irq] & 0x0000ff00));    \
-} while (0)
+static inline int smtc_handle_on_other_cpu(unsigned int irq)
+{
+        int ret = handle_on_other_cpu(irq);
+
+        if (!ret)
+                smtc_im_backstop(irq);
+        return ret;
+}
 
 #else
 
-#define __DO_IRQ_SMTC_HOOK(irq)                                         \
-do {                                                                    \
-        IRQ_AFFINITY_HOOK(irq);                                         \
-} while (0)
-#define __NO_AFFINITY_IRQ_SMTC_HOOK(irq) do { } while (0)
+static inline void smtc_im_backstop(unsigned int irq) { }
+static inline int smtc_handle_on_other_cpu(unsigned int irq)
+{
+        return handle_on_other_cpu(irq);
+}
 
 #endif
 
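
Taken together, the hunk replaces the __DO_IRQ_SMTC_HOOK()/__NO_AFFINITY_IRQ_SMTC_HOOK() macros with ordinary inline functions: smtc_handle_on_other_cpu() returns nonzero when the interrupt was forwarded to the TC named in its affinity mask, and clears the IM backstop only when the interrupt will be handled locally. Note the control-flow change: the old IRQ_AFFINITY_HOOK() macro executed irq_exit() and a hidden "return" inside its caller, whereas the new helper merely reports the forwarding through its return value and leaves exit handling to the caller. A sketch of how a dispatch path might consume the new helpers; this is illustrative only, the actual callers are updated elsewhere in the patch:

#include <linux/hardirq.h>      /* irq_enter()/irq_exit() */
#include <linux/irq.h>          /* generic_handle_irq() */

/*
 * Illustrative sketch, not taken from this patch: a do_IRQ()-style
 * dispatcher built on the new helpers. When the IRQ was forwarded to
 * another TC, local handling is skipped; irq_exit() now runs on the
 * normal return path instead of inside the old macro.
 */
static inline void example_do_IRQ(unsigned int irq)
{
        irq_enter();
        if (!smtc_handle_on_other_cpu(irq))
                generic_handle_irq(irq);
        irq_exit();
}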