author      Ashok Raj <ashok.raj@intel.com>          2005-09-06 18:16:15 -0400
committer   Linus Torvalds <torvalds@g5.osdl.org>   2005-09-07 19:57:15 -0400
commit      54d5d42404e7705cf3804593189e963350d470e5 (patch)
tree        7cf8a7fce163b19672193d8cf4ef6a7f6c131d9e /include/linux/irq.h
parent      f63ed39c578a2a2d067356a85ce7c28a7c795d8a (diff)
[PATCH] x86/x86_64: deferred handling of writes to /proc/irqxx/smp_affinity
When handling writes to /proc/irq, the current code reprograms the RTE
(the I/O APIC redirection table entry) directly. This is not recommended,
and could potentially cause chipsets to lock up or interrupts to be missed.
CONFIG_IRQBALANCE does this correctly: it reprograms the RTE only when the
interrupt is pending. The same needs to be done for /proc/irq handling as
well; otherwise user-space IRQ balancers are not really doing the right thing.
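For context, a user-space balancer drives this interface simply by writing a
hex CPU mask to the proc file. A minimal illustration (the IRQ number and
mask below are made up; needs root):

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* IRQ number and mask are illustrative only. */
		const char *path = "/proc/irq/19/smp_affinity";
		const char mask[] = "2\n";	/* hex mask: CPU1 only */
		int fd = open(path, O_WRONLY);

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* With this patch the kernel only records the request here;
		 * the RTE is rewritten when IRQ 19 next fires. */
		if (write(fd, mask, strlen(mask)) < 0)
			perror("write");
		close(fd);
		return 0;
	}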
- Renamed pending_irq_balance_cpumask to pending_irq_migrate_cpumask, for
  lack of a more generic name.
- Moved move_irq out of CONFIG_IRQBALANCE, and added the same for x86_64.
- Added a new proc handler for writes, so the actual reprogramming can be
  deferred to interrupt-handling time (see the sketch after this list).
- /proc/irq/XX/smp_affinity used to display CPU_MASK_ALL; it now shows only
  the active CPU mask, or exactly what was set.
- Provided a common move_irq implementation instead of duplicating it when
  using the generic irq framework.
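The new proc write handler itself lives in kernel/irq/proc.c and is not part
of the hunk below. As a hedged sketch only (set_pending_irq(), irq_descp()
and no_irq_affinity come from this hunk; the handler body and parsing details
are assumptions about its shape, not the literal patch code):

	static int irq_affinity_write_proc(struct file *file,
					   const char __user *buffer,
					   unsigned long count, void *data)
	{
		unsigned int irq = (unsigned int)(long)data;
		cpumask_t new_value, tmp;

		if (!irq_descp(irq)->handler->set_affinity || no_irq_affinity)
			return -EIO;

		if (cpumask_parse(buffer, count, new_value))
			return -EINVAL;

		/* Never allow an empty online mask - it would kill the IRQ. */
		cpus_and(tmp, new_value, cpu_online_map);
		if (cpus_empty(tmp))
			return -EINVAL;

		/*
		 * The key change: record the request and flag desc->move_irq
		 * instead of rewriting the RTE here in process context.  The
		 * mask is applied by move_native_irq() when the IRQ next fires.
		 */
		set_pending_irq(irq, new_value);

		return count;
	}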
Tested on i386/x86_64 and ia64, with CONFIG_PCI_MSI turned both on and off.
Tested UP builds as well.
MSI testing: TBD. I have the cards but still need to find a crossover cable,
although I did test an earlier version of this patch. Will test again in a
couple of days.
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Acked-by: Zwane Mwaikambo <zwane@holomorphy.com>
Grudgingly-acked-by: Andi Kleen <ak@muc.de>
Signed-off-by: Coywolf Qi Hunt <coywolf@lovecn.org>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux/irq.h')
-rw-r--r--   include/linux/irq.h   123
1 file changed, 123 insertions(+), 0 deletions(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 069d3b84d311..4a362b9ec966 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -71,16 +71,139 @@ typedef struct irq_desc {
 	unsigned int irq_count;		/* For detecting broken interrupts */
 	unsigned int irqs_unhandled;
 	spinlock_t lock;
+#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
+	unsigned int move_irq;		/* Flag need to re-target intr dest*/
+#endif
 } ____cacheline_aligned irq_desc_t;
 
 extern irq_desc_t irq_desc [NR_IRQS];
 
+/* Return a pointer to the irq descriptor for IRQ.  */
+static inline irq_desc_t *
+irq_descp (int irq)
+{
+	return irq_desc + irq;
+}
+
 #include <asm/hw_irq.h> /* the arch dependent stuff */
 
 extern int setup_irq(unsigned int irq, struct irqaction * new);
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 extern cpumask_t irq_affinity[NR_IRQS];
+
+#ifdef CONFIG_SMP
+static inline void set_native_irq_info(int irq, cpumask_t mask)
+{
+	irq_affinity[irq] = mask;
+}
+#else
+static inline void set_native_irq_info(int irq, cpumask_t mask)
+{
+}
+#endif
+
+#ifdef CONFIG_SMP
+
+#if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
+extern cpumask_t pending_irq_cpumask[NR_IRQS];
+
+static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
+{
+	irq_desc_t *desc = irq_desc + irq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	desc->move_irq = 1;
+	pending_irq_cpumask[irq] = mask;
+	spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+static inline void
+move_native_irq(int irq)
+{
+	cpumask_t tmp;
+	irq_desc_t *desc = irq_descp(irq);
+
+	if (likely (!desc->move_irq))
+		return;
+
+	desc->move_irq = 0;
+
+	if (likely(cpus_empty(pending_irq_cpumask[irq])))
+		return;
+
+	if (!desc->handler->set_affinity)
+		return;
+
+	/* note - we hold the desc->lock */
+	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
+
+	/*
+	 * If there was a valid mask to work with, please
+	 * do the disable, re-program, enable sequence.
+	 * This is *not* particularly important for level triggered
+	 * but in a edge trigger case, we might be setting rte
+	 * when an active trigger is comming in. This could
+	 * cause some ioapics to mal-function.
+	 * Being paranoid i guess!
+	 */
+	if (unlikely(!cpus_empty(tmp))) {
+		desc->handler->disable(irq);
+		desc->handler->set_affinity(irq,tmp);
+		desc->handler->enable(irq);
+	}
+	cpus_clear(pending_irq_cpumask[irq]);
+}
+
+#ifdef CONFIG_PCI_MSI
+/*
+ * Wonder why these are dummies?
+ * For e.g the set_ioapic_affinity_vector() calls the set_ioapic_affinity_irq()
+ * counter part after translating the vector to irq info. We need to perform
+ * this operation on the real irq, when we dont use vector, i.e when
+ * pci_use_vector() is false.
+ */
+static inline void move_irq(int irq)
+{
+}
+
+static inline void set_irq_info(int irq, cpumask_t mask)
+{
+}
+
+#else // CONFIG_PCI_MSI
+
+static inline void move_irq(int irq)
+{
+	move_native_irq(irq);
+}
+
+static inline void set_irq_info(int irq, cpumask_t mask)
+{
+	set_native_irq_info(irq, mask);
+}
+#endif // CONFIG_PCI_MSI
+
+#else // CONFIG_GENERIC_PENDING_IRQ || CONFIG_IRQBALANCE
+
+#define move_irq(x)
+#define move_native_irq(x)
+#define set_pending_irq(x,y)
+static inline void set_irq_info(int irq, cpumask_t mask)
+{
+	set_native_irq_info(irq, mask);
+}
+
+#endif // CONFIG_GENERIC_PENDING_IRQ
+
+#else // CONFIG_SMP
+
+#define move_irq(x)
+#define move_native_irq(x)
+
+#endif // CONFIG_SMP
+
 extern int no_irq_affinity;
 extern int noirqdebug_setup(char *str);
 
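One orientation note, since this header only defines the helpers: the
deferred mask has to be consumed somewhere in the interrupt path. The sketch
below is illustrative only and not part of this diff; the real call sites
are in arch code (e.g. the io_apic paths), where the end-of-interrupt hook
runs with desc->lock already held by __do_IRQ(), which is what the
"we hold the desc->lock" note in move_native_irq() relies on:

	/* Illustrative sketch, not from this patch: a level-triggered
	 * end-of-interrupt hook applying any deferred affinity change. */
	static void end_level_ioapic_irq(unsigned int irq)
	{
		/* Safe point: the IRQ is being serviced and desc->lock is
		 * held, so the disable/set_affinity/enable sequence cannot
		 * race with an incoming edge. */
		move_native_irq(irq);

		/* ... normal EOI/unmask handling would follow here ... */
	}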