author     Andrew Morton <akpm@osdl.org>              2006-03-25 06:07:36 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>      2006-03-25 11:22:55 -0500
commit     c777ac5594f772ac760e02c3ac71d067616b579d
tree       3a186fd7c1b63a89bc6c6749b2b234821ee8fdc4 /include/linux
parent     4cae59d2e85c1ee2ab1ee284db1945c5394cd965
[PATCH] irq: uninline migration functions
Uninline some massive IRQ migration functions. Put them in the new
kernel/irq/migration.c.
Cc: Andi Kleen <ak@muc.de>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/irq.h   49
1 file changed, 2 insertions, 47 deletions
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 6c5d4c898ccb..ee2a82a572f7 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -114,53 +114,8 @@ static inline void set_native_irq_info(int irq, cpumask_t mask)
 #if defined (CONFIG_GENERIC_PENDING_IRQ) || defined (CONFIG_IRQBALANCE)
 extern cpumask_t pending_irq_cpumask[NR_IRQS];
 
-static inline void set_pending_irq(unsigned int irq, cpumask_t mask)
-{
-	irq_desc_t *desc = irq_desc + irq;
-	unsigned long flags;
-
-	spin_lock_irqsave(&desc->lock, flags);
-	desc->move_irq = 1;
-	pending_irq_cpumask[irq] = mask;
-	spin_unlock_irqrestore(&desc->lock, flags);
-}
-
-static inline void
-move_native_irq(int irq)
-{
-	cpumask_t tmp;
-	irq_desc_t *desc = irq_descp(irq);
-
-	if (likely (!desc->move_irq))
-		return;
-
-	desc->move_irq = 0;
-
-	if (likely(cpus_empty(pending_irq_cpumask[irq])))
-		return;
-
-	if (!desc->handler->set_affinity)
-		return;
-
-	/* note - we hold the desc->lock */
-	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);
-
-	/*
-	 * If there was a valid mask to work with, please
-	 * do the disable, re-program, enable sequence.
-	 * This is *not* particularly important for level triggered
-	 * but in a edge trigger case, we might be setting rte
-	 * when an active trigger is comming in. This could
-	 * cause some ioapics to mal-function.
-	 * Being paranoid i guess!
-	 */
-	if (unlikely(!cpus_empty(tmp))) {
-		desc->handler->disable(irq);
-		desc->handler->set_affinity(irq,tmp);
-		desc->handler->enable(irq);
-	}
-	cpus_clear(pending_irq_cpumask[irq]);
-}
+void set_pending_irq(unsigned int irq, cpumask_t mask);
+void move_native_irq(int irq);
 
 #ifdef CONFIG_PCI_MSI
 /*
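
The diffstat above is limited to include/linux, so the new kernel/irq/migration.c introduced by this patch is not shown here. As a rough sketch of what that file plausibly contains, assuming the function bodies were moved verbatim from the inline versions removed above with only the static inline qualifiers dropped:

/*
 * Sketch of kernel/irq/migration.c (hypothetical reconstruction; the real
 * file is outside this diffstat). The bodies below are the inline versions
 * removed from include/linux/irq.h, now given a single out-of-line
 * definition.
 */
#include <linux/irq.h>

void set_pending_irq(unsigned int irq, cpumask_t mask)
{
	irq_desc_t *desc = irq_desc + irq;
	unsigned long flags;

	spin_lock_irqsave(&desc->lock, flags);
	desc->move_irq = 1;			/* mark this IRQ as pending migration */
	pending_irq_cpumask[irq] = mask;	/* remember the target CPU mask */
	spin_unlock_irqrestore(&desc->lock, flags);
}

void move_native_irq(int irq)
{
	cpumask_t tmp;
	irq_desc_t *desc = irq_descp(irq);

	if (likely(!desc->move_irq))
		return;

	desc->move_irq = 0;

	if (likely(cpus_empty(pending_irq_cpumask[irq])))
		return;

	if (!desc->handler->set_affinity)
		return;

	/* note - we hold the desc->lock */
	cpus_and(tmp, pending_irq_cpumask[irq], cpu_online_map);

	/*
	 * Only reprogram affinity if the pending mask intersects the online
	 * CPUs; use the disable/set_affinity/enable sequence so an
	 * edge-triggered interrupt arriving mid-update cannot confuse the
	 * IO-APIC.
	 */
	if (unlikely(!cpus_empty(tmp))) {
		desc->handler->disable(irq);
		desc->handler->set_affinity(irq, tmp);
		desc->handler->enable(irq);
	}
	cpus_clear(pending_irq_cpumask[irq]);
}

The net effect is that each call site now links against one shared copy of these functions instead of expanding the full locking and affinity-reprogramming sequence inline, which is the size reduction the commit message refers to.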