path: root/kernel/irq/internals.h
author	Thomas Gleixner <tglx@linutronix.de>	2010-09-29 11:18:47 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2010-10-12 10:53:44 -0400
commit	78f90d91f395cd0dc1ef3f21e0c5cd6fd50d202c (patch)
tree	e9a6b0a5d8a0b99cd23dbc1c1f376a7e78a93428 /kernel/irq/internals.h
parent	b7b29338dc7111ed8bd4d6555d84afae13ebe752 (diff)
genirq: Remove the now unused sparse irq leftovers
The move_irq_desc() function was only used due to the problem that the
allocator did not free the old descriptors, so the descriptors had to be
moved in create_irq_nr(). That's history.

The code would never have been able to move active interrupt descriptors
on affinity settings. That can be done in a completely different way
without all this horror. Remove all of it.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
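The "completely different way" refers to the sparse irq allocator introduced
earlier in this series, which actually frees descriptors instead of moving
them. Below is a minimal sketch of a caller of that interface, assuming the
irq_alloc_descs()/irq_free_descs() API added by that series; the function
name and the constants here are illustrative only, not part of this patch:

/*
 * Sketch, not code from this patch: assumes irq_alloc_descs() and
 * irq_free_descs() as added by the new sparse allocator in this series.
 */
#include <linux/irq.h>

static int example_setup_irq_range(int node)
{
	/* Allocate 4 consecutive descriptors; irq < 0 means "any base". */
	int irq = irq_alloc_descs(-1, 0, 4, node);

	if (irq < 0)
		return irq;

	/* ... install chip and handlers for irq .. irq + 3 here ... */

	/* Descriptors are actually freed now, so no move/replace dance. */
	irq_free_descs(irq, 4);
	return 0;
}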
Diffstat (limited to 'kernel/irq/internals.h')
-rw-r--r--	kernel/irq/internals.h | 102 ------------------------------------
1 file changed, 0 insertions(+), 102 deletions(-)
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index f444203a772d..4571ae7e085a 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -18,17 +18,11 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 
-extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
-extern raw_spinlock_t sparse_irq_lock;
 
 /* Resending of interrupts :*/
 void check_irq_resend(struct irq_desc *desc, unsigned int irq);
 
-#ifdef CONFIG_SPARSE_IRQ
-void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
-#endif
-
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
 extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
@@ -110,99 +104,3 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 
 #undef P
 
-/* Stuff below will be cleaned up after the sparse allocator is done */
-
-#ifdef CONFIG_SMP
-/**
- * alloc_desc_masks - allocate cpumasks for irq_desc
- * @desc: pointer to irq_desc struct
- * @node: node which will be handling the cpumasks
- * @boot: true if need bootmem
- *
- * Allocates affinity and pending_mask cpumask if required.
- * Returns true if successful (or not required).
- */
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-				    bool boot)
-{
-	gfp_t gfp = GFP_ATOMIC;
-
-	if (boot)
-		gfp = GFP_NOWAIT;
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
-		return false;
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-		free_cpumask_var(desc->irq_data.affinity);
-		return false;
-	}
-#endif
-#endif
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-	cpumask_setall(desc->irq_data.affinity);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_clear(desc->pending_mask);
-#endif
-}
-
-/**
- * init_copy_desc_masks - copy cpumasks for irq_desc
- * @old_desc: pointer to old irq_desc struct
- * @new_desc: pointer to new irq_desc struct
- *
- * Insures affinity and pending_masks are copied to new irq_desc.
- * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
- * irq_desc struct so the copy is redundant.
- */
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
-#endif
-#endif
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-	free_cpumask_var(old_desc->irq_data.affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	free_cpumask_var(old_desc->pending_mask);
-#endif
-}
-
-#else /* !CONFIG_SMP */
-
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-				    bool boot)
-{
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-}
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-}
-#endif /* CONFIG_SMP */
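
The design point behind the removed helpers is spelled out in the
init_copy_desc_masks() comment above: with CONFIG_CPUMASK_OFFSTACK the
cpumasks are separately allocated storage that must be explicitly
allocated, copied and freed, while without it they are embedded in
struct irq_desc, so a plain struct copy already duplicates the bits.
The following self-contained userspace analogue mirrors the old copy
order (struct copy, then fresh mask allocation, then bit copy); all
names are made up here to illustrate the pattern, not kernel API:

/*
 * Toy userspace analogue of the removed CONFIG_CPUMASK_OFFSTACK handling.
 * Build with -DOFFSTACK for the variant with separately allocated masks.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct { unsigned long bits[2]; } mask_t;

struct desc {
#ifdef OFFSTACK
	mask_t *affinity;	/* separately allocated, like an offstack cpumask */
#else
	mask_t affinity;	/* embedded in the struct, nothing to allocate */
#endif
};

/* Analogue of alloc_desc_masks(): only the offstack variant can fail. */
static bool desc_alloc_masks(struct desc *d)
{
#ifdef OFFSTACK
	d->affinity = calloc(1, sizeof(*d->affinity));
	return d->affinity != NULL;
#else
	(void)d;
	return true;
#endif
}

/*
 * Analogue of init_copy_desc_masks(): with embedded masks the bits live
 * inside the struct, so the struct copy in main() has already duplicated
 * them and the explicit copy is redundant -- the point of the removed
 * kernel-doc comment.
 */
static void desc_copy_masks(struct desc *dst, const struct desc *src)
{
#ifdef OFFSTACK
	memcpy(dst->affinity, src->affinity, sizeof(*dst->affinity));
#else
	(void)dst;
	(void)src;
#endif
}

/* Analogue of free_desc_masks(): a no-op unless the masks are offstack. */
static void desc_free_masks(struct desc *d)
{
#ifdef OFFSTACK
	free(d->affinity);
#else
	(void)d;
#endif
}

int main(void)
{
	struct desc a = { 0 }, b;

	if (!desc_alloc_masks(&a))
		return 1;

	b = a;				/* struct copy: duplicates embedded masks,  */
	if (!desc_alloc_masks(&b))	/* but offstack storage needs a fresh block */
		return 1;		/* (toy code: leaks a's mask on this path)  */
	desc_copy_masks(&b, &a);	/* and an explicit copy of the bits         */

	desc_free_masks(&a);
	desc_free_masks(&b);
	puts("ok");
	return 0;
}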