author:    Thomas Gleixner <tglx@linutronix.de>  2010-09-29 11:18:47 -0400
committer: Thomas Gleixner <tglx@linutronix.de>  2010-10-12 10:53:44 -0400
commit:    78f90d91f395cd0dc1ef3f21e0c5cd6fd50d202c (patch)
tree:      e9a6b0a5d8a0b99cd23dbc1c1f376a7e78a93428
parent:    b7b29338dc7111ed8bd4d6555d84afae13ebe752 (diff)
genirq: Remove the now unused sparse irq leftovers
The move_irq_desc() function was only needed because the allocator did
not free the old descriptors, so the descriptors had to be moved in
create_irq_nr(). That's history now.

The code would never have been able to move active interrupt
descriptors on affinity changes anyway. That can be done in a
completely different way without all this horror.
Remove all of it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
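
To illustrate the reasoning above: once a descriptor is allocated on the
requested node to begin with and is freed by the allocator on teardown,
there is nothing left for a move_irq_desc()-style pass to do. The sketch
below is a minimal user-space model of that allocation pattern, not
kernel code; all names (model_desc, alloc_desc_on_node, free_desc) are
illustrative stand-ins.

#include <stdio.h>
#include <stdlib.h>

struct model_desc {
	unsigned int irq;
	int node;		/* NUMA node the descriptor lives on */
};

/* Allocate the descriptor on the requested node from the start, so a
 * later "move to another node" pass is never needed. (A real kernel
 * would use kzalloc_node(); plain calloc() stands in here.) */
static struct model_desc *alloc_desc_on_node(unsigned int irq, int node)
{
	struct model_desc *desc = calloc(1, sizeof(*desc));

	if (!desc)
		return NULL;
	desc->irq = irq;
	desc->node = node;
	return desc;
}

static void free_desc(struct model_desc *desc)
{
	free(desc);	/* the allocator, not a migration helper, frees it */
}

int main(void)
{
	struct model_desc *desc = alloc_desc_on_node(16, 1);

	if (!desc)
		return 1;
	printf("irq %u allocated on node %d\n", desc->irq, desc->node);
	free_desc(desc);
	return 0;
}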
-rw-r--r--  include/linux/irqdesc.h   |  12
-rw-r--r--  kernel/irq/Kconfig        |   5
-rw-r--r--  kernel/irq/Makefile       |   1
-rw-r--r--  kernel/irq/internals.h    | 102
-rw-r--r--  kernel/irq/irqdesc.c      |  30
-rw-r--r--  kernel/irq/numa_migrate.c | 120
6 files changed, 4 insertions, 266 deletions
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index f77dc5618d7e..979c68cc7458 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -82,24 +82,16 @@ struct irq_desc {
 	const char		*name;
 } ____cacheline_internodealigned_in_smp;
 
-extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
-				     struct irq_desc *desc, int node);
-extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
-
 #ifndef CONFIG_SPARSE_IRQ
 extern struct irq_desc irq_desc[NR_IRQS];
 #endif
 
-#ifdef CONFIG_NUMA_IRQ_DESC
-extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
-#else
+/* Will be removed once the last users in power and sh are gone */
+extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
 static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 {
 	return desc;
 }
-#endif
-
-extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index a42c0191d71a..31d766bf5d2e 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -26,11 +26,6 @@ config GENERIC_IRQ_PROBE
 config GENERIC_PENDING_IRQ
 	def_bool n
 
-if SPARSE_IRQ && NUMA
-config NUMA_IRQ_DESC
-	def_bool n
-endif
-
 config AUTO_IRQ_AFFINITY
 	def_bool n
 
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 1eaab0da56db..54329cd7b3ee 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -3,5 +3,4 @@ obj-y := irqdesc.o handle.o manage.o spurious.o resend.o chip.o dummychip.o devr
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
-obj-$(CONFIG_NUMA_IRQ_DESC) += numa_migrate.o
 obj-$(CONFIG_PM_SLEEP) += pm.o
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index f444203a772d..4571ae7e085a 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -18,17 +18,11 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
 extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
 
-extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
-extern raw_spinlock_t sparse_irq_lock;
 
 /* Resending of interrupts :*/
 void check_irq_resend(struct irq_desc *desc, unsigned int irq);
 
-#ifdef CONFIG_SPARSE_IRQ
-void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
-#endif
-
 #ifdef CONFIG_PROC_FS
 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
 extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
@@ -110,99 +104,3 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 
 #undef P
 
-/* Stuff below will be cleaned up after the sparse allocator is done */
-
-#ifdef CONFIG_SMP
-/**
- * alloc_desc_masks - allocate cpumasks for irq_desc
- * @desc:	pointer to irq_desc struct
- * @node:	node which will be handling the cpumasks
- * @boot:	true if need bootmem
- *
- * Allocates affinity and pending_mask cpumask if required.
- * Returns true if successful (or not required).
- */
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-				    bool boot)
-{
-	gfp_t gfp = GFP_ATOMIC;
-
-	if (boot)
-		gfp = GFP_NOWAIT;
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
-		return false;
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
-		free_cpumask_var(desc->irq_data.affinity);
-		return false;
-	}
-#endif
-#endif
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-	cpumask_setall(desc->irq_data.affinity);
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_clear(desc->pending_mask);
-#endif
-}
-
-/**
- * init_copy_desc_masks - copy cpumasks for irq_desc
- * @old_desc:	pointer to old irq_desc struct
- * @new_desc:	pointer to new irq_desc struct
- *
- * Insures affinity and pending_masks are copied to new irq_desc.
- * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
- * irq_desc struct so the copy is redundant.
- */
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
-#endif
-#endif
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-	free_cpumask_var(old_desc->irq_data.affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	free_cpumask_var(old_desc->pending_mask);
-#endif
-}
-
-#else /* !CONFIG_SMP */
-
-static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
-				    bool boot)
-{
-	return true;
-}
-
-static inline void init_desc_masks(struct irq_desc *desc)
-{
-}
-
-static inline void init_copy_desc_masks(struct irq_desc *old_desc,
-					struct irq_desc *new_desc)
-{
-}
-
-static inline void free_desc_masks(struct irq_desc *old_desc,
-				   struct irq_desc *new_desc)
-{
-}
-#endif /* CONFIG_SMP */
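
The removed alloc_desc_masks()/free_desc_masks() pair above is
essentially a two-stage allocation with rollback: if the second cpumask
cannot be allocated, the first one is freed before reporting failure.
A self-contained sketch of that pattern, using illustrative names rather
than kernel APIs, might look like:

#include <stdbool.h>
#include <stdlib.h>

struct model_masks {
	unsigned long *affinity;
	unsigned long *pending;
};

static bool alloc_masks(struct model_masks *m, size_t words)
{
	m->affinity = calloc(words, sizeof(*m->affinity));
	if (!m->affinity)
		return false;

	m->pending = calloc(words, sizeof(*m->pending));
	if (!m->pending) {
		/* roll back the first allocation on failure, as
		 * alloc_desc_masks() did for the affinity mask */
		free(m->affinity);
		m->affinity = NULL;
		return false;
	}
	return true;
}

static void free_masks(struct model_masks *m)
{
	free(m->pending);
	free(m->affinity);
}

int main(void)
{
	struct model_masks m;

	if (!alloc_masks(&m, 4))
		return 1;
	free_masks(&m);
	return 0;
}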
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index c9d5a1c12874..4f0b9c9d5c46 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -20,7 +20,7 @@
 /*
  * lockdep: we want to handle all irq_desc locks as a single lock-class:
  */
-struct lock_class_key irq_desc_lock_class;
+static struct lock_class_key irq_desc_lock_class;
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
 static void __init init_irq_default_affinity(void)
@@ -90,28 +90,11 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node)
 int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
-DEFINE_RAW_SPINLOCK(sparse_irq_lock);
+static DEFINE_RAW_SPINLOCK(sparse_irq_lock);
 static DECLARE_BITMAP(allocated_irqs, NR_IRQS);
 
 #ifdef CONFIG_SPARSE_IRQ
 
-void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
-{
-	void *ptr;
-
-	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
-			   GFP_ATOMIC, node);
-
-	/*
-	 * don't overwite if can not get new one
-	 * init_copy_kstat_irqs() could still use old one
-	 */
-	if (ptr) {
-		printk(KERN_DEBUG "  alloc kstat_irqs on node %d\n", node);
-		desc->kstat_irqs = ptr;
-	}
-}
-
 static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
 
 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
@@ -124,15 +107,6 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 	return radix_tree_lookup(&irq_desc_tree, irq);
 }
 
-void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
-{
-	void **ptr;
-
-	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
-	if (ptr)
-		radix_tree_replace_slot(ptr, desc);
-}
-
 static void delete_irq_desc(unsigned int irq)
 {
 	radix_tree_delete(&irq_desc_tree, irq);
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
deleted file mode 100644
index e7f1f16402c1..000000000000
--- a/kernel/irq/numa_migrate.c
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * NUMA irq-desc migration code
- *
- * Migrate IRQ data structures (irq_desc, chip_data, etc.) over to
- * the new "home node" of the IRQ.
- */
-
-#include <linux/irq.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/random.h>
-#include <linux/interrupt.h>
-#include <linux/kernel_stat.h>
-
-#include "internals.h"
-
-static void init_copy_kstat_irqs(struct irq_desc *old_desc,
-				 struct irq_desc *desc,
-				 int node, int nr)
-{
-	init_kstat_irqs(desc, node, nr);
-
-	if (desc->kstat_irqs != old_desc->kstat_irqs)
-		memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
-		       nr * sizeof(*desc->kstat_irqs));
-}
-
-static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
-{
-	if (old_desc->kstat_irqs == desc->kstat_irqs)
-		return;
-
-	kfree(old_desc->kstat_irqs);
-	old_desc->kstat_irqs = NULL;
-}
-
-static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc,
-		 struct irq_desc *desc, int node)
-{
-	memcpy(desc, old_desc, sizeof(struct irq_desc));
-	if (!alloc_desc_masks(desc, node, false)) {
-		printk(KERN_ERR "irq %d: can not get new irq_desc cpumask "
-				"for migration.\n", irq);
-		return false;
-	}
-	raw_spin_lock_init(&desc->lock);
-	desc->irq_data.node = node;
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids);
-	init_copy_desc_masks(old_desc, desc);
-	arch_init_copy_chip_data(old_desc, desc, node);
-	return true;
-}
-
-static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc)
-{
-	free_kstat_irqs(old_desc, desc);
-	free_desc_masks(old_desc, desc);
-	arch_free_chip_data(old_desc, desc);
-}
-
-static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
-					     int node)
-{
-	struct irq_desc *desc;
-	unsigned int irq;
-	unsigned long flags;
-
-	irq = old_desc->irq_data.irq;
-
-	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
-
-	/* We have to check it to avoid races with another CPU */
-	desc = irq_to_desc(irq);
-
-	if (desc && old_desc != desc)
-		goto out_unlock;
-
-	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-	if (!desc) {
-		printk(KERN_ERR "irq %d: can not get new irq_desc "
-				"for migration.\n", irq);
-		/* still use old one */
-		desc = old_desc;
-		goto out_unlock;
-	}
-	if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) {
-		/* still use old one */
-		kfree(desc);
-		desc = old_desc;
-		goto out_unlock;
-	}
-
-	replace_irq_desc(irq, desc);
-	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-	/* free the old one */
-	free_one_irq_desc(old_desc, desc);
-	kfree(old_desc);
-
-	return desc;
-
-out_unlock:
-	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
-
-	return desc;
-}
-
-struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
-{
-	/* those static or target node is -1, do not move them */
-	if (desc->irq_data.irq < NR_IRQS_LEGACY || node == -1)
-		return desc;
-
-	if (desc->irq_data.node != node)
-		desc = __real_move_irq_desc(desc, node);
-
-	return desc;
-}
-
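
Stripped of the kernel specifics, the deleted __real_move_irq_desc()
performed an allocate-copy-swap-free sequence that fell back to the old
descriptor on any failure. The rough user-space model below illustrates
that sequence only; the types are hypothetical and a plain array stands
in for the kernel's radix tree.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_MODEL_IRQS 32

struct model_desc {
	unsigned int irq;
	int node;
};

static struct model_desc *table[NR_MODEL_IRQS];	/* stand-in lookup table */

static struct model_desc *move_desc(struct model_desc *old, int node)
{
	struct model_desc *new;

	if (old->node == node)		/* already on the target node */
		return old;

	new = malloc(sizeof(*new));
	if (!new)
		return old;		/* on failure, keep using the old one */

	memcpy(new, old, sizeof(*new));	/* copy the descriptor contents */
	new->node = node;

	table[new->irq] = new;		/* swap the lookup entry ... */
	free(old);			/* ... then free the old descriptor */
	return new;
}

int main(void)
{
	struct model_desc *d = malloc(sizeof(*d));

	if (!d)
		return 1;
	d->irq = 5;
	d->node = 0;
	table[d->irq] = d;

	d = move_desc(d, 1);
	printf("irq %u now modeled on node %d\n", d->irq, d->node);
	free(d);
	return 0;
}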