Diffstat (limited to 'kernel/irq/handle.c')
 kernel/irq/handle.c | 101
 1 file changed, 65 insertions(+), 36 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 6492400cb50d..375d68cd5bf0 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <linux/bootmem.h>
 
 #include "internals.h"
 
@@ -56,11 +57,8 @@ void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
-void __init __attribute__((weak)) arch_early_irq_init(void)
-{
-}
-
 #ifdef CONFIG_SPARSE_IRQ
+
 static struct irq_desc irq_desc_init = {
 	.irq = -1,
 	.status = IRQ_DISABLED,
@@ -68,9 +66,6 @@ static struct irq_desc irq_desc_init = {
 	.handle_irq = handle_bad_irq,
 	.depth = 1,
 	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
 };
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
@@ -90,13 +85,11 @@ void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
 	desc->kstat_irqs = (unsigned int *)ptr;
 }
 
-void __attribute__((weak)) arch_init_chip_data(struct irq_desc *desc, int cpu)
-{
-}
-
 static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 {
 	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
+
+	spin_lock_init(&desc->lock);
 	desc->irq = irq;
 #ifdef CONFIG_SMP
 	desc->cpu = cpu;
@@ -107,6 +100,10 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
 		printk(KERN_ERR "can not alloc kstat_irqs\n");
 		BUG_ON(1);
 	}
+	if (!init_alloc_desc_masks(desc, cpu, false)) {
+		printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
+		BUG_ON(1);
+	}
 	arch_init_chip_data(desc, cpu);
 }
 
@@ -115,7 +112,7 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
  */
 DEFINE_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc *irq_desc_ptrs[NR_IRQS] __read_mostly;
+struct irq_desc **irq_desc_ptrs __read_mostly;
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS_LEGACY-1] = {
@@ -125,40 +122,52 @@ static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_sm
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
-#endif
 	}
 };
 
-/* FIXME: use bootmem alloc ...*/
-static unsigned int kstat_irqs_legacy[NR_IRQS_LEGACY][NR_CPUS];
+static unsigned int *kstat_irqs_legacy;
 
-void __init early_irq_init(void)
+int __init early_irq_init(void)
 {
 	struct irq_desc *desc;
 	int legacy_count;
 	int i;
 
+	/* initialize nr_irqs based on nr_cpu_ids */
+	arch_probe_nr_irqs();
+	printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d\n", NR_IRQS, nr_irqs);
+
 	desc = irq_desc_legacy;
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 
+	/* allocate irq_desc_ptrs array based on nr_irqs */
+	irq_desc_ptrs = alloc_bootmem(nr_irqs * sizeof(void *));
+
+	/* allocate based on nr_cpu_ids */
+	/* FIXME: invert kstat_irgs, and it'd be a per_cpu_alloc'd thing */
+	kstat_irqs_legacy = alloc_bootmem(NR_IRQS_LEGACY * nr_cpu_ids *
+					  sizeof(int));
+
 	for (i = 0; i < legacy_count; i++) {
 		desc[i].irq = i;
-		desc[i].kstat_irqs = kstat_irqs_legacy[i];
-
+		desc[i].kstat_irqs = kstat_irqs_legacy + i * nr_cpu_ids;
+		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
+		init_alloc_desc_masks(&desc[i], 0, true);
 		irq_desc_ptrs[i] = desc + i;
 	}
 
-	for (i = legacy_count; i < NR_IRQS; i++)
+	for (i = legacy_count; i < nr_irqs; i++)
 		irq_desc_ptrs[i] = NULL;
 
-	arch_early_irq_init();
+	return arch_early_irq_init();
 }
 
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
-	return (irq < NR_IRQS) ? irq_desc_ptrs[irq] : NULL;
+	if (irq_desc_ptrs && irq < nr_irqs)
+		return irq_desc_ptrs[irq];
+
+	return NULL;
 }
 
 struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
@@ -167,10 +176,9 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 	unsigned long flags;
 	int node;
 
-	if (irq >= NR_IRQS) {
-		printk(KERN_WARNING "irq >= NR_IRQS in irq_to_desc_alloc: %d %d\n",
-			irq, NR_IRQS);
-		WARN_ON(1);
+	if (irq >= nr_irqs) {
+		WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
+			irq, nr_irqs);
 		return NULL;
 	}
 
@@ -203,7 +211,7 @@ out_unlock:
 	return desc;
 }
 
-#else
+#else /* !CONFIG_SPARSE_IRQ */
 
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
@@ -212,13 +220,37 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 		.handle_irq = handle_bad_irq,
 		.depth = 1,
 		.lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock),
-#ifdef CONFIG_SMP
-		.affinity = CPU_MASK_ALL
-#endif
 	}
 };
 
-#endif
+int __init early_irq_init(void)
+{
+	struct irq_desc *desc;
+	int count;
+	int i;
+
+	printk(KERN_INFO "NR_IRQS:%d\n", NR_IRQS);
+
+	desc = irq_desc;
+	count = ARRAY_SIZE(irq_desc);
+
+	for (i = 0; i < count; i++) {
+		desc[i].irq = i;
+		init_alloc_desc_masks(&desc[i], 0, true);
+	}
+	return arch_early_irq_init();
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
+}
+
+struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+{
+	return irq_to_desc(irq);
+}
+#endif /* !CONFIG_SPARSE_IRQ */
 
 /*
  * What should we do if we get a hw irq event on an illegal vector?
@@ -428,9 +460,6 @@ void early_init_irq_lock_class(void)
 	int i;
 
 	for_each_irq_desc(i, desc) {
-		if (!desc)
-			continue;
-
 		lockdep_set_class(&desc->lock, &irq_desc_lock_class);
 	}
 }
@@ -439,7 +468,7 @@ void early_init_irq_lock_class(void)
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->kstat_irqs[cpu];
+	return desc ? desc->kstat_irqs[cpu] : 0;
 }
 #endif
 EXPORT_SYMBOL(kstat_irqs_cpu);
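
The central change in early_irq_init() above is sizing the legacy kstat_irqs storage by the runtime nr_cpu_ids instead of the compile-time NR_CPUS: the static two-dimensional array becomes one flat bootmem allocation, and each descriptor receives a per-IRQ slice via kstat_irqs_legacy + i * nr_cpu_ids. The userspace sketch below only illustrates that flattened, row-major indexing; the demo names, sizes, and the use of calloc() are stand-ins and are not part of the patch.

/*
 * Illustrative sketch: one flat allocation of NR_IRQS_LEGACY * nr_cpu_ids
 * counters, addressed per IRQ the same way the patch does it.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_NR_IRQS_LEGACY 16		/* stand-in for NR_IRQS_LEGACY */

int main(void)
{
	int nr_cpu_ids = 4;		/* stand-in for the runtime CPU count */
	unsigned int *kstat;		/* stand-in for kstat_irqs_legacy */

	/* single flat allocation instead of a [NR_IRQS_LEGACY][NR_CPUS] array */
	kstat = calloc(DEMO_NR_IRQS_LEGACY * nr_cpu_ids, sizeof(*kstat));
	if (!kstat)
		return 1;

	/* per-IRQ base pointer, as in desc[i].kstat_irqs = base + i * nr_cpu_ids */
	unsigned int *irq3 = kstat + 3 * nr_cpu_ids;

	irq3[2]++;			/* count one interrupt for IRQ 3 on CPU 2 */

	/* the same counter viewed through the flat array */
	printf("flat index %d -> %u\n",
	       3 * nr_cpu_ids + 2, kstat[3 * nr_cpu_ids + 2]);

	free(kstat);
	return 0;
}

Because the row length is nr_cpu_ids rather than NR_CPUS, the allocation shrinks to match the CPUs actually present, which is the point of moving it to alloc_bootmem() at early_irq_init() time.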