author | Jiang Liu <jiang.liu@linux.intel.com> | 2014-10-27 04:12:00 -0400
---|---|---
committer | Thomas Gleixner <tglx@linutronix.de> | 2014-12-16 08:08:16 -0500
commit | 74afab7af7d9aeba86b3b8e39670cf7d0058f6df (patch) |
tree | ef65e6502d8a56eada797a278002cdd9e1307f04 /arch/x86/kernel/apic/vector.c |
parent | 55a0e2b122c26c7496ea85754bceddc05dba402b (diff) |
x86, irq: Move local APIC related code from io_apic.c into vector.c
Create arch/x86/kernel/apic/vector.c to host the local APIC related code,
in preparation for making MSI/HT_IRQ independent of IOAPIC.
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Prarit Bhargava <prarit@redhat.com>
Cc: Grant Likely <grant.likely@linaro.org>
Link: http://lkml.kernel.org/r/1414397531-28254-10-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/apic/vector.c')
-rw-r--r-- | arch/x86/kernel/apic/vector.c | 694
1 file changed, 694 insertions, 0 deletions
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
new file mode 100644
index 000000000000..9ba9bd477051
--- /dev/null
+++ b/arch/x86/kernel/apic/vector.c
@@ -0,0 +1,694 @@
/*
 * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *	Moved from arch/x86/kernel/apic/io_apic.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/i8259.h>
#include <asm/desc.h>
#include <asm/irq_remapping.h>

static DEFINE_RAW_SPINLOCK(vector_lock);

void lock_vector_lock(void)
{
	/* Used to ensure that the set of online cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq_get_chip_data(irq);
}

struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
{
	return irq_data->chip_data;
}

static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
	if (!cfg)
		return NULL;
	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
		goto out_cfg;
	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
		goto out_domain;
#ifdef CONFIG_X86_IO_APIC
	INIT_LIST_HEAD(&cfg->irq_2_pin);
#endif
	return cfg;
out_domain:
	free_cpumask_var(cfg->domain);
out_cfg:
	kfree(cfg);
	return NULL;
}

struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
{
	int res = irq_alloc_desc_at(at, node);
	struct irq_cfg *cfg;

	if (res < 0) {
		if (res != -EEXIST)
			return NULL;
		cfg = irq_cfg(at);
		if (cfg)
			return cfg;
	}

	cfg = alloc_irq_cfg(at, node);
	if (cfg)
		irq_set_chip_data(at, cfg);
	else
		irq_free_desc(at);
	return cfg;
}

static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
{
	if (!cfg)
		return;
	irq_set_chip_data(at, NULL);
	free_cpumask_var(cfg->domain);
	free_cpumask_var(cfg->old_domain);
	kfree(cfg);
}

static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 16;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	cpumask_clear(cfg->old_domain);
	cpu = cpumask_first_and(mask, cpu_online_mask);
	while (cpu < nr_cpu_ids) {
		int new_cpu, vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask, mask);

		if (cpumask_subset(tmp_mask, cfg->domain)) {
			err = 0;
			if (cpumask_equal(tmp_mask, cfg->domain))
				break;
			/*
			 * The new cpumask using the vector is a proper
			 * subset of the mask currently in use, so clean up
			 * the vector allocation for the members that are
			 * not used anymore.
			 */
			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
			break;
		}

		vector = current_vector;
		offset = current_offset;
next:
		vector += 16;
		if (vector >= first_system_vector) {
			offset = (offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}

		if (unlikely(current_vector == vector)) {
			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
			continue;
		}

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
			if (per_cpu(vector_irq, new_cpu)[vector] >
			    VECTOR_UNDEFINED)
				goto next;
		}
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (cfg->vector) {
			cpumask_copy(cfg->old_domain, cfg->domain);
			cfg->move_in_progress =
			   cpumask_intersects(cfg->old_domain, cpu_online_mask);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);

	return err;
}

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}

void clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress)) {
		raw_spin_unlock_irqrestore(&vector_lock, flags);
		return;
	}

	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
			break;
		}
	}
	cfg->move_in_progress = 0;
	raw_spin_unlock_irqrestore(&vector_lock, flags);
}

static void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we set up our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the in-use vectors */
	for_each_active_irq(irq) {
		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq <= VECTOR_UNDEFINED)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
	}
	raw_spin_unlock(&vector_lock);
}

/*
 * Setup the vector to irq mappings.
 */
void setup_vector_irq(int cpu)
{
	int irq;

	/*
	 * On most platforms the legacy PIC delivers interrupts on the
	 * boot cpu, but there are certain platforms where PIC interrupts
	 * are delivered to multiple cpus. If the legacy IRQ is handled by
	 * the legacy PIC, set up the static legacy vector to irq mapping
	 * for the new cpu that is coming online:
	 */
	for (irq = 0; irq < nr_legacy_irqs(); irq++)
		per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;

	__setup_vector_irq(cpu);
}

int apic_retrigger_irq(struct irq_data *data)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&vector_lock, flags);
	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}

void apic_ack_edge(struct irq_data *data)
{
	irq_complete_move(data->chip_data);
	irq_move_irq(data);
	ack_APIC_irq();
}

/*
 * Either sets data->affinity to a valid value, and returns
 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
 * leaves data->affinity untouched.
 */
int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
		      unsigned int *dest_id)
{
	struct irq_cfg *cfg = data->chip_data;
	unsigned int irq = data->irq;
	int err;

	if (!config_enabled(CONFIG_SMP))
		return -EPERM;

	if (!cpumask_intersects(mask, cpu_online_mask))
		return -EINVAL;

	err = assign_irq_vector(irq, cfg, mask);
	if (err)
		return err;

	err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
	if (err) {
		if (assign_irq_vector(irq, cfg, data->affinity))
			pr_err("Failed to recover vector for irq %d\n", irq);
		return err;
	}

	cpumask_copy(data->affinity, mask);

	return 0;
}

#ifdef CONFIG_SMP
void send_cleanup_vector(struct irq_cfg *cfg)
{
	cpumask_var_t cleanup_mask;

	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
		unsigned int i;

		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
			apic->send_IPI_mask(cpumask_of(i),
					    IRQ_MOVE_CLEANUP_VECTOR);
	} else {
		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		free_cpumask_var(cleanup_mask);
	}
	cfg->move_in_progress = 0;
}

asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
	irq_enter();
	exit_idle();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		int irq;
		unsigned int irr;
		struct irq_desc *desc;
		struct irq_cfg *cfg;

		irq = __this_cpu_read(vector_irq[vector]);

		if (irq <= VECTOR_UNDEFINED)
			continue;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		if (!cfg)
			continue;

		raw_spin_lock(&desc->lock);

		/*
		 * Check if the irq migration is in progress. If so, we
		 * haven't received the cleanup request yet for this irq.
		 */
		if (cfg->move_in_progress)
			goto unlock;

		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
			goto unlock;

		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		/*
		 * Check if the vector that needs to be cleaned up is
		 * registered at the cpu's IRR. If so, then this is not
		 * the best time to clean it up. Let's clean it up in the
		 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
		 * to myself.
		 */
		if (irr & (1 << (vector % 32))) {
			apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
			goto unlock;
		}
		__this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
unlock:
		raw_spin_unlock(&desc->lock);
	}

	irq_exit();
}

static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
{
	unsigned me;

	if (likely(!cfg->move_in_progress))
		return;

	me = smp_processor_id();

	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
		send_cleanup_vector(cfg);
}

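/*
 * The interrupt entry stubs store the inverted vector number in
 * orig_ax, so ~orig_ax below recovers the vector being serviced.
 */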
void irq_complete_move(struct irq_cfg *cfg)
{
	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
}

void irq_force_complete_move(int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	if (!cfg)
		return;

	__irq_complete_move(cfg, cfg->vector);
}
#else
void irq_complete_move(struct irq_cfg *cfg) { }
#endif

/*
 * Dynamic irq allocation and deallocation. Should be replaced by irq domains!
 */
int arch_setup_hwirq(unsigned int irq, int node)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	int ret;

	cfg = alloc_irq_cfg(irq, node);
	if (!cfg)
		return -ENOMEM;

	raw_spin_lock_irqsave(&vector_lock, flags);
	ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
	raw_spin_unlock_irqrestore(&vector_lock, flags);

	if (!ret)
		irq_set_chip_data(irq, cfg);
	else
		free_irq_cfg(irq, cfg);
	return ret;
}

void arch_teardown_hwirq(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);

	free_remapped_irq(irq);
	clear_irq_vector(irq, cfg);
	free_irq_cfg(irq, cfg);
}

static void __init print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		pr_cont("%08x", apic_read(base + i*0x10));

	pr_cont("\n");
}

static void __init print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
	       smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n",
	       v, v & APIC_TPRI_MASK);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	/* !82489DX */
	if (APIC_INTEGRATED(ver)) {
		/* Due to the Pentium erratum 3AP. */
		if (maxlvt > 3)
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {
		/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {
		/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	pr_cont("\n");
}

static void __init print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

627 | |||
628 | static void __init print_PIC(void) | ||
629 | { | ||
630 | unsigned int v; | ||
631 | unsigned long flags; | ||
632 | |||
633 | if (!nr_legacy_irqs()) | ||
634 | return; | ||
635 | |||
636 | printk(KERN_DEBUG "\nprinting PIC contents\n"); | ||
637 | |||
638 | raw_spin_lock_irqsave(&i8259A_lock, flags); | ||
639 | |||
640 | v = inb(0xa1) << 8 | inb(0x21); | ||
641 | printk(KERN_DEBUG "... PIC IMR: %04x\n", v); | ||
642 | |||
643 | v = inb(0xa0) << 8 | inb(0x20); | ||
644 | printk(KERN_DEBUG "... PIC IRR: %04x\n", v); | ||
645 | |||
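	/* OCW3: writing 0x0b selects the ISR for reading, 0x0a the IRR. */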
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

static int show_lapic __initdata = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

static int __init print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APICs();

	return 0;
}

late_initcall(print_ICs);
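
For readers tracing the allocator, the vector walk in __assign_irq_vector() and the IRR indexing in smp_irq_move_cleanup_interrupt() are compact enough to simulate outside the kernel. The standalone C sketch below illustrates both. The constants and the starting point here are simplified stand-ins chosen for illustration (e.g. FIRST_SYSTEM_VECTOR approximates the kernel's runtime first_system_vector), not the kernel's actual definitions.

#include <stdio.h>

/* Illustrative stand-ins; not the kernel's definitions. */
#define FIRST_EXTERNAL_VECTOR	0x20
#define FIRST_SYSTEM_VECTOR	0xef

int main(void)
{
	/* Mirrors the static state kept by __assign_irq_vector(). */
	int current_vector = FIRST_EXTERNAL_VECTOR + 1;	/* arbitrary start */
	int current_offset = 1;
	int i, vec;

	/*
	 * Each candidate is 16 vectors past the previous one, so
	 * successive allocations land on different priority levels
	 * (level = vector >> 4).  Past the system vectors the walk
	 * wraps back to the base with the next offset, trying a new
	 * column within each level.
	 */
	for (i = 0; i < 18; i++) {
		int vector = current_vector + 16;

		if (vector >= FIRST_SYSTEM_VECTOR) {
			current_offset = (current_offset + 1) % 16;
			vector = FIRST_EXTERNAL_VECTOR + current_offset;
		}
		current_vector = vector;
		printf("candidate 0x%02x -> priority level %d\n",
		       vector, vector >> 4);
	}

	/*
	 * The IRR lookup in smp_irq_move_cleanup_interrupt(): the 256
	 * vectors are spread over eight 32-bit registers spaced 0x10
	 * apart, so a vector maps to register offset (vector / 32) * 0x10
	 * and to bit (vector % 32) within that register.
	 */
	vec = 0x41;
	printf("vector 0x%02x -> IRR register offset +0x%02x, bit %d\n",
	       vec, vec / 32 * 0x10, vec % 32);
	return 0;
}

Running the sketch shows each successive candidate landing one priority level above the previous one, which is exactly the spreading that the NOTE comment in __assign_irq_vector() describes.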