 27 files changed, 808 insertions(+), 417 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 5820eb0cd7e7..fce5b5e516cc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -344,6 +344,15 @@ and is between 256 and 4096 characters. It is defined in the file
 			Change the amount of debugging information output
 			when initialising the APIC and IO-APIC components.
 
+	show_lapic=	[APIC,X86] Advanced Programmable Interrupt Controller
+			Limit apic dumping. The parameter defines the maximum
+			number of local apics to be dumped. It can also be
+			set to "all", meaning no limit.
+			Format: { 1 (default) | 2 | ... | all }.
+			The parameter is only valid if apic=debug or
+			apic=verbose is specified.
+			Example: apic=debug show_lapic=all
+
 	apm=		[APM] Advanced Power Management
 			See header of arch/x86/kernel/apm_32.c.
 
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 474d80d3e6cc..b4ac2cdcb64f 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -297,20 +297,20 @@ struct apic {
 	int disable_esr;
 
 	int dest_logical;
-	unsigned long (*check_apicid_used)(physid_mask_t bitmap, int apicid);
+	unsigned long (*check_apicid_used)(physid_mask_t *map, int apicid);
 	unsigned long (*check_apicid_present)(int apicid);
 
 	void (*vector_allocation_domain)(int cpu, struct cpumask *retmask);
 	void (*init_apic_ldr)(void);
 
-	physid_mask_t (*ioapic_phys_id_map)(physid_mask_t map);
+	void (*ioapic_phys_id_map)(physid_mask_t *phys_map, physid_mask_t *retmap);
 
 	void (*setup_apic_routing)(void);
 	int (*multi_timer_check)(int apic, int irq);
 	int (*apicid_to_node)(int logical_apicid);
 	int (*cpu_to_logical_apicid)(int cpu);
 	int (*cpu_present_to_apicid)(int mps_cpu);
-	physid_mask_t (*apicid_to_cpu_present)(int phys_apicid);
+	void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
 	void (*setup_portio_remap)(void);
 	int (*check_phys_apicid_present)(int phys_apicid);
 	void (*enable_apic_mode)(void);
@@ -488,6 +488,8 @@ static inline unsigned int read_apic_id(void)
 
 extern void default_setup_apic_routing(void);
 
+extern struct apic apic_noop;
+
 #ifdef CONFIG_X86_32
 
 extern struct apic apic_default;
@@ -532,9 +534,9 @@ default_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 	return (unsigned int)(mask1 & mask2 & mask3);
 }
 
-static inline unsigned long default_check_apicid_used(physid_mask_t bitmap, int apicid)
+static inline unsigned long default_check_apicid_used(physid_mask_t *map, int apicid)
 {
-	return physid_isset(apicid, bitmap);
+	return physid_isset(apicid, *map);
 }
 
 static inline unsigned long default_check_apicid_present(int bit)
@@ -542,9 +544,9 @@ static inline unsigned long default_check_apicid_present(int bit)
 	return physid_isset(bit, phys_cpu_present_map);
 }
 
-static inline physid_mask_t default_ioapic_phys_id_map(physid_mask_t phys_map)
+static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 {
-	return phys_map;
+	*retmap = *phys_map;
 }
 
 /* Mapping from cpu number to logical apicid */
@@ -583,11 +585,6 @@ extern int default_cpu_present_to_apicid(int mps_cpu);
 extern int default_check_phys_apicid_present(int phys_apicid);
 #endif
 
-static inline physid_mask_t default_apicid_to_cpu_present(int phys_apicid)
-{
-	return physid_mask_of_physid(phys_apicid);
-}
-
 #endif /* CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_X86_32
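
The callback changes above set the pattern for the whole series: physid_mask_t values are now passed by reference instead of by value. A physid_mask_t wraps a bitmap sized by MAX_APICS (the mpspec.h hunk below even carries the comment "will create very large stack frames if physid_mask_t is big"), so returning one by value copies the whole bitmap through the stack on every call. A minimal sketch of the difference, assuming a 32768-bit mask as on 64-bit:

	#include <string.h>

	#define MAX_APICS 32768
	typedef struct {
		unsigned long mask[MAX_APICS / (8 * sizeof(unsigned long))];
	} physid_mask_t;			/* 4 KB under this assumption */

	/* old style: ~4 KB copied into and back out of the callee */
	physid_mask_t id_map_by_value(physid_mask_t phys_map)
	{
		return phys_map;
	}

	/* new style: two pointers cross the call boundary, one explicit copy */
	void id_map_by_ref(physid_mask_t *phys_map, physid_mask_t *retmap)
	{
		memcpy(retmap, phys_map, sizeof(*retmap));
	}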
diff --git a/arch/x86/include/asm/apicdef.h b/arch/x86/include/asm/apicdef.h
index 3b62da926de9..7fe3b3060f08 100644
--- a/arch/x86/include/asm/apicdef.h
+++ b/arch/x86/include/asm/apicdef.h
@@ -11,6 +11,12 @@
 #define IO_APIC_DEFAULT_PHYS_BASE	0xfec00000
 #define APIC_DEFAULT_PHYS_BASE		0xfee00000
 
+/*
+ * This is the IO-APIC register space as specified
+ * by Intel docs:
+ */
+#define IO_APIC_SLOT_SIZE		1024
+
 #define	APIC_ID		0x20
 
 #define	APIC_LVR	0x30
diff --git a/arch/x86/include/asm/apicnum.h b/arch/x86/include/asm/apicnum.h
deleted file mode 100644
index 82f613c607ce..000000000000
--- a/arch/x86/include/asm/apicnum.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#ifndef _ASM_X86_APICNUM_H
-#define _ASM_X86_APICNUM_H
-
-/* define MAX_IO_APICS */
-#ifdef CONFIG_X86_32
-# define MAX_IO_APICS 64
-#else
-# define MAX_IO_APICS 128
-# define MAX_LOCAL_APIC 32768
-#endif
-
-#endif /* _ASM_X86_APICNUM_H */
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index ba180d93b08c..6e124269fd4b 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -79,14 +79,32 @@ static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
 					int ioapic, int ioapic_pin,
 					int trigger, int polarity)
 {
 	irq_attr->ioapic	= ioapic;
 	irq_attr->ioapic_pin	= ioapic_pin;
 	irq_attr->trigger	= trigger;
 	irq_attr->polarity	= polarity;
 }
 
-extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin,
-				      struct io_apic_irq_attr *irq_attr);
+/*
+ * This is performance-critical, we want to do it O(1)
+ *
+ * Most irqs are mapped 1:1 with pins.
+ */
+struct irq_cfg {
+	struct irq_pin_list	*irq_2_pin;
+	cpumask_var_t		domain;
+	cpumask_var_t		old_domain;
+	u8			vector;
+	u8			move_in_progress : 1;
+};
+
+extern struct irq_cfg *irq_cfg(unsigned int);
+extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
+extern void send_cleanup_vector(struct irq_cfg *);
+
+struct irq_desc;
+extern unsigned int set_desc_affinity(struct irq_desc *, const struct cpumask *);
+extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
 extern void setup_ioapic_dest(void);
 
 extern void enable_IO_APIC(void);
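
struct irq_cfg and the assign_irq_vector()/send_cleanup_vector()/set_desc_affinity() helpers move out of io_apic.c so that code in other files (notably the UV irq code removed from io_apic.c near the end of this patch) can allocate vectors itself. A hedged sketch of how a caller combines them, modeled on the arch_enable_uv_irq() body deleted below; the function name is illustrative:

	/* bind 'irq' to 'cpu' and return the vector to program into the device */
	static int example_bind_irq(int irq, int cpu)
	{
		struct irq_cfg *cfg = irq_cfg(irq);
		int err = assign_irq_vector(irq, cfg, cpumask_of(cpu));

		if (err)
			return err;

		/* ... write cfg->vector into the device's routing entry ... */

		if (cfg->move_in_progress)	/* finish a pending migration */
			send_cleanup_vector(cfg);
		return cfg->vector;
	}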
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index ddda6cbed6f4..ffd700ff5dcb 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -34,6 +34,7 @@ static inline int irq_canonicalize(int irq)
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern void fixup_irqs(void);
+extern void irq_force_complete_move(int);
 #endif
 
 extern void (*generic_interrupt_extension)(void);
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 79c94500c0bb..61d90b1331c3 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -163,14 +163,16 @@ typedef struct physid_mask physid_mask_t;
 #define physids_shift_left(d, s, n)				\
 	bitmap_shift_left((d).mask, (s).mask, n, MAX_APICS)
 
-#define physids_coerce(map)			((map).mask[0])
+static inline unsigned long physids_coerce(physid_mask_t *map)
+{
+	return map->mask[0];
+}
 
-#define physids_promote(physids)					\
-	({								\
-		physid_mask_t __physid_mask = PHYSID_MASK_NONE;		\
-		__physid_mask.mask[0] = physids;			\
-		__physid_mask;						\
-	})
+static inline void physids_promote(unsigned long physids, physid_mask_t *map)
+{
+	physids_clear(*map);
+	map->mask[0] = physids;
+}
 
 /* Note: will create very large stack frames if physid_mask_t is big */
 #define physid_mask_of_physid(physid)					\
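
physids_coerce() and physids_promote() translate between a physid_mask_t and its first word; as inline functions they gain type checking and lose the statement-expression temporary (the old macro built a full PHYSID_MASK_NONE copy on the stack). Usage, as in the ioapic_phys_id_map hunks below that build a mask covering APIC IDs 0-7:

	physid_mask_t retmap;

	physids_promote(0xFFL, &retmap);	/* clear retmap, then set bits 0..7 */

	/* round-trip back to the low word */
	unsigned long low = physids_coerce(&retmap);	/* low == 0xFF */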
diff --git a/arch/x86/include/asm/uv/uv_irq.h b/arch/x86/include/asm/uv/uv_irq.h
index 9613c8c0b647..d6b17c760622 100644
--- a/arch/x86/include/asm/uv/uv_irq.h
+++ b/arch/x86/include/asm/uv/uv_irq.h
@@ -25,12 +25,14 @@ struct uv_IO_APIC_route_entry {
 	dest		: 32;
 };
 
-extern struct irq_chip uv_irq_chip;
-
-extern int arch_enable_uv_irq(char *, unsigned int, int, int, unsigned long);
-extern void arch_disable_uv_irq(int, unsigned long);
+enum {
+	UV_AFFINITY_ALL,
+	UV_AFFINITY_NODE,
+	UV_AFFINITY_CPU
+};
 
-extern int uv_setup_irq(char *, int, int, unsigned long);
-extern void uv_teardown_irq(unsigned int, int, unsigned long);
+extern int uv_irq_2_mmr_info(int, unsigned long *, int *);
+extern int uv_setup_irq(char *, int, int, unsigned long, int);
+extern void uv_teardown_irq(unsigned int);
 
 #endif /* _ASM_X86_UV_UV_IRQ_H */
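
uv_setup_irq() gains a final argument saying how tightly the irq's affinity is bound, one of the UV_AFFINITY_* values above, and uv_teardown_irq() now needs only the irq number, since uv_irq_2_mmr_info() can look the MMR data up internally. Only the types appear in this header, so the argument names in this sketch are assumptions:

	/* irq pinned to 'cpu'; affinity changes restricted to that cpu */
	irq = uv_setup_irq("uv-example", cpu, mmr_blade, mmr_offset,
			   UV_AFFINITY_CPU);
	...
	uv_teardown_irq(irq);	/* blade/offset no longer passed back in */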
diff --git a/arch/x86/kernel/apic/Makefile b/arch/x86/kernel/apic/Makefile
index da7b7b9f8bd8..565c1bfc507d 100644
--- a/arch/x86/kernel/apic/Makefile
+++ b/arch/x86/kernel/apic/Makefile
@@ -2,7 +2,7 @@
 # Makefile for local APIC drivers and for the IO-APIC code
 #
 
-obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o probe_$(BITS).o ipi.o nmi.o
+obj-$(CONFIG_X86_LOCAL_APIC)	+= apic.o apic_noop.o probe_$(BITS).o ipi.o nmi.o
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic.o
 obj-$(CONFIG_SMP)		+= ipi.o
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 894aa97f0717..ad8c75b9e453 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -241,28 +241,13 @@ static int modern_apic(void)
 }
 
 /*
- * bare function to substitute write operation
- * and it's _that_ fast :)
- */
-static void native_apic_write_dummy(u32 reg, u32 v)
-{
-	WARN_ON_ONCE((cpu_has_apic || !disable_apic));
-}
-
-static u32 native_apic_read_dummy(u32 reg)
-{
-	WARN_ON_ONCE((cpu_has_apic && !disable_apic));
-	return 0;
-}
-
-/*
- * right after this call apic->write/read doesn't do anything
- * note that there is no restore operation it works one way
+ * right after this call the apic becomes NOOP driven,
+ * so apic->write/read don't do anything
  */
 void apic_disable(void)
 {
-	apic->read = native_apic_read_dummy;
-	apic->write = native_apic_write_dummy;
+	pr_info("APIC: switched to apic NOOP\n");
+	apic = &apic_noop;
 }
 
 void native_apic_wait_icr_idle(void)
@@ -459,7 +444,7 @@ static void lapic_timer_setup(enum clock_event_mode mode,
 		v = apic_read(APIC_LVTT);
 		v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
 		apic_write(APIC_LVTT, v);
-		apic_write(APIC_TMICT, 0xffffffff);
+		apic_write(APIC_TMICT, 0);
 		break;
 	case CLOCK_EVT_MODE_RESUME:
 		/* Nothing to do here */
@@ -1392,14 +1377,11 @@ void __init enable_IR_x2apic(void)
 	unsigned long flags;
 	struct IO_APIC_route_entry **ioapic_entries = NULL;
 	int ret, x2apic_enabled = 0;
-	int dmar_table_init_ret = 0;
+	int dmar_table_init_ret;
 
-#ifdef CONFIG_INTR_REMAP
 	dmar_table_init_ret = dmar_table_init();
-	if (dmar_table_init_ret)
-		pr_debug("dmar_table_init() failed with %d:\n",
-				dmar_table_init_ret);
-#endif
+	if (dmar_table_init_ret && !x2apic_supported())
+		return;
 
 	ioapic_entries = alloc_ioapic_entries();
 	if (!ioapic_entries) {
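
The win here is that apic is a single driver pointer: instead of patching two methods with dummies (and warning only when those two are called), apic_disable() retargets every apic->method at once by pointing apic at the complete no-op driver added below. The pattern, reduced to a standalone sketch (struct and names simplified from the real struct apic):

	struct apic_driver {
		u32  (*read)(u32 reg);
		void (*write)(u32 reg, u32 v);
		/* ... the real struct apic has ~40 more methods ... */
	};

	static u32  noop_read(u32 reg)         { return 0; }
	static void noop_write(u32 reg, u32 v) { }

	static struct apic_driver noop = { noop_read, noop_write };
	static struct apic_driver *apic;	/* normally points at the real driver */

	static void apic_disable_sketch(void)
	{
		apic = &noop;	/* one assignment swaps the whole method table */
	}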
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
new file mode 100644
index 000000000000..d9acc3bee0f4
--- /dev/null
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -0,0 +1,200 @@
+/*
+ * NOOP APIC driver.
+ *
+ * Does almost nothing and should be substituted by a real apic driver via
+ * the probe routine.
+ *
+ * Though if the apic is disabled (for some reason), we try
+ * not to uglify the callers' code and still allow calling (some)
+ * apic routines, like self-ipi, etc...
+ */
+
+#include <linux/threads.h>
+#include <linux/cpumask.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/ctype.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <asm/fixmap.h>
+#include <asm/mpspec.h>
+#include <asm/apicdef.h>
+#include <asm/apic.h>
+#include <asm/setup.h>
+
+#include <linux/smp.h>
+#include <asm/ipi.h>
+
+#include <linux/interrupt.h>
+#include <asm/acpi.h>
+#include <asm/e820.h>
+
+static void noop_init_apic_ldr(void) { }
+static void noop_send_IPI_mask(const struct cpumask *cpumask, int vector) { }
+static void noop_send_IPI_mask_allbutself(const struct cpumask *cpumask, int vector) { }
+static void noop_send_IPI_allbutself(int vector) { }
+static void noop_send_IPI_all(int vector) { }
+static void noop_send_IPI_self(int vector) { }
+static void noop_apic_wait_icr_idle(void) { }
+static void noop_apic_icr_write(u32 low, u32 id) { }
+
+static int noop_wakeup_secondary_cpu(int apicid, unsigned long start_eip)
+{
+	return -1;
+}
+
+static u32 noop_safe_apic_wait_icr_idle(void)
+{
+	return 0;
+}
+
+static u64 noop_apic_icr_read(void)
+{
+	return 0;
+}
+
+static int noop_cpu_to_logical_apicid(int cpu)
+{
+	return 0;
+}
+
+static int noop_phys_pkg_id(int cpuid_apic, int index_msb)
+{
+	return 0;
+}
+
+static unsigned int noop_get_apic_id(unsigned long x)
+{
+	return 0;
+}
+
+static int noop_probe(void)
+{
+	/*
+	 * The NOOP apic should never be
+	 * enabled via the probe routine
+	 */
+	return 0;
+}
+
+static int noop_apic_id_registered(void)
+{
+	/*
+	 * if we were really "pedantic" we would
+	 * pass read_apic_id() here, but since
+	 * NOOP supposes APIC ID = 0, let's
+	 * save a few cycles
+	 */
+	return physid_isset(0, phys_cpu_present_map);
+}
+
+static const struct cpumask *noop_target_cpus(void)
+{
+	/* only BSP here */
+	return cpumask_of(0);
+}
+
+static unsigned long noop_check_apicid_used(physid_mask_t *map, int apicid)
+{
+	return physid_isset(apicid, *map);
+}
+
+static unsigned long noop_check_apicid_present(int bit)
+{
+	return physid_isset(bit, phys_cpu_present_map);
+}
+
+static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
+{
+	if (cpu != 0)
+		pr_warning("APIC: Vector allocated for non-BSP cpu\n");
+	cpumask_clear(retmask);
+	cpumask_set_cpu(cpu, retmask);
+}
+
+int noop_apicid_to_node(int logical_apicid)
+{
+	/* we're always on node 0 */
+	return 0;
+}
+
+static u32 noop_apic_read(u32 reg)
+{
+	WARN_ON_ONCE((cpu_has_apic && !disable_apic));
+	return 0;
+}
+
+static void noop_apic_write(u32 reg, u32 v)
+{
+	WARN_ON_ONCE((cpu_has_apic || !disable_apic));
+}
+
+struct apic apic_noop = {
+	.name				= "noop",
+	.probe				= noop_probe,
+	.acpi_madt_oem_check		= NULL,
+
+	.apic_id_registered		= noop_apic_id_registered,
+
+	.irq_delivery_mode		= dest_LowestPrio,
+	/* logical delivery broadcast to all CPUs: */
+	.irq_dest_mode			= 1,
+
+	.target_cpus			= noop_target_cpus,
+	.disable_esr			= 0,
+	.dest_logical			= APIC_DEST_LOGICAL,
+	.check_apicid_used		= noop_check_apicid_used,
+	.check_apicid_present		= noop_check_apicid_present,
+
+	.vector_allocation_domain	= noop_vector_allocation_domain,
+	.init_apic_ldr			= noop_init_apic_ldr,
+
+	.ioapic_phys_id_map		= default_ioapic_phys_id_map,
+	.setup_apic_routing		= NULL,
+	.multi_timer_check		= NULL,
+	.apicid_to_node			= noop_apicid_to_node,
+
+	.cpu_to_logical_apicid		= noop_cpu_to_logical_apicid,
+	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
+	.apicid_to_cpu_present		= physid_set_mask_of_physid,
+
+	.setup_portio_remap		= NULL,
+	.check_phys_apicid_present	= default_check_phys_apicid_present,
+	.enable_apic_mode		= NULL,
+
+	.phys_pkg_id			= noop_phys_pkg_id,
+
+	.mps_oem_check			= NULL,
+
+	.get_apic_id			= noop_get_apic_id,
+	.set_apic_id			= NULL,
+	.apic_id_mask			= 0x0F << 24,
+
+	.cpu_mask_to_apicid		= default_cpu_mask_to_apicid,
+	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,
+
+	.send_IPI_mask			= noop_send_IPI_mask,
+	.send_IPI_mask_allbutself	= noop_send_IPI_mask_allbutself,
+	.send_IPI_allbutself		= noop_send_IPI_allbutself,
+	.send_IPI_all			= noop_send_IPI_all,
+	.send_IPI_self			= noop_send_IPI_self,
+
+	.wakeup_secondary_cpu		= noop_wakeup_secondary_cpu,
+
+	/* should be safe */
+	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
+	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
+
+	.wait_for_init_deassert		= NULL,
+
+	.smp_callin_clear_local_apic	= NULL,
+	.inquire_remote_apic		= NULL,
+
+	.read				= noop_apic_read,
+	.write				= noop_apic_write,
+	.icr_read			= noop_apic_icr_read,
+	.icr_write			= noop_apic_icr_write,
+	.wait_icr_idle			= noop_apic_wait_icr_idle,
+	.safe_wait_icr_idle		= noop_safe_apic_wait_icr_idle,
+};
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 77a06413b6b2..38dcecfa5818 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -35,7 +35,7 @@ static const struct cpumask *bigsmp_target_cpus(void)
 #endif
 }
 
-static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
+static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
 {
 	return 0;
 }
@@ -93,11 +93,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu)
 	return BAD_APICID;
 }
 
-static physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
-{
-	return physid_mask_of_physid(phys_apicid);
-}
-
 /* Mapping from cpu number to logical apicid */
 static inline int bigsmp_cpu_to_logical_apicid(int cpu)
 {
@@ -106,10 +101,10 @@ static inline int bigsmp_cpu_to_logical_apicid(int cpu)
 	return cpu_physical_id(cpu);
 }
 
-static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
+static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 {
 	/* For clustered we don't have a good way to do this yet - hack */
-	return physids_promote(0xFFL);
+	physids_promote(0xFFL, retmap);
 }
 
 static int bigsmp_check_phys_apicid_present(int phys_apicid)
@@ -230,7 +225,7 @@ struct apic apic_bigsmp = {
 	.apicid_to_node			= bigsmp_apicid_to_node,
 	.cpu_to_logical_apicid		= bigsmp_cpu_to_logical_apicid,
 	.cpu_present_to_apicid		= bigsmp_cpu_present_to_apicid,
-	.apicid_to_cpu_present		= bigsmp_apicid_to_cpu_present,
+	.apicid_to_cpu_present		= physid_set_mask_of_physid,
 	.setup_portio_remap		= NULL,
 	.check_phys_apicid_present	= bigsmp_check_phys_apicid_present,
 	.enable_apic_mode		= NULL,
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 89174f847b49..e85f8fb7f8e7 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -466,11 +466,11 @@ static const struct cpumask *es7000_target_cpus(void)
 	return cpumask_of(smp_processor_id());
 }
 
-static unsigned long
-es7000_check_apicid_used(physid_mask_t bitmap, int apicid)
+static unsigned long es7000_check_apicid_used(physid_mask_t *map, int apicid)
 {
 	return 0;
 }
+
 static unsigned long es7000_check_apicid_present(int bit)
 {
 	return physid_isset(bit, phys_cpu_present_map);
@@ -539,14 +539,10 @@ static int es7000_cpu_present_to_apicid(int mps_cpu)
 
 static int cpu_id;
 
-static physid_mask_t es7000_apicid_to_cpu_present(int phys_apicid)
+static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap)
 {
-	physid_mask_t mask;
-
-	mask = physid_mask_of_physid(cpu_id);
+	physid_set_mask_of_physid(cpu_id, retmap);
 	++cpu_id;
-
-	return mask;
 }
 
 /* Mapping from cpu number to logical apicid */
@@ -561,10 +557,10 @@ static int es7000_cpu_to_logical_apicid(int cpu)
 #endif
 }
 
-static physid_mask_t es7000_ioapic_phys_id_map(physid_mask_t phys_map)
+static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 {
 	/* For clustered we don't have a good way to do this yet - hack */
-	return physids_promote(0xff);
+	physids_promote(0xFFL, retmap);
 }
 
 static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index dc69f28489f5..c0b4468683f9 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -60,8 +60,6 @@
 #include <asm/irq_remapping.h>
 #include <asm/hpet.h>
 #include <asm/hw_irq.h>
-#include <asm/uv/uv_hub.h>
-#include <asm/uv/uv_irq.h>
 
 #include <asm/apic.h>
 
@@ -140,20 +138,6 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node)
 	return pin;
 }
 
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * Most irqs are mapped 1:1 with pins.
- */
-struct irq_cfg {
-	struct irq_pin_list	*irq_2_pin;
-	cpumask_var_t		domain;
-	cpumask_var_t		old_domain;
-	unsigned		move_cleanup_count;
-	u8			vector;
-	u8			move_in_progress : 1;
-};
-
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
 #ifdef CONFIG_SPARSE_IRQ
 static struct irq_cfg irq_cfgx[] = {
@@ -209,7 +193,7 @@ int __init arch_early_irq_init(void)
 }
 
 #ifdef CONFIG_SPARSE_IRQ
-static struct irq_cfg *irq_cfg(unsigned int irq)
+struct irq_cfg *irq_cfg(unsigned int irq)
 {
 	struct irq_cfg *cfg = NULL;
 	struct irq_desc *desc;
@@ -361,7 +345,7 @@ void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
 /* end for move_irq_desc */
 
 #else
-static struct irq_cfg *irq_cfg(unsigned int irq)
+struct irq_cfg *irq_cfg(unsigned int irq)
 {
 	return irq < nr_irqs ? irq_cfgx + irq : NULL;
 }
@@ -555,23 +539,41 @@ static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
 	add_pin_to_irq_node(cfg, node, newapic, newpin);
 }
 
+static void __io_apic_modify_irq(struct irq_pin_list *entry,
+				 int mask_and, int mask_or,
+				 void (*final)(struct irq_pin_list *entry))
+{
+	unsigned int reg, pin;
+
+	pin = entry->pin;
+	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
+	reg &= mask_and;
+	reg |= mask_or;
+	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
+	if (final)
+		final(entry);
+}
+
 static void io_apic_modify_irq(struct irq_cfg *cfg,
 			       int mask_and, int mask_or,
 			       void (*final)(struct irq_pin_list *entry))
 {
-	int pin;
 	struct irq_pin_list *entry;
 
-	for_each_irq_pin(entry, cfg->irq_2_pin) {
-		unsigned int reg;
-		pin = entry->pin;
-		reg = io_apic_read(entry->apic, 0x10 + pin * 2);
-		reg &= mask_and;
-		reg |= mask_or;
-		io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
-		if (final)
-			final(entry);
-	}
+	for_each_irq_pin(entry, cfg->irq_2_pin)
+		__io_apic_modify_irq(entry, mask_and, mask_or, final);
+}
+
+static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
+{
+	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
+			     IO_APIC_REDIR_MASKED, NULL);
+}
+
+static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
+{
+	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
+			     IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
 }
 
 static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
@@ -595,18 +597,6 @@ static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
 	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
 }
 
-static void __mask_and_edge_IO_APIC_irq(struct irq_cfg *cfg)
-{
-	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_LEVEL_TRIGGER,
-			   IO_APIC_REDIR_MASKED, NULL);
-}
-
-static void __unmask_and_level_IO_APIC_irq(struct irq_cfg *cfg)
-{
-	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED,
-			   IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
-}
-
 static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
 {
 	struct irq_cfg *cfg = desc->chip_data;
@@ -1177,7 +1167,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	int cpu, err;
 	cpumask_var_t tmp_mask;
 
-	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
+	if (cfg->move_in_progress)
 		return -EBUSY;
 
 	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
@@ -1237,8 +1227,7 @@ next:
 	return err;
 }
 
-static int
-assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 {
 	int err;
 	unsigned long flags;
@@ -1599,9 +1588,6 @@ __apicdebuginit(void) print_IO_APIC(void)
 	struct irq_desc *desc;
 	unsigned int irq;
 
-	if (apic_verbosity == APIC_QUIET)
-		return;
-
 	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
 	for (i = 0; i < nr_ioapics; i++)
 		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
@@ -1708,9 +1694,6 @@ __apicdebuginit(void) print_APIC_field(int base)
 {
 	int i;
 
-	if (apic_verbosity == APIC_QUIET)
-		return;
-
 	printk(KERN_DEBUG);
 
 	for (i = 0; i < 8; i++)
@@ -1724,9 +1707,6 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
 	unsigned int i, v, ver, maxlvt;
 	u64 icr;
 
-	if (apic_verbosity == APIC_QUIET)
-		return;
-
 	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
 		smp_processor_id(), hard_smp_processor_id());
 	v = apic_read(APIC_ID);
@@ -1824,13 +1804,19 @@ __apicdebuginit(void) print_local_APIC(void *dummy)
 	printk("\n");
 }
 
-__apicdebuginit(void) print_all_local_APICs(void)
+__apicdebuginit(void) print_local_APICs(int maxcpu)
 {
 	int cpu;
 
+	if (!maxcpu)
+		return;
+
 	preempt_disable();
-	for_each_online_cpu(cpu)
+	for_each_online_cpu(cpu) {
+		if (cpu >= maxcpu)
+			break;
 		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
+	}
 	preempt_enable();
 }
 
@@ -1839,7 +1825,7 @@ __apicdebuginit(void) print_PIC(void)
 	unsigned int v;
 	unsigned long flags;
 
-	if (apic_verbosity == APIC_QUIET || !nr_legacy_irqs)
+	if (!nr_legacy_irqs)
 		return;
 
 	printk(KERN_DEBUG "\nprinting PIC contents\n");
@@ -1866,21 +1852,41 @@ __apicdebuginit(void) print_PIC(void)
 	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
 }
 
-__apicdebuginit(int) print_all_ICs(void)
+static int __initdata show_lapic = 1;
+static __init int setup_show_lapic(char *arg)
 {
+	int num = -1;
+
+	if (strcmp(arg, "all") == 0) {
+		show_lapic = CONFIG_NR_CPUS;
+	} else {
+		get_option(&arg, &num);
+		if (num >= 0)
+			show_lapic = num;
+	}
+
+	return 1;
+}
+__setup("show_lapic=", setup_show_lapic);
+
+__apicdebuginit(int) print_ICs(void)
+{
+	if (apic_verbosity == APIC_QUIET)
+		return 0;
+
 	print_PIC();
 
 	/* don't print out if apic is not there */
 	if (!cpu_has_apic && !apic_from_smp_config())
 		return 0;
 
-	print_all_local_APICs();
+	print_local_APICs(show_lapic);
 	print_IO_APIC();
 
 	return 0;
 }
 
-fs_initcall(print_all_ICs);
+fs_initcall(print_ICs);
 
 
 /* Where if anywhere is the i8259 connect in external int mode */
@@ -2031,7 +2037,7 @@ void __init setup_ioapic_ids_from_mpc(void)
 	 * This is broken; anything with a real cpu count has to
 	 * circumvent this idiocy regardless.
 	 */
-	phys_id_present_map = apic->ioapic_phys_id_map(phys_cpu_present_map);
+	apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);
 
 	/*
 	 * Set the IOAPIC ID to the value stored in the MPC table.
@@ -2058,7 +2064,7 @@ void __init setup_ioapic_ids_from_mpc(void)
 		 * system must have a unique ID or we get lots of nice
 		 * 'stuck on smp_invalidate_needed IPI wait' messages.
 		 */
-		if (apic->check_apicid_used(phys_id_present_map,
+		if (apic->check_apicid_used(&phys_id_present_map,
 					mp_ioapics[apic_id].apicid)) {
 			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
 				apic_id, mp_ioapics[apic_id].apicid);
@@ -2073,7 +2079,7 @@ void __init setup_ioapic_ids_from_mpc(void)
 			mp_ioapics[apic_id].apicid = i;
 		} else {
 			physid_mask_t tmp;
-			tmp = apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid);
+			apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
 			apic_printk(APIC_VERBOSE, "Setting %d in the "
 					"phys_id_present_map\n",
 					mp_ioapics[apic_id].apicid);
@@ -2228,20 +2234,16 @@ static int ioapic_retrigger_irq(unsigned int irq)
  */
 
 #ifdef CONFIG_SMP
-static void send_cleanup_vector(struct irq_cfg *cfg)
+void send_cleanup_vector(struct irq_cfg *cfg)
 {
 	cpumask_var_t cleanup_mask;
 
 	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
 		unsigned int i;
-		cfg->move_cleanup_count = 0;
-		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
-			cfg->move_cleanup_count++;
 		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
 			apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
 	} else {
 		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
-		cfg->move_cleanup_count = cpumask_weight(cleanup_mask);
 		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
 		free_cpumask_var(cleanup_mask);
 	}
@@ -2272,15 +2274,12 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
 	}
 }
 
-static int
-assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask);
-
 /*
  * Either sets desc->affinity to a valid value, and returns
  * ->cpu_mask_to_apicid of that, or returns BAD_APICID and
  * leaves desc->affinity untouched.
  */
-static unsigned int
+unsigned int
 set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask)
 {
 	struct irq_cfg *cfg;
@@ -2433,8 +2432,6 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 
 		cfg = irq_cfg(irq);
 		spin_lock(&desc->lock);
-		if (!cfg->move_cleanup_count)
-			goto unlock;
 
 		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 			goto unlock;
@@ -2452,7 +2449,6 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
 			goto unlock;
 		}
 		__get_cpu_var(vector_irq)[vector] = -1;
-		cfg->move_cleanup_count--;
 unlock:
 		spin_unlock(&desc->lock);
 	}
@@ -2460,21 +2456,33 @@ unlock:
 	irq_exit();
 }
 
-static void irq_complete_move(struct irq_desc **descp)
+static void __irq_complete_move(struct irq_desc **descp, unsigned vector)
 {
 	struct irq_desc *desc = *descp;
 	struct irq_cfg *cfg = desc->chip_data;
-	unsigned vector, me;
+	unsigned me;
 
 	if (likely(!cfg->move_in_progress))
 		return;
 
-	vector = ~get_irq_regs()->orig_ax;
 	me = smp_processor_id();
 
 	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
 		send_cleanup_vector(cfg);
 }
+
+static void irq_complete_move(struct irq_desc **descp)
+{
+	__irq_complete_move(descp, ~get_irq_regs()->orig_ax);
+}
+
+void irq_force_complete_move(int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_cfg *cfg = desc->chip_data;
+
+	__irq_complete_move(&desc, cfg->vector);
+}
 #else
 static inline void irq_complete_move(struct irq_desc **descp) {}
 #endif
@@ -2490,6 +2498,59 @@ static void ack_apic_edge(unsigned int irq)
 
 atomic_t irq_mis_count;
 
+/*
+ * IO-APIC versions below 0x20 don't support the EOI register.
+ * For the record, here is the information about the various versions:
+ *     0Xh     82489DX
+ *     1Xh     I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
+ *     2Xh     I/O(x)APIC which is PCI 2.2 Compliant
+ *     30h-FFh Reserved
+ *
+ * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic
+ * version as 0x2. This is an error in the documentation; these ICH
+ * chips use io-apics of version 0x20.
+ *
+ * For IO-APIC's with EOI register, we use that to do an explicit EOI.
+ * Otherwise, we simulate the EOI message manually by changing the trigger
+ * mode to edge and then back to level, with RTE being masked during this.
+ */
+static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
+{
+	struct irq_pin_list *entry;
+
+	for_each_irq_pin(entry, cfg->irq_2_pin) {
+		if (mp_ioapics[entry->apic].apicver >= 0x20) {
+			/*
+			 * Intr-remapping uses pin number as the virtual vector
+			 * in the RTE. Actual vector is programmed in
+			 * intr-remapping table entry. Hence for the io-apic
+			 * EOI we use the pin number.
+			 */
+			if (irq_remapped(irq))
+				io_apic_eoi(entry->apic, entry->pin);
+			else
+				io_apic_eoi(entry->apic, cfg->vector);
+		} else {
+			__mask_and_edge_IO_APIC_irq(entry);
+			__unmask_and_level_IO_APIC_irq(entry);
+		}
+	}
+}
+
+static void eoi_ioapic_irq(struct irq_desc *desc)
+{
+	struct irq_cfg *cfg;
+	unsigned long flags;
+	unsigned int irq;
+
+	irq = desc->irq;
+	cfg = desc->chip_data;
+
+	spin_lock_irqsave(&ioapic_lock, flags);
+	__eoi_ioapic_irq(irq, cfg);
+	spin_unlock_irqrestore(&ioapic_lock, flags);
+}
+
 static void ack_apic_level(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -2525,6 +2586,19 @@ static void ack_apic_level(unsigned int irq)
 	 * level-triggered interrupt. We mask the source for the time of the
 	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
 	 * The idea is from Manfred Spraul. --macro
+	 *
+	 * Also, when a cpu goes offline, fixup_irqs() will forward any
+	 * unhandled interrupt on the offlined cpu to the new cpu
+	 * destination that is handling the corresponding interrupt. This
+	 * interrupt forwarding is done via IPIs. Hence, in this case too
+	 * the level-triggered io-apic interrupt will be seen as an edge
+	 * interrupt in the IRR. And we can't rely on the cpu's EOI
+	 * being broadcast to the IO-APICs to clear the remote IRR
+	 * corresponding to the level-triggered interrupt. Hence on IO-APICs
+	 * supporting the EOI register, we do an explicit EOI to clear the
+	 * remote IRR, and on IO-APICs which don't have an EOI register,
+	 * we use the above logic (mask+edge followed by unmask+level) from
+	 * Manfred Spraul to clear the remote IRR.
 	 */
 	cfg = desc->chip_data;
 	i = cfg->vector;
@@ -2536,6 +2610,19 @@ static void ack_apic_level(unsigned int irq)
 	 */
 	ack_APIC_irq();
 
+	/*
+	 * Tail end of clearing the remote IRR bit (either by delivering the
+	 * EOI message via an io-apic EOI register write or by simulating it
+	 * using the mask+edge followed by unmask+level logic) manually when
+	 * the level triggered interrupt is seen as an edge triggered
+	 * interrupt at the cpu.
+	 */
+	if (!(v & (1 << (i & 0x1f)))) {
+		atomic_inc(&irq_mis_count);
+
+		eoi_ioapic_irq(desc);
+	}
+
 	/* Now we can move and renable the irq */
 	if (unlikely(do_unmask_irq)) {
 		/* Only migrate the irq if the ack has been received.
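
For reference, the v and i tested in the added hunk are set earlier in ack_apic_level() (context not shown here): i is the irq's vector and v is the 32-bit word of the local APIC Trigger Mode Register that covers it, read in the kernel as apic_read(APIC_TMR + ((i & ~0x1f) >> 1)). The indexing arithmetic, spelled out as a sketch:

	/* 256 vectors are spread over eight 32-bit TMR words placed 0x10 apart */
	unsigned int i    = cfg->vector;
	unsigned int word = i >> 5;		/* which TMR register        */
	unsigned int bit  = i & 0x1f;		/* which bit inside the word */

	/* (i & ~0x1f) >> 1 == word * 0x10, the register offset */
	u32 v = apic_read(APIC_TMR + word * 0x10);

	if (!(v & (1u << bit))) {
		/* TMR says edge: the level irq was latched as an edge, so
		 * the remote IRR must be cleared by hand, via eoi_ioapic_irq() */
	}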
@@ -2569,41 +2656,9 @@ static void ack_apic_level(unsigned int irq)
 		move_masked_irq(irq);
 		unmask_IO_APIC_irq_desc(desc);
 	}
-
-	/* Tail end of version 0x11 I/O APIC bug workaround */
-	if (!(v & (1 << (i & 0x1f)))) {
-		atomic_inc(&irq_mis_count);
-		spin_lock(&ioapic_lock);
-		__mask_and_edge_IO_APIC_irq(cfg);
-		__unmask_and_level_IO_APIC_irq(cfg);
-		spin_unlock(&ioapic_lock);
-	}
 }
 
 #ifdef CONFIG_INTR_REMAP
-static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
-{
-	struct irq_pin_list *entry;
-
-	for_each_irq_pin(entry, cfg->irq_2_pin)
-		io_apic_eoi(entry->apic, entry->pin);
-}
-
-static void
-eoi_ioapic_irq(struct irq_desc *desc)
-{
-	struct irq_cfg *cfg;
-	unsigned long flags;
-	unsigned int irq;
-
-	irq = desc->irq;
-	cfg = desc->chip_data;
-
-	spin_lock_irqsave(&ioapic_lock, flags);
-	__eoi_ioapic_irq(irq, cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
-}
-
 static void ir_ack_apic_edge(unsigned int irq)
 {
 	ack_APIC_irq();
| @@ -3157,6 +3212,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node) | |||
| 3157 | continue; | 3212 | continue; |
| 3158 | 3213 | ||
| 3159 | desc_new = move_irq_desc(desc_new, node); | 3214 | desc_new = move_irq_desc(desc_new, node); |
| 3215 | cfg_new = desc_new->chip_data; | ||
| 3160 | 3216 | ||
| 3161 | if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) | 3217 | if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0) |
| 3162 | irq = new; | 3218 | irq = new; |
| @@ -3708,75 +3764,6 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev) | |||
| 3708 | } | 3764 | } |
| 3709 | #endif /* CONFIG_HT_IRQ */ | 3765 | #endif /* CONFIG_HT_IRQ */ |
| 3710 | 3766 | ||
| 3711 | #ifdef CONFIG_X86_UV | ||
| 3712 | /* | ||
| 3713 | * Re-target the irq to the specified CPU and enable the specified MMR located | ||
| 3714 | * on the specified blade to allow the sending of MSIs to the specified CPU. | ||
| 3715 | */ | ||
| 3716 | int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | ||
| 3717 | unsigned long mmr_offset) | ||
| 3718 | { | ||
| 3719 | const struct cpumask *eligible_cpu = cpumask_of(cpu); | ||
| 3720 | struct irq_cfg *cfg; | ||
| 3721 | int mmr_pnode; | ||
| 3722 | unsigned long mmr_value; | ||
| 3723 | struct uv_IO_APIC_route_entry *entry; | ||
| 3724 | unsigned long flags; | ||
| 3725 | int err; | ||
| 3726 | |||
| 3727 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); | ||
| 3728 | |||
| 3729 | cfg = irq_cfg(irq); | ||
| 3730 | |||
| 3731 | err = assign_irq_vector(irq, cfg, eligible_cpu); | ||
| 3732 | if (err != 0) | ||
| 3733 | return err; | ||
| 3734 | |||
| 3735 | spin_lock_irqsave(&vector_lock, flags); | ||
| 3736 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, | ||
| 3737 | irq_name); | ||
| 3738 | spin_unlock_irqrestore(&vector_lock, flags); | ||
| 3739 | |||
| 3740 | mmr_value = 0; | ||
| 3741 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
| 3742 | entry->vector = cfg->vector; | ||
| 3743 | entry->delivery_mode = apic->irq_delivery_mode; | ||
| 3744 | entry->dest_mode = apic->irq_dest_mode; | ||
| 3745 | entry->polarity = 0; | ||
| 3746 | entry->trigger = 0; | ||
| 3747 | entry->mask = 0; | ||
| 3748 | entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); | ||
| 3749 | |||
| 3750 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | ||
| 3751 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
| 3752 | |||
| 3753 | if (cfg->move_in_progress) | ||
| 3754 | send_cleanup_vector(cfg); | ||
| 3755 | |||
| 3756 | return irq; | ||
| 3757 | } | ||
| 3758 | |||
| 3759 | /* | ||
| 3760 | * Disable the specified MMR located on the specified blade so that MSIs | ||
| 3761 | * are no longer allowed to be sent. | ||
| 3762 | */ | ||
| 3763 | void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset) | ||
| 3764 | { | ||
| 3765 | unsigned long mmr_value; | ||
| 3766 | struct uv_IO_APIC_route_entry *entry; | ||
| 3767 | int mmr_pnode; | ||
| 3768 | |||
| 3769 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long)); | ||
| 3770 | |||
| 3771 | mmr_value = 0; | ||
| 3772 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
| 3773 | entry->mask = 1; | ||
| 3774 | |||
| 3775 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | ||
| 3776 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
| 3777 | } | ||
| 3778 | #endif /* CONFIG_X86_UV */ | ||
| 3779 | |||
| 3780 | int __init io_apic_get_redir_entries (int ioapic) | 3767 | int __init io_apic_get_redir_entries (int ioapic) |
| 3781 | { | 3768 | { |
| 3782 | union IO_APIC_reg_01 reg_01; | 3769 | union IO_APIC_reg_01 reg_01; |
| @@ -3944,7 +3931,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
| 3944 | */ | 3931 | */ |
| 3945 | 3932 | ||
| 3946 | if (physids_empty(apic_id_map)) | 3933 | if (physids_empty(apic_id_map)) |
| 3947 | apic_id_map = apic->ioapic_phys_id_map(phys_cpu_present_map); | 3934 | apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); |
| 3948 | 3935 | ||
| 3949 | spin_lock_irqsave(&ioapic_lock, flags); | 3936 | spin_lock_irqsave(&ioapic_lock, flags); |
| 3950 | reg_00.raw = io_apic_read(ioapic, 0); | 3937 | reg_00.raw = io_apic_read(ioapic, 0); |
| @@ -3960,10 +3947,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
| 3960 | * Every APIC in a system must have a unique ID or we get lots of nice | 3947 | * Every APIC in a system must have a unique ID or we get lots of nice |
| 3961 | * 'stuck on smp_invalidate_needed IPI wait' messages. | 3948 | * 'stuck on smp_invalidate_needed IPI wait' messages. |
| 3962 | */ | 3949 | */ |
| 3963 | if (apic->check_apicid_used(apic_id_map, apic_id)) { | 3950 | if (apic->check_apicid_used(&apic_id_map, apic_id)) { |
| 3964 | 3951 | ||
| 3965 | for (i = 0; i < get_physical_broadcast(); i++) { | 3952 | for (i = 0; i < get_physical_broadcast(); i++) { |
| 3966 | if (!apic->check_apicid_used(apic_id_map, i)) | 3953 | if (!apic->check_apicid_used(&apic_id_map, i)) |
| 3967 | break; | 3954 | break; |
| 3968 | } | 3955 | } |
| 3969 | 3956 | ||
| @@ -3976,7 +3963,7 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id) | |||
| 3976 | apic_id = i; | 3963 | apic_id = i; |
| 3977 | } | 3964 | } |
| 3978 | 3965 | ||
| 3979 | tmp = apic->apicid_to_cpu_present(apic_id); | 3966 | apic->apicid_to_cpu_present(apic_id, &tmp); |
| 3980 | physids_or(apic_id_map, apic_id_map, tmp); | 3967 | physids_or(apic_id_map, apic_id_map, tmp); |
| 3981 | 3968 | ||
| 3982 | if (reg_00.bits.ID != apic_id) { | 3969 | if (reg_00.bits.ID != apic_id) { |
| @@ -4106,7 +4093,7 @@ static struct resource * __init ioapic_setup_resources(int nr_ioapics) | |||
| 4106 | for (i = 0; i < nr_ioapics; i++) { | 4093 | for (i = 0; i < nr_ioapics; i++) { |
| 4107 | res[i].name = mem; | 4094 | res[i].name = mem; |
| 4108 | res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; | 4095 | res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY; |
| 4109 | sprintf(mem, "IOAPIC %u", i); | 4096 | snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i); |
| 4110 | mem += IOAPIC_RESOURCE_NAME_SIZE; | 4097 | mem += IOAPIC_RESOURCE_NAME_SIZE; |
| 4111 | } | 4098 | } |
| 4112 | 4099 | ||
| @@ -4140,18 +4127,17 @@ void __init ioapic_init_mappings(void) | |||
| 4140 | #ifdef CONFIG_X86_32 | 4127 | #ifdef CONFIG_X86_32 |
| 4141 | fake_ioapic_page: | 4128 | fake_ioapic_page: |
| 4142 | #endif | 4129 | #endif |
| 4143 | ioapic_phys = (unsigned long) | 4130 | ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE); |
| 4144 | alloc_bootmem_pages(PAGE_SIZE); | ||
| 4145 | ioapic_phys = __pa(ioapic_phys); | 4131 | ioapic_phys = __pa(ioapic_phys); |
| 4146 | } | 4132 | } |
| 4147 | set_fixmap_nocache(idx, ioapic_phys); | 4133 | set_fixmap_nocache(idx, ioapic_phys); |
| 4148 | apic_printk(APIC_VERBOSE, | 4134 | apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n", |
| 4149 | "mapped IOAPIC to %08lx (%08lx)\n", | 4135 | __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK), |
| 4150 | __fix_to_virt(idx), ioapic_phys); | 4136 | ioapic_phys); |
| 4151 | idx++; | 4137 | idx++; |
| 4152 | 4138 | ||
| 4153 | ioapic_res->start = ioapic_phys; | 4139 | ioapic_res->start = ioapic_phys; |
| 4154 | ioapic_res->end = ioapic_phys + (4 * 1024) - 1; | 4140 | ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1; |
| 4155 | ioapic_res++; | 4141 | ioapic_res++; |
| 4156 | } | 4142 | } |
| 4157 | } | 4143 | } |
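The io_apic.c call sites above, and the numaq_32.c/summit_32.c conversions below, all follow one pattern: apic callbacks that used to return a physid_mask_t by value now fill in a caller-supplied mask through a pointer. Since physid_mask_t is a MAX_APICS-bit bitmap (an array of longs), returning it by value copies the whole array on every call. A minimal standalone sketch of the two conventions, using a simplified stand-in type rather than the kernel's definitions:

    /* Sketch of the by-value vs. by-reference convention; "physid_mask_t"
     * here is a simplified stand-in, not the kernel type. */
    #include <stdio.h>
    #include <string.h>

    #define NUM_LONGS 8     /* assumed bitmap size, for illustration only */
    typedef struct { unsigned long mask[NUM_LONGS]; } physid_mask_t;

    /* old style: the whole struct travels back through the return value */
    static physid_mask_t promote_by_value(unsigned long physids)
    {
            physid_mask_t tmp;

            memset(&tmp, 0, sizeof(tmp));
            tmp.mask[0] = physids;
            return tmp;
    }

    /* new style: the caller owns the destination, nothing is copied back */
    static void promote_by_ref(unsigned long physids, physid_mask_t *retmap)
    {
            memset(retmap, 0, sizeof(*retmap));
            retmap->mask[0] = physids;
    }

    int main(void)
    {
            physid_mask_t a = promote_by_value(0xFUL);
            physid_mask_t b;

            promote_by_ref(0xFUL, &b);
            printf("masks equal: %d\n", !memcmp(&a, &b, sizeof(a)));
            return 0;
    }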
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index efa00e2b8505..07cdbdcd7a92 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c | |||
| @@ -334,10 +334,9 @@ static inline const struct cpumask *numaq_target_cpus(void) | |||
| 334 | return cpu_all_mask; | 334 | return cpu_all_mask; |
| 335 | } | 335 | } |
| 336 | 336 | ||
| 337 | static inline unsigned long | 337 | static unsigned long numaq_check_apicid_used(physid_mask_t *map, int apicid) |
| 338 | numaq_check_apicid_used(physid_mask_t bitmap, int apicid) | ||
| 339 | { | 338 | { |
| 340 | return physid_isset(apicid, bitmap); | 339 | return physid_isset(apicid, *map); |
| 341 | } | 340 | } |
| 342 | 341 | ||
| 343 | static inline unsigned long numaq_check_apicid_present(int bit) | 342 | static inline unsigned long numaq_check_apicid_present(int bit) |
| @@ -371,10 +370,10 @@ static inline int numaq_multi_timer_check(int apic, int irq) | |||
| 371 | return apic != 0 && irq == 0; | 370 | return apic != 0 && irq == 0; |
| 372 | } | 371 | } |
| 373 | 372 | ||
| 374 | static inline physid_mask_t numaq_ioapic_phys_id_map(physid_mask_t phys_map) | 373 | static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap) |
| 375 | { | 374 | { |
| 376 | /* We don't have a good way to do this yet - hack */ | 375 | /* We don't have a good way to do this yet - hack */ |
| 377 | return physids_promote(0xFUL); | 376 | physids_promote(0xFUL, retmap); |
| 378 | } | 377 | } |
| 379 | 378 | ||
| 380 | static inline int numaq_cpu_to_logical_apicid(int cpu) | 379 | static inline int numaq_cpu_to_logical_apicid(int cpu) |
| @@ -402,12 +401,12 @@ static inline int numaq_apicid_to_node(int logical_apicid) | |||
| 402 | return logical_apicid >> 4; | 401 | return logical_apicid >> 4; |
| 403 | } | 402 | } |
| 404 | 403 | ||
| 405 | static inline physid_mask_t numaq_apicid_to_cpu_present(int logical_apicid) | 404 | static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap) |
| 406 | { | 405 | { |
| 407 | int node = numaq_apicid_to_node(logical_apicid); | 406 | int node = numaq_apicid_to_node(logical_apicid); |
| 408 | int cpu = __ffs(logical_apicid & 0xf); | 407 | int cpu = __ffs(logical_apicid & 0xf); |
| 409 | 408 | ||
| 410 | return physid_mask_of_physid(cpu + 4*node); | 409 | physid_set_mask_of_physid(cpu + 4*node, retmap); |
| 411 | } | 410 | } |
| 412 | 411 | ||
| 413 | /* Where the IO area was mapped on multiquad, always 0 otherwise */ | 412 | /* Where the IO area was mapped on multiquad, always 0 otherwise */ |
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c index 0c0182cc947d..1a6559f6768c 100644 --- a/arch/x86/kernel/apic/probe_32.c +++ b/arch/x86/kernel/apic/probe_32.c | |||
| @@ -108,7 +108,7 @@ struct apic apic_default = { | |||
| 108 | .apicid_to_node = default_apicid_to_node, | 108 | .apicid_to_node = default_apicid_to_node, |
| 109 | .cpu_to_logical_apicid = default_cpu_to_logical_apicid, | 109 | .cpu_to_logical_apicid = default_cpu_to_logical_apicid, |
| 110 | .cpu_present_to_apicid = default_cpu_present_to_apicid, | 110 | .cpu_present_to_apicid = default_cpu_present_to_apicid, |
| 111 | .apicid_to_cpu_present = default_apicid_to_cpu_present, | 111 | .apicid_to_cpu_present = physid_set_mask_of_physid, |
| 112 | .setup_portio_remap = NULL, | 112 | .setup_portio_remap = NULL, |
| 113 | .check_phys_apicid_present = default_check_phys_apicid_present, | 113 | .check_phys_apicid_present = default_check_phys_apicid_present, |
| 114 | .enable_apic_mode = NULL, | 114 | .enable_apic_mode = NULL, |
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c index 645ecc4ff0be..9b419263d90d 100644 --- a/arch/x86/kernel/apic/summit_32.c +++ b/arch/x86/kernel/apic/summit_32.c | |||
| @@ -183,7 +183,7 @@ static const struct cpumask *summit_target_cpus(void) | |||
| 183 | return cpumask_of(0); | 183 | return cpumask_of(0); |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid) | 186 | static unsigned long summit_check_apicid_used(physid_mask_t *map, int apicid) |
| 187 | { | 187 | { |
| 188 | return 0; | 188 | return 0; |
| 189 | } | 189 | } |
| @@ -261,15 +261,15 @@ static int summit_cpu_present_to_apicid(int mps_cpu) | |||
| 261 | return BAD_APICID; | 261 | return BAD_APICID; |
| 262 | } | 262 | } |
| 263 | 263 | ||
| 264 | static physid_mask_t summit_ioapic_phys_id_map(physid_mask_t phys_id_map) | 264 | static void summit_ioapic_phys_id_map(physid_mask_t *phys_id_map, physid_mask_t *retmap) |
| 265 | { | 265 | { |
| 266 | /* For clustered we don't have a good way to do this yet - hack */ | 266 | /* For clustered we don't have a good way to do this yet - hack */ |
| 267 | return physids_promote(0x0F); | 267 | physids_promote(0x0FL, retmap); |
| 268 | } | 268 | } |
| 269 | 269 | ||
| 270 | static physid_mask_t summit_apicid_to_cpu_present(int apicid) | 270 | static void summit_apicid_to_cpu_present(int apicid, physid_mask_t *retmap) |
| 271 | { | 271 | { |
| 272 | return physid_mask_of_physid(0); | 272 | physid_set_mask_of_physid(0, retmap); |
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | static int summit_check_phys_apicid_present(int physical_apicid) | 275 | static int summit_check_phys_apicid_present(int physical_apicid) |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 326c25477d3d..130c4b934877 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
| @@ -409,6 +409,12 @@ static __init void map_mmioh_high(int max_pnode) | |||
| 409 | map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc); | 409 | map_high("MMIOH", mmioh.s.base, shift, max_pnode, map_uc); |
| 410 | } | 410 | } |
| 411 | 411 | ||
| 412 | static __init void map_low_mmrs(void) | ||
| 413 | { | ||
| 414 | init_extra_mapping_uc(UV_GLOBAL_MMR32_BASE, UV_GLOBAL_MMR32_SIZE); | ||
| 415 | init_extra_mapping_uc(UV_LOCAL_MMR_BASE, UV_LOCAL_MMR_SIZE); | ||
| 416 | } | ||
| 417 | |||
| 412 | static __init void uv_rtc_init(void) | 418 | static __init void uv_rtc_init(void) |
| 413 | { | 419 | { |
| 414 | long status; | 420 | long status; |
| @@ -550,6 +556,8 @@ void __init uv_system_init(void) | |||
| 550 | unsigned long mmr_base, present, paddr; | 556 | unsigned long mmr_base, present, paddr; |
| 551 | unsigned short pnode_mask; | 557 | unsigned short pnode_mask; |
| 552 | 558 | ||
| 559 | map_low_mmrs(); | ||
| 560 | |||
| 553 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); | 561 | m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); |
| 554 | m_val = m_n_config.s.m_skt; | 562 | m_val = m_n_config.s.m_skt; |
| 555 | n_val = m_n_config.s.n_skt; | 563 | n_val = m_n_config.s.n_skt; |
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index fab786f60ed6..898df9719afb 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
| @@ -712,7 +712,7 @@ static void probe_nmi_watchdog(void) | |||
| 712 | switch (boot_cpu_data.x86_vendor) { | 712 | switch (boot_cpu_data.x86_vendor) { |
| 713 | case X86_VENDOR_AMD: | 713 | case X86_VENDOR_AMD: |
| 714 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && | 714 | if (boot_cpu_data.x86 != 6 && boot_cpu_data.x86 != 15 && |
| 715 | boot_cpu_data.x86 != 16) | 715 | boot_cpu_data.x86 != 16 && boot_cpu_data.x86 != 17) |
| 716 | return; | 716 | return; |
| 717 | wd_ops = &k7_wd_ops; | 717 | wd_ops = &k7_wd_ops; |
| 718 | break; | 718 | break; |
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 19212cb01558..fee6cc2b2079 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c | |||
| @@ -274,3 +274,93 @@ void smp_generic_interrupt(struct pt_regs *regs) | |||
| 274 | } | 274 | } |
| 275 | 275 | ||
| 276 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); | 276 | EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq); |
| 277 | |||
| 278 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 279 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ | ||
| 280 | void fixup_irqs(void) | ||
| 281 | { | ||
| 282 | unsigned int irq, vector; | ||
| 283 | static int warned; | ||
| 284 | struct irq_desc *desc; | ||
| 285 | |||
| 286 | for_each_irq_desc(irq, desc) { | ||
| 287 | int break_affinity = 0; | ||
| 288 | int set_affinity = 1; | ||
| 289 | const struct cpumask *affinity; | ||
| 290 | |||
| 291 | if (!desc) | ||
| 292 | continue; | ||
| 293 | if (irq == 2) | ||
| 294 | continue; | ||
| 295 | |||
| 296 | /* interrupts are disabled at this point */ | ||
| 297 | spin_lock(&desc->lock); | ||
| 298 | |||
| 299 | affinity = desc->affinity; | ||
| 300 | if (!irq_has_action(irq) || | ||
| 301 | cpumask_equal(affinity, cpu_online_mask)) { | ||
| 302 | spin_unlock(&desc->lock); | ||
| 303 | continue; | ||
| 304 | } | ||
| 305 | |||
| 306 | /* | ||
| 307 | * Complete the irq move. This cpu is going down and, in the | ||
| 308 | * non-intr-remapping case, we can't wait until this interrupt | ||
| 309 | * arrives at this cpu before completing the irq move. | ||
| 310 | */ | ||
| 311 | irq_force_complete_move(irq); | ||
| 312 | |||
| 313 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
| 314 | break_affinity = 1; | ||
| 315 | affinity = cpu_all_mask; | ||
| 316 | } | ||
| 317 | |||
| 318 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->mask) | ||
| 319 | desc->chip->mask(irq); | ||
| 320 | |||
| 321 | if (desc->chip->set_affinity) | ||
| 322 | desc->chip->set_affinity(irq, affinity); | ||
| 323 | else if (!(warned++)) | ||
| 324 | set_affinity = 0; | ||
| 325 | |||
| 326 | if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) | ||
| 327 | desc->chip->unmask(irq); | ||
| 328 | |||
| 329 | spin_unlock(&desc->lock); | ||
| 330 | |||
| 331 | if (break_affinity && set_affinity) | ||
| 332 | printk("Broke affinity for irq %i\n", irq); | ||
| 333 | else if (!set_affinity) | ||
| 334 | printk("Cannot set affinity for irq %i\n", irq); | ||
| 335 | } | ||
| 336 | |||
| 337 | /* | ||
| 338 | * We could remove the mdelay() and then send spurious interrupts to | ||
| 339 | * new cpu targets for all the irqs that were handled previously by | ||
| 340 | * this cpu. While it works, I have seen spurious interrupt messages | ||
| 341 | * (nothing wrong, but still...). | ||
| 342 | * | ||
| 343 | * So for now, retain the mdelay(1), check the IRR and then send those | ||
| 344 | * interrupts to their new targets as this cpu is already offlined... | ||
| 345 | */ | ||
| 346 | mdelay(1); | ||
| 347 | |||
| 348 | for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) { | ||
| 349 | unsigned int irr; | ||
| 350 | |||
| 351 | if (__get_cpu_var(vector_irq)[vector] < 0) | ||
| 352 | continue; | ||
| 353 | |||
| 354 | irr = apic_read(APIC_IRR + (vector / 32 * 0x10)); | ||
| 355 | if (irr & (1 << (vector % 32))) { | ||
| 356 | irq = __get_cpu_var(vector_irq)[vector]; | ||
| 357 | |||
| 358 | desc = irq_to_desc(irq); | ||
| 359 | spin_lock(&desc->lock); | ||
| 360 | if (desc->chip->retrigger) | ||
| 361 | desc->chip->retrigger(irq); | ||
| 362 | spin_unlock(&desc->lock); | ||
| 363 | } | ||
| 364 | } | ||
| 365 | } | ||
| 366 | #endif | ||
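This consolidated fixup_irqs() supersedes the separate 32-bit and 64-bit copies removed in the next two files, and also subsumes the temporary local_irq_enable()/mdelay() hack removed from cpu_disable_common() further below. The genuinely new part is the closing loop: it scans the local APIC's IRR for vectors that fired while this cpu was going down and retriggers them on their new targets. The IRR is architecturally eight 32-bit registers spaced 0x10 apart, which is what the vector / 32 * 0x10 and 1 << (vector % 32) arithmetic indexes. A standalone sketch of that lookup (0x200 is the conventional IRR base offset):

    /* Sketch of the IRR bit lookup used by the retrigger loop above. The
     * layout (8 x 32-bit words, 16 bytes apart, base 0x200) follows the
     * local APIC register map. */
    #include <stdio.h>

    #define APIC_IRR 0x200

    static void irr_location(unsigned int vector)
    {
            unsigned int reg = APIC_IRR + (vector / 32) * 0x10;
            unsigned int bit = vector % 32;

            printf("vector 0x%02x -> IRR word at 0x%03x, bit %u\n",
                   vector, reg, bit);
    }

    int main(void)
    {
            irr_location(0x20);     /* FIRST_EXTERNAL_VECTOR on x86 */
            irr_location(0x31);
            irr_location(0xef);
            return 0;
    }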
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 7d35d0fe2329..10709f29d166 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
| @@ -211,48 +211,3 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) | |||
| 211 | 211 | ||
| 212 | return true; | 212 | return true; |
| 213 | } | 213 | } |
| 214 | |||
| 215 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 216 | |||
| 217 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ | ||
| 218 | void fixup_irqs(void) | ||
| 219 | { | ||
| 220 | unsigned int irq; | ||
| 221 | struct irq_desc *desc; | ||
| 222 | |||
| 223 | for_each_irq_desc(irq, desc) { | ||
| 224 | const struct cpumask *affinity; | ||
| 225 | |||
| 226 | if (!desc) | ||
| 227 | continue; | ||
| 228 | if (irq == 2) | ||
| 229 | continue; | ||
| 230 | |||
| 231 | affinity = desc->affinity; | ||
| 232 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
| 233 | printk("Breaking affinity for irq %i\n", irq); | ||
| 234 | affinity = cpu_all_mask; | ||
| 235 | } | ||
| 236 | if (desc->chip->set_affinity) | ||
| 237 | desc->chip->set_affinity(irq, affinity); | ||
| 238 | else if (desc->action) | ||
| 239 | printk_once("Cannot set affinity for irq %i\n", irq); | ||
| 240 | } | ||
| 241 | |||
| 242 | #if 0 | ||
| 243 | barrier(); | ||
| 244 | /* Ingo Molnar says: "after the IO-APIC masks have been redirected | ||
| 245 | [note the nop - the interrupt-enable boundary on x86 is two | ||
| 246 | instructions from sti] - to flush out pending hardirqs and | ||
| 247 | IPIs. After this point nothing is supposed to reach this CPU." */ | ||
| 248 | __asm__ __volatile__("sti; nop; cli"); | ||
| 249 | barrier(); | ||
| 250 | #else | ||
| 251 | /* That doesn't seem sufficient. Give it 1ms. */ | ||
| 252 | local_irq_enable(); | ||
| 253 | mdelay(1); | ||
| 254 | local_irq_disable(); | ||
| 255 | #endif | ||
| 256 | } | ||
| 257 | #endif | ||
| 258 | |||
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 977d8b43a0dd..acf8fbf8fbda 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c | |||
| @@ -62,64 +62,6 @@ bool handle_irq(unsigned irq, struct pt_regs *regs) | |||
| 62 | return true; | 62 | return true; |
| 63 | } | 63 | } |
| 64 | 64 | ||
| 65 | #ifdef CONFIG_HOTPLUG_CPU | ||
| 66 | /* A cpu has been removed from cpu_online_mask. Reset irq affinities. */ | ||
| 67 | void fixup_irqs(void) | ||
| 68 | { | ||
| 69 | unsigned int irq; | ||
| 70 | static int warned; | ||
| 71 | struct irq_desc *desc; | ||
| 72 | |||
| 73 | for_each_irq_desc(irq, desc) { | ||
| 74 | int break_affinity = 0; | ||
| 75 | int set_affinity = 1; | ||
| 76 | const struct cpumask *affinity; | ||
| 77 | |||
| 78 | if (!desc) | ||
| 79 | continue; | ||
| 80 | if (irq == 2) | ||
| 81 | continue; | ||
| 82 | |||
| 83 | /* interrupts are disabled at this point */ | ||
| 84 | spin_lock(&desc->lock); | ||
| 85 | |||
| 86 | affinity = desc->affinity; | ||
| 87 | if (!irq_has_action(irq) || | ||
| 88 | cpumask_equal(affinity, cpu_online_mask)) { | ||
| 89 | spin_unlock(&desc->lock); | ||
| 90 | continue; | ||
| 91 | } | ||
| 92 | |||
| 93 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | ||
| 94 | break_affinity = 1; | ||
| 95 | affinity = cpu_all_mask; | ||
| 96 | } | ||
| 97 | |||
| 98 | if (desc->chip->mask) | ||
| 99 | desc->chip->mask(irq); | ||
| 100 | |||
| 101 | if (desc->chip->set_affinity) | ||
| 102 | desc->chip->set_affinity(irq, affinity); | ||
| 103 | else if (!(warned++)) | ||
| 104 | set_affinity = 0; | ||
| 105 | |||
| 106 | if (desc->chip->unmask) | ||
| 107 | desc->chip->unmask(irq); | ||
| 108 | |||
| 109 | spin_unlock(&desc->lock); | ||
| 110 | |||
| 111 | if (break_affinity && set_affinity) | ||
| 112 | printk("Broke affinity for irq %i\n", irq); | ||
| 113 | else if (!set_affinity) | ||
| 114 | printk("Cannot set affinity for irq %i\n", irq); | ||
| 115 | } | ||
| 116 | |||
| 117 | /* That doesn't seem sufficient. Give it 1ms. */ | ||
| 118 | local_irq_enable(); | ||
| 119 | mdelay(1); | ||
| 120 | local_irq_disable(); | ||
| 121 | } | ||
| 122 | #endif | ||
| 123 | 65 | ||
| 124 | extern void call_softirq(void); | 66 | extern void call_softirq(void); |
| 125 | 67 | ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 565ebc65920e..324f2a44c221 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
| @@ -1250,16 +1250,7 @@ static void __ref remove_cpu_from_maps(int cpu) | |||
| 1250 | void cpu_disable_common(void) | 1250 | void cpu_disable_common(void) |
| 1251 | { | 1251 | { |
| 1252 | int cpu = smp_processor_id(); | 1252 | int cpu = smp_processor_id(); |
| 1253 | /* | ||
| 1254 | * HACK: | ||
| 1255 | * Allow any queued timer interrupts to get serviced | ||
| 1256 | * This is only a temporary solution until we cleanup | ||
| 1257 | * fixup_irqs as we do for IA64. | ||
| 1258 | */ | ||
| 1259 | local_irq_enable(); | ||
| 1260 | mdelay(1); | ||
| 1261 | 1253 | ||
| 1262 | local_irq_disable(); | ||
| 1263 | remove_siblinginfo(cpu); | 1254 | remove_siblinginfo(cpu); |
| 1264 | 1255 | ||
| 1265 | /* It's now safe to remove this processor from the online map */ | 1256 | /* It's now safe to remove this processor from the online map */ |
diff --git a/arch/x86/kernel/uv_irq.c b/arch/x86/kernel/uv_irq.c index aeef529917e4..61d805df4c91 100644 --- a/arch/x86/kernel/uv_irq.c +++ b/arch/x86/kernel/uv_irq.c | |||
| @@ -9,10 +9,25 @@ | |||
| 9 | */ | 9 | */ |
| 10 | 10 | ||
| 11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
| 12 | #include <linux/rbtree.h> | ||
| 12 | #include <linux/irq.h> | 13 | #include <linux/irq.h> |
| 13 | 14 | ||
| 14 | #include <asm/apic.h> | 15 | #include <asm/apic.h> |
| 15 | #include <asm/uv/uv_irq.h> | 16 | #include <asm/uv/uv_irq.h> |
| 17 | #include <asm/uv/uv_hub.h> | ||
| 18 | |||
| 19 | /* MMR offset and pnode of hub sourcing interrupts for a given irq */ | ||
| 20 | struct uv_irq_2_mmr_pnode { | ||
| 21 | struct rb_node list; | ||
| 22 | unsigned long offset; | ||
| 23 | int pnode; | ||
| 24 | int irq; | ||
| 25 | }; | ||
| 26 | |||
| 27 | static DEFINE_SPINLOCK(uv_irq_lock); | ||
| 28 | static struct rb_root uv_irq_root; | ||
| 29 | |||
| 30 | static int uv_set_irq_affinity(unsigned int, const struct cpumask *); | ||
| 16 | 31 | ||
| 17 | static void uv_noop(unsigned int irq) | 32 | static void uv_noop(unsigned int irq) |
| 18 | { | 33 | { |
| @@ -39,25 +54,214 @@ struct irq_chip uv_irq_chip = { | |||
| 39 | .unmask = uv_noop, | 54 | .unmask = uv_noop, |
| 40 | .eoi = uv_ack_apic, | 55 | .eoi = uv_ack_apic, |
| 41 | .end = uv_noop, | 56 | .end = uv_noop, |
| 57 | .set_affinity = uv_set_irq_affinity, | ||
| 42 | }; | 58 | }; |
| 43 | 59 | ||
| 44 | /* | 60 | /* |
| 61 | * Add the MMR offset and pnode of the hub sourcing interrupts for a | ||
| 62 | * given irq to the rb tree. | ||
| 63 | */ | ||
| 64 | static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade) | ||
| 65 | { | ||
| 66 | struct rb_node **link = &uv_irq_root.rb_node; | ||
| 67 | struct rb_node *parent = NULL; | ||
| 68 | struct uv_irq_2_mmr_pnode *n; | ||
| 69 | struct uv_irq_2_mmr_pnode *e; | ||
| 70 | unsigned long irqflags; | ||
| 71 | |||
| 72 | n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL, | ||
| 73 | uv_blade_to_memory_nid(blade)); | ||
| 74 | if (!n) | ||
| 75 | return -ENOMEM; | ||
| 76 | |||
| 77 | n->irq = irq; | ||
| 78 | n->offset = offset; | ||
| 79 | n->pnode = uv_blade_to_pnode(blade); | ||
| 80 | spin_lock_irqsave(&uv_irq_lock, irqflags); | ||
| 81 | /* Find the right place in the rbtree: */ | ||
| 82 | while (*link) { | ||
| 83 | parent = *link; | ||
| 84 | e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list); | ||
| 85 | |||
| 86 | if (unlikely(irq == e->irq)) { | ||
| 87 | /* irq entry exists */ | ||
| 88 | e->pnode = uv_blade_to_pnode(blade); | ||
| 89 | e->offset = offset; | ||
| 90 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
| 91 | kfree(n); | ||
| 92 | return 0; | ||
| 93 | } | ||
| 94 | |||
| 95 | if (irq < e->irq) | ||
| 96 | link = &(*link)->rb_left; | ||
| 97 | else | ||
| 98 | link = &(*link)->rb_right; | ||
| 99 | } | ||
| 100 | |||
| 101 | /* Insert the node into the rbtree. */ | ||
| 102 | rb_link_node(&n->list, parent, link); | ||
| 103 | rb_insert_color(&n->list, &uv_irq_root); | ||
| 104 | |||
| 105 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
| 106 | return 0; | ||
| 107 | } | ||
| 108 | |||
| 109 | /* Retrieve offset and pnode information from the rb tree for a specific irq */ | ||
| 110 | int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode) | ||
| 111 | { | ||
| 112 | struct uv_irq_2_mmr_pnode *e; | ||
| 113 | struct rb_node *n; | ||
| 114 | unsigned long irqflags; | ||
| 115 | |||
| 116 | spin_lock_irqsave(&uv_irq_lock, irqflags); | ||
| 117 | n = uv_irq_root.rb_node; | ||
| 118 | while (n) { | ||
| 119 | e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); | ||
| 120 | |||
| 121 | if (e->irq == irq) { | ||
| 122 | *offset = e->offset; | ||
| 123 | *pnode = e->pnode; | ||
| 124 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
| 125 | return 0; | ||
| 126 | } | ||
| 127 | |||
| 128 | if (irq < e->irq) | ||
| 129 | n = n->rb_left; | ||
| 130 | else | ||
| 131 | n = n->rb_right; | ||
| 132 | } | ||
| 133 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
| 134 | return -1; | ||
| 135 | } | ||
| 136 | |||
| 137 | /* | ||
| 138 | * Re-target the irq to the specified CPU and enable the specified MMR located | ||
| 139 | * on the specified blade to allow the sending of MSIs to the specified CPU. | ||
| 140 | */ | ||
| 141 | static int | ||
| 142 | arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade, | ||
| 143 | unsigned long mmr_offset, int restrict) | ||
| 144 | { | ||
| 145 | const struct cpumask *eligible_cpu = cpumask_of(cpu); | ||
| 146 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 147 | struct irq_cfg *cfg; | ||
| 148 | int mmr_pnode; | ||
| 149 | unsigned long mmr_value; | ||
| 150 | struct uv_IO_APIC_route_entry *entry; | ||
| 151 | int err; | ||
| 152 | |||
| 153 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != | ||
| 154 | sizeof(unsigned long)); | ||
| 155 | |||
| 156 | cfg = irq_cfg(irq); | ||
| 157 | |||
| 158 | err = assign_irq_vector(irq, cfg, eligible_cpu); | ||
| 159 | if (err != 0) | ||
| 160 | return err; | ||
| 161 | |||
| 162 | if (restrict == UV_AFFINITY_CPU) | ||
| 163 | desc->status |= IRQ_NO_BALANCING; | ||
| 164 | else | ||
| 165 | desc->status |= IRQ_MOVE_PCNTXT; | ||
| 166 | |||
| 167 | set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq, | ||
| 168 | irq_name); | ||
| 169 | |||
| 170 | mmr_value = 0; | ||
| 171 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
| 172 | entry->vector = cfg->vector; | ||
| 173 | entry->delivery_mode = apic->irq_delivery_mode; | ||
| 174 | entry->dest_mode = apic->irq_dest_mode; | ||
| 175 | entry->polarity = 0; | ||
| 176 | entry->trigger = 0; | ||
| 177 | entry->mask = 0; | ||
| 178 | entry->dest = apic->cpu_mask_to_apicid(eligible_cpu); | ||
| 179 | |||
| 180 | mmr_pnode = uv_blade_to_pnode(mmr_blade); | ||
| 181 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
| 182 | |||
| 183 | if (cfg->move_in_progress) | ||
| 184 | send_cleanup_vector(cfg); | ||
| 185 | |||
| 186 | return irq; | ||
| 187 | } | ||
| 188 | |||
| 189 | /* | ||
| 190 | * Disable the specified MMR located on the specified blade so that MSIs | ||
| 191 | * are no longer allowed to be sent. | ||
| 192 | */ | ||
| 193 | static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset) | ||
| 194 | { | ||
| 195 | unsigned long mmr_value; | ||
| 196 | struct uv_IO_APIC_route_entry *entry; | ||
| 197 | |||
| 198 | BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != | ||
| 199 | sizeof(unsigned long)); | ||
| 200 | |||
| 201 | mmr_value = 0; | ||
| 202 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
| 203 | entry->mask = 1; | ||
| 204 | |||
| 205 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
| 206 | } | ||
| 207 | |||
| 208 | static int uv_set_irq_affinity(unsigned int irq, const struct cpumask *mask) | ||
| 209 | { | ||
| 210 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 211 | struct irq_cfg *cfg = desc->chip_data; | ||
| 212 | unsigned int dest; | ||
| 213 | unsigned long mmr_value; | ||
| 214 | struct uv_IO_APIC_route_entry *entry; | ||
| 215 | unsigned long mmr_offset; | ||
| 216 | int mmr_pnode; | ||
| 217 | |||
| 218 | dest = set_desc_affinity(desc, mask); | ||
| 219 | if (dest == BAD_APICID) | ||
| 220 | return -1; | ||
| 221 | |||
| 222 | mmr_value = 0; | ||
| 223 | entry = (struct uv_IO_APIC_route_entry *)&mmr_value; | ||
| 224 | |||
| 225 | entry->vector = cfg->vector; | ||
| 226 | entry->delivery_mode = apic->irq_delivery_mode; | ||
| 227 | entry->dest_mode = apic->irq_dest_mode; | ||
| 228 | entry->polarity = 0; | ||
| 229 | entry->trigger = 0; | ||
| 230 | entry->mask = 0; | ||
| 231 | entry->dest = dest; | ||
| 232 | |||
| 233 | /* Get previously stored MMR and pnode of hub sourcing interrupts */ | ||
| 234 | if (uv_irq_2_mmr_info(irq, &mmr_offset, &mmr_pnode)) | ||
| 235 | return -1; | ||
| 236 | |||
| 237 | uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value); | ||
| 238 | |||
| 239 | if (cfg->move_in_progress) | ||
| 240 | send_cleanup_vector(cfg); | ||
| 241 | |||
| 242 | return 0; | ||
| 243 | } | ||
| 244 | |||
| 245 | /* | ||
| 45 | * Set up a mapping of an available irq and vector, and enable the specified | 246 | * Set up a mapping of an available irq and vector, and enable the specified |
| 46 | * MMR that defines the MSI that is to be sent to the specified CPU when an | 247 | * MMR that defines the MSI that is to be sent to the specified CPU when an |
| 47 | * interrupt is raised. | 248 | * interrupt is raised. |
| 48 | */ | 249 | */ |
| 49 | int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, | 250 | int uv_setup_irq(char *irq_name, int cpu, int mmr_blade, |
| 50 | unsigned long mmr_offset) | 251 | unsigned long mmr_offset, int restrict) |
| 51 | { | 252 | { |
| 52 | int irq; | 253 | int irq, ret; |
| 53 | int ret; | 254 | |
| 255 | irq = create_irq_nr(NR_IRQS_LEGACY, uv_blade_to_memory_nid(mmr_blade)); | ||
| 54 | 256 | ||
| 55 | irq = create_irq(); | ||
| 56 | if (irq <= 0) | 257 | if (irq <= 0) |
| 57 | return -EBUSY; | 258 | return -EBUSY; |
| 58 | 259 | ||
| 59 | ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset); | 260 | ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset, |
| 60 | if (ret != irq) | 261 | restrict); |
| 262 | if (ret == irq) | ||
| 263 | uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade); | ||
| 264 | else | ||
| 61 | destroy_irq(irq); | 265 | destroy_irq(irq); |
| 62 | 266 | ||
| 63 | return ret; | 267 | return ret; |
| @@ -71,9 +275,28 @@ EXPORT_SYMBOL_GPL(uv_setup_irq); | |||
| 71 | * | 275 | * |
| 72 | * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq(). | 276 | * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq(). |
| 73 | */ | 277 | */ |
| 74 | void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset) | 278 | void uv_teardown_irq(unsigned int irq) |
| 75 | { | 279 | { |
| 76 | arch_disable_uv_irq(mmr_blade, mmr_offset); | 280 | struct uv_irq_2_mmr_pnode *e; |
| 281 | struct rb_node *n; | ||
| 282 | unsigned long irqflags; | ||
| 283 | |||
| 284 | spin_lock_irqsave(&uv_irq_lock, irqflags); | ||
| 285 | n = uv_irq_root.rb_node; | ||
| 286 | while (n) { | ||
| 287 | e = rb_entry(n, struct uv_irq_2_mmr_pnode, list); | ||
| 288 | if (e->irq == irq) { | ||
| 289 | arch_disable_uv_irq(e->pnode, e->offset); | ||
| 290 | rb_erase(n, &uv_irq_root); | ||
| 291 | kfree(e); | ||
| 292 | break; | ||
| 293 | } | ||
| 294 | if (irq < e->irq) | ||
| 295 | n = n->rb_left; | ||
| 296 | else | ||
| 297 | n = n->rb_right; | ||
| 298 | } | ||
| 299 | spin_unlock_irqrestore(&uv_irq_lock, irqflags); | ||
| 77 | destroy_irq(irq); | 300 | destroy_irq(irq); |
| 78 | } | 301 | } |
| 79 | EXPORT_SYMBOL_GPL(uv_teardown_irq); | 302 | EXPORT_SYMBOL_GPL(uv_teardown_irq); |
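The rb tree introduced above is what lets uv_teardown_irq() shrink to a single-irq signature: the MMR offset and pnode are recorded at setup time and looked up by irq at teardown (the xpc_uv.c hunks below show the simplified caller). A runnable stand-in for that keyed mapping, using the same descend-by-key pattern but a plain unbalanced BST instead of the kernel's red-black tree, so the rb_link_node()/rb_insert_color() rebalancing is elided:

    /* Runnable stand-in for the irq -> (offset, pnode) mapping above:
     * same descend-by-key pattern, but a plain unbalanced BST instead of
     * the kernel's red-black tree (rebalancing elided). */
    #include <stdio.h>
    #include <stdlib.h>

    struct node {
            int irq;
            unsigned long offset;
            int pnode;
            struct node *left, *right;
    };

    static struct node *root;

    static int set_irq_2_mmr_info(int irq, unsigned long offset, int pnode)
    {
            struct node **link = &root, *n;

            while (*link) {
                    if (irq == (*link)->irq) {      /* entry exists: update */
                            (*link)->offset = offset;
                            (*link)->pnode  = pnode;
                            return 0;
                    }
                    link = irq < (*link)->irq ? &(*link)->left
                                              : &(*link)->right;
            }
            n = malloc(sizeof(*n));
            if (!n)
                    return -1;
            n->irq = irq;
            n->offset = offset;
            n->pnode = pnode;
            n->left = n->right = NULL;
            *link = n;
            return 0;
    }

    static int irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
    {
            struct node *n = root;

            while (n) {
                    if (n->irq == irq) {
                            *offset = n->offset;
                            *pnode  = n->pnode;
                            return 0;
                    }
                    n = irq < n->irq ? n->left : n->right;
            }
            return -1;
    }

    int main(void)
    {
            unsigned long offset;
            int pnode;

            set_irq_2_mmr_info(42, 0x61b00, 3);     /* hypothetical values */
            if (!irq_2_mmr_info(42, &offset, &pnode))
                    printf("irq 42 -> offset 0x%lx, pnode %d\n",
                           offset, pnode);
            return 0;
    }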
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c index f068553a1b17..cff70c86e18e 100644 --- a/arch/x86/kernel/visws_quirks.c +++ b/arch/x86/kernel/visws_quirks.c | |||
| @@ -183,7 +183,7 @@ static void __init MP_processor_info(struct mpc_cpu *m) | |||
| 183 | return; | 183 | return; |
| 184 | } | 184 | } |
| 185 | 185 | ||
| 186 | apic_cpus = apic->apicid_to_cpu_present(m->apicid); | 186 | apic->apicid_to_cpu_present(m->apicid, &apic_cpus); |
| 187 | physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); | 187 | physids_or(phys_cpu_present_map, phys_cpu_present_map, apic_cpus); |
| 188 | /* | 188 | /* |
| 189 | * Validate version | 189 | * Validate version |
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c index dbb5381f7b3b..9d7ce96e5a5c 100644 --- a/arch/x86/mm/srat_64.c +++ b/arch/x86/mm/srat_64.c | |||
| @@ -136,7 +136,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa) | |||
| 136 | apicid_to_node[apic_id] = node; | 136 | apicid_to_node[apic_id] = node; |
| 137 | node_set(node, cpu_nodes_parsed); | 137 | node_set(node, cpu_nodes_parsed); |
| 138 | acpi_numa = 1; | 138 | acpi_numa = 1; |
| 139 | printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", | 139 | printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", |
| 140 | pxm, apic_id, node); | 140 | pxm, apic_id, node); |
| 141 | } | 141 | } |
| 142 | 142 | ||
| @@ -170,7 +170,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa) | |||
| 170 | apicid_to_node[apic_id] = node; | 170 | apicid_to_node[apic_id] = node; |
| 171 | node_set(node, cpu_nodes_parsed); | 171 | node_set(node, cpu_nodes_parsed); |
| 172 | acpi_numa = 1; | 172 | acpi_numa = 1; |
| 173 | printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n", | 173 | printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", |
| 174 | pxm, apic_id, node); | 174 | pxm, apic_id, node); |
| 175 | } | 175 | } |
| 176 | 176 | ||
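The two srat_64.c hunks above switch the APIC id in the SRAT messages from decimal to zero-padded hex: 0x%04x for x2APIC affinity entries (x2APIC ids are 32-bit and can exceed 0xff) and 0x%02x for legacy entries, matching the hex convention of ACPI table dumps. A quick illustration of the before/after output (the id values are made up; the format strings are the ones from the hunks):

    /* Illustration of the SRAT printk format change. */
    #include <stdio.h>

    int main(void)
    {
            unsigned int pxm = 1, node = 1;
            unsigned int x2apic_id = 0x110;  /* hypothetical id above 0xff */

            /* old: decimal, awkward to match against hex ACPI dumps */
            printf("SRAT: PXM %u -> APIC %u -> Node %u\n",
                   pxm, x2apic_id, node);
            /* new: zero-padded hex */
            printf("SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
                   pxm, x2apic_id, node);
            return 0;
    }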
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index c76677afda1b..b5bbe59f9c57 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c | |||
| @@ -106,7 +106,8 @@ xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) | |||
| 106 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); | 106 | int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade); |
| 107 | 107 | ||
| 108 | #if defined CONFIG_X86_64 | 108 | #if defined CONFIG_X86_64 |
| 109 | mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset); | 109 | mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset, |
| 110 | UV_AFFINITY_CPU); | ||
| 110 | if (mq->irq < 0) { | 111 | if (mq->irq < 0) { |
| 111 | dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", | 112 | dev_err(xpc_part, "uv_setup_irq() returned error=%d\n", |
| 112 | -mq->irq); | 113 | -mq->irq); |
| @@ -136,7 +137,7 @@ static void | |||
| 136 | xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) | 137 | xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) |
| 137 | { | 138 | { |
| 138 | #if defined CONFIG_X86_64 | 139 | #if defined CONFIG_X86_64 |
| 139 | uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset); | 140 | uv_teardown_irq(mq->irq); |
| 140 | 141 | ||
| 141 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV | 142 | #elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV |
| 142 | int mmr_pnode; | 143 | int mmr_pnode; |
