author     Linus Torvalds <torvalds@linux-foundation.org>    2010-03-03 11:15:37 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2010-03-03 11:15:37 -0500
commit     fb7b096d949fa852442ed9d8f982bce526ccfe7e (patch)
tree       883e7e43331d9962bcc6050a3bf88615a8c61063
parent     a626b46e17d0762d664ce471d40bc506b6e721ab (diff)
parent     fad539956c9e69749a03f7817d22d1bab87657bf (diff)
Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (25 commits)
  x86: Fix out of order of gsi
  x86: apic: Fix mismerge, add arch_probe_nr_irqs() again
  x86, irq: Keep chip_data in create_irq_nr and destroy_irq
  xen: Remove unnecessary arch specific xen irq functions.
  smp: Use nr_cpus= to set nr_cpu_ids early
  x86, irq: Remove arch_probe_nr_irqs
  sparseirq: Use radix_tree instead of ptrs array
  sparseirq: Change irq_desc_ptrs to static
  init: Move radix_tree_init() early
  irq: Remove unnecessary bootmem code
  x86: Add iMac9,1 to pci_reboot_dmi_table
  x86: Convert i8259_lock to raw_spinlock
  x86: Convert nmi_lock to raw_spinlock
  x86: Convert ioapic_lock and vector_lock to raw_spinlock
  x86: Avoid race condition in pci_enable_msix()
  x86: Fix SCI on IOAPIC != 0
  x86, ia32_aout: do not kill argument mapping
  x86, irq: Move __setup_vector_irq() before the first irq enable in cpu online path
  x86, irq: Update the vector domain for legacy irqs handled by io-apic
  x86, irq: Don't block IRQ0_VECTOR..IRQ15_VECTOR's on all cpu's
  ...
-rw-r--r--   Documentation/kernel-parameters.txt   |   6
-rw-r--r--   arch/ia64/include/asm/xen/events.h    |   4
-rw-r--r--   arch/ia64/kernel/acpi.c               |   4
-rw-r--r--   arch/x86/ia32/ia32_aout.c             |   1
-rw-r--r--   arch/x86/include/asm/i8259.h          |   2
-rw-r--r--   arch/x86/include/asm/io_apic.h        |   1
-rw-r--r--   arch/x86/include/asm/irq.h            |   1
-rw-r--r--   arch/x86/include/asm/irq_vectors.h    |  48
-rw-r--r--   arch/x86/kernel/acpi/boot.c           |   9
-rw-r--r--   arch/x86/kernel/apic/io_apic.c        | 258
-rw-r--r--   arch/x86/kernel/apic/nmi.c            |   6
-rw-r--r--   arch/x86/kernel/i8259.c               |  30
-rw-r--r--   arch/x86/kernel/irqinit.c             |  35
-rw-r--r--   arch/x86/kernel/reboot.c              |   8
-rw-r--r--   arch/x86/kernel/smpboot.c             |  13
-rw-r--r--   arch/x86/kernel/time.c                |   4
-rw-r--r--   arch/x86/kernel/visws_quirks.c        |   6
-rw-r--r--   arch/x86/kernel/vmiclock_32.c         |   6
-rw-r--r--   drivers/acpi/numa.c                   |   4
-rw-r--r--   drivers/xen/events.c                  |   8
-rw-r--r--   include/linux/irq.h                   |   2
-rw-r--r--   init/main.c                           |  16
-rw-r--r--   kernel/irq/chip.c                     |  52
-rw-r--r--   kernel/irq/handle.c                   |  58
-rw-r--r--   kernel/irq/internals.h                |   6
-rw-r--r--   kernel/irq/numa_migrate.c             |   4
26 files changed, 345 insertions, 247 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index fbcddc5abe25..d80930d58dae 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1794,6 +1794,12 @@ and is between 256 and 4096 characters. It is defined in the file
 			purges which is reported from either PAL_VM_SUMMARY or
 			SAL PALO.
 
+	nr_cpus=	[SMP] Maximum number of processors that an SMP kernel
+			could support. nr_cpus=n : n >= 1 limits the kernel to
+			supporting 'n' processors. Later in runtime you can not
+			use hotplug cpu feature to put more cpu back to online.
+			just like you compile the kernel NR_CPUS=n
+
 	nr_uarts=	[SERIAL] maximum number of UARTs to be registered.
 
 	numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
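Illustration (not part of the patch): booting with, say, nr_cpus=4 lowers nr_cpu_ids to 4 at early-parameter time, so later possible-CPU sizing uses that ceiling; CONFIG_NR_CPUS remains the compile-time hard maximum, and CPU hotplug cannot exceed the nr_cpus= value afterwards. Unlike maxcpus=, which only limits how many CPUs are brought up at boot, nr_cpus= is a hard cap. The handler itself is the nrcpus() early_param added to init/main.c further down in this diff.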
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
index b8370c8b6198..baa74c82aa71 100644
--- a/arch/ia64/include/asm/xen/events.h
+++ b/arch/ia64/include/asm/xen/events.h
@@ -36,10 +36,6 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
 	return !(ia64_psr(regs)->i);
 }
 
-static inline void handle_irq(int irq, struct pt_regs *regs)
-{
-	__do_IRQ(irq);
-}
 #define irq_ctx_init(cpu)	do { } while (0)
 
 #endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index c16fb03037d4..a7ca07f3754e 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -852,8 +852,8 @@ __init void prefill_possible_map(void)
 
 	possible = available_cpus + additional_cpus;
 
-	if (possible > NR_CPUS)
-		possible = NR_CPUS;
+	if (possible > nr_cpu_ids)
+		possible = nr_cpu_ids;
 
 	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
 		possible, max((possible - available_cpus), 0));
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 9046e4af66ce..280c019cfad8 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -327,7 +327,6 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	current->mm->free_area_cache = TASK_UNMAPPED_BASE;
 	current->mm->cached_hole_size = 0;
 
-	current->mm->mmap = NULL;
 	install_exec_creds(bprm);
 	current->flags &= ~PF_FORKNOEXEC;
 
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 58d7091eeb1f..7ec65b18085d 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -24,7 +24,7 @@ extern unsigned int cached_irq_mask;
 #define SLAVE_ICW4_DEFAULT	0x01
 #define PIC_ICW4_AEOI		2
 
-extern spinlock_t i8259A_lock;
+extern raw_spinlock_t i8259A_lock;
 
 extern void init_8259A(int auto_eoi);
 extern void enable_8259A_irq(unsigned int irq);
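Illustration (not part of the patch): every lock converted in this series follows the same raw_spinlock pattern, which stays a true spinning lock even where the regular spinlock_t may become sleepable (e.g. on PREEMPT_RT). A minimal sketch, with demo_lock and demo_critical_section() being hypothetical names:

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_lock);		/* hypothetical lock */

	static void demo_critical_section(void)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&demo_lock, flags);
		/* low-level hardware access that must not sleep */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
	}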
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 7c7c16cde1f8..5f61f6e0ffdd 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -160,6 +160,7 @@ extern int io_apic_get_redir_entries(int ioapic);
 struct io_apic_irq_attr;
 extern int io_apic_set_pci_routing(struct device *dev, int irq,
 		 struct io_apic_irq_attr *irq_attr);
+void setup_IO_APIC_irq_extra(u32 gsi);
 extern int (*ioapic_renumber_irq)(int ioapic, int irq);
 extern void ioapic_init_mappings(void);
 extern void ioapic_insert_resources(void);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 5458380b6ef8..262292729fc4 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -48,5 +48,6 @@ extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
 extern int vector_used_by_percpu_irq(unsigned int vector);
 
 extern void init_ISA_irqs(void);
+extern int nr_legacy_irqs;
 
 #endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4611f085cd43..8767d99c4f64 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -28,28 +28,33 @@
 #define MCE_VECTOR			0x12
 
 /*
- * IDT vectors usable for external interrupt sources start
- * at 0x20:
+ * IDT vectors usable for external interrupt sources start at 0x20.
+ * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
  */
 #define FIRST_EXTERNAL_VECTOR		0x20
-
-#ifdef CONFIG_X86_32
-# define SYSCALL_VECTOR			0x80
-# define IA32_SYSCALL_VECTOR		0x80
-#else
-# define IA32_SYSCALL_VECTOR		0x80
-#endif
+/*
+ * We start allocating at 0x21 to spread out vectors evenly between
+ * priority levels. (0x80 is the syscall vector)
+ */
+#define VECTOR_OFFSET_START		1
 
 /*
- * Reserve the lowest usable priority level 0x20 - 0x2f for triggering
- * cleanup after irq migration.
+ * Reserve the lowest usable vector (and hence lowest priority) 0x20 for
+ * triggering cleanup after irq migration. 0x21-0x2f will still be used
+ * for device interrupts.
  */
 #define IRQ_MOVE_CLEANUP_VECTOR		FIRST_EXTERNAL_VECTOR
 
+#define IA32_SYSCALL_VECTOR		0x80
+#ifdef CONFIG_X86_32
+# define SYSCALL_VECTOR			0x80
+#endif
+
 /*
  * Vectors 0x30-0x3f are used for ISA interrupts.
+ *   round up to the next 16-vector boundary
  */
-#define IRQ0_VECTOR			(FIRST_EXTERNAL_VECTOR + 0x10)
+#define IRQ0_VECTOR			((FIRST_EXTERNAL_VECTOR + 16) & ~15)
 
 #define IRQ1_VECTOR			(IRQ0_VECTOR + 1)
 #define IRQ2_VECTOR			(IRQ0_VECTOR + 2)
@@ -120,13 +125,6 @@
  */
 #define MCE_SELF_VECTOR			0xeb
 
-/*
- * First APIC vector available to drivers: (vectors 0x30-0xee) we
- * start at 0x31(0x41) to spread out vectors evenly between priority
- * levels. (0x80 is the syscall vector)
- */
-#define FIRST_DEVICE_VECTOR		(IRQ15_VECTOR + 2)
-
 #define NR_VECTORS			256
 
 #define FPU_IRQ				13
@@ -154,21 +152,21 @@ static inline int invalid_vm86_irq(int irq)
 
 #define NR_IRQS_LEGACY			16
 
-#define CPU_VECTOR_LIMIT		( 8 * NR_CPUS )
 #define IO_APIC_VECTOR_LIMIT		( 32 * MAX_IO_APICS )
 
 #ifdef CONFIG_X86_IO_APIC
 # ifdef CONFIG_SPARSE_IRQ
+#  define CPU_VECTOR_LIMIT		(64 * NR_CPUS)
 #  define NR_IRQS					\
 	(CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?	\
 		(NR_VECTORS + CPU_VECTOR_LIMIT)  :	\
 		(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
 # else
-#  if NR_CPUS < MAX_IO_APICS
-#   define NR_IRQS			(NR_VECTORS + 4*CPU_VECTOR_LIMIT)
-#  else
-#   define NR_IRQS			(NR_VECTORS + IO_APIC_VECTOR_LIMIT)
-#  endif
+#  define CPU_VECTOR_LIMIT		(32 * NR_CPUS)
+#  define NR_IRQS					\
+	(CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ?	\
+		(NR_VECTORS + CPU_VECTOR_LIMIT)  :	\
+		(NR_VECTORS + IO_APIC_VECTOR_LIMIT))
 # endif
 #else /* !CONFIG_X86_IO_APIC: */
 # define NR_IRQS			NR_IRQS_LEGACY
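Illustration (not part of the patch): a worked expansion of the new vector-layout macros as a small standalone C check:

	#include <assert.h>

	#define FIRST_EXTERNAL_VECTOR	0x20
	#define VECTOR_OFFSET_START	1
	#define IRQ0_VECTOR		((FIRST_EXTERNAL_VECTOR + 16) & ~15)

	int main(void)
	{
		/* 0x20 + 16 = 0x30, already on a 16-vector boundary */
		assert(IRQ0_VECTOR == 0x30);
		/* device allocation now starts at 0x21, right after the
		 * IRQ_MOVE_CLEANUP_VECTOR at 0x20 */
		assert(FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START == 0x21);
		return 0;
	}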
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index f95703098f8d..738fcb60e708 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -447,6 +447,12 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
 {
 	*irq = gsi;
+
+#ifdef CONFIG_X86_IO_APIC
+	if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
+		setup_IO_APIC_irq_extra(gsi);
+#endif
+
 	return 0;
 }
 
@@ -474,7 +480,8 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
 		plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
 	}
 #endif
-	acpi_gsi_to_irq(plat_gsi, &irq);
+	irq = plat_gsi;
+
 	return irq;
 }
 
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 6bdd2c7ead75..14862f11cc4a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -73,8 +73,8 @@
  */
 int sis_apic_bug = -1;
 
-static DEFINE_SPINLOCK(ioapic_lock);
-static DEFINE_SPINLOCK(vector_lock);
+static DEFINE_RAW_SPINLOCK(ioapic_lock);
+static DEFINE_RAW_SPINLOCK(vector_lock);
 
 /*
  * # of IRQ routing registers
@@ -94,8 +94,6 @@ struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
 /* # of MP IRQ source entries */
 int mp_irq_entries;
 
-/* Number of legacy interrupts */
-static int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
 /* GSI interrupts */
 static int nr_irqs_gsi = NR_IRQS_LEGACY;
 
@@ -140,27 +138,10 @@ static struct irq_pin_list *get_one_free_irq_2_pin(int node)
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
 #ifdef CONFIG_SPARSE_IRQ
-static struct irq_cfg irq_cfgx[] = {
+static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
 #else
-static struct irq_cfg irq_cfgx[NR_IRQS] = {
+static struct irq_cfg irq_cfgx[NR_IRQS];
 #endif
-	[0]  = { .vector = IRQ0_VECTOR,  },
-	[1]  = { .vector = IRQ1_VECTOR,  },
-	[2]  = { .vector = IRQ2_VECTOR,  },
-	[3]  = { .vector = IRQ3_VECTOR,  },
-	[4]  = { .vector = IRQ4_VECTOR,  },
-	[5]  = { .vector = IRQ5_VECTOR,  },
-	[6]  = { .vector = IRQ6_VECTOR,  },
-	[7]  = { .vector = IRQ7_VECTOR,  },
-	[8]  = { .vector = IRQ8_VECTOR,  },
-	[9]  = { .vector = IRQ9_VECTOR,  },
-	[10] = { .vector = IRQ10_VECTOR, },
-	[11] = { .vector = IRQ11_VECTOR, },
-	[12] = { .vector = IRQ12_VECTOR, },
-	[13] = { .vector = IRQ13_VECTOR, },
-	[14] = { .vector = IRQ14_VECTOR, },
-	[15] = { .vector = IRQ15_VECTOR, },
-};
 
 void __init io_apic_disable_legacy(void)
 {
@@ -185,8 +166,14 @@ int __init arch_early_irq_init(void)
 		desc->chip_data = &cfg[i];
 		zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
 		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
-		if (i < nr_legacy_irqs)
-			cpumask_setall(cfg[i].domain);
+		/*
+		 * For legacy IRQ's, start with assigning irq0 to irq15 to
+		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
+		 */
+		if (i < nr_legacy_irqs) {
+			cfg[i].vector = IRQ0_VECTOR + i;
+			cpumask_set_cpu(0, cfg[i].domain);
+		}
 	}
 
 	return 0;
@@ -406,7 +393,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
 	struct irq_pin_list *entry;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	for_each_irq_pin(entry, cfg->irq_2_pin) {
 		unsigned int reg;
 		int pin;
@@ -415,11 +402,11 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
 		reg = io_apic_read(entry->apic, 0x10 + pin*2);
 		/* Is the remote IRR bit set? */
 		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
-			spin_unlock_irqrestore(&ioapic_lock, flags);
+			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 			return true;
 		}
 	}
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return false;
 }
@@ -433,10 +420,10 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
 {
 	union entry_union eu;
 	unsigned long flags;
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
 	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 	return eu.entry;
 }
 
@@ -459,9 +446,9 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 {
 	unsigned long flags;
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__ioapic_write_entry(apic, pin, e);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 /*
@@ -474,10 +461,10 @@ static void ioapic_mask_entry(int apic, int pin)
 	unsigned long flags;
 	union entry_union eu = { .entry.mask = 1 };
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
 	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 /*
@@ -604,9 +591,9 @@ static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
 
 	BUG_ON(!cfg);
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__mask_IO_APIC_irq(cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
@@ -614,9 +601,9 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
 	struct irq_cfg *cfg = desc->chip_data;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__unmask_IO_APIC_irq(cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 static void mask_IO_APIC_irq(unsigned int irq)
@@ -1140,12 +1127,12 @@ void lock_vector_lock(void)
 	/* Used to the online set of cpus does not change
 	 * during assign_irq_vector.
 	 */
-	spin_lock(&vector_lock);
+	raw_spin_lock(&vector_lock);
 }
 
 void unlock_vector_lock(void)
 {
-	spin_unlock(&vector_lock);
+	raw_spin_unlock(&vector_lock);
 }
 
 static int
@@ -1162,7 +1149,8 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	 * Also, we've got to be careful not to trash gate
 	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
 	 */
-	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
+	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+	static int current_offset = VECTOR_OFFSET_START % 8;
 	unsigned int old_vector;
 	int cpu, err;
 	cpumask_var_t tmp_mask;
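Illustration (not part of the patch): the allocator walks candidate vectors in strides of eight (the stepping itself is outside the lines shown here, so treat that as an assumption), which is what "spread out vectors evenly between priority levels" refers to. A standalone sketch of the resulting search order:

	#include <stdio.h>

	#define FIRST_EXTERNAL_VECTOR	0x20
	#define VECTOR_OFFSET_START	1
	#define FIRST_SYSTEM_VECTOR	0xef	/* illustrative value */

	int main(void)
	{
		int vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
		int offset = VECTOR_OFFSET_START % 8;
		int i;

		for (i = 0; i < 10; i++) {
			printf("candidate vector 0x%02x\n", vector);
			vector += 8;
			if (vector >= FIRST_SYSTEM_VECTOR) {
				offset = (offset + 1) % 8;
				vector = FIRST_EXTERNAL_VECTOR + offset;
			}
		}
		return 0;
	}

This prints 0x21, 0x29, 0x31, 0x39, ..., so allocations are spread across the usable vector space instead of being packed into the lowest priority class.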
@@ -1198,7 +1186,7 @@ next:
 		if (vector >= first_system_vector) {
 			/* If out of vectors on large boxen, must share them. */
 			offset = (offset + 1) % 8;
-			vector = FIRST_DEVICE_VECTOR + offset;
+			vector = FIRST_EXTERNAL_VECTOR + offset;
 		}
 		if (unlikely(current_vector == vector))
 			continue;
@@ -1232,9 +1220,9 @@ int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	int err;
 	unsigned long flags;
 
-	spin_lock_irqsave(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	err = __assign_irq_vector(irq, cfg, mask);
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 	return err;
 }
 
@@ -1268,11 +1256,16 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
 void __setup_vector_irq(int cpu)
 {
 	/* Initialize vector_irq on a new cpu */
-	/* This function must be called with vector_lock held */
 	int irq, vector;
 	struct irq_cfg *cfg;
 	struct irq_desc *desc;
 
+	/*
+	 * vector_lock will make sure that we don't run into irq vector
+	 * assignments that might be happening on another cpu in parallel,
+	 * while we setup our initial vector to irq mappings.
+	 */
+	raw_spin_lock(&vector_lock);
 	/* Mark the inuse vectors */
 	for_each_irq_desc(irq, desc) {
 		cfg = desc->chip_data;
@@ -1291,6 +1284,7 @@ void __setup_vector_irq(int cpu)
 		if (!cpumask_test_cpu(cpu, cfg->domain))
 			per_cpu(vector_irq, cpu)[vector] = -1;
 	}
+	raw_spin_unlock(&vector_lock);
 }
 
 static struct irq_chip ioapic_chip;
@@ -1440,6 +1434,14 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
 
 	cfg = desc->chip_data;
 
+	/*
+	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
+	 * controllers like 8259. Now that IO-APIC can handle this irq, update
+	 * the cfg->domain.
+	 */
+	if (irq < nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
+		apic->vector_allocation_domain(0, cfg->domain);
+
 	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
 		return;
 
@@ -1473,7 +1475,7 @@ static struct {
 
 static void __init setup_IO_APIC_irqs(void)
 {
-	int apic_id = 0, pin, idx, irq;
+	int apic_id, pin, idx, irq;
 	int notcon = 0;
 	struct irq_desc *desc;
 	struct irq_cfg *cfg;
@@ -1481,14 +1483,7 @@ static void __init setup_IO_APIC_irqs(void)
 
 	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
 
-#ifdef CONFIG_ACPI
-	if (!acpi_disabled && acpi_ioapic) {
-		apic_id = mp_find_ioapic(0);
-		if (apic_id < 0)
-			apic_id = 0;
-	}
-#endif
-
+	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
 	for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
 		idx = find_irq_entry(apic_id, pin, mp_INT);
 		if (idx == -1) {
@@ -1510,6 +1505,9 @@ static void __init setup_IO_APIC_irqs(void)
 
 		irq = pin_2_irq(idx, apic_id, pin);
 
+		if ((apic_id > 0) && (irq > 16))
+			continue;
+
 		/*
 		 * Skip the timer IRQ if there's a quirk handler
 		 * installed and if it returns 1:
@@ -1539,6 +1537,56 @@ static void __init setup_IO_APIC_irqs(void)
 }
 
 /*
+ * for the gsit that is not in first ioapic
+ * but could not use acpi_register_gsi()
+ * like some special sci in IBM x3330
+ */
+void setup_IO_APIC_irq_extra(u32 gsi)
+{
+	int apic_id = 0, pin, idx, irq;
+	int node = cpu_to_node(boot_cpu_id);
+	struct irq_desc *desc;
+	struct irq_cfg *cfg;
+
+	/*
+	 * Convert 'gsi' to 'ioapic.pin'.
+	 */
+	apic_id = mp_find_ioapic(gsi);
+	if (apic_id < 0)
+		return;
+
+	pin = mp_find_ioapic_pin(apic_id, gsi);
+	idx = find_irq_entry(apic_id, pin, mp_INT);
+	if (idx == -1)
+		return;
+
+	irq = pin_2_irq(idx, apic_id, pin);
+#ifdef CONFIG_SPARSE_IRQ
+	desc = irq_to_desc(irq);
+	if (desc)
+		return;
+#endif
+	desc = irq_to_desc_alloc_node(irq, node);
+	if (!desc) {
+		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
+		return;
+	}
+
+	cfg = desc->chip_data;
+	add_pin_to_irq_node(cfg, node, apic_id, pin);
+
+	if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
+		pr_debug("Pin %d-%d already programmed\n",
+			 mp_ioapics[apic_id].apicid, pin);
+		return;
+	}
+	set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
+
+	setup_IO_APIC_irq(apic_id, pin, irq, desc,
+			irq_trigger(idx), irq_polarity(idx));
+}
+
+/*
  * Set up the timer pin, possibly with the 8259A-master behind.
  */
 static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
@@ -1601,14 +1649,14 @@ __apicdebuginit(void) print_IO_APIC(void)
 
 	for (apic = 0; apic < nr_ioapics; apic++) {
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(apic, 0);
 	reg_01.raw = io_apic_read(apic, 1);
 	if (reg_01.bits.version >= 0x10)
 		reg_02.raw = io_apic_read(apic, 2);
 	if (reg_01.bits.version >= 0x20)
 		reg_03.raw = io_apic_read(apic, 3);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	printk("\n");
 	printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
@@ -1830,7 +1878,7 @@ __apicdebuginit(void) print_PIC(void)
 
 	printk(KERN_DEBUG "\nprinting PIC contents\n");
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	v = inb(0xa1) << 8 | inb(0x21);
 	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
@@ -1844,7 +1892,7 @@ __apicdebuginit(void) print_PIC(void)
 	outb(0x0a,0xa0);
 	outb(0x0a,0x20);
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
 
@@ -1903,9 +1951,9 @@ void __init enable_IO_APIC(void)
 	 * The number of IO-APIC IRQ registers (== #pins):
 	 */
 	for (apic = 0; apic < nr_ioapics; apic++) {
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		reg_01.raw = io_apic_read(apic, 1);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
 	}
 
@@ -2045,9 +2093,9 @@ void __init setup_ioapic_ids_from_mpc(void)
 	for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
 
 		/* Read the register 0 value */
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		reg_00.raw = io_apic_read(apic_id, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 		old_id = mp_ioapics[apic_id].apicid;
 
@@ -2106,16 +2154,16 @@ void __init setup_ioapic_ids_from_mpc(void)
 			mp_ioapics[apic_id].apicid);
 
 		reg_00.bits.ID = mp_ioapics[apic_id].apicid;
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		io_apic_write(apic_id, 0, reg_00.raw);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 		/*
 		 * Sanity check
 		 */
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		reg_00.raw = io_apic_read(apic_id, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 		if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
 			printk("could not set ID!\n");
 		else
@@ -2198,7 +2246,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 	unsigned long flags;
 	struct irq_cfg *cfg;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	if (irq < nr_legacy_irqs) {
 		disable_8259A_irq(irq);
 		if (i8259A_irq_pending(irq))
@@ -2206,7 +2254,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
 	}
 	cfg = irq_cfg(irq);
 	__unmask_IO_APIC_irq(cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return was_pending;
 }
@@ -2217,9 +2265,9 @@ static int ioapic_retrigger_irq(unsigned int irq)
 	struct irq_cfg *cfg = irq_cfg(irq);
 	unsigned long flags;
 
-	spin_lock_irqsave(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
 }
@@ -2312,14 +2360,14 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
 	irq = desc->irq;
 	cfg = desc->chip_data;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	ret = set_desc_affinity(desc, mask, &dest);
 	if (!ret) {
 		/* Only the high 8 bits are valid. */
 		dest = SET_APIC_LOGICAL_ID(dest);
 		__target_IO_APIC_irq(irq, dest, cfg);
 	}
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return ret;
 }
@@ -2554,9 +2602,9 @@ static void eoi_ioapic_irq(struct irq_desc *desc)
 	irq = desc->irq;
 	cfg = desc->chip_data;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	__eoi_ioapic_irq(irq, cfg);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
 static void ack_apic_level(unsigned int irq)
@@ -3138,13 +3186,13 @@ static int ioapic_resume(struct sys_device *dev)
 	data = container_of(dev, struct sysfs_ioapic_data, dev);
 	entry = data->entry;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(dev->id, 0);
 	if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
 		reg_00.bits.ID = mp_ioapics[dev->id].apicid;
 		io_apic_write(dev->id, 0, reg_00.raw);
 	}
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
 		ioapic_write_entry(dev->id, i, entry[i]);
 
@@ -3207,7 +3255,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
 	if (irq_want < nr_irqs_gsi)
 		irq_want = nr_irqs_gsi;
 
-	spin_lock_irqsave(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	for (new = irq_want; new < nr_irqs; new++) {
 		desc_new = irq_to_desc_alloc_node(new, node);
 		if (!desc_new) {
@@ -3226,14 +3274,11 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
 		irq = new;
 		break;
 	}
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
+
+	if (irq > 0)
+		dynamic_irq_init_keep_chip_data(irq);
 
-	if (irq > 0) {
-		dynamic_irq_init(irq);
-		/* restore it, in case dynamic_irq_init clear it */
-		if (desc_new)
-			desc_new->chip_data = cfg_new;
-	}
 	return irq;
 }
 
@@ -3255,20 +3300,13 @@ int create_irq(void)
 void destroy_irq(unsigned int irq)
 {
 	unsigned long flags;
-	struct irq_cfg *cfg;
-	struct irq_desc *desc;
 
-	/* store it, in case dynamic_irq_cleanup clear it */
-	desc = irq_to_desc(irq);
-	cfg = desc->chip_data;
-	dynamic_irq_cleanup(irq);
-	/* connect back irq_cfg */
-	desc->chip_data = cfg;
+	dynamic_irq_cleanup_keep_chip_data(irq);
 
 	free_irte(irq);
-	spin_lock_irqsave(&vector_lock, flags);
-	__clear_irq_vector(irq, cfg);
-	spin_unlock_irqrestore(&vector_lock, flags);
+	raw_spin_lock_irqsave(&vector_lock, flags);
+	__clear_irq_vector(irq, get_irq_chip_data(irq));
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 /*
@@ -3805,9 +3843,9 @@ int __init io_apic_get_redir_entries (int ioapic)
 	union IO_APIC_reg_01	reg_01;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_01.raw = io_apic_read(ioapic, 1);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return reg_01.bits.entries;
 }
@@ -3969,9 +4007,9 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
 	if (physids_empty(apic_id_map))
 		apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_00.raw = io_apic_read(ioapic, 0);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	if (apic_id >= get_physical_broadcast()) {
 		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
@@ -4005,10 +4043,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
 	if (reg_00.bits.ID != apic_id) {
 		reg_00.bits.ID = apic_id;
 
-		spin_lock_irqsave(&ioapic_lock, flags);
+		raw_spin_lock_irqsave(&ioapic_lock, flags);
 		io_apic_write(ioapic, 0, reg_00.raw);
 		reg_00.raw = io_apic_read(ioapic, 0);
-		spin_unlock_irqrestore(&ioapic_lock, flags);
+		raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 		/* Sanity check */
 		if (reg_00.bits.ID != apic_id) {
@@ -4029,9 +4067,9 @@ int __init io_apic_get_version(int ioapic)
 	union IO_APIC_reg_01	reg_01;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ioapic_lock, flags);
+	raw_spin_lock_irqsave(&ioapic_lock, flags);
 	reg_01.raw = io_apic_read(ioapic, 1);
-	spin_unlock_irqrestore(&ioapic_lock, flags);
+	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	return reg_01.bits.version;
 }
@@ -4063,27 +4101,23 @@ int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
 #ifdef CONFIG_SMP
 void __init setup_ioapic_dest(void)
 {
-	int pin, ioapic = 0, irq, irq_entry;
+	int pin, ioapic, irq, irq_entry;
 	struct irq_desc *desc;
 	const struct cpumask *mask;
 
 	if (skip_ioapic_setup == 1)
 		return;
 
-#ifdef CONFIG_ACPI
-	if (!acpi_disabled && acpi_ioapic) {
-		ioapic = mp_find_ioapic(0);
-		if (ioapic < 0)
-			ioapic = 0;
-	}
-#endif
-
+	for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
 	for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
 		irq_entry = find_irq_entry(ioapic, pin, mp_INT);
 		if (irq_entry == -1)
 			continue;
 		irq = pin_2_irq(irq_entry, ioapic, pin);
 
+		if ((ioapic > 0) && (irq > 16))
+			continue;
+
 		desc = irq_to_desc(irq);
 
 		/*
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 4ada42c3dabb..bd7c96b5e8d8 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -416,13 +416,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 
 	/* We can be called before check_nmi_watchdog, hence NULL check. */
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
-		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
+		static DEFINE_RAW_SPINLOCK(lock); /* Serialise the printks */
 
-		spin_lock(&lock);
+		raw_spin_lock(&lock);
 		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
 		show_regs(regs);
 		dump_stack();
-		spin_unlock(&lock);
+		raw_spin_unlock(&lock);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 
 		rc = 1;
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index df89102bef80..8c93a84bb627 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -32,7 +32,7 @@
  */
 
 static int i8259A_auto_eoi;
-DEFINE_SPINLOCK(i8259A_lock);
+DEFINE_RAW_SPINLOCK(i8259A_lock);
 static void mask_and_ack_8259A(unsigned int);
 
 struct irq_chip i8259A_chip = {
@@ -68,13 +68,13 @@ void disable_8259A_irq(unsigned int irq)
 	unsigned int mask = 1 << irq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask |= mask;
 	if (irq & 8)
 		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
 		outb(cached_master_mask, PIC_MASTER_IMR);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 void enable_8259A_irq(unsigned int irq)
@@ -82,13 +82,13 @@ void enable_8259A_irq(unsigned int irq)
 	unsigned int mask = ~(1 << irq);
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask &= mask;
 	if (irq & 8)
 		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
 		outb(cached_master_mask, PIC_MASTER_IMR);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 int i8259A_irq_pending(unsigned int irq)
@@ -97,12 +97,12 @@ int i8259A_irq_pending(unsigned int irq)
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	if (irq < 8)
 		ret = inb(PIC_MASTER_CMD) & mask;
 	else
 		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	return ret;
 }
@@ -150,7 +150,7 @@ static void mask_and_ack_8259A(unsigned int irq)
 	unsigned int irqmask = 1 << irq;
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	/*
 	 * Lightweight spurious IRQ detection. We do not want
 	 * to overdo spurious IRQ handling - it's usually a sign
@@ -183,7 +183,7 @@ handle_real_irq:
 		outb(cached_master_mask, PIC_MASTER_IMR);
 		outb(0x60+irq, PIC_MASTER_CMD);	/* 'Specific EOI to master */
 	}
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 	return;
 
 spurious_8259A_irq:
@@ -285,24 +285,24 @@ void mask_8259A(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
 	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 void unmask_8259A(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
 	outb(cached_slave_mask, PIC_SLAVE_IMR);	/* restore slave IRQ mask */
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
 void init_8259A(int auto_eoi)
@@ -311,7 +311,7 @@ void init_8259A(int auto_eoi)
 
 	i8259A_auto_eoi = auto_eoi;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
 	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
@@ -356,5 +356,5 @@ void init_8259A(int auto_eoi)
 	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
 	outb(cached_slave_mask, PIC_SLAVE_IMR);	/* restore slave IRQ mask */
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index d5932226614f..fce55d532631 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -84,24 +84,7 @@ static struct irqaction irq2 = {
 };
 
 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
-	[0 ... IRQ0_VECTOR - 1] = -1,
-	[IRQ0_VECTOR] = 0,
-	[IRQ1_VECTOR] = 1,
-	[IRQ2_VECTOR] = 2,
-	[IRQ3_VECTOR] = 3,
-	[IRQ4_VECTOR] = 4,
-	[IRQ5_VECTOR] = 5,
-	[IRQ6_VECTOR] = 6,
-	[IRQ7_VECTOR] = 7,
-	[IRQ8_VECTOR] = 8,
-	[IRQ9_VECTOR] = 9,
-	[IRQ10_VECTOR] = 10,
-	[IRQ11_VECTOR] = 11,
-	[IRQ12_VECTOR] = 12,
-	[IRQ13_VECTOR] = 13,
-	[IRQ14_VECTOR] = 14,
-	[IRQ15_VECTOR] = 15,
-	[IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
+	[0 ... NR_VECTORS - 1] = -1,
 };
 
 int vector_used_by_percpu_irq(unsigned int vector)
@@ -116,6 +99,9 @@ int vector_used_by_percpu_irq(unsigned int vector)
 	return 0;
 }
 
+/* Number of legacy interrupts */
+int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
+
 void __init init_ISA_irqs(void)
 {
 	int i;
@@ -142,6 +128,19 @@ void __init init_ISA_irqs(void)
 
 void __init init_IRQ(void)
 {
+	int i;
+
+	/*
+	 * On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15.
+	 * If these IRQ's are handled by legacy interrupt-controllers like PIC,
+	 * then this configuration will likely be static after the boot. If
+	 * these IRQ's are handled by more mordern controllers like IO-APIC,
+	 * then this vector space can be freed and re-used dynamically as the
+	 * irq's migrate etc.
+	 */
+	for (i = 0; i < nr_legacy_irqs; i++)
+		per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
+
 	x86_init.irqs.intr_init();
 }
 
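Illustration (not part of the patch): with the table above now initialised to all -1 and init_IRQ() filling in only the legacy entries on CPU 0, the boot-time mapping can be recomputed in a few lines of standalone C:

	#include <stdio.h>

	#define NR_VECTORS	256
	#define NR_IRQS_LEGACY	16
	#define IRQ0_VECTOR	0x30	/* ((0x20 + 16) & ~15), see irq_vectors.h above */

	int main(void)
	{
		int vector_irq[NR_VECTORS];
		int i;

		for (i = 0; i < NR_VECTORS; i++)
			vector_irq[i] = -1;		/* [0 ... NR_VECTORS - 1] = -1 */
		for (i = 0; i < NR_IRQS_LEGACY; i++)
			vector_irq[IRQ0_VECTOR + i] = i; /* what init_IRQ() now does for cpu 0 */

		for (i = IRQ0_VECTOR; i < IRQ0_VECTOR + NR_IRQS_LEGACY; i++)
			printf("vector 0x%02x -> irq %d\n", i, vector_irq[i]);
		return 0;
	}

So vectors 0x30-0x3f map to IRQs 0-15 on the boot CPU, and everything else starts out unassigned.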
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 704bddcdf64d..8e1aac86b50c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -461,6 +461,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
 		},
 	},
+	{	/* Handle problems with rebooting on the iMac9,1. */
+		.callback = set_pci_reboot,
+		.ident = "Apple iMac9,1",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9b4401115ea1..a435c76d714e 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -241,6 +241,11 @@ static void __cpuinit smp_callin(void)
 	map_cpu_to_logical_apicid();
 
 	notify_cpu_starting(cpuid);
+
+	/*
+	 * Need to setup vector mappings before we enable interrupts.
+	 */
+	__setup_vector_irq(smp_processor_id());
 	/*
 	 * Get our bogomips.
 	 *
@@ -315,7 +320,6 @@ notrace static void __cpuinit start_secondary(void *unused)
 	 */
 	ipi_call_lock();
 	lock_vector_lock();
-	__setup_vector_irq(smp_processor_id());
 	set_cpu_online(smp_processor_id(), true);
 	unlock_vector_lock();
 	ipi_call_unlock();
@@ -1212,11 +1216,12 @@ __init void prefill_possible_map(void)
 
 	total_cpus = max_t(int, possible, num_processors + disabled_cpus);
 
-	if (possible > CONFIG_NR_CPUS) {
+	/* nr_cpu_ids could be reduced via nr_cpus= */
+	if (possible > nr_cpu_ids) {
 		printk(KERN_WARNING
 			"%d Processors exceeds NR_CPUS limit of %d\n",
-			possible, CONFIG_NR_CPUS);
-		possible = CONFIG_NR_CPUS;
+			possible, nr_cpu_ids);
+		possible = nr_cpu_ids;
 	}
 
 	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index be2573448ed9..fb5cc5e14cfa 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -70,11 +70,11 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
 		 * manually to deassert NMI lines for the watchdog if run
 		 * on an 82489DX-based system.
 		 */
-		spin_lock(&i8259A_lock);
+		raw_spin_lock(&i8259A_lock);
 		outb(0x0c, PIC_MASTER_OCW3);
 		/* Ack the IRQ; AEOI will end it automatically. */
 		inb(PIC_MASTER_POLL);
-		spin_unlock(&i8259A_lock);
+		raw_spin_unlock(&i8259A_lock);
 	}
 
 	global_clock_event->event_handler(global_clock_event);
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 34a279a7471d..ab38ce0984fa 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -559,7 +559,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 	struct irq_desc *desc;
 	unsigned long flags;
 
-	spin_lock_irqsave(&i8259A_lock, flags);
+	raw_spin_lock_irqsave(&i8259A_lock, flags);
 
 	/* Find out what's interrupting in the PIIX4 master 8259 */
 	outb(0x0c, 0x20);		/* OCW3 Poll command */
@@ -596,7 +596,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 		outb(0x60 + realirq, 0x20);
 	}
 
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	desc = irq_to_desc(realirq);
 
@@ -614,7 +614,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 
 out_unlock:
-	spin_unlock_irqrestore(&i8259A_lock, flags);
+	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 	return IRQ_NONE;
 }
 
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 74c92bb194df..2f1ca5614292 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -79,11 +79,7 @@ unsigned long vmi_tsc_khz(void)
 
 static inline unsigned int vmi_get_timer_vector(void)
 {
-#ifdef CONFIG_X86_IO_APIC
-	return FIRST_DEVICE_VECTOR;
-#else
-	return FIRST_EXTERNAL_VECTOR;
-#endif
+	return IRQ0_VECTOR;
 }
 
 /** vmi clockchip */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 7ad48dfc12db..b8725461d887 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -279,9 +279,9 @@ int __init acpi_numa_init(void)
 	/* SRAT: Static Resource Affinity Table */
 	if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
-				     acpi_parse_x2apic_affinity, NR_CPUS);
+				     acpi_parse_x2apic_affinity, nr_cpu_ids);
 		acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
-				     acpi_parse_processor_affinity, NR_CPUS);
+				     acpi_parse_processor_affinity, nr_cpu_ids);
 		ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
 					    acpi_parse_memory_affinity,
 					    NR_NODE_MEMBLKS);
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index ce602dd09bc1..2f8413794d05 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -649,9 +649,13 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 			int bit_idx = __ffs(pending_bits);
 			int port = (word_idx * BITS_PER_LONG) + bit_idx;
 			int irq = evtchn_to_irq[port];
+			struct irq_desc *desc;
 
-			if (irq != -1)
-				handle_irq(irq, regs);
+			if (irq != -1) {
+				desc = irq_to_desc(irq);
+				if (desc)
+					generic_handle_irq_desc(irq, desc);
+			}
 		}
 	}
 
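The xen upcall change above swaps the direct handle_irq() call for an explicit descriptor lookup, so an event arriving on a port whose irq has no irq_desc is simply dropped. Below is a rough user-space sketch of that guarded-dispatch shape; the struct, the irq_to_desc() stub, and the irq numbers are stand-ins invented for illustration, not the kernel's types.

/* User-space sketch of the guarded dispatch above: resolve the descriptor
 * first and only call a handler when one exists. All types are stand-ins. */
#include <stdio.h>

struct irq_desc {
	void (*handle)(unsigned int irq);
};

static void timer_handler(unsigned int irq)
{
	printf("handled irq %u\n", irq);
}

static struct irq_desc timer_desc = { .handle = timer_handler };

static struct irq_desc *irq_to_desc(int irq)
{
	return irq == 7 ? &timer_desc : NULL;	/* only irq 7 is populated */
}

static void do_upcall(int irq)
{
	struct irq_desc *desc;

	if (irq != -1) {
		desc = irq_to_desc(irq);
		if (desc)
			desc->handle(irq);	/* plays the generic_handle_irq_desc() role */
	}
}

int main(void)
{
	do_upcall(7);	/* dispatched */
	do_upcall(9);	/* no descriptor: silently dropped */
	do_upcall(-1);	/* unbound event channel: ignored */
	return 0;
}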
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d13492df57a1..707ab122e2e6 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -400,7 +400,9 @@ static inline int irq_has_action(unsigned int irq)
 
 /* Dynamic irq helper functions */
 extern void dynamic_irq_init(unsigned int irq);
+void dynamic_irq_init_keep_chip_data(unsigned int irq);
 extern void dynamic_irq_cleanup(unsigned int irq);
+void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
 
 /* Set/get chip/data for an IRQ: */
 extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
diff --git a/init/main.c b/init/main.c
index c75dcd6eef09..18098153c331 100644
--- a/init/main.c
+++ b/init/main.c
@@ -149,6 +149,20 @@ static int __init nosmp(char *str)
 
 early_param("nosmp", nosmp);
 
+/* this is hard limit */
+static int __init nrcpus(char *str)
+{
+	int nr_cpus;
+
+	get_option(&str, &nr_cpus);
+	if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
+		nr_cpu_ids = nr_cpus;
+
+	return 0;
+}
+
+early_param("nr_cpus", nrcpus);
+
 static int __init maxcpus(char *str)
 {
 	get_option(&str, &setup_max_cpus);
@@ -586,6 +600,7 @@ asmlinkage void __init start_kernel(void)
 		local_irq_disable();
 	}
 	rcu_init();
+	radix_tree_init();
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
 	init_IRQ();
@@ -659,7 +674,6 @@ asmlinkage void __init start_kernel(void)
 	proc_caches_init();
 	buffer_init();
 	key_init();
-	radix_tree_init();
 	security_init();
 	vfs_caches_init(totalram_pages);
 	signals_init();
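For reference, the nr_cpus= handler added above only ever lowers nr_cpu_ids, and only when the parsed value is positive. Here is a minimal user-space sketch of that clamp, assuming a default of 64 possible CPUs and using a naive strstr()/atoi() scan in place of the kernel's early_param()/get_option() machinery; both of those are assumptions for illustration, not kernel code.

/* Minimal sketch of the nr_cpus= clamp: parse "nr_cpus=<n>" and lower
 * nr_cpu_ids only when 0 < n < current value. nr_cpu_ids and the command
 * line scan are stand-ins, not the kernel symbols. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned int nr_cpu_ids = 64;	/* pretend CONFIG_NR_CPUS=64 */

static int nrcpus(const char *str)
{
	int nr_cpus = atoi(str);

	if (nr_cpus > 0 && (unsigned int)nr_cpus < nr_cpu_ids)
		nr_cpu_ids = nr_cpus;
	return 0;
}

int main(void)
{
	const char *cmdline = "quiet nr_cpus=4 nosmp";
	const char *p = strstr(cmdline, "nr_cpus=");

	if (p)
		nrcpus(p + strlen("nr_cpus="));
	printf("nr_cpu_ids = %u\n", nr_cpu_ids);	/* prints 4 */
	return 0;
}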
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ecc3fa28f666..d70394f12ee9 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -18,11 +18,7 @@
 
 #include "internals.h"
 
-/**
- * dynamic_irq_init - initialize a dynamically allocated irq
- * @irq: irq number to initialize
- */
-void dynamic_irq_init(unsigned int irq)
+static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
 {
 	struct irq_desc *desc;
 	unsigned long flags;
@@ -41,7 +37,8 @@ void dynamic_irq_init(unsigned int irq)
 	desc->depth = 1;
 	desc->msi_desc = NULL;
 	desc->handler_data = NULL;
-	desc->chip_data = NULL;
+	if (!keep_chip_data)
+		desc->chip_data = NULL;
 	desc->action = NULL;
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
@@ -55,10 +52,26 @@ void dynamic_irq_init(unsigned int irq)
 }
 
 /**
- * dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ * dynamic_irq_init - initialize a dynamically allocated irq
  * @irq: irq number to initialize
  */
-void dynamic_irq_cleanup(unsigned int irq)
+void dynamic_irq_init(unsigned int irq)
+{
+	dynamic_irq_init_x(irq, false);
+}
+
+/**
+ * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
+ * @irq: irq number to initialize
+ *
+ * does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_init_keep_chip_data(unsigned int irq)
+{
+	dynamic_irq_init_x(irq, true);
+}
+
+static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -77,7 +90,8 @@ void dynamic_irq_cleanup(unsigned int irq)
 	}
 	desc->msi_desc = NULL;
 	desc->handler_data = NULL;
-	desc->chip_data = NULL;
+	if (!keep_chip_data)
+		desc->chip_data = NULL;
 	desc->handle_irq = handle_bad_irq;
 	desc->chip = &no_irq_chip;
 	desc->name = NULL;
@@ -85,6 +99,26 @@ void dynamic_irq_cleanup(unsigned int irq)
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
+/**
+ * dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ * @irq: irq number to initialize
+ */
+void dynamic_irq_cleanup(unsigned int irq)
+{
+	dynamic_irq_cleanup_x(irq, false);
+}
+
+/**
+ * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
+ * @irq: irq number to initialize
+ *
+ * does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
+{
+	dynamic_irq_cleanup_x(irq, true);
+}
+
 
 /**
  * set_irq_chip - set the irq chip for an irq
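The chip.c rework above folds both code paths into one static worker keyed by a keep_chip_data flag, with the exported functions reduced to thin wrappers, so create_irq_nr()/destroy_irq() can preserve a previously attached chip_data. A toy user-space sketch of that wrapper pattern follows; the cut-down struct irq_desc and the fixed 16-entry table are stand-ins for the real descriptor management, invented here only to keep the example self-contained.

/* One static worker takes the keep_chip_data flag; the public entry points
 * are thin wrappers. Demonstrates that the *_keep_chip_data variant leaves
 * chip_data untouched while still resetting the other fields. */
#include <stdbool.h>
#include <stdio.h>

struct irq_desc {
	void *chip_data;
	void *handler_data;
};

static struct irq_desc descs[16];

static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
{
	struct irq_desc *desc = &descs[irq];

	desc->handler_data = NULL;
	if (!keep_chip_data)		/* callers like create_irq_nr() want it kept */
		desc->chip_data = NULL;
}

void dynamic_irq_init(unsigned int irq)
{
	dynamic_irq_init_x(irq, false);
}

void dynamic_irq_init_keep_chip_data(unsigned int irq)
{
	dynamic_irq_init_x(irq, true);
}

int main(void)
{
	int cfg = 42;

	descs[5].chip_data = &cfg;
	dynamic_irq_init_keep_chip_data(5);
	printf("chip_data kept: %s\n", descs[5].chip_data ? "yes" : "no");
	return 0;
}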
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 814940e7f485..76d5a671bfe1 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -19,7 +19,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
-#include <linux/bootmem.h>
+#include <linux/radix-tree.h>
 #include <trace/events/irq.h>
 
 #include "internals.h"
@@ -87,12 +87,8 @@ void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
 {
 	void *ptr;
 
-	if (slab_is_available())
-		ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
-				   GFP_ATOMIC, node);
-	else
-		ptr = alloc_bootmem_node(NODE_DATA(node),
-				nr * sizeof(*desc->kstat_irqs));
+	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
+			   GFP_ATOMIC, node);
 
 	/*
 	 * don't overwite if can not get new one
@@ -132,7 +128,26 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
  */
 DEFINE_RAW_SPINLOCK(sparse_irq_lock);
 
-struct irq_desc **irq_desc_ptrs __read_mostly;
+static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
+
+static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+	radix_tree_insert(&irq_desc_tree, irq, desc);
+}
+
+struct irq_desc *irq_to_desc(unsigned int irq)
+{
+	return radix_tree_lookup(&irq_desc_tree, irq);
+}
+
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
+{
+	void **ptr;
+
+	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
+	if (ptr)
+		radix_tree_replace_slot(ptr, desc);
+}
 
 static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS_LEGACY-1] = {
@@ -164,9 +179,6 @@ int __init early_irq_init(void)
 	legacy_count = ARRAY_SIZE(irq_desc_legacy);
 	node = first_online_node;
 
-	/* allocate irq_desc_ptrs array based on nr_irqs */
-	irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
-
 	/* allocate based on nr_cpu_ids */
 	kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
 					  sizeof(int), GFP_NOWAIT, node);
@@ -180,23 +192,12 @@ int __init early_irq_init(void)
 		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
 		alloc_desc_masks(&desc[i], node, true);
 		init_desc_masks(&desc[i]);
-		irq_desc_ptrs[i] = desc + i;
+		set_irq_desc(i, &desc[i]);
 	}
 
-	for (i = legacy_count; i < nr_irqs; i++)
-		irq_desc_ptrs[i] = NULL;
-
 	return arch_early_irq_init();
 }
 
-struct irq_desc *irq_to_desc(unsigned int irq)
-{
-	if (irq_desc_ptrs && irq < nr_irqs)
-		return irq_desc_ptrs[irq];
-
-	return NULL;
-}
-
 struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 {
 	struct irq_desc *desc;
@@ -208,21 +209,18 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 		return NULL;
 	}
 
-	desc = irq_desc_ptrs[irq];
+	desc = irq_to_desc(irq);
 	if (desc)
 		return desc;
 
 	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
 
 	/* We have to check it to avoid races with another CPU */
-	desc = irq_desc_ptrs[irq];
+	desc = irq_to_desc(irq);
 	if (desc)
 		goto out_unlock;
 
-	if (slab_is_available())
-		desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-	else
-		desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
+	desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
 
 	printk(KERN_DEBUG "  alloc irq_desc for %d on node %d\n", irq, node);
 	if (!desc) {
@@ -231,7 +229,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
 	}
 	init_one_irq_desc(irq, desc, node);
 
-	irq_desc_ptrs[irq] = desc;
+	set_irq_desc(irq, desc);
 
 out_unlock:
 	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
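The handle.c changes above drop the nr_irqs-sized irq_desc_ptrs pointer array in favour of a radix tree keyed by irq number, so memory is only committed for slots that are actually populated even when the IRQ number space is sparse. The following toy two-level tree is a user-space illustration of that idea only; it is not lib/radix-tree.c, and the slot sizes are arbitrary assumptions.

/* Toy two-level tree mapping a sparse unsigned key to a pointer: one leaf
 * of 64 slots is allocated per populated 64-key range, instead of a single
 * array sized for the largest possible key. */
#include <stdio.h>
#include <stdlib.h>

#define LEAF_BITS	6
#define LEAF_SLOTS	(1u << LEAF_BITS)	/* 64 slots per leaf */
#define ROOT_SLOTS	1024			/* covers keys below 65536 */

struct leaf {
	void *slots[LEAF_SLOTS];
};

static struct leaf *root[ROOT_SLOTS];

static int tree_insert(unsigned int irq, void *desc)
{
	unsigned int hi = irq >> LEAF_BITS, lo = irq & (LEAF_SLOTS - 1);

	if (hi >= ROOT_SLOTS)
		return -1;
	if (!root[hi]) {
		root[hi] = calloc(1, sizeof(*root[hi]));	/* allocate leaf on demand */
		if (!root[hi])
			return -1;
	}
	root[hi]->slots[lo] = desc;
	return 0;
}

static void *tree_lookup(unsigned int irq)
{
	unsigned int hi = irq >> LEAF_BITS, lo = irq & (LEAF_SLOTS - 1);

	if (hi >= ROOT_SLOTS || !root[hi])
		return NULL;
	return root[hi]->slots[lo];
}

int main(void)
{
	static int desc_a, desc_b;

	tree_insert(3, &desc_a);	/* legacy irq */
	tree_insert(5000, &desc_b);	/* sparse, high irq number */
	printf("irq 3    -> %p\n", tree_lookup(3));
	printf("irq 5000 -> %p\n", tree_lookup(5000));
	printf("irq 4999 -> %p\n", tree_lookup(4999));	/* empty slot: NULL */
	return 0;
}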
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b2821f070a3d..c63f3bc88f0b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -21,11 +21,7 @@ extern void clear_kstat_irqs(struct irq_desc *desc);
 extern raw_spinlock_t sparse_irq_lock;
 
 #ifdef CONFIG_SPARSE_IRQ
-/* irq_desc_ptrs allocated at boot time */
-extern struct irq_desc **irq_desc_ptrs;
-#else
-/* irq_desc_ptrs is a fixed size array */
-extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
+void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
 #endif
 
 #ifdef CONFIG_PROC_FS
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 26bac9d8f860..963559dbd858 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -70,7 +70,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 	raw_spin_lock_irqsave(&sparse_irq_lock, flags);
 
 	/* We have to check it to avoid races with another CPU */
-	desc = irq_desc_ptrs[irq];
+	desc = irq_to_desc(irq);
 
 	if (desc && old_desc != desc)
 		goto out_unlock;
@@ -90,7 +90,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
 		goto out_unlock;
 	}
 
-	irq_desc_ptrs[irq] = desc;
+	replace_irq_desc(irq, desc);
 	raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
 
 	/* free the old one */