aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorH. Peter Anvin <hpa@zytor.com>2010-02-22 19:25:18 -0500
committerH. Peter Anvin <hpa@zytor.com>2010-02-22 19:25:18 -0500
commit54b56170e4517e9606b245c3f805fc96baa059f0 (patch)
tree9f8750ef972b4a7fdce530889dcbaf2a8b5b0d05
parent1f91233c26fd5f7d6525fd29b95e4b50ca7a3e88 (diff)
parentd02e30c31c57683a66ed68a1bcff900ca78f6d56 (diff)
Merge remote branch 'origin/x86/apic' into x86/mrst
Conflicts: arch/x86/kernel/apic/io_apic.c
-rw-r--r--Documentation/kernel-parameters.txt6
-rw-r--r--arch/ia64/include/asm/xen/events.h4
-rw-r--r--arch/ia64/kernel/acpi.c4
-rw-r--r--arch/x86/ia32/ia32_aout.c1
-rw-r--r--arch/x86/include/asm/i8259.h2
-rw-r--r--arch/x86/include/asm/io_apic.h1
-rw-r--r--arch/x86/include/asm/irq.h1
-rw-r--r--arch/x86/include/asm/irq_vectors.h48
-rw-r--r--arch/x86/kernel/acpi/boot.c9
-rw-r--r--arch/x86/kernel/apic/io_apic.c229
-rw-r--r--arch/x86/kernel/apic/nmi.c6
-rw-r--r--arch/x86/kernel/i8259.c30
-rw-r--r--arch/x86/kernel/irqinit.c35
-rw-r--r--arch/x86/kernel/reboot.c8
-rw-r--r--arch/x86/kernel/smpboot.c13
-rw-r--r--arch/x86/kernel/time.c4
-rw-r--r--arch/x86/kernel/visws_quirks.c6
-rw-r--r--arch/x86/kernel/vmiclock_32.c6
-rw-r--r--drivers/acpi/numa.c4
-rw-r--r--drivers/xen/events.c8
-rw-r--r--include/linux/irq.h2
-rw-r--r--init/main.c16
-rw-r--r--kernel/irq/chip.c52
-rw-r--r--kernel/irq/handle.c58
-rw-r--r--kernel/irq/internals.h6
-rw-r--r--kernel/irq/numa_migrate.c4
26 files changed, 333 insertions, 230 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 736d45602886..51bceb0fb277 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1772,6 +1772,12 @@ and is between 256 and 4096 characters. It is defined in the file
1772 purges which is reported from either PAL_VM_SUMMARY or 1772 purges which is reported from either PAL_VM_SUMMARY or
1773 SAL PALO. 1773 SAL PALO.
1774 1774
1775 nr_cpus= [SMP] Maximum number of processors that an SMP kernel
1776 could support. nr_cpus=n : n >= 1 limits the kernel to
1777 supporting 'n' processors. Later in runtime you can not
1778 use hotplug cpu feature to put more cpu back to online.
1779 just like you compile the kernel NR_CPUS=n
1780
1775 nr_uarts= [SERIAL] maximum number of UARTs to be registered. 1781 nr_uarts= [SERIAL] maximum number of UARTs to be registered.
1776 1782
1777 numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA. 1783 numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
index b8370c8b6198..baa74c82aa71 100644
--- a/arch/ia64/include/asm/xen/events.h
+++ b/arch/ia64/include/asm/xen/events.h
@@ -36,10 +36,6 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
36 return !(ia64_psr(regs)->i); 36 return !(ia64_psr(regs)->i);
37} 37}
38 38
39static inline void handle_irq(int irq, struct pt_regs *regs)
40{
41 __do_IRQ(irq);
42}
43#define irq_ctx_init(cpu) do { } while (0) 39#define irq_ctx_init(cpu) do { } while (0)
44 40
45#endif /* _ASM_IA64_XEN_EVENTS_H */ 41#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 40574ae11401..605a08b29721 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -881,8 +881,8 @@ __init void prefill_possible_map(void)
881 881
882 possible = available_cpus + additional_cpus; 882 possible = available_cpus + additional_cpus;
883 883
884 if (possible > NR_CPUS) 884 if (possible > nr_cpu_ids)
885 possible = NR_CPUS; 885 possible = nr_cpu_ids;
886 886
887 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", 887 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
888 possible, max((possible - available_cpus), 0)); 888 possible, max((possible - available_cpus), 0));
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index f9f472462753..14531abdd0ce 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -327,7 +327,6 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
327 current->mm->free_area_cache = TASK_UNMAPPED_BASE; 327 current->mm->free_area_cache = TASK_UNMAPPED_BASE;
328 current->mm->cached_hole_size = 0; 328 current->mm->cached_hole_size = 0;
329 329
330 current->mm->mmap = NULL;
331 install_exec_creds(bprm); 330 install_exec_creds(bprm);
332 current->flags &= ~PF_FORKNOEXEC; 331 current->flags &= ~PF_FORKNOEXEC;
333 332
diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
index 2832babd91fc..1655147646aa 100644
--- a/arch/x86/include/asm/i8259.h
+++ b/arch/x86/include/asm/i8259.h
@@ -24,7 +24,7 @@ extern unsigned int cached_irq_mask;
24#define SLAVE_ICW4_DEFAULT 0x01 24#define SLAVE_ICW4_DEFAULT 0x01
25#define PIC_ICW4_AEOI 2 25#define PIC_ICW4_AEOI 2
26 26
27extern spinlock_t i8259A_lock; 27extern raw_spinlock_t i8259A_lock;
28 28
29/* the PIC may need a careful delay on some platforms, hence specific calls */ 29/* the PIC may need a careful delay on some platforms, hence specific calls */
30static inline unsigned char inb_pic(unsigned int port) 30static inline unsigned char inb_pic(unsigned int port)
diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h
index 84fdd5110948..31dfb42d8649 100644
--- a/arch/x86/include/asm/io_apic.h
+++ b/arch/x86/include/asm/io_apic.h
@@ -158,6 +158,7 @@ extern int io_apic_get_redir_entries(int ioapic);
158struct io_apic_irq_attr; 158struct io_apic_irq_attr;
159extern int io_apic_set_pci_routing(struct device *dev, int irq, 159extern int io_apic_set_pci_routing(struct device *dev, int irq,
160 struct io_apic_irq_attr *irq_attr); 160 struct io_apic_irq_attr *irq_attr);
161void setup_IO_APIC_irq_extra(u32 gsi);
161extern int (*ioapic_renumber_irq)(int ioapic, int irq); 162extern int (*ioapic_renumber_irq)(int ioapic, int irq);
162extern void ioapic_init_mappings(void); 163extern void ioapic_init_mappings(void);
163extern void ioapic_insert_resources(void); 164extern void ioapic_insert_resources(void);
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 5458380b6ef8..262292729fc4 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -48,5 +48,6 @@ extern DECLARE_BITMAP(used_vectors, NR_VECTORS);
48extern int vector_used_by_percpu_irq(unsigned int vector); 48extern int vector_used_by_percpu_irq(unsigned int vector);
49 49
50extern void init_ISA_irqs(void); 50extern void init_ISA_irqs(void);
51extern int nr_legacy_irqs;
51 52
52#endif /* _ASM_X86_IRQ_H */ 53#endif /* _ASM_X86_IRQ_H */
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 4611f085cd43..8767d99c4f64 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -28,28 +28,33 @@
28#define MCE_VECTOR 0x12 28#define MCE_VECTOR 0x12
29 29
30/* 30/*
31 * IDT vectors usable for external interrupt sources start 31 * IDT vectors usable for external interrupt sources start at 0x20.
32 * at 0x20: 32 * (0x80 is the syscall vector, 0x30-0x3f are for ISA)
33 */ 33 */
34#define FIRST_EXTERNAL_VECTOR 0x20 34#define FIRST_EXTERNAL_VECTOR 0x20
35 35/*
36#ifdef CONFIG_X86_32 36 * We start allocating at 0x21 to spread out vectors evenly between
37# define SYSCALL_VECTOR 0x80 37 * priority levels. (0x80 is the syscall vector)
38# define IA32_SYSCALL_VECTOR 0x80 38 */
39#else 39#define VECTOR_OFFSET_START 1
40# define IA32_SYSCALL_VECTOR 0x80
41#endif
42 40
43/* 41/*
44 * Reserve the lowest usable priority level 0x20 - 0x2f for triggering 42 * Reserve the lowest usable vector (and hence lowest priority) 0x20 for
45 * cleanup after irq migration. 43 * triggering cleanup after irq migration. 0x21-0x2f will still be used
44 * for device interrupts.
46 */ 45 */
47#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR 46#define IRQ_MOVE_CLEANUP_VECTOR FIRST_EXTERNAL_VECTOR
48 47
48#define IA32_SYSCALL_VECTOR 0x80
49#ifdef CONFIG_X86_32
50# define SYSCALL_VECTOR 0x80
51#endif
52
49/* 53/*
50 * Vectors 0x30-0x3f are used for ISA interrupts. 54 * Vectors 0x30-0x3f are used for ISA interrupts.
55 * round up to the next 16-vector boundary
51 */ 56 */
52#define IRQ0_VECTOR (FIRST_EXTERNAL_VECTOR + 0x10) 57#define IRQ0_VECTOR ((FIRST_EXTERNAL_VECTOR + 16) & ~15)
53 58
54#define IRQ1_VECTOR (IRQ0_VECTOR + 1) 59#define IRQ1_VECTOR (IRQ0_VECTOR + 1)
55#define IRQ2_VECTOR (IRQ0_VECTOR + 2) 60#define IRQ2_VECTOR (IRQ0_VECTOR + 2)
@@ -120,13 +125,6 @@
120 */ 125 */
121#define MCE_SELF_VECTOR 0xeb 126#define MCE_SELF_VECTOR 0xeb
122 127
123/*
124 * First APIC vector available to drivers: (vectors 0x30-0xee) we
125 * start at 0x31(0x41) to spread out vectors evenly between priority
126 * levels. (0x80 is the syscall vector)
127 */
128#define FIRST_DEVICE_VECTOR (IRQ15_VECTOR + 2)
129
130#define NR_VECTORS 256 128#define NR_VECTORS 256
131 129
132#define FPU_IRQ 13 130#define FPU_IRQ 13
@@ -154,21 +152,21 @@ static inline int invalid_vm86_irq(int irq)
154 152
155#define NR_IRQS_LEGACY 16 153#define NR_IRQS_LEGACY 16
156 154
157#define CPU_VECTOR_LIMIT ( 8 * NR_CPUS )
158#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS ) 155#define IO_APIC_VECTOR_LIMIT ( 32 * MAX_IO_APICS )
159 156
160#ifdef CONFIG_X86_IO_APIC 157#ifdef CONFIG_X86_IO_APIC
161# ifdef CONFIG_SPARSE_IRQ 158# ifdef CONFIG_SPARSE_IRQ
159# define CPU_VECTOR_LIMIT (64 * NR_CPUS)
162# define NR_IRQS \ 160# define NR_IRQS \
163 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \ 161 (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ? \
164 (NR_VECTORS + CPU_VECTOR_LIMIT) : \ 162 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
165 (NR_VECTORS + IO_APIC_VECTOR_LIMIT)) 163 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
166# else 164# else
167# if NR_CPUS < MAX_IO_APICS 165# define CPU_VECTOR_LIMIT (32 * NR_CPUS)
168# define NR_IRQS (NR_VECTORS + 4*CPU_VECTOR_LIMIT) 166# define NR_IRQS \
169# else 167 (CPU_VECTOR_LIMIT < IO_APIC_VECTOR_LIMIT ? \
170# define NR_IRQS (NR_VECTORS + IO_APIC_VECTOR_LIMIT) 168 (NR_VECTORS + CPU_VECTOR_LIMIT) : \
171# endif 169 (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
172# endif 170# endif
173#else /* !CONFIG_X86_IO_APIC: */ 171#else /* !CONFIG_X86_IO_APIC: */
174# define NR_IRQS NR_IRQS_LEGACY 172# define NR_IRQS NR_IRQS_LEGACY
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 054a5f5548b0..db2773c6defd 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -447,6 +447,12 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
447int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) 447int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
448{ 448{
449 *irq = gsi; 449 *irq = gsi;
450
451#ifdef CONFIG_X86_IO_APIC
452 if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC)
453 setup_IO_APIC_irq_extra(gsi);
454#endif
455
450 return 0; 456 return 0;
451} 457}
452 458
@@ -474,7 +480,8 @@ int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
474 plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity); 480 plat_gsi = mp_register_gsi(dev, gsi, trigger, polarity);
475 } 481 }
476#endif 482#endif
477 acpi_gsi_to_irq(plat_gsi, &irq); 483 irq = plat_gsi;
484
478 return irq; 485 return irq;
479} 486}
480 487
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 3592a72f3f0a..b34854358ee6 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -73,8 +73,8 @@
73 */ 73 */
74int sis_apic_bug = -1; 74int sis_apic_bug = -1;
75 75
76static DEFINE_SPINLOCK(ioapic_lock); 76static DEFINE_RAW_SPINLOCK(ioapic_lock);
77static DEFINE_SPINLOCK(vector_lock); 77static DEFINE_RAW_SPINLOCK(vector_lock);
78 78
79/* 79/*
80 * # of IRQ routing registers 80 * # of IRQ routing registers
@@ -167,8 +167,14 @@ int __init arch_early_irq_init(void)
167 desc->chip_data = &cfg[i]; 167 desc->chip_data = &cfg[i];
168 zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node); 168 zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
169 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node); 169 zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
170 if (i < legacy_pic->nr_legacy_irqs) 170 /*
171 cpumask_setall(cfg[i].domain); 171 * For legacy IRQ's, start with assigning irq0 to irq15 to
172 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
173 */
174 if (i < legacy_pic->nr_legacy_irqs) {
175 cfg[i].vector = IRQ0_VECTOR + i;
176 cpumask_set_cpu(0, cfg[i].domain);
177 }
172 } 178 }
173 179
174 return 0; 180 return 0;
@@ -388,7 +394,7 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
388 struct irq_pin_list *entry; 394 struct irq_pin_list *entry;
389 unsigned long flags; 395 unsigned long flags;
390 396
391 spin_lock_irqsave(&ioapic_lock, flags); 397 raw_spin_lock_irqsave(&ioapic_lock, flags);
392 for_each_irq_pin(entry, cfg->irq_2_pin) { 398 for_each_irq_pin(entry, cfg->irq_2_pin) {
393 unsigned int reg; 399 unsigned int reg;
394 int pin; 400 int pin;
@@ -397,11 +403,11 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
397 reg = io_apic_read(entry->apic, 0x10 + pin*2); 403 reg = io_apic_read(entry->apic, 0x10 + pin*2);
398 /* Is the remote IRR bit set? */ 404 /* Is the remote IRR bit set? */
399 if (reg & IO_APIC_REDIR_REMOTE_IRR) { 405 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
400 spin_unlock_irqrestore(&ioapic_lock, flags); 406 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
401 return true; 407 return true;
402 } 408 }
403 } 409 }
404 spin_unlock_irqrestore(&ioapic_lock, flags); 410 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
405 411
406 return false; 412 return false;
407} 413}
@@ -415,10 +421,10 @@ static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
415{ 421{
416 union entry_union eu; 422 union entry_union eu;
417 unsigned long flags; 423 unsigned long flags;
418 spin_lock_irqsave(&ioapic_lock, flags); 424 raw_spin_lock_irqsave(&ioapic_lock, flags);
419 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin); 425 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
420 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin); 426 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
421 spin_unlock_irqrestore(&ioapic_lock, flags); 427 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
422 return eu.entry; 428 return eu.entry;
423} 429}
424 430
@@ -441,9 +447,9 @@ __ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
441void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e) 447void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
442{ 448{
443 unsigned long flags; 449 unsigned long flags;
444 spin_lock_irqsave(&ioapic_lock, flags); 450 raw_spin_lock_irqsave(&ioapic_lock, flags);
445 __ioapic_write_entry(apic, pin, e); 451 __ioapic_write_entry(apic, pin, e);
446 spin_unlock_irqrestore(&ioapic_lock, flags); 452 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
447} 453}
448 454
449/* 455/*
@@ -456,10 +462,10 @@ static void ioapic_mask_entry(int apic, int pin)
456 unsigned long flags; 462 unsigned long flags;
457 union entry_union eu = { .entry.mask = 1 }; 463 union entry_union eu = { .entry.mask = 1 };
458 464
459 spin_lock_irqsave(&ioapic_lock, flags); 465 raw_spin_lock_irqsave(&ioapic_lock, flags);
460 io_apic_write(apic, 0x10 + 2*pin, eu.w1); 466 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
461 io_apic_write(apic, 0x11 + 2*pin, eu.w2); 467 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
462 spin_unlock_irqrestore(&ioapic_lock, flags); 468 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
463} 469}
464 470
465/* 471/*
@@ -586,9 +592,9 @@ static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
586 592
587 BUG_ON(!cfg); 593 BUG_ON(!cfg);
588 594
589 spin_lock_irqsave(&ioapic_lock, flags); 595 raw_spin_lock_irqsave(&ioapic_lock, flags);
590 __mask_IO_APIC_irq(cfg); 596 __mask_IO_APIC_irq(cfg);
591 spin_unlock_irqrestore(&ioapic_lock, flags); 597 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
592} 598}
593 599
594static void unmask_IO_APIC_irq_desc(struct irq_desc *desc) 600static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
@@ -596,9 +602,9 @@ static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
596 struct irq_cfg *cfg = desc->chip_data; 602 struct irq_cfg *cfg = desc->chip_data;
597 unsigned long flags; 603 unsigned long flags;
598 604
599 spin_lock_irqsave(&ioapic_lock, flags); 605 raw_spin_lock_irqsave(&ioapic_lock, flags);
600 __unmask_IO_APIC_irq(cfg); 606 __unmask_IO_APIC_irq(cfg);
601 spin_unlock_irqrestore(&ioapic_lock, flags); 607 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
602} 608}
603 609
604static void mask_IO_APIC_irq(unsigned int irq) 610static void mask_IO_APIC_irq(unsigned int irq)
@@ -1122,12 +1128,12 @@ void lock_vector_lock(void)
1122 /* Used to the online set of cpus does not change 1128 /* Used to the online set of cpus does not change
1123 * during assign_irq_vector. 1129 * during assign_irq_vector.
1124 */ 1130 */
1125 spin_lock(&vector_lock); 1131 raw_spin_lock(&vector_lock);
1126} 1132}
1127 1133
1128void unlock_vector_lock(void) 1134void unlock_vector_lock(void)
1129{ 1135{
1130 spin_unlock(&vector_lock); 1136 raw_spin_unlock(&vector_lock);
1131} 1137}
1132 1138
1133static int 1139static int
@@ -1144,7 +1150,8 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1144 * Also, we've got to be careful not to trash gate 1150 * Also, we've got to be careful not to trash gate
1145 * 0x80, because int 0x80 is hm, kind of importantish. ;) 1151 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1146 */ 1152 */
1147 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; 1153 static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
1154 static int current_offset = VECTOR_OFFSET_START % 8;
1148 unsigned int old_vector; 1155 unsigned int old_vector;
1149 int cpu, err; 1156 int cpu, err;
1150 cpumask_var_t tmp_mask; 1157 cpumask_var_t tmp_mask;
@@ -1180,7 +1187,7 @@ next:
1180 if (vector >= first_system_vector) { 1187 if (vector >= first_system_vector) {
1181 /* If out of vectors on large boxen, must share them. */ 1188 /* If out of vectors on large boxen, must share them. */
1182 offset = (offset + 1) % 8; 1189 offset = (offset + 1) % 8;
1183 vector = FIRST_DEVICE_VECTOR + offset; 1190 vector = FIRST_EXTERNAL_VECTOR + offset;
1184 } 1191 }
1185 if (unlikely(current_vector == vector)) 1192 if (unlikely(current_vector == vector))
1186 continue; 1193 continue;
@@ -1214,9 +1221,9 @@ int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
1214 int err; 1221 int err;
1215 unsigned long flags; 1222 unsigned long flags;
1216 1223
1217 spin_lock_irqsave(&vector_lock, flags); 1224 raw_spin_lock_irqsave(&vector_lock, flags);
1218 err = __assign_irq_vector(irq, cfg, mask); 1225 err = __assign_irq_vector(irq, cfg, mask);
1219 spin_unlock_irqrestore(&vector_lock, flags); 1226 raw_spin_unlock_irqrestore(&vector_lock, flags);
1220 return err; 1227 return err;
1221} 1228}
1222 1229
@@ -1250,11 +1257,16 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1250void __setup_vector_irq(int cpu) 1257void __setup_vector_irq(int cpu)
1251{ 1258{
1252 /* Initialize vector_irq on a new cpu */ 1259 /* Initialize vector_irq on a new cpu */
1253 /* This function must be called with vector_lock held */
1254 int irq, vector; 1260 int irq, vector;
1255 struct irq_cfg *cfg; 1261 struct irq_cfg *cfg;
1256 struct irq_desc *desc; 1262 struct irq_desc *desc;
1257 1263
1264 /*
1265 * vector_lock will make sure that we don't run into irq vector
1266 * assignments that might be happening on another cpu in parallel,
1267 * while we setup our initial vector to irq mappings.
1268 */
1269 raw_spin_lock(&vector_lock);
1258 /* Mark the inuse vectors */ 1270 /* Mark the inuse vectors */
1259 for_each_irq_desc(irq, desc) { 1271 for_each_irq_desc(irq, desc) {
1260 cfg = desc->chip_data; 1272 cfg = desc->chip_data;
@@ -1273,6 +1285,7 @@ void __setup_vector_irq(int cpu)
1273 if (!cpumask_test_cpu(cpu, cfg->domain)) 1285 if (!cpumask_test_cpu(cpu, cfg->domain))
1274 per_cpu(vector_irq, cpu)[vector] = -1; 1286 per_cpu(vector_irq, cpu)[vector] = -1;
1275 } 1287 }
1288 raw_spin_unlock(&vector_lock);
1276} 1289}
1277 1290
1278static struct irq_chip ioapic_chip; 1291static struct irq_chip ioapic_chip;
@@ -1422,6 +1435,14 @@ static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq
1422 1435
1423 cfg = desc->chip_data; 1436 cfg = desc->chip_data;
1424 1437
1438 /*
1439 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
1440 * controllers like 8259. Now that IO-APIC can handle this irq, update
1441 * the cfg->domain.
1442 */
1443 if (irq < nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
1444 apic->vector_allocation_domain(0, cfg->domain);
1445
1425 if (assign_irq_vector(irq, cfg, apic->target_cpus())) 1446 if (assign_irq_vector(irq, cfg, apic->target_cpus()))
1426 return; 1447 return;
1427 1448
@@ -1521,6 +1542,56 @@ static void __init setup_IO_APIC_irqs(void)
1521} 1542}
1522 1543
1523/* 1544/*
1545 * for the gsit that is not in first ioapic
1546 * but could not use acpi_register_gsi()
1547 * like some special sci in IBM x3330
1548 */
1549void setup_IO_APIC_irq_extra(u32 gsi)
1550{
1551 int apic_id = 0, pin, idx, irq;
1552 int node = cpu_to_node(boot_cpu_id);
1553 struct irq_desc *desc;
1554 struct irq_cfg *cfg;
1555
1556 /*
1557 * Convert 'gsi' to 'ioapic.pin'.
1558 */
1559 apic_id = mp_find_ioapic(gsi);
1560 if (apic_id < 0)
1561 return;
1562
1563 pin = mp_find_ioapic_pin(apic_id, gsi);
1564 idx = find_irq_entry(apic_id, pin, mp_INT);
1565 if (idx == -1)
1566 return;
1567
1568 irq = pin_2_irq(idx, apic_id, pin);
1569#ifdef CONFIG_SPARSE_IRQ
1570 desc = irq_to_desc(irq);
1571 if (desc)
1572 return;
1573#endif
1574 desc = irq_to_desc_alloc_node(irq, node);
1575 if (!desc) {
1576 printk(KERN_INFO "can not get irq_desc for %d\n", irq);
1577 return;
1578 }
1579
1580 cfg = desc->chip_data;
1581 add_pin_to_irq_node(cfg, node, apic_id, pin);
1582
1583 if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
1584 pr_debug("Pin %d-%d already programmed\n",
1585 mp_ioapics[apic_id].apicid, pin);
1586 return;
1587 }
1588 set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);
1589
1590 setup_IO_APIC_irq(apic_id, pin, irq, desc,
1591 irq_trigger(idx), irq_polarity(idx));
1592}
1593
1594/*
1524 * Set up the timer pin, possibly with the 8259A-master behind. 1595 * Set up the timer pin, possibly with the 8259A-master behind.
1525 */ 1596 */
1526static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin, 1597static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
@@ -1583,14 +1654,14 @@ __apicdebuginit(void) print_IO_APIC(void)
1583 1654
1584 for (apic = 0; apic < nr_ioapics; apic++) { 1655 for (apic = 0; apic < nr_ioapics; apic++) {
1585 1656
1586 spin_lock_irqsave(&ioapic_lock, flags); 1657 raw_spin_lock_irqsave(&ioapic_lock, flags);
1587 reg_00.raw = io_apic_read(apic, 0); 1658 reg_00.raw = io_apic_read(apic, 0);
1588 reg_01.raw = io_apic_read(apic, 1); 1659 reg_01.raw = io_apic_read(apic, 1);
1589 if (reg_01.bits.version >= 0x10) 1660 if (reg_01.bits.version >= 0x10)
1590 reg_02.raw = io_apic_read(apic, 2); 1661 reg_02.raw = io_apic_read(apic, 2);
1591 if (reg_01.bits.version >= 0x20) 1662 if (reg_01.bits.version >= 0x20)
1592 reg_03.raw = io_apic_read(apic, 3); 1663 reg_03.raw = io_apic_read(apic, 3);
1593 spin_unlock_irqrestore(&ioapic_lock, flags); 1664 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1594 1665
1595 printk("\n"); 1666 printk("\n");
1596 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid); 1667 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
@@ -1812,7 +1883,7 @@ __apicdebuginit(void) print_PIC(void)
1812 1883
1813 printk(KERN_DEBUG "\nprinting PIC contents\n"); 1884 printk(KERN_DEBUG "\nprinting PIC contents\n");
1814 1885
1815 spin_lock_irqsave(&i8259A_lock, flags); 1886 raw_spin_lock_irqsave(&i8259A_lock, flags);
1816 1887
1817 v = inb(0xa1) << 8 | inb(0x21); 1888 v = inb(0xa1) << 8 | inb(0x21);
1818 printk(KERN_DEBUG "... PIC IMR: %04x\n", v); 1889 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
@@ -1826,7 +1897,7 @@ __apicdebuginit(void) print_PIC(void)
1826 outb(0x0a,0xa0); 1897 outb(0x0a,0xa0);
1827 outb(0x0a,0x20); 1898 outb(0x0a,0x20);
1828 1899
1829 spin_unlock_irqrestore(&i8259A_lock, flags); 1900 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
1830 1901
1831 printk(KERN_DEBUG "... PIC ISR: %04x\n", v); 1902 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1832 1903
@@ -1885,9 +1956,9 @@ void __init enable_IO_APIC(void)
1885 * The number of IO-APIC IRQ registers (== #pins): 1956 * The number of IO-APIC IRQ registers (== #pins):
1886 */ 1957 */
1887 for (apic = 0; apic < nr_ioapics; apic++) { 1958 for (apic = 0; apic < nr_ioapics; apic++) {
1888 spin_lock_irqsave(&ioapic_lock, flags); 1959 raw_spin_lock_irqsave(&ioapic_lock, flags);
1889 reg_01.raw = io_apic_read(apic, 1); 1960 reg_01.raw = io_apic_read(apic, 1);
1890 spin_unlock_irqrestore(&ioapic_lock, flags); 1961 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
1891 nr_ioapic_registers[apic] = reg_01.bits.entries+1; 1962 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1892 } 1963 }
1893 1964
@@ -2027,9 +2098,9 @@ void __init setup_ioapic_ids_from_mpc(void)
2027 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) { 2098 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
2028 2099
2029 /* Read the register 0 value */ 2100 /* Read the register 0 value */
2030 spin_lock_irqsave(&ioapic_lock, flags); 2101 raw_spin_lock_irqsave(&ioapic_lock, flags);
2031 reg_00.raw = io_apic_read(apic_id, 0); 2102 reg_00.raw = io_apic_read(apic_id, 0);
2032 spin_unlock_irqrestore(&ioapic_lock, flags); 2103 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2033 2104
2034 old_id = mp_ioapics[apic_id].apicid; 2105 old_id = mp_ioapics[apic_id].apicid;
2035 2106
@@ -2088,16 +2159,16 @@ void __init setup_ioapic_ids_from_mpc(void)
2088 mp_ioapics[apic_id].apicid); 2159 mp_ioapics[apic_id].apicid);
2089 2160
2090 reg_00.bits.ID = mp_ioapics[apic_id].apicid; 2161 reg_00.bits.ID = mp_ioapics[apic_id].apicid;
2091 spin_lock_irqsave(&ioapic_lock, flags); 2162 raw_spin_lock_irqsave(&ioapic_lock, flags);
2092 io_apic_write(apic_id, 0, reg_00.raw); 2163 io_apic_write(apic_id, 0, reg_00.raw);
2093 spin_unlock_irqrestore(&ioapic_lock, flags); 2164 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2094 2165
2095 /* 2166 /*
2096 * Sanity check 2167 * Sanity check
2097 */ 2168 */
2098 spin_lock_irqsave(&ioapic_lock, flags); 2169 raw_spin_lock_irqsave(&ioapic_lock, flags);
2099 reg_00.raw = io_apic_read(apic_id, 0); 2170 reg_00.raw = io_apic_read(apic_id, 0);
2100 spin_unlock_irqrestore(&ioapic_lock, flags); 2171 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2101 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid) 2172 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
2102 printk("could not set ID!\n"); 2173 printk("could not set ID!\n");
2103 else 2174 else
@@ -2180,7 +2251,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
2180 unsigned long flags; 2251 unsigned long flags;
2181 struct irq_cfg *cfg; 2252 struct irq_cfg *cfg;
2182 2253
2183 spin_lock_irqsave(&ioapic_lock, flags); 2254 raw_spin_lock_irqsave(&ioapic_lock, flags);
2184 if (irq < legacy_pic->nr_legacy_irqs) { 2255 if (irq < legacy_pic->nr_legacy_irqs) {
2185 legacy_pic->chip->mask(irq); 2256 legacy_pic->chip->mask(irq);
2186 if (legacy_pic->irq_pending(irq)) 2257 if (legacy_pic->irq_pending(irq))
@@ -2188,7 +2259,7 @@ static unsigned int startup_ioapic_irq(unsigned int irq)
2188 } 2259 }
2189 cfg = irq_cfg(irq); 2260 cfg = irq_cfg(irq);
2190 __unmask_IO_APIC_irq(cfg); 2261 __unmask_IO_APIC_irq(cfg);
2191 spin_unlock_irqrestore(&ioapic_lock, flags); 2262 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2192 2263
2193 return was_pending; 2264 return was_pending;
2194} 2265}
@@ -2199,9 +2270,9 @@ static int ioapic_retrigger_irq(unsigned int irq)
2199 struct irq_cfg *cfg = irq_cfg(irq); 2270 struct irq_cfg *cfg = irq_cfg(irq);
2200 unsigned long flags; 2271 unsigned long flags;
2201 2272
2202 spin_lock_irqsave(&vector_lock, flags); 2273 raw_spin_lock_irqsave(&vector_lock, flags);
2203 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector); 2274 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2204 spin_unlock_irqrestore(&vector_lock, flags); 2275 raw_spin_unlock_irqrestore(&vector_lock, flags);
2205 2276
2206 return 1; 2277 return 1;
2207} 2278}
@@ -2294,14 +2365,14 @@ set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2294 irq = desc->irq; 2365 irq = desc->irq;
2295 cfg = desc->chip_data; 2366 cfg = desc->chip_data;
2296 2367
2297 spin_lock_irqsave(&ioapic_lock, flags); 2368 raw_spin_lock_irqsave(&ioapic_lock, flags);
2298 ret = set_desc_affinity(desc, mask, &dest); 2369 ret = set_desc_affinity(desc, mask, &dest);
2299 if (!ret) { 2370 if (!ret) {
2300 /* Only the high 8 bits are valid. */ 2371 /* Only the high 8 bits are valid. */
2301 dest = SET_APIC_LOGICAL_ID(dest); 2372 dest = SET_APIC_LOGICAL_ID(dest);
2302 __target_IO_APIC_irq(irq, dest, cfg); 2373 __target_IO_APIC_irq(irq, dest, cfg);
2303 } 2374 }
2304 spin_unlock_irqrestore(&ioapic_lock, flags); 2375 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2305 2376
2306 return ret; 2377 return ret;
2307} 2378}
@@ -2536,9 +2607,9 @@ static void eoi_ioapic_irq(struct irq_desc *desc)
2536 irq = desc->irq; 2607 irq = desc->irq;
2537 cfg = desc->chip_data; 2608 cfg = desc->chip_data;
2538 2609
2539 spin_lock_irqsave(&ioapic_lock, flags); 2610 raw_spin_lock_irqsave(&ioapic_lock, flags);
2540 __eoi_ioapic_irq(irq, cfg); 2611 __eoi_ioapic_irq(irq, cfg);
2541 spin_unlock_irqrestore(&ioapic_lock, flags); 2612 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2542} 2613}
2543 2614
2544static void ack_apic_level(unsigned int irq) 2615static void ack_apic_level(unsigned int irq)
@@ -3120,13 +3191,13 @@ static int ioapic_resume(struct sys_device *dev)
3120 data = container_of(dev, struct sysfs_ioapic_data, dev); 3191 data = container_of(dev, struct sysfs_ioapic_data, dev);
3121 entry = data->entry; 3192 entry = data->entry;
3122 3193
3123 spin_lock_irqsave(&ioapic_lock, flags); 3194 raw_spin_lock_irqsave(&ioapic_lock, flags);
3124 reg_00.raw = io_apic_read(dev->id, 0); 3195 reg_00.raw = io_apic_read(dev->id, 0);
3125 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) { 3196 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
3126 reg_00.bits.ID = mp_ioapics[dev->id].apicid; 3197 reg_00.bits.ID = mp_ioapics[dev->id].apicid;
3127 io_apic_write(dev->id, 0, reg_00.raw); 3198 io_apic_write(dev->id, 0, reg_00.raw);
3128 } 3199 }
3129 spin_unlock_irqrestore(&ioapic_lock, flags); 3200 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3130 for (i = 0; i < nr_ioapic_registers[dev->id]; i++) 3201 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
3131 ioapic_write_entry(dev->id, i, entry[i]); 3202 ioapic_write_entry(dev->id, i, entry[i]);
3132 3203
@@ -3189,7 +3260,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
3189 if (irq_want < nr_irqs_gsi) 3260 if (irq_want < nr_irqs_gsi)
3190 irq_want = nr_irqs_gsi; 3261 irq_want = nr_irqs_gsi;
3191 3262
3192 spin_lock_irqsave(&vector_lock, flags); 3263 raw_spin_lock_irqsave(&vector_lock, flags);
3193 for (new = irq_want; new < nr_irqs; new++) { 3264 for (new = irq_want; new < nr_irqs; new++) {
3194 desc_new = irq_to_desc_alloc_node(new, node); 3265 desc_new = irq_to_desc_alloc_node(new, node);
3195 if (!desc_new) { 3266 if (!desc_new) {
@@ -3208,14 +3279,11 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
3208 irq = new; 3279 irq = new;
3209 break; 3280 break;
3210 } 3281 }
3211 spin_unlock_irqrestore(&vector_lock, flags); 3282 raw_spin_unlock_irqrestore(&vector_lock, flags);
3283
3284 if (irq > 0)
3285 dynamic_irq_init_keep_chip_data(irq);
3212 3286
3213 if (irq > 0) {
3214 dynamic_irq_init(irq);
3215 /* restore it, in case dynamic_irq_init clear it */
3216 if (desc_new)
3217 desc_new->chip_data = cfg_new;
3218 }
3219 return irq; 3287 return irq;
3220} 3288}
3221 3289
@@ -3237,20 +3305,13 @@ int create_irq(void)
3237void destroy_irq(unsigned int irq) 3305void destroy_irq(unsigned int irq)
3238{ 3306{
3239 unsigned long flags; 3307 unsigned long flags;
3240 struct irq_cfg *cfg;
3241 struct irq_desc *desc;
3242 3308
3243 /* store it, in case dynamic_irq_cleanup clear it */ 3309 dynamic_irq_cleanup_keep_chip_data(irq);
3244 desc = irq_to_desc(irq);
3245 cfg = desc->chip_data;
3246 dynamic_irq_cleanup(irq);
3247 /* connect back irq_cfg */
3248 desc->chip_data = cfg;
3249 3310
3250 free_irte(irq); 3311 free_irte(irq);
3251 spin_lock_irqsave(&vector_lock, flags); 3312 raw_spin_lock_irqsave(&vector_lock, flags);
3252 __clear_irq_vector(irq, cfg); 3313 __clear_irq_vector(irq, get_irq_chip_data(irq));
3253 spin_unlock_irqrestore(&vector_lock, flags); 3314 raw_spin_unlock_irqrestore(&vector_lock, flags);
3254} 3315}
3255 3316
3256/* 3317/*
@@ -3787,9 +3848,9 @@ int __init io_apic_get_redir_entries (int ioapic)
3787 union IO_APIC_reg_01 reg_01; 3848 union IO_APIC_reg_01 reg_01;
3788 unsigned long flags; 3849 unsigned long flags;
3789 3850
3790 spin_lock_irqsave(&ioapic_lock, flags); 3851 raw_spin_lock_irqsave(&ioapic_lock, flags);
3791 reg_01.raw = io_apic_read(ioapic, 1); 3852 reg_01.raw = io_apic_read(ioapic, 1);
3792 spin_unlock_irqrestore(&ioapic_lock, flags); 3853 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3793 3854
3794 return reg_01.bits.entries; 3855 return reg_01.bits.entries;
3795} 3856}
@@ -3816,28 +3877,6 @@ void __init probe_nr_irqs_gsi(void)
3816 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi); 3877 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3817} 3878}
3818 3879
3819#ifdef CONFIG_SPARSE_IRQ
3820int __init arch_probe_nr_irqs(void)
3821{
3822 int nr;
3823
3824 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3825 nr_irqs = NR_VECTORS * nr_cpu_ids;
3826
3827 nr = nr_irqs_gsi + 8 * nr_cpu_ids;
3828#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3829 /*
3830 * for MSI and HT dyn irq
3831 */
3832 nr += nr_irqs_gsi * 16;
3833#endif
3834 if (nr < nr_irqs)
3835 nr_irqs = nr;
3836
3837 return 0;
3838}
3839#endif
3840
3841static int __io_apic_set_pci_routing(struct device *dev, int irq, 3880static int __io_apic_set_pci_routing(struct device *dev, int irq,
3842 struct io_apic_irq_attr *irq_attr) 3881 struct io_apic_irq_attr *irq_attr)
3843{ 3882{
@@ -3951,9 +3990,9 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3951 if (physids_empty(apic_id_map)) 3990 if (physids_empty(apic_id_map))
3952 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map); 3991 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
3953 3992
3954 spin_lock_irqsave(&ioapic_lock, flags); 3993 raw_spin_lock_irqsave(&ioapic_lock, flags);
3955 reg_00.raw = io_apic_read(ioapic, 0); 3994 reg_00.raw = io_apic_read(ioapic, 0);
3956 spin_unlock_irqrestore(&ioapic_lock, flags); 3995 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3957 3996
3958 if (apic_id >= get_physical_broadcast()) { 3997 if (apic_id >= get_physical_broadcast()) {
3959 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying " 3998 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
@@ -3987,10 +4026,10 @@ int __init io_apic_get_unique_id(int ioapic, int apic_id)
3987 if (reg_00.bits.ID != apic_id) { 4026 if (reg_00.bits.ID != apic_id) {
3988 reg_00.bits.ID = apic_id; 4027 reg_00.bits.ID = apic_id;
3989 4028
3990 spin_lock_irqsave(&ioapic_lock, flags); 4029 raw_spin_lock_irqsave(&ioapic_lock, flags);
3991 io_apic_write(ioapic, 0, reg_00.raw); 4030 io_apic_write(ioapic, 0, reg_00.raw);
3992 reg_00.raw = io_apic_read(ioapic, 0); 4031 reg_00.raw = io_apic_read(ioapic, 0);
3993 spin_unlock_irqrestore(&ioapic_lock, flags); 4032 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3994 4033
3995 /* Sanity check */ 4034 /* Sanity check */
3996 if (reg_00.bits.ID != apic_id) { 4035 if (reg_00.bits.ID != apic_id) {
@@ -4011,9 +4050,9 @@ int __init io_apic_get_version(int ioapic)
4011 union IO_APIC_reg_01 reg_01; 4050 union IO_APIC_reg_01 reg_01;
4012 unsigned long flags; 4051 unsigned long flags;
4013 4052
4014 spin_lock_irqsave(&ioapic_lock, flags); 4053 raw_spin_lock_irqsave(&ioapic_lock, flags);
4015 reg_01.raw = io_apic_read(ioapic, 1); 4054 reg_01.raw = io_apic_read(ioapic, 1);
4016 spin_unlock_irqrestore(&ioapic_lock, flags); 4055 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
4017 4056
4018 return reg_01.bits.version; 4057 return reg_01.bits.version;
4019} 4058}
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 3817739acee9..f72b5f0f388e 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -416,13 +416,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
416 416
417 /* We can be called before check_nmi_watchdog, hence NULL check. */ 417 /* We can be called before check_nmi_watchdog, hence NULL check. */
418 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { 418 if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
419 static DEFINE_SPINLOCK(lock); /* Serialise the printks */ 419 static DEFINE_RAW_SPINLOCK(lock); /* Serialise the printks */
420 420
421 spin_lock(&lock); 421 raw_spin_lock(&lock);
422 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu); 422 printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
423 show_regs(regs); 423 show_regs(regs);
424 dump_stack(); 424 dump_stack();
425 spin_unlock(&lock); 425 raw_spin_unlock(&lock);
426 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); 426 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
427 427
428 rc = 1; 428 rc = 1;
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 1c790e75f7a0..9bac6817456f 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -32,7 +32,7 @@
32 */ 32 */
33 33
34static int i8259A_auto_eoi; 34static int i8259A_auto_eoi;
35DEFINE_SPINLOCK(i8259A_lock); 35DEFINE_RAW_SPINLOCK(i8259A_lock);
36static void mask_and_ack_8259A(unsigned int); 36static void mask_and_ack_8259A(unsigned int);
37static void mask_8259A(void); 37static void mask_8259A(void);
38static void unmask_8259A(void); 38static void unmask_8259A(void);
@@ -74,13 +74,13 @@ static void disable_8259A_irq(unsigned int irq)
74 unsigned int mask = 1 << irq; 74 unsigned int mask = 1 << irq;
75 unsigned long flags; 75 unsigned long flags;
76 76
77 spin_lock_irqsave(&i8259A_lock, flags); 77 raw_spin_lock_irqsave(&i8259A_lock, flags);
78 cached_irq_mask |= mask; 78 cached_irq_mask |= mask;
79 if (irq & 8) 79 if (irq & 8)
80 outb(cached_slave_mask, PIC_SLAVE_IMR); 80 outb(cached_slave_mask, PIC_SLAVE_IMR);
81 else 81 else
82 outb(cached_master_mask, PIC_MASTER_IMR); 82 outb(cached_master_mask, PIC_MASTER_IMR);
83 spin_unlock_irqrestore(&i8259A_lock, flags); 83 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
84} 84}
85 85
86static void enable_8259A_irq(unsigned int irq) 86static void enable_8259A_irq(unsigned int irq)
@@ -88,13 +88,13 @@ static void enable_8259A_irq(unsigned int irq)
88 unsigned int mask = ~(1 << irq); 88 unsigned int mask = ~(1 << irq);
89 unsigned long flags; 89 unsigned long flags;
90 90
91 spin_lock_irqsave(&i8259A_lock, flags); 91 raw_spin_lock_irqsave(&i8259A_lock, flags);
92 cached_irq_mask &= mask; 92 cached_irq_mask &= mask;
93 if (irq & 8) 93 if (irq & 8)
94 outb(cached_slave_mask, PIC_SLAVE_IMR); 94 outb(cached_slave_mask, PIC_SLAVE_IMR);
95 else 95 else
96 outb(cached_master_mask, PIC_MASTER_IMR); 96 outb(cached_master_mask, PIC_MASTER_IMR);
97 spin_unlock_irqrestore(&i8259A_lock, flags); 97 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
98} 98}
99 99
100static int i8259A_irq_pending(unsigned int irq) 100static int i8259A_irq_pending(unsigned int irq)
@@ -103,12 +103,12 @@ static int i8259A_irq_pending(unsigned int irq)
103 unsigned long flags; 103 unsigned long flags;
104 int ret; 104 int ret;
105 105
106 spin_lock_irqsave(&i8259A_lock, flags); 106 raw_spin_lock_irqsave(&i8259A_lock, flags);
107 if (irq < 8) 107 if (irq < 8)
108 ret = inb(PIC_MASTER_CMD) & mask; 108 ret = inb(PIC_MASTER_CMD) & mask;
109 else 109 else
110 ret = inb(PIC_SLAVE_CMD) & (mask >> 8); 110 ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
111 spin_unlock_irqrestore(&i8259A_lock, flags); 111 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
112 112
113 return ret; 113 return ret;
114} 114}
@@ -156,7 +156,7 @@ static void mask_and_ack_8259A(unsigned int irq)
156 unsigned int irqmask = 1 << irq; 156 unsigned int irqmask = 1 << irq;
157 unsigned long flags; 157 unsigned long flags;
158 158
159 spin_lock_irqsave(&i8259A_lock, flags); 159 raw_spin_lock_irqsave(&i8259A_lock, flags);
160 /* 160 /*
161 * Lightweight spurious IRQ detection. We do not want 161 * Lightweight spurious IRQ detection. We do not want
162 * to overdo spurious IRQ handling - it's usually a sign 162 * to overdo spurious IRQ handling - it's usually a sign
@@ -189,7 +189,7 @@ handle_real_irq:
189 outb(cached_master_mask, PIC_MASTER_IMR); 189 outb(cached_master_mask, PIC_MASTER_IMR);
190 outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ 190 outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
191 } 191 }
192 spin_unlock_irqrestore(&i8259A_lock, flags); 192 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
193 return; 193 return;
194 194
195spurious_8259A_irq: 195spurious_8259A_irq:
@@ -291,24 +291,24 @@ static void mask_8259A(void)
291{ 291{
292 unsigned long flags; 292 unsigned long flags;
293 293
294 spin_lock_irqsave(&i8259A_lock, flags); 294 raw_spin_lock_irqsave(&i8259A_lock, flags);
295 295
296 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ 296 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
297 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ 297 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
298 298
299 spin_unlock_irqrestore(&i8259A_lock, flags); 299 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
300} 300}
301 301
302static void unmask_8259A(void) 302static void unmask_8259A(void)
303{ 303{
304 unsigned long flags; 304 unsigned long flags;
305 305
306 spin_lock_irqsave(&i8259A_lock, flags); 306 raw_spin_lock_irqsave(&i8259A_lock, flags);
307 307
308 outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ 308 outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
309 outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ 309 outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
310 310
311 spin_unlock_irqrestore(&i8259A_lock, flags); 311 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
312} 312}
313 313
314static void init_8259A(int auto_eoi) 314static void init_8259A(int auto_eoi)
@@ -317,7 +317,7 @@ static void init_8259A(int auto_eoi)
317 317
318 i8259A_auto_eoi = auto_eoi; 318 i8259A_auto_eoi = auto_eoi;
319 319
320 spin_lock_irqsave(&i8259A_lock, flags); 320 raw_spin_lock_irqsave(&i8259A_lock, flags);
321 321
322 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ 322 outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */
323 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ 323 outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */
@@ -362,7 +362,7 @@ static void init_8259A(int auto_eoi)
362 outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */ 362 outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
363 outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */ 363 outb(cached_slave_mask, PIC_SLAVE_IMR); /* restore slave IRQ mask */
364 364
365 spin_unlock_irqrestore(&i8259A_lock, flags); 365 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
366} 366}
367 367
368/* 368/*
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 89b9510e8030..d2f787b3de56 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -84,24 +84,7 @@ static struct irqaction irq2 = {
84}; 84};
85 85
86DEFINE_PER_CPU(vector_irq_t, vector_irq) = { 86DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
87 [0 ... IRQ0_VECTOR - 1] = -1, 87 [0 ... NR_VECTORS - 1] = -1,
88 [IRQ0_VECTOR] = 0,
89 [IRQ1_VECTOR] = 1,
90 [IRQ2_VECTOR] = 2,
91 [IRQ3_VECTOR] = 3,
92 [IRQ4_VECTOR] = 4,
93 [IRQ5_VECTOR] = 5,
94 [IRQ6_VECTOR] = 6,
95 [IRQ7_VECTOR] = 7,
96 [IRQ8_VECTOR] = 8,
97 [IRQ9_VECTOR] = 9,
98 [IRQ10_VECTOR] = 10,
99 [IRQ11_VECTOR] = 11,
100 [IRQ12_VECTOR] = 12,
101 [IRQ13_VECTOR] = 13,
102 [IRQ14_VECTOR] = 14,
103 [IRQ15_VECTOR] = 15,
104 [IRQ15_VECTOR + 1 ... NR_VECTORS - 1] = -1
105}; 88};
106 89
107int vector_used_by_percpu_irq(unsigned int vector) 90int vector_used_by_percpu_irq(unsigned int vector)
@@ -116,6 +99,9 @@ int vector_used_by_percpu_irq(unsigned int vector)
116 return 0; 99 return 0;
117} 100}
118 101
102/* Number of legacy interrupts */
103int nr_legacy_irqs __read_mostly = NR_IRQS_LEGACY;
104
119void __init init_ISA_irqs(void) 105void __init init_ISA_irqs(void)
120{ 106{
121 int i; 107 int i;
@@ -142,6 +128,19 @@ void __init init_ISA_irqs(void)
142 128
143void __init init_IRQ(void) 129void __init init_IRQ(void)
144{ 130{
131 int i;
132
133 /*
134 * On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15.
135 * If these IRQ's are handled by legacy interrupt-controllers like PIC,
136 * then this configuration will likely be static after the boot. If
 137 * these IRQ's are handled by more modern controllers like IO-APIC,
138 * then this vector space can be freed and re-used dynamically as the
139 * irq's migrate etc.
140 */
141 for (i = 0; i < nr_legacy_irqs; i++)
142 per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
143
145 x86_init.irqs.intr_init(); 144 x86_init.irqs.intr_init();
146} 145}
147 146
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 704bddcdf64d..8e1aac86b50c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -461,6 +461,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
461 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"), 461 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini3,1"),
462 }, 462 },
463 }, 463 },
464 { /* Handle problems with rebooting on the iMac9,1. */
465 .callback = set_pci_reboot,
466 .ident = "Apple iMac9,1",
467 .matches = {
468 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
469 DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
470 },
471 },
464 { } 472 { }
465}; 473};
466 474
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f7a52f4a21a5..86f7edcd0438 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -243,6 +243,11 @@ static void __cpuinit smp_callin(void)
243 map_cpu_to_logical_apicid(); 243 map_cpu_to_logical_apicid();
244 244
245 notify_cpu_starting(cpuid); 245 notify_cpu_starting(cpuid);
246
247 /*
248 * Need to setup vector mappings before we enable interrupts.
249 */
250 __setup_vector_irq(smp_processor_id());
246 /* 251 /*
247 * Get our bogomips. 252 * Get our bogomips.
248 * 253 *
@@ -317,7 +322,6 @@ notrace static void __cpuinit start_secondary(void *unused)
317 */ 322 */
318 ipi_call_lock(); 323 ipi_call_lock();
319 lock_vector_lock(); 324 lock_vector_lock();
320 __setup_vector_irq(smp_processor_id());
321 set_cpu_online(smp_processor_id(), true); 325 set_cpu_online(smp_processor_id(), true);
322 unlock_vector_lock(); 326 unlock_vector_lock();
323 ipi_call_unlock(); 327 ipi_call_unlock();
@@ -1216,11 +1220,12 @@ __init void prefill_possible_map(void)
1216 1220
1217 total_cpus = max_t(int, possible, num_processors + disabled_cpus); 1221 total_cpus = max_t(int, possible, num_processors + disabled_cpus);
1218 1222
1219 if (possible > CONFIG_NR_CPUS) { 1223 /* nr_cpu_ids could be reduced via nr_cpus= */
1224 if (possible > nr_cpu_ids) {
1220 printk(KERN_WARNING 1225 printk(KERN_WARNING
1221 "%d Processors exceeds NR_CPUS limit of %d\n", 1226 "%d Processors exceeds NR_CPUS limit of %d\n",
1222 possible, CONFIG_NR_CPUS); 1227 possible, nr_cpu_ids);
1223 possible = CONFIG_NR_CPUS; 1228 possible = nr_cpu_ids;
1224 } 1229 }
1225 1230
1226 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", 1231 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index be2573448ed9..fb5cc5e14cfa 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -70,11 +70,11 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
70 * manually to deassert NMI lines for the watchdog if run 70 * manually to deassert NMI lines for the watchdog if run
71 * on an 82489DX-based system. 71 * on an 82489DX-based system.
72 */ 72 */
73 spin_lock(&i8259A_lock); 73 raw_spin_lock(&i8259A_lock);
74 outb(0x0c, PIC_MASTER_OCW3); 74 outb(0x0c, PIC_MASTER_OCW3);
75 /* Ack the IRQ; AEOI will end it automatically. */ 75 /* Ack the IRQ; AEOI will end it automatically. */
76 inb(PIC_MASTER_POLL); 76 inb(PIC_MASTER_POLL);
77 spin_unlock(&i8259A_lock); 77 raw_spin_unlock(&i8259A_lock);
78 } 78 }
79 79
80 global_clock_event->event_handler(global_clock_event); 80 global_clock_event->event_handler(global_clock_event);
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index f067e9556a47..e680ea52db9b 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -553,7 +553,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
553 struct irq_desc *desc; 553 struct irq_desc *desc;
554 unsigned long flags; 554 unsigned long flags;
555 555
556 spin_lock_irqsave(&i8259A_lock, flags); 556 raw_spin_lock_irqsave(&i8259A_lock, flags);
557 557
558 /* Find out what's interrupting in the PIIX4 master 8259 */ 558 /* Find out what's interrupting in the PIIX4 master 8259 */
559 outb(0x0c, 0x20); /* OCW3 Poll command */ 559 outb(0x0c, 0x20); /* OCW3 Poll command */
@@ -590,7 +590,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
590 outb(0x60 + realirq, 0x20); 590 outb(0x60 + realirq, 0x20);
591 } 591 }
592 592
593 spin_unlock_irqrestore(&i8259A_lock, flags); 593 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
594 594
595 desc = irq_to_desc(realirq); 595 desc = irq_to_desc(realirq);
596 596
@@ -608,7 +608,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
608 return IRQ_HANDLED; 608 return IRQ_HANDLED;
609 609
610out_unlock: 610out_unlock:
611 spin_unlock_irqrestore(&i8259A_lock, flags); 611 raw_spin_unlock_irqrestore(&i8259A_lock, flags);
612 return IRQ_NONE; 612 return IRQ_NONE;
613} 613}
614 614
diff --git a/arch/x86/kernel/vmiclock_32.c b/arch/x86/kernel/vmiclock_32.c
index 74c92bb194df..2f1ca5614292 100644
--- a/arch/x86/kernel/vmiclock_32.c
+++ b/arch/x86/kernel/vmiclock_32.c
@@ -79,11 +79,7 @@ unsigned long vmi_tsc_khz(void)
79 79
80static inline unsigned int vmi_get_timer_vector(void) 80static inline unsigned int vmi_get_timer_vector(void)
81{ 81{
82#ifdef CONFIG_X86_IO_APIC 82 return IRQ0_VECTOR;
83 return FIRST_DEVICE_VECTOR;
84#else
85 return FIRST_EXTERNAL_VECTOR;
86#endif
87} 83}
88 84
89/** vmi clockchip */ 85/** vmi clockchip */
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 7ad48dfc12db..b8725461d887 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -279,9 +279,9 @@ int __init acpi_numa_init(void)
279 /* SRAT: Static Resource Affinity Table */ 279 /* SRAT: Static Resource Affinity Table */
280 if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) { 280 if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
281 acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY, 281 acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
282 acpi_parse_x2apic_affinity, NR_CPUS); 282 acpi_parse_x2apic_affinity, nr_cpu_ids);
283 acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY, 283 acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
284 acpi_parse_processor_affinity, NR_CPUS); 284 acpi_parse_processor_affinity, nr_cpu_ids);
285 ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY, 285 ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
286 acpi_parse_memory_affinity, 286 acpi_parse_memory_affinity,
287 NR_NODE_MEMBLKS); 287 NR_NODE_MEMBLKS);
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index ce602dd09bc1..2f8413794d05 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -649,9 +649,13 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
649 int bit_idx = __ffs(pending_bits); 649 int bit_idx = __ffs(pending_bits);
650 int port = (word_idx * BITS_PER_LONG) + bit_idx; 650 int port = (word_idx * BITS_PER_LONG) + bit_idx;
651 int irq = evtchn_to_irq[port]; 651 int irq = evtchn_to_irq[port];
652 struct irq_desc *desc;
652 653
653 if (irq != -1) 654 if (irq != -1) {
654 handle_irq(irq, regs); 655 desc = irq_to_desc(irq);
656 if (desc)
657 generic_handle_irq_desc(irq, desc);
658 }
655 } 659 }
656 } 660 }
657 661
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 451481c082b5..4d9b26e044bc 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -400,7 +400,9 @@ static inline int irq_has_action(unsigned int irq)
400 400
401/* Dynamic irq helper functions */ 401/* Dynamic irq helper functions */
402extern void dynamic_irq_init(unsigned int irq); 402extern void dynamic_irq_init(unsigned int irq);
403void dynamic_irq_init_keep_chip_data(unsigned int irq);
403extern void dynamic_irq_cleanup(unsigned int irq); 404extern void dynamic_irq_cleanup(unsigned int irq);
405void dynamic_irq_cleanup_keep_chip_data(unsigned int irq);
404 406
405/* Set/get chip/data for an IRQ: */ 407/* Set/get chip/data for an IRQ: */
406extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); 408extern int set_irq_chip(unsigned int irq, struct irq_chip *chip);
diff --git a/init/main.c b/init/main.c
index 4cb47a159f02..05b5283e98fa 100644
--- a/init/main.c
+++ b/init/main.c
@@ -149,6 +149,20 @@ static int __init nosmp(char *str)
149 149
150early_param("nosmp", nosmp); 150early_param("nosmp", nosmp);
151 151
152/* this is hard limit */
153static int __init nrcpus(char *str)
154{
155 int nr_cpus;
156
157 get_option(&str, &nr_cpus);
158 if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
159 nr_cpu_ids = nr_cpus;
160
161 return 0;
162}
163
164early_param("nr_cpus", nrcpus);
165
152static int __init maxcpus(char *str) 166static int __init maxcpus(char *str)
153{ 167{
154 get_option(&str, &setup_max_cpus); 168 get_option(&str, &setup_max_cpus);
@@ -584,6 +598,7 @@ asmlinkage void __init start_kernel(void)
584 local_irq_disable(); 598 local_irq_disable();
585 } 599 }
586 rcu_init(); 600 rcu_init();
601 radix_tree_init();
587 /* init some links before init_ISA_irqs() */ 602 /* init some links before init_ISA_irqs() */
588 early_irq_init(); 603 early_irq_init();
589 init_IRQ(); 604 init_IRQ();
@@ -657,7 +672,6 @@ asmlinkage void __init start_kernel(void)
657 proc_caches_init(); 672 proc_caches_init();
658 buffer_init(); 673 buffer_init();
659 key_init(); 674 key_init();
660 radix_tree_init();
661 security_init(); 675 security_init();
662 vfs_caches_init(totalram_pages); 676 vfs_caches_init(totalram_pages);
663 signals_init(); 677 signals_init();
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index ecc3fa28f666..d70394f12ee9 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -18,11 +18,7 @@
18 18
19#include "internals.h" 19#include "internals.h"
20 20
21/** 21static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
22 * dynamic_irq_init - initialize a dynamically allocated irq
23 * @irq: irq number to initialize
24 */
25void dynamic_irq_init(unsigned int irq)
26{ 22{
27 struct irq_desc *desc; 23 struct irq_desc *desc;
28 unsigned long flags; 24 unsigned long flags;
@@ -41,7 +37,8 @@ void dynamic_irq_init(unsigned int irq)
41 desc->depth = 1; 37 desc->depth = 1;
42 desc->msi_desc = NULL; 38 desc->msi_desc = NULL;
43 desc->handler_data = NULL; 39 desc->handler_data = NULL;
44 desc->chip_data = NULL; 40 if (!keep_chip_data)
41 desc->chip_data = NULL;
45 desc->action = NULL; 42 desc->action = NULL;
46 desc->irq_count = 0; 43 desc->irq_count = 0;
47 desc->irqs_unhandled = 0; 44 desc->irqs_unhandled = 0;
@@ -55,10 +52,26 @@ void dynamic_irq_init(unsigned int irq)
55} 52}
56 53
57/** 54/**
58 * dynamic_irq_cleanup - cleanup a dynamically allocated irq 55 * dynamic_irq_init - initialize a dynamically allocated irq
59 * @irq: irq number to initialize 56 * @irq: irq number to initialize
60 */ 57 */
61void dynamic_irq_cleanup(unsigned int irq) 58void dynamic_irq_init(unsigned int irq)
59{
60 dynamic_irq_init_x(irq, false);
61}
62
63/**
64 * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
65 * @irq: irq number to initialize
66 *
67 * does not set irq_to_desc(irq)->chip_data to NULL
68 */
69void dynamic_irq_init_keep_chip_data(unsigned int irq)
70{
71 dynamic_irq_init_x(irq, true);
72}
73
74static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
62{ 75{
63 struct irq_desc *desc = irq_to_desc(irq); 76 struct irq_desc *desc = irq_to_desc(irq);
64 unsigned long flags; 77 unsigned long flags;
@@ -77,7 +90,8 @@ void dynamic_irq_cleanup(unsigned int irq)
77 } 90 }
78 desc->msi_desc = NULL; 91 desc->msi_desc = NULL;
79 desc->handler_data = NULL; 92 desc->handler_data = NULL;
80 desc->chip_data = NULL; 93 if (!keep_chip_data)
94 desc->chip_data = NULL;
81 desc->handle_irq = handle_bad_irq; 95 desc->handle_irq = handle_bad_irq;
82 desc->chip = &no_irq_chip; 96 desc->chip = &no_irq_chip;
83 desc->name = NULL; 97 desc->name = NULL;
@@ -85,6 +99,26 @@ void dynamic_irq_cleanup(unsigned int irq)
85 raw_spin_unlock_irqrestore(&desc->lock, flags); 99 raw_spin_unlock_irqrestore(&desc->lock, flags);
86} 100}
87 101
102/**
103 * dynamic_irq_cleanup - cleanup a dynamically allocated irq
104 * @irq: irq number to initialize
105 */
106void dynamic_irq_cleanup(unsigned int irq)
107{
108 dynamic_irq_cleanup_x(irq, false);
109}
110
111/**
112 * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
113 * @irq: irq number to initialize
114 *
115 * does not set irq_to_desc(irq)->chip_data to NULL
116 */
117void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
118{
119 dynamic_irq_cleanup_x(irq, true);
120}
121
88 122
89/** 123/**
90 * set_irq_chip - set the irq chip for an irq 124 * set_irq_chip - set the irq chip for an irq
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 814940e7f485..76d5a671bfe1 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -19,7 +19,7 @@
19#include <linux/kernel_stat.h> 19#include <linux/kernel_stat.h>
20#include <linux/rculist.h> 20#include <linux/rculist.h>
21#include <linux/hash.h> 21#include <linux/hash.h>
22#include <linux/bootmem.h> 22#include <linux/radix-tree.h>
23#include <trace/events/irq.h> 23#include <trace/events/irq.h>
24 24
25#include "internals.h" 25#include "internals.h"
@@ -87,12 +87,8 @@ void __ref init_kstat_irqs(struct irq_desc *desc, int node, int nr)
87{ 87{
88 void *ptr; 88 void *ptr;
89 89
90 if (slab_is_available()) 90 ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs),
91 ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), 91 GFP_ATOMIC, node);
92 GFP_ATOMIC, node);
93 else
94 ptr = alloc_bootmem_node(NODE_DATA(node),
95 nr * sizeof(*desc->kstat_irqs));
96 92
97 /* 93 /*
 98 * don't overwrite if can not get new one 94 * don't overwrite if can not get new one
@@ -132,7 +128,26 @@ static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
132 */ 128 */
133DEFINE_RAW_SPINLOCK(sparse_irq_lock); 129DEFINE_RAW_SPINLOCK(sparse_irq_lock);
134 130
135struct irq_desc **irq_desc_ptrs __read_mostly; 131static RADIX_TREE(irq_desc_tree, GFP_ATOMIC);
132
133static void set_irq_desc(unsigned int irq, struct irq_desc *desc)
134{
135 radix_tree_insert(&irq_desc_tree, irq, desc);
136}
137
138struct irq_desc *irq_to_desc(unsigned int irq)
139{
140 return radix_tree_lookup(&irq_desc_tree, irq);
141}
142
143void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
144{
145 void **ptr;
146
147 ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
148 if (ptr)
149 radix_tree_replace_slot(ptr, desc);
150}
136 151
137static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = { 152static struct irq_desc irq_desc_legacy[NR_IRQS_LEGACY] __cacheline_aligned_in_smp = {
138 [0 ... NR_IRQS_LEGACY-1] = { 153 [0 ... NR_IRQS_LEGACY-1] = {
@@ -164,9 +179,6 @@ int __init early_irq_init(void)
164 legacy_count = ARRAY_SIZE(irq_desc_legacy); 179 legacy_count = ARRAY_SIZE(irq_desc_legacy);
165 node = first_online_node; 180 node = first_online_node;
166 181
167 /* allocate irq_desc_ptrs array based on nr_irqs */
168 irq_desc_ptrs = kcalloc(nr_irqs, sizeof(void *), GFP_NOWAIT);
169
170 /* allocate based on nr_cpu_ids */ 182 /* allocate based on nr_cpu_ids */
171 kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids * 183 kstat_irqs_legacy = kzalloc_node(NR_IRQS_LEGACY * nr_cpu_ids *
172 sizeof(int), GFP_NOWAIT, node); 184 sizeof(int), GFP_NOWAIT, node);
@@ -180,23 +192,12 @@ int __init early_irq_init(void)
180 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); 192 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
181 alloc_desc_masks(&desc[i], node, true); 193 alloc_desc_masks(&desc[i], node, true);
182 init_desc_masks(&desc[i]); 194 init_desc_masks(&desc[i]);
183 irq_desc_ptrs[i] = desc + i; 195 set_irq_desc(i, &desc[i]);
184 } 196 }
185 197
186 for (i = legacy_count; i < nr_irqs; i++)
187 irq_desc_ptrs[i] = NULL;
188
189 return arch_early_irq_init(); 198 return arch_early_irq_init();
190} 199}
191 200
192struct irq_desc *irq_to_desc(unsigned int irq)
193{
194 if (irq_desc_ptrs && irq < nr_irqs)
195 return irq_desc_ptrs[irq];
196
197 return NULL;
198}
199
200struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node) 201struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
201{ 202{
202 struct irq_desc *desc; 203 struct irq_desc *desc;
@@ -208,21 +209,18 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
208 return NULL; 209 return NULL;
209 } 210 }
210 211
211 desc = irq_desc_ptrs[irq]; 212 desc = irq_to_desc(irq);
212 if (desc) 213 if (desc)
213 return desc; 214 return desc;
214 215
215 raw_spin_lock_irqsave(&sparse_irq_lock, flags); 216 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
216 217
217 /* We have to check it to avoid races with another CPU */ 218 /* We have to check it to avoid races with another CPU */
218 desc = irq_desc_ptrs[irq]; 219 desc = irq_to_desc(irq);
219 if (desc) 220 if (desc)
220 goto out_unlock; 221 goto out_unlock;
221 222
222 if (slab_is_available()) 223 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
223 desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
224 else
225 desc = alloc_bootmem_node(NODE_DATA(node), sizeof(*desc));
226 224
227 printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); 225 printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
228 if (!desc) { 226 if (!desc) {
@@ -231,7 +229,7 @@ struct irq_desc * __ref irq_to_desc_alloc_node(unsigned int irq, int node)
231 } 229 }
232 init_one_irq_desc(irq, desc, node); 230 init_one_irq_desc(irq, desc, node);
233 231
234 irq_desc_ptrs[irq] = desc; 232 set_irq_desc(irq, desc);
235 233
236out_unlock: 234out_unlock:
237 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 235 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b2821f070a3d..c63f3bc88f0b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -21,11 +21,7 @@ extern void clear_kstat_irqs(struct irq_desc *desc);
21extern raw_spinlock_t sparse_irq_lock; 21extern raw_spinlock_t sparse_irq_lock;
22 22
23#ifdef CONFIG_SPARSE_IRQ 23#ifdef CONFIG_SPARSE_IRQ
24/* irq_desc_ptrs allocated at boot time */ 24void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
25extern struct irq_desc **irq_desc_ptrs;
26#else
27/* irq_desc_ptrs is a fixed size array */
28extern struct irq_desc *irq_desc_ptrs[NR_IRQS];
29#endif 25#endif
30 26
31#ifdef CONFIG_PROC_FS 27#ifdef CONFIG_PROC_FS
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 26bac9d8f860..963559dbd858 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -70,7 +70,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
70 raw_spin_lock_irqsave(&sparse_irq_lock, flags); 70 raw_spin_lock_irqsave(&sparse_irq_lock, flags);
71 71
72 /* We have to check it to avoid races with another CPU */ 72 /* We have to check it to avoid races with another CPU */
73 desc = irq_desc_ptrs[irq]; 73 desc = irq_to_desc(irq);
74 74
75 if (desc && old_desc != desc) 75 if (desc && old_desc != desc)
76 goto out_unlock; 76 goto out_unlock;
@@ -90,7 +90,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc,
90 goto out_unlock; 90 goto out_unlock;
91 } 91 }
92 92
93 irq_desc_ptrs[irq] = desc; 93 replace_irq_desc(irq, desc);
94 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags); 94 raw_spin_unlock_irqrestore(&sparse_irq_lock, flags);
95 95
96 /* free the old one */ 96 /* free the old one */