-rw-r--r--  arch/Kconfig                      |   4
-rw-r--r--  arch/x86/Kconfig                  |   1
-rw-r--r--  arch/x86/kernel/io_apic.c         | 199
-rw-r--r--  arch/x86/kernel/setup_percpu.c    |   8
-rw-r--r--  arch/x86/kernel/visws_quirks.c    |   2
-rw-r--r--  arch/x86/kernel/vmlinux_32.lds.S  |   1
-rw-r--r--  arch/x86/kernel/vmlinux_64.lds.S  |   2
-rw-r--r--  arch/x86/xen/spinlock.c           |   2
-rw-r--r--  drivers/char/random.c             |   5
-rw-r--r--  drivers/pci/intr_remapping.c      |  11
-rw-r--r--  include/asm-generic/vmlinux.lds.h |  13
-rw-r--r--  include/linux/init.h              |  43
-rw-r--r--  include/linux/irq.h               |  15
-rw-r--r--  include/linux/kernel_stat.h       |  16
-rw-r--r--  init/Makefile                     |   2
-rw-r--r--  init/dyn_array.c                  | 120
-rw-r--r--  init/main.c                       |  11
-rw-r--r--  kernel/irq/chip.c                 |  30
-rw-r--r--  kernel/irq/handle.c               | 114
19 files changed, 103 insertions(+), 496 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index c8a7c2eb6490..071004d3a1b1 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -102,7 +102,3 @@ config HAVE_CLK
 	help
 	  The <linux/clk.h> calls support software clock gating and
 	  thus are a key power management tool on many systems.
-
-config HAVE_DYN_ARRAY
-	def_bool n
-
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8636ddf2f4a4..8da6123a60d0 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -33,7 +33,6 @@ config X86
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
-	select HAVE_DYN_ARRAY
 
 config ARCH_DEFCONFIG
 	string
diff --git a/arch/x86/kernel/io_apic.c b/arch/x86/kernel/io_apic.c
index e03bc0f87eef..6f80dc2f137e 100644
--- a/arch/x86/kernel/io_apic.c
+++ b/arch/x86/kernel/io_apic.c
@@ -107,7 +107,6 @@ static int __init parse_noapic(char *str)
 }
 early_param("noapic", parse_noapic);
 
-struct irq_cfg;
 struct irq_pin_list;
 struct irq_cfg {
 	unsigned int irq;
@@ -120,7 +119,7 @@ struct irq_cfg {
 };
 
 /* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
-static struct irq_cfg irq_cfg_legacy[] __initdata = {
+static struct irq_cfg irq_cfgx[NR_IRQS] = {
 	[0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
 	[1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
 	[2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
@@ -139,49 +138,27 @@ static struct irq_cfg irq_cfg_legacy[] __initdata = {
 	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
 };
 
-static struct irq_cfg irq_cfg_init = { .irq = -1U, };
-
-static void init_one_irq_cfg(struct irq_cfg *cfg)
-{
-	memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
-}
-
-static struct irq_cfg *irq_cfgx;
-
-static void __init init_work(void *data)
-{
-	struct dyn_array *da = data;
-	struct irq_cfg *cfg;
-	int legacy_count;
-	int i;
-
-	cfg = *da->name;
-
-	memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));
-
-	legacy_count = ARRAY_SIZE(irq_cfg_legacy);
-	for (i = legacy_count; i < *da->nr; i++)
-		init_one_irq_cfg(&cfg[i]);
-}
-
 #define for_each_irq_cfg(irq, cfg)		\
-	for (irq = 0, cfg = &irq_cfgx[irq]; irq < nr_irqs; irq++, cfg = &irq_cfgx[irq])
+	for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)
 
-DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work);
-
-struct irq_cfg *irq_cfg(unsigned int irq)
+static struct irq_cfg *irq_cfg(unsigned int irq)
 {
-	if (irq < nr_irqs)
-		return &irq_cfgx[irq];
-
-	return NULL;
+	return irq < nr_irqs ? irq_cfgx + irq : NULL;
 }
-struct irq_cfg *irq_cfg_alloc(unsigned int irq)
+
+static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
 {
 	return irq_cfg(irq);
 }
 
 /*
+ * Rough estimation of how many shared IRQs there are, can be changed
+ * anytime.
+ */
+#define MAX_PLUS_SHARED_IRQS NR_IRQS
+#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
+
+/*
  * This is performance-critical, we want to do it O(1)
  *
  * the indexing order of this array favors 1:1 mappings
@@ -193,59 +170,29 @@ struct irq_pin_list {
 	struct irq_pin_list *next;
 };
 
-static struct irq_pin_list *irq_2_pin_head;
-/* fill one page ? */
-static int nr_irq_2_pin = 0x100;
+static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
 static struct irq_pin_list *irq_2_pin_ptr;
-static void __init irq_2_pin_init_work(void *data)
+
+static void __init irq_2_pin_init(void)
 {
-	struct dyn_array *da = data;
-	struct irq_pin_list *pin;
+	struct irq_pin_list *pin = irq_2_pin_head;
 	int i;
 
-	pin = *da->name;
-
-	for (i = 1; i < *da->nr; i++)
+	for (i = 1; i < PIN_MAP_SIZE; i++)
 		pin[i-1].next = &pin[i];
 
 	irq_2_pin_ptr = &pin[0];
 }
-DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
 
 static struct irq_pin_list *get_one_free_irq_2_pin(void)
 {
-	struct irq_pin_list *pin;
-	int i;
-
-	pin = irq_2_pin_ptr;
-
-	if (pin) {
-		irq_2_pin_ptr = pin->next;
-		pin->next = NULL;
-		return pin;
-	}
-
-	/*
-	 * we run out of pre-allocate ones, allocate more
-	 */
-	printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);
-
-	if (after_bootmem)
-		pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
-				 GFP_ATOMIC);
-	else
-		pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
-				nr_irq_2_pin, PAGE_SIZE, 0);
+	struct irq_pin_list *pin = irq_2_pin_ptr;
 
 	if (!pin)
 		panic("can not get more irq_2_pin\n");
 
-	for (i = 1; i < nr_irq_2_pin; i++)
-		pin[i-1].next = &pin[i];
-
 	irq_2_pin_ptr = pin->next;
 	pin->next = NULL;
-
 	return pin;
 }
 
@@ -284,8 +231,9 @@ static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
 static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
 {
 	struct io_apic __iomem *io_apic = io_apic_base(apic);
-        if (sis_apic_bug)
-                writel(reg, &io_apic->index);
+
+	if (sis_apic_bug)
+		writel(reg, &io_apic->index);
 	writel(value, &io_apic->data);
 }
 
@@ -1044,11 +992,11 @@ static int pin_2_irq(int idx, int apic, int pin)
 		while (i < apic)
 			irq += nr_ioapic_registers[i++];
 		irq += pin;
-                /*
-                 * For MPS mode, so far only needed by ES7000 platform
-                 */
-                if (ioapic_renumber_irq)
-                        irq = ioapic_renumber_irq(apic, irq);
+		/*
+		 * For MPS mode, so far only needed by ES7000 platform
+		 */
+		if (ioapic_renumber_irq)
+			irq = ioapic_renumber_irq(apic, irq);
 	}
 
 #ifdef CONFIG_X86_32
@@ -1232,19 +1180,19 @@ static struct irq_chip ir_ioapic_chip;
 #ifdef CONFIG_X86_32
 static inline int IO_APIC_irq_trigger(int irq)
 {
-        int apic, idx, pin;
-
-        for (apic = 0; apic < nr_ioapics; apic++) {
-                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
-                        idx = find_irq_entry(apic, pin, mp_INT);
-                        if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
-                                return irq_trigger(idx);
-                }
-        }
-        /*
-         * nonexistent IRQs are edge default
-         */
-        return 0;
+	int apic, idx, pin;
+
+	for (apic = 0; apic < nr_ioapics; apic++) {
+		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
+			idx = find_irq_entry(apic, pin, mp_INT);
+			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
+				return irq_trigger(idx);
+		}
+	}
+	/*
+	 * nonexistent IRQs are edge default
+	 */
+	return 0;
 }
 #else
 static inline int IO_APIC_irq_trigger(int irq)
@@ -1509,8 +1457,8 @@ __apicdebuginit(void) print_IO_APIC(void)
 	reg_01.raw = io_apic_read(apic, 1);
 	if (reg_01.bits.version >= 0x10)
 		reg_02.raw = io_apic_read(apic, 2);
-        if (reg_01.bits.version >= 0x20)
-                reg_03.raw = io_apic_read(apic, 3);
+	if (reg_01.bits.version >= 0x20)
+		reg_03.raw = io_apic_read(apic, 3);
 	spin_unlock_irqrestore(&ioapic_lock, flags);
 
 	printk("\n");
@@ -2089,9 +2037,9 @@ static int ioapic_retrigger_irq(unsigned int irq)
 #else
 static int ioapic_retrigger_irq(unsigned int irq)
 {
-        send_IPI_self(irq_cfg(irq)->vector);
+	send_IPI_self(irq_cfg(irq)->vector);
 
-        return 1;
+	return 1;
 }
 #endif
 
@@ -2189,7 +2137,7 @@ static int migrate_irq_remapped_level(int irq)
 
 	if (io_apic_level_ack_pending(irq)) {
 		/*
-                 * Interrupt in progress. Migrating irq now will change the
+		 * Interrupt in progress. Migrating irq now will change the
 		 * vector information in the IO-APIC RTE and that will confuse
 		 * the EOI broadcast performed by cpu.
 		 * So, delay the irq migration to the next instance.
@@ -2426,28 +2374,28 @@ static void ack_apic_level(unsigned int irq)
 }
 
 static struct irq_chip ioapic_chip __read_mostly = {
-        .name           = "IO-APIC",
-        .startup        = startup_ioapic_irq,
-        .mask           = mask_IO_APIC_irq,
-        .unmask         = unmask_IO_APIC_irq,
-        .ack            = ack_apic_edge,
-        .eoi            = ack_apic_level,
+	.name		= "IO-APIC",
+	.startup	= startup_ioapic_irq,
+	.mask		= mask_IO_APIC_irq,
+	.unmask		= unmask_IO_APIC_irq,
+	.ack		= ack_apic_edge,
+	.eoi		= ack_apic_level,
 #ifdef CONFIG_SMP
-        .set_affinity   = set_ioapic_affinity_irq,
+	.set_affinity	= set_ioapic_affinity_irq,
 #endif
-        .retrigger      = ioapic_retrigger_irq,
+	.retrigger	= ioapic_retrigger_irq,
 };
 
 #ifdef CONFIG_INTR_REMAP
 static struct irq_chip ir_ioapic_chip __read_mostly = {
-        .name           = "IR-IO-APIC",
-        .startup        = startup_ioapic_irq,
-        .mask           = mask_IO_APIC_irq,
-        .unmask         = unmask_IO_APIC_irq,
-        .ack            = ack_x2apic_edge,
-        .eoi            = ack_x2apic_level,
+	.name		= "IR-IO-APIC",
+	.startup	= startup_ioapic_irq,
+	.mask		= mask_IO_APIC_irq,
+	.unmask		= unmask_IO_APIC_irq,
+	.ack		= ack_x2apic_edge,
+	.eoi		= ack_x2apic_level,
 #ifdef CONFIG_SMP
-        .set_affinity   = set_ir_ioapic_affinity_irq,
+	.set_affinity	= set_ir_ioapic_affinity_irq,
 #endif
-        .retrigger      = ioapic_retrigger_irq,
+	.retrigger	= ioapic_retrigger_irq,
 };
@@ -2636,8 +2584,8 @@ static inline void __init check_timer(void)
 
 	local_irq_save(flags);
 
-        ver = apic_read(APIC_LVR);
-        ver = GET_APIC_VERSION(ver);
+	ver = apic_read(APIC_LVR);
+	ver = GET_APIC_VERSION(ver);
 
 	/*
 	 * get/set the timer IRQ vector:
@@ -2822,12 +2770,12 @@ void __init setup_IO_APIC(void)
 	io_apic_irqs = ~PIC_IRQS;
 
 	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
-        /*
-         * Set up IO-APIC IRQ routing.
-         */
+	/*
+	 * Set up IO-APIC IRQ routing.
+	 */
 #ifdef CONFIG_X86_32
-        if (!acpi_ioapic)
-                setup_ioapic_ids_from_mpc();
+	if (!acpi_ioapic)
+		setup_ioapic_ids_from_mpc();
 #endif
 	sync_Arb_IDs();
 	setup_IO_APIC_irqs();
@@ -2842,9 +2790,9 @@ void __init setup_IO_APIC(void)
 
 static int __init io_apic_bug_finalize(void)
 {
-        if (sis_apic_bug == -1)
-                sis_apic_bug = 0;
-        return 0;
+	if (sis_apic_bug == -1)
+		sis_apic_bug = 0;
+	return 0;
 }
 
 late_initcall(io_apic_bug_finalize);
@@ -3199,7 +3147,7 @@ static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
 	if (index < 0) {
 		printk(KERN_ERR
 		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
-		        pci_name(dev));
+		       pci_name(dev));
 		return -ENOSPC;
 	}
 	return index;
@@ -3885,23 +3833,24 @@ static struct resource * __init ioapic_setup_resources(void)
 void __init ioapic_init_mappings(void)
 {
 	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
-	int i;
 	struct resource *ioapic_res;
+	int i;
 
+	irq_2_pin_init();
 	ioapic_res = ioapic_setup_resources();
 	for (i = 0; i < nr_ioapics; i++) {
 		if (smp_found_config) {
-                        ioapic_phys = mp_ioapics[i].mp_apicaddr;
+			ioapic_phys = mp_ioapics[i].mp_apicaddr;
 #ifdef CONFIG_X86_32
-                        if (!ioapic_phys) {
-                                printk(KERN_ERR
-                                       "WARNING: bogus zero IO-APIC "
-                                       "address found in MPTABLE, "
-                                       "disabling IO/APIC support!\n");
-                                smp_found_config = 0;
-                                skip_ioapic_setup = 1;
-                                goto fake_ioapic_page;
-                        }
+			if (!ioapic_phys) {
+				printk(KERN_ERR
+				       "WARNING: bogus zero IO-APIC "
+				       "address found in MPTABLE, "
+				       "disabling IO/APIC support!\n");
+				smp_found_config = 0;
+				skip_ioapic_setup = 1;
+				goto fake_ioapic_page;
+			}
 #endif
 		} else {
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 2b7dab699e83..410c88f0bfeb 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -140,7 +140,7 @@ static void __init setup_cpu_pda_map(void)
  */
 void __init setup_per_cpu_areas(void)
 {
-	ssize_t size, old_size, da_size;
+	ssize_t size, old_size;
 	char *ptr;
 	int cpu;
 	unsigned long align = 1;
@@ -150,9 +150,8 @@ void __init setup_per_cpu_areas(void)
 
 	/* Copy section for each CPU (we discard the original) */
 	old_size = PERCPU_ENOUGH_ROOM;
-	da_size = per_cpu_dyn_array_size(&align);
 	align = max_t(unsigned long, PAGE_SIZE, align);
-	size = roundup(old_size + da_size, align);
+	size = roundup(old_size, align);
 	printk(KERN_INFO "PERCPU: Allocating %zd bytes of per cpu data\n",
 	       size);
 
@@ -182,9 +181,6 @@ void __init setup_per_cpu_areas(void)
 #endif
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-
-		per_cpu_alloc_dyn_array(cpu, ptr + old_size);
-
 	}
 
 	printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 817aa55a1209..0c9667f0752a 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -633,7 +633,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 	/*
 	 * handle this 'virtual interrupt' as a Cobalt one now.
 	 */
-	kstat_irqs_this_cpu(desc)++;
+	kstat_incr_irqs_this_cpu(realirq, desc);
 
 	if (likely(desc->action != NULL))
 		handle_IRQ_event(realirq, desc->action);
diff --git a/arch/x86/kernel/vmlinux_32.lds.S b/arch/x86/kernel/vmlinux_32.lds.S
index c36007ab3940..a9b8560adbc2 100644
--- a/arch/x86/kernel/vmlinux_32.lds.S
+++ b/arch/x86/kernel/vmlinux_32.lds.S
@@ -145,7 +145,6 @@ SECTIONS
 	*(.x86_cpu_dev.init)
 	__x86_cpu_dev_end = .;
   }
-  DYN_ARRAY_INIT(8)
   SECURITY_INIT
   . = ALIGN(4);
   .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 30973dbac8c2..3245ad72594a 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -174,8 +174,6 @@ SECTIONS
   }
   __x86_cpu_dev_end = .;
 
-  DYN_ARRAY_INIT(8)
-
   SECURITY_INIT
 
   . = ALIGN(8);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index bb6bc721b13d..5601506f2dd9 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -241,7 +241,7 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
-	kstat_irqs_this_cpu(irq_to_desc(irq))++;
+	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 
 out:
 	raw_local_irq_restore(flags);
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 9ce80213007b..1137d2976043 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -558,12 +558,7 @@ struct timer_rand_state {
 	unsigned dont_count_entropy:1;
 };
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct timer_rand_state **irq_timer_state;
-DEFINE_DYN_ARRAY(irq_timer_state, sizeof(struct timer_rand_state *), nr_irqs, PAGE_SIZE, NULL);
-#else
 static struct timer_rand_state *irq_timer_state[NR_IRQS];
-#endif
 
 static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
 {
diff --git a/drivers/pci/intr_remapping.c b/drivers/pci/intr_remapping.c
index 0f43b265eee6..950769e87475 100644
--- a/drivers/pci/intr_remapping.c
+++ b/drivers/pci/intr_remapping.c
@@ -19,20 +19,13 @@ struct irq_2_iommu {
 	u8  irte_mask;
 };
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct irq_2_iommu *irq_2_iommuX;
-DEFINE_DYN_ARRAY(irq_2_iommuX, sizeof(struct irq_2_iommu), nr_irqs, PAGE_SIZE, NULL);
-#else
 static struct irq_2_iommu irq_2_iommuX[NR_IRQS];
-#endif
 
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
-	if (irq < nr_irqs)
-		return &irq_2_iommuX[irq];
-
-	return NULL;
+	return (irq < nr_irqs) ? irq_2_iommuX + irq : NULL;
 }
+
 static struct irq_2_iommu *irq_2_iommu_alloc(unsigned int irq)
 {
 	return irq_2_iommu(irq);
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c68eda9d9a90..7440a0dceddb 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -210,19 +210,6 @@
  * All archs are supposed to use RO_DATA() */
 #define RODATA RO_DATA(4096)
 
-#define DYN_ARRAY_INIT(align)						\
-	. = ALIGN((align));						\
-	.dyn_array.init : AT(ADDR(.dyn_array.init) - LOAD_OFFSET) {	\
-		VMLINUX_SYMBOL(__dyn_array_start) = .;			\
-		*(.dyn_array.init)					\
-		VMLINUX_SYMBOL(__dyn_array_end) = .;			\
-	}								\
-	. = ALIGN((align));						\
-	.per_cpu_dyn_array.init : AT(ADDR(.per_cpu_dyn_array.init) - LOAD_OFFSET) { \
-		VMLINUX_SYMBOL(__per_cpu_dyn_array_start) = .;		\
-		*(.per_cpu_dyn_array.init)				\
-		VMLINUX_SYMBOL(__per_cpu_dyn_array_end) = .;		\
-	}
 #define SECURITY_INIT							\
 	.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
 		VMLINUX_SYMBOL(__security_initcall_start) = .;		\
diff --git a/include/linux/init.h b/include/linux/init.h
index 59fbb4aaba6a..70ad53e1eab8 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -247,49 +247,6 @@ struct obs_kernel_param {
 /* Relies on boot_command_line being set */
 void __init parse_early_param(void);
 
-struct dyn_array {
-	void **name;
-	unsigned long size;
-	unsigned int *nr;
-	unsigned long align;
-	void (*init_work)(void *);
-};
-extern struct dyn_array *__dyn_array_start[], *__dyn_array_end[];
-extern struct dyn_array *__per_cpu_dyn_array_start[], *__per_cpu_dyn_array_end[];
-
-#define DEFINE_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \
-	static struct dyn_array __dyn_array_##nameX __initdata =	\
-		{ .name = (void **)&(nameX),				\
-		  .size = sizeX,					\
-		  .nr = &(nrX),						\
-		  .align = alignX,					\
-		  .init_work = init_workX,				\
-		};							\
-	static struct dyn_array *__dyn_array_ptr_##nameX __used		\
-	__attribute__((__section__(".dyn_array.init"))) =		\
-	&__dyn_array_##nameX
-
-#define DEFINE_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX)	\
-	DEFINE_DYN_ARRAY_ADDR(nameX, nameX, sizeX, nrX, alignX, init_workX)
-
-#define DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, addrX, sizeX, nrX, alignX, init_workX) \
-	static struct dyn_array __per_cpu_dyn_array_##nameX __initdata = \
-		{ .name = (void **)&(addrX),				\
-		  .size = sizeX,					\
-		  .nr = &(nrX),						\
-		  .align = alignX,					\
-		  .init_work = init_workX,				\
-		};							\
-	static struct dyn_array *__per_cpu_dyn_array_ptr_##nameX __used \
-	__attribute__((__section__(".per_cpu_dyn_array.init"))) =	\
-	&__per_cpu_dyn_array_##nameX
-
-#define DEFINE_PER_CPU_DYN_ARRAY(nameX, sizeX, nrX, alignX, init_workX) \
-	DEFINE_PER_CPU_DYN_ARRAY_ADDR(nameX, nameX, nrX, alignX, init_workX)
-
-extern void pre_alloc_dyn_array(void);
-extern unsigned long per_cpu_dyn_array_size(unsigned long *align);
-extern void per_cpu_alloc_dyn_array(int cpu, char *ptr);
 #endif /* __ASSEMBLY__ */
 
 /**
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 3f33c7790300..38bf89f2ade0 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -139,8 +139,6 @@ struct irq_chip {
 	const char *typename;
 };
 
-struct timer_rand_state;
-struct irq_2_iommu;
 /**
  * struct irq_desc - interrupt descriptor
  *
@@ -167,9 +165,6 @@ struct irq_2_iommu;
  */
 struct irq_desc {
 	unsigned int irq;
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned int *kstat_irqs;
-#endif
 	irq_flow_handler_t handle_irq;
 	struct irq_chip *chip;
 	struct msi_desc *msi_desc;
@@ -198,23 +193,13 @@ struct irq_desc {
 } ____cacheline_internodealigned_in_smp;
 
 
-#ifndef CONFIG_HAVE_DYN_ARRAY
-/* could be removed if we get rid of all irq_desc reference */
 extern struct irq_desc irq_desc[NR_IRQS];
-#else
-extern struct irq_desc *irq_desc;
-#endif
 
 static inline struct irq_desc *irq_to_desc(unsigned int irq)
 {
 	return (irq < nr_irqs) ? irq_desc + irq : NULL;
 }
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-#define kstat_irqs_this_cpu(DESC) \
-	((DESC)->kstat_irqs[smp_processor_id()])
-#endif
-
 /*
  * Migration helpers for obsolete names, they will go away:
  */
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 21249d8c1293..a9d0d360b776 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,9 +28,7 @@ struct cpu_usage_stat {
 
 struct kernel_stat {
 	struct cpu_usage_stat cpustat;
-#ifndef CONFIG_HAVE_DYN_ARRAY
 	unsigned int irqs[NR_IRQS];
-#endif
 };
 
 DECLARE_PER_CPU(struct kernel_stat, kstat);
@@ -41,20 +39,18 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
 
 extern unsigned long long nr_context_switches(void);
 
-#ifndef CONFIG_HAVE_DYN_ARRAY
-#define kstat_irqs_this_cpu(irq) \
-	(kstat_this_cpu.irqs[irq])
-#endif
+struct irq_desc;
 
+static inline void kstat_incr_irqs_this_cpu(unsigned int irq,
+					    struct irq_desc *desc)
+{
+	kstat_this_cpu.irqs[irq]++;
+}
 
-#ifndef CONFIG_HAVE_DYN_ARRAY
 static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	return kstat_cpu(cpu).irqs[irq];
 }
-#else
-extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
-#endif
 
 /*
  * Number of interrupts per specific IRQ source, since bootup
diff --git a/init/Makefile b/init/Makefile
index dc5eeca6eb6d..4a243df426f7 100644
--- a/init/Makefile
+++ b/init/Makefile
@@ -2,7 +2,7 @@
 # Makefile for the linux kernel.
 #
 
-obj-y := main.o dyn_array.o version.o mounts.o
+obj-y := main.o version.o mounts.o
 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
 obj-y += noinitramfs.o
 else
diff --git a/init/dyn_array.c b/init/dyn_array.c
deleted file mode 100644
index c8d5e2a18588..000000000000
--- a/init/dyn_array.c
+++ /dev/null
@@ -1,120 +0,0 @@
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/kallsyms.h>
-#include <linux/init.h>
-#include <linux/bootmem.h>
-#include <linux/irq.h>
-
-void __init pre_alloc_dyn_array(void)
-{
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned long total_size = 0, size, phys;
-	unsigned long max_align = 1;
-	struct dyn_array **daa;
-	char *ptr;
-
-	/* get the total size at first */
-	for (daa = __dyn_array_start ; daa < __dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		printk(KERN_DEBUG "dyn_array %pF size:%#lx nr:%d align:%#lx\n",
-			da->name, da->size, *da->nr, da->align);
-		size = da->size * (*da->nr);
-		total_size += roundup(size, da->align);
-		if (da->align > max_align)
-			max_align = da->align;
-	}
-	if (total_size)
-		printk(KERN_DEBUG "dyn_array total_size: %#lx\n",
-			 total_size);
-	else
-		return;
-
-	/* allocate them all together */
-	max_align = max_t(unsigned long, max_align, PAGE_SIZE);
-	ptr = __alloc_bootmem(total_size, max_align, 0);
-	phys = virt_to_phys(ptr);
-
-	for (daa = __dyn_array_start ; daa < __dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		size = da->size * (*da->nr);
-		phys = roundup(phys, da->align);
-		printk(KERN_DEBUG "dyn_array %pF ==> [%#lx - %#lx]\n",
-			da->name, phys, phys + size);
-		*da->name = phys_to_virt(phys);
-
-		phys += size;
-
-		if (da->init_work)
-			da->init_work(da);
-	}
-#else
-#ifdef CONFIG_GENERIC_HARDIRQS
-	unsigned int i;
-
-	for (i = 0; i < NR_IRQS; i++)
-		irq_desc[i].irq = i;
-#endif
-#endif
-}
-
-unsigned long __init per_cpu_dyn_array_size(unsigned long *align)
-{
-	unsigned long total_size = 0;
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned long size;
-	struct dyn_array **daa;
-	unsigned max_align = 1;
-
-	for (daa = __per_cpu_dyn_array_start ; daa < __per_cpu_dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		printk(KERN_DEBUG "per_cpu_dyn_array %pF size:%#lx nr:%d align:%#lx\n",
-			da->name, da->size, *da->nr, da->align);
-		size = da->size * (*da->nr);
-		total_size += roundup(size, da->align);
-		if (da->align > max_align)
-			max_align = da->align;
-	}
-	if (total_size) {
-		printk(KERN_DEBUG "per_cpu_dyn_array total_size: %#lx\n",
-			 total_size);
-		*align = max_align;
-	}
-#endif
-	return total_size;
-}
-
-#ifdef CONFIG_SMP
-void __init per_cpu_alloc_dyn_array(int cpu, char *ptr)
-{
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned long size, phys;
-	struct dyn_array **daa;
-	unsigned long addr;
-	void **array;
-
-	phys = virt_to_phys(ptr);
-	for (daa = __per_cpu_dyn_array_start ; daa < __per_cpu_dyn_array_end; daa++) {
-		struct dyn_array *da = *daa;
-
-		size = da->size * (*da->nr);
-		phys = roundup(phys, da->align);
-		printk(KERN_DEBUG "per_cpu_dyn_array %pF ==> [%#lx - %#lx]\n",
-			da->name, phys, phys + size);
-
-		addr = (unsigned long)da->name;
-		addr += per_cpu_offset(cpu);
-		array = (void **)addr;
-		*array = phys_to_virt(phys);
-		*da->name = *array; /* so init_work could use it directly */
-
-		phys += size;
-
-		if (da->init_work)
-			da->init_work(da);
-	}
-#endif
-}
-#endif
diff --git a/init/main.c b/init/main.c
index e81cf427d9c7..27f6bf6108e9 100644
--- a/init/main.c
+++ b/init/main.c
@@ -391,23 +391,17 @@ EXPORT_SYMBOL(__per_cpu_offset);
 
 static void __init setup_per_cpu_areas(void)
 {
-	unsigned long size, i, old_size;
+	unsigned long size, i;
 	char *ptr;
 	unsigned long nr_possible_cpus = num_possible_cpus();
-	unsigned long align = 1;
-	unsigned da_size;
 
 	/* Copy section for each CPU (we discard the original) */
-	old_size = PERCPU_ENOUGH_ROOM;
-	da_size = per_cpu_dyn_array_size(&align);
-	align = max_t(unsigned long, PAGE_SIZE, align);
-	size = ALIGN(old_size + da_size, align);
+	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
 	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
 
 	for_each_possible_cpu(i) {
 		__per_cpu_offset[i] = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-		per_cpu_alloc_dyn_array(i, ptr + old_size);
 		ptr += size;
 	}
 }
@@ -573,7 +567,6 @@ asmlinkage void __init start_kernel(void)
 	printk(KERN_NOTICE);
 	printk(linux_banner);
 	setup_arch(&command_line);
-	pre_alloc_dyn_array();
 	mm_init_owner(&init_mm, &init_task);
 	setup_command_line(command_line);
 	unwind_setup();
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index e6f73dbfcc3d..d96d6f687c48 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -326,11 +326,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	action = desc->action;
 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
@@ -371,11 +367,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/*
 	 * If its disabled or no action available
@@ -422,11 +414,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 		goto out;
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/*
 	 * If its disabled or no action available
@@ -490,11 +478,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		mask_ack_irq(desc, irq);
 		goto out_unlock;
 	}
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
 	desc->chip->ack(irq);
@@ -549,11 +533,7 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 {
 	irqreturn_t action_ret;
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 
 	if (desc->chip->ack)
 		desc->chip->ack(irq);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index f837133cdfbe..9fe86b3a60a5 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -18,11 +18,6 @@
 
 #include "internals.h"
 
-/*
- * lockdep: we want to handle all irq_desc locks as a single lock-class:
- */
-static struct lock_class_key irq_desc_lock_class;
-
 /**
  * handle_bad_irq - handle spurious and unhandled irqs
  * @irq:       the interrupt number
@@ -30,15 +25,10 @@ static struct lock_class_key irq_desc_lock_class;
  *
  * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
  */
-void
-handle_bad_irq(unsigned int irq, struct irq_desc *desc)
+void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
 	ack_bad_irq(irq);
 }
 
@@ -59,80 +49,6 @@ handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 int nr_irqs = NR_IRQS;
 EXPORT_SYMBOL_GPL(nr_irqs);
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-static struct irq_desc irq_desc_init = {
-	.irq = -1U,
-	.status = IRQ_DISABLED,
-	.chip = &no_irq_chip,
-	.handle_irq = handle_bad_irq,
-	.depth = 1,
-	.lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
-#ifdef CONFIG_SMP
-	.affinity = CPU_MASK_ALL
-#endif
-};
-
-
-static void init_one_irq_desc(struct irq_desc *desc)
-{
-	memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-}
-
-extern int after_bootmem;
-extern void *__alloc_bootmem_nopanic(unsigned long size,
-				     unsigned long align,
-				     unsigned long goal);
-
-static void init_kstat_irqs(struct irq_desc *desc, int nr_desc, int nr)
-{
-	unsigned long bytes, total_bytes;
-	char *ptr;
-	int i;
-	unsigned long phys;
-
-	/* Compute how many bytes we need per irq and allocate them */
-	bytes = nr * sizeof(unsigned int);
-	total_bytes = bytes * nr_desc;
-	if (after_bootmem)
-		ptr = kzalloc(total_bytes, GFP_ATOMIC);
-	else
-		ptr = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
-
-	if (!ptr)
-		panic(" can not allocate kstat_irqs\n");
-
-	phys = __pa(ptr);
-	printk(KERN_DEBUG "kstat_irqs ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
-
-	for (i = 0; i < nr_desc; i++) {
-		desc[i].kstat_irqs = (unsigned int *)ptr;
-		ptr += bytes;
-	}
-}
-
-static void __init init_work(void *data)
-{
-	struct dyn_array *da = data;
-	int i;
-	struct irq_desc *desc;
-
-	desc = *da->name;
-
-	for (i = 0; i < *da->nr; i++) {
-		init_one_irq_desc(&desc[i]);
-		desc[i].irq = i;
-	}
-
-	/* init kstat_irqs, nr_cpu_ids is ready already */
-	init_kstat_irqs(desc, *da->nr, nr_cpu_ids);
-}
-
-struct irq_desc *irq_desc;
-DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
-
-#else
-
 struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	[0 ... NR_IRQS-1] = {
 		.status = IRQ_DISABLED,
@@ -146,8 +62,6 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };
 
-#endif
-
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -258,11 +172,8 @@ unsigned int __do_IRQ(unsigned int irq)
 	struct irqaction *action;
 	unsigned int status;
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	kstat_irqs_this_cpu(desc)++;
-#else
-	kstat_irqs_this_cpu(irq)++;
-#endif
+	kstat_incr_irqs_this_cpu(irq, desc);
+
 	if (CHECK_IRQ_PER_CPU(desc->status)) {
 		irqreturn_t action_ret;
 
@@ -351,23 +262,16 @@ out:
 
 
 #ifdef CONFIG_TRACE_IRQFLAGS
+/*
+ * lockdep: we want to handle all irq_desc locks as a single lock-class:
+ */
+static struct lock_class_key irq_desc_lock_class;
+
 void early_init_irq_lock_class(void)
 {
-#ifndef CONFIG_HAVE_DYN_ARRAY
 	int i;
 
 	for (i = 0; i < nr_irqs; i++)
 		lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class);
-#endif
 }
 #endif
-
-#ifdef CONFIG_HAVE_DYN_ARRAY
-unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-	return desc->kstat_irqs[cpu];
-}
-#endif
-EXPORT_SYMBOL(kstat_irqs_cpu);
-