-rw-r--r--  arch/x86/kernel/io_apic_32.c     2
-rw-r--r--  arch/x86/kernel/irq_32.c         4
-rw-r--r--  arch/x86/kernel/irq_64.c         4
-rw-r--r--  arch/x86/kernel/visws_quirks.c   2
-rw-r--r--  arch/x86/xen/spinlock.c          2
-rw-r--r--  fs/proc/proc_misc.c              2
-rw-r--r--  include/linux/irq.h              7
-rw-r--r--  include/linux/kernel_stat.h     22
-rw-r--r--  kernel/irq/chip.c               15
-rw-r--r--  kernel/irq/handle.c             97
-rw-r--r--  kernel/sched.c                   5
11 files changed, 106 insertions, 56 deletions
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index c2160cfdec9b..204884b1415a 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -526,7 +526,7 @@ static void do_irq_balance(void)
 			if (package_index == i)
 				IRQ_DELTA(package_index, j) = 0;
 			/* Determine the total count per processor per IRQ */
-			value_now = (unsigned long) kstat_cpu(i).irqs[j];
+			value_now = (unsigned long) kstat_irqs_cpu(j, i);
 
 			/* Determine the activity per processor per IRQ */
 			delta = value_now - LAST_CPU_IRQ(i, j);
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index ede513be517d..576c5df6cad8 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -280,7 +280,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		any_count = kstat_irqs(i);
 #else
 		for_each_online_cpu(j)
-			any_count |= kstat_cpu(j).irqs[i];
+			any_count |= kstat_irqs_cpu(i, j);
 #endif
 		action = desc->action;
 		if (!action && !any_count)
@@ -290,7 +290,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
 		seq_printf(p, " %8s", desc->chip->name);
 		seq_printf(p, "-%-8s", desc->name);
diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
index 738eb65a924e..4a0a4eb44dcb 100644
--- a/arch/x86/kernel/irq_64.c
+++ b/arch/x86/kernel/irq_64.c
@@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		any_count = kstat_irqs(i);
 #else
 		for_each_online_cpu(j)
-			any_count |= kstat_cpu(j).irqs[i];
+			any_count |= kstat_irqs_cpu(i, j);
 #endif
 		action = desc->action;
 		if (!action && !any_count)
@@ -100,7 +100,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
 		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
 #endif
 		seq_printf(p, " %8s", desc->chip->name);
 		seq_printf(p, "-%-8s", desc->name);
diff --git a/arch/x86/kernel/visws_quirks.c b/arch/x86/kernel/visws_quirks.c
index 9d85ab384435..817aa55a1209 100644
--- a/arch/x86/kernel/visws_quirks.c
+++ b/arch/x86/kernel/visws_quirks.c
@@ -633,7 +633,7 @@ static irqreturn_t piix4_master_intr(int irq, void *dev_id)
 	/*
 	 * handle this 'virtual interrupt' as a Cobalt one now.
 	 */
-	kstat_cpu(smp_processor_id()).irqs[realirq]++;
+	kstat_irqs_this_cpu(desc)++;
 
 	if (likely(desc->action != NULL))
 		handle_IRQ_event(realirq, desc->action);
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index dd71e3a021cd..bb6bc721b13d 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -241,7 +241,7 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enabl
 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
-	kstat_this_cpu.irqs[irq]++;
+	kstat_irqs_this_cpu(irq_to_desc(irq))++;
 
 out:
 	raw_local_irq_restore(flags);
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index a2173a2a5625..aa069acf61a0 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -532,7 +532,7 @@ static int show_stat(struct seq_file *p, void *v)
 		steal = cputime64_add(steal, kstat_cpu(i).cpustat.steal);
 		guest = cputime64_add(guest, kstat_cpu(i).cpustat.guest);
 		for (j = 0; j < nr_irqs; j++) {
-			unsigned int temp = kstat_cpu(i).irqs[j];
+			unsigned int temp = kstat_irqs_cpu(j, i);
 			sum += temp;
 			per_irq_sum[j] += temp;
 		}
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 60c856aaac0f..cbf471aee1ce 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -158,6 +158,11 @@ struct irq_desc {
 	struct irq_desc *next;
 	struct timer_rand_state *timer_rand_state;
 #endif
+#ifdef CONFIG_HAVE_DYN_ARRAY
+	unsigned int *kstat_irqs;
+#else
+	unsigned int kstat_irqs[NR_CPUS];
+#endif
 	irq_flow_handler_t handle_irq;
 	struct irq_chip *chip;
 	struct msi_desc *msi_desc;
@@ -190,6 +195,8 @@ extern struct irq_desc *irq_to_desc(unsigned int irq);
 /* could be removed if we get rid of all irq_desc reference */
 extern struct irq_desc irq_desc[NR_IRQS];
 #endif
+#define kstat_irqs_this_cpu(DESC) \
+	((DESC)->kstat_irqs[smp_processor_id()])
 
 /*
  * Migration helpers for obsolete names, they will go away:
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index fe1f7fe534b4..f10616712de5 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,10 +28,8 @@ struct cpu_usage_stat {
 
 struct kernel_stat {
 	struct cpu_usage_stat	cpustat;
-#ifdef CONFIG_HAVE_DYN_ARRAY
-	unsigned int *irqs;
-#else
+#ifndef CONFIG_GENERIC_HARDIRQS
 	unsigned int irqs[NR_IRQS];
 #endif
 };
 
@@ -43,15 +41,25 @@ DECLARE_PER_CPU(struct kernel_stat, kstat);
 
 extern unsigned long long nr_context_switches(void);
 
+#ifndef CONFIG_GENERIC_HARDIRQS
+static inline unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+	return kstat_cpu(cpu).irqs[irq];
+}
+#else
+extern unsigned int kstat_irqs_cpu(unsigned int irq, int cpu);
+#endif
+
 /*
  * Number of interrupts per specific IRQ source, since bootup
  */
-static inline int kstat_irqs(int irq)
+static inline unsigned int kstat_irqs(unsigned int irq)
 {
-	int cpu, sum = 0;
+	unsigned int sum = 0;
+	int cpu;
 
 	for_each_possible_cpu(cpu)
-		sum += kstat_cpu(cpu).irqs[irq];
+		sum += kstat_irqs_cpu(irq, cpu);
 
 	return sum;
 }
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 76c225cf4b26..2aa3d4b2fce8 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -312,14 +312,13 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
 	struct irqaction *action;
 	irqreturn_t action_ret;
-	const unsigned int cpu = smp_processor_id();
 
 	spin_lock(&desc->lock);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_irqs_this_cpu(desc)++;
 
 	action = desc->action;
 	if (unlikely(!action || (desc->status & IRQ_DISABLED)))
@@ -351,7 +350,6 @@ out_unlock:
 void
 handle_level_irq(unsigned int irq, struct irq_desc *desc)
 {
-	unsigned int cpu = smp_processor_id();
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
@@ -361,7 +359,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_irqs_this_cpu(desc)++;
 
 	/*
 	 * If its disabled or no action available
@@ -399,7 +397,6 @@ out_unlock:
 void
 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 {
-	unsigned int cpu = smp_processor_id();
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
@@ -409,7 +406,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 		goto out;
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_irqs_this_cpu(desc)++;
 
 	/*
 	 * If its disabled or no action available
@@ -458,8 +455,6 @@ out:
 void
 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
-	const unsigned int cpu = smp_processor_id();
-
 	spin_lock(&desc->lock);
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -476,7 +471,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_unlock;
 	}
 
-	kstat_cpu(cpu).irqs[irq]++;
+	kstat_irqs_this_cpu(desc)++;
 
 	/* Start handling the irq */
 	desc->chip->ack(irq);
@@ -531,7 +526,7 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
 {
 	irqreturn_t action_ret;
 
-	kstat_this_cpu.irqs[irq]++;
+	kstat_irqs_this_cpu(desc)++;
 
 	if (desc->chip->ack)
 		desc->chip->ack(irq);
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 9fc33b3378e6..1f346990f3f8 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -37,7 +37,7 @@ void
 handle_bad_irq(unsigned int irq, struct irq_desc *desc)
 {
 	print_irq_desc(irq, desc);
-	kstat_this_cpu.irqs[irq]++;
+	kstat_irqs_this_cpu(desc)++;
 	ack_bad_irq(irq);
 }
 
@@ -80,17 +80,38 @@ static void init_one_irq_desc(struct irq_desc *desc)
 #endif
 }
 
-#ifdef CONFIG_HAVE_SPARSE_IRQ
-static int nr_irq_desc = 32;
+extern int after_bootmem;
+extern void *__alloc_bootmem_nopanic(unsigned long size,
+				     unsigned long align,
+				     unsigned long goal);
 
-static int __init parse_nr_irq_desc(char *arg)
+static void init_kstat_irqs(struct irq_desc *desc, int nr_desc, int nr)
 {
-	if (arg)
-		nr_irq_desc = simple_strtoul(arg, NULL, 0);
-	return 0;
+	unsigned long bytes, total_bytes;
+	char *ptr;
+	int i;
+	unsigned long phys;
+
+	/* Compute how many bytes we need per irq and allocate them */
+	bytes = nr * sizeof(unsigned int);
+	total_bytes = bytes * nr_desc;
+	if (after_bootmem)
+		ptr = kzalloc(total_bytes, GFP_ATOMIC);
+	else
+		ptr = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
+
+	if (!ptr)
+		panic(" can not allocate kstat_irqs\n");
+
+	phys = __pa(ptr);
+	printk(KERN_DEBUG "kstat_irqs ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
+
+	for (i = 0; i < nr_desc; i++) {
+		desc[i].kstat_irqs = (unsigned int *)ptr;
+		ptr += bytes;
+	}
 }
 
-early_param("nr_irq_desc", parse_nr_irq_desc);
 
 static void __init init_work(void *data)
 {
@@ -100,25 +121,44 @@ static void __init init_work(void *data)
 
 	desc = *da->name;
 
-	for (i = 0; i < *da->nr; i++)
+	for (i = 0; i < *da->nr; i++) {
 		init_one_irq_desc(&desc[i]);
+#ifndef CONFIG_HAVE_SPARSE_IRQ
+		desc[i].irq = i;
+#endif
+	}
 
+#ifdef CONFIG_HAVE_SPARSE_IRQ
 	for (i = 1; i < *da->nr; i++)
 		desc[i-1].next = &desc[i];
+#endif
+
+	/* init kstat_irqs, nr_cpu_ids is ready already */
+	init_kstat_irqs(desc, *da->nr, nr_cpu_ids);
 }
 
+#ifdef CONFIG_HAVE_SPARSE_IRQ
+static int nr_irq_desc = 32;
+
+static int __init parse_nr_irq_desc(char *arg)
+{
+	if (arg)
+		nr_irq_desc = simple_strtoul(arg, NULL, 0);
+	return 0;
+}
+
+early_param("nr_irq_desc", parse_nr_irq_desc);
+
 static struct irq_desc *sparse_irqs;
 DEFINE_DYN_ARRAY(sparse_irqs, sizeof(struct irq_desc), nr_irq_desc, PAGE_SIZE, init_work);
 
-extern int after_bootmem;
-extern void *__alloc_bootmem_nopanic(unsigned long size,
-				     unsigned long align,
-				     unsigned long goal);
 struct irq_desc *irq_to_desc(unsigned int irq)
 {
 	struct irq_desc *desc, *desc_pri;
 	int i;
 	int count = 0;
+	unsigned long phys;
+	unsigned long total_bytes;
 
 	BUG_ON(irq == -1U);
 
@@ -141,38 +181,34 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 	 */
 	printk(KERN_DEBUG "try to get more irq_desc %d\n", nr_irq_desc);
 
+	total_bytes = sizeof(struct irq_desc) * nr_irq_desc;
 	if (after_bootmem)
-		desc = kzalloc(sizeof(struct irq_desc)*nr_irq_desc, GFP_ATOMIC);
+		desc = kzalloc(total_bytes, GFP_ATOMIC);
 	else
-		desc = __alloc_bootmem_nopanic(sizeof(struct irq_desc)*nr_irq_desc, PAGE_SIZE, 0);
+		desc = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
 
 	if (!desc)
 		panic("please boot with nr_irq_desc= %d\n", count * 2);
 
+	phys = __pa(desc);
+	printk(KERN_DEBUG "irq_desc ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
+
 	for (i = 0; i < nr_irq_desc; i++)
 		init_one_irq_desc(&desc[i]);
 
 	for (i = 1; i < nr_irq_desc; i++)
 		desc[i-1].next = &desc[i];
 
+	/* init kstat_irqs, nr_cpu_ids is ready already */
+	init_kstat_irqs(desc, nr_irq_desc, nr_cpu_ids);
+
 	desc->irq = irq;
 	desc_pri->next = desc;
 
 	return desc;
 }
 #else
-static void __init init_work(void *data)
-{
-	struct dyn_array *da = data;
-	int i;
-	struct irq_desc *desc;
-
-	desc = *da->name;
 
-	for (i = 0; i < *da->nr; i++)
-		init_one_irq_desc(&desc[i]);
-
-}
 static struct irq_desc *irq_desc;
 DEFINE_DYN_ARRAY(irq_desc, sizeof(struct irq_desc), nr_irqs, PAGE_SIZE, init_work);
 
@@ -315,7 +351,7 @@ unsigned int __do_IRQ(unsigned int irq)
 	struct irqaction *action;
 	unsigned int status;
 
-	kstat_this_cpu.irqs[irq]++;
+	kstat_irqs_this_cpu(desc)++;
 	if (CHECK_IRQ_PER_CPU(desc->status)) {
 		irqreturn_t action_ret;
 
@@ -415,3 +451,10 @@ void early_init_irq_lock_class(void)
 }
 #endif
 
+unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	return desc->kstat_irqs[cpu];
+}
+EXPORT_SYMBOL(kstat_irqs_cpu);
+
diff --git a/kernel/sched.c b/kernel/sched.c
index b9d713781b5b..6f230596bd0c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4048,11 +4048,8 @@ static inline void idle_balance(int cpu, struct rq *rq)
 #endif
 
 DEFINE_PER_CPU(struct kernel_stat, kstat);
-EXPORT_PER_CPU_SYMBOL(kstat);
 
-#ifdef CONFIG_HAVE_DYN_ARRAY
-DEFINE_PER_CPU_DYN_ARRAY_ADDR(per_cpu__kstat_irqs, per_cpu__kstat.irqs, sizeof(unsigned int), nr_irqs, sizeof(unsigned long), NULL);
-#endif
+EXPORT_PER_CPU_SYMBOL(kstat);
 
 /*
  * Return p->sum_exec_runtime plus any more ns on the sched_clock
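For reference, a minimal sketch (not part of the patch) of how the accessors introduced above are meant to be used. kstat_irqs_this_cpu() and kstat_irqs_cpu() come from the hunks in include/linux/irq.h, include/linux/kernel_stat.h and kernel/irq/handle.c; the helper names account_irq() and total_irqs() are hypothetical and exist only for illustration.

/* Sketch only: illustrates the per-descriptor IRQ counters added by this patch. */
#include <linux/irq.h>
#include <linux/kernel_stat.h>

/* In a flow handler: bump this CPU's counter for the descriptor being handled,
 * i.e. desc->kstat_irqs[smp_processor_id()]++ via the new macro. */
static void account_irq(struct irq_desc *desc)
{
	kstat_irqs_this_cpu(desc)++;
}

/* System-wide total for one IRQ, summed the same way the reworked
 * kstat_irqs() in kernel_stat.h and /proc/stat now do it. */
static unsigned int total_irqs(unsigned int irq)
{
	unsigned int sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += kstat_irqs_cpu(irq, cpu);	/* reads irq_to_desc(irq)->kstat_irqs[cpu] */
	return sum;
}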