path: root/kernel/irq/handle.c
author    Yinghai Lu <yinghai@kernel.org>    2009-04-27 21:00:38 -0400
committer Ingo Molnar <mingo@elte.hu>        2009-04-28 06:21:17 -0400
commit    85ac16d033370caf6f48d743c8dc8103700f5cc5 (patch)
tree      04a73af31c07a8ad29780b777b3f9d041fa236fa /kernel/irq/handle.c
parent    57b150cce8e004ddd36330490a68bfb59b7271e9 (diff)
x86/irq: change irq_desc_alloc() to take node instead of cpu
This simplifies the node awareness of the code. All our allocators only
deal with a NUMA node ID locality, not with CPU ids anyway - so there's
no need to maintain (and transform) a CPU id all across the IRQ layer.

v2: keep move_irq_desc related

[ Impact: cleanup, prepare IRQ code to be NUMA-aware ]

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
LKML-Reference: <49F65536.2020300@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
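Illustration (not part of the commit): a minimal sketch of the call-site pattern after this change, assuming a hypothetical caller. The cpu -> node translation now happens once at the caller, and only the node id travels through the IRQ layer.

#include <linux/irq.h>
#include <linux/topology.h>

/*
 * Hypothetical caller (function name assumed, not from this patch):
 * resolve the NUMA node from the CPU once, then hand the node id down.
 */
static struct irq_desc *example_alloc_desc_for_cpu(unsigned int irq, int cpu)
{
        int node = cpu_to_node(cpu);    /* previously done inside the IRQ layer */

        /* was: irq_to_desc_alloc_cpu(irq, cpu) before this patch */
        return irq_to_desc_alloc_node(irq, node);
}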
Diffstat (limited to 'kernel/irq/handle.c')
-rw-r--r--  kernel/irq/handle.c | 28
1 file changed, 11 insertions, 17 deletions
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 3e0cbc44bd73..a6368db2618b 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -81,12 +81,10 @@ static struct irq_desc irq_desc_init = {
         .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock),
 };
 
-void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
+void init_kstat_irqs(struct irq_desc *desc, int node, int nr)
 {
-        int node;
         void *ptr;
 
-        node = cpu_to_node(cpu);
         ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
 
         /*
@@ -94,33 +92,32 @@ void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
          * init_copy_kstat_irqs() could still use old one
          */
         if (ptr) {
-                printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n",
-                        cpu, node);
+                printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node);
                 desc->kstat_irqs = ptr;
         }
 }
 
-static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
+static void init_one_irq_desc(int irq, struct irq_desc *desc, int node)
 {
         memcpy(desc, &irq_desc_init, sizeof(struct irq_desc));
 
         spin_lock_init(&desc->lock);
         desc->irq = irq;
 #ifdef CONFIG_SMP
-        desc->cpu = cpu;
+        desc->node = node;
 #endif
         lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-        init_kstat_irqs(desc, cpu, nr_cpu_ids);
+        init_kstat_irqs(desc, node, nr_cpu_ids);
         if (!desc->kstat_irqs) {
                 printk(KERN_ERR "can not alloc kstat_irqs\n");
                 BUG_ON(1);
         }
-        if (!alloc_desc_masks(desc, cpu, false)) {
+        if (!alloc_desc_masks(desc, node, false)) {
                 printk(KERN_ERR "can not alloc irq_desc cpumasks\n");
                 BUG_ON(1);
         }
         init_desc_masks(desc);
-        arch_init_chip_data(desc, cpu);
+        arch_init_chip_data(desc, node);
 }
 
 /*
@@ -189,11 +186,10 @@ struct irq_desc *irq_to_desc(unsigned int irq)
         return NULL;
 }
 
-struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
 {
         struct irq_desc *desc;
         unsigned long flags;
-        int node;
 
         if (irq >= nr_irqs) {
                 WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n",
@@ -212,15 +208,13 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
         if (desc)
                 goto out_unlock;
 
-        node = cpu_to_node(cpu);
         desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node);
-        printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n",
-                irq, cpu, node);
+        printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node);
         if (!desc) {
                 printk(KERN_ERR "can not alloc irq_desc\n");
                 BUG_ON(1);
         }
-        init_one_irq_desc(irq, desc, cpu);
+        init_one_irq_desc(irq, desc, node);
 
         irq_desc_ptrs[irq] = desc;
 
@@ -270,7 +264,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
         return (irq < NR_IRQS) ? irq_desc + irq : NULL;
 }
 
-struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
+struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node)
 {
         return irq_to_desc(irq);
 }
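Usage note (illustrative, not from the patch): in the non-sparse build shown in the last hunk the node argument is simply ignored, so callers can pass a node unconditionally. And since the API now takes a NUMA node rather than a CPU, the locality hint does not have to come from a CPU at all. A hypothetical device-side caller (names assumed) might look like:

#include <linux/device.h>
#include <linux/irq.h>

/*
 * Hypothetical example: allocate the descriptor close to the device that
 * will raise the interrupt.  dev_to_node() may return -1 when the device
 * has no NUMA affinity; kzalloc_node() treats a negative node as "no
 * preference", so the allocation still succeeds in that case.
 */
static struct irq_desc *example_alloc_desc_near_device(unsigned int irq,
                                                       struct device *dev)
{
        return irq_to_desc_alloc_node(irq, dev_to_node(dev));
}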