diff options
author | Yinghai Lu <yinghai@kernel.org> | 2009-04-27 21:00:38 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-04-28 06:21:17 -0400 |
commit | 85ac16d033370caf6f48d743c8dc8103700f5cc5 (patch) | |
tree | 04a73af31c07a8ad29780b777b3f9d041fa236fa /kernel/irq | |
parent | 57b150cce8e004ddd36330490a68bfb59b7271e9 (diff) |
x86/irq: change irq_desc_alloc() to take node instead of cpu
This simplifies the node awareness of the code. All our allocators
only deal with a NUMA node ID locality, not with CPU ids anyway - so
there's no need to maintain (and transform) a CPU id all across the
IRQ layer.
v2: keep move_irq_desc related
[ Impact: cleanup, prepare IRQ code to be NUMA-aware ]
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
LKML-Reference: <49F65536.2020300@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/irq')
-rw-r--r-- | kernel/irq/handle.c | 28 | ||||
-rw-r--r-- | kernel/irq/internals.h | 2 | ||||
-rw-r--r-- | kernel/irq/numa_migrate.c | 36 |
3 files changed, 24 insertions, 42 deletions
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 3e0cbc44bd73..a6368db2618b 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
@@ -81,12 +81,10 @@ static struct irq_desc irq_desc_init = { | |||
81 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), | 81 | .lock = __SPIN_LOCK_UNLOCKED(irq_desc_init.lock), |
82 | }; | 82 | }; |
83 | 83 | ||
84 | void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) | 84 | void init_kstat_irqs(struct irq_desc *desc, int node, int nr) |
85 | { | 85 | { |
86 | int node; | ||
87 | void *ptr; | 86 | void *ptr; |
88 | 87 | ||
89 | node = cpu_to_node(cpu); | ||
90 | ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node); | 88 | ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node); |
91 | 89 | ||
92 | /* | 90 | /* |
@@ -94,33 +92,32 @@ void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr) | |||
94 | * init_copy_kstat_irqs() could still use old one | 92 | * init_copy_kstat_irqs() could still use old one |
95 | */ | 93 | */ |
96 | if (ptr) { | 94 | if (ptr) { |
97 | printk(KERN_DEBUG " alloc kstat_irqs on cpu %d node %d\n", | 95 | printk(KERN_DEBUG " alloc kstat_irqs on node %d\n", node); |
98 | cpu, node); | ||
99 | desc->kstat_irqs = ptr; | 96 | desc->kstat_irqs = ptr; |
100 | } | 97 | } |
101 | } | 98 | } |
102 | 99 | ||
103 | static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu) | 100 | static void init_one_irq_desc(int irq, struct irq_desc *desc, int node) |
104 | { | 101 | { |
105 | memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); | 102 | memcpy(desc, &irq_desc_init, sizeof(struct irq_desc)); |
106 | 103 | ||
107 | spin_lock_init(&desc->lock); | 104 | spin_lock_init(&desc->lock); |
108 | desc->irq = irq; | 105 | desc->irq = irq; |
109 | #ifdef CONFIG_SMP | 106 | #ifdef CONFIG_SMP |
110 | desc->cpu = cpu; | 107 | desc->node = node; |
111 | #endif | 108 | #endif |
112 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | 109 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
113 | init_kstat_irqs(desc, cpu, nr_cpu_ids); | 110 | init_kstat_irqs(desc, node, nr_cpu_ids); |
114 | if (!desc->kstat_irqs) { | 111 | if (!desc->kstat_irqs) { |
115 | printk(KERN_ERR "can not alloc kstat_irqs\n"); | 112 | printk(KERN_ERR "can not alloc kstat_irqs\n"); |
116 | BUG_ON(1); | 113 | BUG_ON(1); |
117 | } | 114 | } |
118 | if (!alloc_desc_masks(desc, cpu, false)) { | 115 | if (!alloc_desc_masks(desc, node, false)) { |
119 | printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); | 116 | printk(KERN_ERR "can not alloc irq_desc cpumasks\n"); |
120 | BUG_ON(1); | 117 | BUG_ON(1); |
121 | } | 118 | } |
122 | init_desc_masks(desc); | 119 | init_desc_masks(desc); |
123 | arch_init_chip_data(desc, cpu); | 120 | arch_init_chip_data(desc, node); |
124 | } | 121 | } |
125 | 122 | ||
126 | /* | 123 | /* |
@@ -189,11 +186,10 @@ struct irq_desc *irq_to_desc(unsigned int irq) | |||
189 | return NULL; | 186 | return NULL; |
190 | } | 187 | } |
191 | 188 | ||
192 | struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) | 189 | struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) |
193 | { | 190 | { |
194 | struct irq_desc *desc; | 191 | struct irq_desc *desc; |
195 | unsigned long flags; | 192 | unsigned long flags; |
196 | int node; | ||
197 | 193 | ||
198 | if (irq >= nr_irqs) { | 194 | if (irq >= nr_irqs) { |
199 | WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", | 195 | WARN(1, "irq (%d) >= nr_irqs (%d) in irq_to_desc_alloc\n", |
@@ -212,15 +208,13 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) | |||
212 | if (desc) | 208 | if (desc) |
213 | goto out_unlock; | 209 | goto out_unlock; |
214 | 210 | ||
215 | node = cpu_to_node(cpu); | ||
216 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); | 211 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); |
217 | printk(KERN_DEBUG " alloc irq_desc for %d on cpu %d node %d\n", | 212 | printk(KERN_DEBUG " alloc irq_desc for %d on node %d\n", irq, node); |
218 | irq, cpu, node); | ||
219 | if (!desc) { | 213 | if (!desc) { |
220 | printk(KERN_ERR "can not alloc irq_desc\n"); | 214 | printk(KERN_ERR "can not alloc irq_desc\n"); |
221 | BUG_ON(1); | 215 | BUG_ON(1); |
222 | } | 216 | } |
223 | init_one_irq_desc(irq, desc, cpu); | 217 | init_one_irq_desc(irq, desc, node); |
224 | 218 | ||
225 | irq_desc_ptrs[irq] = desc; | 219 | irq_desc_ptrs[irq] = desc; |
226 | 220 | ||
@@ -270,7 +264,7 @@ struct irq_desc *irq_to_desc(unsigned int irq) | |||
270 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; | 264 | return (irq < NR_IRQS) ? irq_desc + irq : NULL; |
271 | } | 265 | } |
272 | 266 | ||
273 | struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu) | 267 | struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node) |
274 | { | 268 | { |
275 | return irq_to_desc(irq); | 269 | return irq_to_desc(irq); |
276 | } | 270 | } |
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index de5f412f6a92..73468253143b 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
@@ -16,7 +16,7 @@ extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); | |||
16 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); | 16 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); |
17 | 17 | ||
18 | extern struct lock_class_key irq_desc_lock_class; | 18 | extern struct lock_class_key irq_desc_lock_class; |
19 | extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr); | 19 | extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); |
20 | extern void clear_kstat_irqs(struct irq_desc *desc); | 20 | extern void clear_kstat_irqs(struct irq_desc *desc); |
21 | extern spinlock_t sparse_irq_lock; | 21 | extern spinlock_t sparse_irq_lock; |
22 | 22 | ||
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c index ce72bc3f4ced..2f69bee57bf2 100644 --- a/kernel/irq/numa_migrate.c +++ b/kernel/irq/numa_migrate.c | |||
@@ -15,9 +15,9 @@ | |||
15 | 15 | ||
16 | static void init_copy_kstat_irqs(struct irq_desc *old_desc, | 16 | static void init_copy_kstat_irqs(struct irq_desc *old_desc, |
17 | struct irq_desc *desc, | 17 | struct irq_desc *desc, |
18 | int cpu, int nr) | 18 | int node, int nr) |
19 | { | 19 | { |
20 | init_kstat_irqs(desc, cpu, nr); | 20 | init_kstat_irqs(desc, node, nr); |
21 | 21 | ||
22 | if (desc->kstat_irqs != old_desc->kstat_irqs) | 22 | if (desc->kstat_irqs != old_desc->kstat_irqs) |
23 | memcpy(desc->kstat_irqs, old_desc->kstat_irqs, | 23 | memcpy(desc->kstat_irqs, old_desc->kstat_irqs, |
@@ -34,20 +34,20 @@ static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc) | |||
34 | } | 34 | } |
35 | 35 | ||
36 | static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, | 36 | static bool init_copy_one_irq_desc(int irq, struct irq_desc *old_desc, |
37 | struct irq_desc *desc, int cpu) | 37 | struct irq_desc *desc, int node) |
38 | { | 38 | { |
39 | memcpy(desc, old_desc, sizeof(struct irq_desc)); | 39 | memcpy(desc, old_desc, sizeof(struct irq_desc)); |
40 | if (!alloc_desc_masks(desc, cpu, false)) { | 40 | if (!alloc_desc_masks(desc, node, false)) { |
41 | printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " | 41 | printk(KERN_ERR "irq %d: can not get new irq_desc cpumask " |
42 | "for migration.\n", irq); | 42 | "for migration.\n", irq); |
43 | return false; | 43 | return false; |
44 | } | 44 | } |
45 | spin_lock_init(&desc->lock); | 45 | spin_lock_init(&desc->lock); |
46 | desc->cpu = cpu; | 46 | desc->node = node; |
47 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); | 47 | lockdep_set_class(&desc->lock, &irq_desc_lock_class); |
48 | init_copy_kstat_irqs(old_desc, desc, cpu, nr_cpu_ids); | 48 | init_copy_kstat_irqs(old_desc, desc, node, nr_cpu_ids); |
49 | init_copy_desc_masks(old_desc, desc); | 49 | init_copy_desc_masks(old_desc, desc); |
50 | arch_init_copy_chip_data(old_desc, desc, cpu); | 50 | arch_init_copy_chip_data(old_desc, desc, node); |
51 | return true; | 51 | return true; |
52 | } | 52 | } |
53 | 53 | ||
@@ -59,12 +59,11 @@ static void free_one_irq_desc(struct irq_desc *old_desc, struct irq_desc *desc) | |||
59 | } | 59 | } |
60 | 60 | ||
61 | static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | 61 | static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, |
62 | int cpu) | 62 | int node) |
63 | { | 63 | { |
64 | struct irq_desc *desc; | 64 | struct irq_desc *desc; |
65 | unsigned int irq; | 65 | unsigned int irq; |
66 | unsigned long flags; | 66 | unsigned long flags; |
67 | int node; | ||
68 | 67 | ||
69 | irq = old_desc->irq; | 68 | irq = old_desc->irq; |
70 | 69 | ||
@@ -76,7 +75,6 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | |||
76 | if (desc && old_desc != desc) | 75 | if (desc && old_desc != desc) |
77 | goto out_unlock; | 76 | goto out_unlock; |
78 | 77 | ||
79 | node = cpu_to_node(cpu); | ||
80 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); | 78 | desc = kzalloc_node(sizeof(*desc), GFP_ATOMIC, node); |
81 | if (!desc) { | 79 | if (!desc) { |
82 | printk(KERN_ERR "irq %d: can not get new irq_desc " | 80 | printk(KERN_ERR "irq %d: can not get new irq_desc " |
@@ -85,7 +83,7 @@ static struct irq_desc *__real_move_irq_desc(struct irq_desc *old_desc, | |||
85 | desc = old_desc; | 83 | desc = old_desc; |
86 | goto out_unlock; | 84 | goto out_unlock; |
87 | } | 85 | } |
88 | if (!init_copy_one_irq_desc(irq, old_desc, desc, cpu)) { | 86 | if (!init_copy_one_irq_desc(irq, old_desc, desc, node)) { |
89 | /* still use old one */ | 87 | /* still use old one */ |
90 | kfree(desc); | 88 | kfree(desc); |
91 | desc = old_desc; | 89 | desc = old_desc; |
@@ -107,24 +105,14 @@ out_unlock: | |||
107 | return desc; | 105 | return desc; |
108 | } | 106 | } |
109 | 107 | ||
110 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int cpu) | 108 | struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) |
111 | { | 109 | { |
112 | int old_cpu; | ||
113 | int node, old_node; | ||
114 | |||
115 | /* those all static, do move them */ | 110 | /* those all static, do move them */ |
116 | if (desc->irq < NR_IRQS_LEGACY) | 111 | if (desc->irq < NR_IRQS_LEGACY) |
117 | return desc; | 112 | return desc; |
118 | 113 | ||
119 | old_cpu = desc->cpu; | 114 | if (desc->node != node) |
120 | if (old_cpu != cpu) { | 115 | desc = __real_move_irq_desc(desc, node); |
121 | node = cpu_to_node(cpu); | ||
122 | old_node = cpu_to_node(old_cpu); | ||
123 | if (old_node != node) | ||
124 | desc = __real_move_irq_desc(desc, cpu); | ||
125 | else | ||
126 | desc->cpu = cpu; | ||
127 | } | ||
128 | 116 | ||
129 | return desc; | 117 | return desc; |
130 | } | 118 | } |