author	Jiang Liu <jiang.liu@linux.intel.com>	2015-04-13 22:30:03 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2015-04-24 09:36:55 -0400
commit	7f3262edcdf623296b514377d52911b115c7ab49 (patch)
tree	c6c01f6014383a2af3090545b2ba3349da439ea8
parent	c6c2002b744215810c770dd73f45da954bcfa9d5 (diff)
x86/irq: Move private data in struct irq_cfg into dedicated data structure
Several fields in struct irq_cfg are private to vector.c, so move them
into a dedicated data structure. This helps to hide implementation
details.

Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Cohen <david.a.cohen@linux.intel.com>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Link: http://lkml.kernel.org/r/1428978610-28986-27-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Link: http://lkml.kernel.org/r/1416901802-24211-35-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Joerg Roedel <jroedel@suse.de>
-rw-r--r--	arch/x86/include/asm/hw_irq.h	3
-rw-r--r--	arch/x86/kernel/apic/vector.c	221
2 files changed, 119 insertions(+), 105 deletions(-)
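The heart of the patch is an embed-and-recover idiom: the shared
struct irq_cfg stays embedded as a member of the new, vector.c-private
struct apic_chip_data, and code that only holds a struct irq_cfg
pointer (send_cleanup_vector() and __irq_complete_move() below) gets
the private data back with container_of(). The following is a minimal,
self-contained userspace sketch of that idiom, assuming a simplified
container_of() and hypothetical field values; it is not the kernel
code itself:

	#include <stddef.h>
	#include <stdio.h>

	/* Simplified stand-in for the kernel's container_of() macro. */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct irq_cfg {		/* public part, visible outside vector.c */
		unsigned int dest_apicid;
		unsigned char vector;
	};

	struct apic_chip_data {		/* private part, local to vector.c */
		struct irq_cfg cfg;	/* embedded as a member, not a pointer */
		unsigned char move_in_progress;
	};

	/* Callers hand in the public handle; the private container is
	 * recovered from the address of the embedded member. */
	static void send_cleanup_vector(struct irq_cfg *cfg)
	{
		struct apic_chip_data *data =
			container_of(cfg, struct apic_chip_data, cfg);

		if (data->move_in_progress)
			printf("cleanup pending for vector %u\n",
			       (unsigned int)cfg->vector);
	}

	int main(void)
	{
		struct apic_chip_data d = {
			.cfg = { .vector = 33 },	/* hypothetical value */
			.move_in_progress = 1,
		};

		send_cleanup_vector(&d.cfg);	/* callers see only irq_cfg */
		return 0;
	}

The recovery is pure pointer arithmetic over the struct layout, so it
costs nothing at runtime and needs no back-pointer in struct irq_cfg.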
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index 727c62378a65..3b8233a26348 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -171,11 +171,8 @@ enum {
 };
 
 struct irq_cfg {
-	cpumask_var_t		domain;
-	cpumask_var_t		old_domain;
 	unsigned int		dest_apicid;
 	u8			vector;
-	u8			move_in_progress : 1;
 };
 
 extern struct irq_domain *x86_vector_domain;
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 0092a6e0d5ee..60047495041c 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -21,11 +21,18 @@
 #include <asm/desc.h>
 #include <asm/irq_remapping.h>
 
+struct apic_chip_data {
+	struct irq_cfg		cfg;
+	cpumask_var_t		domain;
+	cpumask_var_t		old_domain;
+	u8			move_in_progress : 1;
+};
+
 struct irq_domain *x86_vector_domain;
 static DEFINE_RAW_SPINLOCK(vector_lock);
 static struct irq_chip lapic_controller;
 #ifdef CONFIG_X86_IO_APIC
-static struct irq_cfg *legacy_irq_cfgs[NR_IRQS_LEGACY];
+static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
 #endif
 
 void lock_vector_lock(void)
@@ -41,12 +48,7 @@ void unlock_vector_lock(void)
 	raw_spin_unlock(&vector_lock);
 }
 
-struct irq_cfg *irq_cfg(unsigned int irq)
-{
-	return irqd_cfg(irq_get_irq_data(irq));
-}
-
-struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
+static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
 {
 	if (!irq_data)
 		return NULL;
@@ -57,36 +59,48 @@ struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
 	return irq_data->chip_data;
 }
 
-static struct irq_cfg *alloc_irq_cfg(int node)
+struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
+{
+	struct apic_chip_data *data = apic_chip_data(irq_data);
+
+	return data ? &data->cfg : NULL;
+}
+
+struct irq_cfg *irq_cfg(unsigned int irq)
 {
-	struct irq_cfg *cfg;
+	return irqd_cfg(irq_get_irq_data(irq));
+}
 
-	cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
-	if (!cfg)
+static struct apic_chip_data *alloc_apic_chip_data(int node)
+{
+	struct apic_chip_data *data;
+
+	data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
+	if (!data)
 		return NULL;
-	if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
-		goto out_cfg;
-	if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
+	if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
+		goto out_data;
+	if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
 		goto out_domain;
-	return cfg;
+	return data;
 out_domain:
-	free_cpumask_var(cfg->domain);
-out_cfg:
-	kfree(cfg);
+	free_cpumask_var(data->domain);
+out_data:
+	kfree(data);
 	return NULL;
 }
 
-static void free_irq_cfg(struct irq_cfg *cfg)
+static void free_apic_chip_data(struct apic_chip_data *data)
 {
-	if (cfg) {
-		free_cpumask_var(cfg->domain);
-		free_cpumask_var(cfg->old_domain);
-		kfree(cfg);
+	if (data) {
+		free_cpumask_var(data->domain);
+		free_cpumask_var(data->old_domain);
+		kfree(data);
 	}
 }
 
-static int
-__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+static int __assign_irq_vector(int irq, struct apic_chip_data *d,
+			       const struct cpumask *mask)
 {
 	/*
 	 * NOTE! The local APIC isn't very good at handling
@@ -104,7 +118,7 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 	int cpu, err;
 	cpumask_var_t tmp_mask;
 
-	if (cfg->move_in_progress)
+	if (d->move_in_progress)
 		return -EBUSY;
 
 	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
@@ -112,26 +126,26 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
 
 	/* Only try and allocate irqs on cpus that are present */
 	err = -ENOSPC;
-	cpumask_clear(cfg->old_domain);
+	cpumask_clear(d->old_domain);
 	cpu = cpumask_first_and(mask, cpu_online_mask);
 	while (cpu < nr_cpu_ids) {
 		int new_cpu, vector, offset;
 
 		apic->vector_allocation_domain(cpu, tmp_mask, mask);
 
-		if (cpumask_subset(tmp_mask, cfg->domain)) {
+		if (cpumask_subset(tmp_mask, d->domain)) {
 			err = 0;
-			if (cpumask_equal(tmp_mask, cfg->domain))
+			if (cpumask_equal(tmp_mask, d->domain))
 				break;
 			/*
 			 * New cpumask using the vector is a proper subset of
 			 * the current in use mask. So cleanup the vector
 			 * allocation for the members that are not used anymore.
 			 */
-			cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
-			cfg->move_in_progress =
-				cpumask_intersects(cfg->old_domain, cpu_online_mask);
-			cpumask_and(cfg->domain, cfg->domain, tmp_mask);
+			cpumask_andnot(d->old_domain, d->domain, tmp_mask);
+			d->move_in_progress =
+				cpumask_intersects(d->old_domain, cpu_online_mask);
+			cpumask_and(d->domain, d->domain, tmp_mask);
 			break;
 		}
 
@@ -145,8 +159,8 @@ next:
 		}
 
 		if (unlikely(current_vector == vector)) {
-			cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
-			cpumask_andnot(tmp_mask, mask, cfg->old_domain);
+			cpumask_or(d->old_domain, d->old_domain, tmp_mask);
+			cpumask_andnot(tmp_mask, mask, d->old_domain);
 			cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
 			continue;
 		}
@@ -162,15 +176,15 @@ next:
 		/* Found one! */
 		current_vector = vector;
 		current_offset = offset;
-		if (cfg->vector) {
-			cpumask_copy(cfg->old_domain, cfg->domain);
-			cfg->move_in_progress =
-				cpumask_intersects(cfg->old_domain, cpu_online_mask);
+		if (d->cfg.vector) {
+			cpumask_copy(d->old_domain, d->domain);
+			d->move_in_progress =
+				cpumask_intersects(d->old_domain, cpu_online_mask);
 		}
 		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq;
-		cfg->vector = vector;
-		cpumask_copy(cfg->domain, tmp_mask);
+		d->cfg.vector = vector;
+		cpumask_copy(d->domain, tmp_mask);
 		err = 0;
 		break;
 	}
@@ -178,46 +192,46 @@ next:
 
 	if (!err) {
 		/* cache destination APIC IDs into cfg->dest_apicid */
-		err = apic->cpu_mask_to_apicid_and(mask, cfg->domain,
-						   &cfg->dest_apicid);
+		err = apic->cpu_mask_to_apicid_and(mask, d->domain,
+						   &d->cfg.dest_apicid);
 	}
 
 	return err;
 }
 
-static int assign_irq_vector(int irq, struct irq_cfg *cfg,
+static int assign_irq_vector(int irq, struct apic_chip_data *data,
 			     const struct cpumask *mask)
 {
 	int err;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	err = __assign_irq_vector(irq, cfg, mask);
+	err = __assign_irq_vector(irq, data, mask);
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
 	return err;
 }
 
-static void clear_irq_vector(int irq, struct irq_cfg *cfg)
+static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
 	int cpu, vector;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	BUG_ON(!cfg->vector);
+	BUG_ON(!data->cfg.vector);
 
-	vector = cfg->vector;
-	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+	vector = data->cfg.vector;
+	for_each_cpu_and(cpu, data->domain, cpu_online_mask)
 		per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
 
-	cfg->vector = 0;
-	cpumask_clear(cfg->domain);
+	data->cfg.vector = 0;
+	cpumask_clear(data->domain);
 
-	if (likely(!cfg->move_in_progress)) {
+	if (likely(!data->move_in_progress)) {
 		raw_spin_unlock_irqrestore(&vector_lock, flags);
 		return;
 	}
 
-	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
 		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
 		     vector++) {
 			if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -226,7 +240,7 @@ static void clear_irq_vector(int irq, struct irq_cfg *cfg)
 				break;
 		}
 	}
-	cfg->move_in_progress = 0;
+	data->move_in_progress = 0;
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
@@ -261,10 +275,10 @@ static void x86_vector_free_irqs(struct irq_domain *domain,
 		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
 		if (irq_data && irq_data->chip_data) {
 			clear_irq_vector(virq + i, irq_data->chip_data);
-			free_irq_cfg(irq_data->chip_data);
+			free_apic_chip_data(irq_data->chip_data);
 #ifdef	CONFIG_X86_IO_APIC
 			if (virq + i < nr_legacy_irqs())
-				legacy_irq_cfgs[virq + i] = NULL;
+				legacy_irq_data[virq + i] = NULL;
 #endif
 			irq_domain_reset_irq_data(irq_data);
 		}
@@ -275,9 +289,9 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 			     unsigned int nr_irqs, void *arg)
 {
 	struct irq_alloc_info *info = arg;
+	struct apic_chip_data *data;
 	const struct cpumask *mask;
 	struct irq_data *irq_data;
-	struct irq_cfg *cfg;
 	int i, err;
 
 	if (disable_apic)
@@ -292,20 +306,20 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
 		irq_data = irq_domain_get_irq_data(domain, virq + i);
 		BUG_ON(!irq_data);
 #ifdef	CONFIG_X86_IO_APIC
-		if (virq + i < nr_legacy_irqs() && legacy_irq_cfgs[virq + i])
-			cfg = legacy_irq_cfgs[virq + i];
+		if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
+			data = legacy_irq_data[virq + i];
 		else
 #endif
-			cfg = alloc_irq_cfg(irq_data->node);
-		if (!cfg) {
+			data = alloc_apic_chip_data(irq_data->node);
+		if (!data) {
 			err = -ENOMEM;
 			goto error;
 		}
 
 		irq_data->chip = &lapic_controller;
-		irq_data->chip_data = cfg;
+		irq_data->chip_data = data;
 		irq_data->hwirq = virq + i;
-		err = assign_irq_vector(virq, cfg, mask);
+		err = assign_irq_vector(virq, data, mask);
 		if (err)
 			goto error;
 	}
@@ -349,22 +363,22 @@ int __init arch_probe_nr_irqs(void)
 static void init_legacy_irqs(void)
 {
 	int i, node = cpu_to_node(0);
-	struct irq_cfg *cfg;
+	struct apic_chip_data *data;
 
 	/*
 	 * For legacy IRQ's, start with assigning irq0 to irq15 to
 	 * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
 	 */
 	for (i = 0; i < nr_legacy_irqs(); i++) {
-		cfg = legacy_irq_cfgs[i] = alloc_irq_cfg(node);
-		BUG_ON(!cfg);
+		data = legacy_irq_data[i] = alloc_apic_chip_data(node);
+		BUG_ON(!data);
 		/*
 		 * For legacy IRQ's, start with assigning irq0 to irq15 to
 		 * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
 		 */
-		cfg->vector = IRQ0_VECTOR + i;
-		cpumask_setall(cfg->domain);
-		irq_set_chip_data(i, cfg);
+		data->cfg.vector = IRQ0_VECTOR + i;
+		cpumask_setall(data->domain);
+		irq_set_chip_data(i, data);
 	}
 }
 #else
@@ -390,7 +404,7 @@ static void __setup_vector_irq(int cpu)
 {
 	/* Initialize vector_irq on a new cpu */
 	int irq, vector;
-	struct irq_cfg *cfg;
+	struct apic_chip_data *data;
 
 	/*
 	 * vector_lock will make sure that we don't run into irq vector
@@ -400,13 +414,13 @@ static void __setup_vector_irq(int cpu)
 	raw_spin_lock(&vector_lock);
 	/* Mark the inuse vectors */
 	for_each_active_irq(irq) {
-		cfg = irq_cfg(irq);
-		if (!cfg)
+		data = apic_chip_data(irq_get_irq_data(irq));
+		if (!data)
 			continue;
 
-		if (!cpumask_test_cpu(cpu, cfg->domain))
+		if (!cpumask_test_cpu(cpu, data->domain))
 			continue;
-		vector = cfg->vector;
+		vector = data->cfg.vector;
 		per_cpu(vector_irq, cpu)[vector] = irq;
 	}
 	/* Mark the free vectors */
@@ -415,8 +429,8 @@ static void __setup_vector_irq(int cpu)
 		if (irq <= VECTOR_UNDEFINED)
 			continue;
 
-		cfg = irq_cfg(irq);
-		if (!cpumask_test_cpu(cpu, cfg->domain))
+		data = apic_chip_data(irq_get_irq_data(irq));
+		if (!cpumask_test_cpu(cpu, data->domain))
 			per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
 	}
 	raw_spin_unlock(&vector_lock);
@@ -442,15 +456,15 @@ void setup_vector_irq(int cpu)
 	__setup_vector_irq(cpu);
 }
 
-static int apic_retrigger_irq(struct irq_data *data)
+static int apic_retrigger_irq(struct irq_data *irq_data)
 {
-	struct irq_cfg *cfg = irqd_cfg(data);
+	struct apic_chip_data *data = apic_chip_data(irq_data);
 	unsigned long flags;
 	int cpu;
 
 	raw_spin_lock_irqsave(&vector_lock, flags);
-	cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
-	apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
+	cpu = cpumask_first_and(data->domain, cpu_online_mask);
+	apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
 	raw_spin_unlock_irqrestore(&vector_lock, flags);
 
 	return 1;
@@ -466,7 +480,7 @@ void apic_ack_edge(struct irq_data *data)
 static int apic_set_affinity(struct irq_data *irq_data,
 			     const struct cpumask *dest, bool force)
 {
-	struct irq_cfg *cfg = irq_data->chip_data;
+	struct apic_chip_data *data = irq_data->chip_data;
 	int err, irq = irq_data->irq;
 
 	if (!config_enabled(CONFIG_SMP))
@@ -475,11 +489,11 @@ static int apic_set_affinity(struct irq_data *irq_data,
 	if (!cpumask_intersects(dest, cpu_online_mask))
 		return -EINVAL;
 
-	err = assign_irq_vector(irq, cfg, dest);
+	err = assign_irq_vector(irq, data, dest);
 	if (err) {
 		struct irq_data *top = irq_get_irq_data(irq);
 
-		if (assign_irq_vector(irq, cfg, top->affinity))
+		if (assign_irq_vector(irq, data, top->affinity))
 			pr_err("Failed to recover vector for irq %d\n", irq);
 		return err;
 	}
@@ -494,28 +508,31 @@ static struct irq_chip lapic_controller = {
 };
 
 #ifdef CONFIG_SMP
-static void __send_cleanup_vector(struct irq_cfg *cfg)
+static void __send_cleanup_vector(struct apic_chip_data *data)
 {
 	cpumask_var_t cleanup_mask;
 
 	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
 		unsigned int i;
 
-		for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+		for_each_cpu_and(i, data->old_domain, cpu_online_mask)
 			apic->send_IPI_mask(cpumask_of(i),
 					    IRQ_MOVE_CLEANUP_VECTOR);
 	} else {
-		cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+		cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
 		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
 		free_cpumask_var(cleanup_mask);
 	}
-	cfg->move_in_progress = 0;
+	data->move_in_progress = 0;
 }
 
 void send_cleanup_vector(struct irq_cfg *cfg)
 {
-	if (cfg->move_in_progress)
-		__send_cleanup_vector(cfg);
+	struct apic_chip_data *data;
+
+	data = container_of(cfg, struct apic_chip_data, cfg);
+	if (data->move_in_progress)
+		__send_cleanup_vector(data);
 }
 
 asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
@@ -531,7 +548,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 		int irq;
 		unsigned int irr;
 		struct irq_desc *desc;
-		struct irq_cfg *cfg;
+		struct apic_chip_data *data;
 
 		irq = __this_cpu_read(vector_irq[vector]);
 
@@ -542,8 +559,8 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 		if (!desc)
 			continue;
 
-		cfg = irq_cfg(irq);
-		if (!cfg)
+		data = apic_chip_data(&desc->irq_data);
+		if (!data)
 			continue;
 
 		raw_spin_lock(&desc->lock);
@@ -552,10 +569,11 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 		 * Check if the irq migration is in progress. If so, we
 		 * haven't received the cleanup request yet for this irq.
 		 */
-		if (cfg->move_in_progress)
+		if (data->move_in_progress)
 			goto unlock;
 
-		if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+		if (vector == data->cfg.vector &&
+		    cpumask_test_cpu(me, data->domain))
 			goto unlock;
 
 		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
@@ -581,14 +599,15 @@ unlock:
 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
 {
 	unsigned me;
+	struct apic_chip_data *data;
 
-	if (likely(!cfg->move_in_progress))
+	data = container_of(cfg, struct apic_chip_data, cfg);
+	if (likely(!data->move_in_progress))
 		return;
 
 	me = smp_processor_id();
-
-	if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-		__send_cleanup_vector(cfg);
+	if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
+		__send_cleanup_vector(data);
 }
 
 void irq_complete_move(struct irq_cfg *cfg)
@@ -600,10 +619,8 @@ void irq_force_complete_move(int irq)
 {
 	struct irq_cfg *cfg = irq_cfg(irq);
 
-	if (!cfg)
-		return;
-
-	__irq_complete_move(cfg, cfg->vector);
+	if (cfg)
+		__irq_complete_move(cfg, cfg->vector);
 }
 #endif
 
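Design note: because irqd_cfg() now returns &data->cfg and irq_cfg()
is layered on top of it, code outside vector.c keeps compiling against
struct irq_cfg unchanged, while domain, old_domain and
move_in_progress become unreachable from other files. The only places
that map the public handle back to the private structure are the
container_of() call sites in send_cleanup_vector() and
__irq_complete_move(), both of which rely on cfg being a member of
struct apic_chip_data.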