diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2017-09-13 17:29:45 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2017-09-25 14:51:59 -0400 |
commit | ba224feac8bb367edd62da33552353d4bdc3fe3a (patch) | |
tree | a9398b2660f63585f683cb933da8970fda833fe4 | |
parent | ba801640b10d87b1c4e26cbcbe414a001255404f (diff) |
x86/vector: Untangle internal state from irq_cfg
The vector management state is not required to live in irq_cfg. irq_cfg is
only relevant for the depending irq domains (IOAPIC, DMAR, MSI ...).
The separation of the vector management state makes it possible to direct a shut down
interrupt to a special shutdown vector w/o confusing the internal state of
the vector management.
Preparatory change for the rework of managed interrupts and the global
vector reservation scheme.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Yu Chen <yu.c.chen@intel.com>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Rui Zhang <rui.zhang@intel.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Len Brown <lenb@kernel.org>
Link: https://lkml.kernel.org/r/20170913213155.683712356@linutronix.de
-rw-r--r-- | arch/x86/include/asm/hw_irq.h | 3 | ||||
-rw-r--r-- | arch/x86/kernel/apic/vector.c | 88 |
2 files changed, 49 insertions, 42 deletions
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 386368890376..661540a93072 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h | |||
@@ -124,8 +124,7 @@ struct irq_alloc_info { | |||
124 | 124 | ||
125 | struct irq_cfg { | 125 | struct irq_cfg { |
126 | unsigned int dest_apicid; | 126 | unsigned int dest_apicid; |
127 | u8 vector; | 127 | unsigned int vector; |
128 | u8 old_vector; | ||
129 | }; | 128 | }; |
130 | 129 | ||
131 | extern struct irq_cfg *irq_cfg(unsigned int irq); | 130 | extern struct irq_cfg *irq_cfg(unsigned int irq); |
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 17d7d7fd45d9..f08d44fabef4 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c | |||
@@ -25,7 +25,9 @@ | |||
25 | #include <asm/trace/irq_vectors.h> | 25 | #include <asm/trace/irq_vectors.h> |
26 | 26 | ||
27 | struct apic_chip_data { | 27 | struct apic_chip_data { |
28 | struct irq_cfg cfg; | 28 | struct irq_cfg hw_irq_cfg; |
29 | unsigned int vector; | ||
30 | unsigned int prev_vector; | ||
29 | unsigned int cpu; | 31 | unsigned int cpu; |
30 | unsigned int prev_cpu; | 32 | unsigned int prev_cpu; |
31 | unsigned int irq; | 33 | unsigned int irq; |
@@ -86,7 +88,7 @@ struct irq_cfg *irqd_cfg(struct irq_data *irqd) | |||
86 | { | 88 | { |
87 | struct apic_chip_data *apicd = apic_chip_data(irqd); | 89 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
88 | 90 | ||
89 | return apicd ? &apicd->cfg : NULL; | 91 | return apicd ? &apicd->hw_irq_cfg : NULL; |
90 | } | 92 | } |
91 | EXPORT_SYMBOL_GPL(irqd_cfg); | 93 | EXPORT_SYMBOL_GPL(irqd_cfg); |
92 | 94 | ||
@@ -110,16 +112,18 @@ static void free_apic_chip_data(struct apic_chip_data *apicd) | |||
110 | kfree(apicd); | 112 | kfree(apicd); |
111 | } | 113 | } |
112 | 114 | ||
113 | static void apic_update_irq_cfg(struct irq_data *irqd) | 115 | static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector, |
116 | unsigned int cpu) | ||
114 | { | 117 | { |
115 | struct apic_chip_data *apicd = apic_chip_data(irqd); | 118 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
116 | 119 | ||
117 | lockdep_assert_held(&vector_lock); | 120 | lockdep_assert_held(&vector_lock); |
118 | 121 | ||
119 | apicd->cfg.dest_apicid = apic->calc_dest_apicid(apicd->cpu); | 122 | apicd->hw_irq_cfg.vector = vector; |
120 | irq_data_update_effective_affinity(irqd, cpumask_of(apicd->cpu)); | 123 | apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu); |
121 | trace_vector_config(irqd->irq, apicd->cfg.vector, apicd->cpu, | 124 | irq_data_update_effective_affinity(irqd, cpumask_of(cpu)); |
122 | apicd->cfg.dest_apicid); | 125 | trace_vector_config(irqd->irq, vector, cpu, |
126 | apicd->hw_irq_cfg.dest_apicid); | ||
123 | } | 127 | } |
124 | 128 | ||
125 | static void apic_update_vector(struct irq_data *irqd, unsigned int newvec, | 129 | static void apic_update_vector(struct irq_data *irqd, unsigned int newvec, |
@@ -130,19 +134,19 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec, | |||
130 | 134 | ||
131 | lockdep_assert_held(&vector_lock); | 135 | lockdep_assert_held(&vector_lock); |
132 | 136 | ||
133 | trace_vector_update(irqd->irq, newvec, newcpu, apicd->cfg.vector, | 137 | trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector, |
134 | apicd->cpu); | 138 | apicd->cpu); |
135 | 139 | ||
136 | /* Setup the vector move, if required */ | 140 | /* Setup the vector move, if required */ |
137 | if (apicd->cfg.vector && cpu_online(apicd->cpu)) { | 141 | if (apicd->vector && cpu_online(apicd->cpu)) { |
138 | apicd->move_in_progress = true; | 142 | apicd->move_in_progress = true; |
139 | apicd->cfg.old_vector = apicd->cfg.vector; | 143 | apicd->prev_vector = apicd->vector; |
140 | apicd->prev_cpu = apicd->cpu; | 144 | apicd->prev_cpu = apicd->cpu; |
141 | } else { | 145 | } else { |
142 | apicd->cfg.old_vector = 0; | 146 | apicd->prev_vector = 0; |
143 | } | 147 | } |
144 | 148 | ||
145 | apicd->cfg.vector = newvec; | 149 | apicd->vector = newvec; |
146 | apicd->cpu = newcpu; | 150 | apicd->cpu = newcpu; |
147 | BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec])); | 151 | BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec])); |
148 | per_cpu(vector_irq, newcpu)[newvec] = desc; | 152 | per_cpu(vector_irq, newcpu)[newvec] = desc; |
@@ -151,8 +155,10 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec, | |||
151 | static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest) | 155 | static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest) |
152 | { | 156 | { |
153 | struct apic_chip_data *apicd = apic_chip_data(irqd); | 157 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
154 | int vector = apicd->cfg.vector; | ||
155 | unsigned int cpu = apicd->cpu; | 158 | unsigned int cpu = apicd->cpu; |
159 | int vector = apicd->vector; | ||
160 | |||
161 | lockdep_assert_held(&vector_lock); | ||
156 | 162 | ||
157 | /* | 163 | /* |
158 | * If the current target CPU is online and in the new requested | 164 | * If the current target CPU is online and in the new requested |
@@ -172,12 +178,13 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest) | |||
172 | static int assign_vector_locked(struct irq_data *irqd, | 178 | static int assign_vector_locked(struct irq_data *irqd, |
173 | const struct cpumask *dest) | 179 | const struct cpumask *dest) |
174 | { | 180 | { |
181 | struct apic_chip_data *apicd = apic_chip_data(irqd); | ||
175 | int vector = allocate_vector(irqd, dest); | 182 | int vector = allocate_vector(irqd, dest); |
176 | 183 | ||
177 | if (vector < 0) | 184 | if (vector < 0) |
178 | return vector; | 185 | return vector; |
179 | 186 | ||
180 | apic_update_irq_cfg(irqd); | 187 | apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu); |
181 | return 0; | 188 | return 0; |
182 | } | 189 | } |
183 | 190 | ||
@@ -207,27 +214,28 @@ static int assign_irq_vector_policy(struct irq_data *irqd, | |||
207 | static void clear_irq_vector(struct irq_data *irqd) | 214 | static void clear_irq_vector(struct irq_data *irqd) |
208 | { | 215 | { |
209 | struct apic_chip_data *apicd = apic_chip_data(irqd); | 216 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
210 | unsigned int vector = apicd->cfg.vector; | 217 | unsigned int vector = apicd->vector; |
211 | 218 | ||
212 | lockdep_assert_held(&vector_lock); | 219 | lockdep_assert_held(&vector_lock); |
220 | |||
213 | if (!vector) | 221 | if (!vector) |
214 | return; | 222 | return; |
215 | 223 | ||
216 | trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->cfg.old_vector, | 224 | trace_vector_clear(irqd->irq, vector, apicd->cpu, apicd->prev_vector, |
217 | apicd->prev_cpu); | 225 | apicd->prev_cpu); |
218 | 226 | ||
219 | per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED; | 227 | per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_UNUSED; |
220 | irq_matrix_free(vector_matrix, apicd->cpu, vector, false); | 228 | irq_matrix_free(vector_matrix, apicd->cpu, vector, false); |
221 | apicd->cfg.vector = 0; | 229 | apicd->vector = 0; |
222 | 230 | ||
223 | /* Clean up move in progress */ | 231 | /* Clean up move in progress */ |
224 | vector = apicd->cfg.old_vector; | 232 | vector = apicd->prev_vector; |
225 | if (!vector) | 233 | if (!vector) |
226 | return; | 234 | return; |
227 | 235 | ||
228 | per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED; | 236 | per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_UNUSED; |
229 | irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, false); | 237 | irq_matrix_free(vector_matrix, apicd->prev_cpu, vector, false); |
230 | apicd->cfg.old_vector = 0; | 238 | apicd->prev_vector = 0; |
231 | apicd->move_in_progress = 0; | 239 | apicd->move_in_progress = 0; |
232 | hlist_del_init(&apicd->clist); | 240 | hlist_del_init(&apicd->clist); |
233 | } | 241 | } |
@@ -293,11 +301,11 @@ static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, | |||
293 | * config. | 301 | * config. |
294 | */ | 302 | */ |
295 | if (info->flags & X86_IRQ_ALLOC_LEGACY) { | 303 | if (info->flags & X86_IRQ_ALLOC_LEGACY) { |
296 | apicd->cfg.vector = ISA_IRQ_VECTOR(virq + i); | 304 | apicd->vector = ISA_IRQ_VECTOR(virq + i); |
297 | apicd->cpu = 0; | 305 | apicd->cpu = 0; |
298 | trace_vector_setup(virq + i, true, 0); | 306 | trace_vector_setup(virq + i, true, 0); |
299 | raw_spin_lock_irqsave(&vector_lock, flags); | 307 | raw_spin_lock_irqsave(&vector_lock, flags); |
300 | apic_update_irq_cfg(irqd); | 308 | apic_update_irq_cfg(irqd, apicd->vector, apicd->cpu); |
301 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 309 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
302 | continue; | 310 | continue; |
303 | } | 311 | } |
@@ -319,7 +327,7 @@ error: | |||
319 | void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d, | 327 | void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d, |
320 | struct irq_data *irqd, int ind) | 328 | struct irq_data *irqd, int ind) |
321 | { | 329 | { |
322 | unsigned int cpu, vec, prev_cpu, prev_vec; | 330 | unsigned int cpu, vector, prev_cpu, prev_vector; |
323 | struct apic_chip_data *apicd; | 331 | struct apic_chip_data *apicd; |
324 | unsigned long flags; | 332 | unsigned long flags; |
325 | int irq; | 333 | int irq; |
@@ -344,14 +352,14 @@ void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d, | |||
344 | 352 | ||
345 | raw_spin_lock_irqsave(&vector_lock, flags); | 353 | raw_spin_lock_irqsave(&vector_lock, flags); |
346 | cpu = apicd->cpu; | 354 | cpu = apicd->cpu; |
347 | vec = apicd->cfg.vector; | 355 | vector = apicd->vector; |
348 | prev_cpu = apicd->prev_cpu; | 356 | prev_cpu = apicd->prev_cpu; |
349 | prev_vec = apicd->cfg.old_vector; | 357 | prev_vector = apicd->prev_vector; |
350 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 358 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
351 | seq_printf(m, "%*sVector: %5u\n", ind, "", vec); | 359 | seq_printf(m, "%*sVector: %5u\n", ind, "", vector); |
352 | seq_printf(m, "%*sTarget: %5u\n", ind, "", cpu); | 360 | seq_printf(m, "%*sTarget: %5u\n", ind, "", cpu); |
353 | if (prev_vec) { | 361 | if (prev_vector) { |
354 | seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", prev_vec); | 362 | seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", prev_vector); |
355 | seq_printf(m, "%*sPrevious target: %5u\n", ind, "", prev_cpu); | 363 | seq_printf(m, "%*sPrevious target: %5u\n", ind, "", prev_cpu); |
356 | } | 364 | } |
357 | } | 365 | } |
@@ -461,10 +469,10 @@ static void vector_update_shutdown_irqs(void) | |||
461 | struct irq_data *irqd = irq_desc_get_irq_data(desc); | 469 | struct irq_data *irqd = irq_desc_get_irq_data(desc); |
462 | struct apic_chip_data *ad = apic_chip_data(irqd); | 470 | struct apic_chip_data *ad = apic_chip_data(irqd); |
463 | 471 | ||
464 | if (!ad || !ad->cfg.vector || ad->cpu != smp_processor_id()) | 472 | if (!ad || !ad->vector || ad->cpu != smp_processor_id()) |
465 | continue; | 473 | continue; |
466 | this_cpu_write(vector_irq[ad->cfg.vector], desc); | 474 | this_cpu_write(vector_irq[ad->vector], desc); |
467 | irq_matrix_assign(vector_matrix, ad->cfg.vector); | 475 | irq_matrix_assign(vector_matrix, ad->vector); |
468 | } | 476 | } |
469 | } | 477 | } |
470 | 478 | ||
@@ -543,7 +551,7 @@ static int apic_retrigger_irq(struct irq_data *irqd) | |||
543 | unsigned long flags; | 551 | unsigned long flags; |
544 | 552 | ||
545 | raw_spin_lock_irqsave(&vector_lock, flags); | 553 | raw_spin_lock_irqsave(&vector_lock, flags); |
546 | apic->send_IPI(apicd->cpu, apicd->cfg.vector); | 554 | apic->send_IPI(apicd->cpu, apicd->vector); |
547 | raw_spin_unlock_irqrestore(&vector_lock, flags); | 555 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
548 | 556 | ||
549 | return 1; | 557 | return 1; |
@@ -567,14 +575,14 @@ static struct irq_chip lapic_controller = { | |||
567 | 575 | ||
568 | static void free_moved_vector(struct apic_chip_data *apicd) | 576 | static void free_moved_vector(struct apic_chip_data *apicd) |
569 | { | 577 | { |
570 | unsigned int vector = apicd->cfg.old_vector; | 578 | unsigned int vector = apicd->prev_vector; |
571 | unsigned int cpu = apicd->prev_cpu; | 579 | unsigned int cpu = apicd->prev_cpu; |
572 | 580 | ||
573 | trace_vector_free_moved(apicd->irq, vector, false); | 581 | trace_vector_free_moved(apicd->irq, vector, false); |
574 | irq_matrix_free(vector_matrix, cpu, vector, false); | 582 | irq_matrix_free(vector_matrix, cpu, vector, false); |
575 | __this_cpu_write(vector_irq[vector], VECTOR_UNUSED); | 583 | __this_cpu_write(vector_irq[vector], VECTOR_UNUSED); |
576 | hlist_del_init(&apicd->clist); | 584 | hlist_del_init(&apicd->clist); |
577 | apicd->cfg.old_vector = 0; | 585 | apicd->prev_vector = 0; |
578 | apicd->move_in_progress = 0; | 586 | apicd->move_in_progress = 0; |
579 | } | 587 | } |
580 | 588 | ||
@@ -589,7 +597,7 @@ asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void) | |||
589 | raw_spin_lock(&vector_lock); | 597 | raw_spin_lock(&vector_lock); |
590 | 598 | ||
591 | hlist_for_each_entry_safe(apicd, tmp, clhead, clist) { | 599 | hlist_for_each_entry_safe(apicd, tmp, clhead, clist) { |
592 | unsigned int irr, vector = apicd->cfg.old_vector; | 600 | unsigned int irr, vector = apicd->prev_vector; |
593 | 601 | ||
594 | /* | 602 | /* |
595 | * Paranoia: Check if the vector that needs to be cleaned | 603 | * Paranoia: Check if the vector that needs to be cleaned |
@@ -623,7 +631,7 @@ static void __send_cleanup_vector(struct apic_chip_data *apicd) | |||
623 | hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu)); | 631 | hlist_add_head(&apicd->clist, per_cpu_ptr(&cleanup_list, cpu)); |
624 | apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR); | 632 | apic->send_IPI(cpu, IRQ_MOVE_CLEANUP_VECTOR); |
625 | } else { | 633 | } else { |
626 | apicd->cfg.old_vector = 0; | 634 | apicd->prev_vector = 0; |
627 | } | 635 | } |
628 | raw_spin_unlock(&vector_lock); | 636 | raw_spin_unlock(&vector_lock); |
629 | } | 637 | } |
@@ -632,7 +640,7 @@ void send_cleanup_vector(struct irq_cfg *cfg) | |||
632 | { | 640 | { |
633 | struct apic_chip_data *apicd; | 641 | struct apic_chip_data *apicd; |
634 | 642 | ||
635 | apicd = container_of(cfg, struct apic_chip_data, cfg); | 643 | apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); |
636 | if (apicd->move_in_progress) | 644 | if (apicd->move_in_progress) |
637 | __send_cleanup_vector(apicd); | 645 | __send_cleanup_vector(apicd); |
638 | } | 646 | } |
@@ -641,11 +649,11 @@ static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector) | |||
641 | { | 649 | { |
642 | struct apic_chip_data *apicd; | 650 | struct apic_chip_data *apicd; |
643 | 651 | ||
644 | apicd = container_of(cfg, struct apic_chip_data, cfg); | 652 | apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); |
645 | if (likely(!apicd->move_in_progress)) | 653 | if (likely(!apicd->move_in_progress)) |
646 | return; | 654 | return; |
647 | 655 | ||
648 | if (vector == apicd->cfg.vector && apicd->cpu == smp_processor_id()) | 656 | if (vector == apicd->vector && apicd->cpu == smp_processor_id()) |
649 | __send_cleanup_vector(apicd); | 657 | __send_cleanup_vector(apicd); |
650 | } | 658 | } |
651 | 659 | ||
@@ -683,9 +691,9 @@ void irq_force_complete_move(struct irq_desc *desc) | |||
683 | goto unlock; | 691 | goto unlock; |
684 | 692 | ||
685 | /* | 693 | /* |
686 | * If old_vector is empty, no action required. | 694 | * If prev_vector is empty, no action required. |
687 | */ | 695 | */ |
688 | vector = apicd->cfg.old_vector; | 696 | vector = apicd->prev_vector; |
689 | if (!vector) | 697 | if (!vector) |
690 | goto unlock; | 698 | goto unlock; |
691 | 699 | ||