diff options
author | Jiang Liu <jiang.liu@linux.intel.com> | 2015-04-13 22:30:10 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2015-04-24 09:36:55 -0400 |
commit | f7fa7aeeecb7a9abdd5f5d069a71ffb3e99a2a07 (patch) | |
tree | cfdc9b18cdc63da678488b5e6008e40a06ff334b | |
parent | d746d1ebd30c48562a3fb512ab18d5822f137820 (diff) |
x86/irq: Avoid memory allocation in __assign_irq_vector()
Function __assign_irq_vector() is always called with vector_lock held, so
it can safely use a single global temporary cpumask (vector_cpumask)
instead of allocating and freeing one on every invocation.
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Cohen <david.a.cohen@linux.intel.com>
Cc: Sander Eikelenboom <linux@eikelenboom.it>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Link: http://lkml.kernel.org/r/1428978610-28986-34-git-send-email-jiang.liu@linux.intel.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | arch/x86/kernel/apic/vector.c | 33 |
1 files changed, 17 insertions, 16 deletions
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index ad786f8a7cc7..1c7dd42b98c1 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c | |||
@@ -30,6 +30,7 @@ struct apic_chip_data { | |||
30 | 30 | ||
31 | struct irq_domain *x86_vector_domain; | 31 | struct irq_domain *x86_vector_domain; |
32 | static DEFINE_RAW_SPINLOCK(vector_lock); | 32 | static DEFINE_RAW_SPINLOCK(vector_lock); |
33 | static cpumask_var_t vector_cpumask; | ||
33 | static struct irq_chip lapic_controller; | 34 | static struct irq_chip lapic_controller; |
34 | #ifdef CONFIG_X86_IO_APIC | 35 | #ifdef CONFIG_X86_IO_APIC |
35 | static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY]; | 36 | static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY]; |
@@ -116,14 +117,10 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d, | |||
116 | static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; | 117 | static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START; |
117 | static int current_offset = VECTOR_OFFSET_START % 16; | 118 | static int current_offset = VECTOR_OFFSET_START % 16; |
118 | int cpu, err; | 119 | int cpu, err; |
119 | cpumask_var_t tmp_mask; | ||
120 | 120 | ||
121 | if (d->move_in_progress) | 121 | if (d->move_in_progress) |
122 | return -EBUSY; | 122 | return -EBUSY; |
123 | 123 | ||
124 | if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC)) | ||
125 | return -ENOMEM; | ||
126 | |||
127 | /* Only try and allocate irqs on cpus that are present */ | 124 | /* Only try and allocate irqs on cpus that are present */ |
128 | err = -ENOSPC; | 125 | err = -ENOSPC; |
129 | cpumask_clear(d->old_domain); | 126 | cpumask_clear(d->old_domain); |
@@ -131,21 +128,22 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d, | |||
131 | while (cpu < nr_cpu_ids) { | 128 | while (cpu < nr_cpu_ids) { |
132 | int new_cpu, vector, offset; | 129 | int new_cpu, vector, offset; |
133 | 130 | ||
134 | apic->vector_allocation_domain(cpu, tmp_mask, mask); | 131 | apic->vector_allocation_domain(cpu, vector_cpumask, mask); |
135 | 132 | ||
136 | if (cpumask_subset(tmp_mask, d->domain)) { | 133 | if (cpumask_subset(vector_cpumask, d->domain)) { |
137 | err = 0; | 134 | err = 0; |
138 | if (cpumask_equal(tmp_mask, d->domain)) | 135 | if (cpumask_equal(vector_cpumask, d->domain)) |
139 | break; | 136 | break; |
140 | /* | 137 | /* |
141 | * New cpumask using the vector is a proper subset of | 138 | * New cpumask using the vector is a proper subset of |
142 | * the current in use mask. So cleanup the vector | 139 | * the current in use mask. So cleanup the vector |
143 | * allocation for the members that are not used anymore. | 140 | * allocation for the members that are not used anymore. |
144 | */ | 141 | */ |
145 | cpumask_andnot(d->old_domain, d->domain, tmp_mask); | 142 | cpumask_andnot(d->old_domain, d->domain, |
143 | vector_cpumask); | ||
146 | d->move_in_progress = | 144 | d->move_in_progress = |
147 | cpumask_intersects(d->old_domain, cpu_online_mask); | 145 | cpumask_intersects(d->old_domain, cpu_online_mask); |
148 | cpumask_and(d->domain, d->domain, tmp_mask); | 146 | cpumask_and(d->domain, d->domain, vector_cpumask); |
149 | break; | 147 | break; |
150 | } | 148 | } |
151 | 149 | ||
@@ -159,16 +157,18 @@ next: | |||
159 | } | 157 | } |
160 | 158 | ||
161 | if (unlikely(current_vector == vector)) { | 159 | if (unlikely(current_vector == vector)) { |
162 | cpumask_or(d->old_domain, d->old_domain, tmp_mask); | 160 | cpumask_or(d->old_domain, d->old_domain, |
163 | cpumask_andnot(tmp_mask, mask, d->old_domain); | 161 | vector_cpumask); |
164 | cpu = cpumask_first_and(tmp_mask, cpu_online_mask); | 162 | cpumask_andnot(vector_cpumask, mask, d->old_domain); |
163 | cpu = cpumask_first_and(vector_cpumask, | ||
164 | cpu_online_mask); | ||
165 | continue; | 165 | continue; |
166 | } | 166 | } |
167 | 167 | ||
168 | if (test_bit(vector, used_vectors)) | 168 | if (test_bit(vector, used_vectors)) |
169 | goto next; | 169 | goto next; |
170 | 170 | ||
171 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) { | 171 | for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) { |
172 | if (per_cpu(vector_irq, new_cpu)[vector] > | 172 | if (per_cpu(vector_irq, new_cpu)[vector] > |
173 | VECTOR_UNDEFINED) | 173 | VECTOR_UNDEFINED) |
174 | goto next; | 174 | goto next; |
@@ -181,14 +181,13 @@ next: | |||
181 | d->move_in_progress = | 181 | d->move_in_progress = |
182 | cpumask_intersects(d->old_domain, cpu_online_mask); | 182 | cpumask_intersects(d->old_domain, cpu_online_mask); |
183 | } | 183 | } |
184 | for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) | 184 | for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) |
185 | per_cpu(vector_irq, new_cpu)[vector] = irq; | 185 | per_cpu(vector_irq, new_cpu)[vector] = irq; |
186 | d->cfg.vector = vector; | 186 | d->cfg.vector = vector; |
187 | cpumask_copy(d->domain, tmp_mask); | 187 | cpumask_copy(d->domain, vector_cpumask); |
188 | err = 0; | 188 | err = 0; |
189 | break; | 189 | break; |
190 | } | 190 | } |
191 | free_cpumask_var(tmp_mask); | ||
192 | 191 | ||
193 | if (!err) { | 192 | if (!err) { |
194 | /* cache destination APIC IDs into cfg->dest_apicid */ | 193 | /* cache destination APIC IDs into cfg->dest_apicid */ |
@@ -397,6 +396,8 @@ int __init arch_early_irq_init(void) | |||
397 | arch_init_msi_domain(x86_vector_domain); | 396 | arch_init_msi_domain(x86_vector_domain); |
398 | arch_init_htirq_domain(x86_vector_domain); | 397 | arch_init_htirq_domain(x86_vector_domain); |
399 | 398 | ||
399 | BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL)); | ||
400 | |||
400 | return arch_early_ioapic_init(); | 401 | return arch_early_ioapic_init(); |
401 | } | 402 | } |
402 | 403 | ||