-rw-r--r--  arch/x86/Kconfig                           3
-rw-r--r--  arch/x86/include/asm/irq.h                 5
-rw-r--r--  arch/x86/include/asm/pgtable_types.h       6
-rw-r--r--  arch/x86/kernel/apic/io_apic.c             6
-rw-r--r--  arch/x86/kernel/apic/vector.c            221
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c         5
-rw-r--r--  arch/x86/kernel/head64.c                   8
-rw-r--r--  arch/x86/kernel/irq.c                     11
-rw-r--r--  arch/x86/mm/pageattr.c                     4
-rw-r--r--  arch/x86/platform/efi/quirks.c            17
-rw-r--r--  arch/x86/platform/intel-mid/intel-mid.c    8
-rw-r--r--  arch/x86/platform/intel-quark/imr.c       18
12 files changed, 209 insertions, 103 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 330e738ccfc1..9af2e6338400 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -509,11 +509,10 @@ config X86_INTEL_CE
 
 config X86_INTEL_MID
 	bool "Intel MID platform support"
-	depends on X86_32
 	depends on X86_EXTENDED_PLATFORM
 	depends on X86_PLATFORM_DEVICES
 	depends on PCI
-	depends on PCI_GOANY
+	depends on X86_64 || (PCI_GOANY && X86_32)
 	depends on X86_IO_APIC
 	select SFI
 	select I2C
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index 881b4768644a..e7de5c9a4fbd 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -23,11 +23,13 @@ extern void irq_ctx_init(int cpu);
 
 #define __ARCH_HAS_DO_SOFTIRQ
 
+struct irq_desc;
+
 #ifdef CONFIG_HOTPLUG_CPU
 #include <linux/cpumask.h>
 extern int check_irq_vectors_for_cpu_disable(void);
 extern void fixup_irqs(void);
-extern void irq_force_complete_move(int);
+extern void irq_force_complete_move(struct irq_desc *desc);
 #endif
 
 #ifdef CONFIG_HAVE_KVM
@@ -37,7 +39,6 @@ extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
 
-struct irq_desc;
 extern bool handle_irq(struct irq_desc *desc, struct pt_regs *regs);
 
 extern __visible unsigned int do_IRQ(struct pt_regs *regs);
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index 04c27a013165..4432ab7f407c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -366,20 +366,18 @@ static inline enum page_cache_mode pgprot2cachemode(pgprot_t pgprot)
 }
 static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
 {
+	pgprotval_t val = pgprot_val(pgprot);
 	pgprot_t new;
-	unsigned long val;
 
-	val = pgprot_val(pgprot);
 	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
 		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
 	return new;
 }
 static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
 {
+	pgprotval_t val = pgprot_val(pgprot);
 	pgprot_t new;
-	unsigned long val;
 
-	val = pgprot_val(pgprot);
 	pgprot_val(new) = (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
 		((val & _PAGE_PAT_LARGE) >>
 		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index f25321894ad2..fdb0fbfb1197 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2521,6 +2521,7 @@ void __init setup_ioapic_dest(void)
 {
 	int pin, ioapic, irq, irq_entry;
 	const struct cpumask *mask;
+	struct irq_desc *desc;
 	struct irq_data *idata;
 	struct irq_chip *chip;
 
@@ -2536,7 +2537,9 @@ void __init setup_ioapic_dest(void)
 		if (irq < 0 || !mp_init_irq_at_boot(ioapic, irq))
 			continue;
 
-		idata = irq_get_irq_data(irq);
+		desc = irq_to_desc(irq);
+		raw_spin_lock_irq(&desc->lock);
+		idata = irq_desc_get_irq_data(desc);
 
 		/*
 		 * Honour affinities which have been set in early boot
@@ -2550,6 +2553,7 @@ void __init setup_ioapic_dest(void)
 		/* Might be lapic_chip for irq 0 */
 		if (chip->irq_set_affinity)
 			chip->irq_set_affinity(idata, mask, false);
+		raw_spin_unlock_irq(&desc->lock);
 	}
 }
 #endif
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 908cb37da171..3b670df4ba7b 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -31,7 +31,7 @@ struct apic_chip_data {
 struct irq_domain *x86_vector_domain;
 EXPORT_SYMBOL_GPL(x86_vector_domain);
 static DEFINE_RAW_SPINLOCK(vector_lock);
-static cpumask_var_t vector_cpumask;
+static cpumask_var_t vector_cpumask, vector_searchmask, searched_cpumask;
 static struct irq_chip lapic_controller;
 #ifdef CONFIG_X86_IO_APIC
 static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
@@ -118,35 +118,47 @@ static int __assign_irq_vector(int irq, struct apic_chip_data *d,
 	 */
 	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
 	static int current_offset = VECTOR_OFFSET_START % 16;
-	int cpu, err;
+	int cpu, vector;
 
-	if (d->move_in_progress)
+	/*
+	 * If there is still a move in progress or the previous move has not
+	 * been cleaned up completely, tell the caller to come back later.
+	 */
+	if (d->move_in_progress ||
+	    cpumask_intersects(d->old_domain, cpu_online_mask))
 		return -EBUSY;
 
 	/* Only try and allocate irqs on cpus that are present */
-	err = -ENOSPC;
 	cpumask_clear(d->old_domain);
+	cpumask_clear(searched_cpumask);
 	cpu = cpumask_first_and(mask, cpu_online_mask);
 	while (cpu < nr_cpu_ids) {
-		int new_cpu, vector, offset;
+		int new_cpu, offset;
 
+		/* Get the possible target cpus for @mask/@cpu from the apic */
 		apic->vector_allocation_domain(cpu, vector_cpumask, mask);
 
+		/*
+		 * Clear the offline cpus from @vector_cpumask for searching
+		 * and verify whether the result overlaps with @mask. If true,
+		 * then the call to apic->cpu_mask_to_apicid_and() will
+		 * succeed as well. If not, no point in trying to find a
+		 * vector in this mask.
+		 */
+		cpumask_and(vector_searchmask, vector_cpumask, cpu_online_mask);
+		if (!cpumask_intersects(vector_searchmask, mask))
+			goto next_cpu;
+
 		if (cpumask_subset(vector_cpumask, d->domain)) {
-			err = 0;
 			if (cpumask_equal(vector_cpumask, d->domain))
-				break;
+				goto success;
 			/*
-			 * New cpumask using the vector is a proper subset of
-			 * the current in use mask. So cleanup the vector
-			 * allocation for the members that are not used anymore.
+			 * Mark the cpus which are not longer in the mask for
+			 * cleanup.
 			 */
-			cpumask_andnot(d->old_domain, d->domain,
-				       vector_cpumask);
-			d->move_in_progress =
-				cpumask_intersects(d->old_domain, cpu_online_mask);
-			cpumask_and(d->domain, d->domain, vector_cpumask);
-			break;
+			cpumask_andnot(d->old_domain, d->domain, vector_cpumask);
+			vector = d->cfg.vector;
+			goto update;
 		}
 
 		vector = current_vector;
@@ -158,45 +170,60 @@ next:
 		vector = FIRST_EXTERNAL_VECTOR + offset;
 	}
 
-		if (unlikely(current_vector == vector)) {
-			cpumask_or(d->old_domain, d->old_domain,
-				   vector_cpumask);
-			cpumask_andnot(vector_cpumask, mask, d->old_domain);
-			cpu = cpumask_first_and(vector_cpumask,
-						cpu_online_mask);
-			continue;
-		}
+		/* If the search wrapped around, try the next cpu */
+		if (unlikely(current_vector == vector))
+			goto next_cpu;
 
 		if (test_bit(vector, used_vectors))
 			goto next;
 
-		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
+		for_each_cpu(new_cpu, vector_searchmask) {
 			if (!IS_ERR_OR_NULL(per_cpu(vector_irq, new_cpu)[vector]))
 				goto next;
 		}
 		/* Found one! */
 		current_vector = vector;
 		current_offset = offset;
-		if (d->cfg.vector) {
+		/* Schedule the old vector for cleanup on all cpus */
+		if (d->cfg.vector)
 			cpumask_copy(d->old_domain, d->domain);
-			d->move_in_progress =
-				cpumask_intersects(d->old_domain, cpu_online_mask);
-		}
-		for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
+		for_each_cpu(new_cpu, vector_searchmask)
 			per_cpu(vector_irq, new_cpu)[vector] = irq_to_desc(irq);
-		d->cfg.vector = vector;
-		cpumask_copy(d->domain, vector_cpumask);
-		err = 0;
-		break;
-	}
+		goto update;
 
-	if (!err) {
-		/* cache destination APIC IDs into cfg->dest_apicid */
-		err = apic->cpu_mask_to_apicid_and(mask, d->domain,
-						   &d->cfg.dest_apicid);
+next_cpu:
+		/*
+		 * We exclude the current @vector_cpumask from the requested
+		 * @mask and try again with the next online cpu in the
+		 * result. We cannot modify @mask, so we use @vector_cpumask
+		 * as a temporary buffer here as it will be reassigned when
+		 * calling apic->vector_allocation_domain() above.
+		 */
+		cpumask_or(searched_cpumask, searched_cpumask, vector_cpumask);
+		cpumask_andnot(vector_cpumask, mask, searched_cpumask);
+		cpu = cpumask_first_and(vector_cpumask, cpu_online_mask);
+		continue;
 	}
+	return -ENOSPC;
 
-	return err;
+update:
+	/*
+	 * Exclude offline cpus from the cleanup mask and set the
+	 * move_in_progress flag when the result is not empty.
+	 */
+	cpumask_and(d->old_domain, d->old_domain, cpu_online_mask);
+	d->move_in_progress = !cpumask_empty(d->old_domain);
+	d->cfg.vector = vector;
+	cpumask_copy(d->domain, vector_cpumask);
+success:
+	/*
+	 * Cache destination APIC IDs into cfg->dest_apicid. This cannot fail
+	 * as we already established, that mask & d->domain & cpu_online_mask
+	 * is not empty.
+	 */
+	BUG_ON(apic->cpu_mask_to_apicid_and(mask, d->domain,
+					    &d->cfg.dest_apicid));
+	return 0;
 }
 
 static int assign_irq_vector(int irq, struct apic_chip_data *data,
@@ -226,10 +253,8 @@ static int assign_irq_vector_policy(int irq, int node,
 static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
 	struct irq_desc *desc;
-	unsigned long flags;
 	int cpu, vector;
 
-	raw_spin_lock_irqsave(&vector_lock, flags);
 	BUG_ON(!data->cfg.vector);
 
 	vector = data->cfg.vector;
@@ -239,10 +264,13 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 	data->cfg.vector = 0;
 	cpumask_clear(data->domain);
 
-	if (likely(!data->move_in_progress)) {
-		raw_spin_unlock_irqrestore(&vector_lock, flags);
+	/*
+	 * If move is in progress or the old_domain mask is not empty,
+	 * i.e. the cleanup IPI has not been processed yet, we need to remove
+	 * the old references to desc from all cpus vector tables.
+	 */
+	if (!data->move_in_progress && cpumask_empty(data->old_domain))
 		return;
-	}
 
 	desc = irq_to_desc(irq);
 	for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
@@ -255,7 +283,6 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
 		}
 	}
 	data->move_in_progress = 0;
-	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 void init_irq_alloc_info(struct irq_alloc_info *info,
@@ -276,19 +303,24 @@ void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
 static void x86_vector_free_irqs(struct irq_domain *domain,
 				 unsigned int virq, unsigned int nr_irqs)
 {
+	struct apic_chip_data *apic_data;
 	struct irq_data *irq_data;
+	unsigned long flags;
 	int i;
 
 	for (i = 0; i < nr_irqs; i++) {
 		irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
 		if (irq_data && irq_data->chip_data) {
+			raw_spin_lock_irqsave(&vector_lock, flags);
 			clear_irq_vector(virq + i, irq_data->chip_data);
-			free_apic_chip_data(irq_data->chip_data);
+			apic_data = irq_data->chip_data;
+			irq_domain_reset_irq_data(irq_data);
+			raw_spin_unlock_irqrestore(&vector_lock, flags);
+			free_apic_chip_data(apic_data);
 #ifdef CONFIG_X86_IO_APIC
 			if (virq + i < nr_legacy_irqs())
 				legacy_irq_data[virq + i] = NULL;
 #endif
-			irq_domain_reset_irq_data(irq_data);
 		}
 	}
 }
@@ -406,6 +438,8 @@ int __init arch_early_irq_init(void)
 	arch_init_htirq_domain(x86_vector_domain);
 
 	BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+	BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
+	BUG_ON(!alloc_cpumask_var(&searched_cpumask, GFP_KERNEL));
 
 	return arch_early_ioapic_init();
 }
@@ -494,14 +528,7 @@ static int apic_set_affinity(struct irq_data *irq_data,
 		return -EINVAL;
 
 	err = assign_irq_vector(irq, data, dest);
-	if (err) {
-		if (assign_irq_vector(irq, data,
-				      irq_data_get_affinity_mask(irq_data)))
-			pr_err("Failed to recover vector for irq %d\n", irq);
-		return err;
-	}
-
-	return IRQ_SET_MASK_OK;
+	return err ? err : IRQ_SET_MASK_OK;
 }
 
 static struct irq_chip lapic_controller = {
@@ -513,20 +540,12 @@ static struct irq_chip lapic_controller = {
 #ifdef CONFIG_SMP
 static void __send_cleanup_vector(struct apic_chip_data *data)
 {
-	cpumask_var_t cleanup_mask;
-
-	if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
-		unsigned int i;
-
-		for_each_cpu_and(i, data->old_domain, cpu_online_mask)
-			apic->send_IPI_mask(cpumask_of(i),
-					    IRQ_MOVE_CLEANUP_VECTOR);
-	} else {
-		cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
-		apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-		free_cpumask_var(cleanup_mask);
-	}
+	raw_spin_lock(&vector_lock);
+	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
 	data->move_in_progress = 0;
+	if (!cpumask_empty(data->old_domain))
+		apic->send_IPI_mask(data->old_domain, IRQ_MOVE_CLEANUP_VECTOR);
+	raw_spin_unlock(&vector_lock);
 }
 
 void send_cleanup_vector(struct irq_cfg *cfg)
@@ -570,12 +589,25 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 			goto unlock;
 
 		/*
-		 * Check if the irq migration is in progress. If so, we
-		 * haven't received the cleanup request yet for this irq.
+		 * Nothing to cleanup if irq migration is in progress
+		 * or this cpu is not set in the cleanup mask.
 		 */
-		if (data->move_in_progress)
+		if (data->move_in_progress ||
+		    !cpumask_test_cpu(me, data->old_domain))
 			goto unlock;
 
+		/*
+		 * We have two cases to handle here:
+		 * 1) vector is unchanged but the target mask got reduced
+		 * 2) vector and the target mask has changed
+		 *
+		 * #1 is obvious, but in #2 we have two vectors with the same
+		 * irq descriptor: the old and the new vector. So we need to
+		 * make sure that we only cleanup the old vector. The new
+		 * vector has the current @vector number in the config and
+		 * this cpu is part of the target mask. We better leave that
+		 * one alone.
+		 */
 		if (vector == data->cfg.vector &&
 		    cpumask_test_cpu(me, data->domain))
 			goto unlock;
@@ -593,6 +625,7 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 			goto unlock;
 		}
 		__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+		cpumask_clear_cpu(me, data->old_domain);
 unlock:
 		raw_spin_unlock(&desc->lock);
 	}
@@ -621,12 +654,48 @@ void irq_complete_move(struct irq_cfg *cfg)
 	__irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
 }
 
-void irq_force_complete_move(int irq)
+/*
+ * Called with @desc->lock held and interrupts disabled.
+ */
+void irq_force_complete_move(struct irq_desc *desc)
 {
-	struct irq_cfg *cfg = irq_cfg(irq);
+	struct irq_data *irqdata = irq_desc_get_irq_data(desc);
+	struct apic_chip_data *data = apic_chip_data(irqdata);
+	struct irq_cfg *cfg = data ? &data->cfg : NULL;
 
-	if (cfg)
-		__irq_complete_move(cfg, cfg->vector);
+	if (!cfg)
+		return;
+
+	__irq_complete_move(cfg, cfg->vector);
+
+	/*
+	 * This is tricky. If the cleanup of @data->old_domain has not been
+	 * done yet, then the following setaffinity call will fail with
+	 * -EBUSY. This can leave the interrupt in a stale state.
+	 *
+	 * The cleanup cannot make progress because we hold @desc->lock. So in
+	 * case @data->old_domain is not yet cleaned up, we need to drop the
+	 * lock and acquire it again. @desc cannot go away, because the
+	 * hotplug code holds the sparse irq lock.
+	 */
+	raw_spin_lock(&vector_lock);
+	/* Clean out all offline cpus (including ourself) first. */
+	cpumask_and(data->old_domain, data->old_domain, cpu_online_mask);
+	while (!cpumask_empty(data->old_domain)) {
+		raw_spin_unlock(&vector_lock);
+		raw_spin_unlock(&desc->lock);
+		cpu_relax();
+		raw_spin_lock(&desc->lock);
+		/*
+		 * Reevaluate apic_chip_data. It might have been cleared after
+		 * we dropped @desc->lock.
+		 */
+		data = apic_chip_data(irqdata);
+		if (!data)
+			return;
+		raw_spin_lock(&vector_lock);
+	}
+	raw_spin_unlock(&vector_lock);
 }
 #endif
 
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index d760c6bb37b5..624db00583f4 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -889,7 +889,10 @@ void __init uv_system_init(void)
 		return;
 	}
 	pr_info("UV: Found %s hub\n", hub);
-	map_low_mmrs();
+
+	/* We now only need to map the MMRs on UV1 */
+	if (is_uv1_hub())
+		map_low_mmrs();
 
 	m_n_config.v = uv_read_local_mmr(UVH_RH_GAM_CONFIG_MMR );
 	m_val = m_n_config.s.m_skt;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index f129a9af6357..2c0f3407bd1f 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -192,5 +192,13 @@ void __init x86_64_start_reservations(char *real_mode_data)
 
 	reserve_ebda_region();
 
+	switch (boot_params.hdr.hardware_subarch) {
+	case X86_SUBARCH_INTEL_MID:
+		x86_intel_mid_early_setup();
+		break;
+	default:
+		break;
+	}
+
 	start_kernel();
 }
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index f8062aaf5df9..61521dc19c10 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -462,7 +462,7 @@ void fixup_irqs(void)
 		 * non intr-remapping case, we can't wait till this interrupt
 		 * arrives at this cpu before completing the irq move.
 		 */
-		irq_force_complete_move(irq);
+		irq_force_complete_move(desc);
 
 		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 			break_affinity = 1;
@@ -470,6 +470,15 @@ void fixup_irqs(void)
 		}
 
 		chip = irq_data_get_irq_chip(data);
+		/*
+		 * The interrupt descriptor might have been cleaned up
+		 * already, but it is not yet removed from the radix tree
+		 */
+		if (!chip) {
+			raw_spin_unlock(&desc->lock);
+			continue;
+		}
+
 		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
 			chip->irq_mask(data);
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index fc6a4c8f6e2a..2440814b0069 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -33,7 +33,7 @@ struct cpa_data {
 	pgd_t *pgd;
 	pgprot_t mask_set;
 	pgprot_t mask_clr;
-	int numpages;
+	unsigned long numpages;
 	int flags;
 	unsigned long pfn;
 	unsigned force_split : 1;
@@ -1350,7 +1350,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
 		 * CPA operation. Either a large page has been
 		 * preserved or a single page update happened.
 		 */
-		BUG_ON(cpa->numpages > numpages);
+		BUG_ON(cpa->numpages > numpages || !cpa->numpages);
 		numpages -= cpa->numpages;
 		if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
 			cpa->curpage++;
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 1c7380da65ff..2d66db8f80f9 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -8,6 +8,7 @@
 #include <linux/memblock.h>
 #include <linux/bootmem.h>
 #include <linux/acpi.h>
+#include <linux/dmi.h>
 #include <asm/efi.h>
 #include <asm/uv/uv.h>
 
@@ -248,6 +249,16 @@ out:
 	return ret;
 }
 
+static const struct dmi_system_id sgi_uv1_dmi[] = {
+	{ NULL, "SGI UV1",
+		{ DMI_MATCH(DMI_PRODUCT_NAME, "Stoutland Platform"),
+		  DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+		  DMI_MATCH(DMI_BIOS_VENDOR, "SGI.COM"),
+		}
+	},
+	{ } /* NULL entry stops DMI scanning */
+};
+
 void __init efi_apply_memmap_quirks(void)
 {
 	/*
@@ -260,10 +271,8 @@ void __init efi_apply_memmap_quirks(void)
 		efi_unmap_memmap();
 	}
 
-	/*
-	 * UV doesn't support the new EFI pagetable mapping yet.
-	 */
-	if (is_uv_system())
+	/* UV2+ BIOS has a fix for this issue. UV1 still needs the quirk. */
+	if (dmi_check_system(sgi_uv1_dmi))
 		set_bit(EFI_OLD_MEMMAP, &efi.flags);
 }
 
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
index 1bbc21e2e4ae..90bb997ed0a2 100644
--- a/arch/x86/platform/intel-mid/intel-mid.c
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -138,7 +138,7 @@ static void intel_mid_arch_setup(void)
 		intel_mid_ops = get_intel_mid_ops[__intel_mid_cpu_chip]();
 	else {
 		intel_mid_ops = get_intel_mid_ops[INTEL_MID_CPU_CHIP_PENWELL]();
-		pr_info("ARCH: Unknown SoC, assuming PENWELL!\n");
+		pr_info("ARCH: Unknown SoC, assuming Penwell!\n");
 	}
 
 out:
@@ -214,12 +214,10 @@ static inline int __init setup_x86_intel_mid_timer(char *arg)
 	else if (strcmp("lapic_and_apbt", arg) == 0)
 		intel_mid_timer_options = INTEL_MID_TIMER_LAPIC_APBT;
 	else {
-		pr_warn("X86 INTEL_MID timer option %s not recognised"
-			" use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
-			arg);
+		pr_warn("X86 INTEL_MID timer option %s not recognised use x86_intel_mid_timer=apbt_only or lapic_and_apbt\n",
+			arg);
 		return -EINVAL;
 	}
 	return 0;
 }
 __setup("x86_intel_mid_timer=", setup_x86_intel_mid_timer);
-
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
index c1bdafaac3ca..c61b6c332e97 100644
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -220,11 +220,12 @@ static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
 		if (imr_is_enabled(&imr)) {
 			base = imr_to_phys(imr.addr_lo);
 			end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+			size = end - base + 1;
 		} else {
 			base = 0;
 			end = 0;
+			size = 0;
 		}
-		size = end - base;
 		seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
 			   "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
 			   &base, &end, size, imr.rmask, imr.wmask,
@@ -579,6 +580,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 {
 	phys_addr_t base = virt_to_phys(&_text);
 	size_t size = virt_to_phys(&__end_rodata) - base;
+	unsigned long start, end;
 	int i;
 	int ret;
 
@@ -586,18 +588,24 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 	for (i = 0; i < idev->max_imr; i++)
 		imr_clear(i);
 
+	start = (unsigned long)_text;
+	end = (unsigned long)__end_rodata - 1;
+
 	/*
 	 * Setup a locked IMR around the physical extent of the kernel
 	 * from the beginning of the .text secton to the end of the
 	 * .rodata section as one physically contiguous block.
+	 *
+	 * We don't round up @size since it is already PAGE_SIZE aligned.
+	 * See vmlinux.lds.S for details.
 	 */
 	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
 	if (ret < 0) {
-		pr_err("unable to setup IMR for kernel: (%p - %p)\n",
-			&_text, &__end_rodata);
+		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
+			size / 1024, start, end);
 	} else {
-		pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
-			size / 1024, &_text, &__end_rodata);
+		pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
+			size / 1024, start, end);
 	}
 
 }