aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-06-10 12:44:53 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-06-10 12:44:53 -0400
commitf4e5b30d809d3882c69f43b5c90779af033d40c4 (patch)
tree1b8e67b40d348bfea17e827cedf0fd9101b5536a
parenta2211de0f979aa83e9008fd04f39ea5f63473154 (diff)
parent1d9f3e20a56d33e55748552aeec597f58542f92d (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 updates and fixes from Thomas Gleixner: - Fix the (late) fallout from the vector management rework causing hlist corruption and irq descriptor reference leaks caused by a missing sanity check. The straightforward fix triggered another long-standing issue to surface. The pre rework code hid the issue due to being way slower, but now the chance that user space sees an EBUSY error return when updating irq affinities is way higher, though quite a bunch of userspace tools do not handle it properly despite the fact that EBUSY could be returned for at least 10 years. It turned out that the EBUSY return can be avoided completely by utilizing the existing delayed affinity update mechanism for irq remapped scenarios as well. That's a bit more error handling in the kernel, but avoids fruitless fingerpointing discussions with tool developers. - Decouple PHYSICAL_MASK from AMD SME as it's going to be required for the upcoming Intel memory encryption support as well. - Handle legacy device ACPI detection properly for newer platforms - Fix the wrong argument ordering in the vector allocation tracepoint - Simplify the IDT setup code for the APIC=n case - Use the proper string helpers in the MTRR code - Remove a stale unused VDSO source file - Convert the microcode update lock to a raw spinlock as it's used in atomic context. 
* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/intel_rdt: Enable CMT and MBM on new Skylake stepping x86/apic/vector: Print APIC control bits in debugfs genirq/affinity: Defer affinity setting if irq chip is busy x86/platform/uv: Use apic_ack_irq() x86/ioapic: Use apic_ack_irq() irq_remapping: Use apic_ack_irq() x86/apic: Provide apic_ack_irq() genirq/migration: Avoid out of line call if pending is not set genirq/generic_pending: Do not lose pending affinity update x86/apic/vector: Prevent hlist corruption and leaks x86/vector: Fix the args of vector_alloc tracepoint x86/idt: Simplify the idt_setup_apic_and_irq_gates() x86/platform/uv: Remove extra parentheses x86/mm: Decouple dynamic __PHYSICAL_MASK from AMD SME x86: Mark native_set_p4d() as __always_inline x86/microcode: Make the late update update_lock a raw lock for RT x86/mtrr: Convert to use strncpy_from_user() helper x86/mtrr: Convert to use match_string() helper x86/vdso: Remove unused file x86/i8237: Register device based on FADT legacy boot flag
-rw-r--r--arch/x86/Kconfig4
-rw-r--r--arch/x86/boot/compressed/kaslr_64.c5
-rw-r--r--arch/x86/include/asm/apic.h2
-rw-r--r--arch/x86/include/asm/page_types.h8
-rw-r--r--arch/x86/include/asm/pgtable_64.h4
-rw-r--r--arch/x86/include/asm/trace/irq_vectors.h2
-rw-r--r--arch/x86/include/asm/x86_init.h1
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/apic/vector.c45
-rw-r--r--arch/x86/kernel/cpu/intel_rdt.c2
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c6
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c33
-rw-r--r--arch/x86/kernel/i8237.c25
-rw-r--r--arch/x86/kernel/idt.c7
-rw-r--r--arch/x86/kernel/platform-quirks.c7
-rw-r--r--arch/x86/mm/mem_encrypt_identity.c3
-rw-r--r--arch/x86/mm/pgtable.c5
-rw-r--r--arch/x86/platform/uv/tlb_uv.c2
-rw-r--r--arch/x86/platform/uv/uv_irq.c7
-rw-r--r--drivers/iommu/amd_iommu.c2
-rw-r--r--drivers/iommu/intel_irq_remapping.c2
-rw-r--r--drivers/iommu/irq_remapping.c5
-rw-r--r--drivers/iommu/irq_remapping.h2
-rw-r--r--include/linux/irq.h7
-rw-r--r--kernel/irq/manage.c37
-rw-r--r--kernel/irq/migration.c31
26 files changed, 176 insertions, 80 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f182a4e8e5bd..0527060b2710 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -334,6 +334,9 @@ config ARCH_SUPPORTS_UPROBES
334config FIX_EARLYCON_MEM 334config FIX_EARLYCON_MEM
335 def_bool y 335 def_bool y
336 336
337config DYNAMIC_PHYSICAL_MASK
338 bool
339
337config PGTABLE_LEVELS 340config PGTABLE_LEVELS
338 int 341 int
339 default 5 if X86_5LEVEL 342 default 5 if X86_5LEVEL
@@ -1486,6 +1489,7 @@ config ARCH_HAS_MEM_ENCRYPT
1486config AMD_MEM_ENCRYPT 1489config AMD_MEM_ENCRYPT
1487 bool "AMD Secure Memory Encryption (SME) support" 1490 bool "AMD Secure Memory Encryption (SME) support"
1488 depends on X86_64 && CPU_SUP_AMD 1491 depends on X86_64 && CPU_SUP_AMD
1492 select DYNAMIC_PHYSICAL_MASK
1489 ---help--- 1493 ---help---
1490 Say yes to enable support for the encryption of system memory. 1494 Say yes to enable support for the encryption of system memory.
1491 This requires an AMD processor that supports Secure Memory 1495 This requires an AMD processor that supports Secure Memory
diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
index 522d11431433..748456c365f4 100644
--- a/arch/x86/boot/compressed/kaslr_64.c
+++ b/arch/x86/boot/compressed/kaslr_64.c
@@ -69,6 +69,8 @@ static struct alloc_pgt_data pgt_data;
69/* The top level page table entry pointer. */ 69/* The top level page table entry pointer. */
70static unsigned long top_level_pgt; 70static unsigned long top_level_pgt;
71 71
72phys_addr_t physical_mask = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
73
72/* 74/*
73 * Mapping information structure passed to kernel_ident_mapping_init(). 75 * Mapping information structure passed to kernel_ident_mapping_init().
74 * Due to relocation, pointers must be assigned at run time not build time. 76 * Due to relocation, pointers must be assigned at run time not build time.
@@ -81,6 +83,9 @@ void initialize_identity_maps(void)
81 /* If running as an SEV guest, the encryption mask is required. */ 83 /* If running as an SEV guest, the encryption mask is required. */
82 set_sev_encryption_mask(); 84 set_sev_encryption_mask();
83 85
86 /* Exclude the encryption mask from __PHYSICAL_MASK */
87 physical_mask &= ~sme_me_mask;
88
84 /* Init mapping_info with run-time function/buffer pointers. */ 89 /* Init mapping_info with run-time function/buffer pointers. */
85 mapping_info.alloc_pgt_page = alloc_pgt_page; 90 mapping_info.alloc_pgt_page = alloc_pgt_page;
86 mapping_info.context = &pgt_data; 91 mapping_info.context = &pgt_data;
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 08acd954f00e..74a9e06b6cfd 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -436,6 +436,8 @@ static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
436 436
437#endif /* CONFIG_X86_LOCAL_APIC */ 437#endif /* CONFIG_X86_LOCAL_APIC */
438 438
439extern void apic_ack_irq(struct irq_data *data);
440
439static inline void ack_APIC_irq(void) 441static inline void ack_APIC_irq(void)
440{ 442{
441 /* 443 /*
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 1e53560a84bb..c85e15010f48 100644
--- a/arch/x86/include/asm/page_types.h
+++ b/arch/x86/include/asm/page_types.h
@@ -17,7 +17,6 @@
17#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) 17#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
18#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) 18#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
19 19
20#define __PHYSICAL_MASK ((phys_addr_t)(__sme_clr((1ULL << __PHYSICAL_MASK_SHIFT) - 1)))
21#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) 20#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
22 21
23/* Cast *PAGE_MASK to a signed type so that it is sign-extended if 22/* Cast *PAGE_MASK to a signed type so that it is sign-extended if
@@ -55,6 +54,13 @@
55 54
56#ifndef __ASSEMBLY__ 55#ifndef __ASSEMBLY__
57 56
57#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
58extern phys_addr_t physical_mask;
59#define __PHYSICAL_MASK physical_mask
60#else
61#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
62#endif
63
58extern int devmem_is_allowed(unsigned long pagenr); 64extern int devmem_is_allowed(unsigned long pagenr);
59 65
60extern unsigned long max_low_pfn_mapped; 66extern unsigned long max_low_pfn_mapped;
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 3c5385f9a88f..0fdcd21dadbd 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -216,7 +216,7 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
216} 216}
217#endif 217#endif
218 218
219static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) 219static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
220{ 220{
221 pgd_t pgd; 221 pgd_t pgd;
222 222
@@ -230,7 +230,7 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
230 *p4dp = native_make_p4d(native_pgd_val(pgd)); 230 *p4dp = native_make_p4d(native_pgd_val(pgd));
231} 231}
232 232
233static inline void native_p4d_clear(p4d_t *p4d) 233static __always_inline void native_p4d_clear(p4d_t *p4d)
234{ 234{
235 native_set_p4d(p4d, native_make_p4d(0)); 235 native_set_p4d(p4d, native_make_p4d(0));
236} 236}
diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 22647a642e98..0af81b590a0c 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -236,7 +236,7 @@ TRACE_EVENT(vector_alloc,
236 TP_PROTO(unsigned int irq, unsigned int vector, bool reserved, 236 TP_PROTO(unsigned int irq, unsigned int vector, bool reserved,
237 int ret), 237 int ret),
238 238
239 TP_ARGS(irq, vector, ret, reserved), 239 TP_ARGS(irq, vector, reserved, ret),
240 240
241 TP_STRUCT__entry( 241 TP_STRUCT__entry(
242 __field( unsigned int, irq ) 242 __field( unsigned int, irq )
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 2d27236c16a3..b85a7c54c6a1 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -301,5 +301,6 @@ extern struct x86_apic_ops x86_apic_ops;
301extern void x86_early_init_platform_quirks(void); 301extern void x86_early_init_platform_quirks(void);
302extern void x86_init_noop(void); 302extern void x86_init_noop(void);
303extern void x86_init_uint_noop(unsigned int unused); 303extern void x86_init_uint_noop(unsigned int unused);
304extern bool x86_pnpbios_disabled(void);
304 305
305#endif 306#endif
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 7553819c74c3..3982f79d2377 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1851,7 +1851,7 @@ static void ioapic_ir_ack_level(struct irq_data *irq_data)
1851 * intr-remapping table entry. Hence for the io-apic 1851 * intr-remapping table entry. Hence for the io-apic
1852 * EOI we use the pin number. 1852 * EOI we use the pin number.
1853 */ 1853 */
1854 ack_APIC_irq(); 1854 apic_ack_irq(irq_data);
1855 eoi_ioapic_pin(data->entry.vector, data); 1855 eoi_ioapic_pin(data->entry.vector, data);
1856} 1856}
1857 1857
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index bb6f7a2148d7..35aaee4fc028 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -235,6 +235,15 @@ static int allocate_vector(struct irq_data *irqd, const struct cpumask *dest)
235 if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest)) 235 if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, dest))
236 return 0; 236 return 0;
237 237
238 /*
239 * Careful here. @apicd might either have move_in_progress set or
240 * be enqueued for cleanup. Assigning a new vector would either
241 * leave a stale vector on some CPU around or in case of a pending
242 * cleanup corrupt the hlist.
243 */
244 if (apicd->move_in_progress || !hlist_unhashed(&apicd->clist))
245 return -EBUSY;
246
238 vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu); 247 vector = irq_matrix_alloc(vector_matrix, dest, resvd, &cpu);
239 if (vector > 0) 248 if (vector > 0)
240 apic_update_vector(irqd, vector, cpu); 249 apic_update_vector(irqd, vector, cpu);
@@ -579,8 +588,7 @@ error:
579static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d, 588static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
580 struct irq_data *irqd, int ind) 589 struct irq_data *irqd, int ind)
581{ 590{
582 unsigned int cpu, vector, prev_cpu, prev_vector; 591 struct apic_chip_data apicd;
583 struct apic_chip_data *apicd;
584 unsigned long flags; 592 unsigned long flags;
585 int irq; 593 int irq;
586 594
@@ -596,24 +604,26 @@ static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d,
596 return; 604 return;
597 } 605 }
598 606
599 apicd = irqd->chip_data; 607 if (!irqd->chip_data) {
600 if (!apicd) {
601 seq_printf(m, "%*sVector: Not assigned\n", ind, ""); 608 seq_printf(m, "%*sVector: Not assigned\n", ind, "");
602 return; 609 return;
603 } 610 }
604 611
605 raw_spin_lock_irqsave(&vector_lock, flags); 612 raw_spin_lock_irqsave(&vector_lock, flags);
606 cpu = apicd->cpu; 613 memcpy(&apicd, irqd->chip_data, sizeof(apicd));
607 vector = apicd->vector;
608 prev_cpu = apicd->prev_cpu;
609 prev_vector = apicd->prev_vector;
610 raw_spin_unlock_irqrestore(&vector_lock, flags); 614 raw_spin_unlock_irqrestore(&vector_lock, flags);
611 seq_printf(m, "%*sVector: %5u\n", ind, "", vector); 615
612 seq_printf(m, "%*sTarget: %5u\n", ind, "", cpu); 616 seq_printf(m, "%*sVector: %5u\n", ind, "", apicd.vector);
613 if (prev_vector) { 617 seq_printf(m, "%*sTarget: %5u\n", ind, "", apicd.cpu);
614 seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", prev_vector); 618 if (apicd.prev_vector) {
615 seq_printf(m, "%*sPrevious target: %5u\n", ind, "", prev_cpu); 619 seq_printf(m, "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector);
620 seq_printf(m, "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu);
616 } 621 }
622 seq_printf(m, "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0);
623 seq_printf(m, "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0);
624 seq_printf(m, "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0);
625 seq_printf(m, "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0);
626 seq_printf(m, "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(&apicd.clist));
617} 627}
618#endif 628#endif
619 629
@@ -800,13 +810,18 @@ static int apic_retrigger_irq(struct irq_data *irqd)
800 return 1; 810 return 1;
801} 811}
802 812
803void apic_ack_edge(struct irq_data *irqd) 813void apic_ack_irq(struct irq_data *irqd)
804{ 814{
805 irq_complete_move(irqd_cfg(irqd));
806 irq_move_irq(irqd); 815 irq_move_irq(irqd);
807 ack_APIC_irq(); 816 ack_APIC_irq();
808} 817}
809 818
819void apic_ack_edge(struct irq_data *irqd)
820{
821 irq_complete_move(irqd_cfg(irqd));
822 apic_ack_irq(irqd);
823}
824
810static struct irq_chip lapic_controller = { 825static struct irq_chip lapic_controller = {
811 .name = "APIC", 826 .name = "APIC",
812 .irq_ack = apic_ack_edge, 827 .irq_ack = apic_ack_edge,
diff --git a/arch/x86/kernel/cpu/intel_rdt.c b/arch/x86/kernel/cpu/intel_rdt.c
index 24bfa63e86cf..ec4754f81cbd 100644
--- a/arch/x86/kernel/cpu/intel_rdt.c
+++ b/arch/x86/kernel/cpu/intel_rdt.c
@@ -845,6 +845,8 @@ static __init void rdt_quirks(void)
845 case INTEL_FAM6_SKYLAKE_X: 845 case INTEL_FAM6_SKYLAKE_X:
846 if (boot_cpu_data.x86_stepping <= 4) 846 if (boot_cpu_data.x86_stepping <= 4)
847 set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat"); 847 set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
848 else
849 set_rdt_options("!l3cat");
848 } 850 }
849} 851}
850 852
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 77e201301528..08286269fd24 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -70,7 +70,7 @@ static DEFINE_MUTEX(microcode_mutex);
70/* 70/*
71 * Serialize late loading so that CPUs get updated one-by-one. 71 * Serialize late loading so that CPUs get updated one-by-one.
72 */ 72 */
73static DEFINE_SPINLOCK(update_lock); 73static DEFINE_RAW_SPINLOCK(update_lock);
74 74
75struct ucode_cpu_info ucode_cpu_info[NR_CPUS]; 75struct ucode_cpu_info ucode_cpu_info[NR_CPUS];
76 76
@@ -560,9 +560,9 @@ static int __reload_late(void *info)
560 if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC)) 560 if (__wait_for_cpus(&late_cpus_in, NSEC_PER_SEC))
561 return -1; 561 return -1;
562 562
563 spin_lock(&update_lock); 563 raw_spin_lock(&update_lock);
564 apply_microcode_local(&err); 564 apply_microcode_local(&err);
565 spin_unlock(&update_lock); 565 raw_spin_unlock(&update_lock);
566 566
567 /* siblings return UCODE_OK because their engine got updated already */ 567 /* siblings return UCODE_OK because their engine got updated already */
568 if (err > UCODE_NFOUND) { 568 if (err > UCODE_NFOUND) {
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 558444b23923..c610f47373e4 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -106,17 +106,9 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
106 106
107 memset(line, 0, LINE_SIZE); 107 memset(line, 0, LINE_SIZE);
108 108
109 length = len; 109 length = strncpy_from_user(line, buf, LINE_SIZE - 1);
110 length--;
111
112 if (length > LINE_SIZE - 1)
113 length = LINE_SIZE - 1;
114
115 if (length < 0) 110 if (length < 0)
116 return -EINVAL; 111 return length;
117
118 if (copy_from_user(line, buf, length))
119 return -EFAULT;
120 112
121 linelen = strlen(line); 113 linelen = strlen(line);
122 ptr = line + linelen - 1; 114 ptr = line + linelen - 1;
@@ -149,17 +141,16 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
149 return -EINVAL; 141 return -EINVAL;
150 ptr = skip_spaces(ptr + 5); 142 ptr = skip_spaces(ptr + 5);
151 143
152 for (i = 0; i < MTRR_NUM_TYPES; ++i) { 144 i = match_string(mtrr_strings, MTRR_NUM_TYPES, ptr);
153 if (strcmp(ptr, mtrr_strings[i])) 145 if (i < 0)
154 continue; 146 return i;
155 base >>= PAGE_SHIFT; 147
156 size >>= PAGE_SHIFT; 148 base >>= PAGE_SHIFT;
157 err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true); 149 size >>= PAGE_SHIFT;
158 if (err < 0) 150 err = mtrr_add_page((unsigned long)base, (unsigned long)size, i, true);
159 return err; 151 if (err < 0)
160 return len; 152 return err;
161 } 153 return len;
162 return -EINVAL;
163} 154}
164 155
165static long 156static long
diff --git a/arch/x86/kernel/i8237.c b/arch/x86/kernel/i8237.c
index 8eeaa81de066..0a3e70fd00d6 100644
--- a/arch/x86/kernel/i8237.c
+++ b/arch/x86/kernel/i8237.c
@@ -9,10 +9,12 @@
9 * your option) any later version. 9 * your option) any later version.
10 */ 10 */
11 11
12#include <linux/dmi.h>
12#include <linux/init.h> 13#include <linux/init.h>
13#include <linux/syscore_ops.h> 14#include <linux/syscore_ops.h>
14 15
15#include <asm/dma.h> 16#include <asm/dma.h>
17#include <asm/x86_init.h>
16 18
17/* 19/*
18 * This module just handles suspend/resume issues with the 20 * This module just handles suspend/resume issues with the
@@ -49,6 +51,29 @@ static struct syscore_ops i8237_syscore_ops = {
49 51
50static int __init i8237A_init_ops(void) 52static int __init i8237A_init_ops(void)
51{ 53{
54 /*
55 * From SKL PCH onwards, the legacy DMA device is removed in which the
56 * I/O ports (81h-83h, 87h, 89h-8Bh, 8Fh) related to it are removed
57 * as well. All removed ports must return 0xff for a inb() request.
58 *
59 * Note: DMA_PAGE_2 (port 0x81) should not be checked for detecting
60 * the presence of DMA device since it may be used by BIOS to decode
61 * LPC traffic for POST codes. Original LPC only decodes one byte of
62 * port 0x80 but some BIOS may choose to enhance PCH LPC port 0x8x
63 * decoding.
64 */
65 if (dma_inb(DMA_PAGE_0) == 0xFF)
66 return -ENODEV;
67
68 /*
69 * It is not required to load this driver as newer SoC may not
70 * support 8237 DMA or bus mastering from LPC. Platform firmware
71 * must announce the support for such legacy devices via
72 * ACPI_FADT_LEGACY_DEVICES field in FADT table.
73 */
74 if (x86_pnpbios_disabled() && dmi_get_bios_year() >= 2017)
75 return -ENODEV;
76
52 register_syscore_ops(&i8237_syscore_ops); 77 register_syscore_ops(&i8237_syscore_ops);
53 return 0; 78 return 0;
54} 79}
diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c
index 2c3a1b4294eb..74383a3780dc 100644
--- a/arch/x86/kernel/idt.c
+++ b/arch/x86/kernel/idt.c
@@ -317,15 +317,12 @@ void __init idt_setup_apic_and_irq_gates(void)
317 set_intr_gate(i, entry); 317 set_intr_gate(i, entry);
318 } 318 }
319 319
320 for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
321#ifdef CONFIG_X86_LOCAL_APIC 320#ifdef CONFIG_X86_LOCAL_APIC
321 for_each_clear_bit_from(i, system_vectors, NR_VECTORS) {
322 set_bit(i, system_vectors); 322 set_bit(i, system_vectors);
323 set_intr_gate(i, spurious_interrupt); 323 set_intr_gate(i, spurious_interrupt);
324#else
325 entry = irq_entries_start + 8 * (i - FIRST_EXTERNAL_VECTOR);
326 set_intr_gate(i, entry);
327#endif
328 } 324 }
325#endif
329} 326}
330 327
331/** 328/**
diff --git a/arch/x86/kernel/platform-quirks.c b/arch/x86/kernel/platform-quirks.c
index 235fe6008ac8..b348a672f71d 100644
--- a/arch/x86/kernel/platform-quirks.c
+++ b/arch/x86/kernel/platform-quirks.c
@@ -33,9 +33,14 @@ void __init x86_early_init_platform_quirks(void)
33 x86_platform.set_legacy_features(); 33 x86_platform.set_legacy_features();
34} 34}
35 35
36bool __init x86_pnpbios_disabled(void)
37{
38 return x86_platform.legacy.devices.pnpbios == 0;
39}
40
36#if defined(CONFIG_PNPBIOS) 41#if defined(CONFIG_PNPBIOS)
37bool __init arch_pnpbios_disabled(void) 42bool __init arch_pnpbios_disabled(void)
38{ 43{
39 return x86_platform.legacy.devices.pnpbios == 0; 44 return x86_pnpbios_disabled();
40} 45}
41#endif 46#endif
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index 1b2197d13832..7ae36868aed2 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -527,6 +527,7 @@ void __init sme_enable(struct boot_params *bp)
527 /* SEV state cannot be controlled by a command line option */ 527 /* SEV state cannot be controlled by a command line option */
528 sme_me_mask = me_mask; 528 sme_me_mask = me_mask;
529 sev_enabled = true; 529 sev_enabled = true;
530 physical_mask &= ~sme_me_mask;
530 return; 531 return;
531 } 532 }
532 533
@@ -561,4 +562,6 @@ void __init sme_enable(struct boot_params *bp)
561 sme_me_mask = 0; 562 sme_me_mask = 0;
562 else 563 else
563 sme_me_mask = active_by_default ? me_mask : 0; 564 sme_me_mask = active_by_default ? me_mask : 0;
565
566 physical_mask &= ~sme_me_mask;
564} 567}
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 938dbcd46b97..47b5951e592b 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -8,6 +8,11 @@
8#include <asm/fixmap.h> 8#include <asm/fixmap.h>
9#include <asm/mtrr.h> 9#include <asm/mtrr.h>
10 10
11#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
12phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
13EXPORT_SYMBOL(physical_mask);
14#endif
15
11#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) 16#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
12 17
13#ifdef CONFIG_HIGHPTE 18#ifdef CONFIG_HIGHPTE
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index b36caae0fb2f..b96d38288c60 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -615,7 +615,7 @@ static int uv2_3_wait_completion(struct bau_desc *bau_desc,
615 615
616 /* spin on the status MMR, waiting for it to go idle */ 616 /* spin on the status MMR, waiting for it to go idle */
617 while (descriptor_stat != UV2H_DESC_IDLE) { 617 while (descriptor_stat != UV2H_DESC_IDLE) {
618 if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT)) { 618 if (descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) {
619 /* 619 /*
620 * A h/w bug on the destination side may 620 * A h/w bug on the destination side may
621 * have prevented the message being marked 621 * have prevented the message being marked
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
index e4cb9f4cde8a..fc13cbbb2dce 100644
--- a/arch/x86/platform/uv/uv_irq.c
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -47,11 +47,6 @@ static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
47 47
48static void uv_noop(struct irq_data *data) { } 48static void uv_noop(struct irq_data *data) { }
49 49
50static void uv_ack_apic(struct irq_data *data)
51{
52 ack_APIC_irq();
53}
54
55static int 50static int
56uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask, 51uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
57 bool force) 52 bool force)
@@ -73,7 +68,7 @@ static struct irq_chip uv_irq_chip = {
73 .name = "UV-CORE", 68 .name = "UV-CORE",
74 .irq_mask = uv_noop, 69 .irq_mask = uv_noop,
75 .irq_unmask = uv_noop, 70 .irq_unmask = uv_noop,
76 .irq_eoi = uv_ack_apic, 71 .irq_eoi = apic_ack_irq,
77 .irq_set_affinity = uv_set_irq_affinity, 72 .irq_set_affinity = uv_set_irq_affinity,
78}; 73};
79 74
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 1912e9106fbe..0cea80be2888 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -4385,7 +4385,7 @@ static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
4385 4385
4386static struct irq_chip amd_ir_chip = { 4386static struct irq_chip amd_ir_chip = {
4387 .name = "AMD-IR", 4387 .name = "AMD-IR",
4388 .irq_ack = ir_ack_apic_edge, 4388 .irq_ack = apic_ack_irq,
4389 .irq_set_affinity = amd_ir_set_affinity, 4389 .irq_set_affinity = amd_ir_set_affinity,
4390 .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity, 4390 .irq_set_vcpu_affinity = amd_ir_set_vcpu_affinity,
4391 .irq_compose_msi_msg = ir_compose_msi_msg, 4391 .irq_compose_msi_msg = ir_compose_msi_msg,
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 3062a154a9fb..967450bd421a 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -1223,7 +1223,7 @@ static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
1223 1223
1224static struct irq_chip intel_ir_chip = { 1224static struct irq_chip intel_ir_chip = {
1225 .name = "INTEL-IR", 1225 .name = "INTEL-IR",
1226 .irq_ack = ir_ack_apic_edge, 1226 .irq_ack = apic_ack_irq,
1227 .irq_set_affinity = intel_ir_set_affinity, 1227 .irq_set_affinity = intel_ir_set_affinity,
1228 .irq_compose_msi_msg = intel_ir_compose_msi_msg, 1228 .irq_compose_msi_msg = intel_ir_compose_msi_msg,
1229 .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity, 1229 .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 496deee3ae3a..7d0f3074d41d 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -156,11 +156,6 @@ void panic_if_irq_remap(const char *msg)
156 panic(msg); 156 panic(msg);
157} 157}
158 158
159void ir_ack_apic_edge(struct irq_data *data)
160{
161 ack_APIC_irq();
162}
163
164/** 159/**
165 * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU 160 * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU
166 * device serving request @info 161 * device serving request @info
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index 039c7af7b190..0afef6e43be4 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -65,8 +65,6 @@ struct irq_remap_ops {
65extern struct irq_remap_ops intel_irq_remap_ops; 65extern struct irq_remap_ops intel_irq_remap_ops;
66extern struct irq_remap_ops amd_iommu_irq_ops; 66extern struct irq_remap_ops amd_iommu_irq_ops;
67 67
68extern void ir_ack_apic_edge(struct irq_data *data);
69
70#else /* CONFIG_IRQ_REMAP */ 68#else /* CONFIG_IRQ_REMAP */
71 69
72#define irq_remapping_enabled 0 70#define irq_remapping_enabled 0
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b2067083aa94..4bd2f34947f4 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -552,7 +552,12 @@ extern int irq_affinity_online_cpu(unsigned int cpu);
552#endif 552#endif
553 553
554#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) 554#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
555void irq_move_irq(struct irq_data *data); 555void __irq_move_irq(struct irq_data *data);
556static inline void irq_move_irq(struct irq_data *data)
557{
558 if (unlikely(irqd_is_setaffinity_pending(data)))
559 __irq_move_irq(data);
560}
556void irq_move_masked_irq(struct irq_data *data); 561void irq_move_masked_irq(struct irq_data *data);
557void irq_force_complete_move(struct irq_desc *desc); 562void irq_force_complete_move(struct irq_desc *desc);
558#else 563#else
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 4c2ef8084e32..daeabd791d58 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -205,6 +205,39 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
205 return ret; 205 return ret;
206} 206}
207 207
208#ifdef CONFIG_GENERIC_PENDING_IRQ
209static inline int irq_set_affinity_pending(struct irq_data *data,
210 const struct cpumask *dest)
211{
212 struct irq_desc *desc = irq_data_to_desc(data);
213
214 irqd_set_move_pending(data);
215 irq_copy_pending(desc, dest);
216 return 0;
217}
218#else
219static inline int irq_set_affinity_pending(struct irq_data *data,
220 const struct cpumask *dest)
221{
222 return -EBUSY;
223}
224#endif
225
226static int irq_try_set_affinity(struct irq_data *data,
227 const struct cpumask *dest, bool force)
228{
229 int ret = irq_do_set_affinity(data, dest, force);
230
231 /*
232 * In case that the underlying vector management is busy and the
233 * architecture supports the generic pending mechanism then utilize
234 * this to avoid returning an error to user space.
235 */
236 if (ret == -EBUSY && !force)
237 ret = irq_set_affinity_pending(data, dest);
238 return ret;
239}
240
208int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, 241int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
209 bool force) 242 bool force)
210{ 243{
@@ -215,8 +248,8 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
215 if (!chip || !chip->irq_set_affinity) 248 if (!chip || !chip->irq_set_affinity)
216 return -EINVAL; 249 return -EINVAL;
217 250
218 if (irq_can_move_pcntxt(data)) { 251 if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
219 ret = irq_do_set_affinity(data, mask, force); 252 ret = irq_try_set_affinity(data, mask, force);
220 } else { 253 } else {
221 irqd_set_move_pending(data); 254 irqd_set_move_pending(data);
222 irq_copy_pending(desc, mask); 255 irq_copy_pending(desc, mask);
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 86ae0eb80b53..def48589ea48 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -38,17 +38,18 @@ bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
38void irq_move_masked_irq(struct irq_data *idata) 38void irq_move_masked_irq(struct irq_data *idata)
39{ 39{
40 struct irq_desc *desc = irq_data_to_desc(idata); 40 struct irq_desc *desc = irq_data_to_desc(idata);
41 struct irq_chip *chip = desc->irq_data.chip; 41 struct irq_data *data = &desc->irq_data;
42 struct irq_chip *chip = data->chip;
42 43
43 if (likely(!irqd_is_setaffinity_pending(&desc->irq_data))) 44 if (likely(!irqd_is_setaffinity_pending(data)))
44 return; 45 return;
45 46
46 irqd_clr_move_pending(&desc->irq_data); 47 irqd_clr_move_pending(data);
47 48
48 /* 49 /*
49 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway. 50 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
50 */ 51 */
51 if (irqd_is_per_cpu(&desc->irq_data)) { 52 if (irqd_is_per_cpu(data)) {
52 WARN_ON(1); 53 WARN_ON(1);
53 return; 54 return;
54 } 55 }
@@ -73,13 +74,24 @@ void irq_move_masked_irq(struct irq_data *idata)
73 * For correct operation this depends on the caller 74 * For correct operation this depends on the caller
74 * masking the irqs. 75 * masking the irqs.
75 */ 76 */
76 if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) 77 if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) {
77 irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false); 78 int ret;
78 79
80 ret = irq_do_set_affinity(data, desc->pending_mask, false);
81 /*
82 * If the there is a cleanup pending in the underlying
83 * vector management, reschedule the move for the next
84 * interrupt. Leave desc->pending_mask intact.
85 */
86 if (ret == -EBUSY) {
87 irqd_set_move_pending(data);
88 return;
89 }
90 }
79 cpumask_clear(desc->pending_mask); 91 cpumask_clear(desc->pending_mask);
80} 92}
81 93
82void irq_move_irq(struct irq_data *idata) 94void __irq_move_irq(struct irq_data *idata)
83{ 95{
84 bool masked; 96 bool masked;
85 97
@@ -90,9 +102,6 @@ void irq_move_irq(struct irq_data *idata)
90 */ 102 */
91 idata = irq_desc_get_irq_data(irq_data_to_desc(idata)); 103 idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
92 104
93 if (likely(!irqd_is_setaffinity_pending(idata)))
94 return;
95
96 if (unlikely(irqd_irq_disabled(idata))) 105 if (unlikely(irqd_irq_disabled(idata)))
97 return; 106 return;
98 107