aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2019-06-16 13:28:14 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2019-06-16 13:28:14 -0400
commit963172d9c7e862654d3d24cbcafb33f33ae697a8 (patch)
treeae01933868d7ab7021818151a98503b2279aee21
parentefba92d58fa37d714d665deddb5cc6458b39bb88 (diff)
parent78f4e932f7760d965fb1569025d1576ab77557c5 (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner: "The accumulated fixes from this and last week: - Fix vmalloc TLB flush and map range calculations which lead to stale TLBs, spurious faults and other hard to diagnose issues. - Use fault_in_pages_writeable() for prefaulting the user stack in the FPU code as it's less fragile than the current solution - Use the PF_KTHREAD flag when checking for a kernel thread instead of current->mm as the latter can give the wrong answer due to use_mm() - Compute the vmemmap size correctly for KASLR and 5-Level paging. Otherwise this can end up with a way too small vmemmap area. - Make KASAN and 5-level paging work again by making sure that all invalid bits are masked out when computing the P4D offset. This worked before but got broken recently when the LDT remap area was moved. - Prevent a NULL pointer dereference in the resource control code which can be triggered with certain mount options when the requested resource is not available. - Enforce ordering of microcode loading vs. perf initialization on secondary CPUs. Otherwise perf tries to access a non-existing MSR as the boot CPU marked it as available. - Don't stop the resource control group walk early otherwise the control bitmaps are not updated correctly and become inconsistent. - Unbreak kgdb by returning 0 on success from kgdb_arch_set_breakpoint() instead of an error code. 
- Add more Icelake CPU model defines so depending changes can be queued in other trees" * 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: x86/microcode, cpuhotplug: Add a microcode loader CPU hotplug callback x86/kasan: Fix boot with 5-level paging and KASAN x86/fpu: Don't use current->mm to check for a kthread x86/kgdb: Return 0 from kgdb_arch_set_breakpoint() x86/resctrl: Prevent NULL pointer dereference when local MBM is disabled x86/resctrl: Don't stop walking closids when a locksetup group is found x86/fpu: Update kernel's FPU state before using for the fsave header x86/mm/KASLR: Compute the size of the vmemmap section properly x86/fpu: Use fault_in_pages_writeable() for pre-faulting x86/CPU: Add more Icelake model numbers mm/vmalloc: Avoid rare case of flushing TLB with weird arguments mm/vmalloc: Fix calculation of direct map addr range
-rw-r--r--arch/x86/include/asm/fpu/internal.h6
-rw-r--r--arch/x86/include/asm/intel-family.h3
-rw-r--r--arch/x86/kernel/cpu/microcode/core.c2
-rw-r--r--arch/x86/kernel/cpu/resctrl/monitor.c3
-rw-r--r--arch/x86/kernel/cpu/resctrl/rdtgroup.c7
-rw-r--r--arch/x86/kernel/fpu/core.c2
-rw-r--r--arch/x86/kernel/fpu/signal.c16
-rw-r--r--arch/x86/kernel/kgdb.c2
-rw-r--r--arch/x86/mm/kasan_init_64.c2
-rw-r--r--arch/x86/mm/kaslr.c11
-rw-r--r--include/linux/cpuhotplug.h1
-rw-r--r--mm/vmalloc.c14
12 files changed, 45 insertions, 24 deletions
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 9e27fa05a7ae..4c95c365058a 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -536,7 +536,7 @@ static inline void __fpregs_load_activate(void)
536 struct fpu *fpu = &current->thread.fpu; 536 struct fpu *fpu = &current->thread.fpu;
537 int cpu = smp_processor_id(); 537 int cpu = smp_processor_id();
538 538
539 if (WARN_ON_ONCE(current->mm == NULL)) 539 if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
540 return; 540 return;
541 541
542 if (!fpregs_state_valid(fpu, cpu)) { 542 if (!fpregs_state_valid(fpu, cpu)) {
@@ -567,11 +567,11 @@ static inline void __fpregs_load_activate(void)
567 * otherwise. 567 * otherwise.
568 * 568 *
569 * The FPU context is only stored/restored for a user task and 569 * The FPU context is only stored/restored for a user task and
570 * ->mm is used to distinguish between kernel and user threads. 570 * PF_KTHREAD is used to distinguish between kernel and user threads.
571 */ 571 */
572static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu) 572static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
573{ 573{
574 if (static_cpu_has(X86_FEATURE_FPU) && current->mm) { 574 if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
575 if (!copy_fpregs_to_fpstate(old_fpu)) 575 if (!copy_fpregs_to_fpstate(old_fpu))
576 old_fpu->last_cpu = -1; 576 old_fpu->last_cpu = -1;
577 else 577 else
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 9f15384c504a..310118805f57 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -52,6 +52,9 @@
52 52
53#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66 53#define INTEL_FAM6_CANNONLAKE_MOBILE 0x66
54 54
55#define INTEL_FAM6_ICELAKE_X 0x6A
56#define INTEL_FAM6_ICELAKE_XEON_D 0x6C
57#define INTEL_FAM6_ICELAKE_DESKTOP 0x7D
55#define INTEL_FAM6_ICELAKE_MOBILE 0x7E 58#define INTEL_FAM6_ICELAKE_MOBILE 0x7E
56 59
57/* "Small Core" Processors (Atom) */ 60/* "Small Core" Processors (Atom) */
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index 70a04436380e..a813987b5552 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -872,7 +872,7 @@ int __init microcode_init(void)
872 goto out_ucode_group; 872 goto out_ucode_group;
873 873
874 register_syscore_ops(&mc_syscore_ops); 874 register_syscore_ops(&mc_syscore_ops);
875 cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online", 875 cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
876 mc_cpu_online, mc_cpu_down_prep); 876 mc_cpu_online, mc_cpu_down_prep);
877 877
878 pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION); 878 pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
diff --git a/arch/x86/kernel/cpu/resctrl/monitor.c b/arch/x86/kernel/cpu/resctrl/monitor.c
index 7ee93125a211..397206f23d14 100644
--- a/arch/x86/kernel/cpu/resctrl/monitor.c
+++ b/arch/x86/kernel/cpu/resctrl/monitor.c
@@ -360,6 +360,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
360 struct list_head *head; 360 struct list_head *head;
361 struct rdtgroup *entry; 361 struct rdtgroup *entry;
362 362
363 if (!is_mbm_local_enabled())
364 return;
365
363 r_mba = &rdt_resources_all[RDT_RESOURCE_MBA]; 366 r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
364 closid = rgrp->closid; 367 closid = rgrp->closid;
365 rmid = rgrp->mon.rmid; 368 rmid = rgrp->mon.rmid;
diff --git a/arch/x86/kernel/cpu/resctrl/rdtgroup.c b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
index 2f48f208f7e2..2131b8bbaad7 100644
--- a/arch/x86/kernel/cpu/resctrl/rdtgroup.c
+++ b/arch/x86/kernel/cpu/resctrl/rdtgroup.c
@@ -2534,7 +2534,12 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
2534 if (closid_allocated(i) && i != closid) { 2534 if (closid_allocated(i) && i != closid) {
2535 mode = rdtgroup_mode_by_closid(i); 2535 mode = rdtgroup_mode_by_closid(i);
2536 if (mode == RDT_MODE_PSEUDO_LOCKSETUP) 2536 if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
2537 break; 2537 /*
2538 * ctrl values for locksetup aren't relevant
2539 * until the schemata is written, and the mode
2540 * becomes RDT_MODE_PSEUDO_LOCKED.
2541 */
2542 continue;
2538 /* 2543 /*
2539 * If CDP is active include peer domain's 2544 * If CDP is active include peer domain's
2540 * usage to ensure there is no overlap 2545 * usage to ensure there is no overlap
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 466fca686fb9..649fbc3fcf9f 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -102,7 +102,7 @@ static void __kernel_fpu_begin(void)
102 102
103 kernel_fpu_disable(); 103 kernel_fpu_disable();
104 104
105 if (current->mm) { 105 if (!(current->flags & PF_KTHREAD)) {
106 if (!test_thread_flag(TIF_NEED_FPU_LOAD)) { 106 if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
107 set_thread_flag(TIF_NEED_FPU_LOAD); 107 set_thread_flag(TIF_NEED_FPU_LOAD);
108 /* 108 /*
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 5a8d118bc423..0071b794ed19 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -5,6 +5,7 @@
5 5
6#include <linux/compat.h> 6#include <linux/compat.h>
7#include <linux/cpu.h> 7#include <linux/cpu.h>
8#include <linux/pagemap.h>
8 9
9#include <asm/fpu/internal.h> 10#include <asm/fpu/internal.h>
10#include <asm/fpu/signal.h> 11#include <asm/fpu/signal.h>
@@ -61,6 +62,11 @@ static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
61 struct user_i387_ia32_struct env; 62 struct user_i387_ia32_struct env;
62 struct _fpstate_32 __user *fp = buf; 63 struct _fpstate_32 __user *fp = buf;
63 64
65 fpregs_lock();
66 if (!test_thread_flag(TIF_NEED_FPU_LOAD))
67 copy_fxregs_to_kernel(&tsk->thread.fpu);
68 fpregs_unlock();
69
64 convert_from_fxsr(&env, tsk); 70 convert_from_fxsr(&env, tsk);
65 71
66 if (__copy_to_user(buf, &env, sizeof(env)) || 72 if (__copy_to_user(buf, &env, sizeof(env)) ||
@@ -189,15 +195,7 @@ retry:
189 fpregs_unlock(); 195 fpregs_unlock();
190 196
191 if (ret) { 197 if (ret) {
192 int aligned_size; 198 if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
193 int nr_pages;
194
195 aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
196 nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
197
198 ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages,
199 NULL, FOLL_WRITE);
200 if (ret == nr_pages)
201 goto retry; 199 goto retry;
202 return -EFAULT; 200 return -EFAULT;
203 } 201 }
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index 9a8c1648fc9a..6690c5652aeb 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -758,7 +758,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
758 BREAK_INSTR_SIZE); 758 BREAK_INSTR_SIZE);
759 bpt->type = BP_POKE_BREAKPOINT; 759 bpt->type = BP_POKE_BREAKPOINT;
760 760
761 return err; 761 return 0;
762} 762}
763 763
764int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) 764int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 8dc0fc0b1382..296da58f3013 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -199,7 +199,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
199 if (!pgtable_l5_enabled()) 199 if (!pgtable_l5_enabled())
200 return (p4d_t *)pgd; 200 return (p4d_t *)pgd;
201 201
202 p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK; 202 p4d = pgd_val(*pgd) & PTE_PFN_MASK;
203 p4d += __START_KERNEL_map - phys_base; 203 p4d += __START_KERNEL_map - phys_base;
204 return (p4d_t *)p4d + p4d_index(addr); 204 return (p4d_t *)p4d + p4d_index(addr);
205} 205}
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index dc3f058bdf9b..dc6182eecefa 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region {
52} kaslr_regions[] = { 52} kaslr_regions[] = {
53 { &page_offset_base, 0 }, 53 { &page_offset_base, 0 },
54 { &vmalloc_base, 0 }, 54 { &vmalloc_base, 0 },
55 { &vmemmap_base, 1 }, 55 { &vmemmap_base, 0 },
56}; 56};
57 57
58/* Get size in bytes used by the memory region */ 58/* Get size in bytes used by the memory region */
@@ -78,6 +78,7 @@ void __init kernel_randomize_memory(void)
78 unsigned long rand, memory_tb; 78 unsigned long rand, memory_tb;
79 struct rnd_state rand_state; 79 struct rnd_state rand_state;
80 unsigned long remain_entropy; 80 unsigned long remain_entropy;
81 unsigned long vmemmap_size;
81 82
82 vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4; 83 vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
83 vaddr = vaddr_start; 84 vaddr = vaddr_start;
@@ -109,6 +110,14 @@ void __init kernel_randomize_memory(void)
109 if (memory_tb < kaslr_regions[0].size_tb) 110 if (memory_tb < kaslr_regions[0].size_tb)
110 kaslr_regions[0].size_tb = memory_tb; 111 kaslr_regions[0].size_tb = memory_tb;
111 112
113 /*
114 * Calculate the vmemmap region size in TBs, aligned to a TB
115 * boundary.
116 */
117 vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
118 sizeof(struct page);
119 kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
120
112 /* Calculate entropy available between regions */ 121 /* Calculate entropy available between regions */
113 remain_entropy = vaddr_end - vaddr_start; 122 remain_entropy = vaddr_end - vaddr_start;
114 for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) 123 for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 6a381594608c..5c6062206760 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -101,6 +101,7 @@ enum cpuhp_state {
101 CPUHP_AP_IRQ_BCM2836_STARTING, 101 CPUHP_AP_IRQ_BCM2836_STARTING,
102 CPUHP_AP_IRQ_MIPS_GIC_STARTING, 102 CPUHP_AP_IRQ_MIPS_GIC_STARTING,
103 CPUHP_AP_ARM_MVEBU_COHERENCY, 103 CPUHP_AP_ARM_MVEBU_COHERENCY,
104 CPUHP_AP_MICROCODE_LOADER,
104 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, 105 CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
105 CPUHP_AP_PERF_X86_STARTING, 106 CPUHP_AP_PERF_X86_STARTING,
106 CPUHP_AP_PERF_X86_AMD_IBS_STARTING, 107 CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 7350a124524b..4c9e150e5ad3 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2123,9 +2123,9 @@ static inline void set_area_direct_map(const struct vm_struct *area,
2123/* Handle removing and resetting vm mappings related to the vm_struct. */ 2123/* Handle removing and resetting vm mappings related to the vm_struct. */
2124static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages) 2124static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2125{ 2125{
2126 unsigned long addr = (unsigned long)area->addr;
2127 unsigned long start = ULONG_MAX, end = 0; 2126 unsigned long start = ULONG_MAX, end = 0;
2128 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS; 2127 int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2128 int flush_dmap = 0;
2129 int i; 2129 int i;
2130 2130
2131 /* 2131 /*
@@ -2135,8 +2135,8 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2135 * execute permissions, without leaving a RW+X window. 2135 * execute permissions, without leaving a RW+X window.
2136 */ 2136 */
2137 if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) { 2137 if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
2138 set_memory_nx(addr, area->nr_pages); 2138 set_memory_nx((unsigned long)area->addr, area->nr_pages);
2139 set_memory_rw(addr, area->nr_pages); 2139 set_memory_rw((unsigned long)area->addr, area->nr_pages);
2140 } 2140 }
2141 2141
2142 remove_vm_area(area->addr); 2142 remove_vm_area(area->addr);
@@ -2160,9 +2160,11 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2160 * the vm_unmap_aliases() flush includes the direct map. 2160 * the vm_unmap_aliases() flush includes the direct map.
2161 */ 2161 */
2162 for (i = 0; i < area->nr_pages; i++) { 2162 for (i = 0; i < area->nr_pages; i++) {
2163 if (page_address(area->pages[i])) { 2163 unsigned long addr = (unsigned long)page_address(area->pages[i]);
2164 if (addr) {
2164 start = min(addr, start); 2165 start = min(addr, start);
2165 end = max(addr, end); 2166 end = max(addr + PAGE_SIZE, end);
2167 flush_dmap = 1;
2166 } 2168 }
2167 } 2169 }
2168 2170
@@ -2172,7 +2174,7 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2172 * reset the direct map permissions to the default. 2174 * reset the direct map permissions to the default.
2173 */ 2175 */
2174 set_area_direct_map(area, set_direct_map_invalid_noflush); 2176 set_area_direct_map(area, set_direct_map_invalid_noflush);
2175 _vm_unmap_aliases(start, end, 1); 2177 _vm_unmap_aliases(start, end, flush_dmap);
2176 set_area_direct_map(area, set_direct_map_default_noflush); 2178 set_area_direct_map(area, set_direct_map_default_noflush);
2177} 2179}
2178 2180