author     Linus Torvalds <torvalds@linux-foundation.org>    2019-01-27 15:02:00 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2019-01-27 15:02:00 -0500
commit     8a5f06056a25ac7dbca2b0505cc0fe8ffb6947c1 (patch)
tree       d864baa3498f141ab7433c878390b50d7febcb98
parent     351e1aa6cb4b136e3f772605071d7a8db3c5b4e0 (diff)
parent     625210cfa6c0c26ea422f655bf68288176f174e6 (diff)
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Thomas Gleixner:
 "A set of fixes for x86:

   - Fix the swapped outb() parameters in the KASLR code

   - Fix the PKEY handling at fork which missed to preserve the pkey
     state for the child. Comes with a test case to validate that.

   - Fix the entry stack handling for XEN PV to respect that XEN PV
     systems enter the function already on the current thread stack and
     not on the trampoline.

   - Fix kexec load failure caused by using a stale value when the
     kexec_buf structure is reused for subsequent allocations.

   - Fix a bogus sizeof() in the memory encryption code

   - Enforce PCI dependency for the Intel Low Power Subsystem

   - Enforce PCI_LOCKLESS_CONFIG when PCI is enabled"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/Kconfig: Select PCI_LOCKLESS_CONFIG if PCI is enabled
  x86/entry/64/compat: Fix stack switching for XEN PV
  x86/kexec: Fix a kexec_file_load() failure
  x86/mm/mem_encrypt: Fix erroneous sizeof()
  x86/selftests/pkeys: Fork() to check for state being preserved
  x86/pkeys: Properly copy pkey state at fork()
  x86/kaslr: Fix incorrect i8254 outb() parameters
  x86/intel/lpss: Make PCI dependency explicit
-rw-r--r--  arch/x86/Kconfig                               |  2
-rw-r--r--  arch/x86/entry/entry_64_compat.S               |  6
-rw-r--r--  arch/x86/include/asm/mmu_context.h             | 18
-rw-r--r--  arch/x86/kernel/crash.c                        |  1
-rw-r--r--  arch/x86/kernel/kexec-bzimage64.c              |  2
-rw-r--r--  arch/x86/lib/kaslr.c                           |  4
-rw-r--r--  arch/x86/mm/mem_encrypt_identity.c             |  4
-rw-r--r--  tools/testing/selftests/x86/protection_keys.c  | 41
8 files changed, 61 insertions, 17 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 4b4a7f32b68e..26387c7bf305 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -198,7 +198,7 @@ config X86
         select IRQ_FORCED_THREADING
         select NEED_SG_DMA_LENGTH
         select PCI_DOMAINS                      if PCI
-        select PCI_LOCKLESS_CONFIG
+        select PCI_LOCKLESS_CONFIG              if PCI
         select PERF_EVENTS
         select RTC_LIB
         select RTC_MC146818_LIB
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 8eaf8952c408..39913770a44d 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -361,7 +361,8 @@ ENTRY(entry_INT80_compat)

         /* Need to switch before accessing the thread stack. */
         SWITCH_TO_KERNEL_CR3 scratch_reg=%rdi
-        movq    %rsp, %rdi
+        /* In the Xen PV case we already run on the thread stack. */
+        ALTERNATIVE "movq %rsp, %rdi", "jmp .Lint80_keep_stack", X86_FEATURE_XENPV
         movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp

         pushq   6*8(%rdi)               /* regs->ss */
@@ -370,8 +371,9 @@ ENTRY(entry_INT80_compat)
         pushq   3*8(%rdi)               /* regs->cs */
         pushq   2*8(%rdi)               /* regs->ip */
         pushq   1*8(%rdi)               /* regs->orig_ax */
-
         pushq   (%rdi)                  /* pt_regs->di */
+.Lint80_keep_stack:
+
         pushq   %rsi                    /* pt_regs->si */
         xorl    %esi, %esi              /* nospec   si */
         pushq   %rdx                    /* pt_regs->dx */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 0ca50611e8ce..19d18fae6ec6 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -178,6 +178,10 @@ static inline void switch_ldt(struct mm_struct *prev, struct mm_struct *next)

 void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);

+/*
+ * Init a new mm.  Used on mm copies, like at fork()
+ * and on mm's that are brand-new, like at execve().
+ */
 static inline int init_new_context(struct task_struct *tsk,
                                    struct mm_struct *mm)
 {
@@ -228,8 +232,22 @@ do { \
 } while (0)
 #endif

+static inline void arch_dup_pkeys(struct mm_struct *oldmm,
+                                  struct mm_struct *mm)
+{
+#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+        if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+                return;
+
+        /* Duplicate the oldmm pkey state in mm: */
+        mm->context.pkey_allocation_map = oldmm->context.pkey_allocation_map;
+        mm->context.execute_only_pkey   = oldmm->context.execute_only_pkey;
+#endif
+}
+
 static inline int arch_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
 {
+        arch_dup_pkeys(oldmm, mm);
         paravirt_arch_dup_mmap(oldmm, mm);
         return ldt_dup_context(oldmm, mm);
 }
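The arch_dup_pkeys() hunk above is what the protection_keys.c selftest further down exercises: without it, a child created by fork() starts with a fresh pkey allocation map, so keys the parent had already allocated can be handed out a second time. A small stand-alone illustration of that expectation, not part of the patch; it assumes x86 hardware with OSPKE and glibc 2.27+ for pkey_alloc()/pkey_free().

/* Illustrative only: allocate a pkey, fork(), and check that the child does
 * not hand the same key out again.  Exits 77 (skip) when protection keys are
 * unsupported. */
#define _GNU_SOURCE
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int pkey = pkey_alloc(0, 0);
        if (pkey < 0) {
                perror("pkey_alloc");
                return 77;
        }

        pid_t pid = fork();
        if (pid == 0) {
                /* Child: with the fix, the parent's key is still marked
                 * allocated here, so a fresh allocation must differ. */
                int child_key = pkey_alloc(0, 0);
                printf("parent key %d, child key %d\n", pkey, child_key);
                _exit(child_key == pkey ? 1 : 0);
        }

        int status;
        waitpid(pid, &status, 0);
        pkey_free(pkey);
        return WEXITSTATUS(status);
}

With the fix applied the child's pkey_alloc() returns a distinct key and the program exits 0.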
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
index c8b07d8ea5a2..17ffc869cab8 100644
--- a/arch/x86/kernel/crash.c
+++ b/arch/x86/kernel/crash.c
@@ -470,6 +470,7 @@ int crash_load_segments(struct kimage *image)

         kbuf.memsz = kbuf.bufsz;
         kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
+        kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
         ret = kexec_add_buffer(&kbuf);
         if (ret) {
                 vfree((void *)image->arch.elf_headers);
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 278cd07228dd..0d5efa34f359 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -434,6 +434,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
         kbuf.memsz = PAGE_ALIGN(header->init_size);
         kbuf.buf_align = header->kernel_alignment;
         kbuf.buf_min = MIN_KERNEL_LOAD_ADDR;
+        kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
         ret = kexec_add_buffer(&kbuf);
         if (ret)
                 goto out_free_params;
@@ -448,6 +449,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel,
                 kbuf.bufsz = kbuf.memsz = initrd_len;
                 kbuf.buf_align = PAGE_SIZE;
                 kbuf.buf_min = MIN_INITRD_LOAD_ADDR;
+                kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
                 ret = kexec_add_buffer(&kbuf);
                 if (ret)
                         goto out_free_params;
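The kexec hunks above address the stale-value problem called out in the pull message: the same on-stack kexec_buf is reused for consecutive allocations, and an address left behind in kbuf.mem from the previous call is treated as a caller-chosen location, so the search for a fresh memory hole is skipped. A toy userspace model of that behavior, simplified and hypothetical; the struct and helper below only mimic the kernel's logic and are not kernel code.

#include <stdint.h>
#include <stdio.h>

#define KEXEC_BUF_MEM_UNKNOWN 0         /* sentinel used for this sketch */

struct kexec_buf {                      /* trimmed-down stand-in */
        uint64_t mem;                   /* load address, or "unknown" */
};

/* Roughly modeled on how the allocator treats a pre-set ->mem: an address
 * already present is taken as the caller's choice and the search is skipped. */
static void locate_mem_hole(struct kexec_buf *kbuf, uint64_t free_addr)
{
        if (kbuf->mem != KEXEC_BUF_MEM_UNKNOWN)
                return;                 /* stale address silently reused */
        kbuf->mem = free_addr;
}

int main(void)
{
        struct kexec_buf kbuf = { .mem = KEXEC_BUF_MEM_UNKNOWN };

        locate_mem_hole(&kbuf, 0x1000000);              /* kernel image */
        printf("first buffer  at %#llx\n", (unsigned long long)kbuf.mem);

        /* Without resetting ->mem (the bug), the second allocation reuses
         * the first address instead of finding a new hole: */
        locate_mem_hole(&kbuf, 0x2000000);              /* initrd */
        printf("second buffer at %#llx\n", (unsigned long long)kbuf.mem);

        /* The fix: reset before every kexec_add_buffer() call. */
        kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
        locate_mem_hole(&kbuf, 0x2000000);
        printf("after reset   at %#llx\n", (unsigned long long)kbuf.mem);
        return 0;
}

Resetting kbuf.mem before each kexec_add_buffer() call, as the patch does, restores the search for a fresh region.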
diff --git a/arch/x86/lib/kaslr.c b/arch/x86/lib/kaslr.c
index 79778ab200e4..a53665116458 100644
--- a/arch/x86/lib/kaslr.c
+++ b/arch/x86/lib/kaslr.c
@@ -36,8 +36,8 @@ static inline u16 i8254(void)
         u16 status, timer;

         do {
-                outb(I8254_PORT_CONTROL,
-                     I8254_CMD_READBACK | I8254_SELECT_COUNTER0);
+                outb(I8254_CMD_READBACK | I8254_SELECT_COUNTER0,
+                     I8254_PORT_CONTROL);
                 status = inb(I8254_PORT_COUNTER0);
                 timer  = inb(I8254_PORT_COUNTER0);
                 timer |= inb(I8254_PORT_COUNTER0) << 8;
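The kaslr.c change is purely an argument swap: the kernel's outb() takes the value first and the I/O port second, while the buggy call used the (port, value) order, so the read-back command never reached the 8254 control port. A compilable sketch that mirrors the argument order; illustrative only, demo_outb() is a stand-in, and actually touching the ports needs root for ioperm() on x86/glibc.

#include <stdint.h>
#include <stdio.h>
#include <sys/io.h>             /* ioperm() */

typedef uint8_t u8;
typedef uint16_t u16;

/* Mirrors the kernel helper's prototype: value first, I/O port second. */
static inline void demo_outb(u8 value, u16 port)
{
        asm volatile("outb %0, %1" : : "a"(value), "Nd"(port));
}

int main(void)
{
        /* Ports/commands inlined for brevity: 0x43 is the 8254 control port,
         * 0xc0 | 0x02 the read-back command selecting counter 0. */
        if (ioperm(0x40, 4, 1)) {
                perror("ioperm (need root)");
                return 77;
        }
        demo_outb(0xc0 | 0x02, 0x43);   /* command byte -> control port */
        return 0;
}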
diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c
index a19ef1a416ff..4aa9b1480866 100644
--- a/arch/x86/mm/mem_encrypt_identity.c
+++ b/arch/x86/mm/mem_encrypt_identity.c
@@ -158,8 +158,8 @@ static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
         pmd = pmd_offset(pud, ppd->vaddr);
         if (pmd_none(*pmd)) {
                 pte = ppd->pgtable_area;
-                memset(pte, 0, sizeof(pte) * PTRS_PER_PTE);
-                ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE;
+                memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
+                ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
                 set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
         }

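In sme_populate_pgd(), pte is a pte_t *, so sizeof(pte) measures the pointer rather than one page-table entry; sizeof(*pte) is what the expression means. On x86-64 both happen to be 8 bytes, which is why the old code still worked, but the fix makes the intent correct. A stand-alone illustration with a toy pte_t, not the kernel's definition.

#include <stdio.h>

/* Toy stand-in for pte_t, only to show what each sizeof() measures; the real
 * definition lives in the kernel's x86 pgtable headers. */
typedef struct { unsigned long pte; } pte_t;

#define PTRS_PER_PTE 512

int main(void)
{
        static pte_t table[PTRS_PER_PTE];
        pte_t *pte = table;

        printf("sizeof(pte)   = %zu  (size of the pointer)\n", sizeof(pte));
        printf("sizeof(*pte)  = %zu  (size of one entry)\n", sizeof(*pte));
        printf("cleared bytes = %zu  (sizeof(*pte) * PTRS_PER_PTE)\n",
               sizeof(*pte) * PTRS_PER_PTE);
        return 0;
}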
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index 460b4bdf4c1e..5d546dcdbc80 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -1133,6 +1133,21 @@ void test_pkey_syscalls_bad_args(int *ptr, u16 pkey)
         pkey_assert(err);
 }

+void become_child(void)
+{
+        pid_t forkret;
+
+        forkret = fork();
+        pkey_assert(forkret >= 0);
+        dprintf3("[%d] fork() ret: %d\n", getpid(), forkret);
+
+        if (!forkret) {
+                /* in the child */
+                return;
+        }
+        exit(0);
+}
+
 /* Assumes that all pkeys other than 'pkey' are unallocated */
 void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
 {
@@ -1141,7 +1156,7 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
         int nr_allocated_pkeys = 0;
         int i;

-        for (i = 0; i < NR_PKEYS*2; i++) {
+        for (i = 0; i < NR_PKEYS*3; i++) {
                 int new_pkey;
                 dprintf1("%s() alloc loop: %d\n", __func__, i);
                 new_pkey = alloc_pkey();
@@ -1152,21 +1167,27 @@ void test_pkey_alloc_exhaust(int *ptr, u16 pkey)
                 if ((new_pkey == -1) && (errno == ENOSPC)) {
                         dprintf2("%s() failed to allocate pkey after %d tries\n",
                                 __func__, nr_allocated_pkeys);
-                        break;
+                } else {
+                        /*
+                         * Ensure the number of successes never
+                         * exceeds the number of keys supported
+                         * in the hardware.
+                         */
+                        pkey_assert(nr_allocated_pkeys < NR_PKEYS);
+                        allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
                 }
-                pkey_assert(nr_allocated_pkeys < NR_PKEYS);
-                allocated_pkeys[nr_allocated_pkeys++] = new_pkey;
+
+                /*
+                 * Make sure that allocation state is properly
+                 * preserved across fork().
+                 */
+                if (i == NR_PKEYS*2)
+                        become_child();
         }

         dprintf3("%s()::%d\n", __func__, __LINE__);

         /*
-         * ensure it did not reach the end of the loop without
-         * failure:
-         */
-        pkey_assert(i < NR_PKEYS*2);
-
-        /*
          * There are 16 pkeys supported in hardware.  Three are
          * allocated by the time we get here:
          *   1. The default key (0)