author    Linus Torvalds <torvalds@linux-foundation.org>    2018-04-02 18:45:30 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-04-02 18:45:30 -0400
commit    d22fff81418edc92be534cad8d59da914049bf69 (patch)
tree      96b22b20bbc789a76e744bcfc11a7f0854b62ece /arch/x86/mm/fault.c
parent    986b37c0ae4f0a3f93d8974d03a9cbc1502dd377 (diff)
parent    eaeb8e76cd5751e805f6e4a3fcec91d283e3b0c2 (diff)
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm updates from Ingo Molnar:

 - Extend the memmap= boot parameter syntax to allow the redeclaration
   and dropping of existing ranges, and to support all e820 range types
   (Jan H. Schönherr)

 - Improve the W+X boot time security checks to remove false positive
   warnings on Xen (Jan Beulich)

 - Support booting as Xen PVH guest (Juergen Gross)

 - Improved 5-level paging (LA57) support, in particular it's possible
   now to have a single kernel image for both 4-level and 5-level
   hardware (Kirill A. Shutemov)

 - AMD hardware RAM encryption support (SME/SEV) fixes (Tom Lendacky)

 - Preparatory commits for hardware-encrypted RAM support on Intel CPUs
   (Kirill A. Shutemov)

 - Improved Intel-MID support (Andy Shevchenko)

 - Show EFI page tables in page_tables debug files (Andy Lutomirski)

 - ... plus misc fixes and smaller cleanups

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (56 commits)
  x86/cpu/tme: Fix spelling: "configuation" -> "configuration"
  x86/boot: Fix SEV boot failure from change to __PHYSICAL_MASK_SHIFT
  x86/mm: Update comment in detect_tme() regarding x86_phys_bits
  x86/mm/32: Remove unused node_memmap_size_bytes() & CONFIG_NEED_NODE_MEMMAP_SIZE logic
  x86/mm: Remove pointless checks in vmalloc_fault
  x86/platform/intel-mid: Add special handling for ACPI HW reduced platforms
  ACPI, x86/boot: Introduce the ->reduced_hw_early_init() ACPI callback
  ACPI, x86/boot: Split out acpi_generic_reduce_hw_init() and export
  x86/pconfig: Provide defines and helper to run MKTME_KEY_PROG leaf
  x86/pconfig: Detect PCONFIG targets
  x86/tme: Detect if TME and MKTME is activated by BIOS
  x86/boot/compressed/64: Handle 5-level paging boot if kernel is above 4G
  x86/boot/compressed/64: Use page table in trampoline memory
  x86/boot/compressed/64: Use stack from trampoline memory
  x86/boot/compressed/64: Make sure we have a 32-bit code segment
  x86/mm: Do not use paravirtualized calls in native_set_p4d()
  kdump, vmcoreinfo: Export pgtable_l5_enabled value
  x86/boot/compressed/64: Prepare new top-level page table for trampoline
  x86/boot/compressed/64: Set up trampoline memory
  x86/boot/compressed/64: Save and restore trampoline memory
  ...
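The "5-level paging (LA57)" item is the one that reaches fault.c in the diff below: the same kernel image now decides at boot whether it runs with 4 or 5 page-table levels, instead of fixing that choice at build time via CONFIG_PGTABLE_LEVELS. A minimal, purely illustrative userspace sketch of that compile-time-to-runtime pattern follows; it is not kernel code, detect_la57() and walk_page_tables() are hypothetical stand-ins, and only the pgtable_l5_enabled name is taken from the diff.

#include <stdbool.h>
#include <stdio.h>

/* Boot-time flag consulted at runtime, instead of a build-time constant. */
static bool pgtable_l5_enabled;

/* Stand-in for the CPUID/CR4-based LA57 probe done during early boot. */
static bool detect_la57(void)
{
        return false;
}

/* Code paths branch on the runtime flag rather than on an #ifdef. */
static void walk_page_tables(void)
{
        if (pgtable_l5_enabled)
                printf("5-level walk: pgd -> p4d -> pud -> pmd -> pte\n");
        else
                printf("4-level walk: pgd (p4d folded) -> pud -> pmd -> pte\n");
}

int main(void)
{
        pgtable_l5_enabled = detect_la57();
        walk_page_tables();
        return 0;
}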
Diffstat (limited to 'arch/x86/mm/fault.c')
-rw-r--r--  arch/x86/mm/fault.c  60
1 file changed, 19 insertions(+), 41 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index f75ea0748b9f..73bd8c95ac71 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -417,11 +417,11 @@ void vmalloc_sync_all(void)
  */
 static noinline int vmalloc_fault(unsigned long address)
 {
-	pgd_t *pgd, *pgd_ref;
-	p4d_t *p4d, *p4d_ref;
-	pud_t *pud, *pud_ref;
-	pmd_t *pmd, *pmd_ref;
-	pte_t *pte, *pte_ref;
+	pgd_t *pgd, *pgd_k;
+	p4d_t *p4d, *p4d_k;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
 
 	/* Make sure we are in vmalloc area: */
 	if (!(address >= VMALLOC_START && address < VMALLOC_END))
@@ -435,73 +435,51 @@ static noinline int vmalloc_fault(unsigned long address)
 	 * case just flush:
 	 */
 	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
-	pgd_ref = pgd_offset_k(address);
-	if (pgd_none(*pgd_ref))
+	pgd_k = pgd_offset_k(address);
+	if (pgd_none(*pgd_k))
 		return -1;
 
-	if (CONFIG_PGTABLE_LEVELS > 4) {
+	if (pgtable_l5_enabled) {
 		if (pgd_none(*pgd)) {
-			set_pgd(pgd, *pgd_ref);
+			set_pgd(pgd, *pgd_k);
 			arch_flush_lazy_mmu_mode();
 		} else {
-			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
 		}
 	}
 
 	/* With 4-level paging, copying happens on the p4d level. */
 	p4d = p4d_offset(pgd, address);
-	p4d_ref = p4d_offset(pgd_ref, address);
-	if (p4d_none(*p4d_ref))
+	p4d_k = p4d_offset(pgd_k, address);
+	if (p4d_none(*p4d_k))
 		return -1;
 
-	if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) {
-		set_p4d(p4d, *p4d_ref);
+	if (p4d_none(*p4d) && !pgtable_l5_enabled) {
+		set_p4d(p4d, *p4d_k);
 		arch_flush_lazy_mmu_mode();
 	} else {
-		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_ref));
+		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
 	}
 
-	/*
-	 * Below here mismatches are bugs because these lower tables
-	 * are shared:
-	 */
 	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);
 
 	pud = pud_offset(p4d, address);
-	pud_ref = pud_offset(p4d_ref, address);
-	if (pud_none(*pud_ref))
+	if (pud_none(*pud))
 		return -1;
 
-	if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
-		BUG();
-
 	if (pud_large(*pud))
 		return 0;
 
 	pmd = pmd_offset(pud, address);
-	pmd_ref = pmd_offset(pud_ref, address);
-	if (pmd_none(*pmd_ref))
+	if (pmd_none(*pmd))
 		return -1;
 
-	if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
-		BUG();
-
 	if (pmd_large(*pmd))
 		return 0;
 
-	pte_ref = pte_offset_kernel(pmd_ref, address);
-	if (!pte_present(*pte_ref))
-		return -1;
-
 	pte = pte_offset_kernel(pmd, address);
-
-	/*
-	 * Don't use pte_page here, because the mappings can point
-	 * outside mem_map, and the NUMA hash lookup cannot handle
-	 * that:
-	 */
-	if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
-		BUG();
+	if (!pte_present(*pte))
+		return -1;
 
 	return 0;
 }
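For readability, here is how the vmalloc_fault() walk reads after this change, stitched together from the context and "+" lines of the two hunks above. Lines between the hunks are not shown in the diff and are left out here, so this is a read-through rather than a standalone compilable unit; the short comments are editorial annotations, not part of the source.

	pgd_t *pgd, *pgd_k;
	p4d_t *p4d, *p4d_k;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* ... checks between the two hunks are not shown in this diff ... */

	/* Current page table (from CR3) vs. the kernel reference table: */
	pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address);
	pgd_k = pgd_offset_k(address);
	if (pgd_none(*pgd_k))
		return -1;

	/* With 5-level paging, copy a missing top-level entry on demand: */
	if (pgtable_l5_enabled) {
		if (pgd_none(*pgd)) {
			set_pgd(pgd, *pgd_k);
			arch_flush_lazy_mmu_mode();
		} else {
			BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k));
		}
	}

	/* With 4-level paging, copying happens on the p4d level. */
	p4d = p4d_offset(pgd, address);
	p4d_k = p4d_offset(pgd_k, address);
	if (p4d_none(*p4d_k))
		return -1;

	if (p4d_none(*p4d) && !pgtable_l5_enabled) {
		set_p4d(p4d, *p4d_k);
		arch_flush_lazy_mmu_mode();
	} else {
		BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k));
	}

	BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4);

	/* Below the synced level the tables are shared with the reference
	   page table, so the walk no longer compares against *_ref copies;
	   a missing entry simply means the address is not mapped: */
	pud = pud_offset(p4d, address);
	if (pud_none(*pud))
		return -1;
	if (pud_large(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return -1;
	if (pmd_large(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	if (!pte_present(*pte))
		return -1;

	return 0;
}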