diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-26 22:45:29 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-26 22:45:29 -0500 |
| commit | 2003cd90c473f66d34114bc61c49e7d74d370894 (patch) | |
| tree | df966071c6721c30cb60fb94a8cfbbbea40827e9 | |
| parent | 24e55910e4801d772f95becde20b526b8b10388d (diff) | |
| parent | a8aed3e0752b4beb2e37cbed6df69faae88268da (diff) | |
Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Ingo Molnar.
* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/mm/pageattr: Prevent PSE and GLOBAL leftovers to confuse pmd/pte_present and pmd_huge
Revert "x86, mm: Make spurious_fault check explicitly check the PRESENT bit"
x86/mm/numa: Don't check if node is NUMA_NO_NODE
x86, efi: Make "noefi" really disable EFI runtime services
x86/apic: Fix parsing of the 'lapic' cmdline option
| -rw-r--r-- | arch/x86/kernel/apic/apic.c | 2 | ||||
| -rw-r--r-- | arch/x86/mm/fault.c | 8 | ||||
| -rw-r--r-- | arch/x86/mm/numa.c | 3 | ||||
| -rw-r--r-- | arch/x86/mm/pageattr.c | 50 | ||||
| -rw-r--r-- | arch/x86/platform/efi/efi.c | 5 |
5 files changed, 53 insertions, 15 deletions
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index a5b4dce1b7ac..904611bf0e5a 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
| @@ -131,7 +131,7 @@ static int __init parse_lapic(char *arg) | |||
| 131 | { | 131 | { |
| 132 | if (config_enabled(CONFIG_X86_32) && !arg) | 132 | if (config_enabled(CONFIG_X86_32) && !arg) |
| 133 | force_enable_local_apic = 1; | 133 | force_enable_local_apic = 1; |
| 134 | else if (!strncmp(arg, "notscdeadline", 13)) | 134 | else if (arg && !strncmp(arg, "notscdeadline", 13)) |
| 135 | setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); | 135 | setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER); |
| 136 | return 0; | 136 | return 0; |
| 137 | } | 137 | } |
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index fb674fd3fc22..2b97525246d4 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
| @@ -939,14 +939,8 @@ spurious_fault(unsigned long error_code, unsigned long address) | |||
| 939 | if (pmd_large(*pmd)) | 939 | if (pmd_large(*pmd)) |
| 940 | return spurious_fault_check(error_code, (pte_t *) pmd); | 940 | return spurious_fault_check(error_code, (pte_t *) pmd); |
| 941 | 941 | ||
| 942 | /* | ||
| 943 | * Note: don't use pte_present() here, since it returns true | ||
| 944 | * if the _PAGE_PROTNONE bit is set. However, this aliases the | ||
| 945 | * _PAGE_GLOBAL bit, which for kernel pages give false positives | ||
| 946 | * when CONFIG_DEBUG_PAGEALLOC is used. | ||
| 947 | */ | ||
| 948 | pte = pte_offset_kernel(pmd, address); | 942 | pte = pte_offset_kernel(pmd, address); |
| 949 | if (!(pte_flags(*pte) & _PAGE_PRESENT)) | 943 | if (!pte_present(*pte)) |
| 950 | return 0; | 944 | return 0; |
| 951 | 945 | ||
| 952 | ret = spurious_fault_check(error_code, pte); | 946 | ret = spurious_fault_check(error_code, pte); |
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index dfd30259eb89..ff3633c794c6 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c | |||
| @@ -97,8 +97,7 @@ void numa_set_node(int cpu, int node) | |||
| 97 | #endif | 97 | #endif |
| 98 | per_cpu(x86_cpu_to_node_map, cpu) = node; | 98 | per_cpu(x86_cpu_to_node_map, cpu) = node; |
| 99 | 99 | ||
| 100 | if (node != NUMA_NO_NODE) | 100 | set_cpu_numa_node(cpu, node); |
| 101 | set_cpu_numa_node(cpu, node); | ||
| 102 | } | 101 | } |
| 103 | 102 | ||
| 104 | void numa_clear_node(int cpu) | 103 | void numa_clear_node(int cpu) |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index ca1f1c2bb7be..091934e1d0d9 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
| @@ -473,6 +473,19 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, | |||
| 473 | pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); | 473 | pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); |
| 474 | 474 | ||
| 475 | /* | 475 | /* |
| 476 | * Set the PSE and GLOBAL flags only if the PRESENT flag is | ||
| 477 | * set otherwise pmd_present/pmd_huge will return true even on | ||
| 478 | * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL | ||
| 479 | * for the ancient hardware that doesn't support it. | ||
| 480 | */ | ||
| 481 | if (pgprot_val(new_prot) & _PAGE_PRESENT) | ||
| 482 | pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL; | ||
| 483 | else | ||
| 484 | pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); | ||
| 485 | |||
| 486 | new_prot = canon_pgprot(new_prot); | ||
| 487 | |||
| 488 | /* | ||
| 476 | * old_pte points to the large page base address. So we need | 489 | * old_pte points to the large page base address. So we need |
| 477 | * to add the offset of the virtual address: | 490 | * to add the offset of the virtual address: |
| 478 | */ | 491 | */ |
| @@ -517,7 +530,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, | |||
| 517 | * The address is aligned and the number of pages | 530 | * The address is aligned and the number of pages |
| 518 | * covers the full page. | 531 | * covers the full page. |
| 519 | */ | 532 | */ |
| 520 | new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot)); | 533 | new_pte = pfn_pte(pte_pfn(old_pte), new_prot); |
| 521 | __set_pmd_pte(kpte, address, new_pte); | 534 | __set_pmd_pte(kpte, address, new_pte); |
| 522 | cpa->flags |= CPA_FLUSHTLB; | 535 | cpa->flags |= CPA_FLUSHTLB; |
| 523 | do_split = 0; | 536 | do_split = 0; |
| @@ -561,16 +574,35 @@ int __split_large_page(pte_t *kpte, unsigned long address, pte_t *pbase) | |||
| 561 | #ifdef CONFIG_X86_64 | 574 | #ifdef CONFIG_X86_64 |
| 562 | if (level == PG_LEVEL_1G) { | 575 | if (level == PG_LEVEL_1G) { |
| 563 | pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; | 576 | pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; |
| 564 | pgprot_val(ref_prot) |= _PAGE_PSE; | 577 | /* |
| 578 | * Set the PSE flags only if the PRESENT flag is set | ||
| 579 | * otherwise pmd_present/pmd_huge will return true | ||
| 580 | * even on a non present pmd. | ||
| 581 | */ | ||
| 582 | if (pgprot_val(ref_prot) & _PAGE_PRESENT) | ||
| 583 | pgprot_val(ref_prot) |= _PAGE_PSE; | ||
| 584 | else | ||
| 585 | pgprot_val(ref_prot) &= ~_PAGE_PSE; | ||
| 565 | } | 586 | } |
| 566 | #endif | 587 | #endif |
| 567 | 588 | ||
| 568 | /* | 589 | /* |
| 590 | * Set the GLOBAL flags only if the PRESENT flag is set | ||
| 591 | * otherwise pmd/pte_present will return true even on a non | ||
| 592 | * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL | ||
| 593 | * for the ancient hardware that doesn't support it. | ||
| 594 | */ | ||
| 595 | if (pgprot_val(ref_prot) & _PAGE_PRESENT) | ||
| 596 | pgprot_val(ref_prot) |= _PAGE_GLOBAL; | ||
| 597 | else | ||
| 598 | pgprot_val(ref_prot) &= ~_PAGE_GLOBAL; | ||
| 599 | |||
| 600 | /* | ||
| 569 | * Get the target pfn from the original entry: | 601 | * Get the target pfn from the original entry: |
| 570 | */ | 602 | */ |
| 571 | pfn = pte_pfn(*kpte); | 603 | pfn = pte_pfn(*kpte); |
| 572 | for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) | 604 | for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) |
| 573 | set_pte(&pbase[i], pfn_pte(pfn, ref_prot)); | 605 | set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot))); |
| 574 | 606 | ||
| 575 | if (pfn_range_is_mapped(PFN_DOWN(__pa(address)), | 607 | if (pfn_range_is_mapped(PFN_DOWN(__pa(address)), |
| 576 | PFN_DOWN(__pa(address)) + 1)) | 608 | PFN_DOWN(__pa(address)) + 1)) |
| @@ -685,6 +717,18 @@ repeat: | |||
| 685 | new_prot = static_protections(new_prot, address, pfn); | 717 | new_prot = static_protections(new_prot, address, pfn); |
| 686 | 718 | ||
| 687 | /* | 719 | /* |
| 720 | * Set the GLOBAL flags only if the PRESENT flag is | ||
| 721 | * set otherwise pte_present will return true even on | ||
| 722 | * a non present pte. The canon_pgprot will clear | ||
| 723 | * _PAGE_GLOBAL for the ancient hardware that doesn't | ||
| 724 | * support it. | ||
| 725 | */ | ||
| 726 | if (pgprot_val(new_prot) & _PAGE_PRESENT) | ||
| 727 | pgprot_val(new_prot) |= _PAGE_GLOBAL; | ||
| 728 | else | ||
| 729 | pgprot_val(new_prot) &= ~_PAGE_GLOBAL; | ||
| 730 | |||
| 731 | /* | ||
| 688 | * We need to keep the pfn from the existing PTE, | 732 | * We need to keep the pfn from the existing PTE, |
| 689 | * after all we're only going to change it's attributes | 733 | * after all we're only going to change it's attributes |
| 690 | * not the memory it points to | 734 | * not the memory it points to |
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 70b2a3a305d6..2f81db40d7ca 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c | |||
| @@ -85,9 +85,10 @@ int efi_enabled(int facility) | |||
| 85 | } | 85 | } |
| 86 | EXPORT_SYMBOL(efi_enabled); | 86 | EXPORT_SYMBOL(efi_enabled); |
| 87 | 87 | ||
| 88 | static bool disable_runtime = false; | ||
| 88 | static int __init setup_noefi(char *arg) | 89 | static int __init setup_noefi(char *arg) |
| 89 | { | 90 | { |
| 90 | clear_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); | 91 | disable_runtime = true; |
| 91 | return 0; | 92 | return 0; |
| 92 | } | 93 | } |
| 93 | early_param("noefi", setup_noefi); | 94 | early_param("noefi", setup_noefi); |
| @@ -734,7 +735,7 @@ void __init efi_init(void) | |||
| 734 | if (!efi_is_native()) | 735 | if (!efi_is_native()) |
| 735 | pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); | 736 | pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); |
| 736 | else { | 737 | else { |
| 737 | if (efi_runtime_init()) | 738 | if (disable_runtime || efi_runtime_init()) |
| 738 | return; | 739 | return; |
| 739 | set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); | 740 | set_bit(EFI_RUNTIME_SERVICES, &x86_efi_facility); |
| 740 | } | 741 | } |
