author		Ingo Molnar <mingo@elte.hu>	2009-03-11 05:49:34 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-03-11 05:49:34 -0400
commit		d95c3578120e5bc4784069439f00ccb1b5f87717
tree		c819de31de3983f3d69f223ede07667ff23bf7da /arch/x86/mm/pageattr.c
parent		ba1d755a36f66101aa88ac9ebb54694def6ec38d
parent		78b020d035074fc3aa4d017353bb2c32e2aff56f
Merge branch 'x86/core' into cpus4096
Diffstat (limited to 'arch/x86/mm/pageattr.c')
-rw-r--r--	arch/x86/mm/pageattr.c	88
1 file changed, 60 insertions(+), 28 deletions(-)
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 4cf30dee8161..8253bc97587e 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -482,6 +482,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 	pbase = (pte_t *)page_address(base);
 	paravirt_alloc_pte(&init_mm, page_to_pfn(base));
 	ref_prot = pte_pgprot(pte_clrhuge(*kpte));
+	/*
+	 * If we ever want to utilize the PAT bit, we need to
+	 * update this function to make sure it's converted from
+	 * bit 12 to bit 7 when we cross from the 2MB level to
+	 * the 4K level:
+	 */
+	WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);
 
 #ifdef CONFIG_X86_64
 	if (level == PG_LEVEL_1G) {
@@ -508,18 +515,13 @@ static int split_large_page(pte_t *kpte, unsigned long address)
 #endif
 
 	/*
-	 * Install the new, split up pagetable. Important details here:
-	 *
-	 * On Intel the NX bit of all levels must be cleared to make a
-	 * page executable. See section 4.13.2 of Intel 64 and IA-32
-	 * Architectures Software Developer's Manual).
+	 * Install the new, split up pagetable.
 	 *
-	 * Mark the entry present. The current mapping might be
-	 * set to not present, which we preserved above.
+	 * We use the standard kernel pagetable protections for the new
+	 * pagetable protections, the actual ptes set above control the
+	 * primary protection behavior:
 	 */
-	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
-	pgprot_val(ref_prot) |= _PAGE_PRESENT;
-	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
+	__set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));
 	base = NULL;
 
 out_unlock:
@@ -534,6 +536,36 @@ out_unlock:
 	return 0;
 }
 
+static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
+			       int primary)
+{
+	/*
+	 * Ignore all non primary paths.
+	 */
+	if (!primary)
+		return 0;
+
+	/*
+	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
+	 * to have holes.
+	 * Also set numpages to '1' indicating that we processed cpa req for
+	 * one virtual address page and its pfn. TBD: numpages can be set based
+	 * on the initial value and the level returned by lookup_address().
+	 */
+	if (within(vaddr, PAGE_OFFSET,
+		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
+		cpa->numpages = 1;
+		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
+		return 0;
+	} else {
+		WARN(1, KERN_WARNING "CPA: called for zero pte. "
+		     "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
+		     *cpa->vaddr);
+
+		return -EFAULT;
+	}
+}
+
 static int __change_page_attr(struct cpa_data *cpa, int primary)
 {
 	unsigned long address;
@@ -545,23 +577,14 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 		address = cpa->vaddr[cpa->curpage];
 	else
 		address = *cpa->vaddr;
-
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
-		return 0;
+		return __cpa_process_fault(cpa, address, primary);
 
 	old_pte = *kpte;
-	if (!pte_val(old_pte)) {
-		if (!primary)
-			return 0;
-
-		/*
-		 * Special error value returned, indicating that the mapping
-		 * did not exist at this address.
-		 */
-		return -EFAULT;
-	}
+	if (!pte_val(old_pte))
+		return __cpa_process_fault(cpa, address, primary);
 
 	if (level == PG_LEVEL_4K) {
 		pte_t new_pte;
@@ -659,12 +682,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
 		vaddr = *cpa->vaddr;
 
 	if (!(within(vaddr, PAGE_OFFSET,
-		    PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
-#ifdef CONFIG_X86_64
-		|| within(vaddr, PAGE_OFFSET + (1UL<<32),
-		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
-#endif
-	)) {
+		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
 		alias_cpa = *cpa;
 		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
@@ -795,6 +813,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
 	vm_unmap_aliases();
 
+	/*
+	 * If we're called with lazy mmu updates enabled, the
+	 * in-memory pte state may be stale. Flush pending updates to
+	 * bring them up to date.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 	cpa.vaddr = addr;
 	cpa.numpages = numpages;
 	cpa.mask_set = mask_set;
@@ -837,6 +862,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	} else
 		cpa_flush_all(cache);
 
+	/*
+	 * If we've been called with lazy mmu updates enabled, then
+	 * make sure that everything gets flushed out before we
+	 * return.
+	 */
+	arch_flush_lazy_mmu_mode();
+
 out:
 	return ret;
 }