author      Mike Travis <travis@sgi.com>      2008-12-31 20:34:16 -0500
committer   Ingo Molnar <mingo@elte.hu>       2009-01-03 12:53:31 -0500
commit      7eb19553369c46cc1fa64caf120cbcab1b597f7c (patch)
tree        ef1a3beae706b9497c845d0a2557ceb4d2754998 /arch/x86/mm
parent      6092848a2a23b660150a38bc06f59d75838d70c8 (diff)
parent      8c384cdee3e04d6194a2c2b192b624754f990835 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux-2.6-cpumask into merge-rr-cpumask
Conflicts:
arch/x86/kernel/io_apic.c
kernel/rcuclassic.c
kernel/sched.c
kernel/time/tick-sched.c
Signed-off-by: Mike Travis <travis@sgi.com>
[ mingo@elte.hu: backmerged typo fix for io_apic.c ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--   arch/x86/mm/fault.c     |   2
-rw-r--r--   arch/x86/mm/init_32.c   |  33
-rw-r--r--   arch/x86/mm/init_64.c   |   2
-rw-r--r--   arch/x86/mm/ioremap.c   |   3
-rw-r--r--   arch/x86/mm/pat.c       | 236
5 files changed, 262 insertions(+), 14 deletions(-)
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 21e996a70d68..57ec8c86a877 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -393,7 +393,7 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 	if (pte && pte_present(*pte) && !pte_exec(*pte))
 		printk(KERN_CRIT "kernel tried to execute "
 			"NX-protected page - exploit attempt? "
-			"(uid: %d)\n", current->uid);
+			"(uid: %d)\n", current_uid());
 	}
 #endif
 
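
The fault.c hunk is part of the tree-wide move to the credentials API: UIDs are no longer read from task_struct fields but through accessors over current's credential set. A minimal sketch of the pattern, assuming the 2.6.29-era <linux/cred.h> where current_uid() is a plain uid_t accessor:

    #include <linux/cred.h>
    #include <linux/kernel.h>

    static void report_nx_violation(void)
    {
            /* current_uid() reads the uid from the RCU-managed credential
             * set (roughly current_cred()->uid) rather than poking at
             * task_struct directly, so it stays correct even if the
             * task's credentials are replaced. */
            printk(KERN_CRIT "exploit attempt? (uid: %d)\n", current_uid());
    }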
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index c483f4242079..8655b5bb0963 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -21,6 +21,7 @@
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/pagemap.h>
+#include <linux/pci.h>
 #include <linux/pfn.h>
 #include <linux/poison.h>
 #include <linux/bootmem.h>
@@ -67,7 +68,7 @@ static unsigned long __meminitdata table_top;
 
 static int __initdata after_init_bootmem;
 
-static __init void *alloc_low_page(unsigned long *phys)
+static __init void *alloc_low_page(void)
 {
 	unsigned long pfn = table_end++;
 	void *adr;
@@ -77,7 +78,6 @@ static __init void *alloc_low_page(unsigned long *phys)
 
 	adr = __va(pfn * PAGE_SIZE);
 	memset(adr, 0, PAGE_SIZE);
-	*phys = pfn * PAGE_SIZE;
 	return adr;
 }
 
@@ -92,16 +92,17 @@ static pmd_t * __init one_md_table_init(pgd_t *pgd)
 	pmd_t *pmd_table;
 
 #ifdef CONFIG_X86_PAE
-	unsigned long phys;
 	if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
 		if (after_init_bootmem)
 			pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
 		else
-			pmd_table = (pmd_t *)alloc_low_page(&phys);
+			pmd_table = (pmd_t *)alloc_low_page();
 		paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
 		set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
 		pud = pud_offset(pgd, 0);
 		BUG_ON(pmd_table != pmd_offset(pud, 0));
+
+		return pmd_table;
 	}
 #endif
 	pud = pud_offset(pgd, 0);
@@ -126,10 +127,8 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 		if (!page_table)
 			page_table =
 				(pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-	} else {
-		unsigned long phys;
-		page_table = (pte_t *)alloc_low_page(&phys);
-	}
+	} else
+		page_table = (pte_t *)alloc_low_page();
 
 	paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
 	set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
@@ -969,7 +968,7 @@ void __init mem_init(void)
 	int codesize, reservedpages, datasize, initsize;
 	int tmp;
 
-	start_periodic_check_for_corruption();
+	pci_iommu_alloc();
 
 #ifdef CONFIG_FLATMEM
 	BUG_ON(!mem_map);
@@ -1040,11 +1039,25 @@ void __init mem_init(void)
 		(unsigned long)&_text, (unsigned long)&_etext,
 		((unsigned long)&_etext - (unsigned long)&_text) >> 10);
 
+	/*
+	 * Check boundaries twice: Some fundamental inconsistencies can
+	 * be detected at build time already.
+	 */
+#define __FIXADDR_TOP (-PAGE_SIZE)
+#ifdef CONFIG_HIGHMEM
+	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
+	BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
+#endif
+#define high_memory (-128UL << 20)
+	BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
+#undef high_memory
+#undef __FIXADDR_TOP
+
 #ifdef CONFIG_HIGHMEM
 	BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
 	BUG_ON(VMALLOC_END > PKMAP_BASE);
 #endif
-	BUG_ON(VMALLOC_START > VMALLOC_END);
+	BUG_ON(VMALLOC_START >= VMALLOC_END);
 	BUG_ON((unsigned long)high_memory > VMALLOC_START);
 
 	if (boot_cpu_data.wp_works_ok < 0)
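
The new BUILD_BUG_ON block promotes the runtime layout checks to compile time by temporarily pinning __FIXADDR_TOP and high_memory to their worst-case constant values. A standalone sketch of the mechanism, mirroring the classic negative-array-size form of BUILD_BUG_ON from <linux/kernel.h> (the exact kernel definition varies by version):

    /* Evaluates to a compile error iff the condition is a non-zero
     * constant: char[1 - 2*1] would have negative size. */
    #define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))

    static void layout_checks(void)
    {
            BUILD_BUG_ON(sizeof(long) < sizeof(int));  /* false: compiles */
            /* Any condition that is true for the target aborts the build
             * here, instead of waiting for a BUG_ON to trip at boot. */
    }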
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 9db01db6e3cd..9f7a0d24d42a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -902,8 +902,6 @@ void __init mem_init(void)
 	long codesize, reservedpages, datasize, initsize;
 	unsigned long absent_pages;
 
-	start_periodic_check_for_corruption();
-
 	pci_iommu_alloc();
 
 	/* clear_bss() already clear the empty_zero_page */
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index d4c4307ff3e0..bd85d42819e1 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -223,7 +223,8 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	 * Check if the request spans more than any BAR in the iomem resource
 	 * tree.
 	 */
-	WARN_ON(iomem_map_sanity_check(phys_addr, size));
+	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
+		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");
 
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
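
The switch from WARN_ON to WARN_ONCE matters because __ioremap_caller can run on every mapping request: the sanity check still executes each time, but the log message and backtrace are emitted at most once per boot. A toy sketch of the usage pattern (the wrapper function here is hypothetical, not kernel API):

    #include <linux/kernel.h>

    /* Hypothetical helper: warn loudly the first time a request spans
     * multiple BARs, then stay quiet while still reporting the result.
     * WARN_ONCE() evaluates to the truth value of its condition, so the
     * caller can still branch on it. */
    static int span_check(int spans_multiple_bars)
    {
            return WARN_ONCE(spans_multiple_bars,
                             "mapping multiple BARs, continuing anyway\n");
    }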
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index eb1bf000d12e..85cbd3cd3723 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -596,6 +596,242 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
 	free_memtype(addr, addr + size);
 }
 
+/*
+ * Internal interface to reserve a range of physical memory with prot.
+ * Reserves non-RAM regions only; after a successful reserve_memtype,
+ * this function also keeps any identity mapping in sync with the new prot.
+ */
+static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t vma_prot)
+{
+	int is_ram = 0;
+	int id_sz, ret;
+	unsigned long flags;
+	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);
+
+	is_ram = pagerange_is_ram(paddr, paddr + size);
+
+	if (is_ram != 0) {
+		/*
+		 * For mapping RAM pages, drivers need to call
+		 * set_memory_[uc|wc|wb] directly, for reserve and free, before
+		 * setting up the PTE.
+		 */
+		WARN_ON_ONCE(1);
+		return 0;
+	}
+
+	ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
+	if (ret)
+		return ret;
+
+	if (flags != want_flags) {
+		free_memtype(paddr, paddr + size);
+		printk(KERN_ERR
+		"%s:%d map pfn expected mapping type %s for %Lx-%Lx, got %s\n",
+			current->comm, current->pid,
+			cattr_name(want_flags),
+			(unsigned long long)paddr,
+			(unsigned long long)(paddr + size),
+			cattr_name(flags));
+		return -EINVAL;
+	}
+
+	/* Need to keep identity mapping in sync */
+	if (paddr >= __pa(high_memory))
+		return 0;
+
+	id_sz = (__pa(high_memory) < paddr + size) ?
+				__pa(high_memory) - paddr :
+				size;
+
+	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+		free_memtype(paddr, paddr + size);
+		printk(KERN_ERR
+			"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
+			"for %Lx-%Lx\n",
+			current->comm, current->pid,
+			cattr_name(flags),
+			(unsigned long long)paddr,
+			(unsigned long long)(paddr + size));
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * Internal interface to free a range of physical memory.
+ * Frees non-RAM regions only.
+ */
+static void free_pfn_range(u64 paddr, unsigned long size)
+{
+	int is_ram;
+
+	is_ram = pagerange_is_ram(paddr, paddr + size);
+	if (is_ram == 0)
+		free_memtype(paddr, paddr + size);
+}
+
+/*
+ * track_pfn_vma_copy is called when a vma covering the pfnmap gets
+ * copied through copy_page_range().
+ *
+ * If the vma has a linear pfn mapping for the entire range, we get the prot
+ * from pte and reserve the entire vma range with single reserve_pfn_range call.
+ * Otherwise, we reserve the entire vma range, going through the PTEs page
+ * by page to get physical address and protection.
+ */
+int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+	int retval = 0;
+	unsigned long i, j;
+	resource_size_t paddr;
+	unsigned long prot;
+	unsigned long vma_start = vma->vm_start;
+	unsigned long vma_end = vma->vm_end;
+	unsigned long vma_size = vma_end - vma_start;
+
+	if (!pat_enabled)
+		return 0;
+
+	if (is_linear_pfn_mapping(vma)) {
+		/*
+		 * reserve the whole chunk covered by vma. We need the
+		 * starting address and protection from pte.
+		 */
+		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
+			WARN_ON_ONCE(1);
+			return -EINVAL;
+		}
+		return reserve_pfn_range(paddr, vma_size, __pgprot(prot));
+	}
+
+	/* reserve entire vma page by page, using pfn and prot from pte */
+	for (i = 0; i < vma_size; i += PAGE_SIZE) {
+		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
+			continue;
+
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, __pgprot(prot));
+		if (retval)
+			goto cleanup_ret;
+	}
+	return 0;
+
+cleanup_ret:
+	/* Reserve error: Cleanup partial reservation and return error */
+	for (j = 0; j < i; j += PAGE_SIZE) {
+		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
+			continue;
+
+		free_pfn_range(paddr, PAGE_SIZE);
+	}
+
+	return retval;
+}
+
+/*
+ * track_pfn_vma_new is called when a _new_ pfn mapping is being established
+ * for physical range indicated by pfn and size.
+ *
+ * prot is passed in as a parameter for the new mapping. If the vma has a
+ * linear pfn mapping for the entire range, we reserve the entire vma range
+ * with a single reserve_pfn_range call.
+ * Otherwise, we look at the pfn and size and reserve only the specified
+ * range page by page.
+ *
+ * Note that this function can be called with caller trying to map only a
+ * subrange/page inside the vma.
+ */
+int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t prot,
+			unsigned long pfn, unsigned long size)
+{
+	int retval = 0;
+	unsigned long i, j;
+	resource_size_t base_paddr;
+	resource_size_t paddr;
+	unsigned long vma_start = vma->vm_start;
+	unsigned long vma_end = vma->vm_end;
+	unsigned long vma_size = vma_end - vma_start;
+
+	if (!pat_enabled)
+		return 0;
+
+	if (is_linear_pfn_mapping(vma)) {
+		/* reserve the whole chunk starting from vm_pgoff */
+		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
+		return reserve_pfn_range(paddr, vma_size, prot);
+	}
+
+	/* reserve page by page using pfn and size */
+	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
+	for (i = 0; i < size; i += PAGE_SIZE) {
+		paddr = base_paddr + i;
+		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot);
+		if (retval)
+			goto cleanup_ret;
+	}
+	return 0;
+
+cleanup_ret:
+	/* Reserve error: Cleanup partial reservation and return error */
+	for (j = 0; j < i; j += PAGE_SIZE) {
+		paddr = base_paddr + j;
+		free_pfn_range(paddr, PAGE_SIZE);
+	}
+
+	return retval;
+}
+
+/*
+ * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case size can be zero).
+ */
+void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+			unsigned long size)
+{
+	unsigned long i;
+	resource_size_t paddr;
+	unsigned long prot;
+	unsigned long vma_start = vma->vm_start;
+	unsigned long vma_end = vma->vm_end;
+	unsigned long vma_size = vma_end - vma_start;
+
+	if (!pat_enabled)
+		return;
+
+	if (is_linear_pfn_mapping(vma)) {
+		/* free the whole chunk starting from vm_pgoff */
+		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
+		free_pfn_range(paddr, vma_size);
+		return;
+	}
+
+	if (size != 0 && size != vma_size) {
+		/* free page by page, using pfn and size */
+		paddr = (resource_size_t)pfn << PAGE_SHIFT;
+		for (i = 0; i < size; i += PAGE_SIZE) {
+			/* offset from the fixed base, not cumulative */
+			free_pfn_range(paddr + i, PAGE_SIZE);
+		}
+	} else {
+		/* free entire vma, page by page, using the pfn from pte */
+		for (i = 0; i < vma_size; i += PAGE_SIZE) {
+			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
+				continue;
+
+			free_pfn_range(paddr, PAGE_SIZE);
+		}
+	}
+}
+
+pgprot_t pgprot_writecombine(pgprot_t prot)
+{
+	if (pat_enabled)
+		return __pgprot(pgprot_val(prot) | _PAGE_CACHE_WC);
+	else
+		return pgprot_noncached(prot);
+}
+
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
 /* get Nth element of the linked list */
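
Taken together, the new pat.c hooks are driven from generic mm code: track_pfn_vma_new() on the remap_pfn_range() path, track_pfn_vma_copy() from copy_page_range() on fork, and untrack_pfn_vma() on unmap. A hedged sketch of a driver mmap handler that would exercise the linear-mapping fast path (the driver is hypothetical, and the exact call chain is assumed from this patch series):

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Map a device aperture write-combined; remap_pfn_range() sets up a
     * linear VM_PFNMAP vma, so the whole range is reserved through a
     * single reserve_pfn_range() call rather than page by page. */
    static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
    {
            unsigned long size = vma->vm_end - vma->vm_start;

            /* _PAGE_CACHE_WC when PAT is enabled, an uncached fallback
             * otherwise -- exactly the pgprot_writecombine() added above */
            vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

            /* On a memtype conflict, track_pfn_vma_new() makes this fail
             * with -EINVAL instead of silently aliasing cache attributes */
            return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                                   size, vma->vm_page_prot);
    }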