Diffstat (limited to 'arch/x86/mm')
-rw-r--r--   arch/x86/mm/init_32.c    48
-rw-r--r--   arch/x86/mm/init_64.c     2
-rw-r--r--   arch/x86/mm/iomap_32.c   10
-rw-r--r--   arch/x86/mm/ioremap.c    25
-rw-r--r--   arch/x86/mm/pageattr.c   49
-rw-r--r--   arch/x86/mm/pat.c         6
6 files changed, 93 insertions, 47 deletions
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 4a6989e47a53..00263bf07a88 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -137,6 +137,47 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
 	return pte_offset_kernel(pmd, 0);
 }
 
+static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
+					   unsigned long vaddr, pte_t *lastpte)
+{
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * Something (early fixmap) may already have put a pte
+	 * page here, which causes the page table allocation
+	 * to become nonlinear. Attempt to fix it, and if it
+	 * is still nonlinear then we have to bug.
+	 */
+	int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
+	int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
+
+	if (pmd_idx_kmap_begin != pmd_idx_kmap_end
+	    && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
+	    && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
+	    && ((__pa(pte) >> PAGE_SHIFT) < table_start
+		|| (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
+		pte_t *newpte;
+		int i;
+
+		BUG_ON(after_init_bootmem);
+		newpte = alloc_low_page();
+		for (i = 0; i < PTRS_PER_PTE; i++)
+			set_pte(newpte + i, pte[i]);
+
+		paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
+		set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
+		BUG_ON(newpte != pte_offset_kernel(pmd, 0));
+		__flush_tlb_all();
+
+		paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
+		pte = newpte;
+	}
+	BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
+	       && vaddr > fix_to_virt(FIX_KMAP_END)
+	       && lastpte && lastpte + PTRS_PER_PTE != pte);
+#endif
+	return pte;
+}
+
 /*
  * This function initializes a certain range of kernel virtual memory
  * with new bootmem page tables, everywhere page tables are missing in
@@ -153,6 +194,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 	unsigned long vaddr;
 	pgd_t *pgd;
 	pmd_t *pmd;
+	pte_t *pte = NULL;
 
 	vaddr = start;
 	pgd_idx = pgd_index(vaddr);
@@ -164,7 +206,8 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
 		pmd = pmd + pmd_index(vaddr);
 		for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
 		     pmd++, pmd_idx++) {
-			one_page_table_init(pmd);
+			pte = page_table_kmap_check(one_page_table_init(pmd),
+						    pmd, vaddr, pte);
 
 			vaddr += PMD_SIZE;
 		}
@@ -507,7 +550,6 @@ static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
 	 * Fixed mappings, only the page table structure has to be
 	 * created - mappings will be set by set_fixmap():
 	 */
-	early_ioremap_clear();
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
 	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
 	page_table_range_init(vaddr, end, pgd_base);
@@ -800,7 +842,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 	tables += PAGE_ALIGN(ptes * sizeof(pte_t));
 
 	/* for fixmap */
-	tables += PAGE_SIZE * 2;
+	tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
 
 	/*
 	 * RED-PEN putting page tables only on node 0 could
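The last init_32.c hunk replaces a hard-coded two-page fixmap reservation with a size derived from __end_of_fixed_addresses, so the estimate keeps up if the fixmap grows. As a rough standalone illustration of that arithmetic (not kernel code; the slot count and pte size below are made-up assumptions), a PAGE_ALIGN of slots times entry size gives the number of bytes to set aside:

/*
 * Userspace sketch of the fixmap pte reservation estimate.
 * The slot count and pte size are illustrative assumptions only.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long pte_size = 8;		/* assume PAE: 8-byte PTEs */
	unsigned long fixmap_slots = 1024;	/* hypothetical __end_of_fixed_addresses */
	unsigned long tables;

	tables = PAGE_ALIGN(fixmap_slots * pte_size);
	printf("fixmap pte reservation: %lu bytes (%lu pages)\n",
	       tables, tables / PAGE_SIZE);
	return 0;
}

With these particular numbers the result is still two pages; the point of the change is that a larger fixmap no longer overflows a fixed reservation.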
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 23f68e77ad1f..e6d36b490250 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -596,7 +596,7 @@ static void __init init_gbpages(void)
 	direct_gbpages = 0;
 }
 
-static unsigned long __init kernel_physical_mapping_init(unsigned long start,
+static unsigned long __meminit kernel_physical_mapping_init(unsigned long start,
 						unsigned long end,
 						unsigned long page_size_mask)
 {
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index d0151d8ce452..ca53224fc56c 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -17,6 +17,7 @@
  */
 
 #include <asm/iomap.h>
+#include <asm/pat.h>
 #include <linux/module.h>
 
 /* Map 'pfn' using fixed map 'type' and protections 'prot'
@@ -29,6 +30,15 @@ iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
 
 	pagefault_disable();
 
+	/*
+	 * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
+	 * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
+	 * MTRR is UC or WC.  UC_MINUS gets the real intention, of the
+	 * user, which is "WC if the MTRR is WC, UC if you can't do that."
+	 */
+	if (!pat_enabled && pgprot_val(prot) == pgprot_val(PAGE_KERNEL_WC))
+		prot = PAGE_KERNEL_UC_MINUS;
+
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	set_pte(kmap_pte-idx, pfn_pte(pfn, prot));
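To make the iomap_32.c fallback easier to follow in isolation, here is a minimal userspace sketch of the same decision using made-up enum values instead of the kernel's pgprot machinery: when PAT is unavailable, a write-combining request is demoted to UC_MINUS so the effective type still follows the MTRR rather than becoming plain uncached.

/*
 * Sketch of the WC -> UC_MINUS promotion; types and names are illustrative.
 */
#include <stdio.h>
#include <stdbool.h>

enum cache_type { CT_WC, CT_UC_MINUS, CT_UC, CT_WB };

static enum cache_type effective_type(bool pat_enabled, enum cache_type req)
{
	if (!pat_enabled && req == CT_WC)
		return CT_UC_MINUS;	/* "WC if the MTRR is WC, UC otherwise" */
	return req;
}

int main(void)
{
	printf("PAT off, WC requested -> %s\n",
	       effective_type(false, CT_WC) == CT_UC_MINUS ? "UC_MINUS" : "WC");
	printf("PAT on,  WC requested -> %s\n",
	       effective_type(true, CT_WC) == CT_WC ? "WC" : "UC_MINUS");
	return 0;
}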
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index bd85d42819e1..af750ab973b6 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -557,34 +557,9 @@ void __init early_ioremap_init(void)
 	}
 }
 
-void __init early_ioremap_clear(void)
-{
-	pmd_t *pmd;
-
-	if (early_ioremap_debug)
-		printk(KERN_INFO "early_ioremap_clear()\n");
-
-	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
-	pmd_clear(pmd);
-	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
-	__flush_tlb_all();
-}
-
 void __init early_ioremap_reset(void)
 {
-	enum fixed_addresses idx;
-	unsigned long addr, phys;
-	pte_t *pte;
-
 	after_paging_init = 1;
-	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
-		addr = fix_to_virt(idx);
-		pte = early_ioremap_pte(addr);
-		if (pte_present(*pte)) {
-			phys = pte_val(*pte) & PAGE_MASK;
-			set_fixmap(idx, phys);
-		}
-	}
 }
 
 static void __init __early_set_fixmap(enum fixed_addresses idx,
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e89d24815f26..84ba74820ad6 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -534,6 +534,36 @@ out_unlock:
 	return 0;
 }
 
+static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
+			       int primary)
+{
+	/*
+	 * Ignore all non primary paths.
+	 */
+	if (!primary)
+		return 0;
+
+	/*
+	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
+	 * to have holes.
+	 * Also set numpages to '1' indicating that we processed cpa req for
+	 * one virtual address page and its pfn. TBD: numpages can be set based
+	 * on the initial value and the level returned by lookup_address().
+	 */
+	if (within(vaddr, PAGE_OFFSET,
+		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
+		cpa->numpages = 1;
+		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
+		return 0;
+	} else {
+		WARN(1, KERN_WARNING "CPA: called for zero pte. "
+			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
+			*cpa->vaddr);
+
+		return -EFAULT;
+	}
+}
+
 static int __change_page_attr(struct cpa_data *cpa, int primary)
 {
 	unsigned long address;
@@ -549,17 +579,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 repeat:
 	kpte = lookup_address(address, &level);
 	if (!kpte)
-		return 0;
+		return __cpa_process_fault(cpa, address, primary);
 
 	old_pte = *kpte;
-	if (!pte_val(old_pte)) {
-		if (!primary)
-			return 0;
-		WARN(1, KERN_WARNING "CPA: called for zero pte. "
-		       "vaddr = %lx cpa->vaddr = %lx\n", address,
-		       *cpa->vaddr);
-		return -EINVAL;
-	}
+	if (!pte_val(old_pte))
+		return __cpa_process_fault(cpa, address, primary);
 
 	if (level == PG_LEVEL_4K) {
 		pte_t new_pte;
@@ -657,12 +681,7 @@ static int cpa_process_alias(struct cpa_data *cpa)
 	vaddr = *cpa->vaddr;
 
 	if (!(within(vaddr, PAGE_OFFSET,
-		    PAGE_OFFSET + (max_low_pfn_mapped << PAGE_SHIFT))
-#ifdef CONFIG_X86_64
-		|| within(vaddr, PAGE_OFFSET + (1UL<<32),
-		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))
-#endif
-	)) {
+		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {
 
 		alias_cpa = *cpa;
 		temp_cpa_vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
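The new __cpa_process_fault() centralizes the policy for a missing/zero PTE: inside the kernel identity mapping a hole is expected and simply skipped, anywhere else it is an error. The sketch below models that policy in plain userspace C (not kernel code; the address window and helper names are placeholders), reusing the same within() range test that pageattr.c defines as addr >= start && addr < end.

/*
 * Sketch of the fault policy: skip identity-map holes, fail otherwise.
 */
#include <stdio.h>

static int within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/* Returns 0 to skip the hole, -1 (think -EFAULT) for an unexpected miss. */
static int process_missing_pte(unsigned long vaddr, int primary,
			       unsigned long identity_start,
			       unsigned long identity_end)
{
	if (!primary)
		return 0;			/* ignore non-primary (alias) passes */
	if (within(vaddr, identity_start, identity_end))
		return 0;			/* identity map may have holes */
	fprintf(stderr, "unexpected zero pte at %#lx\n", vaddr);
	return -1;
}

int main(void)
{
	/* Hypothetical identity-map window for the demo. */
	unsigned long start = 0xc0000000UL, end = 0xf0000000UL;

	printf("%d\n", process_missing_pte(0xc1000000UL, 1, start, end)); /* 0 */
	printf("%d\n", process_missing_pte(0xf8000000UL, 1, start, end)); /* -1 */
	return 0;
}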
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index c9488513fd70..7b61036427df 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -333,6 +333,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 					      req_type & _PAGE_CACHE_MASK);
 	}
 
+	if (new_type)
+		*new_type = actual_type;
+
 	/*
 	 * For legacy reasons, some parts of the physical address range in the
 	 * legacy 1MB region is treated as non-RAM (even when listed as RAM in
@@ -356,9 +359,6 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
 	new->end	= end;
 	new->type	= actual_type;
 
-	if (new_type)
-		*new_type = actual_type;
-
 	spin_lock(&memtype_lock);
 
 	if (cached_entry && start >= cached_start)
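One plausible reading of the pat.c change is that reserve_memtype() can return to the caller before reaching the point where *new_type used to be assigned, so the output parameter has to be filled in before any such early path. The following is only a minimal sketch of that pattern (not the kernel function; names, types, and the "special range" check are illustrative):

/*
 * Sketch: set the output parameter before any early return, so every
 * caller sees the resolved type regardless of which path was taken.
 */
#include <stdio.h>

enum mem_type { MT_WB, MT_UC_MINUS, MT_WC };

static int reserve_type(unsigned long start, unsigned long end,
			enum mem_type req, enum mem_type *new_type)
{
	enum mem_type actual = req;	/* pretend conflict resolution happened */

	if (new_type)
		*new_type = actual;	/* set before any early return */

	if (start < 0x100000UL && end <= 0x100000UL)	/* hypothetical special range */
		return 0;		/* early path still reported a type */

	/* ... normal bookkeeping would continue here ... */
	return 0;
}

int main(void)
{
	enum mem_type got;

	reserve_type(0x0, 0xff000UL, MT_WC, &got);
	printf("early path reported type %d\n", got);
	return 0;
}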