diff options
author | Yinghai Lu <yinghai@kernel.org> | 2013-01-24 15:19:53 -0500 |
---|---|---|
committer | H. Peter Anvin <hpa@linux.intel.com> | 2013-01-29 18:20:13 -0500 |
commit | 6b9c75aca6cba4d99a6e8d8274b1788d4d4b50d9 (patch) | |
tree | 14ced936eace998f2ad86ab10573ed198ff955d2 | |
parent | 8170e6bed465b4b0c7687f93e9948aca4358a33b (diff) |
x86, 64bit: #PF handler set page to cover only 2M per #PF
We only map a single 2 MiB page per #PF, even though we should be able
to do this a full gigabyte at a time with no additional memory cost.
This is a workaround for a broken AMD reference BIOS (and its
derivatives in shipping systems) which maps a large chunk of memory as
WB in the MTRR system but will #MC if the processor wanders off and
tries to prefetch that memory, which can happen any time the memory is
mapped in the TLB.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-13-git-send-email-yinghai@kernel.org
Cc: Alexander Duyck <alexander.h.duyck@intel.com>
[ hpa: rewrote the patch description ]
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r-- | arch/x86/kernel/head64.c | 42 |
1 file changed, 25 insertions, 17 deletions
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index f57df05ea126..816fc85c9bb3 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c | |||
@@ -53,15 +53,15 @@ int __init early_make_pgtable(unsigned long address) | |||
53 | unsigned long physaddr = address - __PAGE_OFFSET; | 53 | unsigned long physaddr = address - __PAGE_OFFSET; |
54 | unsigned long i; | 54 | unsigned long i; |
55 | pgdval_t pgd, *pgd_p; | 55 | pgdval_t pgd, *pgd_p; |
56 | pudval_t *pud_p; | 56 | pudval_t pud, *pud_p; |
57 | pmdval_t pmd, *pmd_p; | 57 | pmdval_t pmd, *pmd_p; |
58 | 58 | ||
59 | /* Invalid address or early pgt is done ? */ | 59 | /* Invalid address or early pgt is done ? */ |
60 | if (physaddr >= MAXMEM || read_cr3() != __pa(early_level4_pgt)) | 60 | if (physaddr >= MAXMEM || read_cr3() != __pa(early_level4_pgt)) |
61 | return -1; | 61 | return -1; |
62 | 62 | ||
63 | i = (address >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1); | 63 | again: |
64 | pgd_p = &early_level4_pgt[i].pgd; | 64 | pgd_p = &early_level4_pgt[pgd_index(address)].pgd; |
65 | pgd = *pgd_p; | 65 | pgd = *pgd_p; |
66 | 66 | ||
67 | /* | 67 | /* |
@@ -69,29 +69,37 @@ int __init early_make_pgtable(unsigned long address) | |||
69 | * critical -- __PAGE_OFFSET would point us back into the dynamic | 69 | * critical -- __PAGE_OFFSET would point us back into the dynamic |
70 | * range and we might end up looping forever... | 70 | * range and we might end up looping forever... |
71 | */ | 71 | */ |
72 | if (pgd && next_early_pgt < EARLY_DYNAMIC_PAGE_TABLES) { | 72 | if (pgd) |
73 | pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base); | 73 | pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base); |
74 | } else { | 74 | else { |
75 | if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES-1) | 75 | if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) { |
76 | reset_early_page_tables(); | 76 | reset_early_page_tables(); |
77 | goto again; | ||
78 | } | ||
77 | 79 | ||
78 | pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++]; | 80 | pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++]; |
79 | for (i = 0; i < PTRS_PER_PUD; i++) | 81 | for (i = 0; i < PTRS_PER_PUD; i++) |
80 | pud_p[i] = 0; | 82 | pud_p[i] = 0; |
81 | |||
82 | *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE; | 83 | *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE; |
83 | } | 84 | } |
84 | i = (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1); | 85 | pud_p += pud_index(address); |
85 | pud_p += i; | 86 | pud = *pud_p; |
86 | |||
87 | pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++]; | ||
88 | pmd = (physaddr & PUD_MASK) + (__PAGE_KERNEL_LARGE & ~_PAGE_GLOBAL); | ||
89 | for (i = 0; i < PTRS_PER_PMD; i++) { | ||
90 | pmd_p[i] = pmd; | ||
91 | pmd += PMD_SIZE; | ||
92 | } | ||
93 | 87 | ||
94 | *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE; | 88 | if (pud) |
89 | pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base); | ||
90 | else { | ||
91 | if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) { | ||
92 | reset_early_page_tables(); | ||
93 | goto again; | ||
94 | } | ||
95 | |||
96 | pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++]; | ||
97 | for (i = 0; i < PTRS_PER_PMD; i++) | ||
98 | pmd_p[i] = 0; | ||
99 | *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE; | ||
100 | } | ||
101 | pmd = (physaddr & PMD_MASK) + (__PAGE_KERNEL_LARGE & ~_PAGE_GLOBAL); | ||
102 | pmd_p[pmd_index(address)] = pmd; | ||
95 | 103 | ||
96 | return 0; | 104 | return 0; |
97 | } | 105 | } |