aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/mm
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-05-16 18:54:01 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-16 18:54:01 -0400
commit9a45f036af363aec1efec08827c825d69c115a9a (patch)
treed9a81016dacbbcdf87d8e2ec3dcebed6b5029870 /arch/x86/mm
parent168f1a7163b37294a0ef33829e1ed54d41e33c42 (diff)
parentd2d3462f9f08da364c8fbd41e8e32229d610d49d (diff)
Merge branch 'x86-boot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 boot updates from Ingo Molnar: "The biggest changes in this cycle were: - prepare for more KASLR related changes, by restructuring, cleaning up and fixing the existing boot code. (Kees Cook, Baoquan He, Yinghai Lu) - simplify/concentrate subarch handling code, eliminate paravirt_enabled() usage. (Luis R Rodriguez)" * 'x86-boot-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (50 commits) x86/KASLR: Clarify purpose of each get_random_long() x86/KASLR: Add virtual address choosing function x86/KASLR: Return earliest overlap when avoiding regions x86/KASLR: Add 'struct slot_area' to manage random_addr slots x86/boot: Add missing file header comments x86/KASLR: Initialize mapping_info every time x86/boot: Comment what finalize_identity_maps() does x86/KASLR: Build identity mappings on demand x86/boot: Split out kernel_ident_mapping_init() x86/boot: Clean up indenting for asm/boot.h x86/KASLR: Improve comments around the mem_avoid[] logic x86/boot: Simplify pointer casting in choose_random_location() x86/KASLR: Consolidate mem_avoid[] entries x86/boot: Clean up pointer casting x86/boot: Warn on future overlapping memcpy() use x86/boot: Extract error reporting functions x86/boot: Correctly bounds-check relocations x86/KASLR: Clean up unused code from old 'run_size' and rename it to 'kernel_total_size' x86/boot: Fix "run_size" calculation x86/boot: Calculate decompression size during boot not build ...
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--arch/x86/mm/ident_map.c79
-rw-r--r--arch/x86/mm/init_32.c3
-rw-r--r--arch/x86/mm/init_64.c74
3 files changed, 80 insertions, 76 deletions
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
new file mode 100644
index 000000000000..ec21796ac5fd
--- /dev/null
+++ b/arch/x86/mm/ident_map.c
@@ -0,0 +1,79 @@
1/*
2 * Helper routines for building identity mapping page tables. This is
3 * included by both the compressed kernel and the regular kernel.
4 */
5
/*
 * Fill one PMD page with identity-mapped (virt == phys) large-page
 * entries covering [addr, end).  addr is rounded down to a PMD
 * boundary; entries that are already present are left untouched.
 * pmd_flag supplies the page flags OR'd into each new entry
 * (presumably including the large-page/PSE bit — set by the caller
 * via info->pmd_flag; confirm against struct x86_mapping_info users).
 */
6static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
7 unsigned long addr, unsigned long end)
8{
9 addr &= PMD_MASK;
10 for (; addr < end; addr += PMD_SIZE) {
11 pmd_t *pmd = pmd_page + pmd_index(addr);
12
13 if (!pmd_present(*pmd))
14 set_pmd(pmd, __pmd(addr | pmd_flag));
15 }
16}
17
/*
 * Walk the PUD entries covering [addr, end), populating the PMD level
 * underneath each one with identity mappings.  If a PUD entry is
 * already present, its existing PMD page is reused and only extended;
 * otherwise a fresh PMD page is obtained through info->alloc_pgt_page()
 * (allocator is supplied by the caller — boot stub or kernel proper).
 *
 * Returns 0 on success, -ENOMEM if a PMD page cannot be allocated.
 */
18static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
19 unsigned long addr, unsigned long end)
20{
21 unsigned long next;
22
23 for (; addr < end; addr = next) {
24 pud_t *pud = pud_page + pud_index(addr);
25 pmd_t *pmd;
26
27 next = (addr & PUD_MASK) + PUD_SIZE;
28 if (next > end)
29 next = end;
30
31 if (pud_present(*pud)) {
32 pmd = pmd_offset(pud, 0);
33 ident_pmd_init(info->pmd_flag, pmd, addr, next);
34 continue;
35 }
36 pmd = (pmd_t *)info->alloc_pgt_page(info->context);
37 if (!pmd)
38 return -ENOMEM;
39 ident_pmd_init(info->pmd_flag, pmd, addr, next);
40 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
41 }
42
43 return 0;
44}
45
/*
 * Build identity-mapping page tables for the physical range
 * [addr, end) under pgd_page, descending through PUD/PMD levels and
 * allocating intermediate tables on demand via info->alloc_pgt_page().
 * Already-present PGD entries are reused (their PUD page is extended
 * in place) so repeated calls can incrementally grow the mapping.
 *
 * When info->kernel_mapping is set, 'off' shifts every PGD index by
 * pgd_index(__PAGE_OFFSET), i.e. the identity range is installed in
 * the kernel half of the address space rather than at virt == phys.
 *
 * Returns 0 on success, -ENOMEM if a page-table page cannot be
 * allocated, or any error propagated from ident_pud_init().
 */
46int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
47 unsigned long addr, unsigned long end)
48{
49 unsigned long next;
50 int result;
51 int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
52
53 for (; addr < end; addr = next) {
54 pgd_t *pgd = pgd_page + pgd_index(addr) + off;
55 pud_t *pud;
56
57 next = (addr & PGDIR_MASK) + PGDIR_SIZE;
58 if (next > end)
59 next = end;
60
61 if (pgd_present(*pgd)) {
62 pud = pud_offset(pgd, 0);
63 result = ident_pud_init(info, pud, addr, next);
64 if (result)
65 return result;
66 continue;
67 }
68
69 pud = (pud_t *)info->alloc_pgt_page(info->context);
70 if (!pud)
71 return -ENOMEM;
72 result = ident_pud_init(info, pud, addr, next);
73 if (result)
74 return result;
75 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
76 }
77
78 return 0;
79}
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 85af914e3d27..84df150ee77e 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -804,9 +804,6 @@ void __init mem_init(void)
804 BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); 804 BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
805#undef high_memory 805#undef high_memory
806#undef __FIXADDR_TOP 806#undef __FIXADDR_TOP
807#ifdef CONFIG_RANDOMIZE_BASE
808 BUILD_BUG_ON(CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE);
809#endif
810 807
811#ifdef CONFIG_HIGHMEM 808#ifdef CONFIG_HIGHMEM
812 BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); 809 BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 89d97477c1d9..bce2e5d9edd4 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -58,79 +58,7 @@
58 58
59#include "mm_internal.h" 59#include "mm_internal.h"
60 60
61static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page, 61#include "ident_map.c"
62 unsigned long addr, unsigned long end)
63{
64 addr &= PMD_MASK;
65 for (; addr < end; addr += PMD_SIZE) {
66 pmd_t *pmd = pmd_page + pmd_index(addr);
67
68 if (!pmd_present(*pmd))
69 set_pmd(pmd, __pmd(addr | pmd_flag));
70 }
71}
72static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
73 unsigned long addr, unsigned long end)
74{
75 unsigned long next;
76
77 for (; addr < end; addr = next) {
78 pud_t *pud = pud_page + pud_index(addr);
79 pmd_t *pmd;
80
81 next = (addr & PUD_MASK) + PUD_SIZE;
82 if (next > end)
83 next = end;
84
85 if (pud_present(*pud)) {
86 pmd = pmd_offset(pud, 0);
87 ident_pmd_init(info->pmd_flag, pmd, addr, next);
88 continue;
89 }
90 pmd = (pmd_t *)info->alloc_pgt_page(info->context);
91 if (!pmd)
92 return -ENOMEM;
93 ident_pmd_init(info->pmd_flag, pmd, addr, next);
94 set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
95 }
96
97 return 0;
98}
99
100int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
101 unsigned long addr, unsigned long end)
102{
103 unsigned long next;
104 int result;
105 int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
106
107 for (; addr < end; addr = next) {
108 pgd_t *pgd = pgd_page + pgd_index(addr) + off;
109 pud_t *pud;
110
111 next = (addr & PGDIR_MASK) + PGDIR_SIZE;
112 if (next > end)
113 next = end;
114
115 if (pgd_present(*pgd)) {
116 pud = pud_offset(pgd, 0);
117 result = ident_pud_init(info, pud, addr, next);
118 if (result)
119 return result;
120 continue;
121 }
122
123 pud = (pud_t *)info->alloc_pgt_page(info->context);
124 if (!pud)
125 return -ENOMEM;
126 result = ident_pud_init(info, pud, addr, next);
127 if (result)
128 return result;
129 set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
130 }
131
132 return 0;
133}
134 62
135/* 63/*
136 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the 64 * NOTE: pagetable_init alloc all the fixmap pagetables contiguous on the