author     Thomas Gleixner <tglx@linutronix.de>    2016-09-01 12:33:46 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2016-09-01 12:33:46 -0400
commit     0cb7bf61b1e9f05027de58c80f9b46a714d24e35 (patch)
tree       41fb55cf62d07b425122f9a8b96412c0d8eb99c5 /arch/x86/mm
parent     aa877175e7a9982233ed8f10cb4bfddd78d82741 (diff)
parent     3eab887a55424fc2c27553b7bfe32330df83f7b8 (diff)
Merge branch 'linus' into smp/hotplug
Apply upstream changes to avoid conflicts with pending patches.
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/ident_map.c | 19 +++++++++++++++++--------
-rw-r--r--  arch/x86/mm/init.c      | 14 ++++++++++++--
-rw-r--r--  arch/x86/mm/kaslr.c     |  4 ++--
3 files changed, 25 insertions(+), 12 deletions(-)
diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c
index ec21796ac5fd..4473cb4f8b90 100644
--- a/arch/x86/mm/ident_map.c
+++ b/arch/x86/mm/ident_map.c
@@ -3,15 +3,17 @@
  * included by both the compressed kernel and the regular kernel.
  */
 
-static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
+static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
 			   unsigned long addr, unsigned long end)
 {
 	addr &= PMD_MASK;
 	for (; addr < end; addr += PMD_SIZE) {
 		pmd_t *pmd = pmd_page + pmd_index(addr);
 
-		if (!pmd_present(*pmd))
-			set_pmd(pmd, __pmd(addr | pmd_flag));
+		if (pmd_present(*pmd))
+			continue;
+
+		set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
 	}
 }
 
@@ -30,13 +32,13 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 
 		if (pud_present(*pud)) {
 			pmd = pmd_offset(pud, 0);
-			ident_pmd_init(info->pmd_flag, pmd, addr, next);
+			ident_pmd_init(info, pmd, addr, next);
 			continue;
 		}
 		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
 		if (!pmd)
 			return -ENOMEM;
-		ident_pmd_init(info->pmd_flag, pmd, addr, next);
+		ident_pmd_init(info, pmd, addr, next);
 		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
 	}
 
@@ -44,14 +46,15 @@ static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
 }
 
 int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
-			      unsigned long addr, unsigned long end)
+			      unsigned long pstart, unsigned long pend)
 {
+	unsigned long addr = pstart + info->offset;
+	unsigned long end = pend + info->offset;
 	unsigned long next;
 	int result;
-	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;
 
 	for (; addr < end; addr = next) {
-		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
+		pgd_t *pgd = pgd_page + pgd_index(addr);
 		pud_t *pud;
 
 		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
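
The ident_map.c rework above changes the contract of kernel_ident_mapping_init(): it now takes physical start/end addresses and applies info->offset itself, so the same walker can build either a pure identity mapping (offset == 0) or a mapping shifted to a virtual base such as __PAGE_OFFSET, with ident_pmd_init() subtracting the offset again so the PMD entry still points at the right physical address. Below is a minimal caller sketch, loosely modeled on a hibernation-style user of this API; alloc_page_buf() and build_offset_mapping() are illustrative names, not part of this patch:

/* Illustrative caller sketch, not part of the diff above. */
static void *alloc_page_buf(void *context)
{
	/* Hand out one zeroed page per new page table, NULL on failure. */
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static int build_offset_mapping(pgd_t *pgd, unsigned long mstart,
				unsigned long mend)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= alloc_page_buf,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= __PAGE_OFFSET,	/* 0 gives a pure identity map */
	};

	/* mstart/mend are physical; the mapping lands at phys + offset. */
	return kernel_ident_mapping_init(&info, pgd, mstart, mend);
}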
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 620928903be3..d28a2d741f9e 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -122,8 +122,18 @@ __ref void *alloc_low_pages(unsigned int num)
 	return __va(pfn << PAGE_SHIFT);
 }
 
-/* need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS */
-#define INIT_PGT_BUF_SIZE	(6 * PAGE_SIZE)
+/*
+ * By default need 3 4k for initial PMD_SIZE, 3 4k for 0-ISA_END_ADDRESS.
+ * With KASLR memory randomization, depending on the machine e820 memory
+ * and the PUD alignment. We may need twice more pages when KASLR memory
+ * randomization is enabled.
+ */
+#ifndef CONFIG_RANDOMIZE_MEMORY
+#define INIT_PGD_PAGE_COUNT	6
+#else
+#define INIT_PGD_PAGE_COUNT	12
+#endif
+#define INIT_PGT_BUF_SIZE	(INIT_PGD_PAGE_COUNT * PAGE_SIZE)
 RESERVE_BRK(early_pgt_alloc, INIT_PGT_BUF_SIZE);
 void __init early_alloc_pgt_buf(void)
 {
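
The init.c hunk doubles the early page-table reservation when CONFIG_RANDOMIZE_MEMORY is set: per the comment in the patch, randomized bases are PUD-aligned rather than PGD-aligned, so the initial mappings can cross more page-table boundaries and draw extra pages from the brk buffer. A standalone sanity check of the resulting sizes (illustrative, not kernel code; assumes x86's 4 KiB base pages):

#include <assert.h>

#define PAGE_SZ 4096UL	/* assumed 4 KiB base pages */

int main(void)
{
	assert( 6 * PAGE_SZ == 24576);	/* !CONFIG_RANDOMIZE_MEMORY: 24 KiB of brk */
	assert(12 * PAGE_SZ == 49152);	/*  CONFIG_RANDOMIZE_MEMORY: 48 KiB of brk */
	return 0;
}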
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 26dccd6c0df1..bda8d5eef04d 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -77,7 +77,7 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
  */
 static inline bool kaslr_memory_enabled(void)
 {
-	return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
+	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
 }
 
 /* Initialize base and padding for each memory region randomized with KASLR */
@@ -97,7 +97,7 @@ void __init kernel_randomize_memory(void)
 	 * add padding if needed (especially for memory hotplug support).
 	 */
 	BUG_ON(kaslr_regions[0].base != &page_offset_base);
-	memory_tb = ((max_pfn << PAGE_SHIFT) >> TB_SHIFT) +
+	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
 		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
 
 	/* Adapt phyiscal memory region size based on available memory */
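
The kaslr.c hunks are two small fixes: config_enabled() gives way to the canonical IS_ENABLED(), and memory_tb now rounds up to the next terabyte, so a machine whose RAM is not an exact multiple of 1 TiB still gets its tail included in the randomized direct-mapping window. A standalone illustration of the rounding difference (not kernel code; assumes a 64-bit host, with DIV_ROUND_UP reproduced as defined in linux/kernel.h):

#include <stdio.h>

#define TB_SHIFT 40
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))	/* as in linux/kernel.h */

int main(void)
{
	unsigned long bytes = 3UL << (TB_SHIFT - 1);	/* 1.5 TiB of RAM */

	/* The old expression truncates away the trailing half terabyte ... */
	printf("truncated:  %lu TiB\n", bytes >> TB_SHIFT);			/* 1 */
	/* ... while the new one rounds up so the tail stays covered. */
	printf("rounded up: %lu TiB\n", DIV_ROUND_UP(bytes, 1UL << TB_SHIFT));	/* 2 */
	return 0;
}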