author    Eric W. Biederman <ebiederm@xmission.com>  2005-07-29 15:25:28 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>      2005-07-29 16:12:49 -0400
commit    8bf2755664bfe03a414e7ec02d9f16a3d5beb60d (patch)
tree      4cf707d59b69c153a29d93e90fa5dc53baec64a0 /arch/x86_64/kernel
parent    36c4fd23cc06f81d68ee968c4c1bf1cebb3dcea5 (diff)
[PATCH] x86_64 machine_kexec: Use standard pagetable helpers
Use the standard hardware page table manipulation macros. This is possible
now that linux works with all 4 levels of the page tables.

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
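In essence the patch swaps the file's private page-table macros and raw u64 stores for the kernel's standard types and helpers. A minimal sketch of the new idiom follows; the helper name fill_identity_pmd is invented for illustration, but every kernel symbol it uses is one the patch itself relies on.

/* Sketch only, not part of the patch. Rough correspondence:
 *   LEVEL1_SIZE (1UL << 21)            -> PMD_SIZE
 *   LEVEL2_SIZE (1UL << 30)            -> PUD_SIZE
 *   LEVEL3_SIZE (1UL << 39)            -> PGDIR_SIZE
 *   LEVEL4_SIZE (1UL << 48)            -> PTRS_PER_PGD * PGDIR_SIZE
 *   L1_ATTR (present|rw|acc|dirty|PSE) -> __PAGE_KERNEL_LARGE_EXEC
 *   L2_ATTR / L3_ATTR                  -> _KERNPG_TABLE
 *   *entry = val; / *entry = 0;        -> set_pmd()/set_pud()/set_pgd(),
 *                                         pud_clear()/pgd_clear()
 */
#include <asm/page.h>		/* PAGE_MASK */
#include <asm/pgtable.h>	/* pmd_t, set_pmd(), __pmd(), PMD_SIZE, PUD_SIZE */

/* Fill one PMD-level table with 2 MB identity mappings covering the
 * 1 GB (PUD_SIZE) region starting at addr, using the typed helpers. */
static void fill_identity_pmd(pmd_t *level2p, unsigned long addr)
{
	unsigned long end_addr;

	addr &= PAGE_MASK;
	end_addr = addr + PUD_SIZE;
	while (addr < end_addr) {
		/* old style: *(level2p++) = addr | L1_ATTR; */
		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
		addr += PMD_SIZE;	/* advance one 2 MB large page */
	}
}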
Diffstat (limited to 'arch/x86_64/kernel')
-rw-r--r--  arch/x86_64/kernel/machine_kexec.c | 67
1 file changed, 25 insertions(+), 42 deletions(-)
diff --git a/arch/x86_64/kernel/machine_kexec.c b/arch/x86_64/kernel/machine_kexec.c
index 717f7db4b532..89fab51e20f4 100644
--- a/arch/x86_64/kernel/machine_kexec.c
+++ b/arch/x86_64/kernel/machine_kexec.c
@@ -8,43 +8,26 @@
 
 #include <linux/mm.h>
 #include <linux/kexec.h>
-#include <linux/delay.h>
 #include <linux/string.h>
 #include <linux/reboot.h>
-#include <asm/pda.h>
 #include <asm/pgtable.h>
-#include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/io.h>
-#include <asm/apic.h>
-#include <asm/cpufeature.h>
-#include <asm/hw_irq.h>
-
-#define LEVEL0_SIZE (1UL << 12UL)
-#define LEVEL1_SIZE (1UL << 21UL)
-#define LEVEL2_SIZE (1UL << 30UL)
-#define LEVEL3_SIZE (1UL << 39UL)
-#define LEVEL4_SIZE (1UL << 48UL)
-
-#define L0_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define L1_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE)
-#define L2_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define L3_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-
-static void init_level2_page(u64 *level2p, unsigned long addr)
+
+static void init_level2_page(pmd_t *level2p, unsigned long addr)
 {
 	unsigned long end_addr;
 
 	addr &= PAGE_MASK;
-	end_addr = addr + LEVEL2_SIZE;
+	end_addr = addr + PUD_SIZE;
 	while (addr < end_addr) {
-		*(level2p++) = addr | L1_ATTR;
-		addr += LEVEL1_SIZE;
+		set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
+		addr += PMD_SIZE;
 	}
 }
 
-static int init_level3_page(struct kimage *image, u64 *level3p,
+static int init_level3_page(struct kimage *image, pud_t *level3p,
 	unsigned long addr, unsigned long last_addr)
 {
 	unsigned long end_addr;
@@ -52,32 +35,32 @@ static int init_level3_page(struct kimage *image, u64 *level3p,
 
 	result = 0;
 	addr &= PAGE_MASK;
-	end_addr = addr + LEVEL3_SIZE;
+	end_addr = addr + PGDIR_SIZE;
 	while ((addr < last_addr) && (addr < end_addr)) {
 		struct page *page;
-		u64 *level2p;
+		pmd_t *level2p;
 
 		page = kimage_alloc_control_pages(image, 0);
 		if (!page) {
 			result = -ENOMEM;
 			goto out;
 		}
-		level2p = (u64 *)page_address(page);
+		level2p = (pmd_t *)page_address(page);
 		init_level2_page(level2p, addr);
-		*(level3p++) = __pa(level2p) | L2_ATTR;
-		addr += LEVEL2_SIZE;
+		set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
+		addr += PUD_SIZE;
 	}
 	/* clear the unused entries */
 	while (addr < end_addr) {
-		*(level3p++) = 0;
-		addr += LEVEL2_SIZE;
+		pud_clear(level3p++);
+		addr += PUD_SIZE;
 	}
 out:
 	return result;
 }
 
 
-static int init_level4_page(struct kimage *image, u64 *level4p,
+static int init_level4_page(struct kimage *image, pgd_t *level4p,
 	unsigned long addr, unsigned long last_addr)
 {
 	unsigned long end_addr;
@@ -85,28 +68,28 @@ static int init_level4_page(struct kimage *image, u64 *level4p,
 
 	result = 0;
 	addr &= PAGE_MASK;
-	end_addr = addr + LEVEL4_SIZE;
+	end_addr = addr + (PTRS_PER_PGD * PGDIR_SIZE);
 	while ((addr < last_addr) && (addr < end_addr)) {
 		struct page *page;
-		u64 *level3p;
+		pud_t *level3p;
 
 		page = kimage_alloc_control_pages(image, 0);
 		if (!page) {
 			result = -ENOMEM;
 			goto out;
 		}
-		level3p = (u64 *)page_address(page);
+		level3p = (pud_t *)page_address(page);
 		result = init_level3_page(image, level3p, addr, last_addr);
 		if (result) {
 			goto out;
 		}
-		*(level4p++) = __pa(level3p) | L3_ATTR;
-		addr += LEVEL3_SIZE;
+		set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
+		addr += PGDIR_SIZE;
 	}
 	/* clear the unused entries */
 	while (addr < end_addr) {
-		*(level4p++) = 0;
-		addr += LEVEL3_SIZE;
+		pgd_clear(level4p++);
+		addr += PGDIR_SIZE;
 	}
 out:
 	return result;
@@ -115,8 +98,8 @@ out:
 
 static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
 {
-	u64 *level4p;
-	level4p = (u64 *)__va(start_pgtable);
+	pgd_t *level4p;
+	level4p = (pgd_t *)__va(start_pgtable);
 	return init_level4_page(image, level4p, 0, end_pfn << PAGE_SHIFT);
 }
 
@@ -176,7 +159,7 @@ int machine_kexec_prepare(struct kimage *image)
 
 	/* Calculate the offsets */
 	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
-	control_code_buffer = start_pgtable + 4096UL;
+	control_code_buffer = start_pgtable + PAGE_SIZE;
 
 	/* Setup the identity mapped 64bit page table */
 	result = init_pgtable(image, start_pgtable);
@@ -212,7 +195,7 @@ NORET_TYPE void machine_kexec(struct kimage *image)
 	/* Calculate the offsets */
 	page_list = image->head;
 	start_pgtable = page_to_pfn(image->control_code_page) << PAGE_SHIFT;
-	control_code_buffer = start_pgtable + 4096UL;
+	control_code_buffer = start_pgtable + PAGE_SIZE;
 
 	/* Set the low half of the page table to my identity mapped
 	 * page table for kexec. Leave the high half pointing at the