Diffstat (limited to 'arch/x86/kernel/head_32.S')
-rw-r--r--	arch/x86/kernel/head_32.S	76
1 file changed, 36 insertions(+), 40 deletions(-)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index c32ca19d591a..30683883e0cd 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -38,42 +38,40 @@
 #define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
 
 /*
- * This is how much memory *in addition to the memory covered up to
- * and including _end* we need mapped initially.
+ * This is how much memory in addition to the memory covered up to
+ * and including _end we need mapped initially.
  * We need:
- * - one bit for each possible page, but only in low memory, which means
- *   2^32/4096/8 = 128K worst case (4G/4G split.)
- * - enough space to map all low memory, which means
- *   (2^32/4096) / 1024 pages (worst case, non PAE)
- *   (2^32/4096) / 512 + 4 pages (worst case for PAE)
- * - a few pages for allocator use before the kernel pagetable has
- *   been set up
+ *  (KERNEL_IMAGE_SIZE/4096) / 1024 pages (worst case, non PAE)
+ *  (KERNEL_IMAGE_SIZE/4096) / 512 + 4 pages (worst case for PAE)
  *
  * Modulo rounding, each megabyte assigned here requires a kilobyte of
  * memory, which is currently unreclaimed.
  *
  * This should be a multiple of a page.
+ *
+ * KERNEL_IMAGE_SIZE should be greater than pa(_end)
+ * and smaller than max_low_pfn, otherwise it will waste some page table entries
  */
-LOW_PAGES = 1<<(32-PAGE_SHIFT_asm)
-
-/*
- * To preserve the DMA pool in PAGEALLOC kernels, we'll allocate
- * pagetables from above the 16MB DMA limit, so we'll have to set
- * up pagetables 16MB more (worst-case):
- */
-#ifdef CONFIG_DEBUG_PAGEALLOC
-LOW_PAGES = LOW_PAGES + 0x1000000
-#endif
 
 #if PTRS_PER_PMD > 1
-PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PMD) + PTRS_PER_PGD
+#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
 #else
-PAGE_TABLE_SIZE = (LOW_PAGES / PTRS_PER_PGD)
+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
 #endif
-BOOTBITMAP_SIZE = LOW_PAGES / 8
-ALLOCATOR_SLOP = 4
 
-INIT_MAP_BEYOND_END = BOOTBITMAP_SIZE + (PAGE_TABLE_SIZE + ALLOCATOR_SLOP)*PAGE_SIZE_asm
+/* Enough space to fit pagetables for the low memory linear map */
+MAPPING_BEYOND_END = \
+	PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
+
+/*
+ * Worst-case size of the kernel mapping we need to make:
+ * the worst-case size of the kernel itself, plus the extra we need
+ * to map for the linear map.
+ */
+KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT
+
+INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
+RESERVE_BRK(pagetables, INIT_MAP_SIZE)
 
 /*
  * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
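To see what the new sizing arithmetic reserves in practice, here is a small standalone C sketch of the same computation. The constants (a 3G/1G split with __PAGE_OFFSET = 0xC0000000, a 512 MB KERNEL_IMAGE_SIZE, non-PAE with PTRS_PER_PGD = 1024) are illustrative assumptions, not values taken from this commit:

#include <stdio.h>

/* Assumed values for illustration only; the kernel gets these from
 * its config and headers (page_types.h, pgtable-2level_types.h). */
#define PAGE_SHIFT        12
#define PAGE_SIZE         (1UL << PAGE_SHIFT)
#define __PAGE_OFFSET     0xC0000000UL          /* 3G/1G split */
#define KERNEL_IMAGE_SIZE (512UL << 20)         /* 512 MB */
#define PTRS_PER_PGD      1024UL                /* non-PAE */

/* Mirrors the new PAGE_TABLE_SIZE(pages) macro, non-PAE branch:
 * each pagetable page holds PTRS_PER_PGD entries. */
static unsigned long page_table_size(unsigned long pages)
{
	return pages / PTRS_PER_PGD;
}

int main(void)
{
	/* Low-memory linear map: PAGE_OFFSET..4G, in 4K pages. */
	unsigned long lowmem_pages =
		(unsigned long)(((1ULL << 32) - __PAGE_OFFSET) >> PAGE_SHIFT);

	/* MAPPING_BEYOND_END: pagetable bytes needed for that map. */
	unsigned long mapping_beyond_end =
		page_table_size(lowmem_pages) << PAGE_SHIFT;

	/* KERNEL_PAGES and INIT_MAP_SIZE, as in the new head_32.S. */
	unsigned long kernel_pages =
		(KERNEL_IMAGE_SIZE + mapping_beyond_end) >> PAGE_SHIFT;
	unsigned long init_map_size =
		page_table_size(kernel_pages) * PAGE_SIZE;

	printf("MAPPING_BEYOND_END = %lu KB\n", mapping_beyond_end >> 10);
	printf("INIT_MAP_SIZE      = %lu KB\n", init_map_size >> 10);
	return 0;
}

With these assumptions it prints 1024 KB for MAPPING_BEYOND_END and 512 KB for INIT_MAP_SIZE; the latter is the brk space RESERVE_BRK() sets aside, replacing the fixed worst-case reservation the deleted LOW_PAGES/BOOTBITMAP_SIZE math produced.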
@@ -166,10 +164,10 @@ num_subarch_entries = (. - subarch_entries) / 4
 
 /*
  * Initialize page tables. This creates a PDE and a set of page
- * tables, which are located immediately beyond _end. The variable
- * init_pg_tables_end is set up to point to the first "safe" location.
+ * tables, which are located immediately beyond __brk_base. The variable
+ * _brk_end is set up to point to the first "safe" location.
  * Mappings are created both at virtual address 0 (identity mapping)
- * and PAGE_OFFSET for up to _end+sizeof(page tables)+INIT_MAP_BEYOND_END.
+ * and PAGE_OFFSET for up to _end.
  *
  * Note that the stack is not yet set up!
  */
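The __brk_base/_brk_end pair referenced here behaves like a simple bump allocator over space reserved at link time. A minimal C sketch of that idea, with toy sizes and a helper modeled on (but not copied from) the kernel's extend_brk():

#include <stdint.h>
#include <stddef.h>

/* Toy stand-in for the linker-reserved brk region: in the real kernel,
 * RESERVE_BRK() arranges for this space to exist after _end. */
static char  __brk_base[64 * 1024];
static char *_brk_end = __brk_base;       /* first "safe" location */

/* Bump-allocate from the brk region, in the spirit of extend_brk().
 * The real kernel also checks against a brk limit and panics on
 * overflow; that is omitted here. */
static void *brk_alloc(size_t size, size_t align)
{
	uintptr_t p = ((uintptr_t)_brk_end + align - 1) & ~(uintptr_t)(align - 1);

	_brk_end = (char *)p + size;
	return (void *)p;
}

In the hunks below, head_32.S plays the allocator itself: it hands out pagetable pages starting at pa(__brk_base), then stores the first unused address into _brk_end so later boot code continues allocating from there.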
@@ -190,8 +188,7 @@ default_entry:
 
 	xorl %ebx,%ebx				/* %ebx is kept at zero */
 
-	movl $pa(pg0), %edi
-	movl %edi, pa(init_pg_tables_start)
+	movl $pa(__brk_base), %edi
 	movl $pa(swapper_pg_pmd), %edx
 	movl $PTE_IDENT_ATTR, %eax
 10:
@@ -209,14 +206,14 @@ default_entry:
 	loop 11b
 
 	/*
-	 * End condition: we must map up to and including INIT_MAP_BEYOND_END
-	 * bytes beyond the end of our own page tables.
+	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
 	 */
-	leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
+	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
 	cmpl %ebp,%eax
 	jb 10b
 1:
-	movl %edi,pa(init_pg_tables_end)
+	addl $__PAGE_OFFSET, %edi
+	movl %edi, pa(_brk_end)
 	shrl $12, %eax
 	movl %eax, pa(max_pfn_mapped)
 
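The loop this hunk terminates keeps (physical address | attribute bits) of the next page to map in %eax, and stops once that value passes pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR. A hedged C rendering of just that termination logic; the pagetable hook-up done by the full loop is elided, and the function and parameter names are invented for illustration:

#include <stdint.h>

#define PTE_IDENT_ATTR 0x003U  /* PRESENT | RW, as in pgtable_types.h */

/* Fill PTEs until the mapping covers pa_end + mapping_beyond_end.
 * 'entry' plays the role of %eax, 'limit' the role of %ebp. */
static void fill_initial_ptes(uint32_t *pte, uint32_t pa_end,
			      uint32_t mapping_beyond_end)
{
	uint32_t entry = PTE_IDENT_ATTR;
	uint32_t limit = pa_end + mapping_beyond_end + PTE_IDENT_ATTR;

	do {
		*pte++ = entry;         /* stosl                       */
		entry += 0x1000;        /* addl $0x1000,%eax: next 4K  */
	} while (entry < limit);        /* cmpl %ebp,%eax; jb 10b      */
}

Adding PTE_IDENT_ATTR to both sides keeps the comparison exact: every PTE written is (page | attrs), so the limit must carry the same attribute bits.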
@@ -227,8 +224,7 @@ default_entry:
 
 page_pde_offset = (__PAGE_OFFSET >> 20);
 
-	movl $pa(pg0), %edi
-	movl %edi, pa(init_pg_tables_start)
+	movl $pa(__brk_base), %edi
 	movl $pa(swapper_pg_dir), %edx
 	movl $PTE_IDENT_ATTR, %eax
 10:
@@ -242,14 +238,13 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
 	addl $0x1000,%eax
 	loop 11b
 	/*
-	 * End condition: we must map up to and including INIT_MAP_BEYOND_END
-	 * bytes beyond the end of our own page tables; the +0x007 is
-	 * the attribute bits
+	 * End condition: we must map up to the end + MAPPING_BEYOND_END.
 	 */
-	leal (INIT_MAP_BEYOND_END+PTE_IDENT_ATTR)(%edi),%ebp
+	movl $pa(_end) + MAPPING_BEYOND_END + PTE_IDENT_ATTR, %ebp
 	cmpl %ebp,%eax
 	jb 10b
-	movl %edi,pa(init_pg_tables_end)
+	addl $__PAGE_OFFSET, %edi
+	movl %edi, pa(_brk_end)
 	shrl $12, %eax
 	movl %eax, pa(max_pfn_mapped)
 
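Both exit paths now do "addl $__PAGE_OFFSET, %edi" before the store: %edi holds the physical address of the first unused brk byte, while _brk_end is a kernel virtual address. That addition is the kernel's __va() conversion for the direct map; a one-line C reminder of the identity, with __PAGE_OFFSET assumed to be the usual 3G/1G value:

/* Physical <-> virtual for the direct map, as in asm/page.h.
 * __PAGE_OFFSET is assumed here; the real value comes from the config. */
#define __PAGE_OFFSET 0xC0000000UL
#define __va(paddr) ((void *)((unsigned long)(paddr) + __PAGE_OFFSET))
#define __pa(vaddr) ((unsigned long)(vaddr) - __PAGE_OFFSET)

/* So the new code effectively stores _brk_end = __va(%edi). */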
@@ -636,6 +631,7 @@ swapper_pg_fixmap:
 	.fill 1024,4,0
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
+
 /*
  * This starts the data section.
  */
