author     Linus Torvalds <torvalds@linux-foundation.org>  2011-03-22 13:41:36 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-22 13:41:36 -0400
commit     73d5a8675f32b8e22e11773b314324316f920192
tree       d353161d8d162afd6fa90267c96a902a0a9e85be /arch
parent     e77277dfe28baed73b6fd7c66c3fb44580a3dac1
parent     d8aa5ec3382e6a545b8f25178d1e0992d4927f19
Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
xen: update mask_rw_pte after kernel page tables init changes
xen: set max_pfn_mapped to the last pfn mapped
x86: Cleanup highmap after brk is concluded
Fix up trivial conflict (added header file includes) in
arch/x86/mm/init_64.c
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/head64.c |  3
-rw-r--r--  arch/x86/kernel/setup.c  | 25
-rw-r--r--  arch/x86/mm/init_64.c    | 11
-rw-r--r--  arch/x86/xen/mmu.c       | 21
4 files changed, 21 insertions(+), 39 deletions(-)
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 2d2673c28aff..5655c2272adb 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -77,9 +77,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
 	/* Make NULL pointers segfault */
 	zap_identity_mappings();
 
-	/* Cleanup the over mapped high alias */
-	cleanup_highmap();
-
 	max_pfn_mapped = KERNEL_IMAGE_SIZE >> PAGE_SHIFT;
 
 	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++) {
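The call removed here is not lost: cleanup_highmap() can only trim the high
kernel mapping safely once the brk area has stopped growing, so the merge
re-issues it from setup_arch() after reserve_brk() (see the setup.c diff
below). A minimal sketch of the resulting boot ordering; the two callees are
real kernel functions, the wrapper name is hypothetical:

	/* Hypothetical condensation of the relevant part of setup_arch(). */
	static void __init boot_ordering_sketch(void)
	{
		reserve_brk();      /* _brk_start = 0: brk is final now  */
		cleanup_highmap();  /* may trim mappings beyond _brk_end */
		/* memblock setup and the rest of setup_arch() follow */
	}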
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 9d43b28e0728..32bd87cbf982 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -294,30 +294,11 @@ static void __init init_gbpages(void)
 	else
 		direct_gbpages = 0;
 }
-
-static void __init cleanup_highmap_brk_end(void)
-{
-	pud_t *pud;
-	pmd_t *pmd;
-
-	mmu_cr4_features = read_cr4();
-
-	/*
-	 * _brk_end cannot change anymore, but it and _end may be
-	 * located on different 2M pages. cleanup_highmap(), however,
-	 * can only consider _end when it runs, so destroy any
-	 * mappings beyond _brk_end here.
-	 */
-	pud = pud_offset(pgd_offset_k(_brk_end), _brk_end);
-	pmd = pmd_offset(pud, _brk_end - 1);
-	while (++pmd <= pmd_offset(pud, (unsigned long)_end - 1))
-		pmd_clear(pmd);
-}
 #else
 static inline void init_gbpages(void)
 {
 }
-static inline void cleanup_highmap_brk_end(void)
+static void __init cleanup_highmap(void)
 {
 }
 #endif
@@ -330,8 +311,6 @@ static void __init reserve_brk(void)
 	/* Mark brk area as locked down and no longer taking any
 	   new allocations */
 	_brk_start = 0;
-
-	cleanup_highmap_brk_end();
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -950,6 +929,8 @@ void __init setup_arch(char **cmdline_p)
 	 */
 	reserve_brk();
 
+	cleanup_highmap();
+
 	memblock.current_limit = get_max_mapped();
 	memblock_x86_fill();
 
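cleanup_highmap_brk_end() existed only to patch up after an early
cleanup_highmap() that could not yet see the final _brk_end: the two symbols
may land on different 2MB pmds, leaving stale mappings between them. With
cleanup_highmap() deferred until after reserve_brk(), that second pass is
redundant. A self-contained sketch of the 2MB arithmetic; all addresses are
made up for the example:

	/* Why _brk_end matters: count the 2MB pmds between the rounded-up
	 * _brk_end and the rounded-up _end. Hypothetical values throughout. */
	#include <stdio.h>

	#define PMD_SIZE 0x200000ULL	/* 2MB, as on x86-64 */

	static unsigned long long pmd_roundup(unsigned long long x)
	{
		return (x + PMD_SIZE - 1) & ~(PMD_SIZE - 1);
	}

	int main(void)
	{
		unsigned long long brk_end = 0xffffffff81c03000ULL; /* hypothetical _brk_end */
		unsigned long long end     = 0xffffffff81e45000ULL; /* hypothetical _end */

		/* pmds kept by the old _end-bounded cleanup that the new
		 * _brk_end-bounded cleanup is free to clear: */
		printf("extra 2MB pmds trimmed: %llu\n",
		       (pmd_roundup(end) - pmd_roundup(brk_end)) / PMD_SIZE);
		return 0;	/* prints 1 with these example addresses */
	}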
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 0aa34669ed3f..2362b646178e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -52,6 +52,7 @@
 #include <asm/cacheflush.h>
 #include <asm/init.h>
 #include <asm/uv/uv.h>
+#include <asm/setup.h>
 
 static int __init parse_direct_gbpages_off(char *arg)
 {
@@ -294,18 +295,18 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
  * to the compile time generated pmds. This results in invalid pmds up
  * to the point where we hit the physaddr 0 mapping.
  *
- * We limit the mappings to the region from _text to _end. _end is
- * rounded up to the 2MB boundary. This catches the invalid pmds as
+ * We limit the mappings to the region from _text to _brk_end. _brk_end
+ * is rounded up to the 2MB boundary. This catches the invalid pmds as
  * well, as they are located before _text:
  */
 void __init cleanup_highmap(void)
 {
 	unsigned long vaddr = __START_KERNEL_map;
-	unsigned long end = roundup((unsigned long)_end, PMD_SIZE) - 1;
+	unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
+	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
 	pmd_t *pmd = level2_kernel_pgt;
-	pmd_t *last_pmd = pmd + PTRS_PER_PMD;
 
-	for (; pmd < last_pmd; pmd++, vaddr += PMD_SIZE) {
+	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
 		if (pmd_none(*pmd))
 			continue;
 		if (vaddr < (unsigned long) _text || vaddr > end)
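Two things change in cleanup_highmap() here: the trim boundary moves from
_end to _brk_end, and the walk over level2_kernel_pgt now stops at the last
mapped address (derived from max_pfn_mapped) instead of always visiting all
PTRS_PER_PMD entries. The second bound is what lets the Xen side below set
max_pfn_mapped to the last real pfn without this loop walking past it. A
standalone sketch of the new bounds; every value is hypothetical:

	/* Standalone sketch of the new cleanup_highmap() bounds; all
	 * addresses and the max_pfn_mapped value below are made up. */
	#include <stdio.h>

	#define PMD_SIZE   0x200000ULL
	#define PAGE_SHIFT 12

	int main(void)
	{
		unsigned long long start = 0xffffffff80000000ULL; /* __START_KERNEL_map */
		unsigned long long text  = 0xffffffff81000000ULL; /* hypothetical _text */
		unsigned long long brk   = 0xffffffff81c03000ULL; /* hypothetical _brk_end */
		unsigned long long max_pfn_mapped = 0x20000;      /* 512MB in 4KB pages */

		unsigned long long end = ((brk + PMD_SIZE - 1) & ~(PMD_SIZE - 1)) - 1;
		unsigned long long vaddr_end = start + (max_pfn_mapped << PAGE_SHIFT);
		unsigned long long vaddr, cleared = 0;

		/* Same shape as the new loop: stop at the last mapped pmd,
		 * clear anything below _text or above the rounded-up _brk_end. */
		for (vaddr = start; vaddr + PMD_SIZE - 1 < vaddr_end; vaddr += PMD_SIZE)
			if (vaddr < text || vaddr > end)
				cleared++;

		printf("pmds cleared: %llu of %llu walked\n",
		       cleared, (vaddr_end - start) / PMD_SIZE);
		return 0;	/* prints "pmds cleared: 249 of 256 walked" */
	}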
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 39ee7182fd18..c82df6c9c0f0 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1487,10 +1487,12 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 	/*
 	 * If the new pfn is within the range of the newly allocated
 	 * kernel pagetable, and it isn't being mapped into an
-	 * early_ioremap fixmap slot, make sure it is RO.
+	 * early_ioremap fixmap slot as a freshly allocated page, make sure
+	 * it is RO.
 	 */
-	if (!is_early_ioremap_ptep(ptep) &&
-	    pfn >= pgt_buf_start && pfn < pgt_buf_end)
+	if (((!is_early_ioremap_ptep(ptep) &&
+			pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
+			(is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
 		pte = pte_wrprotect(pte);
 
 	return pte;
@@ -1700,9 +1702,6 @@ static __init void xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
 	for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) {
 		pte_t pte;
 
-		if (pfn > max_pfn_mapped)
-			max_pfn_mapped = pfn;
-
 		if (!pte_none(pte_page[pteidx]))
 			continue;
 
@@ -1760,6 +1759,12 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 	pud_t *l3;
 	pmd_t *l2;
 
+	/* max_pfn_mapped is the last pfn mapped in the initial memory
+	 * mappings. Considering that on Xen after the kernel mappings we
+	 * have the mappings of some pages that don't exist in pfn space, we
+	 * set max_pfn_mapped to the last real pfn mapped. */
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
+
 	/* Zap identity mapping */
 	init_level4_pgt[0] = __pgd(0);
 
@@ -1864,9 +1869,7 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
 	initial_kernel_pmd =
 		extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE);
 
-	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) +
-				  xen_start_info->nr_pt_frames * PAGE_SIZE +
-				  512*1024);
+	max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list));
 
 	kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd);
 	memcpy(initial_kernel_pmd, kernel_pmd, sizeof(pmd_t) * PTRS_PER_PMD);
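The mask_rw_pte() hunk widens the write-protect rule: a pte outside an
early_ioremap slot is made RO when it points into the freshly built pagetable
area, and a pte inside such a slot is now also made RO unless it maps the
page currently being initialised, pgt_buf_end - 1 (the most recently
allocated pagetable page must stay writable). The later hunks pair with this
by deriving max_pfn_mapped from the location of mfn_list rather than from
pt_base plus a 512KB slack. A restatement of the new condition as a pure
predicate; the helper name is made up, the variable names are the kernel's:

	/* Hypothetical pure-predicate restatement of the new condition. */
	static int should_wrprotect(int in_early_ioremap_slot, unsigned long pfn,
				    unsigned long pgt_buf_start,
				    unsigned long pgt_buf_end)
	{
		if (!in_early_ioremap_slot)
			/* RO iff the pfn lies in the new pagetable area */
			return pfn >= pgt_buf_start && pfn < pgt_buf_end;

		/* early_ioremap slot: only the page being set up right now
		 * (pgt_buf_end - 1) may remain writable */
		return pfn != pgt_buf_end - 1;
	}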