path: root/arch/arm/mm/mmu.c
author    Nicolas Pitre <nico@cam.org>  2008-09-15 16:44:55 -0400
committer Nicolas Pitre <nico@cam.org>  2009-03-15 21:01:20 -0400
commit    d73cd42893f4cdc06e6829fea2347bb92cb789d1 (patch)
tree      fddff067f2b09aa13741bc9d05956429616e986a /arch/arm/mm/mmu.c
parent    5f0fbf9ecaf354fa4bbf266fffdea2ea3d14a0ed (diff)
[ARM] kmap support
The kmap virtual area borrows a 2MB range at the top of the 16MB area below PAGE_OFFSET currently reserved for kernel modules and/or the XIP kernel. This 2MB corresponds to the range covered by 2 consecutive second-level page tables, or a single pmd entry as seen by the Linux page table abstraction. Because XIP kernels are unlikely to be seen on systems needing highmem support, there shouldn't be any shortage of VM space for modules (14 MB for modules is still way more than twice the typical usage).

Because the virtual mapping of highmem pages can go away at any moment after kunmap() is called on them, we need to bypass the delayed cache flushing provided by flush_dcache_page() in that case. The atomic kmap versions are based on fixmaps, and __cpuc_flush_dcache_page() is used directly in that case.

Signed-off-by: Nicolas Pitre <nico@marvell.com>
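A quick back-of-the-envelope view of the layout described above. This is an illustration only, not part of the patch; PAGE_OFFSET, PAGE_SIZE and PMD_SIZE below are the usual classic-ARM values rather than values taken from this diff.

/*
 * Illustration only -- not part of the patch.  Assumes the common
 * classic-ARM configuration: PAGE_OFFSET = 0xC0000000, 4KB pages,
 * and a 2MB pmd granule (two 1MB second-level tables per Linux pmd).
 */
#include <stdio.h>

#define PAGE_OFFSET	0xC0000000UL
#define PAGE_SIZE	0x1000UL			/* 4KB */
#define PMD_SIZE	0x200000UL			/* 2MB */
#define PKMAP_BASE	(PAGE_OFFSET - PMD_SIZE)	/* top 2MB of the module area */

int main(void)
{
	unsigned long slots = PMD_SIZE / PAGE_SIZE;	/* 512 = one Linux pte table */

	printf("pkmap window : 0x%08lx - 0x%08lx (%lu page slots)\n",
	       PKMAP_BASE, PAGE_OFFSET - 1, slots);
	printf("modules keep : 0x%08lx - 0x%08lx (14MB)\n",
	       PAGE_OFFSET - (16UL << 20), PKMAP_BASE - 1);
	return 0;
}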
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--  arch/arm/mm/mmu.c  13
1 file changed, 13 insertions, 0 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index d4d082c5c2d4..4810a4c9ffce 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -21,6 +21,7 @@
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/highmem.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -895,6 +896,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	flush_cache_all();
 }
 
+static void __init kmap_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+	pmd_t *pmd = pmd_off_k(PKMAP_BASE);
+	pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+	BUG_ON(!pmd_none(*pmd) || !pte);
+	__pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
+	pkmap_page_table = pte + PTRS_PER_PTE;
+#endif
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -908,6 +920,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	prepare_page_table();
 	bootmem_init();
 	devicemaps_init(mdesc);
+	kmap_init();
 
 	top_pmd = pmd_off_k(0xffff0000);
 
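For context (not part of the patch): pkmap_page_table is made to point at pte + PTRS_PER_PTE because, in the classic ARM page table layout of that era, the two hardware second-level tables sit in the first half of the allocation and the Linux shadow ptes in the second half, and the generic pkmap code indexes the table with Linux pte semantics. The sketch below paraphrases how mm/highmem.c ends up consuming the table set up by kmap_init(); the slot accounting and locking of the real map_new_virtual() are omitted.

/*
 * Sketch only, paraphrased from the generic highmem code of that era;
 * not part of this diff.
 */
#include <linux/mm.h>
#include <linux/highmem.h>

static void pkmap_map_one(struct page *page, unsigned int pkmap_nr)
{
	unsigned long vaddr = PKMAP_ADDR(pkmap_nr);	/* PKMAP_BASE + slot offset */

	/*
	 * Writes the Linux pte in pkmap_page_table; on classic ARM the
	 * matching hardware pte (first half of the kmap_init() allocation)
	 * is updated by the same set_pte operation.
	 */
	set_pte_at(&init_mm, vaddr,
		   &pkmap_page_table[pkmap_nr], mk_pte(page, kmap_prot));
}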