author	Nicolas Pitre <nico@cam.org>	2008-09-15 16:44:55 -0400
committer	Nicolas Pitre <nico@cam.org>	2009-03-15 21:01:20 -0400
commit	d73cd42893f4cdc06e6829fea2347bb92cb789d1 (patch)
tree	fddff067f2b09aa13741bc9d05956429616e986a /arch/arm
parent	5f0fbf9ecaf354fa4bbf266fffdea2ea3d14a0ed (diff)
[ARM] kmap support
The kmap virtual area borrows a 2MB range at the top of the 16MB area
below PAGE_OFFSET currently reserved for kernel modules and/or the
XIP kernel.  This 2MB corresponds to the range covered by 2 consecutive
second-level page tables, or a single pmd entry as seen by the Linux
page table abstraction.  Because XIP kernels are unlikely to be seen
on systems needing highmem support, there shouldn't be any shortage of
VM space for modules (14 MB for modules is still way more than twice
the typical usage).

Because the virtual mapping of highmem pages can go away at any moment
after kunmap() is called on them, we need to bypass the delayed cache
flushing provided by flush_dcache_page() in that case.

The atomic kmap versions are based on fixmaps, and
__cpuc_flush_dcache_page() is used directly in that case.

Signed-off-by: Nicolas Pitre <nico@marvell.com>
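To make the layout concrete (a sketch assuming the common PAGE_OFFSET of
0xC0000000 and a 2MB PMD_SIZE; both values are configuration-dependent):

	MODULES_VADDR = 0xC0000000 - 16MB = 0xBF000000
	PKMAP_BASE    = 0xC0000000 -  2MB = 0xBFE00000  /* top 2MB of the module area */
	MODULES_END   = 0xBFE00000 (CONFIG_HIGHMEM)     /* 14MB left for modules */
	PAGE_OFFSET   = 0xC0000000                      /* start of the direct-mapped lowmem */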
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/include/asm/highmem.h	28
-rw-r--r--	arch/arm/include/asm/memory.h	13
-rw-r--r--	arch/arm/mm/Makefile	1
-rw-r--r--	arch/arm/mm/flush.c	2
-rw-r--r--	arch/arm/mm/highmem.c	116
-rw-r--r--	arch/arm/mm/mmu.c	13
6 files changed, 169 insertions(+), 4 deletions(-)
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
new file mode 100644
index 000000000000..023d5b374544
--- /dev/null
+++ b/arch/arm/include/asm/highmem.h
@@ -0,0 +1,28 @@
+#ifndef _ASM_HIGHMEM_H
+#define _ASM_HIGHMEM_H
+
+#include <asm/kmap_types.h>
+
+#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
+#define LAST_PKMAP		PTRS_PER_PTE
+#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
+#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot		PAGE_KERNEL
+
+#define flush_cache_kmaps()	flush_cache_all()
+
+extern pte_t *pkmap_page_table;
+
+extern void *kmap_high(struct page *page);
+extern void kunmap_high(struct page *page);
+
+extern void *kmap(struct page *page);
+extern void kunmap(struct page *page);
+extern void *kmap_atomic(struct page *page, enum km_type type);
+extern void kunmap_atomic(void *kvaddr, enum km_type type);
+extern void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
+extern struct page *kmap_atomic_to_page(const void *ptr);
+
+#endif
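Sanity-checking the new macros under the same assumptions (PAGE_OFFSET =
0xC0000000, PMD_SIZE = 2MB, 4KB pages, PTRS_PER_PTE = 512):

	PKMAP_BASE           = 0xC0000000 - 0x200000 = 0xBFE00000
	LAST_PKMAP           = 512         /* 512 slots * 4KB = the whole 2MB pmd */
	PKMAP_NR(0xBFE03000) = 0x3000 >> 12 = 3
	PKMAP_ADDR(3)        = 0xBFE00000 + (3 << 12) = 0xBFE03000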
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 0202a7c20e62..ae472bc376d3 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -44,14 +44,21 @@
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
-#define MODULES_END		(PAGE_OFFSET)
-#define MODULES_VADDR		(MODULES_END - 16*1048576)
-
+#define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024)
 #if TASK_SIZE > MODULES_VADDR
 #error Top of user space clashes with start of module space
 #endif
 
 /*
+ * The highmem pkmap virtual space shares the end of the module area.
+ */
+#ifdef CONFIG_HIGHMEM
+#define MODULES_END		(PAGE_OFFSET - PMD_SIZE)
+#else
+#define MODULES_END		(PAGE_OFFSET)
+#endif
+
+/*
  * The XIP kernel gets mapped at the bottom of the module vm area.
  * Since we use sections to map it, this macro replaces the physical address
  * with its virtual address while keeping offset from the base section.
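The net effect on the module window, with the same assumed PAGE_OFFSET:

	without CONFIG_HIGHMEM:  modules span 0xBF000000 .. 0xC0000000 (16MB)
	with CONFIG_HIGHMEM:     modules span 0xBF000000 .. 0xBFE00000 (14MB);
	                         0xBFE00000 .. 0xC0000000 goes to the pkmap area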
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 480f78a3611a..185e7dc7dcf2 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
 obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
+obj-$(CONFIG_HIGHMEM)		+= highmem.o
 
 obj-$(CONFIG_CPU_ABRT_NOMMU)	+= abort-nommu.o
 obj-$(CONFIG_CPU_ABRT_EV4)	+= abort-ev4.o
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 0fa9bf388f0b..4e283481cee1 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -192,7 +192,7 @@ void flush_dcache_page(struct page *page)
 	struct address_space *mapping = page_mapping(page);
 
 #ifndef CONFIG_SMP
-	if (mapping && !mapping_mapped(mapping))
+	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
 		set_bit(PG_dcache_dirty, &page->flags);
 	else
 #endif
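This one-line change is the bypass described in the commit message: the
deferred path flushes through the page's kernel mapping later, but a highmem
page's kernel mapping is transient. A hypothetical sequence showing the
hazard if the deferral were allowed:

	void *p = kmap_atomic(page, KM_USER0);  /* page mapped at a fixmap slot */
	memcpy(p, src, PAGE_SIZE);              /* dirties cache lines at p */
	kunmap_atomic(p, KM_USER0);             /* the slot may be reused at once */
	/* a flush deferred past this point would hit whatever page occupies
	   the slot by then, not the page whose data was actually dirtied */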
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
new file mode 100644
index 000000000000..a34954d9df7d
--- /dev/null
+++ b/arch/arm/mm/highmem.c
@@ -0,0 +1,116 @@
+/*
+ * arch/arm/mm/highmem.c -- ARM highmem support
+ *
+ * Author:	Nicolas Pitre
+ * Created:	september 8, 2008
+ * Copyright:	Marvell Semiconductors Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/interrupt.h>
+#include <asm/fixmap.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include "mm.h"
+
+void *kmap(struct page *page)
+{
+	might_sleep();
+	if (!PageHighMem(page))
+		return page_address(page);
+	return kmap_high(page);
+}
+EXPORT_SYMBOL(kmap);
+
+void kunmap(struct page *page)
+{
+	BUG_ON(in_interrupt());
+	if (!PageHighMem(page))
+		return;
+	kunmap_high(page);
+}
+EXPORT_SYMBOL(kunmap);
+
+void *kmap_atomic(struct page *page, enum km_type type)
+{
+	unsigned int idx;
+	unsigned long vaddr;
+
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	idx = type + KM_TYPE_NR * smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	/*
+	 * With debugging enabled, kunmap_atomic forces that entry to 0.
+	 * Make sure it was indeed properly unmapped.
+	 */
+	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+#endif
+	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
+	/*
+	 * When debugging is off, kunmap_atomic leaves the previous mapping
+	 * in place, so this TLB flush ensures the TLB is updated with the
+	 * new mapping.
+	 */
+	local_flush_tlb_kernel_page(vaddr);
+
+	return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void kunmap_atomic(void *kvaddr, enum km_type type)
+{
+	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
+
+	if (kvaddr >= (void *)FIXADDR_START) {
+		__cpuc_flush_dcache_page((void *)vaddr);
+#ifdef CONFIG_DEBUG_HIGHMEM
+		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
+		local_flush_tlb_kernel_page(vaddr);
+#else
+		(void) idx;  /* to kill a warning */
+#endif
+	}
+	pagefault_enable();
+}
+EXPORT_SYMBOL(kunmap_atomic);
+
+void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
+{
+	unsigned int idx;
+	unsigned long vaddr;
+
+	pagefault_disable();
+
+	idx = type + KM_TYPE_NR * smp_processor_id();
+	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
+#endif
+	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
+	local_flush_tlb_kernel_page(vaddr);
+
+	return (void *)vaddr;
+}
+
+struct page *kmap_atomic_to_page(const void *ptr)
+{
+	unsigned long vaddr = (unsigned long)ptr;
+	pte_t *pte;
+
+	if (vaddr < FIXADDR_START)
+		return virt_to_page(ptr);
+
+	pte = TOP_PTE(vaddr);
+	return pte_page(*pte);
+}
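For reference, a typical caller of this API might look like the following
sketch (the helper name is hypothetical; the calls are the ones defined
above, with KM_USER0 coming from asm/kmap_types.h):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical example: clear a page that may live in highmem. */
	static void clear_any_page(struct page *page)
	{
		void *vaddr = kmap_atomic(page, KM_USER0); /* no sleeping until kunmap */
		memset(vaddr, 0, PAGE_SIZE);
		kunmap_atomic(vaddr, KM_USER0);            /* flushes and releases the slot */
	}

The non-atomic kmap()/kunmap() pair is for contexts that may sleep; like the
atomic variants, it short-circuits to page_address() for lowmem pages.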
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index d4d082c5c2d4..4810a4c9ffce 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -21,6 +21,7 @@
 #include <asm/setup.h>
 #include <asm/sizes.h>
 #include <asm/tlb.h>
+#include <asm/highmem.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -895,6 +896,17 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	flush_cache_all();
 }
 
+static void __init kmap_init(void)
+{
+#ifdef CONFIG_HIGHMEM
+	pmd_t *pmd = pmd_off_k(PKMAP_BASE);
+	pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
+	BUG_ON(!pmd_none(*pmd) || !pte);
+	__pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
+	pkmap_page_table = pte + PTRS_PER_PTE;
+#endif
+}
+
 /*
  * paging_init() sets up the page tables, initialises the zone memory
  * maps, and sets up the zero page, bad page and bad page tables.
@@ -908,6 +920,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	prepare_page_table();
 	bootmem_init();
 	devicemaps_init(mdesc);
+	kmap_init();
 
 	top_pmd = pmd_off_k(0xffff0000);
 
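One subtlety in kmap_init(): on classic ARM MMUs a hardware second-level
table has 256 entries, and the kernel keeps a parallel array of Linux-view
PTEs alongside the hardware ones, so backing a single pmd takes
2 * PTRS_PER_PTE entries. A sketch of the allocated block, assuming the
page table layout of this era (offsets in pte_t units):

	pte + 0      hardware L2 table 0, maps PKMAP_BASE .. +1MB
	pte + 256    hardware L2 table 1, maps PKMAP_BASE + 1MB .. +2MB
	pte + 512    Linux-view PTEs for both  <-- pkmap_page_table

The pmd is populated with __pa(pte), the hardware tables, while
pkmap_page_table points at the Linux view the generic pkmap code walks.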