author     Linus Torvalds <torvalds@linux-foundation.org>  2009-12-08 11:12:43 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-12-08 11:12:43 -0500
commit     79c9601c2e0dbbe69895d302de4d19f3a31fbd30 (patch)
tree       78d4be2df851b2b4106adcfd736622a90cecf9e9 /arch/arm/mm
parent     41440ffe21f29bdb985cab76b2d0b06d83e63b19 (diff)
parent     3d14b5beba35250c548d3851a2b84fce742d8311 (diff)
Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm
* 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm: (272 commits)
Fix soc_common PCMCIA configuration
ARM: 5827/1: SA1100: h3100/h3600: emit messages on failed gpio_request
ARM: 5826/1: SA1100: h3100/h3600: always build htc-egpio driver
ARM: 5825/1: SA1100: h3600: update defconfig
ARM: 5824/1: SA1100: reuse h3600 PCMCIA driver on h3100
ARM: 5823/1: SA1100: h3100/h3600: add support for gpio-keys
ARM: 5822/1: SA1100: h3100/h3600: clean up #includes
ARM: 5821/1: SA1100: h3100/h3600: revise copyright boilerplates
ARM: 5820/1: SA1100: h3100/h3600: split h3600.c
ARM: 5819/1: SA1100: h3100/h3600: merge h3600.h and h3600_gpio.h into h3xxx.h
ARM: 5818/1: SA1100: h3100/h3600: drop old GPIO definitions
ARM: 5817/1: SA1100: h3100/h3600: configure all unused gpios as inputs
ARM: 5816/1: SA1100: h3600: remove IRQ_GPIO_* definitions
ARM: 5815/1: SA1100: h3100/h3600: remove now unused assign_h3600_egpio handlers
ARM: 5814/1: SA1100: h3100/h3600: convert all users of assign_h3600_egpio to gpiolib
ARM: 5813/1: SA1100: h3100/h3600: add htc-egpio driver
ARM: 5812/1: SA1100: h3100/h3600: separate machine-specific LCD helpers
ARM: 5811/2: pcmcia: convert sa1100_h3600 driver to gpiolib
ARM: 5799/1: SA1100: h3600: stop setting direction for LCD pins
ARM: 5798/1: SA1100: h3600: remove unused cruft from h3600.h
...
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig           13
-rw-r--r--  arch/arm/mm/Makefile           4
-rw-r--r--  arch/arm/mm/cache-l2x0.c      25
-rw-r--r--  arch/arm/mm/cache-tauros2.c  263
-rw-r--r--  arch/arm/mm/copypage-v6.c      8
-rw-r--r--  arch/arm/mm/dma-mapping.c    497
-rw-r--r--  arch/arm/mm/fault-armv.c       9
-rw-r--r--  arch/arm/mm/flush.c           49
-rw-r--r--  arch/arm/mm/mm.h               2
-rw-r--r--  arch/arm/mm/mmu.c              4
-rw-r--r--  arch/arm/mm/proc-v6.S         33
-rw-r--r--  arch/arm/mm/proc-xsc3.S        2
-rw-r--r--  arch/arm/mm/vmregion.c       131
-rw-r--r--  arch/arm/mm/vmregion.h        29
14 files changed, 716 insertions(+), 353 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9264d814cd7a..dd4698c67cc3 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -388,7 +388,7 @@ config CPU_FEROCEON_OLD_ID
 
 # ARMv6
 config CPU_V6
-        bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
+        bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX || ARCH_DOVE
         select CPU_32v6
         select CPU_ABRT_EV6
         select CPU_PABRT_V6
@@ -764,6 +764,15 @@ config CACHE_L2X0
         help
           This option enables the L2x0 PrimeCell.
 
+config CACHE_TAUROS2
+        bool "Enable the Tauros2 L2 cache controller"
+        depends on ARCH_DOVE
+        default y
+        select OUTER_CACHE
+        help
+          This option enables the Tauros2 L2 cache controller (as
+          found on PJ1/PJ4).
+
 config CACHE_XSC3L2
         bool "Enable the L2 cache on XScale3"
         depends on CPU_XSC3
@@ -774,5 +783,5 @@ config CACHE_XSC3L2
 
 config ARM_L1_CACHE_SHIFT
         int
-        default 6 if ARCH_OMAP3
+        default 6 if ARCH_OMAP3 || ARCH_S5PC1XX
         default 5
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 055cb2aa8134..827e238e5d4a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
                                    iomap.o
 
 obj-$(CONFIG_MMU)               += fault-armv.o flush.o ioremap.o mmap.o \
-                                   pgd.o mmu.o
+                                   pgd.o mmu.o vmregion.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y                           += nommu.o
@@ -87,4 +87,4 @@ obj-$(CONFIG_CPU_V7) += proc-v7.o
 obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
 obj-$(CONFIG_CACHE_L2X0)        += cache-l2x0.o
 obj-$(CONFIG_CACHE_XSC3L2)      += cache-xsc3l2.o
-
+obj-$(CONFIG_CACHE_TAUROS2)     += cache-tauros2.o
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index b480f1d3591f..747f9a9021bb 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -99,18 +99,25 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
         l2x0_base = base;
 
-        /* disable L2X0 */
-        writel(0, l2x0_base + L2X0_CTRL);
+        /*
+         * Check if l2x0 controller is already enabled.
+         * If you are booting from non-secure mode
+         * accessing the below registers will fault.
+         */
+        if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
 
-        aux = readl(l2x0_base + L2X0_AUX_CTRL);
-        aux &= aux_mask;
-        aux |= aux_val;
-        writel(aux, l2x0_base + L2X0_AUX_CTRL);
+                /* l2x0 controller is disabled */
 
-        l2x0_inv_all();
+                aux = readl(l2x0_base + L2X0_AUX_CTRL);
+                aux &= aux_mask;
+                aux |= aux_val;
+                writel(aux, l2x0_base + L2X0_AUX_CTRL);
 
-        /* enable L2X0 */
-        writel(1, l2x0_base + L2X0_CTRL);
+                l2x0_inv_all();
+
+                /* enable L2X0 */
+                writel(1, l2x0_base + L2X0_CTRL);
+        }
 
         outer_cache.inv_range = l2x0_inv_range;
         outer_cache.clean_range = l2x0_clean_range;
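The hunk above makes l2x0_init() probe the control register first: per the new comment, a kernel entered from non-secure mode would fault on these register accesses, so the auxiliary-control setup, way invalidation, and enable are only performed while the controller is still disabled (CTRL bit 0 clear). A minimal standalone sketch of that guard follows; the fake register array and harness are assumptions for illustration, not kernel API:

    #include <stdio.h>
    #include <stdint.h>

    #define L2X0_AUX_CTRL 0         /* hypothetical offsets into a fake block */
    #define L2X0_CTRL     1

    static uint32_t regs[2];        /* stands in for the memory-mapped L2X0 */

    static void sketch_l2x0_init(uint32_t aux_val, uint32_t aux_mask)
    {
        /* only touch setup registers while CTRL bit 0 is clear */
        if (!(regs[L2X0_CTRL] & 1)) {
            uint32_t aux = regs[L2X0_AUX_CTRL];

            aux &= aux_mask;
            aux |= aux_val;
            regs[L2X0_AUX_CTRL] = aux;
            /* ... l2x0_inv_all() would run here ... */
            regs[L2X0_CTRL] = 1;    /* enable */
        }
    }

    int main(void)
    {
        sketch_l2x0_init(0x3, ~0u);
        printf("ctrl=%u aux=%#x\n", regs[L2X0_CTRL], regs[L2X0_AUX_CTRL]);
        sketch_l2x0_init(0xff, 0);  /* already enabled: setup is skipped */
        printf("ctrl=%u aux=%#x\n", regs[L2X0_CTRL], regs[L2X0_AUX_CTRL]);
        return 0;
    }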
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
new file mode 100644
index 000000000000..50868651890f
--- /dev/null
+++ b/arch/arm/mm/cache-tauros2.c
@@ -0,0 +1,263 @@
+/*
+ * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
+ *
+ * Copyright (C) 2008 Marvell Semiconductor
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ * References:
+ * - PJ1 CPU Core Datasheet,
+ *   Document ID MV-S104837-01, Rev 0.7, January 24 2008.
+ * - PJ4 CPU Core Datasheet,
+ *   Document ID MV-S105190-00, Rev 0.7, March 14 2008.
+ */
+
+#include <linux/init.h>
+#include <asm/cacheflush.h>
+#include <asm/hardware/cache-tauros2.h>
+
+
+/*
+ * When Tauros2 is used on a CPU that supports the v7 hierarchical
+ * cache operations, the cache handling code in proc-v7.S takes care
+ * of everything, including handling DMA coherency.
+ *
+ * So, we only need to register outer cache operations here if we're
+ * being used on a pre-v7 CPU, and we only need to build support for
+ * outer cache operations into the kernel image if the kernel has been
+ * configured to support a pre-v7 CPU.
+ */
+#if __LINUX_ARM_ARCH__ < 7
+/*
+ * Low-level cache maintenance operations.
+ */
+static inline void tauros2_clean_pa(unsigned long addr)
+{
+        __asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr));
+}
+
+static inline void tauros2_clean_inv_pa(unsigned long addr)
+{
+        __asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr));
+}
+
+static inline void tauros2_inv_pa(unsigned long addr)
+{
+        __asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr));
+}
+
+
+/*
+ * Linux primitives.
+ *
+ * Note that the end addresses passed to Linux primitives are
+ * noninclusive.
+ */
+#define CACHE_LINE_SIZE         32
+
+static void tauros2_inv_range(unsigned long start, unsigned long end)
+{
+        /*
+         * Clean and invalidate partial first cache line.
+         */
+        if (start & (CACHE_LINE_SIZE - 1)) {
+                tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
+                start = (start | (CACHE_LINE_SIZE - 1)) + 1;
+        }
+
+        /*
+         * Clean and invalidate partial last cache line.
+         */
+        if (end & (CACHE_LINE_SIZE - 1)) {
+                tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
+                end &= ~(CACHE_LINE_SIZE - 1);
+        }
+
+        /*
+         * Invalidate all full cache lines between 'start' and 'end'.
+         */
+        while (start < end) {
+                tauros2_inv_pa(start);
+                start += CACHE_LINE_SIZE;
+        }
+
+        dsb();
+}
+
+static void tauros2_clean_range(unsigned long start, unsigned long end)
+{
+        start &= ~(CACHE_LINE_SIZE - 1);
+        while (start < end) {
+                tauros2_clean_pa(start);
+                start += CACHE_LINE_SIZE;
+        }
+
+        dsb();
+}
+
+static void tauros2_flush_range(unsigned long start, unsigned long end)
+{
+        start &= ~(CACHE_LINE_SIZE - 1);
+        while (start < end) {
+                tauros2_clean_inv_pa(start);
+                start += CACHE_LINE_SIZE;
+        }
+
+        dsb();
+}
+#endif
+
+static inline u32 __init read_extra_features(void)
+{
+        u32 u;
+
+        __asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));
+
+        return u;
+}
+
+static inline void __init write_extra_features(u32 u)
+{
+        __asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
+}
+
+static void __init disable_l2_prefetch(void)
+{
+        u32 u;
+
+        /*
+         * Read the CPU Extra Features register and verify that the
+         * Disable L2 Prefetch bit is set.
+         */
+        u = read_extra_features();
+        if (!(u & 0x01000000)) {
+                printk(KERN_INFO "Tauros2: Disabling L2 prefetch.\n");
+                write_extra_features(u | 0x01000000);
+        }
+}
+
+static inline int __init cpuid_scheme(void)
+{
+        extern int processor_id;
+
+        return !!((processor_id & 0x000f0000) == 0x000f0000);
+}
+
+static inline u32 __init read_mmfr3(void)
+{
+        u32 mmfr3;
+
+        __asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3));
+
+        return mmfr3;
+}
+
+static inline u32 __init read_actlr(void)
+{
+        u32 actlr;
+
+        __asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
+
+        return actlr;
+}
+
+static inline void __init write_actlr(u32 actlr)
+{
+        __asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
+}
+
+void __init tauros2_init(void)
+{
+        extern int processor_id;
+        char *mode = NULL;      /* stays NULL if no personality mode matches */
+
+        disable_l2_prefetch();
+
+#ifdef CONFIG_CPU_32v5
+        if ((processor_id & 0xff0f0000) == 0x56050000) {
+                u32 feat;
+
+                /*
+                 * v5 CPUs with Tauros2 have the L2 cache enable bit
+                 * located in the CPU Extra Features register.
+                 */
+                feat = read_extra_features();
+                if (!(feat & 0x00400000)) {
+                        printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+                        write_extra_features(feat | 0x00400000);
+                }
+
+                mode = "ARMv5";
+                outer_cache.inv_range = tauros2_inv_range;
+                outer_cache.clean_range = tauros2_clean_range;
+                outer_cache.flush_range = tauros2_flush_range;
+        }
+#endif
+
+#ifdef CONFIG_CPU_32v6
+        /*
+         * Check whether this CPU lacks support for the v7 hierarchical
+         * cache ops.  (PJ4 is in its v6 personality mode if the MMFR3
+         * register indicates no support for the v7 hierarchical cache
+         * ops.)
+         */
+        if (cpuid_scheme() && (read_mmfr3() & 0xf) == 0) {
+                /*
+                 * When Tauros2 is used in an ARMv6 system, the L2
+                 * enable bit is in the ARMv6 ARM-mandated position
+                 * (bit [26] of the System Control Register).
+                 */
+                if (!(get_cr() & 0x04000000)) {
+                        printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+                        adjust_cr(0x04000000, 0x04000000);
+                }
+
+                mode = "ARMv6";
+                outer_cache.inv_range = tauros2_inv_range;
+                outer_cache.clean_range = tauros2_clean_range;
+                outer_cache.flush_range = tauros2_flush_range;
+        }
+#endif
+
+#ifdef CONFIG_CPU_32v7
+        /*
+         * Check whether this CPU has support for the v7 hierarchical
+         * cache ops.  (PJ4 is in its v7 personality mode if the MMFR3
+         * register indicates support for the v7 hierarchical cache
+         * ops.)
+         *
+         * (Although strictly speaking there may exist CPUs that
+         * implement the v7 cache ops but are only ARMv6 CPUs (due to
+         * not complying with all of the other ARMv7 requirements),
+         * there are no real-life examples of Tauros2 being used on
+         * such CPUs as of yet.)
+         */
+        if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) {
+                u32 actlr;
+
+                /*
+                 * When Tauros2 is used in an ARMv7 system, the L2
+                 * enable bit is located in the Auxiliary System Control
+                 * Register (which is the only register allowed by the
+                 * ARMv7 spec to contain fine-grained cache control bits).
+                 */
+                actlr = read_actlr();
+                if (!(actlr & 0x00000002)) {
+                        printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
+                        write_actlr(actlr | 0x00000002);
+                }
+
+                mode = "ARMv7";
+        }
+#endif
+
+        if (mode == NULL) {
+                printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n");
+                return;
+        }
+
+        printk(KERN_INFO "Tauros2: L2 cache support initialised "
+                         "in %s mode.\n", mode);
+}
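tauros2_inv_range() above shows the usual outer-cache rounding: a partial line at either end of [start, end) must be cleaned and invalidated (plain invalidation could discard unrelated dirty data sharing the line), while fully covered lines are simply invalidated. A standalone sketch of that arithmetic, with the mcr-based maintenance operations replaced by printf() (an assumption for illustration):

    #include <stdio.h>

    #define CACHE_LINE_SIZE 32

    static void sketch_inv_range(unsigned long start, unsigned long end)
    {
        if (start & (CACHE_LINE_SIZE - 1)) {
            printf("clean+inv %#lx (partial head)\n",
                   start & ~(CACHE_LINE_SIZE - 1UL));
            start = (start | (CACHE_LINE_SIZE - 1)) + 1;
        }
        if (end & (CACHE_LINE_SIZE - 1)) {
            printf("clean+inv %#lx (partial tail)\n",
                   end & ~(CACHE_LINE_SIZE - 1UL));
            end &= ~(CACHE_LINE_SIZE - 1UL);
        }
        while (start < end) {
            printf("inv       %#lx\n", start);
            start += CACHE_LINE_SIZE;
        }
    }

    int main(void)
    {
        /* 0x1010..0x10f0 straddles partial lines at both ends */
        sketch_inv_range(0x1010, 0x10f0);
        return 0;
    }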
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 4127a7bddfe5..841f355319bf 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,6 +41,14 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
         kfrom = kmap_atomic(from, KM_USER0);
         kto = kmap_atomic(to, KM_USER1);
         copy_page(kto, kfrom);
+#ifdef CONFIG_HIGHMEM
+        /*
+         * kmap_atomic() doesn't set the page virtual address, and
+         * kunmap_atomic() takes care of cache flushing already.
+         */
+        if (page_address(to) != NULL)
+#endif
+                __cpuc_flush_dcache_page(kto);
         kunmap_atomic(kto, KM_USER1);
         kunmap_atomic(kfrom, KM_USER0);
 }
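The guard above matters because with CONFIG_HIGHMEM the destination page may have no permanent kernel mapping: page_address() returns NULL for a page that is only transiently mapped by kmap_atomic(), and in that case kunmap_atomic() already performs the flush. A standalone sketch of the same decision (the one-field struct page and stub flush are assumptions for illustration):

    #include <stdio.h>
    #include <stddef.h>

    struct page { void *vaddr; };       /* NULL when only transiently mapped */

    static void *page_address(struct page *p) { return p->vaddr; }
    static void flush_dcache(void *kto) { printf("flush %p\n", kto); }

    static void copy_done(struct page *to, void *kto)
    {
        /* mirrors the #ifdef CONFIG_HIGHMEM guard in the hunk above */
        if (page_address(to) != NULL)
            flush_dcache(kto);
        else
            printf("skip flush: kunmap_atomic() handles %p\n", kto);
    }

    int main(void)
    {
        struct page lowmem = { .vaddr = (void *)0xc0000000UL };
        struct page highmem = { .vaddr = NULL };
        char buf[4096];

        copy_done(&lowmem, buf);
        copy_done(&highmem, buf);
        return 0;
    }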
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b9590a7085ca..26325cb5d368 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -63,194 +63,152 @@ static u64 get_coherent_dma_mask(struct device *dev)
         return mask;
 }
 
-#ifdef CONFIG_MMU
 /*
- * These are the page tables (2MB each) covering uncached, DMA consistent allocations
+ * Allocate a DMA buffer for 'dev' of size 'size' using the
+ * specified gfp mask.  Note that 'size' must be page aligned.
  */
-static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
-static DEFINE_SPINLOCK(consistent_lock);
+static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
+{
+        unsigned long order = get_order(size);
+        struct page *page, *p, *e;
+        void *ptr;
+        u64 mask = get_coherent_dma_mask(dev);
 
-/*
- * VM region handling support.
- *
- * This should become something generic, handling VM region allocations for
- * vmalloc and similar (ioremap, module space, etc).
- *
- * I envisage vmalloc()'s supporting vm_struct becoming:
- *
- *  struct vm_struct {
- *    struct vm_region  region;
- *    unsigned long     flags;
- *    struct page       **pages;
- *    unsigned int      nr_pages;
- *    unsigned long     phys_addr;
- *  };
- *
- * get_vm_area() would then call vm_region_alloc with an appropriate
- * struct vm_region head (eg):
- *
- *  struct vm_region vmalloc_head = {
- *      .vm_list        = LIST_HEAD_INIT(vmalloc_head.vm_list),
- *      .vm_start       = VMALLOC_START,
- *      .vm_end         = VMALLOC_END,
- *  };
- *
- * However, vmalloc_head.vm_start is variable (typically, it is dependent on
- * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
- * would have to initialise this each time prior to calling vm_region_alloc().
- */
-struct arm_vm_region {
-        struct list_head        vm_list;
-        unsigned long           vm_start;
-        unsigned long           vm_end;
-        struct page             *vm_pages;
-        int                     vm_active;
-};
+#ifdef CONFIG_DMA_API_DEBUG
+        u64 limit = (mask + 1) & ~mask;
+        if (limit && size >= limit) {
+                dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
+                        size, mask);
+                return NULL;
+        }
+#endif
 
-static struct arm_vm_region consistent_head = {
-        .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
-        .vm_start       = CONSISTENT_BASE,
-        .vm_end         = CONSISTENT_END,
-};
+        if (!mask)
+                return NULL;
 
-static struct arm_vm_region *
-arm_vm_region_alloc(struct arm_vm_region *head, size_t size, gfp_t gfp)
-{
-        unsigned long addr = head->vm_start, end = head->vm_end - size;
-        unsigned long flags;
-        struct arm_vm_region *c, *new;
-
-        new = kmalloc(sizeof(struct arm_vm_region), gfp);
-        if (!new)
-                goto out;
-
-        spin_lock_irqsave(&consistent_lock, flags);
-
-        list_for_each_entry(c, &head->vm_list, vm_list) {
-                if ((addr + size) < addr)
-                        goto nospc;
-                if ((addr + size) <= c->vm_start)
-                        goto found;
-                addr = c->vm_end;
-                if (addr > end)
-                        goto nospc;
-        }
+        if (mask < 0xffffffffULL)
+                gfp |= GFP_DMA;
+
+        page = alloc_pages(gfp, order);
+        if (!page)
+                return NULL;
 
- found:
         /*
-         * Insert this entry _before_ the one we found.
+         * Now split the huge page and free the excess pages
         */
-        list_add_tail(&new->vm_list, &c->vm_list);
-        new->vm_start = addr;
-        new->vm_end = addr + size;
-        new->vm_active = 1;
-
-        spin_unlock_irqrestore(&consistent_lock, flags);
-        return new;
-
- nospc:
-        spin_unlock_irqrestore(&consistent_lock, flags);
-        kfree(new);
- out:
-        return NULL;
+        split_page(page, order);
+        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
+                __free_page(p);
+
+        /*
+         * Ensure that the allocated pages are zeroed, and that any data
+         * lurking in the kernel direct-mapped region is invalidated.
+         */
+        ptr = page_address(page);
+        memset(ptr, 0, size);
+        dmac_flush_range(ptr, ptr + size);
+        outer_flush_range(__pa(ptr), __pa(ptr) + size);
+
+        return page;
 }
 
-static struct arm_vm_region *arm_vm_region_find(struct arm_vm_region *head, unsigned long addr)
+/*
+ * Free a DMA buffer.  'size' must be page aligned.
+ */
+static void __dma_free_buffer(struct page *page, size_t size)
 {
-        struct arm_vm_region *c;
+        struct page *e = page + (size >> PAGE_SHIFT);
 
-        list_for_each_entry(c, &head->vm_list, vm_list) {
-                if (c->vm_active && c->vm_start == addr)
-                        goto out;
+        while (page < e) {
+                __free_page(page);
+                page++;
         }
-        c = NULL;
- out:
-        return c;
 }
 
+#ifdef CONFIG_MMU
+/*
+ * These are the page tables (2MB each) covering uncached, DMA consistent allocations
+ */
+static pte_t *consistent_pte[NUM_CONSISTENT_PTES];
+
+#include "vmregion.h"
+
+static struct arm_vmregion_head consistent_head = {
+        .vm_lock        = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
+        .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
+        .vm_start       = CONSISTENT_BASE,
+        .vm_end         = CONSISTENT_END,
+};
+
 #ifdef CONFIG_HUGETLB_PAGE
 #error ARM Coherent DMA allocator does not (yet) support huge TLB
 #endif
 
-static void *
-__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
-            pgprot_t prot)
+/*
+ * Initialise the consistent memory allocation.
+ */
+static int __init consistent_init(void)
 {
-        struct page *page;
-        struct arm_vm_region *c;
-        unsigned long order;
-        u64 mask = get_coherent_dma_mask(dev);
-        u64 limit;
+        int ret = 0;
+        pgd_t *pgd;
+        pmd_t *pmd;
+        pte_t *pte;
+        int i = 0;
+        u32 base = CONSISTENT_BASE;
 
-        if (!consistent_pte[0]) {
-                printk(KERN_ERR "%s: not initialised\n", __func__);
-                dump_stack();
-                return NULL;
-        }
+        do {
+                pgd = pgd_offset(&init_mm, base);
+                pmd = pmd_alloc(&init_mm, pgd, base);
+                if (!pmd) {
+                        printk(KERN_ERR "%s: no pmd tables\n", __func__);
+                        ret = -ENOMEM;
+                        break;
+                }
+                WARN_ON(!pmd_none(*pmd));
 
-        if (!mask)
-                goto no_page;
+                pte = pte_alloc_kernel(pmd, base);
+                if (!pte) {
+                        printk(KERN_ERR "%s: no pte tables\n", __func__);
+                        ret = -ENOMEM;
+                        break;
+                }
 
-        /*
-         * Sanity check the allocation size.
-         */
-        size = PAGE_ALIGN(size);
-        limit = (mask + 1) & ~mask;
-        if ((limit && size >= limit) ||
-            size >= (CONSISTENT_END - CONSISTENT_BASE)) {
-                printk(KERN_WARNING "coherent allocation too big "
-                       "(requested %#x mask %#llx)\n", size, mask);
-                goto no_page;
-        }
+                consistent_pte[i++] = pte;
+                base += (1 << PGDIR_SHIFT);
+        } while (base < CONSISTENT_END);
 
-        order = get_order(size);
+        return ret;
+}
 
-        if (mask < 0xffffffffULL)
-                gfp |= GFP_DMA;
+core_initcall(consistent_init);
 
-        page = alloc_pages(gfp, order);
-        if (!page)
-                goto no_page;
+static void *
+__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
+{
+        struct arm_vmregion *c;
 
-        /*
-         * Invalidate any data that might be lurking in the
-         * kernel direct-mapped region for device DMA.
-         */
-        {
-                void *ptr = page_address(page);
-                memset(ptr, 0, size);
-                dmac_flush_range(ptr, ptr + size);
-                outer_flush_range(__pa(ptr), __pa(ptr) + size);
+        if (!consistent_pte[0]) {
+                printk(KERN_ERR "%s: not initialised\n", __func__);
+                dump_stack();
+                return NULL;
         }
 
         /*
         * Allocate a virtual address in the consistent mapping region.
         */
-        c = arm_vm_region_alloc(&consistent_head, size,
+        c = arm_vmregion_alloc(&consistent_head, size,
                             gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
         if (c) {
                 pte_t *pte;
-                struct page *end = page + (1 << order);
                 int idx = CONSISTENT_PTE_INDEX(c->vm_start);
                 u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
 
                 pte = consistent_pte[idx] + off;
                 c->vm_pages = page;
 
-                split_page(page, order);
-
-                /*
-                 * Set the "dma handle"
-                 */
-                *handle = page_to_dma(dev, page);
-
                 do {
                         BUG_ON(!pte_none(*pte));
 
-                        /*
-                         * x86 does not mark the pages reserved...
-                         */
-                        SetPageReserved(page);
                         set_pte_ext(pte, mk_pte(page, prot), 0);
                         page++;
                         pte++;
@@ -261,48 +219,90 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
                 }
         } while (size -= PAGE_SIZE);
 
-                /*
-                 * Free the otherwise unused pages.
-                 */
-                while (page < end) {
-                        __free_page(page);
-                        page++;
-                }
-
                 return (void *)c->vm_start;
         }
-
-        if (page)
-                __free_pages(page, order);
- no_page:
-        *handle = ~0;
         return NULL;
 }
+
+static void __dma_free_remap(void *cpu_addr, size_t size)
+{
+        struct arm_vmregion *c;
+        unsigned long addr;
+        pte_t *ptep;
+        int idx;
+        u32 off;
+
+        c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
+        if (!c) {
+                printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
+                       __func__, cpu_addr);
+                dump_stack();
+                return;
+        }
+
+        if ((c->vm_end - c->vm_start) != size) {
+                printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
+                       __func__, c->vm_end - c->vm_start, size);
+                dump_stack();
+                size = c->vm_end - c->vm_start;
+        }
+
+        idx = CONSISTENT_PTE_INDEX(c->vm_start);
+        off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
+        ptep = consistent_pte[idx] + off;
+        addr = c->vm_start;
+        do {
+                pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
+
+                ptep++;
+                addr += PAGE_SIZE;
+                off++;
+                if (off >= PTRS_PER_PTE) {
+                        off = 0;
+                        ptep = consistent_pte[++idx];
+                }
+
+                if (pte_none(pte) || !pte_present(pte))
+                        printk(KERN_CRIT "%s: bad page in kernel page table\n",
+                               __func__);
+        } while (size -= PAGE_SIZE);
+
+        flush_tlb_kernel_range(c->vm_start, c->vm_end);
+
+        arm_vmregion_free(&consistent_head, c);
+}
+
 #else   /* !CONFIG_MMU */
+
+#define __dma_alloc_remap(page, size, gfp, prot)        page_address(page)
+#define __dma_free_remap(addr, size)                    do { } while (0)
+
+#endif  /* CONFIG_MMU */
+
 static void *
 __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
             pgprot_t prot)
 {
-        void *virt;
-        u64 mask = get_coherent_dma_mask(dev);
+        struct page *page;
+        void *addr;
 
-        if (!mask)
-                goto error;
+        *handle = ~0;
+        size = PAGE_ALIGN(size);
 
-        if (mask < 0xffffffffULL)
-                gfp |= GFP_DMA;
-        virt = kmalloc(size, gfp);
-        if (!virt)
-                goto error;
+        page = __dma_alloc_buffer(dev, size, gfp);
+        if (!page)
+                return NULL;
 
-        *handle = virt_to_dma(dev, virt);
-        return virt;
+        if (!arch_is_coherent())
+                addr = __dma_alloc_remap(page, size, gfp, prot);
+        else
+                addr = page_address(page);
 
-error:
-        *handle = ~0;
-        return NULL;
+        if (addr)
+                *handle = page_to_dma(dev, page);
+
+        return addr;
 }
-#endif  /* CONFIG_MMU */
 
 /*
 * Allocate DMA-coherent memory space and return both the kernel remapped
@@ -316,19 +316,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gf
         if (dma_alloc_from_coherent(dev, size, handle, &memory))
                 return memory;
 
-        if (arch_is_coherent()) {
-                void *virt;
-
-                virt = kmalloc(size, gfp);
-                if (!virt)
-                        return NULL;
-                *handle = virt_to_dma(dev, virt);
-
-                return virt;
-        }
-
         return __dma_alloc(dev, size, handle, gfp,
-                           pgprot_noncached(pgprot_kernel));
+                           pgprot_dmacoherent(pgprot_kernel));
 }
 EXPORT_SYMBOL(dma_alloc_coherent);
 
@@ -349,15 +338,12 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
         int ret = -ENXIO;
 #ifdef CONFIG_MMU
-        unsigned long flags, user_size, kern_size;
-        struct arm_vm_region *c;
+        unsigned long user_size, kern_size;
+        struct arm_vmregion *c;
 
         user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
-        spin_lock_irqsave(&consistent_lock, flags);
-        c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
-        spin_unlock_irqrestore(&consistent_lock, flags);
-
+        c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
         if (c) {
                 unsigned long off = vma->vm_pgoff;
 
@@ -379,7 +365,7 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                       void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
-        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+        vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
         return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
 }
 EXPORT_SYMBOL(dma_mmap_coherent);
@@ -396,144 +382,23 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
-#ifdef CONFIG_MMU
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
-        struct arm_vm_region *c;
-        unsigned long flags, addr;
-        pte_t *ptep;
-        int idx;
-        u32 off;
-
         WARN_ON(irqs_disabled());
 
         if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                 return;
 
-        if (arch_is_coherent()) {
-                kfree(cpu_addr);
-                return;
-        }
-
         size = PAGE_ALIGN(size);
 
-        spin_lock_irqsave(&consistent_lock, flags);
-        c = arm_vm_region_find(&consistent_head, (unsigned long)cpu_addr);
-        if (!c)
-                goto no_area;
-
-        c->vm_active = 0;
-        spin_unlock_irqrestore(&consistent_lock, flags);
-
-        if ((c->vm_end - c->vm_start) != size) {
-                printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
-                       __func__, c->vm_end - c->vm_start, size);
-                dump_stack();
-                size = c->vm_end - c->vm_start;
-        }
-
-        idx = CONSISTENT_PTE_INDEX(c->vm_start);
-        off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
-        ptep = consistent_pte[idx] + off;
-        addr = c->vm_start;
-        do {
-                pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
-                unsigned long pfn;
-
-                ptep++;
-                addr += PAGE_SIZE;
-                off++;
-                if (off >= PTRS_PER_PTE) {
-                        off = 0;
-                        ptep = consistent_pte[++idx];
-                }
-
-                if (!pte_none(pte) && pte_present(pte)) {
-                        pfn = pte_pfn(pte);
-
-                        if (pfn_valid(pfn)) {
-                                struct page *page = pfn_to_page(pfn);
-
-                                /*
-                                 * x86 does not mark the pages reserved...
-                                 */
-                                ClearPageReserved(page);
-
-                                __free_page(page);
-                                continue;
-                        }
-                }
-
-                printk(KERN_CRIT "%s: bad page in kernel page table\n",
-                       __func__);
-        } while (size -= PAGE_SIZE);
-
-        flush_tlb_kernel_range(c->vm_start, c->vm_end);
-
-        spin_lock_irqsave(&consistent_lock, flags);
-        list_del(&c->vm_list);
-        spin_unlock_irqrestore(&consistent_lock, flags);
-
-        kfree(c);
-        return;
+        if (!arch_is_coherent())
+                __dma_free_remap(cpu_addr, size);
 
-no_area:
-        spin_unlock_irqrestore(&consistent_lock, flags);
-        printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
-               __func__, cpu_addr);
-        dump_stack();
+        __dma_free_buffer(dma_to_page(dev, handle), size);
 }
-#else   /* !CONFIG_MMU */
-void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
-{
-        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
-                return;
-        kfree(cpu_addr);
-}
-#endif  /* CONFIG_MMU */
 EXPORT_SYMBOL(dma_free_coherent);
 
 /*
- * Initialise the consistent memory allocation.
- */
-static int __init consistent_init(void)
-{
-        int ret = 0;
-#ifdef CONFIG_MMU
-        pgd_t *pgd;
-        pmd_t *pmd;
-        pte_t *pte;
-        int i = 0;
-        u32 base = CONSISTENT_BASE;
-
-        do {
-                pgd = pgd_offset(&init_mm, base);
-                pmd = pmd_alloc(&init_mm, pgd, base);
-                if (!pmd) {
-                        printk(KERN_ERR "%s: no pmd tables\n", __func__);
-                        ret = -ENOMEM;
-                        break;
-                }
-                WARN_ON(!pmd_none(*pmd));
-
-                pte = pte_alloc_kernel(pmd, base);
-                if (!pte) {
-                        printk(KERN_ERR "%s: no pte tables\n", __func__);
-                        ret = -ENOMEM;
-                        break;
-                }
-
-                consistent_pte[i++] = pte;
-                base += (1 << PGDIR_SHIFT);
-        } while (base < CONSISTENT_END);
-#endif  /* !CONFIG_MMU */
-
-        return ret;
-}
-
-core_initcall(consistent_init);
-
-/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
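The new __dma_alloc_buffer() introduced at the top of this file's diff always takes a 2^order block from the page allocator, splits it with split_page(), and immediately frees every page past size >> PAGE_SHIFT, so a non-power-of-two request no longer ties up the whole block. A standalone sketch of that arithmetic; get_order() is re-implemented here for illustration and mirrors the kernel's page-rounding:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned long get_order(unsigned long size)
    {
        unsigned long order = 0;

        size = (size - 1) >> PAGE_SHIFT;
        while (size) {
            order++;
            size >>= 1;
        }
        return order;
    }

    int main(void)
    {
        unsigned long size = 5 * 4096;          /* five pages */
        unsigned long order = get_order(size);  /* -> 3, i.e. eight pages */
        unsigned long used = size >> PAGE_SHIFT;

        printf("order-%lu allocation: %lu pages, %lu kept, %lu freed back\n",
               order, 1UL << order, used, (1UL << order) - used);
        return 0;
    }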
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index d0d17b6a3703..729602291958 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -23,6 +23,8 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#include "mm.h"
+
 static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
@@ -151,7 +153,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
         if (!pfn_valid(pfn))
                 return;
 
+        /*
+         * The zero page is never written to, so never has any dirty
+         * cache lines, and therefore never needs to be flushed.
+         */
         page = pfn_to_page(pfn);
+        if (page == ZERO_PAGE(0))
+                return;
+
         mapping = page_mapping(page);
 #ifndef CONFIG_SMP
         if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 7f294f307c83..329594e760cd 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -35,14 +35,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
             :
             : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
             : "cc");
-        __flush_icache_all();
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
         if (cache_is_vivt()) {
-                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-                        __cpuc_flush_user_all();
+                vivt_flush_cache_mm(mm);
                 return;
         }
 
@@ -52,16 +50,13 @@ void flush_cache_mm(struct mm_struct *mm)
             :
             : "r" (0)
             : "cc");
-        __flush_icache_all();
         }
 }
 
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
         if (cache_is_vivt()) {
-                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
-                        __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-                                vma->vm_flags);
+                vivt_flush_cache_range(vma, start, end);
                 return;
         }
 
@@ -71,22 +66,26 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
             :
             : "r" (0)
             : "cc");
-        __flush_icache_all();
         }
+
+        if (vma->vm_flags & VM_EXEC)
+                __flush_icache_all();
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
         if (cache_is_vivt()) {
-                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-                        unsigned long addr = user_addr & PAGE_MASK;
-                        __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-                }
+                vivt_flush_cache_page(vma, user_addr, pfn);
                 return;
         }
 
-        if (cache_is_vipt_aliasing())
+        if (cache_is_vipt_aliasing()) {
                 flush_pfn_alias(pfn, user_addr);
+                __flush_icache_all();
+        }
+
+        if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+                __flush_icache_all();
 }
 
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -94,15 +93,13 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                          unsigned long len, int write)
 {
         if (cache_is_vivt()) {
-                if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-                        unsigned long addr = (unsigned long)kaddr;
-                        __cpuc_coherent_kern_range(addr, addr + len);
-                }
+                vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
                 return;
         }
 
         if (cache_is_vipt_aliasing()) {
                 flush_pfn_alias(page_to_pfn(page), uaddr);
+                __flush_icache_all();
                 return;
         }
 
@@ -120,6 +117,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
+        void *addr = page_address(page);
+
         /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
@@ -130,9 +129,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
         * kmap_atomic() doesn't set the page virtual address, and
         * kunmap_atomic() takes care of cache flushing already.
         */
-        if (page_address(page))
+        if (addr)
 #endif
-                __cpuc_flush_dcache_page(page_address(page));
+                __cpuc_flush_dcache_page(addr);
 
         /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
@@ -196,7 +195,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
 */
 void flush_dcache_page(struct page *page)
 {
-        struct address_space *mapping = page_mapping(page);
+        struct address_space *mapping;
+
+        /*
+         * The zero page is never written to, so never has any dirty
+         * cache lines, and therefore never needs to be flushed.
+         */
+        if (page == ZERO_PAGE(0))
+                return;
+
+        mapping = page_mapping(page);
 
 #ifndef CONFIG_SMP
         if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
@@ -242,6 +250,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
         * userspace address only.
         */
         flush_pfn_alias(pfn, vmaddr);
+        __flush_icache_all();
 }
 
 /*
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index c4f6f05198e0..a888363398f8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -24,6 +24,8 @@ struct mem_type {
 
 const struct mem_type *get_mem_type(unsigned int type);
 
+extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
 #endif
 
 struct map_desc;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ea67be0223ac..8c7fbd19a4b3 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -881,7 +881,7 @@ void __init reserve_node_zero(pg_data_t *pgdat)
                                         BOOTMEM_EXCLUSIVE);
         }
 
-        if (machine_is_treo680()) {
+        if (machine_is_treo680() || machine_is_centro()) {
                 reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
                                 BOOTMEM_EXCLUSIVE);
                 reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
@@ -1036,7 +1036,7 @@ void __init paging_init(struct machine_desc *mdesc)
         */
         zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
         empty_zero_page = virt_to_page(zero_page);
-        flush_dcache_page(empty_zero_page);
+        __flush_dcache_page(NULL, empty_zero_page);
 }
 
 /*
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 70f75d2e3ead..5485c821101c 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -130,9 +130,16 @@ ENTRY(cpu_v6_set_pte_ext)
 
 
 
-
+        .type   cpu_v6_name, #object
 cpu_v6_name:
         .asciz  "ARMv6-compatible processor"
+        .size   cpu_v6_name, . - cpu_v6_name
+
+        .type   cpu_pj4_name, #object
+cpu_pj4_name:
+        .asciz  "Marvell PJ4 processor"
+        .size   cpu_pj4_name, . - cpu_pj4_name
+
         .align
 
         __INIT
@@ -241,3 +248,27 @@ __v6_proc_info:
         .long   v6_user_fns
         .long   v6_cache_fns
         .size   __v6_proc_info, . - __v6_proc_info
+
+        .type   __pj4_v6_proc_info, #object
+__pj4_v6_proc_info:
+        .long   0x560f5810
+        .long   0xff0ffff0
+        .long   PMD_TYPE_SECT | \
+                PMD_SECT_BUFFERABLE | \
+                PMD_SECT_CACHEABLE | \
+                PMD_SECT_AP_WRITE | \
+                PMD_SECT_AP_READ
+        .long   PMD_TYPE_SECT | \
+                PMD_SECT_XN | \
+                PMD_SECT_AP_WRITE | \
+                PMD_SECT_AP_READ
+        b       __v6_setup
+        .long   cpu_arch_name
+        .long   cpu_elf_name
+        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+        .long   cpu_pj4_name
+        .long   v6_processor_functions
+        .long   v6wbi_tlb_fns
+        .long   v6_user_fns
+        .long   v6_cache_fns
+        .size   __pj4_v6_proc_info, . - __pj4_v6_proc_info
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index 2028f3702881..fab134e29826 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -396,7 +396,7 @@ __xsc3_setup:
         orr     r4, r4, #0x18                   @ cache the page table in L2
         mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
 
-        mov     r0, #0                          @ don't allow CP access
+        mov     r0, #1 << 6                     @ cp6 access for early sched_clock
         mcr     p15, 0, r0, c15, c1, 0          @ write CP access register
 
         mrc     p15, 0, r0, c1, c0, 1           @ get auxiliary control reg
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
new file mode 100644
index 000000000000..19e09bdb1b8a
--- /dev/null
+++ b/arch/arm/mm/vmregion.c
@@ -0,0 +1,131 @@
+#include <linux/spinlock.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+
+#include "vmregion.h"
+
+/*
+ * VM region handling support.
+ *
+ * This should become something generic, handling VM region allocations for
+ * vmalloc and similar (ioremap, module space, etc).
+ *
+ * I envisage vmalloc()'s supporting vm_struct becoming:
+ *
+ *  struct vm_struct {
+ *    struct vmregion   region;
+ *    unsigned long     flags;
+ *    struct page       **pages;
+ *    unsigned int      nr_pages;
+ *    unsigned long     phys_addr;
+ *  };
+ *
+ * get_vm_area() would then call vmregion_alloc with an appropriate
+ * struct vmregion head (eg):
+ *
+ *  struct vmregion vmalloc_head = {
+ *      .vm_list        = LIST_HEAD_INIT(vmalloc_head.vm_list),
+ *      .vm_start       = VMALLOC_START,
+ *      .vm_end         = VMALLOC_END,
+ *  };
+ *
+ * However, vmalloc_head.vm_start is variable (typically, it is dependent on
+ * the amount of RAM found at boot time.)  I would imagine that get_vm_area()
+ * would have to initialise this each time prior to calling vmregion_alloc().
+ */
+
+struct arm_vmregion *
+arm_vmregion_alloc(struct arm_vmregion_head *head, size_t size, gfp_t gfp)
+{
+        unsigned long addr = head->vm_start, end = head->vm_end - size;
+        unsigned long flags;
+        struct arm_vmregion *c, *new;
+
+        if (head->vm_end - head->vm_start < size) {
+                printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
+                        __func__, size);
+                goto out;
+        }
+
+        new = kmalloc(sizeof(struct arm_vmregion), gfp);
+        if (!new)
+                goto out;
+
+        spin_lock_irqsave(&head->vm_lock, flags);
+
+        list_for_each_entry(c, &head->vm_list, vm_list) {
+                if ((addr + size) < addr)
+                        goto nospc;
+                if ((addr + size) <= c->vm_start)
+                        goto found;
+                addr = c->vm_end;
+                if (addr > end)
+                        goto nospc;
+        }
+
+ found:
+        /*
+         * Insert this entry _before_ the one we found.
+         */
+        list_add_tail(&new->vm_list, &c->vm_list);
+        new->vm_start = addr;
+        new->vm_end = addr + size;
+        new->vm_active = 1;
+
+        spin_unlock_irqrestore(&head->vm_lock, flags);
+        return new;
+
+ nospc:
+        spin_unlock_irqrestore(&head->vm_lock, flags);
+        kfree(new);
+ out:
+        return NULL;
+}
+
+static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
+{
+        struct arm_vmregion *c;
+
+        list_for_each_entry(c, &head->vm_list, vm_list) {
+                if (c->vm_active && c->vm_start == addr)
+                        goto out;
+        }
+        c = NULL;
+ out:
+        return c;
+}
+
+struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
+{
+        struct arm_vmregion *c;
+        unsigned long flags;
+
+        spin_lock_irqsave(&head->vm_lock, flags);
+        c = __arm_vmregion_find(head, addr);
+        spin_unlock_irqrestore(&head->vm_lock, flags);
+        return c;
+}
+
+struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
+{
+        struct arm_vmregion *c;
+        unsigned long flags;
+
+        spin_lock_irqsave(&head->vm_lock, flags);
+        c = __arm_vmregion_find(head, addr);
+        if (c)
+                c->vm_active = 0;
+        spin_unlock_irqrestore(&head->vm_lock, flags);
+        return c;
+}
+
+void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
+{
+        unsigned long flags;
+
+        spin_lock_irqsave(&head->vm_lock, flags);
+        list_del(&c->vm_list);
+        spin_unlock_irqrestore(&head->vm_lock, flags);
+
+        kfree(c);
+}
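arm_vmregion_alloc() above performs a first-fit scan: regions are kept sorted by address, and the new region is placed in the first gap large enough to hold it, with overflow and vm_end checks along the way. A standalone sketch of the same scan over a plain sorted array (the array and harness are assumptions standing in for the kernel's locked linked list):

    #include <stdio.h>

    struct region { unsigned long start, end; };

    /* existing allocations, sorted by address, within [0x1000, 0x9000) */
    static struct region regions[] = {
        { 0x1000, 0x3000 },
        { 0x4000, 0x5000 },
    };

    static unsigned long first_fit(unsigned long base, unsigned long limit,
                                   unsigned long size)
    {
        unsigned long addr = base;
        unsigned int i;

        for (i = 0; i < sizeof(regions) / sizeof(regions[0]); i++) {
            if (addr + size <= regions[i].start)
                return addr;            /* gap before this region */
            addr = regions[i].end;
            if (addr > limit - size)
                return 0;               /* no space */
        }
        return addr + size <= limit ? addr : 0;
    }

    int main(void)
    {
        /* first gap that can hold 0x2000 bytes is [0x5000, 0x7000) */
        printf("placed at %#lx\n", first_fit(0x1000, 0x9000, 0x2000));
        return 0;
    }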
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
new file mode 100644
index 000000000000..6b2cdbdf3a85
--- /dev/null
+++ b/arch/arm/mm/vmregion.h
@@ -0,0 +1,29 @@
+#ifndef VMREGION_H
+#define VMREGION_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+struct page;
+
+struct arm_vmregion_head {
+        spinlock_t              vm_lock;
+        struct list_head        vm_list;
+        unsigned long           vm_start;
+        unsigned long           vm_end;
+};
+
+struct arm_vmregion {
+        struct list_head        vm_list;
+        unsigned long           vm_start;
+        unsigned long           vm_end;
+        struct page             *vm_pages;
+        int                     vm_active;
+};
+
+struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, gfp_t);
+struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
+struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
+void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
+
+#endif