author     Greg Ungerer <gerg@uclinux.org>    2011-03-28 08:37:13 -0400
committer  Greg Ungerer <gerg@uclinux.org>    2011-05-23 20:03:50 -0400
commit     1bccc43c1010f2ac88156c005fab6823569b0a39 (patch)
tree       587e5627e09234fd29ed7726b0626ca6806af6e3 /arch/m68k/mm
parent     593732bd41a6f16eeed9880ae7d51920fc5350ff (diff)
m68k: mv kmap_mm.c to kmap.c
The non-MMU kmap_no.c has been removed, so we can move kmap_mm.c
back to being the only kmap.c.
Signed-off-by: Greg Ungerer <gerg@uclinux.org>
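
For context, before this commit arch/m68k/mm/kmap.c was nothing but a
five-line dispatch wrapper; those are exactly the lines removed in the
hunk below:

/* arch/m68k/mm/kmap.c before this commit: */
#ifdef CONFIG_MMU
#include "kmap_mm.c"    /* MMU variant; becomes kmap.c itself here */
#else
#include "kmap_no.c"    /* non-MMU variant; already removed by an earlier commit */
#endif
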
Diffstat (limited to 'arch/m68k/mm')
 -rw-r--r--  arch/m68k/mm/kmap.c     | 368
 -rw-r--r--  arch/m68k/mm/kmap_mm.c  | 367
 2 files changed, 365 insertions(+), 370 deletions(-)
diff --git a/arch/m68k/mm/kmap.c b/arch/m68k/mm/kmap.c
index a373d136b2b2..69345849454b 100644
--- a/arch/m68k/mm/kmap.c
+++ b/arch/m68k/mm/kmap.c
@@ -1,5 +1,367 @@
-#ifdef CONFIG_MMU
-#include "kmap_mm.c"
+/*
+ * linux/arch/m68k/mm/kmap.c
+ *
+ * Copyright (C) 1997 Roman Hodek
+ *
+ * 10/01/99 cleaned up the code and changing to the same interface
+ *          used by other architectures /Roman Zippel
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <asm/setup.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+#undef DEBUG
+
+#define PTRTREESIZE	(256*1024)
+
+/*
+ * For 040/060 we can use the virtual memory area like other architectures,
+ * but for 020/030 we want to use early termination page descriptor and we
+ * can't mix this with normal page descriptors, so we have to copy that code
+ * (mm/vmalloc.c) and return appriorate aligned addresses.
+ */
+
+#ifdef CPU_M68040_OR_M68060_ONLY
+
+#define IO_SIZE		PAGE_SIZE
+
+static inline struct vm_struct *get_io_area(unsigned long size)
+{
+	return get_vm_area(size, VM_IOREMAP);
+}
+
+
+static inline void free_io_area(void *addr)
+{
+	vfree((void *)(PAGE_MASK & (unsigned long)addr));
+}
+
 #else
-#include "kmap_no.c"
+
+#define IO_SIZE		(256*1024)
+
+static struct vm_struct *iolist;
+
+static struct vm_struct *get_io_area(unsigned long size)
+{
+	unsigned long addr;
+	struct vm_struct **p, *tmp, *area;
+
+	area = kmalloc(sizeof(*area), GFP_KERNEL);
+	if (!area)
+		return NULL;
+	addr = KMAP_START;
+	for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
+		if (size + addr < (unsigned long)tmp->addr)
+			break;
+		if (addr > KMAP_END-size) {
+			kfree(area);
+			return NULL;
+		}
+		addr = tmp->size + (unsigned long)tmp->addr;
+	}
+	area->addr = (void *)addr;
+	area->size = size + IO_SIZE;
+	area->next = *p;
+	*p = area;
+	return area;
+}
+
+static inline void free_io_area(void *addr)
+{
+	struct vm_struct **p, *tmp;
+
+	if (!addr)
+		return;
+	addr = (void *)((unsigned long)addr & -IO_SIZE);
+	for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
+		if (tmp->addr == addr) {
+			*p = tmp->next;
+			__iounmap(tmp->addr, tmp->size);
+			kfree(tmp);
+			return;
+		}
+	}
+}
+
 #endif
+
+/*
+ * Map some physical address range into the kernel address space.
+ */
+/* Rewritten by Andreas Schwab to remove all races. */
+
+void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
+{
+	struct vm_struct *area;
+	unsigned long virtaddr, retaddr;
+	long offset;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
+	/*
+	 * Don't allow mappings that wrap..
+	 */
+	if (!size || physaddr > (unsigned long)(-size))
+		return NULL;
+
+#ifdef CONFIG_AMIGA
+	if (MACH_IS_AMIGA) {
+		if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
+		    && (cacheflag == IOMAP_NOCACHE_SER))
+			return (void __iomem *)physaddr;
+	}
+#endif
+
+#ifdef DEBUG
+	printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
+#endif
+	/*
+	 * Mappings have to be aligned
+	 */
+	offset = physaddr & (IO_SIZE - 1);
+	physaddr &= -IO_SIZE;
+	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
+
+	/*
+	 * Ok, go for it..
+	 */
+	area = get_io_area(size);
+	if (!area)
+		return NULL;
+
+	virtaddr = (unsigned long)area->addr;
+	retaddr = virtaddr + offset;
+#ifdef DEBUG
+	printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
+#endif
+
+	/*
+	 * add cache and table flags to physical address
+	 */
+	if (CPU_IS_040_OR_060) {
+		physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
+			     _PAGE_ACCESSED | _PAGE_DIRTY);
+		switch (cacheflag) {
+		case IOMAP_FULL_CACHING:
+			physaddr |= _PAGE_CACHE040;
+			break;
+		case IOMAP_NOCACHE_SER:
+		default:
+			physaddr |= _PAGE_NOCACHE_S;
+			break;
+		case IOMAP_NOCACHE_NONSER:
+			physaddr |= _PAGE_NOCACHE;
+			break;
+		case IOMAP_WRITETHROUGH:
+			physaddr |= _PAGE_CACHE040W;
+			break;
+		}
+	} else {
+		physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
+		switch (cacheflag) {
+		case IOMAP_NOCACHE_SER:
+		case IOMAP_NOCACHE_NONSER:
+		default:
+			physaddr |= _PAGE_NOCACHE030;
+			break;
+		case IOMAP_FULL_CACHING:
+		case IOMAP_WRITETHROUGH:
+			break;
+		}
+	}
+
+	while ((long)size > 0) {
+#ifdef DEBUG
+		if (!(virtaddr & (PTRTREESIZE-1)))
+			printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
+#endif
+		pgd_dir = pgd_offset_k(virtaddr);
+		pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
+		if (!pmd_dir) {
+			printk("ioremap: no mem for pmd_dir\n");
+			return NULL;
+		}
+
+		if (CPU_IS_020_OR_030) {
+			pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
+			physaddr += PTRTREESIZE;
+			virtaddr += PTRTREESIZE;
+			size -= PTRTREESIZE;
+		} else {
+			pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
+			if (!pte_dir) {
+				printk("ioremap: no mem for pte_dir\n");
+				return NULL;
+			}
+
+			pte_val(*pte_dir) = physaddr;
+			virtaddr += PAGE_SIZE;
+			physaddr += PAGE_SIZE;
+			size -= PAGE_SIZE;
+		}
+	}
+#ifdef DEBUG
+	printk("\n");
+#endif
+	flush_tlb_all();
+
+	return (void __iomem *)retaddr;
+}
+EXPORT_SYMBOL(__ioremap);
+
+/*
+ * Unmap a ioremap()ed region again
+ */
+void iounmap(void __iomem *addr)
+{
+#ifdef CONFIG_AMIGA
+	if ((!MACH_IS_AMIGA) ||
+	    (((unsigned long)addr < 0x40000000) ||
+	     ((unsigned long)addr > 0x60000000)))
+		free_io_area((__force void *)addr);
+#else
+	free_io_area((__force void *)addr);
+#endif
+}
+EXPORT_SYMBOL(iounmap);
+
+/*
+ * __iounmap unmaps nearly everything, so be careful
+ * it doesn't free currently pointer/page tables anymore but it
+ * wans't used anyway and might be added later.
+ */
+void __iounmap(void *addr, unsigned long size)
+{
+	unsigned long virtaddr = (unsigned long)addr;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
+	while ((long)size > 0) {
+		pgd_dir = pgd_offset_k(virtaddr);
+		if (pgd_bad(*pgd_dir)) {
+			printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
+			pgd_clear(pgd_dir);
+			return;
+		}
+		pmd_dir = pmd_offset(pgd_dir, virtaddr);
+
+		if (CPU_IS_020_OR_030) {
+			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+			int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
+
+			if (pmd_type == _PAGE_PRESENT) {
+				pmd_dir->pmd[pmd_off] = 0;
+				virtaddr += PTRTREESIZE;
+				size -= PTRTREESIZE;
+				continue;
+			} else if (pmd_type == 0)
+				continue;
+		}
+
+		if (pmd_bad(*pmd_dir)) {
+			printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
+			pmd_clear(pmd_dir);
+			return;
+		}
+		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
+
+		pte_val(*pte_dir) = 0;
+		virtaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	flush_tlb_all();
+}
+
+/*
+ * Set new cache mode for some kernel address space.
+ * The caller must push data for that range itself, if such data may already
+ * be in the cache.
+ */
+void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
+{
+	unsigned long virtaddr = (unsigned long)addr;
+	pgd_t *pgd_dir;
+	pmd_t *pmd_dir;
+	pte_t *pte_dir;
+
+	if (CPU_IS_040_OR_060) {
+		switch (cmode) {
+		case IOMAP_FULL_CACHING:
+			cmode = _PAGE_CACHE040;
+			break;
+		case IOMAP_NOCACHE_SER:
+		default:
+			cmode = _PAGE_NOCACHE_S;
+			break;
+		case IOMAP_NOCACHE_NONSER:
+			cmode = _PAGE_NOCACHE;
+			break;
+		case IOMAP_WRITETHROUGH:
+			cmode = _PAGE_CACHE040W;
+			break;
+		}
+	} else {
+		switch (cmode) {
+		case IOMAP_NOCACHE_SER:
+		case IOMAP_NOCACHE_NONSER:
+		default:
+			cmode = _PAGE_NOCACHE030;
+			break;
+		case IOMAP_FULL_CACHING:
+		case IOMAP_WRITETHROUGH:
+			cmode = 0;
+		}
+	}
+
+	while ((long)size > 0) {
+		pgd_dir = pgd_offset_k(virtaddr);
+		if (pgd_bad(*pgd_dir)) {
+			printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
+			pgd_clear(pgd_dir);
+			return;
+		}
+		pmd_dir = pmd_offset(pgd_dir, virtaddr);
+
+		if (CPU_IS_020_OR_030) {
+			int pmd_off = (virtaddr/PTRTREESIZE) & 15;
+
+			if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
+				pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
+							 _CACHEMASK040) | cmode;
+				virtaddr += PTRTREESIZE;
+				size -= PTRTREESIZE;
+				continue;
+			}
+		}
+
+		if (pmd_bad(*pmd_dir)) {
+			printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
+			pmd_clear(pmd_dir);
+			return;
+		}
+		pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
+
+		pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
+		virtaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	}
+
+	flush_tlb_all();
+}
+EXPORT_SYMBOL(kernel_set_cachemode);
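
The file above is the whole of the m68k ioremap layer, so a short usage
sketch may help. The fragment below is hypothetical (the MYDEV_* device
name, base address, and size are invented) and only demonstrates the
__ioremap()/iounmap() interface exported above:

/* Hypothetical m68k driver fragment; MYDEV_* values are invented for
 * illustration and do not correspond to real hardware. */
#include <linux/errno.h>
#include <asm/io.h>

#define MYDEV_PHYS_BASE	0x00d80000UL	/* assumed register block base */
#define MYDEV_SIZE	0x4000UL	/* assumed register block size */

static void __iomem *mydev_regs;

static int mydev_map(void)
{
	/* Serialized, uncached mapping: the safe default for device I/O,
	 * and the same mode the Amiga fast path above checks for. */
	mydev_regs = __ioremap(MYDEV_PHYS_BASE, MYDEV_SIZE, IOMAP_NOCACHE_SER);
	if (!mydev_regs)
		return -ENOMEM;
	return 0;
}

static void mydev_unmap(void)
{
	iounmap(mydev_regs);	/* releases the io area set up by __ioremap() */
}

Note the design choice visible in free_io_area(): on 020/030 it recovers
the allocation by masking the pointer down with -IO_SIZE, which is why
__ioremap() only hands out addresses inside IO_SIZE-aligned chunks.
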
diff --git a/arch/m68k/mm/kmap_mm.c b/arch/m68k/mm/kmap_mm.c
deleted file mode 100644
index 69345849454b..000000000000
--- a/arch/m68k/mm/kmap_mm.c
+++ /dev/null
@@ -1,367 +0,0 @@
[All 367 removed lines of kmap_mm.c are byte-for-byte identical to the new arch/m68k/mm/kmap.c contents shown in full above.]
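
As an aside on the alignment logic in __ioremap() above: requests are
rounded out to whole IO_SIZE chunks (256KB on 020/030, one page on
040/060-only builds), and the caller gets back the original offset within
the chunk. A standalone sketch of that arithmetic, with an invented
request; this is not kernel code, just the same three lines of math:

/* Standalone illustration of __ioremap()'s alignment math. The request
 * values are invented; IO_SIZE matches the 020/030 case above. */
#include <stdio.h>

#define IO_SIZE (256*1024)

int main(void)
{
	unsigned long physaddr = 0x00d12345UL;	/* requested physical base */
	unsigned long size     = 0x2000UL;	/* requested length */

	unsigned long offset = physaddr & (IO_SIZE - 1);	/* 0x12345 */
	physaddr &= -IO_SIZE;			/* chunk base: 0x00d00000 */
	size = (size + offset + IO_SIZE - 1) & -IO_SIZE;	/* 0x40000 */

	/* The mapping covers [0x00d00000, 0x00d40000); the caller is handed
	 * virtaddr + 0x12345, so the offset within the chunk is preserved. */
	printf("base=%#lx len=%#lx offset=%#lx\n", physaddr, size, offset);
	return 0;
}
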