Diffstat (limited to 'arch/sh64/mm/ioremap.c')
-rw-r--r-- | arch/sh64/mm/ioremap.c | 469
1 files changed, 469 insertions, 0 deletions
diff --git a/arch/sh64/mm/ioremap.c b/arch/sh64/mm/ioremap.c
new file mode 100644
index 000000000000..f4003da556bc
--- /dev/null
+++ b/arch/sh64/mm/ioremap.c
@@ -0,0 +1,469 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/mm/ioremap.c
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003, 2004 Paul Mundt
 *
 * Mostly derived from arch/sh/mm/ioremap.c which, in turn is mostly
 * derived from arch/i386/mm/ioremap.c .
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <linux/ioport.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>

static void shmedia_mapioaddr(unsigned long, unsigned long);
static unsigned long shmedia_ioremap(struct resource *, u32, int);

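/*
 * remap_area_pte() and remap_area_pmd() each fill in one level of the
 * kernel page tables for an I/O mapping: the PTEs point at the physical
 * range with the caller's flags or'ed into a present, read/write, shared
 * protection.  Both are only used from remap_area_pages() below, on the
 * init_mm kernel tables.
 */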
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_PRESENT | _PAGE_READ |
				   _PAGE_WRITE | _PAGE_DIRTY |
				   _PAGE_ACCESSED | _PAGE_SHARED | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();

	pfn = phys_addr >> PAGE_SHIFT;

	pr_debug(" %s: pte %p address %lx size %lx phys_addr %lx\n",
		 __FUNCTION__,pte,address,size,phys_addr);

	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}

		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;

	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;

	phys_addr -= address;

	if (address >= end)
		BUG();

	do {
		pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

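/*
 * remap_area_pages() walks the kernel pgd entries covering
 * [address, address + size), allocating intermediate tables as needed
 * under init_mm.page_table_lock, flushing the caches before and the TLBs
 * after the page tables are rewritten.
 */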
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t * dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	spin_lock(&init_mm.page_table_lock);
	do {
		pmd_t *pmd = pmd_alloc(&init_mm, dir, address);
		error = -ENOMEM;
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags)) {
			break;
		}
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return error;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
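/*
 * Illustrative only -- a minimal sketch of the intended calling pattern,
 * assuming a device register block at a made-up DEV_PHYS_BASE:
 *
 *	void *regs = __ioremap(DEV_PHYS_BASE, DEV_REG_SIZE, 0);
 *	if (regs) {
 *		... access the device through regs + offset ...
 *		iounmap(regs);
 *	}
 *
 * DEV_PHYS_BASE and DEV_REG_SIZE are placeholders, not definitions from
 * this tree; the returned pointer already includes any sub-page offset of
 * the physical address.
 */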
void * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	pr_debug("Get vm_area returns %p addr %p\n", area, area->addr);
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, flags)) {
		vunmap(addr);
		return NULL;
	}
	return (void *) (offset + (char *)addr);
}

void iounmap(void *addr)
{
	struct vm_struct *area;

	/* remove_vm_area() tears down the mapping and unlinks the area */
	area = remove_vm_area((void *) (PAGE_MASK & (unsigned long) addr));
	if (!area) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		return;
	}

	kfree(area);
}

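/*
 * Everything from here down implements onchip_remap()/onchip_unmap(): a
 * small remapper that hands out virtual addresses from the fixed SHmedia
 * I/O window.  Note that the resource starts one page above IOBASE_VADDR,
 * so a successful mapping never sits at the very base of the window.
 */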
static struct resource shmedia_iomap = {
	.name	= "shmedia_iomap",
	.start	= IOBASE_VADDR + PAGE_SIZE,
	.end	= IOBASE_END - 1,
};

static void shmedia_mapioaddr(unsigned long pa, unsigned long va);
static void shmedia_unmapioaddr(unsigned long vaddr);
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz);

/*
 * We have the same problem as the SPARC, so lets have the same comment:
 * Our mini-allocator...
 * Boy this is gross! We need it because we must map I/O for
 * timers and interrupt controller before the kmalloc is available.
 */

#define XNMLN 15
#define XNRES 10

struct xresource {
	struct resource xres;	/* Must be first */
	int xflag;		/* 1 == used */
	char xname[XNMLN+1];
};

static struct xresource xresv[XNRES];

static struct xresource *xres_alloc(void)
{
	struct xresource *xrp;
	int n;

	xrp = xresv;
	for (n = 0; n < XNRES; n++) {
		if (xrp->xflag == 0) {
			xrp->xflag = 1;
			return xrp;
		}
		xrp++;
	}
	return NULL;
}

static void xres_free(struct xresource *xrp)
{
	xrp->xflag = 0;
}

static struct resource *shmedia_find_resource(struct resource *root,
					      unsigned long vaddr)
{
	struct resource *res;

	for (res = root->child; res; res = res->sibling)
		if (res->start <= vaddr && res->end >= vaddr)
			return res;

	return NULL;
}

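/*
 * Bookkeeping for a new mapping.  Before kmalloc() is usable we take one
 * of the static xresource slots above; once those ten are used up, the
 * struct resource and its name string come from a single kmalloc()
 * allocation, with the name stored directly behind the struct.
 */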
static unsigned long shmedia_alloc_io(unsigned long phys, unsigned long size,
				      const char *name)
{
	static int printed_full = 0;
	struct xresource *xres;
	struct resource *res;
	char *tack;
	int tlen;

	if (name == NULL) name = "???";

	if ((xres = xres_alloc()) != 0) {
		tack = xres->xname;
		res = &xres->xres;
	} else {
		if (!printed_full) {
			printk("%s: done with statics, switching to kmalloc\n",
			       __FUNCTION__);
			printed_full = 1;
		}
		tlen = strlen(name);
		tack = kmalloc(sizeof (struct resource) + tlen + 1, GFP_KERNEL);
		if (!tack)
			return -ENOMEM;
		memset(tack, 0, sizeof(struct resource));
		res = (struct resource *) tack;
		tack += sizeof (struct resource);
	}

	strncpy(tack, name, XNMLN);
	tack[XNMLN] = 0;
	res->name = tack;

	return shmedia_ioremap(res, phys, size);
}

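/*
 * Reserve a page-aligned chunk of the shmedia_iomap window for this
 * request, map it page by page onto the physical address, then trim the
 * recorded resource back to the caller's exact range so that res->start
 * is the virtual address to hand back.
 */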
static unsigned long shmedia_ioremap(struct resource *res, u32 pa, int sz)
{
	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
	unsigned long va;
	unsigned int psz;

	if (allocate_resource(&shmedia_iomap, res, round_sz,
			      shmedia_iomap.start, shmedia_iomap.end,
			      PAGE_SIZE, NULL, NULL) != 0) {
		panic("alloc_io_res(%s): cannot occupy\n",
		      (res->name != NULL)? res->name: "???");
	}

	va = res->start;
	pa &= PAGE_MASK;

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	/* log at boot time ... */
	printk("mapioaddr: %6s [%2d page%s] va 0x%08lx pa 0x%08x\n",
	       ((res->name != NULL) ? res->name : "???"),
	       psz, psz == 1 ? " " : "s", va, pa);

	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
		shmedia_mapioaddr(pa, va);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}

	res->start += offset;
	res->end = res->start + sz - 1;	/* not strictly necessary.. */

	return res->start;
}

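/*
 * Inverse of shmedia_ioremap(): unmap the region one page at a time (the
 * BUG_ON() insists on a page-aligned length) and release it back to the
 * shmedia_iomap resource tree.
 */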
static void shmedia_free_io(struct resource *res)
{
	unsigned long len = res->end - res->start + 1;

	BUG_ON((len & (PAGE_SIZE - 1)) != 0);

	while (len) {
		len -= PAGE_SIZE;
		shmedia_unmapioaddr(res->start + len);
	}

	release_resource(res);
}

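/*
 * Page-table pages for the mapping helpers below come from the bootmem
 * allocator until the page allocator is up (after_bootmem), and from
 * get_zeroed_page(GFP_ATOMIC) afterwards.  Either way the result must be
 * page-aligned, and running out of memory this early is fatal.
 */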
static void *sh64_get_page(void)
{
	extern int after_bootmem;
	void *page;

	if (after_bootmem) {
		page = (void *)get_zeroed_page(GFP_ATOMIC);
	} else {
		page = alloc_bootmem_pages(PAGE_SIZE);
	}

	if (!page || ((unsigned long)page & ~PAGE_MASK))
		panic("sh64_get_page: Out of memory already?\n");

	return page;
}

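/*
 * Install the kernel mapping for a single I/O page by hand: walk
 * pgd -> pmd -> pte, allocating missing intermediate tables through
 * sh64_get_page(), then set a read/write, shared PTE.  The hard-coded
 * flags value of 1 selects the "CB0-1 device" cache behaviour noted in
 * the comment below.
 */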
static void shmedia_mapioaddr(unsigned long pa, unsigned long va)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep, pte;
	pgprot_t prot;
	unsigned long flags = 1; /* 1 = CB0-1 device */

	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);

	pgdp = pgd_offset_k(va);
	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
		pmdp = (pmd_t *)sh64_get_page();
		set_pgd(pgdp, __pgd((unsigned long)pmdp | _KERNPG_TABLE));
	}

	pmdp = pmd_offset(pgdp, va);
	if (pmd_none(*pmdp) || !pmd_present(*pmdp) ) {
		ptep = (pte_t *)sh64_get_page();
		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
	}

	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);

	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
	ptep = pte_offset_kernel(pmdp, va);

	if (!pte_none(*ptep) &&
	    pte_val(*ptep) != pte_val(pte))
		pte_ERROR(*ptep);

	set_pte(ptep, pte);

	flush_tlb_kernel_range(va, va + PAGE_SIZE);
}

static void shmedia_unmapioaddr(unsigned long vaddr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(vaddr);
	pmdp = pmd_offset(pgdp, vaddr);

	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
		return;

	ptep = pte_offset_kernel(pmdp, vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return;

	pte_clear(&init_mm, vaddr, ptep);
}

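/*
 * onchip_remap() and onchip_unmap() are the public face of the
 * mini-allocator above: they are what maps on-chip peripherals such as
 * the timers and the interrupt controller before kmalloc() is available.
 * Requests smaller than a page are rounded up to a single page.
 */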
unsigned long onchip_remap(unsigned long phys, unsigned long size, const char *name)
{
	if (size < PAGE_SIZE)
		size = PAGE_SIZE;

	return shmedia_alloc_io(phys, size, name);
}

void onchip_unmap(unsigned long vaddr)
{
	struct resource *res;
	unsigned int psz;

	res = shmedia_find_resource(&shmedia_iomap, vaddr);
	if (!res) {
		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
		       __FUNCTION__, vaddr);
		return;
	}

	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;

	printk(KERN_DEBUG "unmapioaddr: %6s [%2d page%s] freed\n",
	       res->name, psz, psz == 1 ? " " : "s");

	shmedia_free_io(res);

	if ((char *)res >= (char *)xresv &&
	    (char *)res < (char *)&xresv[XNRES]) {
		xres_free((struct xresource *)res);
	} else {
		kfree(res);
	}
}

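/*
 * When CONFIG_PROC_FS is enabled, the current contents of the
 * shmedia_iomap window are exported through a read-only "io_map" proc
 * entry, one "start-end: name" line per mapped resource.
 */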
#ifdef CONFIG_PROC_FS
static int
ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
		  void *data)
{
	char *p = buf, *e = buf + length;
	struct resource *r;
	const char *nm;

	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
		if (p + 32 >= e)	/* Better than nothing */
			break;
		if ((nm = r->name) == 0) nm = "???";
		p += sprintf(p, "%08lx-%08lx: %s\n", r->start, r->end, nm);
	}

	return p-buf;
}
#endif /* CONFIG_PROC_FS */

static int __init register_proc_onchip(void)
{
#ifdef CONFIG_PROC_FS
	create_proc_read_entry("io_map",0,0, ioremap_proc_info, &shmedia_iomap);
#endif
	return 0;
}

__initcall(register_proc_onchip);