author		Thomas Gleixner <tglx@linutronix.de>	2008-01-30 07:34:05 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:34:05 -0500
commit		240d3a7c47e3fb9c2533f63e9e323a25d91d0643
tree		133f2b2bb45eb70cdc9e694db12a0a1c8ac81d4c	/arch/x86
parent		e4c1b977f0036c00ebabb60375cb63d0de9d43fa

x86: unify ioremap

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

Diffstat (limited to 'arch/x86')

-rw-r--r--	arch/x86/mm/ioremap_32.c	18
-rw-r--r--	arch/x86/mm/ioremap_64.c	317
2 files changed, 313 insertions(+), 22 deletions(-)
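The unification works by making arch/x86/mm/ioremap_32.c and arch/x86/mm/ioremap_64.c textually identical (both index lines end at the same blob, f4a2082568c8): the 64-bit-only __phys_addr() helper and the 32-bit-only early_ioremap() machinery are each wrapped in CONFIG_X86_64 / CONFIG_X86_32 guards around a shared __ioremap()/iounmap() core. A minimal sketch of the resulting file layout, condensed from the patch below (illustrative only, not part of the commit):

```c
/* Condensed sketch of the unified ioremap file; see the full patch below. */
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_X86_64
/* 64-bit only: kernel-text addresses live above __START_KERNEL_map. */
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
#endif

/* ... common __ioremap()/iounmap() implementation shared by both ... */

#ifdef CONFIG_X86_32
/* 32-bit only: boot-time fixmap-based early_ioremap()/early_iounmap(). */
#endif
```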
diff --git a/arch/x86/mm/ioremap_32.c b/arch/x86/mm/ioremap_32.c
index 4d919c37d1d6..f4a2082568c8 100644
--- a/arch/x86/mm/ioremap_32.c
+++ b/arch/x86/mm/ioremap_32.c
@@ -19,6 +19,18 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_X86_64
+
+unsigned long __phys_addr(unsigned long x)
+{
+	if (x >= __START_KERNEL_map)
+		return x - __START_KERNEL_map + phys_base;
+	return x - PAGE_OFFSET;
+}
+EXPORT_SYMBOL(__phys_addr);
+
+#endif
+
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
@@ -49,6 +61,7 @@ static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 	 * memmap entry.
 	 */
 	err = change_page_attr_addr(vaddr, npages, prot);
+
 	if (!err)
 		global_flush_tlb();
 
@@ -83,6 +96,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
 		return (__force void __iomem *)phys_to_virt(phys_addr);
 
+#ifdef CONFIG_X86_32
 	/*
 	 * Don't allow anybody to remap normal RAM that we're using..
 	 */
@@ -98,6 +112,7 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		if (!PageReserved(page))
 			return NULL;
 	}
+#endif
 
 	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
 
@@ -211,6 +226,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
+#ifdef CONFIG_X86_32
 
 int __initdata early_ioremap_debug;
 
@@ -443,3 +459,5 @@ void __this_fixmap_does_not_exist(void)
 {
 	WARN_ON(1);
 }
+
+#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/mm/ioremap_64.c b/arch/x86/mm/ioremap_64.c
index e79d2b353de0..f4a2082568c8 100644
--- a/arch/x86/mm/ioremap_64.c
+++ b/arch/x86/mm/ioremap_64.c
@@ -6,6 +6,7 @@
  * (C) Copyright 1995 1996 Linus Torvalds
  */
 
+#include <linux/bootmem.h>
 #include <linux/init.h>
 #include <linux/io.h>
 #include <linux/module.h>
@@ -18,6 +19,8 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_X86_64
+
 unsigned long __phys_addr(unsigned long x)
 {
 	if (x >= __START_KERNEL_map)
@@ -26,6 +29,8 @@ unsigned long __phys_addr(unsigned long x)
 }
 EXPORT_SYMBOL(__phys_addr);
 
+#endif
+
 /*
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
@@ -33,28 +38,33 @@ EXPORT_SYMBOL(__phys_addr);
 static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 			       pgprot_t prot)
 {
-	int err = 0;
-	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
-		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-		unsigned long vaddr = (unsigned long) __va(phys_addr);
-		int level;
-
-		/*
-		 * If there is no identity map for this address,
-		 * change_page_attr_addr is unnecessary
-		 */
-		if (!lookup_address(vaddr, &level))
-			return err;
-		/*
-		 * Must use a address here and not struct page because
-		 * the phys addr can be a in hole between nodes and
-		 * not have an memmap entry.
-		 */
-		err = change_page_attr_addr(vaddr, npages, prot);
-
-		if (!err)
-			global_flush_tlb();
-	}
+	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
+	int err, level;
+
+	/* No change for pages after the last mapping */
+	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
+		return 0;
+
+	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	vaddr = (unsigned long) __va(phys_addr);
+
+	/*
+	 * If there is no identity map for this address,
+	 * change_page_attr_addr is unnecessary
+	 */
+	if (!lookup_address(vaddr, &level))
+		return 0;
+
+	/*
+	 * Must use an address here and not struct page because the
+	 * phys addr can be a in hole between nodes and not have a
+	 * memmap entry.
+	 */
+	err = change_page_attr_addr(vaddr, npages, prot);
+
+	if (!err)
+		global_flush_tlb();
+
 	return err;
 }
 
@@ -86,7 +96,26 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
 		return (__force void __iomem *)phys_to_virt(phys_addr);
 
+#ifdef CONFIG_X86_32
+	/*
+	 * Don't allow anybody to remap normal RAM that we're using..
+	 */
+	if (phys_addr <= virt_to_phys(high_memory - 1)) {
+		char *t_addr, *t_end;
+		struct page *page;
+
+		t_addr = __va(phys_addr);
+		t_end = t_addr + (size - 1);
+
+		for (page = virt_to_page(t_addr);
+		     page <= virt_to_page(t_end); page++)
+			if (!PageReserved(page))
+				return NULL;
+	}
+#endif
+
 	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
+
 	/*
 	 * Mappings have to be page-aligned
 	 */
@@ -107,10 +136,12 @@ void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
 		return NULL;
 	}
+
 	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
 		vunmap(addr);
 		return NULL;
 	}
+
 	return (void __iomem *) (offset + (char __iomem *)addr);
 }
 EXPORT_SYMBOL(__ioremap);
@@ -154,12 +185,19 @@ void iounmap(volatile void __iomem *addr)
 
 	if ((void __force *)addr <= high_memory)
 		return;
+
+	/*
+	 * __ioremap special-cases the PCI/ISA range by not instantiating a
+	 * vm_area and by simply returning an address into the kernel mapping
+	 * of ISA space. So handle that here.
+	 */
 	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
 	addr = (volatile void __iomem *)
 		(PAGE_MASK & (unsigned long __force)addr);
+
 	/* Use the vm area unlocked, assuming the caller
 	   ensures there isn't another iounmap for the same address
 	   in parallel. Reuse of the virtual address is prevented by
@@ -188,3 +226,238 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
+#ifdef CONFIG_X86_32
+
+int __initdata early_ioremap_debug;
+
+static int __init early_ioremap_debug_setup(char *str)
+{
+	early_ioremap_debug = 1;
+
+	return 0;
+}
+early_param("early_ioremap_debug", early_ioremap_debug_setup);
+
+static __initdata int after_paging_init;
+static __initdata unsigned long bm_pte[1024]
+				__attribute__((aligned(PAGE_SIZE)));
+
+static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
+{
+	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
+}
+
+static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
+{
+	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
+}
+
+void __init early_ioremap_init(void)
+{
+	unsigned long *pgd;
+
+	if (early_ioremap_debug)
+		printk(KERN_DEBUG "early_ioremap_init()\n");
+
+	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+	*pgd = __pa(bm_pte) | _PAGE_TABLE;
+	memset(bm_pte, 0, sizeof(bm_pte));
+	/*
+	 * The boot-ioremap range spans multiple pgds, for which
+	 * we are not prepared:
+	 */
+	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
+		WARN_ON(1);
+		printk(KERN_WARNING "pgd %p != %p\n",
+		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+		       fix_to_virt(FIX_BTMAP_BEGIN));
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
+		       fix_to_virt(FIX_BTMAP_END));
+
+		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
+		       FIX_BTMAP_BEGIN);
+	}
+}
+
+void __init early_ioremap_clear(void)
+{
+	unsigned long *pgd;
+
+	if (early_ioremap_debug)
+		printk(KERN_DEBUG "early_ioremap_clear()\n");
+
+	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
+	*pgd = 0;
+	__flush_tlb_all();
+}
+
+void __init early_ioremap_reset(void)
+{
+	enum fixed_addresses idx;
+	unsigned long *pte, phys, addr;
+
+	after_paging_init = 1;
+	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
+		addr = fix_to_virt(idx);
+		pte = early_ioremap_pte(addr);
+		if (!*pte & _PAGE_PRESENT) {
+			phys = *pte & PAGE_MASK;
+			set_fixmap(idx, phys);
+		}
+	}
+}
+
+static void __init __early_set_fixmap(enum fixed_addresses idx,
+				      unsigned long phys, pgprot_t flags)
+{
+	unsigned long *pte, addr = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+	pte = early_ioremap_pte(addr);
+	if (pgprot_val(flags))
+		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
+	else
+		*pte = 0;
+	__flush_tlb_one(addr);
+}
+
+static inline void __init early_set_fixmap(enum fixed_addresses idx,
+					   unsigned long phys)
+{
+	if (after_paging_init)
+		set_fixmap(idx, phys);
+	else
+		__early_set_fixmap(idx, phys, PAGE_KERNEL);
+}
+
+static inline void __init early_clear_fixmap(enum fixed_addresses idx)
+{
+	if (after_paging_init)
+		clear_fixmap(idx);
+	else
+		__early_set_fixmap(idx, 0, __pgprot(0));
+}
+
+
+int __initdata early_ioremap_nested;
+
+static int __init check_early_ioremap_leak(void)
+{
+	if (!early_ioremap_nested)
+		return 0;
+
+	printk(KERN_WARNING
+	       "Debug warning: early ioremap leak of %d areas detected.\n",
+	       early_ioremap_nested);
+	printk(KERN_WARNING
+	       "please boot with early_ioremap_debug and report the dmesg.\n");
+	WARN_ON(1);
+
+	return 1;
+}
+late_initcall(check_early_ioremap_leak);
+
+void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
+{
+	unsigned long offset, last_addr;
+	unsigned int nrpages, nesting;
+	enum fixed_addresses idx0, idx;
+
+	WARN_ON(system_state != SYSTEM_BOOTING);
+
+	nesting = early_ioremap_nested;
+	if (early_ioremap_debug) {
+		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
+		       phys_addr, size, nesting);
+		dump_stack();
+	}
+
+	/* Don't allow wraparound or zero size */
+	last_addr = phys_addr + size - 1;
+	if (!size || last_addr < phys_addr) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	if (nesting >= FIX_BTMAPS_NESTING) {
+		WARN_ON(1);
+		return NULL;
+	}
+	early_ioremap_nested++;
+	/*
+	 * Mappings have to be page-aligned
+	 */
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
+	size = PAGE_ALIGN(last_addr) - phys_addr;
+
+	/*
+	 * Mappings have to fit in the FIX_BTMAP area.
+	 */
+	nrpages = size >> PAGE_SHIFT;
+	if (nrpages > NR_FIX_BTMAPS) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	/*
+	 * Ok, go for it..
+	 */
+	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+	idx = idx0;
+	while (nrpages > 0) {
+		early_set_fixmap(idx, phys_addr);
+		phys_addr += PAGE_SIZE;
+		--idx;
+		--nrpages;
+	}
+	if (early_ioremap_debug)
+		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
+
+	return (void *) (offset + fix_to_virt(idx0));
+}
+
+void __init early_iounmap(void *addr, unsigned long size)
+{
+	unsigned long virt_addr;
+	unsigned long offset;
+	unsigned int nrpages;
+	enum fixed_addresses idx;
+	unsigned int nesting;
+
+	nesting = --early_ioremap_nested;
+	WARN_ON(nesting < 0);
+
+	if (early_ioremap_debug) {
+		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
+		       size, nesting);
+		dump_stack();
+	}
+
+	virt_addr = (unsigned long)addr;
+	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
+		WARN_ON(1);
+		return;
+	}
+	offset = virt_addr & ~PAGE_MASK;
+	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
+
+	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
+	while (nrpages > 0) {
+		early_clear_fixmap(idx);
+		--idx;
+		--nrpages;
+	}
+}
+
+void __this_fixmap_does_not_exist(void)
+{
+	WARN_ON(1);
+}
+
+#endif /* CONFIG_X86_32 */