author		Thomas Gleixner <tglx@linutronix.de>	2008-01-30 07:34:05 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-01-30 07:34:05 -0500
commit		e64c8aa0c5e5d23730b2d702297e01cd7fe53144 (patch)
tree		ab5c5a9e560402cfae9171b30f5f7ead3910191e /arch
parent		240d3a7c47e3fb9c2533f63e9e323a25d91d0643 (diff)
x86: unify ioremap_32 and _64
Unify the now-identical ioremap_32.c and ioremap_64.c into a single
ioremap.c file. No code changed.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/Makefile_32	2
-rw-r--r--	arch/x86/mm/Makefile_64	2
-rw-r--r--	arch/x86/mm/ioremap.c (renamed from arch/x86/mm/ioremap_32.c)	0
-rw-r--r--	arch/x86/mm/ioremap_64.c	463
4 files changed, 2 insertions(+), 465 deletions(-)
diff --git a/arch/x86/mm/Makefile_32 b/arch/x86/mm/Makefile_32
index 424e5a862271..af0d39bea6c5 100644
--- a/arch/x86/mm/Makefile_32
+++ b/arch/x86/mm/Makefile_32
@@ -2,7 +2,7 @@
 # Makefile for the linux i386-specific parts of the memory manager.
 #
 
-obj-y	:= init_32.o pgtable_32.o fault_32.o ioremap_32.o extable.o pageattr.o mmap.o
+obj-y	:= init_32.o pgtable_32.o fault_32.o ioremap.o extable.o pageattr.o mmap.o
 
 obj-$(CONFIG_CPA_DEBUG)		+= pageattr-test.o
 obj-$(CONFIG_NUMA)		+= discontig_32.o
diff --git a/arch/x86/mm/Makefile_64 b/arch/x86/mm/Makefile_64
index 043584478457..b564b5a760da 100644
--- a/arch/x86/mm/Makefile_64
+++ b/arch/x86/mm/Makefile_64
@@ -2,7 +2,7 @@
 # Makefile for the linux x86_64-specific parts of the memory manager.
 #
 
-obj-y	:= init_64.o fault_64.o ioremap_64.o extable.o pageattr.o mmap.o
+obj-y	:= init_64.o fault_64.o ioremap.o extable.o pageattr.o mmap.o
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_NUMA)	+= numa_64.o
 obj-$(CONFIG_K8_NUMA)	+= k8topology_64.o
diff --git a/arch/x86/mm/ioremap_32.c b/arch/x86/mm/ioremap.c
index f4a2082568c8..f4a2082568c8 100644
--- a/arch/x86/mm/ioremap_32.c
+++ b/arch/x86/mm/ioremap.c
diff --git a/arch/x86/mm/ioremap_64.c b/arch/x86/mm/ioremap_64.c
deleted file mode 100644
index f4a2082568c8..000000000000
--- a/arch/x86/mm/ioremap_64.c
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-
-#include <linux/bootmem.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-
-#include <asm/cacheflush.h>
-#include <asm/e820.h>
-#include <asm/fixmap.h>
-#include <asm/pgtable.h>
-#include <asm/tlbflush.h>
-
-#ifdef CONFIG_X86_64
-
-unsigned long __phys_addr(unsigned long x)
-{
-	if (x >= __START_KERNEL_map)
-		return x - __START_KERNEL_map + phys_base;
-	return x - PAGE_OFFSET;
-}
-EXPORT_SYMBOL(__phys_addr);
-
-#endif
-
-/*
- * Fix up the linear direct mapping of the kernel to avoid cache attribute
- * conflicts.
- */
-static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
-			       pgprot_t prot)
-{
-	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
-	int err, level;
-
-	/* No change for pages after the last mapping */
-	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
-		return 0;
-
-	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	vaddr = (unsigned long) __va(phys_addr);
-
-	/*
-	 * If there is no identity map for this address,
-	 * change_page_attr_addr is unnecessary
-	 */
-	if (!lookup_address(vaddr, &level))
-		return 0;
-
-	/*
-	 * Must use an address here and not struct page because the
-	 * phys addr can be a in hole between nodes and not have a
-	 * memmap entry.
-	 */
-	err = change_page_attr_addr(vaddr, npages, prot);
-
-	if (!err)
-		global_flush_tlb();
-
-	return err;
-}
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-			unsigned long flags)
-{
-	void __iomem *addr;
-	struct vm_struct *area;
-	unsigned long offset, last_addr;
-	pgprot_t pgprot;
-
-	/* Don't allow wraparound or zero size */
-	last_addr = phys_addr + size - 1;
-	if (!size || last_addr < phys_addr)
-		return NULL;
-
-	/*
-	 * Don't remap the low PCI/ISA area, it's always mapped..
-	 */
-	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-		return (__force void __iomem *)phys_to_virt(phys_addr);
-
-#ifdef CONFIG_X86_32
-	/*
-	 * Don't allow anybody to remap normal RAM that we're using..
-	 */
-	if (phys_addr <= virt_to_phys(high_memory - 1)) {
-		char *t_addr, *t_end;
-		struct page *page;
-
-		t_addr = __va(phys_addr);
-		t_end = t_addr + (size - 1);
-
-		for (page = virt_to_page(t_addr);
-		     page <= virt_to_page(t_end); page++)
-			if (!PageReserved(page))
-				return NULL;
-	}
-#endif
-
-	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
-
-	/*
-	 * Mappings have to be page-aligned
-	 */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
-	/*
-	 * Ok, go for it..
-	 */
-	area = get_vm_area(size, VM_IOREMAP);
-	if (!area)
-		return NULL;
-	area->phys_addr = phys_addr;
-	addr = (void __iomem *) area->addr;
-	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-			       phys_addr, pgprot)) {
-		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
-		return NULL;
-	}
-
-	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
-		vunmap(addr);
-		return NULL;
-	}
-
-	return (void __iomem *) (offset + (char __iomem *)addr);
-}
-EXPORT_SYMBOL(__ioremap);
-
-/**
- * ioremap_nocache - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many
- * busses. In particular driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- *
- * Must be freed with iounmap.
- */
-void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
-{
-	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
-}
-EXPORT_SYMBOL(ioremap_nocache);
-
-/**
- * iounmap - Free a IO remapping
- * @addr: virtual address from ioremap_*
- *
- * Caller must ensure there is only one unmapping for the same pointer.
- */
-void iounmap(volatile void __iomem *addr)
-{
-	struct vm_struct *p, *o;
-
-	if ((void __force *)addr <= high_memory)
-		return;
-
-	/*
-	 * __ioremap special-cases the PCI/ISA range by not instantiating a
-	 * vm_area and by simply returning an address into the kernel mapping
-	 * of ISA space. So handle that here.
-	 */
-	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
-	    addr < phys_to_virt(ISA_END_ADDRESS))
-		return;
-
-	addr = (volatile void __iomem *)
-		(PAGE_MASK & (unsigned long __force)addr);
-
-	/* Use the vm area unlocked, assuming the caller
-	   ensures there isn't another iounmap for the same address
-	   in parallel. Reuse of the virtual address is prevented by
-	   leaving it in the global lists until we're done with it.
-	   cpa takes care of the direct mappings. */
-	read_lock(&vmlist_lock);
-	for (p = vmlist; p; p = p->next) {
-		if (p->addr == addr)
-			break;
-	}
-	read_unlock(&vmlist_lock);
-
-	if (!p) {
-		printk(KERN_ERR "iounmap: bad address %p\n", addr);
-		dump_stack();
-		return;
-	}
-
-	/* Reset the direct mapping. Can block */
-	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);
-
-	/* Finally remove it */
-	o = remove_vm_area((void *)addr);
-	BUG_ON(p != o || o == NULL);
-	kfree(p);
-}
-EXPORT_SYMBOL(iounmap);
-
-#ifdef CONFIG_X86_32
-
-int __initdata early_ioremap_debug;
-
-static int __init early_ioremap_debug_setup(char *str)
-{
-	early_ioremap_debug = 1;
-
-	return 0;
-}
-early_param("early_ioremap_debug", early_ioremap_debug_setup);
-
-static __initdata int after_paging_init;
-static __initdata unsigned long bm_pte[1024]
-				__attribute__((aligned(PAGE_SIZE)));
-
-static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
-{
-	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
-}
-
-static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
-{
-	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
-}
-
-void __init early_ioremap_init(void)
-{
-	unsigned long *pgd;
-
-	if (early_ioremap_debug)
-		printk(KERN_DEBUG "early_ioremap_init()\n");
-
-	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
-	*pgd = __pa(bm_pte) | _PAGE_TABLE;
-	memset(bm_pte, 0, sizeof(bm_pte));
-	/*
-	 * The boot-ioremap range spans multiple pgds, for which
-	 * we are not prepared:
-	 */
-	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
-		WARN_ON(1);
-		printk(KERN_WARNING "pgd %p != %p\n",
-		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
-		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
-		       fix_to_virt(FIX_BTMAP_BEGIN));
-		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
-		       fix_to_virt(FIX_BTMAP_END));
-
-		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
-		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
-		       FIX_BTMAP_BEGIN);
-	}
-}
-
-void __init early_ioremap_clear(void)
-{
-	unsigned long *pgd;
-
-	if (early_ioremap_debug)
-		printk(KERN_DEBUG "early_ioremap_clear()\n");
-
-	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
-	*pgd = 0;
-	__flush_tlb_all();
-}
-
-void __init early_ioremap_reset(void)
-{
-	enum fixed_addresses idx;
-	unsigned long *pte, phys, addr;
-
-	after_paging_init = 1;
-	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
-		addr = fix_to_virt(idx);
-		pte = early_ioremap_pte(addr);
-		if (!*pte & _PAGE_PRESENT) {
-			phys = *pte & PAGE_MASK;
-			set_fixmap(idx, phys);
-		}
-	}
-}
-
-static void __init __early_set_fixmap(enum fixed_addresses idx,
-				      unsigned long phys, pgprot_t flags)
-{
-	unsigned long *pte, addr = __fix_to_virt(idx);
-
-	if (idx >= __end_of_fixed_addresses) {
-		BUG();
-		return;
-	}
-	pte = early_ioremap_pte(addr);
-	if (pgprot_val(flags))
-		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
-	else
-		*pte = 0;
-	__flush_tlb_one(addr);
-}
-
-static inline void __init early_set_fixmap(enum fixed_addresses idx,
-					   unsigned long phys)
-{
-	if (after_paging_init)
-		set_fixmap(idx, phys);
-	else
-		__early_set_fixmap(idx, phys, PAGE_KERNEL);
-}
-
-static inline void __init early_clear_fixmap(enum fixed_addresses idx)
-{
-	if (after_paging_init)
-		clear_fixmap(idx);
-	else
-		__early_set_fixmap(idx, 0, __pgprot(0));
-}
-
-
-int __initdata early_ioremap_nested;
-
-static int __init check_early_ioremap_leak(void)
-{
-	if (!early_ioremap_nested)
-		return 0;
-
-	printk(KERN_WARNING
-	       "Debug warning: early ioremap leak of %d areas detected.\n",
-	       early_ioremap_nested);
-	printk(KERN_WARNING
-	       "please boot with early_ioremap_debug and report the dmesg.\n");
-	WARN_ON(1);
-
-	return 1;
-}
-late_initcall(check_early_ioremap_leak);
-
-void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
-{
-	unsigned long offset, last_addr;
-	unsigned int nrpages, nesting;
-	enum fixed_addresses idx0, idx;
-
-	WARN_ON(system_state != SYSTEM_BOOTING);
-
-	nesting = early_ioremap_nested;
-	if (early_ioremap_debug) {
-		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
-		       phys_addr, size, nesting);
-		dump_stack();
-	}
-
-	/* Don't allow wraparound or zero size */
-	last_addr = phys_addr + size - 1;
-	if (!size || last_addr < phys_addr) {
-		WARN_ON(1);
-		return NULL;
-	}
-
-	if (nesting >= FIX_BTMAPS_NESTING) {
-		WARN_ON(1);
-		return NULL;
-	}
-	early_ioremap_nested++;
-	/*
-	 * Mappings have to be page-aligned
-	 */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr) - phys_addr;
-
-	/*
-	 * Mappings have to fit in the FIX_BTMAP area.
-	 */
-	nrpages = size >> PAGE_SHIFT;
-	if (nrpages > NR_FIX_BTMAPS) {
-		WARN_ON(1);
-		return NULL;
-	}
-
-	/*
-	 * Ok, go for it..
-	 */
-	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
-	idx = idx0;
-	while (nrpages > 0) {
-		early_set_fixmap(idx, phys_addr);
-		phys_addr += PAGE_SIZE;
-		--idx;
-		--nrpages;
-	}
-	if (early_ioremap_debug)
-		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
-
-	return (void *) (offset + fix_to_virt(idx0));
-}
-
-void __init early_iounmap(void *addr, unsigned long size)
-{
-	unsigned long virt_addr;
-	unsigned long offset;
-	unsigned int nrpages;
-	enum fixed_addresses idx;
-	unsigned int nesting;
-
-	nesting = --early_ioremap_nested;
-	WARN_ON(nesting < 0);
-
-	if (early_ioremap_debug) {
-		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
-		       size, nesting);
-		dump_stack();
-	}
-
-	virt_addr = (unsigned long)addr;
-	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
-		WARN_ON(1);
-		return;
-	}
-	offset = virt_addr & ~PAGE_MASK;
-	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
-
-	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
-	while (nrpages > 0) {
-		early_clear_fixmap(idx);
-		--idx;
-		--nrpages;
-	}
-}
-
-void __this_fixmap_does_not_exist(void)
-{
-	WARN_ON(1);
-}
-
-#endif /* CONFIG_X86_32 */
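
For context on the API consolidated by this commit: the kernel-doc above describes how ioremap_nocache() and iounmap() are meant to be used, and the sketch below shows a typical driver of this era following that contract. It is not part of the commit; the device name, physical base address, register offset, and region size are hypothetical, and a real driver would obtain the base from pci_resource_start() or a platform resource rather than a constant.

	#include <linux/io.h>
	#include <linux/module.h>

	/* Hypothetical MMIO region for a made-up "foo" device. */
	#define FOO_PHYS_BASE	0xfebf0000UL	/* assumed physical base */
	#define FOO_MMIO_SIZE	0x1000		/* assumed region size */
	#define FOO_REG_CTRL	0x04		/* assumed control register offset */

	static void __iomem *foo_regs;

	static int __init foo_init(void)
	{
		/* Map the registers uncached; returns NULL on failure. */
		foo_regs = ioremap_nocache(FOO_PHYS_BASE, FOO_MMIO_SIZE);
		if (!foo_regs)
			return -ENOMEM;

		/*
		 * MMIO must go through the read*/write* accessors on the
		 * returned cookie, never through a plain pointer dereference.
		 */
		writel(0x1, foo_regs + FOO_REG_CTRL);
		return 0;
	}

	static void __exit foo_exit(void)
	{
		/* Every successful ioremap*() needs a matching iounmap(). */
		iounmap(foo_regs);
	}

	module_init(foo_init);
	module_exit(foo_exit);
	MODULE_LICENSE("GPL");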