diff options
Diffstat (limited to 'arch/x86/mm/ioremap.c')
-rw-r--r-- | arch/x86/mm/ioremap.c | 81 |
1 files changed, 19 insertions, 62 deletions
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 2feb9bdedaaf..12e4d2d3c110 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c | |||
@@ -24,43 +24,6 @@ | |||
24 | 24 | ||
25 | #include "physaddr.h" | 25 | #include "physaddr.h" |
26 | 26 | ||
27 | int page_is_ram(unsigned long pagenr) | ||
28 | { | ||
29 | resource_size_t addr, end; | ||
30 | int i; | ||
31 | |||
32 | /* | ||
33 | * A special case is the first 4Kb of memory; | ||
34 | * This is a BIOS owned area, not kernel ram, but generally | ||
35 | * not listed as such in the E820 table. | ||
36 | */ | ||
37 | if (pagenr == 0) | ||
38 | return 0; | ||
39 | |||
40 | /* | ||
41 | * Second special case: Some BIOSen report the PC BIOS | ||
42 | * area (640->1Mb) as ram even though it is not. | ||
43 | */ | ||
44 | if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) && | ||
45 | pagenr < (BIOS_END >> PAGE_SHIFT)) | ||
46 | return 0; | ||
47 | |||
48 | for (i = 0; i < e820.nr_map; i++) { | ||
49 | /* | ||
50 | * Not usable memory: | ||
51 | */ | ||
52 | if (e820.map[i].type != E820_RAM) | ||
53 | continue; | ||
54 | addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
55 | end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT; | ||
56 | |||
57 | |||
58 | if ((pagenr >= addr) && (pagenr < end)) | ||
59 | return 1; | ||
60 | } | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | /* | 27 | /* |
65 | * Fix up the linear direct mapping of the kernel to avoid cache attribute | 28 | * Fix up the linear direct mapping of the kernel to avoid cache attribute |
66 | * conflicts. | 29 | * conflicts. |
@@ -281,30 +244,6 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) | |||
281 | } | 244 | } |
282 | EXPORT_SYMBOL(ioremap_cache); | 245 | EXPORT_SYMBOL(ioremap_cache); |
283 | 246 | ||
284 | static void __iomem *ioremap_default(resource_size_t phys_addr, | ||
285 | unsigned long size) | ||
286 | { | ||
287 | unsigned long flags; | ||
288 | void __iomem *ret; | ||
289 | int err; | ||
290 | |||
291 | /* | ||
292 | * - WB for WB-able memory and no other conflicting mappings | ||
293 | * - UC_MINUS for non-WB-able memory with no other conflicting mappings | ||
295 | * - Inherit from conflicting mappings otherwise | ||
295 | */ | ||
296 | err = reserve_memtype(phys_addr, phys_addr + size, | ||
297 | _PAGE_CACHE_WB, &flags); | ||
298 | if (err < 0) | ||
299 | return NULL; | ||
300 | |||
301 | ret = __ioremap_caller(phys_addr, size, flags, | ||
302 | __builtin_return_address(0)); | ||
303 | |||
304 | free_memtype(phys_addr, phys_addr + size); | ||
305 | return ret; | ||
306 | } | ||
307 | |||
308 | void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, | 247 | void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, |
309 | unsigned long prot_val) | 248 | unsigned long prot_val) |
310 | { | 249 | { |
@@ -380,7 +319,7 @@ void *xlate_dev_mem_ptr(unsigned long phys) | |||
380 | if (page_is_ram(start >> PAGE_SHIFT)) | 319 | if (page_is_ram(start >> PAGE_SHIFT)) |
381 | return __va(phys); | 320 | return __va(phys); |
382 | 321 | ||
383 | addr = (void __force *)ioremap_default(start, PAGE_SIZE); | 322 | addr = (void __force *)ioremap_cache(start, PAGE_SIZE); |
384 | if (addr) | 323 | if (addr) |
385 | addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK)); | 324 | addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK)); |
386 | 325 | ||
@@ -446,6 +385,10 @@ void __init early_ioremap_init(void) | |||
446 | * The boot-ioremap range spans multiple pmds, for which | 385 | * The boot-ioremap range spans multiple pmds, for which |
447 | * we are not prepared: | 386 | * we are not prepared: |
448 | */ | 387 | */ |
388 | #define __FIXADDR_TOP (-PAGE_SIZE) | ||
389 | BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) | ||
390 | != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); | ||
391 | #undef __FIXADDR_TOP | ||
449 | if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { | 392 | if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) { |
450 | WARN_ON(1); | 393 | WARN_ON(1); |
451 | printk(KERN_WARNING "pmd %p != %p\n", | 394 | printk(KERN_WARNING "pmd %p != %p\n", |
@@ -505,6 +448,20 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx) | |||
505 | static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata; | 448 | static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata; |
506 | static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; | 449 | static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata; |
507 | 450 | ||
451 | void __init fixup_early_ioremap(void) | ||
452 | { | ||
453 | int i; | ||
454 | |||
455 | for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { | ||
456 | if (prev_map[i]) { | ||
457 | WARN_ON(1); | ||
458 | break; | ||
459 | } | ||
460 | } | ||
461 | |||
462 | early_ioremap_init(); | ||
463 | } | ||
464 | |||
508 | static int __init check_early_ioremap_leak(void) | 465 | static int __init check_early_ioremap_leak(void) |
509 | { | 466 | { |
510 | int count = 0; | 467 | int count = 0; |