Diffstat (limited to 'arch/x86/mm/ioremap.c')
 arch/x86/mm/ioremap.c | 26 +++++++++++---------------
 1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index d1b867101e5f..115f13ee40c9 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -142,7 +142,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
-	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
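Note: is_ISA_range() itself is not part of this diff. Based on the open-coded comparison it replaces, the helper is expected to reduce to roughly the following; this is a sketch of the assumed shape, not the actual header definition.

/* Sketch only: assumed shape of the helper, mirroring the removed check. */
#define is_ISA_range(s, e)	((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)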
@@ -261,7 +261,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
	/*
	 * Ideally, this should be:
-	 *	pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
@@ -285,7 +285,7 @@ EXPORT_SYMBOL(ioremap_nocache);
  */
 void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 {
-	if (pat_wc_enabled)
+	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
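For context, the two hunks above are the pat_wc_enabled to pat_enabled rename as seen by callers. A minimal, hypothetical driver-side use of ioremap_wc() follows; when PAT is disabled the function presumably falls back to the uncached variant, so callers need no PAT check of their own. The names fb_phys and fb_len are placeholders, not taken from this patch.

/* Hypothetical driver snippet, assuming <linux/io.h> is available. */
#include <linux/io.h>

static void __iomem *fb;

static int map_framebuffer(unsigned long fb_phys, unsigned long fb_len)
{
	fb = ioremap_wc(fb_phys, fb_len);	/* WC when pat_enabled, UC- otherwise */
	if (!fb)
		return -ENOMEM;
	return 0;
}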
@@ -341,8 +341,8 @@ void iounmap(volatile void __iomem *addr)
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
-	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
-	    addr < phys_to_virt(ISA_END_ADDRESS))
+	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
+	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
@@ -355,7 +355,7 @@ void iounmap(volatile void __iomem *addr)
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
-		if (p->addr == addr)
+		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);
@@ -369,7 +369,7 @@ void iounmap(volatile void __iomem *addr)
	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
-	o = remove_vm_area((void *)addr);
+	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
 }
@@ -388,7 +388,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

-	addr = (void *)ioremap_default(start, PAGE_SIZE);
+	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

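The (void __force *) casts added in the iounmap() and xlate_dev_mem_ptr() hunks only matter to sparse: addr lives in the __iomem address space, while phys_to_virt(), the vmlist entries and remove_vm_area() deal in plain kernel pointers, and __force marks the conversion as intentional so the checker stays quiet. Under gcc the annotations compile to nothing. The definitions below are a rough sketch of how <linux/compiler.h> wires this up, quoted from memory rather than from this patch.

/* Approximate sparse plumbing (assumption, not part of this diff): */
#ifdef __CHECKER__
# define __iomem	__attribute__((noderef, address_space(2)))
# define __force	__attribute__((force))
#else
# define __iomem
# define __force
#endif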
@@ -404,8 +404,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
	return;
 }

-#ifdef CONFIG_X86_32
-
 int __initdata early_ioremap_debug;

 static int __init early_ioremap_debug_setup(char *str)
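Dropping the #ifdef CONFIG_X86_32 guard here (and the matching #endif in the last hunk) makes the early_ioremap() fixmap machinery that follows available on 64-bit as well. A typical boot-time use looks roughly like the sketch below; the surrounding function, its name and the table parsing are hypothetical, and the exact early_ioremap() prototype at this point in the tree is assumed.

/* Sketch: peek at a firmware table before the normal ioremap() path is up. */
static void __init peek_fw_table(unsigned long phys)
{
	void *p = early_ioremap(phys, PAGE_SIZE);	/* assumed return type */

	if (p) {
		/* ... read the table ... */
		early_iounmap(p, PAGE_SIZE);
	}
}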
@@ -417,8 +415,7 @@ static int __init early_ioremap_debug_setup(char *str)
 early_param("early_ioremap_debug", early_ioremap_debug_setup);

 static __initdata int after_paging_init;
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
-		__section(.bss.page_aligned);
+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
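__page_aligned_bss replaces the open-coded __section(.bss.page_aligned) annotation and is expected to expand to roughly the following, adding the explicit PAGE_SIZE alignment attribute on top of the section placement. This is a sketch from memory (likely living in <linux/linkage.h> at the time), not something shown in this patch.

/* Assumed definition of the helper used above: */
#define __page_aligned_bss	__section(.bss.page_aligned) __aligned(PAGE_SIZE)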
@@ -507,10 +504,11 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
		return;
	}
	pte = early_ioremap_pte(addr);
+
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
-		pte_clear(NULL, addr, pte);
+		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
 }

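Passing &init_mm instead of NULL matches the generic shape of the helper sketched below; some pte_clear() implementations inspect the mm argument, and init_mm is the accurate owner for a fixmap (kernel) address. The prototype is an assumption about the generic page-table API, not part of this diff.

/* Assumed generic shape of the helper: */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
			     pte_t *ptep);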
@@ -648,5 +646,3 @@ void __this_fixmap_does_not_exist(void)
 {
	WARN_ON(1);
 }
-
-#endif /* CONFIG_X86_32 */