Diffstat (limited to 'arch/x86/mm/ioremap.c')
-rw-r--r--   arch/x86/mm/ioremap.c | 37
1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index d1b867101e5f..24c1d3c30186 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/mmiotrace.h>
 
 #include <asm/cacheflush.h>
 #include <asm/e820.h>
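The new header pulls in the mmiotrace hooks used further down. When CONFIG_MMIOTRACE is disabled the hooks are expected to compile away to empty inlines, so untraced kernels pay nothing for the instrumentation. A minimal sketch of what such fallbacks look like, assuming the usual pattern in <linux/mmiotrace.h>:

    /* Sketch of the CONFIG_MMIOTRACE=n stubs; the authoritative
     * definitions live in <linux/mmiotrace.h>. */
    static inline void mmiotrace_ioremap(resource_size_t offset,
                                         unsigned long size,
                                         void __iomem *addr)
    {
    }

    static inline void mmiotrace_iounmap(volatile void __iomem *addr)
    {
    }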
@@ -122,10 +123,13 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 {
         unsigned long pfn, offset, vaddr;
         resource_size_t last_addr;
+        const resource_size_t unaligned_phys_addr = phys_addr;
+        const unsigned long unaligned_size = size;
         struct vm_struct *area;
         unsigned long new_prot_val;
         pgprot_t prot;
         int retval;
+        void __iomem *ret_addr;
 
         /* Don't allow wraparound or zero size */
         last_addr = phys_addr + size - 1;
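The caller's original phys_addr and size are captured in const locals because __ioremap_caller() later rounds the request to page granularity before building the mapping, and mmiotrace should report what the caller actually asked for. Roughly, the alignment performed later in the function (not part of this hunk) is:

    /* Paraphrase of the alignment done later in __ioremap_caller(),
     * shown only to motivate the unaligned_* copies above. */
    offset = phys_addr & ~PAGE_MASK;   /* sub-page offset requested */
    phys_addr &= PAGE_MASK;            /* map from the page boundary */
    size = PAGE_ALIGN(last_addr + 1) - phys_addr;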
@@ -142,7 +146,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
         /*
          * Don't remap the low PCI/ISA area, it's always mapped..
          */
-        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+        if (is_ISA_range(phys_addr, last_addr))
                 return (__force void __iomem *)phys_to_virt(phys_addr);
 
         /*
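The open-coded comparison becomes the is_ISA_range() helper, keeping the ISA-window test in one place. It is in essence the same check; a sketch, assuming the macro in <asm/e820.h>:

    /* Sketch of the helper this hunk switches to: */
    #define is_ISA_range(s, e) \
            ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)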
@@ -233,7 +237,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                 return NULL;
         }
 
-        return (void __iomem *) (vaddr + offset);
+        ret_addr = (void __iomem *) (vaddr + offset);
+        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
+
+        return ret_addr;
 }
 
 /**
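Routing the result through ret_addr lets the hook see the final user-visible address together with the pre-alignment request, and every user of the ioremap family becomes traceable without source changes on its side. A hypothetical caller, purely for illustration (the device address is made up):

    /* With CONFIG_MMIOTRACE enabled, this mapping would be reported
     * to the tracer as (0xfebf0000, 0x100, <returned address>). */
    static void __iomem *regs;

    static int example_probe(void)
    {
            regs = ioremap_nocache(0xfebf0000, 0x100);
            if (!regs)
                    return -ENOMEM;
            return 0;
    }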
@@ -261,7 +268,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
         /*
          * Ideally, this should be:
-         *      pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
          *
          * Till we fix all X drivers to use ioremap_wc(), we will use
          * UC MINUS.
@@ -285,7 +292,7 @@ EXPORT_SYMBOL(ioremap_nocache);
  */
 void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 {
-        if (pat_wc_enabled)
+        if (pat_enabled)
                 return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                         __builtin_return_address(0));
         else
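ioremap_wc() hands out a write-combining mapping only when PAT is available and quietly degrades to ioremap_nocache() otherwise, so callers need no fallback of their own. A hypothetical use, where WC suits streaming writes such as a frame buffer (names are made up):

    /* On non-PAT hardware this transparently falls back to UC-. */
    static void __iomem *fb;

    static int fb_map(unsigned long fb_phys, unsigned long fb_len)
    {
            fb = ioremap_wc(fb_phys, fb_len);
            return fb ? 0 : -ENOMEM;
    }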
@@ -341,13 +348,15 @@ void iounmap(volatile void __iomem *addr)
          * vm_area and by simply returning an address into the kernel mapping
          * of ISA space. So handle that here.
          */
-        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
-            addr < phys_to_virt(ISA_END_ADDRESS))
+        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
+            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                 return;
 
         addr = (volatile void __iomem *)
                 (PAGE_MASK & (unsigned long __force)addr);
 
+        mmiotrace_iounmap(addr);
+
         /* Use the vm area unlocked, assuming the caller
            ensures there isn't another iounmap for the same address
            in parallel. Reuse of the virtual address is prevented by
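The added __force casts placate sparse rather than the compiler: __iomem pointers live in their own sparse address space and must not mix with plain kernel pointers without an explicit opt-out. The new hook runs after addr is rounded down to its page base but before the vm area is torn down, while the mapping is still valid. A sketch of the annotations involved, assuming the __CHECKER__ branch of <linux/compiler.h>:

    /* Sketch; only in effect when sparse (__CHECKER__) runs: */
    # define __iomem __attribute__((noderef, address_space(2)))
    # define __force __attribute__((force))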
@@ -355,7 +364,7 @@ void iounmap(volatile void __iomem *addr)
            cpa takes care of the direct mappings. */
         read_lock(&vmlist_lock);
         for (p = vmlist; p; p = p->next) {
-                if (p->addr == addr)
+                if (p->addr == (void __force *)addr)
                         break;
         }
         read_unlock(&vmlist_lock);
@@ -369,7 +378,7 @@ void iounmap(volatile void __iomem *addr)
         free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
 
         /* Finally remove it */
-        o = remove_vm_area((void *)addr);
+        o = remove_vm_area((void __force *)addr);
         BUG_ON(p != o || o == NULL);
         kfree(p);
 }
@@ -388,7 +397,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
         if (page_is_ram(start >> PAGE_SHIFT))
                 return __va(phys);
 
-        addr = (void *)ioremap_default(start, PAGE_SIZE);
+        addr = (void __force *)ioremap_default(start, PAGE_SIZE);
         if (addr)
                 addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
 
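Only the page containing phys is mapped; the sub-page offset is then OR'd back into the page-aligned virtual address. A worked example with 4 KiB pages (numbers made up):

    /* phys  = 0x000f56ac
     * start = phys & PAGE_MASK   = 0x000f5000  -> the page that is mapped
     * addr |= phys & ~PAGE_MASK  = addr | 0x6ac -> byte the caller wanted
     */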
@@ -404,8 +413,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
         return;
 }
 
-#ifdef CONFIG_X86_32
-
 int __initdata early_ioremap_debug;
 
 static int __init early_ioremap_debug_setup(char *str)
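Dropping the CONFIG_X86_32 guard (here and at the end of the file) makes the early_ioremap() machinery available to 64-bit builds as well, which mmiotrace and other early-boot users rely on. A hypothetical early-boot caller, assuming the usual early_ioremap()/early_iounmap() pair (the function and its argument are invented):

    /* Now buildable on x86_64 too. */
    static void __init probe_boot_table(unsigned long table_phys)
    {
            void *hdr = early_ioremap(table_phys, PAGE_SIZE);

            if (hdr) {
                    /* ... inspect the table before normal ioremap works ... */
                    early_iounmap(hdr, PAGE_SIZE);
            }
    }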
@@ -417,8 +424,7 @@ static int __init early_ioremap_debug_setup(char *str)
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
-        __section(.bss.page_aligned);
+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
 
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
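__page_aligned_bss replaces the open-coded section attribute and also carries the PAGE_SIZE alignment that the bare __section() annotation never guaranteed; a page of PTEs must be truly page-aligned, not merely placed in .bss.page_aligned. A sketch of the helper, assuming its definition in <linux/linkage.h>:

    /* Sketch: section placement plus the alignment the old
     * open-coded form lacked. */
    #define __page_aligned_bss \
            __section(.bss.page_aligned) __aligned(PAGE_SIZE)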
@@ -507,10 +513,11 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
                 return;
         }
         pte = early_ioremap_pte(addr);
+
         if (pgprot_val(flags))
                 set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
         else
-                pte_clear(NULL, addr, pte);
+                pte_clear(&init_mm, addr, pte);
         __flush_tlb_one(addr);
 }
 
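pte_clear() takes the owning mm_struct, and implementations may really use it (the paravirt path in particular), so passing NULL is a latent NULL dereference; fixmap addresses belong to the kernel's init_mm. Roughly how x86 wires the interface (sketch; per-architecture definitions vary):

    /* Clearing goes through set_pte_at(), which under paravirt may
     * dereference the mm argument. */
    #define pte_clear(mm, addr, ptep) \
            set_pte_at((mm), (addr), (ptep), __pte(0))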
@@ -648,5 +655,3 @@ void __this_fixmap_does_not_exist(void)
 {
         WARN_ON(1);
 }
-
-#endif /* CONFIG_X86_32 */