Diffstat (limited to 'arch/x86/mm/ioremap.c')
-rw-r--r--	arch/x86/mm/ioremap.c | 60 ++++++++++++++++++++++++++++++++++++++++++++----------------
1 file changed, 44 insertions(+), 16 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 2b2bb3f9b683..24c1d3c30186 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -12,6 +12,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <linux/mmiotrace.h>
 
 #include <asm/cacheflush.h>
 #include <asm/e820.h>
@@ -122,10 +123,13 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 {
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
+	const resource_size_t unaligned_phys_addr = phys_addr;
+	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
+	void __iomem *ret_addr;
 
	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
@@ -142,7 +146,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
-	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
+	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);
 
	/*
@@ -233,7 +237,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		return NULL;
	}
 
-	return (void __iomem *) (vaddr + offset);
+	ret_addr = (void __iomem *) (vaddr + offset);
+	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);
+
+	return ret_addr;
 }
 
 /**
@@ -261,7 +268,7 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
	/*
	 * Ideally, this should be:
-	 *	pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
+	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
@@ -285,7 +292,7 @@ EXPORT_SYMBOL(ioremap_nocache);
  */
 void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
 {
-	if (pat_wc_enabled)
+	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
@@ -300,6 +307,29 @@ void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_cache);
 
+static void __iomem *ioremap_default(resource_size_t phys_addr,
+					unsigned long size)
+{
+	unsigned long flags;
+	void *ret;
+	int err;
+
+	/*
+	 * - WB for WB-able memory and no other conflicting mappings
+	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
+	 * - Inherit from conflicting mappings otherwise
+	 */
+	err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
+	if (err < 0)
+		return NULL;
+
+	ret = (void *) __ioremap_caller(phys_addr, size, flags,
+					__builtin_return_address(0));
+
+	free_memtype(phys_addr, phys_addr + size);
+	return (void __iomem *)ret;
+}
+
 /**
  * iounmap - Free a IO remapping
  * @addr: virtual address from ioremap_*
@@ -318,13 +348,15 @@ void iounmap(volatile void __iomem *addr)
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
-	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
-	    addr < phys_to_virt(ISA_END_ADDRESS))
+	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
+	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;
 
	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);
 
+	mmiotrace_iounmap(addr);
+
	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
@@ -332,7 +364,7 @@ void iounmap(volatile void __iomem *addr)
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
-		if (p->addr == addr)
+		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);
@@ -346,7 +378,7 @@ void iounmap(volatile void __iomem *addr)
	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));
 
	/* Finally remove it */
-	o = remove_vm_area((void *)addr);
+	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
 }
@@ -365,7 +397,7 @@ void *xlate_dev_mem_ptr(unsigned long phys)
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);
 
-	addr = (void *)ioremap(start, PAGE_SIZE);
+	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
 
@@ -381,8 +413,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
	return;
 }
 
-#ifdef CONFIG_X86_32
-
 int __initdata early_ioremap_debug;
 
 static int __init early_ioremap_debug_setup(char *str)
@@ -394,8 +424,7 @@ static int __init early_ioremap_debug_setup(char *str)
 early_param("early_ioremap_debug", early_ioremap_debug_setup);
 
 static __initdata int after_paging_init;
-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
-		__section(.bss.page_aligned);
+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
 
 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
 {
@@ -484,10 +513,11 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
		return;
	}
	pte = early_ioremap_pte(addr);
+
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
-		pte_clear(NULL, addr, pte);
+		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
 }
 
@@ -625,5 +655,3 @@ void __this_fixmap_does_not_exist(void)
 {
	WARN_ON(1);
 }
-
-#endif /* CONFIG_X86_32 */
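
For context, a minimal, hypothetical driver sketch of the path this patch instruments. The device, addresses, and demo_* names are assumptions for illustration; ioremap_nocache(), readl(), and iounmap() are the standard kernel APIs that now funnel through __ioremap_caller() and iounmap(), where the new mmiotrace_ioremap()/mmiotrace_iounmap() hooks record the mapping:

#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_BAR_PHYS	0xfebf0000UL	/* assumed MMIO BAR base */
#define DEMO_BAR_LEN	0x1000UL	/* assumed BAR length */

static void __iomem *demo_regs;

static int demo_map(void)
{
	/* Funnels into __ioremap_caller(), which saves the caller's
	 * unaligned physical address and size up front and reports them
	 * to mmiotrace_ioremap() just before returning. */
	demo_regs = ioremap_nocache(DEMO_BAR_PHYS, DEMO_BAR_LEN);
	if (!demo_regs)
		return -ENOMEM;
	(void)readl(demo_regs);		/* MMIO access mmiotrace can log */
	return 0;
}

static void demo_unmap(void)
{
	/* iounmap() now calls mmiotrace_iounmap() on the page-aligned
	 * address before tearing down the vm_area. */
	iounmap(demo_regs);
}

Note that mmiotrace_ioremap() receives the original unaligned phys_addr and size (saved at the top of __ioremap_caller()), so the trace reflects what the driver actually requested rather than the page-rounded mapping.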