diff options
author | Ard Biesheuvel <ard.biesheuvel@linaro.org> | 2016-02-22 09:02:08 -0500 |
---|---|---|
committer | Ard Biesheuvel <ard.biesheuvel@linaro.org> | 2016-04-04 04:26:42 -0400 |
commit | 9ab9e4fce45379cb6a7dbf87cf8f8e6ba01853c2 (patch) | |
tree | 905b07720628e6a24a1f719f0af05543712ef66e /arch/arm/mm/ioremap.c | |
parent | c269cba35b061181bc23c470809c00e8f71e535a (diff) |
ARM: memremap: implement arch_memremap_wb()
The generic memremap() falls back to using ioremap_cache() to create
MEMREMAP_WB mappings if the requested region is not already covered
by the linear mapping, unless the architecture provides an implementation
of arch_memremap_wb().
Since ioremap_cache() is not appropriate on ARM to map memory with the
same attributes used for the linear mapping, implement arch_memremap_wb()
which does exactly that. Also, relax the WARN() check to allow MT_MEMORY_RW
mappings of pfn_valid() pages.
Cc: Russell King <rmk+kernel@arm.linux.org.uk>
Acked-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Diffstat (limited to 'arch/arm/mm/ioremap.c')
-rw-r--r-- | arch/arm/mm/ioremap.c | 12 |
1 file changed, 10 insertions, 2 deletions
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index d5350f6af089..ff0eed23ddf1 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -297,9 +297,10 @@ static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, | |||
297 | } | 297 | } |
298 | 298 | ||
299 | /* | 299 | /* |
300 | * Don't allow RAM to be mapped - this causes problems with ARMv6+ | 300 | * Don't allow RAM to be mapped with mismatched attributes - this |
301 | * causes problems with ARMv6+ | ||
301 | */ | 302 | */ |
302 | if (WARN_ON(pfn_valid(pfn))) | 303 | if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW)) |
303 | return NULL; | 304 | return NULL; |
304 | 305 | ||
305 | area = get_vm_area_caller(size, VM_IOREMAP, caller); | 306 | area = get_vm_area_caller(size, VM_IOREMAP, caller); |
@@ -418,6 +419,13 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached) | |||
418 | __builtin_return_address(0)); | 419 | __builtin_return_address(0)); |
419 | } | 420 | } |
420 | 421 | ||
422 | void *arch_memremap_wb(phys_addr_t phys_addr, size_t size) | ||
423 | { | ||
424 | return (__force void *)arch_ioremap_caller(phys_addr, size, | ||
425 | MT_MEMORY_RW, | ||
426 | __builtin_return_address(0)); | ||
427 | } | ||
428 | |||
421 | void __iounmap(volatile void __iomem *io_addr) | 429 | void __iounmap(volatile void __iomem *io_addr) |
422 | { | 430 | { |
423 | void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); | 431 | void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); |