From 0b59e38ffaf7b201ff6afe5b736365d16848c7e3 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 18 Jan 2010 21:21:32 +0900
Subject: sh: Merge _32/_64 ioremap implementations.

There is nothing of interest in the _64 version anymore, so the _32 one
can be renamed and used unconditionally.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/ioremap.c | 171 +++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 171 insertions(+)
 create mode 100644 arch/sh/mm/ioremap.c
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
new file mode 100644
index 000000000000..24f6ba6bff71
--- /dev/null
+++ b/arch/sh/mm/ioremap.c
@@ -0,0 +1,171 @@
+/*
+ * arch/sh/mm/ioremap.c
+ *
+ * (C) Copyright 1995 1996 Linus Torvalds
+ * (C) Copyright 2005 - 2010 Paul Mundt
+ *
+ * Re-map IO memory to kernel address space so that we can access it.
+ * This is needed for high PCI addresses that aren't mapped in the
+ * 640k-1MB IO memory area on PC's
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file "COPYING" in the main directory of this
+ * archive for more details.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space. Needed when the kernel wants to access high addresses
+ * directly.
+ *
+ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
+ * have to convert them into an offset in a page-aligned mapping, but the
+ * caller shouldn't need to know that small detail.
+ */
+void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
+			       unsigned long flags, void *caller)
+{
+	struct vm_struct *area;
+	unsigned long offset, last_addr, addr, orig_addr;
+	pgprot_t pgprot;
+
+	/* Don't allow wraparound or zero size */
+	last_addr = phys_addr + size - 1;
+	if (!size || last_addr < phys_addr)
+		return NULL;
+
+	/*
+	 * If we're in the fixed PCI memory range, mapping through page
+	 * tables is not only pointless, but also fundamentally broken.
+	 * Just return the physical address instead.
+	 *
+	 * For boards that map a small PCI memory aperture somewhere in
+	 * P1/P2 space, ioremap() will already do the right thing,
+	 * and we'll never get this far.
+	 */
+	if (is_pci_memory_fixed_range(phys_addr, size))
+		return (void __iomem *)phys_addr;
+
+	/*
+	 * Mappings have to be page-aligned
+	 */
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
+	size = PAGE_ALIGN(last_addr+1) - phys_addr;
+
+	/*
+	 * If we can't yet use the regular approach, go the fixmap route.
+	 */
+	if (!mem_init_done)
+		return ioremap_fixed(phys_addr, size, __pgprot(flags));
+
+	/*
+	 * Ok, go for it..
+	 */
+	area = get_vm_area_caller(size, VM_IOREMAP, caller);
+	if (!area)
+		return NULL;
+	area->phys_addr = phys_addr;
+	orig_addr = addr = (unsigned long)area->addr;
+
+#ifdef CONFIG_PMB
+	/*
+	 * First try to remap through the PMB once a valid VMA has been
+	 * established. Smaller allocations (or the rest of the size
+	 * remaining after a PMB mapping due to the size not being
+	 * perfectly aligned on a PMB size boundary) are then mapped
+	 * through the UTLB using conventional page tables.
+	 *
+	 * PMB entries are all pre-faulted.
+	 */
+	if (unlikely(phys_addr >= P1SEG)) {
+		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+
+		if (likely(mapped)) {
+			addr += mapped;
+			phys_addr += mapped;
+			size -= mapped;
+		}
+	}
+#endif
+
+	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
+	if (likely(size))
+		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
+			vunmap((void *)orig_addr);
+			return NULL;
+		}
+
+	return (void __iomem *)(offset + (char *)orig_addr);
+}
+EXPORT_SYMBOL(__ioremap_caller);
+
+/*
+ * Simple checks for non-translatable mappings.
+ */
+static inline int iomapping_nontranslatable(unsigned long offset)
+{
+#ifdef CONFIG_29BIT
+	/*
+	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
+	 * parts of P3.
+	 */
+	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
+		return 1;
+#endif
+
+	if (is_pci_memory_fixed_range(offset, 0))
+		return 1;
+
+	return 0;
+}
+
+void __iounmap(void __iomem *addr)
+{
+	unsigned long vaddr = (unsigned long __force)addr;
+	struct vm_struct *p;
+
+	/*
+	 * Nothing to do if there is no translatable mapping.
+	 */
+	if (iomapping_nontranslatable(vaddr))
+		return;
+
+#ifdef CONFIG_PMB
+	/*
+	 * Purge any PMB entries that may have been established for this
+	 * mapping, then proceed with conventional VMA teardown.
+	 *
+	 * XXX: Note that due to the way that remove_vm_area() does
+	 * matching of the resultant VMA, we aren't able to fast-forward
+	 * the address past the PMB space until the end of the VMA where
+	 * the page tables reside. As such, unmap_vm_area() will be
+	 * forced to linearly scan over the area until it finds the page
+	 * tables where PTEs that need to be unmapped actually reside,
+	 * which is far from optimal. Perhaps we need to use a separate
+	 * VMA for the PMB mappings?
+	 *	-- PFM.
+	 */
+	pmb_unmap(vaddr);
+#endif
+
+	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
+	if (!p) {
+		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
+		return;
+	}
+
+	kfree(p);
+}
+EXPORT_SYMBOL(__iounmap);
-- cgit v1.2.2


From 12b6b01cb47dc3eefbef866592193661dad7afb9 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 18 Jan 2010 21:33:08 +0900
Subject: sh: Handle unmapping of fixed slots transparently in iounmap().

iounmap() should balance whatever is done by ioremap(). Presently
ioremap() can do any of fixed mappings, PMB mappings, or page table
mappings. Presently only the latter two are handled through the standard
unmap path, so tie in the fixed unmapping, too.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/ioremap.c | 6 ++++++
 1 file changed, 6 insertions(+)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 24f6ba6bff71..e8b65f645aed 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -142,6 +142,12 @@ void __iounmap(void __iomem *addr)
 	if (iomapping_nontranslatable(vaddr))
 		return;
 
+	/*
+	 * There's no VMA if it's from an early fixed mapping.
+	 */
+	if (iounmap_fixed(addr) == 0)
+		return;
+
 #ifdef CONFIG_PMB
 	/*
 	 * Purge any PMB entries that may have been established for this
-- cgit v1.2.2


From af1415314a4190b8ea06e53808d392fcf91555af Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Mon, 18 Jan 2010 21:45:00 +0900
Subject: sh: Flag __ioremap_caller() __init_refok.

The mem_init_done test makes sure that this path is only entered in
__init cases, so leaving ioremap_fixed() as __init and flagging the
caller __init_refok is sufficient.
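
As an illustration (not part of the patch; the names early_done, early_path()
and generic_map() are invented for this sketch), this is the general pattern
the change relies on: a non-__init function may reference an __init function
as long as a runtime flag guarantees the reference is only taken before the
init sections are freed, and __init_refok tells modpost not to warn about the
section mismatch. In the patch itself the flag is mem_init_done and the
__init callee is ioremap_fixed().

#include <linux/init.h>

/* Set once the regular path becomes usable, well before initmem is freed. */
static int early_done;

/* Boot-time-only implementation, discarded along with the rest of .init. */
static void * __init early_path(unsigned long phys)
{
	return NULL;
}

/* Normal implementation, available for the lifetime of the kernel. */
static void *late_path(unsigned long phys)
{
	return NULL;
}

/*
 * Not __init itself, but the only reference to early_path() sits behind
 * the early_done test -- mirroring the mem_init_done check above.
 */
void * __init_refok generic_map(unsigned long phys)
{
	if (!early_done)
		return early_path(phys);

	return late_path(phys);
}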
Signed-off-by: Paul Mundt
---
 arch/sh/mm/ioremap.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index e8b65f645aed..a130b2278e92 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -33,8 +33,9 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
-			       unsigned long flags, void *caller)
+void __iomem * __init_refok
+__ioremap_caller(unsigned long phys_addr, unsigned long size,
+		 unsigned long flags, void *caller)
 {
 	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
-- cgit v1.2.2


From d57d64080ddc0ff13fcffc898b6251074a482ba1 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 19 Jan 2010 13:34:38 +0900
Subject: sh: Prevent 64-bit pgprot clobbering across ioremap implementations.

Presently 'flags' gets passed around a lot between the various ioremap
helpers and implementations, and it is only 32 bits. In the X2TLB case we
use 64-bit pgprots, which presently results in the upper 32 bits being
chopped off (which handily include our read/write/exec permissions). As
such, we convert everything internally to using pgprot_t directly and
simply convert over with pgprot_val() where needed. With this in place,
transparent fixmap utilization for early ioremap works as expected.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/ioremap.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index a130b2278e92..85b420d00622 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -35,11 +35,10 @@
  */
 void __iomem * __init_refok
 __ioremap_caller(unsigned long phys_addr, unsigned long size,
-		 unsigned long flags, void *caller)
+		 pgprot_t pgprot, void *caller)
 {
 	struct vm_struct *area;
 	unsigned long offset, last_addr, addr, orig_addr;
-	pgprot_t pgprot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -69,7 +68,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	 * If we can't yet use the regular approach, go the fixmap route.
 	 */
 	if (!mem_init_done)
-		return ioremap_fixed(phys_addr, size, __pgprot(flags));
+		return ioremap_fixed(phys_addr, size, pgprot);
 
 	/*
 	 * Ok, go for it..
@@ -91,8 +90,9 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	 * PMB entries are all pre-faulted.
 	 */
 	if (unlikely(phys_addr >= P1SEG)) {
-		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);
+		unsigned long mapped;
 
+		mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
 		if (likely(mapped)) {
 			addr += mapped;
 			phys_addr += mapped;
@@ -101,7 +101,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 		}
 	}
 #endif
 
-	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
 	if (likely(size))
 		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
 			vunmap((void *)orig_addr);
-- cgit v1.2.2


From acf2c9685fb8295cb62a623d7358a1cfde8b07ea Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 19 Jan 2010 13:49:19 +0900
Subject: sh: Kill off duplicate address alignment in ioremap_fixed().

This is already taken care of in the top-level ioremap, and now that
no one should be calling ioremap_fixed() directly we can simply throw
the mapping displacement in as an additional argument.
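
For reference (not part of the patch; plain C with made-up values, 4 KiB
pages assumed), a small stand-alone sketch of the alignment handling that
the top-level ioremap already performs, which is why ioremap_fixed() no
longer needs to repeat it and can simply be handed the in-page displacement:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phys_addr = 0xfd000123UL;	/* hypothetical, unaligned */
	unsigned long size      = 0x20UL;
	unsigned long last_addr = phys_addr + size - 1;
	unsigned long offset    = phys_addr & ~PAGE_MASK;

	/* Same steps as __ioremap_caller(): page-align the base and size. */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/* Prints: base 0xfd000000 size 0x1000 offset 0x123 */
	printf("base %#lx size %#lx offset %#lx\n", phys_addr, size, offset);
	return 0;
}

The mapping is built for the aligned base and rounded-up size, and the saved
offset is added back onto the virtual address returned to the caller; with
this commit that same offset is passed down to ioremap_fixed() rather than
being recomputed there.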
Signed-off-by: Paul Mundt
---
 arch/sh/mm/ioremap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 85b420d00622..bb03308e8408 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -68,7 +68,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	 * If we can't yet use the regular approach, go the fixmap route.
 	 */
 	if (!mem_init_done)
-		return ioremap_fixed(phys_addr, size, pgprot);
+		return ioremap_fixed(phys_addr, offset, size, pgprot);
 
 	/*
 	 * Ok, go for it..
-- cgit v1.2.2


From 9762528f37ddc7071509dddb10e7b4b3b957fd01 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 29 Jan 2010 16:14:29 +0900
Subject: sh: Kill off deprecated fixed PCI memory window accessors.

This kills off the deprecated fixed memory range accessors for the
cases of non-translatable ioremapping.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/ioremap.c | 15 ---------------
 1 file changed, 15 deletions(-)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index bb03308e8408..94583c5da855 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -45,18 +45,6 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	if (!size || last_addr < phys_addr)
 		return NULL;
 
-	/*
-	 * If we're in the fixed PCI memory range, mapping through page
-	 * tables is not only pointless, but also fundamentally broken.
-	 * Just return the physical address instead.
-	 *
-	 * For boards that map a small PCI memory aperture somewhere in
-	 * P1/P2 space, ioremap() will already do the right thing,
-	 * and we'll never get this far.
-	 */
-	if (is_pci_memory_fixed_range(phys_addr, size))
-		return (void __iomem *)phys_addr;
-
 	/*
 	 * Mappings have to be page-aligned
 	 */
@@ -125,9 +113,6 @@ static inline int iomapping_nontranslatable(unsigned long offset)
 		return 1;
 #endif
 
-	if (is_pci_memory_fixed_range(offset, 0))
-		return 1;
-
 	return 0;
 }
 
-- cgit v1.2.2


From 7bdda6209f224aa784a036df54b22cb338d2e859 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Wed, 17 Feb 2010 13:23:00 +0900
Subject: sh: Fix up more 64-bit pgprot truncation on SH-X2 TLB.

Both the store queue API and the PMB remapping take unsigned long for
their pgprot flags, which cuts off the extended protection bits. In the
case of the PMB this isn't really a problem since the cache attribute
bits that we care about are all in the lower 32 bits, but we do it just
to be safe. The store queue remapping on the other hand depends on the
extended prot bits for enabling userspace access to the mappings.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/ioremap.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
(limited to 'arch/sh/mm/ioremap.c')

diff --git a/arch/sh/mm/ioremap.c b/arch/sh/mm/ioremap.c
index 94583c5da855..c68d2d7d00a9 100644
--- a/arch/sh/mm/ioremap.c
+++ b/arch/sh/mm/ioremap.c
@@ -80,7 +80,7 @@ __ioremap_caller(unsigned long phys_addr, unsigned long size,
 	if (unlikely(phys_addr >= P1SEG)) {
 		unsigned long mapped;
 
-		mapped = pmb_remap(addr, phys_addr, size, pgprot_val(pgprot));
+		mapped = pmb_remap(addr, phys_addr, size, pgprot);
 		if (likely(mapped)) {
 			addr += mapped;
 			phys_addr += mapped;
-- cgit v1.2.2
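
As a closing illustration (not part of the patch series; the types and bit
values are stand-ins, not the kernel's own definitions), this stand-alone
sketch shows the truncation the pgprot changes above guard against: on a
32-bit sh kernel with X2TLB the pgprot value is 64 bits wide, so squeezing
it through an unsigned long parameter silently drops the upper half.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for an X2TLB-style 64-bit pgprot. */
typedef struct { uint64_t pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)

/* Old-style helper: takes the flags as a 32-bit quantity (unsigned long on sh32). */
static void remap_with_flags(uint32_t flags)
{
	printf("helper sees: %#x\n", flags);
}

/* Converted helper: the full pgprot_t is passed through untouched. */
static void remap_with_pgprot(pgprot_t prot)
{
	printf("helper sees: %#llx\n", (unsigned long long)pgprot_val(prot));
}

int main(void)
{
	/* Pretend bit 32 is an extended protection bit and 0x1d2 the classic flags. */
	pgprot_t prot = { .pgprot = (1ULL << 32) | 0x1d2ULL };

	remap_with_flags(pgprot_val(prot));	/* bit 32 silently lost */
	remap_with_pgprot(prot);		/* full value preserved */
	return 0;
}

With X2TLB the extended protection bits live above bit 31, which is why the
series moves the internal ioremap plumbing, and here the pmb_remap() call,
over to passing a pgprot_t instead of an unsigned long.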