From 319f551a0a167b49b5bbb4a9ff4802046a572bc5 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 24 Mar 2010 16:47:53 +0100
Subject: ARM: 5994/1: ARM: Add outer_cache_fns.sync function pointer (2/4)

This patch introduces the outer_cache_fns.sync function pointer together
with the OUTER_CACHE_SYNC config option that can be used to drain the
write buffer of the outer cache.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/Kconfig | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4ed9f93f646..88a24de55aaf 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -736,6 +736,12 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 config OUTER_CACHE
        bool
 
+config OUTER_CACHE_SYNC
+       bool
+       help
+         The outer cache has an outer_cache_fns.sync function pointer
+         that can be used to drain the write buffer of the outer cache.
+
 config CACHE_FEROCEON_L2
        bool "Enable the Feroceon L2 cache controller"
        depends on ARCH_KIRKWOOD || ARCH_MV78XX0
--
cgit v1.2.2

From 23107c542068b2b94390aa333f6b330af64961e4 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 24 Mar 2010 16:48:53 +0100
Subject: ARM: 5995/1: ARM: Add L2x0 outer_sync() support (3/4)

The L2x0 cache controllers need to explicitly drain their write buffer
even for Normal Noncacheable memory accesses.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/Kconfig      |  1 +
 arch/arm/mm/cache-l2x0.c | 10 ++++++++++
 2 files changed, 11 insertions(+)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 88a24de55aaf..55a2a00db77f 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -763,6 +763,7 @@ config CACHE_L2X0
           REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
        default y
        select OUTER_CACHE
+       select OUTER_CACHE_SYNC
        help
          This option enables the L2x0 PrimeCell.
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 07334632d3e2..21ad68ba22ba 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -93,6 +93,15 @@ static inline void l2x0_flush_line(unsigned long addr)
 }
 #endif
 
+static void l2x0_cache_sync(void)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&l2x0_lock, flags);
+       cache_sync();
+       spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 static inline void l2x0_inv_all(void)
 {
        unsigned long flags;
@@ -225,6 +234,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
        outer_cache.inv_range = l2x0_inv_range;
        outer_cache.clean_range = l2x0_clean_range;
        outer_cache.flush_range = l2x0_flush_range;
+       outer_cache.sync = l2x0_cache_sync;
 
        printk(KERN_INFO "L2X0 cache controller enabled\n");
 }
--
cgit v1.2.2
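The consumer of the new hook lives in arch/arm/include/asm/outercache.h,
outside the arch/arm/mm subtree shown in this log. A minimal sketch of that
side, assuming the header's usual structure:

        #ifdef CONFIG_OUTER_CACHE_SYNC
        static inline void outer_sync(void)
        {
                if (outer_cache.sync)
                        outer_cache.sync();     /* drain the outer write buffer */
        }
        #else
        static inline void outer_sync(void) { }
        #endif
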
From e7c5650f6067f65f8e961394f376d4862808d0d2 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Wed, 24 Mar 2010 16:49:54 +0100
Subject: ARM: 5996/1: ARM: Change the mandatory barriers implementation (4/4)

The mandatory barriers (mb, rmb, wmb) are used even on uniprocessor
systems for things like ordering Normal Non-cacheable memory accesses
with DMA transfer (via Device memory writes). The current implementation
uses dmb() for mb() and friends but this is not sufficient. The DMB only
ensures the relative ordering of the observability of accesses by other
processors or devices acting as masters. In case of DMA transfers
started by writes to device memory, the relative ordering is not ensured
because accesses to slave ports of a device are not considered
observable by the DMB definition.

A DSB is required for the data to reach the main memory (even if mapped
as Normal Non-cacheable) before the device receives the notification to
begin the transfer. Furthermore, some L2 cache controllers (like L2x0 or
PL310) buffer stores to Normal Non-cacheable memory and this would need
to be drained with the outer_sync() function call.

The patch also allows platforms to define their own mandatory barriers
implementation by selecting CONFIG_ARCH_HAS_BARRIERS and providing a
mach/barriers.h file.

Note that the SMP barriers are unchanged (being DMBs as before) since
they are only guaranteed to work with Normal Cacheable memory.

Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/Kconfig | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 55a2a00db77f..5bd7c89a6045 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -788,3 +788,9 @@ config ARM_L1_CACHE_SHIFT
        int
        default 6 if ARM_L1_CACHE_SHIFT_6
        default 5
+
+config ARCH_HAS_BARRIERS
+       bool
+       help
+         This option allows the use of custom mandatory barriers
+         included via the mach/barriers.h file.
--
cgit v1.2.2
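The barrier macros themselves are redefined in arch/arm/include/asm/system.h,
which this log (limited to arch/arm/mm) does not show. A hedged sketch of
the resulting shape; the exact guards in the real header may differ:

        #ifdef CONFIG_ARCH_HAS_BARRIERS
        #include <mach/barriers.h>      /* platform-supplied mandatory barriers */
        #else
        #define mb()    do { dsb(); outer_sync(); } while (0)
        #define wmb()   mb()
        /* the SMP barriers stay DMB-based and are unchanged */
        #endif
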
From 5a0e3ad6af8660be21ca98a971cd00f331318c05 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Wed, 24 Mar 2010 17:04:11 +0900
Subject: include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h

percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which in
turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.

percpu.h -> slab.h dependency is about to be removed. Prepare for this
change by updating users of gfp and slab facilities to include those
headers directly instead of assuming availability. As this conversion
needs to touch a large number of source files, the following script is
used as the basis of conversion.

  http://userweb.kernel.org/~tj/misc/slabh-sweep.py

The script does the following.

* Scan files for gfp and slab usages and update includes such that only
  the necessary includes are there. ie. if only gfp is used, gfp.h, if
  slab is used, slab.h.

* When the script inserts a new include, it looks at the include blocks
  and tries to put the new include such that its order conforms to its
  surrounding. It's put in the include block which contains core kernel
  includes, in the same order that the rest are ordered - alphabetical,
  Christmas tree, rev-Xmas-tree or at the end if there doesn't seem to
  be any matching order.

* If the script can't find a place to put a new include (mostly because
  the file doesn't have fitting include block), it prints out an error
  message indicating which .h file needs to be added to the file.

The conversion was done in the following steps.

1. The initial automatic conversion of all .c files updated slightly
   over 4000 files, deleting around 700 includes and adding ~480 gfp.h
   and ~3000 slab.h inclusions. The script emitted errors for ~400
   files.

2. Each error was manually checked. Some didn't need the inclusion,
   some needed manual addition while adding it to implementation .h or
   embedding .c file was more appropriate for others. This step added
   inclusions to around 150 files.

3. The script was run again and the output was compared to the edits
   from #2 to make sure no file was left behind.

4. Several build tests were done and a couple of problems were fixed.
   e.g. lib/decompress_*.c used malloc/free() wrappers around slab APIs
   requiring slab.h to be added manually.

5. The script was run on all .h files but without automatically editing
   them as sprinkling gfp.h and slab.h inclusions around .h files could
   easily lead to inclusion dependency hell. Most gfp.h inclusion
   directives were ignored as stuff from gfp.h was usually widely
   available and often used in preprocessor macros. Each slab.h
   inclusion directive was examined and added manually as necessary.

6. percpu.h was updated not to include slab.h.

7. Build tests were done on the following configurations and failures
   were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
   distributed build env didn't work with gcov compiles) and a few more
   options had to be turned off depending on archs to make things build
   (like ipr on powerpc/64 which failed due to missing writeq).

   * x86 and x86_64 UP and SMP allmodconfig and a custom test config.
   * powerpc and powerpc64 SMP allmodconfig
   * sparc and sparc64 SMP allmodconfig
   * ia64 SMP allmodconfig
   * s390 SMP allmodconfig
   * alpha SMP allmodconfig
   * um on x86_64 SMP allmodconfig

8. percpu.h modifications were reverted so that it could be applied as
   a separate patch and serve as bisection point.

Given the fact that I had only a couple of failures from the tests in
step 7, I'm fairly confident about the coverage of this conversion
patch. If there is a breakage, it's likely to be something in one of
the arch headers which should be easily discoverable on most builds of
the specific arch.

Signed-off-by: Tejun Heo
Guess-its-ok-by: Christoph Lameter
Cc: Ingo Molnar
Cc: Lee Schermerhorn
---
 arch/arm/mm/dma-mapping.c | 2 +-
 arch/arm/mm/fault-armv.c  | 1 +
 arch/arm/mm/init.c        | 1 +
 arch/arm/mm/pgd.c         | 1 +
 4 files changed, 4 insertions(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0da7eccf7749..1351edc0b26f 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -11,7 +11,7 @@
  */
 #include <linux/module.h>
 #include <linux/mm.h>
-#include <linux/slab.h>
+#include <linux/gfp.h>
 #include <linux/errno.h>
 #include <linux/list.h>
 #include <linux/init.h>
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index c9b97e9836a2..0d414c28eb2c 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -16,6 +16,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
+#include <linux/gfp.h>
 
 #include <asm/bugs.h>
 #include <asm/cacheflush.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 7829cb5425f5..83db12a68d56 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,6 +17,7 @@
 #include <linux/initrd.h>
 #include <linux/sort.h>
 #include <linux/highmem.h>
+#include <linux/gfp.h>
 
 #include <asm/mach-types.h>
 #include <asm/sections.h>
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
index 2690146161ba..be5f58e153bf 100644
--- a/arch/arm/mm/pgd.c
+++ b/arch/arm/mm/pgd.c
@@ -8,6 +8,7 @@
  * published by the Free Software Foundation.
  */
 #include <linux/mm.h>
+#include <linux/gfp.h>
 #include <linux/highmem.h>
 
 #include <asm/pgalloc.h>
--
cgit v1.2.2
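A minimal, hypothetical illustration of the failure mode this sweep prevents
(demo_init() is not from the patch): a file that only compiled because
slab.h leaked in through module.h -> percpu.h must now state its dependency
explicitly:

        #include <linux/module.h>       /* soon no longer drags in slab.h via percpu.h */
        #include <linux/slab.h>         /* now required explicitly for kmalloc()/kfree() */

        static int __init demo_init(void)
        {
                char *buf = kmalloc(64, GFP_KERNEL);    /* GFP_KERNEL via slab.h -> gfp.h */

                kfree(buf);
                return 0;
        }
        module_init(demo_init);
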
From 85b3cce880a19e78286570d5fd004cc3cac06f57 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 9 Apr 2010 15:00:11 +0100
Subject: ARM: Fix ioremap_cached()/ioremap_wc() for SMP platforms

Write combining/cached device mappings are not setting the shared bit,
which could potentially cause problems on SMP systems since the cache
lines won't participate in the cache coherency protocol.

Signed-off-by: Russell King
Tested-by: Santosh Shilimkar
---
 arch/arm/mm/mmu.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9d4da6ac28eb..4223d086aa17 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -420,6 +420,10 @@ static void __init build_mem_type_table(void)
                user_pgprot |= L_PTE_SHARED;
                kern_pgprot |= L_PTE_SHARED;
                vecs_pgprot |= L_PTE_SHARED;
+               mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
+               mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
+               mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
+               mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
 #endif
--
cgit v1.2.2
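A hedged usage sketch (hypothetical driver; fb_phys and fb_size are
placeholders, not values from this patch): with the fix, the mapping below
is created with the shareable attribute on SMP, so all cores snoop it:

        void __iomem *fb = ioremap_wc(fb_phys, fb_size);        /* now L_PTE_SHARED on SMP */
        if (fb) {
                writel(0, fb);          /* write now participates in coherency */
                iounmap(fb);
        }
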
From 7e5a69e83ba7a0d5917ad830f417cba8b8d6aa72 Mon Sep 17 00:00:00 2001
From: Nicolas Pitre
Date: Mon, 29 Mar 2010 21:46:02 +0100
Subject: ARM: 6007/1: fix highmem with VIPT cache and DMA

The VIVT cache of a highmem page is always flushed before the page is
unmapped. This cache flush is explicit through flush_cache_kmaps() in
flush_all_zero_pkmaps(), or through __cpuc_flush_dcache_area() in
kunmap_atomic(). There is also an implicit flush of those highmem pages
that were part of a process that just terminated, making those pages
free, as the whole VIVT cache has to be flushed on every task switch.
Hence unmapped highmem pages need no cache maintenance in that case.

However unmapped pages may still be cached with a VIPT cache because the
cache is tagged with physical addresses. There is no need for a whole
cache flush during task switching for that reason, and despite the
explicit cache flushes in flush_all_zero_pkmaps() and kunmap_atomic(),
some highmem pages that were mapped in user space end up still cached
even when they become unmapped.

So, we do have to perform cache maintenance on those unmapped highmem
pages in the context of DMA when using a VIPT cache. Unfortunately, it
is not possible to perform that cache maintenance using physical
addresses as all the L1 cache maintenance coprocessor functions accept
virtual addresses only. Therefore we have no choice but to set up a
temporary virtual mapping for that purpose.

And of course the explicit cache flushing when unmapping a highmem page
on a system with a VIPT cache can now go, which should increase
performance.

While at it, because the code in __flush_dcache_page() has to be
modified anyway, let's also make sure the mapped highmem pages are
pinned with kmap_high_get() for the duration of the cache maintenance
operation. Because kunmap() does unmap highmem pages lazily, it was
reported by Gary King that those pages ended up being unmapped during
cache maintenance on SMP, causing segmentation faults.

Signed-off-by: Nicolas Pitre
Signed-off-by: Russell King
---
 arch/arm/mm/copypage-v6.c |  9 +----
 arch/arm/mm/dma-mapping.c |  5 +++
 arch/arm/mm/flush.c       | 25 ++++++++------
 arch/arm/mm/highmem.c     | 87 ++++++++++++++++++++++++++++++++++++++++++++++-
 4 files changed, 107 insertions(+), 19 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 8bca4dea6dfa..f55fa1044f72 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,14 +41,7 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
        kfrom = kmap_atomic(from, KM_USER0);
        kto = kmap_atomic(to, KM_USER1);
        copy_page(kto, kfrom);
-#ifdef CONFIG_HIGHMEM
-       /*
-        * kmap_atomic() doesn't set the page virtual address, and
-        * kunmap_atomic() takes care of cache flushing already.
-        */
-       if (page_address(to) != NULL)
-#endif
-               __cpuc_flush_dcache_area(kto, PAGE_SIZE);
+       __cpuc_flush_dcache_area(kto, PAGE_SIZE);
        kunmap_atomic(kto, KM_USER1);
        kunmap_atomic(kfrom, KM_USER0);
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1351edc0b26f..13fa536d82e6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -464,6 +464,11 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
                        vaddr += offset;
                        op(vaddr, len, dir);
                        kunmap_high(page);
+               } else if (cache_is_vipt()) {
+                       pte_t saved_pte;
+                       vaddr = kmap_high_l1_vipt(page, &saved_pte);
+                       op(vaddr + offset, len, dir);
+                       kunmap_high_l1_vipt(page, saved_pte);
                }
        } else {
                vaddr = page_address(page) + offset;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index e34f095e2090..c6844cb9b508 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -13,6 +13,7 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
+#include <linux/highmem.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -152,21 +153,25 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
-       void *addr = page_address(page);
-
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
        * coherent with the kernels mapping.
        */
-#ifdef CONFIG_HIGHMEM
-       /*
-        * kmap_atomic() doesn't set the page virtual address, and
-        * kunmap_atomic() takes care of cache flushing already.
-        */
-       if (addr)
-#endif
-               __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+       if (!PageHighMem(page)) {
+               __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
+       } else {
+               void *addr = kmap_high_get(page);
+               if (addr) {
+                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                       kunmap_high(page);
+               } else if (cache_is_vipt()) {
+                       pte_t saved_pte;
+                       addr = kmap_high_l1_vipt(page, &saved_pte);
+                       __cpuc_flush_dcache_area(addr, PAGE_SIZE);
+                       kunmap_high_l1_vipt(page, saved_pte);
+               }
+       }
 
        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 2be1ec7c1b41..77b030f5ec09 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -79,7 +79,8 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
        unsigned int idx = type + KM_TYPE_NR * smp_processor_id();
 
        if (kvaddr >= (void *)FIXADDR_START) {
-               __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+               if (cache_is_vivt())
+                       __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
 #ifdef CONFIG_DEBUG_HIGHMEM
                BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
                set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
@@ -124,3 +125,87 @@ struct page *kmap_atomic_to_page(const void *ptr)
        pte = TOP_PTE(vaddr);
        return pte_page(*pte);
 }
+
+#ifdef CONFIG_CPU_CACHE_VIPT
+
+#include
+
+/*
+ * The VIVT cache of a highmem page is always flushed before the page
+ * is unmapped. Hence unmapped highmem pages need no cache maintenance
+ * in that case.
+ *
+ * However unmapped pages may still be cached with a VIPT cache, and
+ * it is not possible to perform cache maintenance on them using physical
+ * addresses unfortunately. So we have no choice but to set up a temporary
+ * virtual mapping for that purpose.
+ *
+ * Yet this VIPT cache maintenance may be triggered from DMA support
+ * functions which are possibly called from interrupt context. As we don't
+ * want to keep interrupts disabled all the time when such maintenance is
+ * taking place, we therefore allow for some reentrancy by preserving and
+ * restoring the previous fixmap entry before the interrupted context is
+ * resumed. If the reentrancy depth is 0 then there is no need to restore
+ * the previous fixmap, and leaving the current one in place allows it to
+ * be reused the next time without a TLB flush (common with DMA).
+ */
+
+static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
+
+void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
+{
+       unsigned int idx, cpu = smp_processor_id();
+       int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+       unsigned long vaddr, flags;
+       pte_t pte, *ptep;
+
+       idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       ptep = TOP_PTE(vaddr);
+       pte = mk_pte(page, kmap_prot);
+
+       if (!in_interrupt())
+               preempt_disable();
+
+       raw_local_irq_save(flags);
+       (*depth)++;
+       if (pte_val(*ptep) == pte_val(pte)) {
+               *saved_pte = pte;
+       } else {
+               *saved_pte = *ptep;
+               set_pte_ext(ptep, pte, 0);
+               local_flush_tlb_kernel_page(vaddr);
+       }
+       raw_local_irq_restore(flags);
+
+       return (void *)vaddr;
+}
+
+void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
+{
+       unsigned int idx, cpu = smp_processor_id();
+       int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+       unsigned long vaddr, flags;
+       pte_t pte, *ptep;
+
+       idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+       ptep = TOP_PTE(vaddr);
+       pte = mk_pte(page, kmap_prot);
+
+       BUG_ON(pte_val(*ptep) != pte_val(pte));
+       BUG_ON(*depth <= 0);
+
+       raw_local_irq_save(flags);
+       (*depth)--;
+       if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
+               set_pte_ext(ptep, saved_pte, 0);
+               local_flush_tlb_kernel_page(vaddr);
+       }
+       raw_local_irq_restore(flags);
+
+       if (!in_interrupt())
+               preempt_enable();
+}
+
+#endif /* CONFIG_CPU_CACHE_VIPT */
--
cgit v1.2.2
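The calling convention of the new pair, as exercised by the dma-mapping.c
and flush.c hunks above:

        /* Temporarily map an unmapped highmem page so L1 maintenance can
         * be performed by virtual address; reentrancy-safe, see the
         * comment block in highmem.c above. */
        pte_t saved_pte;
        void *addr = kmap_high_l1_vipt(page, &saved_pte);
        __cpuc_flush_dcache_area(addr, PAGE_SIZE);
        kunmap_high_l1_vipt(page, saved_pte);
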
From 3f2d4f561fab4588344cc519fd323382ab950928 Mon Sep 17 00:00:00 2001
From: Mika Westerberg
Date: Tue, 13 Apr 2010 07:01:46 +0100
Subject: ARM: 6052/1: kdump: make kexec work in interrupt context

When a crash happens in interrupt context, there is no userspace
context. We always use current->active_mm in those cases.

Signed-off-by: Mika Westerberg
Signed-off-by: Russell King
---
 arch/arm/mm/mmu.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4223d086aa17..241c24a1c18f 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1054,10 +1054,12 @@ void setup_mm_for_reboot(char mode)
        pgd_t *pgd;
        int i;
 
-       if (current->mm && current->mm->pgd)
-               pgd = current->mm->pgd;
-       else
-               pgd = init_mm.pgd;
+       /*
+        * We need to access user-mode page tables here. For kernel threads
+        * we don't have any user-mode mappings so we use the context that
+        * we "borrowed".
+        */
+       pgd = current->active_mm->pgd;
 
        base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
--
cgit v1.2.2
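Why active_mm works here, as a brief, hedged illustration (not from the
patch):

        /* current->mm is NULL for kernel threads, and a crash taken in
         * interrupt context may well be running on one; current->active_mm
         * always refers to the mm the CPU is actually using (for kernel
         * threads, the one "borrowed" from the previous user task). */
        pgd_t *pgd = current->active_mm->pgd;   /* valid in both cases */
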
From 4260415f6a3b92c5c986398d96c314df37a4ccbf Mon Sep 17 00:00:00 2001
From: Russell King
Date: Mon, 19 Apr 2010 10:15:03 +0100
Subject: ARM: fix build error in arch/arm/kernel/process.c

/tmp/ccJ3ssZW.s: Assembler messages:
/tmp/ccJ3ssZW.s:1952: Error: can't resolve `.text' {.text section} - `.LFB1077'

This is caused because:

        .section .data
        .section .text
        .section .text
        .previous

does not return us to the .text section, but the .data section; this
makes use of .previous dangerous if the ordering of previous sections
is not known.

Fix up the other users of .previous; .pushsection and .popsection are a
safer pairing to use than .section and .previous.

Signed-off-by: Russell King
---
 arch/arm/mm/alignment.c   | 24 ++++++++++++------------
 arch/arm/mm/proc-sa1100.S |  2 +-
 2 files changed, 13 insertions(+), 13 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index edddd66faac6..a2ab51fa73e2 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -166,15 +166,15 @@ union offset_union {
  THUMB( "1:     "ins"   %1, [%2]\n"     )               \
  THUMB( "       add     %2, %2, #1\n"   )               \
        "2:\n"                                          \
-       "       .section .fixup,\"ax\"\n"               \
+       "       .pushsection .fixup,\"ax\"\n"           \
        "       .align  2\n"                            \
        "3:     mov     %0, #1\n"                       \
        "       b       2b\n"                           \
-       "       .previous\n"                            \
-       "       .section __ex_table,\"a\"\n"            \
+       "       .popsection\n"                          \
+       "       .pushsection __ex_table,\"a\"\n"        \
        "       .align  3\n"                            \
        "       .long   1b, 3b\n"                       \
-       "       .previous\n"                            \
+       "       .popsection\n"                          \
        : "=r" (err), "=&r" (val), "=r" (addr)          \
        : "0" (err), "2" (addr))
 
@@ -226,16 +226,16 @@ union offset_union {
                "       mov     %1, %1, "NEXT_BYTE"\n"  \
                "2:     "ins"   %1, [%2]\n"             \
                "3:\n"                                  \
-               "       .section .fixup,\"ax\"\n"       \
+               "       .pushsection .fixup,\"ax\"\n"   \
                "       .align  2\n"                    \
                "4:     mov     %0, #1\n"               \
                "       b       3b\n"                   \
-               "       .previous\n"                    \
-               "       .section __ex_table,\"a\"\n"    \
+               "       .popsection\n"                  \
+               "       .pushsection __ex_table,\"a\"\n"        \
                "       .align  3\n"                    \
                "       .long   1b, 4b\n"               \
                "       .long   2b, 4b\n"               \
-               "       .previous\n"                    \
+               "       .popsection\n"                  \
                : "=r" (err), "=&r" (v), "=&r" (a)      \
                : "0" (err), "1" (v), "2" (a));         \
        if (err)                                        \
@@ -266,18 +266,18 @@ union offset_union {
                "       mov     %1, %1, "NEXT_BYTE"\n"  \
                "4:     "ins"   %1, [%2]\n"             \
                "5:\n"                                  \
-               "       .section .fixup,\"ax\"\n"       \
+               "       .pushsection .fixup,\"ax\"\n"   \
                "       .align  2\n"                    \
                "6:     mov     %0, #1\n"               \
                "       b       5b\n"                   \
-               "       .previous\n"                    \
-               "       .section __ex_table,\"a\"\n"    \
+               "       .popsection\n"                  \
+               "       .pushsection __ex_table,\"a\"\n"        \
                "       .align  3\n"                    \
                "       .long   1b, 6b\n"               \
                "       .long   2b, 6b\n"               \
                "       .long   3b, 6b\n"               \
                "       .long   4b, 6b\n"               \
-               "       .previous\n"                    \
+               "       .popsection\n"                  \
                : "=r" (err), "=&r" (v), "=&r" (a)      \
                : "0" (err), "1" (v), "2" (a));         \
        if (err)                                        \
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index ee7700242c19..5c47760c2064 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -45,7 +45,7 @@ ENTRY(cpu_sa1100_proc_init)
        mcr     p15, 0, r0, c9, c0, 5           @ Allow read-buffer operations from userland
        mov     pc, lr
 
-       .previous
+       .section .text
 
 /*
 * cpu_sa1100_proc_fin()
--
cgit v1.2.2
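Distilled into a standalone (hypothetical) example, the safe
exception-table pattern is as follows; addr, err and val are assumed
locals, not names from the patch:

        int err, val;
        asm volatile(
                "1:     ldr     %1, [%2]\n"
                "2:\n"
                "       .pushsection .fixup,\"ax\"\n"   /* pushes onto the section stack */
                "3:     mov     %0, #1\n"
                "       b       2b\n"
                "       .popsection\n"                  /* restores it exactly */
                "       .pushsection __ex_table,\"a\"\n"
                "       .long   1b, 3b\n"
                "       .popsection\n"
                : "=r" (err), "=&r" (val)
                : "r" (addr), "0" (0));
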
From a3be6327163c223c1715c8307a616941fbf8bf73 Mon Sep 17 00:00:00 2001
From: Hans Ulli Kroll
Date: Tue, 27 Apr 2010 12:45:10 +0200
Subject: ARM: Gemini: fix compiler error in copypage-fa.c

Fix a compiler error in copypage-fa.c: the function
fa_copy_user_highpage() was missing its struct vm_area_struct *vma
argument.

Signed-off-by: Hans Ulli Kroll
---
 arch/arm/mm/copypage-fa.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index b2a6008b0111..d2852e1635b1 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -40,7 +40,7 @@ fa_copy_user_page(void *kto, const void *kfrom)
 }
 
 void fa_copy_user_highpage(struct page *to, struct page *from,
-       unsigned long vaddr)
+       unsigned long vaddr, struct vm_area_struct *vma)
 {
        void *kto, *kfrom;
 
--
cgit v1.2.2

From e220ba60223a9d63e70217e5b112160df8c21cea Mon Sep 17 00:00:00 2001
From: Dave Estes
Date: Tue, 11 Aug 2009 17:58:49 -0400
Subject: arm: mm: qsd8x50: Fix incorrect permission faults

Handle incorrectly reported permission faults for qsd8650. On
permission faults, retry the MVA to PA conversion; if the retry detects
a translation fault, report it as a translation fault.

Cc: Jamie Lokier
Signed-off-by: Dave Estes
---
 arch/arm/mm/Kconfig     |  2 ++
 arch/arm/mm/abort-ev7.S | 21 +++++++++++++++++++++
 2 files changed, 23 insertions(+)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5bd7c89a6045..d3022dabb4aa 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -572,6 +572,8 @@ config CPU_TLB_V6
 config CPU_TLB_V7
        bool
 
+config VERIFY_PERMISSION_FAULT
+       bool
 endif
 
 config CPU_HAS_ASID
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index 2e6dc040c654..ec88b157d3bb 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -29,5 +29,26 @@ ENTRY(v7_early_abort)
         * V6 code adjusts the returned DFSR.
         * New designs should not need to patch up faults.
         */
+
+#if defined(CONFIG_VERIFY_PERMISSION_FAULT)
+       /*
+        * Detect erroneous permission failures and fix
+        */
+       ldr     r3, =0x40d                      @ On permission fault
+       and     r3, r1, r3
+       cmp     r3, #0x0d
+       movne   pc, lr
+
+       mcr     p15, 0, r0, c7, c8, 0           @ Retranslate FAR
+       isb
+       mrc     p15, 0, r2, c7, c4, 0           @ Read the PAR
+       and     r3, r2, #0x7b                   @ On translation fault
+       cmp     r3, #0x0b
+       movne   pc, lr
+       bic     r1, r1, #0xf                    @ Fix up FSR FS[5:0]
+       and     r2, r2, #0x7e
+       orr     r1, r1, r2, LSR #1
+#endif
+
        mov     pc, lr
ENDPROC(v7_early_abort)
--
cgit v1.2.2
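For readers who do not speak CP15, a hedged C rendering of the
verification sequence above (retranslate_far() is an illustrative name,
not from the patch):

        /* Re-run the VA->PA translation for the faulting address and
         * return the PAR; (par & 0x7b) == 0x0b then identifies a
         * translation fault that was mis-reported as a permission fault. */
        static inline unsigned int retranslate_far(unsigned long far)
        {
                unsigned int par;

                asm volatile("mcr       p15, 0, %1, c7, c8, 0\n"        /* ATS1CPR */
                             "isb\n"
                             "mrc       p15, 0, %0, c7, c4, 0"          /* read PAR */
                             : "=r" (par) : "r" (far));
                return par;
        }
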
From ea056df7965fc46cfff28fd3808bf3ada23d5059 Mon Sep 17 00:00:00 2001
From: Catalin Marinas
Date: Tue, 4 May 2010 17:27:43 +0100
Subject: ARM: 6093/1: Fix kernel memory printing for sparsemem

The show_mem() and mem_init() functions assume that the page map is
contiguous and calculate the start and end page of a bank using
(map + pfn). This fails with SPARSEMEM where pfn_to_page() must be
used.

Tested-by: Will Deacon
Tested-by: Marek Vasut
Signed-off-by: Catalin Marinas
Signed-off-by: Russell King
---
 arch/arm/mm/init.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 83db12a68d56..0ed29bfeba1c 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -86,9 +86,6 @@ void show_mem(void)
        printk("Mem-info:\n");
        show_free_areas();
        for_each_online_node(node) {
-               pg_data_t *n = NODE_DATA(node);
-               struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
-
                for_each_nodebank (i,mi,node) {
                        struct membank *bank = &mi->bank[i];
                        unsigned int pfn1, pfn2;
@@ -97,8 +94,8 @@ void show_mem(void)
                        pfn1 = bank_pfn_start(bank);
                        pfn2 = bank_pfn_end(bank);
 
-                       page = map + pfn1;
-                       end  = map + pfn2;
+                       page = pfn_to_page(pfn1);
+                       end  = pfn_to_page(pfn2 - 1) + 1;
 
                        do {
                                total++;
@@ -603,9 +600,6 @@ void __init mem_init(void)
        reserved_pages = free_pages = 0;
 
        for_each_online_node(node) {
-               pg_data_t *n = NODE_DATA(node);
-               struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn;
-
                for_each_nodebank(i, &meminfo, node) {
                        struct membank *bank = &meminfo.bank[i];
                        unsigned int pfn1, pfn2;
@@ -614,8 +608,8 @@ void __init mem_init(void)
                        pfn1 = bank_pfn_start(bank);
                        pfn2 = bank_pfn_end(bank);
 
-                       page = map + pfn1;
-                       end  = map + pfn2;
+                       page = pfn_to_page(pfn1);
+                       end  = pfn_to_page(pfn2 - 1) + 1;
 
                        do {
                                if (PageReserved(page))
--
cgit v1.2.2

From 66b196475031c748a5861390a4fadb915e14ccdc Mon Sep 17 00:00:00 2001
From: Haojian Zhuang
Date: Wed, 28 Apr 2010 10:59:45 -0400
Subject: [ARM] mmp: enable L2 in mmp2

Enable Tauros2 L2 in mmp2. Tauros2 L2 is shared among Marvell ARM
cores.

Signed-off-by: Haojian Zhuang
Signed-off-by: Eric Miao
---
 arch/arm/mm/Kconfig | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 5bd7c89a6045..698912602387 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -769,7 +769,7 @@ config CACHE_L2X0
 
 config CACHE_TAUROS2
        bool "Enable the Tauros2 L2 cache controller"
-       depends on ARCH_DOVE
+       depends on (ARCH_DOVE || ARCH_MMP)
        default y
        select OUTER_CACHE
        help
--
cgit v1.2.2
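As a closing sketch (hypothetical board code, assuming the cache-tauros2
header of this era), an mmp2 platform would hook the controller up roughly
like this:

        #include <asm/hardware/cache-tauros2.h>

        static void __init mmp2_init_l2(void)
        {
        #ifdef CONFIG_CACHE_TAUROS2
                tauros2_init();         /* registers the Tauros2 outer_cache ops */
        #endif
        }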