From 8bf1268f48ad9bf5d6401b4db913e6d85b0863f6 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 10 Mar 2015 16:41:35 +0000 Subject: ARM: dma-api: fix off-by-one error in __dma_supported() When validating the mask against the amount of memory we have available (so that we can trap 32-bit DMA addresses with >32-bits memory), we had not taken into account that max_pfn is one more than the highest PFN present in the system. There are several references in the code which bear this out: mm/page_owner.c: for (; pfn < max_pfn; pfn++) { } arch/x86/kernel/setup.c: high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) Signed-off-by: Russell King --- arch/arm/mm/dma-mapping.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 170a116d1b29..c27447653903 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -171,7 +171,7 @@ static int __dma_supported(struct device *dev, u64 mask, bool warn) */ if (sizeof(mask) != sizeof(dma_addr_t) && mask > (dma_addr_t)~0 && - dma_to_pfn(dev, ~0) < max_pfn) { + dma_to_pfn(dev, ~0) < max_pfn - 1) { if (warn) { dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n", mask); -- cgit v1.2.2 From 6d021b724481fbb908eb29384898deb9f00dfe70 Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 10 Mar 2015 19:40:55 +0000 Subject: ARM: dump pgd, pmd and pte states on unhandled data abort faults It can be useful to dump the page table entries when an unhandled data abort fault occurs. This can aid debugging of these situations, for example a STREX instruction causing an external abort on a non-linefetch fault, as has been reported recently. Signed-off-by: Russell King --- arch/arm/mm/fault.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index a982dc3190df..6333d9c17875 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -552,6 +552,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n", inf->name, fsr, addr); + show_pte(current->mm, addr); info.si_signo = inf->sig; info.si_errno = 0; -- cgit v1.2.2 From 5c95ed47f1777e9e9b1eb29e48f34e9af3139f29 Mon Sep 17 00:00:00 2001 From: Fabrice Gasnier Date: Thu, 12 Mar 2015 14:04:42 +0100 Subject: ARM: 8310/1: l2c: Fix prefetch settings dt parsing Allow prefetch settings to be overridden by device tree: when l2x0_cache_size_of_parse() fails, the prefetch tuning properties (e.g. arm,double-linefill* and arm,prefetch*) are silently ignored. This happens, for example, when the "cache-size" or "cache-sets" properties haven't been filled in the l2c dt node. Comments from Fabrice Gasnier: Allow device tree to override the L2C prefetch settings, even when l2x0_cache_size_of_parse() fails to parse the cache geometry due to (e.g.) missing "cache-size" or "cache-sets" properties. 
Signed-off-by: Fabrice Gasnier Reviewed-by: Tomasz Figa Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 33 ++++++++++++++++----------------- 1 file changed, 16 insertions(+), 17 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c6c7696b8db9..8f15f70622a6 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -1131,23 +1131,22 @@ static void __init l2c310_of_parse(const struct device_node *np, } ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); - if (ret) - return; - - switch (assoc) { - case 16: - *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; - *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; - *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; - break; - case 8: - *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; - *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; - break; - default: - pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", - assoc); - break; + if (!ret) { + switch (assoc) { + case 16: + *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; + *aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16; + *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; + break; + case 8: + *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; + *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; + break; + default: + pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n", + assoc); + break; + } } prefetch = l2x0_saved_regs.prefetch_ctrl; -- cgit v1.2.2 From f2ca09f381a59e1eddb89aa70207740c2ee0fe94 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Fri, 13 Mar 2015 21:41:45 +0100 Subject: ARM: 8311/1: Don't use is_module_addr in setting page attributes The set_memory_* functions currently only support module addresses. The addresses are validated using is_module_addr. That function is special, though, and relies on internal state in the module subsystem to work properly. At the time of module initialization, when set_memory_* is called, it is too early for is_module_addr to work properly, so it always returns false. Rather than be subject to the whims of the module state, just bounds check against the module virtual address range. Signed-off-by: Laura Abbott Signed-off-by: Russell King --- arch/arm/mm/pageattr.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c index 004e35cdcfff..cf30daff8932 100644 --- a/arch/arm/mm/pageattr.c +++ b/arch/arm/mm/pageattr.c @@ -49,7 +49,10 @@ static int change_memory_common(unsigned long addr, int numpages, WARN_ON_ONCE(1); } - if (!is_module_address(start) || !is_module_address(end - 1)) + if (start < MODULES_VADDR || start >= MODULES_END) + return -EINVAL; + + if (end < MODULES_VADDR || end >= MODULES_END) return -EINVAL; data.set_mask = set_mask; -- cgit v1.2.2 From e5b61deb3af465f11db7e5e11944ba00a33ece1f Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Wed, 25 Mar 2015 19:16:05 +0100 Subject: ARM: 8332/1: add CONFIG_VDSO Kconfig and Makefile bits Allow users to enable the vdso in Kconfig; include the vdso in the build if CONFIG_VDSO is enabled. Add 'vdso_install' target. 
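For illustration only (this sketch is not part of the patch): the kind of call the vDSO is meant to accelerate. With CONFIG_VDSO=y, the ARM architected timer and glibc 2.22 or later, a call like the one below can normally be resolved entirely in user space, without entering the kernel.

#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec ts;

    /* With the vDSO mapped and a new enough glibc, this normally
     * completes without a syscall on systems that have the ARM
     * architected timer. */
    if (clock_gettime(CLOCK_MONOTONIC, &ts))
        return 1;
    printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
    return 0;
}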
Signed-off-by: Nathan Lynch Signed-off-by: Russell King --- arch/arm/mm/Kconfig | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 9b4f29e595a4..8a5f1e644104 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -825,6 +825,20 @@ config KUSER_HELPERS Say N here only if you are absolutely certain that you do not need these helpers; otherwise, the safe option is to say Y. +config VDSO + bool "Enable VDSO for acceleration of some system calls" + depends on AEABI && MMU + default y if ARM_ARCH_TIMER + select GENERIC_TIME_VSYSCALL + help + Place in the process address space an ELF shared object + providing fast implementations of gettimeofday and + clock_gettime. Systems that implement the ARM architected + timer will receive maximum benefit. + + You must have glibc 2.22 or later for programs to seamlessly + take advantage of this. + config DMA_CACHE_RWFO bool "Enable read/write for ownership DMA cache maintenance" depends on CPU_V6K && SMP -- cgit v1.2.2 From 49f28aa6b0d0735dbe5f04263c49a199ed0c5bb7 Mon Sep 17 00:00:00 2001 From: Tomasz Figa Date: Wed, 1 Apr 2015 07:26:33 +0100 Subject: ARM: 8337/1: mm: Do not invoke OOM for higher order IOMMU DMA allocations IOMMU should be able to use single pages as well as bigger blocks, so if higher order allocations fail, we should not affect state of the system, with events such as OOM killer, but rather fall back to order 0 allocations. This patch changes the behavior of ARM IOMMU DMA allocator to use __GFP_NORETRY, which bypasses OOM invocation, for orders higher than zero and, only if that fails, fall back to normal order 0 allocation which might invoke OOM killer. Signed-off-by: Tomasz Figa Reviewed-by: Doug Anderson Acked-by: David Rientjes Acked-by: Marek Szyprowski Signed-off-by: Russell King --- arch/arm/mm/dma-mapping.c | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index c27447653903..f9941cd689e9 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -1135,13 +1135,28 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp |= __GFP_NOWARN | __GFP_HIGHMEM; while (count) { - int j, order = __fls(count); + int j, order; + + for (order = __fls(count); order > 0; --order) { + /* + * We do not want OOM killer to be invoked as long + * as we can fall back to single pages, so we force + * __GFP_NORETRY for orders higher than zero. + */ + pages[i] = alloc_pages(gfp | __GFP_NORETRY, order); + if (pages[i]) + break; + } - pages[i] = alloc_pages(gfp, order); - while (!pages[i] && order) - pages[i] = alloc_pages(gfp, --order); - if (!pages[i]) - goto error; + if (!pages[i]) { + /* + * Fall back to single page allocation. + * Might invoke OOM killer as last resort. 
+ */ + pages[i] = alloc_pages(gfp, 0); + if (!pages[i]) + goto error; + } if (order) { split_page(pages[i], order); -- cgit v1.2.2 From e1e2f6e4c5759aab3a8cfb1a0c19017ea770dfd2 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Fri, 9 Jan 2015 19:34:43 +0100 Subject: ARM: 8276/1: Make CPU_DCACHE_DISABLE depend on !SMP Enabling CPU_DCACHE_DISABLE on an SMP-capable system will prevent the kernel from booting because of the following ldrex instruction in arch_spin_lock: (gdb) x/10i $pc => 0xc053cfa8 <_raw_spin_lock+4>: ldrex r3, [r0] 0xc053cfac <_raw_spin_lock+8>: add r2, r3, #65536 ; 0x10000 which is taken by the very first printk call: at /home/fainelli/work/linux/arch/arm/include/asm/spinlock.h:65 fmt=0xc0637650 " 01 66Booting Linux on physical CPU 0x%x\n", args=) at kernel/printk/printk.c:1525 fmt=0xc05370f4 " 24320215342 04340235344 20320215342 36377/341 17") at kernel/printk/printk.c:1688 ldrex requires exclusive monitor(s) (local or global), which no longer work when the data cache is disabled in CP15, so the instruction just hangs the CPU there. Acked-by: Arnd Bergmann Signed-off-by: Florian Fainelli Signed-off-by: Russell King --- arch/arm/mm/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 9b4f29e595a4..133ecff6664e 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -738,7 +738,7 @@ config CPU_ICACHE_DISABLE config CPU_DCACHE_DISABLE bool "Disable D-Cache (C-bit)" - depends on CPU_CP15 + depends on CPU_CP15 && !SMP help Say Y here to disable the processor data cache. Unless you have a reason not to or are unsure, say N. -- cgit v1.2.2 From 6c5c2a01fcfdb70f2e95e30e96ccf53b88e81023 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 4 Apr 2015 23:22:07 +0100 Subject: ARM: proc-arm94*.S: fix setup function Both the ARM946 and ARM940 setup functions were corrupting r1 and r2, which is not permissible - these are used to carry the machine ID and boot data into the kernel, and must be preserved. The code responsible for this was the same in both files: they were using the registers to generate a protection region register value. Fix this by turning this process into a macro, and using that macro in both these files with an alternative register allocation. r0, r3 and r7 can be used for temporary values here. 
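For reference, a rough user-space model (not part of the patch; the helper below is purely illustrative) of the value the new pr_val macro computes: the pre-masked region base, ORed with the size field (log2(size) - 1, so 11 for the minimum 4KB region) shifted up one bit, plus the enable bit in bit 0.

#include <stdio.h>
#include <stdint.h>

/* Illustrative C model of pr_val: build a protection region register
 * value from a pre-masked base address, a region size and an enable bit. */
static uint32_t pr_val(uint32_t base, uint32_t size, uint32_t enable)
{
    uint32_t field = 11;                        /* 11 is the minimum (4KB) */

    for (size >>= 12; size > 1; size >>= 1)     /* one more per doubling */
        field++;

    return (base & 0xfffff000) | (field << 1) | enable;
}

int main(void)
{
    /* e.g. 64MB of DRAM at 0x00000000: size field is 25 */
    printf("region value: 0x%08x\n", pr_val(0x00000000, 64 << 20, 1));
    return 0;
}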
Reported-by: Alex Dumitrache Tested-by: Georg Hofstetter Signed-off-by: Russell King --- arch/arm/mm/proc-arm940.S | 26 ++++++++------------------ arch/arm/mm/proc-arm946.S | 22 ++++++---------------- arch/arm/mm/proc-macros.S | 24 ++++++++++++++++++++++++ 3 files changed, 38 insertions(+), 34 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index e5212d489377..c42cdd3b44bc 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -297,26 +297,16 @@ __arm940_setup: mcr p15, 0, r0, c6, c0, 1 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM - ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 - bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value - orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c1, 0 @ set area 1, RAM - mcr p15, 0, r0, c6, c1, 1 + ldr r7, =CONFIG_DRAM_SIZE >> 12 @ size of RAM (must be >= 4KB) + pr_val r3, r0, r7, #1 + mcr p15, 0, r3, c6, c1, 0 @ set area 1, RAM + mcr p15, 0, r3, c6, c1, 1 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH - ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 - bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the area register value - orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c2, 0 @ set area 2, ROM/FLASH - mcr p15, 0, r0, c6, c2, 1 + ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB) + pr_val r3, r0, r6, #1 + mcr p15, 0, r3, c6, c2, 0 @ set area 2, ROM/FLASH + mcr p15, 0, r3, c6, c2, 1 mov r0, #0x06 mcr p15, 0, r0, c2, c0, 0 @ Region 1&2 cacheable diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index b3dd9b2d0b8e..17a8c2075c62 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -343,24 +343,14 @@ __arm946_setup: mcr p15, 0, r0, c6, c0, 0 @ set region 0, default ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM - ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 - bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the region register value - orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c1, 0 @ set region 1, RAM + ldr r7, =CONFIG_DRAM_SIZE @ size of RAM (must be >= 4KB) + pr_val r3, r0, r7, #1 + mcr p15, 0, r3, c6, c1, 0 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH - ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB) - mov r2, #10 @ 11 is the minimum (4KB) -1: add r2, r2, #1 @ area size *= 2 - mov r1, r1, lsr #1 - bne 1b @ count not zero r-shift - orr r0, r0, r2, lsl #1 @ the region register value - orr r0, r0, #1 @ set enable bit - mcr p15, 0, r0, c6, c2, 0 @ set region 2, ROM/FLASH + ldr r7, =CONFIG_FLASH_SIZE @ size of FLASH (must be >= 4KB) + pr_val r3, r0, r7, #1 + mcr p15, 0, r3, c6, c2, 0 mov r0, #0x06 mcr p15, 0, r0, c2, c0, 0 @ region 1,2 d-cacheable diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 082b9f2f7e90..d081c9d9420d 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -331,3 +331,27 @@ ENTRY(\name\()_tlb_fns) .globl \x .equ \x, \y .endm + + /* + * Macro to calculate the log2 size for the protection region + * registers. This calculates rd = log2(size) - 1. 
tmp must + * not be the same register as rd. + */ +.macro pr_sz, rd, size, tmp + mov \tmp, \size, lsr #12 + mov \rd, #11 +1: movs \tmp, \tmp, lsr #1 + addne \rd, \rd, #1 + bne 1b +.endm + + /* + * Macro to generate a protection region register value + * given a pre-masked address, size, and enable bit. + * Corrupts size. + */ +.macro pr_val, dest, addr, size, enable + pr_sz \dest, \size, \size @ calculate log2(size) - 1 + orr \dest, \addr, \dest, lsl #1 @ mask in the region size + orr \dest, \dest, \enable +.endm -- cgit v1.2.2