author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-02 13:57:39 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-02 13:57:39 -0400
commit		3f9c08f7ce2ca7b176dc7ee8dd287a82dfe53e60 (patch)
tree		c0813c7f6b9424aaf15ccd2ff227acb71e32d1ff /arch
parent		1f2609460c4a96ef19c6bd2dadea46ad47e6dfa1 (diff)
parent		6bf755db4d5e7ccea61fb17727a183b9bd8945b1 (diff)
Merge branch 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm
Pull ARM fixes from Russell King:
 "A few fixes for ARM.  Some of these are correctness issues:

   - TLBs must be flushed after the old mappings are removed by the DMA
     mapping code, but before the new mappings are established.

   - An off-by-one entry error in the Keystone LPAE setup code.

  Fixes include:

   - ensuring that the identity mapping for LPAE does not remove the
     kernel image from the identity map.

   - preventing userspace from trapping into kgdb.

   - fixing a preemption issue in the Intel iwmmxt code.

   - fixing a build error with nommu.

  Other changes include:

   - Adding a note about which areas of memory are expected to be
     accessible while the identity mapping tables are in place"

* 'fixes' of git://ftp.arm.linux.org.uk/~rmk/linux-arm:
  ARM: 8124/1: don't enter kgdb when userspace executes a kgdb break instruction
  ARM: idmap: add identity mapping usage note
  ARM: 8115/1: LPAE: reduce damage caused by idmap to virtual memory layout
  ARM: fix alignment of keystone page table fixup
  ARM: 8112/1: only select ARM_PATCH_PHYS_VIRT if MMU is enabled
  ARM: 8100/1: Fix preemption disable in iwmmxt_task_enable()
  ARM: DMA: ensure that old section mappings are flushed from the TLB
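The TLB ordering point above is the core of the dma-mapping change further down in this diff: the old section mappings are torn down, the stale TLB entries for that virtual range are flushed, and only then is the new mapping installed. As a minimal sketch of that ordering, condensed from the dma_contiguous_remap() hunk below (the wrapper name remap_cma_area and its parameters are illustrative, not part of the kernel):

/*
 * Sketch only: models the ordering enforced by the
 * dma_contiguous_remap() change in arch/arm/mm/dma-mapping.c below.
 * 'start' and 'end' stand in for the physical bounds of one CMA area.
 */
static void __init remap_cma_area(phys_addr_t start, phys_addr_t end,
				  struct map_desc *map)
{
	unsigned long addr;

	/* 1. Tear down the old section (PMD) mappings for the range. */
	for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
	     addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * 2. Flush the now-stale TLB entries before the new mapping
	 *    exists, so no CPU can hold an old translation that
	 *    conflicts with the one about to be created.
	 */
	flush_tlb_kernel_range(__phys_to_virt(start), __phys_to_virt(end));

	/* 3. Only now create the new MT_MEMORY_DMA_READY mapping. */
	iotable_init(map, 1);
}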
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/Kconfig	4
-rw-r--r--	arch/arm/kernel/iwmmxt.S	23
-rw-r--r--	arch/arm/kernel/kgdb.c	4
-rw-r--r--	arch/arm/mm/dma-mapping.c	11
-rw-r--r--	arch/arm/mm/idmap.c	12
-rw-r--r--	arch/arm/mm/mmu.c	6
6 files changed, 43 insertions, 17 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 88acf8bc1490..290f02ee0157 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -313,7 +313,7 @@ config ARCH_MULTIPLATFORM
 config ARCH_INTEGRATOR
 	bool "ARM Ltd. Integrator family"
 	select ARM_AMBA
-	select ARM_PATCH_PHYS_VIRT
+	select ARM_PATCH_PHYS_VIRT if MMU
 	select AUTO_ZRELADDR
 	select COMMON_CLK
 	select COMMON_CLK_VERSATILE
@@ -659,7 +659,7 @@ config ARCH_MSM
 config ARCH_SHMOBILE_LEGACY
 	bool "Renesas ARM SoCs (non-multiplatform)"
 	select ARCH_SHMOBILE
-	select ARM_PATCH_PHYS_VIRT
+	select ARM_PATCH_PHYS_VIRT if MMU
 	select CLKDEV_LOOKUP
 	select GENERIC_CLOCKEVENTS
 	select HAVE_ARM_SCU if SMP
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index a5599cfc43cb..2b32978ae905 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -94,13 +94,19 @@ ENTRY(iwmmxt_task_enable)
 
 	mrc	p15, 0, r2, c2, c0, 0
 	mov	r2, r2				@ cpwait
+	bl	concan_save
 
-	teq	r1, #0				@ test for last ownership
-	mov	lr, r9				@ normal exit from exception
-	beq	concan_load			@ no owner, skip save
+#ifdef CONFIG_PREEMPT_COUNT
+	get_thread_info r10
+#endif
+4:	dec_preempt_count r10, r3
+	mov	pc, r9				@ normal exit from exception
 
 concan_save:
 
+	teq	r1, #0				@ test for last ownership
+	beq	concan_load			@ no owner, skip save
+
 	tmrc	r2, wCon
 
 	@ CUP? wCx
@@ -138,7 +144,7 @@ concan_dump:
 	wstrd	wR15, [r1, #MMX_WR15]
 
 2:	teq	r0, #0				@ anything to load?
-	beq	3f
+	moveq	pc, lr				@ if not, return
 
 concan_load:
 
@@ -171,14 +177,9 @@ concan_load:
 	@ clear CUP/MUP (only if r1 != 0)
 	teq	r1, #0
 	mov	r2, #0
-	beq	3f
-	tmcr	wCon, r2
+	moveq	pc, lr
 
-3:
-#ifdef CONFIG_PREEMPT_COUNT
-	get_thread_info r10
-#endif
-4:	dec_preempt_count r10, r3
+	tmcr	wCon, r2
 	mov	pc, lr
 
 /*
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index 778c2f7024ff..a74b53c1b7df 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -160,12 +160,16 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int instr)
 static struct undef_hook kgdb_brkpt_hook = {
 	.instr_mask		= 0xffffffff,
 	.instr_val		= KGDB_BREAKINST,
+	.cpsr_mask		= MODE_MASK,
+	.cpsr_val		= SVC_MODE,
 	.fn			= kgdb_brk_fn
 };
 
 static struct undef_hook kgdb_compiled_brkpt_hook = {
 	.instr_mask		= 0xffffffff,
 	.instr_val		= KGDB_COMPILED_BREAK,
+	.cpsr_mask		= MODE_MASK,
+	.cpsr_val		= SVC_MODE,
 	.fn			= kgdb_compiled_brk_fn
 };
 
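For context (not part of this diff): the ARM undefined-instruction handler dispatches to a registered undef_hook only when both the instruction bits and the CPSR bits match the hook. Restricting the kgdb hooks to SVC_MODE therefore means a break instruction executed by userspace no longer matches, and it is handled as an ordinary undefined instruction instead of dropping the machine into kgdb. A rough sketch of that matching test, assuming the usual call_undef_hook() logic in arch/arm/kernel/traps.c:

/*
 * Approximate shape of the hook-matching test (simplified from
 * call_undef_hook() in arch/arm/kernel/traps.c; not part of this diff).
 */
static bool hook_matches(const struct undef_hook *hook,
			 struct pt_regs *regs, unsigned int instr)
{
	return (instr & hook->instr_mask) == hook->instr_val &&
	       (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val;
}

/*
 * With .cpsr_mask = MODE_MASK and .cpsr_val = SVC_MODE, a kgdb break
 * instruction executed in user mode fails the CPSR test, so the fault
 * falls through to the normal undefined-instruction handling.
 */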
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 4c88935654ca..1f88db06b133 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -461,12 +461,21 @@ void __init dma_contiguous_remap(void)
 		map.type = MT_MEMORY_DMA_READY;
 
 		/*
-		 * Clear previous low-memory mapping
+		 * Clear previous low-memory mapping to ensure that the
+		 * TLB does not see any conflicting entries, then flush
+		 * the TLB of the old entries before creating new mappings.
+		 *
+		 * This ensures that any speculatively loaded TLB entries
+		 * (even though they may be rare) can not cause any problems,
+		 * and ensures that this code is architecturally compliant.
 		 */
 		for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
 		     addr += PMD_SIZE)
 			pmd_clear(pmd_off_k(addr));
 
+		flush_tlb_kernel_range(__phys_to_virt(start),
+				       __phys_to_virt(end));
+
 		iotable_init(&map, 1);
 	}
 }
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 8e0e52eb76b5..c447ec70e868 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -9,6 +9,11 @@
 #include <asm/sections.h>
 #include <asm/system_info.h>
 
+/*
+ * Note: accesses outside of the kernel image and the identity map area
+ * are not supported on any CPU using the idmap tables as its current
+ * page tables.
+ */
 pgd_t *idmap_pgd;
 phys_addr_t (*arch_virt_to_idmap) (unsigned long x);
 
@@ -25,6 +30,13 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
 			pr_warning("Failed to allocate identity pmd.\n");
 			return;
 		}
+		/*
+		 * Copy the original PMD to ensure that the PMD entries for
+		 * the kernel image are preserved.
+		 */
+		if (!pud_none(*pud))
+			memcpy(pmd, pmd_offset(pud, 0),
+			       PTRS_PER_PMD * sizeof(pmd_t));
 		pud_populate(&init_mm, pud, pmd);
 		pmd += pmd_index(addr);
 	} else
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index ab14b79b03f0..6e3ba8d112a2 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1406,8 +1406,8 @@ void __init early_paging_init(const struct machine_desc *mdesc,
 		return;
 
 	/* remap kernel code and data */
-	map_start = init_mm.start_code;
-	map_end   = init_mm.brk;
+	map_start = init_mm.start_code & PMD_MASK;
+	map_end   = ALIGN(init_mm.brk, PMD_SIZE);
 
 	/* get a handle on things... */
 	pgd0 = pgd_offset_k(0);
@@ -1442,7 +1442,7 @@ void __init early_paging_init(const struct machine_desc *mdesc,
 	}
 
 	/* remap pmds for kernel mapping */
-	phys = __pa(map_start) & PMD_MASK;
+	phys = __pa(map_start);
 	do {
 		*pmdk++ = __pmd(phys | pmdprot);
 		phys += PMD_SIZE;
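The two mmu.c hunks above are the Keystone off-by-one fix: the kernel image is remapped one PMD section at a time starting from map_start, so the bounds have to be expanded to whole-section boundaries or a partially covered section at either end can be missed. A small worked illustration with made-up addresses (2 MiB LPAE sections assumed; the values and the count computed from the unaligned bounds are illustrative only):

#include <stdio.h>

/* Illustrative values only; 2 MiB sections as on LPAE. */
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long start_code = 0xc0208000UL;	/* not 2 MiB aligned */
	unsigned long brk        = 0xc0a08000UL;

	/* Sections suggested by the raw, unaligned bounds. */
	unsigned long unaligned_sections = (brk - start_code) / PMD_SIZE;

	/* After the fix: bounds rounded out to whole sections up front. */
	unsigned long map_start = start_code & PMD_MASK;
	unsigned long map_end   = ALIGN(brk, PMD_SIZE);
	unsigned long aligned_sections = (map_end - map_start) / PMD_SIZE;

	printf("unaligned: %lu sections, aligned: %lu sections\n",
	       unaligned_sections, aligned_sections);	/* 4 vs 5 */
	return 0;
}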