author		Linus Torvalds <torvalds@linux-foundation.org>	2013-01-24 15:44:57 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-01-24 15:44:57 -0500
commit		01acd3efd798c225bbbb5e0bac067c8123b77de9 (patch)
tree		efe607fa332e01c7a423a84eb06d572c75159c37 /arch
parent		1496ec13a1ae92cea305d266ff73f850138f92c7 (diff)
parent		210b1847b32951f52d19df229972399e5b987de2 (diff)
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM fixes from Russell King:
"A number of fixes:
Patrik found a problem with preempt counting in the VFP assembly
functions which can cause the preempt count to be upset.
Nicolas fixed a problem with the parsing of the DT when it straddles a
1MB boundary.
Subhash Jadavani reported a problem with sparsemem and our highmem
support for cache maintanence for DMA areas, and TI found a bug in
their strongly ordered memory mapping type.
Also, three fixes by way of Will Deacon's tree from Dave Martin for
instruction compatibility and Marc Zyngier to fix hypervisor boot mode
issues."
* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
ARM: 7629/1: mm: Fix missing XN flag for for MT_MEMORY_SO
ARM: DMA: Fix struct page iterator in dma_cache_maint() to work with sparsemem
ARM: 7628/1: head.S: map one extra section for the ATAG/DTB area
ARM: 7627/1: Predicate preempt logic on PREEMP_COUNT not PREEMPT alone
ARM: virt: simplify __hyp_stub_install epilog
ARM: virt: boot secondary CPUs through the right entry point
ARM: virt: Avoid bx instruction for compatibility with <=ARMv4
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/kernel/head.S		5
-rw-r--r--	arch/arm/kernel/hyp-stub.S	18
-rw-r--r--	arch/arm/mm/dma-mapping.c	18
-rw-r--r--	arch/arm/mm/mmu.c		2
-rw-r--r--	arch/arm/vfp/entry.S		6
-rw-r--r--	arch/arm/vfp/vfphw.S		4
6 files changed, 26 insertions, 27 deletions
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 4eee351f4668..486a15ae9011 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -246,6 +246,7 @@ __create_page_tables:
 
 	/*
 	 * Then map boot params address in r2 if specified.
+	 * We map 2 sections in case the ATAGs/DTB crosses a section boundary.
 	 */
 	mov	r0, r2, lsr #SECTION_SHIFT
 	movs	r0, r0, lsl #SECTION_SHIFT
@@ -253,6 +254,8 @@ __create_page_tables:
 	addne	r3, r3, #PAGE_OFFSET
 	addne	r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER)
 	orrne	r6, r7, r0
+	strne	r6, [r3], #1 << PMD_ORDER
+	addne	r6, r6, #1 << SECTION_SHIFT
 	strne	r6, [r3]
 
 #ifdef CONFIG_DEBUG_LL
@@ -331,7 +334,7 @@ ENTRY(secondary_startup)
 	 * as it has already been validated by the primary processor.
 	 */
 #ifdef CONFIG_ARM_VIRT_EXT
-	bl	__hyp_stub_install
+	bl	__hyp_stub_install_secondary
 #endif
 	safe_svcmode_maskall r9
 
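Why two sections: a DTB handed over in r2 is rarely section-aligned, so a blob that starts near the top of a 1MB section spills into the next one; mapping two consecutive sections covers any boot-params blob smaller than a section. A standalone C sketch of that arithmetic (the addresses and sizes below are made-up examples, not kernel code):

/*
 * Standalone sketch (not kernel code) of why head.S now maps two 1MB
 * sections for the ATAGs/DTB pointer passed in r2.  The address and
 * blob size are hypothetical.
 */
#include <stdio.h>

#define SECTION_SHIFT	20		/* 1MB sections */

int main(void)
{
	unsigned long atags    = 0x807ff000;	/* hypothetical r2 value */
	unsigned long dtb_size = 0x3000;	/* hypothetical DTB size */

	unsigned long first = atags >> SECTION_SHIFT;
	unsigned long last  = (atags + dtb_size - 1) >> SECTION_SHIFT;

	/*
	 * The blob starts 4KB below a 1MB boundary, so it spills into the
	 * next section.  Mapping only the section containing r2 would leave
	 * the tail of the DTB unmapped when it is parsed later.
	 */
	printf("DTB occupies sections %lu..%lu (%lu section(s))\n",
	       first, last, last - first + 1);
	printf("mapping 2 sections from 0x%08lx covers any blob smaller than a section\n",
	       first << SECTION_SHIFT);
	return 0;
}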
diff --git a/arch/arm/kernel/hyp-stub.S b/arch/arm/kernel/hyp-stub.S
index 65b2417aebce..1315c4ccfa56 100644
--- a/arch/arm/kernel/hyp-stub.S
+++ b/arch/arm/kernel/hyp-stub.S
@@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary)
 	 * immediately.
 	 */
 	compare_cpu_mode_with_primary	r4, r5, r6, r7
-	bxne	lr
+	movne	pc, lr
 
 	/*
 	 * Once we have given up on one CPU, we do not try to install the
@@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary)
 	 */
 
 	cmp	r4, #HYP_MODE
-	bxne	lr			@ give up if the CPU is not in HYP mode
+	movne	pc, lr			@ give up if the CPU is not in HYP mode
 
 	/*
 	 * Configure HSCTLR to set correct exception endianness/instruction set
@@ -120,7 +120,8 @@ ENTRY(__hyp_stub_install_secondary)
 	 * Eventually, CPU-specific code might be needed -- assume not for now
 	 *
 	 * This code relies on the "eret" instruction to synchronize the
-	 * various coprocessor accesses.
+	 * various coprocessor accesses. This is done when we switch to SVC
+	 * (see safe_svcmode_maskall).
 	 */
 	@ Now install the hypervisor stub:
 	adr	r7, __hyp_stub_vectors
@@ -155,14 +156,7 @@ THUMB(	orr	r7, #(1 << 30)	)	@ HSCTLR.TE
 1:
 #endif
 
-	bic	r7, r4, #MODE_MASK
-	orr	r7, r7, #SVC_MODE
-THUMB(	orr	r7, r7, #PSR_T_BIT	)
-	msr	spsr_cxsf, r7		@ This is SPSR_hyp.
-
-	__MSR_ELR_HYP(14)		@ msr elr_hyp, lr
-	__ERET				@ return, switching to SVC mode
-	@ The boot CPU mode is left in r4.
+	bx	lr			@ The boot CPU mode is left in r4.
 ENDPROC(__hyp_stub_install_secondary)
 
 __hyp_stub_do_trap:
@@ -200,7 +194,7 @@ ENDPROC(__hyp_get_vectors)
 	@ fall through
 ENTRY(__hyp_set_vectors)
 	__HVC(0)
-	bx	lr
+	mov	pc, lr
 ENDPROC(__hyp_set_vectors)
 
 #ifndef ZIMAGE
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 6b2fb87c8698..076c26d43864 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
 	 * pages.  But we still need to process highmem pages individually.
 	 * If highmem is not configured then the bulk of this loop gets
 	 * optimized out.
 	 */
-	size_t left = size;
 	do {
 		size_t len = left;
 		void *vaddr;
 
+		page = pfn_to_page(pfn);
+
 		if (PageHighMem(page)) {
-			if (len + offset > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset / PAGE_SIZE;
-					offset %= PAGE_SIZE;
-				}
+			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
-			}
 			vaddr = kmap_high_get(page);
 			if (vaddr) {
 				vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			op(vaddr, len, dir);
 		}
 		offset = 0;
-		page++;
+		pfn++;
 		left -= len;
 	} while (left);
 }
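The key point of the dma_cache_maint_page() change is that struct page pointers may not be incremented blindly across a buffer: with SPARSEMEM the mem_map arrays are only contiguous within a memory section, so the loop now keeps a pfn and looks the page up each iteration. A standalone model of the pitfall (toy sizes and an invented layout, not kernel code):

/*
 * Standalone model of the SPARSEMEM pitfall fixed above: struct page
 * arrays ("mem_map") are only guaranteed contiguous within one memory
 * section, so advancing with "page++" across a section boundary can walk
 * off the end of the current section's array.
 */
#include <stdio.h>

#define PAGES_PER_SECTION 4			/* tiny, for illustration */

struct page { unsigned long pfn; };

static struct page sec0_map[PAGES_PER_SECTION];	/* mem_map of section 0 */
static struct page sec1_map[PAGES_PER_SECTION];	/* mem_map of section 1, not adjacent */
static struct page *section_map[] = { sec0_map, sec1_map };

static struct page *pfn_to_page(unsigned long pfn)
{
	return &section_map[pfn / PAGES_PER_SECTION][pfn % PAGES_PER_SECTION];
}

int main(void)
{
	for (unsigned long pfn = 0; pfn < 2 * PAGES_PER_SECTION; pfn++)
		pfn_to_page(pfn)->pfn = pfn;

	/* A physically contiguous buffer spanning pfns 2..5 crosses the
	 * section boundary at pfn 4. */
	for (unsigned long pfn = 2; pfn <= 5; pfn++) {
		/* "page++" starting from pfn_to_page(2) would reach sec0_map[pfn]. */
		if (pfn < PAGES_PER_SECTION && pfn_to_page(pfn) == &sec0_map[pfn])
			printf("pfn %lu: page++ still points at the right page\n", pfn);
		else
			printf("pfn %lu: page++ has left section 0's mem_map; "
			       "pfn_to_page() is required\n", pfn);
	}
	return 0;
}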
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f0610243bd6..ce328c7f5c94 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -283,7 +283,7 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY_SO] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_MT_UNCACHED,
+				L_PTE_MT_UNCACHED | L_PTE_XN,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
 				PMD_SECT_UNCACHED | PMD_SECT_XN,
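The section-mapping flags for MT_MEMORY_SO already carried PMD_SECT_XN; the one-line change above adds the matching L_PTE_XN so the execute-never attribute is not lost when a strongly-ordered region happens to be mapped with small pages rather than sections. A standalone model of that asymmetry (flag bits invented for illustration, not the real encodings):

/*
 * Standalone model, not kernel code.  A region described by a mem_type
 * can be mapped either with sections (prot_sect) or with small pages
 * (prot_pte), so the execute-never bit must be present in both flag sets.
 */
#include <stdio.h>
#include <stdbool.h>

#define SECT_XN	(1u << 0)	/* illustrative stand-in for PMD_SECT_XN */
#define PTE_XN	(1u << 1)	/* illustrative stand-in for L_PTE_XN */

struct mem_type { unsigned int prot_sect, prot_pte; };

static bool executable(const struct mem_type *t, bool mapped_with_sections)
{
	unsigned int prot = mapped_with_sections ? t->prot_sect : t->prot_pte;
	unsigned int xn   = mapped_with_sections ? SECT_XN : PTE_XN;
	return !(prot & xn);
}

int main(void)
{
	struct mem_type so_before = { .prot_sect = SECT_XN, .prot_pte = 0 };
	struct mem_type so_after  = { .prot_sect = SECT_XN, .prot_pte = PTE_XN };

	/* Before the fix, a page-mapped strongly-ordered region stayed executable. */
	printf("page-mapped, before fix: executable=%d\n", executable(&so_before, false));
	printf("page-mapped, after fix:  executable=%d\n", executable(&so_after, false));
	return 0;
}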
diff --git a/arch/arm/vfp/entry.S b/arch/arm/vfp/entry.S
index cc926c985981..323ce1a62bbf 100644
--- a/arch/arm/vfp/entry.S
+++ b/arch/arm/vfp/entry.S
@@ -22,7 +22,7 @@
 @  IRQs disabled.
 @
 ENTRY(do_vfp)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	add	r11, r4, #1		@ increment it
 	str	r11, [r10, #TI_PREEMPT]
@@ -35,7 +35,7 @@ ENTRY(do_vfp)
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
@@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry)
 
 	__INIT
 ENTRY(vfp_testing_entry)
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index ea0349f63586..dd5e56f95f3f 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -168,7 +168,7 @@ vfp_hw_state_valid:
 					@ else it's one 32-bit instruction, so
 					@ always subtract 4 from the following
 					@ instruction address.
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
@@ -192,7 +192,7 @@ look_for_VFP_exceptions:
 					@ not recognised by VFP
 
 	DBGSTR	"not VFP"
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPT_COUNT
 	get_thread_info	r10
 	ldr	r4, [r10, #TI_PREEMPT]	@ get preempt count
 	sub	r11, r4, #1		@ decrement it
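
The CONFIG_PREEMPT to CONFIG_PREEMPT_COUNT switch in entry.S and vfphw.S matters because these hand-rolled preempt-count updates must be compiled under the same condition as the generic preempt-count machinery they pair with, and that machinery is active whenever CONFIG_PREEMPT_COUNT is set (it can be selected without full CONFIG_PREEMPT, for example by a debug option); otherwise one half of an increment/decrement pair disappears and the count is left unbalanced. A standalone model of that failure mode (all names invented, not kernel code):

/*
 * Standalone model of an unbalanced preempt count.  Configuration
 * modelled: CONFIG_PREEMPT_COUNT is set while full CONFIG_PREEMPT is not.
 */
#include <stdio.h>

#define CONFIG_PREEMPT_COUNT 1
/* CONFIG_PREEMPT deliberately not defined */

static int preempt_count;

static void preempt_enable_model(void)
{
#ifdef CONFIG_PREEMPT_COUNT
	preempt_count--;	/* the generic side always keys off PREEMPT_COUNT */
#endif
}

static void vfp_entry_model(void)
{
#ifdef CONFIG_PREEMPT		/* the old guard used by the VFP assembly */
	preempt_count++;	/* open-coded "preempt disable" */
#endif
	/* ... handle the VFP exception ... */
	preempt_enable_model();	/* paired decrement done elsewhere */
}

int main(void)
{
	vfp_entry_model();
	/* Prints -1: the increment was compiled out, the decrement was not. */
	printf("preempt_count = %d\n", preempt_count);
	return 0;
}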