author     Linus Torvalds <torvalds@linux-foundation.org>   2013-03-07 16:47:18 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-03-07 16:47:18 -0500
commit     19cf3edbf33375e81f9b41a087d3fad95858a4e3 (patch)
tree       040d427ab7111cdee528536a2a6e6b1931f05817
parent     d345243629db38a6340bcb69f372329d35b8f650 (diff)
parent     455bd4c430b0c0a361f38e8658a0d6cb469942b5 (diff)
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM fixes from Russell King:
"Mainly a group of fixes, the only exception is the wiring up of the
kcmp syscall now that those patches went in during the last merge
window."
* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
ARM: 7668/1: fix memset-related crashes caused by recent GCC (4.7.2) optimizations
ARM: 7667/1: perf: Fix section mismatch on armpmu_init()
ARM: 7666/1: decompressor: add -mno-single-pic-base for building the decompressor
ARM: 7665/1: Wire up kcmp syscall
ARM: 7664/1: perf: remove erroneous semicolon from event initialisation
ARM: 7663/1: perf: fix ARMv7 EVTYPE_MASK to include NSH bit
ARM: 7662/1: hw_breakpoint: reset debug logic on secondary CPUs in s2ram resume
ARM: 7661/1: mm: perform explicit branch predictor maintenance when required
ARM: 7660/1: tlb: add branch predictor maintenance operations
ARM: 7659/1: mm: make mm->context.id an atomic64_t variable
ARM: 7658/1: mm: fix race updating mm->context.id on ASID rollover
ARM: 7657/1: head: fix swapper and idmap population with LPAE and big-endian
ARM: 7655/1: smp_twd: make twd_local_timer_of_register() no-op for nosmp
ARM: 7652/1: mm: fix missing use of 'asid' to get asid value from mm->context.id
ARM: 7642/1: netx: bump IRQ offset to 64
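Background on the first item above (not part of the pull itself): the memset crashes fixed by 7668/1 come from GCC 4.7.2 optimizing around the C guarantee that memset() returns its destination pointer, while the old ARM assembly clobbered r0, the return register, as it walked the buffer. A minimal, hypothetical C fragment of the kind of caller that breaks; the struct and function names are made up for illustration:

```c
#include <string.h>

struct foo { int a, b, c; };

struct foo *init_foo(struct foo *f)
{
	/*
	 * GCC may feed memset()'s return value (which must equal f)
	 * straight into the store below instead of keeping its own
	 * copy of the pointer.  If memset() comes back with a
	 * corrupted r0, the store goes through a bogus pointer.
	 */
	memset(f, 0, sizeof(*f));
	f->a = 1;
	return f;
}
```

The memset.S patch below therefore leaves r0 untouched and does the work in ip and r8 instead.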
-rw-r--r--  arch/arm/boot/compressed/Makefile        |  2
-rw-r--r--  arch/arm/include/asm/mmu.h               |  8
-rw-r--r--  arch/arm/include/asm/mmu_context.h       |  2
-rw-r--r--  arch/arm/include/asm/tlbflush.h          | 34
-rw-r--r--  arch/arm/include/uapi/asm/unistd.h       |  2
-rw-r--r--  arch/arm/kernel/asm-offsets.c            |  2
-rw-r--r--  arch/arm/kernel/calls.S                  |  2
-rw-r--r--  arch/arm/kernel/head.S                   | 26
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c          |  2
-rw-r--r--  arch/arm/kernel/perf_event.c             |  4
-rw-r--r--  arch/arm/kernel/perf_event_v7.c          |  2
-rw-r--r--  arch/arm/kernel/smp.c                    |  1
-rw-r--r--  arch/arm/kernel/smp_tlb.c                | 12
-rw-r--r--  arch/arm/kernel/smp_twd.c                |  4
-rw-r--r--  arch/arm/kernel/suspend.c                |  1
-rw-r--r--  arch/arm/lib/memset.S                    | 85
-rw-r--r--  arch/arm/mach-netx/generic.c             |  2
-rw-r--r--  arch/arm/mach-netx/include/mach/irqs.h   | 64
-rw-r--r--  arch/arm/mm/context.c                    | 29
-rw-r--r--  arch/arm/mm/idmap.c                      |  1
-rw-r--r--  arch/arm/mm/proc-v7-3level.S             |  2
21 files changed, 178 insertions, 109 deletions
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 5cad8a6dadb0..afed28e37ea5 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -120,7 +120,7 @@ ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg, , $(ORIG_CFLAGS))
 endif
 
-ccflags-y := -fpic -fno-builtin -I$(obj)
+ccflags-y := -fpic -mno-single-pic-base -fno-builtin -I$(obj)
 asflags-y := -Wa,-march=all -DZIMAGE
 
 # Supply kernel BSS size to the decompressor via a linker symbol.
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 9f77e7804f3b..e3d55547e755 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -5,15 +5,15 @@
 
 typedef struct {
 #ifdef CONFIG_CPU_HAS_ASID
-        u64 id;
+        atomic64_t id;
 #endif
         unsigned int vmalloc_seq;
 } mm_context_t;
 
 #ifdef CONFIG_CPU_HAS_ASID
 #define ASID_BITS       8
 #define ASID_MASK       ((~0ULL) << ASID_BITS)
-#define ASID(mm)        ((mm)->context.id & ~ASID_MASK)
+#define ASID(mm)        ((mm)->context.id.counter & ~ASID_MASK)
 #else
 #define ASID(mm)        (0)
 #endif
@@ -26,7 +26,7 @@ typedef struct {
  *  modified for 2.6 by Hyok S. Choi <hyok.choi@samsung.com>
  */
 typedef struct {
-        unsigned long end_brk;
+        unsigned long end_brk;
 } mm_context_t;
 
 #endif
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index e1f644bc7cc5..863a6611323c 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -25,7 +25,7 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 #ifdef CONFIG_CPU_HAS_ASID
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
-#define init_new_context(tsk,mm)        ({ mm->context.id = 0; })
+#define init_new_context(tsk,mm)        ({ atomic64_set(&mm->context.id, 0); 0; })
 
 #else   /* !CONFIG_CPU_HAS_ASID */
 
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 6e924d3a77eb..4db8c8820f0d 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -34,10 +34,13 @@
 #define TLB_V6_D_ASID   (1 << 17)
 #define TLB_V6_I_ASID   (1 << 18)
 
+#define TLB_V6_BP       (1 << 19)
+
 /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
-#define TLB_V7_UIS_PAGE (1 << 19)
-#define TLB_V7_UIS_FULL (1 << 20)
-#define TLB_V7_UIS_ASID (1 << 21)
+#define TLB_V7_UIS_PAGE (1 << 20)
+#define TLB_V7_UIS_FULL (1 << 21)
+#define TLB_V7_UIS_ASID (1 << 22)
+#define TLB_V7_UIS_BP   (1 << 23)
 
 #define TLB_BARRIER     (1 << 28)
 #define TLB_L2CLEAN_FR  (1 << 29)               /* Feroceon */
@@ -150,7 +153,8 @@
 #define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
                          TLB_V6_I_FULL | TLB_V6_D_FULL | \
                          TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
-                         TLB_V6_I_ASID | TLB_V6_D_ASID)
+                         TLB_V6_I_ASID | TLB_V6_D_ASID | \
+                         TLB_V6_BP)
 
 #ifdef CONFIG_CPU_TLB_V6
 # define v6wbi_possible_flags   v6wbi_tlb_flags
@@ -166,9 +170,11 @@
 #endif
 
 #define v7wbi_tlb_flags_smp     (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
-                                 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
+                                 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | \
+                                 TLB_V7_UIS_ASID | TLB_V7_UIS_BP)
 #define v7wbi_tlb_flags_up      (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
-                                 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
+                                 TLB_V6_U_FULL | TLB_V6_U_PAGE | \
+                                 TLB_V6_U_ASID | TLB_V6_BP)
 
 #ifdef CONFIG_CPU_TLB_V7
 
@@ -430,6 +436,20 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
         }
 }
 
+static inline void local_flush_bp_all(void)
+{
+        const int zero = 0;
+        const unsigned int __tlb_flag = __cpu_tlb_flags;
+
+        if (tlb_flag(TLB_V7_UIS_BP))
+                asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
+        else if (tlb_flag(TLB_V6_BP))
+                asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero));
+
+        if (tlb_flag(TLB_BARRIER))
+                isb();
+}
+
 /*
  *      flush_pmd_entry
  *
@@ -480,6 +500,7 @@ static inline void clean_pmd_entry(void *pmd)
 #define flush_tlb_kernel_page   local_flush_tlb_kernel_page
 #define flush_tlb_range         local_flush_tlb_range
 #define flush_tlb_kernel_range  local_flush_tlb_kernel_range
+#define flush_bp_all            local_flush_bp_all
 #else
 extern void flush_tlb_all(void);
 extern void flush_tlb_mm(struct mm_struct *mm);
@@ -487,6 +508,7 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr);
 extern void flush_tlb_kernel_page(unsigned long kaddr);
 extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+extern void flush_bp_all(void);
 #endif
 
 /*
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 4da7cde70b5d..af33b44990ed 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -404,7 +404,7 @@
 #define __NR_setns                      (__NR_SYSCALL_BASE+375)
 #define __NR_process_vm_readv           (__NR_SYSCALL_BASE+376)
 #define __NR_process_vm_writev          (__NR_SYSCALL_BASE+377)
-                                        /* 378 for kcmp */
+#define __NR_kcmp                       (__NR_SYSCALL_BASE+378)
 #define __NR_finit_module               (__NR_SYSCALL_BASE+379)
 
 /*
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 5ce738b43508..923eec7105cf 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -110,7 +110,7 @@ int main(void)
   BLANK();
 #endif
 #ifdef CONFIG_CPU_HAS_ASID
-  DEFINE(MM_CONTEXT_ID,         offsetof(struct mm_struct, context.id));
+  DEFINE(MM_CONTEXT_ID,         offsetof(struct mm_struct, context.id.counter));
   BLANK();
 #endif
   DEFINE(VMA_VM_MM,             offsetof(struct vm_area_struct, vm_mm));
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 0cc57611fc4f..c6ca7e376773 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -387,7 +387,7 @@
 /* 375 */      CALL(sys_setns)
                CALL(sys_process_vm_readv)
                CALL(sys_process_vm_writev)
-               CALL(sys_ni_syscall)    /* reserved for sys_kcmp */
+               CALL(sys_kcmp)
                CALL(sys_finit_module)
 #ifndef syscalls_counted
 .equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 486a15ae9011..e0eb9a1cae77 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -184,13 +184,22 @@ __create_page_tables:
         orr     r3, r3, #3                      @ PGD block type
         mov     r6, #4                          @ PTRS_PER_PGD
         mov     r7, #1 << (55 - 32)             @ L_PGD_SWAPPER
-1:      str     r3, [r0], #4                    @ set bottom PGD entry bits
+1:
+#ifdef CONFIG_CPU_ENDIAN_BE8
         str     r7, [r0], #4                    @ set top PGD entry bits
+        str     r3, [r0], #4                    @ set bottom PGD entry bits
+#else
+        str     r3, [r0], #4                    @ set bottom PGD entry bits
+        str     r7, [r0], #4                    @ set top PGD entry bits
+#endif
         add     r3, r3, #0x1000                 @ next PMD table
         subs    r6, r6, #1
         bne     1b
 
         add     r4, r4, #0x1000                 @ point to the PMD tables
+#ifdef CONFIG_CPU_ENDIAN_BE8
+        add     r4, r4, #4                      @ we only write the bottom word
+#endif
 #endif
 
         ldr     r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags
@@ -258,6 +267,11 @@ __create_page_tables:
         addne   r6, r6, #1 << SECTION_SHIFT
         strne   r6, [r3]
 
+#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+        sub     r4, r4, #4                      @ Fixup page table pointer
+                                                @ for 64-bit descriptors
+#endif
+
 #ifdef CONFIG_DEBUG_LL
 #if !defined(CONFIG_DEBUG_ICEDCC) && !defined(CONFIG_DEBUG_SEMIHOSTING)
         /*
@@ -276,13 +290,17 @@ __create_page_tables:
         orr     r3, r7, r3, lsl #SECTION_SHIFT
 #ifdef CONFIG_ARM_LPAE
         mov     r7, #1 << (54 - 32)             @ XN
+#ifdef CONFIG_CPU_ENDIAN_BE8
+        str     r7, [r0], #4
+        str     r3, [r0], #4
 #else
-        orr     r3, r3, #PMD_SECT_XN
-#endif
         str     r3, [r0], #4
-#ifdef CONFIG_ARM_LPAE
         str     r7, [r0], #4
 #endif
+#else
+        orr     r3, r3, #PMD_SECT_XN
+        str     r3, [r0], #4
+#endif
 
 #else /* CONFIG_DEBUG_ICEDCC || CONFIG_DEBUG_SEMIHOSTING */
         /* we don't need any serial debugging mappings */
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 5eae53e7a2e1..96093b75ab90 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1023,7 +1023,7 @@ out_mdbgen:
 static int __cpuinit dbg_reset_notify(struct notifier_block *self,
                                       unsigned long action, void *cpu)
 {
-        if (action == CPU_ONLINE)
+        if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
                 smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
 
         return NOTIFY_OK;
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 31e0eb353cd8..146157dfe27c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -400,7 +400,7 @@ __hw_perf_event_init(struct perf_event *event)
         }
 
         if (event->group_leader != event) {
-                if (validate_group(event) != 0);
+                if (validate_group(event) != 0)
                         return -EINVAL;
         }
 
@@ -484,7 +484,7 @@ const struct dev_pm_ops armpmu_dev_pm_ops = {
         SET_RUNTIME_PM_OPS(armpmu_runtime_suspend, armpmu_runtime_resume, NULL)
 };
 
-static void __init armpmu_init(struct arm_pmu *armpmu)
+static void armpmu_init(struct arm_pmu *armpmu)
 {
         atomic_set(&armpmu->active_events, 0);
         mutex_init(&armpmu->reserve_mutex);
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8c79a9e70b83..039cffb053a7 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -774,7 +774,7 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 /*
  * PMXEVTYPER: Event selection reg
  */
-#define ARMV7_EVTYPE_MASK       0xc00000ff      /* Mask for writable bits */
+#define ARMV7_EVTYPE_MASK       0xc80000ff      /* Mask for writable bits */
 #define ARMV7_EVTYPE_EVENT      0xff            /* Mask for EVENT bits */
 
 /*
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 1bdfd87c8e41..31644f1978d5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -285,6 +285,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
          * switch away from it before attempting any exclusive accesses.
          */
         cpu_switch_mm(mm->pgd, mm);
+        local_flush_bp_all();
         enter_lazy_tlb(mm, current);
         local_flush_tlb_all();
 
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 02c5d2ce23bf..bd0300531399 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -64,6 +64,11 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
         local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
 }
 
+static inline void ipi_flush_bp_all(void *ignored)
+{
+        local_flush_bp_all();
+}
+
 void flush_tlb_all(void)
 {
         if (tlb_ops_need_broadcast())
@@ -127,3 +132,10 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
                 local_flush_tlb_kernel_range(start, end);
 }
 
+void flush_bp_all(void)
+{
+        if (tlb_ops_need_broadcast())
+                on_each_cpu(ipi_flush_bp_all, NULL, 1);
+        else
+                local_flush_bp_all();
+}
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index c092115d903a..3f2565037480 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -22,6 +22,7 @@
 #include <linux/of_irq.h>
 #include <linux/of_address.h>
 
+#include <asm/smp_plat.h>
 #include <asm/smp_twd.h>
 #include <asm/localtimer.h>
 
@@ -373,6 +374,9 @@ void __init twd_local_timer_of_register(void)
         struct device_node *np;
         int err;
 
+        if (!is_smp() || !setup_max_cpus)
+                return;
+
         np = of_find_matching_node(NULL, twd_of_match);
         if (!np)
                 return;
diff --git a/arch/arm/kernel/suspend.c b/arch/arm/kernel/suspend.c
index 358bca3a995e..c59c97ea8268 100644
--- a/arch/arm/kernel/suspend.c
+++ b/arch/arm/kernel/suspend.c
@@ -68,6 +68,7 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
         ret = __cpu_suspend(arg, fn);
         if (ret == 0) {
                 cpu_switch_mm(mm->pgd, mm);
+                local_flush_bp_all();
                 local_flush_tlb_all();
         }
 
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 650d5923ab83..d912e7397ecc 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -19,9 +19,9 @@
 1:      subs    r2, r2, #4              @ 1 do we have enough
         blt     5f                      @ 1 bytes to align with?
         cmp     r3, #2                  @ 1
-        strltb  r1, [r0], #1            @ 1
-        strleb  r1, [r0], #1            @ 1
-        strb    r1, [r0], #1            @ 1
+        strltb  r1, [ip], #1            @ 1
+        strleb  r1, [ip], #1            @ 1
+        strb    r1, [ip], #1            @ 1
         add     r2, r2, r3              @ 1 (r2 = r2 - (4 - r3))
 /*
  * The pointer is now aligned and the length is adjusted.  Try doing the
@@ -29,10 +29,14 @@
  */
 
 ENTRY(memset)
-        ands    r3, r0, #3              @ 1 unaligned?
+/*
+ * Preserve the contents of r0 for the return value.
+ */
+        mov     ip, r0
+        ands    r3, ip, #3              @ 1 unaligned?
         bne     1b                      @ 1
 /*
- * we know that the pointer in r0 is aligned to a word boundary.
+ * we know that the pointer in ip is aligned to a word boundary.
  */
         orr     r1, r1, r1, lsl #8
         orr     r1, r1, r1, lsl #16
@@ -43,29 +47,28 @@ ENTRY(memset)
 #if ! CALGN(1)+0
 
 /*
- * We need an extra register for this loop - save the return address and
- * use the LR
+ * We need 2 extra registers for this loop - use r8 and the LR
  */
-        str     lr, [sp, #-4]!
-        mov     ip, r1
+        stmfd   sp!, {r8, lr}
+        mov     r8, r1
         mov     lr, r1
 
 2:      subs    r2, r2, #64
-        stmgeia r0!, {r1, r3, ip, lr}   @ 64 bytes at a time.
-        stmgeia r0!, {r1, r3, ip, lr}
-        stmgeia r0!, {r1, r3, ip, lr}
-        stmgeia r0!, {r1, r3, ip, lr}
+        stmgeia ip!, {r1, r3, r8, lr}   @ 64 bytes at a time.
+        stmgeia ip!, {r1, r3, r8, lr}
+        stmgeia ip!, {r1, r3, r8, lr}
+        stmgeia ip!, {r1, r3, r8, lr}
         bgt     2b
-        ldmeqfd sp!, {pc}               @ Now <64 bytes to go.
+        ldmeqfd sp!, {r8, pc}           @ Now <64 bytes to go.
 /*
  * No need to correct the count; we're only testing bits from now on
  */
         tst     r2, #32
-        stmneia r0!, {r1, r3, ip, lr}
-        stmneia r0!, {r1, r3, ip, lr}
+        stmneia ip!, {r1, r3, r8, lr}
+        stmneia ip!, {r1, r3, r8, lr}
         tst     r2, #16
-        stmneia r0!, {r1, r3, ip, lr}
-        ldr     lr, [sp], #4
+        stmneia ip!, {r1, r3, r8, lr}
+        ldmfd   sp!, {r8, lr}
 
 #else
 
@@ -74,54 +77,54 @@ ENTRY(memset)
  * whole cache lines at once.
  */
 
-        stmfd   sp!, {r4-r7, lr}
+        stmfd   sp!, {r4-r8, lr}
         mov     r4, r1
         mov     r5, r1
         mov     r6, r1
         mov     r7, r1
-        mov     ip, r1
+        mov     r8, r1
         mov     lr, r1
 
         cmp     r2, #96
-        tstgt   r0, #31
+        tstgt   ip, #31
         ble     3f
 
-        and     ip, r0, #31
-        rsb     ip, ip, #32
-        sub     r2, r2, ip
-        movs    ip, ip, lsl #(32 - 4)
-        stmcsia r0!, {r4, r5, r6, r7}
-        stmmiia r0!, {r4, r5}
-        tst     ip, #(1 << 30)
-        mov     ip, r1
-        strne   r1, [r0], #4
+        and     r8, ip, #31
+        rsb     r8, r8, #32
+        sub     r2, r2, r8
+        movs    r8, r8, lsl #(32 - 4)
+        stmcsia ip!, {r4, r5, r6, r7}
+        stmmiia ip!, {r4, r5}
+        tst     r8, #(1 << 30)
+        mov     r8, r1
+        strne   r1, [ip], #4
 
 3:      subs    r2, r2, #64
-        stmgeia r0!, {r1, r3-r7, ip, lr}
-        stmgeia r0!, {r1, r3-r7, ip, lr}
+        stmgeia ip!, {r1, r3-r8, lr}
+        stmgeia ip!, {r1, r3-r8, lr}
         bgt     3b
-        ldmeqfd sp!, {r4-r7, pc}
+        ldmeqfd sp!, {r4-r8, pc}
 
         tst     r2, #32
-        stmneia r0!, {r1, r3-r7, ip, lr}
+        stmneia ip!, {r1, r3-r8, lr}
         tst     r2, #16
-        stmneia r0!, {r4-r7}
-        ldmfd   sp!, {r4-r7, lr}
+        stmneia ip!, {r4-r7}
+        ldmfd   sp!, {r4-r8, lr}
 
 #endif
 
 4:      tst     r2, #8
-        stmneia r0!, {r1, r3}
+        stmneia ip!, {r1, r3}
         tst     r2, #4
-        strne   r1, [r0], #4
+        strne   r1, [ip], #4
 /*
  * When we get here, we've got less than 4 bytes to zero.  We
  * may have an unaligned pointer as well.
  */
 5:      tst     r2, #2
-        strneb  r1, [r0], #1
-        strneb  r1, [r0], #1
+        strneb  r1, [ip], #1
+        strneb  r1, [ip], #1
         tst     r2, #1
-        strneb  r1, [r0], #1
+        strneb  r1, [ip], #1
         mov     pc, lr
 ENDPROC(memset)
diff --git a/arch/arm/mach-netx/generic.c b/arch/arm/mach-netx/generic.c
index 27c2cb7ab813..1504b68f4c66 100644
--- a/arch/arm/mach-netx/generic.c
+++ b/arch/arm/mach-netx/generic.c
@@ -168,7 +168,7 @@ void __init netx_init_irq(void)
 {
         int irq;
 
-        vic_init(io_p2v(NETX_PA_VIC), 0, ~0, 0);
+        vic_init(io_p2v(NETX_PA_VIC), NETX_IRQ_VIC_START, ~0, 0);
 
         for (irq = NETX_IRQ_HIF_CHAINED(0); irq <= NETX_IRQ_HIF_LAST; irq++) {
                 irq_set_chip_and_handler(irq, &netx_hif_chip,
diff --git a/arch/arm/mach-netx/include/mach/irqs.h b/arch/arm/mach-netx/include/mach/irqs.h
index 6ce914d54a30..8f74a844a775 100644
--- a/arch/arm/mach-netx/include/mach/irqs.h
+++ b/arch/arm/mach-netx/include/mach/irqs.h
@@ -17,42 +17,42 @@
  * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  */
 
-#define NETX_IRQ_VIC_START      0
-#define NETX_IRQ_SOFTINT        0
-#define NETX_IRQ_TIMER0         1
-#define NETX_IRQ_TIMER1         2
-#define NETX_IRQ_TIMER2         3
-#define NETX_IRQ_SYSTIME_NS     4
-#define NETX_IRQ_SYSTIME_S      5
-#define NETX_IRQ_GPIO_15        6
-#define NETX_IRQ_WATCHDOG       7
-#define NETX_IRQ_UART0          8
-#define NETX_IRQ_UART1          9
-#define NETX_IRQ_UART2          10
-#define NETX_IRQ_USB            11
-#define NETX_IRQ_SPI            12
-#define NETX_IRQ_I2C            13
-#define NETX_IRQ_LCD            14
-#define NETX_IRQ_HIF            15
-#define NETX_IRQ_GPIO_0_14      16
-#define NETX_IRQ_XPEC0          17
-#define NETX_IRQ_XPEC1          18
-#define NETX_IRQ_XPEC2          19
-#define NETX_IRQ_XPEC3          20
-#define NETX_IRQ_XPEC(no)       (17 + (no))
-#define NETX_IRQ_MSYNC0         21
-#define NETX_IRQ_MSYNC1         22
-#define NETX_IRQ_MSYNC2         23
-#define NETX_IRQ_MSYNC3         24
-#define NETX_IRQ_IRQ_PHY        25
-#define NETX_IRQ_ISO_AREA       26
+#define NETX_IRQ_VIC_START      64
+#define NETX_IRQ_SOFTINT        (NETX_IRQ_VIC_START + 0)
+#define NETX_IRQ_TIMER0         (NETX_IRQ_VIC_START + 1)
+#define NETX_IRQ_TIMER1         (NETX_IRQ_VIC_START + 2)
+#define NETX_IRQ_TIMER2         (NETX_IRQ_VIC_START + 3)
+#define NETX_IRQ_SYSTIME_NS     (NETX_IRQ_VIC_START + 4)
+#define NETX_IRQ_SYSTIME_S      (NETX_IRQ_VIC_START + 5)
+#define NETX_IRQ_GPIO_15        (NETX_IRQ_VIC_START + 6)
+#define NETX_IRQ_WATCHDOG       (NETX_IRQ_VIC_START + 7)
+#define NETX_IRQ_UART0          (NETX_IRQ_VIC_START + 8)
+#define NETX_IRQ_UART1          (NETX_IRQ_VIC_START + 9)
+#define NETX_IRQ_UART2          (NETX_IRQ_VIC_START + 10)
+#define NETX_IRQ_USB            (NETX_IRQ_VIC_START + 11)
+#define NETX_IRQ_SPI            (NETX_IRQ_VIC_START + 12)
+#define NETX_IRQ_I2C            (NETX_IRQ_VIC_START + 13)
+#define NETX_IRQ_LCD            (NETX_IRQ_VIC_START + 14)
+#define NETX_IRQ_HIF            (NETX_IRQ_VIC_START + 15)
+#define NETX_IRQ_GPIO_0_14      (NETX_IRQ_VIC_START + 16)
+#define NETX_IRQ_XPEC0          (NETX_IRQ_VIC_START + 17)
+#define NETX_IRQ_XPEC1          (NETX_IRQ_VIC_START + 18)
+#define NETX_IRQ_XPEC2          (NETX_IRQ_VIC_START + 19)
+#define NETX_IRQ_XPEC3          (NETX_IRQ_VIC_START + 20)
+#define NETX_IRQ_XPEC(no)       (NETX_IRQ_VIC_START + 17 + (no))
+#define NETX_IRQ_MSYNC0         (NETX_IRQ_VIC_START + 21)
+#define NETX_IRQ_MSYNC1         (NETX_IRQ_VIC_START + 22)
+#define NETX_IRQ_MSYNC2         (NETX_IRQ_VIC_START + 23)
+#define NETX_IRQ_MSYNC3         (NETX_IRQ_VIC_START + 24)
+#define NETX_IRQ_IRQ_PHY        (NETX_IRQ_VIC_START + 25)
+#define NETX_IRQ_ISO_AREA       (NETX_IRQ_VIC_START + 26)
 /* int 27 is reserved */
 /* int 28 is reserved */
-#define NETX_IRQ_TIMER3         29
-#define NETX_IRQ_TIMER4         30
+#define NETX_IRQ_TIMER3         (NETX_IRQ_VIC_START + 29)
+#define NETX_IRQ_TIMER4         (NETX_IRQ_VIC_START + 30)
 /* int 31 is reserved */
 
-#define NETX_IRQS               32
+#define NETX_IRQS               (NETX_IRQ_VIC_START + 32)
 
 /* for multiplexed irqs on gpio 0..14 */
 #define NETX_IRQ_GPIO(x)        (NETX_IRQS + (x))
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 7a0511191f6b..a5a4b2bc42ba 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -152,9 +152,9 @@ static int is_reserved_asid(u64 asid)
         return 0;
 }
 
-static void new_context(struct mm_struct *mm, unsigned int cpu)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 {
-        u64 asid = mm->context.id;
+        u64 asid = atomic64_read(&mm->context.id);
         u64 generation = atomic64_read(&asid_generation);
 
         if (asid != 0 && is_reserved_asid(asid)) {
@@ -181,13 +181,14 @@ static void new_context(struct mm_struct *mm, unsigned int cpu)
                 cpumask_clear(mm_cpumask(mm));
         }
 
-        mm->context.id = asid;
+        return asid;
 }
 
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
         unsigned long flags;
         unsigned int cpu = smp_processor_id();
+        u64 asid;
 
         if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
                 __check_vmalloc_seq(mm);
@@ -198,20 +199,26 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
          */
         cpu_set_reserved_ttbr0();
 
-        if (!((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-            && atomic64_xchg(&per_cpu(active_asids, cpu), mm->context.id))
+        asid = atomic64_read(&mm->context.id);
+        if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
+            && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
                 goto switch_mm_fastpath;
 
         raw_spin_lock_irqsave(&cpu_asid_lock, flags);
         /* Check that our ASID belongs to the current generation. */
-        if ((mm->context.id ^ atomic64_read(&asid_generation)) >> ASID_BITS)
-                new_context(mm, cpu);
-
-        atomic64_set(&per_cpu(active_asids, cpu), mm->context.id);
-        cpumask_set_cpu(cpu, mm_cpumask(mm));
+        asid = atomic64_read(&mm->context.id);
+        if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
+                asid = new_context(mm, cpu);
+                atomic64_set(&mm->context.id, asid);
+        }
 
-        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
+        if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
+                local_flush_bp_all();
                 local_flush_tlb_all();
+        }
+
+        atomic64_set(&per_cpu(active_asids, cpu), asid);
+        cpumask_set_cpu(cpu, mm_cpumask(mm));
         raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
 switch_mm_fastpath:
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 2dffc010cc41..5ee505c937d1 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -141,6 +141,7 @@ void setup_mm_for_reboot(void)
 {
         /* Switch to the identity mapping. */
         cpu_switch_mm(idmap_pgd, &init_mm);
+        local_flush_bp_all();
 
 #ifdef CONFIG_CPU_HAS_ASID
         /*
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index 50bf1dafc9ea..6ffd78c0f9ab 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -48,7 +48,7 @@
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
         mmid    r1, r1                          @ get mm->context.id
-        and     r3, r1, #0xff
+        asid    r3, r1
         mov     r3, r3, lsl #(48 - 32)          @ ASID
         mcrr    p15, 0, r0, r3, c2              @ set TTB 0
         isb
