author     Linus Torvalds <torvalds@linux-foundation.org>  2013-08-16 19:52:29 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-08-16 19:52:29 -0400
commit     2620bf06f168527e8d5159d6c21ea80e60b663fd
tree       0ce69c6150ac8e5fd929cfc6cd34d2b4ad1cabc1 /arch
parent     359d16ca1bd6adcd9f031da35d807f9c37ec6a6e
parent     2a2822475d0e734adffab72644329d9c042ce2e1
Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
Pull ARM fixes from Russell King:
"The usual collection of random fixes. Also some further fixes to the
last set of security fixes, and some more from Will (which you may
already have in a slightly different form)"
* 'fixes' of git://git.linaro.org/people/rmk/linux-arm:
ARM: 7807/1: kexec: validate CPU hotplug support
ARM: 7812/1: rwlocks: retry trylock operation if strex fails on free lock
ARM: 7811/1: locks: use early clobber in arch_spin_trylock
ARM: 7810/1: perf: Fix array out of bounds access in armpmu_map_hw_event()
ARM: 7809/1: perf: fix event validation for software group leaders
ARM: Fix FIQ code on VIVT CPUs
ARM: Fix !kuser helpers case
ARM: Fix the world famous typo with is_gate_vma()
Diffstat (limited to 'arch')
 arch/arm/include/asm/smp_plat.h |  3
 arch/arm/include/asm/spinlock.h | 51
 arch/arm/kernel/entry-armv.S    |  3
 arch/arm/kernel/fiq.c           |  8
 arch/arm/kernel/machine_kexec.c | 20
 arch/arm/kernel/perf_event.c    |  5
 arch/arm/kernel/process.c       |  2
 arch/arm/kernel/smp.c           | 10
 8 files changed, 69 insertions(+), 33 deletions(-)
diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h
index 6462a721ebd4..a252c0bfacf5 100644
--- a/arch/arm/include/asm/smp_plat.h
+++ b/arch/arm/include/asm/smp_plat.h
@@ -88,4 +88,7 @@ static inline u32 mpidr_hash_size(void)
 {
 	return 1 << mpidr_hash.bits;
 }
+
+extern int platform_can_cpu_hotplug(void);
+
 #endif
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index f8b8965666e9..b07c09e5a0ac 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -107,7 +107,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 		"	subs	%1, %0, %0, ror #16\n"
 		"	addeq	%0, %0, %4\n"
 		"	strexeq	%2, %0, [%3]"
-		: "=&r" (slock), "=&r" (contended), "=r" (res)
+		: "=&r" (slock), "=&r" (contended), "=&r" (res)
 		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
 		: "cc");
 	} while (res);
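The one-character change above ("=r" becomes "=&r" on the res operand) is the whole of ARM 7811/1. Without the early-clobber modifier, the compiler is free to allocate res to the same register as the lock-address input, emitting strexeq rX, rY, [rX]; STREX with the status register equal to the base register is architecturally unpredictable. A minimal stand-alone sketch of the constraint at issue (hypothetical names, ARMv6+ only, not kernel code):

	/*
	 * '&' tells GCC that 'status' is written before the inputs are
	 * dead, so it must not share a register with 'addr' or 'val'.
	 */
	static inline int try_store(unsigned long *addr, unsigned long val)
	{
		unsigned long tmp, status;

		__asm__ __volatile__(
		"	ldrex	%0, [%2]\n"
		"	strex	%1, %3, [%2]"
		: "=&r" (tmp), "=&r" (status)
		: "r" (addr), "r" (val)
		: "memory");

		return status == 0;	/* 0 => exclusive store succeeded */
	}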
@@ -168,17 +168,20 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
 
 static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%1]\n"
-"	teq	%0, #0\n"
-"	strexeq	%0, %2, [%1]"
-	: "=&r" (tmp)
-	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	teq	%0, #0\n"
+		"	strexeq	%1, %3, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock), "r" (0x80000000)
+		: "cc");
+	} while (res);
 
-	if (tmp == 0) {
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
@@ -254,18 +257,26 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 
 static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
-	unsigned long tmp, tmp2 = 1;
+	unsigned long contended, res;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	adds	%0, %0, #1\n"
-"	strexpl	%1, %0, [%2]\n"
-	: "=&r" (tmp), "+r" (tmp2)
-	: "r" (&rw->lock)
-	: "cc");
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%2]\n"
+		"	mov	%1, #0\n"
+		"	adds	%0, %0, #1\n"
+		"	strexpl	%1, %0, [%2]"
+		: "=&r" (contended), "=&r" (res)
+		: "r" (&rw->lock)
+		: "cc");
+	} while (res);
 
-	smp_mb();
-	return tmp2 == 0;
+	/* If the lock is negative, then it is already held for write. */
+	if (contended < 0x80000000) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 /* read_can_lock - would read_trylock() succeed? */
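Both rwlock hunks above implement ARM 7812/1: strex can fail even on an uncontended lock (for example, when an interrupt or another CPU's access clears the exclusive monitor), and the old code reported any such failure as contention, so trylock could fail on a free lock. The rewrite separates the observed lock value (contended) from the store result (res) and retries only the latter. A portable analogue of the idiom, using C11's compare_exchange_weak, which is likewise permitted to fail spuriously (hypothetical names, not the kernel's code):

	#include <stdatomic.h>

	static int write_trylock_sketch(atomic_uint *lock)
	{
		unsigned int expected;

		do {
			expected = 0;	/* succeed only on a free lock */
		} while (!atomic_compare_exchange_weak(lock, &expected, 0x80000000u)
			 && expected == 0);	/* failed on a free lock: spurious, retry */

		return expected == 0;	/* 1 => write lock acquired */
	}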
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index d40d0ef389db..9cbe70c8b0ef 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -357,7 +357,8 @@ ENDPROC(__pabt_svc)
 	.endm
 
 	.macro	kuser_cmpxchg_check
-#if !defined(CONFIG_CPU_32v6K) && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+#if !defined(CONFIG_CPU_32v6K) && defined(CONFIG_KUSER_HELPERS) && \
+    !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
 #ifndef CONFIG_MMU
 #warning "NPTL on non MMU needs fixing"
 #else
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index 25442f451148..fc7920288a3d 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -84,17 +84,13 @@ int show_fiq_list(struct seq_file *p, int prec)
 
 void set_fiq_handler(void *start, unsigned int length)
 {
-#if defined(CONFIG_CPU_USE_DOMAINS)
-	void *base = (void *)0xffff0000;
-#else
 	void *base = vectors_page;
-#endif
 	unsigned offset = FIQ_OFFSET;
 
 	memcpy(base + offset, start, length);
+	if (!cache_is_vipt_nonaliasing())
+		flush_icache_range(base + offset, offset + length);
 	flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
-	if (!vectors_high())
-		flush_icache_range(offset, offset + length);
 }
 
 int claim_fiq(struct fiq_handler *f)
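The fiq.c change drops the CONFIG_CPU_USE_DOMAINS special case and makes the writable-alias flush conditional on the cache actually being able to alias. For reference, the resulting function with editorial annotations (the comments are ours, not in the source):

	void set_fiq_handler(void *start, unsigned int length)
	{
		/* Writable kernel alias of the vector page. */
		void *base = vectors_page;
		unsigned offset = FIQ_OFFSET;

		memcpy(base + offset, start, length);
		/*
		 * A VIVT (or aliasing VIPT) instruction cache indexes by
		 * virtual address, so the lines of the writable alias are
		 * distinct from those at 0xffff0000 and need their own flush.
		 */
		if (!cache_is_vipt_nonaliasing())
			flush_icache_range(base + offset, offset + length);
		/* Always flush the mapping the CPU executes vectors from. */
		flush_icache_range(0xffff0000 + offset, 0xffff0000 + offset + length);
	}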
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 4fb074c446bf..d7c82df69243 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -15,6 +15,7 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/mach-types.h>
+#include <asm/smp_plat.h>
 #include <asm/system_misc.h>
 
 extern const unsigned char relocate_new_kernel[];
@@ -39,6 +40,14 @@ int machine_kexec_prepare(struct kimage *image)
 	int i, err;
 
 	/*
+	 * Validate that if the current HW supports SMP, then the SW supports
+	 * and implements CPU hotplug for the current HW. If not, we won't be
+	 * able to kexec reliably, so fail the prepare operation.
+	 */
+	if (num_possible_cpus() > 1 && !platform_can_cpu_hotplug())
+		return -EINVAL;
+
+	/*
 	 * No segment at default ATAGs address. try to locate
 	 * a dtb using magic.
 	 */
@@ -134,10 +143,13 @@ void machine_kexec(struct kimage *image)
 	unsigned long reboot_code_buffer_phys;
 	void *reboot_code_buffer;
 
-	if (num_online_cpus() > 1) {
-		pr_err("kexec: error: multiple CPUs still online\n");
-		return;
-	}
+	/*
+	 * This can only happen if machine_shutdown() failed to disable some
+	 * CPU, and that can only happen if the checks in
+	 * machine_kexec_prepare() were not correct. If this fails, we can't
+	 * reliably kexec anyway, so BUG_ON is appropriate.
+	 */
+	BUG_ON(num_online_cpus() > 1);
 
 	page_list = image->head & PAGE_MASK;
 
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 21f77906602c..e186ee1e63f6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -56,7 +56,7 @@ armpmu_map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 	int mapping;
 
 	if (config >= PERF_COUNT_HW_MAX)
-		return -ENOENT;
+		return -EINVAL;
 
 	mapping = (*event_map)[config];
 	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
@@ -258,6 +258,9 @@ validate_event(struct pmu_hw_events *hw_events,
 	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
 	struct pmu *leader_pmu = event->group_leader->pmu;
 
+	if (is_software_event(event))
+		return 1;
+
 	if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
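The validate_event() hunk (ARM 7809/1) exempts software events from the hardware PMU's schedulability check: a perf group may legally have a software leader with hardware siblings, and such a leader owns no hardware PMU state to validate. A user-space sketch of the mixed group this concerns, via perf_event_open(2) (error handling elided; glibc provides no wrapper, so the syscall is issued directly):

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		int leader, sibling;

		/* Software group leader: CPU clock. */
		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_CPU_CLOCK;
		leader = perf_event_open(&attr, 0, -1, -1, 0);

		/* Hardware sibling in the same group: cycle counter. */
		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		sibling = perf_event_open(&attr, 0, -1, leader, 0);

		/* With the fix, ARM's group validation skips the software
		 * leader instead of treating it as a hardware event. */
		return (leader < 0 || sibling < 0) ? 1 : 0;
	}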
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 536c85fe72a8..94f6b05f9e24 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -462,7 +462,7 @@ int in_gate_area_no_mm(unsigned long addr)
 {
 	return in_gate_area(NULL, addr);
 }
-#define is_gate_vma(vma)	((vma) = &gate_vma)
+#define is_gate_vma(vma)	((vma) == &gate_vma)
 #else
 #define is_gate_vma(vma)	0
 #endif
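The "world famous typo" is a lone '=' where '==' was intended: because the macro argument is an lvalue, the buggy form silently overwrites the caller's VMA pointer and evaluates true for every VMA. A hypothetical stand-alone demonstration of the failure mode:

	#include <assert.h>

	struct vm_area_struct { int dummy; };
	static struct vm_area_struct gate_vma;

	#define is_gate_vma_buggy(vma)	((vma) = &gate_vma)	/* assigns */
	#define is_gate_vma_fixed(vma)	((vma) == &gate_vma)	/* compares */

	int main(void)
	{
		struct vm_area_struct other, *vma = &other;

		assert(!is_gate_vma_fixed(vma));	/* correct: not the gate VMA */
		assert(is_gate_vma_buggy(vma));		/* "true" for any VMA... */
		assert(vma == &gate_vma);		/* ...and vma was clobbered */
		return 0;
	}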
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c2b4f8f0be9a..2dc19349eb19 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -145,6 +145,16 @@ int boot_secondary(unsigned int cpu, struct task_struct *idle)
 	return -ENOSYS;
 }
 
+int platform_can_cpu_hotplug(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+	if (smp_ops.cpu_kill)
+		return 1;
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 static void percpu_timer_stop(void);
 