author    Linus Torvalds <torvalds@linux-foundation.org>  2013-09-30 13:37:05 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-30 13:37:05 -0400
commit    815a4bb18ba834a58a60da14536ace4dcaa8b465 (patch)
tree      73769cdcf8a42b10aa6063c3645c7175225d4589
parent    15c03dd4859ab16f9212238f29dd315654aa94f6 (diff)
parent    55c2e26204276b27f2b7a63123b701c950e45d89 (diff)
Merge tag 'arc-fixes-for-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC Fixes from Vineet Gupta:

 - Handle unaligned access in zero delay loops

 - spinlock livelock fix for SMP systemC model

 - fix 32bit overflow in access_ok

 - better setup of clockevents

* tag 'arc-fixes-for-3.12' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: Use clockevents_config_and_register over clockevents_register_device
  ARC: Workaround spinlock livelock in SMP SystemC simulation
  ARC: Fix 32-bit wrap around in access_ok()
  ARC: Handle zero-overhead-loop in unaligned access handler
-rw-r--r--  arch/arc/include/asm/spinlock.h    9
-rw-r--r--  arch/arc/include/asm/uaccess.h     4
-rw-r--r--  arch/arc/kernel/time.c             7
-rw-r--r--  arch/arc/kernel/unaligned.c        6
4 files changed, 18 insertions, 8 deletions
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197ac5b0..b6a8c2dfbe6e 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	__asm__ __volatile__(
+	"	ex  %0, [%1]		\n"
+	: "+r" (tmp)
+	: "r"(&(lock->slock))
+	: "memory");
+
 	smp_mb();
 }
 
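Note on the hunk above: the unlock path no longer releases the lock with a plain store but with the same EX (atomic exchange) instruction the lock/trylock paths use, so the SystemC SMP model sees a bus-level exchange transaction and spinning cores get a chance to make progress. A minimal user-space sketch of the same idea, using C11 atomics in place of the ARC inline asm; the toy_* names and the lock values are illustrative stand-ins, not kernel definitions:

	#include <stdatomic.h>

	#define TOY_LOCKED	0u
	#define TOY_UNLOCKED	1u

	typedef struct { atomic_uint slock; } toy_spinlock_t;

	static void toy_spin_lock(toy_spinlock_t *lock)
	{
		/* acquire by atomically swapping in LOCKED until we read UNLOCKED */
		while (atomic_exchange_explicit(&lock->slock, TOY_LOCKED,
						memory_order_acquire) != TOY_UNLOCKED)
			;
	}

	static void toy_spin_unlock(toy_spinlock_t *lock)
	{
		/*
		 * The old code was effectively "lock->slock = UNLOCKED;", a plain
		 * store.  Releasing with the same exchange primitive the lock path
		 * uses mirrors the EX instruction added in the diff above.
		 */
		atomic_exchange_explicit(&lock->slock, TOY_UNLOCKED,
					 memory_order_release);
	}

	int main(void)
	{
		toy_spinlock_t l = { TOY_UNLOCKED };

		toy_spin_lock(&l);
		/* ... critical section ... */
		toy_spin_unlock(&l);
		return 0;
	}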
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 32420824375b..30c9baffa96f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -43,7 +43,7 @@
  * Because it essentially checks if buffer end is within limit and @len is
  * non-ngeative, which implies that buffer start will be within limit too.
  *
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majority of cases, @len is generally
  * compile time constant, causing first sub-expression to be compile time
  * subsumed.
  *
@@ -53,7 +53,7 @@
  *
  */
 #define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
-				 (((addr)+(sz)) <= get_fs()))
+				 ((addr) <= (get_fs() - (sz))))
 #define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
 				 likely(__user_ok((addr), (sz))))
 
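The second hunk guards against 32-bit wrap-around: with the old form, (addr) + (sz) can overflow for an address near the top of the 32-bit space, wrap past zero, and slip under the limit, whereas the rewritten form compares addr against the limit minus sz, which cannot wrap once sz <= TASK_SIZE has already been established. A small stand-alone demonstration, using an arbitrary stand-in limit rather than the real ARC memory layout or get_fs():

	#include <stdint.h>
	#include <stdio.h>

	#define TOY_TASK_SIZE 0x60000000u	/* illustrative user-space limit */

	static int old_user_ok(uint32_t addr, uint32_t sz)
	{
		return (sz <= TOY_TASK_SIZE) && ((addr + sz) <= TOY_TASK_SIZE);
	}

	static int new_user_ok(uint32_t addr, uint32_t sz)
	{
		return (sz <= TOY_TASK_SIZE) && (addr <= (TOY_TASK_SIZE - sz));
	}

	int main(void)
	{
		uint32_t addr = 0xfffff000u;	/* address near the top of 4 GB */
		uint32_t sz   = 0x2000u;	/* addr + sz wraps past zero     */

		/* old form: 0xfffff000 + 0x2000 wraps to 0x1000 and passes */
		printf("old check: %d\n", old_user_ok(addr, sz));	/* prints 1 */
		/* new form: addr is compared against limit - sz and fails  */
		printf("new check: %d\n", new_user_ok(addr, sz));	/* prints 0 */
		return 0;
	}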
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 0e51e69cf30d..3fde7de3ea67 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -227,12 +227,9 @@ void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
 {
 	struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
 
-	clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
-
-	clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
 	clk->cpumask = cpumask_of(cpu);
-
-	clockevents_register_device(clk);
+	clockevents_config_and_register(clk, arc_get_core_freq(),
+					0, ARC_TIMER_MAX);
 
 	/*
 	 * setup the per-cpu timer IRQ handler - for all cpus
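Here the open-coded mult/shift setup and max_delta_ns computation are dropped in favour of clockevents_config_and_register(), which derives those values from the clock frequency and the minimum/maximum programmable tick counts. The sketch below shows only the basic arithmetic such a conversion involves (a maximum tick count at freq Hz turned into a maximum interval in nanoseconds); it is plain illustration with made-up numbers, not the kernel's mult/shift implementation:

	#include <stdint.h>
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000ULL

	/* longest interval (in ns) a down-counter of max_count ticks can time
	 * when clocked at freq_hz */
	static uint64_t max_delta_ns(uint64_t freq_hz, uint64_t max_count)
	{
		/* each tick lasts NSEC_PER_SEC / freq_hz nanoseconds */
		return (max_count * NSEC_PER_SEC) / freq_hz;
	}

	int main(void)
	{
		/* e.g. an 80 MHz core clock driving a 32-bit timer
		 * (values picked purely for illustration) */
		printf("max interval: %llu ns\n",
		       (unsigned long long)max_delta_ns(80000000ULL, 0xffffffffULL));
		return 0;
	}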
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 28d170060747..7ff5b5c183bb 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -245,6 +245,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;
+
+		/* handle zero-overhead-loop */
+		if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+			regs->ret = regs->lp_start;
+			regs->lp_count--;
+		}
 	}
 
 	return 0;
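ARC zero-overhead loops (LP_START/LP_END/LP_COUNT) branch back in hardware, so when the fixup handler emulates an unaligned access and advances regs->ret past it, it must also replicate that back-branch if the emulated instruction was the last one of an active loop; otherwise execution would fall out of the loop early. A toy model of that program-counter update, with illustrative structure and field names rather than the real pt_regs layout:

	#include <stdint.h>
	#include <stdio.h>

	struct toy_regs {
		uint32_t ret;		/* address to resume at (next pc)  */
		uint32_t lp_start;	/* first instruction of hw loop    */
		uint32_t lp_end;	/* address just past the loop body */
		uint32_t lp_count;	/* remaining iterations            */
	};

	static void advance_past_emulated_insn(struct toy_regs *r, uint32_t instr_len)
	{
		r->ret += instr_len;

		/* emulated instruction was the last one in an active hw loop:
		 * branch back to lp_start and consume one iteration, exactly as
		 * the core would have done */
		if (r->ret == r->lp_end && r->lp_count) {
			r->ret = r->lp_start;
			r->lp_count--;
		}
	}

	int main(void)
	{
		struct toy_regs r = {
			.ret      = 0x1000 + 12,	/* faulting insn inside the loop */
			.lp_start = 0x1000,
			.lp_end   = 0x1010,		/* faulting insn is the last one */
			.lp_count = 3,
		};

		advance_past_emulated_insn(&r, 4);
		printf("resume at %#x, iterations left %u\n",
		       (unsigned)r.ret, (unsigned)r.lp_count);
		/* prints: resume at 0x1000, iterations left 2 */
		return 0;
	}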