path: root/arch/arc
author    Ingo Molnar <mingo@kernel.org>  2013-10-09 06:36:13 -0400
committer Ingo Molnar <mingo@kernel.org>  2013-10-09 06:36:13 -0400
commit    37bf06375c90a42fe07b9bebdb07bc316ae5a0ce (patch)
tree      de572dd6d3955b0725001776a7b03796f99e1e8e /arch/arc
parent    6bfa687c19b7ab8adee03f0d43c197c2945dd869 (diff)
parent    d0e639c9e06d44e713170031fe05fb60ebe680af (diff)
Merge tag 'v3.12-rc4' into sched/core
Merge Linux v3.12-rc4 to fix a conflict and also to refresh the tree before
applying more scheduler patches.

Conflicts:
	arch/avr32/include/asm/Kbuild

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/arc')
-rw-r--r--   arch/arc/include/asm/spinlock.h   9
-rw-r--r--   arch/arc/include/asm/uaccess.h    4
-rw-r--r--   arch/arc/kernel/signal.c         25
-rw-r--r--   arch/arc/kernel/time.c            7
-rw-r--r--   arch/arc/kernel/unaligned.c       6

5 files changed, 31 insertions, 20 deletions
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index f158197ac5b0..b6a8c2dfbe6e 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -45,7 +45,14 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
+	unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__;
+
+	__asm__ __volatile__(
+	"	ex  %0, [%1]		\n"
+	: "+r" (tmp)
+	: "r"(&(lock->slock))
+	: "memory");
+
 	smp_mb();
 }
 
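The unlock path above now swaps the unlocked value into lock->slock with the ARC "ex" (atomic exchange) instruction rather than writing it with a plain store. As a rough, user-space illustration only (the lock word encoding and the builtin are stand-ins, not the kernel's implementation), the same shape can be expressed with the GCC/Clang atomic builtins:

#include <stdio.h>

/* Toy lock word: 0 = unlocked, 1 = locked.  Illustrative values only,
 * not the ARC __ARCH_SPIN_LOCK_UNLOCKED__ / LOCKED encoding. */
static unsigned int slock = 1;

static void toy_spin_unlock(unsigned int *lock)
{
	/* Atomically swap the "unlocked" token into the lock word, analogous
	 * to the single "ex %0, [%1]" instruction in the hunk above. */
	unsigned int prev = __atomic_exchange_n(lock, 0u, __ATOMIC_RELEASE);

	(void)prev;	/* a debug build could assert prev was "locked" */
}

int main(void)
{
	toy_spin_unlock(&slock);
	printf("slock = %u\n", slock);	/* prints 0 */
	return 0;
}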
diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h
index 32420824375b..30c9baffa96f 100644
--- a/arch/arc/include/asm/uaccess.h
+++ b/arch/arc/include/asm/uaccess.h
@@ -43,7 +43,7 @@
  * Because it essentially checks if buffer end is within limit and @len is
  * non-ngeative, which implies that buffer start will be within limit too.
  *
- * The reason for rewriting being, for majorit yof cases, @len is generally
+ * The reason for rewriting being, for majority of cases, @len is generally
  * compile time constant, causing first sub-expression to be compile time
  * subsumed.
  *
@@ -53,7 +53,7 @@
  *
  */
 #define __user_ok(addr, sz)	(((sz) <= TASK_SIZE) && \
-				 (((addr)+(sz)) <= get_fs()))
+				 ((addr) <= (get_fs() - (sz))))
 #define __access_ok(addr, sz)	(unlikely(__kernel_ok) || \
 				 likely(__user_ok((addr), (sz))))
 
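The __user_ok() change above replaces the sum-based bound (addr + sz) <= get_fs() with addr <= (get_fs() - sz). Because sz <= TASK_SIZE has already been checked, the subtraction cannot underflow, whereas the old addition can wrap around 32 bits for an address near the top of the address space and falsely pass the check. A small stand-alone sketch with made-up limits (not the kernel's TASK_SIZE or get_fs()) shows the difference:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's user address limit, for illustration only. */
#define TOY_TASK_SIZE	0x60000000u

/* Old form: the 32-bit addition can wrap past zero and pass the check. */
static int user_ok_old(uint32_t addr, uint32_t sz)
{
	return (sz <= TOY_TASK_SIZE) && ((uint32_t)(addr + sz) <= TOY_TASK_SIZE);
}

/* New form: sz <= TOY_TASK_SIZE is checked first, so the subtraction
 * cannot underflow and no addition can wrap. */
static int user_ok_new(uint32_t addr, uint32_t sz)
{
	return (sz <= TOY_TASK_SIZE) && (addr <= (TOY_TASK_SIZE - sz));
}

int main(void)
{
	uint32_t addr = 0xfffffff0u;	/* address near the top of 32-bit space */
	uint32_t sz   = 0x100;		/* addr + sz wraps past zero */

	printf("old check: %d (wrongly passes)\n", user_ok_old(addr, sz));
	printf("new check: %d (correctly fails)\n", user_ok_new(addr, sz));
	return 0;
}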
diff --git a/arch/arc/kernel/signal.c b/arch/arc/kernel/signal.c
index ee6ef2f60a28..7e95e1a86510 100644
--- a/arch/arc/kernel/signal.c
+++ b/arch/arc/kernel/signal.c
@@ -101,7 +101,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
 {
 	struct rt_sigframe __user *sf;
 	unsigned int magic;
-	int err;
 	struct pt_regs *regs = current_pt_regs();
 
 	/* Always make any pending restarted system calls return -EINTR */
@@ -119,15 +118,16 @@ SYSCALL_DEFINE0(rt_sigreturn)
 	if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
 		goto badframe;
 
-	err = restore_usr_regs(regs, sf);
-	err |= __get_user(magic, &sf->sigret_magic);
-	if (err)
+	if (__get_user(magic, &sf->sigret_magic))
 		goto badframe;
 
 	if (unlikely(is_do_ss_needed(magic)))
 		if (restore_altstack(&sf->uc.uc_stack))
 			goto badframe;
 
+	if (restore_usr_regs(regs, sf))
+		goto badframe;
+
 	/* Don't restart from sigreturn */
 	syscall_wont_restart(regs);
 
@@ -191,6 +191,15 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
 		return 1;
 
 	/*
+	 * w/o SA_SIGINFO, struct ucontext is partially populated (only
+	 * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
+	 * during signal handler execution. This works for SA_SIGINFO as well
+	 * although the semantics are now overloaded (the same reg state can be
+	 * inspected by userland: but are they allowed to fiddle with it ?
+	 */
+	err |= stash_usr_regs(sf, regs, set);
+
+	/*
 	 * SA_SIGINFO requires 3 args to signal handler:
 	 * #1: sig-no (common to any handler)
 	 * #2: struct siginfo
@@ -213,14 +222,6 @@ setup_rt_frame(int signo, struct k_sigaction *ka, siginfo_t *info,
 		magic = MAGIC_SIGALTSTK;
 	}
 
-	/*
-	 * w/o SA_SIGINFO, struct ucontext is partially populated (only
-	 * uc_mcontext/uc_sigmask) for kernel's normal user state preservation
-	 * during signal handler execution. This works for SA_SIGINFO as well
-	 * although the semantics are now overloaded (the same reg state can be
-	 * inspected by userland: but are they allowed to fiddle with it ?
-	 */
-	err |= stash_usr_regs(sf, regs, set);
 	err |= __put_user(magic, &sf->sigret_magic);
 	if (err)
 		return err;
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 0e51e69cf30d..3fde7de3ea67 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -227,12 +227,9 @@ void __attribute__((weak)) arc_local_timer_setup(unsigned int cpu)
 {
 	struct clock_event_device *clk = &per_cpu(arc_clockevent_device, cpu);
 
-	clockevents_calc_mult_shift(clk, arc_get_core_freq(), 5);
-
-	clk->max_delta_ns = clockevent_delta2ns(ARC_TIMER_MAX, clk);
 	clk->cpumask = cpumask_of(cpu);
-
-	clockevents_register_device(clk);
+	clockevents_config_and_register(clk, arc_get_core_freq(),
+					0, ARC_TIMER_MAX);
 
 	/*
 	 * setup the per-cpu timer IRQ handler - for all cpus
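The timer setup above drops the open-coded mult/shift calculation and max_delta_ns computation in favour of a single clockevents_config_and_register() call, which derives those values internally from the clock frequency and the min/max tick counts passed in. Roughly the bound the removed clockevent_delta2ns() line computed, shown as a stand-alone sketch with assumed numbers (the 80 MHz core clock and the 32-bit ARC_TIMER_MAX value are guesses for illustration, not taken from the ARC code):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL

/* Rough equivalent of the removed open-coded bound: the longest interval
 * (in ns) the timer can be programmed for is about max_cycles / freq,
 * scaled to nanoseconds.  The real helper uses the device's mult/shift. */
static uint64_t toy_delta2ns(uint64_t max_cycles, uint64_t freq_hz)
{
	return max_cycles * NSEC_PER_SEC / freq_hz;
}

int main(void)
{
	uint64_t timer_max = 0xFFFFFFFFULL;	/* assumed 32-bit counter limit */
	uint64_t core_freq = 80000000ULL;	/* hypothetical 80 MHz core clock */

	printf("max programmable delta ~ %llu ns\n",
	       (unsigned long long)toy_delta2ns(timer_max, core_freq));
	return 0;
}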
diff --git a/arch/arc/kernel/unaligned.c b/arch/arc/kernel/unaligned.c
index 28d170060747..7ff5b5c183bb 100644
--- a/arch/arc/kernel/unaligned.c
+++ b/arch/arc/kernel/unaligned.c
@@ -245,6 +245,12 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
 		regs->status32 &= ~STATUS_DE_MASK;
 	} else {
 		regs->ret += state.instr_len;
+
+		/* handle zero-overhead-loop */
+		if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
+			regs->ret = regs->lp_start;
+			regs->lp_count--;
+		}
 	}
 
 	return 0;
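The added zero-overhead-loop handling covers the case where the emulated instruction was the last one in an ARC hardware loop: after stepping regs->ret past it, landing exactly on lp_end with iterations remaining means hardware would have branched back to lp_start and consumed one iteration, so the fixup mirrors that. A toy model of the bookkeeping, with invented register values:

#include <stdio.h>

/* Minimal stand-in for the relevant pt_regs fields, illustration only. */
struct toy_regs {
	unsigned long ret;	/* address execution resumes at */
	unsigned long lp_start;	/* first instruction of the hardware loop */
	unsigned long lp_end;	/* address just past the loop body */
	unsigned long lp_count;	/* remaining iterations */
};

/* Advance past the emulated instruction, then mimic the hardware's
 * loop-back behaviour when that lands exactly on lp_end. */
static void fixup_zol(struct toy_regs *regs, unsigned long instr_len)
{
	regs->ret += instr_len;

	if ((regs->ret == regs->lp_end) && regs->lp_count) {
		regs->ret = regs->lp_start;
		regs->lp_count--;
	}
}

int main(void)
{
	struct toy_regs r = {
		.ret = 0x1020,		/* faulting instruction just before lp_end */
		.lp_start = 0x1000,
		.lp_end = 0x1024,
		.lp_count = 3,
	};

	fixup_zol(&r, 4);
	printf("ret=0x%lx lp_count=%lu\n", r.ret, r.lp_count); /* 0x1000, 2 */
	return 0;
}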