Diffstat (limited to 'kernel')
-rw-r--r--   kernel/hrtimer.c   13
-rw-r--r--   kernel/ptrace.c     3
-rw-r--r--   kernel/sched.c     10
3 files changed, 14 insertions, 12 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 2b6e1757aedd..5ae51f1bc7c8 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -418,8 +418,19 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 	/* Switch the timer base, if necessary: */
 	new_base = switch_hrtimer_base(timer, base);
 
-	if (mode == HRTIMER_REL)
+	if (mode == HRTIMER_REL) {
 		tim = ktime_add(tim, new_base->get_time());
+		/*
+		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
+		 * to signal that they simply return xtime in
+		 * do_gettimeoffset(). In this case we want to round up by
+		 * resolution when starting a relative timer, to avoid short
+		 * timeouts. This will go away with the GTOD framework.
+		 */
+#ifdef CONFIG_TIME_LOW_RES
+		tim = ktime_add(tim, base->resolution);
+#endif
+	}
 	timer->expires = tim;
 
 	enqueue_hrtimer(timer, new_base);
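
The comment in the hunk above explains the hazard: on a low-resolution clock, a relative timer armed between ticks can be satisfied by the very next tick, i.e. earlier than the requested delta. A rough userspace sketch of the same arithmetic (not kernel code; round_up_expiry() and the sample delta are hypothetical, made up for the demo):

/*
 * Userspace illustration only. Mirrors the patch's rounding:
 * tim = ktime_add(tim, base->resolution);
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static int64_t round_up_expiry(int64_t now_ns, int64_t delta_ns, int64_t res_ns)
{
	/* Round the expiry up by one resolution unit, as the hunk does. */
	return now_ns + delta_ns + res_ns;
}

int main(void)
{
	struct timespec res;
	int64_t res_ns, now_ns = 0, delta_ns = 1000;	/* ask for 1us */

	clock_getres(CLOCK_MONOTONIC, &res);
	res_ns = (int64_t)res.tv_sec * 1000000000LL + res.tv_nsec;

	printf("clock resolution : %lld ns\n", (long long)res_ns);
	printf("naive expiry     : %lld ns\n", (long long)(now_ns + delta_ns));
	printf("rounded expiry   : %lld ns\n",
	       (long long)round_up_expiry(now_ns, delta_ns, res_ns));
	return 0;
}

If the resolution is, say, 10ms, the naive 1us expiry can fire almost immediately at the next tick, while the rounded expiry is guaranteed to wait at least the requested delta.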
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 5f33cdb6fff5..d2cf144d0af5 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -242,8 +242,7 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 	if (write) {
 		copy_to_user_page(vma, page, addr,
 				  maddr + offset, buf, bytes);
-		if (!PageCompound(page))
-			set_page_dirty_lock(page);
+		set_page_dirty_lock(page);
 	} else {
 		copy_from_user_page(vma, page, addr,
 				    buf, maddr + offset, bytes);
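
For context, PTRACE_POKEDATA is one userspace path that typically lands in access_process_vm() with write set, so every poke dirties the target page through the hunk above. A minimal parent/child demo (a hypothetical sketch, Linux-only, error handling omitted; &value is valid in the child because fork() duplicates the address space):

#include <signal.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static long value = 42;			/* the page the tracer will write into */

int main(void)
{
	pid_t pid = fork();

	if (pid == 0) {				/* child: become traceable */
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);			/* wait for the parent */
		printf("child sees value = %ld\n", value);	/* prints 99 */
		return 0;
	}

	waitpid(pid, NULL, 0);			/* child is now stopped */
	/* In-kernel this write reaches access_process_vm(..., write=1). */
	ptrace(PTRACE_POKEDATA, pid, &value, (void *)99L);
	ptrace(PTRACE_CONT, pid, NULL, NULL);
	waitpid(pid, NULL, 0);
	return 0;
}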
diff --git a/kernel/sched.c b/kernel/sched.c
index 87d93be336a1..66d957227de9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1204,9 +1204,6 @@ static int try_to_wake_up(task_t *p, unsigned int state, int sync)
 		}
 	}
 
-	if (p->last_waker_cpu != this_cpu)
-		goto out_set_cpu;
-
 	if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
 		goto out_set_cpu;
 
@@ -1277,8 +1274,6 @@ out_set_cpu:
 		cpu = task_cpu(p);
 	}
 
-	p->last_waker_cpu = this_cpu;
-
 out_activate:
 #endif /* CONFIG_SMP */
 	if (old_state == TASK_UNINTERRUPTIBLE) {
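
The two hunks above delete the last_waker_cpu bookkeeping from try_to_wake_up(): the affine wakeup path was skipped unless the waking CPU was also the task's previous waker. A toy model of just the deleted condition (the struct and driver code here are hypothetical; only the last_waker_cpu field mirrors the diff):

#include <stdio.h>

struct task {
	int last_waker_cpu;
};

/*
 * Models the deleted check:
 *     if (p->last_waker_cpu != this_cpu)
 *             goto out_set_cpu;
 * i.e. fall back to the generic CPU-selection path unless the same
 * CPU keeps doing the waking.
 */
static int take_affine_path(const struct task *p, int this_cpu)
{
	return p->last_waker_cpu == this_cpu;
}

int main(void)
{
	struct task p = { .last_waker_cpu = 2 };

	printf("woken from cpu 2: affine path? %d\n", take_affine_path(&p, 2));
	printf("woken from cpu 3: affine path? %d\n", take_affine_path(&p, 3));
	return 0;
}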
@@ -1360,12 +1355,9 @@ void fastcall sched_fork(task_t *p, int clone_flags)
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->sched_info, 0, sizeof(p->sched_info));
 #endif
-#if defined(CONFIG_SMP)
-	p->last_waker_cpu = cpu;
-#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
+#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
 	p->oncpu = 0;
 #endif
-#endif
 #ifdef CONFIG_PREEMPT
 	/* Want to start with kernel preemption disabled. */
 	task_thread_info(p)->preempt_count = 1;
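
With p->last_waker_cpu gone, nothing in sched_fork() is guarded by CONFIG_SMP alone anymore, so the nested conditionals collapse into a single test. A standalone before/after demo of that preprocessor change (the two macro definitions exist here only so the program prints both lines):

#include <stdio.h>

#define CONFIG_SMP 1
#define __ARCH_WANT_UNLOCKED_CTXSW 1

int main(void)
{
	/* Before: nested, because CONFIG_SMP alone guarded extra code. */
#if defined(CONFIG_SMP)
#if defined(__ARCH_WANT_UNLOCKED_CTXSW)
	puts("nested form compiled in");
#endif
#endif
	/* After: one combined condition, identical effect. */
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
	puts("combined form compiled in");
#endif
	return 0;
}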