path: root/kernel/sched
author    Ingo Molnar <mingo@kernel.org>    2016-01-06 05:02:29 -0500
committer Ingo Molnar <mingo@kernel.org>    2016-01-06 05:02:29 -0500
commit    567bee2803cb46caeb6011de5b738fde33dc3896 (patch)
tree      05bab01377bffa356bfbe06c4b6193b23b7c24ca /kernel/sched
parent    aa0b7ae06387d40a988ce16a189082dee6e570bc (diff)
parent    093e5840ae76f1082633503964d035f40ed0216d (diff)
Merge branch 'sched/urgent' into sched/core, to pick up fixes before merging new patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/clock.c   2
-rw-r--r--  kernel/sched/core.c   12
-rw-r--r--  kernel/sched/fair.c    4
-rw-r--r--  kernel/sched/wait.c   20
4 files changed, 19 insertions, 19 deletions
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index c0a205101c23..caf4041f5b0a 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -1,7 +1,7 @@
 /*
  * sched_clock for unstable cpu clocks
  *
- * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2008 Red Hat, Inc., Peter Zijlstra
  *
  * Updates and enhancements:
  * Copyright (C) 2008 Red Hat, Inc. Steven Rostedt <srostedt@redhat.com>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index aa3f97869217..1315cec45882 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8260,12 +8260,12 @@ static void cpu_cgroup_fork(struct task_struct *task, void *private)
         sched_move_task(task);
 }
 
-static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
-                                 struct cgroup_taskset *tset)
+static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
 {
         struct task_struct *task;
+        struct cgroup_subsys_state *css;
 
-        cgroup_taskset_for_each(task, tset) {
+        cgroup_taskset_for_each(task, css, tset) {
 #ifdef CONFIG_RT_GROUP_SCHED
                 if (!sched_rt_can_attach(css_tg(css), task))
                         return -EINVAL;
@@ -8278,12 +8278,12 @@ static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
         return 0;
 }
 
-static void cpu_cgroup_attach(struct cgroup_subsys_state *css,
-                              struct cgroup_taskset *tset)
+static void cpu_cgroup_attach(struct cgroup_taskset *tset)
 {
         struct task_struct *task;
+        struct cgroup_subsys_state *css;
 
-        cgroup_taskset_for_each(task, tset)
+        cgroup_taskset_for_each(task, css, tset)
                 sched_move_task(task);
 }
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1093873dcd0f..93efb962c2e1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -17,7 +17,7 @@
  * Copyright (C) 2007, Thomas Gleixner <tglx@linutronix.de>
  *
  * Adaptive scheduling granularity, math enhancements by Peter Zijlstra
- * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
  */
 
 #include <linux/latencytop.h>
@@ -2780,7 +2780,7 @@ static inline int update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
         int decayed, removed = 0;
 
         if (atomic_long_read(&cfs_rq->removed_load_avg)) {
-                long r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
+                s64 r = atomic_long_xchg(&cfs_rq->removed_load_avg, 0);
                 sa->load_avg = max_t(long, sa->load_avg - r, 0);
                 sa->load_sum = max_t(s64, sa->load_sum - r * LOAD_AVG_MAX, 0);
                 removed = 1;
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index f10bd873e684..f15d6b6a538a 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -392,7 +392,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
         do {
                 prepare_to_wait(wq, &q->wait, mode);
                 if (test_bit(q->key.bit_nr, q->key.flags))
-                        ret = (*action)(&q->key);
+                        ret = (*action)(&q->key, mode);
         } while (test_bit(q->key.bit_nr, q->key.flags) && !ret);
         finish_wait(wq, &q->wait);
         return ret;
@@ -431,7 +431,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
                 prepare_to_wait_exclusive(wq, &q->wait, mode);
                 if (!test_bit(q->key.bit_nr, q->key.flags))
                         continue;
-                ret = action(&q->key);
+                ret = action(&q->key, mode);
                 if (!ret)
                         continue;
                 abort_exclusive_wait(wq, &q->wait, mode, &q->key);
@@ -581,43 +581,43 @@ void wake_up_atomic_t(atomic_t *p)
 }
 EXPORT_SYMBOL(wake_up_atomic_t);
 
-__sched int bit_wait(struct wait_bit_key *word)
+__sched int bit_wait(struct wait_bit_key *word, int mode)
 {
         schedule();
-        if (signal_pending(current))
+        if (signal_pending_state(mode, current))
                 return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL(bit_wait);
 
-__sched int bit_wait_io(struct wait_bit_key *word)
+__sched int bit_wait_io(struct wait_bit_key *word, int mode)
 {
         io_schedule();
-        if (signal_pending(current))
+        if (signal_pending_state(mode, current))
                 return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL(bit_wait_io);
 
-__sched int bit_wait_timeout(struct wait_bit_key *word)
+__sched int bit_wait_timeout(struct wait_bit_key *word, int mode)
 {
         unsigned long now = READ_ONCE(jiffies);
         if (time_after_eq(now, word->timeout))
                 return -EAGAIN;
         schedule_timeout(word->timeout - now);
-        if (signal_pending(current))
+        if (signal_pending_state(mode, current))
                 return -EINTR;
         return 0;
 }
 EXPORT_SYMBOL_GPL(bit_wait_timeout);
 
-__sched int bit_wait_io_timeout(struct wait_bit_key *word)
+__sched int bit_wait_io_timeout(struct wait_bit_key *word, int mode)
 {
         unsigned long now = READ_ONCE(jiffies);
         if (time_after_eq(now, word->timeout))
                 return -EAGAIN;
         io_schedule_timeout(word->timeout - now);
-        if (signal_pending(current))
+        if (signal_pending_state(mode, current))
                 return -EINTR;
         return 0;
 }