Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  16  ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index e1dc903d5a75..788ecce1e0e4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -521,7 +521,7 @@ static inline void sched_info_dequeued(task_t *t)
  * long it was waiting to run. We also note when it began so that we
  * can keep stats on how long its timeslice is.
  */
-static inline void sched_info_arrive(task_t *t)
+static void sched_info_arrive(task_t *t)
 {
 	unsigned long now = jiffies, diff = 0;
 	struct runqueue *rq = task_rq(t);
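The comment in this hunk is the whole contract: charge the time the task sat queued, then stamp its arrival so timeslice stats can follow. A self-contained mock of that accounting — the field names imitate the 2.6-era struct sched_info but are assumptions here, not quotes of the tree:

#include <stdio.h>

/* Mock of the per-task schedstats this hunk touches. */
struct sched_info_mock {
	unsigned long last_queued;	/* jiffies when enqueued, 0 if not queued */
	unsigned long last_arrival;	/* jiffies when it last began running */
	unsigned long run_delay;	/* total jiffies spent waiting to run */
	unsigned long pcnt;		/* number of arrivals on a cpu */
};

/* What the comment describes: account the wait, then note when it began. */
static void arrive(struct sched_info_mock *si, unsigned long now)
{
	if (si->last_queued) {
		si->run_delay += now - si->last_queued;
		si->last_queued = 0;
	}
	si->last_arrival = now;
	si->pcnt++;
}

int main(void)
{
	struct sched_info_mock si = { .last_queued = 100 };

	arrive(&si, 112);	/* queued at jiffy 100, starts running at 112 */
	printf("waited %lu jiffies\n", si.run_delay);	/* -> 12 */
	return 0;
}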
@@ -1007,7 +1007,7 @@ void kick_process(task_t *p)
  * We want to under-estimate the load of migration sources, to
  * balance conservatively.
  */
-static inline unsigned long __source_load(int cpu, int type, enum idle_type idle)
+static unsigned long __source_load(int cpu, int type, enum idle_type idle)
 {
 	runqueue_t *rq = cpu_rq(cpu);
 	unsigned long running = rq->nr_running;
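Per the comment, a migration source's load is deliberately under-estimated: in this era that meant taking the smaller of the decayed cpu_load average and the instantaneous nr_running * SCHED_LOAD_SCALE. A runnable sketch of that min() idea with made-up numbers (SCHED_LOAD_SCALE = 128 is the real fixed-point unit; the rest is hypothetical):

#include <stdio.h>

#define SCHED_LOAD_SCALE 128UL	/* fixed-point load unit, as in sched.h */

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long nr_running = 3;	/* tasks on the would-be source cpu */
	unsigned long load_now = nr_running * SCHED_LOAD_SCALE;	/* 384 */
	unsigned long cpu_load = 250;	/* decayed average, hypothetical */

	/* Under-estimate the source: take the smaller view of its load,
	 * so tasks migrate only when the cpu looks busy by both measures. */
	printf("source load = %lu\n", min_ul(cpu_load, load_now));	/* 250 */
	return 0;
}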
@@ -1870,7 +1870,7 @@ void sched_exec(void)
  * pull_task - move a task from a remote runqueue to the local runqueue.
  * Both runqueues must be locked.
  */
-static inline
+static
 void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
 	       runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
 {
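"Both runqueues must be locked" is the precondition that makes pull_task() safe, and taking two runqueue locks requires a fixed acquisition order — kernel/sched.c enforces one in double_rq_lock(). A userspace sketch of the same discipline; the pthread framing is illustrative, only the ordering rule is the point:

#include <pthread.h>
#include <stdint.h>

/* Always take the lower-addressed lock first: two CPUs pulling from
 * each other then agree on the order and cannot ABBA-deadlock.
 * (The kernel compares the runqueue pointers directly; the uintptr_t
 * cast here just keeps the comparison well-defined in portable C.) */
static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
	} else if ((uintptr_t)a < (uintptr_t)b) {
		pthread_mutex_lock(a);
		pthread_mutex_lock(b);
	} else {
		pthread_mutex_lock(b);
		pthread_mutex_lock(a);
	}
}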
@@ -1892,7 +1892,7 @@ void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
 /*
  * can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
  */
-static inline
+static
 int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
 		     struct sched_domain *sd, enum idle_type idle,
 		     int *all_pinned)
@@ -2378,7 +2378,7 @@ out_balanced:
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
  */
-static inline void idle_balance(int this_cpu, runqueue_t *this_rq)
+static void idle_balance(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *sd;
 
@@ -2762,7 +2762,7 @@ static inline void wakeup_busy_runqueue(runqueue_t *rq)
 		resched_task(rq->idle);
 }
 
-static inline void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
+static void wake_sleeping_dependent(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
@@ -2816,7 +2816,7 @@ static inline unsigned long smt_slice(task_t *p, struct sched_domain *sd)
 	return p->time_slice * (100 - sd->per_cpu_gain) / 100;
 }
 
-static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
+static int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
 {
 	struct sched_domain *tmp, *sd = NULL;
 	cpumask_t sibling_map;
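The smt_slice() context line above is the knob dependent_sleeper() works with: a task on one SMT sibling effectively spends only the share of its timeslice that the per-CPU hardware gain does not cover. A runnable check of that arithmetic (the gain of 25 used here is the usual sibling-domain default of the era, but treat it as an assumption):

#include <stdio.h>

/* Same formula as the context line above, lifted out of task_t/sched_domain. */
static unsigned long smt_slice(unsigned long time_slice,
			       unsigned long per_cpu_gain)
{
	return time_slice * (100 - per_cpu_gain) / 100;
}

int main(void)
{
	/* 100-jiffy slice, 25% per-cpu SMT gain -> 75 effective jiffies */
	printf("%lu\n", smt_slice(100, 25));
	return 0;
}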
@@ -6008,7 +6008,7 @@ next_sg:
  * Detach sched domains from a group of cpus specified in cpu_map
  * These cpus will now be attached to the NULL domain
  */
-static inline void detach_destroy_domains(const cpumask_t *cpu_map)
+static void detach_destroy_domains(const cpumask_t *cpu_map)
 {
 	int i;
 
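For reference, the body behind this last hunk is a short loop: each cpu in cpu_map is re-attached to the NULL domain, then the arch-level domain data is destroyed. A sketch from memory of that shape — the helper names (cpu_attach_domain, arch_destroy_sched_domains) look right for this era, but verify against the tree:

	int i;

	for_each_cpu_mask(i, *cpu_map)
		cpu_attach_domain(NULL, i);	/* NULL domain: cpu opts out of balancing */
	arch_destroy_sched_domains(cpu_map);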