| Field | Value | Date |
|---|---|---|
| author | Steve French <sfrench@us.ibm.com> | 2009-07-17 23:13:38 -0400 |
| committer | Steve French <sfrench@us.ibm.com> | 2009-07-17 23:13:38 -0400 |
| commit | 287638b2c533165c3c03dfa15196c2ba583cd287 | |
| tree | e753dd2cd7421a26e179c43d7f6d4d39541a4afb /kernel | |
| parent | f6c43385435640e056424034caac0d765c45e370 | |
| parent | a1cc1ba7aec1ba41317d227b1fe8d0f8c0cec232 | |
Merge branch 'master' of /pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'kernel')

| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | kernel/sched.c | 43 |
| -rw-r--r-- | kernel/sched_fair.c | 3 |
| -rw-r--r-- | kernel/sched_rt.c | 18 |
| -rw-r--r-- | kernel/trace/trace_functions.c | 2 |

4 files changed, 53 insertions, 13 deletions
```diff
diff --git a/kernel/sched.c b/kernel/sched.c
index 01f55ada3598..98972d366fdc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -493,6 +493,7 @@ struct rt_rq {
 #endif
 #ifdef CONFIG_SMP
 	unsigned long rt_nr_migratory;
+	unsigned long rt_nr_total;
 	int overloaded;
 	struct plist_head pushable_tasks;
 #endif
@@ -2571,15 +2572,37 @@ static void __sched_fork(struct task_struct *p)
 	p->se.avg_wakeup = sysctl_sched_wakeup_granularity;
 
 #ifdef CONFIG_SCHEDSTATS
-	p->se.wait_start = 0;
-	p->se.sum_sleep_runtime = 0;
-	p->se.sleep_start = 0;
-	p->se.block_start = 0;
-	p->se.sleep_max = 0;
-	p->se.block_max = 0;
-	p->se.exec_max = 0;
-	p->se.slice_max = 0;
-	p->se.wait_max = 0;
+	p->se.wait_start = 0;
+	p->se.wait_max = 0;
+	p->se.wait_count = 0;
+	p->se.wait_sum = 0;
+
+	p->se.sleep_start = 0;
+	p->se.sleep_max = 0;
+	p->se.sum_sleep_runtime = 0;
+
+	p->se.block_start = 0;
+	p->se.block_max = 0;
+	p->se.exec_max = 0;
+	p->se.slice_max = 0;
+
+	p->se.nr_migrations_cold = 0;
+	p->se.nr_failed_migrations_affine = 0;
+	p->se.nr_failed_migrations_running = 0;
+	p->se.nr_failed_migrations_hot = 0;
+	p->se.nr_forced_migrations = 0;
+	p->se.nr_forced2_migrations = 0;
+
+	p->se.nr_wakeups = 0;
+	p->se.nr_wakeups_sync = 0;
+	p->se.nr_wakeups_migrate = 0;
+	p->se.nr_wakeups_local = 0;
+	p->se.nr_wakeups_remote = 0;
+	p->se.nr_wakeups_affine = 0;
+	p->se.nr_wakeups_affine_attempts = 0;
+	p->se.nr_wakeups_passive = 0;
+	p->se.nr_wakeups_idle = 0;
+
 #endif
 
 	INIT_LIST_HEAD(&p->rt.run_list);
@@ -9074,7 +9097,7 @@ static void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq)
 #ifdef CONFIG_SMP
 	rt_rq->rt_nr_migratory = 0;
 	rt_rq->overloaded = 0;
-	plist_head_init(&rq->rt.pushable_tasks, &rq->lock);
+	plist_head_init(&rt_rq->pushable_tasks, &rq->lock);
 #endif
 
 	rt_rq->rt_time = 0;
```
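Taken together, the kernel/sched.c hunks add an `rt_nr_total` counter to `struct rt_rq`, reset the full set of schedstats fields in `__sched_fork()`, and make `init_rt_rq()` initialize the `pushable_tasks` list of the `rt_rq` it was passed instead of always touching the root runqueue's. Below is a minimal user-space sketch of that last fix only, not kernel code; every `toy_*` name is invented and the list type is a plain stand-in for the kernel's plist.

```c
#include <assert.h>
#include <stddef.h>

/* Toy circular list head standing in for the kernel's plist_head. */
struct toy_list { struct toy_list *next, *prev; };

struct toy_rt_rq { struct toy_list pushable_tasks; };
struct toy_rq    { struct toy_rt_rq rt; };

static void toy_list_init(struct toy_list *l) { l->next = l->prev = l; }

/* Old shape of the bug: always initializes the root runqueue's embedded
 * rt_rq and ignores the rt_rq argument, so a per-group rt_rq stays untouched. */
static void init_rt_rq_buggy(struct toy_rt_rq *rt_rq, struct toy_rq *rq)
{
	(void)rt_rq;
	toy_list_init(&rq->rt.pushable_tasks);
}

/* Fixed shape: initializes exactly the rt_rq it was asked to set up. */
static void init_rt_rq_fixed(struct toy_rt_rq *rt_rq, struct toy_rq *rq)
{
	(void)rq;
	toy_list_init(&rt_rq->pushable_tasks);
}

int main(void)
{
	struct toy_rq rq = { 0 };
	struct toy_rt_rq group_rt_rq = { 0 };	/* stands in for a group's rt_rq */

	init_rt_rq_buggy(&group_rt_rq, &rq);
	assert(group_rt_rq.pushable_tasks.next == NULL);	/* left untouched */

	init_rt_rq_fixed(&group_rt_rq, &rq);
	assert(group_rt_rq.pushable_tasks.next == &group_rt_rq.pushable_tasks);
	return 0;
}
```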
```diff
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index ba7fd6e9556f..7c248dc30f41 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -687,7 +687,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
 		 * all of which have the same weight.
 		 */
 		if (sched_feat(NORMALIZED_SLEEPER) &&
-				task_of(se)->policy != SCHED_IDLE)
+				(!entity_is_task(se) ||
+				 task_of(se)->policy != SCHED_IDLE))
 			thresh = calc_delta_fair(thresh, se);
 
 		vruntime -= thresh;
```
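The `place_entity()` change guards `task_of(se)` behind `entity_is_task(se)`: with group scheduling enabled a `sched_entity` may represent a whole group rather than a task, and only task entities have a scheduling policy to inspect. The user-space model below sketches the new condition under that reading; the `toy_*` types are invented, and for brevity the task's `policy` is folded into the entity rather than living on a separate `task_struct`.

```c
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define TOY_SCHED_IDLE 5	/* same numeric value as SCHED_IDLE, used only symbolically here */

struct toy_cfs_rq { int unused; };

struct toy_entity {
	struct toy_cfs_rq *my_q;	/* non-NULL only for group entities */
	int policy;			/* stand-in for task_of(se)->policy */
};

/* A task entity owns no runqueue of its own. */
static bool entity_is_task(const struct toy_entity *se)
{
	return se->my_q == NULL;
}

/* Mirrors the fixed condition: group entities always qualify, task entities
 * only when they are not SCHED_IDLE. The old code inspected task_of(se)
 * unconditionally, which is not valid for group entities. */
static bool sleeper_credit_applies(const struct toy_entity *se)
{
	return !entity_is_task(se) || se->policy != TOY_SCHED_IDLE;
}

int main(void)
{
	struct toy_cfs_rq group_queue = { 0 };
	struct toy_entity group     = { .my_q = &group_queue, .policy = 0 };
	struct toy_entity idle_task = { .my_q = NULL, .policy = TOY_SCHED_IDLE };
	struct toy_entity norm_task = { .my_q = NULL, .policy = 0 };

	printf("group entity:    %d\n", sleeper_credit_applies(&group));	/* 1 */
	printf("SCHED_IDLE task: %d\n", sleeper_credit_applies(&idle_task));	/* 0 */
	printf("normal task:     %d\n", sleeper_credit_applies(&norm_task));	/* 1 */
	return 0;
}
```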
```diff
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 9bf0d2a73045..3918e01994e0 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -10,6 +10,8 @@ static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
 
 #ifdef CONFIG_RT_GROUP_SCHED
 
+#define rt_entity_is_task(rt_se) (!(rt_se)->my_q)
+
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
 	return rt_rq->rq;
@@ -22,6 +24,8 @@ static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
 
 #else /* CONFIG_RT_GROUP_SCHED */
 
+#define rt_entity_is_task(rt_se) (1)
+
 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
 {
 	return container_of(rt_rq, struct rq, rt);
@@ -73,7 +77,7 @@ static inline void rt_clear_overload(struct rq *rq)
 
 static void update_rt_migration(struct rt_rq *rt_rq)
 {
-	if (rt_rq->rt_nr_migratory && (rt_rq->rt_nr_running > 1)) {
+	if (rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1) {
 		if (!rt_rq->overloaded) {
 			rt_set_overload(rq_of_rt_rq(rt_rq));
 			rt_rq->overloaded = 1;
@@ -86,6 +90,12 @@ static void update_rt_migration(struct rt_rq *rt_rq)
 
 static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+	if (!rt_entity_is_task(rt_se))
+		return;
+
+	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+	rt_rq->rt_nr_total++;
 	if (rt_se->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory++;
 
@@ -94,6 +104,12 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 
 static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
+	if (!rt_entity_is_task(rt_se))
+		return;
+
+	rt_rq = &rq_of_rt_rq(rt_rq)->rt;
+
+	rt_rq->rt_nr_total--;
 	if (rt_se->nr_cpus_allowed > 1)
 		rt_rq->rt_nr_migratory--;
 
```
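The sched_rt.c hunks make the migration statistics count tasks only: group entities return early from `inc_rt_migration()`/`dec_rt_migration()`, the accounting is redirected to the root runqueue's `rt_rq`, and the new `rt_nr_total` tracks how many RT tasks are queued there. `update_rt_migration()` then declares overload when more than one RT task is present and at least one can migrate, instead of testing `rt_nr_running`, which counts queued entities (including group entities) rather than tasks. The sketch below is a condensed user-space model of that accounting, not kernel code; the `toy_*` names are invented and `update_rt_migration()` is simplified to recompute a flag instead of calling `rt_set_overload()`/`rt_clear_overload()`.

```c
#include <assert.h>
#include <stdbool.h>

struct toy_rt_rq {
	unsigned long rt_nr_migratory;	/* queued RT tasks allowed on >1 CPU */
	unsigned long rt_nr_total;	/* all queued RT tasks */
	int overloaded;
};

struct toy_rt_entity {
	bool is_task;		/* models rt_entity_is_task() */
	int nr_cpus_allowed;
};

/* Overloaded only with more than one RT task and at least one migratory. */
static void update_rt_migration(struct toy_rt_rq *rt_rq)
{
	rt_rq->overloaded = rt_rq->rt_nr_migratory && rt_rq->rt_nr_total > 1;
}

/* Counterpart of inc_rt_migration(); dec_rt_migration() would mirror it
 * with the counters decremented. */
static void inc_rt_migration(struct toy_rt_entity *rt_se, struct toy_rt_rq *root)
{
	if (!rt_se->is_task)	/* group entities are not counted */
		return;

	root->rt_nr_total++;
	if (rt_se->nr_cpus_allowed > 1)
		root->rt_nr_migratory++;

	update_rt_migration(root);
}

int main(void)
{
	struct toy_rt_rq root = { 0 };
	struct toy_rt_entity pinned  = { .is_task = true,  .nr_cpus_allowed = 1 };
	struct toy_rt_entity roaming = { .is_task = true,  .nr_cpus_allowed = 4 };
	struct toy_rt_entity group   = { .is_task = false, .nr_cpus_allowed = 4 };

	inc_rt_migration(&group, &root);	/* ignored */
	inc_rt_migration(&pinned, &root);	/* 1 task, none migratory */
	assert(!root.overloaded);

	inc_rt_migration(&roaming, &root);	/* 2 tasks, 1 migratory */
	assert(root.overloaded);
	return 0;
}
```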
```diff
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 7402144bff21..75ef000613c3 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -363,7 +363,7 @@ ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable)
  out_reg:
 	ret = register_ftrace_function_probe(glob, ops, count);
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 static struct ftrace_func_command ftrace_traceon_cmd = {
```
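The trace_functions.c change collapses any non-negative result to 0: `register_ftrace_function_probe()` appears to return the number of functions the probe was attached to on success, while the command callback is expected to report only 0 or a negative errno, so a positive count must not be passed through. A tiny stand-alone sketch of that idiom follows; the helper names are invented.

```c
#include <assert.h>
#include <errno.h>

/* Helper in the style of register_ftrace_function_probe(): returns a count
 * of matches (>= 0) on success, or a negative errno on failure. */
static int probe_register_count(int available)
{
	return available ? available : -ENODEV;
}

/* Callback contract: report only 0 on success or a negative errno. */
static int onoff_callback(int available)
{
	int ret = probe_register_count(available);

	return ret < 0 ? ret : 0;	/* swallow the positive count */
}

int main(void)
{
	assert(onoff_callback(3) == 0);
	assert(onoff_callback(0) == -ENODEV);
	return 0;
}
```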
