Diffstat (limited to 'kernel')
 kernel/sched.c | 41 ++++++++++++++++++++---------------------
 1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 2b2b780939c9..15ce772a471a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -225,7 +225,8 @@ struct rq {
 	unsigned long nr_uninterruptible;

 	unsigned long expired_timestamp;
-	unsigned long long timestamp_last_tick;
+	/* Cached timestamp set by update_cpu_clock() */
+	unsigned long long most_recent_timestamp;
 	struct task_struct *curr, *idle;
 	unsigned long next_balance;
 	struct mm_struct *prev_mm;
@@ -944,8 +945,8 @@ static void activate_task(struct task_struct *p, struct rq *rq, int local)
 	if (!local) {
 		/* Compensate for drifting sched_clock */
 		struct rq *this_rq = this_rq();
-		now = (now - this_rq->timestamp_last_tick)
-			+ rq->timestamp_last_tick;
+		now = (now - this_rq->most_recent_timestamp)
+			+ rq->most_recent_timestamp;
 	}
 #endif

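The hunk above, together with the wake_up_new_task(), pull_task(), and __migrate_task() hunks below, applies one conversion throughout: sched_clock() is a per-CPU clock and the CPUs' clocks can drift apart, so a timestamp taken on one CPU is rebased onto another CPU's clock by subtracting the source runqueue's most_recent_timestamp and adding the destination's. A minimal standalone sketch of that arithmetic (the struct and function here are illustrative stand-ins, not kernel code):

struct rq_clock {
	unsigned long long most_recent_timestamp;	/* latest sched_clock() sample */
};

/* Rebase a timestamp from the source CPU's clock domain onto the
 * destination CPU's: express it as an offset from the source's cached
 * reference sample, then re-apply that offset to the destination's. */
static unsigned long long rebase_timestamp(unsigned long long ts,
					   const struct rq_clock *src,
					   const struct rq_clock *dst)
{
	return (ts - src->most_recent_timestamp) + dst->most_recent_timestamp;
}

For example, if two CPUs sampled 1000 and 4000 at roughly the same instant, a source-domain timestamp of 900 rebases to 3900 in the destination domain; only the offset from the reference sample carries over.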
@@ -1689,8 +1690,8 @@ void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		 * Not the local CPU - must adjust timestamp. This should
 		 * get optimised away in the !CONFIG_SMP case.
 		 */
-		p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
-					+ rq->timestamp_last_tick;
+		p->timestamp = (p->timestamp - this_rq->most_recent_timestamp)
+					+ rq->most_recent_timestamp;
 		__activate_task(p, rq);
 		if (TASK_PREEMPTS_CURR(p, rq))
 			resched_task(rq->curr);
@@ -2068,8 +2069,8 @@ static void pull_task(struct rq *src_rq, struct prio_array *src_array,
 	set_task_cpu(p, this_cpu);
 	inc_nr_running(p, this_rq);
 	enqueue_task(p, this_array);
-	p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
-				+ this_rq->timestamp_last_tick;
+	p->timestamp = (p->timestamp - src_rq->most_recent_timestamp)
+				+ this_rq->most_recent_timestamp;
 	/*
 	 * Note that idle threads have a prio of MAX_PRIO, for this test
 	 * to be always true for them.
@@ -2105,10 +2106,15 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
 	 * 2) too many balance attempts have failed.
 	 */

-	if (sd->nr_balance_failed > sd->cache_nice_tries)
+	if (sd->nr_balance_failed > sd->cache_nice_tries) {
+#ifdef CONFIG_SCHEDSTATS
+		if (task_hot(p, rq->most_recent_timestamp, sd))
+			schedstat_inc(sd, lb_hot_gained[idle]);
+#endif
 		return 1;
+	}

-	if (task_hot(p, rq->timestamp_last_tick, sd))
+	if (task_hot(p, rq->most_recent_timestamp, sd))
 		return 0;
 	return 1;
 }
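This restructuring also moves the lb_hot_gained accounting here from move_tasks() (removed in the next hunk): a cache-hot task is now counted at the point the decision is made, and only when the failed-balance threshold actually forces it to migrate. A hedged sketch of the resulting decision tail, with plain ints standing in for the kernel's types and schedstat machinery:

static unsigned int hot_gained;	/* stand-in for sd->lb_hot_gained[idle] */

static int can_migrate_tail(int nr_balance_failed, int cache_nice_tries,
			    int hot)
{
	if (nr_balance_failed > cache_nice_tries) {
		if (hot)
			hot_gained++;	/* count only forced hot migrations */
		return 1;		/* migrate despite cache warmth */
	}
	if (hot)
		return 0;		/* otherwise keep a cache-hot task put */
	return 1;
}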
@@ -2206,11 +2212,6 @@ skip_queue:
 		goto skip_bitmap;
 	}

-#ifdef CONFIG_SCHEDSTATS
-	if (task_hot(tmp, busiest->timestamp_last_tick, sd))
-		schedstat_inc(sd, lb_hot_gained[idle]);
-#endif
-
 	pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
 	pulled++;
 	rem_load_move -= tmp->load_weight;
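This removal is the counterpart of the can_migrate_task() hunk above: the statistic is now gathered where the migration decision is made, against the same most_recent_timestamp the decision itself consults, instead of re-evaluating task_hot() on every pull here.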
@@ -2971,7 +2972,8 @@ EXPORT_PER_CPU_SYMBOL(kstat);
 static inline void
 update_cpu_clock(struct task_struct *p, struct rq *rq, unsigned long long now)
{
-	p->sched_time += now - max(p->timestamp, rq->timestamp_last_tick);
+	p->sched_time += now - p->last_ran;
+	p->last_ran = rq->most_recent_timestamp = now;
 }

 /*
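Where the old code approximated the start of the accounting interval as max(p->timestamp, rq->timestamp_last_tick), the task now carries its own last_ran mark, so every call accrues an exact delta and refreshes the runqueue's cached timestamp in the same step; that is also why the separate per-tick assignment in scheduler_tick() can be dropped below. A self-contained model of the new accounting (types and names are illustrative stand-ins, not kernel code):

#include <stdio.h>

struct task_model { unsigned long long sched_time, last_ran; };
struct rq_model { unsigned long long most_recent_timestamp; };

/* Mirror of the new update_cpu_clock(): accrue the exact time since the
 * task's clock was last advanced, then move the mark to "now". */
static void update_clock_model(struct task_model *p, struct rq_model *rq,
			       unsigned long long now)
{
	p->sched_time += now - p->last_ran;
	p->last_ran = rq->most_recent_timestamp = now;
}

int main(void)
{
	struct task_model p = { 0, 0 };
	struct rq_model rq = { 0 };

	update_clock_model(&p, &rq, 100);	/* first update at t=100 */
	update_clock_model(&p, &rq, 250);	/* next update at t=250 */
	printf("sched_time=%llu last_ran=%llu\n", p.sched_time, p.last_ran);
	/* prints: sched_time=250 last_ran=250 */
	return 0;
}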
@@ -2984,8 +2986,7 @@ unsigned long long current_sched_time(const struct task_struct *p)
 	unsigned long flags;

 	local_irq_save(flags);
-	ns = max(p->timestamp, task_rq(p)->timestamp_last_tick);
-	ns = p->sched_time + sched_clock() - ns;
+	ns = p->sched_time + sched_clock() - p->last_ran;
 	local_irq_restore(flags);

 	return ns;
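Reading the clock between updates follows directly from the model above: the accrued sched_time plus whatever has elapsed since last_ran. Continuing the worked example, if sched_clock() reads 300 at the query, current_sched_time() returns 250 + (300 - 250) = 300; interrupts are disabled around the read so sched_time and last_ran are sampled consistently.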
@@ -3176,8 +3177,6 @@ void scheduler_tick(void)

 	update_cpu_clock(p, rq, now);

-	rq->timestamp_last_tick = now;
-
 	if (p == rq->idle)
 		/* Task on the idle queue */
 		wake_priority_sleeper(rq);
@@ -5032,8 +5031,8 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
 	 * afterwards, and pretending it was a local activate.
 	 * This way is cleaner and logically correct.
 	 */
-	p->timestamp = p->timestamp - rq_src->timestamp_last_tick
-			+ rq_dest->timestamp_last_tick;
+	p->timestamp = p->timestamp - rq_src->most_recent_timestamp
+			+ rq_dest->most_recent_timestamp;
 	deactivate_task(p, rq_src);
 	__activate_task(p, rq_dest);
 	if (TASK_PREEMPTS_CURR(p, rq_dest))