Diffstat (limited to 'kernel')
 kernel/sched.c       | 68 +++++++++++++++++++++++++++++++++++++++++++++++++++-----------------
 kernel/sched_debug.c |  3 ++-
 2 files changed, 53 insertions(+), 18 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 45e17b83b7f1..96e9b82246d2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -262,7 +262,8 @@ struct rq {
 	s64 clock_max_delta;

 	unsigned int clock_warps, clock_overflows;
-	unsigned int clock_unstable_events;
+	u64 idle_clock;
+	unsigned int clock_deep_idle_events;
 	u64 tick_timestamp;

 	atomic_t nr_iowait;
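The two fields that replace clock_unstable_events, annotated (a sketch of just this fragment of struct rq; the comments are editorial, not part of the patch):

	u64		idle_clock;		/* accumulated ns that the PM code
						 * reported as slept in deep idle */
	unsigned int	clock_deep_idle_events;	/* count of clock-stopping idle
						 * sleeps, exported for debugging */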
@@ -556,18 +557,40 @@ static inline struct rq *this_rq_lock(void)
 }

 /*
- * CPU frequency is/was unstable - start new by setting prev_clock_raw:
+ * We are going deep-idle (irqs are disabled):
  */
-void sched_clock_unstable_event(void)
+void sched_clock_idle_sleep_event(void)
 {
-	unsigned long flags;
-	struct rq *rq;
+	struct rq *rq = cpu_rq(smp_processor_id());

-	rq = task_rq_lock(current, &flags);
-	rq->prev_clock_raw = sched_clock();
-	rq->clock_unstable_events++;
-	task_rq_unlock(rq, &flags);
+	spin_lock(&rq->lock);
+	__update_rq_clock(rq);
+	spin_unlock(&rq->lock);
+	rq->clock_deep_idle_events++;
 }
+EXPORT_SYMBOL_GPL(sched_clock_idle_sleep_event);
+
+/*
+ * We just idled delta nanoseconds (called with irqs disabled):
+ */
+void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+	struct rq *rq = cpu_rq(smp_processor_id());
+	u64 now = sched_clock();
+
+	rq->idle_clock += delta_ns;
+	/*
+	 * Override the previous timestamp and ignore all
+	 * sched_clock() deltas that occurred while we idled,
+	 * and use the PM-provided delta_ns to advance the
+	 * rq clock:
+	 */
+	spin_lock(&rq->lock);
+	rq->prev_clock_raw = now;
+	rq->clock += delta_ns;
+	spin_unlock(&rq->lock);
+}
+EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);

 /*
  * resched_task - mark a task 'to be rescheduled now'.
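For illustration, a PM/idle driver would bracket a clock-stopping C-state with the two new hooks roughly as follows (a hypothetical sketch: the hook names come from this patch, but enter_deep_c_state() and read_always_running_timer_ns() are made-up placeholders):

	u64 t1, t2;

	/* sync the rq clock before sched_clock() goes unreliable */
	sched_clock_idle_sleep_event();

	t1 = read_always_running_timer_ns();	/* e.g. a PM timer */
	enter_deep_c_state();			/* TSC may stop here */
	t2 = read_always_running_timer_ns();

	/* advance the rq clock by the externally measured idle time */
	sched_clock_idle_wakeup_event(t2 - t1);

Note that sched_clock_idle_wakeup_event() expects irqs to still be disabled, per its comment.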
@@ -2494,7 +2517,7 @@ group_next:
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
-	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task/2) {
+	if (*imbalance + SCHED_LOAD_SCALE_FUZZ < busiest_load_per_task) {
 		unsigned long tmp, pwr_now, pwr_move;
 		unsigned int imbn;

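The effect of dropping the /2: with a single nice-0 task carrying a load of SCHED_LOAD_SCALE (1024), the conservative small-imbalance fix-up path is now taken whenever the computed imbalance plus the fuzz term falls below the full per-task load, rather than below half of it, so it covers a much wider range of light imbalances.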
@@ -3020,6 +3043,7 @@ static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
 	struct sched_domain *sd;
 	/* Earliest time when we have to do rebalance again */
 	unsigned long next_balance = jiffies + 60*HZ;
+	int update_next_balance = 0;

 	for_each_domain(cpu, sd) {
 		if (!(sd->flags & SD_LOAD_BALANCE))
@@ -3056,8 +3080,10 @@ static inline void rebalance_domains(int cpu, enum cpu_idle_type idle)
 		if (sd->flags & SD_SERIALIZE)
 			spin_unlock(&balancing);
 out:
-		if (time_after(next_balance, sd->last_balance + interval))
+		if (time_after(next_balance, sd->last_balance + interval)) {
 			next_balance = sd->last_balance + interval;
+			update_next_balance = 1;
+		}

 		/*
 		 * Stop the load balance at this level. There is another
@@ -3067,7 +3093,14 @@ out:
 		if (!balance)
 			break;
 	}
-	rq->next_balance = next_balance;
+
+	/*
+	 * next_balance will be updated only when there is a need.
+	 * When the CPU is attached to the null domain, for example,
+	 * it will not be updated.
+	 */
+	if (likely(update_next_balance))
+		rq->next_balance = next_balance;
 }

 /*
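Condensed, the tail of rebalance_domains() now works like this (an editorial paraphrase of the code above, not patch text):

	unsigned long next_balance = jiffies + 60*HZ;	/* pessimistic default */
	int update_next_balance = 0;

	for_each_domain(cpu, sd) {
		/* ... try to balance sd, compute its interval ... */
		if (time_after(next_balance, sd->last_balance + interval)) {
			next_balance = sd->last_balance + interval;
			update_next_balance = 1;	/* a real domain tightened it */
		}
	}

	/*
	 * If no domain was considered (e.g. the CPU sits in the null
	 * domain), next_balance still holds the 60-second default, and
	 * writing it to rq->next_balance would wrongly defer rebalancing.
	 */
	if (likely(update_next_balance))
		rq->next_balance = next_balance;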
@@ -4890,7 +4923,7 @@ static inline void sched_init_granularity(void)
 	if (sysctl_sched_granularity > gran_limit)
 		sysctl_sched_granularity = gran_limit;

-	sysctl_sched_runtime_limit = sysctl_sched_granularity * 4;
+	sysctl_sched_runtime_limit = sysctl_sched_granularity * 8;
 	sysctl_sched_wakeup_granularity = sysctl_sched_granularity / 2;
 }

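For scale: with sysctl_sched_granularity at, say, 10 ms after the gran_limit clamp, this raises sysctl_sched_runtime_limit from 40 ms to 80 ms, while sysctl_sched_wakeup_granularity stays at 5 ms.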
@@ -5234,15 +5267,16 @@ static void migrate_dead_tasks(unsigned int dead_cpu)
 static struct ctl_table sd_ctl_dir[] = {
 	{
 		.procname	= "sched_domain",
-		.mode		= 0755,
+		.mode		= 0555,
 	},
 	{0,},
 };

 static struct ctl_table sd_ctl_root[] = {
 	{
+		.ctl_name	= CTL_KERN,
 		.procname	= "kernel",
-		.mode		= 0755,
+		.mode		= 0555,
 		.child		= sd_ctl_dir,
 	},
 	{0,},
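For contrast with the 0555 (read-only, searchable) directory modes above, a writable leaf under one of these directories would look roughly like this in the ctl_table style of this era (a hypothetical sketch; my_tunable is a placeholder, not from the patch):

	static int my_tunable;

	static struct ctl_table my_leaf[] = {
		{
			.procname	= "my_tunable",
			.data		= &my_tunable,
			.maxlen		= sizeof(int),
			.mode		= 0644,		/* leaf files may be writable */
			.proc_handler	= &proc_dointvec,
		},
		{0,},
	};

Directories only need to be listable and traversable, hence 0555; 0755 wrongly advertised them as writable.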
@@ -5318,7 +5352,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
 	for_each_domain(cpu, sd) {
 		snprintf(buf, 32, "domain%d", i);
 		entry->procname = kstrdup(buf, GFP_KERNEL);
-		entry->mode = 0755;
+		entry->mode = 0555;
 		entry->child = sd_alloc_ctl_domain_table(sd);
 		entry++;
 		i++;
@@ -5338,7 +5372,7 @@ static void init_sched_domain_sysctl(void)
 	for (i = 0; i < cpu_num; i++, entry++) {
 		snprintf(buf, 32, "cpu%d", i);
 		entry->procname = kstrdup(buf, GFP_KERNEL);
-		entry->mode = 0755;
+		entry->mode = 0555;
 		entry->child = sd_alloc_ctl_cpu_table(i);
 	}
 	sd_sysctl_header = register_sysctl_table(sd_ctl_root);
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 87e524762b85..ab18f45f2ab2 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -154,10 +154,11 @@ static void print_cpu(struct seq_file *m, int cpu)
 	P(next_balance);
 	P(curr->pid);
 	P(clock);
+	P(idle_clock);
 	P(prev_clock_raw);
 	P(clock_warps);
 	P(clock_overflows);
-	P(clock_unstable_events);
+	P(clock_deep_idle_events);
 	P(clock_max_delta);
 	P(cpu_load[0]);
 	P(cpu_load[1]);
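For context, the P() macro local to print_cpu() stringifies an rq field name and prints its value, roughly like this (paraphrased from sched_debug.c of this period, not part of the patch):

	#define P(x) \
		SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rq->x))

so the new idle_clock field and the renamed clock_deep_idle_events counter show up automatically in the /proc/sched_debug output.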