 include/linux/sched.h |   42
 kernel/sched.c        |   73
 kernel/sched_debug.c  |    2
 kernel/sched_stats.h  |    8
 4 files changed, 65 insertions(+), 60 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7accc04e23ab..10a83d8d5775 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -569,7 +569,7 @@ struct sched_info {
 			last_queued;	/* when we were last queued to run */
 #ifdef CONFIG_SCHEDSTATS
 	/* BKL stats */
-	unsigned long bkl_count;
+	unsigned int bkl_count;
 #endif
 };
 #endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
@@ -705,34 +705,34 @@ struct sched_domain {
 
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
-	unsigned long lb_count[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_failed[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_balanced[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_imbalance[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_gained[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_hot_gained[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_nobusyg[CPU_MAX_IDLE_TYPES];
-	unsigned long lb_nobusyq[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
+	unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
 
 	/* Active load balancing */
-	unsigned long alb_count;
-	unsigned long alb_failed;
-	unsigned long alb_pushed;
+	unsigned int alb_count;
+	unsigned int alb_failed;
+	unsigned int alb_pushed;
 
 	/* SD_BALANCE_EXEC stats */
-	unsigned long sbe_count;
-	unsigned long sbe_balanced;
-	unsigned long sbe_pushed;
+	unsigned int sbe_count;
+	unsigned int sbe_balanced;
+	unsigned int sbe_pushed;
 
 	/* SD_BALANCE_FORK stats */
-	unsigned long sbf_count;
-	unsigned long sbf_balanced;
-	unsigned long sbf_pushed;
+	unsigned int sbf_count;
+	unsigned int sbf_balanced;
+	unsigned int sbf_pushed;
 
 	/* try_to_wake_up() stats */
-	unsigned long ttwu_wake_remote;
-	unsigned long ttwu_move_affine;
-	unsigned long ttwu_move_balance;
+	unsigned int ttwu_wake_remote;
+	unsigned int ttwu_move_affine;
+	unsigned int ttwu_move_balance;
 #endif
 };
 
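For context: these fields are pure event counters, bumped by one per scheduler event, so 32 bits is ample headroom and the switch away from unsigned long halves their footprint on 64-bit kernels. They are normally updated through wrapper macros along the lines of the sketch below (modeled on the schedstat helpers in kernel/sched_stats.h; shown for illustration only, not part of this patch):

	/* Sketch of the update helpers (modeled on kernel/sched_stats.h):
	 * the counters compile away when CONFIG_SCHEDSTATS is off. */
	#ifdef CONFIG_SCHEDSTATS
	# define schedstat_inc(rq, field)	do { (rq)->field++; } while (0)
	# define schedstat_add(rq, field, amt)	do { (rq)->field += (amt); } while (0)
	#else
	# define schedstat_inc(rq, field)	do { } while (0)
	# define schedstat_add(rq, field, amt)	do { } while (0)
	#endif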
diff --git a/kernel/sched.c b/kernel/sched.c
index 12534421d7b5..ed90be46fb31 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -266,7 +266,8 @@ struct rt_rq {
  * acquire operations must be ordered by ascending &runqueue.
  */
 struct rq {
-	spinlock_t lock;	/* runqueue lock */
+	/* runqueue lock: */
+	spinlock_t lock;
 
 	/*
 	 * nr_running and cpu_load should be in the same cacheline because
@@ -279,13 +280,15 @@ struct rq {
 #ifdef CONFIG_NO_HZ
 	unsigned char in_nohz_recently;
 #endif
-	struct load_weight load;	/* capture load from *all* tasks on this cpu */
+	/* capture load from *all* tasks on this cpu: */
+	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
 
 	struct cfs_rq cfs;
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
+	/* list of leaf cfs_rq on this cpu: */
+	struct list_head leaf_cfs_rq_list;
 #endif
 	struct rt_rq rt;
 
@@ -317,7 +320,8 @@ struct rq {
 	/* For active balancing */
 	int active_balance;
 	int push_cpu;
-	int cpu;		/* cpu of this runqueue */
+	/* cpu of this runqueue: */
+	int cpu;
 
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
@@ -328,22 +332,22 @@ struct rq {
 	struct sched_info rq_sched_info;
 
 	/* sys_sched_yield() stats */
-	unsigned long yld_exp_empty;
-	unsigned long yld_act_empty;
-	unsigned long yld_both_empty;
-	unsigned long yld_count;
+	unsigned int yld_exp_empty;
+	unsigned int yld_act_empty;
+	unsigned int yld_both_empty;
+	unsigned int yld_count;
 
 	/* schedule() stats */
-	unsigned long sched_switch;
-	unsigned long sched_count;
-	unsigned long sched_goidle;
+	unsigned int sched_switch;
+	unsigned int sched_count;
+	unsigned int sched_goidle;
 
 	/* try_to_wake_up() stats */
-	unsigned long ttwu_count;
-	unsigned long ttwu_local;
+	unsigned int ttwu_count;
+	unsigned int ttwu_local;
 
 	/* BKL stats */
-	unsigned long bkl_count;
+	unsigned int bkl_count;
 #endif
 	struct lock_class_key rq_lock_key;
 };
@@ -449,12 +453,12 @@ enum {
 };
 
 const_debug unsigned int sysctl_sched_features =
-		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
-		SCHED_FEAT_START_DEBIT		*1 |
-		SCHED_FEAT_TREE_AVG		*0 |
-		SCHED_FEAT_APPROX_AVG		*0 |
-		SCHED_FEAT_WAKEUP_PREEMPT	*1 |
-		SCHED_FEAT_PREEMPT_RESTRICT	*1;
+		SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
+		SCHED_FEAT_START_DEBIT		* 1 |
+		SCHED_FEAT_TREE_AVG		* 0 |
+		SCHED_FEAT_APPROX_AVG		* 0 |
+		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
+		SCHED_FEAT_PREEMPT_RESTRICT	* 1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
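The whitespace fix above touches an idiom worth spelling out: each feature flag is multiplied by a literal 1 or 0, so a feature is toggled at compile time by editing a single digit while the OR-chain keeps one line per feature. Elsewhere in the scheduler the bits are tested through the sched_feat() macro defined in this hunk, roughly as in this illustrative sketch (the function name is hypothetical, not part of the patch):

	/* Illustrative use of the sched_feat() test macro: */
	static void example_wakeup_path(void)	/* hypothetical */
	{
		if (sched_feat(WAKEUP_PREEMPT)) {
			/* runs only while the SCHED_FEAT_WAKEUP_PREEMPT
			 * bit is set in sysctl_sched_features */
		}
	}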
@@ -3880,7 +3884,10 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
 
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
-	return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+	if (t == -ERESTARTSYS)
+		return t;
+	return 0;
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible);
 
@@ -4815,18 +4822,18 @@ static void show_task(struct task_struct *p)
 	unsigned state;
 
 	state = p->state ? __ffs(p->state) + 1 : 0;
-	printk("%-13.13s %c", p->comm,
+	printk(KERN_INFO "%-13.13s %c", p->comm,
 		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
 	if (state == TASK_RUNNING)
-		printk(" running ");
+		printk(KERN_CONT " running ");
 	else
-		printk(" %08lx ", thread_saved_pc(p));
+		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
 #else
 	if (state == TASK_RUNNING)
-		printk(" running task ");
+		printk(KERN_CONT " running task ");
 	else
-		printk(" %016lx ", thread_saved_pc(p));
+		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	{
@@ -4836,7 +4843,7 @@ static void show_task(struct task_struct *p)
 		free = (unsigned long)n - (unsigned long)end_of_stack(p);
 	}
 #endif
-	printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid);
+	printk(KERN_CONT "%5lu %5d %6d\n", free, p->pid, p->parent->pid);
 
 	if (state != TASK_RUNNING)
 		show_stack(p, NULL);
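The printk() changes all follow one rule: the first fragment of a console line carries a real log level (KERN_INFO here), and every later fragment is marked KERN_CONT so the log code appends it to the open line instead of starting a new message. In miniature (illustrative fragment only, variables assumed from the surrounding function):

	/* Minimal sketch of the continuation convention used above: */
	printk(KERN_INFO "%-13.13s %c", comm, state_char); /* opens the line */
	printk(KERN_CONT " running");                      /* appends to it  */
	printk(KERN_CONT "\n");                            /* terminates it  */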
@@ -5385,7 +5392,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 	return table;
 }
 
-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
 {
 	struct ctl_table *entry, *table;
 	struct sched_domain *sd;
@@ -5619,20 +5626,20 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 		}
 
 		if (!group->__cpu_power) {
-			printk("\n");
+			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: domain->cpu_power not "
 					"set\n");
 			break;
 		}
 
 		if (!cpus_weight(group->cpumask)) {
-			printk("\n");
+			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: empty group\n");
 			break;
 		}
 
 		if (cpus_intersects(groupmask, group->cpumask)) {
-			printk("\n");
+			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: repeated CPUs\n");
 			break;
 		}
@@ -5640,11 +5647,11 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 		cpus_or(groupmask, groupmask, group->cpumask);
 
 		cpumask_scnprintf(str, NR_CPUS, group->cpumask);
-		printk(" %s", str);
+		printk(KERN_CONT " %s", str);
 
 		group = group->next;
 	} while (group != sd->groups);
-	printk("\n");
+	printk(KERN_CONT "\n");
 
 	if (!cpus_equal(sd->span, groupmask))
 		printk(KERN_ERR "ERROR: groups don't span "
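Here the group-mask line is assembled piecewise across a do/while loop, which is exactly the case the continuation marker exists for. The simplified shape (a sketch of the surrounding code with details elided; the opening printk and its level are abbreviated):

	/* Simplified shape of the loop being annotated: one logical
	 * console line is built from many printk() calls. */
	printk(KERN_INFO "groups:");
	do {
		cpumask_scnprintf(str, NR_CPUS, group->cpumask);
		printk(KERN_CONT " %s", str);	/* same line, no new prefix */
		group = group->next;
	} while (group != sd->groups);
	printk(KERN_CONT "\n");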
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index a5e517ec07c3..e6fb392e5164 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -137,7 +137,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
 	SEQ_printf(m, "  .%-30s: %ld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_SCHEDSTATS
-	SEQ_printf(m, "  .%-30s: %ld\n", "bkl_count",
+	SEQ_printf(m, "  .%-30s: %d\n", "bkl_count",
 			rq->bkl_count);
 #endif
 	SEQ_printf(m, "  .%-30s: %ld\n", "nr_spread_over",
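An aside on the specifier: with bkl_count narrowed to unsigned int, the old %ld asks printf to fetch a long where only an unsigned int was passed, which is undefined behavior in principle and a -Wformat warning in practice; %d restores the width match (strictly, %u would be the exact fit for an unsigned value). In isolation:

	/* Standalone illustration of the width mismatch being fixed: */
	unsigned int bkl_count = 42;
	printf("%ld\n", bkl_count);	/* -Wformat warning: expects long */
	printf("%d\n", bkl_count);	/* matches the patch; %u is exact  */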
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 1c084842c3e7..ef1a7df80ea2 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -21,7 +21,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 
 		/* runqueue-specific stats */
 		seq_printf(seq,
-		    "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu",
+		    "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
 		    cpu, rq->yld_both_empty,
 		    rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
 		    rq->sched_switch, rq->sched_count, rq->sched_goidle,
@@ -42,8 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 			seq_printf(seq, "domain%d %s", dcount++, mask_str);
 			for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 					itype++) {
-				seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu "
-						"%lu",
+				seq_printf(seq, " %u %u %u %u %u %u %u %u",
 				    sd->lb_count[itype],
 				    sd->lb_balanced[itype],
 				    sd->lb_failed[itype],
@@ -53,8 +52,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
 			    sd->lb_nobusyq[itype],
 			    sd->lb_nobusyg[itype]);
 			}
-			seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu"
-			    " %lu %lu %lu\n",
+			seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
 			    sd->alb_count, sd->alb_failed, sd->alb_pushed,
 			    sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
 			    sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
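Note for consumers: the counters still print as plain decimal, so the %lu-to-%u switch changes nothing in the /proc/schedstat text format; it only brings the format strings back in line with the narrowed field types. A hypothetical userspace reader of the per-CPU line, with field order taken from the seq_printf() above and trailing fields ignored:

	/* Hypothetical parser sketch for the per-cpu schedstat line. */
	#include <stdio.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/proc/schedstat", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f)) {
			int cpu;
			unsigned int yld_both_empty, yld_act_empty;
			unsigned int yld_exp_empty, yld_count;

			if (sscanf(line, "cpu%d %u %u %u %u", &cpu,
				   &yld_both_empty, &yld_act_empty,
				   &yld_exp_empty, &yld_count) == 5)
				printf("cpu%d: %u sys_sched_yield() calls\n",
				       cpu, yld_count);
		}
		fclose(f);
		return 0;
	}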
