Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--  kernel/sched.c | 94
 1 file changed, 61 insertions(+), 33 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 92721d1534b8..ed90be46fb31 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -266,7 +266,8 @@ struct rt_rq {
  * acquire operations must be ordered by ascending &runqueue.
  */
 struct rq {
-	spinlock_t lock;	/* runqueue lock */
+	/* runqueue lock: */
+	spinlock_t lock;
 
 	/*
 	 * nr_running and cpu_load should be in the same cacheline because
@@ -279,13 +280,15 @@ struct rq {
 #ifdef CONFIG_NO_HZ
 	unsigned char in_nohz_recently;
 #endif
-	struct load_weight load;	/* capture load from *all* tasks on this cpu */
+	/* capture load from *all* tasks on this cpu: */
+	struct load_weight load;
 	unsigned long nr_load_updates;
 	u64 nr_switches;
 
 	struct cfs_rq cfs;
 #ifdef CONFIG_FAIR_GROUP_SCHED
-	struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */
+	/* list of leaf cfs_rq on this cpu: */
+	struct list_head leaf_cfs_rq_list;
 #endif
 	struct rt_rq rt;
 
@@ -317,7 +320,8 @@ struct rq {
 	/* For active balancing */
 	int active_balance;
 	int push_cpu;
-	int cpu;		/* cpu of this runqueue */
+	/* cpu of this runqueue: */
+	int cpu;
 
 	struct task_struct *migration_thread;
 	struct list_head migration_queue;
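The three struct rq hunks above are one mechanical cleanup: trailing per-field comments move onto their own line above the declaration, so the member declarations stay short and the comment text no longer has to squeeze past column 80.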
@@ -328,22 +332,22 @@ struct rq {
 	struct sched_info rq_sched_info;
 
 	/* sys_sched_yield() stats */
-	unsigned long yld_exp_empty;
-	unsigned long yld_act_empty;
-	unsigned long yld_both_empty;
-	unsigned long yld_count;
+	unsigned int yld_exp_empty;
+	unsigned int yld_act_empty;
+	unsigned int yld_both_empty;
+	unsigned int yld_count;
 
 	/* schedule() stats */
-	unsigned long sched_switch;
-	unsigned long sched_count;
-	unsigned long sched_goidle;
+	unsigned int sched_switch;
+	unsigned int sched_count;
+	unsigned int sched_goidle;
 
 	/* try_to_wake_up() stats */
-	unsigned long ttwu_count;
-	unsigned long ttwu_local;
+	unsigned int ttwu_count;
+	unsigned int ttwu_local;
 
 	/* BKL stats */
-	unsigned long bkl_count;
+	unsigned int bkl_count;
 #endif
 	struct lock_class_key rq_lock_key;
 };
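Assuming these stay simple event counters (as the surrounding "stats" comments suggest), 32-bit wraparound is tolerable and the narrowing halves their footprint in struct rq on LP64 targets. A stand-alone sketch of the space saved, using hypothetical struct names rather than kernel code: the ten counters drop from 80 to 40 bytes.

	#include <stdio.h>

	/* Hypothetical stand-ins for the ten schedstat counters above. */
	struct stats_long { unsigned long counters[10]; };
	struct stats_int  { unsigned int  counters[10]; };

	int main(void)
	{
		/* On an LP64 target: 80 bytes vs. 40 bytes. */
		printf("unsigned long: %zu bytes\n", sizeof(struct stats_long));
		printf("unsigned int:  %zu bytes\n", sizeof(struct stats_int));
		return 0;
	}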
@@ -449,12 +453,12 @@ enum {
 };
 
 const_debug unsigned int sysctl_sched_features =
-		SCHED_FEAT_NEW_FAIR_SLEEPERS	*1 |
-		SCHED_FEAT_START_DEBIT		*1 |
-		SCHED_FEAT_TREE_AVG		*0 |
-		SCHED_FEAT_APPROX_AVG		*0 |
-		SCHED_FEAT_WAKEUP_PREEMPT	*1 |
-		SCHED_FEAT_PREEMPT_RESTRICT	*1;
+		SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
+		SCHED_FEAT_START_DEBIT		* 1 |
+		SCHED_FEAT_TREE_AVG		* 0 |
+		SCHED_FEAT_APPROX_AVG		* 0 |
+		SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
+		SCHED_FEAT_PREEMPT_RESTRICT	* 1;
 
 #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
 
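The whitespace fix also highlights the idiom: each SCHED_FEAT_* constant is a distinct bit, and multiplying by 1 or 0 turns the default on or off while keeping every feature name visible in the initializer. A compilable sketch of the same pattern, assuming the ascending power-of-two enum values kernels of this era used:

	#include <stdio.h>

	enum {
		SCHED_FEAT_NEW_FAIR_SLEEPERS	= 1,
		SCHED_FEAT_START_DEBIT		= 2,
		SCHED_FEAT_TREE_AVG		= 4,
		SCHED_FEAT_APPROX_AVG		= 8,
		SCHED_FEAT_WAKEUP_PREEMPT	= 16,
		SCHED_FEAT_PREEMPT_RESTRICT	= 32,
	};

	/* "* 1" keeps the bit, "* 0" drops it; "*" binds tighter than "|". */
	static const unsigned int sysctl_sched_features =
			SCHED_FEAT_NEW_FAIR_SLEEPERS	* 1 |
			SCHED_FEAT_START_DEBIT		* 1 |
			SCHED_FEAT_TREE_AVG		* 0 |
			SCHED_FEAT_APPROX_AVG		* 0 |
			SCHED_FEAT_WAKEUP_PREEMPT	* 1 |
			SCHED_FEAT_PREEMPT_RESTRICT	* 1;

	#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

	int main(void)
	{
		printf("TREE_AVG:    %d\n", !!sched_feat(TREE_AVG));	/* 0 */
		printf("START_DEBIT: %d\n", !!sched_feat(START_DEBIT));	/* 1 */
		return 0;
	}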
@@ -3334,6 +3338,16 @@ void account_guest_time(struct task_struct *p, cputime_t cputime)
 }
 
 /*
+ * Account scaled user cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @cputime: the cpu time spent in user space since the last update
+ */
+void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
+{
+	p->utimescaled = cputime_add(p->utimescaled, cputime);
+}
+
+/*
  * Account system cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -3371,6 +3385,17 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
 }
 
 /*
+ * Account scaled system cpu time to a process.
+ * @p: the process that the cpu time gets accounted to
+ * @hardirq_offset: the offset to subtract from hardirq_count()
+ * @cputime: the cpu time spent in kernel space since the last update
+ */
+void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
+{
+	p->stimescaled = cputime_add(p->stimescaled, cputime);
+}
+
+/*
  * Account for involuntary wait time.
  * @p: the process from which the cpu time has been stolen
  * @steal: the cpu time spent in involuntary wait
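Both additions are one-line accumulators: each folds a scaled cputime delta into the corresponding per-task field via cputime_add(). (The @hardirq_offset line in the second comment is carried over from account_system_time() even though the scaled variant takes no such parameter.) A user-space sketch of the arithmetic, assuming the generic cputime_t of this era, where cputime_add() is plain addition:

	#include <stdio.h>

	/* Generic (asm-generic) cputime_t: an unsigned long of ticks,
	 * with cputime_add() defined as plain addition. */
	typedef unsigned long cputime_t;
	#define cputime_add(a, b)	((a) + (b))

	/* Hypothetical stand-in for the scaled-time fields in task_struct. */
	struct task_acct {
		cputime_t utimescaled;
		cputime_t stimescaled;
	};

	static void account_user_time_scaled(struct task_acct *p, cputime_t cputime)
	{
		p->utimescaled = cputime_add(p->utimescaled, cputime);
	}

	int main(void)
	{
		struct task_acct t = { 0, 0 };

		account_user_time_scaled(&t, 5);	/* 5 ticks in user space */
		account_user_time_scaled(&t, 3);	/* 3 more */
		printf("utimescaled = %lu ticks\n", t.utimescaled);
		return 0;
	}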
@@ -3859,7 +3884,10 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
 
 int __sched wait_for_completion_interruptible(struct completion *x)
 {
-	return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+	long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
+	if (t == -ERESTARTSYS)
+		return t;
+	return 0;
 }
 EXPORT_SYMBOL(wait_for_completion_interruptible);
 
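Previously this wrapper leaked wait_for_common()'s internal timeout bookkeeping: on success it returned the large positive remainder of MAX_SCHEDULE_TIMEOUT. The new contract is 0 on completion and -ERESTARTSYS only when a signal interrupted the wait, so callers can treat any non-zero return as failure. A hypothetical caller under the new contract (kernel-style sketch, not from this patch):

	#include <linux/completion.h>
	#include <linux/errno.h>

	static int example_wait(struct completion *done)
	{
		int ret = wait_for_completion_interruptible(done);

		if (ret)		/* only -ERESTARTSYS is possible here */
			return ret;

		/* ... the completion fired; proceed ... */
		return 0;
	}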
@@ -4794,18 +4822,18 @@ static void show_task(struct task_struct *p)
 	unsigned state;
 
 	state = p->state ? __ffs(p->state) + 1 : 0;
-	printk("%-13.13s %c", p->comm,
+	printk(KERN_INFO "%-13.13s %c", p->comm,
 		state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
 #if BITS_PER_LONG == 32
 	if (state == TASK_RUNNING)
-		printk(" running  ");
+		printk(KERN_CONT " running  ");
 	else
-		printk(" %08lx ", thread_saved_pc(p));
+		printk(KERN_CONT " %08lx ", thread_saved_pc(p));
 #else
 	if (state == TASK_RUNNING)
-		printk("  running task    ");
+		printk(KERN_CONT "  running task    ");
 	else
-		printk(" %016lx ", thread_saved_pc(p));
+		printk(KERN_CONT " %016lx ", thread_saved_pc(p));
 #endif
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	{
@@ -4815,7 +4843,7 @@ static void show_task(struct task_struct *p)
 		free = (unsigned long)n - (unsigned long)end_of_stack(p);
 	}
 #endif
-	printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid);
+	printk(KERN_CONT "%5lu %5d %6d\n", free, p->pid, p->parent->pid);
 
 	if (state != TASK_RUNNING)
 		show_stack(p, NULL);
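show_task() assembles one console line from several printk() calls. The fix gives the first fragment an explicit KERN_INFO level and marks every continuation KERN_CONT, so the log code appends the fragments to the same record instead of starting a new default-level line for each. A minimal sketch of the pattern (hypothetical function, same printk usage):

	#include <linux/kernel.h>

	static void show_one_line(const char *comm, char state, unsigned long pc)
	{
		printk(KERN_INFO "%-13.13s %c", comm, state);	/* level set once */
		printk(KERN_CONT " %08lx", pc);			/* same record */
		printk(KERN_CONT "\n");				/* terminate it */
	}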
@@ -5364,7 +5392,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
 	return table;
 }
 
-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
+static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
 {
 	struct ctl_table *entry, *table;
 	struct sched_domain *sd;
@@ -5598,20 +5626,20 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 		}
 
 		if (!group->__cpu_power) {
-			printk("\n");
+			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: domain->cpu_power not "
 					"set\n");
 			break;
 		}
 
 		if (!cpus_weight(group->cpumask)) {
-			printk("\n");
+			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: empty group\n");
 			break;
 		}
 
 		if (cpus_intersects(groupmask, group->cpumask)) {
-			printk("\n");
+			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: repeated CPUs\n");
 			break;
 		}
@@ -5619,11 +5647,11 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 		cpus_or(groupmask, groupmask, group->cpumask);
 
 		cpumask_scnprintf(str, NR_CPUS, group->cpumask);
-		printk(" %s", str);
+		printk(KERN_CONT " %s", str);
 
 		group = group->next;
 	} while (group != sd->groups);
-	printk("\n");
+	printk(KERN_CONT "\n");
 
 	if (!cpus_equal(sd->span, groupmask))
 		printk(KERN_ERR "ERROR: groups don't span "