Diffstat (limited to 'kernel/sched/core.c')

 -rw-r--r--  kernel/sched/core.c | 65
 1 file changed, 51 insertions(+), 14 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 2d8927fda712..0533496b6228 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -72,6 +72,7 @@
 #include <linux/slab.h>
 #include <linux/init_task.h>
 #include <linux/binfmts.h>
+#include <linux/context_tracking.h>
 
 #include <asm/switch_to.h>
 #include <asm/tlb.h>
@@ -922,6 +923,13 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 		rq->skip_clock_update = 1;
 }
 
+static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
+
+void register_task_migration_notifier(struct notifier_block *n)
+{
+	atomic_notifier_chain_register(&task_migration_notifier, n);
+}
+
 #ifdef CONFIG_SMP
 void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 {
@@ -952,8 +960,18 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	trace_sched_migrate_task(p, new_cpu);
 
 	if (task_cpu(p) != new_cpu) {
+		struct task_migration_notifier tmn;
+
+		if (p->sched_class->migrate_task_rq)
+			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
 		perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0);
+
+		tmn.task = p;
+		tmn.from_cpu = task_cpu(p);
+		tmn.to_cpu = new_cpu;
+
+		atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
 	}
 
 	__set_task_cpu(p, new_cpu);
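The two hunks above add a notifier chain that fires on every cross-CPU task migration, so interested subsystems can react to a task changing CPUs. Below is a minimal sketch of a consumer, assuming the struct task_migration_notifier declaration (fields task, from_cpu, to_cpu, as used above) is exported via <linux/sched.h>; the "demo" names are hypothetical. Note the chain is atomic and fires from scheduler context, so the callback must not sleep.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>

/* Hypothetical consumer: log each migration (must be non-blocking). */
static int demo_migration_notify(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct task_migration_notifier *tmn = data;

	pr_debug("task %d: CPU %d -> CPU %d\n",
		 task_pid_nr(tmn->task), tmn->from_cpu, tmn->to_cpu);
	return NOTIFY_OK;
}

static struct notifier_block demo_migration_nb = {
	.notifier_call = demo_migration_notify,
};

static int __init demo_init(void)
{
	register_task_migration_notifier(&demo_migration_nb);
	return 0;
}
late_initcall(demo_init);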
@@ -1524,6 +1542,15 @@ static void __sched_fork(struct task_struct *p)
 	p->se.vruntime			= 0;
 	INIT_LIST_HEAD(&p->se.group_node);
 
+/*
+ * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
+ * removed when useful for applications beyond shares distribution (e.g.
+ * load-balance).
+ */
+#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
+	p->se.avg.runnable_avg_period = 0;
+	p->se.avg.runnable_avg_sum = 0;
+#endif
 #ifdef CONFIG_SCHEDSTATS
 	memset(&p->se.statistics, 0, sizeof(p->se.statistics));
 #endif
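For orientation, the two fields zeroed here live in the per-entity load-tracking state embedded in each sched_entity. In kernels of this vintage the structure looks roughly like the paraphrase below (see <linux/sched.h>; shown for context, not verbatim):

/* Roughly, the per-entity load-tracking state of this era: */
struct sched_avg {
	u32 runnable_avg_sum, runnable_avg_period;	/* zeroed in __sched_fork() above */
	u64 last_runnable_update;
	s64 decay_count;
	unsigned long load_avg_contrib;
};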
@@ -1886,8 +1913,8 @@ context_switch(struct rq *rq, struct task_struct *prev,
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
 #endif
 
+	context_tracking_task_switch(prev, next);
 	/* Here we just switch the register state and the stack. */
-	rcu_switch(prev, next);
 	switch_to(prev, next, prev);
 
 	barrier();
@@ -2911,7 +2938,7 @@ asmlinkage void __sched schedule(void)
 }
 EXPORT_SYMBOL(schedule);
 
-#ifdef CONFIG_RCU_USER_QS
+#ifdef CONFIG_CONTEXT_TRACKING
 asmlinkage void __sched schedule_user(void)
 {
 	/*
@@ -2920,9 +2947,9 @@ asmlinkage void __sched schedule_user(void)
 	 * we haven't yet exited the RCU idle mode. Do it here manually until
 	 * we find a better solution.
 	 */
-	rcu_user_exit();
+	user_exit();
 	schedule();
-	rcu_user_enter();
+	user_enter();
 }
 #endif
 
@@ -3027,7 +3054,7 @@ asmlinkage void __sched preempt_schedule_irq(void)
 	/* Catch callers which need to be fixed */
 	BUG_ON(ti->preempt_count || !irqs_disabled());
 
-	rcu_user_exit();
+	user_exit();
 	do {
 		add_preempt_count(PREEMPT_ACTIVE);
 		local_irq_enable();
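The rcu_user_exit()/rcu_user_enter() calls become user_exit()/user_enter() because RCU's user-mode extended quiescent state tracking was folded into the more general context tracking subsystem (hence the new <linux/context_tracking.h> include and the CONFIG_CONTEXT_TRACKING guard). As a sketch of how the pair brackets user mode, with illustrative function names standing in for real arch entry code:

#include <linux/context_tracking.h>

/* Sketch: an arch's syscall path bracketing user mode. The function
 * names are hypothetical; real kernels hook this from entry code. */
static void example_syscall_entry(void)
{
	user_exit();	/* leaving user mode: inform context tracking/RCU */
	/* ... handle the syscall ... */
}

static void example_syscall_exit(void)
{
	/* ... finish work, deliver signals, etc. ... */
	user_enter();	/* about to return to user mode */
}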
@@ -4474,6 +4501,7 @@ static const char stat_nam[] = TASK_STATE_TO_CHAR_STR;
 void sched_show_task(struct task_struct *p)
 {
 	unsigned long free = 0;
+	int ppid;
 	unsigned state;
 
 	state = p->state ? __ffs(p->state) + 1 : 0;
@@ -4493,8 +4521,11 @@ void sched_show_task(struct task_struct *p)
 #ifdef CONFIG_DEBUG_STACK_USAGE
 	free = stack_not_used(p);
 #endif
+	rcu_read_lock();
+	ppid = task_pid_nr(rcu_dereference(p->real_parent));
+	rcu_read_unlock();
 	printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free,
-		task_pid_nr(p), task_pid_nr(rcu_dereference(p->real_parent)),
+		task_pid_nr(p), ppid,
 		(unsigned long)task_thread_info(p)->flags);
 
 	show_stack(p, NULL);
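The old code called rcu_dereference(p->real_parent) outside any RCU read-side critical section, which trips a lockdep-RCU warning on CONFIG_PROVE_RCU kernels; the fix copies the ppid out under rcu_read_lock() first. The general idiom, as a self-contained sketch:

#include <linux/rcupdate.h>
#include <linux/sched.h>

/* Sketch: dereference an RCU-protected pointer only inside a
 * read-side critical section, and copy out the plain value needed. */
static int get_ppid(struct task_struct *p)
{
	int ppid;

	rcu_read_lock();
	ppid = task_pid_nr(rcu_dereference(p->real_parent));
	rcu_read_unlock();

	return ppid;	/* plain integer, safe to use after unlock */
}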
@@ -7468,7 +7499,7 @@ static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
 			    struct task_group, css);
 }
 
-static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpu_cgroup_css_alloc(struct cgroup *cgrp)
 {
 	struct task_group *tg, *parent;
 
@@ -7485,7 +7516,7 @@ static struct cgroup_subsys_state *cpu_cgroup_create(struct cgroup *cgrp)
 	return &tg->css;
 }
 
-static void cpu_cgroup_destroy(struct cgroup *cgrp)
+static void cpu_cgroup_css_free(struct cgroup *cgrp)
 {
 	struct task_group *tg = cgroup_tg(cgrp);
 
@@ -7845,8 +7876,8 @@ static struct cftype cpu_files[] = {
 
 struct cgroup_subsys cpu_cgroup_subsys = {
 	.name		= "cpu",
-	.create		= cpu_cgroup_create,
-	.destroy	= cpu_cgroup_destroy,
+	.css_alloc	= cpu_cgroup_css_alloc,
+	.css_free	= cpu_cgroup_css_free,
 	.can_attach	= cpu_cgroup_can_attach,
 	.attach		= cpu_cgroup_attach,
 	.exit		= cpu_cgroup_exit,
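These hunks follow a tree-wide cgroup rename: the .create/.destroy callbacks become .css_alloc/.css_free, reflecting what controllers actually do there (allocate and free a cgroup_subsys_state). A minimal sketch of a controller using the new names; everything prefixed "demo", including demo_subsys_id, is hypothetical (the real id would be generated from a cgroup_subsys.h entry):

#include <linux/cgroup.h>
#include <linux/err.h>
#include <linux/slab.h>

/* Hypothetical controller state embedding the css. */
struct demo_state {
	struct cgroup_subsys_state css;
	/* controller-private fields would follow */
};

static struct cgroup_subsys_state *demo_css_alloc(struct cgroup *cgrp)
{
	struct demo_state *ds = kzalloc(sizeof(*ds), GFP_KERNEL);

	if (!ds)
		return ERR_PTR(-ENOMEM);
	return &ds->css;
}

static void demo_css_free(struct cgroup *cgrp)
{
	/* demo_subsys_id is hypothetical; see cgroup_subsys.h */
	struct demo_state *ds = container_of(
		cgroup_subsys_state(cgrp, demo_subsys_id),
		struct demo_state, css);

	kfree(ds);
}

struct cgroup_subsys demo_subsys = {
	.name		= "demo",
	.css_alloc	= demo_css_alloc,
	.css_free	= demo_css_free,
	.subsys_id	= demo_subsys_id,
};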
@@ -7869,7 +7900,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 struct cpuacct root_cpuacct;
 
 /* create a new cpu accounting group */
-static struct cgroup_subsys_state *cpuacct_create(struct cgroup *cgrp)
+static struct cgroup_subsys_state *cpuacct_css_alloc(struct cgroup *cgrp)
 {
 	struct cpuacct *ca;
 
@@ -7899,7 +7930,7 @@ out:
 }
 
 /* destroy an existing cpu accounting group */
-static void cpuacct_destroy(struct cgroup *cgrp)
+static void cpuacct_css_free(struct cgroup *cgrp)
 {
 	struct cpuacct *ca = cgroup_ca(cgrp);
 
@@ -8070,9 +8101,15 @@ void cpuacct_charge(struct task_struct *tsk, u64 cputime)
 
 struct cgroup_subsys cpuacct_subsys = {
 	.name		= "cpuacct",
-	.create		= cpuacct_create,
-	.destroy	= cpuacct_destroy,
+	.css_alloc	= cpuacct_css_alloc,
+	.css_free	= cpuacct_css_free,
 	.subsys_id	= cpuacct_subsys_id,
 	.base_cftypes	= files,
 };
 #endif	/* CONFIG_CGROUP_CPUACCT */
+
+void dump_cpu_task(int cpu)
+{
+	pr_info("Task dump for CPU %d:\n", cpu);
+	sched_show_task(cpu_curr(cpu));
+}
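The new dump_cpu_task() gives other subsystems a one-call way to dump the task currently running on a given CPU; the RCU CPU stall warning code is the intended caller. A usage sketch, assuming the declaration is exported via <linux/sched.h>:

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Sketch: dump whatever each online CPU is currently running,
 * e.g. from stall-warning or watchdog code. */
static void demo_dump_all_cpus(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		dump_cpu_task(cpu);
}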