Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  71
1 files changed, 62 insertions, 9 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index d1ad69b270ca..8dcdec6fe0fe 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -594,18 +594,14 @@ enum {
 	SCHED_FEAT_NEW_FAIR_SLEEPERS = 1,
 	SCHED_FEAT_WAKEUP_PREEMPT = 2,
 	SCHED_FEAT_START_DEBIT = 4,
-	SCHED_FEAT_TREE_AVG = 8,
-	SCHED_FEAT_APPROX_AVG = 16,
-	SCHED_FEAT_HRTICK = 32,
-	SCHED_FEAT_DOUBLE_TICK = 64,
+	SCHED_FEAT_HRTICK = 8,
+	SCHED_FEAT_DOUBLE_TICK = 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
 		SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
 		SCHED_FEAT_WAKEUP_PREEMPT * 1 |
 		SCHED_FEAT_START_DEBIT * 1 |
-		SCHED_FEAT_TREE_AVG * 0 |
-		SCHED_FEAT_APPROX_AVG * 0 |
 		SCHED_FEAT_HRTICK * 1 |
 		SCHED_FEAT_DOUBLE_TICK * 0;
 
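
Note: with TREE_AVG and APPROX_AVG gone, the remaining feature bits are simply renumbered (8/16 instead of 32/64); the values only matter as distinct bits in the sysctl mask. For illustration, the mask is consumed by a sched_feat()-style test of this shape (a sketch reproduced for clarity; the exact macro name and placement are assumptions, not part of this hunk):

#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

static inline int hrtick_enabled_sketch(void)
{
	/* non-zero iff the SCHED_FEAT_HRTICK bit is set in the sysctl mask */
	return sched_feat(HRTICK) != 0;
}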
@@ -1056,6 +1052,49 @@ static void resched_cpu(int cpu)
 	resched_task(cpu_curr(cpu));
 	spin_unlock_irqrestore(&rq->lock, flags);
 }
+
+#ifdef CONFIG_NO_HZ
+/*
+ * When add_timer_on() enqueues a timer into the timer wheel of an
+ * idle CPU then this timer might expire before the next timer event
+ * which is scheduled to wake up that CPU. In case of a completely
+ * idle system the next event might even be infinite time into the
+ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
+ * leaves the inner idle loop so the newly added timer is taken into
+ * account when the CPU goes back to idle and evaluates the timer
+ * wheel for the next timer event.
+ */
+void wake_up_idle_cpu(int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+
+	if (cpu == smp_processor_id())
+		return;
+
+	/*
+	 * This is safe, as this function is called with the timer
+	 * wheel base lock of (cpu) held. When the CPU is on the way
+	 * to idle and has not yet set rq->curr to idle then it will
+	 * be serialized on the timer wheel base lock and take the new
+	 * timer into account automatically.
+	 */
+	if (rq->curr != rq->idle)
+		return;
+
+	/*
+	 * We can set TIF_RESCHED on the idle task of the other CPU
+	 * lockless. The worst case is that the other CPU runs the
+	 * idle task through an additional NOOP schedule()
+	 */
+	set_tsk_thread_flag(rq->idle, TIF_NEED_RESCHED);
+
+	/* NEED_RESCHED must be visible before we test polling */
+	smp_mb();
+	if (!tsk_is_polling(rq->idle))
+		smp_send_reschedule(cpu);
+}
+#endif
+
 #else
 static void __resched_task(struct task_struct *p, int tif_bit)
 {
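
The smp_mb()/tsk_is_polling() pair in the new helper relies on the idle loop advertising TIF_POLLING_NRFLAG and re-checking need_resched() before halting. A minimal sketch of that idle-side counterpart, for illustration only (not part of this diff):

static void polling_idle_loop_sketch(void)
{
	/* advertise that this CPU watches TIF_NEED_RESCHED while idle */
	set_thread_flag(TIF_POLLING_NRFLAG);

	while (1) {
		/* spin until a waker sets TIF_NEED_RESCHED; no IPI required */
		while (!need_resched())
			cpu_relax();
		schedule();
	}
}

Because a polling idle CPU spins on the flag, wake_up_idle_cpu() only has to send the reschedule IPI when the remote idle task is not polling (for example, when it is halted in a deeper idle state).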
@@ -1396,6 +1435,12 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 {
 	s64 delta;
 
+	/*
+	 * Buddy candidates are cache hot:
+	 */
+	if (&p->se == cfs_rq_of(&p->se)->next)
+		return 1;
+
 	if (p->sched_class != &fair_sched_class)
 		return 0;
 
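
Here cfs_rq->next is the wakeup "buddy": the entity the fair class intends to run next on that runqueue, so treating it as cache hot keeps the load balancer from migrating it away just before it runs. As a sketch of how such a buddy pointer gets established (the real code lives in kernel/sched_fair.c, not in this diff):

static void set_wakeup_buddy_sketch(struct sched_entity *se)
{
	/* mark the woken entity as this cfs_rq's preferred next pick */
	cfs_rq_of(se)->next = se;
}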
@@ -1855,10 +1900,11 @@ out_activate:
 	schedstat_inc(p, se.nr_wakeups_remote);
 	update_rq_clock(rq);
 	activate_task(rq, p, 1);
-	check_preempt_curr(rq, p);
 	success = 1;
 
 out_running:
+	check_preempt_curr(rq, p);
+
 	p->state = TASK_RUNNING;
 #ifdef CONFIG_SMP
 	if (p->sched_class->task_wake_up)
@@ -1892,6 +1938,8 @@ static void __sched_fork(struct task_struct *p)
 	p->se.exec_start = 0;
 	p->se.sum_exec_runtime = 0;
 	p->se.prev_sum_exec_runtime = 0;
+	p->se.last_wakeup = 0;
+	p->se.avg_overlap = 0;
 
 #ifdef CONFIG_SCHEDSTATS
 	p->se.wait_start = 0;
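
last_wakeup and avg_overlap feed the affine-wakeup heuristic: avg_overlap is a decaying average of how long a task keeps running after waking another task, and short overlaps suggest a synchronous waker/wakee pair worth keeping on one CPU. A hypothetical sketch of that style of update (the actual update is done in the wakeup/dequeue paths elsewhere, not in __sched_fork()):

static void update_overlap_sketch(struct sched_entity *se)
{
	if (se->last_wakeup) {
		u64 sample = se->sum_exec_runtime - se->last_wakeup;

		/* decaying average: avg += (sample - avg) / 8 */
		se->avg_overlap += (s64)(sample - se->avg_overlap) >> 3;
		se->last_wakeup = 0;
	}
}

__sched_fork() zeroes both fields so a freshly forked task starts with no overlap history.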
@@ -3877,7 +3925,7 @@ need_resched_nonpreemptible:
 
 	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
 		if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-				unlikely(signal_pending(prev)))) {
+				signal_pending(prev))) {
 			prev->state = TASK_RUNNING;
 		} else {
 			deactivate_task(rq, prev, 1);
@@ -6802,6 +6850,10 @@ static int ndoms_cur; /* number of sched domains in 'doms_cur' */
  */
 static cpumask_t fallback_doms;
 
+void __attribute__((weak)) arch_update_cpu_topology(void)
+{
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
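
The weak definition is a no-op default: an architecture that can detect topology changes provides a strong definition of the same name, and the linker prefers it over the weak one. A hypothetical arch-side override (not part of this diff) would look like:

/* arch override sketch: refresh cpu/core/package maps before domains are built */
void arch_update_cpu_topology(void)
{
	/* e.g. re-read firmware- or hypervisor-provided topology here */
}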
@@ -6811,6 +6863,7 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 {
 	int err;
 
+	arch_update_cpu_topology();
 	ndoms_cur = 1;
 	doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
 	if (!doms_cur)
@@ -6915,7 +6968,7 @@ match2:
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-static int arch_reinit_sched_domains(void)
+int arch_reinit_sched_domains(void)
 {
 	int err;
 
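
Dropping static gives the rebuild entry point external linkage, so code outside kernel/sched.c can request a sched-domain rebuild after a topology change, given a matching extern declaration in a shared header (assumed below; the declaration and caller are illustrative, not part of this diff):

extern int arch_reinit_sched_domains(void);	/* assumed header declaration */

static int topology_changed_sketch(void)
{
	/* tear down and rebuild the scheduler domains; returns 0 on success */
	return arch_reinit_sched_domains();
}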