Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched_cpupri.c | 15
-rw-r--r--  kernel/sched_fair.c   | 32
2 files changed, 33 insertions(+), 14 deletions(-)
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index e6c251790dd..d014efbf947 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -81,8 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
-		if (lowest_mask)
+		if (lowest_mask) {
 			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+
+			/*
+			 * We have to ensure that we have at least one bit
+			 * still set in the array, since the map could have
+			 * been concurrently emptied between the first and
+			 * second reads of vec->mask. If we hit this
+			 * condition, simply act as though we never hit this
+			 * priority level and continue on.
+			 */
+			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
+				continue;
+		}
+
 		return 1;
 	}
 
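The comment added above describes a classic two-read race: vec->mask is read once by cpumask_any_and() for the overlap test and again by cpumask_and() to build the result, and another CPU can empty the map in between, leaving lowest_mask with no bits set. Here is a minimal userspace sketch of the same read/recheck pattern; the names (vec_mask, find_lowest) and types are hypothetical stand-ins for illustration, with a 64-bit atomic word playing the role of the kernel's cpumask, not the real API.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Shared mask that a hypothetical updater thread may clear at any time. */
static _Atomic uint64_t vec_mask;

static int find_lowest(uint64_t cpus_allowed, uint64_t *lowest)
{
	/* First read: cheap emptiness test, like cpumask_any_and(). */
	if ((cpus_allowed & atomic_load(&vec_mask)) == 0)
		return 0;		/* no overlap, skip this level */

	/* Second read: the mask may have been emptied since the test. */
	*lowest = cpus_allowed & atomic_load(&vec_mask);

	/*
	 * Recheck, mirroring the patch: if the intersection is now
	 * empty, act as though this priority level never matched.
	 */
	if (*lowest == 0)
		return 0;

	return 1;
}

int main(void)
{
	uint64_t lowest;

	atomic_store(&vec_mask, 0x5);	/* "CPUs" 0 and 2 */
	if (find_lowest(0x6, &lowest))	/* "CPUs" 1 and 2 */
		printf("lowest mask: %#llx\n", (unsigned long long)lowest);
	return 0;
}

The recheck costs one extra test on an intersection that has already been computed, and it lets the caller keep walking the priority vector instead of acting on an empty mask.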
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9ffb2b2ceba..652e8bdef9a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -611,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHEDSTATS
+	struct task_struct *tsk = NULL;
+
+	if (entity_is_task(se))
+		tsk = task_of(se);
+
 	if (se->sleep_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
-		struct task_struct *tsk = task_of(se);
 
 		if ((s64)delta < 0)
 			delta = 0;
@@ -624,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		se->sleep_start = 0;
 		se->sum_sleep_runtime += delta;
 
-		account_scheduler_latency(tsk, delta >> 10, 1);
+		if (tsk)
+			account_scheduler_latency(tsk, delta >> 10, 1);
 	}
 	if (se->block_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
-		struct task_struct *tsk = task_of(se);
 
 		if ((s64)delta < 0)
 			delta = 0;
@@ -639,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		se->block_start = 0;
 		se->sum_sleep_runtime += delta;
 
-		/*
-		 * Blocking time is in units of nanosecs, so shift by 20 to
-		 * get a milliseconds-range estimation of the amount of
-		 * time that the task spent sleeping:
-		 */
-		if (unlikely(prof_on == SLEEP_PROFILING)) {
-
-			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
-				     delta >> 20);
+		if (tsk) {
+			/*
+			 * Blocking time is in units of nanosecs, so shift by
+			 * 20 to get a milliseconds-range estimation of the
+			 * amount of time that the task spent sleeping:
+			 */
+			if (unlikely(prof_on == SLEEP_PROFILING)) {
+				profile_hits(SLEEP_PROFILING,
+						(void *)get_wchan(tsk),
+						delta >> 20);
+			}
+			account_scheduler_latency(tsk, delta >> 10, 0);
 		}
-		account_scheduler_latency(tsk, delta >> 10, 0);
 	}
 #endif
 }
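The sched_fair.c change guards against scheduling entities that do not represent tasks: with group scheduling, a sched_entity can stand for a task group, and task_of() (a container_of()-style downcast) is only meaningful when entity_is_task(se) holds. The patch therefore resolves tsk once, under that guard, and NULL-checks it before the task-only calls (account_scheduler_latency(), get_wchan()). A minimal standalone sketch of the guarded-downcast pattern, using hypothetical structs rather than the kernel's real ones:

#include <stddef.h>
#include <stdio.h>

struct sched_entity {
	int is_task;		/* stands in for entity_is_task() */
};

struct task_struct {
	const char *comm;
	struct sched_entity se;
};

/* container_of-style downcast; valid only when the entity is a task. */
static struct task_struct *task_of(struct sched_entity *se)
{
	return (struct task_struct *)((char *)se -
				      offsetof(struct task_struct, se));
}

static void enqueue_sleeper(struct sched_entity *se, unsigned long long delta)
{
	struct task_struct *tsk = NULL;

	if (se->is_task)
		tsk = task_of(se);

	/* Per-entity statistics would be updated unconditionally here... */

	/* ...but task-only accounting is skipped for group entities. */
	if (tsk)
		printf("%s slept for %llu us\n", tsk->comm, delta >> 10);
}

int main(void)
{
	struct task_struct t = { .comm = "demo", .se = { .is_task = 1 } };
	struct sched_entity group_se = { .is_task = 0 };

	enqueue_sleeper(&t.se, 2048000);	/* task: prints */
	enqueue_sleeper(&group_se, 2048000);	/* group: skipped */
	return 0;
}

The shifts in the patched code are cheap unit conversions: delta is in nanoseconds, so delta >> 10 divides by 1024 for a microseconds-range value and delta >> 20 divides by 2^20 for the milliseconds-range estimate the moved comment describes.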