Diffstat (limited to 'kernel/sched.c')
-rw-r--r--  kernel/sched.c  84
1 file changed, 56 insertions(+), 28 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index a9ecac398bb9..dd153d6f8a04 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -667,9 +667,13 @@ static int effective_prio(task_t *p)
 /*
  * __activate_task - move a task to the runqueue.
  */
-static inline void __activate_task(task_t *p, runqueue_t *rq)
+static void __activate_task(task_t *p, runqueue_t *rq)
 {
-	enqueue_task(p, rq->active);
+	prio_array_t *target = rq->active;
+
+	if (batch_task(p))
+		target = rq->expired;
+	enqueue_task(p, target);
 	rq->nr_running++;
 }
 
@@ -688,7 +692,7 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
 	unsigned long long __sleep_time = now - p->timestamp;
 	unsigned long sleep_time;
 
-	if (unlikely(p->policy == SCHED_BATCH))
+	if (batch_task(p))
 		sleep_time = 0;
 	else {
 		if (__sleep_time > NS_MAX_SLEEP_AVG)
@@ -700,21 +704,25 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
 	if (likely(sleep_time > 0)) {
 		/*
 		 * User tasks that sleep a long time are categorised as
-		 * idle and will get just interactive status to stay active &
-		 * prevent them suddenly becoming cpu hogs and starving
-		 * other processes.
+		 * idle. They will only have their sleep_avg increased to a
+		 * level that makes them just interactive priority to stay
+		 * active yet prevent them suddenly becoming cpu hogs and
+		 * starving other processes.
 		 */
-		if (p->mm && p->activated != -1 &&
-			sleep_time > INTERACTIVE_SLEEP(p)) {
-				p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
-						DEF_TIMESLICE);
+		if (p->mm && sleep_time > INTERACTIVE_SLEEP(p)) {
+				unsigned long ceiling;
+
+				ceiling = JIFFIES_TO_NS(MAX_SLEEP_AVG -
+					DEF_TIMESLICE);
+				if (p->sleep_avg < ceiling)
+					p->sleep_avg = ceiling;
 		} else {
 			/*
 			 * Tasks waking from uninterruptible sleep are
 			 * limited in their sleep_avg rise as they
 			 * are likely to be waiting on I/O
 			 */
-			if (p->activated == -1 && p->mm) {
+			if (p->sleep_type == SLEEP_NONINTERACTIVE && p->mm) {
 				if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
 					sleep_time = 0;
 				else if (p->sleep_avg + sleep_time >=
@@ -769,7 +777,7 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
 	 * This checks to make sure it's not an uninterruptible task
 	 * that is now waking up.
 	 */
-	if (!p->activated) {
+	if (p->sleep_type == SLEEP_NORMAL) {
 		/*
 		 * Tasks which were woken up by interrupts (ie. hw events)
 		 * are most likely of interactive nature. So we give them
@@ -778,13 +786,13 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
 		 * on a CPU, first time around:
 		 */
 		if (in_interrupt())
-			p->activated = 2;
+			p->sleep_type = SLEEP_INTERRUPTED;
 		else {
 			/*
 			 * Normal first-time wakeups get a credit too for
 			 * on-runqueue time, but it will be weighted down:
 			 */
-			p->activated = 1;
+			p->sleep_type = SLEEP_INTERACTIVE;
 		}
 	}
 	p->timestamp = now;
@@ -1272,19 +1280,19 @@ out_activate:
 			 * Tasks on involuntary sleep don't earn
 			 * sleep_avg beyond just interactive state.
 			 */
-			p->activated = -1;
-		}
+			p->sleep_type = SLEEP_NONINTERACTIVE;
+		} else
 
 	/*
 	 * Tasks that have marked their sleep as noninteractive get
-	 * woken up without updating their sleep average. (i.e. their
-	 * sleep is handled in a priority-neutral manner, no priority
-	 * boost and no penalty.)
+	 * woken up with their sleep average not weighted in an
+	 * interactive way.
 	 */
 	if (old_state & TASK_NONINTERACTIVE)
-		__activate_task(p, rq);
-	else
-		activate_task(p, rq, cpu == this_cpu);
+		p->sleep_type = SLEEP_NONINTERACTIVE;
+
+
+	activate_task(p, rq, cpu == this_cpu);
 	/*
 	 * Sync wakeups (i.e. those types of wakeups where the waker
 	 * has indicated that it will leave the CPU in short order)
@@ -1658,6 +1666,21 @@ unsigned long nr_iowait(void)
 	return sum;
 }
 
+unsigned long nr_active(void)
+{
+	unsigned long i, running = 0, uninterruptible = 0;
+
+	for_each_online_cpu(i) {
+		running += cpu_rq(i)->nr_running;
+		uninterruptible += cpu_rq(i)->nr_uninterruptible;
+	}
+
+	if (unlikely((long)uninterruptible < 0))
+		uninterruptible = 0;
+
+	return running + uninterruptible;
+}
+
 #ifdef CONFIG_SMP
 
 /*
@@ -2860,6 +2883,12 @@ EXPORT_SYMBOL(sub_preempt_count);
 
 #endif
 
+static inline int interactive_sleep(enum sleep_type sleep_type)
+{
+	return (sleep_type == SLEEP_INTERACTIVE ||
+		sleep_type == SLEEP_INTERRUPTED);
+}
+
 /*
  * schedule() is the main scheduler function.
  */
@@ -2983,12 +3012,12 @@ go_idle:
 	queue = array->queue + idx;
 	next = list_entry(queue->next, task_t, run_list);
 
-	if (!rt_task(next) && next->activated > 0) {
+	if (!rt_task(next) && interactive_sleep(next->sleep_type)) {
 		unsigned long long delta = now - next->timestamp;
 		if (unlikely((long long)(now - next->timestamp) < 0))
 			delta = 0;
 
-		if (next->activated == 1)
+		if (next->sleep_type == SLEEP_INTERACTIVE)
 			delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
 
 		array = next->array;
@@ -2998,10 +3027,9 @@ go_idle:
 			dequeue_task(next, array);
 			next->prio = new_prio;
 			enqueue_task(next, array);
-		} else
-			requeue_task(next, array);
+		}
 	}
-	next->activated = 0;
+	next->sleep_type = SLEEP_NORMAL;
 switch_tasks:
 	if (next == rq->idle)
 		schedstat_inc(rq, sched_goidle);
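
Note: the enum sleep_type values and the batch_task() helper used in the new code above are introduced by the same patch in include/linux/sched.h, so they do not appear here (the diffstat is limited to kernel/sched.c). The following is a sketch of those definitions, inferred from the one-to-one replacements of p->activated values in the hunks above; treat it as an approximation, not part of this diff:

	/* Old p->activated values mapped onto the new enum, as suggested by
	 * the replacements in the hunks above. */
	enum sleep_type {
		SLEEP_NORMAL,		/* replaces p->activated == 0  */
		SLEEP_NONINTERACTIVE,	/* replaces p->activated == -1 */
		SLEEP_INTERACTIVE,	/* replaces p->activated == 1  */
		SLEEP_INTERRUPTED,	/* replaces p->activated == 2  */
	};

	/* batch_task() wraps the SCHED_BATCH policy test that recalc_task_prio()
	 * used to open-code; __activate_task() uses it to queue batch tasks
	 * straight onto the expired array. */
	#define batch_task(p)	(unlikely((p)->policy == SCHED_BATCH))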