Diffstat (limited to 'kernel/sched.c')
 kernel/sched.c | 138 ++++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 120 insertions(+), 18 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 5cd069b77fd7..6e5a89ba4f76 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -671,6 +671,108 @@ static inline void resched_task(struct task_struct *p)
 
 #include "sched_stats.h"
 
+static u64 div64_likely32(u64 divident, unsigned long divisor)
+{
+#if BITS_PER_LONG == 32
+        if (likely(divident <= 0xffffffffULL))
+                return (u32)divident / divisor;
+        do_div(divident, divisor);
+
+        return divident;
+#else
+        return divident / divisor;
+#endif
+}
+
+#if BITS_PER_LONG == 32
+# define WMULT_CONST    (~0UL)
+#else
+# define WMULT_CONST    (1UL << 32)
+#endif
+
+#define WMULT_SHIFT     32
+
+static inline unsigned long
+calc_delta_mine(unsigned long delta_exec, unsigned long weight,
+                struct load_weight *lw)
+{
+        u64 tmp;
+
+        if (unlikely(!lw->inv_weight))
+                lw->inv_weight = WMULT_CONST / lw->weight;
+
+        tmp = (u64)delta_exec * weight;
+        /*
+         * Check whether we'd overflow the 64-bit multiplication:
+         */
+        if (unlikely(tmp > WMULT_CONST)) {
+                tmp = ((tmp >> WMULT_SHIFT/2) * lw->inv_weight)
+                                >> (WMULT_SHIFT/2);
+        } else {
+                tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT;
+        }
+
+        return (unsigned long)min(tmp, (u64)sysctl_sched_runtime_limit);
+}
+
+static inline unsigned long
+calc_delta_fair(unsigned long delta_exec, struct load_weight *lw)
+{
+        return calc_delta_mine(delta_exec, NICE_0_LOAD, lw);
+}
+
+static void update_load_add(struct load_weight *lw, unsigned long inc)
+{
+        lw->weight += inc;
+        lw->inv_weight = 0;
+}
+
+static void update_load_sub(struct load_weight *lw, unsigned long dec)
+{
+        lw->weight -= dec;
+        lw->inv_weight = 0;
+}
+
+static void __update_curr_load(struct rq *rq, struct load_stat *ls)
+{
+        if (rq->curr != rq->idle && ls->load.weight) {
+                ls->delta_exec += ls->delta_stat;
+                ls->delta_fair += calc_delta_fair(ls->delta_stat, &ls->load);
+                ls->delta_stat = 0;
+        }
+}
+
+/*
+ * Update delta_exec, delta_fair fields for rq.
+ *
+ * delta_fair clock advances at a rate inversely proportional to
+ * total load (rq->ls.load.weight) on the runqueue, while
+ * delta_exec advances at the same rate as wall-clock (provided
+ * cpu is not idle).
+ *
+ * delta_exec / delta_fair is a measure of the (smoothened) load on this
+ * runqueue over any given interval. This (smoothened) load is used
+ * during load balance.
+ *
+ * This function is called /before/ updating rq->ls.load
+ * and when switching tasks.
+ */
+static void update_curr_load(struct rq *rq, u64 now)
+{
+        struct load_stat *ls = &rq->ls;
+        u64 start;
+
+        start = ls->load_update_start;
+        ls->load_update_start = now;
+        ls->delta_stat += now - start;
+        /*
+         * Stagger updates to ls->delta_fair. Very frequent updates
+         * can be expensive.
+         */
+        if (ls->delta_stat >= sysctl_sched_stat_granularity)
+                __update_curr_load(rq, ls);
+}
+
 /*
  * To aid in avoiding the subversion of "niceness" due to uneven distribution
  * of tasks with abnormal "nice" values across CPUs the contribution that
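
The calc_delta_mine() helper added in the hunk above avoids a 64-bit division
on every accounting step: it caches an inverse weight (WMULT_CONST / weight)
in lw->inv_weight and turns the division into a multiply followed by a 32-bit
shift. A stand-alone, user-space sketch of that trick follows; the ex_-prefixed
names and the sample weights are assumptions made for illustration only,
WMULT_CONST is taken as 2^32 (the 64-bit definition in the patch), and the
sketch omits the overflow branch calc_delta_mine() takes when the intermediate
product exceeds WMULT_CONST.

#include <stdio.h>
#include <stdint.h>

#define EX_WMULT_SHIFT  32

int main(void)
{
        /* Sample values, not taken from the patch. */
        uint64_t delta_exec = 4000000;          /* 4 ms of runtime, in ns */
        uint64_t weight = 1024;                 /* weight of one nice-0 task */
        uint64_t queue_weight = 3072;           /* total weight on the queue */

        /* Cached reciprocal, like lw->inv_weight = WMULT_CONST / lw->weight */
        uint64_t inv_weight = ((uint64_t)1 << EX_WMULT_SHIFT) / queue_weight;

        /* Exact result: one 64-bit division per call. */
        uint64_t exact = delta_exec * weight / queue_weight;

        /* Approximation: multiply by the cached reciprocal, then shift. */
        uint64_t approx = (delta_exec * weight * inv_weight) >> EX_WMULT_SHIFT;

        printf("exact: %llu  approx: %llu\n",
               (unsigned long long)exact, (unsigned long long)approx);
        return 0;
}

The approximation is off by at most a small rounding error because inv_weight
is truncated; the WMULT_SHIFT/2 branch in calc_delta_mine() splits the shift
into two halves around the multiply so the same idea does not overflow 64 bits
for large products.
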
@@ -693,24 +795,6 @@ static inline void resched_task(struct task_struct *p)
 #define RTPRIO_TO_LOAD_WEIGHT(rp) \
         (PRIO_TO_LOAD_WEIGHT(MAX_RT_PRIO) + LOAD_WEIGHT(rp))
 
-static void set_load_weight(struct task_struct *p)
-{
-        if (task_has_rt_policy(p)) {
-#ifdef CONFIG_SMP
-                if (p == task_rq(p)->migration_thread)
-                        /*
-                         * The migration thread does the actual balancing.
-                         * Giving its load any weight will skew balancing
-                         * adversely.
-                         */
-                        p->load_weight = 0;
-                else
-#endif
-                        p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
-        } else
-                p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
-}
-
 static inline void
 inc_raw_weighted_load(struct rq *rq, const struct task_struct *p)
 {
@@ -735,6 +819,24 @@ static inline void dec_nr_running(struct task_struct *p, struct rq *rq)
         dec_raw_weighted_load(rq, p);
 }
 
+static void set_load_weight(struct task_struct *p)
+{
+        if (task_has_rt_policy(p)) {
+#ifdef CONFIG_SMP
+                if (p == task_rq(p)->migration_thread)
+                        /*
+                         * The migration thread does the actual balancing.
+                         * Giving its load any weight will skew balancing
+                         * adversely.
+                         */
+                        p->load_weight = 0;
+                else
+#endif
+                        p->load_weight = RTPRIO_TO_LOAD_WEIGHT(p->rt_priority);
+        } else
+                p->load_weight = PRIO_TO_LOAD_WEIGHT(p->static_prio);
+}
+
 /*
  * Adding/removing a task to/from a priority array:
  */
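
A worked example of the bookkeeping above: the comment on update_curr_load()
says the delta_fair clock advances at a rate inversely proportional to the
total load queued on the runqueue, while delta_exec tracks wall-clock time.
The user-space sketch below assumes NICE_0_LOAD is 1024 and uses made-up
ex_-prefixed names; it illustrates the ratio and is not code from this patch.

#include <stdio.h>
#include <stdint.h>

#define EX_NICE_0_LOAD  1024

/* Same scaling as calc_delta_fair(), using a plain division for clarity. */
static uint64_t ex_delta_fair(uint64_t delta_exec, uint64_t queue_weight)
{
        return delta_exec * EX_NICE_0_LOAD / queue_weight;
}

int main(void)
{
        uint64_t delta_exec = 3000000;  /* 3 ms of wall-clock runtime, in ns */

        /* One nice-0 task queued: the fair clock keeps pace with wall-clock. */
        printf("weight 1024: delta_fair = %llu ns\n",
               (unsigned long long)ex_delta_fair(delta_exec, 1024));

        /* Three nice-0 tasks queued: the fair clock advances at 1/3 the rate. */
        printf("weight 3072: delta_fair = %llu ns\n",
               (unsigned long long)ex_delta_fair(delta_exec, 3072));
        return 0;
}

In the patch itself the scaling goes through calc_delta_mine(), so the
division is replaced by the cached-reciprocal multiply shown earlier, and
update_curr_load() only folds delta_stat into delta_fair once it crosses
sysctl_sched_stat_granularity, keeping the update cheap.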