diff options
 include/linux/sched.h | 25 -------------------------
 kernel/sched/sched.h  | 26 +++++++++++++++++++++++++-
 2 files changed, 25 insertions(+), 26 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e880d7d115ef..f8826d04fb12 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -756,31 +756,6 @@ enum cpu_idle_type {
 };
 
 /*
- * Increase resolution of nice-level calculations for 64-bit architectures.
- * The extra resolution improves shares distribution and load balancing of
- * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
- * hierarchies, especially on larger systems. This is not a user-visible change
- * and does not change the user-interface for setting shares/weights.
- *
- * We increase resolution only if we have enough bits to allow this increased
- * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
- * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
- * increased costs.
- */
-#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
-# define SCHED_LOAD_RESOLUTION	10
-# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
-# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
-#else
-# define SCHED_LOAD_RESOLUTION	0
-# define scale_load(w)		(w)
-# define scale_load_down(w)	(w)
-#endif
-
-#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
-#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
-
-/*
  * Increase resolution of cpu_power calculations
  */
 #define SCHED_POWER_SHIFT	10
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index cc03cfdf469f..709a30cdfd85 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -33,6 +33,31 @@ extern __read_mostly int scheduler_running;
  */
 #define NS_TO_JIFFIES(TIME)	((unsigned long)(TIME) / (NSEC_PER_SEC / HZ))
 
+/*
+ * Increase resolution of nice-level calculations for 64-bit architectures.
+ * The extra resolution improves shares distribution and load balancing of
+ * low-weight task groups (eg. nice +19 on an autogroup), deeper taskgroup
+ * hierarchies, especially on larger systems. This is not a user-visible change
+ * and does not change the user-interface for setting shares/weights.
+ *
+ * We increase resolution only if we have enough bits to allow this increased
+ * resolution (i.e. BITS_PER_LONG > 32). The costs for increasing resolution
+ * when BITS_PER_LONG <= 32 are pretty high and the returns do not justify the
+ * increased costs.
+ */
+#if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
+# define SCHED_LOAD_RESOLUTION	10
+# define scale_load(w)		((w) << SCHED_LOAD_RESOLUTION)
+# define scale_load_down(w)	((w) >> SCHED_LOAD_RESOLUTION)
+#else
+# define SCHED_LOAD_RESOLUTION	0
+# define scale_load(w)		(w)
+# define scale_load_down(w)	(w)
+#endif
+
+#define SCHED_LOAD_SHIFT	(10 + SCHED_LOAD_RESOLUTION)
+#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
+
 #define NICE_0_LOAD		SCHED_LOAD_SCALE
 #define NICE_0_SHIFT		SCHED_LOAD_SHIFT
 
@@ -784,7 +809,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
 }
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
-
 static inline void update_load_add(struct load_weight *lw, unsigned long inc)
 {
 	lw->weight += inc;