diff options
author    | Paul Gortmaker <paul.gortmaker@windriver.com> | 2013-04-19 15:10:50 -0400
committer | Ingo Molnar <mingo@kernel.org> | 2013-05-07 07:14:51 -0400
commit    | 8527632dc95472adb571701e852479531c0567a2 (patch)
tree      | 658d9237beda390534cae46558b9c3e1adf4bf23 /kernel/sched
parent    | 45ceebf77653975815d82fcf7cec0a164215ae11 (diff)
sched: Move update_load_*() methods from sched.h to fair.c
These inlines are only used by kernel/sched/fair.c so they do
not need to be present in the main kernel/sched/sched.h file.
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Link: http://lkml.kernel.org/r/1366398650-31599-3-git-send-email-paul.gortmaker@windriver.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/fair.c  | 18
-rw-r--r-- | kernel/sched/sched.h | 18
2 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c61a614465c8..08a554dd3e90 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -113,6 +113,24 @@ unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL;
113 | unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; | 113 | unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; |
114 | #endif | 114 | #endif |
115 | 115 | ||
116 | static inline void update_load_add(struct load_weight *lw, unsigned long inc) | ||
117 | { | ||
118 | lw->weight += inc; | ||
119 | lw->inv_weight = 0; | ||
120 | } | ||
121 | |||
122 | static inline void update_load_sub(struct load_weight *lw, unsigned long dec) | ||
123 | { | ||
124 | lw->weight -= dec; | ||
125 | lw->inv_weight = 0; | ||
126 | } | ||
127 | |||
128 | static inline void update_load_set(struct load_weight *lw, unsigned long w) | ||
129 | { | ||
130 | lw->weight = w; | ||
131 | lw->inv_weight = 0; | ||
132 | } | ||
133 | |||
116 | /* | 134 | /* |
117 | * Increase the granularity value when there are more CPUs, | 135 | * Increase the granularity value when there are more CPUs, |
118 | * because with more CPUs the 'effective latency' as visible | 136 | * because with more CPUs the 'effective latency' as visible |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a38ee0a0650e..f1f6256c1224 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -892,24 +892,6 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev)
892 | #define WF_FORK 0x02 /* child wakeup after fork */ | 892 | #define WF_FORK 0x02 /* child wakeup after fork */ |
893 | #define WF_MIGRATED 0x4 /* internal use, task got migrated */ | 893 | #define WF_MIGRATED 0x4 /* internal use, task got migrated */ |
894 | 894 | ||
895 | static inline void update_load_add(struct load_weight *lw, unsigned long inc) | ||
896 | { | ||
897 | lw->weight += inc; | ||
898 | lw->inv_weight = 0; | ||
899 | } | ||
900 | |||
901 | static inline void update_load_sub(struct load_weight *lw, unsigned long dec) | ||
902 | { | ||
903 | lw->weight -= dec; | ||
904 | lw->inv_weight = 0; | ||
905 | } | ||
906 | |||
907 | static inline void update_load_set(struct load_weight *lw, unsigned long w) | ||
908 | { | ||
909 | lw->weight = w; | ||
910 | lw->inv_weight = 0; | ||
911 | } | ||
912 | |||
913 | /* | 895 | /* |
914 | * To aid in avoiding the subversion of "niceness" due to uneven distribution | 896 | * To aid in avoiding the subversion of "niceness" due to uneven distribution |
915 | * of tasks with abnormal "nice" values across CPUs the contribution that | 897 | * of tasks with abnormal "nice" values across CPUs the contribution that |