author     Linus Torvalds <torvalds@linux-foundation.org>  2009-06-10 18:32:59 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2009-06-10 18:32:59 -0400
commit     99e97b860e14c64760855198e91d1166697131a7 (patch)
tree       fadc8368c3f784bff92fba82d983e7861559cf9d /kernel/timer.c
parent     82782ca77d1bfb32b0334cce40a25b91bd8ec016 (diff)
parent     f04d82b7e0c63d0251f9952a537a4bc4d73aa1a9 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
sched: fix typo in sched-rt-group.txt file
ftrace: fix typo about map of kernel priority in ftrace.txt file.
sched: properly define the sched_group::cpumask and sched_domain::span fields
sched, timers: cleanup avenrun users
sched, timers: move calc_load() to scheduler
sched: Don't export sched_mc_power_savings on multi-socket single core system
sched: emit thread info flags with stack trace
sched: rt: document the risk of small values in the bandwidth settings
sched: Replace first_cpu() with cpumask_first() in ILB nomination code
sched: remove extra call overhead for schedule()
sched: use group_first_cpu() instead of cpumask_first(sched_group_cpus())
wait: don't use __wake_up_common()
sched: Nominate a power-efficient ilb in select_nohz_balancer()
sched: Nominate idle load balancer from a semi-idle package.
sched: remove redundant hierarchy walk in check_preempt_wakeup
Diffstat (limited to 'kernel/timer.c')
-rw-r--r--  kernel/timer.c  86
1 files changed, 8 insertions, 78 deletions
diff --git a/kernel/timer.c b/kernel/timer.c
index cffffad01c31..a26ed294f938 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1123,47 +1123,6 @@ void update_process_times(int user_tick)
 }
 
 /*
- * Nr of active tasks - counted in fixed-point numbers
- */
-static unsigned long count_active_tasks(void)
-{
-	return nr_active() * FIXED_1;
-}
-
-/*
- * Hmm.. Changed this, as the GNU make sources (load.c) seems to
- * imply that avenrun[] is the standard name for this kind of thing.
- * Nothing else seems to be standardized: the fractional size etc
- * all seem to differ on different machines.
- *
- * Requires xtime_lock to access.
- */
-unsigned long avenrun[3];
-
-EXPORT_SYMBOL(avenrun);
-
-/*
- * calc_load - given tick count, update the avenrun load estimates.
- * This is called while holding a write_lock on xtime_lock.
- */
-static inline void calc_load(unsigned long ticks)
-{
-	unsigned long active_tasks; /* fixed-point */
-	static int count = LOAD_FREQ;
-
-	count -= ticks;
-	if (unlikely(count < 0)) {
-		active_tasks = count_active_tasks();
-		do {
-			CALC_LOAD(avenrun[0], EXP_1, active_tasks);
-			CALC_LOAD(avenrun[1], EXP_5, active_tasks);
-			CALC_LOAD(avenrun[2], EXP_15, active_tasks);
-			count += LOAD_FREQ;
-		} while (count < 0);
-	}
-}
-
-/*
  * This function runs timers and the timer-tq in bottom half context.
  */
 static void run_timer_softirq(struct softirq_action *h)
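The block removed above is the classic avenrun update: every LOAD_FREQ ticks the current number of runnable tasks is folded into three exponentially decaying averages with CALC_LOAD. After this merge that work happens on the scheduler side via calc_global_load(). Below is a minimal user-space sketch of the same fixed-point step, assuming the kernel's historical FSHIFT/EXP_* constants (they are not part of this diff, so treat the values as illustrative):

	/*
	 * Sketch of the fixed-point decay the removed calc_load() performed.
	 * Constants mirror the era's include/linux/sched.h definitions
	 * (assumed here, not taken from this diff).
	 */
	#include <stdio.h>

	#define FSHIFT   11                 /* bits of fractional precision */
	#define FIXED_1  (1 << FSHIFT)      /* 1.0 in fixed point */
	#define EXP_1    1884               /* 1/exp(5s/1min) in fixed point */
	#define EXP_5    2014               /* 1/exp(5s/5min) */
	#define EXP_15   2037               /* 1/exp(5s/15min) */

	/* One update step: load = load*exp + active*(1 - exp), in fixed point */
	static unsigned long calc_load_step(unsigned long load, unsigned long exp,
					    unsigned long active)
	{
		load *= exp;
		load += active * (FIXED_1 - exp);
		return load >> FSHIFT;
	}

	int main(void)
	{
		unsigned long avenrun[3] = { 0, 0, 0 };
		unsigned long active = 3 * FIXED_1;	/* pretend 3 runnable tasks */

		/* simulate one minute of 5-second sampling intervals */
		for (int i = 0; i < 12; i++) {
			avenrun[0] = calc_load_step(avenrun[0], EXP_1, active);
			avenrun[1] = calc_load_step(avenrun[1], EXP_5, active);
			avenrun[2] = calc_load_step(avenrun[2], EXP_15, active);
		}
		printf("%lu.%02lu %lu.%02lu %lu.%02lu\n",
		       avenrun[0] >> FSHIFT, ((avenrun[0] & (FIXED_1 - 1)) * 100) >> FSHIFT,
		       avenrun[1] >> FSHIFT, ((avenrun[1] & (FIXED_1 - 1)) * 100) >> FSHIFT,
		       avenrun[2] >> FSHIFT, ((avenrun[2] & (FIXED_1 - 1)) * 100) >> FSHIFT);
		return 0;
	}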
@@ -1187,16 +1146,6 @@ void run_local_timers(void)
 }
 
 /*
- * Called by the timer interrupt. xtime_lock must already be taken
- * by the timer IRQ!
- */
-static inline void update_times(unsigned long ticks)
-{
-	update_wall_time();
-	calc_load(ticks);
-}
-
-/*
  * The 64-bit jiffies value is not atomic - you MUST NOT read it
  * without sampling the sequence number in xtime_lock.
  * jiffies is defined in the linker script...
@@ -1205,7 +1154,8 @@ static inline void update_times(unsigned long ticks)
 void do_timer(unsigned long ticks)
 {
 	jiffies_64 += ticks;
-	update_times(ticks);
+	update_wall_time();
+	calc_global_load();
 }
 
 #ifdef __ARCH_WANT_SYS_ALARM
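Note the shape of the replacement: the removed calc_load(ticks) needed the caller to pass the number of elapsed ticks and kept a static countdown, whereas calc_global_load() takes no argument, so the scheduler side has to track its own next-update deadline. A rough user-space sketch of that rate-limiting pattern follows; every name and constant in it is illustrative, not the scheduler's actual internals:

	/*
	 * Illustrative rate limiting for a hook called on every tick:
	 * compare "now" against a stored deadline instead of decrementing
	 * a countdown by the number of elapsed ticks.
	 */
	#include <stdio.h>

	#define HZ		100		/* hypothetical tick rate for the simulation */
	#define LOAD_FREQ	(5 * HZ)	/* sample every 5 seconds of ticks */

	static unsigned long next_update = LOAD_FREQ;

	/* Called once per simulated tick; does real work only when due. */
	static void sample_if_due(unsigned long now)
	{
		if ((long)(now - next_update) < 0)
			return;				/* not due yet */

		printf("sampling load at tick %lu\n", now);
		next_update += LOAD_FREQ;		/* arm the next 5 s sample */
	}

	int main(void)
	{
		for (unsigned long tick = 1; tick <= 3 * LOAD_FREQ; tick++)
			sample_if_due(tick);
		return 0;
	}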
@@ -1406,37 +1356,17 @@ int do_sysinfo(struct sysinfo *info)
 {
 	unsigned long mem_total, sav_total;
 	unsigned int mem_unit, bitcount;
-	unsigned long seq;
+	struct timespec tp;
 
 	memset(info, 0, sizeof(struct sysinfo));
 
-	do {
-		struct timespec tp;
-		seq = read_seqbegin(&xtime_lock);
-
-		/*
-		 * This is annoying. The below is the same thing
-		 * posix_get_clock_monotonic() does, but it wants to
-		 * take the lock which we want to cover the loads stuff
-		 * too.
-		 */
-
-		getnstimeofday(&tp);
-		tp.tv_sec += wall_to_monotonic.tv_sec;
-		tp.tv_nsec += wall_to_monotonic.tv_nsec;
-		monotonic_to_bootbased(&tp);
-		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
-			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
-			tp.tv_sec++;
-		}
-		info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
+	ktime_get_ts(&tp);
+	monotonic_to_bootbased(&tp);
+	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
 
-		info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
-		info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
-		info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
+	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
 
 	info->procs = nr_threads;
-	} while (read_seqretry(&xtime_lock, seq));
 
 	si_meminfo(info);
 	si_swapinfo(info);
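For completeness, the values do_sysinfo() fills in here are what user space sees through sysinfo(2): uptime in seconds and loads[] as fixed-point numbers scaled by 2^SI_LOAD_SHIFT, i.e. 65536 per unit of load (the shift by SI_LOAD_SHIFT - FSHIFT above converts from the kernel's internal FSHIFT scaling). A small example reader, assuming the glibc <sys/sysinfo.h> wrapper:

	/* Print uptime, load averages, and process count via sysinfo(2). */
	#include <stdio.h>
	#include <sys/sysinfo.h>

	int main(void)
	{
		struct sysinfo si;

		if (sysinfo(&si) != 0) {
			perror("sysinfo");
			return 1;
		}

		/* loads[] is fixed point: 1 << SI_LOAD_SHIFT (65536) means a load of 1.00 */
		printf("uptime: %ld s, load: %.2f %.2f %.2f, procs: %u\n",
		       (long)si.uptime,
		       si.loads[0] / 65536.0,
		       si.loads[1] / 65536.0,
		       si.loads[2] / 65536.0,
		       (unsigned)si.procs);
		return 0;
	}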