about | summary | refs | log | tree | commit | diff | stats
path: root/kernel
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2009-05-02 14:08:52 -0400
committerThomas Gleixner <tglx@linutronix.de>2009-05-15 09:32:45 -0400
commit2d02494f5a90f2e4b3c4c6acc85ec94674cdc431 (patch)
tree8032438de5b55282976583b111d02d9379ff3966 /kernel
parentdce48a84adf1806676319f6f480e30a6daa012f9 (diff)
sched, timers: cleanup avenrun users
avenrun is a rough estimate so we don't have to worry about consistency of the three avenrun values. Remove the xtime lock dependency and provide a function to scale the values. Cleanup the users.

[ Impact: cleanup ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched.c15
-rw-r--r--kernel/timer.c32
2 files changed, 21 insertions, 26 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index f4eb88153bd1..497c09ba61e7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2868,6 +2868,21 @@ static unsigned long calc_load_update;
 unsigned long avenrun[3];
 EXPORT_SYMBOL(avenrun);
 
+/**
+ * get_avenrun - get the load average array
+ * @loads:	pointer to dest load array
+ * @offset:	offset to add
+ * @shift:	shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+	loads[0] = (avenrun[0] + offset) << shift;
+	loads[1] = (avenrun[1] + offset) << shift;
+	loads[2] = (avenrun[2] + offset) << shift;
+}
+
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
diff --git a/kernel/timer.c b/kernel/timer.c
index 6a21d7af9620..a26ed294f938 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1356,37 +1356,17 @@ int do_sysinfo(struct sysinfo *info)
 {
 	unsigned long mem_total, sav_total;
 	unsigned int mem_unit, bitcount;
-	unsigned long seq;
+	struct timespec tp;
 
 	memset(info, 0, sizeof(struct sysinfo));
 
-	do {
-		struct timespec tp;
-		seq = read_seqbegin(&xtime_lock);
-
-		/*
-		 * This is annoying. The below is the same thing
-		 * posix_get_clock_monotonic() does, but it wants to
-		 * take the lock which we want to cover the loads stuff
-		 * too.
-		 */
-
-		getnstimeofday(&tp);
-		tp.tv_sec += wall_to_monotonic.tv_sec;
-		tp.tv_nsec += wall_to_monotonic.tv_nsec;
-		monotonic_to_bootbased(&tp);
-		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
-			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
-			tp.tv_sec++;
-		}
-		info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
+	ktime_get_ts(&tp);
+	monotonic_to_bootbased(&tp);
+	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
 
-		info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
-		info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
-		info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
+	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
 
-		info->procs = nr_threads;
-	} while (read_seqretry(&xtime_lock, seq));
+	info->procs = nr_threads;
 
 	si_meminfo(info);
 	si_swapinfo(info);