author     Thomas Gleixner <tglx@linutronix.de>  2009-05-02 14:08:52 -0400
committer  Thomas Gleixner <tglx@linutronix.de>  2009-05-15 09:32:45 -0400
commit     2d02494f5a90f2e4b3c4c6acc85ec94674cdc431
tree       8032438de5b55282976583b111d02d9379ff3966
parent     dce48a84adf1806676319f6f480e30a6daa012f9
sched, timers: cleanup avenrun users
avenrun is a rough estimate, so we don't have to worry about consistency
of the three avenrun values. Remove the xtime_lock dependency and provide
a function to scale the values. Clean up the users.

[ Impact: cleanup ]

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
-rw-r--r--  fs/proc/loadavg.c      | 18
-rw-r--r--  include/linux/sched.h  |  1
-rw-r--r--  kernel/sched.c         | 15
-rw-r--r--  kernel/timer.c         | 32
4 files changed, 28 insertions, 38 deletions
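For readers coming from older kernels, the shape of the change at a call
site can be sketched like this (an illustrative fragment that mirrors the
code removed and added below; it is not itself part of the commit):

    /* Before: each reader open-coded a seqlock retry loop */
    unsigned long load1, seq;
    do {
            seq = read_seqbegin(&xtime_lock);
            load1 = avenrun[0] + (FIXED_1/200);
    } while (read_seqretry(&xtime_lock, seq));

    /* After: one lockless call; the values are estimates anyway */
    unsigned long loads[3];
    get_avenrun(loads, FIXED_1/200, 0);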
diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c
index 9bca39cf99ee..1afa4dd4cae2 100644
--- a/fs/proc/loadavg.c
+++ b/fs/proc/loadavg.c
@@ -12,20 +12,14 @@
 
 static int loadavg_proc_show(struct seq_file *m, void *v)
 {
-        int a, b, c;
-        unsigned long seq;
+        unsigned long avnrun[3];
 
-        do {
-                seq = read_seqbegin(&xtime_lock);
-                a = avenrun[0] + (FIXED_1/200);
-                b = avenrun[1] + (FIXED_1/200);
-                c = avenrun[2] + (FIXED_1/200);
-        } while (read_seqretry(&xtime_lock, seq));
+        get_avenrun(avnrun, FIXED_1/200, 0);
 
-        seq_printf(m, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
-                LOAD_INT(a), LOAD_FRAC(a),
-                LOAD_INT(b), LOAD_FRAC(b),
-                LOAD_INT(c), LOAD_FRAC(c),
+        seq_printf(m, "%lu.%02lu %lu.%02lu %lu.%02lu %ld/%d %d\n",
+                LOAD_INT(avnrun[0]), LOAD_FRAC(avnrun[0]),
+                LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]),
+                LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]),
                 nr_running(), nr_threads,
                 task_active_pid_ns(current)->last_pid);
         return 0;
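The FIXED_1/200 offset passed above rounds each 11-bit fixed-point value
to the nearest hundredth before LOAD_INT()/LOAD_FRAC() split it for
display. A worked sketch, using the macro definitions from
include/linux/sched.h (shown for illustration; they are not part of this
diff):

    #define LOAD_INT(x)  ((x) >> FSHIFT)
    #define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)

    /* avenrun[0] == 2355 encodes 2355/2048 ~= 1.15            */
    /* offset: FIXED_1/200 == 10, so the value printed is 2365 */
    /* LOAD_INT(2365) == 1, LOAD_FRAC(2365) == 15  ->  "1.15"  */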
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 6eb4892efe45..de7b3b217772 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -116,6 +116,7 @@ struct fs_struct;
  * 11 bit fractions.
  */
 extern unsigned long avenrun[];                /* Load averages */
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
 
 #define FSHIFT          11              /* nr of bits of precision */
 #define FIXED_1         (1<<FSHIFT)     /* 1.0 as fixed-point */
diff --git a/kernel/sched.c b/kernel/sched.c
index f4eb88153bd1..497c09ba61e7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2868,6 +2868,21 @@ static unsigned long calc_load_update;
 unsigned long avenrun[3];
 EXPORT_SYMBOL(avenrun);
 
+/**
+ * get_avenrun - get the load average array
+ * @loads:      pointer to dest load array
+ * @offset:     offset to add
+ * @shift:      shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+        loads[0] = (avenrun[0] + offset) << shift;
+        loads[1] = (avenrun[1] + offset) << shift;
+        loads[2] = (avenrun[2] + offset) << shift;
+}
+
 static unsigned long
 calc_load(unsigned long load, unsigned long exp, unsigned long active)
 {
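calc_load(), whose signature appears as context above, maintains each
avenrun[] slot as an exponentially weighted moving average in the same
11-bit fixed point. A worked sketch of one 5-second update for the
1-minute average (the update rule and the EXP_1 constant come from
kernel/sched.c and include/linux/sched.h respectively, not from this
hunk):

    /* new = (old * EXP_1 + active * (FIXED_1 - EXP_1)) >> FSHIFT */
    /* e.g. old = 2048 (1.00), active = 4096 (2.00 runnable):     */
    /* (2048*1884 + 4096*164) >> 11 == 2212, i.e. ~1.08           */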
diff --git a/kernel/timer.c b/kernel/timer.c
index 6a21d7af9620..a26ed294f938 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1356,37 +1356,17 @@ int do_sysinfo(struct sysinfo *info)
 {
         unsigned long mem_total, sav_total;
         unsigned int mem_unit, bitcount;
-        unsigned long seq;
+        struct timespec tp;
 
         memset(info, 0, sizeof(struct sysinfo));
 
-        do {
-                struct timespec tp;
-                seq = read_seqbegin(&xtime_lock);
-
-                /*
-                 * This is annoying. The below is the same thing
-                 * posix_get_clock_monotonic() does, but it wants to
-                 * take the lock which we want to cover the loads stuff
-                 * too.
-                 */
-
-                getnstimeofday(&tp);
-                tp.tv_sec += wall_to_monotonic.tv_sec;
-                tp.tv_nsec += wall_to_monotonic.tv_nsec;
-                monotonic_to_bootbased(&tp);
-                if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
-                        tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
-                        tp.tv_sec++;
-                }
-                info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
+        ktime_get_ts(&tp);
+        monotonic_to_bootbased(&tp);
+        info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
 
-                info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
-                info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
-                info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
+        get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
 
-                info->procs = nr_threads;
-        } while (read_seqretry(&xtime_lock, seq));
+        info->procs = nr_threads;
 
         si_meminfo(info);
         si_swapinfo(info);
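On the sysinfo(2) side, SI_LOAD_SHIFT is 16 while FSHIFT is 11, so
get_avenrun() widens the fractions by shifting left 5 bits. A minimal
userspace sketch of decoding the result (a hypothetical example program,
not part of the commit):

    #include <stdio.h>
    #include <sys/sysinfo.h>

    int main(void)
    {
            struct sysinfo si;

            if (sysinfo(&si) != 0)
                    return 1;
            /* loads[] holds 16-bit fixed-point values (1 << SI_LOAD_SHIFT) */
            printf("%.2f %.2f %.2f\n",
                   si.loads[0] / 65536.0,
                   si.loads[1] / 65536.0,
                   si.loads[2] / 65536.0);
            return 0;
    }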