author    Martin Schwidefsky <schwidefsky@de.ibm.com>  2008-12-31 09:11:40 -0500
committer Martin Schwidefsky <schwidefsky@de.ibm.com>  2008-12-31 09:11:47 -0500
commit    6f43092441bda528dd38f2dc6c1e2522c5079fb7 (patch)
tree      e8bba1cf80b19be3e3658b9351b32469ba8c6bac
parent    aa5e97ce4bbc9d5daeec16b1d15bb3f6b7b4f4d4 (diff)
[PATCH] improve precision of idle time detection.

Increase the precision of the idle time calculation that is exported
to user space via /sys/devices/system/cpu/cpu<x>/idle_time_us.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--  arch/s390/include/asm/cpu.h  |  3
-rw-r--r--  arch/s390/kernel/process.c   | 67
-rw-r--r--  arch/s390/kernel/smp.c       | 25
-rw-r--r--  arch/s390/kernel/vtime.c     |  3
4 files changed, 61 insertions, 37 deletions
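
Not part of the patch, but for illustration: a minimal user-space sketch
that samples the attribute named in the commit message, assuming cpu0 is
online and the idle_time_us file is present:

	#include <stdio.h>
	#include <unistd.h>

	static unsigned long long read_idle_us(void)
	{
		unsigned long long v = 0;
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/idle_time_us", "r");

		if (f) {
			if (fscanf(f, "%llu", &v) != 1)
				v = 0;
			fclose(f);
		}
		return v;
	}

	int main(void)
	{
		unsigned long long before = read_idle_us();

		sleep(1);
		printf("cpu0 idle time over 1s: %llu us\n",
		       read_idle_us() - before);
		return 0;
	}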
diff --git a/arch/s390/include/asm/cpu.h b/arch/s390/include/asm/cpu.h
index e5a6a9ba3adf..89456df43c4a 100644
--- a/arch/s390/include/asm/cpu.h
+++ b/arch/s390/include/asm/cpu.h
@@ -14,7 +14,6 @@
 
 struct s390_idle_data {
 	spinlock_t lock;
-	unsigned int in_idle;
 	unsigned long long idle_count;
 	unsigned long long idle_enter;
 	unsigned long long idle_time;
@@ -26,7 +25,7 @@ void s390_idle_leave(void);
 
 static inline void s390_idle_check(void)
 {
-	if ((&__get_cpu_var(s390_idle))->in_idle)
+	if ((&__get_cpu_var(s390_idle))->idle_enter != 0ULL)
 		s390_idle_leave();
 }
 
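Annotation (not in the patch): with the separate in_idle flag gone, a
nonzero idle_enter timestamp doubles as the "CPU is in idle" indicator,
which is what the new s390_idle_check() tests. A standalone C sketch of
that sentinel pattern, with hypothetical values:

	#include <stdio.h>

	/* abridged stand-in for struct s390_idle_data */
	struct idle_data {
		unsigned long long idle_enter;	/* 0ULL means "not in idle" */
	};

	static int in_idle(const struct idle_data *d)
	{
		return d->idle_enter != 0ULL;
	}

	int main(void)
	{
		struct idle_data d = { 0ULL };

		printf("in idle: %d\n", in_idle(&d));	/* prints 0 */
		d.idle_enter = 0x123456789abcULL;	/* hypothetical STCK value */
		printf("in idle: %d\n", in_idle(&d));	/* prints 1 */
		return 0;
	}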
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 04f8c67a6101..1e06436f07c2 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -38,6 +38,7 @@
 #include <linux/utsname.h>
 #include <linux/tick.h>
 #include <linux/elfcore.h>
+#include <linux/kernel_stat.h>
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/system.h>
@@ -79,30 +80,19 @@ DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = {
 	.lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
 };
 
-static int s390_idle_enter(void)
+void s390_idle_leave(void)
 {
 	struct s390_idle_data *idle;
+	unsigned long long idle_time;
 
 	idle = &__get_cpu_var(s390_idle);
+	idle_time = S390_lowcore.int_clock - idle->idle_enter;
 	spin_lock(&idle->lock);
+	idle->idle_time += idle_time;
+	idle->idle_enter = 0ULL;
 	idle->idle_count++;
-	idle->in_idle = 1;
-	idle->idle_enter = get_clock();
 	spin_unlock(&idle->lock);
-	vtime_stop_cpu_timer();
-	return NOTIFY_OK;
-}
-
-void s390_idle_leave(void)
-{
-	struct s390_idle_data *idle;
-
 	vtime_start_cpu_timer();
-	idle = &__get_cpu_var(s390_idle);
-	spin_lock(&idle->lock);
-	idle->idle_time += get_clock() - idle->idle_enter;
-	idle->in_idle = 0;
-	spin_unlock(&idle->lock);
 }
 
 extern void s390_handle_mcck(void);
@@ -111,16 +101,16 @@ extern void s390_handle_mcck(void);
  */
 static void default_idle(void)
 {
+	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+	unsigned long addr;
+	psw_t psw;
+
 	/* CPU is going idle. */
 	local_irq_disable();
 	if (need_resched()) {
 		local_irq_enable();
 		return;
 	}
-	if (s390_idle_enter() == NOTIFY_BAD) {
-		local_irq_enable();
-		return;
-	}
 #ifdef CONFIG_HOTPLUG_CPU
 	if (cpu_is_offline(smp_processor_id())) {
 		preempt_enable_no_resched();
@@ -138,9 +128,42 @@ static void default_idle(void)
 	trace_hardirqs_on();
 	/* Don't trace preempt off for idle. */
 	stop_critical_timings();
+	vtime_stop_cpu_timer();
+
+	/*
+	 * The inline assembly is equivalent to
+	 *	idle->idle_enter = get_clock();
+	 *	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
+	 *			PSW_MASK_IO | PSW_MASK_EXT);
+	 * The difference is that the inline assembly makes sure that
+	 * the stck instruction is right before the lpsw instruction.
+	 * This is done to increase the precision.
+	 */
+
 	/* Wait for external, I/O or machine check interrupt. */
-	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
-			PSW_MASK_IO | PSW_MASK_EXT);
+	psw.mask = psw_kernel_bits|PSW_MASK_WAIT|PSW_MASK_IO|PSW_MASK_EXT;
+#ifndef __s390x__
+	asm volatile(
+		"	basr	%0,0\n"
+		"0:	ahi	%0,1f-0b\n"
+		"	st	%0,4(%2)\n"
+		"	stck	0(%3)\n"
+		"	lpsw	0(%2)\n"
+		"1:"
+		: "=&d" (addr), "=m" (idle->idle_enter)
+		: "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
+		: "memory", "cc");
+#else /* __s390x__ */
+	asm volatile(
+		"	larl	%0,1f\n"
+		"	stg	%0,8(%2)\n"
+		"	stck	0(%3)\n"
+		"	lpswe	0(%2)\n"
+		"1:"
+		: "=&d" (addr), "=m" (idle->idle_enter)
+		: "a" (&psw), "a" (&idle->idle_enter), "m" (psw)
+		: "memory", "cc");
+#endif /* __s390x__ */
 	start_critical_timings();
 }
 
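Annotation (not in the patch): the comment in the hunk above gives the C
equivalent of the inline assembly; issuing stck immediately before
lpsw/lpswe makes the enter timestamp coincide with the start of the wait
state, and s390_idle_leave() closes the interval with
S390_lowcore.int_clock, the clock value saved at interrupt entry. A
simplified, non-kernel sketch of the resulting bookkeeping (locking
elided, clock values hypothetical):

	#include <stdio.h>

	struct idle_data {
		unsigned long long idle_enter;	/* 0ULL when not idle */
		unsigned long long idle_time;	/* accumulated, TOD units */
		unsigned long long idle_count;
	};

	/* stand-in for stck taken right before the wait PSW is loaded */
	static void enter_idle(struct idle_data *d, unsigned long long clock)
	{
		d->idle_enter = clock;
	}

	/* stand-in for int_clock, saved by the interrupt entry path */
	static void leave_idle(struct idle_data *d, unsigned long long int_clock)
	{
		d->idle_time += int_clock - d->idle_enter;
		d->idle_enter = 0ULL;	/* clears the "in idle" indicator */
		d->idle_count++;
	}

	int main(void)
	{
		struct idle_data d = { 0 };

		enter_idle(&d, 1000ULL << 12);	/* hypothetical clock values */
		leave_idle(&d, 1500ULL << 12);
		printf("%llu wakeups, %llu us idle\n",
		       d.idle_count, d.idle_time >> 12);
		return 0;
	}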
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6fc78541dc57..3979a6fc0882 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -851,9 +851,11 @@ static ssize_t show_idle_count(struct sys_device *dev,
 	unsigned long long idle_count;
 
 	idle = &per_cpu(s390_idle, dev->id);
-	spin_lock_irq(&idle->lock);
+	spin_lock(&idle->lock);
 	idle_count = idle->idle_count;
-	spin_unlock_irq(&idle->lock);
+	if (idle->idle_enter)
+		idle_count++;
+	spin_unlock(&idle->lock);
 	return sprintf(buf, "%llu\n", idle_count);
 }
 static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -862,18 +864,17 @@ static ssize_t show_idle_time(struct sys_device *dev,
 			struct sysdev_attribute *attr, char *buf)
 {
 	struct s390_idle_data *idle;
-	unsigned long long new_time;
+	unsigned long long now, idle_time, idle_enter;
 
 	idle = &per_cpu(s390_idle, dev->id);
-	spin_lock_irq(&idle->lock);
-	if (idle->in_idle) {
-		new_time = get_clock();
-		idle->idle_time += new_time - idle->idle_enter;
-		idle->idle_enter = new_time;
-	}
-	new_time = idle->idle_time;
-	spin_unlock_irq(&idle->lock);
-	return sprintf(buf, "%llu\n", new_time >> 12);
+	spin_lock(&idle->lock);
+	now = get_clock();
+	idle_time = idle->idle_time;
+	idle_enter = idle->idle_enter;
+	if (idle_enter != 0ULL && idle_enter < now)
+		idle_time += now - idle_enter;
+	spin_unlock(&idle->lock);
+	return sprintf(buf, "%llu\n", idle_time >> 12);
 }
 static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
 
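Annotation (not in the patch): the reader no longer mutates per-cpu
state; it samples idle_time and, if idle_enter is set and sane, adds the
still-open interval. The >> 12 shift converts TOD-clock units to
microseconds, since bit 51 of the s390 TOD clock ticks once per
microsecond. A quick standalone check of that conversion with a
hypothetical delta:

	#include <stdio.h>

	int main(void)
	{
		/* bit 51 of the TOD clock = 1 us, i.e. 1 us = 1 << 12 units */
		unsigned long long delta = 2500ULL << 12;	/* hypothetical */

		printf("%llu us\n", delta >> 12);	/* prints 2500 */
		return 0;
	}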
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 1254a4d0d762..25d21fef76ba 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -112,6 +112,7 @@ EXPORT_SYMBOL_GPL(account_system_vtime);
 
 static inline void set_vtimer(__u64 expires)
 {
+	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
 	__u64 timer;
 
 	asm volatile ("  STPT %0\n"	/* Store current cpu timer value */
@@ -121,7 +122,7 @@ static inline void set_vtimer(__u64 expires)
 	S390_lowcore.last_update_timer = expires;
 
 	/* store expire time for this CPU timer */
-	__get_cpu_var(virt_cpu_timer).to_expire = expires;
+	vq->to_expire = expires;
 }
 
 void vtime_start_cpu_timer(void)
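Annotation (not in the patch): the vtime.c change is a small cleanup
that hoists the __get_cpu_var(virt_cpu_timer) lookup to the top of
set_vtimer(), so the per-cpu address is computed once and reused.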