diff options
author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2008-12-31 09:11:40 -0500 |
---|---|---|
committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2008-12-31 09:11:47 -0500 |
commit | 6f43092441bda528dd38f2dc6c1e2522c5079fb7 (patch) | |
tree | e8bba1cf80b19be3e3658b9351b32469ba8c6bac /arch/s390/kernel/process.c | |
parent | aa5e97ce4bbc9d5daeec16b1d15bb3f6b7b4f4d4 (diff) |
[PATCH] improve precision of idle time detection.
Increase the precision of the idle time calculation that is exported
to user space via /sys/devices/system/cpu/cpu<x>/idle_time_us
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel/process.c')
-rw-r--r-- | arch/s390/kernel/process.c | 67 |
1 file changed, 45 insertions, 22 deletions
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 04f8c67a6101..1e06436f07c2 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/utsname.h> | 38 | #include <linux/utsname.h> |
39 | #include <linux/tick.h> | 39 | #include <linux/tick.h> |
40 | #include <linux/elfcore.h> | 40 | #include <linux/elfcore.h> |
41 | #include <linux/kernel_stat.h> | ||
41 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
42 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
43 | #include <asm/system.h> | 44 | #include <asm/system.h> |
@@ -79,30 +80,19 @@ DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = { | |||
79 | .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock) | 80 | .lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock) |
80 | }; | 81 | }; |
81 | 82 | ||
82 | static int s390_idle_enter(void) | 83 | void s390_idle_leave(void) |
83 | { | 84 | { |
84 | struct s390_idle_data *idle; | 85 | struct s390_idle_data *idle; |
86 | unsigned long long idle_time; | ||
85 | 87 | ||
86 | idle = &__get_cpu_var(s390_idle); | 88 | idle = &__get_cpu_var(s390_idle); |
89 | idle_time = S390_lowcore.int_clock - idle->idle_enter; | ||
87 | spin_lock(&idle->lock); | 90 | spin_lock(&idle->lock); |
91 | idle->idle_time += idle_time; | ||
92 | idle->idle_enter = 0ULL; | ||
88 | idle->idle_count++; | 93 | idle->idle_count++; |
89 | idle->in_idle = 1; | ||
90 | idle->idle_enter = get_clock(); | ||
91 | spin_unlock(&idle->lock); | 94 | spin_unlock(&idle->lock); |
92 | vtime_stop_cpu_timer(); | ||
93 | return NOTIFY_OK; | ||
94 | } | ||
95 | |||
96 | void s390_idle_leave(void) | ||
97 | { | ||
98 | struct s390_idle_data *idle; | ||
99 | |||
100 | vtime_start_cpu_timer(); | 95 | vtime_start_cpu_timer(); |
101 | idle = &__get_cpu_var(s390_idle); | ||
102 | spin_lock(&idle->lock); | ||
103 | idle->idle_time += get_clock() - idle->idle_enter; | ||
104 | idle->in_idle = 0; | ||
105 | spin_unlock(&idle->lock); | ||
106 | } | 96 | } |
107 | 97 | ||
108 | extern void s390_handle_mcck(void); | 98 | extern void s390_handle_mcck(void); |
@@ -111,16 +101,16 @@ extern void s390_handle_mcck(void); | |||
111 | */ | 101 | */ |
112 | static void default_idle(void) | 102 | static void default_idle(void) |
113 | { | 103 | { |
104 | struct s390_idle_data *idle = &__get_cpu_var(s390_idle); | ||
105 | unsigned long addr; | ||
106 | psw_t psw; | ||
107 | |||
114 | /* CPU is going idle. */ | 108 | /* CPU is going idle. */ |
115 | local_irq_disable(); | 109 | local_irq_disable(); |
116 | if (need_resched()) { | 110 | if (need_resched()) { |
117 | local_irq_enable(); | 111 | local_irq_enable(); |
118 | return; | 112 | return; |
119 | } | 113 | } |
120 | if (s390_idle_enter() == NOTIFY_BAD) { | ||
121 | local_irq_enable(); | ||
122 | return; | ||
123 | } | ||
124 | #ifdef CONFIG_HOTPLUG_CPU | 114 | #ifdef CONFIG_HOTPLUG_CPU |
125 | if (cpu_is_offline(smp_processor_id())) { | 115 | if (cpu_is_offline(smp_processor_id())) { |
126 | preempt_enable_no_resched(); | 116 | preempt_enable_no_resched(); |
@@ -138,9 +128,42 @@ static void default_idle(void) | |||
138 | trace_hardirqs_on(); | 128 | trace_hardirqs_on(); |
139 | /* Don't trace preempt off for idle. */ | 129 | /* Don't trace preempt off for idle. */ |
140 | stop_critical_timings(); | 130 | stop_critical_timings(); |
131 | vtime_stop_cpu_timer(); | ||
132 | |||
133 | /* | ||
134 | * The inline assembly is equivalent to | ||
135 | * idle->idle_enter = get_clock(); | ||
136 | * __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | ||
137 | * PSW_MASK_IO | PSW_MASK_EXT); | ||
138 | * The difference is that the inline assembly makes sure that | ||
139 | * the stck instruction is right before the lpsw instruction. | ||
140 | * This is done to increase the precision. | ||
141 | */ | ||
142 | |||
141 | /* Wait for external, I/O or machine check interrupt. */ | 143 | /* Wait for external, I/O or machine check interrupt. */ |
142 | __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT | | 144 | psw.mask = psw_kernel_bits|PSW_MASK_WAIT|PSW_MASK_IO|PSW_MASK_EXT; |
143 | PSW_MASK_IO | PSW_MASK_EXT); | 145 | #ifndef __s390x__ |
146 | asm volatile( | ||
147 | " basr %0,0\n" | ||
148 | "0: ahi %0,1f-0b\n" | ||
149 | " st %0,4(%2)\n" | ||
150 | " stck 0(%3)\n" | ||
151 | " lpsw 0(%2)\n" | ||
152 | "1:" | ||
153 | : "=&d" (addr), "=m" (idle->idle_enter) | ||
154 | : "a" (&psw), "a" (&idle->idle_enter), "m" (psw) | ||
155 | : "memory", "cc"); | ||
156 | #else /* __s390x__ */ | ||
157 | asm volatile( | ||
158 | " larl %0,1f\n" | ||
159 | " stg %0,8(%2)\n" | ||
160 | " stck 0(%3)\n" | ||
161 | " lpswe 0(%2)\n" | ||
162 | "1:" | ||
163 | : "=&d" (addr), "=m" (idle->idle_enter) | ||
164 | : "a" (&psw), "a" (&idle->idle_enter), "m" (psw) | ||
165 | : "memory", "cc"); | ||
166 | #endif /* __s390x__ */ | ||
144 | start_critical_timings(); | 167 | start_critical_timings(); |
145 | } | 168 | } |
146 | 169 | ||