 arch/s390/appldata/appldata_base.c | 130 ++++----------
 arch/s390/include/asm/cputime.h    |   6 +-
 arch/s390/include/asm/timer.h      |  51 ------
 arch/s390/include/asm/vtimer.h     |  33 +++
 arch/s390/kernel/asm-offsets.c     |  10 +-
 arch/s390/kernel/entry.S           |  37 ++--
 arch/s390/kernel/entry.h           |   4 +-
 arch/s390/kernel/entry64.S         |  39 ++--
 arch/s390/kernel/process.c         |   2 +-
 arch/s390/kernel/smp.c             |   8 +-
 arch/s390/kernel/time.c            |   2 +-
 arch/s390/kernel/vtime.c           | 370 ++++++++-----------
 arch/s390/lib/delay.c              |   2 +-
 13 files changed, 222 insertions(+), 472 deletions(-)
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index fadefce09962..bae0f402bf2a 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -27,7 +27,7 @@
 #include <linux/suspend.h>
 #include <linux/platform_device.h>
 #include <asm/appldata.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/smp.h>
@@ -82,8 +82,7 @@ static struct ctl_table appldata_dir_table[] = {
 /*
  * Timer
  */
-static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
-static atomic_t appldata_expire_count = ATOMIC_INIT(0);
+static struct vtimer_list appldata_timer;
 
 static DEFINE_SPINLOCK(appldata_timer_lock);
 static int appldata_interval = APPLDATA_CPU_INTERVAL;
@@ -113,10 +112,7 @@ static LIST_HEAD(appldata_ops_list);
  */
 static void appldata_timer_function(unsigned long data)
 {
-	if (atomic_dec_and_test(&appldata_expire_count)) {
-		atomic_set(&appldata_expire_count, num_online_cpus());
-		queue_work(appldata_wq, (struct work_struct *) data);
-	}
+	queue_work(appldata_wq, (struct work_struct *) data);
 }
 
 /*
@@ -129,7 +125,6 @@ static void appldata_work_fn(struct work_struct *work)
 	struct list_head *lh;
 	struct appldata_ops *ops;
 
-	get_online_cpus();
 	mutex_lock(&appldata_ops_mutex);
 	list_for_each(lh, &appldata_ops_list) {
 		ops = list_entry(lh, struct appldata_ops, list);
@@ -138,7 +133,6 @@ static void appldata_work_fn(struct work_struct *work)
 		}
 	}
 	mutex_unlock(&appldata_ops_mutex);
-	put_online_cpus();
 }
 
 /*
@@ -166,20 +160,6 @@ int appldata_diag(char record_nr, u16 function, unsigned long buffer,
 
 /****************************** /proc stuff **********************************/
 
-/*
- * appldata_mod_vtimer_wrap()
- *
- * wrapper function for mod_virt_timer(), because smp_call_function_single()
- * accepts only one parameter.
- */
-static void __appldata_mod_vtimer_wrap(void *p) {
-	struct {
-		struct vtimer_list *timer;
-		u64 expires;
-	} *args = p;
-	mod_virt_timer_periodic(args->timer, args->expires);
-}
-
 #define APPLDATA_ADD_TIMER	0
 #define APPLDATA_DEL_TIMER	1
 #define APPLDATA_MOD_TIMER	2
@@ -190,49 +170,28 @@ static void __appldata_mod_vtimer_wrap(void *p) {
  * Add, delete or modify virtual timers on all online cpus.
  * The caller needs to get the appldata_timer_lock spinlock.
  */
-static void
-__appldata_vtimer_setup(int cmd)
+static void __appldata_vtimer_setup(int cmd)
 {
-	u64 per_cpu_interval;
-	int i;
+	u64 timer_interval = (u64) appldata_interval * 1000 * TOD_MICRO;
 
 	switch (cmd) {
 	case APPLDATA_ADD_TIMER:
 		if (appldata_timer_active)
 			break;
-		per_cpu_interval = (u64) (appldata_interval*1000 /
-					  num_online_cpus()) * TOD_MICRO;
-		for_each_online_cpu(i) {
-			per_cpu(appldata_timer, i).expires = per_cpu_interval;
-			smp_call_function_single(i, add_virt_timer_periodic,
-						 &per_cpu(appldata_timer, i),
-						 1);
-		}
+		appldata_timer.expires = timer_interval;
+		add_virt_timer_periodic(&appldata_timer);
 		appldata_timer_active = 1;
 		break;
 	case APPLDATA_DEL_TIMER:
-		for_each_online_cpu(i)
-			del_virt_timer(&per_cpu(appldata_timer, i));
+		del_virt_timer(&appldata_timer);
 		if (!appldata_timer_active)
 			break;
 		appldata_timer_active = 0;
-		atomic_set(&appldata_expire_count, num_online_cpus());
 		break;
 	case APPLDATA_MOD_TIMER:
-		per_cpu_interval = (u64) (appldata_interval*1000 /
-					  num_online_cpus()) * TOD_MICRO;
 		if (!appldata_timer_active)
 			break;
-		for_each_online_cpu(i) {
-			struct {
-				struct vtimer_list *timer;
-				u64 expires;
-			} args;
-			args.timer = &per_cpu(appldata_timer, i);
-			args.expires = per_cpu_interval;
-			smp_call_function_single(i, __appldata_mod_vtimer_wrap,
-						 &args, 1);
-		}
+		mod_virt_timer_periodic(&appldata_timer, timer_interval);
 	}
 }
 
@@ -263,14 +222,12 @@ appldata_timer_handler(ctl_table *ctl, int write,
 	len = *lenp;
 	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
 		return -EFAULT;
-	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	if (buf[0] == '1')
 		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
 	else if (buf[0] == '0')
 		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
 	spin_unlock(&appldata_timer_lock);
-	put_online_cpus();
 out:
 	*lenp = len;
 	*ppos += len;
@@ -303,20 +260,17 @@ appldata_interval_handler(ctl_table *ctl, int write,
 		goto out;
 	}
 	len = *lenp;
-	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
+	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
 		return -EFAULT;
-	}
 	interval = 0;
 	sscanf(buf, "%i", &interval);
 	if (interval <= 0)
 		return -EINVAL;
 
-	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	appldata_interval = interval;
 	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
 	spin_unlock(&appldata_timer_lock);
-	put_online_cpus();
 out:
 	*lenp = len;
 	*ppos += len;
@@ -483,14 +437,12 @@ static int appldata_freeze(struct device *dev)
 	int rc;
 	struct list_head *lh;
 
-	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	if (appldata_timer_active) {
 		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
 		appldata_timer_suspended = 1;
 	}
 	spin_unlock(&appldata_timer_lock);
-	put_online_cpus();
 
 	mutex_lock(&appldata_ops_mutex);
 	list_for_each(lh, &appldata_ops_list) {
@@ -514,14 +466,12 @@ static int appldata_restore(struct device *dev)
 	int rc;
 	struct list_head *lh;
 
-	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	if (appldata_timer_suspended) {
 		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
 		appldata_timer_suspended = 0;
 	}
 	spin_unlock(&appldata_timer_lock);
-	put_online_cpus();
 
 	mutex_lock(&appldata_ops_mutex);
 	list_for_each(lh, &appldata_ops_list) {
@@ -565,53 +515,6 @@ static struct platform_driver appldata_pdrv = {
 
 /******************************* init / exit *********************************/
 
-static void __cpuinit appldata_online_cpu(int cpu)
-{
-	init_virt_timer(&per_cpu(appldata_timer, cpu));
-	per_cpu(appldata_timer, cpu).function = appldata_timer_function;
-	per_cpu(appldata_timer, cpu).data = (unsigned long)
-		&appldata_work;
-	atomic_inc(&appldata_expire_count);
-	spin_lock(&appldata_timer_lock);
-	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
-	spin_unlock(&appldata_timer_lock);
-}
-
-static void __cpuinit appldata_offline_cpu(int cpu)
-{
-	del_virt_timer(&per_cpu(appldata_timer, cpu));
-	if (atomic_dec_and_test(&appldata_expire_count)) {
-		atomic_set(&appldata_expire_count, num_online_cpus());
-		queue_work(appldata_wq, &appldata_work);
-	}
-	spin_lock(&appldata_timer_lock);
-	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
-	spin_unlock(&appldata_timer_lock);
-}
-
-static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
-					 unsigned long action,
-					 void *hcpu)
-{
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		appldata_online_cpu((long) hcpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		appldata_offline_cpu((long) hcpu);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata appldata_nb = {
-	.notifier_call = appldata_cpu_notify,
-};
-
 /*
  * appldata_init()
  *
@@ -619,7 +522,10 @@ static struct notifier_block __cpuinitdata appldata_nb = {
  */
 static int __init appldata_init(void)
 {
-	int i, rc;
+	int rc;
+
+	appldata_timer.function = appldata_timer_function;
+	appldata_timer.data = (unsigned long) &appldata_work;
 
 	rc = platform_driver_register(&appldata_pdrv);
 	if (rc)
@@ -637,14 +543,6 @@ static int __init appldata_init(void)
 		goto out_device;
 	}
 
-	get_online_cpus();
-	for_each_online_cpu(i)
-		appldata_online_cpu(i);
-	put_online_cpus();
-
-	/* Register cpu hotplug notifier */
-	register_hotcpu_notifier(&appldata_nb);
-
 	appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
 	return 0;
 
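With the rework, appldata no longer juggles one vtimer per CPU: the virtual timers become system-wide, so a single vtimer_list plus plain add/mod/del calls replace the smp_call_function_single() choreography and the CPU-hotplug notifier. A minimal sketch of the resulting lifecycle, condensed from the hunks above (the appldata_start/appldata_stop wrapper names are invented for illustration; locking and error handling are elided):

/* Sketch only, not the code as merged; identifiers other than the two
 * invented wrappers come from the hunks above. */
static struct vtimer_list appldata_timer;

static void appldata_start(void)		/* hypothetical wrapper */
{
	appldata_timer.function = appldata_timer_function;
	appldata_timer.data = (unsigned long) &appldata_work;
	/* appldata_interval is in milliseconds; TOD_MICRO scales
	 * microseconds to CPU-timer units */
	appldata_timer.expires = (u64) appldata_interval * 1000 * TOD_MICRO;
	add_virt_timer_periodic(&appldata_timer);
}

static void appldata_stop(void)			/* hypothetical wrapper */
{
	del_virt_timer(&appldata_timer);
}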
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 357ea7b9714e..8709bdef233c 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -168,9 +168,11 @@ struct s390_idle_data {
 	int nohz_delay;
 	unsigned int sequence;
 	unsigned long long idle_count;
-	unsigned long long idle_enter;
-	unsigned long long idle_exit;
 	unsigned long long idle_time;
+	unsigned long long clock_idle_enter;
+	unsigned long long clock_idle_exit;
+	unsigned long long timer_idle_enter;
+	unsigned long long timer_idle_exit;
 };
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
deleted file mode 100644
index 15d647901e5c..000000000000
--- a/arch/s390/include/asm/timer.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- *  include/asm-s390/timer.h
- *
- *  (C) Copyright IBM Corp. 2003,2006
- *  Virtual CPU timer
- *
- *  Author: Jan Glauber (jang@de.ibm.com)
- */
-
-#ifndef _ASM_S390_TIMER_H
-#define _ASM_S390_TIMER_H
-
-#include <linux/timer.h>
-
-#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
-
-struct vtimer_list {
-	struct list_head entry;
-
-	int cpu;
-	__u64 expires;
-	__u64 interval;
-
-	void (*function)(unsigned long);
-	unsigned long data;
-};
-
-/* the vtimer value will wrap after ca. 71 years */
-struct vtimer_queue {
-	struct list_head list;
-	spinlock_t lock;
-	__u64 timer;		/* last programmed timer */
-	__u64 elapsed;		/* elapsed time of timer expire values */
-	__u64 idle_enter;	/* cpu timer on idle enter */
-	__u64 idle_exit;	/* cpu timer on idle exit */
-};
-
-extern void init_virt_timer(struct vtimer_list *timer);
-extern void add_virt_timer(void *new);
-extern void add_virt_timer_periodic(void *new);
-extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
-extern int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires);
-extern int del_virt_timer(struct vtimer_list *timer);
-
-extern void init_cpu_vtimer(void);
-extern void vtime_init(void);
-
-extern void vtime_stop_cpu(void);
-extern void vtime_start_leave(void);
-
-#endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/asm/vtimer.h b/arch/s390/include/asm/vtimer.h
new file mode 100644
index 000000000000..bfe25d513ad2
--- /dev/null
+++ b/arch/s390/include/asm/vtimer.h
@@ -0,0 +1,33 @@
+/*
+ *  Copyright IBM Corp. 2003, 2012
+ *  Virtual CPU timer
+ *
+ *  Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_TIMER_H
+#define _ASM_S390_TIMER_H
+
+#define VTIMER_MAX_SLICE	(0x7fffffffffffffffULL)
+
+struct vtimer_list {
+	struct list_head entry;
+	u64 expires;
+	u64 interval;
+	void (*function)(unsigned long);
+	unsigned long data;
+};
+
+extern void init_virt_timer(struct vtimer_list *timer);
+extern void add_virt_timer(struct vtimer_list *timer);
+extern void add_virt_timer_periodic(struct vtimer_list *timer);
+extern int mod_virt_timer(struct vtimer_list *timer, u64 expires);
+extern int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires);
+extern int del_virt_timer(struct vtimer_list *timer);
+
+extern void init_cpu_vtimer(void);
+extern void vtime_init(void);
+
+extern void vtime_stop_cpu(void);
+
+#endif /* _ASM_S390_TIMER_H */
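The header above is the whole public surface of the reworked interface. A hedged usage sketch of a hypothetical client (the callback body, the cookie and the one-second period are invented; one second is assumed to be 4096000000 CPU-timer units, i.e. 4096 units per microsecond):

#include <asm/vtimer.h>

/* Illustration only, not from the patch. */
static struct vtimer_list sample_timer;

static void sample_fn(unsigned long data)
{
	/* runs once the CPU has accumulated 'expires' units of
	 * virtual (cpu-timer) time; 'data' is the cookie set below */
}

static void sample_setup(void)
{
	init_virt_timer(&sample_timer);
	sample_timer.function = sample_fn;
	sample_timer.data = 0;			/* cookie for sample_fn */
	sample_timer.expires = 4096000000ULL;	/* ~1s of cpu time, assumed */
	add_virt_timer_periodic(&sample_timer);	/* sets interval := expires */
}

Note that these timers measure cpu time consumed, not wall-clock time; a pending timer can be re-armed with mod_virt_timer_periodic() or removed with del_virt_timer().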
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 0e974ddd156b..45ef1a7b08f9 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -9,7 +9,6 @@
 #include <linux/kbuild.h>
 #include <linux/sched.h>
 #include <asm/cputime.h>
-#include <asm/timer.h>
 #include <asm/vdso.h>
 #include <asm/pgtable.h>
 
@@ -72,11 +71,10 @@ int main(void)
 	DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
 	BLANK();
 	/* idle data offsets */
-	DEFINE(__IDLE_ENTER, offsetof(struct s390_idle_data, idle_enter));
-	DEFINE(__IDLE_EXIT, offsetof(struct s390_idle_data, idle_exit));
-	/* vtimer queue offsets */
-	DEFINE(__VQ_IDLE_ENTER, offsetof(struct vtimer_queue, idle_enter));
-	DEFINE(__VQ_IDLE_EXIT, offsetof(struct vtimer_queue, idle_exit));
+	DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
+	DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit));
+	DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter));
+	DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit));
 	/* lowcore offsets */
 	DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
 	DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 2c0eff488875..870bad6d56fc 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -616,17 +616,13 @@ ext_skip:
  * Load idle PSW. The second "half" of this function is in cleanup_idle.
  */
 ENTRY(psw_idle)
-	st	%r4,__SF_EMPTY(%r15)
+	st	%r3,__SF_EMPTY(%r15)
 	basr	%r1,0
 	la	%r1,psw_idle_lpsw+4-.(%r1)
 	st	%r1,__SF_EMPTY+4(%r15)
 	oi	__SF_EMPTY+4(%r15),0x80
-	la	%r1,.Lvtimer_max-psw_idle_lpsw-4(%r1)
-	stck	__IDLE_ENTER(%r2)
-	ltr	%r5,%r5
-	stpt	__VQ_IDLE_ENTER(%r3)
-	jz	psw_idle_lpsw
-	spt	0(%r1)
+	stck	__CLOCK_IDLE_ENTER(%r2)
+	stpt	__TIMER_IDLE_ENTER(%r2)
 psw_idle_lpsw:
 	lpsw	__SF_EMPTY(%r15)
 	br	%r14
@@ -885,33 +881,28 @@ cleanup_io_restore_insn:
 
 cleanup_idle:
 	# copy interrupt clock & cpu timer
-	mvc	__IDLE_EXIT(8,%r2),__LC_INT_CLOCK
-	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
 	chi	%r11,__LC_SAVE_AREA_ASYNC
 	je	0f
-	mvc	__IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
-	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
 0:	# check if stck has been executed
 	cl	%r9,BASED(cleanup_idle_insn)
 	jhe	1f
-	mvc	__IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
-	mvc	__VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
-	j	2f
-1:	# check if the cpu timer has been reprogrammed
-	ltr	%r5,%r5
-	jz	2f
-	spt	__VQ_IDLE_ENTER(%r3)
-2:	# account system time going idle
+	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
+	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
+1:	# account system time going idle
 	lm	%r9,%r10,__LC_STEAL_TIMER
-	ADD64	%r9,%r10,__IDLE_ENTER(%r2)
+	ADD64	%r9,%r10,__CLOCK_IDLE_ENTER(%r2)
 	SUB64	%r9,%r10,__LC_LAST_UPDATE_CLOCK
 	stm	%r9,%r10,__LC_STEAL_TIMER
-	mvc	__LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
 	lm	%r9,%r10,__LC_SYSTEM_TIMER
 	ADD64	%r9,%r10,__LC_LAST_UPDATE_TIMER
-	SUB64	%r9,%r10,__VQ_IDLE_ENTER(%r3)
+	SUB64	%r9,%r10,__TIMER_IDLE_ENTER(%r2)
 	stm	%r9,%r10,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
 	# prepare return psw
 	n	%r8,BASED(cleanup_idle_wait)	# clear wait state bit
 	l	%r9,24(%r11)			# return from psw_idle
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index f66a229ab0b3..a5f4dc42a5db 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -5,7 +5,6 @@
 #include <linux/signal.h>
 #include <asm/ptrace.h>
 #include <asm/cputime.h>
-#include <asm/timer.h>
 
 extern void (*pgm_check_table[128])(struct pt_regs *);
 extern void *restart_stack;
@@ -17,8 +16,7 @@ void io_int_handler(void);
 void mcck_int_handler(void);
 void restart_int_handler(void);
 void restart_call_handler(void);
-void psw_idle(struct s390_idle_data *, struct vtimer_queue *,
-	      unsigned long, int);
+void psw_idle(struct s390_idle_data *, unsigned long);
 
 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 1983c22a8a99..349b7eeb348a 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -642,15 +642,11 @@ ext_skip:
  * Load idle PSW. The second "half" of this function is in cleanup_idle.
  */
 ENTRY(psw_idle)
-	stg	%r4,__SF_EMPTY(%r15)
+	stg	%r3,__SF_EMPTY(%r15)
 	larl	%r1,psw_idle_lpsw+4
 	stg	%r1,__SF_EMPTY+8(%r15)
-	larl	%r1,.Lvtimer_max
-	STCK	__IDLE_ENTER(%r2)
-	ltr	%r5,%r5
-	stpt	__VQ_IDLE_ENTER(%r3)
-	jz	psw_idle_lpsw
-	spt	0(%r1)
+	STCK	__CLOCK_IDLE_ENTER(%r2)
+	stpt	__TIMER_IDLE_ENTER(%r2)
 psw_idle_lpsw:
 	lpswe	__SF_EMPTY(%r15)
 	br	%r14
@@ -918,33 +914,28 @@ cleanup_io_restore_insn:
 
 cleanup_idle:
 	# copy interrupt clock & cpu timer
-	mvc	__IDLE_EXIT(8,%r2),__LC_INT_CLOCK
-	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
 	cghi	%r11,__LC_SAVE_AREA_ASYNC
 	je	0f
-	mvc	__IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
-	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
 0:	# check if stck & stpt have been executed
 	clg	%r9,BASED(cleanup_idle_insn)
 	jhe	1f
-	mvc	__IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
-	mvc	__VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
-	j	2f
-1:	# check if the cpu timer has been reprogrammed
-	ltr	%r5,%r5
-	jz	2f
-	spt	__VQ_IDLE_ENTER(%r3)
-2:	# account system time going idle
+	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
+	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
+1:	# account system time going idle
 	lg	%r9,__LC_STEAL_TIMER
-	alg	%r9,__IDLE_ENTER(%r2)
+	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
 	slg	%r9,__LC_LAST_UPDATE_CLOCK
 	stg	%r9,__LC_STEAL_TIMER
-	mvc	__LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
 	lg	%r9,__LC_SYSTEM_TIMER
 	alg	%r9,__LC_LAST_UPDATE_TIMER
-	slg	%r9,__VQ_IDLE_ENTER(%r3)
+	slg	%r9,__TIMER_IDLE_ENTER(%r2)
 	stg	%r9,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
 	# prepare return psw
 	nihh	%r8,0xfffd		# clear wait state bit
 	lg	%r9,48(%r11)		# return from psw_idle
@@ -960,8 +951,6 @@ cleanup_idle_insn:
 	.quad	__critical_start
 .Lcritical_length:
 	.quad	__critical_end - __critical_start
-.Lvtimer_max:
-	.quad	0x7fffffffffffffff
 
 
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 7efbfa53d659..733175373a4c 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -25,8 +25,8 @@
 #include <linux/module.h>
 #include <asm/io.h>
 #include <asm/processor.h>
+#include <asm/vtimer.h>
 #include <asm/irq.h>
-#include <asm/timer.h>
 #include <asm/nmi.h>
 #include <asm/smp.h>
 #include <asm/switch_to.h>
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index eeb441bbddae..5481da80926a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -38,7 +38,7 @@
 #include <asm/setup.h>
 #include <asm/irq.h>
 #include <asm/tlbflush.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
 #include <asm/lowcore.h>
 #include <asm/sclp.h>
 #include <asm/vdso.h>
@@ -917,7 +917,7 @@ static ssize_t show_idle_count(struct device *dev,
 	do {
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_count = ACCESS_ONCE(idle->idle_count);
-		if (ACCESS_ONCE(idle->idle_enter))
+		if (ACCESS_ONCE(idle->clock_idle_enter))
 			idle_count++;
 	} while ((sequence & 1) || (idle->sequence != sequence));
 	return sprintf(buf, "%llu\n", idle_count);
@@ -935,8 +935,8 @@ static ssize_t show_idle_time(struct device *dev,
 		now = get_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_time = ACCESS_ONCE(idle->idle_time);
-		idle_enter = ACCESS_ONCE(idle->idle_enter);
-		idle_exit = ACCESS_ONCE(idle->idle_exit);
+		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
 	} while ((sequence & 1) || (idle->sequence != sequence));
 	idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
 	return sprintf(buf, "%llu\n", idle_time >> 12);
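The sysfs readers above keep their lockless retry loop; only the field names change. The writer (vtime_stop_cpu) increments idle->sequence before and after updating the idle data, so a reader retries while the count is odd or has moved underneath it. A toy model of that protocol (illustration only, not kernel code; the real loop additionally relies on ACCESS_ONCE() and the smp_wmb() barriers visible in the vtime.c hunks below):

/* Single-writer sequence counter, mirroring the retry loop above. */
struct idle_sample {
	unsigned int sequence;		/* odd while an update is in flight */
	unsigned long long idle_count;
};

static unsigned long long read_idle_count(struct idle_sample *p)
{
	unsigned int seq;
	unsigned long long count;

	do {
		seq = p->sequence;
		count = p->idle_count;
	} while ((seq & 1) || (p->sequence != seq));	/* retry on race */
	return count;
}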
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 30cf3bdc9b77..dcec960fc724 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -44,7 +44,7 @@
 #include <asm/vdso.h>
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
 #include <asm/etr.h>
 #include <asm/cio.h>
 #include "entry.h"
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 39ebff506946..4fc97b40a6e1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -1,71 +1,82 @@
 /*
- * arch/s390/kernel/vtime.c
- *  Virtual cpu timer based timer functions.
+ * Virtual cpu timer based timer functions.
  *
- *  S390 version
- *    Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
- *    Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+ * Copyright IBM Corp. 2004, 2012
+ * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
  */
 
-#include <linux/module.h>
+#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/kprobes.h>
+#include <linux/export.h>
 #include <linux/kernel.h>
-#include <linux/time.h>
-#include <linux/delay.h>
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/types.h>
 #include <linux/timex.h>
-#include <linux/notifier.h>
-#include <linux/kernel_stat.h>
-#include <linux/rcupdate.h>
-#include <linux/posix-timers.h>
+#include <linux/types.h>
+#include <linux/time.h>
 #include <linux/cpu.h>
-#include <linux/kprobes.h>
+#include <linux/smp.h>
 
-#include <asm/timer.h>
 #include <asm/irq_regs.h>
 #include <asm/cputime.h>
+#include <asm/vtimer.h>
 #include <asm/irq.h>
 #include "entry.h"
 
-static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
+static void virt_timer_expire(void);
 
 DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
 
-static inline __u64 get_vtimer(void)
+static LIST_HEAD(virt_timer_list);
+static DEFINE_SPINLOCK(virt_timer_lock);
+static atomic64_t virt_timer_current;
+static atomic64_t virt_timer_elapsed;
+
+static inline u64 get_vtimer(void)
 {
-	__u64 timer;
+	u64 timer;
 
-	asm volatile("STPT %0" : "=m" (timer));
+	asm volatile("stpt %0" : "=m" (timer));
 	return timer;
 }
 
-static inline void set_vtimer(__u64 expires)
+static inline void set_vtimer(u64 expires)
 {
-	__u64 timer;
+	u64 timer;
 
-	asm volatile ("  STPT %0\n"  /* Store current cpu timer value */
-		      "  SPT %1"     /* Set new value immediately afterwards */
-		      : "=m" (timer) : "m" (expires) );
+	asm volatile(
+		"	stpt	%0\n"	/* Store current cpu timer value */
+		"	spt	%1"	/* Set new value imm. afterwards */
+		: "=m" (timer) : "m" (expires));
 	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
 	S390_lowcore.last_update_timer = expires;
 }
 
+static inline int virt_timer_forward(u64 elapsed)
+{
+	BUG_ON(!irqs_disabled());
+
+	if (list_empty(&virt_timer_list))
+		return 0;
+	elapsed = atomic64_add_return(elapsed, &virt_timer_elapsed);
+	return elapsed >= atomic64_read(&virt_timer_current);
+}
+
 /*
  * Update process times based on virtual cpu times stored by entry.S
  * to the lowcore fields user_timer, system_timer & steal_clock.
  */
-static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 {
 	struct thread_info *ti = task_thread_info(tsk);
-	__u64 timer, clock, user, system, steal;
+	u64 timer, clock, user, system, steal;
 
 	timer = S390_lowcore.last_update_timer;
 	clock = S390_lowcore.last_update_clock;
-	asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
-		      "  STCK %1"      /* Store current tod clock value */
-		      : "=m" (S390_lowcore.last_update_timer),
-		        "=m" (S390_lowcore.last_update_clock) );
+	asm volatile(
+		"	stpt	%0\n"	/* Store current cpu timer value */
+		"	stck	%1"	/* Store current tod clock value */
+		: "=m" (S390_lowcore.last_update_timer),
+		  "=m" (S390_lowcore.last_update_clock));
 	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
 	S390_lowcore.steal_timer += S390_lowcore.last_update_clock - clock;
 
@@ -84,6 +95,8 @@ static void do_account_vtime(struct task_struct *tsk, int hardirq_offset)
 		S390_lowcore.steal_timer = 0;
 		account_steal_time(steal);
 	}
+
+	return virt_timer_forward(user + system);
 }
 
 void account_vtime(struct task_struct *prev, struct task_struct *next)
@@ -101,7 +114,8 @@ void account_vtime(struct task_struct *prev, struct task_struct *next)
 
 void account_process_tick(struct task_struct *tsk, int user_tick)
 {
-	do_account_vtime(tsk, HARDIRQ_OFFSET);
+	if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+		virt_timer_expire();
 }
 
 /*
@@ -111,7 +125,7 @@ void account_process_tick(struct task_struct *tsk, int user_tick)
 void account_system_vtime(struct task_struct *tsk)
 {
 	struct thread_info *ti = task_thread_info(tsk);
-	__u64 timer, system;
+	u64 timer, system;
 
 	timer = S390_lowcore.last_update_timer;
 	S390_lowcore.last_update_timer = get_vtimer();
@@ -121,13 +135,14 @@ void account_system_vtime(struct task_struct *tsk)
 	S390_lowcore.steal_timer -= system;
 	ti->system_timer = S390_lowcore.system_timer;
 	account_system_time(tsk, 0, system, system);
+
+	virt_timer_forward(system);
 }
 EXPORT_SYMBOL_GPL(account_system_vtime);
 
 void __kprobes vtime_stop_cpu(void)
 {
 	struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
-	struct vtimer_queue *vq = &__get_cpu_var(virt_cpu_timer);
 	unsigned long long idle_time;
 	unsigned long psw_mask;
 
@@ -141,7 +156,7 @@ void __kprobes vtime_stop_cpu(void)
 	idle->nohz_delay = 0;
 
 	/* Call the assembler magic in entry.S */
-	psw_idle(idle, vq, psw_mask, !list_empty(&vq->list));
+	psw_idle(idle, psw_mask);
 
 	/* Reenable preemption tracer. */
 	start_critical_timings();
@@ -149,9 +164,9 @@ void __kprobes vtime_stop_cpu(void)
 	/* Account time spent with enabled wait psw loaded as idle time. */
 	idle->sequence++;
 	smp_wmb();
-	idle_time = idle->idle_exit - idle->idle_enter;
+	idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
+	idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
 	idle->idle_time += idle_time;
-	idle->idle_enter = idle->idle_exit = 0ULL;
 	idle->idle_count++;
 	account_idle_time(idle_time);
 	smp_wmb();
@@ -167,10 +182,10 @@ cputime64_t s390_get_idle_time(int cpu)
 	do {
 		now = get_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
-		idle_enter = ACCESS_ONCE(idle->idle_enter);
-		idle_exit = ACCESS_ONCE(idle->idle_exit);
+		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
 	} while ((sequence & 1) || (idle->sequence != sequence));
-	return idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
+	return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
 }
 
 /*
@@ -179,11 +194,11 @@ cputime64_t s390_get_idle_time(int cpu)
  */
 static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
 {
-	struct vtimer_list *event;
+	struct vtimer_list *tmp;
 
-	list_for_each_entry(event, head, entry) {
-		if (event->expires > timer->expires) {
-			list_add_tail(&timer->entry, &event->entry);
+	list_for_each_entry(tmp, head, entry) {
+		if (tmp->expires > timer->expires) {
+			list_add_tail(&timer->entry, &tmp->entry);
 			return;
 		}
 	}
@@ -191,82 +206,45 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
 }
 
 /*
- * Do the callback functions of expired vtimer events.
- * Called from within the interrupt handler.
- */
-static void do_callbacks(struct list_head *cb_list)
-{
-	struct vtimer_queue *vq;
-	struct vtimer_list *event, *tmp;
-
-	if (list_empty(cb_list))
-		return;
-
-	vq = &__get_cpu_var(virt_cpu_timer);
-
-	list_for_each_entry_safe(event, tmp, cb_list, entry) {
-		list_del_init(&event->entry);
-		(event->function)(event->data);
-		if (event->interval) {
-			/* Recharge interval timer */
-			event->expires = event->interval + vq->elapsed;
-			spin_lock(&vq->lock);
-			list_add_sorted(event, &vq->list);
-			spin_unlock(&vq->lock);
-		}
-	}
-}
-
-/*
- * Handler for the virtual CPU timer.
+ * Handler for expired virtual CPU timer.
  */
-static void do_cpu_timer_interrupt(struct ext_code ext_code,
-				   unsigned int param32, unsigned long param64)
+static void virt_timer_expire(void)
 {
-	struct vtimer_queue *vq;
-	struct vtimer_list *event, *tmp;
-	struct list_head cb_list;	/* the callback queue */
-	__u64 elapsed, next;
-
-	kstat_cpu(smp_processor_id()).irqs[EXTINT_TMR]++;
-	INIT_LIST_HEAD(&cb_list);
-	vq = &__get_cpu_var(virt_cpu_timer);
-
-	/* walk timer list, fire all expired events */
-	spin_lock(&vq->lock);
-
-	elapsed = vq->elapsed + (vq->timer - S390_lowcore.async_enter_timer);
-	BUG_ON((s64) elapsed < 0);
-	vq->elapsed = 0;
-	list_for_each_entry_safe(event, tmp, &vq->list, entry) {
-		if (event->expires < elapsed)
+	struct vtimer_list *timer, *tmp;
+	unsigned long elapsed;
+	LIST_HEAD(cb_list);
+
+	/* walk timer list, fire all expired timers */
+	spin_lock(&virt_timer_lock);
+	elapsed = atomic64_read(&virt_timer_elapsed);
+	list_for_each_entry_safe(timer, tmp, &virt_timer_list, entry) {
+		if (timer->expires < elapsed)
 			/* move expired timer to the callback queue */
-			list_move_tail(&event->entry, &cb_list);
+			list_move_tail(&timer->entry, &cb_list);
 		else
-			event->expires -= elapsed;
+			timer->expires -= elapsed;
 	}
-	spin_unlock(&vq->lock);
-
-	do_callbacks(&cb_list);
-
-	/* next event is first in list */
-	next = VTIMER_MAX_SLICE;
-	spin_lock(&vq->lock);
-	if (!list_empty(&vq->list)) {
-		event = list_first_entry(&vq->list, struct vtimer_list, entry);
-		next = event->expires;
+	if (!list_empty(&virt_timer_list)) {
+		timer = list_first_entry(&virt_timer_list,
+					 struct vtimer_list, entry);
+		atomic64_set(&virt_timer_current, timer->expires);
+	}
+	atomic64_sub(elapsed, &virt_timer_elapsed);
+	spin_unlock(&virt_timer_lock);
+
+	/* Do callbacks and recharge periodic timers */
+	list_for_each_entry_safe(timer, tmp, &cb_list, entry) {
+		list_del_init(&timer->entry);
+		timer->function(timer->data);
+		if (timer->interval) {
+			/* Recharge interval timer */
+			timer->expires = timer->interval +
+				atomic64_read(&virt_timer_elapsed);
+			spin_lock(&virt_timer_lock);
+			list_add_sorted(timer, &virt_timer_list);
+			spin_unlock(&virt_timer_lock);
+		}
 	}
-	spin_unlock(&vq->lock);
-	/*
-	 * To improve precision add the time spent by the
-	 * interrupt handler to the elapsed time.
-	 * Note: CPU timer counts down and we got an interrupt,
-	 * the current content is negative
-	 */
-	elapsed = S390_lowcore.async_enter_timer - get_vtimer();
-	set_vtimer(next - elapsed);
-	vq->timer = next - elapsed;
-	vq->elapsed = elapsed;
 }
 
 void init_virt_timer(struct vtimer_list *timer)
@@ -278,179 +256,108 @@ EXPORT_SYMBOL(init_virt_timer);
 
 static inline int vtimer_pending(struct vtimer_list *timer)
 {
-	return (!list_empty(&timer->entry));
+	return !list_empty(&timer->entry);
 }
 
-/*
- * this function should only run on the specified CPU
- */
 static void internal_add_vtimer(struct vtimer_list *timer)
 {
-	struct vtimer_queue *vq;
-	unsigned long flags;
-	__u64 left, expires;
-
-	vq = &per_cpu(virt_cpu_timer, timer->cpu);
-	spin_lock_irqsave(&vq->lock, flags);
-
-	BUG_ON(timer->cpu != smp_processor_id());
-
-	if (list_empty(&vq->list)) {
-		/* First timer on this cpu, just program it. */
-		list_add(&timer->entry, &vq->list);
-		set_vtimer(timer->expires);
-		vq->timer = timer->expires;
-		vq->elapsed = 0;
+	if (list_empty(&virt_timer_list)) {
+		/* First timer, just program it. */
+		atomic64_set(&virt_timer_current, timer->expires);
+		atomic64_set(&virt_timer_elapsed, 0);
+		list_add(&timer->entry, &virt_timer_list);
 	} else {
-		/* Check progress of old timers. */
-		expires = timer->expires;
-		left = get_vtimer();
-		if (likely((s64) expires < (s64) left)) {
+		/* Update timer against current base. */
+		timer->expires += atomic64_read(&virt_timer_elapsed);
+		if (likely((s64) timer->expires <
+			   (s64) atomic64_read(&virt_timer_current)))
 			/* The new timer expires before the current timer. */
-			set_vtimer(expires);
-			vq->elapsed += vq->timer - left;
-			vq->timer = expires;
-		} else {
-			vq->elapsed += vq->timer - left;
-			vq->timer = left;
-		}
-		/* Insert new timer into per cpu list. */
-		timer->expires += vq->elapsed;
-		list_add_sorted(timer, &vq->list);
+			atomic64_set(&virt_timer_current, timer->expires);
+		/* Insert new timer into the list. */
+		list_add_sorted(timer, &virt_timer_list);
 	}
-
-	spin_unlock_irqrestore(&vq->lock, flags);
-	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
-	put_cpu();
 }
 
-static inline void prepare_vtimer(struct vtimer_list *timer)
+static void __add_vtimer(struct vtimer_list *timer, int periodic)
 {
-	BUG_ON(!timer->function);
-	BUG_ON(!timer->expires || timer->expires > VTIMER_MAX_SLICE);
-	BUG_ON(vtimer_pending(timer));
-	timer->cpu = get_cpu();
+	unsigned long flags;
+
+	timer->interval = periodic ? timer->expires : 0;
+	spin_lock_irqsave(&virt_timer_lock, flags);
+	internal_add_vtimer(timer);
+	spin_unlock_irqrestore(&virt_timer_lock, flags);
 }
 
 /*
  * add_virt_timer - add an oneshot virtual CPU timer
  */
-void add_virt_timer(void *new)
+void add_virt_timer(struct vtimer_list *timer)
 {
-	struct vtimer_list *timer;
-
-	timer = (struct vtimer_list *)new;
-	prepare_vtimer(timer);
-	timer->interval = 0;
-	internal_add_vtimer(timer);
+	__add_vtimer(timer, 0);
 }
 EXPORT_SYMBOL(add_virt_timer);
 
 /*
  * add_virt_timer_int - add an interval virtual CPU timer
  */
-void add_virt_timer_periodic(void *new)
+void add_virt_timer_periodic(struct vtimer_list *timer)
 {
-	struct vtimer_list *timer;
-
-	timer = (struct vtimer_list *)new;
-	prepare_vtimer(timer);
-	timer->interval = timer->expires;
-	internal_add_vtimer(timer);
+	__add_vtimer(timer, 1);
 }
 EXPORT_SYMBOL(add_virt_timer_periodic);
 
-static int __mod_vtimer(struct vtimer_list *timer, __u64 expires, int periodic)
+static int __mod_vtimer(struct vtimer_list *timer, u64 expires, int periodic)
 {
-	struct vtimer_queue *vq;
 	unsigned long flags;
-	int cpu;
+	int rc;
 
 	BUG_ON(!timer->function);
-	BUG_ON(!expires || expires > VTIMER_MAX_SLICE);
 
 	if (timer->expires == expires && vtimer_pending(timer))
 		return 1;
-
-	cpu = get_cpu();
-	vq = &per_cpu(virt_cpu_timer, cpu);
-
-	/* disable interrupts before test if timer is pending */
-	spin_lock_irqsave(&vq->lock, flags);
-
-	/* if timer isn't pending add it on the current CPU */
-	if (!vtimer_pending(timer)) {
-		spin_unlock_irqrestore(&vq->lock, flags);
-
-		if (periodic)
-			timer->interval = expires;
-		else
-			timer->interval = 0;
-		timer->expires = expires;
-		timer->cpu = cpu;
-		internal_add_vtimer(timer);
-		return 0;
-	}
-
-	/* check if we run on the right CPU */
-	BUG_ON(timer->cpu != cpu);
-
-	list_del_init(&timer->entry);
+	spin_lock_irqsave(&virt_timer_lock, flags);
+	rc = vtimer_pending(timer);
+	if (rc)
+		list_del_init(&timer->entry);
+	timer->interval = periodic ? expires : 0;
 	timer->expires = expires;
-	if (periodic)
-		timer->interval = expires;
-
-	/* the timer can't expire anymore so we can release the lock */
-	spin_unlock_irqrestore(&vq->lock, flags);
 	internal_add_vtimer(timer);
-	return 1;
+	spin_unlock_irqrestore(&virt_timer_lock, flags);
+	return rc;
 }
 
 /*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
  * returns whether it has modified a pending timer (1) or not (0)
  */
-int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer(struct vtimer_list *timer, u64 expires)
 {
 	return __mod_vtimer(timer, expires, 0);
 }
 EXPORT_SYMBOL(mod_virt_timer);
 
 /*
- * If we change a pending timer the function must be called on the CPU
- * where the timer is running on.
- *
 * returns whether it has modified a pending timer (1) or not (0)
 */
-int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires)
+int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires)
 {
 	return __mod_vtimer(timer, expires, 1);
 }
 EXPORT_SYMBOL(mod_virt_timer_periodic);
 
 /*
- * delete a virtual timer
+ * Delete a virtual timer.
 *
 * returns whether the deleted timer was pending (1) or not (0)
 */
 int del_virt_timer(struct vtimer_list *timer)
 {
 	unsigned long flags;
-	struct vtimer_queue *vq;
 
-	/* check if timer is pending */
 	if (!vtimer_pending(timer))
 		return 0;
-
-	vq = &per_cpu(virt_cpu_timer, timer->cpu);
-	spin_lock_irqsave(&vq->lock, flags);
-
-	/* we don't interrupt a running timer, just let it expire! */
+	spin_lock_irqsave(&virt_timer_lock, flags);
 	list_del_init(&timer->entry);
-
-	spin_unlock_irqrestore(&vq->lock, flags);
+	spin_unlock_irqrestore(&virt_timer_lock, flags);
 	return 1;
 }
 EXPORT_SYMBOL(del_virt_timer);
@@ -458,20 +365,10 @@ EXPORT_SYMBOL(del_virt_timer);
 /*
  * Start the virtual CPU timer on the current CPU.
  */
-void init_cpu_vtimer(void)
+void __cpuinit init_cpu_vtimer(void)
 {
-	struct vtimer_queue *vq;
-
-	/* initialize per cpu vtimer structure */
-	vq = &__get_cpu_var(virt_cpu_timer);
-	INIT_LIST_HEAD(&vq->list);
-	spin_lock_init(&vq->lock);
-
-	/* enable cpu timer interrupts */
-	__ctl_set_bit(0,10);
-
 	/* set initial cpu timer */
-	set_vtimer(0x7fffffffffffffffULL);
+	set_vtimer(VTIMER_MAX_SLICE);
 }
 
 static int __cpuinit s390_nohz_notify(struct notifier_block *self,
@@ -493,12 +390,7 @@ static int __cpuinit s390_nohz_notify(struct notifier_block *self,
 
 void __init vtime_init(void)
 {
-	/* request the cpu timer external interrupt */
-	if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
-		panic("Couldn't request external interrupt 0x1005");
-
 	/* Enable cpu timer interrupts on the boot cpu. */
 	init_cpu_vtimer();
 	cpu_notifier(s390_nohz_notify, 0);
 }
-
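The key change in vtime.c is the bookkeeping: instead of reprogramming a per-CPU hardware timer for the earliest event, all timers sit on one global list and carry expiry values relative to virt_timer_elapsed, which the accounting hooks advance through virt_timer_forward(); virt_timer_expire() then fires every timer whose offset has been overtaken. A toy user-space model of that arithmetic (illustration only, not kernel code; single timer, no list or locking):

#include <stdint.h>
#include <stdio.h>

static uint64_t timer_current;	/* earliest expiry, in relative units */
static uint64_t timer_elapsed;	/* cpu time accounted so far */

/* mirrors virt_timer_forward(): accumulate, report whether a timer is due */
static int forward(uint64_t delta)
{
	timer_elapsed += delta;
	return timer_elapsed >= timer_current;
}

int main(void)
{
	timer_current = 100;		/* one timer, due after 100 units */
	printf("%d\n", forward(60));	/* 0: only 60 units consumed */
	printf("%d\n", forward(60));	/* 1: 120 >= 100, expiry handler runs */
	return 0;
}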
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 7d7c3750f438..42d0cf89121d 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -12,8 +12,8 @@
 #include <linux/module.h>
 #include <linux/irqflags.h>
 #include <linux/interrupt.h>
+#include <asm/vtimer.h>
 #include <asm/div64.h>
-#include <asm/timer.h>
 
 void __delay(unsigned long loops)
 {