Diffstat (limited to 'arch/s390/kernel/time.c')
-rw-r--r--	arch/s390/kernel/time.c	| 278 ++++++++++++++++++++++-----------
 1 file changed, 187 insertions(+), 91 deletions(-)
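Two pieces of this patch deserve a note up front. update_vsyscall() below publishes the TOD clock fields to the vdso with a seqcount-style protocol: tb_update_count is bumped to an odd value before the stores and back to an even value afterwards, with smp_wmb() ordering the writes. A userspace reader consuming such fields would retry roughly as follows; this is a minimal model with hypothetical names (vdso_data_model, read_xtime), not the real s390 vdso reader, which is assembler and outside this patch.

/* Hypothetical reader for the tb_update_count protocol used by
 * update_vsyscall(): retry while the writer holds an odd sequence
 * count or the count changed during the reads. Illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct vdso_data_model {
	uint32_t tb_update_count;	/* odd while an update is in flight */
	uint64_t xtime_clock_sec;
	uint64_t xtime_clock_nsec;
};

static void read_xtime(volatile struct vdso_data_model *v,
		       uint64_t *sec, uint64_t *nsec)
{
	uint32_t seq;

	do {
		seq = v->tb_update_count;
		__sync_synchronize();	/* pairs with the writer's smp_wmb() */
		*sec = v->xtime_clock_sec;
		*nsec = v->xtime_clock_nsec;
		__sync_synchronize();
		/* retry if the writer was active, or became active since */
	} while ((seq & 1) || seq != v->tb_update_count);
}

int main(void)
{
	volatile struct vdso_data_model v = { 2, 1234, 567890 };
	uint64_t sec, nsec;

	read_xtime(&v, &sec, &nsec);
	printf("%llu.%09llu\n", (unsigned long long) sec,
	       (unsigned long long) nsec);
	return 0;
}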
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index eccefbbff887..5be981a36c3e 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -12,6 +12,9 @@
  * Copyright (C) 1991, 1992, 1995 Linus Torvalds
  */
 
+#define KMSG_COMPONENT "time"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -20,6 +23,8 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/stop_machine.h>
 #include <linux/time.h>
 #include <linux/sysdev.h>
 #include <linux/delay.h>
@@ -36,6 +41,7 @@
 #include <asm/delay.h>
 #include <asm/s390_ext.h>
 #include <asm/div64.h>
+#include <asm/vdso.h>
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
 #include <asm/timer.h>
@@ -223,6 +229,36 @@ static struct clocksource clocksource_tod = {
 };
 
 
+void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
+{
+	if (clock != &clocksource_tod)
+		return;
+
+	/* Make userspace gettimeofday spin until we're done. */
+	++vdso_data->tb_update_count;
+	smp_wmb();
+	vdso_data->xtime_tod_stamp = clock->cycle_last;
+	vdso_data->xtime_clock_sec = xtime.tv_sec;
+	vdso_data->xtime_clock_nsec = xtime.tv_nsec;
+	vdso_data->wtom_clock_sec = wall_to_monotonic.tv_sec;
+	vdso_data->wtom_clock_nsec = wall_to_monotonic.tv_nsec;
+	smp_wmb();
+	++vdso_data->tb_update_count;
+}
+
+extern struct timezone sys_tz;
+
+void update_vsyscall_tz(void)
+{
+	/* Make userspace gettimeofday spin until we're done. */
+	++vdso_data->tb_update_count;
+	smp_wmb();
+	vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
+	vdso_data->tz_dsttime = sys_tz.tz_dsttime;
+	smp_wmb();
+	++vdso_data->tb_update_count;
+}
+
 /*
  * Initialize the TOD clock and the CPU timer of
  * the boot cpu.
@@ -253,10 +289,8 @@ void __init time_init(void)
 
 	/* Enable TOD clock interrupts on the boot cpu. */
 	init_cpu_timer();
-
-#ifdef CONFIG_VIRT_TIMER
+	/* Enable cpu timer interrupts on the boot cpu. */
 	vtime_init();
-#endif
 }
 
 /*
@@ -288,8 +322,8 @@ static unsigned long long adjust_time(unsigned long long old,
 	}
 	sched_clock_base_cc += delta;
 	if (adjust.offset != 0) {
-		printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
-		       adjust.offset);
+		pr_notice("The ETR interface has adjusted the clock "
+			  "by %li microseconds\n", adjust.offset);
 		adjust.modes = ADJ_OFFSET_SINGLESHOT;
 		do_adjtimex(&adjust);
 	}
@@ -360,6 +394,15 @@ static void enable_sync_clock(void)
 	atomic_set_mask(0x80000000, sw_ptr);
 }
 
+/* Single threaded workqueue used for etr and stp sync events */
+static struct workqueue_struct *time_sync_wq;
+
+static void __init time_init_wq(void)
+{
+	if (!time_sync_wq)
+		time_sync_wq = create_singlethread_workqueue("timesync");
+}
+
 /*
  * External Time Reference (ETR) code.
  */
@@ -425,6 +468,7 @@ static struct timer_list etr_timer;
 
 static void etr_timeout(unsigned long dummy);
 static void etr_work_fn(struct work_struct *work);
+static DEFINE_MUTEX(etr_work_mutex);
 static DECLARE_WORK(etr_work, etr_work_fn);
 
 /*
@@ -440,8 +484,8 @@ static void etr_reset(void)
 		etr_tolec = get_clock();
 		set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags);
 	} else if (etr_port0_online || etr_port1_online) {
-		printk(KERN_WARNING "Running on non ETR capable "
-		       "machine, only local mode available.\n");
+		pr_warning("The real or virtual hardware system does "
+			   "not provide an ETR interface\n");
 		etr_port0_online = etr_port1_online = 0;
 	}
 }
@@ -452,17 +496,18 @@ static int __init etr_init(void)
 
 	if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
 		return 0;
+	time_init_wq();
 	/* Check if this machine has the steai instruction. */
 	if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
 		etr_steai_available = 1;
 	setup_timer(&etr_timer, etr_timeout, 0UL);
 	if (etr_port0_online) {
 		set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
-		schedule_work(&etr_work);
+		queue_work(time_sync_wq, &etr_work);
 	}
 	if (etr_port1_online) {
 		set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
-		schedule_work(&etr_work);
+		queue_work(time_sync_wq, &etr_work);
 	}
 	return 0;
 }
@@ -489,7 +534,7 @@ void etr_switch_to_local(void)
 	if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
 		disable_sync_clock(NULL);
 	set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
-	schedule_work(&etr_work);
+	queue_work(time_sync_wq, &etr_work);
 }
 
 /*
@@ -505,7 +550,7 @@ void etr_sync_check(void)
 	if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
 		disable_sync_clock(NULL);
 	set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
-	schedule_work(&etr_work);
+	queue_work(time_sync_wq, &etr_work);
 }
 
 /*
@@ -529,13 +574,13 @@ static void etr_timing_alert(struct etr_irq_parm *intparm)
 	 * Both ports are not up-to-date now.
	 */
 	set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
-	schedule_work(&etr_work);
+	queue_work(time_sync_wq, &etr_work);
 }
 
 static void etr_timeout(unsigned long dummy)
 {
 	set_bit(ETR_EVENT_UPDATE, &etr_events);
-	schedule_work(&etr_work);
+	queue_work(time_sync_wq, &etr_work);
 }
 
 /*
@@ -642,14 +687,16 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
 }
 
 struct clock_sync_data {
+	atomic_t cpus;
 	int in_sync;
 	unsigned long long fixup_cc;
+	int etr_port;
+	struct etr_aib *etr_aib;
 };
 
-static void clock_sync_cpu_start(void *dummy)
+static void clock_sync_cpu(struct clock_sync_data *sync)
 {
-	struct clock_sync_data *sync = dummy;
-
+	atomic_dec(&sync->cpus);
 	enable_sync_clock();
 	/*
 	 * This looks like a busy wait loop but it isn't. etr_sync_cpus
@@ -675,39 +722,35 @@ static void clock_sync_cpu_start(void *dummy)
 	fixup_clock_comparator(sync->fixup_cc);
 }
 
-static void clock_sync_cpu_end(void *dummy)
-{
-}
-
 /*
  * Sync the TOD clock using the port refered to by aibp. This port
  * has to be enabled and the other port has to be disabled. The
  * last eacr update has to be more than 1.6 seconds in the past.
  */
-static int etr_sync_clock(struct etr_aib *aib, int port)
+static int etr_sync_clock(void *data)
 {
-	struct etr_aib *sync_port;
-	struct clock_sync_data etr_sync;
+	static int first;
 	unsigned long long clock, old_clock, delay, delta;
-	int follows;
+	struct clock_sync_data *etr_sync;
+	struct etr_aib *sync_port, *aib;
+	int port;
 	int rc;
 
-	/* Check if the current aib is adjacent to the sync port aib. */
-	sync_port = (port == 0) ? &etr_port0 : &etr_port1;
-	follows = etr_aib_follows(sync_port, aib, port);
-	memcpy(sync_port, aib, sizeof(*aib));
-	if (!follows)
-		return -EAGAIN;
+	etr_sync = data;
 
-	/*
-	 * Catch all other cpus and make them wait until we have
-	 * successfully synced the clock. smp_call_function will
-	 * return after all other cpus are in etr_sync_cpu_start.
-	 */
-	memset(&etr_sync, 0, sizeof(etr_sync));
-	preempt_disable();
-	smp_call_function(clock_sync_cpu_start, &etr_sync, 0);
-	local_irq_disable();
+	if (xchg(&first, 1) == 1) {
+		/* Slave */
+		clock_sync_cpu(etr_sync);
+		return 0;
+	}
+
+	/* Wait until all other cpus entered the sync function. */
+	while (atomic_read(&etr_sync->cpus) != 0)
+		cpu_relax();
+
+	port = etr_sync->etr_port;
+	aib = etr_sync->etr_aib;
+	sync_port = (port == 0) ? &etr_port0 : &etr_port1;
 	enable_sync_clock();
 
 	/* Set clock to next OTE. */
@@ -724,16 +767,16 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 		delay = (unsigned long long)
 			(aib->edf2.etv - sync_port->edf2.etv) << 32;
 		delta = adjust_time(old_clock, clock, delay);
-		etr_sync.fixup_cc = delta;
+		etr_sync->fixup_cc = delta;
 		fixup_clock_comparator(delta);
 		/* Verify that the clock is properly set. */
 		if (!etr_aib_follows(sync_port, aib, port)) {
 			/* Didn't work. */
 			disable_sync_clock(NULL);
-			etr_sync.in_sync = -EAGAIN;
+			etr_sync->in_sync = -EAGAIN;
 			rc = -EAGAIN;
 		} else {
-			etr_sync.in_sync = 1;
+			etr_sync->in_sync = 1;
 			rc = 0;
 		}
 	} else {
@@ -741,12 +784,33 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
 		__ctl_clear_bit(0, 29);
 		__ctl_clear_bit(14, 21);
 		disable_sync_clock(NULL);
-		etr_sync.in_sync = -EAGAIN;
+		etr_sync->in_sync = -EAGAIN;
 		rc = -EAGAIN;
 	}
-	local_irq_enable();
-	smp_call_function(clock_sync_cpu_end, NULL, 0);
-	preempt_enable();
+	xchg(&first, 0);
+	return rc;
+}
+
+static int etr_sync_clock_stop(struct etr_aib *aib, int port)
+{
+	struct clock_sync_data etr_sync;
+	struct etr_aib *sync_port;
+	int follows;
+	int rc;
+
+	/* Check if the current aib is adjacent to the sync port aib. */
+	sync_port = (port == 0) ? &etr_port0 : &etr_port1;
+	follows = etr_aib_follows(sync_port, aib, port);
+	memcpy(sync_port, aib, sizeof(*aib));
+	if (!follows)
+		return -EAGAIN;
+	memset(&etr_sync, 0, sizeof(etr_sync));
+	etr_sync.etr_aib = aib;
+	etr_sync.etr_port = port;
+	get_online_cpus();
+	atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
+	rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map);
+	put_online_cpus();
 	return rc;
 }
 
@@ -903,7 +967,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
 }
 
 /*
- * ETR tasklet. In this function you'll find the main logic. In
+ * ETR work. In this function you'll find the main logic. In
  * particular this is the only function that calls etr_update_eacr(),
  * it "controls" the etr control register.
  */
@@ -914,6 +978,9 @@ static void etr_work_fn(struct work_struct *work)
 	struct etr_aib aib;
 	int sync_port;
 
+	/* prevent multiple execution. */
+	mutex_lock(&etr_work_mutex);
+
 	/* Create working copy of etr_eacr. */
 	eacr = etr_eacr;
 
@@ -929,7 +996,7 @@ static void etr_work_fn(struct work_struct *work)
 		del_timer_sync(&etr_timer);
 		etr_update_eacr(eacr);
 		clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
-		return;
+		goto out_unlock;
 	}
 
 	/* Store aib to get the current ETR status word. */
@@ -1016,7 +1083,7 @@ static void etr_work_fn(struct work_struct *work)
 	    eacr.es || sync_port < 0) {
 		etr_update_eacr(eacr);
 		etr_set_tolec_timeout(now);
-		return;
+		goto out_unlock;
 	}
 
 	/*
@@ -1036,7 +1103,7 @@ static void etr_work_fn(struct work_struct *work)
 	etr_update_eacr(eacr);
 	set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
 	if (now < etr_tolec + (1600000 << 12) ||
-	    etr_sync_clock(&aib, sync_port) != 0) {
+	    etr_sync_clock_stop(&aib, sync_port) != 0) {
 		/* Sync failed. Try again in 1/2 second. */
 		eacr.es = 0;
 		etr_update_eacr(eacr);
@@ -1044,6 +1111,8 @@ static void etr_work_fn(struct work_struct *work)
 		etr_set_sync_timeout();
 	} else
 		etr_set_tolec_timeout(now);
+out_unlock:
+	mutex_unlock(&etr_work_mutex);
 }
 
 /*
@@ -1125,13 +1194,13 @@ static ssize_t etr_online_store(struct sys_device *dev,
 			return count;	/* Nothing to do. */
 		etr_port0_online = value;
 		set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
-		schedule_work(&etr_work);
+		queue_work(time_sync_wq, &etr_work);
 	} else {
 		if (etr_port1_online == value)
 			return count;	/* Nothing to do. */
 		etr_port1_online = value;
 		set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
-		schedule_work(&etr_work);
+		queue_work(time_sync_wq, &etr_work);
 	}
 	return count;
 }
@@ -1332,6 +1401,7 @@ static struct stp_sstpi stp_info;
 static void *stp_page;
 
 static void stp_work_fn(struct work_struct *work);
+static DEFINE_MUTEX(stp_work_mutex);
 static DECLARE_WORK(stp_work, stp_work_fn);
 
 static int __init early_parse_stp(char *p)
@@ -1356,7 +1426,8 @@ static void __init stp_reset(void)
 	if (rc == 0)
 		set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
 	else if (stp_online) {
-		printk(KERN_WARNING "Running on non STP capable machine.\n");
+		pr_warning("The real or virtual hardware system does "
+			   "not provide an STP interface\n");
 		free_bootmem((unsigned long) stp_page, PAGE_SIZE);
 		stp_page = NULL;
 		stp_online = 0;
@@ -1365,8 +1436,12 @@ static void __init stp_reset(void)
 
 static int __init stp_init(void)
 {
-	if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online)
-		schedule_work(&stp_work);
+	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
+		return 0;
+	time_init_wq();
+	if (!stp_online)
+		return 0;
+	queue_work(time_sync_wq, &stp_work);
 	return 0;
 }
 
@@ -1383,7 +1458,7 @@ arch_initcall(stp_init);
 static void stp_timing_alert(struct stp_irq_parm *intparm)
 {
 	if (intparm->tsc || intparm->lac || intparm->tcpc)
-		schedule_work(&stp_work);
+		queue_work(time_sync_wq, &stp_work);
 }
 
 /*
@@ -1397,7 +1472,7 @@ void stp_sync_check(void)
 	if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
 		return;
 	disable_sync_clock(NULL);
-	schedule_work(&stp_work);
+	queue_work(time_sync_wq, &stp_work);
 }
 
 /*
@@ -1411,46 +1486,34 @@ void stp_island_check(void)
 	if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
 		return;
 	disable_sync_clock(NULL);
-	schedule_work(&stp_work);
+	queue_work(time_sync_wq, &stp_work);
 }
 
-/*
- * STP tasklet. Check for the STP state and take over the clock
- * synchronization if the STP clock source is usable.
- */
-static void stp_work_fn(struct work_struct *work)
+
+static int stp_sync_clock(void *data)
 {
-	struct clock_sync_data stp_sync;
+	static int first;
 	unsigned long long old_clock, delta;
+	struct clock_sync_data *stp_sync;
 	int rc;
 
-	if (!stp_online) {
-		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
-		return;
-	}
+	stp_sync = data;
 
-	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
-	if (rc)
-		return;
+	if (xchg(&first, 1) == 1) {
+		/* Slave */
+		clock_sync_cpu(stp_sync);
+		return 0;
+	}
 
-	rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
-	if (rc || stp_info.c == 0)
-		return;
+	/* Wait until all other cpus entered the sync function. */
+	while (atomic_read(&stp_sync->cpus) != 0)
+		cpu_relax();
 
-	/*
-	 * Catch all other cpus and make them wait until we have
-	 * successfully synced the clock. smp_call_function will
-	 * return after all other cpus are in clock_sync_cpu_start.
-	 */
-	memset(&stp_sync, 0, sizeof(stp_sync));
-	preempt_disable();
-	smp_call_function(clock_sync_cpu_start, &stp_sync, 0);
-	local_irq_disable();
 	enable_sync_clock();
 
 	set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
 	if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
-		schedule_work(&etr_work);
+		queue_work(time_sync_wq, &etr_work);
 
 	rc = 0;
 	if (stp_info.todoff[0] || stp_info.todoff[1] ||
@@ -1469,16 +1532,49 @@ static void stp_work_fn(struct work_struct *work)
 	}
 	if (rc) {
 		disable_sync_clock(NULL);
-		stp_sync.in_sync = -EAGAIN;
+		stp_sync->in_sync = -EAGAIN;
 		clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
 		if (etr_port0_online || etr_port1_online)
-			schedule_work(&etr_work);
+			queue_work(time_sync_wq, &etr_work);
 	} else
-		stp_sync.in_sync = 1;
+		stp_sync->in_sync = 1;
+	xchg(&first, 0);
+	return 0;
+}
+
+/*
+ * STP work. Check for the STP state and take over the clock
+ * synchronization if the STP clock source is usable.
+ */
+static void stp_work_fn(struct work_struct *work)
+{
+	struct clock_sync_data stp_sync;
+	int rc;
+
+	/* prevent multiple execution. */
+	mutex_lock(&stp_work_mutex);
+
+	if (!stp_online) {
+		chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
+		goto out_unlock;
+	}
+
+	rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
+	if (rc)
+		goto out_unlock;
+
+	rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
+	if (rc || stp_info.c == 0)
+		goto out_unlock;
+
+	memset(&stp_sync, 0, sizeof(stp_sync));
+	get_online_cpus();
+	atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
+	stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
+	put_online_cpus();
 
-	local_irq_enable();
-	smp_call_function(clock_sync_cpu_end, NULL, 0);
-	preempt_enable();
+out_unlock:
+	mutex_unlock(&stp_work_mutex);
 }
 
 /*
@@ -1587,7 +1683,7 @@ static ssize_t stp_online_store(struct sysdev_class *class,
 	if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
 		return -EOPNOTSUPP;
 	stp_online = value;
-	schedule_work(&stp_work);
+	queue_work(time_sync_wq, &stp_work);
 	return count;
 }
 
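For reference, the rendezvous that etr_sync_clock() and stp_sync_clock() perform under stop_machine() — the first cpu to win the xchg() on "first" becomes the master, the others check in through clock_sync_cpu() and spin until the master posts a verdict — can be modelled in plain C11 threads. The sketch below is illustrative: the pthread setup and raw busy-waits stand in for stop_machine(), and the names are not kernel API.

/* User-space model of the master/slave clock-sync rendezvous. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int first;	/* 0 = master slot still free */
static atomic_int cpus;		/* slaves still on the way in */
static atomic_int in_sync;	/* master's verdict, 0 = pending */

static void *sync_fn(void *arg)
{
	if (atomic_exchange(&first, 1) == 1) {
		/* Slave: check in, then wait for the master's verdict. */
		atomic_fetch_sub(&cpus, 1);
		while (atomic_load(&in_sync) == 0)
			;	/* the kernel uses cpu_relax() here */
		return NULL;
	}
	/* Master: wait until every other cpu entered the sync function. */
	while (atomic_load(&cpus) != 0)
		;
	/* ... adjust the clock here, as etr_sync_clock() does ... */
	atomic_store(&in_sync, 1);	/* release the slaves */
	atomic_store(&first, 0);	/* as xchg(&first, 0) in the patch */
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	int i;

	atomic_store(&cpus, 3);	/* num_online_cpus() - 1 */
	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, sync_fn, NULL);
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	puts("synced");
	return 0;
}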