author     Heiko Carstens <heiko.carstens@de.ibm.com>     2008-12-25 07:38:37 -0500
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>    2008-12-25 07:38:55 -0500
commit     750887dedc088d28198b170bcae83695247797d1 (patch)
tree       1af6de8113e994fdf92589c783f0a1baf3a41e36 /arch/s390/kernel/time.c
parent     b020632e40c3ed5e8c0c066d022672907e8401cf (diff)
[S390] convert etr/stp to stop_machine interface
This converts the etr and stp code to the new stop_machine interface,
which makes it possible to synchronize all cpus without allocating any
memory. This way we get rid of the only reason why we haven't converted
s390 to the generic IPI interface yet.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
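For context, the heart of the conversion is the master/slave election that now happens inside the stop_machine() callback: every cpu enters the same function, one wins an xchg() race and becomes the master, and the rest spin as slaves until the clock has been adjusted. Below is a minimal sketch of that pattern, written against the 2.6.28-era stop_machine() and cpu hotplug API; struct sync_data, sync_fn() and run_sync() are illustrative names, not identifiers from this patch.

#include <linux/stop_machine.h>
#include <linux/cpu.h>
#include <asm/atomic.h>

struct sync_data {
        atomic_t cpus;          /* cpus that have not reached sync_fn yet */
        int in_sync;            /* set by the master to release the slaves */
};

static int sync_fn(void *data)
{
        static int first;
        struct sync_data *sync = data;

        /* Every online cpu runs this; exactly one wins the xchg() race. */
        if (xchg(&first, 1) == 1) {
                /* Slave: report arrival, spin until the master is done. */
                atomic_dec(&sync->cpus);
                while (sync->in_sync == 0)
                        cpu_relax();
                return 0;
        }
        /* Master: wait until all other cpus sit in the slave loop. */
        while (atomic_read(&sync->cpus) != 0)
                cpu_relax();
        /* ... adjust the TOD clock here; no memory allocation needed ... */
        sync->in_sync = 1;
        xchg(&first, 0);        /* rearm for the next invocation */
        return 0;
}

static int run_sync(void)
{
        struct sync_data sync = { .in_sync = 0 };
        int rc;

        get_online_cpus();
        atomic_set(&sync.cpus, num_online_cpus() - 1);
        /* sync_fn() runs on every online cpu with interrupts disabled. */
        rc = stop_machine(sync_fn, &sync, &cpu_online_map);
        put_online_cpus();
        return rc;
}

Because stop_machine() already parks every online cpu with interrupts off, the caller no longer needs the smp_call_function()/preempt_disable() dance, and the sync data can live on the master's stack.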
Diffstat (limited to 'arch/s390/kernel/time.c')
-rw-r--r--  arch/s390/kernel/time.c  212
1 file changed, 131 insertions(+), 81 deletions(-)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index b73bbf31f432..6e09bc285ba0 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -20,6 +20,8 @@
 #include <linux/string.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
+#include <linux/cpu.h>
+#include <linux/stop_machine.h>
 #include <linux/time.h>
 #include <linux/sysdev.h>
 #include <linux/delay.h>
@@ -391,6 +393,15 @@ static void enable_sync_clock(void)
         atomic_set_mask(0x80000000, sw_ptr);
 }
 
+/* Single threaded workqueue used for etr and stp sync events */
+static struct workqueue_struct *time_sync_wq;
+
+static void __init time_init_wq(void)
+{
+        if (!time_sync_wq)
+                time_sync_wq = create_singlethread_workqueue("timesync");
+}
+
 /*
  * External Time Reference (ETR) code.
  */
@@ -483,17 +494,18 @@ static int __init etr_init(void)
 
         if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags))
                 return 0;
+        time_init_wq();
         /* Check if this machine has the steai instruction. */
         if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
                 etr_steai_available = 1;
         setup_timer(&etr_timer, etr_timeout, 0UL);
         if (etr_port0_online) {
                 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
-                schedule_work(&etr_work);
+                queue_work(time_sync_wq, &etr_work);
         }
         if (etr_port1_online) {
                 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
-                schedule_work(&etr_work);
+                queue_work(time_sync_wq, &etr_work);
         }
         return 0;
 }
@@ -520,7 +532,7 @@ void etr_switch_to_local(void)
         if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
                 disable_sync_clock(NULL);
         set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
-        schedule_work(&etr_work);
+        queue_work(time_sync_wq, &etr_work);
 }
 
 /*
@@ -536,7 +548,7 @@ void etr_sync_check(void)
         if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
                 disable_sync_clock(NULL);
         set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
-        schedule_work(&etr_work);
+        queue_work(time_sync_wq, &etr_work);
 }
 
 /*
@@ -560,13 +572,13 @@ static void etr_timing_alert(struct etr_irq_parm *intparm)
          * Both ports are not up-to-date now.
          */
         set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
-        schedule_work(&etr_work);
+        queue_work(time_sync_wq, &etr_work);
 }
 
 static void etr_timeout(unsigned long dummy)
 {
         set_bit(ETR_EVENT_UPDATE, &etr_events);
-        schedule_work(&etr_work);
+        queue_work(time_sync_wq, &etr_work);
 }
 
 /*
@@ -673,14 +685,16 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
 }
 
 struct clock_sync_data {
+        atomic_t cpus;
         int in_sync;
         unsigned long long fixup_cc;
+        int etr_port;
+        struct etr_aib *etr_aib;
 };
 
-static void clock_sync_cpu_start(void *dummy)
+static void clock_sync_cpu(struct clock_sync_data *sync)
 {
-        struct clock_sync_data *sync = dummy;
-
+        atomic_dec(&sync->cpus);
         enable_sync_clock();
         /*
          * This looks like a busy wait loop but it isn't. etr_sync_cpus
@@ -706,39 +720,35 @@ static void clock_sync_cpu_start(void *dummy)
         fixup_clock_comparator(sync->fixup_cc);
 }
 
-static void clock_sync_cpu_end(void *dummy)
-{
-}
-
 /*
  * Sync the TOD clock using the port refered to by aibp. This port
  * has to be enabled and the other port has to be disabled. The
  * last eacr update has to be more than 1.6 seconds in the past.
  */
-static int etr_sync_clock(struct etr_aib *aib, int port)
+static int etr_sync_clock(void *data)
 {
-        struct etr_aib *sync_port;
-        struct clock_sync_data etr_sync;
+        static int first;
         unsigned long long clock, old_clock, delay, delta;
-        int follows;
+        struct clock_sync_data *etr_sync;
+        struct etr_aib *sync_port, *aib;
+        int port;
         int rc;
 
-        /* Check if the current aib is adjacent to the sync port aib. */
-        sync_port = (port == 0) ? &etr_port0 : &etr_port1;
-        follows = etr_aib_follows(sync_port, aib, port);
-        memcpy(sync_port, aib, sizeof(*aib));
-        if (!follows)
-                return -EAGAIN;
+        etr_sync = data;
 
-        /*
-         * Catch all other cpus and make them wait until we have
-         * successfully synced the clock. smp_call_function will
-         * return after all other cpus are in etr_sync_cpu_start.
-         */
-        memset(&etr_sync, 0, sizeof(etr_sync));
-        preempt_disable();
-        smp_call_function(clock_sync_cpu_start, &etr_sync, 0);
-        local_irq_disable();
+        if (xchg(&first, 1) == 1) {
+                /* Slave */
+                clock_sync_cpu(etr_sync);
+                return 0;
+        }
+
+        /* Wait until all other cpus entered the sync function. */
+        while (atomic_read(&etr_sync->cpus) != 0)
+                cpu_relax();
+
+        port = etr_sync->etr_port;
+        aib = etr_sync->etr_aib;
+        sync_port = (port == 0) ? &etr_port0 : &etr_port1;
         enable_sync_clock();
 
         /* Set clock to next OTE. */
@@ -755,16 +765,16 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
                 delay = (unsigned long long)
                         (aib->edf2.etv - sync_port->edf2.etv) << 32;
                 delta = adjust_time(old_clock, clock, delay);
-                etr_sync.fixup_cc = delta;
+                etr_sync->fixup_cc = delta;
                 fixup_clock_comparator(delta);
                 /* Verify that the clock is properly set. */
                 if (!etr_aib_follows(sync_port, aib, port)) {
                         /* Didn't work. */
                         disable_sync_clock(NULL);
-                        etr_sync.in_sync = -EAGAIN;
+                        etr_sync->in_sync = -EAGAIN;
                         rc = -EAGAIN;
                 } else {
-                        etr_sync.in_sync = 1;
+                        etr_sync->in_sync = 1;
                         rc = 0;
                 }
         } else {
@@ -772,12 +782,33 @@ static int etr_sync_clock(struct etr_aib *aib, int port)
                 __ctl_clear_bit(0, 29);
                 __ctl_clear_bit(14, 21);
                 disable_sync_clock(NULL);
-                etr_sync.in_sync = -EAGAIN;
+                etr_sync->in_sync = -EAGAIN;
                 rc = -EAGAIN;
         }
-        local_irq_enable();
-        smp_call_function(clock_sync_cpu_end, NULL, 0);
-        preempt_enable();
+        xchg(&first, 0);
+        return rc;
+}
+
+static int etr_sync_clock_stop(struct etr_aib *aib, int port)
+{
+        struct clock_sync_data etr_sync;
+        struct etr_aib *sync_port;
+        int follows;
+        int rc;
+
+        /* Check if the current aib is adjacent to the sync port aib. */
+        sync_port = (port == 0) ? &etr_port0 : &etr_port1;
+        follows = etr_aib_follows(sync_port, aib, port);
+        memcpy(sync_port, aib, sizeof(*aib));
+        if (!follows)
+                return -EAGAIN;
+        memset(&etr_sync, 0, sizeof(etr_sync));
+        etr_sync.etr_aib = aib;
+        etr_sync.etr_port = port;
+        get_online_cpus();
+        atomic_set(&etr_sync.cpus, num_online_cpus() - 1);
+        rc = stop_machine(etr_sync_clock, &etr_sync, &cpu_online_map);
+        put_online_cpus();
         return rc;
 }
 
@@ -934,7 +965,7 @@ static void etr_update_eacr(struct etr_eacr eacr)
 }
 
 /*
- * ETR tasklet. In this function you'll find the main logic. In
+ * ETR work. In this function you'll find the main logic. In
  * particular this is the only function that calls etr_update_eacr(),
  * it "controls" the etr control register.
  */
@@ -1067,7 +1098,7 @@ static void etr_work_fn(struct work_struct *work)
         etr_update_eacr(eacr);
         set_bit(CLOCK_SYNC_ETR, &clock_sync_flags);
         if (now < etr_tolec + (1600000 << 12) ||
-            etr_sync_clock(&aib, sync_port) != 0) {
+            etr_sync_clock_stop(&aib, sync_port) != 0) {
                 /* Sync failed. Try again in 1/2 second. */
                 eacr.es = 0;
                 etr_update_eacr(eacr);
@@ -1156,13 +1187,13 @@ static ssize_t etr_online_store(struct sys_device *dev,
                         return count;   /* Nothing to do. */
                 etr_port0_online = value;
                 set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
-                schedule_work(&etr_work);
+                queue_work(time_sync_wq, &etr_work);
         } else {
                 if (etr_port1_online == value)
                         return count;   /* Nothing to do. */
                 etr_port1_online = value;
                 set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
-                schedule_work(&etr_work);
+                queue_work(time_sync_wq, &etr_work);
         }
         return count;
 }
@@ -1396,8 +1427,12 @@ static void __init stp_reset(void)
 
 static int __init stp_init(void)
 {
-        if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online)
-                schedule_work(&stp_work);
+        if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
+                return 0;
+        time_init_wq();
+        if (!stp_online)
+                return 0;
+        queue_work(time_sync_wq, &stp_work);
         return 0;
 }
 
@@ -1414,7 +1449,7 @@ arch_initcall(stp_init);
 static void stp_timing_alert(struct stp_irq_parm *intparm)
 {
         if (intparm->tsc || intparm->lac || intparm->tcpc)
-                schedule_work(&stp_work);
+                queue_work(time_sync_wq, &stp_work);
 }
 
 /*
@@ -1428,7 +1463,7 @@ void stp_sync_check(void)
         if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
                 return;
         disable_sync_clock(NULL);
-        schedule_work(&stp_work);
+        queue_work(time_sync_wq, &stp_work);
 }
 
 /*
@@ -1442,46 +1477,34 @@ void stp_island_check(void)
         if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags))
                 return;
         disable_sync_clock(NULL);
-        schedule_work(&stp_work);
+        queue_work(time_sync_wq, &stp_work);
 }
 
-/*
- * STP tasklet. Check for the STP state and take over the clock
- * synchronization if the STP clock source is usable.
- */
-static void stp_work_fn(struct work_struct *work)
+
+static int stp_sync_clock(void *data)
 {
-        struct clock_sync_data stp_sync;
+        static int first;
         unsigned long long old_clock, delta;
+        struct clock_sync_data *stp_sync;
         int rc;
 
-        if (!stp_online) {
-                chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
-                return;
-        }
+        stp_sync = data;
 
-        rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
-        if (rc)
-                return;
+        if (xchg(&first, 1) == 1) {
+                /* Slave */
+                clock_sync_cpu(stp_sync);
+                return 0;
+        }
 
-        rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
-        if (rc || stp_info.c == 0)
-                return;
+        /* Wait until all other cpus entered the sync function. */
+        while (atomic_read(&stp_sync->cpus) != 0)
+                cpu_relax();
 
-        /*
-         * Catch all other cpus and make them wait until we have
-         * successfully synced the clock. smp_call_function will
-         * return after all other cpus are in clock_sync_cpu_start.
-         */
-        memset(&stp_sync, 0, sizeof(stp_sync));
-        preempt_disable();
-        smp_call_function(clock_sync_cpu_start, &stp_sync, 0);
-        local_irq_disable();
         enable_sync_clock();
 
         set_bit(CLOCK_SYNC_STP, &clock_sync_flags);
         if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags))
-                schedule_work(&etr_work);
+                queue_work(time_sync_wq, &etr_work);
 
         rc = 0;
         if (stp_info.todoff[0] || stp_info.todoff[1] ||
@@ -1500,16 +1523,43 @@ static void stp_work_fn(struct work_struct *work)
         }
         if (rc) {
                 disable_sync_clock(NULL);
-                stp_sync.in_sync = -EAGAIN;
+                stp_sync->in_sync = -EAGAIN;
                 clear_bit(CLOCK_SYNC_STP, &clock_sync_flags);
                 if (etr_port0_online || etr_port1_online)
-                        schedule_work(&etr_work);
+                        queue_work(time_sync_wq, &etr_work);
         } else
-                stp_sync.in_sync = 1;
+                stp_sync->in_sync = 1;
+        xchg(&first, 0);
+        return 0;
+}
 
-        local_irq_enable();
-        smp_call_function(clock_sync_cpu_end, NULL, 0);
-        preempt_enable();
+/*
+ * STP work. Check for the STP state and take over the clock
+ * synchronization if the STP clock source is usable.
+ */
+static void stp_work_fn(struct work_struct *work)
+{
+        struct clock_sync_data stp_sync;
+        int rc;
+
+        if (!stp_online) {
+                chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
+                return;
+        }
+
+        rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0);
+        if (rc)
+                return;
+
+        rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi));
+        if (rc || stp_info.c == 0)
+                return;
+
+        memset(&stp_sync, 0, sizeof(stp_sync));
+        get_online_cpus();
+        atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
+        stop_machine(stp_sync_clock, &stp_sync, &cpu_online_map);
+        put_online_cpus();
 }
 
 /*
@@ -1618,7 +1668,7 @@ static ssize_t stp_online_store(struct sysdev_class *class,
         if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
                 return -EOPNOTSUPP;
         stp_online = value;
-        schedule_work(&stp_work);
+        queue_work(time_sync_wq, &stp_work);
         return count;
 }
 