author     Martin Schwidefsky <schwidefsky@de.ibm.com>    2013-10-17 06:38:17 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>    2013-10-22 03:16:40 -0400
commit     8c071b0f19dfa230335d22ce56a8fab5bd20cedc (patch)
tree       ffd0d6272cd4e3f33b3069b22cb52457030a7f7a
parent     9784bd4f1a6ea736ad9bf241f5a965e0a2913a5e (diff)
s390/time: correct use of store clock fast
The result of the store-clock-fast (STCKF) instruction is a bit fuzzy.
It can happen that the value stored on one CPU is smaller than the value
stored on another CPU, although the order of the stores is the other way
around. This can cause deltas of get_tod_clock() values to become
negative when they should not be.

We need to be more careful with store-clock-fast; this patch partially
reverts git commit e4b7b4238e666682555461fa52eecd74652f36bb
"time: always use stckf instead of stck if available".

The get_tod_clock() function now uses the store-clock-extended (STCKE)
instruction. get_tod_clock_fast() can be used if the fuzziness of
store-clock-fast is acceptable, e.g. for wait loops local to a CPU.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
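As an illustration of the hazard described above and of the usage split
this patch establishes, here is a minimal, self-contained C sketch. The
tod_clock() and tod_clock_fast() helpers are hypothetical stand-ins
built on clock_gettime() so the example compiles anywhere; on s390 the
real get_tod_clock() and get_tod_clock_fast() map to the STCKE and
STCKF instructions shown in the timex.h hunk below.

	#include <stdint.h>
	#include <time.h>

	/*
	 * Hypothetical stand-ins for the kernel helpers, so this sketch
	 * is portable. The real s390 implementations use the STCKE and
	 * STCKF instructions from the timex.h hunk below.
	 */
	static uint64_t tod_clock(void)		/* ~ get_tod_clock(), STCKE */
	{
		struct timespec ts;

		clock_gettime(CLOCK_MONOTONIC, &ts);
		return (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
	}

	static uint64_t tod_clock_fast(void)	/* ~ get_tod_clock_fast(), STCKF */
	{
		/*
		 * On s390 this value can appear to go backwards when
		 * compared across CPUs: a later store on CPU A may be
		 * smaller than an earlier store on CPU B. The stand-in
		 * cannot reproduce that, but callers must assume it.
		 */
		return tod_clock();
	}

	/*
	 * Safe use of the fast clock: a busy wait where both reads
	 * happen on the same CPU, mirroring udelay_simple() in the
	 * delay.c hunk below.
	 */
	static void local_wait(uint64_t nsecs)
	{
		uint64_t end = tod_clock_fast() + nsecs;

		while (tod_clock_fast() < end)
			;	/* the kernel calls cpu_relax() here */
	}

	/*
	 * Unsafe use of the fast clock: a timestamp that may be
	 * compared with one taken on another CPU could yield a
	 * negative delta, so it must come from the fully ordered
	 * STCKE-based variant.
	 */
	static uint64_t cross_cpu_timestamp(void)
	{
		return tod_clock();
	}

	int main(void)
	{
		uint64_t t = cross_cpu_timestamp();

		local_wait(1000000);	/* spin for roughly one millisecond */
		return tod_clock() < t;	/* 0: the ordered clock did not go backwards */
	}

Every call site converted below follows this split: the wait loops in
delay.c, sclp.c and cio.c, and the timestamps in debug.c, kvm and qdio,
are places where the fuzziness of store-clock-fast is acceptable.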
 arch/s390/include/asm/timex.h | 28 ++++++++++++++--------------
 arch/s390/kernel/debug.c      |  2 +-
 arch/s390/kvm/interrupt.c     |  6 +++---
 arch/s390/lib/delay.c         | 14 +++++++-------
 drivers/s390/char/sclp.c      |  4 ++--
 drivers/s390/cio/cio.c        |  4 ++--
 drivers/s390/cio/qdio_main.c  | 10 +++++-----
 7 files changed, 34 insertions(+), 34 deletions(-)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8ad8af915032..819b94d22720 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -71,30 +71,30 @@ static inline void local_tick_enable(unsigned long long comp)
 
 typedef unsigned long long cycles_t;
 
-static inline unsigned long long get_tod_clock(void)
-{
-	unsigned long long clk;
-
-#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
-	asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
-#else
-	asm volatile("stck %0" : "=Q" (clk) : : "cc");
-#endif
-	return clk;
-}
-
 static inline void get_tod_clock_ext(char *clk)
 {
 	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
 }
 
-static inline unsigned long long get_tod_clock_xt(void)
+static inline unsigned long long get_tod_clock(void)
 {
 	unsigned char clk[16];
 	get_tod_clock_ext(clk);
 	return *((unsigned long long *)&clk[1]);
 }
 
+static inline unsigned long long get_tod_clock_fast(void)
+{
+#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
+	unsigned long long clk;
+
+	asm volatile("stckf %0" : "=Q" (clk) : : "cc");
+	return clk;
+#else
+	return get_tod_clock();
+#endif
+}
+
 static inline cycles_t get_cycles(void)
 {
 	return (cycles_t) get_tod_clock() >> 2;
@@ -125,7 +125,7 @@ extern u64 sched_clock_base_cc;
  */
 static inline unsigned long long get_tod_clock_monotonic(void)
 {
-	return get_tod_clock_xt() - sched_clock_base_cc;
+	return get_tod_clock() - sched_clock_base_cc;
 }
 
 /**
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index f1279dc2e1bc..17d62fe5d7b7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -867,7 +867,7 @@ static inline void
 debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
 		   int exception)
 {
-	active->id.stck = get_tod_clock();
+	active->id.stck = get_tod_clock_fast();
 	active->id.fields.cpuid = smp_processor_id();
 	active->caller = __builtin_return_address(0);
 	active->id.fields.exception = exception;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 7f35cb33e510..7f1f7ac5cf7f 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -385,7 +385,7 @@ static int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
 	}
 
 	if ((!rc) && (vcpu->arch.sie_block->ckc <
-		get_tod_clock() + vcpu->arch.sie_block->epoch)) {
+		get_tod_clock_fast() + vcpu->arch.sie_block->epoch)) {
 		if ((!psw_extint_disabled(vcpu)) &&
 		    (vcpu->arch.sie_block->gcr[0] & 0x800ul))
 			rc = 1;
@@ -425,7 +425,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		goto no_timer;
 	}
 
-	now = get_tod_clock() + vcpu->arch.sie_block->epoch;
+	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
 	if (vcpu->arch.sie_block->ckc < now) {
 		__unset_cpu_idle(vcpu);
 		return 0;
@@ -515,7 +515,7 @@ void kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
 	}
 
 	if ((vcpu->arch.sie_block->ckc <
-		get_tod_clock() + vcpu->arch.sie_block->epoch))
+		get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
 		__try_deliver_ckc_interrupt(vcpu);
 
 	if (atomic_read(&fi->active)) {
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c
index 57c87d7d7ede..a9f3d0042d58 100644
--- a/arch/s390/lib/delay.c
+++ b/arch/s390/lib/delay.c
@@ -44,7 +44,7 @@ static void __udelay_disabled(unsigned long long usecs)
 	do {
 		set_clock_comparator(end);
 		vtime_stop_cpu();
-	} while (get_tod_clock() < end);
+	} while (get_tod_clock_fast() < end);
 	lockdep_on();
 	__ctl_load(cr0, 0, 0);
 	__ctl_load(cr6, 6, 6);
@@ -55,7 +55,7 @@ static void __udelay_enabled(unsigned long long usecs)
 {
 	u64 clock_saved, end;
 
-	end = get_tod_clock() + (usecs << 12);
+	end = get_tod_clock_fast() + (usecs << 12);
 	do {
 		clock_saved = 0;
 		if (end < S390_lowcore.clock_comparator) {
@@ -65,7 +65,7 @@ static void __udelay_enabled(unsigned long long usecs)
 		vtime_stop_cpu();
 		if (clock_saved)
 			local_tick_enable(clock_saved);
-	} while (get_tod_clock() < end);
+	} while (get_tod_clock_fast() < end);
 }
 
 /*
@@ -109,8 +109,8 @@ void udelay_simple(unsigned long long usecs)
 {
 	u64 end;
 
-	end = get_tod_clock() + (usecs << 12);
-	while (get_tod_clock() < end)
+	end = get_tod_clock_fast() + (usecs << 12);
+	while (get_tod_clock_fast() < end)
 		cpu_relax();
 }
 
116 116
@@ -120,10 +120,10 @@ void __ndelay(unsigned long long nsecs)
 
 	nsecs <<= 9;
 	do_div(nsecs, 125);
-	end = get_tod_clock() + nsecs;
+	end = get_tod_clock_fast() + nsecs;
 	if (nsecs & ~0xfffUL)
 		__udelay(nsecs >> 12);
-	while (get_tod_clock() < end)
+	while (get_tod_clock_fast() < end)
 		barrier();
 }
 EXPORT_SYMBOL(__ndelay);
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index a3aa374799dc..1fe264379e0d 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -486,7 +486,7 @@ sclp_sync_wait(void)
 	timeout = 0;
 	if (timer_pending(&sclp_request_timer)) {
 		/* Get timeout TOD value */
-		timeout = get_tod_clock() +
+		timeout = get_tod_clock_fast() +
 			  sclp_tod_from_jiffies(sclp_request_timer.expires -
 						jiffies);
 	}
@@ -508,7 +508,7 @@ sclp_sync_wait(void)
 	while (sclp_running_state != sclp_running_state_idle) {
 		/* Check for expired request timer */
 		if (timer_pending(&sclp_request_timer) &&
-		    get_tod_clock() > timeout &&
+		    get_tod_clock_fast() > timeout &&
 		    del_timer(&sclp_request_timer))
 			sclp_request_timer.function(sclp_request_timer.data);
 		cpu_relax();
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index d7da67a31c77..88e35d85d205 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -878,9 +878,9 @@ static void css_reset(void)
 		atomic_inc(&chpid_reset_count);
 	}
 	/* Wait for machine check for all channel paths. */
-	timeout = get_tod_clock() + (RCHP_TIMEOUT << 12);
+	timeout = get_tod_clock_fast() + (RCHP_TIMEOUT << 12);
 	while (atomic_read(&chpid_reset_count) != 0) {
-		if (get_tod_clock() > timeout)
+		if (get_tod_clock_fast() > timeout)
 			break;
 		cpu_relax();
 	}
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 8ed52aa49122..bbd3e511c771 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -338,10 +338,10 @@ again:
 		retries++;
 
 		if (!start_time) {
-			start_time = get_tod_clock();
+			start_time = get_tod_clock_fast();
 			goto again;
 		}
-		if ((get_tod_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
+		if (get_tod_clock_fast() - start_time < QDIO_BUSY_BIT_PATIENCE)
 			goto again;
 	}
 	if (retries) {
@@ -504,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_tod_clock();
+	q->timestamp = get_tod_clock_fast();
 
 	/*
 	 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -595,7 +595,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
 	 * At this point we know, that inbound first_to_check
 	 * has (probably) not moved (see qdio_inbound_processing).
 	 */
-	if (get_tod_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
+	if (get_tod_clock_fast() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
 			      q->first_to_check);
 		return 1;
@@ -728,7 +728,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 	int count, stop;
 	unsigned char state = 0;
 
-	q->timestamp = get_tod_clock();
+	q->timestamp = get_tod_clock_fast();
 
 	if (need_siga_sync(q))
 		if (((queue_type(q) != QDIO_IQDIO_QFMT) &&