diff options
author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2008-07-14 03:58:56 -0400 |
---|---|---|
committer | Heiko Carstens <heiko.carstens@de.ibm.com> | 2008-07-14 04:02:09 -0400 |
commit | d2fec595511b5718bdb65645b3d5d99800d97943 (patch) | |
tree | a94c3560fc2ad6aa89d61d646f73f4d7c1dfcc9b /arch/s390/kernel/time.c | |
parent | 761cdf6aacdb76f819050f4938cdab1f4cdcb945 (diff) |
[S390] stp support.
Add support for clock synchronization with the server time protocol.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Diffstat (limited to 'arch/s390/kernel/time.c')
-rw-r--r-- | arch/s390/kernel/time.c | 634 |
1 files changed, 484 insertions, 150 deletions
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 7aec676fefd5..7418bebb547f 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * Time of day based timer functions. | 3 | * Time of day based timer functions. |
4 | * | 4 | * |
5 | * S390 version | 5 | * S390 version |
6 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 6 | * Copyright IBM Corp. 1999, 2008 |
7 | * Author(s): Hartmut Penner (hp@de.ibm.com), | 7 | * Author(s): Hartmut Penner (hp@de.ibm.com), |
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com), | 8 | * Martin Schwidefsky (schwidefsky@de.ibm.com), |
9 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) | 9 | * Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) |
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/notifier.h> | 31 | #include <linux/notifier.h> |
32 | #include <linux/clocksource.h> | 32 | #include <linux/clocksource.h> |
33 | #include <linux/clockchips.h> | 33 | #include <linux/clockchips.h> |
34 | #include <linux/bootmem.h> | ||
34 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
35 | #include <asm/delay.h> | 36 | #include <asm/delay.h> |
36 | #include <asm/s390_ext.h> | 37 | #include <asm/s390_ext.h> |
@@ -162,7 +163,7 @@ void init_cpu_timer(void) | |||
162 | /* Enable clock comparator timer interrupt. */ | 163 | /* Enable clock comparator timer interrupt. */ |
163 | __ctl_set_bit(0,11); | 164 | __ctl_set_bit(0,11); |
164 | 165 | ||
165 | /* Always allow ETR external interrupts, even without an ETR. */ | 166 | /* Always allow the timing alert external interrupt. */ |
166 | __ctl_set_bit(0, 4); | 167 | __ctl_set_bit(0, 4); |
167 | } | 168 | } |
168 | 169 | ||
@@ -170,8 +171,21 @@ static void clock_comparator_interrupt(__u16 code) | |||
170 | { | 171 | { |
171 | } | 172 | } |
172 | 173 | ||
174 | static void etr_timing_alert(struct etr_irq_parm *); | ||
175 | static void stp_timing_alert(struct stp_irq_parm *); | ||
176 | |||
177 | static void timing_alert_interrupt(__u16 code) | ||
178 | { | ||
179 | if (S390_lowcore.ext_params & 0x00c40000) | ||
180 | etr_timing_alert((struct etr_irq_parm *) | ||
181 | &S390_lowcore.ext_params); | ||
182 | if (S390_lowcore.ext_params & 0x00038000) | ||
183 | stp_timing_alert((struct stp_irq_parm *) | ||
184 | &S390_lowcore.ext_params); | ||
185 | } | ||
186 | |||
173 | static void etr_reset(void); | 187 | static void etr_reset(void); |
174 | static void etr_ext_handler(__u16); | 188 | static void stp_reset(void); |
175 | 189 | ||
176 | /* | 190 | /* |
177 | * Get the TOD clock running. | 191 | * Get the TOD clock running. |
@@ -181,6 +195,7 @@ static u64 __init reset_tod_clock(void) | |||
181 | u64 time; | 195 | u64 time; |
182 | 196 | ||
183 | etr_reset(); | 197 | etr_reset(); |
198 | stp_reset(); | ||
184 | if (store_clock(&time) == 0) | 199 | if (store_clock(&time) == 0) |
185 | return time; | 200 | return time; |
186 | /* TOD clock not running. Set the clock to Unix Epoch. */ | 201 | /* TOD clock not running. Set the clock to Unix Epoch. */ |
@@ -231,8 +246,9 @@ void __init time_init(void) | |||
231 | if (clocksource_register(&clocksource_tod) != 0) | 246 | if (clocksource_register(&clocksource_tod) != 0) |
232 | panic("Could not register TOD clock source"); | 247 | panic("Could not register TOD clock source"); |
233 | 248 | ||
234 | /* request the etr external interrupt */ | 249 | /* request the timing alert external interrupt */ |
235 | if (register_early_external_interrupt(0x1406, etr_ext_handler, | 250 | if (register_early_external_interrupt(0x1406, |
251 | timing_alert_interrupt, | ||
236 | &ext_int_etr_cc) != 0) | 252 | &ext_int_etr_cc) != 0) |
237 | panic("Couldn't request external interrupt 0x1406"); | 253 | panic("Couldn't request external interrupt 0x1406"); |
238 | 254 | ||
@@ -245,10 +261,112 @@ void __init time_init(void) | |||
245 | } | 261 | } |
246 | 262 | ||
247 | /* | 263 | /* |
264 | * The time is "clock". old is what we think the time is. | ||
265 | * Adjust the value by a multiple of jiffies and add the delta to ntp. | ||
266 | * "delay" is an approximation how long the synchronization took. If | ||
267 | * the time correction is positive, then "delay" is subtracted from | ||
268 | * the time difference and only the remaining part is passed to ntp. | ||
269 | */ | ||
270 | static unsigned long long adjust_time(unsigned long long old, | ||
271 | unsigned long long clock, | ||
272 | unsigned long long delay) | ||
273 | { | ||
274 | unsigned long long delta, ticks; | ||
275 | struct timex adjust; | ||
276 | |||
277 | if (clock > old) { | ||
278 | /* It is later than we thought. */ | ||
279 | delta = ticks = clock - old; | ||
280 | delta = ticks = (delta < delay) ? 0 : delta - delay; | ||
281 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
282 | adjust.offset = ticks * (1000000 / HZ); | ||
283 | } else { | ||
284 | /* It is earlier than we thought. */ | ||
285 | delta = ticks = old - clock; | ||
286 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
287 | delta = -delta; | ||
288 | adjust.offset = -ticks * (1000000 / HZ); | ||
289 | } | ||
290 | jiffies_timer_cc += delta; | ||
291 | if (adjust.offset != 0) { | ||
292 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", | ||
293 | adjust.offset); | ||
294 | adjust.modes = ADJ_OFFSET_SINGLESHOT; | ||
295 | do_adjtimex(&adjust); | ||
296 | } | ||
297 | return delta; | ||
298 | } | ||
299 | |||
300 | static DEFINE_PER_CPU(atomic_t, clock_sync_word); | ||
301 | static unsigned long clock_sync_flags; | ||
302 | |||
303 | #define CLOCK_SYNC_HAS_ETR 0 | ||
304 | #define CLOCK_SYNC_HAS_STP 1 | ||
305 | #define CLOCK_SYNC_ETR 2 | ||
306 | #define CLOCK_SYNC_STP 3 | ||
307 | |||
308 | /* | ||
309 | * The synchronous get_clock function. It will write the current clock | ||
310 | * value to the clock pointer and return 0 if the clock is in sync with | ||
311 | * the external time source. If the clock mode is local it will return | ||
312 | * -ENOSYS and -EAGAIN if the clock is not in sync with the external | ||
313 | * reference. | ||
314 | */ | ||
315 | int get_sync_clock(unsigned long long *clock) | ||
316 | { | ||
317 | atomic_t *sw_ptr; | ||
318 | unsigned int sw0, sw1; | ||
319 | |||
320 | sw_ptr = &get_cpu_var(clock_sync_word); | ||
321 | sw0 = atomic_read(sw_ptr); | ||
322 | *clock = get_clock(); | ||
323 | sw1 = atomic_read(sw_ptr); | ||
324 | put_cpu_var(clock_sync_sync); | ||
325 | if (sw0 == sw1 && (sw0 & 0x80000000U)) | ||
326 | /* Success: time is in sync. */ | ||
327 | return 0; | ||
328 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags) && | ||
329 | !test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) | ||
330 | return -ENOSYS; | ||
331 | if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags) && | ||
332 | !test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) | ||
333 | return -EACCES; | ||
334 | return -EAGAIN; | ||
335 | } | ||
336 | EXPORT_SYMBOL(get_sync_clock); | ||
337 | |||
338 | /* | ||
339 | * Make get_sync_clock return -EAGAIN. | ||
340 | */ | ||
341 | static void disable_sync_clock(void *dummy) | ||
342 | { | ||
343 | atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word); | ||
344 | /* | ||
345 | * Clear the in-sync bit 2^31. All get_sync_clock calls will | ||
346 | * fail until the sync bit is turned back on. In addition | ||
347 | * increase the "sequence" counter to avoid the race of an | ||
348 | * etr event and the complete recovery against get_sync_clock. | ||
349 | */ | ||
350 | atomic_clear_mask(0x80000000, sw_ptr); | ||
351 | atomic_inc(sw_ptr); | ||
352 | } | ||
353 | |||
354 | /* | ||
355 | * Make get_sync_clock return 0 again. | ||
356 | * Needs to be called from a context disabled for preemption. | ||
357 | */ | ||
358 | static void enable_sync_clock(void) | ||
359 | { | ||
360 | atomic_t *sw_ptr = &__get_cpu_var(clock_sync_word); | ||
361 | atomic_set_mask(0x80000000, sw_ptr); | ||
362 | } | ||
363 | |||
364 | /* | ||
248 | * External Time Reference (ETR) code. | 365 | * External Time Reference (ETR) code. |
249 | */ | 366 | */ |
250 | static int etr_port0_online; | 367 | static int etr_port0_online; |
251 | static int etr_port1_online; | 368 | static int etr_port1_online; |
369 | static int etr_steai_available; | ||
252 | 370 | ||
253 | static int __init early_parse_etr(char *p) | 371 | static int __init early_parse_etr(char *p) |
254 | { | 372 | { |
@@ -273,12 +391,6 @@ enum etr_event { | |||
273 | ETR_EVENT_UPDATE, | 391 | ETR_EVENT_UPDATE, |
274 | }; | 392 | }; |
275 | 393 | ||
276 | enum etr_flags { | ||
277 | ETR_FLAG_ENOSYS, | ||
278 | ETR_FLAG_EACCES, | ||
279 | ETR_FLAG_STEAI, | ||
280 | }; | ||
281 | |||
282 | /* | 394 | /* |
283 | * Valid bit combinations of the eacr register are (x = don't care): | 395 | * Valid bit combinations of the eacr register are (x = don't care): |
284 | * e0 e1 dp p0 p1 ea es sl | 396 | * e0 e1 dp p0 p1 ea es sl |
@@ -305,74 +417,18 @@ enum etr_flags { | |||
305 | */ | 417 | */ |
306 | static struct etr_eacr etr_eacr; | 418 | static struct etr_eacr etr_eacr; |
307 | static u64 etr_tolec; /* time of last eacr update */ | 419 | static u64 etr_tolec; /* time of last eacr update */ |
308 | static unsigned long etr_flags; | ||
309 | static struct etr_aib etr_port0; | 420 | static struct etr_aib etr_port0; |
310 | static int etr_port0_uptodate; | 421 | static int etr_port0_uptodate; |
311 | static struct etr_aib etr_port1; | 422 | static struct etr_aib etr_port1; |
312 | static int etr_port1_uptodate; | 423 | static int etr_port1_uptodate; |
313 | static unsigned long etr_events; | 424 | static unsigned long etr_events; |
314 | static struct timer_list etr_timer; | 425 | static struct timer_list etr_timer; |
315 | static DEFINE_PER_CPU(atomic_t, etr_sync_word); | ||
316 | 426 | ||
317 | static void etr_timeout(unsigned long dummy); | 427 | static void etr_timeout(unsigned long dummy); |
318 | static void etr_work_fn(struct work_struct *work); | 428 | static void etr_work_fn(struct work_struct *work); |
319 | static DECLARE_WORK(etr_work, etr_work_fn); | 429 | static DECLARE_WORK(etr_work, etr_work_fn); |
320 | 430 | ||
321 | /* | 431 | /* |
322 | * The etr get_clock function. It will write the current clock value | ||
323 | * to the clock pointer and return 0 if the clock is in sync with the | ||
324 | * external time source. If the clock mode is local it will return | ||
325 | * -ENOSYS and -EAGAIN if the clock is not in sync with the external | ||
326 | * reference. This function is what ETR is all about.. | ||
327 | */ | ||
328 | int get_sync_clock(unsigned long long *clock) | ||
329 | { | ||
330 | atomic_t *sw_ptr; | ||
331 | unsigned int sw0, sw1; | ||
332 | |||
333 | sw_ptr = &get_cpu_var(etr_sync_word); | ||
334 | sw0 = atomic_read(sw_ptr); | ||
335 | *clock = get_clock(); | ||
336 | sw1 = atomic_read(sw_ptr); | ||
337 | put_cpu_var(etr_sync_sync); | ||
338 | if (sw0 == sw1 && (sw0 & 0x80000000U)) | ||
339 | /* Success: time is in sync. */ | ||
340 | return 0; | ||
341 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | ||
342 | return -ENOSYS; | ||
343 | if (test_bit(ETR_FLAG_EACCES, &etr_flags)) | ||
344 | return -EACCES; | ||
345 | return -EAGAIN; | ||
346 | } | ||
347 | EXPORT_SYMBOL(get_sync_clock); | ||
348 | |||
349 | /* | ||
350 | * Make get_sync_clock return -EAGAIN. | ||
351 | */ | ||
352 | static void etr_disable_sync_clock(void *dummy) | ||
353 | { | ||
354 | atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word); | ||
355 | /* | ||
356 | * Clear the in-sync bit 2^31. All get_sync_clock calls will | ||
357 | * fail until the sync bit is turned back on. In addition | ||
358 | * increase the "sequence" counter to avoid the race of an | ||
359 | * etr event and the complete recovery against get_sync_clock. | ||
360 | */ | ||
361 | atomic_clear_mask(0x80000000, sw_ptr); | ||
362 | atomic_inc(sw_ptr); | ||
363 | } | ||
364 | |||
365 | /* | ||
366 | * Make get_sync_clock return 0 again. | ||
367 | * Needs to be called from a context disabled for preemption. | ||
368 | */ | ||
369 | static void etr_enable_sync_clock(void) | ||
370 | { | ||
371 | atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word); | ||
372 | atomic_set_mask(0x80000000, sw_ptr); | ||
373 | } | ||
374 | |||
375 | /* | ||
376 | * Reset ETR attachment. | 432 | * Reset ETR attachment. |
377 | */ | 433 | */ |
378 | static void etr_reset(void) | 434 | static void etr_reset(void) |
@@ -381,15 +437,13 @@ static void etr_reset(void) | |||
381 | .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0, | 437 | .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0, |
382 | .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, | 438 | .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0, |
383 | .es = 0, .sl = 0 }; | 439 | .es = 0, .sl = 0 }; |
384 | if (etr_setr(&etr_eacr) == 0) | 440 | if (etr_setr(&etr_eacr) == 0) { |
385 | etr_tolec = get_clock(); | 441 | etr_tolec = get_clock(); |
386 | else { | 442 | set_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags); |
387 | set_bit(ETR_FLAG_ENOSYS, &etr_flags); | 443 | } else if (etr_port0_online || etr_port1_online) { |
388 | if (etr_port0_online || etr_port1_online) { | 444 | printk(KERN_WARNING "Running on non ETR capable " |
389 | printk(KERN_WARNING "Running on non ETR capable " | 445 | "machine, only local mode available.\n"); |
390 | "machine, only local mode available.\n"); | 446 | etr_port0_online = etr_port1_online = 0; |
391 | etr_port0_online = etr_port1_online = 0; | ||
392 | } | ||
393 | } | 447 | } |
394 | } | 448 | } |
395 | 449 | ||
@@ -397,14 +451,12 @@ static int __init etr_init(void) | |||
397 | { | 451 | { |
398 | struct etr_aib aib; | 452 | struct etr_aib aib; |
399 | 453 | ||
400 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | 454 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) |
401 | return 0; | 455 | return 0; |
402 | /* Check if this machine has the steai instruction. */ | 456 | /* Check if this machine has the steai instruction. */ |
403 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) | 457 | if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0) |
404 | set_bit(ETR_FLAG_STEAI, &etr_flags); | 458 | etr_steai_available = 1; |
405 | setup_timer(&etr_timer, etr_timeout, 0UL); | 459 | setup_timer(&etr_timer, etr_timeout, 0UL); |
406 | if (!etr_port0_online && !etr_port1_online) | ||
407 | set_bit(ETR_FLAG_EACCES, &etr_flags); | ||
408 | if (etr_port0_online) { | 460 | if (etr_port0_online) { |
409 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | 461 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); |
410 | schedule_work(&etr_work); | 462 | schedule_work(&etr_work); |
@@ -435,7 +487,8 @@ void etr_switch_to_local(void) | |||
435 | { | 487 | { |
436 | if (!etr_eacr.sl) | 488 | if (!etr_eacr.sl) |
437 | return; | 489 | return; |
438 | etr_disable_sync_clock(NULL); | 490 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) |
491 | disable_sync_clock(NULL); | ||
439 | set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); | 492 | set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events); |
440 | schedule_work(&etr_work); | 493 | schedule_work(&etr_work); |
441 | } | 494 | } |
@@ -450,23 +503,21 @@ void etr_sync_check(void) | |||
450 | { | 503 | { |
451 | if (!etr_eacr.es) | 504 | if (!etr_eacr.es) |
452 | return; | 505 | return; |
453 | etr_disable_sync_clock(NULL); | 506 | if (test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) |
507 | disable_sync_clock(NULL); | ||
454 | set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); | 508 | set_bit(ETR_EVENT_SYNC_CHECK, &etr_events); |
455 | schedule_work(&etr_work); | 509 | schedule_work(&etr_work); |
456 | } | 510 | } |
457 | 511 | ||
458 | /* | 512 | /* |
459 | * ETR external interrupt. There are two causes: | 513 | * ETR timing alert. There are two causes: |
460 | * 1) port state change, check the usability of the port | 514 | * 1) port state change, check the usability of the port |
461 | * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the | 515 | * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the |
462 | * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3) | 516 | * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3) |
463 | * or ETR-data word 4 (edf4) has changed. | 517 | * or ETR-data word 4 (edf4) has changed. |
464 | */ | 518 | */ |
465 | static void etr_ext_handler(__u16 code) | 519 | static void etr_timing_alert(struct etr_irq_parm *intparm) |
466 | { | 520 | { |
467 | struct etr_interruption_parameter *intparm = | ||
468 | (struct etr_interruption_parameter *) &S390_lowcore.ext_params; | ||
469 | |||
470 | if (intparm->pc0) | 521 | if (intparm->pc0) |
471 | /* ETR port 0 state change. */ | 522 | /* ETR port 0 state change. */ |
472 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); | 523 | set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events); |
@@ -591,58 +642,23 @@ static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p) | |||
591 | return 1; | 642 | return 1; |
592 | } | 643 | } |
593 | 644 | ||
594 | /* | 645 | struct clock_sync_data { |
595 | * The time is "clock". old is what we think the time is. | ||
596 | * Adjust the value by a multiple of jiffies and add the delta to ntp. | ||
597 | * "delay" is an approximation how long the synchronization took. If | ||
598 | * the time correction is positive, then "delay" is subtracted from | ||
599 | * the time difference and only the remaining part is passed to ntp. | ||
600 | */ | ||
601 | static unsigned long long etr_adjust_time(unsigned long long old, | ||
602 | unsigned long long clock, | ||
603 | unsigned long long delay) | ||
604 | { | ||
605 | unsigned long long delta, ticks; | ||
606 | struct timex adjust; | ||
607 | |||
608 | if (clock > old) { | ||
609 | /* It is later than we thought. */ | ||
610 | delta = ticks = clock - old; | ||
611 | delta = ticks = (delta < delay) ? 0 : delta - delay; | ||
612 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
613 | adjust.offset = ticks * (1000000 / HZ); | ||
614 | } else { | ||
615 | /* It is earlier than we thought. */ | ||
616 | delta = ticks = old - clock; | ||
617 | delta -= do_div(ticks, CLK_TICKS_PER_JIFFY); | ||
618 | delta = -delta; | ||
619 | adjust.offset = -ticks * (1000000 / HZ); | ||
620 | } | ||
621 | jiffies_timer_cc += delta; | ||
622 | if (adjust.offset != 0) { | ||
623 | printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n", | ||
624 | adjust.offset); | ||
625 | adjust.modes = ADJ_OFFSET_SINGLESHOT; | ||
626 | do_adjtimex(&adjust); | ||
627 | } | ||
628 | return delta; | ||
629 | } | ||
630 | |||
631 | static struct { | ||
632 | int in_sync; | 646 | int in_sync; |
633 | unsigned long long fixup_cc; | 647 | unsigned long long fixup_cc; |
634 | } etr_sync; | 648 | }; |
635 | 649 | ||
636 | static void etr_sync_cpu_start(void *dummy) | 650 | static void clock_sync_cpu_start(void *dummy) |
637 | { | 651 | { |
638 | etr_enable_sync_clock(); | 652 | struct clock_sync_data *sync = dummy; |
653 | |||
654 | enable_sync_clock(); | ||
639 | /* | 655 | /* |
640 | * This looks like a busy wait loop but it isn't. etr_sync_cpus | 656 | * This looks like a busy wait loop but it isn't. etr_sync_cpus |
641 | * is called on all other cpus while the TOD clocks is stopped. | 657 | * is called on all other cpus while the TOD clocks is stopped. |
642 | * __udelay will stop the cpu on an enabled wait psw until the | 658 | * __udelay will stop the cpu on an enabled wait psw until the |
643 | * TOD is running again. | 659 | * TOD is running again. |
644 | */ | 660 | */ |
645 | while (etr_sync.in_sync == 0) { | 661 | while (sync->in_sync == 0) { |
646 | __udelay(1); | 662 | __udelay(1); |
647 | /* | 663 | /* |
648 | * A different cpu changes *in_sync. Therefore use | 664 | * A different cpu changes *in_sync. Therefore use |
@@ -650,17 +666,17 @@ static void etr_sync_cpu_start(void *dummy) | |||
650 | */ | 666 | */ |
651 | barrier(); | 667 | barrier(); |
652 | } | 668 | } |
653 | if (etr_sync.in_sync != 1) | 669 | if (sync->in_sync != 1) |
654 | /* Didn't work. Clear per-cpu in sync bit again. */ | 670 | /* Didn't work. Clear per-cpu in sync bit again. */ |
655 | etr_disable_sync_clock(NULL); | 671 | disable_sync_clock(NULL); |
656 | /* | 672 | /* |
657 | * This round of TOD syncing is done. Set the clock comparator | 673 | * This round of TOD syncing is done. Set the clock comparator |
658 | * to the next tick and let the processor continue. | 674 | * to the next tick and let the processor continue. |
659 | */ | 675 | */ |
660 | fixup_clock_comparator(etr_sync.fixup_cc); | 676 | fixup_clock_comparator(sync->fixup_cc); |
661 | } | 677 | } |
662 | 678 | ||
663 | static void etr_sync_cpu_end(void *dummy) | 679 | static void clock_sync_cpu_end(void *dummy) |
664 | { | 680 | { |
665 | } | 681 | } |
666 | 682 | ||
@@ -672,6 +688,7 @@ static void etr_sync_cpu_end(void *dummy) | |||
672 | static int etr_sync_clock(struct etr_aib *aib, int port) | 688 | static int etr_sync_clock(struct etr_aib *aib, int port) |
673 | { | 689 | { |
674 | struct etr_aib *sync_port; | 690 | struct etr_aib *sync_port; |
691 | struct clock_sync_data etr_sync; | ||
675 | unsigned long long clock, old_clock, delay, delta; | 692 | unsigned long long clock, old_clock, delay, delta; |
676 | int follows; | 693 | int follows; |
677 | int rc; | 694 | int rc; |
@@ -690,9 +707,9 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
690 | */ | 707 | */ |
691 | memset(&etr_sync, 0, sizeof(etr_sync)); | 708 | memset(&etr_sync, 0, sizeof(etr_sync)); |
692 | preempt_disable(); | 709 | preempt_disable(); |
693 | smp_call_function(etr_sync_cpu_start, NULL, 0, 0); | 710 | smp_call_function(clock_sync_cpu_start, &etr_sync, 0, 0); |
694 | local_irq_disable(); | 711 | local_irq_disable(); |
695 | etr_enable_sync_clock(); | 712 | enable_sync_clock(); |
696 | 713 | ||
697 | /* Set clock to next OTE. */ | 714 | /* Set clock to next OTE. */ |
698 | __ctl_set_bit(14, 21); | 715 | __ctl_set_bit(14, 21); |
@@ -707,13 +724,13 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
707 | /* Adjust Linux timing variables. */ | 724 | /* Adjust Linux timing variables. */ |
708 | delay = (unsigned long long) | 725 | delay = (unsigned long long) |
709 | (aib->edf2.etv - sync_port->edf2.etv) << 32; | 726 | (aib->edf2.etv - sync_port->edf2.etv) << 32; |
710 | delta = etr_adjust_time(old_clock, clock, delay); | 727 | delta = adjust_time(old_clock, clock, delay); |
711 | etr_sync.fixup_cc = delta; | 728 | etr_sync.fixup_cc = delta; |
712 | fixup_clock_comparator(delta); | 729 | fixup_clock_comparator(delta); |
713 | /* Verify that the clock is properly set. */ | 730 | /* Verify that the clock is properly set. */ |
714 | if (!etr_aib_follows(sync_port, aib, port)) { | 731 | if (!etr_aib_follows(sync_port, aib, port)) { |
715 | /* Didn't work. */ | 732 | /* Didn't work. */ |
716 | etr_disable_sync_clock(NULL); | 733 | disable_sync_clock(NULL); |
717 | etr_sync.in_sync = -EAGAIN; | 734 | etr_sync.in_sync = -EAGAIN; |
718 | rc = -EAGAIN; | 735 | rc = -EAGAIN; |
719 | } else { | 736 | } else { |
@@ -724,12 +741,12 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
724 | /* Could not set the clock ?!? */ | 741 | /* Could not set the clock ?!? */ |
725 | __ctl_clear_bit(0, 29); | 742 | __ctl_clear_bit(0, 29); |
726 | __ctl_clear_bit(14, 21); | 743 | __ctl_clear_bit(14, 21); |
727 | etr_disable_sync_clock(NULL); | 744 | disable_sync_clock(NULL); |
728 | etr_sync.in_sync = -EAGAIN; | 745 | etr_sync.in_sync = -EAGAIN; |
729 | rc = -EAGAIN; | 746 | rc = -EAGAIN; |
730 | } | 747 | } |
731 | local_irq_enable(); | 748 | local_irq_enable(); |
732 | smp_call_function(etr_sync_cpu_end,NULL,0,0); | 749 | smp_call_function(clock_sync_cpu_end, NULL, 0, 0); |
733 | preempt_enable(); | 750 | preempt_enable(); |
734 | return rc; | 751 | return rc; |
735 | } | 752 | } |
@@ -832,7 +849,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib, | |||
832 | * Do not try to get the alternate port aib if the clock | 849 | * Do not try to get the alternate port aib if the clock |
833 | * is not in sync yet. | 850 | * is not in sync yet. |
834 | */ | 851 | */ |
835 | if (!eacr.es) | 852 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags) && !eacr.es) |
836 | return eacr; | 853 | return eacr; |
837 | 854 | ||
838 | /* | 855 | /* |
@@ -840,7 +857,7 @@ static struct etr_eacr etr_handle_update(struct etr_aib *aib, | |||
840 | * the other port immediately. If only stetr is available the | 857 | * the other port immediately. If only stetr is available the |
841 | * data-port bit toggle has to be used. | 858 | * data-port bit toggle has to be used. |
842 | */ | 859 | */ |
843 | if (test_bit(ETR_FLAG_STEAI, &etr_flags)) { | 860 | if (etr_steai_available) { |
844 | if (eacr.p0 && !etr_port0_uptodate) { | 861 | if (eacr.p0 && !etr_port0_uptodate) { |
845 | etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0); | 862 | etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0); |
846 | etr_port0_uptodate = 1; | 863 | etr_port0_uptodate = 1; |
@@ -909,10 +926,10 @@ static void etr_work_fn(struct work_struct *work) | |||
909 | if (!eacr.ea) { | 926 | if (!eacr.ea) { |
910 | /* Both ports offline. Reset everything. */ | 927 | /* Both ports offline. Reset everything. */ |
911 | eacr.dp = eacr.es = eacr.sl = 0; | 928 | eacr.dp = eacr.es = eacr.sl = 0; |
912 | on_each_cpu(etr_disable_sync_clock, NULL, 0, 1); | 929 | on_each_cpu(disable_sync_clock, NULL, 0, 1); |
913 | del_timer_sync(&etr_timer); | 930 | del_timer_sync(&etr_timer); |
914 | etr_update_eacr(eacr); | 931 | etr_update_eacr(eacr); |
915 | set_bit(ETR_FLAG_EACCES, &etr_flags); | 932 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
916 | return; | 933 | return; |
917 | } | 934 | } |
918 | 935 | ||
@@ -953,7 +970,6 @@ static void etr_work_fn(struct work_struct *work) | |||
953 | eacr.e1 = 1; | 970 | eacr.e1 = 1; |
954 | sync_port = (etr_port0_uptodate && | 971 | sync_port = (etr_port0_uptodate && |
955 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; | 972 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; |
956 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
957 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) { | 973 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) { |
958 | eacr.sl = 0; | 974 | eacr.sl = 0; |
959 | eacr.e0 = 0; | 975 | eacr.e0 = 0; |
@@ -962,7 +978,6 @@ static void etr_work_fn(struct work_struct *work) | |||
962 | eacr.es = 0; | 978 | eacr.es = 0; |
963 | sync_port = (etr_port1_uptodate && | 979 | sync_port = (etr_port1_uptodate && |
964 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; | 980 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; |
965 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
966 | } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) { | 981 | } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) { |
967 | eacr.sl = 1; | 982 | eacr.sl = 1; |
968 | eacr.e0 = 1; | 983 | eacr.e0 = 1; |
@@ -976,7 +991,6 @@ static void etr_work_fn(struct work_struct *work) | |||
976 | eacr.e1 = 1; | 991 | eacr.e1 = 1; |
977 | sync_port = (etr_port0_uptodate && | 992 | sync_port = (etr_port0_uptodate && |
978 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; | 993 | etr_port_valid(&etr_port0, 0)) ? 0 : -1; |
979 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
980 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) { | 994 | } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) { |
981 | eacr.sl = 1; | 995 | eacr.sl = 1; |
982 | eacr.e0 = 0; | 996 | eacr.e0 = 0; |
@@ -985,19 +999,22 @@ static void etr_work_fn(struct work_struct *work) | |||
985 | eacr.es = 0; | 999 | eacr.es = 0; |
986 | sync_port = (etr_port1_uptodate && | 1000 | sync_port = (etr_port1_uptodate && |
987 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; | 1001 | etr_port_valid(&etr_port1, 1)) ? 1 : -1; |
988 | clear_bit(ETR_FLAG_EACCES, &etr_flags); | ||
989 | } else { | 1002 | } else { |
990 | /* Both ports not usable. */ | 1003 | /* Both ports not usable. */ |
991 | eacr.es = eacr.sl = 0; | 1004 | eacr.es = eacr.sl = 0; |
992 | sync_port = -1; | 1005 | sync_port = -1; |
993 | set_bit(ETR_FLAG_EACCES, &etr_flags); | 1006 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
994 | } | 1007 | } |
995 | 1008 | ||
1009 | if (!test_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) | ||
1010 | eacr.es = 0; | ||
1011 | |||
996 | /* | 1012 | /* |
997 | * If the clock is in sync just update the eacr and return. | 1013 | * If the clock is in sync just update the eacr and return. |
998 | * If there is no valid sync port wait for a port update. | 1014 | * If there is no valid sync port wait for a port update. |
999 | */ | 1015 | */ |
1000 | if (eacr.es || sync_port < 0) { | 1016 | if (test_bit(CLOCK_SYNC_STP, &clock_sync_flags) || |
1017 | eacr.es || sync_port < 0) { | ||
1001 | etr_update_eacr(eacr); | 1018 | etr_update_eacr(eacr); |
1002 | etr_set_tolec_timeout(now); | 1019 | etr_set_tolec_timeout(now); |
1003 | return; | 1020 | return; |
@@ -1018,11 +1035,13 @@ static void etr_work_fn(struct work_struct *work) | |||
1018 | * and set up a timer to try again after 0.5 seconds | 1035 | * and set up a timer to try again after 0.5 seconds |
1019 | */ | 1036 | */ |
1020 | etr_update_eacr(eacr); | 1037 | etr_update_eacr(eacr); |
1038 | set_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | ||
1021 | if (now < etr_tolec + (1600000 << 12) || | 1039 | if (now < etr_tolec + (1600000 << 12) || |
1022 | etr_sync_clock(&aib, sync_port) != 0) { | 1040 | etr_sync_clock(&aib, sync_port) != 0) { |
1023 | /* Sync failed. Try again in 1/2 second. */ | 1041 | /* Sync failed. Try again in 1/2 second. */ |
1024 | eacr.es = 0; | 1042 | eacr.es = 0; |
1025 | etr_update_eacr(eacr); | 1043 | etr_update_eacr(eacr); |
1044 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | ||
1026 | etr_set_sync_timeout(); | 1045 | etr_set_sync_timeout(); |
1027 | } else | 1046 | } else |
1028 | etr_set_tolec_timeout(now); | 1047 | etr_set_tolec_timeout(now); |
@@ -1097,8 +1116,8 @@ static ssize_t etr_online_store(struct sys_device *dev, | |||
1097 | value = simple_strtoul(buf, NULL, 0); | 1116 | value = simple_strtoul(buf, NULL, 0); |
1098 | if (value != 0 && value != 1) | 1117 | if (value != 0 && value != 1) |
1099 | return -EINVAL; | 1118 | return -EINVAL; |
1100 | if (test_bit(ETR_FLAG_ENOSYS, &etr_flags)) | 1119 | if (!test_bit(CLOCK_SYNC_HAS_ETR, &clock_sync_flags)) |
1101 | return -ENOSYS; | 1120 | return -EOPNOTSUPP; |
1102 | if (dev == &etr_port0_dev) { | 1121 | if (dev == &etr_port0_dev) { |
1103 | if (etr_port0_online == value) | 1122 | if (etr_port0_online == value) |
1104 | return count; /* Nothing to do. */ | 1123 | return count; /* Nothing to do. */ |
@@ -1292,3 +1311,318 @@ out: | |||
1292 | } | 1311 | } |
1293 | 1312 | ||
1294 | device_initcall(etr_init_sysfs); | 1313 | device_initcall(etr_init_sysfs); |
1314 | |||
1315 | /* | ||
1316 | * Server Time Protocol (STP) code. | ||
1317 | */ | ||
1318 | static int stp_online; | ||
1319 | static struct stp_sstpi stp_info; | ||
1320 | static void *stp_page; | ||
1321 | |||
1322 | static void stp_work_fn(struct work_struct *work); | ||
1323 | static DECLARE_WORK(stp_work, stp_work_fn); | ||
1324 | |||
1325 | static int __init early_parse_stp(char *p) | ||
1326 | { | ||
1327 | if (strncmp(p, "off", 3) == 0) | ||
1328 | stp_online = 0; | ||
1329 | else if (strncmp(p, "on", 2) == 0) | ||
1330 | stp_online = 1; | ||
1331 | return 0; | ||
1332 | } | ||
1333 | early_param("stp", early_parse_stp); | ||
1334 | |||
1335 | /* | ||
1336 | * Reset STP attachment. | ||
1337 | */ | ||
1338 | static void stp_reset(void) | ||
1339 | { | ||
1340 | int rc; | ||
1341 | |||
1342 | stp_page = alloc_bootmem_pages(PAGE_SIZE); | ||
1343 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); | ||
1344 | if (rc == 1) | ||
1345 | set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags); | ||
1346 | else if (stp_online) { | ||
1347 | printk(KERN_WARNING "Running on non STP capable machine.\n"); | ||
1348 | free_bootmem((unsigned long) stp_page, PAGE_SIZE); | ||
1349 | stp_page = NULL; | ||
1350 | stp_online = 0; | ||
1351 | } | ||
1352 | } | ||
1353 | |||
1354 | static int __init stp_init(void) | ||
1355 | { | ||
1356 | if (test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags) && stp_online) | ||
1357 | schedule_work(&stp_work); | ||
1358 | return 0; | ||
1359 | } | ||
1360 | |||
1361 | arch_initcall(stp_init); | ||
1362 | |||
1363 | /* | ||
1364 | * STP timing alert. There are three causes: | ||
1365 | * 1) timing status change | ||
1366 | * 2) link availability change | ||
1367 | * 3) time control parameter change | ||
1368 | * In all three cases we are only interested in the clock source state. | ||
1369 | * If an STP clock source is now available, use it. | ||
1370 | */ | ||
1371 | static void stp_timing_alert(struct stp_irq_parm *intparm) | ||
1372 | { | ||
1373 | if (intparm->tsc || intparm->lac || intparm->tcpc) | ||
1374 | schedule_work(&stp_work); | ||
1375 | } | ||
1376 | |||
1377 | /* | ||
1378 | * STP sync check machine check. This is called when the timing state | ||
1379 | * changes from the synchronized state to the unsynchronized state. | ||
1380 | * After a STP sync check the clock is not in sync. The machine check | ||
1381 | * is broadcast to all cpus at the same time. | ||
1382 | */ | ||
1383 | void stp_sync_check(void) | ||
1384 | { | ||
1385 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) | ||
1386 | return; | ||
1387 | disable_sync_clock(NULL); | ||
1388 | schedule_work(&stp_work); | ||
1389 | } | ||
1390 | |||
1391 | /* | ||
1392 | * STP island condition machine check. This is called when an attached | ||
1393 | * server attempts to communicate over an STP link and the servers | ||
1394 | * have matching CTN ids and have a valid stratum-1 configuration | ||
1395 | * but the configurations do not match. | ||
1396 | */ | ||
1397 | void stp_island_check(void) | ||
1398 | { | ||
1399 | if (!test_bit(CLOCK_SYNC_STP, &clock_sync_flags)) | ||
1400 | return; | ||
1401 | disable_sync_clock(NULL); | ||
1402 | schedule_work(&stp_work); | ||
1403 | } | ||
1404 | |||
1405 | /* | ||
1406 | * STP work function. Check for the STP state and take over the clock | ||
1407 | * synchronization if the STP clock source is usable. | ||
1408 | */ | ||
1409 | static void stp_work_fn(struct work_struct *work) | ||
1410 | { | ||
1411 | struct clock_sync_data stp_sync; | ||
1412 | unsigned long long old_clock, delta; | ||
1413 | int rc; | ||
1414 | |||
1415 | if (!stp_online) { | ||
1416 | chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); | ||
1417 | return; | ||
1418 | } | ||
1419 | |||
1420 | rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0xb0e0); | ||
1421 | if (rc) | ||
1422 | return; | ||
1423 | |||
1424 | rc = chsc_sstpi(stp_page, &stp_info, sizeof(struct stp_sstpi)); | ||
1425 | if (rc || stp_info.c == 0) | ||
1426 | return; | ||
1427 | |||
1428 | /* | ||
1429 | * Catch all other cpus and make them wait until we have | ||
1430 | * successfully synced the clock. smp_call_function will | ||
1431 | * return after all other cpus are in clock_sync_cpu_start. | ||
1432 | */ | ||
1433 | memset(&stp_sync, 0, sizeof(stp_sync)); | ||
1434 | preempt_disable(); | ||
1435 | smp_call_function(clock_sync_cpu_start, &stp_sync, 0, 0); | ||
1436 | local_irq_disable(); | ||
1437 | enable_sync_clock(); | ||
1438 | |||
1439 | set_bit(CLOCK_SYNC_STP, &clock_sync_flags); | ||
1440 | if (test_and_clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags)) | ||
1441 | schedule_work(&etr_work); | ||
1442 | |||
1443 | rc = 0; | ||
1444 | if (stp_info.todoff[0] || stp_info.todoff[1] || | ||
1445 | stp_info.todoff[2] || stp_info.todoff[3] || | ||
1446 | stp_info.tmd != 2) { | ||
1447 | old_clock = get_clock(); | ||
1448 | rc = chsc_sstpc(stp_page, STP_OP_SYNC, 0); | ||
1449 | if (rc == 0) { | ||
1450 | delta = adjust_time(old_clock, get_clock(), 0); | ||
1451 | fixup_clock_comparator(delta); | ||
1452 | rc = chsc_sstpi(stp_page, &stp_info, | ||
1453 | sizeof(struct stp_sstpi)); | ||
1454 | if (rc == 0 && stp_info.tmd != 2) | ||
1455 | rc = -EAGAIN; | ||
1456 | } | ||
1457 | } | ||
1458 | if (rc) { | ||
1459 | disable_sync_clock(NULL); | ||
1460 | stp_sync.in_sync = -EAGAIN; | ||
1461 | clear_bit(CLOCK_SYNC_STP, &clock_sync_flags); | ||
1462 | if (etr_port0_online || etr_port1_online) | ||
1463 | schedule_work(&etr_work); | ||
1464 | } else | ||
1465 | stp_sync.in_sync = 1; | ||
1466 | |||
1467 | local_irq_enable(); | ||
1468 | smp_call_function(clock_sync_cpu_end, NULL, 0, 0); | ||
1469 | preempt_enable(); | ||
1470 | } | ||
1471 | |||
1472 | /* | ||
1473 | * STP class sysfs interface functions | ||
1474 | */ | ||
1475 | static struct sysdev_class stp_sysclass = { | ||
1476 | .name = "stp", | ||
1477 | }; | ||
1478 | |||
1479 | static ssize_t stp_ctn_id_show(struct sysdev_class *class, char *buf) | ||
1480 | { | ||
1481 | if (!stp_online) | ||
1482 | return -ENODATA; | ||
1483 | return sprintf(buf, "%016llx\n", | ||
1484 | *(unsigned long long *) stp_info.ctnid); | ||
1485 | } | ||
1486 | |||
1487 | static SYSDEV_CLASS_ATTR(ctn_id, 0400, stp_ctn_id_show, NULL); | ||
1488 | |||
1489 | static ssize_t stp_ctn_type_show(struct sysdev_class *class, char *buf) | ||
1490 | { | ||
1491 | if (!stp_online) | ||
1492 | return -ENODATA; | ||
1493 | return sprintf(buf, "%i\n", stp_info.ctn); | ||
1494 | } | ||
1495 | |||
1496 | static SYSDEV_CLASS_ATTR(ctn_type, 0400, stp_ctn_type_show, NULL); | ||
1497 | |||
1498 | static ssize_t stp_dst_offset_show(struct sysdev_class *class, char *buf) | ||
1499 | { | ||
1500 | if (!stp_online || !(stp_info.vbits & 0x2000)) | ||
1501 | return -ENODATA; | ||
1502 | return sprintf(buf, "%i\n", (int)(s16) stp_info.dsto); | ||
1503 | } | ||
1504 | |||
1505 | static SYSDEV_CLASS_ATTR(dst_offset, 0400, stp_dst_offset_show, NULL); | ||
1506 | |||
1507 | static ssize_t stp_leap_seconds_show(struct sysdev_class *class, char *buf) | ||
1508 | { | ||
1509 | if (!stp_online || !(stp_info.vbits & 0x8000)) | ||
1510 | return -ENODATA; | ||
1511 | return sprintf(buf, "%i\n", (int)(s16) stp_info.leaps); | ||
1512 | } | ||
1513 | |||
1514 | static SYSDEV_CLASS_ATTR(leap_seconds, 0400, stp_leap_seconds_show, NULL); | ||
1515 | |||
1516 | static ssize_t stp_stratum_show(struct sysdev_class *class, char *buf) | ||
1517 | { | ||
1518 | if (!stp_online) | ||
1519 | return -ENODATA; | ||
1520 | return sprintf(buf, "%i\n", (int)(s16) stp_info.stratum); | ||
1521 | } | ||
1522 | |||
1523 | static SYSDEV_CLASS_ATTR(stratum, 0400, stp_stratum_show, NULL); | ||
1524 | |||
1525 | static ssize_t stp_time_offset_show(struct sysdev_class *class, char *buf) | ||
1526 | { | ||
1527 | if (!stp_online || !(stp_info.vbits & 0x0800)) | ||
1528 | return -ENODATA; | ||
1529 | return sprintf(buf, "%i\n", (int) stp_info.tto); | ||
1530 | } | ||
1531 | |||
1532 | static SYSDEV_CLASS_ATTR(time_offset, 0400, stp_time_offset_show, NULL); | ||
1533 | |||
1534 | static ssize_t stp_time_zone_offset_show(struct sysdev_class *class, char *buf) | ||
1535 | { | ||
1536 | if (!stp_online || !(stp_info.vbits & 0x4000)) | ||
1537 | return -ENODATA; | ||
1538 | return sprintf(buf, "%i\n", (int)(s16) stp_info.tzo); | ||
1539 | } | ||
1540 | |||
1541 | static SYSDEV_CLASS_ATTR(time_zone_offset, 0400, | ||
1542 | stp_time_zone_offset_show, NULL); | ||
1543 | |||
1544 | static ssize_t stp_timing_mode_show(struct sysdev_class *class, char *buf) | ||
1545 | { | ||
1546 | if (!stp_online) | ||
1547 | return -ENODATA; | ||
1548 | return sprintf(buf, "%i\n", stp_info.tmd); | ||
1549 | } | ||
1550 | |||
1551 | static SYSDEV_CLASS_ATTR(timing_mode, 0400, stp_timing_mode_show, NULL); | ||
1552 | |||
1553 | static ssize_t stp_timing_state_show(struct sysdev_class *class, char *buf) | ||
1554 | { | ||
1555 | if (!stp_online) | ||
1556 | return -ENODATA; | ||
1557 | return sprintf(buf, "%i\n", stp_info.tst); | ||
1558 | } | ||
1559 | |||
1560 | static SYSDEV_CLASS_ATTR(timing_state, 0400, stp_timing_state_show, NULL); | ||
1561 | |||
1562 | static ssize_t stp_online_show(struct sysdev_class *class, char *buf) | ||
1563 | { | ||
1564 | return sprintf(buf, "%i\n", stp_online); | ||
1565 | } | ||
1566 | |||
1567 | static ssize_t stp_online_store(struct sysdev_class *class, | ||
1568 | const char *buf, size_t count) | ||
1569 | { | ||
1570 | unsigned int value; | ||
1571 | |||
1572 | value = simple_strtoul(buf, NULL, 0); | ||
1573 | if (value != 0 && value != 1) | ||
1574 | return -EINVAL; | ||
1575 | if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags)) | ||
1576 | return -EOPNOTSUPP; | ||
1577 | stp_online = value; | ||
1578 | schedule_work(&stp_work); | ||
1579 | return count; | ||
1580 | } | ||
1581 | |||
1582 | /* | ||
1583 | * Can't use SYSDEV_CLASS_ATTR because the attribute should be named | ||
1584 | * stp/online but attr_online already exists in this file .. | ||
1585 | */ | ||
1586 | static struct sysdev_class_attribute attr_stp_online = { | ||
1587 | .attr = { .name = "online", .mode = 0600 }, | ||
1588 | .show = stp_online_show, | ||
1589 | .store = stp_online_store, | ||
1590 | }; | ||
1591 | |||
1592 | static struct sysdev_class_attribute *stp_attributes[] = { | ||
1593 | &attr_ctn_id, | ||
1594 | &attr_ctn_type, | ||
1595 | &attr_dst_offset, | ||
1596 | &attr_leap_seconds, | ||
1597 | &attr_stp_online, | ||
1598 | &attr_stratum, | ||
1599 | &attr_time_offset, | ||
1600 | &attr_time_zone_offset, | ||
1601 | &attr_timing_mode, | ||
1602 | &attr_timing_state, | ||
1603 | NULL | ||
1604 | }; | ||
1605 | |||
1606 | static int __init stp_init_sysfs(void) | ||
1607 | { | ||
1608 | struct sysdev_class_attribute **attr; | ||
1609 | int rc; | ||
1610 | |||
1611 | rc = sysdev_class_register(&stp_sysclass); | ||
1612 | if (rc) | ||
1613 | goto out; | ||
1614 | for (attr = stp_attributes; *attr; attr++) { | ||
1615 | rc = sysdev_class_create_file(&stp_sysclass, *attr); | ||
1616 | if (rc) | ||
1617 | goto out_unreg; | ||
1618 | } | ||
1619 | return 0; | ||
1620 | out_unreg: | ||
1621 | for (; attr >= stp_attributes; attr--) | ||
1622 | sysdev_class_remove_file(&stp_sysclass, *attr); | ||
1623 | sysdev_class_unregister(&stp_sysclass); | ||
1624 | out: | ||
1625 | return rc; | ||
1626 | } | ||
1627 | |||
1628 | device_initcall(stp_init_sysfs); | ||