author		Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-24 16:22:33 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-24 16:22:33 -0400
commit		baea7b946f00a291b166ccae7fcfed6c01530cc6 (patch)
tree		4aa275fbdbec9c7b9b4629e8bee2bbecd3c6a6af /arch/powerpc/kernel/time.c
parent		ae19ffbadc1b2100285a5b5b3d0a4e0a11390904 (diff)
parent		94e0fb086fc5663c38bbc0fe86d698be8314f82f (diff)
Merge branch 'origin' into for-linus
Conflicts: MAINTAINERS
Diffstat (limited to 'arch/powerpc/kernel/time.c')
-rw-r--r--	arch/powerpc/kernel/time.c	53
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a180b4f9a4f6..92dc844299b6 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -53,7 +53,7 @@
 #include <linux/posix-timers.h>
 #include <linux/irq.h>
 #include <linux/delay.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/io.h>
 #include <asm/processor.h>
@@ -193,6 +193,8 @@ EXPORT_SYMBOL(__cputime_clockt_factor);
 DEFINE_PER_CPU(unsigned long, cputime_last_delta);
 DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta);
 
+cputime_t cputime_one_jiffy;
+
 static void calc_cputime_factors(void)
 {
 	struct div_result res;
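
The new cputime_one_jiffy variable caches the cputime value of a single jiffy so the per-tick accounting path does not have to convert jiffies to cputime on every timer interrupt. The setup_cputime_one_jiffy() helper called later in this patch is not defined in this file; a minimal sketch, assuming it simply caches jiffies_to_cputime(1):

	/* Assumed helper (not shown in this diff): refresh the cached value
	 * whenever the conversion factors change, i.e. after
	 * calc_cputime_factors() at boot or after the iSeries timebase
	 * recalibration. */
	static inline void setup_cputime_one_jiffy(void)
	{
		cputime_one_jiffy = jiffies_to_cputime(1);
	}
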
@@ -501,6 +503,7 @@ static int __init iSeries_tb_recal(void)
 			tb_to_xs = divres.result_low;
 			vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
 			vdso_data->tb_to_xs = tb_to_xs;
+			setup_cputime_one_jiffy();
 		}
 		else {
 			printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
@@ -527,25 +530,25 @@ void __init iSeries_time_init_early(void)
 }
 #endif /* CONFIG_PPC_ISERIES */
 
-#if defined(CONFIG_PERF_COUNTERS) && defined(CONFIG_PPC32)
-DEFINE_PER_CPU(u8, perf_counter_pending);
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_PPC32)
+DEFINE_PER_CPU(u8, perf_event_pending);
 
-void set_perf_counter_pending(void)
+void set_perf_event_pending(void)
 {
-	get_cpu_var(perf_counter_pending) = 1;
+	get_cpu_var(perf_event_pending) = 1;
 	set_dec(1);
-	put_cpu_var(perf_counter_pending);
+	put_cpu_var(perf_event_pending);
 }
 
-#define test_perf_counter_pending()	__get_cpu_var(perf_counter_pending)
-#define clear_perf_counter_pending()	__get_cpu_var(perf_counter_pending) = 0
+#define test_perf_event_pending()	__get_cpu_var(perf_event_pending)
+#define clear_perf_event_pending()	__get_cpu_var(perf_event_pending) = 0
 
-#else /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+#else /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
 
-#define test_perf_counter_pending()	0
-#define clear_perf_counter_pending()
+#define test_perf_event_pending()	0
+#define clear_perf_event_pending()
 
-#endif /* CONFIG_PERF_COUNTERS && CONFIG_PPC32 */
+#endif /* CONFIG_PERF_EVENTS && CONFIG_PPC32 */
 
 /*
  * For iSeries shared processors, we have to let the hypervisor
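
The per-cpu flag above lets the perf code defer wakeup work it cannot perform in its current interrupt context: set_perf_event_pending() records the request and uses set_dec(1) so the decrementer fires almost immediately, and timer_interrupt() (next hunk) tests and clears the flag and calls perf_event_do_pending(). A hypothetical caller, sketched only to illustrate the intended use:

	/* Hypothetical PMU overflow handler (not part of this patch): defer
	 * the wakeup work to the next decrementer interrupt, which set_dec(1)
	 * forces to arrive almost at once. */
	static void pmu_overflow_defer_sketch(void)
	{
		set_perf_event_pending();
	}
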
@@ -573,9 +576,9 @@ void timer_interrupt(struct pt_regs * regs)
 	set_dec(DECREMENTER_MAX);
 
 #ifdef CONFIG_PPC32
-	if (test_perf_counter_pending()) {
-		clear_perf_counter_pending();
-		perf_counter_do_pending();
+	if (test_perf_event_pending()) {
+		clear_perf_event_pending();
+		perf_event_do_pending();
 	}
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
@@ -774,11 +777,12 @@ int update_persistent_clock(struct timespec now)
 	return ppc_md.set_rtc_time(&tm);
 }
 
-unsigned long read_persistent_clock(void)
+void read_persistent_clock(struct timespec *ts)
 {
 	struct rtc_time tm;
 	static int first = 1;
 
+	ts->tv_nsec = 0;
 	/* XXX this is a litle fragile but will work okay in the short term */
 	if (first) {
 		first = 0;
@@ -786,14 +790,18 @@ unsigned long read_persistent_clock(void)
 		timezone_offset = ppc_md.time_init();
 
 		/* get_boot_time() isn't guaranteed to be safe to call late */
-		if (ppc_md.get_boot_time)
-			return ppc_md.get_boot_time() -timezone_offset;
+		if (ppc_md.get_boot_time) {
+			ts->tv_sec = ppc_md.get_boot_time() - timezone_offset;
+			return;
+		}
+	}
+	if (!ppc_md.get_rtc_time) {
+		ts->tv_sec = 0;
+		return;
 	}
-	if (!ppc_md.get_rtc_time)
-		return 0;
 	ppc_md.get_rtc_time(&tm);
-	return mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
+	ts->tv_sec = mktime(tm.tm_year+1900, tm.tm_mon+1, tm.tm_mday,
 			tm.tm_hour, tm.tm_min, tm.tm_sec);
 }
 
 /* clocksource code */
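
The two hunks above follow the cross-architecture change of read_persistent_clock() from returning seconds to filling in a struct timespec, so architectures with finer RTC resolution can report sub-second values; this powerpc implementation supplies only whole seconds and therefore zeroes tv_nsec up front. A caller-side sketch of the assumed generic usage:

	/* Sketch of the assumed generic caller of this era: it now passes a
	 * timespec rather than assigning a returned unsigned long. */
	void __init timekeeping_init_sketch(void)
	{
		struct timespec now;

		read_persistent_clock(&now);
		/* ... seed the timekeeping state from now.tv_sec / now.tv_nsec ... */
	}
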
@@ -955,6 +963,7 @@ void __init time_init(void)
 	tb_ticks_per_usec = ppc_tb_freq / 1000000;
 	tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000);
 	calc_cputime_factors();
+	setup_cputime_one_jiffy();
 
 	/*
 	 * Calculate the length of each tick in ns.  It will not be
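
setup_cputime_one_jiffy() is placed directly after calc_cputime_factors() because the jiffies-to-cputime conversion depends on the timebase-derived factors computed there. The cached value is consumed by the per-tick accounting code, roughly as sketched below (assumed consumer, not part of this diff):

	/* Assumed consumer sketch: charge one cached jiffy of cputime per tick
	 * instead of recomputing jiffies_to_cputime(1) on every tick. */
	void account_process_tick_sketch(struct task_struct *p, int user_tick)
	{
		cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy);

		if (user_tick)
			account_user_time(p, cputime_one_jiffy, one_jiffy_scaled);
		else
			account_system_time(p, HARDIRQ_OFFSET, cputime_one_jiffy,
					    one_jiffy_scaled);
	}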