author     Tony Luck <tony.luck@intel.com>    2007-07-20 14:26:47 -0400
committer  Tony Luck <tony.luck@intel.com>    2007-07-20 14:26:47 -0400
commit     c36c282b88963d0957368a443168588e62301fda (patch)
tree       6343887ae42a65635a61b4ad99fd7f3e8dd24758
parent     f4fbfb0dda5577075a049eec7fb7ad38abca1912 (diff)
parent     1f564ad6d4182859612cbae452122e5eb2d62a76 (diff)

Pull ia64-clocksource into release branch
-rw-r--r--  Documentation/kernel-parameters.txt    |   2
-rw-r--r--  Documentation/time_interpolators.txt   |  41
-rw-r--r--  arch/ia64/Kconfig                      |   6
-rw-r--r--  arch/ia64/configs/bigsur_defconfig     |   2
-rw-r--r--  arch/ia64/configs/gensparse_defconfig  |   2
-rw-r--r--  arch/ia64/configs/sim_defconfig        |   2
-rw-r--r--  arch/ia64/configs/sn2_defconfig        |   2
-rw-r--r--  arch/ia64/configs/tiger_defconfig      |   2
-rw-r--r--  arch/ia64/configs/zx1_defconfig        |   2
-rw-r--r--  arch/ia64/defconfig                    |   2
-rw-r--r--  arch/ia64/kernel/asm-offsets.c         |  35
-rw-r--r--  arch/ia64/kernel/cyclone.c             |  46
-rw-r--r--  arch/ia64/kernel/fsys.S                | 179
-rw-r--r--  arch/ia64/kernel/fsyscall_gtod_data.h  |  23
-rw-r--r--  arch/ia64/kernel/time.c                |  96
-rw-r--r--  arch/ia64/sn/kernel/sn2/timer.c        |  29
-rw-r--r--  drivers/acpi/processor_idle.c          |   4
-rw-r--r--  drivers/char/hpet.c                    |  70
-rw-r--r--  include/linux/clocksource.h            |   6
-rw-r--r--  include/linux/timex.h                  |  60
-rw-r--r--  kernel/time.c                          |  88
-rw-r--r--  kernel/time/ntp.c                      |  10
-rw-r--r--  kernel/time/timekeeping.c              |   4
-rw-r--r--  kernel/timer.c                         | 188

24 files changed, 335 insertions, 566 deletions
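
This merge retires the ia64 time-interpolator layer in favour of the generic clocksource framework, so every counter is now described by a mask/mult/shift triple instead of a frequency/shift/drift tuple. As a rough, self-contained illustration of that arithmetic (ordinary userspace C, not kernel code; the 100 MHz frequency is assumed purely for the example, while the 40-bit mask and the shift of 16 mirror the cyclone conversion below):

    /*
     * Illustrative only: mirrors how the clocksource code derives a mult
     * factor from a counter frequency and how a cycle delta becomes
     * nanoseconds.  Values are examples, not taken from kernel headers.
     */
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* mult = (NSEC_PER_SEC << shift) / hz, rounded */
    static uint32_t hz2mult(uint32_t hz, uint32_t shift)
    {
            uint64_t tmp = (NSEC_PER_SEC << shift) + hz / 2;
            return (uint32_t)(tmp / hz);
    }

    /* ns = ((now - last) & mask) * mult >> shift */
    static uint64_t cyc2ns(uint64_t now, uint64_t last, uint64_t mask,
                           uint32_t mult, uint32_t shift)
    {
            return (((now - last) & mask) * mult) >> shift;
    }

    int main(void)
    {
            uint64_t mask = (1ULL << 40) - 1;   /* 40-bit counter, as for cyclone */
            uint32_t shift = 16;                /* same shift the patch uses */
            uint32_t hz = 100000000;            /* assumed 100 MHz, illustration only */
            uint32_t mult = hz2mult(hz, shift);

            printf("mult=%u\n", mult);                       /* prints: mult=655360 */
            printf("10000 cycles -> %llu ns\n",              /* prints: 100000 ns   */
                   (unsigned long long)cyc2ns(10000, 0, mask, mult, shift));
            return 0;
    }

The kernel's clocksource_hz2mult() performs essentially the same rounded division, and both the timekeeping core and the fsyscall fast path apply the same mask/multiply/shift step to turn a raw cycle delta into nanoseconds.
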
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 854744bde224..5fbe07706ae9 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1154,6 +1154,8 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1154 | 1154 | ||
1155 | nointroute [IA-64] | 1155 | nointroute [IA-64] |
1156 | 1156 | ||
1157 | nojitter [IA64] Disables jitter checking for ITC timers. | ||
1158 | |||
1157 | nolapic [IA-32,APIC] Do not enable or use the local APIC. | 1159 | nolapic [IA-32,APIC] Do not enable or use the local APIC. |
1158 | 1160 | ||
1159 | nolapic_timer [IA-32,APIC] Do not use the local APIC timer. | 1161 | nolapic_timer [IA-32,APIC] Do not use the local APIC timer. |
diff --git a/Documentation/time_interpolators.txt b/Documentation/time_interpolators.txt
deleted file mode 100644
index e3b60854fbc2..000000000000
--- a/Documentation/time_interpolators.txt
+++ /dev/null
@@ -1,41 +0,0 @@ | |||
1 | Time Interpolators | ||
2 | ------------------ | ||
3 | |||
4 | Time interpolators are a base of time calculation between timer ticks and | ||
5 | allow an accurate determination of time down to the accuracy of the time | ||
6 | source in nanoseconds. | ||
7 | |||
8 | The architecture specific code typically provides gettimeofday and | ||
9 | settimeofday under Linux. The time interpolator provides both if an arch | ||
10 | defines CONFIG_TIME_INTERPOLATION. The arch still must set up timer tick | ||
11 | operations and call the necessary functions to advance the clock. | ||
12 | |||
13 | With the time interpolator a standardized interface exists for time | ||
14 | interpolation between ticks. The provided logic is highly scalable | ||
15 | and has been tested in SMP situations of up to 512 CPUs. | ||
16 | |||
17 | If CONFIG_TIME_INTERPOLATION is defined then the architecture specific code | ||
18 | (or the device drivers - like HPET) may register time interpolators. | ||
19 | These are typically defined in the following way: | ||
20 | |||
21 | static struct time_interpolator my_interpolator { | ||
22 | .frequency = MY_FREQUENCY, | ||
23 | .source = TIME_SOURCE_MMIO32, | ||
24 | .shift = 8, /* scaling for higher accuracy */ | ||
25 | .drift = -1, /* Unknown drift */ | ||
26 | .jitter = 0 /* time source is stable */ | ||
27 | }; | ||
28 | |||
29 | void time_init(void) | ||
30 | { | ||
31 | .... | ||
32 | /* Initialization of the timer *. | ||
33 | my_interpolator.address = &my_timer; | ||
34 | register_time_interpolator(&my_interpolator); | ||
35 | .... | ||
36 | } | ||
37 | |||
38 | For more details see include/linux/timex.h and kernel/timer.c. | ||
39 | |||
40 | Christoph Lameter <christoph@lameter.com>, October 31, 2004 | ||
41 | |||
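
Under the clocksource framework that replaces this document, the my_interpolator example above maps onto something like the sketch below. It is modelled on the cyclone and sn2 conversions later in this merge and is illustrative only: MY_FREQUENCY and my_timer are the placeholders carried over from the deleted text, and the rating, mask and read callback are assumptions rather than values from any in-tree driver.

    /*
     * Sketch of the clocksource equivalent of the removed example, modelled
     * on the cyclone/sn2 conversions in this merge.  MY_FREQUENCY and
     * my_timer are placeholders, not a real driver.
     */
    #include <linux/init.h>
    #include <linux/clocksource.h>
    #include <asm/io.h>

    #define MY_FREQUENCY    100000000           /* placeholder: counts per second */

    static void __iomem *my_timer;              /* MMIO counter, mapped during setup */

    static cycle_t read_my_counter(void)
    {
            return (cycle_t)readq(my_timer);
    }

    static struct clocksource my_clocksource = {
            .name   = "my_timer",
            .rating = 300,                      /* illustrative rating */
            .read   = read_my_counter,
            .mask   = 0xffffffffffffffff,
            .mult   = 0,                        /* filled in from the frequency below */
            .shift  = 16,
            .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
    };

    void __init my_time_init(void)
    {
            /* ... ioremap the counter into my_timer ... */
            my_clocksource.fsys_mmio = my_timer;        /* ia64-only fsyscall hook */
            my_clocksource.mult = clocksource_hz2mult(MY_FREQUENCY,
                                                      my_clocksource.shift);
            clocksource_register(&my_clocksource);
    }
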
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 616c96e73483..36c7b9682aa6 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -62,7 +62,11 @@ config GENERIC_CALIBRATE_DELAY | |||
62 | bool | 62 | bool |
63 | default y | 63 | default y |
64 | 64 | ||
65 | config TIME_INTERPOLATION | 65 | config GENERIC_TIME |
66 | bool | ||
67 | default y | ||
68 | |||
69 | config GENERIC_TIME_VSYSCALL | ||
66 | bool | 70 | bool |
67 | default y | 71 | default y |
68 | 72 | ||
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig
index 90e9c2e61bf4..9eb48c0927b0 100644
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -85,7 +85,7 @@ CONFIG_MMU=y | |||
85 | CONFIG_SWIOTLB=y | 85 | CONFIG_SWIOTLB=y |
86 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | 86 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y |
87 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 87 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
88 | CONFIG_TIME_INTERPOLATION=y | 88 | CONFIG_GENERIC_TIME=y |
89 | CONFIG_EFI=y | 89 | CONFIG_EFI=y |
90 | CONFIG_GENERIC_IOMAP=y | 90 | CONFIG_GENERIC_IOMAP=y |
91 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | 91 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y |
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 0d29aa2066b3..3a9ed951db08 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -86,7 +86,7 @@ CONFIG_MMU=y | |||
86 | CONFIG_SWIOTLB=y | 86 | CONFIG_SWIOTLB=y |
87 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | 87 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y |
88 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 88 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
89 | CONFIG_TIME_INTERPOLATION=y | 89 | CONFIG_GENERIC_TIME=y |
90 | CONFIG_EFI=y | 90 | CONFIG_EFI=y |
91 | CONFIG_GENERIC_IOMAP=y | 91 | CONFIG_GENERIC_IOMAP=y |
92 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | 92 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y |
diff --git a/arch/ia64/configs/sim_defconfig b/arch/ia64/configs/sim_defconfig
index d9146c31ea13..c420d9f3df98 100644
--- a/arch/ia64/configs/sim_defconfig
+++ b/arch/ia64/configs/sim_defconfig
@@ -86,7 +86,7 @@ CONFIG_MMU=y | |||
86 | CONFIG_SWIOTLB=y | 86 | CONFIG_SWIOTLB=y |
87 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | 87 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y |
88 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 88 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
89 | CONFIG_TIME_INTERPOLATION=y | 89 | CONFIG_GENERIC_TIME=y |
90 | CONFIG_EFI=y | 90 | CONFIG_EFI=y |
91 | CONFIG_GENERIC_IOMAP=y | 91 | CONFIG_GENERIC_IOMAP=y |
92 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | 92 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y |
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 64e951de4e57..4c9ffc47bc7a 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -93,7 +93,7 @@ CONFIG_SWIOTLB=y | |||
93 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | 93 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y |
94 | CONFIG_GENERIC_FIND_NEXT_BIT=y | 94 | CONFIG_GENERIC_FIND_NEXT_BIT=y |
95 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 95 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
96 | CONFIG_TIME_INTERPOLATION=y | 96 | CONFIG_GENERIC_TIME=y |
97 | CONFIG_DMI=y | 97 | CONFIG_DMI=y |
98 | CONFIG_EFI=y | 98 | CONFIG_EFI=y |
99 | CONFIG_GENERIC_IOMAP=y | 99 | CONFIG_GENERIC_IOMAP=y |
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index 205bbc30b0d0..3dbb3987df27 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -97,7 +97,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y | |||
97 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | 97 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set |
98 | CONFIG_GENERIC_FIND_NEXT_BIT=y | 98 | CONFIG_GENERIC_FIND_NEXT_BIT=y |
99 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 99 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
100 | CONFIG_TIME_INTERPOLATION=y | 100 | CONFIG_GENERIC_TIME=y |
101 | CONFIG_DMI=y | 101 | CONFIG_DMI=y |
102 | CONFIG_EFI=y | 102 | CONFIG_EFI=y |
103 | CONFIG_GENERIC_IOMAP=y | 103 | CONFIG_GENERIC_IOMAP=y |
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig
index 1c7955c16358..4a060fc39934 100644
--- a/arch/ia64/configs/zx1_defconfig
+++ b/arch/ia64/configs/zx1_defconfig
@@ -96,7 +96,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y | |||
96 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | 96 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set |
97 | CONFIG_GENERIC_FIND_NEXT_BIT=y | 97 | CONFIG_GENERIC_FIND_NEXT_BIT=y |
98 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 98 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
99 | CONFIG_TIME_INTERPOLATION=y | 99 | CONFIG_GENERIC_TIME=y |
100 | CONFIG_DMI=y | 100 | CONFIG_DMI=y |
101 | CONFIG_EFI=y | 101 | CONFIG_EFI=y |
102 | CONFIG_GENERIC_IOMAP=y | 102 | CONFIG_GENERIC_IOMAP=y |
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 80298ef905eb..03172dc8c403 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -97,7 +97,7 @@ CONFIG_RWSEM_XCHGADD_ALGORITHM=y | |||
97 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set | 97 | # CONFIG_ARCH_HAS_ILOG2_U64 is not set |
98 | CONFIG_GENERIC_FIND_NEXT_BIT=y | 98 | CONFIG_GENERIC_FIND_NEXT_BIT=y |
99 | CONFIG_GENERIC_CALIBRATE_DELAY=y | 99 | CONFIG_GENERIC_CALIBRATE_DELAY=y |
100 | CONFIG_TIME_INTERPOLATION=y | 100 | CONFIG_GENERIC_TIME=y |
101 | CONFIG_DMI=y | 101 | CONFIG_DMI=y |
102 | CONFIG_EFI=y | 102 | CONFIG_EFI=y |
103 | CONFIG_GENERIC_IOMAP=y | 103 | CONFIG_GENERIC_IOMAP=y |
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 2236fabbb3c6..0aebc6f79e95 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -7,6 +7,7 @@ | |||
7 | #define ASM_OFFSETS_C 1 | 7 | #define ASM_OFFSETS_C 1 |
8 | 8 | ||
9 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
10 | #include <linux/clocksource.h> | ||
10 | 11 | ||
11 | #include <asm-ia64/processor.h> | 12 | #include <asm-ia64/processor.h> |
12 | #include <asm-ia64/ptrace.h> | 13 | #include <asm-ia64/ptrace.h> |
@@ -15,6 +16,7 @@ | |||
15 | #include <asm-ia64/mca.h> | 16 | #include <asm-ia64/mca.h> |
16 | 17 | ||
17 | #include "../kernel/sigframe.h" | 18 | #include "../kernel/sigframe.h" |
19 | #include "../kernel/fsyscall_gtod_data.h" | ||
18 | 20 | ||
19 | #define DEFINE(sym, val) \ | 21 | #define DEFINE(sym, val) \ |
20 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | 22 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) |
@@ -256,17 +258,24 @@ void foo(void) | |||
256 | BLANK(); | 258 | BLANK(); |
257 | 259 | ||
258 | /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ | 260 | /* used by fsys_gettimeofday in arch/ia64/kernel/fsys.S */ |
259 | DEFINE(IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET, offsetof (struct time_interpolator, addr)); | 261 | DEFINE(IA64_GTOD_LOCK_OFFSET, |
260 | DEFINE(IA64_TIME_INTERPOLATOR_SOURCE_OFFSET, offsetof (struct time_interpolator, source)); | 262 | offsetof (struct fsyscall_gtod_data_t, lock)); |
261 | DEFINE(IA64_TIME_INTERPOLATOR_SHIFT_OFFSET, offsetof (struct time_interpolator, shift)); | 263 | DEFINE(IA64_GTOD_WALL_TIME_OFFSET, |
262 | DEFINE(IA64_TIME_INTERPOLATOR_NSEC_OFFSET, offsetof (struct time_interpolator, nsec_per_cyc)); | 264 | offsetof (struct fsyscall_gtod_data_t, wall_time)); |
263 | DEFINE(IA64_TIME_INTERPOLATOR_OFFSET_OFFSET, offsetof (struct time_interpolator, offset)); | 265 | DEFINE(IA64_GTOD_MONO_TIME_OFFSET, |
264 | DEFINE(IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET, offsetof (struct time_interpolator, last_cycle)); | 266 | offsetof (struct fsyscall_gtod_data_t, monotonic_time)); |
265 | DEFINE(IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET, offsetof (struct time_interpolator, last_counter)); | 267 | DEFINE(IA64_CLKSRC_MASK_OFFSET, |
266 | DEFINE(IA64_TIME_INTERPOLATOR_JITTER_OFFSET, offsetof (struct time_interpolator, jitter)); | 268 | offsetof (struct fsyscall_gtod_data_t, clk_mask)); |
267 | DEFINE(IA64_TIME_INTERPOLATOR_MASK_OFFSET, offsetof (struct time_interpolator, mask)); | 269 | DEFINE(IA64_CLKSRC_MULT_OFFSET, |
268 | DEFINE(IA64_TIME_SOURCE_CPU, TIME_SOURCE_CPU); | 270 | offsetof (struct fsyscall_gtod_data_t, clk_mult)); |
269 | DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64); | 271 | DEFINE(IA64_CLKSRC_SHIFT_OFFSET, |
270 | DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32); | 272 | offsetof (struct fsyscall_gtod_data_t, clk_shift)); |
271 | DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec)); | 273 | DEFINE(IA64_CLKSRC_MMIO_OFFSET, |
274 | offsetof (struct fsyscall_gtod_data_t, clk_fsys_mmio)); | ||
275 | DEFINE(IA64_CLKSRC_CYCLE_LAST_OFFSET, | ||
276 | offsetof (struct fsyscall_gtod_data_t, clk_cycle_last)); | ||
277 | DEFINE(IA64_ITC_JITTER_OFFSET, | ||
278 | offsetof (struct itc_jitter_data_t, itc_jitter)); | ||
279 | DEFINE(IA64_ITC_LASTCYCLE_OFFSET, | ||
280 | offsetof (struct itc_jitter_data_t, itc_lastcycle)); | ||
272 | } | 281 | } |
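
The DEFINE() macro used above is the standard asm-offsets trick: asm-offsets.c is only ever compiled to assembly, each DEFINE() plants a "->SYMBOL value" marker in that assembly, and the Kbuild scripts turn those markers into #define lines in the generated asm-offsets.h that fsys.S includes. A minimal standalone illustration of the same mechanism, with made-up struct and symbol names:

    /*
     * Compile with "gcc -S offsets_demo.c" and inspect the "->..." markers in
     * offsets_demo.s; the kernel's build rule converts such markers into
     * #define lines.  The struct and symbol names here are invented.
     */
    #include <stddef.h>

    struct demo_gtod {
            unsigned int lock;
            long wall_sec;
            long wall_nsec;
    };

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))

    void foo(void)
    {
            DEFINE(DEMO_GTOD_LOCK_OFFSET, offsetof(struct demo_gtod, lock));
            DEFINE(DEMO_GTOD_WALL_SEC_OFFSET, offsetof(struct demo_gtod, wall_sec));
    }

This keeps the assembly in fsys.S in sync with the C layout of fsyscall_gtod_data_t without hand-maintained offsets.
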
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index e00b21514f7c..2fd96d9062a1 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/time.h> | 3 | #include <linux/time.h> |
4 | #include <linux/errno.h> | 4 | #include <linux/errno.h> |
5 | #include <linux/timex.h> | 5 | #include <linux/timex.h> |
6 | #include <linux/clocksource.h> | ||
6 | #include <asm/io.h> | 7 | #include <asm/io.h> |
7 | 8 | ||
8 | /* IBM Summit (EXA) Cyclone counter code*/ | 9 | /* IBM Summit (EXA) Cyclone counter code*/ |
@@ -18,13 +19,21 @@ void __init cyclone_setup(void) | |||
18 | use_cyclone = 1; | 19 | use_cyclone = 1; |
19 | } | 20 | } |
20 | 21 | ||
22 | static void __iomem *cyclone_mc; | ||
21 | 23 | ||
22 | struct time_interpolator cyclone_interpolator = { | 24 | static cycle_t read_cyclone(void) |
23 | .source = TIME_SOURCE_MMIO64, | 25 | { |
24 | .shift = 16, | 26 | return (cycle_t)readq((void __iomem *)cyclone_mc); |
25 | .frequency = CYCLONE_TIMER_FREQ, | 27 | } |
26 | .drift = -100, | 28 | |
27 | .mask = (1LL << 40) - 1 | 29 | static struct clocksource clocksource_cyclone = { |
30 | .name = "cyclone", | ||
31 | .rating = 300, | ||
32 | .read = read_cyclone, | ||
33 | .mask = (1LL << 40) - 1, | ||
34 | .mult = 0, /*to be calculated*/ | ||
35 | .shift = 16, | ||
36 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
28 | }; | 37 | }; |
29 | 38 | ||
30 | int __init init_cyclone_clock(void) | 39 | int __init init_cyclone_clock(void) |
@@ -44,13 +53,15 @@ int __init init_cyclone_clock(void) | |||
44 | offset = (CYCLONE_CBAR_ADDR); | 53 | offset = (CYCLONE_CBAR_ADDR); |
45 | reg = (u64*)ioremap_nocache(offset, sizeof(u64)); | 54 | reg = (u64*)ioremap_nocache(offset, sizeof(u64)); |
46 | if(!reg){ | 55 | if(!reg){ |
47 | printk(KERN_ERR "Summit chipset: Could not find valid CBAR register.\n"); | 56 | printk(KERN_ERR "Summit chipset: Could not find valid CBAR" |
57 | " register.\n"); | ||
48 | use_cyclone = 0; | 58 | use_cyclone = 0; |
49 | return -ENODEV; | 59 | return -ENODEV; |
50 | } | 60 | } |
51 | base = readq(reg); | 61 | base = readq(reg); |
52 | if(!base){ | 62 | if(!base){ |
53 | printk(KERN_ERR "Summit chipset: Could not find valid CBAR value.\n"); | 63 | printk(KERN_ERR "Summit chipset: Could not find valid CBAR" |
64 | " value.\n"); | ||
54 | use_cyclone = 0; | 65 | use_cyclone = 0; |
55 | return -ENODEV; | 66 | return -ENODEV; |
56 | } | 67 | } |
@@ -60,7 +71,8 @@ int __init init_cyclone_clock(void) | |||
60 | offset = (base + CYCLONE_PMCC_OFFSET); | 71 | offset = (base + CYCLONE_PMCC_OFFSET); |
61 | reg = (u64*)ioremap_nocache(offset, sizeof(u64)); | 72 | reg = (u64*)ioremap_nocache(offset, sizeof(u64)); |
62 | if(!reg){ | 73 | if(!reg){ |
63 | printk(KERN_ERR "Summit chipset: Could not find valid PMCC register.\n"); | 74 | printk(KERN_ERR "Summit chipset: Could not find valid PMCC" |
75 | " register.\n"); | ||
64 | use_cyclone = 0; | 76 | use_cyclone = 0; |
65 | return -ENODEV; | 77 | return -ENODEV; |
66 | } | 78 | } |
@@ -71,7 +83,8 @@ int __init init_cyclone_clock(void) | |||
71 | offset = (base + CYCLONE_MPCS_OFFSET); | 83 | offset = (base + CYCLONE_MPCS_OFFSET); |
72 | reg = (u64*)ioremap_nocache(offset, sizeof(u64)); | 84 | reg = (u64*)ioremap_nocache(offset, sizeof(u64)); |
73 | if(!reg){ | 85 | if(!reg){ |
74 | printk(KERN_ERR "Summit chipset: Could not find valid MPCS register.\n"); | 86 | printk(KERN_ERR "Summit chipset: Could not find valid MPCS" |
87 | " register.\n"); | ||
75 | use_cyclone = 0; | 88 | use_cyclone = 0; |
76 | return -ENODEV; | 89 | return -ENODEV; |
77 | } | 90 | } |
@@ -82,7 +95,8 @@ int __init init_cyclone_clock(void) | |||
82 | offset = (base + CYCLONE_MPMC_OFFSET); | 95 | offset = (base + CYCLONE_MPMC_OFFSET); |
83 | cyclone_timer = (u32*)ioremap_nocache(offset, sizeof(u32)); | 96 | cyclone_timer = (u32*)ioremap_nocache(offset, sizeof(u32)); |
84 | if(!cyclone_timer){ | 97 | if(!cyclone_timer){ |
85 | printk(KERN_ERR "Summit chipset: Could not find valid MPMC register.\n"); | 98 | printk(KERN_ERR "Summit chipset: Could not find valid MPMC" |
99 | " register.\n"); | ||
86 | use_cyclone = 0; | 100 | use_cyclone = 0; |
87 | return -ENODEV; | 101 | return -ENODEV; |
88 | } | 102 | } |
@@ -93,7 +107,8 @@ int __init init_cyclone_clock(void) | |||
93 | int stall = 100; | 107 | int stall = 100; |
94 | while(stall--) barrier(); | 108 | while(stall--) barrier(); |
95 | if(readl(cyclone_timer) == old){ | 109 | if(readl(cyclone_timer) == old){ |
96 | printk(KERN_ERR "Summit chipset: Counter not counting! DISABLED\n"); | 110 | printk(KERN_ERR "Summit chipset: Counter not counting!" |
111 | " DISABLED\n"); | ||
97 | iounmap(cyclone_timer); | 112 | iounmap(cyclone_timer); |
98 | cyclone_timer = 0; | 113 | cyclone_timer = 0; |
99 | use_cyclone = 0; | 114 | use_cyclone = 0; |
@@ -101,8 +116,11 @@ int __init init_cyclone_clock(void) | |||
101 | } | 116 | } |
102 | } | 117 | } |
103 | /* initialize last tick */ | 118 | /* initialize last tick */ |
104 | cyclone_interpolator.addr = cyclone_timer; | 119 | cyclone_mc = cyclone_timer; |
105 | register_time_interpolator(&cyclone_interpolator); | 120 | clocksource_cyclone.fsys_mmio = cyclone_timer; |
121 | clocksource_cyclone.mult = clocksource_hz2mult(CYCLONE_TIMER_FREQ, | ||
122 | clocksource_cyclone.shift); | ||
123 | clocksource_register(&clocksource_cyclone); | ||
106 | 124 | ||
107 | return 0; | 125 | return 0; |
108 | } | 126 | } |
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index 3f926c2dc708..44841971f077 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -147,12 +147,11 @@ ENTRY(fsys_set_tid_address) | |||
147 | FSYS_RETURN | 147 | FSYS_RETURN |
148 | END(fsys_set_tid_address) | 148 | END(fsys_set_tid_address) |
149 | 149 | ||
150 | /* | 150 | #if IA64_GTOD_LOCK_OFFSET !=0 |
151 | * Ensure that the time interpolator structure is compatible with the asm code | 151 | #error fsys_gettimeofday incompatible with changes to struct fsyscall_gtod_data_t |
152 | */ | 152 | #endif |
153 | #if IA64_TIME_INTERPOLATOR_SOURCE_OFFSET !=0 || IA64_TIME_INTERPOLATOR_SHIFT_OFFSET != 2 \ | 153 | #if IA64_ITC_JITTER_OFFSET !=0 |
154 | || IA64_TIME_INTERPOLATOR_JITTER_OFFSET != 3 || IA64_TIME_INTERPOLATOR_NSEC_OFFSET != 4 | 154 | #error fsys_gettimeofday incompatible with changes to struct itc_jitter_data_t |
155 | #error fsys_gettimeofday incompatible with changes to struct time_interpolator | ||
156 | #endif | 155 | #endif |
157 | #define CLOCK_REALTIME 0 | 156 | #define CLOCK_REALTIME 0 |
158 | #define CLOCK_MONOTONIC 1 | 157 | #define CLOCK_MONOTONIC 1 |
@@ -179,126 +178,124 @@ ENTRY(fsys_gettimeofday) | |||
179 | // r11 = preserved: saved ar.pfs | 178 | // r11 = preserved: saved ar.pfs |
180 | // r12 = preserved: memory stack | 179 | // r12 = preserved: memory stack |
181 | // r13 = preserved: thread pointer | 180 | // r13 = preserved: thread pointer |
182 | // r14 = address of mask / mask | 181 | // r14 = address of mask / mask value |
183 | // r15 = preserved: system call number | 182 | // r15 = preserved: system call number |
184 | // r16 = preserved: current task pointer | 183 | // r16 = preserved: current task pointer |
185 | // r17 = wall to monotonic use | 184 | // r17 = (not used) |
186 | // r18 = time_interpolator->offset | 185 | // r18 = (not used) |
187 | // r19 = address of wall_to_monotonic | 186 | // r19 = address of itc_lastcycle |
188 | // r20 = pointer to struct time_interpolator / pointer to time_interpolator->address | 187 | // r20 = struct fsyscall_gtod_data (= address of gtod_lock.sequence) |
189 | // r21 = shift factor | 188 | // r21 = address of mmio_ptr |
190 | // r22 = address of time interpolator->last_counter | 189 | // r22 = address of wall_time or monotonic_time |
191 | // r23 = address of time_interpolator->last_cycle | 190 | // r23 = address of shift / value |
192 | // r24 = adress of time_interpolator->offset | 191 | // r24 = address mult factor / cycle_last value |
193 | // r25 = last_cycle value | 192 | // r25 = itc_lastcycle value |
194 | // r26 = last_counter value | 193 | // r26 = address clocksource cycle_last |
195 | // r27 = pointer to xtime | 194 | // r27 = (not used) |
196 | // r28 = sequence number at the beginning of critcal section | 195 | // r28 = sequence number at the beginning of critcal section |
197 | // r29 = address of seqlock | 196 | // r29 = address of itc_jitter |
198 | // r30 = time processing flags / memory address | 197 | // r30 = time processing flags / memory address |
199 | // r31 = pointer to result | 198 | // r31 = pointer to result |
200 | // Predicates | 199 | // Predicates |
201 | // p6,p7 short term use | 200 | // p6,p7 short term use |
202 | // p8 = timesource ar.itc | 201 | // p8 = timesource ar.itc |
203 | // p9 = timesource mmio64 | 202 | // p9 = timesource mmio64 |
204 | // p10 = timesource mmio32 | 203 | // p10 = timesource mmio32 - not used |
205 | // p11 = timesource not to be handled by asm code | 204 | // p11 = timesource not to be handled by asm code |
206 | // p12 = memory time source ( = p9 | p10) | 205 | // p12 = memory time source ( = p9 | p10) - not used |
207 | // p13 = do cmpxchg with time_interpolator_last_cycle | 206 | // p13 = do cmpxchg with itc_lastcycle |
208 | // p14 = Divide by 1000 | 207 | // p14 = Divide by 1000 |
209 | // p15 = Add monotonic | 208 | // p15 = Add monotonic |
210 | // | 209 | // |
211 | // Note that instructions are optimized for McKinley. McKinley can process two | 210 | // Note that instructions are optimized for McKinley. McKinley can |
212 | // bundles simultaneously and therefore we continuously try to feed the CPU | 211 | // process two bundles simultaneously and therefore we continuously |
213 | // two bundles and then a stop. | 212 | // try to feed the CPU two bundles and then a stop. |
214 | tnat.nz p6,p0 = r31 // branch deferred since it does not fit into bundle structure | 213 | // |
214 | // Additional note that code has changed a lot. Optimization is TBD. | ||
215 | // Comments begin with "?" are maybe outdated. | ||
216 | tnat.nz p6,p0 = r31 // ? branch deferred to fit later bundle | ||
215 | mov pr = r30,0xc000 // Set predicates according to function | 217 | mov pr = r30,0xc000 // Set predicates according to function |
216 | add r2 = TI_FLAGS+IA64_TASK_SIZE,r16 | 218 | add r2 = TI_FLAGS+IA64_TASK_SIZE,r16 |
217 | movl r20 = time_interpolator | 219 | movl r20 = fsyscall_gtod_data // load fsyscall gettimeofday data address |
218 | ;; | 220 | ;; |
219 | ld8 r20 = [r20] // get pointer to time_interpolator structure | 221 | movl r29 = itc_jitter_data // itc_jitter |
220 | movl r29 = xtime_lock | 222 | add r22 = IA64_GTOD_WALL_TIME_OFFSET,r20 // wall_time |
221 | ld4 r2 = [r2] // process work pending flags | 223 | ld4 r2 = [r2] // process work pending flags |
222 | movl r27 = xtime | 224 | ;; |
223 | ;; // only one bundle here | 225 | (p15) add r22 = IA64_GTOD_MONO_TIME_OFFSET,r20 // monotonic_time |
224 | ld8 r21 = [r20] // first quad with control information | 226 | add r21 = IA64_CLKSRC_MMIO_OFFSET,r20 |
227 | add r19 = IA64_ITC_LASTCYCLE_OFFSET,r29 | ||
225 | and r2 = TIF_ALLWORK_MASK,r2 | 228 | and r2 = TIF_ALLWORK_MASK,r2 |
226 | (p6) br.cond.spnt.few .fail_einval // deferred branch | 229 | (p6) br.cond.spnt.few .fail_einval // ? deferred branch |
227 | ;; | 230 | ;; |
228 | add r10 = IA64_TIME_INTERPOLATOR_ADDRESS_OFFSET,r20 | 231 | add r26 = IA64_CLKSRC_CYCLE_LAST_OFFSET,r20 // clksrc_cycle_last |
229 | extr r3 = r21,32,32 // time_interpolator->nsec_per_cyc | ||
230 | extr r8 = r21,0,16 // time_interpolator->source | ||
231 | cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled | 232 | cmp.ne p6, p0 = 0, r2 // Fallback if work is scheduled |
232 | (p6) br.cond.spnt.many fsys_fallback_syscall | 233 | (p6) br.cond.spnt.many fsys_fallback_syscall |
233 | ;; | 234 | ;; |
234 | cmp.eq p8,p12 = 0,r8 // Check for cpu timer | 235 | // Begin critical section |
235 | cmp.eq p9,p0 = 1,r8 // MMIO64 ? | 236 | .time_redo: |
236 | extr r2 = r21,24,8 // time_interpolator->jitter | 237 | ld4.acq r28 = [r20] // gtod_lock.sequence, Must take first |
237 | cmp.eq p10,p0 = 2,r8 // MMIO32 ? | 238 | ;; |
238 | cmp.ltu p11,p0 = 2,r8 // function or other clock | 239 | and r28 = ~1,r28 // And make sequence even to force retry if odd |
239 | (p11) br.cond.spnt.many fsys_fallback_syscall | ||
240 | ;; | 240 | ;; |
241 | setf.sig f7 = r3 // Setup for scaling of counter | 241 | ld8 r30 = [r21] // clocksource->mmio_ptr |
242 | (p15) movl r19 = wall_to_monotonic | 242 | add r24 = IA64_CLKSRC_MULT_OFFSET,r20 |
243 | (p12) ld8 r30 = [r10] | 243 | ld4 r2 = [r29] // itc_jitter value |
244 | cmp.ne p13,p0 = r2,r0 // need jitter compensation? | 244 | add r23 = IA64_CLKSRC_SHIFT_OFFSET,r20 |
245 | extr r21 = r21,16,8 // shift factor | 245 | add r14 = IA64_CLKSRC_MASK_OFFSET,r20 |
246 | ;; | 246 | ;; |
247 | .time_redo: | 247 | ld4 r3 = [r24] // clocksource mult value |
248 | .pred.rel.mutex p8,p9,p10 | 248 | ld8 r14 = [r14] // clocksource mask value |
249 | ld4.acq r28 = [r29] // xtime_lock.sequence. Must come first for locking purposes | 249 | cmp.eq p8,p9 = 0,r30 // use cpu timer if no mmio_ptr |
250 | ;; | 250 | ;; |
251 | and r28 = ~1,r28 // Make sequence even to force retry if odd | 251 | setf.sig f7 = r3 // Setup for mult scaling of counter |
252 | (p8) cmp.ne p13,p0 = r2,r0 // need itc_jitter compensation, set p13 | ||
253 | ld4 r23 = [r23] // clocksource shift value | ||
254 | ld8 r24 = [r26] // get clksrc_cycle_last value | ||
255 | (p9) cmp.eq p13,p0 = 0,r30 // if mmio_ptr, clear p13 jitter control | ||
252 | ;; | 256 | ;; |
257 | .pred.rel.mutex p8,p9 | ||
253 | (p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!! | 258 | (p8) mov r2 = ar.itc // CPU_TIMER. 36 clocks latency!!! |
254 | add r22 = IA64_TIME_INTERPOLATOR_LAST_COUNTER_OFFSET,r20 | 259 | (p9) ld8 r2 = [r30] // MMIO_TIMER. Could also have latency issues.. |
255 | (p9) ld8 r2 = [r30] // readq(ti->address). Could also have latency issues.. | 260 | (p13) ld8 r25 = [r19] // get itc_lastcycle value |
256 | (p10) ld4 r2 = [r30] // readw(ti->address) | 261 | ;; // ? could be removed by moving the last add upward |
257 | (p13) add r23 = IA64_TIME_INTERPOLATOR_LAST_CYCLE_OFFSET,r20 | 262 | ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET // tv_sec |
258 | ;; // could be removed by moving the last add upward | 263 | ;; |
259 | ld8 r26 = [r22] // time_interpolator->last_counter | 264 | ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET // tv_nsec |
260 | (p13) ld8 r25 = [r23] // time interpolator->last_cycle | 265 | (p13) sub r3 = r25,r2 // Diff needed before comparison (thanks davidm) |
261 | add r24 = IA64_TIME_INTERPOLATOR_OFFSET_OFFSET,r20 | 266 | ;; |
262 | (p15) ld8 r17 = [r19],IA64_TIMESPEC_TV_NSEC_OFFSET | 267 | (p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared |
263 | ld8 r9 = [r27],IA64_TIMESPEC_TV_NSEC_OFFSET | 268 | sub r10 = r2,r24 // current_cycle - last_cycle |
264 | add r14 = IA64_TIME_INTERPOLATOR_MASK_OFFSET, r20 | 269 | ;; |
265 | ;; | 270 | (p6) sub r10 = r25,r24 // time we got was less than last_cycle |
266 | ld8 r18 = [r24] // time_interpolator->offset | ||
267 | ld8 r8 = [r27],-IA64_TIMESPEC_TV_NSEC_OFFSET // xtime.tv_nsec | ||
268 | (p13) sub r3 = r25,r2 // Diff needed before comparison (thanks davidm) | ||
269 | ;; | ||
270 | ld8 r14 = [r14] // time_interpolator->mask | ||
271 | (p13) cmp.gt.unc p6,p7 = r3,r0 // check if it is less than last. p6,p7 cleared | ||
272 | sub r10 = r2,r26 // current_counter - last_counter | ||
273 | ;; | ||
274 | (p6) sub r10 = r25,r26 // time we got was less than last_cycle | ||
275 | (p7) mov ar.ccv = r25 // more than last_cycle. Prep for cmpxchg | 271 | (p7) mov ar.ccv = r25 // more than last_cycle. Prep for cmpxchg |
276 | ;; | 272 | ;; |
273 | (p7) cmpxchg8.rel r3 = [r19],r2,ar.ccv | ||
274 | ;; | ||
275 | (p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful | ||
276 | ;; | ||
277 | (p7) sub r10 = r3,r24 // then use new last_cycle instead | ||
278 | ;; | ||
277 | and r10 = r10,r14 // Apply mask | 279 | and r10 = r10,r14 // Apply mask |
278 | ;; | 280 | ;; |
279 | setf.sig f8 = r10 | 281 | setf.sig f8 = r10 |
280 | nop.i 123 | 282 | nop.i 123 |
281 | ;; | 283 | ;; |
282 | (p7) cmpxchg8.rel r3 = [r23],r2,ar.ccv | 284 | // fault check takes 5 cycles and we have spare time |
283 | EX(.fail_efault, probe.w.fault r31, 3) // This takes 5 cycles and we have spare time | 285 | EX(.fail_efault, probe.w.fault r31, 3) |
284 | xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter) | 286 | xmpy.l f8 = f8,f7 // nsec_per_cyc*(counter-last_counter) |
285 | (p15) add r9 = r9,r17 // Add wall to monotonic.secs to result secs | ||
286 | ;; | 287 | ;; |
287 | (p15) ld8 r17 = [r19],-IA64_TIMESPEC_TV_NSEC_OFFSET | 288 | // ? simulate tbit.nz.or p7,p0 = r28,0 |
288 | (p7) cmp.ne p7,p0 = r25,r3 // if cmpxchg not successful redo | ||
289 | // simulate tbit.nz.or p7,p0 = r28,0 | ||
290 | getf.sig r2 = f8 | 289 | getf.sig r2 = f8 |
291 | mf | 290 | mf |
292 | add r8 = r8,r18 // Add time interpolator offset | ||
293 | ;; | 291 | ;; |
294 | ld4 r10 = [r29] // xtime_lock.sequence | 292 | ld4 r10 = [r20] // gtod_lock.sequence |
295 | (p15) add r8 = r8, r17 // Add monotonic.nsecs to nsecs | 293 | shr.u r2 = r2,r23 // shift by factor |
296 | shr.u r2 = r2,r21 | 294 | ;; // ? overloaded 3 bundles! |
297 | ;; // overloaded 3 bundles! | ||
298 | // End critical section. | ||
299 | add r8 = r8,r2 // Add xtime.nsecs | 295 | add r8 = r8,r2 // Add xtime.nsecs |
300 | cmp4.ne.or p7,p0 = r28,r10 | 296 | cmp4.ne p7,p0 = r28,r10 |
301 | (p7) br.cond.dpnt.few .time_redo // sequence number changed ? | 297 | (p7) br.cond.dpnt.few .time_redo // sequence number changed, redo |
298 | // End critical section. | ||
302 | // Now r8=tv->tv_nsec and r9=tv->tv_sec | 299 | // Now r8=tv->tv_nsec and r9=tv->tv_sec |
303 | mov r10 = r0 | 300 | mov r10 = r0 |
304 | movl r2 = 1000000000 | 301 | movl r2 = 1000000000 |
@@ -308,19 +305,19 @@ EX(.fail_efault, probe.w.fault r31, 3) // This takes 5 cycles and we have spare | |||
308 | .time_normalize: | 305 | .time_normalize: |
309 | mov r21 = r8 | 306 | mov r21 = r8 |
310 | cmp.ge p6,p0 = r8,r2 | 307 | cmp.ge p6,p0 = r8,r2 |
311 | (p14) shr.u r20 = r8, 3 // We can repeat this if necessary just wasting some time | 308 | (p14) shr.u r20 = r8, 3 // We can repeat this if necessary just wasting time |
312 | ;; | 309 | ;; |
313 | (p14) setf.sig f8 = r20 | 310 | (p14) setf.sig f8 = r20 |
314 | (p6) sub r8 = r8,r2 | 311 | (p6) sub r8 = r8,r2 |
315 | (p6) add r9 = 1,r9 // two nops before the branch. | 312 | (p6) add r9 = 1,r9 // two nops before the branch. |
316 | (p14) setf.sig f7 = r3 // Chances for repeats are 1 in 10000 for gettod | 313 | (p14) setf.sig f7 = r3 // Chances for repeats are 1 in 10000 for gettod |
317 | (p6) br.cond.dpnt.few .time_normalize | 314 | (p6) br.cond.dpnt.few .time_normalize |
318 | ;; | 315 | ;; |
319 | // Divided by 8 though shift. Now divide by 125 | 316 | // Divided by 8 though shift. Now divide by 125 |
320 | // The compiler was able to do that with a multiply | 317 | // The compiler was able to do that with a multiply |
321 | // and a shift and we do the same | 318 | // and a shift and we do the same |
322 | EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles | 319 | EX(.fail_efault, probe.w.fault r23, 3) // This also costs 5 cycles |
323 | (p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it... | 320 | (p14) xmpy.hu f8 = f8, f7 // xmpy has 5 cycles latency so use it |
324 | ;; | 321 | ;; |
325 | mov r8 = r0 | 322 | mov r8 = r0 |
326 | (p14) getf.sig r2 = f8 | 323 | (p14) getf.sig r2 = f8 |
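
For readers who do not follow ia64 assembly, the rewritten fast path above amounts to the C-like sketch below: read the sequence counter, copy the requested timespec and the clocksource fields, read the counter (CPU ITC or MMIO), optionally clamp it against itc_lastcycle with a cmpxchg so time never appears to step backwards, convert the cycle delta with mask/mult/shift, and retry the whole block if the sequence counter changed. This is an illustration of the control flow only, not a literal translation; the helper names and simplified structures are invented, and the real retry loop lives in the assembly above.

    /*
     * Hedged C sketch of the fsys_gettimeofday fast path.  The structures are
     * simplified stand-ins for fsyscall_gtod_data_t / itc_jitter_data_t and
     * the helpers are invented for illustration.
     */
    #include <stdint.h>
    #include <time.h>

    struct gtod_data {
            volatile unsigned int seq;          /* gtod_lock.sequence (odd = writer active) */
            struct timespec wall_time;
            struct timespec monotonic_time;
            uint64_t clk_mask;
            uint32_t clk_mult, clk_shift;
            volatile uint64_t *clk_fsys_mmio;   /* NULL means "use the CPU ITC" */
            uint64_t clk_cycle_last;
    };

    struct jitter_data {
            int itc_jitter;
            volatile uint64_t itc_lastcycle;
    };

    static uint64_t fake_itc;
    static uint64_t read_cpu_cycles(void)       /* stand-in for "mov r2 = ar.itc" */
    {
            return ++fake_itc;
    }

    void gettime_sketch(struct gtod_data *g, struct jitter_data *j,
                        int monotonic, struct timespec *ts)
    {
            unsigned int seq;
            uint64_t cycles, delta, ns;

            do {
                    seq = g->seq & ~1u;         /* odd sequence forces a retry below */
                    *ts = monotonic ? g->monotonic_time : g->wall_time;

                    cycles = g->clk_fsys_mmio ? *g->clk_fsys_mmio : read_cpu_cycles();
                    if (!g->clk_fsys_mmio && j->itc_jitter) {
                            /* keep per-CPU ITC readings monotonic via itc_lastcycle */
                            uint64_t last = j->itc_lastcycle;
                            if (cycles < last) {
                                    cycles = last;
                            } else {
                                    uint64_t ret = __sync_val_compare_and_swap(
                                                    &j->itc_lastcycle, last, cycles);
                                    if (ret != last)
                                            cycles = ret;   /* lost the race, use winner */
                            }
                    }

                    delta = (cycles - g->clk_cycle_last) & g->clk_mask;
                    ns = (delta * g->clk_mult) >> g->clk_shift;
            } while (seq != g->seq);            /* sequence changed: redo the read */

            ts->tv_nsec += ns;
            while (ts->tv_nsec >= 1000000000L) {        /* same job as .time_normalize */
                    ts->tv_nsec -= 1000000000L;
                    ts->tv_sec++;
            }
    }

The same monotonicity clamp appears in C form as itc_get_cycles() in arch/ia64/kernel/time.c later in this merge.
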
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
new file mode 100644
index 000000000000..490dab55fba3
--- /dev/null
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -0,0 +1,23 @@ | |||
1 | /* | ||
2 | * (c) Copyright 2007 Hewlett-Packard Development Company, L.P. | ||
3 | * Contributed by Peter Keilty <peter.keilty@hp.com> | ||
4 | * | ||
5 | * fsyscall gettimeofday data | ||
6 | */ | ||
7 | |||
8 | struct fsyscall_gtod_data_t { | ||
9 | seqlock_t lock; | ||
10 | struct timespec wall_time; | ||
11 | struct timespec monotonic_time; | ||
12 | cycle_t clk_mask; | ||
13 | u32 clk_mult; | ||
14 | u32 clk_shift; | ||
15 | void *clk_fsys_mmio; | ||
16 | cycle_t clk_cycle_last; | ||
17 | } __attribute__ ((aligned (L1_CACHE_BYTES))); | ||
18 | |||
19 | struct itc_jitter_data_t { | ||
20 | int itc_jitter; | ||
21 | cycle_t itc_lastcycle; | ||
22 | } __attribute__ ((aligned (L1_CACHE_BYTES))); | ||
23 | |||
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 3486fe7d6e65..627785c48ea9 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/interrupt.h> | 19 | #include <linux/interrupt.h> |
20 | #include <linux/efi.h> | 20 | #include <linux/efi.h> |
21 | #include <linux/timex.h> | 21 | #include <linux/timex.h> |
22 | #include <linux/clocksource.h> | ||
22 | 23 | ||
23 | #include <asm/machvec.h> | 24 | #include <asm/machvec.h> |
24 | #include <asm/delay.h> | 25 | #include <asm/delay.h> |
@@ -28,6 +29,16 @@ | |||
28 | #include <asm/sections.h> | 29 | #include <asm/sections.h> |
29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
30 | 31 | ||
32 | #include "fsyscall_gtod_data.h" | ||
33 | |||
34 | static cycle_t itc_get_cycles(void); | ||
35 | |||
36 | struct fsyscall_gtod_data_t fsyscall_gtod_data = { | ||
37 | .lock = SEQLOCK_UNLOCKED, | ||
38 | }; | ||
39 | |||
40 | struct itc_jitter_data_t itc_jitter_data; | ||
41 | |||
31 | volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */ | 42 | volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */ |
32 | 43 | ||
33 | #ifdef CONFIG_IA64_DEBUG_IRQ | 44 | #ifdef CONFIG_IA64_DEBUG_IRQ |
@@ -37,11 +48,16 @@ EXPORT_SYMBOL(last_cli_ip); | |||
37 | 48 | ||
38 | #endif | 49 | #endif |
39 | 50 | ||
40 | static struct time_interpolator itc_interpolator = { | 51 | static struct clocksource clocksource_itc = { |
41 | .shift = 16, | 52 | .name = "itc", |
42 | .mask = 0xffffffffffffffffLL, | 53 | .rating = 350, |
43 | .source = TIME_SOURCE_CPU | 54 | .read = itc_get_cycles, |
55 | .mask = 0xffffffffffffffff, | ||
56 | .mult = 0, /*to be calculated*/ | ||
57 | .shift = 16, | ||
58 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
44 | }; | 59 | }; |
60 | static struct clocksource *itc_clocksource; | ||
45 | 61 | ||
46 | static irqreturn_t | 62 | static irqreturn_t |
47 | timer_interrupt (int irq, void *dev_id) | 63 | timer_interrupt (int irq, void *dev_id) |
@@ -210,8 +226,6 @@ ia64_init_itm (void) | |||
210 | + itc_freq/2)/itc_freq; | 226 | + itc_freq/2)/itc_freq; |
211 | 227 | ||
212 | if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) { | 228 | if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) { |
213 | itc_interpolator.frequency = local_cpu_data->itc_freq; | ||
214 | itc_interpolator.drift = itc_drift; | ||
215 | #ifdef CONFIG_SMP | 229 | #ifdef CONFIG_SMP |
216 | /* On IA64 in an SMP configuration ITCs are never accurately synchronized. | 230 | /* On IA64 in an SMP configuration ITCs are never accurately synchronized. |
217 | * Jitter compensation requires a cmpxchg which may limit | 231 | * Jitter compensation requires a cmpxchg which may limit |
@@ -223,15 +237,50 @@ ia64_init_itm (void) | |||
223 | * even going backward) if the ITC offsets between the individual CPUs | 237 | * even going backward) if the ITC offsets between the individual CPUs |
224 | * are too large. | 238 | * are too large. |
225 | */ | 239 | */ |
226 | if (!nojitter) itc_interpolator.jitter = 1; | 240 | if (!nojitter) |
241 | itc_jitter_data.itc_jitter = 1; | ||
227 | #endif | 242 | #endif |
228 | register_time_interpolator(&itc_interpolator); | ||
229 | } | 243 | } |
230 | 244 | ||
231 | /* Setup the CPU local timer tick */ | 245 | /* Setup the CPU local timer tick */ |
232 | ia64_cpu_local_tick(); | 246 | ia64_cpu_local_tick(); |
247 | |||
248 | if (!itc_clocksource) { | ||
249 | /* Sort out mult/shift values: */ | ||
250 | clocksource_itc.mult = | ||
251 | clocksource_hz2mult(local_cpu_data->itc_freq, | ||
252 | clocksource_itc.shift); | ||
253 | clocksource_register(&clocksource_itc); | ||
254 | itc_clocksource = &clocksource_itc; | ||
255 | } | ||
233 | } | 256 | } |
234 | 257 | ||
258 | static cycle_t itc_get_cycles() | ||
259 | { | ||
260 | u64 lcycle, now, ret; | ||
261 | |||
262 | if (!itc_jitter_data.itc_jitter) | ||
263 | return get_cycles(); | ||
264 | |||
265 | lcycle = itc_jitter_data.itc_lastcycle; | ||
266 | now = get_cycles(); | ||
267 | if (lcycle && time_after(lcycle, now)) | ||
268 | return lcycle; | ||
269 | |||
270 | /* | ||
271 | * Keep track of the last timer value returned. | ||
272 | * In an SMP environment, you could lose out in contention of | ||
273 | * cmpxchg. If so, your cmpxchg returns new value which the | ||
274 | * winner of contention updated to. Use the new value instead. | ||
275 | */ | ||
276 | ret = cmpxchg(&itc_jitter_data.itc_lastcycle, lcycle, now); | ||
277 | if (unlikely(ret != lcycle)) | ||
278 | return ret; | ||
279 | |||
280 | return now; | ||
281 | } | ||
282 | |||
283 | |||
235 | static struct irqaction timer_irqaction = { | 284 | static struct irqaction timer_irqaction = { |
236 | .handler = timer_interrupt, | 285 | .handler = timer_interrupt, |
237 | .flags = IRQF_DISABLED | IRQF_IRQPOLL, | 286 | .flags = IRQF_DISABLED | IRQF_IRQPOLL, |
@@ -307,3 +356,34 @@ ia64_setup_printk_clock(void) | |||
307 | if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) | 356 | if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) |
308 | ia64_printk_clock = ia64_itc_printk_clock; | 357 | ia64_printk_clock = ia64_itc_printk_clock; |
309 | } | 358 | } |
359 | |||
360 | void update_vsyscall(struct timespec *wall, struct clocksource *c) | ||
361 | { | ||
362 | unsigned long flags; | ||
363 | |||
364 | write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags); | ||
365 | |||
366 | /* copy fsyscall clock data */ | ||
367 | fsyscall_gtod_data.clk_mask = c->mask; | ||
368 | fsyscall_gtod_data.clk_mult = c->mult; | ||
369 | fsyscall_gtod_data.clk_shift = c->shift; | ||
370 | fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio; | ||
371 | fsyscall_gtod_data.clk_cycle_last = c->cycle_last; | ||
372 | |||
373 | /* copy kernel time structures */ | ||
374 | fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec; | ||
375 | fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec; | ||
376 | fsyscall_gtod_data.monotonic_time.tv_sec = wall_to_monotonic.tv_sec | ||
377 | + wall->tv_sec; | ||
378 | fsyscall_gtod_data.monotonic_time.tv_nsec = wall_to_monotonic.tv_nsec | ||
379 | + wall->tv_nsec; | ||
380 | |||
381 | /* normalize */ | ||
382 | while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) { | ||
383 | fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC; | ||
384 | fsyscall_gtod_data.monotonic_time.tv_sec++; | ||
385 | } | ||
386 | |||
387 | write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags); | ||
388 | } | ||
389 | |||
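
update_vsyscall() composes monotonic_time as wall time plus wall_to_monotonic and then renormalizes tv_nsec back into the [0, NSEC_PER_SEC) range; the fsyscall path performs the same normalization in .time_normalize. A tiny standalone illustration of that arithmetic (plain C, example values only):

    #include <stdio.h>
    #include <time.h>

    #define NSEC_PER_SEC 1000000000L

    /* Add two timespecs and renormalize tv_nsec, as update_vsyscall() does
     * when composing monotonic_time = wall_time + wall_to_monotonic. */
    static struct timespec timespec_add_norm(struct timespec a, struct timespec b)
    {
            struct timespec r = {
                    .tv_sec  = a.tv_sec + b.tv_sec,
                    .tv_nsec = a.tv_nsec + b.tv_nsec,
            };

            while (r.tv_nsec >= NSEC_PER_SEC) {
                    r.tv_nsec -= NSEC_PER_SEC;
                    r.tv_sec++;
            }
            return r;
    }

    int main(void)
    {
            struct timespec wall = { .tv_sec = 1000, .tv_nsec = 900000000L };
            struct timespec wtm  = { .tv_sec = -3,   .tv_nsec = 250000000L };
            struct timespec mono = timespec_add_norm(wall, wtm);

            /* prints: monotonic = 998.150000000 */
            printf("monotonic = %ld.%09ld\n", (long)mono.tv_sec, mono.tv_nsec);
            return 0;
    }

The removed interpolator-based do_settimeofday() in kernel/time.c further down relied on set_normalized_timespec() for the same kind of normalization.
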
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index 56a88b6df4b4..19e25d2b64fc 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
12 | #include <linux/time.h> | 12 | #include <linux/time.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/clocksource.h> | ||
14 | 15 | ||
15 | #include <asm/hw_irq.h> | 16 | #include <asm/hw_irq.h> |
16 | #include <asm/system.h> | 17 | #include <asm/system.h> |
@@ -22,11 +23,21 @@ | |||
22 | 23 | ||
23 | extern unsigned long sn_rtc_cycles_per_second; | 24 | extern unsigned long sn_rtc_cycles_per_second; |
24 | 25 | ||
25 | static struct time_interpolator sn2_interpolator = { | 26 | static void __iomem *sn2_mc; |
26 | .drift = -1, | 27 | |
27 | .shift = 10, | 28 | static cycle_t read_sn2(void) |
28 | .mask = (1LL << 55) - 1, | 29 | { |
29 | .source = TIME_SOURCE_MMIO64 | 30 | return (cycle_t)readq(sn2_mc); |
31 | } | ||
32 | |||
33 | static struct clocksource clocksource_sn2 = { | ||
34 | .name = "sn2_rtc", | ||
35 | .rating = 300, | ||
36 | .read = read_sn2, | ||
37 | .mask = (1LL << 55) - 1, | ||
38 | .mult = 0, | ||
39 | .shift = 10, | ||
40 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
30 | }; | 41 | }; |
31 | 42 | ||
32 | /* | 43 | /* |
@@ -47,9 +58,11 @@ ia64_sn_udelay (unsigned long usecs) | |||
47 | 58 | ||
48 | void __init sn_timer_init(void) | 59 | void __init sn_timer_init(void) |
49 | { | 60 | { |
50 | sn2_interpolator.frequency = sn_rtc_cycles_per_second; | 61 | sn2_mc = RTC_COUNTER_ADDR; |
51 | sn2_interpolator.addr = RTC_COUNTER_ADDR; | 62 | clocksource_sn2.fsys_mmio = RTC_COUNTER_ADDR; |
52 | register_time_interpolator(&sn2_interpolator); | 63 | clocksource_sn2.mult = clocksource_hz2mult(sn_rtc_cycles_per_second, |
64 | clocksource_sn2.shift); | ||
65 | clocksource_register(&clocksource_sn2); | ||
53 | 66 | ||
54 | ia64_udelay = &ia64_sn_udelay; | 67 | ia64_udelay = &ia64_sn_udelay; |
55 | } | 68 | } |
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 80ffc7829916..bb5d23be4260 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -475,7 +475,7 @@ static void acpi_processor_idle(void) | |||
475 | /* Get end time (ticks) */ | 475 | /* Get end time (ticks) */ |
476 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); | 476 | t2 = inl(acpi_gbl_FADT.xpm_timer_block.address); |
477 | 477 | ||
478 | #ifdef CONFIG_GENERIC_TIME | 478 | #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) |
479 | /* TSC halts in C2, so notify users */ | 479 | /* TSC halts in C2, so notify users */ |
480 | mark_tsc_unstable("possible TSC halt in C2"); | 480 | mark_tsc_unstable("possible TSC halt in C2"); |
481 | #endif | 481 | #endif |
@@ -517,7 +517,7 @@ static void acpi_processor_idle(void) | |||
517 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); | 517 | acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0); |
518 | } | 518 | } |
519 | 519 | ||
520 | #ifdef CONFIG_GENERIC_TIME | 520 | #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC) |
521 | /* TSC halts in C3, so notify users */ | 521 | /* TSC halts in C3, so notify users */ |
522 | mark_tsc_unstable("TSC halts in C3"); | 522 | mark_tsc_unstable("TSC halts in C3"); |
523 | #endif | 523 | #endif |
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 0be700f4e8fd..ba0e74ad74bb 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/bcd.h> | 29 | #include <linux/bcd.h> |
30 | #include <linux/seq_file.h> | 30 | #include <linux/seq_file.h> |
31 | #include <linux/bitops.h> | 31 | #include <linux/bitops.h> |
32 | #include <linux/clocksource.h> | ||
32 | 33 | ||
33 | #include <asm/current.h> | 34 | #include <asm/current.h> |
34 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
@@ -51,8 +52,34 @@ | |||
51 | 52 | ||
52 | #define HPET_RANGE_SIZE 1024 /* from HPET spec */ | 53 | #define HPET_RANGE_SIZE 1024 /* from HPET spec */ |
53 | 54 | ||
55 | #if BITS_PER_LONG == 64 | ||
56 | #define write_counter(V, MC) writeq(V, MC) | ||
57 | #define read_counter(MC) readq(MC) | ||
58 | #else | ||
59 | #define write_counter(V, MC) writel(V, MC) | ||
60 | #define read_counter(MC) readl(MC) | ||
61 | #endif | ||
62 | |||
54 | static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; | 63 | static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ; |
55 | 64 | ||
65 | static void __iomem *hpet_mctr; | ||
66 | |||
67 | static cycle_t read_hpet(void) | ||
68 | { | ||
69 | return (cycle_t)read_counter((void __iomem *)hpet_mctr); | ||
70 | } | ||
71 | |||
72 | static struct clocksource clocksource_hpet = { | ||
73 | .name = "hpet", | ||
74 | .rating = 250, | ||
75 | .read = read_hpet, | ||
76 | .mask = 0xffffffffffffffff, | ||
77 | .mult = 0, /*to be calculated*/ | ||
78 | .shift = 10, | ||
79 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
80 | }; | ||
81 | static struct clocksource *hpet_clocksource; | ||
82 | |||
56 | /* A lock for concurrent access by app and isr hpet activity. */ | 83 | /* A lock for concurrent access by app and isr hpet activity. */ |
57 | static DEFINE_SPINLOCK(hpet_lock); | 84 | static DEFINE_SPINLOCK(hpet_lock); |
58 | /* A lock for concurrent intermodule access to hpet and isr hpet activity. */ | 85 | /* A lock for concurrent intermodule access to hpet and isr hpet activity. */ |
@@ -79,7 +106,7 @@ struct hpets { | |||
79 | struct hpets *hp_next; | 106 | struct hpets *hp_next; |
80 | struct hpet __iomem *hp_hpet; | 107 | struct hpet __iomem *hp_hpet; |
81 | unsigned long hp_hpet_phys; | 108 | unsigned long hp_hpet_phys; |
82 | struct time_interpolator *hp_interpolator; | 109 | struct clocksource *hp_clocksource; |
83 | unsigned long long hp_tick_freq; | 110 | unsigned long long hp_tick_freq; |
84 | unsigned long hp_delta; | 111 | unsigned long hp_delta; |
85 | unsigned int hp_ntimer; | 112 | unsigned int hp_ntimer; |
@@ -94,13 +121,6 @@ static struct hpets *hpets; | |||
94 | #define HPET_PERIODIC 0x0004 | 121 | #define HPET_PERIODIC 0x0004 |
95 | #define HPET_SHARED_IRQ 0x0008 | 122 | #define HPET_SHARED_IRQ 0x0008 |
96 | 123 | ||
97 | #if BITS_PER_LONG == 64 | ||
98 | #define write_counter(V, MC) writeq(V, MC) | ||
99 | #define read_counter(MC) readq(MC) | ||
100 | #else | ||
101 | #define write_counter(V, MC) writel(V, MC) | ||
102 | #define read_counter(MC) readl(MC) | ||
103 | #endif | ||
104 | 124 | ||
105 | #ifndef readq | 125 | #ifndef readq |
106 | static inline unsigned long long readq(void __iomem *addr) | 126 | static inline unsigned long long readq(void __iomem *addr) |
@@ -737,27 +757,6 @@ static ctl_table dev_root[] = { | |||
737 | 757 | ||
738 | static struct ctl_table_header *sysctl_header; | 758 | static struct ctl_table_header *sysctl_header; |
739 | 759 | ||
740 | static void hpet_register_interpolator(struct hpets *hpetp) | ||
741 | { | ||
742 | #ifdef CONFIG_TIME_INTERPOLATION | ||
743 | struct time_interpolator *ti; | ||
744 | |||
745 | ti = kzalloc(sizeof(*ti), GFP_KERNEL); | ||
746 | if (!ti) | ||
747 | return; | ||
748 | |||
749 | ti->source = TIME_SOURCE_MMIO64; | ||
750 | ti->shift = 10; | ||
751 | ti->addr = &hpetp->hp_hpet->hpet_mc; | ||
752 | ti->frequency = hpetp->hp_tick_freq; | ||
753 | ti->drift = HPET_DRIFT; | ||
754 | ti->mask = -1; | ||
755 | |||
756 | hpetp->hp_interpolator = ti; | ||
757 | register_time_interpolator(ti); | ||
758 | #endif | ||
759 | } | ||
760 | |||
761 | /* | 760 | /* |
762 | * Adjustment for when arming the timer with | 761 | * Adjustment for when arming the timer with |
763 | * initial conditions. That is, main counter | 762 | * initial conditions. That is, main counter |
@@ -909,7 +908,16 @@ int hpet_alloc(struct hpet_data *hdp) | |||
909 | } | 908 | } |
910 | 909 | ||
911 | hpetp->hp_delta = hpet_calibrate(hpetp); | 910 | hpetp->hp_delta = hpet_calibrate(hpetp); |
912 | hpet_register_interpolator(hpetp); | 911 | |
912 | if (!hpet_clocksource) { | ||
913 | hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc; | ||
914 | CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr); | ||
915 | clocksource_hpet.mult = clocksource_hz2mult(hpetp->hp_tick_freq, | ||
916 | clocksource_hpet.shift); | ||
917 | clocksource_register(&clocksource_hpet); | ||
918 | hpetp->hp_clocksource = &clocksource_hpet; | ||
919 | hpet_clocksource = &clocksource_hpet; | ||
920 | } | ||
913 | 921 | ||
914 | return 0; | 922 | return 0; |
915 | } | 923 | } |
@@ -995,7 +1003,7 @@ static int hpet_acpi_add(struct acpi_device *device) | |||
995 | 1003 | ||
996 | static int hpet_acpi_remove(struct acpi_device *device, int type) | 1004 | static int hpet_acpi_remove(struct acpi_device *device, int type) |
997 | { | 1005 | { |
998 | /* XXX need to unregister interpolator, dealloc mem, etc */ | 1006 | /* XXX need to unregister clocksource, dealloc mem, etc */ |
999 | return -EINVAL; | 1007 | return -EINVAL; |
1000 | } | 1008 | } |
1001 | 1009 | ||
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index bf297b03a4e4..16ea3374dddf 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -67,6 +67,12 @@ struct clocksource { | |||
67 | unsigned long flags; | 67 | unsigned long flags; |
68 | cycle_t (*vread)(void); | 68 | cycle_t (*vread)(void); |
69 | void (*resume)(void); | 69 | void (*resume)(void); |
70 | #ifdef CONFIG_IA64 | ||
71 | void *fsys_mmio; /* used by fsyscall asm code */ | ||
72 | #define CLKSRC_FSYS_MMIO_SET(mmio, addr) ((mmio) = (addr)) | ||
73 | #else | ||
74 | #define CLKSRC_FSYS_MMIO_SET(mmio, addr) do { } while (0) | ||
75 | #endif | ||
70 | 76 | ||
71 | /* timekeeping specific data, ignore */ | 77 | /* timekeeping specific data, ignore */ |
72 | cycle_t cycle_interval; | 78 | cycle_t cycle_interval; |
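
The fsys_mmio field only exists when CONFIG_IA64 is set, so CLKSRC_FSYS_MMIO_SET() gives shared drivers such as drivers/char/hpet.c a call site that compiles to nothing on every other architecture instead of an #ifdef block. A minimal illustration of the same pattern, with a made-up config symbol and structure:

    /*
     * Illustration of the "conditionally present field, no-op setter" pattern
     * used for fsys_mmio.  CONFIG_FOO, struct widget and the field are made up.
     */
    #include <stdio.h>

    struct widget {
            const char *name;
    #ifdef CONFIG_FOO
            void *foo_ptr;
    #define WIDGET_FOO_SET(w, p)    ((w).foo_ptr = (p))
    #else
    #define WIDGET_FOO_SET(w, p)    do { } while (0)
    #endif
    };

    int main(void)
    {
            struct widget w = { .name = "demo" };
            int backing;

            /* The same line compiles whether or not CONFIG_FOO (and the field) exist. */
            WIDGET_FOO_SET(w, &backing);
            printf("%s\n", w.name);
            return 0;
    }

drivers/char/hpet.c below uses the real macro the same way: CLKSRC_FSYS_MMIO_SET(clocksource_hpet.fsys_mmio, hpet_mctr) assigns the MMIO address on ia64 and disappears everywhere else.
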
diff --git a/include/linux/timex.h b/include/linux/timex.h
index da929dbbea2a..37ac3ff90faf 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -224,66 +224,6 @@ static inline int ntp_synced(void) | |||
224 | __x < 0 ? -(-__x >> __s) : __x >> __s; \ | 224 | __x < 0 ? -(-__x >> __s) : __x >> __s; \ |
225 | }) | 225 | }) |
226 | 226 | ||
227 | |||
228 | #ifdef CONFIG_TIME_INTERPOLATION | ||
229 | |||
230 | #define TIME_SOURCE_CPU 0 | ||
231 | #define TIME_SOURCE_MMIO64 1 | ||
232 | #define TIME_SOURCE_MMIO32 2 | ||
233 | #define TIME_SOURCE_FUNCTION 3 | ||
234 | |||
235 | /* For proper operations time_interpolator clocks must run slightly slower | ||
236 | * than the standard clock since the interpolator may only correct by having | ||
237 | * time jump forward during a tick. A slower clock is usually a side effect | ||
238 | * of the integer divide of the nanoseconds in a second by the frequency. | ||
239 | * The accuracy of the division can be increased by specifying a shift. | ||
240 | * However, this may cause the clock not to be slow enough. | ||
241 | * The interpolator will self-tune the clock by slowing down if no | ||
242 | * resets occur or speeding up if the time jumps per analysis cycle | ||
243 | * become too high. | ||
244 | * | ||
245 | * Setting jitter compensates for a fluctuating timesource by comparing | ||
246 | * to the last value read from the timesource to insure that an earlier value | ||
247 | * is not returned by a later call. The price to pay | ||
248 | * for the compensation is that the timer routines are not as scalable anymore. | ||
249 | */ | ||
250 | |||
251 | struct time_interpolator { | ||
252 | u16 source; /* time source flags */ | ||
253 | u8 shift; /* increases accuracy of multiply by shifting. */ | ||
254 | /* Note that bits may be lost if shift is set too high */ | ||
255 | u8 jitter; /* if set compensate for fluctuations */ | ||
256 | u32 nsec_per_cyc; /* set by register_time_interpolator() */ | ||
257 | void *addr; /* address of counter or function */ | ||
258 | cycles_t mask; /* mask the valid bits of the counter */ | ||
259 | unsigned long offset; /* nsec offset at last update of interpolator */ | ||
260 | u64 last_counter; /* counter value in units of the counter at last update */ | ||
261 | cycles_t last_cycle; /* Last timer value if TIME_SOURCE_JITTER is set */ | ||
262 | u64 frequency; /* frequency in counts/second */ | ||
263 | long drift; /* drift in parts-per-million (or -1) */ | ||
264 | unsigned long skips; /* skips forward */ | ||
265 | unsigned long ns_skipped; /* nanoseconds skipped */ | ||
266 | struct time_interpolator *next; | ||
267 | }; | ||
268 | |||
269 | extern void register_time_interpolator(struct time_interpolator *); | ||
270 | extern void unregister_time_interpolator(struct time_interpolator *); | ||
271 | extern void time_interpolator_reset(void); | ||
272 | extern unsigned long time_interpolator_get_offset(void); | ||
273 | extern void time_interpolator_update(long delta_nsec); | ||
274 | |||
275 | #else /* !CONFIG_TIME_INTERPOLATION */ | ||
276 | |||
277 | static inline void time_interpolator_reset(void) | ||
278 | { | ||
279 | } | ||
280 | |||
281 | static inline void time_interpolator_update(long delta_nsec) | ||
282 | { | ||
283 | } | ||
284 | |||
285 | #endif /* !CONFIG_TIME_INTERPOLATION */ | ||
286 | |||
287 | #define TICK_LENGTH_SHIFT 32 | 227 | #define TICK_LENGTH_SHIFT 32 |
288 | 228 | ||
289 | #ifdef CONFIG_NO_HZ | 229 | #ifdef CONFIG_NO_HZ |
diff --git a/kernel/time.c b/kernel/time.c
index ffe19149d770..e325597f5bf5 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -136,7 +136,6 @@ static inline void warp_clock(void) | |||
136 | write_seqlock_irq(&xtime_lock); | 136 | write_seqlock_irq(&xtime_lock); |
137 | wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60; | 137 | wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60; |
138 | xtime.tv_sec += sys_tz.tz_minuteswest * 60; | 138 | xtime.tv_sec += sys_tz.tz_minuteswest * 60; |
139 | time_interpolator_reset(); | ||
140 | write_sequnlock_irq(&xtime_lock); | 139 | write_sequnlock_irq(&xtime_lock); |
141 | clock_was_set(); | 140 | clock_was_set(); |
142 | } | 141 | } |
@@ -309,92 +308,6 @@ struct timespec timespec_trunc(struct timespec t, unsigned gran) | |||
309 | } | 308 | } |
310 | EXPORT_SYMBOL(timespec_trunc); | 309 | EXPORT_SYMBOL(timespec_trunc); |
311 | 310 | ||
312 | #ifdef CONFIG_TIME_INTERPOLATION | ||
313 | void getnstimeofday (struct timespec *tv) | ||
314 | { | ||
315 | unsigned long seq,sec,nsec; | ||
316 | |||
317 | do { | ||
318 | seq = read_seqbegin(&xtime_lock); | ||
319 | sec = xtime.tv_sec; | ||
320 | nsec = xtime.tv_nsec+time_interpolator_get_offset(); | ||
321 | } while (unlikely(read_seqretry(&xtime_lock, seq))); | ||
322 | |||
323 | while (unlikely(nsec >= NSEC_PER_SEC)) { | ||
324 | nsec -= NSEC_PER_SEC; | ||
325 | ++sec; | ||
326 | } | ||
327 | tv->tv_sec = sec; | ||
328 | tv->tv_nsec = nsec; | ||
329 | } | ||
330 | EXPORT_SYMBOL_GPL(getnstimeofday); | ||
331 | |||
332 | int do_settimeofday (struct timespec *tv) | ||
333 | { | ||
334 | time_t wtm_sec, sec = tv->tv_sec; | ||
335 | long wtm_nsec, nsec = tv->tv_nsec; | ||
336 | |||
337 | if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) | ||
338 | return -EINVAL; | ||
339 | |||
340 | write_seqlock_irq(&xtime_lock); | ||
341 | { | ||
342 | wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); | ||
343 | wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec); | ||
344 | |||
345 | set_normalized_timespec(&xtime, sec, nsec); | ||
346 | set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec); | ||
347 | |||
348 | time_adjust = 0; /* stop active adjtime() */ | ||
349 | time_status |= STA_UNSYNC; | ||
350 | time_maxerror = NTP_PHASE_LIMIT; | ||
351 | time_esterror = NTP_PHASE_LIMIT; | ||
352 | time_interpolator_reset(); | ||
353 | } | ||
354 | write_sequnlock_irq(&xtime_lock); | ||
355 | clock_was_set(); | ||
356 | return 0; | ||
357 | } | ||
358 | EXPORT_SYMBOL(do_settimeofday); | ||
359 | |||
360 | void do_gettimeofday (struct timeval *tv) | ||
361 | { | ||
362 | unsigned long seq, nsec, usec, sec, offset; | ||
363 | do { | ||
364 | seq = read_seqbegin(&xtime_lock); | ||
365 | offset = time_interpolator_get_offset(); | ||
366 | sec = xtime.tv_sec; | ||
367 | nsec = xtime.tv_nsec; | ||
368 | } while (unlikely(read_seqretry(&xtime_lock, seq))); | ||
369 | |||
370 | usec = (nsec + offset) / 1000; | ||
371 | |||
372 | while (unlikely(usec >= USEC_PER_SEC)) { | ||
373 | usec -= USEC_PER_SEC; | ||
374 | ++sec; | ||
375 | } | ||
376 | |||
377 | tv->tv_sec = sec; | ||
378 | tv->tv_usec = usec; | ||
379 | |||
380 | /* | ||
381 | * Make sure xtime.tv_sec [returned by sys_time()] always | ||
382 | * follows the gettimeofday() result precisely. This | ||
383 | * condition is extremely unlikely, it can hit at most | ||
384 | * once per second: | ||
385 | */ | ||
386 | if (unlikely(xtime.tv_sec != tv->tv_sec)) { | ||
387 | unsigned long flags; | ||
388 | |||
389 | write_seqlock_irqsave(&xtime_lock, flags); | ||
390 | update_wall_time(); | ||
391 | write_sequnlock_irqrestore(&xtime_lock, flags); | ||
392 | } | ||
393 | } | ||
394 | EXPORT_SYMBOL(do_gettimeofday); | ||
395 | |||
396 | #else /* CONFIG_TIME_INTERPOLATION */ | ||
397 | |||
398 | #ifndef CONFIG_GENERIC_TIME | 311 | #ifndef CONFIG_GENERIC_TIME |
399 | /* | 312 | /* |
400 | * Simulate gettimeofday using do_gettimeofday which only allows a timeval | 313 | * Simulate gettimeofday using do_gettimeofday which only allows a timeval |
@@ -410,7 +323,6 @@ void getnstimeofday(struct timespec *tv) | |||
410 | } | 323 | } |
411 | EXPORT_SYMBOL_GPL(getnstimeofday); | 324 | EXPORT_SYMBOL_GPL(getnstimeofday); |
412 | #endif | 325 | #endif |
413 | #endif /* CONFIG_TIME_INTERPOLATION */ | ||
414 | 326 | ||
415 | /* Converts Gregorian date to seconds since 1970-01-01 00:00:00. | 327 | /* Converts Gregorian date to seconds since 1970-01-01 00:00:00. |
416 | * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 | 328 | * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 |
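The readers deleted above all share the same shape: sample xtime plus the interpolator offset under the xtime_lock seqlock, then carry any excess nanoseconds over into whole seconds. A self-contained user-space model of just that normalization step (the seqlock and the real time source are omitted, and the base/offset values are made up, so this is illustrative only):

	/* Toy model of the carry loop used by the removed getnstimeofday(). */
	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000UL

	struct ts_model { unsigned long tv_sec; unsigned long tv_nsec; };

	static struct ts_model add_offset(unsigned long sec, unsigned long nsec,
					  unsigned long offset_ns)
	{
		struct ts_model ts;

		nsec += offset_ns;
		while (nsec >= NSEC_PER_SEC) {	/* carry whole seconds */
			nsec -= NSEC_PER_SEC;
			++sec;
		}
		ts.tv_sec = sec;
		ts.tv_nsec = nsec;
		return ts;
	}

	int main(void)
	{
		/* base time 100s + 900ms, interpolator offset 250ms */
		struct ts_model ts = add_offset(100, 900000000UL, 250000000UL);

		printf("%lu.%09lu\n", ts.tv_sec, ts.tv_nsec);	/* 101.150000000 */
		return 0;
	}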
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 438c6b723ee2..b5e352597cbb 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
@@ -116,11 +116,6 @@ void second_overflow(void) | |||
116 | if (xtime.tv_sec % 86400 == 0) { | 116 | if (xtime.tv_sec % 86400 == 0) { |
117 | xtime.tv_sec--; | 117 | xtime.tv_sec--; |
118 | wall_to_monotonic.tv_sec++; | 118 | wall_to_monotonic.tv_sec++; |
119 | /* | ||
120 | * The timer interpolator will make time change | ||
121 | * gradually instead of an immediate jump by one second | ||
122 | */ | ||
123 | time_interpolator_update(-NSEC_PER_SEC); | ||
124 | time_state = TIME_OOP; | 119 | time_state = TIME_OOP; |
125 | printk(KERN_NOTICE "Clock: inserting leap second " | 120 | printk(KERN_NOTICE "Clock: inserting leap second " |
126 | "23:59:60 UTC\n"); | 121 | "23:59:60 UTC\n"); |
@@ -130,11 +125,6 @@ void second_overflow(void) | |||
130 | if ((xtime.tv_sec + 1) % 86400 == 0) { | 125 | if ((xtime.tv_sec + 1) % 86400 == 0) { |
131 | xtime.tv_sec++; | 126 | xtime.tv_sec++; |
132 | wall_to_monotonic.tv_sec--; | 127 | wall_to_monotonic.tv_sec--; |
133 | /* | ||
134 | * Use of time interpolator for a gradual change of | ||
135 | * time | ||
136 | */ | ||
137 | time_interpolator_update(NSEC_PER_SEC); | ||
138 | time_state = TIME_WAIT; | 128 | time_state = TIME_WAIT; |
139 | printk(KERN_NOTICE "Clock: deleting leap second " | 129 | printk(KERN_NOTICE "Clock: deleting leap second " |
140 | "23:59:59 UTC\n"); | 130 | "23:59:59 UTC\n"); |
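The calls removed from second_overflow() fed the one-second leap step into the interpolator as a delta, which was folded into the carried offset instead of letting the returned time jump. A rough user-space model of that folding step, simplified from the removed time_interpolator_update() (the counter-derived part of the offset and the skip accounting are left out, and the tick size is made up):

	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000L

	/* Carried offset in ns that the interpolator adds on top of xtime. */
	static long carried_offset;

	/* Fold a tick (or leap-second) delta into the carried offset,
	 * mirroring the offset arithmetic of the removed code.
	 */
	static void fold_delta(long delta_nsec)
	{
		if (delta_nsec < 0 || delta_nsec < carried_offset)
			carried_offset -= delta_nsec;
		else
			carried_offset = 0;	/* delta consumed the whole offset */
	}

	int main(void)
	{
		fold_delta(-NSEC_PER_SEC);		/* leap second inserted */
		printf("after leap: %ld ns\n", carried_offset);

		/* subsequent ~1ms ticks eat the offset back down gradually */
		for (int i = 0; i < 5; i++)
			fold_delta(1000000L);
		printf("after 5 ticks: %ld ns\n", carried_offset);
		return 0;
	}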
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 89698776613e..88c81026e003 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -466,10 +466,6 @@ void update_wall_time(void) | |||
466 | second_overflow(); | 466 | second_overflow(); |
467 | } | 467 | } |
468 | 468 | ||
469 | /* interpolator bits */ | ||
470 | time_interpolator_update(clock->xtime_interval | ||
471 | >> clock->shift); | ||
472 | |||
473 | /* accumulate error between NTP and clock interval */ | 469 | /* accumulate error between NTP and clock interval */ |
474 | clock->error += current_tick_length(); | 470 | clock->error += current_tick_length(); |
475 | clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift); | 471 | clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift); |
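What survives in update_wall_time() is the NTP error accumulation: the NTP tick length and the clocksource interval are compared in a fixed-point format with TICK_LENGTH_SHIFT fractional bits, and the difference accumulates in clock->error. A toy illustration of that bookkeeping (the 1000000/999999 ns figures are invented, and the extra rescaling by clock->shift done in the kernel is skipped here):

	#include <stdio.h>
	#include <stdint.h>

	#define TICK_LENGTH_SHIFT 32

	int main(void)
	{
		/* Pretend the NTP tick length is 1000000 ns and the clocksource
		 * interval works out to 999999 ns; both are scaled by 2^32 so
		 * sub-nanosecond error survives the subtraction.
		 */
		int64_t tick_length_scaled = (int64_t)1000000 << TICK_LENGTH_SHIFT;
		int64_t xtime_interval_scaled = (int64_t)999999 << TICK_LENGTH_SHIFT;
		int64_t error = 0;

		for (int tick = 0; tick < 3; tick++)
			error += tick_length_scaled - xtime_interval_scaled;

		printf("accumulated error: %lld ns\n",
		       (long long)(error >> TICK_LENGTH_SHIFT));	/* 3 ns */
		return 0;
	}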
diff --git a/kernel/timer.c b/kernel/timer.c index d1e8b975c7ae..6ce1952eea7d 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1349,194 +1349,6 @@ void __init init_timers(void) | |||
1349 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); | 1349 | open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL); |
1350 | } | 1350 | } |
1351 | 1351 | ||
1352 | #ifdef CONFIG_TIME_INTERPOLATION | ||
1353 | |||
1354 | struct time_interpolator *time_interpolator __read_mostly; | ||
1355 | static struct time_interpolator *time_interpolator_list __read_mostly; | ||
1356 | static DEFINE_SPINLOCK(time_interpolator_lock); | ||
1357 | |||
1358 | static inline cycles_t time_interpolator_get_cycles(unsigned int src) | ||
1359 | { | ||
1360 | unsigned long (*x)(void); | ||
1361 | |||
1362 | switch (src) | ||
1363 | { | ||
1364 | case TIME_SOURCE_FUNCTION: | ||
1365 | x = time_interpolator->addr; | ||
1366 | return x(); | ||
1367 | |||
1368 | case TIME_SOURCE_MMIO64 : | ||
1369 | return readq_relaxed((void __iomem *)time_interpolator->addr); | ||
1370 | |||
1371 | case TIME_SOURCE_MMIO32 : | ||
1372 | return readl_relaxed((void __iomem *)time_interpolator->addr); | ||
1373 | |||
1374 | default: return get_cycles(); | ||
1375 | } | ||
1376 | } | ||
1377 | |||
1378 | static inline u64 time_interpolator_get_counter(int writelock) | ||
1379 | { | ||
1380 | unsigned int src = time_interpolator->source; | ||
1381 | |||
1382 | if (time_interpolator->jitter) | ||
1383 | { | ||
1384 | cycles_t lcycle; | ||
1385 | cycles_t now; | ||
1386 | |||
1387 | do { | ||
1388 | lcycle = time_interpolator->last_cycle; | ||
1389 | now = time_interpolator_get_cycles(src); | ||
1390 | if (lcycle && time_after(lcycle, now)) | ||
1391 | return lcycle; | ||
1392 | |||
1393 | /* When holding the xtime write lock, there's no need | ||
1394 | * to add the overhead of the cmpxchg. Readers are | ||
1395 | * force to retry until the write lock is released. | ||
1396 | * forced to retry until the write lock is released. | ||
1397 | if (writelock) { | ||
1398 | time_interpolator->last_cycle = now; | ||
1399 | return now; | ||
1400 | } | ||
1401 | /* Keep track of the last timer value returned. The use of cmpxchg here | ||
1402 | * will cause contention in an SMP environment. | ||
1403 | */ | ||
1404 | } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle)); | ||
1405 | return now; | ||
1406 | } | ||
1407 | else | ||
1408 | return time_interpolator_get_cycles(src); | ||
1409 | } | ||
1410 | |||
1411 | void time_interpolator_reset(void) | ||
1412 | { | ||
1413 | time_interpolator->offset = 0; | ||
1414 | time_interpolator->last_counter = time_interpolator_get_counter(1); | ||
1415 | } | ||
1416 | |||
1417 | #define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift) | ||
1418 | |||
1419 | unsigned long time_interpolator_get_offset(void) | ||
1420 | { | ||
1421 | /* If we do not have a time interpolator set up then just return zero */ | ||
1422 | if (!time_interpolator) | ||
1423 | return 0; | ||
1424 | |||
1425 | return time_interpolator->offset + | ||
1426 | GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator); | ||
1427 | } | ||
1428 | |||
1429 | #define INTERPOLATOR_ADJUST 65536 | ||
1430 | #define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST | ||
1431 | |||
1432 | void time_interpolator_update(long delta_nsec) | ||
1433 | { | ||
1434 | u64 counter; | ||
1435 | unsigned long offset; | ||
1436 | |||
1437 | /* If there is no time interpolator set up then do nothing */ | ||
1438 | if (!time_interpolator) | ||
1439 | return; | ||
1440 | |||
1441 | /* | ||
1442 | * The interpolator compensates for late ticks by accumulating the late | ||
1443 | * time in time_interpolator->offset. A tick earlier than expected will | ||
1444 | * lead to a reset of the offset and a corresponding jump of the clock | ||
1445 | * forward. Again this only works if the interpolator clock is running | ||
1446 | * slightly slower than the regular clock and the tuning logic ensures | ||
1447 | * that. | ||
1448 | */ | ||
1449 | |||
1450 | counter = time_interpolator_get_counter(1); | ||
1451 | offset = time_interpolator->offset + | ||
1452 | GET_TI_NSECS(counter, time_interpolator); | ||
1453 | |||
1454 | if (delta_nsec < 0 || (unsigned long) delta_nsec < offset) | ||
1455 | time_interpolator->offset = offset - delta_nsec; | ||
1456 | else { | ||
1457 | time_interpolator->skips++; | ||
1458 | time_interpolator->ns_skipped += delta_nsec - offset; | ||
1459 | time_interpolator->offset = 0; | ||
1460 | } | ||
1461 | time_interpolator->last_counter = counter; | ||
1462 | |||
1463 | /* Tuning logic for time interpolator invoked every minute or so. | ||
1464 | * Decrease interpolator clock speed if no skips occurred and an offset is carried. | ||
1465 | * Increase interpolator clock speed if we skip too much time. | ||
1466 | */ | ||
1467 | if (jiffies % INTERPOLATOR_ADJUST == 0) | ||
1468 | { | ||
1469 | if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec) | ||
1470 | time_interpolator->nsec_per_cyc--; | ||
1471 | if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0) | ||
1472 | time_interpolator->nsec_per_cyc++; | ||
1473 | time_interpolator->skips = 0; | ||
1474 | time_interpolator->ns_skipped = 0; | ||
1475 | } | ||
1476 | } | ||
1477 | |||
1478 | static inline int | ||
1479 | is_better_time_interpolator(struct time_interpolator *new) | ||
1480 | { | ||
1481 | if (!time_interpolator) | ||
1482 | return 1; | ||
1483 | return new->frequency > 2*time_interpolator->frequency || | ||
1484 | (unsigned long)new->drift < (unsigned long)time_interpolator->drift; | ||
1485 | } | ||
1486 | |||
1487 | void | ||
1488 | register_time_interpolator(struct time_interpolator *ti) | ||
1489 | { | ||
1490 | unsigned long flags; | ||
1491 | |||
1492 | /* Sanity check */ | ||
1493 | BUG_ON(ti->frequency == 0 || ti->mask == 0); | ||
1494 | |||
1495 | ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency; | ||
1496 | spin_lock(&time_interpolator_lock); | ||
1497 | write_seqlock_irqsave(&xtime_lock, flags); | ||
1498 | if (is_better_time_interpolator(ti)) { | ||
1499 | time_interpolator = ti; | ||
1500 | time_interpolator_reset(); | ||
1501 | } | ||
1502 | write_sequnlock_irqrestore(&xtime_lock, flags); | ||
1503 | |||
1504 | ti->next = time_interpolator_list; | ||
1505 | time_interpolator_list = ti; | ||
1506 | spin_unlock(&time_interpolator_lock); | ||
1507 | } | ||
1508 | |||
1509 | void | ||
1510 | unregister_time_interpolator(struct time_interpolator *ti) | ||
1511 | { | ||
1512 | struct time_interpolator *curr, **prev; | ||
1513 | unsigned long flags; | ||
1514 | |||
1515 | spin_lock(&time_interpolator_lock); | ||
1516 | prev = &time_interpolator_list; | ||
1517 | for (curr = *prev; curr; curr = curr->next) { | ||
1518 | if (curr == ti) { | ||
1519 | *prev = curr->next; | ||
1520 | break; | ||
1521 | } | ||
1522 | prev = &curr->next; | ||
1523 | } | ||
1524 | |||
1525 | write_seqlock_irqsave(&xtime_lock, flags); | ||
1526 | if (ti == time_interpolator) { | ||
1527 | /* we lost the best time-interpolator: */ | ||
1528 | time_interpolator = NULL; | ||
1529 | /* find the next-best interpolator */ | ||
1530 | for (curr = time_interpolator_list; curr; curr = curr->next) | ||
1531 | if (is_better_time_interpolator(curr)) | ||
1532 | time_interpolator = curr; | ||
1533 | time_interpolator_reset(); | ||
1534 | } | ||
1535 | write_sequnlock_irqrestore(&xtime_lock, flags); | ||
1536 | spin_unlock(&time_interpolator_lock); | ||
1537 | } | ||
1538 | #endif /* CONFIG_TIME_INTERPOLATION */ | ||
1539 | |||
1540 | /** | 1352 | /** |
1541 | * msleep - sleep safely even with waitqueue interruptions | 1353 | * msleep - sleep safely even with waitqueue interruptions |
1542 | * @msecs: Time in milliseconds to sleep for | 1354 | * @msecs: Time in milliseconds to sleep for |
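The heart of the code removed from kernel/timer.c is the conversion in the GET_TI_NSECS() macro: the counter delta since the last tick is masked to the counter width, multiplied by the pre-scaled nsec_per_cyc ((NSEC_PER_SEC << shift) / frequency, as computed in register_time_interpolator()), and shifted back down. A standalone sketch of that fixed-point conversion, using a made-up 32-bit, 10 MHz counter and a wraparound case:

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		/* Hypothetical 32-bit free-running counter at 10 MHz. */
		uint64_t frequency = 10000000;
		uint64_t mask = 0xffffffffULL;
		unsigned int shift = 16;

		/* Same pre-scaling as register_time_interpolator(). */
		uint64_t nsec_per_cyc = (NSEC_PER_SEC << shift) / frequency;

		uint64_t last_counter = 0xfffffff0ULL;	/* just before wraparound */
		uint64_t now = 0x00000014ULL;		/* 36 cycles later, wrapped */

		/* Same arithmetic as the removed GET_TI_NSECS() macro:
		 * mask handles the wrap, the multiply/shift converts to ns.
		 */
		uint64_t nsecs = (((now - last_counter) & mask) * nsec_per_cyc) >> shift;

		printf("elapsed: %llu ns\n", (unsigned long long)nsecs);	/* 3600 ns */
		return 0;
	}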