about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author	Tony Luck <tony.luck@intel.com>	2017-10-31 13:43:39 -0400
committer	Tony Luck <tony.luck@intel.com>	2017-10-31 13:58:36 -0400
commit	d4d1fc61eb38ff8e5af657e2d2f2290859a277f2 (patch)
tree	7ea81dfb26bb9efa59638d7985b8cb49efd6d8bd
parent	0b07194bb55ed836c2cc7c22e866b87a14681984 (diff)
ia64: Update fsyscall gettime to use modern vsyscall_update
John Stultz provided the outline for this patch back in May 2014 here:

  http://patches.linaro.org/patch/30501/

but I let this sit on the shelf for too long and in the intervening
years almost every field in "struct timekeeper" was changed. So this
is almost completely different from his original. Though the key
change in arch/ia64/kernel/fsys.S remains the same.

The core logic change with the updated vsyscall method is that we
preserve the base nanosecond value in shifted nanoseconds, which
allows us to avoid truncating and rounding up to the next nanosecond
every tick to avoid inconsistencies. Thus the logic moved from

	nsec = ((cycle_delta * mult) >> shift) + base_nsec;

to

	nsec = ((cycle_delta * mult) + base_snsec) >> shift;

Cc: John Stultz <john.stultz@linaro.org>
Cc: linux-ia64@vger.kernel.org
Signed-off-by: Tony Luck <tony.luck@intel.com>
-rw-r--r--	arch/ia64/Kconfig                       |  2
-rw-r--r--	arch/ia64/kernel/asm-offsets.c          |  2
-rw-r--r--	arch/ia64/kernel/fsys.S                 |  8
-rw-r--r--	arch/ia64/kernel/fsyscall_gtod_data.h   | 10
-rw-r--r--	arch/ia64/kernel/time.c                 | 40
5 files changed, 36 insertions, 26 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 6a15083cc366..4d032e7f1637 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -46,7 +46,7 @@ config IA64
 	select ARCH_TASK_STRUCT_ALLOCATOR
 	select ARCH_THREAD_STACK_ALLOCATOR
 	select ARCH_CLOCKSOURCE_DATA
-	select GENERIC_TIME_VSYSCALL_OLD
+	select GENERIC_TIME_VSYSCALL
 	select SYSCTL_ARCH_UNALIGN_NO_WARN
 	select HAVE_MOD_ARCH_SPECIFIC
 	select MODULES_USE_ELF_RELA
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 798bdb209d00..c5eecf3e76fc 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -211,6 +211,8 @@ void foo(void)
 	BLANK();
 	DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET,
	       offsetof (struct timespec, tv_nsec));
+	DEFINE(IA64_TIME_SN_SPEC_SNSEC_OFFSET,
+	       offsetof (struct time_sn_spec, snsec));
 
 	DEFINE(CLONE_SETTLS_BIT, 19);
 #if CLONE_SETTLS != (1<<19)
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index edbf7af95849..0d3c7abb31a8 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -235,9 +235,9 @@ ENTRY(fsys_gettimeofday)
 	MOV_FROM_ITC(p8, p6, r2, r10)	// CPU_TIMER. 36 clocks latency!!!
 (p9)	ld8 r2 = [r30]		// MMIO_TIMER. Could also have latency issues..
 (p13)	ld8 r25 = [r19]		// get itc_lastcycle value
-	ld8 r9 = [r22],IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_sec
+	ld8 r9 = [r22],IA64_TIME_SN_SPEC_SNSEC_OFFSET	// sec
 	;;
-	ld8 r8 = [r22],-IA64_TIMESPEC_TV_NSEC_OFFSET	// tv_nsec
+	ld8 r8 = [r22],-IA64_TIME_SN_SPEC_SNSEC_OFFSET	// snsec
 (p13)	sub r3 = r25,r2		// Diff needed before comparison (thanks davidm)
 	;;
 (p13)	cmp.gt.unc p6,p7 = r3,r0	// check if it is less than last. p6,p7 cleared
@@ -265,9 +265,9 @@ EX(.fail_efault, probe.w.fault r31, 3)
 	mf
 	;;
 	ld4 r10 = [r20]		// gtod_lock.sequence
-	shr.u r2 = r2,r23	// shift by factor
-	;;
 	add r8 = r8,r2		// Add xtime.nsecs
+	;;
+	shr.u r8 = r8,r23	// shift by factor
 	cmp4.ne p7,p0 = r28,r10
 (p7)	br.cond.dpnt.few .time_redo	// sequence number changed, redo
 	// End critical section.
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
index dcc514917731..28363bfc9f57 100644
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -5,10 +5,16 @@
  * fsyscall gettimeofday data
  */
 
+/* like timespec, but includes "shifted nanoseconds" */
+struct time_sn_spec {
+	u64	sec;
+	u64	snsec;
+};
+
 struct fsyscall_gtod_data_t {
 	seqcount_t	seq;
-	struct timespec	wall_time;
-	struct timespec monotonic_time;
+	struct time_sn_spec wall_time;
+	struct time_sn_spec monotonic_time;
 	u64		clk_mask;
 	u32		clk_mult;
 	u32		clk_shift;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index aa7be020a904..c6ecb97151a2 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -430,30 +430,32 @@ void update_vsyscall_tz(void)
 {
 }
 
-void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
-			 struct clocksource *c, u32 mult, u64 cycle_last)
+void update_vsyscall(struct timekeeper *tk)
 {
 	write_seqcount_begin(&fsyscall_gtod_data.seq);
 
-	/* copy fsyscall clock data */
-	fsyscall_gtod_data.clk_mask = c->mask;
-	fsyscall_gtod_data.clk_mult = mult;
-	fsyscall_gtod_data.clk_shift = c->shift;
-	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
-	fsyscall_gtod_data.clk_cycle_last = cycle_last;
+	/* copy vsyscall data */
+	fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
+	fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
+	fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
+	fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
+	fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;
 
-	/* copy kernel time structures */
-	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
-	fsyscall_gtod_data.wall_time.tv_nsec = wall->tv_nsec;
-	fsyscall_gtod_data.monotonic_time.tv_sec = wtm->tv_sec
-							+ wall->tv_sec;
-	fsyscall_gtod_data.monotonic_time.tv_nsec = wtm->tv_nsec
-							+ wall->tv_nsec;
+	fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
+	fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;
+
+	fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
+					+ tk->wall_to_monotonic.tv_sec;
+	fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
+					+ ((u64)tk->wall_to_monotonic.tv_nsec
+					   << tk->tkr_mono.shift);
 
 	/* normalize */
-	while (fsyscall_gtod_data.monotonic_time.tv_nsec >= NSEC_PER_SEC) {
-		fsyscall_gtod_data.monotonic_time.tv_nsec -= NSEC_PER_SEC;
-		fsyscall_gtod_data.monotonic_time.tv_sec++;
+	while (fsyscall_gtod_data.monotonic_time.snsec >=
+			(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+		fsyscall_gtod_data.monotonic_time.snsec -=
+			((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
+		fsyscall_gtod_data.monotonic_time.sec++;
 	}
 
 	write_seqcount_end(&fsyscall_gtod_data.seq);