diff options
Diffstat (limited to 'kernel/time')
| -rw-r--r-- | kernel/time/Makefile | 2 | ||||
| -rw-r--r-- | kernel/time/alarmtimer.c | 2 | ||||
| -rw-r--r-- | kernel/time/clocksource.c | 76 | ||||
| -rw-r--r-- | kernel/time/hrtimer.c | 116 | ||||
| -rw-r--r-- | kernel/time/ntp.c | 11 | ||||
| -rw-r--r-- | kernel/time/posix-cpu-timers.c | 3 | ||||
| -rw-r--r-- | kernel/time/tick-sched.c | 11 | ||||
| -rw-r--r-- | kernel/time/time.c | 4 | ||||
| -rw-r--r-- | kernel/time/timecounter.c | 112 | ||||
| -rw-r--r-- | kernel/time/timekeeping.c | 12 |
10 files changed, 190 insertions, 159 deletions
diff --git a/kernel/time/Makefile b/kernel/time/Makefile index f622cf28628a..c09c07817d7a 100644 --- a/kernel/time/Makefile +++ b/kernel/time/Makefile | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o | 1 | obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o |
| 2 | obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o | 2 | obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o |
| 3 | obj-y += timeconv.o posix-clock.o alarmtimer.o | 3 | obj-y += timeconv.o timecounter.o posix-clock.o alarmtimer.o |
| 4 | 4 | ||
| 5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o | 5 | obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o |
| 6 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o | 6 | obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o |
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c index a7077d3ae52f..1b001ed1edb9 100644 --- a/kernel/time/alarmtimer.c +++ b/kernel/time/alarmtimer.c | |||
| @@ -788,7 +788,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags, | |||
| 788 | goto out; | 788 | goto out; |
| 789 | } | 789 | } |
| 790 | 790 | ||
| 791 | restart = &current_thread_info()->restart_block; | 791 | restart = &current->restart_block; |
| 792 | restart->fn = alarm_timer_nsleep_restart; | 792 | restart->fn = alarm_timer_nsleep_restart; |
| 793 | restart->nanosleep.clockid = type; | 793 | restart->nanosleep.clockid = type; |
| 794 | restart->nanosleep.expires = exp.tv64; | 794 | restart->nanosleep.expires = exp.tv64; |
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index b79f39bda7e1..4892352f0e49 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
| @@ -34,82 +34,6 @@ | |||
| 34 | #include "tick-internal.h" | 34 | #include "tick-internal.h" |
| 35 | #include "timekeeping_internal.h" | 35 | #include "timekeeping_internal.h" |
| 36 | 36 | ||
| 37 | void timecounter_init(struct timecounter *tc, | ||
| 38 | const struct cyclecounter *cc, | ||
| 39 | u64 start_tstamp) | ||
| 40 | { | ||
| 41 | tc->cc = cc; | ||
| 42 | tc->cycle_last = cc->read(cc); | ||
| 43 | tc->nsec = start_tstamp; | ||
| 44 | } | ||
| 45 | EXPORT_SYMBOL_GPL(timecounter_init); | ||
| 46 | |||
| 47 | /** | ||
| 48 | * timecounter_read_delta - get nanoseconds since last call of this function | ||
| 49 | * @tc: Pointer to time counter | ||
| 50 | * | ||
| 51 | * When the underlying cycle counter runs over, this will be handled | ||
| 52 | * correctly as long as it does not run over more than once between | ||
| 53 | * calls. | ||
| 54 | * | ||
| 55 | * The first call to this function for a new time counter initializes | ||
| 56 | * the time tracking and returns an undefined result. | ||
| 57 | */ | ||
| 58 | static u64 timecounter_read_delta(struct timecounter *tc) | ||
| 59 | { | ||
| 60 | cycle_t cycle_now, cycle_delta; | ||
| 61 | u64 ns_offset; | ||
| 62 | |||
| 63 | /* read cycle counter: */ | ||
| 64 | cycle_now = tc->cc->read(tc->cc); | ||
| 65 | |||
| 66 | /* calculate the delta since the last timecounter_read_delta(): */ | ||
| 67 | cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask; | ||
| 68 | |||
| 69 | /* convert to nanoseconds: */ | ||
| 70 | ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta); | ||
| 71 | |||
| 72 | /* update time stamp of timecounter_read_delta() call: */ | ||
| 73 | tc->cycle_last = cycle_now; | ||
| 74 | |||
| 75 | return ns_offset; | ||
| 76 | } | ||
| 77 | |||
| 78 | u64 timecounter_read(struct timecounter *tc) | ||
| 79 | { | ||
| 80 | u64 nsec; | ||
| 81 | |||
| 82 | /* increment time by nanoseconds since last call */ | ||
| 83 | nsec = timecounter_read_delta(tc); | ||
| 84 | nsec += tc->nsec; | ||
| 85 | tc->nsec = nsec; | ||
| 86 | |||
| 87 | return nsec; | ||
| 88 | } | ||
| 89 | EXPORT_SYMBOL_GPL(timecounter_read); | ||
| 90 | |||
| 91 | u64 timecounter_cyc2time(struct timecounter *tc, | ||
| 92 | cycle_t cycle_tstamp) | ||
| 93 | { | ||
| 94 | u64 cycle_delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask; | ||
| 95 | u64 nsec; | ||
| 96 | |||
| 97 | /* | ||
| 98 | * Instead of always treating cycle_tstamp as more recent | ||
| 99 | * than tc->cycle_last, detect when it is too far in the | ||
| 100 | * future and treat it as old time stamp instead. | ||
| 101 | */ | ||
| 102 | if (cycle_delta > tc->cc->mask / 2) { | ||
| 103 | cycle_delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask; | ||
| 104 | nsec = tc->nsec - cyclecounter_cyc2ns(tc->cc, cycle_delta); | ||
| 105 | } else { | ||
| 106 | nsec = cyclecounter_cyc2ns(tc->cc, cycle_delta) + tc->nsec; | ||
| 107 | } | ||
| 108 | |||
| 109 | return nsec; | ||
| 110 | } | ||
| 111 | EXPORT_SYMBOL_GPL(timecounter_cyc2time); | ||
| 112 | |||
| 113 | /** | 37 | /** |
| 114 | * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks | 38 | * clocks_calc_mult_shift - calculate mult/shift factors for scaled math of clocks |
| 115 | * @mult: pointer to mult variable | 39 | * @mult: pointer to mult variable |
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index 37e50aadd471..bee0c1f78091 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c | |||
| @@ -122,7 +122,7 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) | |||
| 122 | mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai); | 122 | mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai); |
| 123 | boot = ktime_add(mono, off_boot); | 123 | boot = ktime_add(mono, off_boot); |
| 124 | xtim = ktime_add(mono, off_real); | 124 | xtim = ktime_add(mono, off_real); |
| 125 | tai = ktime_add(xtim, off_tai); | 125 | tai = ktime_add(mono, off_tai); |
| 126 | 126 | ||
| 127 | base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; | 127 | base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim; |
| 128 | base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; | 128 | base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono; |
| @@ -266,7 +266,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) | |||
| 266 | /* | 266 | /* |
| 267 | * Divide a ktime value by a nanosecond value | 267 | * Divide a ktime value by a nanosecond value |
| 268 | */ | 268 | */ |
| 269 | u64 ktime_divns(const ktime_t kt, s64 div) | 269 | u64 __ktime_divns(const ktime_t kt, s64 div) |
| 270 | { | 270 | { |
| 271 | u64 dclc; | 271 | u64 dclc; |
| 272 | int sft = 0; | 272 | int sft = 0; |
| @@ -282,7 +282,7 @@ u64 ktime_divns(const ktime_t kt, s64 div) | |||
| 282 | 282 | ||
| 283 | return dclc; | 283 | return dclc; |
| 284 | } | 284 | } |
| 285 | EXPORT_SYMBOL_GPL(ktime_divns); | 285 | EXPORT_SYMBOL_GPL(__ktime_divns); |
| 286 | #endif /* BITS_PER_LONG >= 64 */ | 286 | #endif /* BITS_PER_LONG >= 64 */ |
| 287 | 287 | ||
| 288 | /* | 288 | /* |
| @@ -440,6 +440,37 @@ static inline void debug_deactivate(struct hrtimer *timer) | |||
| 440 | trace_hrtimer_cancel(timer); | 440 | trace_hrtimer_cancel(timer); |
| 441 | } | 441 | } |
| 442 | 442 | ||
| 443 | #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS) | ||
| 444 | static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base) | ||
| 445 | { | ||
| 446 | struct hrtimer_clock_base *base = cpu_base->clock_base; | ||
| 447 | ktime_t expires, expires_next = { .tv64 = KTIME_MAX }; | ||
| 448 | int i; | ||
| 449 | |||
| 450 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { | ||
| 451 | struct timerqueue_node *next; | ||
| 452 | struct hrtimer *timer; | ||
| 453 | |||
| 454 | next = timerqueue_getnext(&base->active); | ||
| 455 | if (!next) | ||
| 456 | continue; | ||
| 457 | |||
| 458 | timer = container_of(next, struct hrtimer, node); | ||
| 459 | expires = ktime_sub(hrtimer_get_expires(timer), base->offset); | ||
| 460 | if (expires.tv64 < expires_next.tv64) | ||
| 461 | expires_next = expires; | ||
| 462 | } | ||
| 463 | /* | ||
| 464 | * clock_was_set() might have changed base->offset of any of | ||
| 465 | * the clock bases so the result might be negative. Fix it up | ||
| 466 | * to prevent a false positive in clockevents_program_event(). | ||
| 467 | */ | ||
| 468 | if (expires_next.tv64 < 0) | ||
| 469 | expires_next.tv64 = 0; | ||
| 470 | return expires_next; | ||
| 471 | } | ||
| 472 | #endif | ||
| 473 | |||
| 443 | /* High resolution timer related functions */ | 474 | /* High resolution timer related functions */ |
| 444 | #ifdef CONFIG_HIGH_RES_TIMERS | 475 | #ifdef CONFIG_HIGH_RES_TIMERS |
| 445 | 476 | ||
| @@ -488,32 +519,7 @@ static inline int hrtimer_hres_active(void) | |||
| 488 | static void | 519 | static void |
| 489 | hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) | 520 | hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal) |
| 490 | { | 521 | { |
| 491 | int i; | 522 | ktime_t expires_next = __hrtimer_get_next_event(cpu_base); |
| 492 | struct hrtimer_clock_base *base = cpu_base->clock_base; | ||
| 493 | ktime_t expires, expires_next; | ||
| 494 | |||
| 495 | expires_next.tv64 = KTIME_MAX; | ||
| 496 | |||
| 497 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { | ||
| 498 | struct hrtimer *timer; | ||
| 499 | struct timerqueue_node *next; | ||
| 500 | |||
| 501 | next = timerqueue_getnext(&base->active); | ||
| 502 | if (!next) | ||
| 503 | continue; | ||
| 504 | timer = container_of(next, struct hrtimer, node); | ||
| 505 | |||
| 506 | expires = ktime_sub(hrtimer_get_expires(timer), base->offset); | ||
| 507 | /* | ||
| 508 | * clock_was_set() has changed base->offset so the | ||
| 509 | * result might be negative. Fix it up to prevent a | ||
| 510 | * false positive in clockevents_program_event() | ||
| 511 | */ | ||
| 512 | if (expires.tv64 < 0) | ||
| 513 | expires.tv64 = 0; | ||
| 514 | if (expires.tv64 < expires_next.tv64) | ||
| 515 | expires_next = expires; | ||
| 516 | } | ||
| 517 | 523 | ||
| 518 | if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64) | 524 | if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64) |
| 519 | return; | 525 | return; |
| @@ -587,6 +593,15 @@ static int hrtimer_reprogram(struct hrtimer *timer, | |||
| 587 | return 0; | 593 | return 0; |
| 588 | 594 | ||
| 589 | /* | 595 | /* |
| 596 | * When the target cpu of the timer is currently executing | ||
| 597 | * hrtimer_interrupt(), then we do not touch the clock event | ||
| 598 | * device. hrtimer_interrupt() will reevaluate all clock bases | ||
| 599 | * before reprogramming the device. | ||
| 600 | */ | ||
| 601 | if (cpu_base->in_hrtirq) | ||
| 602 | return 0; | ||
| 603 | |||
| 604 | /* | ||
| 590 | * If a hang was detected in the last timer interrupt then we | 605 | * If a hang was detected in the last timer interrupt then we |
| 591 | * do not schedule a timer which is earlier than the expiry | 606 | * do not schedule a timer which is earlier than the expiry |
| 592 | * which we enforced in the hang detection. We want the system | 607 | * which we enforced in the hang detection. We want the system |
| @@ -1104,29 +1119,14 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining); | |||
| 1104 | ktime_t hrtimer_get_next_event(void) | 1119 | ktime_t hrtimer_get_next_event(void) |
| 1105 | { | 1120 | { |
| 1106 | struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); | 1121 | struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases); |
| 1107 | struct hrtimer_clock_base *base = cpu_base->clock_base; | 1122 | ktime_t mindelta = { .tv64 = KTIME_MAX }; |
| 1108 | ktime_t delta, mindelta = { .tv64 = KTIME_MAX }; | ||
| 1109 | unsigned long flags; | 1123 | unsigned long flags; |
| 1110 | int i; | ||
| 1111 | 1124 | ||
| 1112 | raw_spin_lock_irqsave(&cpu_base->lock, flags); | 1125 | raw_spin_lock_irqsave(&cpu_base->lock, flags); |
| 1113 | 1126 | ||
| 1114 | if (!hrtimer_hres_active()) { | 1127 | if (!hrtimer_hres_active()) |
| 1115 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) { | 1128 | mindelta = ktime_sub(__hrtimer_get_next_event(cpu_base), |
| 1116 | struct hrtimer *timer; | 1129 | ktime_get()); |
| 1117 | struct timerqueue_node *next; | ||
| 1118 | |||
| 1119 | next = timerqueue_getnext(&base->active); | ||
| 1120 | if (!next) | ||
| 1121 | continue; | ||
| 1122 | |||
| 1123 | timer = container_of(next, struct hrtimer, node); | ||
| 1124 | delta.tv64 = hrtimer_get_expires_tv64(timer); | ||
| 1125 | delta = ktime_sub(delta, base->get_time()); | ||
| 1126 | if (delta.tv64 < mindelta.tv64) | ||
| 1127 | mindelta.tv64 = delta.tv64; | ||
| 1128 | } | ||
| 1129 | } | ||
| 1130 | 1130 | ||
| 1131 | raw_spin_unlock_irqrestore(&cpu_base->lock, flags); | 1131 | raw_spin_unlock_irqrestore(&cpu_base->lock, flags); |
| 1132 | 1132 | ||
| @@ -1253,7 +1253,7 @@ void hrtimer_interrupt(struct clock_event_device *dev) | |||
| 1253 | raw_spin_lock(&cpu_base->lock); | 1253 | raw_spin_lock(&cpu_base->lock); |
| 1254 | entry_time = now = hrtimer_update_base(cpu_base); | 1254 | entry_time = now = hrtimer_update_base(cpu_base); |
| 1255 | retry: | 1255 | retry: |
| 1256 | expires_next.tv64 = KTIME_MAX; | 1256 | cpu_base->in_hrtirq = 1; |
| 1257 | /* | 1257 | /* |
| 1258 | * We set expires_next to KTIME_MAX here with cpu_base->lock | 1258 | * We set expires_next to KTIME_MAX here with cpu_base->lock |
| 1259 | * held to prevent that a timer is enqueued in our queue via | 1259 | * held to prevent that a timer is enqueued in our queue via |
| @@ -1291,28 +1291,20 @@ retry: | |||
| 1291 | * are right-of a not yet expired timer, because that | 1291 | * are right-of a not yet expired timer, because that |
| 1292 | * timer will have to trigger a wakeup anyway. | 1292 | * timer will have to trigger a wakeup anyway. |
| 1293 | */ | 1293 | */ |
| 1294 | 1294 | if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) | |
| 1295 | if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) { | ||
| 1296 | ktime_t expires; | ||
| 1297 | |||
| 1298 | expires = ktime_sub(hrtimer_get_expires(timer), | ||
| 1299 | base->offset); | ||
| 1300 | if (expires.tv64 < 0) | ||
| 1301 | expires.tv64 = KTIME_MAX; | ||
| 1302 | if (expires.tv64 < expires_next.tv64) | ||
| 1303 | expires_next = expires; | ||
| 1304 | break; | 1295 | break; |
| 1305 | } | ||
| 1306 | 1296 | ||
| 1307 | __run_hrtimer(timer, &basenow); | 1297 | __run_hrtimer(timer, &basenow); |
| 1308 | } | 1298 | } |
| 1309 | } | 1299 | } |
| 1310 | 1300 | /* Reevaluate the clock bases for the next expiry */ | |
| 1301 | expires_next = __hrtimer_get_next_event(cpu_base); | ||
| 1311 | /* | 1302 | /* |
| 1312 | * Store the new expiry value so the migration code can verify | 1303 | * Store the new expiry value so the migration code can verify |
| 1313 | * against it. | 1304 | * against it. |
| 1314 | */ | 1305 | */ |
| 1315 | cpu_base->expires_next = expires_next; | 1306 | cpu_base->expires_next = expires_next; |
| 1307 | cpu_base->in_hrtirq = 0; | ||
| 1316 | raw_spin_unlock(&cpu_base->lock); | 1308 | raw_spin_unlock(&cpu_base->lock); |
| 1317 | 1309 | ||
| 1318 | /* Reprogramming necessary ? */ | 1310 | /* Reprogramming necessary ? */ |
| @@ -1591,7 +1583,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, | |||
| 1591 | goto out; | 1583 | goto out; |
| 1592 | } | 1584 | } |
| 1593 | 1585 | ||
| 1594 | restart = &current_thread_info()->restart_block; | 1586 | restart = &current->restart_block; |
| 1595 | restart->fn = hrtimer_nanosleep_restart; | 1587 | restart->fn = hrtimer_nanosleep_restart; |
| 1596 | restart->nanosleep.clockid = t.timer.base->clockid; | 1588 | restart->nanosleep.clockid = t.timer.base->clockid; |
| 1597 | restart->nanosleep.rmtp = rmtp; | 1589 | restart->nanosleep.rmtp = rmtp; |
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 87a346fd6d61..4b585e0fdd22 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c | |||
| @@ -488,13 +488,13 @@ static void sync_cmos_clock(struct work_struct *work) | |||
| 488 | 488 | ||
| 489 | getnstimeofday64(&now); | 489 | getnstimeofday64(&now); |
| 490 | if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) { | 490 | if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) { |
| 491 | struct timespec adjust = timespec64_to_timespec(now); | 491 | struct timespec64 adjust = now; |
| 492 | 492 | ||
| 493 | fail = -ENODEV; | 493 | fail = -ENODEV; |
| 494 | if (persistent_clock_is_local) | 494 | if (persistent_clock_is_local) |
| 495 | adjust.tv_sec -= (sys_tz.tz_minuteswest * 60); | 495 | adjust.tv_sec -= (sys_tz.tz_minuteswest * 60); |
| 496 | #ifdef CONFIG_GENERIC_CMOS_UPDATE | 496 | #ifdef CONFIG_GENERIC_CMOS_UPDATE |
| 497 | fail = update_persistent_clock(adjust); | 497 | fail = update_persistent_clock(timespec64_to_timespec(adjust)); |
| 498 | #endif | 498 | #endif |
| 499 | #ifdef CONFIG_RTC_SYSTOHC | 499 | #ifdef CONFIG_RTC_SYSTOHC |
| 500 | if (fail == -ENODEV) | 500 | if (fail == -ENODEV) |
| @@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc) | |||
| 633 | if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME))) | 633 | if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME))) |
| 634 | return -EPERM; | 634 | return -EPERM; |
| 635 | 635 | ||
| 636 | if (txc->modes & ADJ_FREQUENCY) { | ||
| 637 | if (LONG_MIN / PPM_SCALE > txc->freq) | ||
| 638 | return -EINVAL; | ||
| 639 | if (LONG_MAX / PPM_SCALE < txc->freq) | ||
| 640 | return -EINVAL; | ||
| 641 | } | ||
| 642 | |||
| 636 | return 0; | 643 | return 0; |
| 637 | } | 644 | } |
| 638 | 645 | ||
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index a16b67859e2a..0075da74abf0 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c | |||
| @@ -1334,8 +1334,7 @@ static long posix_cpu_nsleep_restart(struct restart_block *restart_block); | |||
| 1334 | static int posix_cpu_nsleep(const clockid_t which_clock, int flags, | 1334 | static int posix_cpu_nsleep(const clockid_t which_clock, int flags, |
| 1335 | struct timespec *rqtp, struct timespec __user *rmtp) | 1335 | struct timespec *rqtp, struct timespec __user *rmtp) |
| 1336 | { | 1336 | { |
| 1337 | struct restart_block *restart_block = | 1337 | struct restart_block *restart_block = &current->restart_block; |
| 1338 | &current_thread_info()->restart_block; | ||
| 1339 | struct itimerspec it; | 1338 | struct itimerspec it; |
| 1340 | int error; | 1339 | int error; |
| 1341 | 1340 | ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 1363d58f07e9..a4c4edac4528 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
| @@ -326,13 +326,6 @@ static int tick_nohz_cpu_down_callback(struct notifier_block *nfb, | |||
| 326 | return NOTIFY_OK; | 326 | return NOTIFY_OK; |
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | /* | ||
| 330 | * Worst case string length in chunks of CPU range seems 2 steps | ||
| 331 | * separations: 0,2,4,6,... | ||
| 332 | * This is NR_CPUS + sizeof('\0') | ||
| 333 | */ | ||
| 334 | static char __initdata nohz_full_buf[NR_CPUS + 1]; | ||
| 335 | |||
| 336 | static int tick_nohz_init_all(void) | 329 | static int tick_nohz_init_all(void) |
| 337 | { | 330 | { |
| 338 | int err = -1; | 331 | int err = -1; |
| @@ -393,8 +386,8 @@ void __init tick_nohz_init(void) | |||
| 393 | context_tracking_cpu_set(cpu); | 386 | context_tracking_cpu_set(cpu); |
| 394 | 387 | ||
| 395 | cpu_notifier(tick_nohz_cpu_down_callback, 0); | 388 | cpu_notifier(tick_nohz_cpu_down_callback, 0); |
| 396 | cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), tick_nohz_full_mask); | 389 | pr_info("NO_HZ: Full dynticks CPUs: %*pbl.\n", |
| 397 | pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf); | 390 | cpumask_pr_args(tick_nohz_full_mask)); |
| 398 | } | 391 | } |
| 399 | #endif | 392 | #endif |
| 400 | 393 | ||
diff --git a/kernel/time/time.c b/kernel/time/time.c index 6390517e77d4..2c85b7724af4 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c | |||
| @@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv, | |||
| 196 | if (tv) { | 196 | if (tv) { |
| 197 | if (copy_from_user(&user_tv, tv, sizeof(*tv))) | 197 | if (copy_from_user(&user_tv, tv, sizeof(*tv))) |
| 198 | return -EFAULT; | 198 | return -EFAULT; |
| 199 | |||
| 200 | if (!timeval_valid(&user_tv)) | ||
| 201 | return -EINVAL; | ||
| 202 | |||
| 199 | new_ts.tv_sec = user_tv.tv_sec; | 203 | new_ts.tv_sec = user_tv.tv_sec; |
| 200 | new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC; | 204 | new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC; |
| 201 | } | 205 | } |
diff --git a/kernel/time/timecounter.c b/kernel/time/timecounter.c new file mode 100644 index 000000000000..4687b3104bae --- /dev/null +++ b/kernel/time/timecounter.c | |||
| @@ -0,0 +1,112 @@ | |||
| 1 | /* | ||
| 2 | * linux/kernel/time/timecounter.c | ||
| 3 | * | ||
| 4 | * based on code that migrated away from | ||
| 5 | * linux/kernel/time/clocksource.c | ||
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License as published by | ||
| 9 | * the Free Software Foundation; either version 2 of the License, or | ||
| 10 | * (at your option) any later version. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/export.h> | ||
| 19 | #include <linux/timecounter.h> | ||
| 20 | |||
| 21 | void timecounter_init(struct timecounter *tc, | ||
| 22 | const struct cyclecounter *cc, | ||
| 23 | u64 start_tstamp) | ||
| 24 | { | ||
| 25 | tc->cc = cc; | ||
| 26 | tc->cycle_last = cc->read(cc); | ||
| 27 | tc->nsec = start_tstamp; | ||
| 28 | tc->mask = (1ULL << cc->shift) - 1; | ||
| 29 | tc->frac = 0; | ||
| 30 | } | ||
| 31 | EXPORT_SYMBOL_GPL(timecounter_init); | ||
| 32 | |||
| 33 | /** | ||
| 34 | * timecounter_read_delta - get nanoseconds since last call of this function | ||
| 35 | * @tc: Pointer to time counter | ||
| 36 | * | ||
| 37 | * When the underlying cycle counter runs over, this will be handled | ||
| 38 | * correctly as long as it does not run over more than once between | ||
| 39 | * calls. | ||
| 40 | * | ||
| 41 | * The first call to this function for a new time counter initializes | ||
| 42 | * the time tracking and returns an undefined result. | ||
| 43 | */ | ||
| 44 | static u64 timecounter_read_delta(struct timecounter *tc) | ||
| 45 | { | ||
| 46 | cycle_t cycle_now, cycle_delta; | ||
| 47 | u64 ns_offset; | ||
| 48 | |||
| 49 | /* read cycle counter: */ | ||
| 50 | cycle_now = tc->cc->read(tc->cc); | ||
| 51 | |||
| 52 | /* calculate the delta since the last timecounter_read_delta(): */ | ||
| 53 | cycle_delta = (cycle_now - tc->cycle_last) & tc->cc->mask; | ||
| 54 | |||
| 55 | /* convert to nanoseconds: */ | ||
| 56 | ns_offset = cyclecounter_cyc2ns(tc->cc, cycle_delta, | ||
| 57 | tc->mask, &tc->frac); | ||
| 58 | |||
| 59 | /* update time stamp of timecounter_read_delta() call: */ | ||
| 60 | tc->cycle_last = cycle_now; | ||
| 61 | |||
| 62 | return ns_offset; | ||
| 63 | } | ||
| 64 | |||
| 65 | u64 timecounter_read(struct timecounter *tc) | ||
| 66 | { | ||
| 67 | u64 nsec; | ||
| 68 | |||
| 69 | /* increment time by nanoseconds since last call */ | ||
| 70 | nsec = timecounter_read_delta(tc); | ||
| 71 | nsec += tc->nsec; | ||
| 72 | tc->nsec = nsec; | ||
| 73 | |||
| 74 | return nsec; | ||
| 75 | } | ||
| 76 | EXPORT_SYMBOL_GPL(timecounter_read); | ||
| 77 | |||
| 78 | /* | ||
| 79 | * This is like cyclecounter_cyc2ns(), but it is used for computing a | ||
| 80 | * time previous to the time stored in the cycle counter. | ||
| 81 | */ | ||
| 82 | static u64 cc_cyc2ns_backwards(const struct cyclecounter *cc, | ||
| 83 | cycle_t cycles, u64 mask, u64 frac) | ||
| 84 | { | ||
| 85 | u64 ns = (u64) cycles; | ||
| 86 | |||
| 87 | ns = ((ns * cc->mult) - frac) >> cc->shift; | ||
| 88 | |||
| 89 | return ns; | ||
| 90 | } | ||
| 91 | |||
| 92 | u64 timecounter_cyc2time(struct timecounter *tc, | ||
| 93 | cycle_t cycle_tstamp) | ||
| 94 | { | ||
| 95 | u64 delta = (cycle_tstamp - tc->cycle_last) & tc->cc->mask; | ||
| 96 | u64 nsec = tc->nsec, frac = tc->frac; | ||
| 97 | |||
| 98 | /* | ||
| 99 | * Instead of always treating cycle_tstamp as more recent | ||
| 100 | * than tc->cycle_last, detect when it is too far in the | ||
| 101 | * future and treat it as old time stamp instead. | ||
| 102 | */ | ||
| 103 | if (delta > tc->cc->mask / 2) { | ||
| 104 | delta = (tc->cycle_last - cycle_tstamp) & tc->cc->mask; | ||
| 105 | nsec -= cc_cyc2ns_backwards(tc->cc, delta, tc->mask, frac); | ||
| 106 | } else { | ||
| 107 | nsec += cyclecounter_cyc2ns(tc->cc, delta, tc->mask, &frac); | ||
| 108 | } | ||
| 109 | |||
| 110 | return nsec; | ||
| 111 | } | ||
| 112 | EXPORT_SYMBOL_GPL(timecounter_cyc2time); | ||
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 6a931852082f..b124af259800 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
| @@ -1659,24 +1659,24 @@ out: | |||
| 1659 | } | 1659 | } |
| 1660 | 1660 | ||
| 1661 | /** | 1661 | /** |
| 1662 | * getboottime - Return the real time of system boot. | 1662 | * getboottime64 - Return the real time of system boot. |
| 1663 | * @ts: pointer to the timespec to be set | 1663 | * @ts: pointer to the timespec64 to be set |
| 1664 | * | 1664 | * |
| 1665 | * Returns the wall-time of boot in a timespec. | 1665 | * Returns the wall-time of boot in a timespec64. |
| 1666 | * | 1666 | * |
| 1667 | * This is based on the wall_to_monotonic offset and the total suspend | 1667 | * This is based on the wall_to_monotonic offset and the total suspend |
| 1668 | * time. Calls to settimeofday will affect the value returned (which | 1668 | * time. Calls to settimeofday will affect the value returned (which |
| 1669 | * basically means that however wrong your real time clock is at boot time, | 1669 | * basically means that however wrong your real time clock is at boot time, |
| 1670 | * you get the right time here). | 1670 | * you get the right time here). |
| 1671 | */ | 1671 | */ |
| 1672 | void getboottime(struct timespec *ts) | 1672 | void getboottime64(struct timespec64 *ts) |
| 1673 | { | 1673 | { |
| 1674 | struct timekeeper *tk = &tk_core.timekeeper; | 1674 | struct timekeeper *tk = &tk_core.timekeeper; |
| 1675 | ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot); | 1675 | ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot); |
| 1676 | 1676 | ||
| 1677 | *ts = ktime_to_timespec(t); | 1677 | *ts = ktime_to_timespec64(t); |
| 1678 | } | 1678 | } |
| 1679 | EXPORT_SYMBOL_GPL(getboottime); | 1679 | EXPORT_SYMBOL_GPL(getboottime64); |
| 1680 | 1680 | ||
| 1681 | unsigned long get_seconds(void) | 1681 | unsigned long get_seconds(void) |
| 1682 | { | 1682 | { |
