Diffstat (limited to 'kernel')
-rw-r--r--  kernel/compat.c            |   3
-rw-r--r--  kernel/kexec.c             |   2
-rw-r--r--  kernel/posix-cpu-timers.c  |  11
-rw-r--r--  kernel/sched.c             |   6
-rw-r--r--  kernel/sched_debug.c       |   4
-rw-r--r--  kernel/softirq.c           |  20
-rw-r--r--  kernel/time.c              |  54
-rw-r--r--  kernel/time/ntp.c          | 398
-rw-r--r--  kernel/time/timekeeping.c  |  17
-rw-r--r--  kernel/workqueue.c         |   6
10 files changed, 277 insertions(+), 244 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index 4a856a3643bb..32c254a8ab9a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -955,7 +955,8 @@ asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp)
 	    __put_user(txc.jitcnt, &utp->jitcnt) ||
 	    __put_user(txc.calcnt, &utp->calcnt) ||
 	    __put_user(txc.errcnt, &utp->errcnt) ||
-	    __put_user(txc.stbcnt, &utp->stbcnt))
+	    __put_user(txc.stbcnt, &utp->stbcnt) ||
+	    __put_user(txc.tai, &utp->tai))
 		ret = -EFAULT;
 
 	return ret;
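
Note: the hunk above makes the 32-bit compat path copy out the new timex.tai member (the TAI-UTC offset introduced elsewhere in this series) alongside the existing status counters. A minimal userspace sketch of reading that field, assuming a libc whose struct timex already carries tai:

#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { .modes = 0 };	/* modes == 0: read-only query */

	if (adjtimex(&tx) == -1) {
		perror("adjtimex");
		return 1;
	}
	/* tai stays 0 until e.g. ntpd sets it via the new ADJ_TAI mode */
	printf("TAI - UTC = %ld s\n", (long)tx.tai);
	return 0;
}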
diff --git a/kernel/kexec.c b/kernel/kexec.c
index cb85c79989b4..1c5fcacbcf33 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1217,7 +1217,7 @@ static int __init parse_crashkernel_mem(char *cmdline,
 	}
 
 	/* match ? */
-	if (system_ram >= start && system_ram <= end) {
+	if (system_ram >= start && system_ram < end) {
 		*crash_size = size;
 		break;
 	}
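
Note: the fix above turns the crashkernel range match from a closed interval [start, end] into a half-open [start, end), so adjacent ranges no longer both match at a shared boundary. An illustrative sketch in plain C (not kernel code; the ranges are invented):

#include <stdio.h>

struct range { unsigned long long start, end, size; };

/* models crashkernel=0-512M:64M,512M-1G:128M */
static const struct range ranges[] = {
	{ 0ULL,          512ULL << 20,  64ULL << 20 },
	{ 512ULL << 20, 1024ULL << 20, 128ULL << 20 },
};

static unsigned long long pick_crash_size(unsigned long long system_ram)
{
	unsigned int i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
		if (system_ram >= ranges[i].start && system_ram < ranges[i].end)
			return ranges[i].size;	/* half-open: exactly one match */
	return 0;
}

int main(void)
{
	/* exactly 512M of RAM selects only the second range */
	printf("%llu MiB\n", pick_crash_size(512ULL << 20) >> 20);
	return 0;
}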
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index ae5c6c147c4b..f1525ad06cb3 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -4,8 +4,9 @@
 
 #include <linux/sched.h>
 #include <linux/posix-timers.h>
-#include <asm/uaccess.h>
 #include <linux/errno.h>
+#include <linux/math64.h>
+#include <asm/uaccess.h>
 
 static int check_clock(const clockid_t which_clock)
 {
@@ -47,12 +48,10 @@ static void sample_to_timespec(const clockid_t which_clock,
 			       union cpu_time_count cpu,
 			       struct timespec *tp)
 {
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		tp->tv_sec = div_long_long_rem(cpu.sched,
-					       NSEC_PER_SEC, &tp->tv_nsec);
-	} else {
+	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
+		*tp = ns_to_timespec(cpu.sched);
+	else
 		cputime_to_timespec(cpu.cpu, tp);
-	}
 }
 
 static inline int cpu_time_before(const clockid_t which_clock,
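
Note: the open-coded div_long_long_rem() is replaced by the generic ns_to_timespec() helper, hence the new linux/math64.h include. A self-contained sketch of the conversion it performs, including the normalization that keeps tv_nsec non-negative (what div_s64_rem() callers must do for negative inputs):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000

struct ts { int64_t tv_sec; long tv_nsec; };

static struct ts ns_to_ts(int64_t nsec)		/* models ns_to_timespec() */
{
	struct ts t;
	int32_t rem;

	t.tv_sec = nsec / NSEC_PER_SEC;
	rem = nsec % NSEC_PER_SEC;
	if (rem < 0) {				/* keep tv_nsec in [0, 1e9) */
		t.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	t.tv_nsec = rem;
	return t;
}

int main(void)
{
	struct ts t = ns_to_ts(-1);		/* -> { -1, 999999999 } */

	printf("%lld s, %ld ns\n", (long long)t.tv_sec, t.tv_nsec);
	return 0;
}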
diff --git a/kernel/sched.c b/kernel/sched.c
index e2f7f5acc807..34bcc5bc120e 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -8025,7 +8025,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 
 	se->my_q = cfs_rq;
 	se->load.weight = tg->shares;
-	se->load.inv_weight = div64_64(1ULL<<32, se->load.weight);
+	se->load.inv_weight = div64_u64(1ULL<<32, se->load.weight);
 	se->parent = parent;
 }
 #endif
@@ -8692,7 +8692,7 @@ static void __set_se_shares(struct sched_entity *se, unsigned long shares)
 		dequeue_entity(cfs_rq, se, 0);
 
 	se->load.weight = shares;
-	se->load.inv_weight = div64_64((1ULL<<32), shares);
+	se->load.inv_weight = div64_u64((1ULL<<32), shares);
 
 	if (on_rq)
 		enqueue_entity(cfs_rq, se, 0);
@@ -8787,7 +8787,7 @@ static unsigned long to_ratio(u64 period, u64 runtime)
 	if (runtime == RUNTIME_INF)
 		return 1ULL << 16;
 
-	return div64_64(runtime << 16, period);
+	return div64_u64(runtime << 16, period);
 }
 
 #ifdef CONFIG_CGROUP_SCHED
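
Note: div64_64() is renamed div64_u64() as part of this series' math64.h consolidation. The value computed here is a fixed-point reciprocal, inv_weight = 2^32 / weight, which lets later scheduler code divide by an entity's weight using only a multiply and a shift. A userspace sketch of that arithmetic with illustrative values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t weight = 1024;				/* e.g. the nice-0 load */
	uint64_t inv_weight = (1ULL << 32) / weight;	/* what div64_u64() yields */
	uint64_t delta = 6000000;			/* ns of runtime */

	/* delta / weight, computed as a multiply plus a 32-bit shift */
	uint64_t scaled = (delta * inv_weight) >> 32;

	printf("scaled = %llu (exact %llu)\n",
	       (unsigned long long)scaled,
	       (unsigned long long)(delta / weight));
	return 0;
}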
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 8a9498e7c831..6b4a12558e88 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -357,8 +357,8 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
 	avg_per_cpu = p->se.sum_exec_runtime;
 	if (p->se.nr_migrations) {
-		avg_per_cpu = div64_64(avg_per_cpu,
-				       p->se.nr_migrations);
+		avg_per_cpu = div64_u64(avg_per_cpu,
+					p->se.nr_migrations);
 	} else {
 		avg_per_cpu = -1LL;
 	}
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 3c44956ee7e2..36e061740047 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -589,16 +589,20 @@ static void takeover_tasklets(unsigned int cpu)
 	local_irq_disable();
 
 	/* Find end, append list for that CPU. */
-	*__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).head;
-	__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
-	per_cpu(tasklet_vec, cpu).head = NULL;
-	per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
+	if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
+		*(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
+		__get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
+		per_cpu(tasklet_vec, cpu).head = NULL;
+		per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
+	}
 	raise_softirq_irqoff(TASKLET_SOFTIRQ);
 
-	*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
-	__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
-	per_cpu(tasklet_hi_vec, cpu).head = NULL;
-	per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
+	if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
+		*__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
+		__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
+		per_cpu(tasklet_hi_vec, cpu).head = NULL;
+		per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
+	}
 	raise_softirq_irqoff(HI_SOFTIRQ);
 
 	local_irq_enable();
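
Note: the new emptiness checks matter because these are tail-pointer lists: an empty list's tail points at its own head, so blindly splicing an empty source would leave the destination's tail pointing into the dead CPU's per-cpu data. A minimal userspace model of the splice under that representation:

#include <assert.h>
#include <stddef.h>

struct node { struct node *next; };
struct list { struct node *head; struct node **tail; };

static void list_init(struct list *l)
{
	l->head = NULL;
	l->tail = &l->head;	/* empty: tail points at head */
}

static void splice(struct list *dst, struct list *src)
{
	if (&src->head != src->tail) {	/* skip an empty source */
		*dst->tail = src->head;
		dst->tail = src->tail;
		list_init(src);
	}
}

int main(void)
{
	struct list a, b;
	struct node n = { NULL };

	list_init(&a);
	list_init(&b);
	*b.tail = &n;			/* append n to b */
	b.tail = &n.next;

	splice(&a, &b);
	assert(a.head == &n && a.tail == &n.next);
	splice(&a, &b);			/* b is empty again: no-op */
	assert(a.tail == &n.next);	/* a's tail is left intact */
	return 0;
}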
diff --git a/kernel/time.c b/kernel/time.c
index 86729042e4cd..cbe0d5a222ff 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -36,6 +36,7 @@
 #include <linux/security.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
+#include <linux/math64.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -391,13 +392,17 @@ EXPORT_SYMBOL(set_normalized_timespec);
 struct timespec ns_to_timespec(const s64 nsec)
 {
 	struct timespec ts;
+	s32 rem;
 
 	if (!nsec)
 		return (struct timespec) {0, 0};
 
-	ts.tv_sec = div_long_long_rem_signed(nsec, NSEC_PER_SEC, &ts.tv_nsec);
-	if (unlikely(nsec < 0))
-		set_normalized_timespec(&ts, ts.tv_sec, ts.tv_nsec);
+	ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
+	if (unlikely(rem < 0)) {
+		ts.tv_sec--;
+		rem += NSEC_PER_SEC;
+	}
+	ts.tv_nsec = rem;
 
 	return ts;
 }
@@ -527,8 +532,10 @@ jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
 	 * Convert jiffies to nanoseconds and separate with
 	 * one divide.
 	 */
-	u64 nsec = (u64)jiffies * TICK_NSEC;
-	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &value->tv_nsec);
+	u32 rem;
+	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+				    NSEC_PER_SEC, &rem);
+	value->tv_nsec = rem;
 }
 EXPORT_SYMBOL(jiffies_to_timespec);

@@ -566,12 +573,11 @@ void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
 	 * Convert jiffies to nanoseconds and separate with
 	 * one divide.
 	 */
-	u64 nsec = (u64)jiffies * TICK_NSEC;
-	long tv_usec;
+	u32 rem;
 
-	value->tv_sec = div_long_long_rem(nsec, NSEC_PER_SEC, &tv_usec);
-	tv_usec /= NSEC_PER_USEC;
-	value->tv_usec = tv_usec;
+	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
+				    NSEC_PER_SEC, &rem);
+	value->tv_usec = rem / NSEC_PER_USEC;
 }
 EXPORT_SYMBOL(jiffies_to_timeval);

@@ -587,9 +593,7 @@ clock_t jiffies_to_clock_t(long x)
 	return x / (HZ / USER_HZ);
 # endif
 #else
-	u64 tmp = (u64)x * TICK_NSEC;
-	do_div(tmp, (NSEC_PER_SEC / USER_HZ));
-	return (long)tmp;
+	return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
 #endif
 }
 EXPORT_SYMBOL(jiffies_to_clock_t);
@@ -601,16 +605,12 @@ unsigned long clock_t_to_jiffies(unsigned long x)
 		return ~0UL;
 	return x * (HZ / USER_HZ);
 #else
-	u64 jif;
-
 	/* Don't worry about loss of precision here .. */
 	if (x >= ~0UL / HZ * USER_HZ)
 		return ~0UL;
 
 	/* .. but do try to contain it here */
-	jif = x * (u64) HZ;
-	do_div(jif, USER_HZ);
-	return jif;
+	return div_u64((u64)x * HZ, USER_HZ);
 #endif
 }
 EXPORT_SYMBOL(clock_t_to_jiffies);
@@ -619,10 +619,9 @@ u64 jiffies_64_to_clock_t(u64 x)
 {
 #if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
 # if HZ < USER_HZ
-	x *= USER_HZ;
-	do_div(x, HZ);
+	x = div_u64(x * USER_HZ, HZ);
 # elif HZ > USER_HZ
-	do_div(x, HZ / USER_HZ);
+	x = div_u64(x, HZ / USER_HZ);
 # else
 	/* Nothing to do */
 # endif
@@ -632,8 +631,7 @@ u64 jiffies_64_to_clock_t(u64 x)
 	 * but even this doesn't overflow in hundreds of years
 	 * in 64 bits, so..
 	 */
-	x *= TICK_NSEC;
-	do_div(x, (NSEC_PER_SEC / USER_HZ));
+	x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
 #endif
 	return x;
 }
@@ -642,21 +640,17 @@ EXPORT_SYMBOL(jiffies_64_to_clock_t);
 u64 nsec_to_clock_t(u64 x)
 {
 #if (NSEC_PER_SEC % USER_HZ) == 0
-	do_div(x, (NSEC_PER_SEC / USER_HZ));
+	return div_u64(x, NSEC_PER_SEC / USER_HZ);
 #elif (USER_HZ % 512) == 0
-	x *= USER_HZ/512;
-	do_div(x, (NSEC_PER_SEC / 512));
+	return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
 #else
 	/*
 	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
 	 * overflow after 64.99 years.
 	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
 	 */
-	x *= 9;
-	do_div(x, (unsigned long)((9ull * NSEC_PER_SEC + (USER_HZ/2)) /
-				  USER_HZ));
+	return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
 #endif
-	return x;
 }
 
 #if (BITS_PER_LONG < 64)
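
Note: the time.c conversions now use div_u64()/div_u64_rem()/div_s64_rem() from linux/math64.h in place of do_div() and the div_long_long_rem() family. The "one divide" pattern in jiffies_to_timeval() looks like this in isolation (HZ chosen for illustration):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC	1000000000u
#define NSEC_PER_USEC	1000u
#define HZ		250
#define TICK_NSEC	(NSEC_PER_SEC / HZ)		/* 4000000 ns */

int main(void)
{
	unsigned long jiffies = 12345;
	uint64_t nsec = (uint64_t)jiffies * TICK_NSEC;
	uint64_t sec = nsec / NSEC_PER_SEC;		/* div_u64_rem() quotient */
	uint32_t rem = nsec % NSEC_PER_SEC;		/* ... and its remainder */

	printf("%llu s %u us\n", (unsigned long long)sec,
	       rem / NSEC_PER_USEC);			/* 49 s 380000 us */
	return 0;
}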
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 5fd9b9469770..5125ddd8196b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -15,7 +15,8 @@
 #include <linux/jiffies.h>
 #include <linux/hrtimer.h>
 #include <linux/capability.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
+#include <linux/clocksource.h>
 #include <asm/timex.h>
 
 /*
@@ -23,11 +24,14 @@
  */
 unsigned long tick_usec = TICK_USEC;	/* USER_HZ period (usec) */
 unsigned long tick_nsec;		/* ACTHZ period (nsec) */
-static u64 tick_length, tick_length_base;
+u64 tick_length;
+static u64 tick_length_base;
+
+static struct hrtimer leap_timer;
 
 #define MAX_TICKADJ	500		/* microsecs */
 #define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \
-				  TICK_LENGTH_SHIFT) / NTP_INTERVAL_FREQ)
+				  NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
 
 /*
  * phase-lock loop variables
@@ -35,11 +39,12 @@ static u64 tick_length, tick_length_base;
 /* TIME_ERROR prevents overwriting the CMOS clock */
 static int time_state = TIME_OK;	/* clock synchronization status	*/
 int time_status = STA_UNSYNC;		/* clock status bits		*/
+static long time_tai;			/* TAI offset (s)		*/
 static s64 time_offset;		/* time adjustment (ns)		*/
 static long time_constant = 2;		/* pll time constant		*/
 long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
 long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
-long time_freq;			/* frequency offset (scaled ppm)*/
+static s64 time_freq;			/* frequency offset (scaled ns/s)*/
 static long time_reftime;		/* time at last adjustment (s)	*/
 long time_adjust;
 static long ntp_tick_adj;
@@ -47,16 +52,56 @@ static long ntp_tick_adj;
 static void ntp_update_frequency(void)
 {
 	u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
-				<< TICK_LENGTH_SHIFT;
-	second_length += (s64)ntp_tick_adj << TICK_LENGTH_SHIFT;
-	second_length += (s64)time_freq << (TICK_LENGTH_SHIFT - SHIFT_NSEC);
+				<< NTP_SCALE_SHIFT;
+	second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT;
+	second_length += time_freq;
 
 	tick_length_base = second_length;
 
-	do_div(second_length, HZ);
-	tick_nsec = second_length >> TICK_LENGTH_SHIFT;
+	tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
+	tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ);
+}
+
+static void ntp_update_offset(long offset)
+{
+	long mtemp;
+	s64 freq_adj;
+
+	if (!(time_status & STA_PLL))
+		return;
 
-	do_div(tick_length_base, NTP_INTERVAL_FREQ);
+	if (!(time_status & STA_NANO))
+		offset *= NSEC_PER_USEC;
+
+	/*
+	 * Scale the phase adjustment and
+	 * clamp to the operating range.
+	 */
+	offset = min(offset, MAXPHASE);
+	offset = max(offset, -MAXPHASE);
+
+	/*
+	 * Select how the frequency is to be controlled
+	 * and in which mode (PLL or FLL).
+	 */
+	if (time_status & STA_FREQHOLD || time_reftime == 0)
+		time_reftime = xtime.tv_sec;
+	mtemp = xtime.tv_sec - time_reftime;
+	time_reftime = xtime.tv_sec;
+
+	freq_adj = (s64)offset * mtemp;
+	freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant);
+	time_status &= ~STA_MODE;
+	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
+		freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
+				    mtemp);
+		time_status |= STA_MODE;
+	}
+	freq_adj += time_freq;
+	freq_adj = min(freq_adj, MAXFREQ_SCALED);
+	time_freq = max(freq_adj, -MAXFREQ_SCALED);
+
+	time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
 }
 
 /**
@@ -78,62 +123,70 @@ void ntp_clear(void)
 }
 
 /*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
+ * Leap second processing. If in leap-insert state at the end of the
+ * day, the system clock is set back one second; if in leap-delete
+ * state, the system clock is set ahead one second.
  */
-void second_overflow(void)
+static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 {
-	long time_adj;
+	enum hrtimer_restart res = HRTIMER_NORESTART;
 
-	/* Bump the maxerror field */
-	time_maxerror += MAXFREQ >> SHIFT_USEC;
-	if (time_maxerror > NTP_PHASE_LIMIT) {
-		time_maxerror = NTP_PHASE_LIMIT;
-		time_status |= STA_UNSYNC;
-	}
+	write_seqlock_irq(&xtime_lock);
 
-	/*
-	 * Leap second processing. If in leap-insert state at the end of the
-	 * day, the system clock is set back one second; if in leap-delete
-	 * state, the system clock is set ahead one second. The microtime()
-	 * routine or external clock driver will insure that reported time is
-	 * always monotonic. The ugly divides should be replaced.
-	 */
 	switch (time_state) {
 	case TIME_OK:
-		if (time_status & STA_INS)
-			time_state = TIME_INS;
-		else if (time_status & STA_DEL)
-			time_state = TIME_DEL;
 		break;
 	case TIME_INS:
-		if (xtime.tv_sec % 86400 == 0) {
-			xtime.tv_sec--;
-			wall_to_monotonic.tv_sec++;
-			time_state = TIME_OOP;
-			printk(KERN_NOTICE "Clock: inserting leap second "
-					"23:59:60 UTC\n");
-		}
+		xtime.tv_sec--;
+		wall_to_monotonic.tv_sec++;
+		time_state = TIME_OOP;
+		printk(KERN_NOTICE "Clock: "
+		       "inserting leap second 23:59:60 UTC\n");
+		leap_timer.expires = ktime_add_ns(leap_timer.expires,
+						  NSEC_PER_SEC);
+		res = HRTIMER_RESTART;
 		break;
 	case TIME_DEL:
-		if ((xtime.tv_sec + 1) % 86400 == 0) {
-			xtime.tv_sec++;
-			wall_to_monotonic.tv_sec--;
-			time_state = TIME_WAIT;
-			printk(KERN_NOTICE "Clock: deleting leap second "
-					"23:59:59 UTC\n");
-		}
+		xtime.tv_sec++;
+		time_tai--;
+		wall_to_monotonic.tv_sec--;
+		time_state = TIME_WAIT;
+		printk(KERN_NOTICE "Clock: "
+		       "deleting leap second 23:59:59 UTC\n");
 		break;
 	case TIME_OOP:
+		time_tai++;
 		time_state = TIME_WAIT;
-		break;
+		/* fall through */
 	case TIME_WAIT:
 		if (!(time_status & (STA_INS | STA_DEL)))
 			time_state = TIME_OK;
+		break;
+	}
+	update_vsyscall(&xtime, clock);
+
+	write_sequnlock_irq(&xtime_lock);
+
+	return res;
+}
+
+/*
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ */
+void second_overflow(void)
+{
+	s64 time_adj;
+
+	/* Bump the maxerror field */
+	time_maxerror += MAXFREQ / NSEC_PER_USEC;
+	if (time_maxerror > NTP_PHASE_LIMIT) {
+		time_maxerror = NTP_PHASE_LIMIT;
+		time_status |= STA_UNSYNC;
 	}
 
 	/*
@@ -143,7 +196,7 @@ void second_overflow(void)
 	tick_length = tick_length_base;
 	time_adj = shift_right(time_offset, SHIFT_PLL + time_constant);
 	time_offset -= time_adj;
-	tick_length += (s64)time_adj << (TICK_LENGTH_SHIFT - SHIFT_UPDATE);
+	tick_length += time_adj;
 
 	if (unlikely(time_adjust)) {
 		if (time_adjust > MAX_TICKADJ) {
@@ -154,25 +207,12 @@ void second_overflow(void)
 			tick_length -= MAX_TICKADJ_SCALED;
 		} else {
 			tick_length += (s64)(time_adjust * NSEC_PER_USEC /
-					NTP_INTERVAL_FREQ) << TICK_LENGTH_SHIFT;
+					NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
 			time_adjust = 0;
 		}
 	}
 }
 
-/*
- * Return how long ticks are at the moment, that is, how much time
- * update_wall_time_one_tick will add to xtime next time we call it
- * (assuming no calls to do_adjtimex in the meantime).
- * The return value is in fixed-point nanoseconds shifted by the
- * specified number of bits to the right of the binary point.
- * This function has no side-effects.
- */
-u64 current_tick_length(void)
-{
-	return tick_length;
-}
-
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
 
 /* Disable the cmos update - used by virtualization and embedded */
@@ -236,8 +276,8 @@ static inline void notify_cmos_timer(void) { }
  */
 int do_adjtimex(struct timex *txc)
 {
-	long mtemp, save_adjust, rem;
-	s64 freq_adj, temp64;
+	struct timespec ts;
+	long save_adjust, sec;
 	int result;
 
 	/* In order to modify anything, you gotta be super-user! */
@@ -247,147 +287,132 @@ int do_adjtimex(struct timex *txc)
 	/* Now we validate the data before disabling interrupts */
 
 	if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
 		/* singleshot must not be used with any other mode bits */
-		if (txc->modes != ADJ_OFFSET_SINGLESHOT &&
-		    txc->modes != ADJ_OFFSET_SS_READ)
+		if (txc->modes & ~ADJ_OFFSET_SS_READ)
 			return -EINVAL;
 	}
 
-	if (txc->modes != ADJ_OFFSET_SINGLESHOT && (txc->modes & ADJ_OFFSET))
-		/* adjustment Offset limited to +- .512 seconds */
-		if (txc->offset <= -MAXPHASE || txc->offset >= MAXPHASE)
-			return -EINVAL;
-
 	/* if the quartz is off by more than 10% something is VERY wrong ! */
 	if (txc->modes & ADJ_TICK)
 		if (txc->tick <  900000/USER_HZ ||
 		    txc->tick > 1100000/USER_HZ)
 			return -EINVAL;
 
+	if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
+		hrtimer_cancel(&leap_timer);
+	getnstimeofday(&ts);
+
 	write_seqlock_irq(&xtime_lock);
-	result = time_state;	/* mostly `TIME_OK' */
 
 	/* Save for later - semantics of adjtime is to return old value */
 	save_adjust = time_adjust;
 
-#if 0	/* STA_CLOCKERR is never set yet */
-	time_status &= ~STA_CLOCKERR;		/* reset STA_CLOCKERR */
-#endif
 	/* If there are input parameters, then process them */
-	if (txc->modes)
-	{
-		if (txc->modes & ADJ_STATUS)	/* only set allowed bits */
-			time_status = (txc->status & ~STA_RONLY) |
-				      (time_status & STA_RONLY);
-
-		if (txc->modes & ADJ_FREQUENCY) {	/* p. 22 */
-			if (txc->freq > MAXFREQ || txc->freq < -MAXFREQ) {
-				result = -EINVAL;
-				goto leave;
-			}
-			time_freq = ((s64)txc->freq * NSEC_PER_USEC)
-					>> (SHIFT_USEC - SHIFT_NSEC);
-		}
-
-		if (txc->modes & ADJ_MAXERROR) {
-			if (txc->maxerror < 0 || txc->maxerror >= NTP_PHASE_LIMIT) {
-				result = -EINVAL;
-				goto leave;
-			}
-			time_maxerror = txc->maxerror;
-		}
-
-		if (txc->modes & ADJ_ESTERROR) {
-			if (txc->esterror < 0 || txc->esterror >= NTP_PHASE_LIMIT) {
-				result = -EINVAL;
-				goto leave;
-			}
-			time_esterror = txc->esterror;
-		}
-
-		if (txc->modes & ADJ_TIMECONST) {	/* p. 24 */
-			if (txc->constant < 0) {	/* NTP v4 uses values > 6 */
-				result = -EINVAL;
-				goto leave;
-			}
-			time_constant = min(txc->constant + 4, (long)MAXTC);
-		}
-
-		if (txc->modes & ADJ_OFFSET) {	/* values checked earlier */
-			if (txc->modes == ADJ_OFFSET_SINGLESHOT) {
-				/* adjtime() is independent from ntp_adjtime() */
-				time_adjust = txc->offset;
-			}
-			else if (time_status & STA_PLL) {
-				time_offset = txc->offset * NSEC_PER_USEC;
-
-				/*
-				 * Scale the phase adjustment and
-				 * clamp to the operating range.
-				 */
-				time_offset = min(time_offset, (s64)MAXPHASE * NSEC_PER_USEC);
-				time_offset = max(time_offset, (s64)-MAXPHASE * NSEC_PER_USEC);
-
-				/*
-				 * Select whether the frequency is to be controlled
-				 * and in which mode (PLL or FLL). Clamp to the operating
-				 * range. Ugly multiply/divide should be replaced someday.
-				 */
-
-				if (time_status & STA_FREQHOLD || time_reftime == 0)
-					time_reftime = xtime.tv_sec;
-				mtemp = xtime.tv_sec - time_reftime;
-				time_reftime = xtime.tv_sec;
-
-				freq_adj = time_offset * mtemp;
-				freq_adj = shift_right(freq_adj, time_constant * 2 +
-						       (SHIFT_PLL + 2) * 2 - SHIFT_NSEC);
-				if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
-					u64 utemp64;
-					temp64 = time_offset << (SHIFT_NSEC - SHIFT_FLL);
-					if (time_offset < 0) {
-						utemp64 = -temp64;
-						do_div(utemp64, mtemp);
-						freq_adj -= utemp64;
-					} else {
-						utemp64 = temp64;
-						do_div(utemp64, mtemp);
-						freq_adj += utemp64;
-					}
-				}
-				freq_adj += time_freq;
-				freq_adj = min(freq_adj, (s64)MAXFREQ_NSEC);
-				time_freq = max(freq_adj, (s64)-MAXFREQ_NSEC);
-				time_offset = div_long_long_rem_signed(time_offset,
-								       NTP_INTERVAL_FREQ,
-								       &rem);
-				time_offset <<= SHIFT_UPDATE;
-			} /* STA_PLL */
-		} /* txc->modes & ADJ_OFFSET */
-		if (txc->modes & ADJ_TICK)
-			tick_usec = txc->tick;
-
-		if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
-			ntp_update_frequency();
-	} /* txc->modes */
-leave:	if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
+	if (txc->modes) {
+		if (txc->modes & ADJ_STATUS) {
+			if ((time_status & STA_PLL) &&
+			    !(txc->status & STA_PLL)) {
+				time_state = TIME_OK;
+				time_status = STA_UNSYNC;
+			}
+			/* only set allowed bits */
+			time_status &= STA_RONLY;
+			time_status |= txc->status & ~STA_RONLY;
+
+			switch (time_state) {
+			case TIME_OK:
+			start_timer:
+				sec = ts.tv_sec;
+				if (time_status & STA_INS) {
+					time_state = TIME_INS;
+					sec += 86400 - sec % 86400;
+					hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
+				} else if (time_status & STA_DEL) {
+					time_state = TIME_DEL;
+					sec += 86400 - (sec + 1) % 86400;
+					hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
+				}
+				break;
+			case TIME_INS:
+			case TIME_DEL:
+				time_state = TIME_OK;
+				goto start_timer;
+				break;
+			case TIME_WAIT:
+				if (!(time_status & (STA_INS | STA_DEL)))
+					time_state = TIME_OK;
+				break;
+			case TIME_OOP:
+				hrtimer_restart(&leap_timer);
+				break;
+			}
+		}
+
+		if (txc->modes & ADJ_NANO)
+			time_status |= STA_NANO;
+		if (txc->modes & ADJ_MICRO)
+			time_status &= ~STA_NANO;
+
+		if (txc->modes & ADJ_FREQUENCY) {
+			time_freq = (s64)txc->freq * PPM_SCALE;
+			time_freq = min(time_freq, MAXFREQ_SCALED);
+			time_freq = max(time_freq, -MAXFREQ_SCALED);
+		}
+
+		if (txc->modes & ADJ_MAXERROR)
+			time_maxerror = txc->maxerror;
+		if (txc->modes & ADJ_ESTERROR)
+			time_esterror = txc->esterror;
+
+		if (txc->modes & ADJ_TIMECONST) {
+			time_constant = txc->constant;
+			if (!(time_status & STA_NANO))
+				time_constant += 4;
+			time_constant = min(time_constant, (long)MAXTC);
+			time_constant = max(time_constant, 0l);
+		}
+
+		if (txc->modes & ADJ_TAI && txc->constant > 0)
+			time_tai = txc->constant;
+
+		if (txc->modes & ADJ_OFFSET) {
+			if (txc->modes == ADJ_OFFSET_SINGLESHOT)
+				/* adjtime() is independent from ntp_adjtime() */
+				time_adjust = txc->offset;
+			else
+				ntp_update_offset(txc->offset);
+		}
+		if (txc->modes & ADJ_TICK)
+			tick_usec = txc->tick;
+
+		if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
+			ntp_update_frequency();
+	}
+
+	result = time_state;	/* mostly `TIME_OK' */
+	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
 		result = TIME_ERROR;
 
 	if ((txc->modes == ADJ_OFFSET_SINGLESHOT) ||
 	    (txc->modes == ADJ_OFFSET_SS_READ))
 		txc->offset = save_adjust;
-	else
-		txc->offset = ((long)shift_right(time_offset, SHIFT_UPDATE)) *
-				NTP_INTERVAL_FREQ / 1000;
-	txc->freq	   = (time_freq / NSEC_PER_USEC) <<
-				(SHIFT_USEC - SHIFT_NSEC);
+	else {
+		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+					  NTP_SCALE_SHIFT);
+		if (!(time_status & STA_NANO))
+			txc->offset /= NSEC_PER_USEC;
+	}
+	txc->freq	   = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
+					 (s64)PPM_SCALE_INV,
+					 NTP_SCALE_SHIFT);
 	txc->maxerror	   = time_maxerror;
 	txc->esterror	   = time_esterror;
 	txc->status	   = time_status;
 	txc->constant	   = time_constant;
 	txc->precision	   = 1;
-	txc->tolerance	   = MAXFREQ;
+	txc->tolerance	   = MAXFREQ_SCALED / PPM_SCALE;
 	txc->tick	   = tick_usec;
+	txc->tai	   = time_tai;
 
 	/* PPS is not implemented, so these are zero */
 	txc->ppsfreq	   = 0;
@@ -399,9 +424,15 @@ leave:	if ((time_status & (STA_UNSYNC|STA_CLOCKERR)) != 0)
 	txc->errcnt	   = 0;
 	txc->stbcnt	   = 0;
 	write_sequnlock_irq(&xtime_lock);
-	do_gettimeofday(&txc->time);
+
+	txc->time.tv_sec = ts.tv_sec;
+	txc->time.tv_usec = ts.tv_nsec;
+	if (!(time_status & STA_NANO))
+		txc->time.tv_usec /= NSEC_PER_USEC;
+
 	notify_cmos_timer();
-	return(result);
+
+	return result;
 }
 
 static int __init ntp_tick_adj_setup(char *str)
@@ -411,3 +442,10 @@ static int __init ntp_tick_adj_setup(char *str)
 }
 
 __setup("ntp_tick_adj=", ntp_tick_adj_setup);
+
+void __init ntp_init(void)
+{
+	ntp_clear();
+	hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
+	leap_timer.function = ntp_leap_second;
+}
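
Note: with leap second handling moved out of second_overflow() into an hrtimer, do_adjtimex() arms leap_timer for the end of the current UTC day. The expiry arithmetic, worked through for the real 2008-12-31 leap second:

#include <stdio.h>

int main(void)
{
	long sec = 1230767995;		/* 2008-12-31 23:59:55 UTC */

	/* STA_INS: fire at the next UTC midnight, where 23:59:60 is inserted */
	long ins = sec + (86400 - sec % 86400);		/* 1230768000 */

	/* STA_DEL: fire one second earlier, at 23:59:59 */
	long del = sec + (86400 - (sec + 1) % 86400);	/* 1230767999 */

	printf("insert fires at %ld, delete at %ld\n", ins, del);
	return 0;
}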
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 2d6087c7cf98..e91c29f961c9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -53,7 +53,7 @@ void update_xtime_cache(u64 nsec)
 	timespec_add_ns(&xtime_cache, nsec);
 }
 
-static struct clocksource *clock; /* pointer to current clocksource */
+struct clocksource *clock;
 
 
 #ifdef CONFIG_GENERIC_TIME
@@ -246,7 +246,7 @@ void __init timekeeping_init(void)
 
 	write_seqlock_irqsave(&xtime_lock, flags);
 
-	ntp_clear();
+	ntp_init();
 
 	clock = clocksource_get_next();
 	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -371,7 +371,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
 	 * here.  This is tuned so that an error of about 1 msec is adjusted
 	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
 	 */
-	error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
+	error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
 	error2 = abs(error2);
 	for (look_ahead = 0; error2 > 0; look_ahead++)
 		error2 >>= 2;
@@ -380,8 +380,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
 	 * Now calculate the error in (1 << look_ahead) ticks, but first
 	 * remove the single look ahead already included in the error.
 	 */
-	tick_error = current_tick_length() >>
-		(TICK_LENGTH_SHIFT - clock->shift + 1);
+	tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
 	tick_error -= clock->xtime_interval >> 1;
 	error = ((error - tick_error) >> look_ahead) + tick_error;

@@ -412,7 +411,7 @@ static void clocksource_adjust(s64 offset)
 	s64 error, interval = clock->cycle_interval;
 	int adj;
 
-	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
+	error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
 	if (error > interval) {
 		error >>= 2;
 		if (likely(error <= interval))
@@ -434,7 +433,7 @@ static void clocksource_adjust(s64 offset)
 	clock->xtime_interval += interval;
 	clock->xtime_nsec -= offset;
 	clock->error -= (interval - offset) <<
-			(TICK_LENGTH_SHIFT - clock->shift);
+			(NTP_SCALE_SHIFT - clock->shift);
 }
 
 /**
@@ -473,8 +472,8 @@ void update_wall_time(void)
 		}
 
 		/* accumulate error between NTP and clock interval */
-		clock->error += current_tick_length();
-		clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
+		clock->error += tick_length;
+		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
 	}
 
 	/* correct the clock when NTP error is too big */
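
Note: TICK_LENGTH_SHIFT gives way to the common NTP_SCALE_SHIFT, and current_tick_length() to the now-exported tick_length. Both tick_length and clock->error carry nanoseconds scaled up by NTP_SCALE_SHIFT (32 in this series), so per-tick differences smaller than a nanosecond still accumulate exactly. A toy model of the accumulation above, under that assumption:

#include <stdio.h>
#include <stdint.h>

#define NTP_SCALE_SHIFT 32

int main(void)
{
	/* a 10 ms NTP tick vs. a clocksource interval 0.5 ns shorter */
	int64_t tick_length = (10000000LL << NTP_SCALE_SHIFT) + (1LL << 31);
	int64_t xtime_nsec  = 10000000LL << NTP_SCALE_SHIFT;
	int64_t error = 0;
	int i;

	for (i = 0; i < 4; i++)
		error += tick_length - xtime_nsec;

	/* after 4 ticks the sub-ns residue has become a whole 2 ns */
	printf("error = %lld ns\n", (long long)(error >> NTP_SCALE_SHIFT));
	return 0;
}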
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 721093a22561..29fc39f1029c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -195,7 +195,6 @@ static void delayed_work_timer_fn(unsigned long __data)
 int queue_delayed_work(struct workqueue_struct *wq,
 			struct delayed_work *dwork, unsigned long delay)
 {
-	timer_stats_timer_set_start_info(&dwork->timer);
 	if (delay == 0)
 		return queue_work(wq, &dwork->work);

@@ -219,11 +218,12 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 	struct timer_list *timer = &dwork->timer;
 	struct work_struct *work = &dwork->work;
 
-	timer_stats_timer_set_start_info(&dwork->timer);
 	if (!test_and_set_bit(WORK_STRUCT_PENDING, work_data_bits(work))) {
 		BUG_ON(timer_pending(timer));
 		BUG_ON(!list_empty(&work->entry));
 
+		timer_stats_timer_set_start_info(&dwork->timer);
+
 		/* This stores cwq for the moment, for the timer_fn */
 		set_wq_data(work, wq_per_cpu(wq, raw_smp_processor_id()));
 		timer->expires = jiffies + delay;
@@ -564,7 +564,6 @@ EXPORT_SYMBOL(schedule_work);
 int schedule_delayed_work(struct delayed_work *dwork,
 					unsigned long delay)
 {
-	timer_stats_timer_set_start_info(&dwork->timer);
 	return queue_delayed_work(keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
@@ -581,7 +580,6 @@ EXPORT_SYMBOL(schedule_delayed_work);
 int schedule_delayed_work_on(int cpu,
 			struct delayed_work *dwork, unsigned long delay)
 {
-	timer_stats_timer_set_start_info(&dwork->timer);
 	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work_on);