 Documentation/kernel-parameters.txt        |   4
 arch/powerpc/platforms/cell/spufs/sched.c  |   2
 arch/x86/kernel/hpet.c                     |  80
 arch/x86/kernel/quirks.c                   |   3
 drivers/infiniband/hw/ipath/ipath_driver.c |   6
 include/linux/timer.h                      |  22
 include/linux/timex.h                      |   2
 kernel/posix-cpu-timers.c                  |   3
 kernel/relay.c                             |   2
 kernel/time/clockevents.c                  |  20
 kernel/time/ntp.c                          | 444
 kernel/timer.c                             | 110
 12 files changed, 437 insertions(+), 261 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 954b23cecfd1..fa4e1239a8fa 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -493,10 +493,12 @@ and is between 256 and 4096 characters. It is defined in the file
 			Default: 64
 
 	hpet=		[X86-32,HPET] option to control HPET usage
-			Format: { enable (default) | disable | force }
+			Format: { enable (default) | disable | force |
+				verbose }
 			disable: disable HPET and use PIT instead
 			force: allow force enabled of undocumented chips (ICH4,
 				VIA, nVidia)
+			verbose: show contents of HPET registers during setup
 
 	com20020=	[HW,NET] ARCnet - COM20020 chipset
 			Format:
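
Note: the new flag is just another token on the kernel command line next to
the existing ones. A minimal sketch of a boot entry using it (the image name
and root device are placeholders, not part of this patch):

	linux /boot/vmlinuz root=/dev/sda1 ro hpet=verbose

With hpet=verbose set, the setup code dumps the HPET ID, CFG, STATUS,
COUNTER and per-timer registers at each configuration step; see the
_hpet_print_config() helper added to arch/x86/kernel/hpet.c below.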
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 6a0ad196aeb3..f085369301b1 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -508,7 +508,7 @@ static void __spu_add_to_rq(struct spu_context *ctx)
 		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
 		set_bit(ctx->prio, spu_prio->bitmap);
 		if (!spu_prio->nr_waiting++)
-			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 	}
 }
 
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index a00545fe5cdd..648b3a2a3a44 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -80,6 +80,7 @@ static inline void hpet_clear_mapping(void)
  */
 static int boot_hpet_disable;
 int hpet_force_user;
+static int hpet_verbose;
 
 static int __init hpet_setup(char *str)
 {
@@ -88,6 +89,8 @@ static int __init hpet_setup(char *str)
 			boot_hpet_disable = 1;
 		if (!strncmp("force", str, 5))
 			hpet_force_user = 1;
+		if (!strncmp("verbose", str, 7))
+			hpet_verbose = 1;
 	}
 	return 1;
 }
@@ -119,6 +122,43 @@ int is_hpet_enabled(void)
 }
 EXPORT_SYMBOL_GPL(is_hpet_enabled);
 
+static void _hpet_print_config(const char *function, int line)
+{
+	u32 i, timers, l, h;
+	printk(KERN_INFO "hpet: %s(%d):\n", function, line);
+	l = hpet_readl(HPET_ID);
+	h = hpet_readl(HPET_PERIOD);
+	timers = ((l & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT) + 1;
+	printk(KERN_INFO "hpet: ID: 0x%x, PERIOD: 0x%x\n", l, h);
+	l = hpet_readl(HPET_CFG);
+	h = hpet_readl(HPET_STATUS);
+	printk(KERN_INFO "hpet: CFG: 0x%x, STATUS: 0x%x\n", l, h);
+	l = hpet_readl(HPET_COUNTER);
+	h = hpet_readl(HPET_COUNTER+4);
+	printk(KERN_INFO "hpet: COUNTER_l: 0x%x, COUNTER_h: 0x%x\n", l, h);
+
+	for (i = 0; i < timers; i++) {
+		l = hpet_readl(HPET_Tn_CFG(i));
+		h = hpet_readl(HPET_Tn_CFG(i)+4);
+		printk(KERN_INFO "hpet: T%d: CFG_l: 0x%x, CFG_h: 0x%x\n",
+		       i, l, h);
+		l = hpet_readl(HPET_Tn_CMP(i));
+		h = hpet_readl(HPET_Tn_CMP(i)+4);
+		printk(KERN_INFO "hpet: T%d: CMP_l: 0x%x, CMP_h: 0x%x\n",
+		       i, l, h);
+		l = hpet_readl(HPET_Tn_ROUTE(i));
+		h = hpet_readl(HPET_Tn_ROUTE(i)+4);
+		printk(KERN_INFO "hpet: T%d ROUTE_l: 0x%x, ROUTE_h: 0x%x\n",
+		       i, l, h);
+	}
+}
+
+#define hpet_print_config()					\
+do {								\
+	if (hpet_verbose)					\
+		_hpet_print_config(__FUNCTION__, __LINE__);	\
+} while (0)
+
 /*
  * When the hpet driver (/dev/hpet) is enabled, we need to reserve
  * timer 0 and timer 1 in case of RTC emulation.
@@ -191,27 +231,37 @@ static struct clock_event_device hpet_clockevent = {
 	.rating		= 50,
 };
 
-static void hpet_start_counter(void)
+static void hpet_stop_counter(void)
 {
 	unsigned long cfg = hpet_readl(HPET_CFG);
-
 	cfg &= ~HPET_CFG_ENABLE;
 	hpet_writel(cfg, HPET_CFG);
 	hpet_writel(0, HPET_COUNTER);
 	hpet_writel(0, HPET_COUNTER + 4);
+}
+
+static void hpet_start_counter(void)
+{
+	unsigned long cfg = hpet_readl(HPET_CFG);
 	cfg |= HPET_CFG_ENABLE;
 	hpet_writel(cfg, HPET_CFG);
 }
 
+static void hpet_restart_counter(void)
+{
+	hpet_stop_counter();
+	hpet_start_counter();
+}
+
 static void hpet_resume_device(void)
 {
 	force_hpet_resume();
 }
 
-static void hpet_restart_counter(void)
+static void hpet_resume_counter(void)
 {
 	hpet_resume_device();
-	hpet_start_counter();
+	hpet_restart_counter();
 }
 
 static void hpet_enable_legacy_int(void)
@@ -259,29 +309,23 @@ static int hpet_setup_msi_irq(unsigned int irq);
 static void hpet_set_mode(enum clock_event_mode mode,
 			  struct clock_event_device *evt, int timer)
 {
-	unsigned long cfg, cmp, now;
+	unsigned long cfg;
 	uint64_t delta;
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
+		hpet_stop_counter();
 		delta = ((uint64_t)(NSEC_PER_SEC/HZ)) * evt->mult;
 		delta >>= evt->shift;
-		now = hpet_readl(HPET_COUNTER);
-		cmp = now + (unsigned long) delta;
 		cfg = hpet_readl(HPET_Tn_CFG(timer));
 		/* Make sure we use edge triggered interrupts */
 		cfg &= ~HPET_TN_LEVEL;
 		cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
 		       HPET_TN_SETVAL | HPET_TN_32BIT;
 		hpet_writel(cfg, HPET_Tn_CFG(timer));
-		/*
-		 * The first write after writing TN_SETVAL to the
-		 * config register sets the counter value, the second
-		 * write sets the period.
-		 */
-		hpet_writel(cmp, HPET_Tn_CMP(timer));
-		udelay(1);
 		hpet_writel((unsigned long) delta, HPET_Tn_CMP(timer));
+		hpet_start_counter();
+		hpet_print_config();
 		break;
 
 	case CLOCK_EVT_MODE_ONESHOT:
@@ -308,6 +352,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
 			irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
 			enable_irq(hdev->irq);
 		}
+		hpet_print_config();
 		break;
 	}
 }
@@ -526,6 +571,7 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
 
 	num_timers = ((id & HPET_ID_NUMBER) >> HPET_ID_NUMBER_SHIFT);
 	num_timers++; /* Value read out starts from 0 */
+	hpet_print_config();
 
 	hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
 	if (!hpet_devs)
@@ -695,7 +741,7 @@ static struct clocksource clocksource_hpet = {
 	.mask		= HPET_MASK,
 	.shift		= HPET_SHIFT,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
-	.resume		= hpet_restart_counter,
+	.resume		= hpet_resume_counter,
 #ifdef CONFIG_X86_64
 	.vread		= vread_hpet,
 #endif
@@ -707,7 +753,7 @@ static int hpet_clocksource_register(void)
 	cycle_t t1;
 
 	/* Start the counter */
-	hpet_start_counter();
+	hpet_restart_counter();
 
 	/* Verify whether hpet counter works */
 	t1 = read_hpet();
@@ -793,6 +839,7 @@ int __init hpet_enable(void)
 	 * information and the number of channels
 	 */
 	id = hpet_readl(HPET_ID);
+	hpet_print_config();
 
 #ifdef CONFIG_HPET_EMULATE_RTC
 	/*
@@ -845,6 +892,7 @@ static __init int hpet_late_init(void)
 		return -ENODEV;
 
 	hpet_reserve_platform_timers(hpet_readl(HPET_ID));
+	hpet_print_config();
 
 	for_each_online_cpu(cpu) {
 		hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu);
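
Note: hpet_print_config() is wrapped in the usual do { } while (0) idiom so
the conditional expansion always behaves as a single statement. A minimal
illustration of the pitfall this avoids (hypothetical caller, not from this
patch):

	/*
	 * If the macro expanded to a bare `if (hpet_verbose) ...;`, the
	 * `else` below would silently bind to the macro's internal `if`
	 * instead of the outer one:
	 */
	if (resumed)
		hpet_print_config();
	else
		boot_time_checks();	/* hypothetical helper */

The __FUNCTION__/__LINE__ arguments tag each register dump with its call
site, which is what makes it safe to sprinkle the same helper across
hpet_enable(), hpet_late_init() and the set_mode() path and still tell the
dumps apart.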
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 309949e9e1c1..697d1b78cfbf 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -172,7 +172,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_4,
 					 ich_force_enable_hpet);
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH9_7,
 			 ich_force_enable_hpet);
-
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x3a16,	/* ICH10 */
+			 ich_force_enable_hpet);
 
 static struct pci_dev *cached_dev;
 
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 69c0ce321b4e..cb9daa6ac029 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -2715,7 +2715,7 @@ static void ipath_hol_signal_up(struct ipath_devdata *dd)
  * to prevent HoL blocking, then start the HoL timer that
  * periodically continues, then stop procs, so they can detect
  * link down if they want, and do something about it.
- * Timer may already be running, so use __mod_timer, not add_timer.
+ * Timer may already be running, so use mod_timer, not add_timer.
  */
 void ipath_hol_down(struct ipath_devdata *dd)
 {
@@ -2724,7 +2724,7 @@ void ipath_hol_down(struct ipath_devdata *dd)
 	dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
 	dd->ipath_hol_timer.expires = jiffies +
 		msecs_to_jiffies(ipath_hol_timeout_ms);
-	__mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
+	mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
 }
 
 /*
@@ -2763,7 +2763,7 @@ void ipath_hol_event(unsigned long opaque)
 	else {
 		dd->ipath_hol_timer.expires = jiffies +
 			msecs_to_jiffies(ipath_hol_timeout_ms);
-		__mod_timer(&dd->ipath_hol_timer,
+		mod_timer(&dd->ipath_hol_timer,
 			dd->ipath_hol_timer.expires);
 	}
 }
diff --git a/include/linux/timer.h b/include/linux/timer.h
index daf9685b861c..e2d662e3416e 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -86,8 +86,8 @@ static inline int timer_pending(const struct timer_list * timer)
 
 extern void add_timer_on(struct timer_list *timer, int cpu);
 extern int del_timer(struct timer_list * timer);
-extern int __mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
+extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
 
 /*
  * The jiffies value which is added to now, when there is no timer
@@ -146,25 +146,7 @@ static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
 }
 #endif
 
-/**
- * add_timer - start a timer
- * @timer: the timer to be added
- *
- * The kernel will do a ->function(->data) callback from the
- * timer interrupt at the ->expires point in the future. The
- * current time is 'jiffies'.
- *
- * The timer's ->expires, ->function (and if the handler uses it, ->data)
- * fields must be set prior calling this function.
- *
- * Timers with an ->expires field in the past will be executed in the next
- * timer tick.
- */
-static inline void add_timer(struct timer_list *timer)
-{
-	BUG_ON(timer_pending(timer));
-	__mod_timer(timer, timer->expires);
-}
+extern void add_timer(struct timer_list *timer);
 
 #ifdef CONFIG_SMP
   extern int try_to_del_timer_sync(struct timer_list *timer);
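
Note: the header now exports the new pair of primitives: mod_timer()
activates a timer regardless of its current state, while mod_timer_pending()
only re-arms a timer that is still pending, so a concurrent del_timer()
stays final. A minimal usage sketch (hypothetical driver code, not part of
this patch):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	static struct timer_list idle_timer;	/* assumed set up elsewhere */

	static void touch_idle_timeout(void)
	{
		/*
		 * Push the timeout out by one second, but only if the
		 * timer has not already been deleted by the teardown
		 * path; mod_timer_pending() returns 0 without re-arming
		 * in that case.
		 */
		mod_timer_pending(&idle_timer, jiffies + HZ);
	}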
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 998a55d80acf..aa3475fcff64 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -190,7 +190,7 @@ struct timex {
  * offset and maximum frequency tolerance.
  */
 #define SHIFT_USEC 16		/* frequency offset scale (shift) */
-#define PPM_SCALE (NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
+#define PPM_SCALE ((s64)NSEC_PER_USEC << (NTP_SCALE_SHIFT - SHIFT_USEC))
 #define PPM_SCALE_INV_SHIFT 19
 #define PPM_SCALE_INV ((1ll << (PPM_SCALE_INV_SHIFT + NTP_SCALE_SHIFT)) / \
 		       PPM_SCALE + 1)
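
Note: the (s64) cast matters for the users of the macro, not for the shift
itself. With NTP_SCALE_SHIFT == 32 and SHIFT_USEC == 16 the constant is only
NSEC_PER_USEC << 16 = 65,536,000, which fits a 32-bit long, but expressions
like `txc->freq * PPM_SCALE` then multiply in 32 bits on 32-bit targets and
overflow. Hoisting the cast into the macro makes every such product 64-bit
and lets ntp.c drop its caller-side (s64) casts (see the ntp.c hunks below).
A user-space sketch of the difference, assuming a 32-bit long:

	#include <stdint.h>

	#define NSEC_PER_USEC	1000L
	#define OLD_PPM_SCALE	(NSEC_PER_USEC << 16)		/* plain long */
	#define NEW_PPM_SCALE	((int64_t)NSEC_PER_USEC << 16)	/* widened   */

	long freq = 500 << 16;			/* 500 ppm in timex scaling */
	long bad = freq * OLD_PPM_SCALE;	/* 32x32 multiply: overflows */
	int64_t ok = freq * NEW_PPM_SCALE;	/* promoted to 64-bit: exact */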
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e976e505648d..8e5d9a68b022 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1370,7 +1370,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;
 	}
-	return 0;
+
+	return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
 }
 
 /*
diff --git a/kernel/relay.c b/kernel/relay.c
index 9d79b7854fa6..8f2179c8056f 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -750,7 +750,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
 			 * from the scheduler (trying to re-grab
 			 * rq->lock), so defer it.
 			 */
-			__mod_timer(&buf->timer, jiffies + 1);
+			mod_timer(&buf->timer, jiffies + 1);
 		}
 
 		old = buf->data;
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index ea2f48af83cf..d13be216a790 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -68,6 +68,17 @@ void clockevents_set_mode(struct clock_event_device *dev,
 	if (dev->mode != mode) {
 		dev->set_mode(mode, dev);
 		dev->mode = mode;
+
+		/*
+		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
+		 * on it, so fix it up and emit a warning:
+		 */
+		if (mode == CLOCK_EVT_MODE_ONESHOT) {
+			if (unlikely(!dev->mult)) {
+				dev->mult = 1;
+				WARN_ON(1);
+			}
+		}
 	}
 }
 
@@ -168,15 +179,6 @@ void clockevents_register_device(struct clock_event_device *dev)
 	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
 	BUG_ON(!dev->cpumask);
 
-	/*
-	 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
-	 * on it, so fix it up and emit a warning:
-	 */
-	if (unlikely(!dev->mult)) {
-		dev->mult = 1;
-		WARN_ON(1);
-	}
-
 	spin_lock(&clockevents_lock);
 
 	list_add(&dev->list, &clockevent_devices);
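
Note: dev->mult is the nsec-to-cycles multiplier used when programming
oneshot events, roughly `clc = ((u64)delta_ns * dev->mult) >> dev->shift`,
so a zero mult only ever bites once a device is actually switched to oneshot
mode. Moving the fixup from registration time into the
CLOCK_EVT_MODE_ONESHOT switch presumably lets drivers register with an
uncalibrated mult and fill it in before (or during) the first mode switch;
periodic-only devices never hit the check at all.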
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f5f793d92415..7fc64375ff43 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -1,71 +1,129 @@
 /*
- * linux/kernel/time/ntp.c
- *
  * NTP state machine interfaces and logic.
  *
  * This code was mainly moved from kernel/timer.c and kernel/time.c
  * Please see those files for relevant copyright info and historical
  * changelogs.
  */
-
-#include <linux/mm.h>
-#include <linux/time.h>
-#include <linux/timex.h>
-#include <linux/jiffies.h>
-#include <linux/hrtimer.h>
 #include <linux/capability.h>
-#include <linux/math64.h>
 #include <linux/clocksource.h>
 #include <linux/workqueue.h>
-#include <asm/timex.h>
+#include <linux/hrtimer.h>
+#include <linux/jiffies.h>
+#include <linux/math64.h>
+#include <linux/timex.h>
+#include <linux/time.h>
+#include <linux/mm.h>
 
 /*
- * Timekeeping variables
+ * NTP timekeeping variables:
  */
-unsigned long tick_usec = TICK_USEC;		/* USER_HZ period (usec) */
-unsigned long tick_nsec;			/* ACTHZ period (nsec) */
-u64 tick_length;
-static u64 tick_length_base;
 
-static struct hrtimer leap_timer;
+/* USER_HZ period (usecs): */
+unsigned long			tick_usec = TICK_USEC;
 
-#define MAX_TICKADJ	500		/* microsecs */
-#define MAX_TICKADJ_SCALED (((u64)(MAX_TICKADJ * NSEC_PER_USEC) << \
-				  NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
+/* ACTHZ period (nsecs): */
+unsigned long			tick_nsec;
+
+u64				tick_length;
+static u64			tick_length_base;
+
+static struct hrtimer		leap_timer;
+
+#define MAX_TICKADJ		500LL		/* usecs */
+#define MAX_TICKADJ_SCALED \
+	(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
 
 /*
  * phase-lock loop variables
  */
-/* TIME_ERROR prevents overwriting the CMOS clock */
-static int time_state = TIME_OK;	/* clock synchronization status	*/
-int time_status = STA_UNSYNC;		/* clock status bits		*/
-static long time_tai;			/* TAI offset (s)		*/
-static s64 time_offset;			/* time adjustment (ns)		*/
-static long time_constant = 2;		/* pll time constant		*/
-long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
-long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
-static s64 time_freq;			/* frequency offset (scaled ns/s)*/
-static long time_reftime;		/* time at last adjustment (s)	*/
-long time_adjust;
-static long ntp_tick_adj;
 
+/*
+ * clock synchronization status
+ *
+ * (TIME_ERROR prevents overwriting the CMOS clock)
+ */
+static int			time_state = TIME_OK;
+
+/* clock status bits: */
+int				time_status = STA_UNSYNC;
+
+/* TAI offset (secs): */
+static long			time_tai;
+
+/* time adjustment (nsecs): */
+static s64			time_offset;
+
+/* pll time constant: */
+static long			time_constant = 2;
+
+/* maximum error (usecs): */
+long				time_maxerror = NTP_PHASE_LIMIT;
+
+/* estimated error (usecs): */
+long				time_esterror = NTP_PHASE_LIMIT;
+
+/* frequency offset (scaled nsecs/secs): */
+static s64			time_freq;
+
+/* time at last adjustment (secs): */
+static long			time_reftime;
+
+long				time_adjust;
+
+/* constant (boot-param configurable) NTP tick adjustment (upscaled) */
+static s64			ntp_tick_adj;
+
+/*
+ * NTP methods:
+ */
+
+/*
+ * Update (tick_length, tick_length_base, tick_nsec), based
+ * on (tick_usec, ntp_tick_adj, time_freq):
+ */
 static void ntp_update_frequency(void)
 {
-	u64 second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
-				<< NTP_SCALE_SHIFT;
-	second_length += (s64)ntp_tick_adj << NTP_SCALE_SHIFT;
-	second_length += time_freq;
+	u64 second_length;
+	u64 new_base;
 
-	tick_length_base = second_length;
+	second_length = (u64)(tick_usec * NSEC_PER_USEC * USER_HZ)
+						<< NTP_SCALE_SHIFT;
 
-	tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
-	tick_length_base = div_u64(tick_length_base, NTP_INTERVAL_FREQ);
+	second_length += ntp_tick_adj;
+	second_length += time_freq;
+
+	tick_nsec = div_u64(second_length, HZ) >> NTP_SCALE_SHIFT;
+	new_base = div_u64(second_length, NTP_INTERVAL_FREQ);
+
+	/*
+	 * Don't wait for the next second_overflow, apply
+	 * the change to the tick length immediately:
+	 */
+	tick_length += new_base - tick_length_base;
+	tick_length_base = new_base;
+}
+
+static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
+{
+	time_status &= ~STA_MODE;
+
+	if (secs < MINSEC)
+		return 0;
+
+	if (!(time_status & STA_FLL) && (secs <= MAXSEC))
+		return 0;
+
+	time_status |= STA_MODE;
+
+	return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
 }
 
 static void ntp_update_offset(long offset)
 {
-	long mtemp;
 	s64 freq_adj;
+	s64 offset64;
+	long secs;
 
 	if (!(time_status & STA_PLL))
 		return;
@@ -84,24 +142,23 @@ static void ntp_update_offset(long offset)
 	 * Select how the frequency is to be controlled
 	 * and in which mode (PLL or FLL).
 	 */
-	if (time_status & STA_FREQHOLD || time_reftime == 0)
-		time_reftime = xtime.tv_sec;
-	mtemp = xtime.tv_sec - time_reftime;
+	secs = xtime.tv_sec - time_reftime;
+	if (unlikely(time_status & STA_FREQHOLD))
+		secs = 0;
+
 	time_reftime = xtime.tv_sec;
 
-	freq_adj = (s64)offset * mtemp;
-	freq_adj <<= NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant);
-	time_status &= ~STA_MODE;
-	if (mtemp >= MINSEC && (time_status & STA_FLL || mtemp > MAXSEC)) {
-		freq_adj += div_s64((s64)offset << (NTP_SCALE_SHIFT - SHIFT_FLL),
-				    mtemp);
-		time_status |= STA_MODE;
-	}
-	freq_adj += time_freq;
-	freq_adj = min(freq_adj, MAXFREQ_SCALED);
-	time_freq = max(freq_adj, -MAXFREQ_SCALED);
+	offset64 = offset;
+	freq_adj = (offset64 * secs) <<
+			(NTP_SCALE_SHIFT - 2 * (SHIFT_PLL + 2 + time_constant));
 
-	time_offset = div_s64((s64)offset << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
+	freq_adj += ntp_update_offset_fll(offset64, secs);
+
+	freq_adj = min(freq_adj + time_freq, MAXFREQ_SCALED);
+
+	time_freq = max(freq_adj, -MAXFREQ_SCALED);
+
+	time_offset = div_s64(offset64 << NTP_SCALE_SHIFT, NTP_INTERVAL_FREQ);
 }
 
 /**
@@ -111,15 +168,15 @@ static void ntp_update_offset(long offset)
111 */ 168 */
112void ntp_clear(void) 169void ntp_clear(void)
113{ 170{
114 time_adjust = 0; /* stop active adjtime() */ 171 time_adjust = 0; /* stop active adjtime() */
115 time_status |= STA_UNSYNC; 172 time_status |= STA_UNSYNC;
116 time_maxerror = NTP_PHASE_LIMIT; 173 time_maxerror = NTP_PHASE_LIMIT;
117 time_esterror = NTP_PHASE_LIMIT; 174 time_esterror = NTP_PHASE_LIMIT;
118 175
119 ntp_update_frequency(); 176 ntp_update_frequency();
120 177
121 tick_length = tick_length_base; 178 tick_length = tick_length_base;
122 time_offset = 0; 179 time_offset = 0;
123} 180}
124 181
125/* 182/*
@@ -140,8 +197,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 		xtime.tv_sec--;
 		wall_to_monotonic.tv_sec++;
 		time_state = TIME_OOP;
-		printk(KERN_NOTICE "Clock: "
-		       "inserting leap second 23:59:60 UTC\n");
+		printk(KERN_NOTICE
+			"Clock: inserting leap second 23:59:60 UTC\n");
 		hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
 		res = HRTIMER_RESTART;
 		break;
@@ -150,8 +207,8 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
 		time_tai--;
 		wall_to_monotonic.tv_sec--;
 		time_state = TIME_WAIT;
-		printk(KERN_NOTICE "Clock: "
-		       "deleting leap second 23:59:59 UTC\n");
+		printk(KERN_NOTICE
+			"Clock: deleting leap second 23:59:59 UTC\n");
 		break;
 	case TIME_OOP:
 		time_tai++;
@@ -179,7 +236,7 @@ static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
  */
 void second_overflow(void)
 {
-	s64 time_adj;
+	s64 delta;
 
 	/* Bump the maxerror field */
 	time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -192,24 +249,30 @@ void second_overflow(void)
 	 * Compute the phase adjustment for the next second. The offset is
 	 * reduced by a fixed factor times the time constant.
 	 */
-	tick_length = tick_length_base;
-	time_adj = shift_right(time_offset, SHIFT_PLL + time_constant);
-	time_offset -= time_adj;
-	tick_length += time_adj;
-
-	if (unlikely(time_adjust)) {
-		if (time_adjust > MAX_TICKADJ) {
-			time_adjust -= MAX_TICKADJ;
-			tick_length += MAX_TICKADJ_SCALED;
-		} else if (time_adjust < -MAX_TICKADJ) {
-			time_adjust += MAX_TICKADJ;
-			tick_length -= MAX_TICKADJ_SCALED;
-		} else {
-			tick_length += (s64)(time_adjust * NSEC_PER_USEC /
-					NTP_INTERVAL_FREQ) << NTP_SCALE_SHIFT;
-			time_adjust = 0;
-		}
-	}
+	tick_length = tick_length_base;
+
+	delta = shift_right(time_offset, SHIFT_PLL + time_constant);
+	time_offset -= delta;
+	tick_length += delta;
+
+	if (!time_adjust)
+		return;
+
+	if (time_adjust > MAX_TICKADJ) {
+		time_adjust -= MAX_TICKADJ;
+		tick_length += MAX_TICKADJ_SCALED;
+		return;
+	}
+
+	if (time_adjust < -MAX_TICKADJ) {
+		time_adjust += MAX_TICKADJ;
+		tick_length -= MAX_TICKADJ_SCALED;
+		return;
+	}
+
+	tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
+							<< NTP_SCALE_SHIFT;
+	time_adjust = 0;
 }
 
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
@@ -233,12 +296,13 @@ static void sync_cmos_clock(struct work_struct *work)
 	 * This code is run on a timer.  If the clock is set, that timer
 	 * may not expire at the correct time.  Thus, we adjust...
 	 */
-	if (!ntp_synced())
+	if (!ntp_synced()) {
 		/*
 		 * Not synced, exit, do not restart a timer (if one is
 		 * running, let it run out).
 		 */
 		return;
+	}
 
 	getnstimeofday(&now);
 	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
@@ -270,7 +334,116 @@ static void notify_cmos_timer(void)
 static inline void notify_cmos_timer(void) { }
 #endif
 
-/* adjtimex mainly allows reading (and writing, if superuser) of
+/*
+ * Start the leap seconds timer:
+ */
+static inline void ntp_start_leap_timer(struct timespec *ts)
+{
+	long now = ts->tv_sec;
+
+	if (time_status & STA_INS) {
+		time_state = TIME_INS;
+		now += 86400 - now % 86400;
+		hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+
+		return;
+	}
+
+	if (time_status & STA_DEL) {
+		time_state = TIME_DEL;
+		now += 86400 - (now + 1) % 86400;
+		hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
+	}
+}
+
+/*
+ * Propagate a new txc->status value into the NTP state:
+ */
+static inline void process_adj_status(struct timex *txc, struct timespec *ts)
+{
+	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
+		time_state = TIME_OK;
+		time_status = STA_UNSYNC;
+	}
+
+	/*
+	 * If we turn on PLL adjustments then reset the
+	 * reference time to current time.
+	 */
+	if (!(time_status & STA_PLL) && (txc->status & STA_PLL))
+		time_reftime = xtime.tv_sec;
+
+	/* only set allowed bits */
+	time_status &= STA_RONLY;
+	time_status |= txc->status & ~STA_RONLY;
+
+	switch (time_state) {
+	case TIME_OK:
+		ntp_start_leap_timer(ts);
+		break;
+	case TIME_INS:
+	case TIME_DEL:
+		time_state = TIME_OK;
+		ntp_start_leap_timer(ts);
+	case TIME_WAIT:
+		if (!(time_status & (STA_INS | STA_DEL)))
+			time_state = TIME_OK;
+		break;
+	case TIME_OOP:
+		hrtimer_restart(&leap_timer);
+		break;
+	}
+}
+/*
+ * Called with the xtime lock held, so we can access and modify
+ * all the global NTP state:
+ */
+static inline void process_adjtimex_modes(struct timex *txc, struct timespec *ts)
+{
+	if (txc->modes & ADJ_STATUS)
+		process_adj_status(txc, ts);
+
+	if (txc->modes & ADJ_NANO)
+		time_status |= STA_NANO;
+
+	if (txc->modes & ADJ_MICRO)
+		time_status &= ~STA_NANO;
+
+	if (txc->modes & ADJ_FREQUENCY) {
+		time_freq = txc->freq * PPM_SCALE;
+		time_freq = min(time_freq, MAXFREQ_SCALED);
+		time_freq = max(time_freq, -MAXFREQ_SCALED);
+	}
+
+	if (txc->modes & ADJ_MAXERROR)
+		time_maxerror = txc->maxerror;
+
+	if (txc->modes & ADJ_ESTERROR)
+		time_esterror = txc->esterror;
+
+	if (txc->modes & ADJ_TIMECONST) {
+		time_constant = txc->constant;
+		if (!(time_status & STA_NANO))
+			time_constant += 4;
+		time_constant = min(time_constant, (long)MAXTC);
+		time_constant = max(time_constant, 0l);
+	}
+
+	if (txc->modes & ADJ_TAI && txc->constant > 0)
+		time_tai = txc->constant;
+
+	if (txc->modes & ADJ_OFFSET)
+		ntp_update_offset(txc->offset);
+
+	if (txc->modes & ADJ_TICK)
+		tick_usec = txc->tick;
+
+	if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
+		ntp_update_frequency();
+}
+
+/*
+ * adjtimex mainly allows reading (and writing, if superuser) of
  * kernel time-keeping variables. used by xntpd.
  */
 int do_adjtimex(struct timex *txc)
@@ -291,11 +464,14 @@ int do_adjtimex(struct timex *txc)
 	if (txc->modes && !capable(CAP_SYS_TIME))
 		return -EPERM;
 
-	/* if the quartz is off by more than 10% something is VERY wrong! */
+	/*
+	 * if the quartz is off by more than 10% then
+	 * something is VERY wrong!
+	 */
 	if (txc->modes & ADJ_TICK &&
 	    (txc->tick <  900000/USER_HZ ||
 	     txc->tick > 1100000/USER_HZ))
 		return -EINVAL;
 
 	if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
 		hrtimer_cancel(&leap_timer);
@@ -305,7 +481,6 @@ int do_adjtimex(struct timex *txc)
 
 	write_seqlock_irq(&xtime_lock);
 
-	/* If there are input parameters, then process them */
 	if (txc->modes & ADJ_ADJTIME) {
 		long save_adjust = time_adjust;
 
@@ -315,98 +490,24 @@ int do_adjtimex(struct timex *txc)
 			ntp_update_frequency();
 		}
 		txc->offset = save_adjust;
-		goto adj_done;
-	}
-	if (txc->modes) {
-		long sec;
-
-		if (txc->modes & ADJ_STATUS) {
-			if ((time_status & STA_PLL) &&
-			    !(txc->status & STA_PLL)) {
-				time_state = TIME_OK;
-				time_status = STA_UNSYNC;
-			}
-			/* only set allowed bits */
-			time_status &= STA_RONLY;
-			time_status |= txc->status & ~STA_RONLY;
-
-			switch (time_state) {
-			case TIME_OK:
-			start_timer:
-				sec = ts.tv_sec;
-				if (time_status & STA_INS) {
-					time_state = TIME_INS;
-					sec += 86400 - sec % 86400;
-					hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
-				} else if (time_status & STA_DEL) {
-					time_state = TIME_DEL;
-					sec += 86400 - (sec + 1) % 86400;
-					hrtimer_start(&leap_timer, ktime_set(sec, 0), HRTIMER_MODE_ABS);
-				}
-				break;
-			case TIME_INS:
-			case TIME_DEL:
-				time_state = TIME_OK;
-				goto start_timer;
-				break;
-			case TIME_WAIT:
-				if (!(time_status & (STA_INS | STA_DEL)))
-					time_state = TIME_OK;
-				break;
-			case TIME_OOP:
-				hrtimer_restart(&leap_timer);
-				break;
-			}
-		}
-
-		if (txc->modes & ADJ_NANO)
-			time_status |= STA_NANO;
-		if (txc->modes & ADJ_MICRO)
-			time_status &= ~STA_NANO;
-
-		if (txc->modes & ADJ_FREQUENCY) {
-			time_freq = (s64)txc->freq * PPM_SCALE;
-			time_freq = min(time_freq, MAXFREQ_SCALED);
-			time_freq = max(time_freq, -MAXFREQ_SCALED);
-		}
-
-		if (txc->modes & ADJ_MAXERROR)
-			time_maxerror = txc->maxerror;
-		if (txc->modes & ADJ_ESTERROR)
-			time_esterror = txc->esterror;
-
-		if (txc->modes & ADJ_TIMECONST) {
-			time_constant = txc->constant;
-			if (!(time_status & STA_NANO))
-				time_constant += 4;
-			time_constant = min(time_constant, (long)MAXTC);
-			time_constant = max(time_constant, 0l);
-		}
-
-		if (txc->modes & ADJ_TAI && txc->constant > 0)
-			time_tai = txc->constant;
-
-		if (txc->modes & ADJ_OFFSET)
-			ntp_update_offset(txc->offset);
-		if (txc->modes & ADJ_TICK)
-			tick_usec = txc->tick;
+	} else {
 
-		if (txc->modes & (ADJ_TICK|ADJ_FREQUENCY|ADJ_OFFSET))
-			ntp_update_frequency();
-	}
+		/* If there are input parameters, then process them: */
+		if (txc->modes)
+			process_adjtimex_modes(txc, &ts);
 
-	txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
-			NTP_SCALE_SHIFT);
-	if (!(time_status & STA_NANO))
-		txc->offset /= NSEC_PER_USEC;
+		txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
+					  NTP_SCALE_SHIFT);
+		if (!(time_status & STA_NANO))
+			txc->offset /= NSEC_PER_USEC;
+	}
 
-adj_done:
 	result = time_state;	/* mostly `TIME_OK' */
 	if (time_status & (STA_UNSYNC|STA_CLOCKERR))
 		result = TIME_ERROR;
 
 	txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
-				(s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
+				PPM_SCALE_INV, NTP_SCALE_SHIFT);
 	txc->maxerror = time_maxerror;
 	txc->esterror = time_esterror;
 	txc->status = time_status;
@@ -425,6 +526,7 @@ adj_done:
 	txc->calcnt = 0;
 	txc->errcnt = 0;
 	txc->stbcnt = 0;
+
 	write_sequnlock_irq(&xtime_lock);
 
 	txc->time.tv_sec = ts.tv_sec;
@@ -440,6 +542,8 @@ adj_done:
 static int __init ntp_tick_adj_setup(char *str)
 {
 	ntp_tick_adj = simple_strtol(str, NULL, 0);
+	ntp_tick_adj <<= NTP_SCALE_SHIFT;
+
 	return 1;
 }
 
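
Note on scale, for the second_overflow() rework above: time_adjust is
consumed at most MAX_TICKADJ = 500 usecs per second, so slewing a full
1-second adjtime() offset takes 1,000,000 / 500 = 2000 seconds.
MAX_TICKADJ_SCALED is that same 500 usecs converted to the scaled per-tick
units of tick_length:

	(MAX_TICKADJ * NSEC_PER_USEC << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ

and the new LL suffix on MAX_TICKADJ takes over the job of the old (u64)
cast: 500 * 1000 shifted left by NTP_SCALE_SHIFT (32) would overflow a
32-bit constant expression without it.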
diff --git a/kernel/timer.c b/kernel/timer.c
index 13dd64fe143d..9b77fc9a9ac8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -589,11 +589,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 	}
 }
 
-int __mod_timer(struct timer_list *timer, unsigned long expires)
+static inline int
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
 	struct tvec_base *base, *new_base;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
+
+	ret = 0;
 
 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(!timer->function);
@@ -603,6 +606,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer_pending(timer)) {
 		detach_timer(timer, 0);
 		ret = 1;
+	} else {
+		if (pending_only)
+			goto out_unlock;
 	}
 
 	debug_timer_activate(timer);
@@ -629,42 +635,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 
 	timer->expires = expires;
 	internal_add_timer(base, timer);
+
+out_unlock:
 	spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
 }
 
-EXPORT_SYMBOL(__mod_timer);
-
 /**
- * add_timer_on - start a timer on a particular CPU
- * @timer: the timer to be added
- * @cpu: the CPU to start it on
+ * mod_timer_pending - modify a pending timer's timeout
+ * @timer: the pending timer to be modified
+ * @expires: new timeout in jiffies
  *
- * This is not very scalable on SMP. Double adds are not possible.
+ * mod_timer_pending() is the same for pending timers as mod_timer(),
+ * but will not re-activate and modify already deleted timers.
+ *
+ * It is useful for unserialized use of timers.
  */
-void add_timer_on(struct timer_list *timer, int cpu)
+int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	struct tvec_base *base = per_cpu(tvec_bases, cpu);
-	unsigned long flags;
-
-	timer_stats_timer_set_start_info(timer);
-	BUG_ON(timer_pending(timer) || !timer->function);
-	spin_lock_irqsave(&base->lock, flags);
-	timer_set_base(timer, base);
-	debug_timer_activate(timer);
-	internal_add_timer(base, timer);
-	/*
-	 * Check whether the other CPU is idle and needs to be
-	 * triggered to reevaluate the timer wheel when nohz is
-	 * active. We are protected against the other CPU fiddling
-	 * with the timer by holding the timer base lock. This also
-	 * makes sure that a CPU on the way to idle can not evaluate
-	 * the timer wheel.
-	 */
-	wake_up_idle_cpu(cpu);
-	spin_unlock_irqrestore(&base->lock, flags);
+	return __mod_timer(timer, expires, true);
 }
+EXPORT_SYMBOL(mod_timer_pending);
 
 /**
  * mod_timer - modify a timer's timeout
@@ -688,9 +680,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	BUG_ON(!timer->function);
-
-	timer_stats_timer_set_start_info(timer);
 	/*
 	 * This is a common optimization triggered by the
 	 * networking code - if the timer is re-modified
@@ -699,12 +688,62 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer->expires == expires && timer_pending(timer))
 		return 1;
 
-	return __mod_timer(timer, expires);
+	return __mod_timer(timer, expires, false);
 }
-
 EXPORT_SYMBOL(mod_timer);
 
 /**
+ * add_timer - start a timer
+ * @timer: the timer to be added
+ *
+ * The kernel will do a ->function(->data) callback from the
+ * timer interrupt at the ->expires point in the future. The
+ * current time is 'jiffies'.
+ *
+ * The timer's ->expires, ->function (and if the handler uses it, ->data)
+ * fields must be set prior calling this function.
+ *
+ * Timers with an ->expires field in the past will be executed in the next
+ * timer tick.
+ */
+void add_timer(struct timer_list *timer)
+{
+	BUG_ON(timer_pending(timer));
+	mod_timer(timer, timer->expires);
+}
+EXPORT_SYMBOL(add_timer);
+
+/**
+ * add_timer_on - start a timer on a particular CPU
+ * @timer: the timer to be added
+ * @cpu: the CPU to start it on
+ *
+ * This is not very scalable on SMP. Double adds are not possible.
+ */
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+	struct tvec_base *base = per_cpu(tvec_bases, cpu);
+	unsigned long flags;
+
+	timer_stats_timer_set_start_info(timer);
+	BUG_ON(timer_pending(timer) || !timer->function);
+	spin_lock_irqsave(&base->lock, flags);
+	timer_set_base(timer, base);
+	debug_timer_activate(timer);
+	internal_add_timer(base, timer);
+	/*
+	 * Check whether the other CPU is idle and needs to be
+	 * triggered to reevaluate the timer wheel when nohz is
+	 * active. We are protected against the other CPU fiddling
+	 * with the timer by holding the timer base lock. This also
+	 * makes sure that a CPU on the way to idle can not evaluate
+	 * the timer wheel.
+	 */
+	wake_up_idle_cpu(cpu);
+	spin_unlock_irqrestore(&base->lock, flags);
+}
+
+/**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
  *
@@ -733,7 +772,6 @@ int del_timer(struct timer_list *timer)
 
 	return ret;
 }
-
 EXPORT_SYMBOL(del_timer);
 
 #ifdef CONFIG_SMP
@@ -767,7 +805,6 @@ out:
 
 	return ret;
 }
-
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
 /**
@@ -796,7 +833,6 @@ int del_timer_sync(struct timer_list *timer)
 		cpu_relax();
 	}
 }
-
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
@@ -1268,7 +1304,7 @@ signed long __sched schedule_timeout(signed long timeout)
 	expire = timeout + jiffies;
 
 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire, false);
+	__mod_timer(&timer, expire, false);
 	schedule();
 	del_singleshot_timer_sync(&timer);
 
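
Note: with __mod_timer() now static, schedule_timeout() above is the only
caller left that passes pending_only explicitly (false: always activate);
everything else goes through mod_timer()/mod_timer_pending(). For
reference, a minimal sleep built on this path (standard kernel API, assumes
process context):

	set_current_state(TASK_INTERRUPTIBLE);

	/* sleeps ~100ms unless woken early by a signal or wake_up_process() */
	schedule_timeout(msecs_to_jiffies(100));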