Diffstat (limited to 'kernel/timer.c')

 kernel/timer.c | 141 ++++++++++++++-----------------
 1 file changed, 56 insertions(+), 85 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index cffffad01c31..54d3912f8cad 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -37,6 +37,8 @@
 #include <linux/delay.h>
 #include <linux/tick.h>
 #include <linux/kallsyms.h>
+#include <linux/perf_counter.h>
+#include <linux/sched.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
@@ -604,13 +606,12 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 }
 
 static inline int
-__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
+__mod_timer(struct timer_list *timer, unsigned long expires,
+						bool pending_only, int pinned)
 {
 	struct tvec_base *base, *new_base;
 	unsigned long flags;
-	int ret;
-
-	ret = 0;
+	int ret = 0, cpu;
 
 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(!timer->function);
@@ -629,6 +630,18 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 
 	new_base = __get_cpu_var(tvec_bases);
 
+	cpu = smp_processor_id();
+
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
+		int preferred_cpu = get_nohz_load_balancer();
+
+		if (preferred_cpu >= 0)
+			cpu = preferred_cpu;
+	}
+#endif
+	new_base = per_cpu(tvec_bases, cpu);
+
 	if (base != new_base) {
 		/*
 		 * We are trying to schedule the timer on the local CPU.
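
The hunk above is the core of the patch: a timer being (re)armed no longer lands unconditionally on the local CPU. Distilled into a standalone sketch (pick_timer_cpu() is a hypothetical helper, not part of the patch), the placement decision is:

/*
 * Sketch only: mirrors the logic added to __mod_timer() above. Keep the
 * timer on the current CPU unless timer migration is enabled via sysctl,
 * the caller did not pin the timer, and this CPU is idle while another
 * CPU is acting as the NO_HZ load balancer.
 */
static int pick_timer_cpu(int pinned)
{
	int cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu)) {
		int preferred_cpu = get_nohz_load_balancer();

		if (preferred_cpu >= 0)
			cpu = preferred_cpu;
	}
#endif
	return cpu;
}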
@@ -668,7 +681,7 @@ out_unlock:
  */
 int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	return __mod_timer(timer, expires, true);
+	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
 }
 EXPORT_SYMBOL(mod_timer_pending);
 
@@ -702,11 +715,33 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer->expires == expires && timer_pending(timer))
 		return 1;
 
-	return __mod_timer(timer, expires, false);
+	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
 }
 EXPORT_SYMBOL(mod_timer);
 
 /**
+ * mod_timer_pinned - modify a timer's timeout
+ * @timer: the timer to be modified
+ * @expires: new timeout in jiffies
+ *
+ * mod_timer_pinned() is a way to update the expire field of an
+ * active timer (if the timer is inactive it will be activated)
+ * and not allow the timer to be migrated to a different CPU.
+ *
+ * mod_timer_pinned(timer, expires) is equivalent to:
+ *
+ *     del_timer(timer); timer->expires = expires; add_timer(timer);
+ */
+int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
+{
+	if (timer->expires == expires && timer_pending(timer))
+		return 1;
+
+	return __mod_timer(timer, expires, false, TIMER_PINNED);
+}
+EXPORT_SYMBOL(mod_timer_pinned);
+
+/**
  * add_timer - start a timer
  * @timer: the timer to be added
  *
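
The kernel-doc above describes the new pinned variant. A minimal usage sketch, assuming a driver with a periodic timer (my_timer, my_timer_fn and MY_PERIOD are hypothetical names, using this era's unsigned long callback signature):

#define MY_PERIOD	(HZ / 10)	/* hypothetical 100ms period */

static struct timer_list my_timer;

static void my_timer_fn(unsigned long data)
{
	/* re-arm without giving the migration logic a chance to move us */
	mod_timer_pinned(&my_timer, jiffies + MY_PERIOD);
}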
@@ -756,6 +791,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
 	wake_up_idle_cpu(cpu);
 	spin_unlock_irqrestore(&base->lock, flags);
 }
+EXPORT_SYMBOL_GPL(add_timer_on);
 
 /**
  * del_timer - deactive a timer.
@@ -1015,6 +1051,9 @@ cascade:
 		index = slot = timer_jiffies & TVN_MASK;
 		do {
 			list_for_each_entry(nte, varp->vec + slot, entry) {
+				if (tbase_get_deferrable(nte->base))
+					continue;
+
 				found = 1;
 				if (time_before(nte->expires, expires))
 					expires = nte->expires;
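
For context, the tbase_get_deferrable() test added above relies on the deferrable flag living in the low bit of the (aligned) tvec_base pointer. A sketch of how that flag is encoded, matching the pre-existing convention in kernel/timer.c of this era (illustrative, not part of the patch):

#define TBASE_DEFERRABLE_FLAG		(0x1)

/* bit 0 of the base pointer doubles as the deferrable flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}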
@@ -1123,53 +1162,14 @@ void update_process_times(int user_tick)
 }
 
 /*
- * Nr of active tasks - counted in fixed-point numbers
- */
-static unsigned long count_active_tasks(void)
-{
-	return nr_active() * FIXED_1;
-}
-
-/*
- * Hmm.. Changed this, as the GNU make sources (load.c) seems to
- * imply that avenrun[] is the standard name for this kind of thing.
- * Nothing else seems to be standardized: the fractional size etc
- * all seem to differ on different machines.
- *
- * Requires xtime_lock to access.
- */
-unsigned long avenrun[3];
-
-EXPORT_SYMBOL(avenrun);
-
-/*
- * calc_load - given tick count, update the avenrun load estimates.
- * This is called while holding a write_lock on xtime_lock.
- */
-static inline void calc_load(unsigned long ticks)
-{
-	unsigned long active_tasks; /* fixed-point */
-	static int count = LOAD_FREQ;
-
-	count -= ticks;
-	if (unlikely(count < 0)) {
-		active_tasks = count_active_tasks();
-		do {
-			CALC_LOAD(avenrun[0], EXP_1, active_tasks);
-			CALC_LOAD(avenrun[1], EXP_5, active_tasks);
-			CALC_LOAD(avenrun[2], EXP_15, active_tasks);
-			count += LOAD_FREQ;
-		} while (count < 0);
-	}
-}
-
-/*
  * This function runs timers and the timer-tq in bottom half context.
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
 	struct tvec_base *base = __get_cpu_var(tvec_bases);
 
+	perf_counter_do_pending();
+
 	hrtimer_run_pending();
 
 	if (time_after_eq(jiffies, base->timer_jiffies))
@@ -1187,16 +1187,6 @@ void run_local_timers(void)
 }
 
 /*
- * Called by the timer interrupt. xtime_lock must already be taken
- * by the timer IRQ!
- */
-static inline void update_times(unsigned long ticks)
-{
-	update_wall_time();
-	calc_load(ticks);
-}
-
-/*
  * The 64-bit jiffies value is not atomic - you MUST NOT read it
  * without sampling the sequence number in xtime_lock.
  * jiffies is defined in the linker script...
@@ -1205,7 +1195,8 @@ static inline void update_times(unsigned long ticks)
 void do_timer(unsigned long ticks)
 {
 	jiffies_64 += ticks;
-	update_times(ticks);
+	update_wall_time();
+	calc_global_load();
 }
 
 #ifdef __ARCH_WANT_SYS_ALARM
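
With calc_load() and avenrun[] gone from this file, do_timer() now only advances the wall clock and kicks the global load accounting; consumers read the load averages through get_avenrun() rather than poking avenrun[] under xtime_lock. Roughly how a reader such as /proc/loadavg consumes it (illustrative sketch, not part of this patch):

unsigned long avnrun[3];

/* offset FIXED_1/200 rounds to the nearest 1/100, shift 0 keeps FSHIFT */
get_avenrun(avnrun, FIXED_1/200, 0);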
@@ -1353,7 +1344,7 @@ signed long __sched schedule_timeout(signed long timeout)
 	expire = timeout + jiffies;
 
 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire, false);
+	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
 	schedule();
 	del_singleshot_timer_sync(&timer);
 
@@ -1406,37 +1397,17 @@ int do_sysinfo(struct sysinfo *info)
 {
 	unsigned long mem_total, sav_total;
 	unsigned int mem_unit, bitcount;
-	unsigned long seq;
+	struct timespec tp;
 
 	memset(info, 0, sizeof(struct sysinfo));
 
-	do {
-		struct timespec tp;
-		seq = read_seqbegin(&xtime_lock);
-
-		/*
-		 * This is annoying. The below is the same thing
-		 * posix_get_clock_monotonic() does, but it wants to
-		 * take the lock which we want to cover the loads stuff
-		 * too.
-		 */
-
-		getnstimeofday(&tp);
-		tp.tv_sec += wall_to_monotonic.tv_sec;
-		tp.tv_nsec += wall_to_monotonic.tv_nsec;
-		monotonic_to_bootbased(&tp);
-		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
-			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
-			tp.tv_sec++;
-		}
-		info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
+	ktime_get_ts(&tp);
+	monotonic_to_bootbased(&tp);
+	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
 
-		info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
-		info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
-		info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
+	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
 
-		info->procs = nr_threads;
-	} while (read_seqretry(&xtime_lock, seq));
+	info->procs = nr_threads;
 
 	si_meminfo(info);
 	si_swapinfo(info);
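
One subtlety the simplified do_sysinfo() keeps intact: uptime is rounded up to the next whole second whenever any nanoseconds remain. A tiny sketch of that rounding:

struct timespec tp = { .tv_sec = 41, .tv_nsec = 500000000 };
unsigned long uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);	/* = 42 */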