author     Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:22:46 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-12-11 21:22:46 -0500
commit     b64c5fda3868cb29d5dae0909561aa7d93fb7330 (patch)
tree       2ac4be822f32fe5a8e8f33138be81b221ff52384 /kernel
parent     f57d54bab696133fae569c5f01352249c36fc74f (diff)
parent     9c3f9e281697d02889c3b08922f3b30be75f56c2 (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull core timer changes from Ingo Molnar:
 "It contains continued generic-NOHZ work by Frederic and smaller
  cleanups."

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  time: Kill xtime_lock, replacing it with jiffies_lock
  clocksource: arm_generic: use this_cpu_ptr per-cpu helper
  clocksource: arm_generic: use integer math helpers
  time/jiffies: Make clocksource_jiffies static
  clocksource: clean up parse_pmtmr()
  tick: Correct the comments for tick_sched_timer()
  tick: Conditionally build nohz specific code in tick handler
  tick: Consolidate tick handling for high and low res handlers
  tick: Consolidate timekeeping handling code
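The headline change is "time: Kill xtime_lock, replacing it with jiffies_lock": the
seqlock guarding jiffies_64 moves out of kernel/time/timekeeping.c into
kernel/time/jiffies.c under a name that reflects what it actually protects. For
reference, below is a minimal, self-contained model of the seqlock read/write
protocol these call sites rely on, built on C11 atomics. It is an illustrative
sketch, not the kernel code: the *_sketch helpers are hypothetical stand-ins for
read_seqbegin()/read_seqretry()/write_seqlock(), and real seqlocks additionally
pair the counter with a spinlock to serialize concurrent writers.

/*
 * seqlock_sketch.c - simplified model of the jiffies_lock pattern.
 * Readers sample a sequence number before and after reading and
 * retry if a writer ran in between; writers bump the sequence on
 * entry and exit, so an odd value means "write in progress".
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint seq;   /* even: quiescent; odd: write in progress */
static uint64_t value64;  /* stands in for jiffies_64 */

/* Reader entry: wait until no write is in flight, return the sequence. */
static unsigned read_seqbegin_sketch(void)
{
	unsigned s;
	while ((s = atomic_load_explicit(&seq, memory_order_acquire)) & 1u)
		; /* spin while a writer holds the lock */
	return s;
}

/* Reader exit: nonzero means a writer intervened and we must retry. */
static int read_seqretry_sketch(unsigned start)
{
	return atomic_load_explicit(&seq, memory_order_acquire) != start;
}

/* Writer entry/exit: each bumps the sequence, toggling its parity. */
static void write_seqlock_sketch(void)
{
	atomic_fetch_add_explicit(&seq, 1u, memory_order_acq_rel);
}

static void write_sequnlock_sketch(void)
{
	atomic_fetch_add_explicit(&seq, 1u, memory_order_acq_rel);
}

int main(void)
{
	unsigned s;
	uint64_t snapshot;

	/* Write side, as in xtime_update()/tick_periodic(). */
	write_seqlock_sketch();
	value64++;              /* analogous to do_timer(1) bumping jiffies_64 */
	write_sequnlock_sketch();

	/* Read side, as in get_jiffies_64(). */
	do {
		s = read_seqbegin_sketch();
		snapshot = value64;
	} while (read_seqretry_sketch(s));

	printf("consistent snapshot: %llu\n", (unsigned long long)snapshot);
	return 0;
}

The retry loop is why get_jiffies_64() can return an untorn 64-bit snapshot even
on 32-bit machines, where a plain load of jiffies_64 could tear, and why every
writer in the diff below (tick_periodic(), tick_do_update_jiffies64(),
xtime_update()) must take write_seqlock()/write_sequnlock() on jiffies_lock.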
Diffstat (limited to 'kernel')
 -rw-r--r--  kernel/time/jiffies.c        |   8
 -rw-r--r--  kernel/time/tick-common.c    |   8
 -rw-r--r--  kernel/time/tick-internal.h  |   1
 -rw-r--r--  kernel/time/tick-sched.c     | 131
 -rw-r--r--  kernel/time/timekeeping.c    |  14
 5 files changed, 70 insertions, 92 deletions
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 6629bf7b5285..7a925ba456fb 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -58,7 +58,7 @@ static cycle_t jiffies_read(struct clocksource *cs)
 	return (cycle_t) jiffies;
 }
 
-struct clocksource clocksource_jiffies = {
+static struct clocksource clocksource_jiffies = {
 	.name = "jiffies",
 	.rating = 1, /* lowest valid rating*/
 	.read = jiffies_read,
@@ -67,6 +67,8 @@ struct clocksource clocksource_jiffies = {
 	.shift = JIFFIES_SHIFT,
 };
 
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock);
+
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void)
 {
@@ -74,9 +76,9 @@ u64 get_jiffies_64(void)
 	u64 ret;
 
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&jiffies_lock);
 		ret = jiffies_64;
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&jiffies_lock, seq));
 	return ret;
 }
 EXPORT_SYMBOL(get_jiffies_64);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index da6c9ecad4e4..b1600a6973f4 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -63,13 +63,13 @@ int tick_is_oneshot_available(void)
 static void tick_periodic(int cpu)
 {
 	if (tick_do_timer_cpu == cpu) {
-		write_seqlock(&xtime_lock);
+		write_seqlock(&jiffies_lock);
 
 		/* Keep track of the next tick event */
 		tick_next_period = ktime_add(tick_next_period, tick_period);
 
 		do_timer(1);
-		write_sequnlock(&xtime_lock);
+		write_sequnlock(&jiffies_lock);
 	}
 
 	update_process_times(user_mode(get_irq_regs()));
@@ -130,9 +130,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 		ktime_t next;
 
 		do {
-			seq = read_seqbegin(&xtime_lock);
+			seq = read_seqbegin(&jiffies_lock);
 			next = tick_next_period;
-		} while (read_seqretry(&xtime_lock, seq));
+		} while (read_seqretry(&jiffies_lock, seq));
 
 		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
 
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 4e265b901fed..cf3e59ed6dc0 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -141,4 +141,3 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
 #endif
 
 extern void do_timer(unsigned long ticks);
-extern seqlock_t xtime_lock;
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 80e061a9ed09..d58e552d9fd1 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -31,7 +31,7 @@
 static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 /*
- * The time, when the last jiffy update happened. Protected by xtime_lock.
+ * The time, when the last jiffy update happened. Protected by jiffies_lock.
  */
 static ktime_t last_jiffies_update;
 
@@ -49,14 +49,14 @@ static void tick_do_update_jiffies64(ktime_t now)
 	ktime_t delta;
 
 	/*
-	 * Do a quick check without holding xtime_lock:
+	 * Do a quick check without holding jiffies_lock:
 	 */
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta.tv64 < tick_period.tv64)
 		return;
 
-	/* Reevalute with xtime_lock held */
-	write_seqlock(&xtime_lock);
+	/* Reevalute with jiffies_lock held */
+	write_seqlock(&jiffies_lock);
 
 	delta = ktime_sub(now, last_jiffies_update);
 	if (delta.tv64 >= tick_period.tv64) {
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(ktime_t now)
 		/* Keep the tick_next_period variable up to date */
 		tick_next_period = ktime_add(last_jiffies_update, tick_period);
 	}
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 }
 
 /*
@@ -89,15 +89,58 @@ static ktime_t tick_init_jiffy_update(void)
 {
 	ktime_t period;
 
-	write_seqlock(&xtime_lock);
+	write_seqlock(&jiffies_lock);
 	/* Did we start the jiffies update yet ? */
 	if (last_jiffies_update.tv64 == 0)
 		last_jiffies_update = tick_next_period;
 	period = last_jiffies_update;
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 	return period;
 }
 
+
+static void tick_sched_do_timer(ktime_t now)
+{
+	int cpu = smp_processor_id();
+
+#ifdef CONFIG_NO_HZ
+	/*
+	 * Check if the do_timer duty was dropped. We don't care about
+	 * concurrency: This happens only when the cpu in charge went
+	 * into a long sleep. If two cpus happen to assign themself to
+	 * this duty, then the jiffies update is still serialized by
+	 * jiffies_lock.
+	 */
+	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
+		tick_do_timer_cpu = cpu;
+#endif
+
+	/* Check, if the jiffies need an update */
+	if (tick_do_timer_cpu == cpu)
+		tick_do_update_jiffies64(now);
+}
+
+static void tick_sched_handle(struct tick_sched *ts, struct pt_regs *regs)
+{
+#ifdef CONFIG_NO_HZ
+	/*
+	 * When we are idle and the tick is stopped, we have to touch
+	 * the watchdog as we might not schedule for a really long
+	 * time. This happens on complete idle SMP systems while
+	 * waiting on the login prompt. We also increment the "start of
+	 * idle" jiffy stamp so the idle accounting adjustment we do
+	 * when we go busy again does not account too much ticks.
+	 */
+	if (ts->tick_stopped) {
+		touch_softlockup_watchdog();
+		if (is_idle_task(current))
+			ts->idle_jiffies++;
+	}
+#endif
+	update_process_times(user_mode(regs));
+	profile_tick(CPU_PROFILING);
+}
+
 /*
  * NOHZ - aka dynamic tick functionality
  */
@@ -282,11 +325,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 
 	/* Read jiffies and the time when jiffies were updated last */
 	do {
-		seq = read_seqbegin(&xtime_lock);
+		seq = read_seqbegin(&jiffies_lock);
 		last_update = last_jiffies_update;
 		last_jiffies = jiffies;
 		time_delta = timekeeping_max_deferment();
-	} while (read_seqretry(&xtime_lock, seq));
+	} while (read_seqretry(&jiffies_lock, seq));
 
 	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
 	    arch_needs_cpu(cpu)) {
@@ -652,40 +695,12 @@ static void tick_nohz_handler(struct clock_event_device *dev)
 {
 	struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
 	struct pt_regs *regs = get_irq_regs();
-	int cpu = smp_processor_id();
 	ktime_t now = ktime_get();
 
 	dev->next_event.tv64 = KTIME_MAX;
 
-	/*
-	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themself to
-	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
-	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
-		tick_do_timer_cpu = cpu;
-
-	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
-		tick_do_update_jiffies64(now);
-
-	/*
-	 * When we are idle and the tick is stopped, we have to touch
-	 * the watchdog as we might not schedule for a really long
-	 * time. This happens on complete idle SMP systems while
-	 * waiting on the login prompt. We also increment the "start
-	 * of idle" jiffy stamp so the idle accounting adjustment we
-	 * do when we go busy again does not account too much ticks.
-	 */
-	if (ts->tick_stopped) {
-		touch_softlockup_watchdog();
-		ts->idle_jiffies++;
-	}
-
-	update_process_times(user_mode(regs));
-	profile_tick(CPU_PROFILING);
+	tick_sched_do_timer(now);
+	tick_sched_handle(ts, regs);
 
 	while (tick_nohz_reprogram(ts, now)) {
 		now = ktime_get();
@@ -806,45 +821,15 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
 		container_of(timer, struct tick_sched, sched_timer);
 	struct pt_regs *regs = get_irq_regs();
 	ktime_t now = ktime_get();
-	int cpu = smp_processor_id();
 
-#ifdef CONFIG_NO_HZ
-	/*
-	 * Check if the do_timer duty was dropped. We don't care about
-	 * concurrency: This happens only when the cpu in charge went
-	 * into a long sleep. If two cpus happen to assign themself to
-	 * this duty, then the jiffies update is still serialized by
-	 * xtime_lock.
-	 */
-	if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
-		tick_do_timer_cpu = cpu;
-#endif
-
-	/* Check, if the jiffies need an update */
-	if (tick_do_timer_cpu == cpu)
-		tick_do_update_jiffies64(now);
+	tick_sched_do_timer(now);
 
 	/*
 	 * Do not call, when we are not in irq context and have
 	 * no valid regs pointer
 	 */
-	if (regs) {
-		/*
-		 * When we are idle and the tick is stopped, we have to touch
-		 * the watchdog as we might not schedule for a really long
-		 * time. This happens on complete idle SMP systems while
-		 * waiting on the login prompt. We also increment the "start of
-		 * idle" jiffy stamp so the idle accounting adjustment we do
-		 * when we go busy again does not account too much ticks.
-		 */
-		if (ts->tick_stopped) {
-			touch_softlockup_watchdog();
-			if (is_idle_task(current))
-				ts->idle_jiffies++;
-		}
-		update_process_times(user_mode(regs));
-		profile_tick(CPU_PROFILING);
-	}
+	if (regs)
+		tick_sched_handle(ts, regs);
 
 	hrtimer_forward(timer, now, tick_period);
 
@@ -878,7 +863,7 @@ void tick_setup_sched_timer(void)
 	/* Get the next period (per cpu) */
 	hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
 
-	/* Offset the tick to avert xtime_lock contention. */
+	/* Offset the tick to avert jiffies_lock contention. */
 	if (sched_skew_tick) {
 		u64 offset = ktime_to_ns(tick_period) >> 1;
 		do_div(offset, num_possible_cpus());
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e424970bb562..4c7de02eacdc 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -25,12 +25,6 @@
 
 static struct timekeeper timekeeper;
 
-/*
- * This read-write spinlock protects us from races in SMP while
- * playing with xtime.
- */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
@@ -1299,9 +1293,7 @@ struct timespec get_monotonic_coarse(void)
 }
 
 /*
- * The 64-bit jiffies value is not atomic - you MUST NOT read it
- * without sampling the sequence number in xtime_lock.
- * jiffies is defined in the linker script...
+ * Must hold jiffies_lock
  */
 void do_timer(unsigned long ticks)
 {
@@ -1389,7 +1381,7 @@ EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
  */
 void xtime_update(unsigned long ticks)
 {
-	write_seqlock(&xtime_lock);
+	write_seqlock(&jiffies_lock);
 	do_timer(ticks);
-	write_sequnlock(&xtime_lock);
+	write_sequnlock(&jiffies_lock);
 }