diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2012-11-21 14:31:52 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2012-11-21 14:31:52 -0500 |
commit | 9c3f9e281697d02889c3b08922f3b30be75f56c2 (patch) | |
tree | e9f3d68e0019a47b982e2b8644f70eb66a3eff3c /kernel/time/tick-sched.c | |
parent | b8f61116c1ce342804a0897b0a80eb4df5f19453 (diff) | |
parent | d6ad418763888f617ac5b4849823e4cd670df1dd (diff) |
Merge branch 'fortglx/3.8/time' of git://git.linaro.org/people/jstultz/linux into timers/core
Fix trivial conflicts in: kernel/time/tick-sched.c
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/time/tick-sched.c')
-rw-r--r-- | kernel/time/tick-sched.c | 22 |
1 file changed, 11 insertions, 11 deletions
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 77729cc3750b..c96fd6a7625c 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -31,7 +31,7 @@ | |||
31 | static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); | 31 | static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched); |
32 | 32 | ||
33 | /* | 33 | /* |
34 | * The time, when the last jiffy update happened. Protected by xtime_lock. | 34 | * The time, when the last jiffy update happened. Protected by jiffies_lock. |
35 | */ | 35 | */ |
36 | static ktime_t last_jiffies_update; | 36 | static ktime_t last_jiffies_update; |
37 | 37 | ||
@@ -49,14 +49,14 @@ static void tick_do_update_jiffies64(ktime_t now) | |||
49 | ktime_t delta; | 49 | ktime_t delta; |
50 | 50 | ||
51 | /* | 51 | /* |
52 | * Do a quick check without holding xtime_lock: | 52 | * Do a quick check without holding jiffies_lock: |
53 | */ | 53 | */ |
54 | delta = ktime_sub(now, last_jiffies_update); | 54 | delta = ktime_sub(now, last_jiffies_update); |
55 | if (delta.tv64 < tick_period.tv64) | 55 | if (delta.tv64 < tick_period.tv64) |
56 | return; | 56 | return; |
57 | 57 | ||
58 | /* Reevalute with xtime_lock held */ | 58 | /* Reevalute with jiffies_lock held */ |
59 | write_seqlock(&xtime_lock); | 59 | write_seqlock(&jiffies_lock); |
60 | 60 | ||
61 | delta = ktime_sub(now, last_jiffies_update); | 61 | delta = ktime_sub(now, last_jiffies_update); |
62 | if (delta.tv64 >= tick_period.tv64) { | 62 | if (delta.tv64 >= tick_period.tv64) { |
@@ -79,7 +79,7 @@ static void tick_do_update_jiffies64(ktime_t now) | |||
79 | /* Keep the tick_next_period variable up to date */ | 79 | /* Keep the tick_next_period variable up to date */ |
80 | tick_next_period = ktime_add(last_jiffies_update, tick_period); | 80 | tick_next_period = ktime_add(last_jiffies_update, tick_period); |
81 | } | 81 | } |
82 | write_sequnlock(&xtime_lock); | 82 | write_sequnlock(&jiffies_lock); |
83 | } | 83 | } |
84 | 84 | ||
85 | /* | 85 | /* |
@@ -89,12 +89,12 @@ static ktime_t tick_init_jiffy_update(void) | |||
89 | { | 89 | { |
90 | ktime_t period; | 90 | ktime_t period; |
91 | 91 | ||
92 | write_seqlock(&xtime_lock); | 92 | write_seqlock(&jiffies_lock); |
93 | /* Did we start the jiffies update yet ? */ | 93 | /* Did we start the jiffies update yet ? */ |
94 | if (last_jiffies_update.tv64 == 0) | 94 | if (last_jiffies_update.tv64 == 0) |
95 | last_jiffies_update = tick_next_period; | 95 | last_jiffies_update = tick_next_period; |
96 | period = last_jiffies_update; | 96 | period = last_jiffies_update; |
97 | write_sequnlock(&xtime_lock); | 97 | write_sequnlock(&jiffies_lock); |
98 | return period; | 98 | return period; |
99 | } | 99 | } |
100 | 100 | ||
@@ -109,7 +109,7 @@ static void tick_sched_do_timer(ktime_t now) | |||
109 | * concurrency: This happens only when the cpu in charge went | 109 | * concurrency: This happens only when the cpu in charge went |
110 | * into a long sleep. If two cpus happen to assign themself to | 110 | * into a long sleep. If two cpus happen to assign themself to |
111 | * this duty, then the jiffies update is still serialized by | 111 | * this duty, then the jiffies update is still serialized by |
112 | * xtime_lock. | 112 | * jiffies_lock. |
113 | */ | 113 | */ |
114 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) | 114 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) |
115 | tick_do_timer_cpu = cpu; | 115 | tick_do_timer_cpu = cpu; |
@@ -325,11 +325,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, | |||
325 | 325 | ||
326 | /* Read jiffies and the time when jiffies were updated last */ | 326 | /* Read jiffies and the time when jiffies were updated last */ |
327 | do { | 327 | do { |
328 | seq = read_seqbegin(&xtime_lock); | 328 | seq = read_seqbegin(&jiffies_lock); |
329 | last_update = last_jiffies_update; | 329 | last_update = last_jiffies_update; |
330 | last_jiffies = jiffies; | 330 | last_jiffies = jiffies; |
331 | time_delta = timekeeping_max_deferment(); | 331 | time_delta = timekeeping_max_deferment(); |
332 | } while (read_seqretry(&xtime_lock, seq)); | 332 | } while (read_seqretry(&jiffies_lock, seq)); |
333 | 333 | ||
334 | if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || | 334 | if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) || |
335 | arch_needs_cpu(cpu)) { | 335 | arch_needs_cpu(cpu)) { |
@@ -859,7 +859,7 @@ void tick_setup_sched_timer(void) | |||
859 | /* Get the next period (per cpu) */ | 859 | /* Get the next period (per cpu) */ |
860 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); | 860 | hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); |
861 | 861 | ||
862 | /* Offset the tick to avert xtime_lock contention. */ | 862 | /* Offset the tick to avert jiffies_lock contention. */ |
863 | if (sched_skew_tick) { | 863 | if (sched_skew_tick) { |
864 | u64 offset = ktime_to_ns(tick_period) >> 1; | 864 | u64 offset = ktime_to_ns(tick_period) >> 1; |
865 | do_div(offset, num_possible_cpus()); | 865 | do_div(offset, num_possible_cpus()); |