| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2010-03-01 02:55:20 -0500 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2010-03-01 02:55:20 -0500 |
| commit | 35858adbfca13678af99fb31618ef4428d6dedb0 | |
| tree | 3336feaa61324486945816cb52c347733e7c0821 /kernel/hrtimer.c | |
| parent | 197d4db752e67160d79fed09968c2140376a80a3 | |
| parent | 4b70858ba8d4537daf782defebe5f2ff80ccef2b | |
Merge branch 'next' into for-linus
Diffstat (limited to 'kernel/hrtimer.c')
| -rw-r--r-- | kernel/hrtimer.c | 50 |
1 file changed, 25 insertions(+), 25 deletions(-)
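Every hunk in this diff is the same mechanical substitution: the hrtimer per-CPU base lock moves from the spinlock_t API to the raw_spin_lock_* API. The usual motivation for this class of conversion is preempt-rt, where an ordinary spinlock_t can become a sleeping lock, while a raw_spinlock_t always spins with interrupts or preemption disabled and so stays safe in the hard-IRQ paths hrtimers run in. Below is a minimal sketch of the target API; the demo_* names are hypothetical, but the types and calls are the real ones from <linux/spinlock.h>.

```c
#include <linux/spinlock.h>

/* Hypothetical example data; only the locking API mirrors the diff. */
static DEFINE_RAW_SPINLOCK(demo_lock);	/* stays a spinning lock even on -rt */
static int demo_count;

static void demo_update(void)
{
	unsigned long flags;

	/* Disable local interrupts and spin; a raw spinlock never sleeps. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_count++;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}
```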
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d2f9239dc6ba..0086628b6e97 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -127,11 +127,11 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 	for (;;) {
 		base = timer->base;
 		if (likely(base != NULL)) {
-			spin_lock_irqsave(&base->cpu_base->lock, *flags);
+			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
 			if (likely(base == timer->base))
 				return base;
 			/* The timer has migrated to another CPU: */
-			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
+			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
 		}
 		cpu_relax();
 	}
@@ -208,13 +208,13 @@ again:

 	/* See the comment in lock_timer_base() */
 	timer->base = NULL;
-	spin_unlock(&base->cpu_base->lock);
-	spin_lock(&new_base->cpu_base->lock);
+	raw_spin_unlock(&base->cpu_base->lock);
+	raw_spin_lock(&new_base->cpu_base->lock);

 	if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
 		cpu = this_cpu;
-		spin_unlock(&new_base->cpu_base->lock);
-		spin_lock(&base->cpu_base->lock);
+		raw_spin_unlock(&new_base->cpu_base->lock);
+		raw_spin_lock(&base->cpu_base->lock);
 		timer->base = base;
 		goto again;
 	}
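The two hunks above cooperate: switch_hrtimer_base publishes timer->base = NULL, drops the old base lock, then takes the new one, while the retry loop in lock_hrtimer_base (first hunk) spins until timer->base is non-NULL and stable again. A sketch of that hand-over-hand handshake, with hypothetical demo_* names; only the locking pattern mirrors the diff.

```c
#include <linux/spinlock.h>

struct demo_base  { raw_spinlock_t lock; };
struct demo_timer { struct demo_base *base; };

/* Called with t->base->lock held; returns with new_base->lock held. */
static void demo_switch_base(struct demo_timer *t, struct demo_base *new_base)
{
	struct demo_base *old_base = t->base;

	t->base = NULL;			/* lookup loop sees NULL and retries */
	raw_spin_unlock(&old_base->lock);
	raw_spin_lock(&new_base->lock);
	t->base = new_base;		/* republish under the new lock */
}
```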
@@ -230,7 +230,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
 	struct hrtimer_clock_base *base = timer->base;

-	spin_lock_irqsave(&base->cpu_base->lock, *flags);
+	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

 	return base;
 }
@@ -628,12 +628,12 @@ static void retrigger_next_event(void *arg)
 	base = &__get_cpu_var(hrtimer_bases);

 	/* Adjust CLOCK_REALTIME offset */
-	spin_lock(&base->lock);
+	raw_spin_lock(&base->lock);
 	base->clock_base[CLOCK_REALTIME].offset =
 		timespec_to_ktime(realtime_offset);

 	hrtimer_force_reprogram(base, 0);
-	spin_unlock(&base->lock);
+	raw_spin_unlock(&base->lock);
 }

 /*
@@ -694,9 +694,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
 		if (wakeup) {
-			spin_unlock(&base->cpu_base->lock);
+			raw_spin_unlock(&base->cpu_base->lock);
 			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-			spin_lock(&base->cpu_base->lock);
+			raw_spin_lock(&base->cpu_base->lock);
 		} else
 			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);

@@ -790,7 +790,7 @@ static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
 static inline
 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
-	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
+	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
 }

 /**
@@ -1123,7 +1123,7 @@ ktime_t hrtimer_get_next_event(void)
 	unsigned long flags;
 	int i;

-	spin_lock_irqsave(&cpu_base->lock, flags);
+	raw_spin_lock_irqsave(&cpu_base->lock, flags);

 	if (!hrtimer_hres_active()) {
 		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
@@ -1140,7 +1140,7 @@ ktime_t hrtimer_get_next_event(void)
 		}
 	}

-	spin_unlock_irqrestore(&cpu_base->lock, flags);
+	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

 	if (mindelta.tv64 < 0)
 		mindelta.tv64 = 0;
@@ -1222,11 +1222,11 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 	 * they get migrated to another cpu, therefore its safe to unlock
 	 * the timer base.
 	 */
-	spin_unlock(&cpu_base->lock);
+	raw_spin_unlock(&cpu_base->lock);
 	trace_hrtimer_expire_entry(timer, now);
 	restart = fn(timer);
 	trace_hrtimer_expire_exit(timer);
-	spin_lock(&cpu_base->lock);
+	raw_spin_lock(&cpu_base->lock);

 	/*
 	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
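The comment in this hunk carries the key invariant: the expiry callback runs with cpu_base->lock dropped, so a handler may rearm or cancel timers on the same base without self-deadlocking. The pattern in isolation, with hypothetical demo_* names:

```c
#include <linux/spinlock.h>

struct demo_cpu_base { raw_spinlock_t lock; };

/* Called with cpu_base->lock held; fn() runs unlocked. */
static void demo_expire(struct demo_cpu_base *cpu_base, void (*fn)(void))
{
	raw_spin_unlock(&cpu_base->lock);	/* handler may take other locks */
	fn();
	raw_spin_lock(&cpu_base->lock);		/* retake before queue work */
}
```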
@@ -1261,7 +1261,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 retry:
 	expires_next.tv64 = KTIME_MAX;

-	spin_lock(&cpu_base->lock);
+	raw_spin_lock(&cpu_base->lock);
 	/*
 	 * We set expires_next to KTIME_MAX here with cpu_base->lock
 	 * held to prevent that a timer is enqueued in our queue via
@@ -1317,7 +1317,7 @@ retry:
 	 * against it.
 	 */
 	cpu_base->expires_next = expires_next;
-	spin_unlock(&cpu_base->lock);
+	raw_spin_unlock(&cpu_base->lock);

 	/* Reprogramming necessary ? */
 	if (expires_next.tv64 == KTIME_MAX ||
@@ -1457,7 +1457,7 @@ void hrtimer_run_queues(void)
 			gettime = 0;
 		}

-		spin_lock(&cpu_base->lock);
+		raw_spin_lock(&cpu_base->lock);

 		while ((node = base->first)) {
 			struct hrtimer *timer;
@@ -1469,7 +1469,7 @@ void hrtimer_run_queues(void)

 			__run_hrtimer(timer, &base->softirq_time);
 		}
-		spin_unlock(&cpu_base->lock);
+		raw_spin_unlock(&cpu_base->lock);
 	}
 }

@@ -1625,7 +1625,7 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;

-	spin_lock_init(&cpu_base->lock);
+	raw_spin_lock_init(&cpu_base->lock);

 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 		cpu_base->clock_base[i].cpu_base = cpu_base;
@@ -1683,16 +1683,16 @@ static void migrate_hrtimers(int scpu)
 	 * The caller is globally serialized and nobody else
 	 * takes two locks at once, deadlock is not possible.
 	 */
-	spin_lock(&new_base->lock);
-	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+	raw_spin_lock(&new_base->lock);
+	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		migrate_hrtimer_list(&old_base->clock_base[i],
 				     &new_base->clock_base[i]);
 	}

-	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
+	raw_spin_unlock(&old_base->lock);
+	raw_spin_unlock(&new_base->lock);

 	/* Check, if we got expired work to do */
 	__hrtimer_peek_ahead_timers();
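The final hunk shows that the nesting annotation survives the conversion: raw_spin_lock_nested() with SINGLE_DEPTH_NESTING is needed because both per-CPU base locks belong to the same lockdep class, so the second acquisition must be marked as an intentional one-level nesting rather than a potential deadlock. A sketch of the idiom with hypothetical demo_* names:

```c
#include <linux/spinlock.h>

struct demo_cpu_base { raw_spinlock_t lock; };

/* Caller is globally serialized, as in the hunk above. */
static void demo_migrate(struct demo_cpu_base *new_base,
			 struct demo_cpu_base *old_base)
{
	raw_spin_lock(&new_base->lock);
	/* Same lock class; tell lockdep this nesting is deliberate. */
	raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	/* ... move entries from old_base to new_base ... */

	raw_spin_unlock(&old_base->lock);
	raw_spin_unlock(&new_base->lock);
}
```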
