Diffstat (limited to 'kernel'):
 kernel/hrtimer.c | 184
 1 file changed, 98 insertions(+), 86 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 83fc50416b1d..89a9f535b4ce 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1,8 +1,9 @@
 /*
  * linux/kernel/hrtimer.c
  *
- * Copyright(C) 2005, Thomas Gleixner <tglx@linutronix.de>
- * Copyright(C) 2005, Red Hat, Inc., Ingo Molnar
+ * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
+ * Copyright(C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  *
  * High-resolution kernel timers
  *
@@ -79,21 +80,22 @@ EXPORT_SYMBOL_GPL(ktime_get_real);
  * This ensures that we capture erroneous accesses to these clock ids
  * rather than moving them into the range of valid clock id's.
  */
-
-#define MAX_HRTIMER_BASES 2
-
-static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
+static DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
+
+	.clock_base =
 	{
-		.index = CLOCK_REALTIME,
-		.get_time = &ktime_get_real,
-		.resolution = KTIME_REALTIME_RES,
-	},
-	{
-		.index = CLOCK_MONOTONIC,
-		.get_time = &ktime_get,
-		.resolution = KTIME_MONOTONIC_RES,
-	},
+		{
+			.index = CLOCK_REALTIME,
+			.get_time = &ktime_get_real,
+			.resolution = KTIME_REALTIME_RES,
+		},
+		{
+			.index = CLOCK_MONOTONIC,
+			.get_time = &ktime_get,
+			.resolution = KTIME_MONOTONIC_RES,
+		},
+	}
 };
 
 /**
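The initializer above relies on the split of the old struct hrtimer_base into a per-CPU container plus an array of per-clock bases. The structure definitions live in include/linux/hrtimer.h and are not part of this diff; the following is only a sketch of the shape implied by the usage in this file (field order and conditional members are assumptions):

struct hrtimer_clock_base {
	struct hrtimer_cpu_base	*cpu_base;	/* back pointer to the per-CPU base */
	clockid_t		index;		/* CLOCK_REALTIME or CLOCK_MONOTONIC */
	struct rb_root		active;		/* red-black tree of pending timers */
	struct rb_node		*first;		/* cached earliest-expiring timer */
	ktime_t			resolution;
	ktime_t			(*get_time)(void);
	ktime_t			(*get_softirq_time)(void);
	ktime_t			softirq_time;	/* time used while expiring the queue */
};

struct hrtimer_cpu_base {
	spinlock_t		lock;		/* one lock covering all clock bases of this CPU */
	struct lock_class_key	lock_key;
	struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
	struct hrtimer		*curr_timer;	/* conditional (SMP) in the real header */
};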
@@ -125,7 +127,7 @@ EXPORT_SYMBOL_GPL(ktime_get_ts);
  * Get the coarse grained time at the softirq based on xtime and
  * wall_to_monotonic.
  */
-static void hrtimer_get_softirq_time(struct hrtimer_base *base)
+static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 {
 	ktime_t xtim, tomono;
 	struct timespec xts;
@@ -142,8 +144,9 @@ static void hrtimer_get_softirq_time(struct hrtimer_base *base)
 
 	xtim = timespec_to_ktime(xts);
 	tomono = timespec_to_ktime(wall_to_monotonic);
-	base[CLOCK_REALTIME].softirq_time = xtim;
-	base[CLOCK_MONOTONIC].softirq_time = ktime_add(xtim, tomono);
+	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
+	base->clock_base[CLOCK_MONOTONIC].softirq_time =
+		ktime_add(xtim, tomono);
 }
 
 /*
@@ -166,19 +169,20 @@ static void hrtimer_get_softirq_time(struct hrtimer_base *base)
  * possible to set timer->base = NULL and drop the lock: the timer remains
  * locked.
  */
-static struct hrtimer_base *lock_hrtimer_base(const struct hrtimer *timer,
-					      unsigned long *flags)
+static
+struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
+					     unsigned long *flags)
 {
-	struct hrtimer_base *base;
+	struct hrtimer_clock_base *base;
 
 	for (;;) {
 		base = timer->base;
 		if (likely(base != NULL)) {
-			spin_lock_irqsave(&base->lock, *flags);
+			spin_lock_irqsave(&base->cpu_base->lock, *flags);
 			if (likely(base == timer->base))
 				return base;
 			/* The timer has migrated to another CPU: */
-			spin_unlock_irqrestore(&base->lock, *flags);
+			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
 		}
 		cpu_relax();
 	}
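lock_hrtimer_base() keeps the idiom described in the comment above: load timer->base, take the lock reachable through it (now one level deeper, via the cpu_base back pointer), then re-check that timer->base still points at the same base, because a concurrent migration can change it between the load and the lock acquisition. The same idiom, distilled into a generic sketch with hypothetical names:

/* Hypothetical distillation of the lock-and-revalidate idiom used above. */
struct container { spinlock_t lock; };
struct obj { struct container *parent; };

static struct container *lock_parent(struct obj *o, unsigned long *flags)
{
	struct container *p;

	for (;;) {
		p = o->parent;			/* racy load */
		if (p) {
			spin_lock_irqsave(&p->lock, *flags);
			if (p == o->parent)	/* still the same parent? */
				return p;	/* yes: locked the right one */
			/* o migrated between the load and the lock */
			spin_unlock_irqrestore(&p->lock, *flags);
		}
		cpu_relax();
	}
}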
@@ -187,12 +191,14 @@ static struct hrtimer_base *lock_hrtimer_base(const struct hrtimer *timer,
 /*
  * Switch the timer base to the current CPU when possible.
  */
-static inline struct hrtimer_base *
-switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
+static inline struct hrtimer_clock_base *
+switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
 {
-	struct hrtimer_base *new_base;
+	struct hrtimer_clock_base *new_base;
+	struct hrtimer_cpu_base *new_cpu_base;
 
-	new_base = &__get_cpu_var(hrtimer_bases)[base->index];
+	new_cpu_base = &__get_cpu_var(hrtimer_bases);
+	new_base = &new_cpu_base->clock_base[base->index];
 
 	if (base != new_base) {
 		/*
@@ -204,13 +210,13 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
 	 * completed. There is no conflict as we hold the lock until
 	 * the timer is enqueued.
 	 */
-	if (unlikely(base->curr_timer == timer))
+	if (unlikely(base->cpu_base->curr_timer == timer))
 		return base;
 
 	/* See the comment in lock_timer_base() */
 	timer->base = NULL;
-	spin_unlock(&base->lock);
-	spin_lock(&new_base->lock);
+	spin_unlock(&base->cpu_base->lock);
+	spin_lock(&new_base->cpu_base->lock);
 	timer->base = new_base;
 	}
 	return new_base;
@@ -220,12 +226,12 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_base *base)
 
 #define set_curr_timer(b, t)		do { } while (0)
 
-static inline struct hrtimer_base *
+static inline struct hrtimer_clock_base *
 lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
-	struct hrtimer_base *base = timer->base;
+	struct hrtimer_clock_base *base = timer->base;
 
-	spin_lock_irqsave(&base->lock, *flags);
+	spin_lock_irqsave(&base->cpu_base->lock, *flags);
 
 	return base;
 }
@@ -305,7 +311,7 @@ void hrtimer_notify_resume(void)
 static inline
 void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 {
-	spin_unlock_irqrestore(&timer->base->lock, *flags);
+	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
 }
 
 /**
@@ -355,7 +361,8 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
  * The timer is inserted in expiry order. Insertion into the
  * red black tree is O(log(n)). Must hold the base lock.
  */
-static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
+static void enqueue_hrtimer(struct hrtimer *timer,
+			    struct hrtimer_clock_base *base)
 {
 	struct rb_node **link = &base->active.rb_node;
 	struct rb_node *parent = NULL;
@@ -394,7 +401,8 @@ static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
  *
  * Caller must hold the base lock.
  */
-static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
+static void __remove_hrtimer(struct hrtimer *timer,
+			     struct hrtimer_clock_base *base)
 {
 	/*
 	 * Remove the timer from the rbtree and replace the
@@ -410,7 +418,7 @@ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
  * remove hrtimer, called with base lock held
  */
 static inline int
-remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
+remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 {
 	if (hrtimer_active(timer)) {
 		__remove_hrtimer(timer, base);
@@ -432,7 +440,7 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base)
 int
 hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 {
-	struct hrtimer_base *base, *new_base;
+	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
 	int ret;
 
@@ -479,13 +487,13 @@ EXPORT_SYMBOL_GPL(hrtimer_start);
  */
 int hrtimer_try_to_cancel(struct hrtimer *timer)
 {
-	struct hrtimer_base *base;
+	struct hrtimer_clock_base *base;
 	unsigned long flags;
 	int ret = -1;
 
 	base = lock_hrtimer_base(timer, &flags);
 
-	if (base->curr_timer != timer)
+	if (base->cpu_base->curr_timer != timer)
 		ret = remove_hrtimer(timer, base);
 
 	unlock_hrtimer_base(timer, &flags);
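hrtimer_try_to_cancel() leaves ret at -1 when the timer's callback is currently executing (curr_timer matches), meaning the timer cannot be removed right now. The synchronous wrapper hrtimer_cancel() is not touched by this patch and not shown here; a sketch of how such a retry wrapper looks, with a hypothetical name:

/* Sketch of a retry wrapper in the style of hrtimer_cancel(). */
static int cancel_timer_sync(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)		/* 0: was not active, 1: removed */
			return ret;
		cpu_relax();		/* -1: callback running, try again */
	}
}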
@@ -521,12 +529,12 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
  */
 ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 {
-	struct hrtimer_base *base;
+	struct hrtimer_clock_base *base;
 	unsigned long flags;
 	ktime_t rem;
 
 	base = lock_hrtimer_base(timer, &flags);
-	rem = ktime_sub(timer->expires, timer->base->get_time());
+	rem = ktime_sub(timer->expires, base->get_time());
 	unlock_hrtimer_base(timer, &flags);
 
 	return rem;
@@ -542,26 +550,29 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
  */
 ktime_t hrtimer_get_next_event(void)
 {
-	struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
+	struct hrtimer_clock_base *base = cpu_base->clock_base;
 	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
 	unsigned long flags;
 	int i;
 
-	for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) {
+	spin_lock_irqsave(&cpu_base->lock, flags);
+
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
 		struct hrtimer *timer;
 
-		spin_lock_irqsave(&base->lock, flags);
-		if (!base->first) {
-			spin_unlock_irqrestore(&base->lock, flags);
+		if (!base->first)
 			continue;
-		}
+
 		timer = rb_entry(base->first, struct hrtimer, node);
 		delta.tv64 = timer->expires.tv64;
-		spin_unlock_irqrestore(&base->lock, flags);
 		delta = ktime_sub(delta, base->get_time());
 		if (delta.tv64 < mindelta.tv64)
 			mindelta.tv64 = delta.tv64;
 	}
+
+	spin_unlock_irqrestore(&cpu_base->lock, flags);
+
 	if (mindelta.tv64 < 0)
 		mindelta.tv64 = 0;
 	return mindelta;
@@ -577,16 +588,16 @@ ktime_t hrtimer_get_next_event(void)
 void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 		  enum hrtimer_mode mode)
 {
-	struct hrtimer_base *bases;
+	struct hrtimer_cpu_base *cpu_base;
 
 	memset(timer, 0, sizeof(struct hrtimer));
 
-	bases = __raw_get_cpu_var(hrtimer_bases);
+	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
 
 	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
 		clock_id = CLOCK_MONOTONIC;
 
-	timer->base = &bases[clock_id];
+	timer->base = &cpu_base->clock_base[clock_id];
 	rb_set_parent(&timer->node, &timer->node);
 }
 EXPORT_SYMBOL_GPL(hrtimer_init);
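Note that hrtimer_init() silently promotes relative CLOCK_REALTIME timers to CLOCK_MONOTONIC, so only absolute timers follow wall-clock adjustments. For reference, a minimal hypothetical user of this API as it stands after the patch (the exact callback typedef varies by kernel version, so the int return is an assumption):

/* Hypothetical one-shot timer user; all names here are illustrative. */
static int my_timeout(struct hrtimer *t)
{
	/* runs from the hrtimer softirq with no base lock held */
	return HRTIMER_NORESTART;	/* one-shot: do not re-enqueue */
}

static struct hrtimer my_timer;

static void arm_my_timer(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_timeout;
	/* fire 100ms from now, relative to CLOCK_MONOTONIC */
	hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}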
@@ -601,10 +612,10 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
  */
 int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
-	struct hrtimer_base *bases;
+	struct hrtimer_cpu_base *cpu_base;
 
-	bases = __raw_get_cpu_var(hrtimer_bases);
-	*tp = ktime_to_timespec(bases[which_clock].resolution);
+	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
+	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);
 
 	return 0;
 }
@@ -613,9 +624,11 @@ EXPORT_SYMBOL_GPL(hrtimer_get_res);
 /*
  * Expire the per base hrtimer-queue:
  */
-static inline void run_hrtimer_queue(struct hrtimer_base *base)
+static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
+				     int index)
 {
 	struct rb_node *node;
+	struct hrtimer_clock_base *base = &cpu_base->clock_base[index];
 
 	if (!base->first)
 		return;
@@ -623,7 +636,7 @@ static inline void run_hrtimer_queue(struct hrtimer_base *base)
 	if (base->get_softirq_time)
 		base->softirq_time = base->get_softirq_time();
 
-	spin_lock_irq(&base->lock);
+	spin_lock_irq(&cpu_base->lock);
 
 	while ((node = base->first)) {
 		struct hrtimer *timer;
@@ -635,21 +648,21 @@ static inline void run_hrtimer_queue(struct hrtimer_base *base)
 			break;
 
 		fn = timer->function;
-		set_curr_timer(base, timer);
+		set_curr_timer(cpu_base, timer);
 		__remove_hrtimer(timer, base);
-		spin_unlock_irq(&base->lock);
+		spin_unlock_irq(&cpu_base->lock);
 
 		restart = fn(timer);
 
-		spin_lock_irq(&base->lock);
+		spin_lock_irq(&cpu_base->lock);
 
 		if (restart != HRTIMER_NORESTART) {
 			BUG_ON(hrtimer_active(timer));
 			enqueue_hrtimer(timer, base);
 		}
 	}
-	set_curr_timer(base, NULL);
-	spin_unlock_irq(&base->lock);
+	set_curr_timer(cpu_base, NULL);
+	spin_unlock_irq(&cpu_base->lock);
 }
 
 /*
@@ -657,13 +670,13 @@ static inline void run_hrtimer_queue(struct hrtimer_base *base)
  */
 void hrtimer_run_queues(void)
 {
-	struct hrtimer_base *base = __get_cpu_var(hrtimer_bases);
+	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	int i;
 
-	hrtimer_get_softirq_time(base);
+	hrtimer_get_softirq_time(cpu_base);
 
-	for (i = 0; i < MAX_HRTIMER_BASES; i++)
-		run_hrtimer_queue(&base[i]);
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
+		run_hrtimer_queue(cpu_base, i);
 }
 
 /*
@@ -792,19 +805,21 @@ sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
  */
 static void __devinit init_hrtimers_cpu(int cpu)
 {
-	struct hrtimer_base *base = per_cpu(hrtimer_bases, cpu);
+	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
 
-	for (i = 0; i < MAX_HRTIMER_BASES; i++, base++) {
-		spin_lock_init(&base->lock);
-		lockdep_set_class(&base->lock, &base->lock_key);
-	}
+	spin_lock_init(&cpu_base->lock);
+	lockdep_set_class(&cpu_base->lock, &cpu_base->lock_key);
+
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
+		cpu_base->clock_base[i].cpu_base = cpu_base;
+
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void migrate_hrtimer_list(struct hrtimer_base *old_base,
-				 struct hrtimer_base *new_base)
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+				 struct hrtimer_clock_base *new_base)
 {
 	struct hrtimer *timer;
 	struct rb_node *node;
@@ -819,29 +834,26 @@ static void migrate_hrtimer_list(struct hrtimer_base *old_base,
 
 static void migrate_hrtimers(int cpu)
 {
-	struct hrtimer_base *old_base, *new_base;
+	struct hrtimer_cpu_base *old_base, *new_base;
 	int i;
 
 	BUG_ON(cpu_online(cpu));
-	old_base = per_cpu(hrtimer_bases, cpu);
-	new_base = get_cpu_var(hrtimer_bases);
+	old_base = &per_cpu(hrtimer_bases, cpu);
+	new_base = &get_cpu_var(hrtimer_bases);
 
 	local_irq_disable();
 
-	for (i = 0; i < MAX_HRTIMER_BASES; i++) {
-
-		spin_lock(&new_base->lock);
-		spin_lock(&old_base->lock);
+	spin_lock(&new_base->lock);
+	spin_lock(&old_base->lock);
 
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
 		BUG_ON(old_base->curr_timer);
 
-		migrate_hrtimer_list(old_base, new_base);
-
-		spin_unlock(&old_base->lock);
-		spin_unlock(&new_base->lock);
-		old_base++;
-		new_base++;
+		migrate_hrtimer_list(&old_base->clock_base[i],
+				     &new_base->clock_base[i]);
 	}
+	spin_unlock(&old_base->lock);
+	spin_unlock(&new_base->lock);
 
 	local_irq_enable();
 	put_cpu_var(hrtimer_bases);