path: root/kernel/hrtimer.c
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c  538
1 file changed, 248 insertions(+), 290 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cdec83e722fa..bda9cb924276 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -442,22 +442,6 @@ static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
 static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
 #endif
 
-/*
- * Check, whether the timer is on the callback pending list
- */
-static inline int hrtimer_cb_pending(const struct hrtimer *timer)
-{
-	return timer->state & HRTIMER_STATE_PENDING;
-}
-
-/*
- * Remove a timer from the callback pending list
- */
-static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
-{
-	list_del_init(&timer->cb_entry);
-}
-
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -517,7 +501,7 @@ static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
 		if (!base->first)
 			continue;
 		timer = rb_entry(base->first, struct hrtimer, node);
-		expires = ktime_sub(timer->expires, base->offset);
+		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 		if (expires.tv64 < cpu_base->expires_next.tv64)
 			cpu_base->expires_next = expires;
 	}
@@ -539,10 +523,10 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base)
 {
 	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
-	ktime_t expires = ktime_sub(timer->expires, base->offset);
+	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
 	int res;
 
-	WARN_ON_ONCE(timer->expires.tv64 < 0);
+	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
 
 	/*
 	 * When the callback is running, we do not reprogram the clock event
@@ -651,6 +635,8 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 {
 }
 
+static void __run_hrtimer(struct hrtimer *timer);
+
 /*
  * When High resolution timers are active, try to reprogram. Note, that in case
  * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
@@ -661,40 +647,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 					    struct hrtimer_clock_base *base)
 {
 	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-
-		/* Timer is expired, act upon the callback mode */
-		switch(timer->cb_mode) {
-		case HRTIMER_CB_IRQSAFE_NO_RESTART:
-			debug_hrtimer_deactivate(timer);
-			/*
-			 * We can call the callback from here. No restart
-			 * happens, so no danger of recursion
-			 */
-			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
-			return 1;
-		case HRTIMER_CB_IRQSAFE_PERCPU:
-		case HRTIMER_CB_IRQSAFE_UNLOCKED:
-			/*
-			 * This is solely for the sched tick emulation with
-			 * dynamic tick support to ensure that we do not
-			 * restart the tick right on the edge and end up with
-			 * the tick timer in the softirq ! The calling site
-			 * takes care of this. Also used for hrtimer sleeper !
-			 */
-			debug_hrtimer_deactivate(timer);
-			return 1;
-		case HRTIMER_CB_IRQSAFE:
-		case HRTIMER_CB_SOFTIRQ:
-			/*
-			 * Move everything else into the softirq pending list !
-			 */
-			list_add_tail(&timer->cb_entry,
-				      &base->cpu_base->cb_pending);
-			timer->state = HRTIMER_STATE_PENDING;
-			return 1;
-		default:
-			BUG();
-		}
+		/*
+		 * XXX: recursion check?
+		 * hrtimer_forward() should round up with timer granularity
+		 * so that we never get into inf recursion here,
+		 * it doesn't do that though
+		 */
+		__run_hrtimer(timer);
+		return 1;
 	}
 	return 0;
 }
@@ -733,11 +693,6 @@ static int hrtimer_switch_to_hres(void)
 	return 1;
 }
 
-static inline void hrtimer_raise_softirq(void)
-{
-	raise_softirq(HRTIMER_SOFTIRQ);
-}
-
 #else
 
 static inline int hrtimer_hres_active(void) { return 0; }
@@ -756,7 +711,6 @@ static inline int hrtimer_reprogram(struct hrtimer *timer,
 {
 	return 0;
 }
-static inline void hrtimer_raise_softirq(void) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -795,7 +749,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 	u64 orun = 1;
 	ktime_t delta;
 
-	delta = ktime_sub(now, timer->expires);
+	delta = ktime_sub(now, hrtimer_get_expires(timer));
 
 	if (delta.tv64 < 0)
 		return 0;
@@ -807,8 +761,8 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 		s64 incr = ktime_to_ns(interval);
 
 		orun = ktime_divns(delta, incr);
-		timer->expires = ktime_add_ns(timer->expires, incr * orun);
-		if (timer->expires.tv64 > now.tv64)
+		hrtimer_add_expires_ns(timer, incr * orun);
+		if (hrtimer_get_expires_tv64(timer) > now.tv64)
 			return orun;
 		/*
 		 * This (and the ktime_add() below) is the
@@ -816,7 +770,7 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 		 */
 		orun++;
 	}
-	timer->expires = ktime_add_safe(timer->expires, interval);
+	hrtimer_add_expires(timer, interval);
 
 	return orun;
 }
@@ -848,7 +802,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
 		 * We dont care about collisions. Nodes with
 		 * the same expiry time stay together.
 		 */
-		if (timer->expires.tv64 < entry->expires.tv64) {
+		if (hrtimer_get_expires_tv64(timer) <
+		    hrtimer_get_expires_tv64(entry)) {
 			link = &(*link)->rb_left;
 		} else {
 			link = &(*link)->rb_right;
@@ -898,10 +853,7 @@ static void __remove_hrtimer(struct hrtimer *timer,
 			     struct hrtimer_clock_base *base,
 			     unsigned long newstate, int reprogram)
 {
-	/* High res. callback list. NOP for !HIGHRES */
-	if (hrtimer_cb_pending(timer))
-		hrtimer_remove_cb_pending(timer);
-	else {
+	if (timer->state & HRTIMER_STATE_ENQUEUED) {
 		/*
 		 * Remove the timer from the rbtree and replace the
 		 * first entry pointer if necessary.
@@ -945,9 +897,10 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
 }
 
 /**
- * hrtimer_start - (re)start an relative timer on the current CPU
+ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
  * @timer:	the timer to be added
  * @tim:	expiry time
+ * @delta_ns:	"slack" range for the timer
  * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
  *
  * Returns:
@@ -955,11 +908,12 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
  * 1 when the timer was active
  */
 int
-hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
+			const enum hrtimer_mode mode)
 {
 	struct hrtimer_clock_base *base, *new_base;
 	unsigned long flags;
-	int ret, raise;
+	int ret;
 
 	base = lock_hrtimer_base(timer, &flags);
 
@@ -983,7 +937,7 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 #endif
 	}
 
-	timer->expires = tim;
+	hrtimer_set_expires_range_ns(timer, tim, delta_ns);
 
 	timer_stats_hrtimer_set_start_info(timer);
 
@@ -994,30 +948,30 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
 	enqueue_hrtimer(timer, new_base,
 			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
 
-	/*
-	 * The timer may be expired and moved to the cb_pending
-	 * list. We can not raise the softirq with base lock held due
-	 * to a possible deadlock with runqueue lock.
-	 */
-	raise = timer->state == HRTIMER_STATE_PENDING;
-
-	/*
-	 * We use preempt_disable to prevent this task from migrating after
-	 * setting up the softirq and raising it. Otherwise, if me migrate
-	 * we will raise the softirq on the wrong CPU.
-	 */
-	preempt_disable();
-
 	unlock_hrtimer_base(timer, &flags);
 
-	if (raise)
-		hrtimer_raise_softirq();
-	preempt_enable();
-
 	return ret;
 }
+EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
+
+/**
+ * hrtimer_start - (re)start an hrtimer on the current CPU
+ * @timer:	the timer to be added
+ * @tim:	expiry time
+ * @mode:	expiry mode: absolute (HRTIMER_ABS) or relative (HRTIMER_REL)
+ *
+ * Returns:
+ *  0 on success
+ *  1 when the timer was active
+ */
+int
+hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
+{
+	return hrtimer_start_range_ns(timer, tim, 0, mode);
+}
 EXPORT_SYMBOL_GPL(hrtimer_start);
 
+
 /**
  * hrtimer_try_to_cancel - try to deactivate a timer
  * @timer:	hrtimer to stop
@@ -1077,7 +1031,7 @@ ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 	ktime_t rem;
 
 	base = lock_hrtimer_base(timer, &flags);
-	rem = ktime_sub(timer->expires, base->get_time());
+	rem = hrtimer_expires_remaining(timer);
 	unlock_hrtimer_base(timer, &flags);
 
 	return rem;
@@ -1109,7 +1063,7 @@ ktime_t hrtimer_get_next_event(void)
 				continue;
 
 			timer = rb_entry(base->first, struct hrtimer, node);
-			delta.tv64 = timer->expires.tv64;
+			delta.tv64 = hrtimer_get_expires_tv64(timer);
 			delta = ktime_sub(delta, base->get_time());
 			if (delta.tv64 < mindelta.tv64)
 				mindelta.tv64 = delta.tv64;
@@ -1180,60 +1134,6 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 }
 EXPORT_SYMBOL_GPL(hrtimer_get_res);
 
-static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
-{
-	spin_lock_irq(&cpu_base->lock);
-
-	while (!list_empty(&cpu_base->cb_pending)) {
-		enum hrtimer_restart (*fn)(struct hrtimer *);
-		struct hrtimer *timer;
-		int restart;
-
-		timer = list_entry(cpu_base->cb_pending.next,
-				   struct hrtimer, cb_entry);
-
-		debug_hrtimer_deactivate(timer);
-		timer_stats_account_hrtimer(timer);
-
-		fn = timer->function;
-		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
-		spin_unlock_irq(&cpu_base->lock);
-
-		restart = fn(timer);
-
-		spin_lock_irq(&cpu_base->lock);
-
-		timer->state &= ~HRTIMER_STATE_CALLBACK;
-		if (restart == HRTIMER_RESTART) {
-			BUG_ON(hrtimer_active(timer));
-			/*
-			 * Enqueue the timer, allow reprogramming of the event
-			 * device
-			 */
-			enqueue_hrtimer(timer, timer->base, 1);
-		} else if (hrtimer_active(timer)) {
-			/*
-			 * If the timer was rearmed on another CPU, reprogram
-			 * the event device.
-			 */
-			struct hrtimer_clock_base *base = timer->base;
-
-			if (base->first == &timer->node &&
-			    hrtimer_reprogram(timer, base)) {
-				/*
-				 * Timer is expired. Thus move it from tree to
-				 * pending list again.
-				 */
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-			}
-		}
-	}
-	spin_unlock_irq(&cpu_base->lock);
-}
-
 static void __run_hrtimer(struct hrtimer *timer)
 {
 	struct hrtimer_clock_base *base = timer->base;
@@ -1241,25 +1141,21 @@ static void __run_hrtimer(struct hrtimer *timer)
 	enum hrtimer_restart (*fn)(struct hrtimer *);
 	int restart;
 
+	WARN_ON(!irqs_disabled());
+
 	debug_hrtimer_deactivate(timer);
 	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
 	timer_stats_account_hrtimer(timer);
-
 	fn = timer->function;
-	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
-	    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
-		/*
-		 * Used for scheduler timers, avoid lock inversion with
-		 * rq->lock and tasklist_lock.
-		 *
-		 * These timers are required to deal with enqueue expiry
-		 * themselves and are not allowed to migrate.
-		 */
-		spin_unlock(&cpu_base->lock);
-		restart = fn(timer);
-		spin_lock(&cpu_base->lock);
-	} else
-		restart = fn(timer);
+
+	/*
+	 * Because we run timers from hardirq context, there is no chance
+	 * they get migrated to another cpu, therefore it's safe to unlock
+	 * the timer base.
+	 */
+	spin_unlock(&cpu_base->lock);
+	restart = fn(timer);
+	spin_lock(&cpu_base->lock);
 
 	/*
 	 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
@@ -1284,7 +1180,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
 	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now;
-	int i, raise = 0;
+	int i;
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
@@ -1310,26 +1206,29 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
 			timer = rb_entry(node, struct hrtimer, node);
 
-			if (basenow.tv64 < timer->expires.tv64) {
+			/*
+			 * The immediate goal for using the softexpires is
+			 * minimizing wakeups, not running timers at the
+			 * earliest interrupt after their soft expiration.
+			 * This allows us to avoid using a Priority Search
+			 * Tree, which can answer a stabbing query for
+			 * overlapping intervals and instead use the simple
+			 * BST we already have.
+			 * We don't add extra wakeups by delaying timers that
+			 * are right-of a not yet expired timer, because that
+			 * timer will have to trigger a wakeup anyway.
+			 */
+
+			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
 				ktime_t expires;
 
-				expires = ktime_sub(timer->expires,
+				expires = ktime_sub(hrtimer_get_expires(timer),
 						    base->offset);
 				if (expires.tv64 < expires_next.tv64)
 					expires_next = expires;
 				break;
 			}
 
-			/* Move softirq callbacks to the pending list */
-			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-				raise = 1;
-				continue;
-			}
-
 			__run_hrtimer(timer);
 		}
 		spin_unlock(&cpu_base->lock);
@@ -1343,15 +1242,30 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 		if (tick_program_event(expires_next, 0))
 			goto retry;
 	}
-
-	/* Raise softirq ? */
-	if (raise)
-		raise_softirq(HRTIMER_SOFTIRQ);
 }
 
-static void run_hrtimer_softirq(struct softirq_action *h)
+/**
+ * hrtimer_peek_ahead_timers -- run soft-expired timers now
+ *
+ * hrtimer_peek_ahead_timers will peek at the timer queue of
+ * the current cpu and check if there are any timers for which
+ * the soft expires time has passed. If any such timers exist,
+ * they are run immediately and then removed from the timer queue.
+ *
+ */
+void hrtimer_peek_ahead_timers(void)
 {
-	run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
+	struct tick_device *td;
+	unsigned long flags;
+
+	if (!hrtimer_hres_active())
+		return;
+
+	local_irq_save(flags);
+	td = &__get_cpu_var(tick_cpu_device);
+	if (td && td->evtdev)
+		hrtimer_interrupt(td->evtdev);
+	local_irq_restore(flags);
 }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
@@ -1365,8 +1279,6 @@ static void run_hrtimer_softirq(struct softirq_action *h)
  */
 void hrtimer_run_pending(void)
 {
-	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-
 	if (hrtimer_hres_active())
 		return;
 
@@ -1380,8 +1292,6 @@ void hrtimer_run_pending(void)
 	 */
 	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
 		hrtimer_switch_to_hres();
-
-	run_hrtimer_pending(cpu_base);
 }
 
 /*
@@ -1403,9 +1313,7 @@ void hrtimer_run_queues(void)
 		if (!base->first)
 			continue;
 
-		if (base->get_softirq_time)
-			base->softirq_time = base->get_softirq_time();
-		else if (gettime) {
+		if (gettime) {
 			hrtimer_get_softirq_time(cpu_base);
 			gettime = 0;
 		}
@@ -1416,17 +1324,10 @@ void hrtimer_run_queues(void)
 			struct hrtimer *timer;
 
 			timer = rb_entry(node, struct hrtimer, node);
-			if (base->softirq_time.tv64 <= timer->expires.tv64)
+			if (base->softirq_time.tv64 <=
+			    hrtimer_get_expires_tv64(timer))
 				break;
 
-			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
-				__remove_hrtimer(timer, base,
-						 HRTIMER_STATE_PENDING, 0);
-				list_add_tail(&timer->cb_entry,
-					      &base->cpu_base->cb_pending);
-				continue;
-			}
-
 			__run_hrtimer(timer);
 		}
 		spin_unlock(&cpu_base->lock);
@@ -1453,9 +1354,6 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
 {
 	sl->timer.function = hrtimer_wakeup;
 	sl->task = task;
-#ifdef CONFIG_HIGH_RES_TIMERS
-	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
-#endif
 }
 
 static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
@@ -1464,7 +1362,7 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
 
 	do {
 		set_current_state(TASK_INTERRUPTIBLE);
-		hrtimer_start(&t->timer, t->timer.expires, mode);
+		hrtimer_start_expires(&t->timer, mode);
 		if (!hrtimer_active(&t->timer))
 			t->task = NULL;
 
@@ -1486,7 +1384,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
 	struct timespec rmt;
 	ktime_t rem;
 
-	rem = ktime_sub(timer->expires, timer->base->get_time());
+	rem = hrtimer_expires_remaining(timer);
 	if (rem.tv64 <= 0)
 		return 0;
 	rmt = ktime_to_timespec(rem);
@@ -1505,7 +1403,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
 
 	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
 				HRTIMER_MODE_ABS);
-	t.timer.expires.tv64 = restart->nanosleep.expires;
+	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
 
 	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
 		goto out;
@@ -1530,9 +1428,14 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 	struct restart_block *restart;
 	struct hrtimer_sleeper t;
 	int ret = 0;
+	unsigned long slack;
+
+	slack = current->timer_slack_ns;
+	if (rt_task(current))
+		slack = 0;
 
 	hrtimer_init_on_stack(&t.timer, clockid, mode);
-	t.timer.expires = timespec_to_ktime(*rqtp);
+	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
 	if (do_nanosleep(&t, mode))
 		goto out;
 
@@ -1552,7 +1455,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 	restart->fn = hrtimer_nanosleep_restart;
 	restart->nanosleep.index = t.timer.base->index;
 	restart->nanosleep.rmtp = rmtp;
-	restart->nanosleep.expires = t.timer.expires.tv64;
+	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
 
 	ret = -ERESTART_RESTARTBLOCK;
 out:
@@ -1587,18 +1490,16 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
 		cpu_base->clock_base[i].cpu_base = cpu_base;
 
-	INIT_LIST_HEAD(&cpu_base->cb_pending);
 	hrtimer_init_hres(cpu_base);
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-				struct hrtimer_clock_base *new_base, int dcpu)
+static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+				 struct hrtimer_clock_base *new_base)
 {
 	struct hrtimer *timer;
 	struct rb_node *node;
-	int raise = 0;
 
 	while ((node = rb_first(&old_base->active))) {
 		timer = rb_entry(node, struct hrtimer, node);
@@ -1606,18 +1507,6 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 		debug_hrtimer_deactivate(timer);
 
 		/*
-		 * Should not happen. Per CPU timers should be
-		 * canceled _before_ the migration code is called
-		 */
-		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
-			__remove_hrtimer(timer, old_base,
-					 HRTIMER_STATE_INACTIVE, 0);
-			WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
-			     timer, timer->function, dcpu);
-			continue;
-		}
-
-		/*
 		 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
 		 * timer could be seen as !active and just vanish away
 		 * under us on another CPU
@@ -1625,111 +1514,83 @@ static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
 		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
 		timer->base = new_base;
 		/*
-		 * Enqueue the timer. Allow reprogramming of the event device
+		 * Enqueue the timers on the new cpu, but do not reprogram
+		 * the timer as that would enable a deadlock between
+		 * hrtimer_enqueue_reprogram() running the timer and us still
+		 * holding a nested base lock.
+		 *
+		 * Instead we tickle the hrtimer interrupt after the migration
+		 * is done, which will run all expired timers and re-program
+		 * the timer device.
 		 */
-		enqueue_hrtimer(timer, new_base, 1);
+		enqueue_hrtimer(timer, new_base, 0);
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-		/*
-		 * Happens with high res enabled when the timer was
-		 * already expired and the callback mode is
-		 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
-		 * enqueue code does not move them to the soft irq
-		 * pending list for performance/latency reasons, but
-		 * in the migration state, we need to do that
-		 * otherwise we end up with a stale timer.
-		 */
-		if (timer->state == HRTIMER_STATE_MIGRATE) {
-			timer->state = HRTIMER_STATE_PENDING;
-			list_add_tail(&timer->cb_entry,
-				      &new_base->cpu_base->cb_pending);
-			raise = 1;
-		}
-#endif
 		/* Clear the migration state bit */
 		timer->state &= ~HRTIMER_STATE_MIGRATE;
 	}
-	return raise;
 }
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-				   struct hrtimer_cpu_base *new_base)
-{
-	struct hrtimer *timer;
-	int raise = 0;
-
-	while (!list_empty(&old_base->cb_pending)) {
-		timer = list_entry(old_base->cb_pending.next,
-				   struct hrtimer, cb_entry);
-
-		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
-		timer->base = &new_base->clock_base[timer->base->index];
-		list_add_tail(&timer->cb_entry, &new_base->cb_pending);
-		raise = 1;
-	}
-	return raise;
-}
-#else
-static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
-				   struct hrtimer_cpu_base *new_base)
-{
-	return 0;
-}
-#endif
-
-static void migrate_hrtimers(int cpu)
+static int migrate_hrtimers(int scpu)
 {
 	struct hrtimer_cpu_base *old_base, *new_base;
-	int i, raise = 0;
+	int dcpu, i;
 
-	BUG_ON(cpu_online(cpu));
-	old_base = &per_cpu(hrtimer_bases, cpu);
+	BUG_ON(cpu_online(scpu));
+	old_base = &per_cpu(hrtimer_bases, scpu);
 	new_base = &get_cpu_var(hrtimer_bases);
 
-	tick_cancel_sched_timer(cpu);
+	dcpu = smp_processor_id();
 
-	local_irq_disable();
-	spin_lock(&new_base->lock);
+	tick_cancel_sched_timer(scpu);
+	/*
+	 * The caller is globally serialized and nobody else
+	 * takes two locks at once, deadlock is not possible.
+	 */
+	spin_lock_irq(&new_base->lock);
 	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		if (migrate_hrtimer_list(&old_base->clock_base[i],
-					 &new_base->clock_base[i], cpu))
-			raise = 1;
+		migrate_hrtimer_list(&old_base->clock_base[i],
+				     &new_base->clock_base[i]);
 	}
 
-	if (migrate_hrtimer_pending(old_base, new_base))
-		raise = 1;
-
 	spin_unlock(&old_base->lock);
-	spin_unlock(&new_base->lock);
-	local_irq_enable();
+	spin_unlock_irq(&new_base->lock);
 	put_cpu_var(hrtimer_bases);
 
-	if (raise)
-		hrtimer_raise_softirq();
+	return dcpu;
 }
+
+static void tickle_timers(void *arg)
+{
+	hrtimer_peek_ahead_timers();
+}
+
 #endif /* CONFIG_HOTPLUG_CPU */
 
 static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 					unsigned long action, void *hcpu)
 {
-	unsigned int cpu = (long)hcpu;
+	int scpu = (long)hcpu;
 
 	switch (action) {
 
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		init_hrtimers_cpu(cpu);
+		init_hrtimers_cpu(scpu);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
-		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
-		migrate_hrtimers(cpu);
+	{
+		int dcpu;
+
+		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
+		dcpu = migrate_hrtimers(scpu);
+		smp_call_function_single(dcpu, tickle_timers, NULL, 0);
 		break;
+	}
 #endif
 
 	default:
@@ -1748,8 +1609,105 @@ void __init hrtimers_init(void)
 	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
 			   (void *)(long)smp_processor_id());
 	register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
-	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
 }
 
+/**
+ * schedule_hrtimeout_range - sleep until timeout
+ * @expires:	timeout value (ktime_t)
+ * @delta:	slack in expires timeout (ktime_t)
+ * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * The @delta argument gives the kernel the freedom to schedule the
+ * actual wakeup to a time that is both power and performance friendly.
+ * The kernel gives the normal best effort behavior for "@expires+@delta",
+ * but may decide to fire the timer earlier, though no earlier than @expires.
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
+ * pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired, otherwise -EINTR
+ */
+int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
+			       const enum hrtimer_mode mode)
+{
+	struct hrtimer_sleeper t;
+
+	/*
+	 * Optimize when a zero timeout value is given. It does not
+	 * matter whether this is an absolute or a relative time.
+	 */
+	if (expires && !expires->tv64) {
+		__set_current_state(TASK_RUNNING);
+		return 0;
+	}
+
+	/*
+	 * A NULL parameter means "infinite"
+	 */
+	if (!expires) {
+		schedule();
+		__set_current_state(TASK_RUNNING);
+		return -EINTR;
+	}
+
+	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
+	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
+
+	hrtimer_init_sleeper(&t, current);
+
+	hrtimer_start_expires(&t.timer, mode);
+	if (!hrtimer_active(&t.timer))
+		t.task = NULL;
+
+	if (likely(t.task))
+		schedule();
+
+	hrtimer_cancel(&t.timer);
+	destroy_hrtimer_on_stack(&t.timer);
+
+	__set_current_state(TASK_RUNNING);
+
+	return !t.task ? 0 : -EINTR;
+}
+EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
+
+/**
+ * schedule_hrtimeout - sleep until timeout
+ * @expires:	timeout value (ktime_t)
+ * @mode:	timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+ * the current task state has been set (see set_current_state()).
+ *
+ * You can set the task state as follows -
+ *
+ * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
+ * pass before the routine returns.
+ *
+ * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
+ * delivered to the current task.
+ *
+ * The current task state is guaranteed to be TASK_RUNNING when this
+ * routine returns.
+ *
+ * Returns 0 when the timer has expired, otherwise -EINTR
+ */
+int __sched schedule_hrtimeout(ktime_t *expires,
+			       const enum hrtimer_mode mode)
+{
+	return schedule_hrtimeout_range(expires, 0, mode);
+}
+EXPORT_SYMBOL_GPL(schedule_hrtimeout);
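
To illustrate the interface this diff introduces: a range timer now carries a soft and a hard expiry, and the kernel may fire it anywhere inside that window to coalesce wakeups. Below is a minimal, hypothetical usage sketch; schedule_hrtimeout_range(), hrtimer_set_expires_range_ns() and hrtimer_start_expires() are the calls added or used by this patch, while the surrounding thread function is illustrative scaffolding only.

/*
 * Hypothetical example; only the *_range_ns / *_expires calls come
 * from the patch above.
 */
#include <linux/kthread.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/sched.h>

static int sleepy_thread(void *unused)
{
	while (!kthread_should_stop()) {
		/* Sleep ~100ms, but allow the wakeup to slip by up to
		 * 10ms so it can be batched with other expiring timers. */
		ktime_t expires = ktime_add_ns(ktime_get(),
					       100 * NSEC_PER_MSEC);

		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout_range(&expires, 10 * NSEC_PER_MSEC,
					 HRTIMER_MODE_ABS);
	}
	return 0;
}

/* The same windowing pattern the patch applies in hrtimer_nanosleep():
 * give the timer a [tim, tim+slack] expiry range, then arm it on the
 * hard expiry. */
static void arm_with_slack(struct hrtimer_sleeper *t, ktime_t tim,
			   unsigned long slack_ns)
{
	hrtimer_set_expires_range_ns(&t->timer, tim, slack_ns);
	hrtimer_start_expires(&t->timer, HRTIMER_MODE_ABS);
}

Design note: because the soft expiry is what orders timers in the existing rbtree, the interrupt handler can keep using a plain binary search tree instead of an interval structure, as the comment added in hrtimer_interrupt() explains.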