Diffstat (limited to 'kernel/time/timer.c')
-rw-r--r--	kernel/time/timer.c	105
1 file changed, 96 insertions(+), 9 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 343c7ba33b1c..0e315a2e77ae 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -196,6 +196,10 @@ EXPORT_SYMBOL(jiffies_64);
 struct timer_base {
 	raw_spinlock_t		lock;
 	struct timer_list	*running_timer;
+#ifdef CONFIG_PREEMPT_RT
+	spinlock_t		expiry_lock;
+	atomic_t		timer_waiters;
+#endif
 	unsigned long		clk;
 	unsigned long		next_expiry;
 	unsigned int		cpu;
@@ -1227,7 +1231,78 @@ int try_to_del_timer_sync(struct timer_list *timer)
 }
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_PREEMPT_RT
+static __init void timer_base_init_expiry_lock(struct timer_base *base)
+{
+	spin_lock_init(&base->expiry_lock);
+}
+
+static inline void timer_base_lock_expiry(struct timer_base *base)
+{
+	spin_lock(&base->expiry_lock);
+}
+
+static inline void timer_base_unlock_expiry(struct timer_base *base)
+{
+	spin_unlock(&base->expiry_lock);
+}
+
+/*
+ * The counterpart to del_timer_wait_running().
+ *
+ * If there is a waiter for base->expiry_lock, then it was waiting for the
+ * timer callback to finish. Drop expiry_lock and reacquire it. That allows
+ * the waiter to acquire the lock and make progress.
+ */
+static void timer_sync_wait_running(struct timer_base *base)
+{
+	if (atomic_read(&base->timer_waiters)) {
+		spin_unlock(&base->expiry_lock);
+		spin_lock(&base->expiry_lock);
+	}
+}
+
+/*
+ * This function is called on PREEMPT_RT kernels when the fast path
+ * deletion of a timer failed because the timer callback function was
+ * running.
+ *
+ * This prevents priority inversion if the softirq thread on a remote CPU
+ * got preempted, and it prevents a live lock when the task which tries to
+ * delete a timer preempted the softirq thread running the timer callback
+ * function.
+ */
+static void del_timer_wait_running(struct timer_list *timer)
+{
+	u32 tf;
+
+	tf = READ_ONCE(timer->flags);
+	if (!(tf & TIMER_MIGRATING)) {
+		struct timer_base *base = get_timer_base(tf);
+
+		/*
+		 * Mark the base as contended and grab the expiry lock,
+		 * which is held by the softirq across the timer
+		 * callback. Drop the lock immediately so the softirq can
+		 * expire the next timer. In theory the timer could already
+		 * be running again, but that's more than unlikely and just
+		 * causes another wait loop.
+		 */
+		atomic_inc(&base->timer_waiters);
+		spin_lock_bh(&base->expiry_lock);
+		atomic_dec(&base->timer_waiters);
+		spin_unlock_bh(&base->expiry_lock);
+	}
+}
+#else
+static inline void timer_base_init_expiry_lock(struct timer_base *base) { }
+static inline void timer_base_lock_expiry(struct timer_base *base) { }
+static inline void timer_base_unlock_expiry(struct timer_base *base) { }
+static inline void timer_sync_wait_running(struct timer_base *base) { }
+static inline void del_timer_wait_running(struct timer_list *timer) { }
+#endif
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
@@ -1266,6 +1341,8 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
  */
 int del_timer_sync(struct timer_list *timer)
 {
+	int ret;
+
 #ifdef CONFIG_LOCKDEP
 	unsigned long flags;
 
@@ -1283,12 +1360,17 @@ int del_timer_sync(struct timer_list *timer)
 	 * could lead to deadlock.
 	 */
 	WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
-	for (;;) {
-		int ret = try_to_del_timer_sync(timer);
-		if (ret >= 0)
-			return ret;
-		cpu_relax();
-	}
+
+	do {
+		ret = try_to_del_timer_sync(timer);
+
+		if (unlikely(ret < 0)) {
+			del_timer_wait_running(timer);
+			cpu_relax();
+		}
+	} while (ret < 0);
+
+	return ret;
 }
 EXPORT_SYMBOL(del_timer_sync);
 #endif
@@ -1360,10 +1442,13 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head)
 		if (timer->flags & TIMER_IRQSAFE) {
 			raw_spin_unlock(&base->lock);
 			call_timer_fn(timer, fn, baseclk);
+			base->running_timer = NULL;
 			raw_spin_lock(&base->lock);
 		} else {
 			raw_spin_unlock_irq(&base->lock);
 			call_timer_fn(timer, fn, baseclk);
+			base->running_timer = NULL;
+			timer_sync_wait_running(base);
 			raw_spin_lock_irq(&base->lock);
 		}
 	}
@@ -1643,7 +1728,7 @@ void update_process_times(int user_tick)
 #endif
 	scheduler_tick();
 	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
-		run_posix_cpu_timers(p);
+		run_posix_cpu_timers();
 }
 
 /**
@@ -1658,6 +1743,7 @@ static inline void __run_timers(struct timer_base *base)
 	if (!time_after_eq(jiffies, base->clk))
 		return;
 
+	timer_base_lock_expiry(base);
 	raw_spin_lock_irq(&base->lock);
 
 	/*
@@ -1684,8 +1770,8 @@ static inline void __run_timers(struct timer_base *base)
 		while (levels--)
 			expire_timers(base, heads + levels);
 	}
-	base->running_timer = NULL;
 	raw_spin_unlock_irq(&base->lock);
+	timer_base_unlock_expiry(base);
 }
 
 /*
@@ -1930,6 +2016,7 @@ static void __init init_timer_cpu(int cpu)
 		base->cpu = cpu;
 		raw_spin_lock_init(&base->lock);
 		base->clk = jiffies;
+		timer_base_init_expiry_lock(base);
 	}
 }
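
Note on the locking scheme (a sketch, not part of the commit): the expiry lock turns the old busy-wait in del_timer_sync() into a blocking wait. On PREEMPT_RT a spinlock_t is a priority-inheriting sleeping lock, so a deleter that blocks on base->expiry_lock boosts the preempted softirq thread instead of spinning against it; the old for (;;) / cpu_relax() loop could live-lock if the deleter had preempted the very thread running the callback. The userspace model below illustrates only the handshake; every name in it (model_base, model_sync_wait_running, model_del_wait_running, expiry_thread, the file name model.c) is invented for illustration, a pthread mutex stands in for the kernel lock, and usleep() stands in for a running timer callback.

/* model.c: userspace sketch of the expiry-lock handshake. Build with
 * cc -pthread model.c. Illustrative only; this is not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct model_base {
	pthread_mutex_t expiry_lock;	/* models base->expiry_lock */
	atomic_int	timer_waiters;	/* models base->timer_waiters */
};

static struct model_base base = {
	.expiry_lock = PTHREAD_MUTEX_INITIALIZER,
};

/* Softirq side, cf. timer_sync_wait_running(): called between two expired
 * timers while expiry_lock is held. If a deleter is waiting, drop the lock
 * briefly so it can take it and observe that the callback has finished. */
static void model_sync_wait_running(struct model_base *b)
{
	if (atomic_load(&b->timer_waiters)) {
		pthread_mutex_unlock(&b->expiry_lock);
		pthread_mutex_lock(&b->expiry_lock);
	}
}

/* Deleter side, cf. del_timer_wait_running(): the fast-path delete failed
 * because the callback is running. Announce the wait, then block on
 * expiry_lock, which the expiry loop holds across the callback; once it is
 * acquired, a callback boundary was reached and the delete can be retried. */
static void model_del_wait_running(struct model_base *b)
{
	atomic_fetch_add(&b->timer_waiters, 1);
	pthread_mutex_lock(&b->expiry_lock);
	atomic_fetch_sub(&b->timer_waiters, 1);
	pthread_mutex_unlock(&b->expiry_lock);
}

/* Models __run_timers(): hold the expiry lock around the whole batch,
 * run "callbacks", and offer the handshake after each one. */
static void *expiry_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&base.expiry_lock);		/* timer_base_lock_expiry() */
	for (int i = 0; i < 3; i++) {
		usleep(100 * 1000);			/* timer callback "runs" */
		model_sync_wait_running(&base);		/* handshake point */
	}
	pthread_mutex_unlock(&base.expiry_lock);	/* timer_base_unlock_expiry() */
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, expiry_thread, NULL);
	usleep(50 * 1000);		/* let a "callback" start first */
	model_del_wait_running(&base);	/* blocks instead of spinning */
	puts("callback boundary reached; delete can be retried");
	pthread_join(t, NULL);
	return 0;
}

In the model, main() blocks in model_del_wait_running() until the expiry thread reaches a callback boundary and briefly drops the lock, mirroring how del_timer_sync() on PREEMPT_RT waits for the softirq to finish the running callback rather than spinning on it.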