Diffstat (limited to 'kernel/time/posix-cpu-timers.c')
-rw-r--r--  kernel/time/posix-cpu-timers.c | 147
1 file changed, 62 insertions(+), 85 deletions(-)
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index d2a1e6dd0291..60cb24ac9ebc 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -12,6 +12,11 @@
 #include <trace/events/timer.h>
 #include <linux/tick.h>
 #include <linux/workqueue.h>
+#include <linux/compat.h>
+
+#include "posix-timers.h"
+
+static void posix_cpu_timer_rearm(struct k_itimer *timer);
 
 /*
  * Called after updating RLIMIT_CPU to run cpu timer and update
@@ -322,6 +327,8 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
 	if (CPUCLOCK_WHICH(new_timer->it_clock) >= CPUCLOCK_MAX)
 		return -EINVAL;
 
+	new_timer->kclock = &clock_posix_cpu;
+
 	INIT_LIST_HEAD(&new_timer->it.cpu.entry);
 
 	rcu_read_lock();
@@ -524,7 +531,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
 		 * reload the timer. But we need to keep it
 		 * ticking in case the signal is deliverable next time.
 		 */
-		posix_cpu_timer_schedule(timer);
+		posix_cpu_timer_rearm(timer);
+		++timer->it_requeue_pending;
 	}
 }
 
@@ -572,7 +580,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 
 	WARN_ON_ONCE(p == NULL);
 
-	new_expires = timespec64_to_ns(&new->it_value);
+	/*
+	 * Use the to_ktime conversion because that clamps the maximum
+	 * value to KTIME_MAX and avoid multiplication overflows.
+	 */
+	new_expires = ktime_to_ns(timespec64_to_ktime(new->it_value));
 
 	/*
 	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
@@ -712,10 +724,8 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
 	 */
 	itp->it_interval = ns_to_timespec64(timer->it.cpu.incr);
 
-	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all. */
-		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
+	if (!timer->it.cpu.expires)
 		return;
-	}
 
 	/*
 	 * Sample the clock to take the difference with the expiry time.
@@ -739,7 +749,6 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp
 			 * Call the timer disarmed, nothing else to do.
 			 */
 			timer->it.cpu.expires = 0;
-			itp->it_value = ns_to_timespec64(timer->it.cpu.expires);
 			return;
 		} else {
 			cpu_timer_sample_group(timer->it_clock, p, &now);
@@ -976,10 +985,10 @@ static void check_process_timers(struct task_struct *tsk,
 }
 
 /*
- * This is called from the signal code (via do_schedule_next_timer)
+ * This is called from the signal code (via posixtimer_rearm)
  * when the last timer signal was delivered and we have to reload the timer.
  */
-void posix_cpu_timer_schedule(struct k_itimer *timer)
+static void posix_cpu_timer_rearm(struct k_itimer *timer)
 {
 	struct sighand_struct *sighand;
 	unsigned long flags;
@@ -995,12 +1004,12 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 		cpu_clock_sample(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
 		if (unlikely(p->exit_state))
-			goto out;
+			return;
 
 		/* Protect timer list r/w in arm_timer() */
 		sighand = lock_task_sighand(p, &flags);
 		if (!sighand)
-			goto out;
+			return;
 	} else {
 		/*
 		 * Protect arm_timer() and timer sampling in case of call to
@@ -1013,11 +1022,10 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			 * We can't even collect a sample any more.
 			 */
 			timer->it.cpu.expires = 0;
-			goto out;
+			return;
 		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-			unlock_task_sighand(p, &flags);
-			/* Optimizations: if the process is dying, no need to rearm */
-			goto out;
+			/* If the process is dying, no need to rearm */
+			goto unlock;
 		}
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
@@ -1029,12 +1037,8 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	 */
 	WARN_ON_ONCE(!irqs_disabled());
 	arm_timer(timer);
+unlock:
 	unlock_task_sighand(p, &flags);
-
-out:
-	timer->it_overrun_last = timer->it_overrun;
-	timer->it_overrun = -1;
-	++timer->it_requeue_pending;
 }
 
 /**
@@ -1227,9 +1231,11 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 }
 
 static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
-			    struct timespec64 *rqtp, struct itimerspec64 *it)
+			    const struct timespec64 *rqtp)
 {
+	struct itimerspec64 it;
 	struct k_itimer timer;
+	u64 expires;
 	int error;
 
 	/*
@@ -1243,12 +1249,13 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 	timer.it_process = current;
 	if (!error) {
 		static struct itimerspec64 zero_it;
+		struct restart_block *restart;
 
-		memset(it, 0, sizeof *it);
-		it->it_value = *rqtp;
+		memset(&it, 0, sizeof(it));
+		it.it_value = *rqtp;
 
 		spin_lock_irq(&timer.it_lock);
-		error = posix_cpu_timer_set(&timer, flags, it, NULL);
+		error = posix_cpu_timer_set(&timer, flags, &it, NULL);
 		if (error) {
 			spin_unlock_irq(&timer.it_lock);
 			return error;
@@ -1277,8 +1284,8 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 		/*
 		 * We were interrupted by a signal.
 		 */
-		*rqtp = ns_to_timespec64(timer.it.cpu.expires);
-		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
+		expires = timer.it.cpu.expires;
+		error = posix_cpu_timer_set(&timer, 0, &zero_it, &it);
 		if (!error) {
 			/*
 			 * Timer is now unarmed, deletion can not fail.
@@ -1298,7 +1305,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 			spin_unlock_irq(&timer.it_lock);
 		}
 
-		if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) {
+		if ((it.it_value.tv_sec | it.it_value.tv_nsec) == 0) {
 			/*
 			 * It actually did fire already.
 			 */
@@ -1306,6 +1313,17 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 		}
 
 		error = -ERESTART_RESTARTBLOCK;
+		/*
+		 * Report back to the user the time still remaining.
+		 */
+		restart = &current->restart_block;
+		restart->nanosleep.expires = expires;
+		if (restart->nanosleep.type != TT_NONE) {
+			struct timespec ts;
+
+			ts = timespec64_to_timespec(it.it_value);
+			error = nanosleep_copyout(restart, &ts);
+		}
 	}
 
 	return error;
@@ -1314,11 +1332,9 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 static long posix_cpu_nsleep_restart(struct restart_block *restart_block);
 
 static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
-			    struct timespec64 *rqtp, struct timespec __user *rmtp)
+			    const struct timespec64 *rqtp)
 {
 	struct restart_block *restart_block = &current->restart_block;
-	struct itimerspec64 it;
-	struct timespec ts;
 	int error;
 
 	/*
@@ -1329,23 +1345,15 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
 	     CPUCLOCK_PID(which_clock) == task_pid_vnr(current)))
 		return -EINVAL;
 
-	error = do_cpu_nanosleep(which_clock, flags, rqtp, &it);
+	error = do_cpu_nanosleep(which_clock, flags, rqtp);
 
 	if (error == -ERESTART_RESTARTBLOCK) {
 
 		if (flags & TIMER_ABSTIME)
 			return -ERESTARTNOHAND;
-		/*
-		 * Report back to the user the time still remaining.
-		 */
-		ts = timespec64_to_timespec(it.it_value);
-		if (rmtp && copy_to_user(rmtp, &ts, sizeof(*rmtp)))
-			return -EFAULT;
 
 		restart_block->fn = posix_cpu_nsleep_restart;
 		restart_block->nanosleep.clockid = which_clock;
-		restart_block->nanosleep.rmtp = rmtp;
-		restart_block->nanosleep.expires = timespec64_to_ns(rqtp);
 	}
 	return error;
 }
@@ -1353,28 +1361,11 @@ static int posix_cpu_nsleep(const clockid_t which_clock, int flags,
 static long posix_cpu_nsleep_restart(struct restart_block *restart_block)
 {
 	clockid_t which_clock = restart_block->nanosleep.clockid;
-	struct itimerspec64 it;
 	struct timespec64 t;
-	struct timespec tmp;
-	int error;
 
 	t = ns_to_timespec64(restart_block->nanosleep.expires);
 
-	error = do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t, &it);
-
-	if (error == -ERESTART_RESTARTBLOCK) {
-		struct timespec __user *rmtp = restart_block->nanosleep.rmtp;
-		/*
-		 * Report back to the user the time still remaining.
-		 */
-		tmp = timespec64_to_timespec(it.it_value);
-		if (rmtp && copy_to_user(rmtp, &tmp, sizeof(*rmtp)))
-			return -EFAULT;
-
-		restart_block->nanosleep.expires = timespec64_to_ns(&t);
-	}
-	return error;
-
+	return do_cpu_nanosleep(which_clock, TIMER_ABSTIME, &t);
 }
 
 #define PROCESS_CLOCK	MAKE_PROCESS_CPUCLOCK(0, CPUCLOCK_SCHED)
@@ -1396,14 +1387,9 @@ static int process_cpu_timer_create(struct k_itimer *timer)
 	return posix_cpu_timer_create(timer);
 }
 static int process_cpu_nsleep(const clockid_t which_clock, int flags,
-			      struct timespec64 *rqtp,
-			      struct timespec __user *rmtp)
-{
-	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp, rmtp);
-}
-static long process_cpu_nsleep_restart(struct restart_block *restart_block)
+			      const struct timespec64 *rqtp)
 {
-	return -EINVAL;
+	return posix_cpu_nsleep(PROCESS_CLOCK, flags, rqtp);
 }
 static int thread_cpu_clock_getres(const clockid_t which_clock,
 				   struct timespec64 *tp)
@@ -1421,36 +1407,27 @@ static int thread_cpu_timer_create(struct k_itimer *timer)
 	return posix_cpu_timer_create(timer);
 }
 
-struct k_clock clock_posix_cpu = {
+const struct k_clock clock_posix_cpu = {
 	.clock_getres = posix_cpu_clock_getres,
 	.clock_set = posix_cpu_clock_set,
 	.clock_get = posix_cpu_clock_get,
 	.timer_create = posix_cpu_timer_create,
 	.nsleep = posix_cpu_nsleep,
-	.nsleep_restart = posix_cpu_nsleep_restart,
 	.timer_set = posix_cpu_timer_set,
 	.timer_del = posix_cpu_timer_del,
 	.timer_get = posix_cpu_timer_get,
+	.timer_rearm = posix_cpu_timer_rearm,
 };
 
-static __init int init_posix_cpu_timers(void)
-{
-	struct k_clock process = {
-		.clock_getres = process_cpu_clock_getres,
-		.clock_get = process_cpu_clock_get,
-		.timer_create = process_cpu_timer_create,
-		.nsleep = process_cpu_nsleep,
-		.nsleep_restart = process_cpu_nsleep_restart,
-	};
-	struct k_clock thread = {
-		.clock_getres = thread_cpu_clock_getres,
-		.clock_get = thread_cpu_clock_get,
-		.timer_create = thread_cpu_timer_create,
-	};
-
-	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
-	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
+const struct k_clock clock_process = {
+	.clock_getres = process_cpu_clock_getres,
+	.clock_get = process_cpu_clock_get,
+	.timer_create = process_cpu_timer_create,
+	.nsleep = process_cpu_nsleep,
+};
 
-	return 0;
-}
-__initcall(init_posix_cpu_timers);
+const struct k_clock clock_thread = {
+	.clock_getres = thread_cpu_clock_getres,
+	.clock_get = thread_cpu_clock_get,
+	.timer_create = thread_cpu_timer_create,
+};
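
Side note, not part of the patch: the comment added in the posix_cpu_timer_set() hunk explains that the ktime-based conversion clamps to KTIME_MAX rather than overflowing, whereas the old timespec64_to_ns() path multiplied tv_sec by NSEC_PER_SEC with no bound. The sketch below is a minimal userspace illustration of that clamping idea only; the struct and helper names are invented for the example and are not kernel API, though the KTIME_MAX/KTIME_SEC_MAX values mirror the kernel's definitions.

/* Illustrative userspace sketch of a saturating timespec-to-ns conversion,
 * modelled on the behaviour of the kernel's ktime_set()/timespec64_to_ktime(). */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000LL
#define KTIME_MAX	INT64_MAX
#define KTIME_SEC_MAX	(KTIME_MAX / NSEC_PER_SEC)

struct ts64 {
	int64_t tv_sec;
	long tv_nsec;
};

/* Any value with tv_sec >= KTIME_SEC_MAX saturates to KTIME_MAX, so the
 * multiplication below can never overflow the signed 64-bit result. */
static int64_t clamped_to_ns(const struct ts64 *ts)
{
	if (ts->tv_sec >= KTIME_SEC_MAX)
		return KTIME_MAX;
	return ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
}

int main(void)
{
	struct ts64 sane = { .tv_sec = 5, .tv_nsec = 250000000 };
	struct ts64 huge = { .tv_sec = INT64_MAX / 2, .tv_nsec = 0 };

	printf("sane: %lld ns\n", (long long)clamped_to_ns(&sane));
	printf("huge: %lld ns (clamped to KTIME_MAX)\n",
	       (long long)clamped_to_ns(&huge));
	return 0;
}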