Diffstat (limited to 'kernel')

 kernel/posix-cpu-timers.c | 218
 kernel/time.c             |  11
 kernel/timer.c            |  81
 3 files changed, 121 insertions(+), 189 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 1a22dfd42df9..564b3b0240dd 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -11,19 +11,18 @@
 #include <trace/events/timer.h>
 
 /*
- * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ * Called after updating RLIMIT_CPU to run cpu timer and update
+ * tsk->signal->cputime_expires expiration cache if necessary. Needs
+ * siglock protection since other code may update expiration cache as
+ * well.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
 {
 	cputime_t cputime = secs_to_cputime(rlim_new);
-	struct signal_struct *const sig = current->signal;
 
-	if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
-	    cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
-		spin_lock_irq(&current->sighand->siglock);
-		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-		spin_unlock_irq(&current->sighand->siglock);
-	}
+	spin_lock_irq(&current->sighand->siglock);
+	set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+	spin_unlock_irq(&current->sighand->siglock);
 }
 
 static int check_clock(const clockid_t which_clock)
@@ -548,111 +547,62 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 	       cputime_gt(expires, new_exp);
 }
 
-static inline int expires_le(cputime_t expires, cputime_t new_exp)
-{
-	return !cputime_eq(expires, cputime_zero) &&
-	       cputime_le(expires, new_exp);
-}
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the tasklist_lock held
- * for reading, and interrupts disabled.
+ * for reading, interrupts disabled and p->sighand->siglock taken.
  */
-static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
+static void arm_timer(struct k_itimer *timer)
 {
 	struct task_struct *p = timer->it.cpu.task;
 	struct list_head *head, *listpos;
+	struct task_cputime *cputime_expires;
 	struct cpu_timer_list *const nt = &timer->it.cpu;
 	struct cpu_timer_list *next;
-	unsigned long i;
 
-	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
-		p->cpu_timers : p->signal->cpu_timers);
+	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+		head = p->cpu_timers;
+		cputime_expires = &p->cputime_expires;
+	} else {
+		head = p->signal->cpu_timers;
+		cputime_expires = &p->signal->cputime_expires;
+	}
 	head += CPUCLOCK_WHICH(timer->it_clock);
 
-	BUG_ON(!irqs_disabled());
-	spin_lock(&p->sighand->siglock);
-
 	listpos = head;
-	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
-		list_for_each_entry(next, head, entry) {
-			if (next->expires.sched > nt->expires.sched)
-				break;
-			listpos = &next->entry;
-		}
-	} else {
-		list_for_each_entry(next, head, entry) {
-			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
-				break;
-			listpos = &next->entry;
-		}
+	list_for_each_entry(next, head, entry) {
+		if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
+			break;
+		listpos = &next->entry;
 	}
 	list_add(&nt->entry, listpos);
 
 	if (listpos == head) {
+		union cpu_time_count *exp = &nt->expires;
+
 		/*
-		 * We are the new earliest-expiring timer.
-		 * If we are a thread timer, there can always
-		 * be a process timer telling us to stop earlier.
+		 * We are the new earliest-expiring POSIX 1.b timer, hence
+		 * need to update expiration cache. Take into account that
+		 * for process timers we share expiration cache with itimers
+		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
 		 */
 
-		if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
-			union cpu_time_count *exp = &nt->expires;
-
-			switch (CPUCLOCK_WHICH(timer->it_clock)) {
-			default:
-				BUG();
-			case CPUCLOCK_PROF:
-				if (expires_gt(p->cputime_expires.prof_exp,
-					       exp->cpu))
-					p->cputime_expires.prof_exp = exp->cpu;
-				break;
-			case CPUCLOCK_VIRT:
-				if (expires_gt(p->cputime_expires.virt_exp,
-					       exp->cpu))
-					p->cputime_expires.virt_exp = exp->cpu;
-				break;
-			case CPUCLOCK_SCHED:
-				if (p->cputime_expires.sched_exp == 0 ||
-				    p->cputime_expires.sched_exp > exp->sched)
-					p->cputime_expires.sched_exp =
-						exp->sched;
-				break;
-			}
-		} else {
-			struct signal_struct *const sig = p->signal;
-			union cpu_time_count *exp = &timer->it.cpu.expires;
-
-			/*
-			 * For a process timer, set the cached expiration time.
-			 */
-			switch (CPUCLOCK_WHICH(timer->it_clock)) {
-			default:
-				BUG();
-			case CPUCLOCK_VIRT:
-				if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
-					       exp->cpu))
-					break;
-				sig->cputime_expires.virt_exp = exp->cpu;
-				break;
-			case CPUCLOCK_PROF:
-				if (expires_le(sig->it[CPUCLOCK_PROF].expires,
-					       exp->cpu))
-					break;
-				i = sig->rlim[RLIMIT_CPU].rlim_cur;
-				if (i != RLIM_INFINITY &&
-				    i <= cputime_to_secs(exp->cpu))
-					break;
-				sig->cputime_expires.prof_exp = exp->cpu;
-				break;
-			case CPUCLOCK_SCHED:
-				sig->cputime_expires.sched_exp = exp->sched;
-				break;
-			}
+		switch (CPUCLOCK_WHICH(timer->it_clock)) {
+		case CPUCLOCK_PROF:
+			if (expires_gt(cputime_expires->prof_exp, exp->cpu))
+				cputime_expires->prof_exp = exp->cpu;
+			break;
+		case CPUCLOCK_VIRT:
+			if (expires_gt(cputime_expires->virt_exp, exp->cpu))
+				cputime_expires->virt_exp = exp->cpu;
+			break;
+		case CPUCLOCK_SCHED:
+			if (cputime_expires->sched_exp == 0 ||
+			    cputime_expires->sched_exp > exp->sched)
+				cputime_expires->sched_exp = exp->sched;
+			break;
 		}
 	}
-
-	spin_unlock(&p->sighand->siglock);
 }
 
 /*
@@ -660,7 +610,12 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
  */
 static void cpu_timer_fire(struct k_itimer *timer)
 {
-	if (unlikely(timer->sigq == NULL)) {
+	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
+		/*
+		 * User don't want any signal.
+		 */
+		timer->it.cpu.expires.sched = 0;
+	} else if (unlikely(timer->sigq == NULL)) {
 		/*
 		 * This a special case for clock_nanosleep,
 		 * not a normal timer from sys_timer_create.
@@ -721,7 +676,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 			struct itimerspec *new, struct itimerspec *old)
 {
 	struct task_struct *p = timer->it.cpu.task;
-	union cpu_time_count old_expires, new_expires, val;
+	union cpu_time_count old_expires, new_expires, old_incr, val;
 	int ret;
 
 	if (unlikely(p == NULL)) {
@@ -752,6 +707,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	BUG_ON(!irqs_disabled());
 
 	ret = 0;
+	old_incr = timer->it.cpu.incr;
 	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
 	if (unlikely(timer->it.cpu.firing)) {
@@ -759,7 +715,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		ret = TIMER_RETRY;
 	} else
 		list_del_init(&timer->it.cpu.entry);
-	spin_unlock(&p->sighand->siglock);
 
 	/*
 	 * We need to sample the current value to convert the new
@@ -813,6 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * disable this firing since we are already reporting
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
+		spin_unlock(&p->sighand->siglock);
 		read_unlock(&tasklist_lock);
 		goto out;
 	}
@@ -828,11 +784,11 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	 */
 	timer->it.cpu.expires = new_expires;
 	if (new_expires.sched != 0 &&
-	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
 	    cpu_time_before(timer->it_clock, val, new_expires)) {
-		arm_timer(timer, val);
+		arm_timer(timer);
 	}
 
+	spin_unlock(&p->sighand->siglock);
 	read_unlock(&tasklist_lock);
 
 	/*
@@ -853,7 +809,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	timer->it_overrun = -1;
 
 	if (new_expires.sched != 0 &&
-	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
 	    !cpu_time_before(timer->it_clock, val, new_expires)) {
 		/*
 		 * The designated time already passed, so we notify
@@ -867,7 +822,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
  out:
 	if (old) {
 		sample_to_timespec(timer->it_clock,
-				   timer->it.cpu.incr, &old->it_interval);
+				   old_incr, &old->it_interval);
 	}
 	return ret;
 }
@@ -927,25 +882,6 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 		read_unlock(&tasklist_lock);
 	}
 
-	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
-		if (timer->it.cpu.incr.sched == 0 &&
-		    cpu_time_before(timer->it_clock,
-				    timer->it.cpu.expires, now)) {
-			/*
-			 * Do-nothing timer expired and has no reload,
-			 * so it's as if it was never set.
-			 */
-			timer->it.cpu.expires.sched = 0;
-			itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
-			return;
-		}
-		/*
-		 * Account for any expirations and reloads that should
-		 * have happened.
-		 */
-		bump_cpu_timer(timer, now);
-	}
-
 	if (unlikely(clear_dead)) {
 		/*
 		 * We've noticed that the thread is dead, but
@@ -1266,6 +1202,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			goto out;
 		}
 		read_lock(&tasklist_lock); /* arm_timer needs it. */
+		spin_lock(&p->sighand->siglock);
 	} else {
 		read_lock(&tasklist_lock);
 		if (unlikely(p->signal == NULL)) {
@@ -1286,6 +1223,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			clear_dead_task(timer, now);
 			goto out_unlock;
 		}
+		spin_lock(&p->sighand->siglock);
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
 		/* Leave the tasklist_lock locked for the call below. */
@@ -1294,7 +1232,9 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	/*
 	 * Now re-arm for the new expiry time.
 	 */
-	arm_timer(timer, now);
+	BUG_ON(!irqs_disabled());
+	arm_timer(timer);
+	spin_unlock(&p->sighand->siglock);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
@@ -1386,7 +1326,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 		return 1;
 	}
 
-	return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
+	return 0;
 }
 
 /*
@@ -1452,21 +1392,23 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
- * Set one of the process-wide special case CPU timers.
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
  * The tsk->sighand->siglock must be held by the caller.
- * The *newval argument is relative and we update it to be absolute, *oldval
- * is absolute and we update it to be relative.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 			   cputime_t *newval, cputime_t *oldval)
 {
 	union cpu_time_count now;
-	struct list_head *head;
 
 	BUG_ON(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
 
 	if (oldval) {
+		/*
+		 * We are setting itimer. The *oldval is absolute and we update
+		 * it to be relative, *newval argument is relative and we update
+		 * it to be absolute.
+		 */
 		if (!cputime_eq(*oldval, cputime_zero)) {
 			if (cputime_le(*oldval, now.cpu)) {
 				/* Just about to fire. */
@@ -1479,33 +1421,21 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 		if (cputime_eq(*newval, cputime_zero))
 			return;
 		*newval = cputime_add(*newval, now.cpu);
-
-		/*
-		 * If the RLIMIT_CPU timer will expire before the
-		 * ITIMER_PROF timer, we have nothing else to do.
-		 */
-		if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
-		    < cputime_to_secs(*newval))
-			return;
 	}
 
 	/*
-	 * Check whether there are any process timers already set to fire
-	 * before this one.  If so, we don't have anything more to do.
+	 * Update expiration cache if we are the earliest timer, or eventually
+	 * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
 	 */
-	head = &tsk->signal->cpu_timers[clock_idx];
-	if (list_empty(head) ||
-	    cputime_ge(list_first_entry(head,
-				struct cpu_timer_list, entry)->expires.cpu,
-		       *newval)) {
-		switch (clock_idx) {
-		case CPUCLOCK_PROF:
+	switch (clock_idx) {
+	case CPUCLOCK_PROF:
+		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
 			tsk->signal->cputime_expires.prof_exp = *newval;
 		break;
-		case CPUCLOCK_VIRT:
+	case CPUCLOCK_VIRT:
+		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
 			tsk->signal->cputime_expires.virt_exp = *newval;
 		break;
-		}
 	}
 }
 
diff --git a/kernel/time.c b/kernel/time.c
index 804798005d19..2358a3646a63 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -133,12 +133,11 @@ SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
  */
 static inline void warp_clock(void)
 {
-	write_seqlock_irq(&xtime_lock);
-	wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
-	xtime.tv_sec += sys_tz.tz_minuteswest * 60;
-	update_xtime_cache(0);
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
+	struct timespec delta, adjust;
+	delta.tv_sec = sys_tz.tz_minuteswest * 60;
+	delta.tv_nsec = 0;
+	adjust = timespec_add_safe(current_kernel_time(), delta);
+	do_settimeofday(&adjust);
 }
 
 /*
diff --git a/kernel/timer.c b/kernel/timer.c
index c61a7949387f..7e12e7bc7ce6 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -953,6 +953,47 @@ static int cascade(struct tvec_base *base, struct tvec *tv, int index)
 	return index;
 }
 
+static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
+			  unsigned long data)
+{
+	int preempt_count = preempt_count();
+
+#ifdef CONFIG_LOCKDEP
+	/*
+	 * It is permissible to free the timer from inside the
+	 * function that is called from it, this we need to take into
+	 * account for lockdep too. To avoid bogus "held lock freed"
+	 * warnings as well as problems when looking into
+	 * timer->lockdep_map, make a copy and use that here.
+	 */
+	struct lockdep_map lockdep_map = timer->lockdep_map;
+#endif
+	/*
+	 * Couple the lock chain with the lock chain at
+	 * del_timer_sync() by acquiring the lock_map around the fn()
+	 * call here and in del_timer_sync().
+	 */
+	lock_map_acquire(&lockdep_map);
+
+	trace_timer_expire_entry(timer);
+	fn(data);
+	trace_timer_expire_exit(timer);
+
+	lock_map_release(&lockdep_map);
+
+	if (preempt_count != preempt_count()) {
+		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
+			  fn, preempt_count, preempt_count());
+		/*
+		 * Restore the preempt count. That gives us a decent
+		 * chance to survive and extract information. If the
+		 * callback kept a lock held, bad luck, but not worse
+		 * than the BUG() we had.
+		 */
+		preempt_count() = preempt_count;
+	}
+}
+
 #define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)
 
 /**
@@ -996,45 +1037,7 @@ static inline void __run_timers(struct tvec_base *base)
 			detach_timer(timer, 1);
 
 			spin_unlock_irq(&base->lock);
-			{
-				int preempt_count = preempt_count();
-
-#ifdef CONFIG_LOCKDEP
-				/*
-				 * It is permissible to free the timer from
-				 * inside the function that is called from
-				 * it, this we need to take into account for
-				 * lockdep too. To avoid bogus "held lock
-				 * freed" warnings as well as problems when
-				 * looking into timer->lockdep_map, make a
-				 * copy and use that here.
-				 */
-				struct lockdep_map lockdep_map =
-					timer->lockdep_map;
-#endif
-				/*
-				 * Couple the lock chain with the lock chain at
-				 * del_timer_sync() by acquiring the lock_map
-				 * around the fn() call here and in
-				 * del_timer_sync().
-				 */
-				lock_map_acquire(&lockdep_map);
-
-				trace_timer_expire_entry(timer);
-				fn(data);
-				trace_timer_expire_exit(timer);
-
-				lock_map_release(&lockdep_map);
-
-				if (preempt_count != preempt_count()) {
-					printk(KERN_ERR "huh, entered %p "
-					       "with preempt_count %08x, exited"
-					       " with %08x?\n",
-					       fn, preempt_count,
-					       preempt_count());
-					BUG();
-				}
-			}
+			call_timer_fn(timer, fn, data);
 			spin_lock_irq(&base->lock);
 		}
 	}