Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--	kernel/posix-cpu-timers.c	192
1 file changed, 184 insertions, 8 deletions

diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 438ff4523513..3dd921892a4b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -280,7 +280,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 	struct task_cputime sum;
 	unsigned long flags;
 
-	spin_lock_irqsave(&cputimer->lock, flags);
+	raw_spin_lock_irqsave(&cputimer->lock, flags);
 	if (!cputimer->running) {
 		cputimer->running = 1;
 		/*
@@ -293,7 +293,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 		update_gt_cputime(&cputimer->cputime, &sum);
 	}
 	*times = cputimer->cputime;
-	spin_unlock_irqrestore(&cputimer->lock, flags);
+	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
 /*
@@ -570,7 +570,7 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 		p->cpu_timers : p->signal->cpu_timers);
 	head += CPUCLOCK_WHICH(timer->it_clock);
 
-	BUG_ON(!irqs_disabled());
+	BUG_ON_NONRT(!irqs_disabled());
 	spin_lock(&p->sighand->siglock);
 
 	listpos = head;
@@ -749,7 +749,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	/*
 	 * Disarm any old timer after extracting its expiry time.
 	 */
-	BUG_ON(!irqs_disabled());
+	BUG_ON_NONRT(!irqs_disabled());
 
 	ret = 0;
 	spin_lock(&p->sighand->siglock);
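The two hunks above relax a debugging assertion rather than remove it: with PREEMPT_RT these paths may run in preemptible context, so "interrupts are disabled here" only holds on non-RT configurations. Below is a minimal user-space model of the BUG_ON_NONRT() idea, assuming the usual RT-tree definition (the check compiles away when RT is enabled and stays a hard assertion otherwise); the MODEL_PREEMPT_RT_FULL macro and file name are illustrative only, not part of this patch.

/* bug_on_nonrt_model.c - user-space sketch of the BUG_ON_NONRT() semantics. */
#include <assert.h>
#include <stdio.h>

#ifdef MODEL_PREEMPT_RT_FULL
/* RT: the "interrupts disabled" invariant no longer holds, so drop the check. */
# define BUG_ON_NONRT(c)	do { } while (0)
#else
/* non-RT: behave like BUG_ON() and enforce the invariant. */
# define BUG_ON_NONRT(c)	assert(!(c))
#endif

int main(void)
{
	int irqs_disabled = 1;	/* stand-in for irqs_disabled() in the old hard-irq path */

	BUG_ON_NONRT(!irqs_disabled);
	puts("assertion passed (or was compiled out on RT)");
	return 0;
}

Building with -DMODEL_PREEMPT_RT_FULL drops the check entirely, mirroring how the RT tree keeps the assertion for mainline builds while tolerating the changed locking rules on RT.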
@@ -1068,9 +1068,9 @@ static void stop_process_timers(struct task_struct *tsk)
 	if (!cputimer->running)
 		return;
 
-	spin_lock_irqsave(&cputimer->lock, flags);
+	raw_spin_lock_irqsave(&cputimer->lock, flags);
 	cputimer->running = 0;
-	spin_unlock_irqrestore(&cputimer->lock, flags);
+	raw_spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
 static u32 onecputick;
@@ -1390,12 +1390,11 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
  * already updated our counts. We need to check if any timers fire now.
  * Interrupts are disabled.
  */
-void run_posix_cpu_timers(struct task_struct *tsk)
+void __run_posix_cpu_timers(struct task_struct *tsk)
 {
 	LIST_HEAD(firing);
 	struct k_itimer *timer, *next;
 
-	BUG_ON(!irqs_disabled());
 
 	/*
 	 * The fast path checks that there are no expired thread or thread
@@ -1447,6 +1446,177 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	}
 }
 
+#include <linux/kthread.h>
+#include <linux/cpu.h>
+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
+
+static int posix_cpu_timers_thread(void *data)
+{
+	int cpu = (long)data;
+
+	BUG_ON(per_cpu(posix_timer_task, cpu) != current);
+
+	while (!kthread_should_stop()) {
+		struct task_struct *tsk = NULL;
+		struct task_struct *next = NULL;
+
+		if (cpu_is_offline(cpu))
+			goto wait_to_die;
+
+		/* grab task list */
+		raw_local_irq_disable();
+		tsk = per_cpu(posix_timer_tasklist, cpu);
+		per_cpu(posix_timer_tasklist, cpu) = NULL;
+		raw_local_irq_enable();
+
+		/* it's possible the list is empty, just return */
+		if (!tsk) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule();
+			__set_current_state(TASK_RUNNING);
+			continue;
+		}
+
+		/* Process task list */
+		while (1) {
+			/* save next */
+			next = tsk->posix_timer_list;
+
+			/* run the task timers, clear its ptr and
+			 * unreference it
+			 */
+			__run_posix_cpu_timers(tsk);
+			tsk->posix_timer_list = NULL;
+			put_task_struct(tsk);
+
+			/* check if this is the last on the list */
+			if (next == tsk)
+				break;
+			tsk = next;
+		}
+	}
+	return 0;
+
+wait_to_die:
+	/* Wait for kthread_stop */
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+static inline int __fastpath_timer_check(struct task_struct *tsk)
+{
+	/* tsk == current, ensure it is safe to use ->signal/sighand */
+	if (unlikely(tsk->exit_state))
+		return 0;
+
+	if (!task_cputime_zero(&tsk->cputime_expires))
+		return 1;
+
+	if (!task_cputime_zero(&tsk->signal->cputime_expires))
+		return 1;
+
+	return 0;
+}
+
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+	unsigned long cpu = smp_processor_id();
+	struct task_struct *tasklist;
+
+	BUG_ON(!irqs_disabled());
+	if (!per_cpu(posix_timer_task, cpu))
+		return;
+	/* get per-cpu references */
+	tasklist = per_cpu(posix_timer_tasklist, cpu);
+
+	/* check to see if we're already queued */
+	if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
+		get_task_struct(tsk);
+		if (tasklist) {
+			tsk->posix_timer_list = tasklist;
+		} else {
+			/*
+			 * The list is terminated by a self-pointing
+			 * task_struct
+			 */
+			tsk->posix_timer_list = tsk;
+		}
+		per_cpu(posix_timer_tasklist, cpu) = tsk;
+
+		wake_up_process(per_cpu(posix_timer_task, cpu));
+	}
+}
+
+/*
+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
+ * Here we can start up the necessary posix timer thread for the new CPU.
+ */
+static int posix_cpu_thread_call(struct notifier_block *nfb,
+				 unsigned long action, void *hcpu)
+{
+	int cpu = (long)hcpu;
+	struct task_struct *p;
+	struct sched_param param;
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+		p = kthread_create(posix_cpu_timers_thread, hcpu,
+				   "posixcputmr/%d", cpu);
+		if (IS_ERR(p))
+			return NOTIFY_BAD;
+		p->flags |= PF_NOFREEZE;
+		kthread_bind(p, cpu);
+		/* Must be high prio to avoid getting starved */
+		param.sched_priority = MAX_RT_PRIO - 1;
+		sched_setscheduler(p, SCHED_FIFO, &param);
+		per_cpu(posix_timer_task, cpu) = p;
+		break;
+	case CPU_ONLINE:
+		/* Strictly unnecessary, as the first user will wake it. */
+		wake_up_process(per_cpu(posix_timer_task, cpu));
+		break;
+#ifdef CONFIG_HOTPLUG_CPU
+	case CPU_UP_CANCELED:
+		/* Unbind it from the offline cpu so it can run. Fall through. */
+		kthread_bind(per_cpu(posix_timer_task, cpu),
+			     any_online_cpu(cpu_online_map));
+		kthread_stop(per_cpu(posix_timer_task, cpu));
+		per_cpu(posix_timer_task, cpu) = NULL;
+		break;
+	case CPU_DEAD:
+		kthread_stop(per_cpu(posix_timer_task, cpu));
+		per_cpu(posix_timer_task, cpu) = NULL;
+		break;
+#endif
+	}
+	return NOTIFY_OK;
+}
+
+/* Register at highest priority so that task migration (migrate_all_tasks)
+ * happens before everything else.
+ */
+static struct notifier_block __devinitdata posix_cpu_thread_notifier = {
+	.notifier_call = posix_cpu_thread_call,
+	.priority = 10
+};
+
+static int __init posix_cpu_thread_init(void)
+{
+	void *cpu = (void *)(long)smp_processor_id();
+	/* Start one for boot CPU. */
+	posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, cpu);
+	posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, cpu);
+	register_cpu_notifier(&posix_cpu_thread_notifier);
+	return 0;
+}
+early_initcall(posix_cpu_thread_init);
+
 /*
  * Set one of the process-wide special case CPU timers.
  * The tsk->sighand->siglock must be held by the caller.
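The hunk above carries the heart of the change: on RT, run_posix_cpu_timers() no longer processes expired timers from the timer interrupt. Instead it links the current task onto a per-CPU singly linked list and wakes the per-CPU posixcputmr/N kthread, which drains the list and calls __run_posix_cpu_timers() from task context. The list is intrusive (tsk->posix_timer_list) and is terminated by a self-pointer rather than NULL, so the same field doubles as the "already queued" flag. The following standalone user-space sketch models just that list discipline; the names fake_task, queue_task and drain_list are illustrative and not part of the patch.

#include <stdio.h>
#include <stddef.h>

/* Illustrative stand-in for task_struct: only the fields the list needs. */
struct fake_task {
	const char *name;
	struct fake_task *posix_timer_list;	/* NULL = not queued */
};

/* Stand-in for the per-CPU list head (a single CPU in this sketch). */
static struct fake_task *timer_tasklist;

/* Mirrors the enqueue logic in the new run_posix_cpu_timers(). */
static void queue_task(struct fake_task *tsk)
{
	if (tsk->posix_timer_list)
		return;				/* already queued */
	if (timer_tasklist)
		tsk->posix_timer_list = timer_tasklist;
	else
		tsk->posix_timer_list = tsk;	/* self-pointer terminates the list */
	timer_tasklist = tsk;
}

/* Mirrors the drain loop in posix_cpu_timers_thread(). */
static void drain_list(void)
{
	struct fake_task *tsk = timer_tasklist;

	timer_tasklist = NULL;
	if (!tsk)
		return;
	while (1) {
		struct fake_task *next = tsk->posix_timer_list;

		printf("expiring timers for %s\n", tsk->name);
		tsk->posix_timer_list = NULL;	/* no longer queued */
		if (next == tsk)		/* self-pointer: end of list */
			break;
		tsk = next;
	}
}

int main(void)
{
	struct fake_task a = { "task-a", NULL };
	struct fake_task b = { "task-b", NULL };

	queue_task(&a);
	queue_task(&b);
	queue_task(&b);		/* duplicate enqueue is ignored */
	drain_list();		/* prints task-b, then task-a (LIFO order) */
	return 0;
}

Draining is LIFO (the example prints task-b before task-a), which is acceptable here because each queued task only needs its expired timers run once before being dropped from the list.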
@@ -1713,6 +1883,12 @@ static __init int init_posix_cpu_timers(void)
 		.nsleep_restart = thread_cpu_nsleep_restart,
 	};
 	struct timespec ts;
+	unsigned long cpu;
+
+	/* init the per-cpu posix_timer_tasklists */
+	for_each_cpu_mask(cpu, cpu_possible_map) {
+		per_cpu(posix_timer_tasklist, cpu) = NULL;
+	}
 
 	register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
 	register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);