Diffstat (limited to 'kernel/time/timer.c')
-rw-r--r--  kernel/time/timer.c  149
1 file changed, 67 insertions, 82 deletions
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 2d3f5c504939..2ece3aa5069c 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -90,8 +90,18 @@ struct tvec_base {
         struct tvec tv5;
 } ____cacheline_aligned;
 
+/*
+ * __TIMER_INITIALIZER() needs to set ->base to a valid pointer (because we've
+ * made NULL special, hint: lock_timer_base()) and we cannot get a compile time
+ * pointer to per-cpu entries because we don't know where we'll map the section,
+ * even for the boot cpu.
+ *
+ * And so we use boot_tvec_bases for boot CPU and per-cpu __tvec_bases for the
+ * rest of them.
+ */
 struct tvec_base boot_tvec_bases;
 EXPORT_SYMBOL(boot_tvec_bases);
+
 static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
 
 /* Functions below help us manage 'deferrable' flag */
@@ -1027,6 +1037,8 @@ int try_to_del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
 #ifdef CONFIG_SMP
+static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);
+
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
@@ -1532,64 +1544,6 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 }
 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
-static int init_timers_cpu(int cpu)
-{
-        int j;
-        struct tvec_base *base;
-        static char tvec_base_done[NR_CPUS];
-
-        if (!tvec_base_done[cpu]) {
-                static char boot_done;
-
-                if (boot_done) {
-                        /*
-                         * The APs use this path later in boot
-                         */
-                        base = kzalloc_node(sizeof(*base), GFP_KERNEL,
-                                            cpu_to_node(cpu));
-                        if (!base)
-                                return -ENOMEM;
-
-                        /* Make sure tvec_base has TIMER_FLAG_MASK bits free */
-                        if (WARN_ON(base != tbase_get_base(base))) {
-                                kfree(base);
-                                return -ENOMEM;
-                        }
-                        per_cpu(tvec_bases, cpu) = base;
-                } else {
-                        /*
-                         * This is for the boot CPU - we use compile-time
-                         * static initialisation because per-cpu memory isn't
-                         * ready yet and because the memory allocators are not
-                         * initialised either.
-                         */
-                        boot_done = 1;
-                        base = &boot_tvec_bases;
-                }
-                spin_lock_init(&base->lock);
-                tvec_base_done[cpu] = 1;
-                base->cpu = cpu;
-        } else {
-                base = per_cpu(tvec_bases, cpu);
-        }
-
-
-        for (j = 0; j < TVN_SIZE; j++) {
-                INIT_LIST_HEAD(base->tv5.vec + j);
-                INIT_LIST_HEAD(base->tv4.vec + j);
-                INIT_LIST_HEAD(base->tv3.vec + j);
-                INIT_LIST_HEAD(base->tv2.vec + j);
-        }
-        for (j = 0; j < TVR_SIZE; j++)
-                INIT_LIST_HEAD(base->tv1.vec + j);
-
-        base->timer_jiffies = jiffies;
-        base->next_timer = base->timer_jiffies;
-        base->active_timers = 0;
-        base->all_timers = 0;
-        return 0;
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
 {
@@ -1631,55 +1585,86 @@ static void migrate_timers(int cpu)
                 migrate_timer_list(new_base, old_base->tv5.vec + i);
         }
 
+        old_base->active_timers = 0;
+        old_base->all_timers = 0;
+
         spin_unlock(&old_base->lock);
         spin_unlock_irq(&new_base->lock);
         put_cpu_var(tvec_bases);
 }
-#endif /* CONFIG_HOTPLUG_CPU */
 
 static int timer_cpu_notify(struct notifier_block *self,
                                 unsigned long action, void *hcpu)
 {
-        long cpu = (long)hcpu;
-        int err;
-
-        switch(action) {
-        case CPU_UP_PREPARE:
-        case CPU_UP_PREPARE_FROZEN:
-                err = init_timers_cpu(cpu);
-                if (err < 0)
-                        return notifier_from_errno(err);
-                break;
-#ifdef CONFIG_HOTPLUG_CPU
+        switch (action) {
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
-                migrate_timers(cpu);
+                migrate_timers((long)hcpu);
                 break;
-#endif
         default:
                 break;
         }
+
         return NOTIFY_OK;
 }
 
-static struct notifier_block timers_nb = {
-        .notifier_call = timer_cpu_notify,
-};
+static inline void timer_register_cpu_notifier(void)
+{
+        cpu_notifier(timer_cpu_notify, 0);
+}
+#else
+static inline void timer_register_cpu_notifier(void) { }
+#endif /* CONFIG_HOTPLUG_CPU */
 
+static void __init init_timer_cpu(struct tvec_base *base, int cpu)
+{
+        int j;
 
-void __init init_timers(void)
+        BUG_ON(base != tbase_get_base(base));
+
+        base->cpu = cpu;
+        per_cpu(tvec_bases, cpu) = base;
+        spin_lock_init(&base->lock);
+
+        for (j = 0; j < TVN_SIZE; j++) {
+                INIT_LIST_HEAD(base->tv5.vec + j);
+                INIT_LIST_HEAD(base->tv4.vec + j);
+                INIT_LIST_HEAD(base->tv3.vec + j);
+                INIT_LIST_HEAD(base->tv2.vec + j);
+        }
+        for (j = 0; j < TVR_SIZE; j++)
+                INIT_LIST_HEAD(base->tv1.vec + j);
+
+        base->timer_jiffies = jiffies;
+        base->next_timer = base->timer_jiffies;
+}
+
+static void __init init_timer_cpus(void)
 {
-        int err;
+        struct tvec_base *base;
+        int local_cpu = smp_processor_id();
+        int cpu;
 
+        for_each_possible_cpu(cpu) {
+                if (cpu == local_cpu)
+                        base = &boot_tvec_bases;
+#ifdef CONFIG_SMP
+                else
+                        base = per_cpu_ptr(&__tvec_bases, cpu);
+#endif
+
+                init_timer_cpu(base, cpu);
+        }
+}
+
+void __init init_timers(void)
+{
         /* ensure there are enough low bits for flags in timer->base pointer */
         BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
 
-        err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
-                               (void *)(long)smp_processor_id());
-        BUG_ON(err != NOTIFY_OK);
-
+        init_timer_cpus();
         init_timer_stats();
-        register_cpu_notifier(&timers_nb);
+        timer_register_cpu_notifier();
         open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
 }
 
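Note on the assertions this patch relies on: BUG_ON(base != tbase_get_base(base)) in init_timer_cpu() and BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK) in init_timers() guard the same invariant — struct tvec_base is aligned far enough that the low bits of any valid base pointer are zero, so timer->base can carry per-timer flags in those bits. Below is a minimal standalone sketch of that pointer-tagging idea; the names (FLAG_MASK, struct base, get_base, set_flag) are illustrative stand-ins, not the kernel's exact definitions.

/* Standalone pointer-tagging sketch; builds with any C11 compiler. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define FLAG_MASK 0x3UL                 /* two low bits free thanks to alignment */

struct base { _Alignas(64) int dummy; }; /* stand-in for ____cacheline_aligned */

static struct base *get_base(struct base *tagged)
{
        /* strip the flag bits to recover the real pointer (cf. tbase_get_base()) */
        return (struct base *)((uintptr_t)tagged & ~FLAG_MASK);
}

static struct base *set_flag(struct base *b, unsigned long flag)
{
        /* stash a flag in the otherwise-zero low bits of the pointer */
        return (struct base *)((uintptr_t)b | (flag & FLAG_MASK));
}

int main(void)
{
        static struct base b;

        /* alignment keeps the low bits clear — the runtime counterpart of
         * BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK) */
        assert(((uintptr_t)&b & FLAG_MASK) == 0);

        struct base *tagged = set_flag(&b, 0x1UL); /* e.g. a "deferrable" bit */
        assert(get_base(tagged) == &b);            /* real pointer recovered */
        printf("flags stored in pointer: %lx\n",
               (unsigned long)((uintptr_t)tagged & FLAG_MASK));
        return 0;
}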