Diffstat (limited to 'kernel')
-rw-r--r--   kernel/cgroup.c              |  5
-rw-r--r--   kernel/dma-coherent.c        |  2
-rw-r--r--   kernel/exit.c                | 12
-rw-r--r--   kernel/hrtimer.c             | 95
-rw-r--r--   kernel/kexec.c               |  8
-rw-r--r--   kernel/kgdb.c                | 13
-rw-r--r--   kernel/posix-timers.c        |  2
-rw-r--r--   kernel/sched.c               |  6
-rw-r--r--   kernel/time/tick-broadcast.c | 14
-rw-r--r--   kernel/time/tick-common.c    | 10
-rw-r--r--   kernel/time/tick-internal.h  |  7
-rw-r--r--   kernel/time/tick-sched.c     | 13
-rw-r--r--   kernel/trace/trace_sysprof.c |  2
13 files changed, 153 insertions, 36 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 13932abde159..a0123d75ec9a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2738,14 +2738,15 @@ void cgroup_fork_callbacks(struct task_struct *child)
  */
 void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
 {
-        struct cgroup *oldcgrp, *newcgrp;
+        struct cgroup *oldcgrp, *newcgrp = NULL;
 
         if (need_mm_owner_callback) {
                 int i;
                 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
                         struct cgroup_subsys *ss = subsys[i];
                         oldcgrp = task_cgroup(old, ss->subsys_id);
-                        newcgrp = task_cgroup(new, ss->subsys_id);
+                        if (new)
+                                newcgrp = task_cgroup(new, ss->subsys_id);
                         if (oldcgrp == newcgrp)
                                 continue;
                         if (ss->mm_owner_changed)
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index c1d4d5b4c61c..f013a0c2e111 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -124,6 +124,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
         }
         return (mem != NULL);
 }
+EXPORT_SYMBOL(dma_alloc_from_coherent);
 
 /**
  * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
@@ -151,3 +152,4 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
         }
         return 0;
 }
+EXPORT_SYMBOL(dma_release_from_coherent);
diff --git a/kernel/exit.c b/kernel/exit.c
index 16395644a98f..85a83c831856 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -583,8 +583,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
         * If there are other users of the mm and the owner (us) is exiting
         * we need to find a new owner to take on the responsibility.
         */
-       if (!mm)
-               return 0;
        if (atomic_read(&mm->mm_users) <= 1)
                return 0;
        if (mm->owner != p)
@@ -627,6 +625,16 @@ retry:
        } while_each_thread(g, c);
 
        read_unlock(&tasklist_lock);
+       /*
+        * We found no owner yet mm_users > 1: this implies that we are
+        * most likely racing with swapoff (try_to_unuse()) or /proc or
+        * ptrace or page migration (get_task_mm()). Mark owner as NULL,
+        * so that subsystems can understand the callback and take action.
+        */
+       down_write(&mm->mmap_sem);
+       cgroup_mm_owner_callbacks(mm->owner, NULL);
+       mm->owner = NULL;
+       up_write(&mm->mmap_sem);
        return;
 
 assign_new_owner:
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index b8e4dce80a74..cdec83e722fa 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -672,13 +672,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                         */
                        BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
                        return 1;
-               case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ:
+               case HRTIMER_CB_IRQSAFE_PERCPU:
+               case HRTIMER_CB_IRQSAFE_UNLOCKED:
                        /*
                         * This is solely for the sched tick emulation with
                         * dynamic tick support to ensure that we do not
                         * restart the tick right on the edge and end up with
                         * the tick timer in the softirq ! The calling site
-                        * takes care of this.
+                        * takes care of this. Also used for hrtimer sleeper !
                         */
                        debug_hrtimer_deactivate(timer);
                        return 1;
@@ -1245,7 +1246,8 @@ static void __run_hrtimer(struct hrtimer *timer)
        timer_stats_account_hrtimer(timer);
 
        fn = timer->function;
-       if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) {
+       if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
+           timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
                /*
                 * Used for scheduler timers, avoid lock inversion with
                 * rq->lock and tasklist_lock.
@@ -1452,7 +1454,7 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
        sl->timer.function = hrtimer_wakeup;
        sl->task = task;
 #ifdef CONFIG_HIGH_RES_TIMERS
-       sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+       sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 #endif
 }
 
@@ -1591,29 +1593,95 @@ static void __cpuinit init_hrtimers_cpu(int cpu)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
-                               struct hrtimer_clock_base *new_base)
+static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
+                               struct hrtimer_clock_base *new_base, int dcpu)
 {
        struct hrtimer *timer;
        struct rb_node *node;
+       int raise = 0;
 
        while ((node = rb_first(&old_base->active))) {
                timer = rb_entry(node, struct hrtimer, node);
                BUG_ON(hrtimer_callback_running(timer));
                debug_hrtimer_deactivate(timer);
-               __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0);
+
+               /*
+                * Should not happen. Per CPU timers should be
+                * canceled _before_ the migration code is called
+                */
+               if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
+                       __remove_hrtimer(timer, old_base,
+                                        HRTIMER_STATE_INACTIVE, 0);
+                       WARN(1, "hrtimer (%p %p)active but cpu %d dead\n",
+                            timer, timer->function, dcpu);
+                       continue;
+               }
+
+               /*
+                * Mark it as STATE_MIGRATE not INACTIVE otherwise the
+                * timer could be seen as !active and just vanish away
+                * under us on another CPU
+                */
+               __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
                timer->base = new_base;
                /*
                 * Enqueue the timer. Allow reprogramming of the event device
                 */
                enqueue_hrtimer(timer, new_base, 1);
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+               /*
+                * Happens with high res enabled when the timer was
+                * already expired and the callback mode is
+                * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
+                * enqueue code does not move them to the soft irq
+                * pending list for performance/latency reasons, but
+                * in the migration state, we need to do that
+                * otherwise we end up with a stale timer.
+                */
+               if (timer->state == HRTIMER_STATE_MIGRATE) {
+                       timer->state = HRTIMER_STATE_PENDING;
+                       list_add_tail(&timer->cb_entry,
+                                     &new_base->cpu_base->cb_pending);
+                       raise = 1;
+               }
+#endif
+               /* Clear the migration state bit */
+               timer->state &= ~HRTIMER_STATE_MIGRATE;
+       }
+       return raise;
+}
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
+                                  struct hrtimer_cpu_base *new_base)
+{
+       struct hrtimer *timer;
+       int raise = 0;
+
+       while (!list_empty(&old_base->cb_pending)) {
+               timer = list_entry(old_base->cb_pending.next,
+                                  struct hrtimer, cb_entry);
+
+               __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
+               timer->base = &new_base->clock_base[timer->base->index];
+               list_add_tail(&timer->cb_entry, &new_base->cb_pending);
+               raise = 1;
        }
+       return raise;
+}
+#else
+static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
+                                  struct hrtimer_cpu_base *new_base)
+{
+       return 0;
 }
+#endif
 
 static void migrate_hrtimers(int cpu)
 {
        struct hrtimer_cpu_base *old_base, *new_base;
-       int i;
+       int i, raise = 0;
 
        BUG_ON(cpu_online(cpu));
        old_base = &per_cpu(hrtimer_bases, cpu);
@@ -1626,14 +1694,21 @@ static void migrate_hrtimers(int cpu)
        spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-               migrate_hrtimer_list(&old_base->clock_base[i],
-                                    &new_base->clock_base[i]);
+               if (migrate_hrtimer_list(&old_base->clock_base[i],
+                                        &new_base->clock_base[i], cpu))
+                       raise = 1;
        }
 
+       if (migrate_hrtimer_pending(old_base, new_base))
+               raise = 1;
+
        spin_unlock(&old_base->lock);
        spin_unlock(&new_base->lock);
        local_irq_enable();
        put_cpu_var(hrtimer_bases);
+
+       if (raise)
+               hrtimer_raise_softirq();
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 59f3f0df35d4..aef265325cd3 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -753,8 +753,14 @@ static struct page *kimage_alloc_page(struct kimage *image,
                *old = addr | (*old & ~PAGE_MASK);
 
                /* The old page I have found cannot be a
-                * destination page, so return it.
+                * destination page, so return it if it's
+                * gfp_flags honor the ones passed in.
                 */
+               if (!(gfp_mask & __GFP_HIGHMEM) &&
+                   PageHighMem(old_page)) {
+                       kimage_free_pages(old_page);
+                       continue;
+               }
                addr = old_addr;
                page = old_page;
                break;
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index eaa21fc9ad1d..e4dcfb2272a4 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -488,7 +488,7 @@ static int write_mem_msg(int binary)
                if (err)
                        return err;
                if (CACHE_FLUSH_IS_SAFE)
-                       flush_icache_range(addr, addr + length + 1);
+                       flush_icache_range(addr, addr + length);
                return 0;
        }
 
@@ -590,6 +590,7 @@ static void kgdb_wait(struct pt_regs *regs)
 
        /* Signal the primary CPU that we are done: */
        atomic_set(&cpu_in_kgdb[cpu], 0);
+       touch_softlockup_watchdog();
        clocksource_touch_watchdog();
        local_irq_restore(flags);
 }
@@ -1432,6 +1433,7 @@ acquirelock:
            atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
 
                atomic_set(&kgdb_active, -1);
+               touch_softlockup_watchdog();
                clocksource_touch_watchdog();
                local_irq_restore(flags);
 
@@ -1462,7 +1464,7 @@ acquirelock:
         * Get the passive CPU lock which will hold all the non-primary
         * CPU in a spin state while the debugger is active
         */
-       if (!kgdb_single_step || !kgdb_contthread) {
+       if (!kgdb_single_step) {
                for (i = 0; i < NR_CPUS; i++)
                        atomic_set(&passive_cpu_wait[i], 1);
        }
@@ -1475,7 +1477,7 @@ acquirelock:
 
 #ifdef CONFIG_SMP
        /* Signal the other CPUs to enter kgdb_wait() */
-       if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup)
+       if ((!kgdb_single_step) && kgdb_do_roundup)
                kgdb_roundup_cpus(flags);
 #endif
 
@@ -1494,7 +1496,7 @@ acquirelock:
        kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code);
        kgdb_deactivate_sw_breakpoints();
        kgdb_single_step = 0;
-       kgdb_contthread = NULL;
+       kgdb_contthread = current;
        exception_level = 0;
 
        /* Talk to debugger with gdbserial protocol */
@@ -1508,7 +1510,7 @@ acquirelock:
        kgdb_info[ks->cpu].task = NULL;
        atomic_set(&cpu_in_kgdb[ks->cpu], 0);
 
-       if (!kgdb_single_step || !kgdb_contthread) {
+       if (!kgdb_single_step) {
                for (i = NR_CPUS-1; i >= 0; i--)
                        atomic_set(&passive_cpu_wait[i], 0);
                /*
@@ -1524,6 +1526,7 @@ acquirelock:
 kgdb_restore:
        /* Free kgdb_active */
        atomic_set(&kgdb_active, -1);
+       touch_softlockup_watchdog();
        clocksource_touch_watchdog();
        local_irq_restore(flags);
 
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index e36d5798cbff..5131e5471169 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -441,7 +441,7 @@ static struct k_itimer * alloc_posix_timer(void)
                return tmr;
        if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
                kmem_cache_free(posix_timers_cache, tmr);
-               tmr = NULL;
+               return NULL;
        }
        memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
        return tmr;
diff --git a/kernel/sched.c b/kernel/sched.c
index 98890807375b..ad1962dc0aa2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -201,7 +201,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
        hrtimer_init(&rt_b->rt_period_timer,
                        CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rt_b->rt_period_timer.function = sched_rt_period_timer;
-       rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+       rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
 }
 
 static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
@@ -1087,7 +1087,7 @@ hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
        return NOTIFY_DONE;
 }
 
-static void init_hrtick(void)
+static __init void init_hrtick(void)
 {
        hotcpu_notifier(hotplug_hrtick, 0);
 }
@@ -1119,7 +1119,7 @@ static void init_rq_hrtick(struct rq *rq)
 
        hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        rq->hrtick_timer.function = hrtick;
-       rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+       rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 }
 #else
 static inline void hrtick_clear(struct rq *rq)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f1f3eee28113..cb01cd8f919b 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -235,7 +235,8 @@ static void tick_do_broadcast_on_off(void *why)
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                if (!cpu_isset(cpu, tick_broadcast_mask)) {
                        cpu_set(cpu, tick_broadcast_mask);
-                       if (td->mode == TICKDEV_MODE_PERIODIC)
+                       if (tick_broadcast_device.mode ==
+                           TICKDEV_MODE_PERIODIC)
                                clockevents_shutdown(dev);
                }
                if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
@@ -245,7 +246,8 @@ static void tick_do_broadcast_on_off(void *why)
                if (!tick_broadcast_force &&
                    cpu_isset(cpu, tick_broadcast_mask)) {
                        cpu_clear(cpu, tick_broadcast_mask);
-                       if (td->mode == TICKDEV_MODE_PERIODIC)
+                       if (tick_broadcast_device.mode ==
+                           TICKDEV_MODE_PERIODIC)
                                tick_setup_periodic(dev, 0);
                }
                break;
@@ -575,4 +577,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
        spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
 
+/*
+ * Check, whether the broadcast device is in one shot mode
+ */
+int tick_broadcast_oneshot_active(void)
+{
+       return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
+}
+
 #endif
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index 019315ebf9de..df12434b43ca 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
  */
 ktime_t tick_next_period;
 ktime_t tick_period;
-int tick_do_timer_cpu __read_mostly = -1;
+int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT;
 DEFINE_SPINLOCK(tick_device_lock);
 
 /*
@@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
        if (!tick_device_is_functional(dev))
                return;
 
-       if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
+       if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
+           !tick_broadcast_oneshot_active()) {
                clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
        } else {
                unsigned long seq;
@@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td,
                 * If no cpu took the do_timer update, assign it to
                 * this cpu:
                 */
-               if (tick_do_timer_cpu == -1) {
+               if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) {
                        tick_do_timer_cpu = cpu;
                        tick_next_period = ktime_get();
                        tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
@@ -300,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup)
        if (*cpup == tick_do_timer_cpu) {
                int cpu = first_cpu(cpu_online_map);
 
-               tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1;
+               tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu :
+                       TICK_DO_TIMER_NONE;
        }
        spin_unlock_irqrestore(&tick_device_lock, flags);
 }
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 6e9db9734aa6..469248782c23 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -1,6 +1,10 @@
 /*
  * tick internal variable and functions used by low/high res code
  */
+
+#define TICK_DO_TIMER_NONE     -1
+#define TICK_DO_TIMER_BOOT     -2
+
 DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
 extern spinlock_t tick_device_lock;
 extern ktime_t tick_next_period;
@@ -31,6 +35,7 @@ extern void tick_broadcast_oneshot_control(unsigned long reason);
 extern void tick_broadcast_switch_to_oneshot(void);
 extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
 extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
+extern int tick_broadcast_oneshot_active(void);
 # else /* BROADCAST */
 static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 {
@@ -39,6 +44,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
 static inline void tick_broadcast_switch_to_oneshot(void) { }
 static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 # endif /* !BROADCAST */
 
 #else /* !ONESHOT */
@@ -68,6 +74,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
        return 0;
 }
+static inline int tick_broadcast_oneshot_active(void) { return 0; }
 #endif /* !TICK_ONESHOT */
 
 /*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index a87b0468568b..cb02324bdb88 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -75,6 +75,9 @@ static void tick_do_update_jiffies64(ktime_t now)
                                                           incr * ticks);
                }
                do_timer(++ticks);
+
+               /* Keep the tick_next_period variable up to date */
+               tick_next_period = ktime_add(last_jiffies_update, tick_period);
        }
        write_sequnlock(&xtime_lock);
 }
@@ -221,7 +224,7 @@ void tick_nohz_stop_sched_tick(int inidle)
         */
        if (unlikely(!cpu_online(cpu))) {
                if (cpu == tick_do_timer_cpu)
-                       tick_do_timer_cpu = -1;
+                       tick_do_timer_cpu = TICK_DO_TIMER_NONE;
        }
 
        if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
@@ -303,7 +306,7 @@ void tick_nohz_stop_sched_tick(int inidle)
                 * invoked.
                 */
                if (cpu == tick_do_timer_cpu)
-                       tick_do_timer_cpu = -1;
+                       tick_do_timer_cpu = TICK_DO_TIMER_NONE;
 
                ts->idle_sleeps++;
 
@@ -468,7 +471,7 @@ static void tick_nohz_handler(struct clock_event_device *dev)
         * this duty, then the jiffies update is still serialized by
         * xtime_lock.
         */
-       if (unlikely(tick_do_timer_cpu == -1))
+       if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
                tick_do_timer_cpu = cpu;
 
        /* Check, if the jiffies need an update */
@@ -570,7 +573,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
         * this duty, then the jiffies update is still serialized by
         * xtime_lock.
         */
-       if (unlikely(tick_do_timer_cpu == -1))
+       if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE))
                tick_do_timer_cpu = cpu;
 #endif
 
@@ -622,7 +625,7 @@ void tick_setup_sched_timer(void)
         */
        hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
        ts->sched_timer.function = tick_sched_timer;
-       ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+       ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
        /* Get the next period (per cpu) */
        ts->sched_timer.expires = tick_init_jiffy_update();
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index bb948e52ce20..db58fb66a135 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -202,7 +202,7 @@ static void start_stack_timer(int cpu)
 
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = stack_trace_timer_fn;
-       hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ;
+       hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
 
        hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL);
 }