Diffstat (limited to 'kernel')
36 files changed, 987 insertions, 524 deletions
diff --git a/kernel/acct.c b/kernel/acct.c
index dd68b9059418..f6006a60df5d 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -548,7 +548,7 @@ static void do_acct_process(struct bsd_acct_struct *acct, | |||
548 | #endif | 548 | #endif |
549 | 549 | ||
550 | spin_lock_irq(&current->sighand->siglock); | 550 | spin_lock_irq(&current->sighand->siglock); |
551 | tty = current->signal->tty; | 551 | tty = current->signal->tty; /* Safe as we hold the siglock */ |
552 | ac.ac_tty = tty ? old_encode_dev(tty_devnum(tty)) : 0; | 552 | ac.ac_tty = tty ? old_encode_dev(tty_devnum(tty)) : 0; |
553 | ac.ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime))); | 553 | ac.ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime))); |
554 | ac.ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime))); | 554 | ac.ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime))); |
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 59cedfb040e7..cf5bc2f5f9c3 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -246,8 +246,8 @@ static int audit_match_perm(struct audit_context *ctx, int mask) | |||
246 | unsigned n; | 246 | unsigned n; |
247 | if (unlikely(!ctx)) | 247 | if (unlikely(!ctx)) |
248 | return 0; | 248 | return 0; |
249 | |||
250 | n = ctx->major; | 249 | n = ctx->major; |
250 | |||
251 | switch (audit_classify_syscall(ctx->arch, n)) { | 251 | switch (audit_classify_syscall(ctx->arch, n)) { |
252 | case 0: /* native */ | 252 | case 0: /* native */ |
253 | if ((mask & AUDIT_PERM_WRITE) && | 253 | if ((mask & AUDIT_PERM_WRITE) && |
@@ -1204,13 +1204,13 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
1204 | (context->return_valid==AUDITSC_SUCCESS)?"yes":"no", | 1204 | (context->return_valid==AUDITSC_SUCCESS)?"yes":"no", |
1205 | context->return_code); | 1205 | context->return_code); |
1206 | 1206 | ||
1207 | mutex_lock(&tty_mutex); | 1207 | spin_lock_irq(&tsk->sighand->siglock); |
1208 | read_lock(&tasklist_lock); | ||
1209 | if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name) | 1208 | if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name) |
1210 | tty = tsk->signal->tty->name; | 1209 | tty = tsk->signal->tty->name; |
1211 | else | 1210 | else |
1212 | tty = "(none)"; | 1211 | tty = "(none)"; |
1213 | read_unlock(&tasklist_lock); | 1212 | spin_unlock_irq(&tsk->sighand->siglock); |
1213 | |||
1214 | audit_log_format(ab, | 1214 | audit_log_format(ab, |
1215 | " a0=%lx a1=%lx a2=%lx a3=%lx items=%d" | 1215 | " a0=%lx a1=%lx a2=%lx a3=%lx items=%d" |
1216 | " ppid=%d pid=%d auid=%u uid=%u gid=%u" | 1216 | " ppid=%d pid=%d auid=%u uid=%u gid=%u" |
@@ -1230,7 +1230,6 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts | |||
1230 | context->egid, context->sgid, context->fsgid, tty, | 1230 | context->egid, context->sgid, context->fsgid, tty, |
1231 | tsk->sessionid); | 1231 | tsk->sessionid); |
1232 | 1232 | ||
1233 | mutex_unlock(&tty_mutex); | ||
1234 | 1233 | ||
1235 | audit_log_task_info(ab, tsk); | 1234 | audit_log_task_info(ab, tsk); |
1236 | if (context->filterkey) { | 1235 | if (context->filterkey) { |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 13932abde159..a0123d75ec9a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2738,14 +2738,15 @@ void cgroup_fork_callbacks(struct task_struct *child) | |||
2738 | */ | 2738 | */ |
2739 | void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) | 2739 | void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) |
2740 | { | 2740 | { |
2741 | struct cgroup *oldcgrp, *newcgrp; | 2741 | struct cgroup *oldcgrp, *newcgrp = NULL; |
2742 | 2742 | ||
2743 | if (need_mm_owner_callback) { | 2743 | if (need_mm_owner_callback) { |
2744 | int i; | 2744 | int i; |
2745 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | 2745 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { |
2746 | struct cgroup_subsys *ss = subsys[i]; | 2746 | struct cgroup_subsys *ss = subsys[i]; |
2747 | oldcgrp = task_cgroup(old, ss->subsys_id); | 2747 | oldcgrp = task_cgroup(old, ss->subsys_id); |
2748 | newcgrp = task_cgroup(new, ss->subsys_id); | 2748 | if (new) |
2749 | newcgrp = task_cgroup(new, ss->subsys_id); | ||
2749 | if (oldcgrp == newcgrp) | 2750 | if (oldcgrp == newcgrp) |
2750 | continue; | 2751 | continue; |
2751 | if (ss->mm_owner_changed) | 2752 | if (ss->mm_owner_changed) |
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f17e9854c246..86d49045daed 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -199,13 +199,14 @@ static int __ref take_cpu_down(void *_param) | |||
199 | struct take_cpu_down_param *param = _param; | 199 | struct take_cpu_down_param *param = _param; |
200 | int err; | 200 | int err; |
201 | 201 | ||
202 | raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod, | ||
203 | param->hcpu); | ||
204 | /* Ensure this CPU doesn't handle any more interrupts. */ | 202 | /* Ensure this CPU doesn't handle any more interrupts. */ |
205 | err = __cpu_disable(); | 203 | err = __cpu_disable(); |
206 | if (err < 0) | 204 | if (err < 0) |
207 | return err; | 205 | return err; |
208 | 206 | ||
207 | raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod, | ||
208 | param->hcpu); | ||
209 | |||
209 | /* Force idle task to run as soon as we yield: it should | 210 | /* Force idle task to run as soon as we yield: it should |
210 | immediately notice cpu is offline and die quickly. */ | 211 | immediately notice cpu is offline and die quickly. */ |
211 | sched_idle_next(); | 212 | sched_idle_next(); |
@@ -453,6 +454,25 @@ out: | |||
453 | } | 454 | } |
454 | #endif /* CONFIG_PM_SLEEP_SMP */ | 455 | #endif /* CONFIG_PM_SLEEP_SMP */ |
455 | 456 | ||
457 | /** | ||
458 | * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers | ||
459 | * @cpu: cpu that just started | ||
460 | * | ||
461 | * This function calls the cpu_chain notifiers with CPU_STARTING. | ||
462 | * It must be called by the arch code on the new cpu, before the new cpu | ||
463 | * enables interrupts and before the "boot" cpu returns from __cpu_up(). | ||
464 | */ | ||
465 | void notify_cpu_starting(unsigned int cpu) | ||
466 | { | ||
467 | unsigned long val = CPU_STARTING; | ||
468 | |||
469 | #ifdef CONFIG_PM_SLEEP_SMP | ||
470 | if (cpu_isset(cpu, frozen_cpus)) | ||
471 | val = CPU_STARTING_FROZEN; | ||
472 | #endif /* CONFIG_PM_SLEEP_SMP */ | ||
473 | raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu); | ||
474 | } | ||
475 | |||
456 | #endif /* CONFIG_SMP */ | 476 | #endif /* CONFIG_SMP */ |
457 | 477 | ||
458 | /* | 478 | /* |
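The kernel-doc above spells out the calling convention for the new CPU_STARTING notification; a hedged sketch of where an architecture's secondary-startup code would be expected to place the call (the function name and surrounding steps are illustrative, not part of this diff):

    /* Illustrative secondary-CPU entry path; arch-specific names assumed. */
    static void __cpuinit start_secondary_sketch(void)
    {
            unsigned int cpu = smp_processor_id();

            /* ... low-level CPU and per-cpu setup ... */

            notify_cpu_starting(cpu);       /* CPU_STARTING chain, irqs still off */

            /* only now may the CPU go online and enable interrupts */
            cpu_set(cpu, cpu_online_map);
            local_irq_enable();

            cpu_idle();
    }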
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 827cd9adccb2..eab7bd6628e0 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1921,7 +1921,7 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs) | |||
1921 | * that has tasks along with an empty 'mems'. But if we did see such | 1921 | * that has tasks along with an empty 'mems'. But if we did see such |
1922 | * a cpuset, we'd handle it just like we do if its 'cpus' was empty. | 1922 | * a cpuset, we'd handle it just like we do if its 'cpus' was empty. |
1923 | */ | 1923 | */ |
1924 | static void scan_for_empty_cpusets(const struct cpuset *root) | 1924 | static void scan_for_empty_cpusets(struct cpuset *root) |
1925 | { | 1925 | { |
1926 | LIST_HEAD(queue); | 1926 | LIST_HEAD(queue); |
1927 | struct cpuset *cp; /* scans cpusets being updated */ | 1927 | struct cpuset *cp; /* scans cpusets being updated */ |
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index c1d4d5b4c61c..f013a0c2e111 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -124,6 +124,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, | |||
124 | } | 124 | } |
125 | return (mem != NULL); | 125 | return (mem != NULL); |
126 | } | 126 | } |
127 | EXPORT_SYMBOL(dma_alloc_from_coherent); | ||
127 | 128 | ||
128 | /** | 129 | /** |
129 | * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool | 130 | * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool |
@@ -151,3 +152,4 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr) | |||
151 | } | 152 | } |
152 | return 0; | 153 | return 0; |
153 | } | 154 | } |
155 | EXPORT_SYMBOL(dma_release_from_coherent); | ||
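Exporting these two helpers lets modular or arch DMA code consult a device's private coherent pool before falling back to the page allocator. A hedged sketch of the usual calling pattern follows; the tail of the dma_alloc_from_coherent() prototype (dma_handle and the void ** output) is assumed from the era's API, since this hunk only shows its first line:

    /* Sketch of a dma_alloc_coherent() front end using the exported helper. */
    void *dma_alloc_sketch(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t gfp)
    {
            void *vaddr = NULL;

            /* Nonzero means the device has a private coherent pool and the
             * request was handled there; vaddr may still be NULL if that
             * pool could not satisfy it. */
            if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
                    return vaddr;

            /* ... otherwise fall back to the normal page allocator ... */
            return NULL;
    }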
diff --git a/kernel/exit.c b/kernel/exit.c
index 16395644a98f..85a83c831856 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -583,8 +583,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p) | |||
583 | * If there are other users of the mm and the owner (us) is exiting | 583 | * If there are other users of the mm and the owner (us) is exiting |
584 | * we need to find a new owner to take on the responsibility. | 584 | * we need to find a new owner to take on the responsibility. |
585 | */ | 585 | */ |
586 | if (!mm) | ||
587 | return 0; | ||
588 | if (atomic_read(&mm->mm_users) <= 1) | 586 | if (atomic_read(&mm->mm_users) <= 1) |
589 | return 0; | 587 | return 0; |
590 | if (mm->owner != p) | 588 | if (mm->owner != p) |
@@ -627,6 +625,16 @@ retry: | |||
627 | } while_each_thread(g, c); | 625 | } while_each_thread(g, c); |
628 | 626 | ||
629 | read_unlock(&tasklist_lock); | 627 | read_unlock(&tasklist_lock); |
628 | /* | ||
629 | * We found no owner yet mm_users > 1: this implies that we are | ||
630 | * most likely racing with swapoff (try_to_unuse()) or /proc or | ||
631 | * ptrace or page migration (get_task_mm()). Mark owner as NULL, | ||
632 | * so that subsystems can understand the callback and take action. | ||
633 | */ | ||
634 | down_write(&mm->mmap_sem); | ||
635 | cgroup_mm_owner_callbacks(mm->owner, NULL); | ||
636 | mm->owner = NULL; | ||
637 | up_write(&mm->mmap_sem); | ||
630 | return; | 638 | return; |
631 | 639 | ||
632 | assign_new_owner: | 640 | assign_new_owner: |
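Together with the cgroup.c change earlier in this diff (which only looks up the new task's cgroup when the new owner is non-NULL), this means a subsystem's mm_owner_changed handler can now see a NULL new cgroup when the exit path above finds no successor for mm->owner. A hedged sketch of how a subsystem might cope; the callback signature and the foo_* names are assumptions, not taken from this diff:

    /* Hypothetical subsystem callback; exact signature assumed. */
    static void foo_mm_owner_changed(struct cgroup_subsys *ss,
                                     struct cgroup *old, struct cgroup *new)
    {
            /* 'new' may be NULL now: the mm lost its owner because no other
             * live task uses it.  Treat that as "uncharge everything". */
            if (!new) {
                    /* drop per-cgroup accounting held against 'old' */
                    return;
            }
            /* ... normal transfer of accounting from old to new ... */
    }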
diff --git a/kernel/fork.c b/kernel/fork.c
index 7ce2ebe84796..30de644a40c4 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -802,6 +802,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
802 | 802 | ||
803 | sig->leader = 0; /* session leadership doesn't inherit */ | 803 | sig->leader = 0; /* session leadership doesn't inherit */ |
804 | sig->tty_old_pgrp = NULL; | 804 | sig->tty_old_pgrp = NULL; |
805 | sig->tty = NULL; | ||
805 | 806 | ||
806 | sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero; | 807 | sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero; |
807 | sig->gtime = cputime_zero; | 808 | sig->gtime = cputime_zero; |
@@ -838,6 +839,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
838 | void __cleanup_signal(struct signal_struct *sig) | 839 | void __cleanup_signal(struct signal_struct *sig) |
839 | { | 840 | { |
840 | exit_thread_group_keys(sig); | 841 | exit_thread_group_keys(sig); |
842 | tty_kref_put(sig->tty); | ||
841 | kmem_cache_free(signal_cachep, sig); | 843 | kmem_cache_free(signal_cachep, sig); |
842 | } | 844 | } |
843 | 845 | ||
@@ -1227,7 +1229,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1227 | p->nsproxy->pid_ns->child_reaper = p; | 1229 | p->nsproxy->pid_ns->child_reaper = p; |
1228 | 1230 | ||
1229 | p->signal->leader_pid = pid; | 1231 | p->signal->leader_pid = pid; |
1230 | p->signal->tty = current->signal->tty; | 1232 | tty_kref_put(p->signal->tty); |
1233 | p->signal->tty = tty_kref_get(current->signal->tty); | ||
1231 | set_task_pgrp(p, task_pgrp_nr(current)); | 1234 | set_task_pgrp(p, task_pgrp_nr(current)); |
1232 | set_task_session(p, task_session_nr(current)); | 1235 | set_task_session(p, task_session_nr(current)); |
1233 | attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); | 1236 | attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); |
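The tty refcounting pattern introduced here (tty_kref_get() when the pointer is copied in copy_process(), tty_kref_put() when the signal_struct is torn down) is the general way to keep a tty pinned once the siglock is dropped; both helpers accept a NULL tty. A minimal sketch with an illustrative helper name:

    /* Pin the controlling tty so it stays valid after the siglock is
     * dropped.  Caller must tty_kref_put() the result, which may be NULL. */
    static struct tty_struct *get_ctrl_tty_sketch(void)
    {
            struct tty_struct *tty;

            spin_lock_irq(&current->sighand->siglock);
            tty = tty_kref_get(current->signal->tty);
            spin_unlock_irq(&current->sighand->siglock);

            return tty;
    }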
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index b8e4dce80a74..cdec83e722fa 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -672,13 +672,14 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, | |||
672 | */ | 672 | */ |
673 | BUG_ON(timer->function(timer) != HRTIMER_NORESTART); | 673 | BUG_ON(timer->function(timer) != HRTIMER_NORESTART); |
674 | return 1; | 674 | return 1; |
675 | case HRTIMER_CB_IRQSAFE_NO_SOFTIRQ: | 675 | case HRTIMER_CB_IRQSAFE_PERCPU: |
676 | case HRTIMER_CB_IRQSAFE_UNLOCKED: | ||
676 | /* | 677 | /* |
677 | * This is solely for the sched tick emulation with | 678 | * This is solely for the sched tick emulation with |
678 | * dynamic tick support to ensure that we do not | 679 | * dynamic tick support to ensure that we do not |
679 | * restart the tick right on the edge and end up with | 680 | * restart the tick right on the edge and end up with |
680 | * the tick timer in the softirq ! The calling site | 681 | * the tick timer in the softirq ! The calling site |
681 | * takes care of this. | 682 | * takes care of this. Also used for hrtimer sleeper ! |
682 | */ | 683 | */ |
683 | debug_hrtimer_deactivate(timer); | 684 | debug_hrtimer_deactivate(timer); |
684 | return 1; | 685 | return 1; |
@@ -1245,7 +1246,8 @@ static void __run_hrtimer(struct hrtimer *timer) | |||
1245 | timer_stats_account_hrtimer(timer); | 1246 | timer_stats_account_hrtimer(timer); |
1246 | 1247 | ||
1247 | fn = timer->function; | 1248 | fn = timer->function; |
1248 | if (timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ) { | 1249 | if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU || |
1250 | timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) { | ||
1249 | /* | 1251 | /* |
1250 | * Used for scheduler timers, avoid lock inversion with | 1252 | * Used for scheduler timers, avoid lock inversion with |
1251 | * rq->lock and tasklist_lock. | 1253 | * rq->lock and tasklist_lock. |
@@ -1452,7 +1454,7 @@ void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) | |||
1452 | sl->timer.function = hrtimer_wakeup; | 1454 | sl->timer.function = hrtimer_wakeup; |
1453 | sl->task = task; | 1455 | sl->task = task; |
1454 | #ifdef CONFIG_HIGH_RES_TIMERS | 1456 | #ifdef CONFIG_HIGH_RES_TIMERS |
1455 | sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 1457 | sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; |
1456 | #endif | 1458 | #endif |
1457 | } | 1459 | } |
1458 | 1460 | ||
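HRTIMER_CB_IRQSAFE_UNLOCKED is the mode the hrtimer sleeper now runs in; for context, a sleeper is normally used roughly like this (a loose sketch modelled on do_nanosleep(), with an arbitrary 100 ms timeout):

    /* Rough hrtimer_sleeper usage; values illustrative only. */
    static void sleep_100ms_sketch(void)
    {
            struct hrtimer_sleeper t;

            hrtimer_init(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            hrtimer_init_sleeper(&t, current);

            set_current_state(TASK_UNINTERRUPTIBLE);
            hrtimer_start(&t.timer, ktime_set(0, 100 * NSEC_PER_MSEC),
                          HRTIMER_MODE_REL);
            if (t.task)                     /* cleared by hrtimer_wakeup() */
                    schedule();

            hrtimer_cancel(&t.timer);
            __set_current_state(TASK_RUNNING);
    }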
@@ -1591,29 +1593,95 @@ static void __cpuinit init_hrtimers_cpu(int cpu) | |||
1591 | 1593 | ||
1592 | #ifdef CONFIG_HOTPLUG_CPU | 1594 | #ifdef CONFIG_HOTPLUG_CPU |
1593 | 1595 | ||
1594 | static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, | 1596 | static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base, |
1595 | struct hrtimer_clock_base *new_base) | 1597 | struct hrtimer_clock_base *new_base, int dcpu) |
1596 | { | 1598 | { |
1597 | struct hrtimer *timer; | 1599 | struct hrtimer *timer; |
1598 | struct rb_node *node; | 1600 | struct rb_node *node; |
1601 | int raise = 0; | ||
1599 | 1602 | ||
1600 | while ((node = rb_first(&old_base->active))) { | 1603 | while ((node = rb_first(&old_base->active))) { |
1601 | timer = rb_entry(node, struct hrtimer, node); | 1604 | timer = rb_entry(node, struct hrtimer, node); |
1602 | BUG_ON(hrtimer_callback_running(timer)); | 1605 | BUG_ON(hrtimer_callback_running(timer)); |
1603 | debug_hrtimer_deactivate(timer); | 1606 | debug_hrtimer_deactivate(timer); |
1604 | __remove_hrtimer(timer, old_base, HRTIMER_STATE_INACTIVE, 0); | 1607 | |
1608 | /* | ||
1609 | * Should not happen. Per CPU timers should be | ||
1610 | * canceled _before_ the migration code is called | ||
1611 | */ | ||
1612 | if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) { | ||
1613 | __remove_hrtimer(timer, old_base, | ||
1614 | HRTIMER_STATE_INACTIVE, 0); | ||
1615 | WARN(1, "hrtimer (%p %p)active but cpu %d dead\n", | ||
1616 | timer, timer->function, dcpu); | ||
1617 | continue; | ||
1618 | } | ||
1619 | |||
1620 | /* | ||
1621 | * Mark it as STATE_MIGRATE not INACTIVE otherwise the | ||
1622 | * timer could be seen as !active and just vanish away | ||
1623 | * under us on another CPU | ||
1624 | */ | ||
1625 | __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0); | ||
1605 | timer->base = new_base; | 1626 | timer->base = new_base; |
1606 | /* | 1627 | /* |
1607 | * Enqueue the timer. Allow reprogramming of the event device | 1628 | * Enqueue the timer. Allow reprogramming of the event device |
1608 | */ | 1629 | */ |
1609 | enqueue_hrtimer(timer, new_base, 1); | 1630 | enqueue_hrtimer(timer, new_base, 1); |
1631 | |||
1632 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
1633 | /* | ||
1634 | * Happens with high res enabled when the timer was | ||
1635 | * already expired and the callback mode is | ||
1636 | * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The | ||
1637 | * enqueue code does not move them to the soft irq | ||
1638 | * pending list for performance/latency reasons, but | ||
1639 | * in the migration state, we need to do that | ||
1640 | * otherwise we end up with a stale timer. | ||
1641 | */ | ||
1642 | if (timer->state == HRTIMER_STATE_MIGRATE) { | ||
1643 | timer->state = HRTIMER_STATE_PENDING; | ||
1644 | list_add_tail(&timer->cb_entry, | ||
1645 | &new_base->cpu_base->cb_pending); | ||
1646 | raise = 1; | ||
1647 | } | ||
1648 | #endif | ||
1649 | /* Clear the migration state bit */ | ||
1650 | timer->state &= ~HRTIMER_STATE_MIGRATE; | ||
1651 | } | ||
1652 | return raise; | ||
1653 | } | ||
1654 | |||
1655 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
1656 | static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base, | ||
1657 | struct hrtimer_cpu_base *new_base) | ||
1658 | { | ||
1659 | struct hrtimer *timer; | ||
1660 | int raise = 0; | ||
1661 | |||
1662 | while (!list_empty(&old_base->cb_pending)) { | ||
1663 | timer = list_entry(old_base->cb_pending.next, | ||
1664 | struct hrtimer, cb_entry); | ||
1665 | |||
1666 | __remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0); | ||
1667 | timer->base = &new_base->clock_base[timer->base->index]; | ||
1668 | list_add_tail(&timer->cb_entry, &new_base->cb_pending); | ||
1669 | raise = 1; | ||
1610 | } | 1670 | } |
1671 | return raise; | ||
1672 | } | ||
1673 | #else | ||
1674 | static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base, | ||
1675 | struct hrtimer_cpu_base *new_base) | ||
1676 | { | ||
1677 | return 0; | ||
1611 | } | 1678 | } |
1679 | #endif | ||
1612 | 1680 | ||
1613 | static void migrate_hrtimers(int cpu) | 1681 | static void migrate_hrtimers(int cpu) |
1614 | { | 1682 | { |
1615 | struct hrtimer_cpu_base *old_base, *new_base; | 1683 | struct hrtimer_cpu_base *old_base, *new_base; |
1616 | int i; | 1684 | int i, raise = 0; |
1617 | 1685 | ||
1618 | BUG_ON(cpu_online(cpu)); | 1686 | BUG_ON(cpu_online(cpu)); |
1619 | old_base = &per_cpu(hrtimer_bases, cpu); | 1687 | old_base = &per_cpu(hrtimer_bases, cpu); |
@@ -1626,14 +1694,21 @@ static void migrate_hrtimers(int cpu) | |||
1626 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); | 1694 | spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); |
1627 | 1695 | ||
1628 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { | 1696 | for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { |
1629 | migrate_hrtimer_list(&old_base->clock_base[i], | 1697 | if (migrate_hrtimer_list(&old_base->clock_base[i], |
1630 | &new_base->clock_base[i]); | 1698 | &new_base->clock_base[i], cpu)) |
1699 | raise = 1; | ||
1631 | } | 1700 | } |
1632 | 1701 | ||
1702 | if (migrate_hrtimer_pending(old_base, new_base)) | ||
1703 | raise = 1; | ||
1704 | |||
1633 | spin_unlock(&old_base->lock); | 1705 | spin_unlock(&old_base->lock); |
1634 | spin_unlock(&new_base->lock); | 1706 | spin_unlock(&new_base->lock); |
1635 | local_irq_enable(); | 1707 | local_irq_enable(); |
1636 | put_cpu_var(hrtimer_bases); | 1708 | put_cpu_var(hrtimer_bases); |
1709 | |||
1710 | if (raise) | ||
1711 | hrtimer_raise_softirq(); | ||
1637 | } | 1712 | } |
1638 | #endif /* CONFIG_HOTPLUG_CPU */ | 1713 | #endif /* CONFIG_HOTPLUG_CPU */ |
1639 | 1714 | ||
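The WARN() added to migrate_hrtimer_list() fires if a HRTIMER_CB_IRQSAFE_PERCPU timer is still queued when its CPU dies; owners of such timers are expected to shut them down from a CPU-hotplug notifier before that point. A sketch under that assumption (my_timer and the notifier are hypothetical):

    /* Hypothetical per-CPU timer owner cancelling before the CPU goes away. */
    static DEFINE_PER_CPU(struct hrtimer, my_timer);

    static int __cpuinit my_timer_cpu_notify(struct notifier_block *nb,
                                             unsigned long action, void *hcpu)
    {
            int cpu = (long)hcpu;

            switch (action) {
            case CPU_DOWN_PREPARE:
            case CPU_DOWN_PREPARE_FROZEN:
                    /* waits for a running callback, then dequeues */
                    hrtimer_cancel(&per_cpu(my_timer, cpu));
                    break;
            }
            return NOTIFY_OK;
    }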
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0314074fa232..60c49e324390 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -89,7 +89,14 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask) | |||
89 | set_balance_irq_affinity(irq, cpumask); | 89 | set_balance_irq_affinity(irq, cpumask); |
90 | 90 | ||
91 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 91 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
92 | set_pending_irq(irq, cpumask); | 92 | if (desc->status & IRQ_MOVE_PCNTXT) { |
93 | unsigned long flags; | ||
94 | |||
95 | spin_lock_irqsave(&desc->lock, flags); | ||
96 | desc->chip->set_affinity(irq, cpumask); | ||
97 | spin_unlock_irqrestore(&desc->lock, flags); | ||
98 | } else | ||
99 | set_pending_irq(irq, cpumask); | ||
93 | #else | 100 | #else |
94 | desc->affinity = cpumask; | 101 | desc->affinity = cpumask; |
95 | desc->chip->set_affinity(irq, cpumask); | 102 | desc->chip->set_affinity(irq, cpumask); |
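Callers are unaffected by the new IRQ_MOVE_PCNTXT branch: irq_set_affinity() is invoked the same way whether the mask is applied immediately under desc->lock or deferred via set_pending_irq(). A minimal illustrative call (the IRQ number and CPU are made up):

    /* Steer IRQ 19 to CPU 2 from process context; numbers illustrative. */
    static int pin_irq_sketch(void)
    {
            return irq_set_affinity(19, cpumask_of_cpu(2));
    }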
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 59f3f0df35d4..aef265325cd3 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -753,8 +753,14 @@ static struct page *kimage_alloc_page(struct kimage *image, | |||
753 | *old = addr | (*old & ~PAGE_MASK); | 753 | *old = addr | (*old & ~PAGE_MASK); |
754 | 754 | ||
755 | /* The old page I have found cannot be a | 755 | /* The old page I have found cannot be a |
756 | * destination page, so return it. | 756 | * destination page, so return it if it's |
757 | * gfp_flags honor the ones passed in. | ||
757 | */ | 758 | */ |
759 | if (!(gfp_mask & __GFP_HIGHMEM) && | ||
760 | PageHighMem(old_page)) { | ||
761 | kimage_free_pages(old_page); | ||
762 | continue; | ||
763 | } | ||
758 | addr = old_addr; | 764 | addr = old_addr; |
759 | page = old_page; | 765 | page = old_page; |
760 | break; | 766 | break; |
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index eaa21fc9ad1d..e4dcfb2272a4 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -488,7 +488,7 @@ static int write_mem_msg(int binary) | |||
488 | if (err) | 488 | if (err) |
489 | return err; | 489 | return err; |
490 | if (CACHE_FLUSH_IS_SAFE) | 490 | if (CACHE_FLUSH_IS_SAFE) |
491 | flush_icache_range(addr, addr + length + 1); | 491 | flush_icache_range(addr, addr + length); |
492 | return 0; | 492 | return 0; |
493 | } | 493 | } |
494 | 494 | ||
@@ -590,6 +590,7 @@ static void kgdb_wait(struct pt_regs *regs) | |||
590 | 590 | ||
591 | /* Signal the primary CPU that we are done: */ | 591 | /* Signal the primary CPU that we are done: */ |
592 | atomic_set(&cpu_in_kgdb[cpu], 0); | 592 | atomic_set(&cpu_in_kgdb[cpu], 0); |
593 | touch_softlockup_watchdog(); | ||
593 | clocksource_touch_watchdog(); | 594 | clocksource_touch_watchdog(); |
594 | local_irq_restore(flags); | 595 | local_irq_restore(flags); |
595 | } | 596 | } |
@@ -1432,6 +1433,7 @@ acquirelock: | |||
1432 | atomic_read(&kgdb_cpu_doing_single_step) != cpu) { | 1433 | atomic_read(&kgdb_cpu_doing_single_step) != cpu) { |
1433 | 1434 | ||
1434 | atomic_set(&kgdb_active, -1); | 1435 | atomic_set(&kgdb_active, -1); |
1436 | touch_softlockup_watchdog(); | ||
1435 | clocksource_touch_watchdog(); | 1437 | clocksource_touch_watchdog(); |
1436 | local_irq_restore(flags); | 1438 | local_irq_restore(flags); |
1437 | 1439 | ||
@@ -1462,7 +1464,7 @@ acquirelock: | |||
1462 | * Get the passive CPU lock which will hold all the non-primary | 1464 | * Get the passive CPU lock which will hold all the non-primary |
1463 | * CPU in a spin state while the debugger is active | 1465 | * CPU in a spin state while the debugger is active |
1464 | */ | 1466 | */ |
1465 | if (!kgdb_single_step || !kgdb_contthread) { | 1467 | if (!kgdb_single_step) { |
1466 | for (i = 0; i < NR_CPUS; i++) | 1468 | for (i = 0; i < NR_CPUS; i++) |
1467 | atomic_set(&passive_cpu_wait[i], 1); | 1469 | atomic_set(&passive_cpu_wait[i], 1); |
1468 | } | 1470 | } |
@@ -1475,7 +1477,7 @@ acquirelock: | |||
1475 | 1477 | ||
1476 | #ifdef CONFIG_SMP | 1478 | #ifdef CONFIG_SMP |
1477 | /* Signal the other CPUs to enter kgdb_wait() */ | 1479 | /* Signal the other CPUs to enter kgdb_wait() */ |
1478 | if ((!kgdb_single_step || !kgdb_contthread) && kgdb_do_roundup) | 1480 | if ((!kgdb_single_step) && kgdb_do_roundup) |
1479 | kgdb_roundup_cpus(flags); | 1481 | kgdb_roundup_cpus(flags); |
1480 | #endif | 1482 | #endif |
1481 | 1483 | ||
@@ -1494,7 +1496,7 @@ acquirelock: | |||
1494 | kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code); | 1496 | kgdb_post_primary_code(ks->linux_regs, ks->ex_vector, ks->err_code); |
1495 | kgdb_deactivate_sw_breakpoints(); | 1497 | kgdb_deactivate_sw_breakpoints(); |
1496 | kgdb_single_step = 0; | 1498 | kgdb_single_step = 0; |
1497 | kgdb_contthread = NULL; | 1499 | kgdb_contthread = current; |
1498 | exception_level = 0; | 1500 | exception_level = 0; |
1499 | 1501 | ||
1500 | /* Talk to debugger with gdbserial protocol */ | 1502 | /* Talk to debugger with gdbserial protocol */ |
@@ -1508,7 +1510,7 @@ acquirelock: | |||
1508 | kgdb_info[ks->cpu].task = NULL; | 1510 | kgdb_info[ks->cpu].task = NULL; |
1509 | atomic_set(&cpu_in_kgdb[ks->cpu], 0); | 1511 | atomic_set(&cpu_in_kgdb[ks->cpu], 0); |
1510 | 1512 | ||
1511 | if (!kgdb_single_step || !kgdb_contthread) { | 1513 | if (!kgdb_single_step) { |
1512 | for (i = NR_CPUS-1; i >= 0; i--) | 1514 | for (i = NR_CPUS-1; i >= 0; i--) |
1513 | atomic_set(&passive_cpu_wait[i], 0); | 1515 | atomic_set(&passive_cpu_wait[i], 0); |
1514 | /* | 1516 | /* |
@@ -1524,6 +1526,7 @@ acquirelock: | |||
1524 | kgdb_restore: | 1526 | kgdb_restore: |
1525 | /* Free kgdb_active */ | 1527 | /* Free kgdb_active */ |
1526 | atomic_set(&kgdb_active, -1); | 1528 | atomic_set(&kgdb_active, -1); |
1529 | touch_softlockup_watchdog(); | ||
1527 | clocksource_touch_watchdog(); | 1530 | clocksource_touch_watchdog(); |
1528 | local_irq_restore(flags); | 1531 | local_irq_restore(flags); |
1529 | 1532 | ||
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index e36d5798cbff..5131e5471169 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -441,7 +441,7 @@ static struct k_itimer * alloc_posix_timer(void) | |||
441 | return tmr; | 441 | return tmr; |
442 | if (unlikely(!(tmr->sigq = sigqueue_alloc()))) { | 442 | if (unlikely(!(tmr->sigq = sigqueue_alloc()))) { |
443 | kmem_cache_free(posix_timers_cache, tmr); | 443 | kmem_cache_free(posix_timers_cache, tmr); |
444 | tmr = NULL; | 444 | return NULL; |
445 | } | 445 | } |
446 | memset(&tmr->sigq->info, 0, sizeof(siginfo_t)); | 446 | memset(&tmr->sigq->info, 0, sizeof(siginfo_t)); |
447 | return tmr; | 447 | return tmr; |
diff --git a/kernel/printk.c b/kernel/printk.c
index b51b1567bb55..aee891a869a4 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -577,9 +577,6 @@ static int have_callable_console(void) | |||
577 | * @fmt: format string | 577 | * @fmt: format string |
578 | * | 578 | * |
579 | * This is printk(). It can be called from any context. We want it to work. | 579 | * This is printk(). It can be called from any context. We want it to work. |
580 | * Be aware of the fact that if oops_in_progress is not set, we might try to | ||
581 | * wake klogd up which could deadlock on runqueue lock if printk() is called | ||
582 | * from scheduler code. | ||
583 | * | 580 | * |
584 | * We try to grab the console_sem. If we succeed, it's easy - we log the output and | 581 | * We try to grab the console_sem. If we succeed, it's easy - we log the output and |
585 | * call the console drivers. If we fail to get the semaphore we place the output | 582 | * call the console drivers. If we fail to get the semaphore we place the output |
@@ -982,10 +979,25 @@ int is_console_locked(void) | |||
982 | return console_locked; | 979 | return console_locked; |
983 | } | 980 | } |
984 | 981 | ||
985 | void wake_up_klogd(void) | 982 | static DEFINE_PER_CPU(int, printk_pending); |
983 | |||
984 | void printk_tick(void) | ||
986 | { | 985 | { |
987 | if (!oops_in_progress && waitqueue_active(&log_wait)) | 986 | if (__get_cpu_var(printk_pending)) { |
987 | __get_cpu_var(printk_pending) = 0; | ||
988 | wake_up_interruptible(&log_wait); | 988 | wake_up_interruptible(&log_wait); |
989 | } | ||
990 | } | ||
991 | |||
992 | int printk_needs_cpu(int cpu) | ||
993 | { | ||
994 | return per_cpu(printk_pending, cpu); | ||
995 | } | ||
996 | |||
997 | void wake_up_klogd(void) | ||
998 | { | ||
999 | if (waitqueue_active(&log_wait)) | ||
1000 | __raw_get_cpu_var(printk_pending) = 1; | ||
989 | } | 1001 | } |
990 | 1002 | ||
991 | /** | 1003 | /** |
@@ -1291,22 +1303,6 @@ static int __init disable_boot_consoles(void) | |||
1291 | } | 1303 | } |
1292 | late_initcall(disable_boot_consoles); | 1304 | late_initcall(disable_boot_consoles); |
1293 | 1305 | ||
1294 | /** | ||
1295 | * tty_write_message - write a message to a certain tty, not just the console. | ||
1296 | * @tty: the destination tty_struct | ||
1297 | * @msg: the message to write | ||
1298 | * | ||
1299 | * This is used for messages that need to be redirected to a specific tty. | ||
1300 | * We don't put it into the syslog queue right now maybe in the future if | ||
1301 | * really needed. | ||
1302 | */ | ||
1303 | void tty_write_message(struct tty_struct *tty, char *msg) | ||
1304 | { | ||
1305 | if (tty && tty->ops->write) | ||
1306 | tty->ops->write(tty, msg, strlen(msg)); | ||
1307 | return; | ||
1308 | } | ||
1309 | |||
1310 | #if defined CONFIG_PRINTK | 1306 | #if defined CONFIG_PRINTK |
1311 | 1307 | ||
1312 | /* | 1308 | /* |
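With this change wake_up_klogd() only raises a per-CPU printk_pending flag; the wakeup itself happens later in printk_tick(), and printk_needs_cpu() lets the nohz code keep the tick running while a wakeup is still pending. A hedged sketch of the expected consumers (exactly where they are wired up is outside this diff):

    /* Assumed consumers, for illustration only. */

    /* somewhere in the per-CPU timer tick: */
    static void timer_tick_sketch(void)
    {
            /* ... */
            printk_tick();          /* wake klogd if a printk asked for it */
    }

    /* and in the nohz idle path, before stopping the tick: */
    static int can_stop_tick_sketch(int cpu)
    {
            if (printk_needs_cpu(cpu))
                    return 0;       /* a klogd wakeup is still pending here */
            return 1;
    }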
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index aad93cdc9f68..37f72e551542 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -47,6 +47,7 @@ | |||
47 | #include <linux/notifier.h> | 47 | #include <linux/notifier.h> |
48 | #include <linux/cpu.h> | 48 | #include <linux/cpu.h> |
49 | #include <linux/mutex.h> | 49 | #include <linux/mutex.h> |
50 | #include <linux/time.h> | ||
50 | 51 | ||
51 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 52 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
52 | static struct lock_class_key rcu_lock_key; | 53 | static struct lock_class_key rcu_lock_key; |
@@ -60,12 +61,14 @@ EXPORT_SYMBOL_GPL(rcu_lock_map); | |||
60 | static struct rcu_ctrlblk rcu_ctrlblk = { | 61 | static struct rcu_ctrlblk rcu_ctrlblk = { |
61 | .cur = -300, | 62 | .cur = -300, |
62 | .completed = -300, | 63 | .completed = -300, |
64 | .pending = -300, | ||
63 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), | 65 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), |
64 | .cpumask = CPU_MASK_NONE, | 66 | .cpumask = CPU_MASK_NONE, |
65 | }; | 67 | }; |
66 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { | 68 | static struct rcu_ctrlblk rcu_bh_ctrlblk = { |
67 | .cur = -300, | 69 | .cur = -300, |
68 | .completed = -300, | 70 | .completed = -300, |
71 | .pending = -300, | ||
69 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), | 72 | .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), |
70 | .cpumask = CPU_MASK_NONE, | 73 | .cpumask = CPU_MASK_NONE, |
71 | }; | 74 | }; |
@@ -83,7 +86,10 @@ static void force_quiescent_state(struct rcu_data *rdp, | |||
83 | { | 86 | { |
84 | int cpu; | 87 | int cpu; |
85 | cpumask_t cpumask; | 88 | cpumask_t cpumask; |
89 | unsigned long flags; | ||
90 | |||
86 | set_need_resched(); | 91 | set_need_resched(); |
92 | spin_lock_irqsave(&rcp->lock, flags); | ||
87 | if (unlikely(!rcp->signaled)) { | 93 | if (unlikely(!rcp->signaled)) { |
88 | rcp->signaled = 1; | 94 | rcp->signaled = 1; |
89 | /* | 95 | /* |
@@ -109,6 +115,7 @@ static void force_quiescent_state(struct rcu_data *rdp, | |||
109 | for_each_cpu_mask_nr(cpu, cpumask) | 115 | for_each_cpu_mask_nr(cpu, cpumask) |
110 | smp_send_reschedule(cpu); | 116 | smp_send_reschedule(cpu); |
111 | } | 117 | } |
118 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
112 | } | 119 | } |
113 | #else | 120 | #else |
114 | static inline void force_quiescent_state(struct rcu_data *rdp, | 121 | static inline void force_quiescent_state(struct rcu_data *rdp, |
@@ -118,6 +125,126 @@ static inline void force_quiescent_state(struct rcu_data *rdp, | |||
118 | } | 125 | } |
119 | #endif | 126 | #endif |
120 | 127 | ||
128 | static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp, | ||
129 | struct rcu_data *rdp) | ||
130 | { | ||
131 | long batch; | ||
132 | |||
133 | head->next = NULL; | ||
134 | smp_mb(); /* Read of rcu->cur must happen after any change by caller. */ | ||
135 | |||
136 | /* | ||
137 | * Determine the batch number of this callback. | ||
138 | * | ||
139 | * Using ACCESS_ONCE to avoid the following error when gcc eliminates | ||
140 | * local variable "batch" and emits codes like this: | ||
141 | * 1) rdp->batch = rcp->cur + 1 # gets old value | ||
142 | * ...... | ||
143 | * 2)rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value | ||
144 | * then [*nxttail[0], *nxttail[1]) may contain callbacks | ||
145 | * that batch# = rdp->batch, see the comment of struct rcu_data. | ||
146 | */ | ||
147 | batch = ACCESS_ONCE(rcp->cur) + 1; | ||
148 | |||
149 | if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) { | ||
150 | /* process callbacks */ | ||
151 | rdp->nxttail[0] = rdp->nxttail[1]; | ||
152 | rdp->nxttail[1] = rdp->nxttail[2]; | ||
153 | if (rcu_batch_after(batch - 1, rdp->batch)) | ||
154 | rdp->nxttail[0] = rdp->nxttail[2]; | ||
155 | } | ||
156 | |||
157 | rdp->batch = batch; | ||
158 | *rdp->nxttail[2] = head; | ||
159 | rdp->nxttail[2] = &head->next; | ||
160 | |||
161 | if (unlikely(++rdp->qlen > qhimark)) { | ||
162 | rdp->blimit = INT_MAX; | ||
163 | force_quiescent_state(rdp, &rcu_ctrlblk); | ||
164 | } | ||
165 | } | ||
166 | |||
167 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
168 | |||
169 | static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp) | ||
170 | { | ||
171 | rcp->gp_start = jiffies; | ||
172 | rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK; | ||
173 | } | ||
174 | |||
175 | static void print_other_cpu_stall(struct rcu_ctrlblk *rcp) | ||
176 | { | ||
177 | int cpu; | ||
178 | long delta; | ||
179 | unsigned long flags; | ||
180 | |||
181 | /* Only let one CPU complain about others per time interval. */ | ||
182 | |||
183 | spin_lock_irqsave(&rcp->lock, flags); | ||
184 | delta = jiffies - rcp->jiffies_stall; | ||
185 | if (delta < 2 || rcp->cur != rcp->completed) { | ||
186 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
187 | return; | ||
188 | } | ||
189 | rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK; | ||
190 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
191 | |||
192 | /* OK, time to rat on our buddy... */ | ||
193 | |||
194 | printk(KERN_ERR "RCU detected CPU stalls:"); | ||
195 | for_each_possible_cpu(cpu) { | ||
196 | if (cpu_isset(cpu, rcp->cpumask)) | ||
197 | printk(" %d", cpu); | ||
198 | } | ||
199 | printk(" (detected by %d, t=%ld jiffies)\n", | ||
200 | smp_processor_id(), (long)(jiffies - rcp->gp_start)); | ||
201 | } | ||
202 | |||
203 | static void print_cpu_stall(struct rcu_ctrlblk *rcp) | ||
204 | { | ||
205 | unsigned long flags; | ||
206 | |||
207 | printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu jiffies)\n", | ||
208 | smp_processor_id(), jiffies, | ||
209 | jiffies - rcp->gp_start); | ||
210 | dump_stack(); | ||
211 | spin_lock_irqsave(&rcp->lock, flags); | ||
212 | if ((long)(jiffies - rcp->jiffies_stall) >= 0) | ||
213 | rcp->jiffies_stall = | ||
214 | jiffies + RCU_SECONDS_TILL_STALL_RECHECK; | ||
215 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
216 | set_need_resched(); /* kick ourselves to get things going. */ | ||
217 | } | ||
218 | |||
219 | static void check_cpu_stall(struct rcu_ctrlblk *rcp) | ||
220 | { | ||
221 | long delta; | ||
222 | |||
223 | delta = jiffies - rcp->jiffies_stall; | ||
224 | if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) { | ||
225 | |||
226 | /* We haven't checked in, so go dump stack. */ | ||
227 | print_cpu_stall(rcp); | ||
228 | |||
229 | } else if (rcp->cur != rcp->completed && delta >= 2) { | ||
230 | |||
231 | /* They had two seconds to dump stack, so complain. */ | ||
232 | print_other_cpu_stall(rcp); | ||
233 | } | ||
234 | } | ||
235 | |||
236 | #else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
237 | |||
238 | static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp) | ||
239 | { | ||
240 | } | ||
241 | |||
242 | static inline void check_cpu_stall(struct rcu_ctrlblk *rcp) | ||
243 | { | ||
244 | } | ||
245 | |||
246 | #endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
247 | |||
121 | /** | 248 | /** |
122 | * call_rcu - Queue an RCU callback for invocation after a grace period. | 249 | * call_rcu - Queue an RCU callback for invocation after a grace period. |
123 | * @head: structure to be used for queueing the RCU updates. | 250 | * @head: structure to be used for queueing the RCU updates. |
@@ -133,18 +260,10 @@ void call_rcu(struct rcu_head *head, | |||
133 | void (*func)(struct rcu_head *rcu)) | 260 | void (*func)(struct rcu_head *rcu)) |
134 | { | 261 | { |
135 | unsigned long flags; | 262 | unsigned long flags; |
136 | struct rcu_data *rdp; | ||
137 | 263 | ||
138 | head->func = func; | 264 | head->func = func; |
139 | head->next = NULL; | ||
140 | local_irq_save(flags); | 265 | local_irq_save(flags); |
141 | rdp = &__get_cpu_var(rcu_data); | 266 | __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data)); |
142 | *rdp->nxttail = head; | ||
143 | rdp->nxttail = &head->next; | ||
144 | if (unlikely(++rdp->qlen > qhimark)) { | ||
145 | rdp->blimit = INT_MAX; | ||
146 | force_quiescent_state(rdp, &rcu_ctrlblk); | ||
147 | } | ||
148 | local_irq_restore(flags); | 267 | local_irq_restore(flags); |
149 | } | 268 | } |
150 | EXPORT_SYMBOL_GPL(call_rcu); | 269 | EXPORT_SYMBOL_GPL(call_rcu); |
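The call_rcu() interface is unchanged by the move to __call_rcu(); callers still embed an rcu_head in their object and queue a reclaim callback, for example:

    /* Standard call_rcu() usage, unaffected by this patch. */
    struct foo {
            int data;
            struct rcu_head rcu;
    };

    static void foo_reclaim(struct rcu_head *head)
    {
            struct foo *fp = container_of(head, struct foo, rcu);

            kfree(fp);
    }

    static void foo_release(struct foo *fp)
    {
            /* fp must already be unreachable by new RCU readers */
            call_rcu(&fp->rcu, foo_reclaim);
    }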
@@ -169,20 +288,10 @@ void call_rcu_bh(struct rcu_head *head, | |||
169 | void (*func)(struct rcu_head *rcu)) | 288 | void (*func)(struct rcu_head *rcu)) |
170 | { | 289 | { |
171 | unsigned long flags; | 290 | unsigned long flags; |
172 | struct rcu_data *rdp; | ||
173 | 291 | ||
174 | head->func = func; | 292 | head->func = func; |
175 | head->next = NULL; | ||
176 | local_irq_save(flags); | 293 | local_irq_save(flags); |
177 | rdp = &__get_cpu_var(rcu_bh_data); | 294 | __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); |
178 | *rdp->nxttail = head; | ||
179 | rdp->nxttail = &head->next; | ||
180 | |||
181 | if (unlikely(++rdp->qlen > qhimark)) { | ||
182 | rdp->blimit = INT_MAX; | ||
183 | force_quiescent_state(rdp, &rcu_bh_ctrlblk); | ||
184 | } | ||
185 | |||
186 | local_irq_restore(flags); | 295 | local_irq_restore(flags); |
187 | } | 296 | } |
188 | EXPORT_SYMBOL_GPL(call_rcu_bh); | 297 | EXPORT_SYMBOL_GPL(call_rcu_bh); |
@@ -211,12 +320,6 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); | |||
211 | static inline void raise_rcu_softirq(void) | 320 | static inline void raise_rcu_softirq(void) |
212 | { | 321 | { |
213 | raise_softirq(RCU_SOFTIRQ); | 322 | raise_softirq(RCU_SOFTIRQ); |
214 | /* | ||
215 | * The smp_mb() here is required to ensure that this cpu's | ||
216 | * __rcu_process_callbacks() reads the most recently updated | ||
217 | * value of rcu->cur. | ||
218 | */ | ||
219 | smp_mb(); | ||
220 | } | 323 | } |
221 | 324 | ||
222 | /* | 325 | /* |
@@ -225,6 +328,7 @@ static inline void raise_rcu_softirq(void) | |||
225 | */ | 328 | */ |
226 | static void rcu_do_batch(struct rcu_data *rdp) | 329 | static void rcu_do_batch(struct rcu_data *rdp) |
227 | { | 330 | { |
331 | unsigned long flags; | ||
228 | struct rcu_head *next, *list; | 332 | struct rcu_head *next, *list; |
229 | int count = 0; | 333 | int count = 0; |
230 | 334 | ||
@@ -239,9 +343,9 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
239 | } | 343 | } |
240 | rdp->donelist = list; | 344 | rdp->donelist = list; |
241 | 345 | ||
242 | local_irq_disable(); | 346 | local_irq_save(flags); |
243 | rdp->qlen -= count; | 347 | rdp->qlen -= count; |
244 | local_irq_enable(); | 348 | local_irq_restore(flags); |
245 | if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) | 349 | if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) |
246 | rdp->blimit = blimit; | 350 | rdp->blimit = blimit; |
247 | 351 | ||
@@ -269,6 +373,7 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
269 | * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace | 373 | * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace |
270 | * period (if necessary). | 374 | * period (if necessary). |
271 | */ | 375 | */ |
376 | |||
272 | /* | 377 | /* |
273 | * Register a new batch of callbacks, and start it up if there is currently no | 378 | * Register a new batch of callbacks, and start it up if there is currently no |
274 | * active batch and the batch to be registered has not already occurred. | 379 | * active batch and the batch to be registered has not already occurred. |
@@ -276,15 +381,10 @@ static void rcu_do_batch(struct rcu_data *rdp) | |||
276 | */ | 381 | */ |
277 | static void rcu_start_batch(struct rcu_ctrlblk *rcp) | 382 | static void rcu_start_batch(struct rcu_ctrlblk *rcp) |
278 | { | 383 | { |
279 | if (rcp->next_pending && | 384 | if (rcp->cur != rcp->pending && |
280 | rcp->completed == rcp->cur) { | 385 | rcp->completed == rcp->cur) { |
281 | rcp->next_pending = 0; | ||
282 | /* | ||
283 | * next_pending == 0 must be visible in | ||
284 | * __rcu_process_callbacks() before it can see new value of cur. | ||
285 | */ | ||
286 | smp_wmb(); | ||
287 | rcp->cur++; | 386 | rcp->cur++; |
387 | record_gp_stall_check_time(rcp); | ||
288 | 388 | ||
289 | /* | 389 | /* |
290 | * Accessing nohz_cpu_mask before incrementing rcp->cur needs a | 390 | * Accessing nohz_cpu_mask before incrementing rcp->cur needs a |
@@ -322,6 +422,8 @@ static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp) | |||
322 | static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, | 422 | static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, |
323 | struct rcu_data *rdp) | 423 | struct rcu_data *rdp) |
324 | { | 424 | { |
425 | unsigned long flags; | ||
426 | |||
325 | if (rdp->quiescbatch != rcp->cur) { | 427 | if (rdp->quiescbatch != rcp->cur) { |
326 | /* start new grace period: */ | 428 | /* start new grace period: */ |
327 | rdp->qs_pending = 1; | 429 | rdp->qs_pending = 1; |
@@ -345,7 +447,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, | |||
345 | return; | 447 | return; |
346 | rdp->qs_pending = 0; | 448 | rdp->qs_pending = 0; |
347 | 449 | ||
348 | spin_lock(&rcp->lock); | 450 | spin_lock_irqsave(&rcp->lock, flags); |
349 | /* | 451 | /* |
350 | * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync | 452 | * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync |
351 | * during cpu startup. Ignore the quiescent state. | 453 | * during cpu startup. Ignore the quiescent state. |
@@ -353,7 +455,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, | |||
353 | if (likely(rdp->quiescbatch == rcp->cur)) | 455 | if (likely(rdp->quiescbatch == rcp->cur)) |
354 | cpu_quiet(rdp->cpu, rcp); | 456 | cpu_quiet(rdp->cpu, rcp); |
355 | 457 | ||
356 | spin_unlock(&rcp->lock); | 458 | spin_unlock_irqrestore(&rcp->lock, flags); |
357 | } | 459 | } |
358 | 460 | ||
359 | 461 | ||
@@ -364,33 +466,38 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, | |||
364 | * which is dead and hence not processing interrupts. | 466 | * which is dead and hence not processing interrupts. |
365 | */ | 467 | */ |
366 | static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, | 468 | static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, |
367 | struct rcu_head **tail) | 469 | struct rcu_head **tail, long batch) |
368 | { | 470 | { |
369 | local_irq_disable(); | 471 | unsigned long flags; |
370 | *this_rdp->nxttail = list; | 472 | |
371 | if (list) | 473 | if (list) { |
372 | this_rdp->nxttail = tail; | 474 | local_irq_save(flags); |
373 | local_irq_enable(); | 475 | this_rdp->batch = batch; |
476 | *this_rdp->nxttail[2] = list; | ||
477 | this_rdp->nxttail[2] = tail; | ||
478 | local_irq_restore(flags); | ||
479 | } | ||
374 | } | 480 | } |
375 | 481 | ||
376 | static void __rcu_offline_cpu(struct rcu_data *this_rdp, | 482 | static void __rcu_offline_cpu(struct rcu_data *this_rdp, |
377 | struct rcu_ctrlblk *rcp, struct rcu_data *rdp) | 483 | struct rcu_ctrlblk *rcp, struct rcu_data *rdp) |
378 | { | 484 | { |
379 | /* if the cpu going offline owns the grace period | 485 | unsigned long flags; |
486 | |||
487 | /* | ||
488 | * if the cpu going offline owns the grace period | ||
380 | * we can block indefinitely waiting for it, so flush | 489 | * we can block indefinitely waiting for it, so flush |
381 | * it here | 490 | * it here |
382 | */ | 491 | */ |
383 | spin_lock_bh(&rcp->lock); | 492 | spin_lock_irqsave(&rcp->lock, flags); |
384 | if (rcp->cur != rcp->completed) | 493 | if (rcp->cur != rcp->completed) |
385 | cpu_quiet(rdp->cpu, rcp); | 494 | cpu_quiet(rdp->cpu, rcp); |
386 | spin_unlock_bh(&rcp->lock); | 495 | rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1); |
387 | rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail); | 496 | rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1); |
388 | rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail); | 497 | spin_unlock(&rcp->lock); |
389 | rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail); | ||
390 | 498 | ||
391 | local_irq_disable(); | ||
392 | this_rdp->qlen += rdp->qlen; | 499 | this_rdp->qlen += rdp->qlen; |
393 | local_irq_enable(); | 500 | local_irq_restore(flags); |
394 | } | 501 | } |
395 | 502 | ||
396 | static void rcu_offline_cpu(int cpu) | 503 | static void rcu_offline_cpu(int cpu) |
@@ -420,38 +527,52 @@ static void rcu_offline_cpu(int cpu) | |||
420 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, | 527 | static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, |
421 | struct rcu_data *rdp) | 528 | struct rcu_data *rdp) |
422 | { | 529 | { |
423 | if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) { | 530 | unsigned long flags; |
424 | *rdp->donetail = rdp->curlist; | 531 | long completed_snap; |
425 | rdp->donetail = rdp->curtail; | ||
426 | rdp->curlist = NULL; | ||
427 | rdp->curtail = &rdp->curlist; | ||
428 | } | ||
429 | 532 | ||
430 | if (rdp->nxtlist && !rdp->curlist) { | 533 | if (rdp->nxtlist) { |
431 | local_irq_disable(); | 534 | local_irq_save(flags); |
432 | rdp->curlist = rdp->nxtlist; | 535 | completed_snap = ACCESS_ONCE(rcp->completed); |
433 | rdp->curtail = rdp->nxttail; | ||
434 | rdp->nxtlist = NULL; | ||
435 | rdp->nxttail = &rdp->nxtlist; | ||
436 | local_irq_enable(); | ||
437 | 536 | ||
438 | /* | 537 | /* |
439 | * start the next batch of callbacks | 538 | * move the other grace-period-completed entries to |
539 | * [rdp->nxtlist, *rdp->nxttail[0]) temporarily | ||
440 | */ | 540 | */ |
541 | if (!rcu_batch_before(completed_snap, rdp->batch)) | ||
542 | rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2]; | ||
543 | else if (!rcu_batch_before(completed_snap, rdp->batch - 1)) | ||
544 | rdp->nxttail[0] = rdp->nxttail[1]; | ||
441 | 545 | ||
442 | /* determine batch number */ | 546 | /* |
443 | rdp->batch = rcp->cur + 1; | 547 | * the grace period for entries in |
444 | /* see the comment and corresponding wmb() in | 548 | * [rdp->nxtlist, *rdp->nxttail[0]) has completed and |
445 | * the rcu_start_batch() | 549 | * move these entries to donelist |
446 | */ | 550 | */ |
447 | smp_rmb(); | 551 | if (rdp->nxttail[0] != &rdp->nxtlist) { |
552 | *rdp->donetail = rdp->nxtlist; | ||
553 | rdp->donetail = rdp->nxttail[0]; | ||
554 | rdp->nxtlist = *rdp->nxttail[0]; | ||
555 | *rdp->donetail = NULL; | ||
556 | |||
557 | if (rdp->nxttail[1] == rdp->nxttail[0]) | ||
558 | rdp->nxttail[1] = &rdp->nxtlist; | ||
559 | if (rdp->nxttail[2] == rdp->nxttail[0]) | ||
560 | rdp->nxttail[2] = &rdp->nxtlist; | ||
561 | rdp->nxttail[0] = &rdp->nxtlist; | ||
562 | } | ||
563 | |||
564 | local_irq_restore(flags); | ||
565 | |||
566 | if (rcu_batch_after(rdp->batch, rcp->pending)) { | ||
567 | unsigned long flags2; | ||
448 | 568 | ||
449 | if (!rcp->next_pending) { | ||
450 | /* and start it/schedule start if it's a new batch */ | 569 | /* and start it/schedule start if it's a new batch */ |
451 | spin_lock(&rcp->lock); | 570 | spin_lock_irqsave(&rcp->lock, flags2); |
452 | rcp->next_pending = 1; | 571 | if (rcu_batch_after(rdp->batch, rcp->pending)) { |
453 | rcu_start_batch(rcp); | 572 | rcp->pending = rdp->batch; |
454 | spin_unlock(&rcp->lock); | 573 | rcu_start_batch(rcp); |
574 | } | ||
575 | spin_unlock_irqrestore(&rcp->lock, flags2); | ||
455 | } | 576 | } |
456 | } | 577 | } |
457 | 578 | ||
@@ -462,21 +583,53 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, | |||
462 | 583 | ||
463 | static void rcu_process_callbacks(struct softirq_action *unused) | 584 | static void rcu_process_callbacks(struct softirq_action *unused) |
464 | { | 585 | { |
586 | /* | ||
587 | * Memory references from any prior RCU read-side critical sections | ||
588 | * executed by the interrupted code must be seen before any RCU ||
589 | * grace-period manipulations below. ||
590 | */ | ||
591 | |||
592 | smp_mb(); /* See above block comment. */ | ||
593 | |||
465 | __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); | 594 | __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); |
466 | __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); | 595 | __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); |
596 | |||
597 | /* | ||
598 | * Memory references from any later RCU read-side critical sections | ||
599 | * executed by the interrupted code must be seen after any RCU ||
600 | * grace-period manipulations above. ||
601 | */ | ||
602 | |||
603 | smp_mb(); /* See above block comment. */ | ||
467 | } | 604 | } |
468 | 605 | ||
469 | static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) | 606 | static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) |
470 | { | 607 | { |
471 | /* This cpu has pending rcu entries and the grace period | 608 | /* Check for CPU stalls, if enabled. */ |
472 | * for them has completed. | 609 | check_cpu_stall(rcp); |
473 | */ | ||
474 | if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) | ||
475 | return 1; | ||
476 | 610 | ||
477 | /* This cpu has no pending entries, but there are new entries */ | 611 | if (rdp->nxtlist) { |
478 | if (!rdp->curlist && rdp->nxtlist) | 612 | long completed_snap = ACCESS_ONCE(rcp->completed); |
479 | return 1; | 613 | |
614 | /* | ||
615 | * This cpu has pending rcu entries and the grace period | ||
616 | * for them has completed. | ||
617 | */ | ||
618 | if (!rcu_batch_before(completed_snap, rdp->batch)) | ||
619 | return 1; | ||
620 | if (!rcu_batch_before(completed_snap, rdp->batch - 1) && | ||
621 | rdp->nxttail[0] != rdp->nxttail[1]) | ||
622 | return 1; | ||
623 | if (rdp->nxttail[0] != &rdp->nxtlist) | ||
624 | return 1; | ||
625 | |||
626 | /* | ||
627 | * This cpu has pending rcu entries and the new batch | ||
628 | * for them hasn't been started nor scheduled to start ||
629 | */ | ||
630 | if (rcu_batch_after(rdp->batch, rcp->pending)) | ||
631 | return 1; | ||
632 | } | ||
480 | 633 | ||
481 | /* This cpu has finished callbacks to invoke */ | 634 | /* This cpu has finished callbacks to invoke */ |
482 | if (rdp->donelist) | 635 | if (rdp->donelist) |
@@ -512,9 +665,15 @@ int rcu_needs_cpu(int cpu) | |||
512 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | 665 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); |
513 | struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); | 666 | struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); |
514 | 667 | ||
515 | return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu)); | 668 | return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu); |
516 | } | 669 | } |
517 | 670 | ||
671 | /* | ||
672 | * Top-level function driving RCU grace-period detection, normally | ||
673 | * invoked from the scheduler-clock interrupt. This function simply | ||
674 | * increments counters that are read only from softirq by this same | ||
675 | * CPU, so there are no memory barriers required. | ||
676 | */ | ||
518 | void rcu_check_callbacks(int cpu, int user) | 677 | void rcu_check_callbacks(int cpu, int user) |
519 | { | 678 | { |
520 | if (user || | 679 | if (user || |
@@ -558,14 +717,17 @@ void rcu_check_callbacks(int cpu, int user) | |||
558 | static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, | 717 | static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, |
559 | struct rcu_data *rdp) | 718 | struct rcu_data *rdp) |
560 | { | 719 | { |
720 | unsigned long flags; | ||
721 | |||
722 | spin_lock_irqsave(&rcp->lock, flags); | ||
561 | memset(rdp, 0, sizeof(*rdp)); | 723 | memset(rdp, 0, sizeof(*rdp)); |
562 | rdp->curtail = &rdp->curlist; | 724 | rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist; |
563 | rdp->nxttail = &rdp->nxtlist; | ||
564 | rdp->donetail = &rdp->donelist; | 725 | rdp->donetail = &rdp->donelist; |
565 | rdp->quiescbatch = rcp->completed; | 726 | rdp->quiescbatch = rcp->completed; |
566 | rdp->qs_pending = 0; | 727 | rdp->qs_pending = 0; |
567 | rdp->cpu = cpu; | 728 | rdp->cpu = cpu; |
568 | rdp->blimit = blimit; | 729 | rdp->blimit = blimit; |
730 | spin_unlock_irqrestore(&rcp->lock, flags); | ||
569 | } | 731 | } |
570 | 732 | ||
571 | static void __cpuinit rcu_online_cpu(int cpu) | 733 | static void __cpuinit rcu_online_cpu(int cpu) |
@@ -610,6 +772,9 @@ static struct notifier_block __cpuinitdata rcu_nb = { | |||
610 | */ | 772 | */ |
611 | void __init __rcu_init(void) | 773 | void __init __rcu_init(void) |
612 | { | 774 | { |
775 | #ifdef CONFIG_RCU_CPU_STALL_DETECTOR | ||
776 | printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n"); | ||
777 | #endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */ | ||
613 | rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, | 778 | rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, |
614 | (void *)(long)smp_processor_id()); | 779 | (void *)(long)smp_processor_id()); |
615 | /* Register notifier for non-boot CPUs */ | 780 | /* Register notifier for non-boot CPUs */ |
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 27827931ca0d..ca4bbbe04aa4 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -59,14 +59,6 @@ | |||
59 | #include <linux/rcupreempt_trace.h> | 59 | #include <linux/rcupreempt_trace.h> |
60 | 60 | ||
61 | /* | 61 | /* |
62 | * Macro that prevents the compiler from reordering accesses, but does | ||
63 | * absolutely -nothing- to prevent CPUs from reordering. This is used | ||
64 | * only to mediate communication between mainline code and hardware | ||
65 | * interrupt and NMI handlers. | ||
66 | */ | ||
67 | #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) | ||
68 | |||
69 | /* | ||
70 | * PREEMPT_RCU data structures. | 62 | * PREEMPT_RCU data structures. |
71 | */ | 63 | */ |
72 | 64 | ||
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
index 5edf82c34bbc..35c2d3360ecf 100644
--- a/kernel/rcupreempt_trace.c
+++ b/kernel/rcupreempt_trace.c
@@ -308,11 +308,16 @@ out: | |||
308 | 308 | ||
309 | static int __init rcupreempt_trace_init(void) | 309 | static int __init rcupreempt_trace_init(void) |
310 | { | 310 | { |
311 | int ret; | ||
312 | |||
311 | mutex_init(&rcupreempt_trace_mutex); | 313 | mutex_init(&rcupreempt_trace_mutex); |
312 | rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL); | 314 | rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL); |
313 | if (!rcupreempt_trace_buf) | 315 | if (!rcupreempt_trace_buf) |
314 | return 1; | 316 | return 1; |
315 | return rcupreempt_debugfs_init(); | 317 | ret = rcupreempt_debugfs_init(); |
318 | if (ret) | ||
319 | kfree(rcupreempt_trace_buf); | ||
320 | return ret; | ||
316 | } | 321 | } |
317 | 322 | ||
318 | static void __exit rcupreempt_trace_cleanup(void) | 323 | static void __exit rcupreempt_trace_cleanup(void) |
diff --git a/kernel/resource.c b/kernel/resource.c
index 03d796c1b2e9..7797dae85b50 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -38,10 +38,6 @@ EXPORT_SYMBOL(iomem_resource); | |||
38 | 38 | ||
39 | static DEFINE_RWLOCK(resource_lock); | 39 | static DEFINE_RWLOCK(resource_lock); |
40 | 40 | ||
41 | #ifdef CONFIG_PROC_FS | ||
42 | |||
43 | enum { MAX_IORES_LEVEL = 5 }; | ||
44 | |||
45 | static void *r_next(struct seq_file *m, void *v, loff_t *pos) | 41 | static void *r_next(struct seq_file *m, void *v, loff_t *pos) |
46 | { | 42 | { |
47 | struct resource *p = v; | 43 | struct resource *p = v; |
@@ -53,6 +49,10 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos) | |||
53 | return p->sibling; | 49 | return p->sibling; |
54 | } | 50 | } |
55 | 51 | ||
52 | #ifdef CONFIG_PROC_FS | ||
53 | |||
54 | enum { MAX_IORES_LEVEL = 5 }; | ||
55 | |||
56 | static void *r_start(struct seq_file *m, loff_t *pos) | 56 | static void *r_start(struct seq_file *m, loff_t *pos) |
57 | __acquires(resource_lock) | 57 | __acquires(resource_lock) |
58 | { | 58 | { |
@@ -516,6 +516,70 @@ int adjust_resource(struct resource *res, resource_size_t start, resource_size_t | |||
516 | return result; | 516 | return result; |
517 | } | 517 | } |
518 | 518 | ||
519 | static void __init __reserve_region_with_split(struct resource *root, | ||
520 | resource_size_t start, resource_size_t end, | ||
521 | const char *name) | ||
522 | { | ||
523 | struct resource *parent = root; | ||
524 | struct resource *conflict; | ||
525 | struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); | ||
526 | |||
527 | if (!res) | ||
528 | return; | ||
529 | |||
530 | res->name = name; | ||
531 | res->start = start; | ||
532 | res->end = end; | ||
533 | res->flags = IORESOURCE_BUSY; | ||
534 | |||
535 | for (;;) { | ||
536 | conflict = __request_resource(parent, res); | ||
537 | if (!conflict) | ||
538 | break; | ||
539 | if (conflict != parent) { | ||
540 | parent = conflict; | ||
541 | if (!(conflict->flags & IORESOURCE_BUSY)) | ||
542 | continue; | ||
543 | } | ||
544 | |||
545 | /* Uhhuh, that didn't work out.. */ | ||
546 | kfree(res); | ||
547 | res = NULL; | ||
548 | break; | ||
549 | } | ||
550 | |||
551 | if (!res) { | ||
552 | /* failed, split and try again */ | ||
553 | |||
554 | /* conflict covered whole area */ | ||
555 | if (conflict->start <= start && conflict->end >= end) | ||
556 | return; | ||
557 | |||
558 | if (conflict->start > start) | ||
559 | __reserve_region_with_split(root, start, conflict->start-1, name); | ||
560 | if (!(conflict->flags & IORESOURCE_BUSY)) { | ||
561 | resource_size_t common_start, common_end; | ||
562 | |||
563 | common_start = max(conflict->start, start); | ||
564 | common_end = min(conflict->end, end); | ||
565 | if (common_start < common_end) | ||
566 | __reserve_region_with_split(root, common_start, common_end, name); | ||
567 | } | ||
568 | if (conflict->end < end) | ||
569 | __reserve_region_with_split(root, conflict->end+1, end, name); | ||
570 | } | ||
571 | |||
572 | } | ||
573 | |||
574 | void reserve_region_with_split(struct resource *root, | ||
575 | resource_size_t start, resource_size_t end, | ||
576 | const char *name) | ||
577 | { | ||
578 | write_lock(&resource_lock); | ||
579 | __reserve_region_with_split(root, start, end, name); | ||
580 | write_unlock(&resource_lock); | ||
581 | } | ||
582 | |||
519 | EXPORT_SYMBOL(adjust_resource); | 583 | EXPORT_SYMBOL(adjust_resource); |
520 | 584 | ||
521 | /** | 585 | /** |
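__reserve_region_with_split() above retries around conflicts: when the request collides with an existing resource it recurses on the uncovered pieces to the left and right of the conflict (and, for non-busy conflicts, on the overlapping part as well). The splitting arithmetic in isolation, as a standalone sketch over a flat list of already-busy ranges rather than the kernel's resource tree, with the non-busy overlap case omitted:

#include <stdio.h>

struct range { unsigned long start, end; };

/* Pretend these ranges are already reserved. */
static const struct range busy[] = { { 0x100, 0x1ff }, { 0x400, 0x4ff } };

static void reserve_with_split(unsigned long start, unsigned long end)
{
	unsigned int i;

	for (i = 0; i < sizeof(busy) / sizeof(busy[0]); i++) {
		const struct range *c = &busy[i];

		if (c->end < start || c->start > end)
			continue;		/* no overlap with this slot */

		/* Conflict covers the whole request: nothing left to grab. */
		if (c->start <= start && c->end >= end)
			return;

		/* Recurse on the pieces the conflict does not cover. */
		if (c->start > start)
			reserve_with_split(start, c->start - 1);
		if (c->end < end)
			reserve_with_split(c->end + 1, end);
		return;
	}
	printf("reserved [%#lx-%#lx]\n", start, end);
}

int main(void)
{
	reserve_with_split(0x000, 0x5ff);	/* splits around both busy slots */
	return 0;
}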
@@ -763,3 +827,40 @@ static int __init reserve_setup(char *str) | |||
763 | } | 827 | } |
764 | 828 | ||
765 | __setup("reserve=", reserve_setup); | 829 | __setup("reserve=", reserve_setup); |
830 | |||
831 | /* | ||
832 | * Check if the requested addr and size spans more than any slot in the | ||
833 | * iomem resource tree. | ||
834 | */ | ||
835 | int iomem_map_sanity_check(resource_size_t addr, unsigned long size) | ||
836 | { | ||
837 | struct resource *p = &iomem_resource; | ||
838 | int err = 0; | ||
839 | loff_t l; | ||
840 | |||
841 | read_lock(&resource_lock); | ||
842 | for (p = p->child; p ; p = r_next(NULL, p, &l)) { | ||
843 | /* | ||
844 | * We can probably skip the resources without | ||
845 | * IORESOURCE_IO attribute? | ||
846 | */ | ||
847 | if (p->start >= addr + size) | ||
848 | continue; | ||
849 | if (p->end < addr) | ||
850 | continue; | ||
851 | if (p->start <= addr && (p->end >= addr + size - 1)) | ||
852 | continue; | ||
853 | printk(KERN_WARNING "resource map sanity check conflict: " | ||
854 | "0x%llx 0x%llx 0x%llx 0x%llx %s\n", | ||
855 | (unsigned long long)addr, | ||
856 | (unsigned long long)(addr + size - 1), | ||
857 | (unsigned long long)p->start, | ||
858 | (unsigned long long)p->end, | ||
859 | p->name); | ||
860 | err = -1; | ||
861 | break; | ||
862 | } | ||
863 | read_unlock(&resource_lock); | ||
864 | |||
865 | return err; | ||
866 | } | ||
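iomem_map_sanity_check() above flags a mapping that overlaps an existing slot without being fully contained in it. The same overlap-versus-containment test on plain integers, as a quick standalone sketch with made-up slot names:

#include <stdio.h>

struct slot { unsigned long start, end; const char *name; };

static const struct slot slots[] = {
	{ 0x1000, 0x1fff, "deviceA" },
	{ 0x2000, 0x2fff, "deviceB" },
};

/* Return 0 if [addr, addr+size-1] misses every slot or sits entirely
 * inside one slot; -1 if it straddles a slot boundary. */
static int map_sanity_check(unsigned long addr, unsigned long size)
{
	unsigned int i;

	for (i = 0; i < sizeof(slots) / sizeof(slots[0]); i++) {
		const struct slot *p = &slots[i];

		if (p->start >= addr + size)
			continue;		/* slot entirely above the request */
		if (p->end < addr)
			continue;		/* slot entirely below the request */
		if (p->start <= addr && p->end >= addr + size - 1)
			continue;		/* request fully contained: fine */
		fprintf(stderr, "sanity check conflict with %s\n", p->name);
		return -1;
	}
	return 0;
}

int main(void)
{
	printf("%d\n", map_sanity_check(0x1100, 0x100));	/* 0: inside deviceA */
	printf("%d\n", map_sanity_check(0x1f00, 0x200));	/* -1: straddles the A/B boundary */
	return 0;
}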
diff --git a/kernel/sched.c b/kernel/sched.c index cc1f81b50b82..6f230596bd0c 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -201,14 +201,19 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) | |||
201 | hrtimer_init(&rt_b->rt_period_timer, | 201 | hrtimer_init(&rt_b->rt_period_timer, |
202 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 202 | CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
203 | rt_b->rt_period_timer.function = sched_rt_period_timer; | 203 | rt_b->rt_period_timer.function = sched_rt_period_timer; |
204 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 204 | rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; |
205 | } | ||
206 | |||
207 | static inline int rt_bandwidth_enabled(void) | ||
208 | { | ||
209 | return sysctl_sched_rt_runtime >= 0; | ||
205 | } | 210 | } |
206 | 211 | ||
207 | static void start_rt_bandwidth(struct rt_bandwidth *rt_b) | 212 | static void start_rt_bandwidth(struct rt_bandwidth *rt_b) |
208 | { | 213 | { |
209 | ktime_t now; | 214 | ktime_t now; |
210 | 215 | ||
211 | if (rt_b->rt_runtime == RUNTIME_INF) | 216 | if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF) |
212 | return; | 217 | return; |
213 | 218 | ||
214 | if (hrtimer_active(&rt_b->rt_period_timer)) | 219 | if (hrtimer_active(&rt_b->rt_period_timer)) |
@@ -298,9 +303,9 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp; | |||
298 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); | 303 | static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); |
299 | static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; | 304 | static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; |
300 | #endif /* CONFIG_RT_GROUP_SCHED */ | 305 | #endif /* CONFIG_RT_GROUP_SCHED */ |
301 | #else /* !CONFIG_FAIR_GROUP_SCHED */ | 306 | #else /* !CONFIG_USER_SCHED */ |
302 | #define root_task_group init_task_group | 307 | #define root_task_group init_task_group |
303 | #endif /* CONFIG_FAIR_GROUP_SCHED */ | 308 | #endif /* CONFIG_USER_SCHED */ |
304 | 309 | ||
305 | /* task_group_lock serializes add/remove of task groups and also changes to | 310 | /* task_group_lock serializes add/remove of task groups and also changes to |
306 | * a task group's cpu shares. | 311 | * a task group's cpu shares. |
@@ -604,9 +609,9 @@ struct rq { | |||
604 | 609 | ||
605 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); | 610 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); |
606 | 611 | ||
607 | static inline void check_preempt_curr(struct rq *rq, struct task_struct *p) | 612 | static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync) |
608 | { | 613 | { |
609 | rq->curr->sched_class->check_preempt_curr(rq, p); | 614 | rq->curr->sched_class->check_preempt_curr(rq, p, sync); |
610 | } | 615 | } |
611 | 616 | ||
612 | static inline int cpu_of(struct rq *rq) | 617 | static inline int cpu_of(struct rq *rq) |
@@ -1087,7 +1092,7 @@ hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
1087 | return NOTIFY_DONE; | 1092 | return NOTIFY_DONE; |
1088 | } | 1093 | } |
1089 | 1094 | ||
1090 | static void init_hrtick(void) | 1095 | static __init void init_hrtick(void) |
1091 | { | 1096 | { |
1092 | hotcpu_notifier(hotplug_hrtick, 0); | 1097 | hotcpu_notifier(hotplug_hrtick, 0); |
1093 | } | 1098 | } |
@@ -1102,7 +1107,7 @@ static void hrtick_start(struct rq *rq, u64 delay) | |||
1102 | hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL); | 1107 | hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL); |
1103 | } | 1108 | } |
1104 | 1109 | ||
1105 | static void init_hrtick(void) | 1110 | static inline void init_hrtick(void) |
1106 | { | 1111 | { |
1107 | } | 1112 | } |
1108 | #endif /* CONFIG_SMP */ | 1113 | #endif /* CONFIG_SMP */ |
@@ -1119,9 +1124,9 @@ static void init_rq_hrtick(struct rq *rq) | |||
1119 | 1124 | ||
1120 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 1125 | hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
1121 | rq->hrtick_timer.function = hrtick; | 1126 | rq->hrtick_timer.function = hrtick; |
1122 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 1127 | rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; |
1123 | } | 1128 | } |
1124 | #else | 1129 | #else /* CONFIG_SCHED_HRTICK */ |
1125 | static inline void hrtick_clear(struct rq *rq) | 1130 | static inline void hrtick_clear(struct rq *rq) |
1126 | { | 1131 | { |
1127 | } | 1132 | } |
@@ -1133,7 +1138,7 @@ static inline void init_rq_hrtick(struct rq *rq) | |||
1133 | static inline void init_hrtick(void) | 1138 | static inline void init_hrtick(void) |
1134 | { | 1139 | { |
1135 | } | 1140 | } |
1136 | #endif | 1141 | #endif /* CONFIG_SCHED_HRTICK */ |
1137 | 1142 | ||
1138 | /* | 1143 | /* |
1139 | * resched_task - mark a task 'to be rescheduled now'. | 1144 | * resched_task - mark a task 'to be rescheduled now'. |
@@ -1380,38 +1385,24 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load) | |||
1380 | update_load_sub(&rq->load, load); | 1385 | update_load_sub(&rq->load, load); |
1381 | } | 1386 | } |
1382 | 1387 | ||
1383 | #ifdef CONFIG_SMP | 1388 | #if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED) |
1384 | static unsigned long source_load(int cpu, int type); | 1389 | typedef int (*tg_visitor)(struct task_group *, void *); |
1385 | static unsigned long target_load(int cpu, int type); | ||
1386 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | ||
1387 | |||
1388 | static unsigned long cpu_avg_load_per_task(int cpu) | ||
1389 | { | ||
1390 | struct rq *rq = cpu_rq(cpu); | ||
1391 | |||
1392 | if (rq->nr_running) | ||
1393 | rq->avg_load_per_task = rq->load.weight / rq->nr_running; | ||
1394 | |||
1395 | return rq->avg_load_per_task; | ||
1396 | } | ||
1397 | |||
1398 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
1399 | |||
1400 | typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *); | ||
1401 | 1390 | ||
1402 | /* | 1391 | /* |
1403 | * Iterate the full tree, calling @down when first entering a node and @up when | 1392 | * Iterate the full tree, calling @down when first entering a node and @up when |
1404 | * leaving it for the final time. | 1393 | * leaving it for the final time. |
1405 | */ | 1394 | */ |
1406 | static void | 1395 | static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) |
1407 | walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd) | ||
1408 | { | 1396 | { |
1409 | struct task_group *parent, *child; | 1397 | struct task_group *parent, *child; |
1398 | int ret; | ||
1410 | 1399 | ||
1411 | rcu_read_lock(); | 1400 | rcu_read_lock(); |
1412 | parent = &root_task_group; | 1401 | parent = &root_task_group; |
1413 | down: | 1402 | down: |
1414 | (*down)(parent, cpu, sd); | 1403 | ret = (*down)(parent, data); |
1404 | if (ret) | ||
1405 | goto out_unlock; | ||
1415 | list_for_each_entry_rcu(child, &parent->children, siblings) { | 1406 | list_for_each_entry_rcu(child, &parent->children, siblings) { |
1416 | parent = child; | 1407 | parent = child; |
1417 | goto down; | 1408 | goto down; |
@@ -1419,14 +1410,42 @@ down: | |||
1419 | up: | 1410 | up: |
1420 | continue; | 1411 | continue; |
1421 | } | 1412 | } |
1422 | (*up)(parent, cpu, sd); | 1413 | ret = (*up)(parent, data); |
1414 | if (ret) | ||
1415 | goto out_unlock; | ||
1423 | 1416 | ||
1424 | child = parent; | 1417 | child = parent; |
1425 | parent = parent->parent; | 1418 | parent = parent->parent; |
1426 | if (parent) | 1419 | if (parent) |
1427 | goto up; | 1420 | goto up; |
1421 | out_unlock: | ||
1428 | rcu_read_unlock(); | 1422 | rcu_read_unlock(); |
1423 | |||
1424 | return ret; | ||
1425 | } | ||
1426 | |||
1427 | static int tg_nop(struct task_group *tg, void *data) | ||
1428 | { | ||
1429 | return 0; | ||
1429 | } | 1430 | } |
1431 | #endif | ||
1432 | |||
1433 | #ifdef CONFIG_SMP | ||
1434 | static unsigned long source_load(int cpu, int type); | ||
1435 | static unsigned long target_load(int cpu, int type); | ||
1436 | static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd); | ||
1437 | |||
1438 | static unsigned long cpu_avg_load_per_task(int cpu) | ||
1439 | { | ||
1440 | struct rq *rq = cpu_rq(cpu); | ||
1441 | |||
1442 | if (rq->nr_running) | ||
1443 | rq->avg_load_per_task = rq->load.weight / rq->nr_running; | ||
1444 | |||
1445 | return rq->avg_load_per_task; | ||
1446 | } | ||
1447 | |||
1448 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
1430 | 1449 | ||
1431 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); | 1450 | static void __set_se_shares(struct sched_entity *se, unsigned long shares); |
1432 | 1451 | ||
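walk_tg_tree() above now takes int-returning visitors plus a void *data cookie and aborts the walk as soon as a callback reports an error; the kernel version iterates with gotos to avoid recursion on a deep hierarchy. A recursive standalone sketch of the same down/up visiting order with early exit; the tree layout and visitors are made up for illustration:

#include <stdio.h>

struct node {
	const char *name;
	struct node *child[2];
};

typedef int (*visitor)(struct node *n, void *data);

/* Call *down* on first entry and *up* when leaving a node for the last
 * time; a nonzero return from either visitor aborts the whole walk. */
static int walk_tree(struct node *n, visitor down, visitor up, void *data)
{
	int i, ret;

	ret = down(n, data);
	if (ret)
		return ret;
	for (i = 0; i < 2; i++) {
		if (!n->child[i])
			continue;
		ret = walk_tree(n->child[i], down, up, data);
		if (ret)
			return ret;
	}
	return up(n, data);
}

static int print_down(struct node *n, void *data) { (void)data; printf("down %s\n", n->name); return 0; }
static int print_up(struct node *n, void *data)   { (void)data; printf("up   %s\n", n->name); return 0; }

int main(void)
{
	struct node b = { "B", { NULL, NULL } }, c = { "C", { NULL, NULL } };
	struct node root = { "root", { &b, &c } };

	return walk_tree(&root, print_down, print_up, NULL);
}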
@@ -1486,11 +1505,11 @@ __update_group_shares_cpu(struct task_group *tg, int cpu, | |||
1486 | * This needs to be done in a bottom-up fashion because the rq weight of a | 1505 | * This needs to be done in a bottom-up fashion because the rq weight of a |
1487 | * parent group depends on the shares of its child groups. | 1506 | * parent group depends on the shares of its child groups. |
1488 | */ | 1507 | */ |
1489 | static void | 1508 | static int tg_shares_up(struct task_group *tg, void *data) |
1490 | tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd) | ||
1491 | { | 1509 | { |
1492 | unsigned long rq_weight = 0; | 1510 | unsigned long rq_weight = 0; |
1493 | unsigned long shares = 0; | 1511 | unsigned long shares = 0; |
1512 | struct sched_domain *sd = data; | ||
1494 | int i; | 1513 | int i; |
1495 | 1514 | ||
1496 | for_each_cpu_mask(i, sd->span) { | 1515 | for_each_cpu_mask(i, sd->span) { |
@@ -1515,6 +1534,8 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd) | |||
1515 | __update_group_shares_cpu(tg, i, shares, rq_weight); | 1534 | __update_group_shares_cpu(tg, i, shares, rq_weight); |
1516 | spin_unlock_irqrestore(&rq->lock, flags); | 1535 | spin_unlock_irqrestore(&rq->lock, flags); |
1517 | } | 1536 | } |
1537 | |||
1538 | return 0; | ||
1518 | } | 1539 | } |
1519 | 1540 | ||
1520 | /* | 1541 | /* |
@@ -1522,10 +1543,10 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd) | |||
1522 | * This needs to be done in a top-down fashion because the load of a child | 1543 | * This needs to be done in a top-down fashion because the load of a child |
1523 | * group is a fraction of its parents load. | 1544 | * group is a fraction of its parents load. |
1524 | */ | 1545 | */ |
1525 | static void | 1546 | static int tg_load_down(struct task_group *tg, void *data) |
1526 | tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd) | ||
1527 | { | 1547 | { |
1528 | unsigned long load; | 1548 | unsigned long load; |
1549 | long cpu = (long)data; | ||
1529 | 1550 | ||
1530 | if (!tg->parent) { | 1551 | if (!tg->parent) { |
1531 | load = cpu_rq(cpu)->load.weight; | 1552 | load = cpu_rq(cpu)->load.weight; |
@@ -1536,11 +1557,8 @@ tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd) | |||
1536 | } | 1557 | } |
1537 | 1558 | ||
1538 | tg->cfs_rq[cpu]->h_load = load; | 1559 | tg->cfs_rq[cpu]->h_load = load; |
1539 | } | ||
1540 | 1560 | ||
1541 | static void | 1561 | return 0; |
1542 | tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd) | ||
1543 | { | ||
1544 | } | 1562 | } |
1545 | 1563 | ||
1546 | static void update_shares(struct sched_domain *sd) | 1564 | static void update_shares(struct sched_domain *sd) |
@@ -1550,7 +1568,7 @@ static void update_shares(struct sched_domain *sd) | |||
1550 | 1568 | ||
1551 | if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { | 1569 | if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { |
1552 | sd->last_update = now; | 1570 | sd->last_update = now; |
1553 | walk_tg_tree(tg_nop, tg_shares_up, 0, sd); | 1571 | walk_tg_tree(tg_nop, tg_shares_up, sd); |
1554 | } | 1572 | } |
1555 | } | 1573 | } |
1556 | 1574 | ||
@@ -1561,9 +1579,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd) | |||
1561 | spin_lock(&rq->lock); | 1579 | spin_lock(&rq->lock); |
1562 | } | 1580 | } |
1563 | 1581 | ||
1564 | static void update_h_load(int cpu) | 1582 | static void update_h_load(long cpu) |
1565 | { | 1583 | { |
1566 | walk_tg_tree(tg_load_down, tg_nop, cpu, NULL); | 1584 | walk_tg_tree(tg_load_down, tg_nop, (void *)cpu); |
1567 | } | 1585 | } |
1568 | 1586 | ||
1569 | #else | 1587 | #else |
@@ -1921,11 +1939,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) | |||
1921 | running = task_running(rq, p); | 1939 | running = task_running(rq, p); |
1922 | on_rq = p->se.on_rq; | 1940 | on_rq = p->se.on_rq; |
1923 | ncsw = 0; | 1941 | ncsw = 0; |
1924 | if (!match_state || p->state == match_state) { | 1942 | if (!match_state || p->state == match_state) |
1925 | ncsw = p->nivcsw + p->nvcsw; | 1943 | ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ |
1926 | if (unlikely(!ncsw)) | ||
1927 | ncsw = 1; | ||
1928 | } | ||
1929 | task_rq_unlock(rq, &flags); | 1944 | task_rq_unlock(rq, &flags); |
1930 | 1945 | ||
1931 | /* | 1946 | /* |
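wait_task_inactive() above encodes "the state matched" by OR-ing LONG_MIN into the sampled context-switch count, so even a sampled count of zero is distinguishable from the "no match" value of 0. A tiny standalone demonstration of that encoding, with an invented helper name:

#include <limits.h>
#include <stdio.h>

/* Sample a counter only when some condition holds; a set sign bit marks
 * the sample as valid, so count == 0 is still distinguishable. */
static long sample_count(unsigned long nvcsw, int state_matched)
{
	long ncsw = 0;

	if (state_matched)
		ncsw = (long)(nvcsw | (unsigned long)LONG_MIN);	/* sets the MSB */
	return ncsw;
}

int main(void)
{
	long a = sample_count(0, 1);	/* matched, count 0  -> nonzero (LONG_MIN) */
	long b = sample_count(0, 0);	/* did not match     -> 0 */
	long c = sample_count(42, 1);	/* matched, count 42 -> 42 with MSB set */

	printf("%ld %ld %lu\n", a, b, (unsigned long)c & LONG_MAX);	/* last prints 42 */
	return 0;
}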
@@ -2285,7 +2300,7 @@ out_running: | |||
2285 | trace_mark(kernel_sched_wakeup, | 2300 | trace_mark(kernel_sched_wakeup, |
2286 | "pid %d state %ld ## rq %p task %p rq->curr %p", | 2301 | "pid %d state %ld ## rq %p task %p rq->curr %p", |
2287 | p->pid, p->state, rq, p, rq->curr); | 2302 | p->pid, p->state, rq, p, rq->curr); |
2288 | check_preempt_curr(rq, p); | 2303 | check_preempt_curr(rq, p, sync); |
2289 | 2304 | ||
2290 | p->state = TASK_RUNNING; | 2305 | p->state = TASK_RUNNING; |
2291 | #ifdef CONFIG_SMP | 2306 | #ifdef CONFIG_SMP |
@@ -2420,7 +2435,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags) | |||
2420 | trace_mark(kernel_sched_wakeup_new, | 2435 | trace_mark(kernel_sched_wakeup_new, |
2421 | "pid %d state %ld ## rq %p task %p rq->curr %p", | 2436 | "pid %d state %ld ## rq %p task %p rq->curr %p", |
2422 | p->pid, p->state, rq, p, rq->curr); | 2437 | p->pid, p->state, rq, p, rq->curr); |
2423 | check_preempt_curr(rq, p); | 2438 | check_preempt_curr(rq, p, 0); |
2424 | #ifdef CONFIG_SMP | 2439 | #ifdef CONFIG_SMP |
2425 | if (p->sched_class->task_wake_up) | 2440 | if (p->sched_class->task_wake_up) |
2426 | p->sched_class->task_wake_up(rq, p); | 2441 | p->sched_class->task_wake_up(rq, p); |
@@ -2880,7 +2895,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p, | |||
2880 | * Note that idle threads have a prio of MAX_PRIO, for this test | 2895 | * Note that idle threads have a prio of MAX_PRIO, for this test |
2881 | * to be always true for them. | 2896 | * to be always true for them. |
2882 | */ | 2897 | */ |
2883 | check_preempt_curr(this_rq, p); | 2898 | check_preempt_curr(this_rq, p, 0); |
2884 | } | 2899 | } |
2885 | 2900 | ||
2886 | /* | 2901 | /* |
@@ -4627,6 +4642,15 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) | |||
4627 | } | 4642 | } |
4628 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ | 4643 | EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ |
4629 | 4644 | ||
4645 | /** | ||
4646 | * complete: - signals a single thread waiting on this completion | ||
4647 | * @x: holds the state of this particular completion | ||
4648 | * | ||
4649 | * This will wake up a single thread waiting on this completion. Threads will be | ||
4650 | * awakened in the same order in which they were queued. | ||
4651 | * | ||
4652 | * See also complete_all(), wait_for_completion() and related routines. | ||
4653 | */ | ||
4630 | void complete(struct completion *x) | 4654 | void complete(struct completion *x) |
4631 | { | 4655 | { |
4632 | unsigned long flags; | 4656 | unsigned long flags; |
@@ -4638,6 +4662,12 @@ void complete(struct completion *x) | |||
4638 | } | 4662 | } |
4639 | EXPORT_SYMBOL(complete); | 4663 | EXPORT_SYMBOL(complete); |
4640 | 4664 | ||
4665 | /** | ||
4666 | * complete_all: - signals all threads waiting on this completion | ||
4667 | * @x: holds the state of this particular completion | ||
4668 | * | ||
4669 | * This will wake up all threads waiting on this particular completion event. | ||
4670 | */ | ||
4641 | void complete_all(struct completion *x) | 4671 | void complete_all(struct completion *x) |
4642 | { | 4672 | { |
4643 | unsigned long flags; | 4673 | unsigned long flags; |
@@ -4658,10 +4688,7 @@ do_wait_for_common(struct completion *x, long timeout, int state) | |||
4658 | wait.flags |= WQ_FLAG_EXCLUSIVE; | 4688 | wait.flags |= WQ_FLAG_EXCLUSIVE; |
4659 | __add_wait_queue_tail(&x->wait, &wait); | 4689 | __add_wait_queue_tail(&x->wait, &wait); |
4660 | do { | 4690 | do { |
4661 | if ((state == TASK_INTERRUPTIBLE && | 4691 | if (signal_pending_state(state, current)) { |
4662 | signal_pending(current)) || | ||
4663 | (state == TASK_KILLABLE && | ||
4664 | fatal_signal_pending(current))) { | ||
4665 | timeout = -ERESTARTSYS; | 4692 | timeout = -ERESTARTSYS; |
4666 | break; | 4693 | break; |
4667 | } | 4694 | } |
@@ -4689,12 +4716,31 @@ wait_for_common(struct completion *x, long timeout, int state) | |||
4689 | return timeout; | 4716 | return timeout; |
4690 | } | 4717 | } |
4691 | 4718 | ||
4719 | /** | ||
4720 | * wait_for_completion: - waits for completion of a task | ||
4721 | * @x: holds the state of this particular completion | ||
4722 | * | ||
4723 | * This waits to be signaled for completion of a specific task. It is NOT | ||
4724 | * interruptible and there is no timeout. | ||
4725 | * | ||
4726 | * See also similar routines (i.e. wait_for_completion_timeout()) with timeout | ||
4727 | * and interrupt capability. Also see complete(). | ||
4728 | */ | ||
4692 | void __sched wait_for_completion(struct completion *x) | 4729 | void __sched wait_for_completion(struct completion *x) |
4693 | { | 4730 | { |
4694 | wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); | 4731 | wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); |
4695 | } | 4732 | } |
4696 | EXPORT_SYMBOL(wait_for_completion); | 4733 | EXPORT_SYMBOL(wait_for_completion); |
4697 | 4734 | ||
4735 | /** | ||
4736 | * wait_for_completion_timeout: - waits for completion of a task (w/timeout) | ||
4737 | * @x: holds the state of this particular completion | ||
4738 | * @timeout: timeout value in jiffies | ||
4739 | * | ||
4740 | * This waits for either a completion of a specific task to be signaled or for a | ||
4741 | * specified timeout to expire. The timeout is in jiffies. It is not | ||
4742 | * interruptible. | ||
4743 | */ | ||
4698 | unsigned long __sched | 4744 | unsigned long __sched |
4699 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) | 4745 | wait_for_completion_timeout(struct completion *x, unsigned long timeout) |
4700 | { | 4746 | { |
@@ -4702,6 +4748,13 @@ wait_for_completion_timeout(struct completion *x, unsigned long timeout) | |||
4702 | } | 4748 | } |
4703 | EXPORT_SYMBOL(wait_for_completion_timeout); | 4749 | EXPORT_SYMBOL(wait_for_completion_timeout); |
4704 | 4750 | ||
4751 | /** | ||
4752 | * wait_for_completion_interruptible: - waits for completion of a task (w/intr) | ||
4753 | * @x: holds the state of this particular completion | ||
4754 | * | ||
4755 | * This waits for completion of a specific task to be signaled. It is | ||
4756 | * interruptible. | ||
4757 | */ | ||
4705 | int __sched wait_for_completion_interruptible(struct completion *x) | 4758 | int __sched wait_for_completion_interruptible(struct completion *x) |
4706 | { | 4759 | { |
4707 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); | 4760 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); |
@@ -4711,6 +4764,14 @@ int __sched wait_for_completion_interruptible(struct completion *x) | |||
4711 | } | 4764 | } |
4712 | EXPORT_SYMBOL(wait_for_completion_interruptible); | 4765 | EXPORT_SYMBOL(wait_for_completion_interruptible); |
4713 | 4766 | ||
4767 | /** | ||
4768 | * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr)) | ||
4769 | * @x: holds the state of this particular completion | ||
4770 | * @timeout: timeout value in jiffies | ||
4771 | * | ||
4772 | * This waits for either a completion of a specific task to be signaled or for a | ||
4773 | * specified timeout to expire. It is interruptible. The timeout is in jiffies. | ||
4774 | */ | ||
4714 | unsigned long __sched | 4775 | unsigned long __sched |
4715 | wait_for_completion_interruptible_timeout(struct completion *x, | 4776 | wait_for_completion_interruptible_timeout(struct completion *x, |
4716 | unsigned long timeout) | 4777 | unsigned long timeout) |
@@ -4719,6 +4780,13 @@ wait_for_completion_interruptible_timeout(struct completion *x, | |||
4719 | } | 4780 | } |
4720 | EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); | 4781 | EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); |
4721 | 4782 | ||
4783 | /** | ||
4784 | * wait_for_completion_killable: - waits for completion of a task (killable) | ||
4785 | * @x: holds the state of this particular completion | ||
4786 | * | ||
4787 | * This waits to be signaled for completion of a specific task. It can be | ||
4788 | * interrupted by a kill signal. | ||
4789 | */ | ||
4722 | int __sched wait_for_completion_killable(struct completion *x) | 4790 | int __sched wait_for_completion_killable(struct completion *x) |
4723 | { | 4791 | { |
4724 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); | 4792 | long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); |
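The new kerneldoc above documents the completion API itself. For context, a typical usage pattern matching that documentation is one context blocking in wait_for_completion() while another signals it with complete(); this is an illustrative kernel-style sketch, not part of the patch and not standalone-buildable:

/* Illustrative sketch only. */
#include <linux/completion.h>

static DECLARE_COMPLETION(setup_done);

/* Runs in one context (e.g. a worker). */
static void producer(void)
{
	/* ... finish the work the waiter depends on ... */
	complete(&setup_done);		/* wakes exactly one waiter, FIFO order */
}

/* Runs in another context; sleeps uninterruptibly until signalled. */
static void consumer(void)
{
	wait_for_completion(&setup_done);
	/* ... safe to proceed, the producer has finished ... */
}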
@@ -5121,7 +5189,8 @@ recheck: | |||
5121 | * Do not allow realtime tasks into groups that have no runtime | 5189 | * Do not allow realtime tasks into groups that have no runtime |
5122 | * assigned. | 5190 | * assigned. |
5123 | */ | 5191 | */ |
5124 | if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0) | 5192 | if (rt_bandwidth_enabled() && rt_policy(policy) && |
5193 | task_group(p)->rt_bandwidth.rt_runtime == 0) | ||
5125 | return -EPERM; | 5194 | return -EPERM; |
5126 | #endif | 5195 | #endif |
5127 | 5196 | ||
@@ -5957,7 +6026,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) | |||
5957 | set_task_cpu(p, dest_cpu); | 6026 | set_task_cpu(p, dest_cpu); |
5958 | if (on_rq) { | 6027 | if (on_rq) { |
5959 | activate_task(rq_dest, p, 0); | 6028 | activate_task(rq_dest, p, 0); |
5960 | check_preempt_curr(rq_dest, p); | 6029 | check_preempt_curr(rq_dest, p, 0); |
5961 | } | 6030 | } |
5962 | done: | 6031 | done: |
5963 | ret = 1; | 6032 | ret = 1; |
@@ -6282,7 +6351,7 @@ set_table_entry(struct ctl_table *entry, | |||
6282 | static struct ctl_table * | 6351 | static struct ctl_table * |
6283 | sd_alloc_ctl_domain_table(struct sched_domain *sd) | 6352 | sd_alloc_ctl_domain_table(struct sched_domain *sd) |
6284 | { | 6353 | { |
6285 | struct ctl_table *table = sd_alloc_ctl_entry(12); | 6354 | struct ctl_table *table = sd_alloc_ctl_entry(13); |
6286 | 6355 | ||
6287 | if (table == NULL) | 6356 | if (table == NULL) |
6288 | return NULL; | 6357 | return NULL; |
@@ -6310,7 +6379,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd) | |||
6310 | sizeof(int), 0644, proc_dointvec_minmax); | 6379 | sizeof(int), 0644, proc_dointvec_minmax); |
6311 | set_table_entry(&table[10], "flags", &sd->flags, | 6380 | set_table_entry(&table[10], "flags", &sd->flags, |
6312 | sizeof(int), 0644, proc_dointvec_minmax); | 6381 | sizeof(int), 0644, proc_dointvec_minmax); |
6313 | /* &table[11] is terminator */ | 6382 | set_table_entry(&table[11], "name", sd->name, |
6383 | CORENAME_MAX_SIZE, 0444, proc_dostring); | ||
6384 | /* &table[12] is terminator */ | ||
6314 | 6385 | ||
6315 | return table; | 6386 | return table; |
6316 | } | 6387 | } |
@@ -7194,13 +7265,21 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd) | |||
7194 | * Non-inlined to reduce accumulated stack pressure in build_sched_domains() | 7265 | * Non-inlined to reduce accumulated stack pressure in build_sched_domains() |
7195 | */ | 7266 | */ |
7196 | 7267 | ||
7268 | #ifdef CONFIG_SCHED_DEBUG | ||
7269 | # define SD_INIT_NAME(sd, type) sd->name = #type | ||
7270 | #else | ||
7271 | # define SD_INIT_NAME(sd, type) do { } while (0) | ||
7272 | #endif | ||
7273 | |||
7197 | #define SD_INIT(sd, type) sd_init_##type(sd) | 7274 | #define SD_INIT(sd, type) sd_init_##type(sd) |
7275 | |||
7198 | #define SD_INIT_FUNC(type) \ | 7276 | #define SD_INIT_FUNC(type) \ |
7199 | static noinline void sd_init_##type(struct sched_domain *sd) \ | 7277 | static noinline void sd_init_##type(struct sched_domain *sd) \ |
7200 | { \ | 7278 | { \ |
7201 | memset(sd, 0, sizeof(*sd)); \ | 7279 | memset(sd, 0, sizeof(*sd)); \ |
7202 | *sd = SD_##type##_INIT; \ | 7280 | *sd = SD_##type##_INIT; \ |
7203 | sd->level = SD_LV_##type; \ | 7281 | sd->level = SD_LV_##type; \ |
7282 | SD_INIT_NAME(sd, type); \ | ||
7204 | } | 7283 | } |
7205 | 7284 | ||
7206 | SD_INIT_FUNC(CPU) | 7285 | SD_INIT_FUNC(CPU) |
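SD_INIT_NAME() above relies on the preprocessor's # stringification operator to turn the domain type token into a human-readable name, and compiles away entirely without CONFIG_SCHED_DEBUG. The mechanism in isolation, with invented names standing in for the kernel macros:

#include <stdio.h>

#define DEBUG_NAMES 1

struct domain { const char *name; };

#if DEBUG_NAMES
# define SET_NAME(d, type)	((d)->name = #type)	/* #type becomes the literal "CPU", "NODE", ... */
#else
# define SET_NAME(d, type)	do { } while (0)
#endif

int main(void)
{
	struct domain d = { "" };

	SET_NAME(&d, CPU);
	printf("%s\n", d.name);		/* prints "CPU" when DEBUG_NAMES is enabled */
	return 0;
}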
@@ -8242,20 +8321,25 @@ void __might_sleep(char *file, int line) | |||
8242 | #ifdef in_atomic | 8321 | #ifdef in_atomic |
8243 | static unsigned long prev_jiffy; /* ratelimiting */ | 8322 | static unsigned long prev_jiffy; /* ratelimiting */ |
8244 | 8323 | ||
8245 | if ((in_atomic() || irqs_disabled()) && | 8324 | if ((!in_atomic() && !irqs_disabled()) || |
8246 | system_state == SYSTEM_RUNNING && !oops_in_progress) { | 8325 | system_state != SYSTEM_RUNNING || oops_in_progress) |
8247 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) | 8326 | return; |
8248 | return; | 8327 | if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) |
8249 | prev_jiffy = jiffies; | 8328 | return; |
8250 | printk(KERN_ERR "BUG: sleeping function called from invalid" | 8329 | prev_jiffy = jiffies; |
8251 | " context at %s:%d\n", file, line); | 8330 | |
8252 | printk("in_atomic():%d, irqs_disabled():%d\n", | 8331 | printk(KERN_ERR |
8253 | in_atomic(), irqs_disabled()); | 8332 | "BUG: sleeping function called from invalid context at %s:%d\n", |
8254 | debug_show_held_locks(current); | 8333 | file, line); |
8255 | if (irqs_disabled()) | 8334 | printk(KERN_ERR |
8256 | print_irqtrace_events(current); | 8335 | "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", |
8257 | dump_stack(); | 8336 | in_atomic(), irqs_disabled(), |
8258 | } | 8337 | current->pid, current->comm); |
8338 | |||
8339 | debug_show_held_locks(current); | ||
8340 | if (irqs_disabled()) | ||
8341 | print_irqtrace_events(current); | ||
8342 | dump_stack(); | ||
8259 | #endif | 8343 | #endif |
8260 | } | 8344 | } |
8261 | EXPORT_SYMBOL(__might_sleep); | 8345 | EXPORT_SYMBOL(__might_sleep); |
@@ -8753,73 +8837,95 @@ static DEFINE_MUTEX(rt_constraints_mutex); | |||
8753 | static unsigned long to_ratio(u64 period, u64 runtime) | 8837 | static unsigned long to_ratio(u64 period, u64 runtime) |
8754 | { | 8838 | { |
8755 | if (runtime == RUNTIME_INF) | 8839 | if (runtime == RUNTIME_INF) |
8756 | return 1ULL << 16; | 8840 | return 1ULL << 20; |
8757 | 8841 | ||
8758 | return div64_u64(runtime << 16, period); | 8842 | return div64_u64(runtime << 20, period); |
8759 | } | 8843 | } |
8760 | 8844 | ||
8761 | #ifdef CONFIG_CGROUP_SCHED | 8845 | /* Must be called with tasklist_lock held */ |
8762 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) | 8846 | static inline int tg_has_rt_tasks(struct task_group *tg) |
8763 | { | 8847 | { |
8764 | struct task_group *tgi, *parent = tg->parent; | 8848 | struct task_struct *g, *p; |
8765 | unsigned long total = 0; | ||
8766 | 8849 | ||
8767 | if (!parent) { | 8850 | do_each_thread(g, p) { |
8768 | if (global_rt_period() < period) | 8851 | if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) |
8769 | return 0; | 8852 | return 1; |
8853 | } while_each_thread(g, p); | ||
8770 | 8854 | ||
8771 | return to_ratio(period, runtime) < | 8855 | return 0; |
8772 | to_ratio(global_rt_period(), global_rt_runtime()); | 8856 | } |
8773 | } | ||
8774 | 8857 | ||
8775 | if (ktime_to_ns(parent->rt_bandwidth.rt_period) < period) | 8858 | struct rt_schedulable_data { |
8776 | return 0; | 8859 | struct task_group *tg; |
8860 | u64 rt_period; | ||
8861 | u64 rt_runtime; | ||
8862 | }; | ||
8777 | 8863 | ||
8778 | rcu_read_lock(); | 8864 | static int tg_schedulable(struct task_group *tg, void *data) |
8779 | list_for_each_entry_rcu(tgi, &parent->children, siblings) { | 8865 | { |
8780 | if (tgi == tg) | 8866 | struct rt_schedulable_data *d = data; |
8781 | continue; | 8867 | struct task_group *child; |
8868 | unsigned long total, sum = 0; | ||
8869 | u64 period, runtime; | ||
8870 | |||
8871 | period = ktime_to_ns(tg->rt_bandwidth.rt_period); | ||
8872 | runtime = tg->rt_bandwidth.rt_runtime; | ||
8782 | 8873 | ||
8783 | total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period), | 8874 | if (tg == d->tg) { |
8784 | tgi->rt_bandwidth.rt_runtime); | 8875 | period = d->rt_period; |
8876 | runtime = d->rt_runtime; | ||
8785 | } | 8877 | } |
8786 | rcu_read_unlock(); | ||
8787 | 8878 | ||
8788 | return total + to_ratio(period, runtime) <= | 8879 | /* |
8789 | to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period), | 8880 | * Cannot have more runtime than the period. |
8790 | parent->rt_bandwidth.rt_runtime); | 8881 | */ |
8791 | } | 8882 | if (runtime > period && runtime != RUNTIME_INF) |
8792 | #elif defined CONFIG_USER_SCHED | 8883 | return -EINVAL; |
8793 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) | ||
8794 | { | ||
8795 | struct task_group *tgi; | ||
8796 | unsigned long total = 0; | ||
8797 | unsigned long global_ratio = | ||
8798 | to_ratio(global_rt_period(), global_rt_runtime()); | ||
8799 | 8884 | ||
8800 | rcu_read_lock(); | 8885 | /* |
8801 | list_for_each_entry_rcu(tgi, &task_groups, list) { | 8886 | * Ensure we don't starve existing RT tasks. |
8802 | if (tgi == tg) | 8887 | */ |
8803 | continue; | 8888 | if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg)) |
8889 | return -EBUSY; | ||
8890 | |||
8891 | total = to_ratio(period, runtime); | ||
8804 | 8892 | ||
8805 | total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period), | 8893 | /* |
8806 | tgi->rt_bandwidth.rt_runtime); | 8894 | * Nobody can have more than the global setting allows. |
8895 | */ | ||
8896 | if (total > to_ratio(global_rt_period(), global_rt_runtime())) | ||
8897 | return -EINVAL; | ||
8898 | |||
8899 | /* | ||
8900 | * The sum of our children's runtime should not exceed our own. | ||
8901 | */ | ||
8902 | list_for_each_entry_rcu(child, &tg->children, siblings) { | ||
8903 | period = ktime_to_ns(child->rt_bandwidth.rt_period); | ||
8904 | runtime = child->rt_bandwidth.rt_runtime; | ||
8905 | |||
8906 | if (child == d->tg) { | ||
8907 | period = d->rt_period; | ||
8908 | runtime = d->rt_runtime; | ||
8909 | } | ||
8910 | |||
8911 | sum += to_ratio(period, runtime); | ||
8807 | } | 8912 | } |
8808 | rcu_read_unlock(); | ||
8809 | 8913 | ||
8810 | return total + to_ratio(period, runtime) < global_ratio; | 8914 | if (sum > total) |
8915 | return -EINVAL; | ||
8916 | |||
8917 | return 0; | ||
8811 | } | 8918 | } |
8812 | #endif | ||
8813 | 8919 | ||
8814 | /* Must be called with tasklist_lock held */ | 8920 | static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) |
8815 | static inline int tg_has_rt_tasks(struct task_group *tg) | ||
8816 | { | 8921 | { |
8817 | struct task_struct *g, *p; | 8922 | struct rt_schedulable_data data = { |
8818 | do_each_thread(g, p) { | 8923 | .tg = tg, |
8819 | if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) | 8924 | .rt_period = period, |
8820 | return 1; | 8925 | .rt_runtime = runtime, |
8821 | } while_each_thread(g, p); | 8926 | }; |
8822 | return 0; | 8927 | |
8928 | return walk_tg_tree(tg_schedulable, tg_nop, &data); | ||
8823 | } | 8929 | } |
8824 | 8930 | ||
8825 | static int tg_set_bandwidth(struct task_group *tg, | 8931 | static int tg_set_bandwidth(struct task_group *tg, |
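The RT schedulability rewrite above sums per-group bandwidth as fixed-point ratios: to_ratio() now returns runtime/period scaled by 2^20 (up from 2^16) so small runtimes do not round away, and a request is rejected when a group exceeds the global ratio or its children's sum exceeds its own. A quick standalone check of the arithmetic; the 95%/1s figures are the usual global RT bandwidth defaults and the two children are hypothetical:

#include <inttypes.h>
#include <stdio.h>

/* runtime/period as a 20-bit fixed-point fraction; ~0 means "infinite". */
static uint64_t to_ratio(uint64_t period, uint64_t runtime)
{
	if (runtime == UINT64_MAX)
		return 1ULL << 20;
	return (runtime << 20) / period;
}

int main(void)
{
	/* Global RT bandwidth: 950000us runtime per 1000000us period. */
	uint64_t global = to_ratio(1000000, 950000);
	/* Two hypothetical children asking for 400ms each per second. */
	uint64_t sum = to_ratio(1000000, 400000) * 2;

	printf("global=%" PRIu64 " children=%" PRIu64 " -> %s\n",
	       global, sum, sum <= global ? "schedulable" : "rejected");
	return 0;
}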
@@ -8829,14 +8935,9 @@ static int tg_set_bandwidth(struct task_group *tg, | |||
8829 | 8935 | ||
8830 | mutex_lock(&rt_constraints_mutex); | 8936 | mutex_lock(&rt_constraints_mutex); |
8831 | read_lock(&tasklist_lock); | 8937 | read_lock(&tasklist_lock); |
8832 | if (rt_runtime == 0 && tg_has_rt_tasks(tg)) { | 8938 | err = __rt_schedulable(tg, rt_period, rt_runtime); |
8833 | err = -EBUSY; | 8939 | if (err) |
8834 | goto unlock; | ||
8835 | } | ||
8836 | if (!__rt_schedulable(tg, rt_period, rt_runtime)) { | ||
8837 | err = -EINVAL; | ||
8838 | goto unlock; | 8940 | goto unlock; |
8839 | } | ||
8840 | 8941 | ||
8841 | spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); | 8942 | spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); |
8842 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); | 8943 | tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); |
@@ -8905,16 +9006,25 @@ long sched_group_rt_period(struct task_group *tg) | |||
8905 | 9006 | ||
8906 | static int sched_rt_global_constraints(void) | 9007 | static int sched_rt_global_constraints(void) |
8907 | { | 9008 | { |
8908 | struct task_group *tg = &root_task_group; | 9009 | u64 runtime, period; |
8909 | u64 rt_runtime, rt_period; | ||
8910 | int ret = 0; | 9010 | int ret = 0; |
8911 | 9011 | ||
8912 | rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); | 9012 | if (sysctl_sched_rt_period <= 0) |
8913 | rt_runtime = tg->rt_bandwidth.rt_runtime; | 9013 | return -EINVAL; |
9014 | |||
9015 | runtime = global_rt_runtime(); | ||
9016 | period = global_rt_period(); | ||
9017 | |||
9018 | /* | ||
9019 | * Sanity check on the sysctl variables. | ||
9020 | */ | ||
9021 | if (runtime > period && runtime != RUNTIME_INF) | ||
9022 | return -EINVAL; | ||
8914 | 9023 | ||
8915 | mutex_lock(&rt_constraints_mutex); | 9024 | mutex_lock(&rt_constraints_mutex); |
8916 | if (!__rt_schedulable(tg, rt_period, rt_runtime)) | 9025 | read_lock(&tasklist_lock); |
8917 | ret = -EINVAL; | 9026 | ret = __rt_schedulable(NULL, 0, 0); |
9027 | read_unlock(&tasklist_lock); | ||
8918 | mutex_unlock(&rt_constraints_mutex); | 9028 | mutex_unlock(&rt_constraints_mutex); |
8919 | 9029 | ||
8920 | return ret; | 9030 | return ret; |
@@ -8925,6 +9035,9 @@ static int sched_rt_global_constraints(void) | |||
8925 | unsigned long flags; | 9035 | unsigned long flags; |
8926 | int i; | 9036 | int i; |
8927 | 9037 | ||
9038 | if (sysctl_sched_rt_period <= 0) | ||
9039 | return -EINVAL; | ||
9040 | |||
8928 | spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); | 9041 | spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); |
8929 | for_each_possible_cpu(i) { | 9042 | for_each_possible_cpu(i) { |
8930 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; | 9043 | struct rt_rq *rt_rq = &cpu_rq(i)->rt; |
@@ -8985,7 +9098,6 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
8985 | 9098 | ||
8986 | if (!cgrp->parent) { | 9099 | if (!cgrp->parent) { |
8987 | /* This is early initialization for the top cgroup */ | 9100 | /* This is early initialization for the top cgroup */ |
8988 | init_task_group.css.cgroup = cgrp; | ||
8989 | return &init_task_group.css; | 9101 | return &init_task_group.css; |
8990 | } | 9102 | } |
8991 | 9103 | ||
@@ -8994,9 +9106,6 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp) | |||
8994 | if (IS_ERR(tg)) | 9106 | if (IS_ERR(tg)) |
8995 | return ERR_PTR(-ENOMEM); | 9107 | return ERR_PTR(-ENOMEM); |
8996 | 9108 | ||
8997 | /* Bind the cgroup to task_group object we just created */ | ||
8998 | tg->css.cgroup = cgrp; | ||
8999 | |||
9000 | return &tg->css; | 9109 | return &tg->css; |
9001 | } | 9110 | } |
9002 | 9111 | ||
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index bbe6b31c3c56..ad958c1ec708 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c | |||
@@ -333,12 +333,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) | |||
333 | unsigned long flags; | 333 | unsigned long flags; |
334 | int num_threads = 1; | 334 | int num_threads = 1; |
335 | 335 | ||
336 | rcu_read_lock(); | ||
337 | if (lock_task_sighand(p, &flags)) { | 336 | if (lock_task_sighand(p, &flags)) { |
338 | num_threads = atomic_read(&p->signal->count); | 337 | num_threads = atomic_read(&p->signal->count); |
339 | unlock_task_sighand(p, &flags); | 338 | unlock_task_sighand(p, &flags); |
340 | } | 339 | } |
341 | rcu_read_unlock(); | ||
342 | 340 | ||
343 | SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads); | 341 | SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads); |
344 | SEQ_printf(m, | 342 | SEQ_printf(m, |
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index fb8994c6d4bb..18fd17172eb6 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c | |||
@@ -409,64 +409,6 @@ static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
409 | } | 409 | } |
410 | 410 | ||
411 | /* | 411 | /* |
412 | * The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in | ||
413 | * that it favours >=0 over <0. | ||
414 | * | ||
415 | * -20 | | ||
416 | * | | ||
417 | * 0 --------+------- | ||
418 | * .' | ||
419 | * 19 .' | ||
420 | * | ||
421 | */ | ||
422 | static unsigned long | ||
423 | calc_delta_asym(unsigned long delta, struct sched_entity *se) | ||
424 | { | ||
425 | struct load_weight lw = { | ||
426 | .weight = NICE_0_LOAD, | ||
427 | .inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT) | ||
428 | }; | ||
429 | |||
430 | for_each_sched_entity(se) { | ||
431 | struct load_weight *se_lw = &se->load; | ||
432 | unsigned long rw = cfs_rq_of(se)->load.weight; | ||
433 | |||
434 | #ifdef CONFIG_FAIR_SCHED_GROUP | ||
435 | struct cfs_rq *cfs_rq = se->my_q; | ||
436 | struct task_group *tg = NULL | ||
437 | |||
438 | if (cfs_rq) | ||
439 | tg = cfs_rq->tg; | ||
440 | |||
441 | if (tg && tg->shares < NICE_0_LOAD) { | ||
442 | /* | ||
443 | * scale shares to what it would have been had | ||
444 | * tg->weight been NICE_0_LOAD: | ||
445 | * | ||
446 | * weight = 1024 * shares / tg->weight | ||
447 | */ | ||
448 | lw.weight *= se->load.weight; | ||
449 | lw.weight /= tg->shares; | ||
450 | |||
451 | lw.inv_weight = 0; | ||
452 | |||
453 | se_lw = &lw; | ||
454 | rw += lw.weight - se->load.weight; | ||
455 | } else | ||
456 | #endif | ||
457 | |||
458 | if (se->load.weight < NICE_0_LOAD) { | ||
459 | se_lw = &lw; | ||
460 | rw += NICE_0_LOAD - se->load.weight; | ||
461 | } | ||
462 | |||
463 | delta = calc_delta_mine(delta, rw, se_lw); | ||
464 | } | ||
465 | |||
466 | return delta; | ||
467 | } | ||
468 | |||
469 | /* | ||
470 | * Update the current task's runtime statistics. Skip current tasks that | 412 | * Update the current task's runtime statistics. Skip current tasks that |
471 | * are not in our scheduling class. | 413 | * are not in our scheduling class. |
472 | */ | 414 | */ |
@@ -586,11 +528,12 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
586 | update_load_add(&cfs_rq->load, se->load.weight); | 528 | update_load_add(&cfs_rq->load, se->load.weight); |
587 | if (!parent_entity(se)) | 529 | if (!parent_entity(se)) |
588 | inc_cpu_load(rq_of(cfs_rq), se->load.weight); | 530 | inc_cpu_load(rq_of(cfs_rq), se->load.weight); |
589 | if (entity_is_task(se)) | 531 | if (entity_is_task(se)) { |
590 | add_cfs_task_weight(cfs_rq, se->load.weight); | 532 | add_cfs_task_weight(cfs_rq, se->load.weight); |
533 | list_add(&se->group_node, &cfs_rq->tasks); | ||
534 | } | ||
591 | cfs_rq->nr_running++; | 535 | cfs_rq->nr_running++; |
592 | se->on_rq = 1; | 536 | se->on_rq = 1; |
593 | list_add(&se->group_node, &cfs_rq->tasks); | ||
594 | } | 537 | } |
595 | 538 | ||
596 | static void | 539 | static void |
@@ -599,11 +542,12 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) | |||
599 | update_load_sub(&cfs_rq->load, se->load.weight); | 542 | update_load_sub(&cfs_rq->load, se->load.weight); |
600 | if (!parent_entity(se)) | 543 | if (!parent_entity(se)) |
601 | dec_cpu_load(rq_of(cfs_rq), se->load.weight); | 544 | dec_cpu_load(rq_of(cfs_rq), se->load.weight); |
602 | if (entity_is_task(se)) | 545 | if (entity_is_task(se)) { |
603 | add_cfs_task_weight(cfs_rq, -se->load.weight); | 546 | add_cfs_task_weight(cfs_rq, -se->load.weight); |
547 | list_del_init(&se->group_node); | ||
548 | } | ||
604 | cfs_rq->nr_running--; | 549 | cfs_rq->nr_running--; |
605 | se->on_rq = 0; | 550 | se->on_rq = 0; |
606 | list_del_init(&se->group_node); | ||
607 | } | 551 | } |
608 | 552 | ||
609 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) | 553 | static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) |
@@ -1085,7 +1029,6 @@ static long effective_load(struct task_group *tg, int cpu, | |||
1085 | long wl, long wg) | 1029 | long wl, long wg) |
1086 | { | 1030 | { |
1087 | struct sched_entity *se = tg->se[cpu]; | 1031 | struct sched_entity *se = tg->se[cpu]; |
1088 | long more_w; | ||
1089 | 1032 | ||
1090 | if (!tg->parent) | 1033 | if (!tg->parent) |
1091 | return wl; | 1034 | return wl; |
@@ -1097,18 +1040,17 @@ static long effective_load(struct task_group *tg, int cpu, | |||
1097 | if (!wl && sched_feat(ASYM_EFF_LOAD)) | 1040 | if (!wl && sched_feat(ASYM_EFF_LOAD)) |
1098 | return wl; | 1041 | return wl; |
1099 | 1042 | ||
1100 | /* | ||
1101 | * Instead of using this increment, also add the difference | ||
1102 | * between when the shares were last updated and now. | ||
1103 | */ | ||
1104 | more_w = se->my_q->load.weight - se->my_q->rq_weight; | ||
1105 | wl += more_w; | ||
1106 | wg += more_w; | ||
1107 | |||
1108 | for_each_sched_entity(se) { | 1043 | for_each_sched_entity(se) { |
1109 | #define D(n) (likely(n) ? (n) : 1) | ||
1110 | |||
1111 | long S, rw, s, a, b; | 1044 | long S, rw, s, a, b; |
1045 | long more_w; | ||
1046 | |||
1047 | /* | ||
1048 | * Instead of using this increment, also add the difference | ||
1049 | * between when the shares were last updated and now. | ||
1050 | */ | ||
1051 | more_w = se->my_q->load.weight - se->my_q->rq_weight; | ||
1052 | wl += more_w; | ||
1053 | wg += more_w; | ||
1112 | 1054 | ||
1113 | S = se->my_q->tg->shares; | 1055 | S = se->my_q->tg->shares; |
1114 | s = se->my_q->shares; | 1056 | s = se->my_q->shares; |
@@ -1117,7 +1059,11 @@ static long effective_load(struct task_group *tg, int cpu, | |||
1117 | a = S*(rw + wl); | 1059 | a = S*(rw + wl); |
1118 | b = S*rw + s*wg; | 1060 | b = S*rw + s*wg; |
1119 | 1061 | ||
1120 | wl = s*(a-b)/D(b); | 1062 | wl = s*(a-b); |
1063 | |||
1064 | if (likely(b)) | ||
1065 | wl /= b; | ||
1066 | |||
1121 | /* | 1067 | /* |
1122 | * Assume the group is already running and will | 1068 | * Assume the group is already running and will |
1123 | * thus already be accounted for in the weight. | 1069 | * thus already be accounted for in the weight. |
@@ -1126,7 +1072,6 @@ static long effective_load(struct task_group *tg, int cpu, | |||
1126 | * alter the group weight. | 1072 | * alter the group weight. |
1127 | */ | 1073 | */ |
1128 | wg = 0; | 1074 | wg = 0; |
1129 | #undef D | ||
1130 | } | 1075 | } |
1131 | 1076 | ||
1132 | return wl; | 1077 | return wl; |
@@ -1143,7 +1088,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu, | |||
1143 | #endif | 1088 | #endif |
1144 | 1089 | ||
1145 | static int | 1090 | static int |
1146 | wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq, | 1091 | wake_affine(struct sched_domain *this_sd, struct rq *this_rq, |
1147 | struct task_struct *p, int prev_cpu, int this_cpu, int sync, | 1092 | struct task_struct *p, int prev_cpu, int this_cpu, int sync, |
1148 | int idx, unsigned long load, unsigned long this_load, | 1093 | int idx, unsigned long load, unsigned long this_load, |
1149 | unsigned int imbalance) | 1094 | unsigned int imbalance) |
@@ -1158,6 +1103,11 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq, | |||
1158 | if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS)) | 1103 | if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS)) |
1159 | return 0; | 1104 | return 0; |
1160 | 1105 | ||
1106 | if (!sync && sched_feat(SYNC_WAKEUPS) && | ||
1107 | curr->se.avg_overlap < sysctl_sched_migration_cost && | ||
1108 | p->se.avg_overlap < sysctl_sched_migration_cost) | ||
1109 | sync = 1; | ||
1110 | |||
1161 | /* | 1111 | /* |
1162 | * If sync wakeup then subtract the (maximum possible) | 1112 | * If sync wakeup then subtract the (maximum possible) |
1163 | * effect of the currently running task from the load | 1113 | * effect of the currently running task from the load |
@@ -1182,17 +1132,14 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq, | |||
1182 | * a reasonable amount of time then attract this newly | 1132 | * a reasonable amount of time then attract this newly |
1183 | * woken task: | 1133 | * woken task: |
1184 | */ | 1134 | */ |
1185 | if (sync && balanced) { | 1135 | if (sync && balanced) |
1186 | if (curr->se.avg_overlap < sysctl_sched_migration_cost && | 1136 | return 1; |
1187 | p->se.avg_overlap < sysctl_sched_migration_cost) | ||
1188 | return 1; | ||
1189 | } | ||
1190 | 1137 | ||
1191 | schedstat_inc(p, se.nr_wakeups_affine_attempts); | 1138 | schedstat_inc(p, se.nr_wakeups_affine_attempts); |
1192 | tl_per_task = cpu_avg_load_per_task(this_cpu); | 1139 | tl_per_task = cpu_avg_load_per_task(this_cpu); |
1193 | 1140 | ||
1194 | if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) || | 1141 | if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <= |
1195 | balanced) { | 1142 | tl_per_task)) { |
1196 | /* | 1143 | /* |
1197 | * This domain has SD_WAKE_AFFINE and | 1144 | * This domain has SD_WAKE_AFFINE and |
1198 | * p is cache cold in this domain, and | 1145 | * p is cache cold in this domain, and |
@@ -1211,16 +1158,17 @@ static int select_task_rq_fair(struct task_struct *p, int sync) | |||
1211 | struct sched_domain *sd, *this_sd = NULL; | 1158 | struct sched_domain *sd, *this_sd = NULL; |
1212 | int prev_cpu, this_cpu, new_cpu; | 1159 | int prev_cpu, this_cpu, new_cpu; |
1213 | unsigned long load, this_load; | 1160 | unsigned long load, this_load; |
1214 | struct rq *rq, *this_rq; | 1161 | struct rq *this_rq; |
1215 | unsigned int imbalance; | 1162 | unsigned int imbalance; |
1216 | int idx; | 1163 | int idx; |
1217 | 1164 | ||
1218 | prev_cpu = task_cpu(p); | 1165 | prev_cpu = task_cpu(p); |
1219 | rq = task_rq(p); | ||
1220 | this_cpu = smp_processor_id(); | 1166 | this_cpu = smp_processor_id(); |
1221 | this_rq = cpu_rq(this_cpu); | 1167 | this_rq = cpu_rq(this_cpu); |
1222 | new_cpu = prev_cpu; | 1168 | new_cpu = prev_cpu; |
1223 | 1169 | ||
1170 | if (prev_cpu == this_cpu) | ||
1171 | goto out; | ||
1224 | /* | 1172 | /* |
1225 | * 'this_sd' is the first domain that both | 1173 | * 'this_sd' is the first domain that both |
1226 | * this_cpu and prev_cpu are present in: | 1174 | * this_cpu and prev_cpu are present in: |
@@ -1248,13 +1196,10 @@ static int select_task_rq_fair(struct task_struct *p, int sync) | |||
1248 | load = source_load(prev_cpu, idx); | 1196 | load = source_load(prev_cpu, idx); |
1249 | this_load = target_load(this_cpu, idx); | 1197 | this_load = target_load(this_cpu, idx); |
1250 | 1198 | ||
1251 | if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx, | 1199 | if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx, |
1252 | load, this_load, imbalance)) | 1200 | load, this_load, imbalance)) |
1253 | return this_cpu; | 1201 | return this_cpu; |
1254 | 1202 | ||
1255 | if (prev_cpu == this_cpu) | ||
1256 | goto out; | ||
1257 | |||
1258 | /* | 1203 | /* |
1259 | * Start passive balancing when half the imbalance_pct | 1204 | * Start passive balancing when half the imbalance_pct |
1260 | * limit is reached. | 1205 | * limit is reached. |
@@ -1281,62 +1226,20 @@ static unsigned long wakeup_gran(struct sched_entity *se) | |||
1281 | * + nice tasks. | 1226 | * + nice tasks. |
1282 | */ | 1227 | */ |
1283 | if (sched_feat(ASYM_GRAN)) | 1228 | if (sched_feat(ASYM_GRAN)) |
1284 | gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se); | 1229 | gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load); |
1285 | else | ||
1286 | gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se); | ||
1287 | 1230 | ||
1288 | return gran; | 1231 | return gran; |
1289 | } | 1232 | } |
1290 | 1233 | ||
1291 | /* | 1234 | /* |
1292 | * Should 'se' preempt 'curr'. | ||
1293 | * | ||
1294 | * |s1 | ||
1295 | * |s2 | ||
1296 | * |s3 | ||
1297 | * g | ||
1298 | * |<--->|c | ||
1299 | * | ||
1300 | * w(c, s1) = -1 | ||
1301 | * w(c, s2) = 0 | ||
1302 | * w(c, s3) = 1 | ||
1303 | * | ||
1304 | */ | ||
1305 | static int | ||
1306 | wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) | ||
1307 | { | ||
1308 | s64 gran, vdiff = curr->vruntime - se->vruntime; | ||
1309 | |||
1310 | if (vdiff < 0) | ||
1311 | return -1; | ||
1312 | |||
1313 | gran = wakeup_gran(curr); | ||
1314 | if (vdiff > gran) | ||
1315 | return 1; | ||
1316 | |||
1317 | return 0; | ||
1318 | } | ||
1319 | |||
1320 | /* return depth at which a sched entity is present in the hierarchy */ | ||
1321 | static inline int depth_se(struct sched_entity *se) | ||
1322 | { | ||
1323 | int depth = 0; | ||
1324 | |||
1325 | for_each_sched_entity(se) | ||
1326 | depth++; | ||
1327 | |||
1328 | return depth; | ||
1329 | } | ||
1330 | |||
1331 | /* | ||
1332 | * Preempt the current task with a newly woken task if needed: | 1235 | * Preempt the current task with a newly woken task if needed: |
1333 | */ | 1236 | */ |
1334 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) | 1237 | static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync) |
1335 | { | 1238 | { |
1336 | struct task_struct *curr = rq->curr; | 1239 | struct task_struct *curr = rq->curr; |
1337 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); | 1240 | struct cfs_rq *cfs_rq = task_cfs_rq(curr); |
1338 | struct sched_entity *se = &curr->se, *pse = &p->se; | 1241 | struct sched_entity *se = &curr->se, *pse = &p->se; |
1339 | int se_depth, pse_depth; | 1242 | s64 delta_exec; |
1340 | 1243 | ||
1341 | if (unlikely(rt_prio(p->prio))) { | 1244 | if (unlikely(rt_prio(p->prio))) { |
1342 | update_rq_clock(rq); | 1245 | update_rq_clock(rq); |
@@ -1351,6 +1254,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) | |||
1351 | cfs_rq_of(pse)->next = pse; | 1254 | cfs_rq_of(pse)->next = pse; |
1352 | 1255 | ||
1353 | /* | 1256 | /* |
1257 | * We can come here with TIF_NEED_RESCHED already set from new task | ||
1258 | * wake up path. | ||
1259 | */ | ||
1260 | if (test_tsk_need_resched(curr)) | ||
1261 | return; | ||
1262 | |||
1263 | /* | ||
1354 | * Batch tasks do not preempt (their preemption is driven by | 1264 | * Batch tasks do not preempt (their preemption is driven by |
1355 | * the tick): | 1265 | * the tick): |
1356 | */ | 1266 | */ |
@@ -1360,33 +1270,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) | |||
1360 | if (!sched_feat(WAKEUP_PREEMPT)) | 1270 | if (!sched_feat(WAKEUP_PREEMPT)) |
1361 | return; | 1271 | return; |
1362 | 1272 | ||
1363 | /* | 1273 | if (sched_feat(WAKEUP_OVERLAP) && (sync || |
1364 | * preemption test can be made between sibling entities who are in the | 1274 | (se->avg_overlap < sysctl_sched_migration_cost && |
1365 | * same cfs_rq i.e who have a common parent. Walk up the hierarchy of | 1275 | pse->avg_overlap < sysctl_sched_migration_cost))) { |
1366 | * both tasks until we find their ancestors who are siblings of common | 1276 | resched_task(curr); |
1367 | * parent. | 1277 | return; |
1368 | */ | ||
1369 | |||
1370 | /* First walk up until both entities are at same depth */ | ||
1371 | se_depth = depth_se(se); | ||
1372 | pse_depth = depth_se(pse); | ||
1373 | |||
1374 | while (se_depth > pse_depth) { | ||
1375 | se_depth--; | ||
1376 | se = parent_entity(se); | ||
1377 | } | ||
1378 | |||
1379 | while (pse_depth > se_depth) { | ||
1380 | pse_depth--; | ||
1381 | pse = parent_entity(pse); | ||
1382 | } | ||
1383 | |||
1384 | while (!is_same_group(se, pse)) { | ||
1385 | se = parent_entity(se); | ||
1386 | pse = parent_entity(pse); | ||
1387 | } | 1278 | } |
1388 | 1279 | ||
1389 | if (wakeup_preempt_entity(se, pse) == 1) | 1280 | delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime; |
1281 | if (delta_exec > wakeup_gran(pse)) | ||
1390 | resched_task(curr); | 1282 | resched_task(curr); |
1391 | } | 1283 | } |
1392 | 1284 | ||
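The rewritten check_preempt_wakeup() boils down to two tests: under WAKEUP_OVERLAP, sync wakeups or waker/wakee pairs whose avg_overlap is below the migration cost preempt immediately; otherwise the current task is preempted only once it has run more than one wakeup granularity since it last got the CPU. A standalone sketch of that rule; the constants and the *_model names are illustrative stand-ins, not the kernel's tunables:

/* Userspace model of the new wakeup-preemption rule. */
#include <stdio.h>
#include <stdint.h>

#define MIGRATION_COST_NS	500000		/* 0.5ms, illustrative */
#define WAKEUP_GRAN_NS		1000000		/* 1ms, illustrative   */

struct se_model {
	uint64_t sum_exec_runtime;
	uint64_t prev_sum_exec_runtime;
	uint64_t avg_overlap;
};

static int wakeup_preempts(const struct se_model *se,
			   const struct se_model *pse, int sync)
{
	/* WAKEUP_OVERLAP: sync wakeups, or two tasks that historically run
	 * only briefly after waking each other, preempt immediately. */
	if (sync || (se->avg_overlap < MIGRATION_COST_NS &&
		     pse->avg_overlap < MIGRATION_COST_NS))
		return 1;

	/* Otherwise preempt only once the current task has run more than
	 * one wakeup granularity since it last got the CPU. */
	return (se->sum_exec_runtime - se->prev_sum_exec_runtime) >
		WAKEUP_GRAN_NS;
}

int main(void)
{
	struct se_model curr  = { 3000000, 1500000, 2000000 };
	struct se_model woken = { 0, 0, 100000 };

	printf("%d\n", wakeup_preempts(&curr, &woken, 0));	/* 1: curr ran 1.5ms   */
	curr.prev_sum_exec_runtime = 2500000;
	printf("%d\n", wakeup_preempts(&curr, &woken, 0));	/* 0: only 0.5ms so far */
	printf("%d\n", wakeup_preempts(&curr, &woken, 1));	/* 1: sync wakeup       */
	return 0;
}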
@@ -1445,19 +1337,9 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next) | |||
1445 | if (next == &cfs_rq->tasks) | 1337 | if (next == &cfs_rq->tasks) |
1446 | return NULL; | 1338 | return NULL; |
1447 | 1339 | ||
1448 | /* Skip over entities that are not tasks */ | 1340 | se = list_entry(next, struct sched_entity, group_node); |
1449 | do { | 1341 | p = task_of(se); |
1450 | se = list_entry(next, struct sched_entity, group_node); | 1342 | cfs_rq->balance_iterator = next->next; |
1451 | next = next->next; | ||
1452 | } while (next != &cfs_rq->tasks && !entity_is_task(se)); | ||
1453 | |||
1454 | if (next == &cfs_rq->tasks) | ||
1455 | return NULL; | ||
1456 | |||
1457 | cfs_rq->balance_iterator = next; | ||
1458 | |||
1459 | if (entity_is_task(se)) | ||
1460 | p = task_of(se); | ||
1461 | 1343 | ||
1462 | return p; | 1344 | return p; |
1463 | } | 1345 | } |
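With only tasks left on cfs_rq->tasks, the iterator above no longer needs the "skip non-task entities" loop and simply returns the next element while remembering where to resume. A toy version over a plain NULL-terminated list; the kernel walks a circular intrusive list under the runqueue lock, and struct task_model is a stand-in:

/* Toy cursor-style iterator mirroring the simplified balance iterator. */
#include <stdio.h>
#include <stddef.h>

struct task_model {
	const char *name;
	struct task_model *next;
};

struct iter_model {
	struct task_model *head;	/* list of runnable tasks */
	struct task_model *cursor;	/* balance_iterator       */
};

static struct task_model *iter_next(struct iter_model *it)
{
	struct task_model *p = it->cursor;

	if (!p)
		return NULL;		/* end of the task list */

	it->cursor = p->next;		/* remember where to resume */
	return p;
}

int main(void)
{
	struct task_model c = { "C", NULL }, b = { "B", &c }, a = { "A", &b };
	struct iter_model it = { &a, &a };
	struct task_model *p;

	while ((p = iter_next(&it)))
		printf("%s\n", p->name);
	return 0;
}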
@@ -1507,7 +1389,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, | |||
1507 | rcu_read_lock(); | 1389 | rcu_read_lock(); |
1508 | update_h_load(busiest_cpu); | 1390 | update_h_load(busiest_cpu); |
1509 | 1391 | ||
1510 | list_for_each_entry(tg, &task_groups, list) { | 1392 | list_for_each_entry_rcu(tg, &task_groups, list) { |
1511 | struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu]; | 1393 | struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu]; |
1512 | unsigned long busiest_h_load = busiest_cfs_rq->h_load; | 1394 | unsigned long busiest_h_load = busiest_cfs_rq->h_load; |
1513 | unsigned long busiest_weight = busiest_cfs_rq->load.weight; | 1395 | unsigned long busiest_weight = busiest_cfs_rq->load.weight; |
@@ -1620,10 +1502,10 @@ static void task_new_fair(struct rq *rq, struct task_struct *p) | |||
1620 | * 'current' within the tree based on its new key value. | 1502 | * 'current' within the tree based on its new key value. |
1621 | */ | 1503 | */ |
1622 | swap(curr->vruntime, se->vruntime); | 1504 | swap(curr->vruntime, se->vruntime); |
1505 | resched_task(rq->curr); | ||
1623 | } | 1506 | } |
1624 | 1507 | ||
1625 | enqueue_task_fair(rq, p, 0); | 1508 | enqueue_task_fair(rq, p, 0); |
1626 | resched_task(rq->curr); | ||
1627 | } | 1509 | } |
1628 | 1510 | ||
1629 | /* | 1511 | /* |
@@ -1642,7 +1524,7 @@ static void prio_changed_fair(struct rq *rq, struct task_struct *p, | |||
1642 | if (p->prio > oldprio) | 1524 | if (p->prio > oldprio) |
1643 | resched_task(rq->curr); | 1525 | resched_task(rq->curr); |
1644 | } else | 1526 | } else |
1645 | check_preempt_curr(rq, p); | 1527 | check_preempt_curr(rq, p, 0); |
1646 | } | 1528 | } |
1647 | 1529 | ||
1648 | /* | 1530 | /* |
@@ -1659,7 +1541,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p, | |||
1659 | if (running) | 1541 | if (running) |
1660 | resched_task(rq->curr); | 1542 | resched_task(rq->curr); |
1661 | else | 1543 | else |
1662 | check_preempt_curr(rq, p); | 1544 | check_preempt_curr(rq, p, 0); |
1663 | } | 1545 | } |
1664 | 1546 | ||
1665 | /* Account for a task changing its policy or group. | 1547 | /* Account for a task changing its policy or group. |
diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 9353ca78154e..7c9e8f4a049f 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h | |||
@@ -11,3 +11,4 @@ SCHED_FEAT(ASYM_GRAN, 1) | |||
11 | SCHED_FEAT(LB_BIAS, 1) | 11 | SCHED_FEAT(LB_BIAS, 1) |
12 | SCHED_FEAT(LB_WAKEUP_UPDATE, 1) | 12 | SCHED_FEAT(LB_WAKEUP_UPDATE, 1) |
13 | SCHED_FEAT(ASYM_EFF_LOAD, 1) | 13 | SCHED_FEAT(ASYM_EFF_LOAD, 1) |
14 | SCHED_FEAT(WAKEUP_OVERLAP, 0) | ||
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c index 3a4f92dbbe66..dec4ccabe2f5 100644 --- a/kernel/sched_idletask.c +++ b/kernel/sched_idletask.c | |||
@@ -14,7 +14,7 @@ static int select_task_rq_idle(struct task_struct *p, int sync) | |||
14 | /* | 14 | /* |
15 | * Idle tasks are unconditionally rescheduled: | 15 | * Idle tasks are unconditionally rescheduled: |
16 | */ | 16 | */ |
17 | static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p) | 17 | static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync) |
18 | { | 18 | { |
19 | resched_task(rq->idle); | 19 | resched_task(rq->idle); |
20 | } | 20 | } |
@@ -76,7 +76,7 @@ static void switched_to_idle(struct rq *rq, struct task_struct *p, | |||
76 | if (running) | 76 | if (running) |
77 | resched_task(rq->curr); | 77 | resched_task(rq->curr); |
78 | else | 78 | else |
79 | check_preempt_curr(rq, p); | 79 | check_preempt_curr(rq, p, 0); |
80 | } | 80 | } |
81 | 81 | ||
82 | static void prio_changed_idle(struct rq *rq, struct task_struct *p, | 82 | static void prio_changed_idle(struct rq *rq, struct task_struct *p, |
@@ -93,7 +93,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p, | |||
93 | if (p->prio > oldprio) | 93 | if (p->prio > oldprio) |
94 | resched_task(rq->curr); | 94 | resched_task(rq->curr); |
95 | } else | 95 | } else |
96 | check_preempt_curr(rq, p); | 96 | check_preempt_curr(rq, p, 0); |
97 | } | 97 | } |
98 | 98 | ||
99 | /* | 99 | /* |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 552310798dad..cdf5740ab03e 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -102,12 +102,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se); | |||
102 | 102 | ||
103 | static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) | 103 | static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) |
104 | { | 104 | { |
105 | struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; | ||
105 | struct sched_rt_entity *rt_se = rt_rq->rt_se; | 106 | struct sched_rt_entity *rt_se = rt_rq->rt_se; |
106 | 107 | ||
107 | if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) { | 108 | if (rt_rq->rt_nr_running) { |
108 | struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; | 109 | if (rt_se && !on_rt_rq(rt_se)) |
109 | 110 | enqueue_rt_entity(rt_se); | |
110 | enqueue_rt_entity(rt_se); | ||
111 | if (rt_rq->highest_prio < curr->prio) | 111 | if (rt_rq->highest_prio < curr->prio) |
112 | resched_task(curr); | 112 | resched_task(curr); |
113 | } | 113 | } |
@@ -231,6 +231,9 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) | |||
231 | #endif /* CONFIG_RT_GROUP_SCHED */ | 231 | #endif /* CONFIG_RT_GROUP_SCHED */ |
232 | 232 | ||
233 | #ifdef CONFIG_SMP | 233 | #ifdef CONFIG_SMP |
234 | /* | ||
235 | * We ran out of runtime, see if we can borrow some from our neighbours. | ||
236 | */ | ||
234 | static int do_balance_runtime(struct rt_rq *rt_rq) | 237 | static int do_balance_runtime(struct rt_rq *rt_rq) |
235 | { | 238 | { |
236 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | 239 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); |
@@ -250,9 +253,18 @@ static int do_balance_runtime(struct rt_rq *rt_rq) | |||
250 | continue; | 253 | continue; |
251 | 254 | ||
252 | spin_lock(&iter->rt_runtime_lock); | 255 | spin_lock(&iter->rt_runtime_lock); |
256 | /* | ||
257 | * Either all rqs have inf runtime and there's nothing to steal | ||
258 | * or __disable_runtime() below sets a specific rq to inf to | ||
259 | * indicate it's been disabled and disallow stealing. | ||
260 | */ | ||
253 | if (iter->rt_runtime == RUNTIME_INF) | 261 | if (iter->rt_runtime == RUNTIME_INF) |
254 | goto next; | 262 | goto next; |
255 | 263 | ||
264 | /* | ||
265 | * From runqueues with spare time, take 1/n part of their | ||
266 | * spare time, but no more than our period. | ||
267 | */ | ||
256 | diff = iter->rt_runtime - iter->rt_time; | 268 | diff = iter->rt_runtime - iter->rt_time; |
257 | if (diff > 0) { | 269 | if (diff > 0) { |
258 | diff = div_u64((u64)diff, weight); | 270 | diff = div_u64((u64)diff, weight); |
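The new comments spell out the borrowing step: a donor runqueue with spare time (rt_runtime - rt_time > 0) gives up 1/n of that spare, where n is the weight of the span, and the borrower is clamped so it never exceeds its own period. A self-contained sketch of the arithmetic, with the locking and the exact clamp details simplified:

/* Model of one borrowing step from a single donor runqueue. */
#include <stdio.h>
#include <stdint.h>

static uint64_t borrow_from(uint64_t donor_runtime, uint64_t donor_time,
			    uint64_t weight, uint64_t my_runtime,
			    uint64_t my_period)
{
	int64_t diff = (int64_t)(donor_runtime - donor_time);	/* donor's spare */

	if (diff <= 0)
		return 0;

	diff /= weight;					/* take 1/n of it   */
	if (my_runtime + diff > my_period)		/* ...but no more   */
		diff = my_period - my_runtime;		/* than our period  */

	return (uint64_t)diff;
}

int main(void)
{
	/* 4 CPUs in the domain; the donor has 30ms of unused runtime. */
	uint64_t got = borrow_from(95000000ULL, 65000000ULL, 4,
				   95000000ULL, 100000000ULL);

	printf("borrowed %llu ns\n", (unsigned long long)got);	/* 5000000 */
	return 0;
}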
@@ -274,6 +286,9 @@ next: | |||
274 | return more; | 286 | return more; |
275 | } | 287 | } |
276 | 288 | ||
289 | /* | ||
290 | * Ensure this RQ takes back all the runtime it lent to its neighbours. | ||
291 | */ | ||
277 | static void __disable_runtime(struct rq *rq) | 292 | static void __disable_runtime(struct rq *rq) |
278 | { | 293 | { |
279 | struct root_domain *rd = rq->rd; | 294 | struct root_domain *rd = rq->rd; |
@@ -289,17 +304,33 @@ static void __disable_runtime(struct rq *rq) | |||
289 | 304 | ||
290 | spin_lock(&rt_b->rt_runtime_lock); | 305 | spin_lock(&rt_b->rt_runtime_lock); |
291 | spin_lock(&rt_rq->rt_runtime_lock); | 306 | spin_lock(&rt_rq->rt_runtime_lock); |
307 | /* | ||
308 | * Either we're all inf and nobody needs to borrow, or we're | ||
309 | * already disabled and thus have nothing to do, or we have | ||
310 | * exactly the right amount of runtime to take out. | ||
311 | */ | ||
292 | if (rt_rq->rt_runtime == RUNTIME_INF || | 312 | if (rt_rq->rt_runtime == RUNTIME_INF || |
293 | rt_rq->rt_runtime == rt_b->rt_runtime) | 313 | rt_rq->rt_runtime == rt_b->rt_runtime) |
294 | goto balanced; | 314 | goto balanced; |
295 | spin_unlock(&rt_rq->rt_runtime_lock); | 315 | spin_unlock(&rt_rq->rt_runtime_lock); |
296 | 316 | ||
317 | /* | ||
318 | * Calculate the difference between what we started out with | ||
319 | * and what we currently have; that's the amount of runtime | ||
320 | * we lent out and now have to reclaim. | ||
321 | */ | ||
297 | want = rt_b->rt_runtime - rt_rq->rt_runtime; | 322 | want = rt_b->rt_runtime - rt_rq->rt_runtime; |
298 | 323 | ||
324 | /* | ||
325 | * Greedy reclaim, take back as much as we can. | ||
326 | */ | ||
299 | for_each_cpu_mask(i, rd->span) { | 327 | for_each_cpu_mask(i, rd->span) { |
300 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); | 328 | struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); |
301 | s64 diff; | 329 | s64 diff; |
302 | 330 | ||
331 | /* | ||
332 | * Can't reclaim from ourselves or disabled runqueues. | ||
333 | */ | ||
303 | if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) | 334 | if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) |
304 | continue; | 335 | continue; |
305 | 336 | ||
@@ -319,8 +350,16 @@ static void __disable_runtime(struct rq *rq) | |||
319 | } | 350 | } |
320 | 351 | ||
321 | spin_lock(&rt_rq->rt_runtime_lock); | 352 | spin_lock(&rt_rq->rt_runtime_lock); |
353 | /* | ||
354 | * We cannot be left wanting - that would mean some runtime | ||
355 | * leaked out of the system. | ||
356 | */ | ||
322 | BUG_ON(want); | 357 | BUG_ON(want); |
323 | balanced: | 358 | balanced: |
359 | /* | ||
360 | * Disable all the borrow logic by pretending we have inf | ||
361 | * runtime - in which case borrowing doesn't make sense. | ||
362 | */ | ||
324 | rt_rq->rt_runtime = RUNTIME_INF; | 363 | rt_rq->rt_runtime = RUNTIME_INF; |
325 | spin_unlock(&rt_rq->rt_runtime_lock); | 364 | spin_unlock(&rt_rq->rt_runtime_lock); |
326 | spin_unlock(&rt_b->rt_runtime_lock); | 365 | spin_unlock(&rt_b->rt_runtime_lock); |
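The reclaim side computes want = rt_b->rt_runtime - rt_rq->rt_runtime (what was lent out) and greedily takes it back from the other runqueues until nothing is left, which is what the BUG_ON(want) above asserts. A toy model of that accounting; the donors[] array and the min()-style step stand in for the locked walk over rd->span:

/* Toy model of the greedy reclaim described in the comments above. */
#include <stdio.h>
#include <stdint.h>

#define NR_DONORS 3

int main(void)
{
	int64_t full_runtime = 95000000;	/* what we started with */
	int64_t cur_runtime  = 80000000;	/* what we have left    */
	int64_t want = full_runtime - cur_runtime;	/* lent out: 15ms */
	int64_t donors[NR_DONORS] = { 4000000, 20000000, 6000000 };

	for (int i = 0; i < NR_DONORS && want > 0; i++) {
		int64_t diff = donors[i] < want ? donors[i] : want;

		donors[i] -= diff;
		want -= diff;
	}

	/* Mirrors BUG_ON(want): everything we lent out must come back. */
	printf("still wanting: %lld ns\n", (long long)want);
	return 0;
}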
@@ -343,6 +382,9 @@ static void __enable_runtime(struct rq *rq) | |||
343 | if (unlikely(!scheduler_running)) | 382 | if (unlikely(!scheduler_running)) |
344 | return; | 383 | return; |
345 | 384 | ||
385 | /* | ||
386 | * Reset each runqueue's bandwidth settings | ||
387 | */ | ||
346 | for_each_leaf_rt_rq(rt_rq, rq) { | 388 | for_each_leaf_rt_rq(rt_rq, rq) { |
347 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); | 389 | struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); |
348 | 390 | ||
@@ -350,6 +392,7 @@ static void __enable_runtime(struct rq *rq) | |||
350 | spin_lock(&rt_rq->rt_runtime_lock); | 392 | spin_lock(&rt_rq->rt_runtime_lock); |
351 | rt_rq->rt_runtime = rt_b->rt_runtime; | 393 | rt_rq->rt_runtime = rt_b->rt_runtime; |
352 | rt_rq->rt_time = 0; | 394 | rt_rq->rt_time = 0; |
395 | rt_rq->rt_throttled = 0; | ||
353 | spin_unlock(&rt_rq->rt_runtime_lock); | 396 | spin_unlock(&rt_rq->rt_runtime_lock); |
354 | spin_unlock(&rt_b->rt_runtime_lock); | 397 | spin_unlock(&rt_b->rt_runtime_lock); |
355 | } | 398 | } |
@@ -388,7 +431,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) | |||
388 | int i, idle = 1; | 431 | int i, idle = 1; |
389 | cpumask_t span; | 432 | cpumask_t span; |
390 | 433 | ||
391 | if (rt_b->rt_runtime == RUNTIME_INF) | 434 | if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) |
392 | return 1; | 435 | return 1; |
393 | 436 | ||
394 | span = sched_rt_period_mask(); | 437 | span = sched_rt_period_mask(); |
@@ -486,6 +529,9 @@ static void update_curr_rt(struct rq *rq) | |||
486 | curr->se.exec_start = rq->clock; | 529 | curr->se.exec_start = rq->clock; |
487 | cpuacct_charge(curr, delta_exec); | 530 | cpuacct_charge(curr, delta_exec); |
488 | 531 | ||
532 | if (!rt_bandwidth_enabled()) | ||
533 | return; | ||
534 | |||
489 | for_each_sched_rt_entity(rt_se) { | 535 | for_each_sched_rt_entity(rt_se) { |
490 | rt_rq = rt_rq_of_se(rt_se); | 536 | rt_rq = rt_rq_of_se(rt_se); |
491 | 537 | ||
@@ -783,7 +829,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) | |||
783 | /* | 829 | /* |
784 | * Preempt the current task with a newly woken task if needed: | 830 | * Preempt the current task with a newly woken task if needed: |
785 | */ | 831 | */ |
786 | static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p) | 832 | static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync) |
787 | { | 833 | { |
788 | if (p->prio < rq->curr->prio) { | 834 | if (p->prio < rq->curr->prio) { |
789 | resched_task(rq->curr); | 835 | resched_task(rq->curr); |
diff --git a/kernel/softirq.c b/kernel/softirq.c index c506f266a6b9..be7a8292f992 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -46,7 +46,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned; | |||
46 | EXPORT_SYMBOL(irq_stat); | 46 | EXPORT_SYMBOL(irq_stat); |
47 | #endif | 47 | #endif |
48 | 48 | ||
49 | static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp; | 49 | static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; |
50 | 50 | ||
51 | static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); | 51 | static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); |
52 | 52 | ||
@@ -205,7 +205,18 @@ restart: | |||
205 | 205 | ||
206 | do { | 206 | do { |
207 | if (pending & 1) { | 207 | if (pending & 1) { |
208 | int prev_count = preempt_count(); | ||
209 | |||
208 | h->action(h); | 210 | h->action(h); |
211 | |||
212 | if (unlikely(prev_count != preempt_count())) { | ||
213 | printk(KERN_ERR "huh, entered softirq %d %p " | ||
214 | "with preempt_count %08x," | ||
215 | " exited with %08x?\n", h - softirq_vec, | ||
216 | h->action, prev_count, preempt_count()); | ||
217 | preempt_count() = prev_count; | ||
218 | } | ||
219 | |||
209 | rcu_bh_qsctr_inc(cpu); | 220 | rcu_bh_qsctr_inc(cpu); |
210 | } | 221 | } |
211 | h++; | 222 | h++; |
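The new check snapshots preempt_count() before each softirq handler and, if the handler returns with it unbalanced, logs the mismatch and repairs the count rather than letting the imbalance propagate. A userspace model of the same idea, with a plain global int standing in for the per-CPU preempt count:

/* Model of the preempt_count imbalance check added above. */
#include <stdio.h>

static int fake_preempt_count;

static void good_handler(void)  { }
static void leaky_handler(void) { fake_preempt_count++; /* forgot to drop a lock */ }

static void run_handler(const char *name, void (*action)(void))
{
	int prev_count = fake_preempt_count;

	action();

	if (prev_count != fake_preempt_count) {
		fprintf(stderr,
			"huh, entered %s with preempt_count %08x, exited with %08x?\n",
			name, prev_count, fake_preempt_count);
		fake_preempt_count = prev_count;	/* repair and carry on */
	}
}

int main(void)
{
	run_handler("good_handler", good_handler);
	run_handler("leaky_handler", leaky_handler);
	return 0;
}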
diff --git a/kernel/sys.c b/kernel/sys.c index 038a7bc0901d..234d9454294e 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1060,9 +1060,7 @@ asmlinkage long sys_setsid(void) | |||
1060 | group_leader->signal->leader = 1; | 1060 | group_leader->signal->leader = 1; |
1061 | __set_special_pids(sid); | 1061 | __set_special_pids(sid); |
1062 | 1062 | ||
1063 | spin_lock(&group_leader->sighand->siglock); | 1063 | proc_clear_tty(group_leader); |
1064 | group_leader->signal->tty = NULL; | ||
1065 | spin_unlock(&group_leader->sighand->siglock); | ||
1066 | 1064 | ||
1067 | err = session; | 1065 | err = session; |
1068 | out: | 1066 | out: |
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 08d6e1bb99ac..503d8d4eb80a 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c | |||
@@ -125,6 +125,7 @@ cond_syscall(sys_vm86old); | |||
125 | cond_syscall(sys_vm86); | 125 | cond_syscall(sys_vm86); |
126 | cond_syscall(compat_sys_ipc); | 126 | cond_syscall(compat_sys_ipc); |
127 | cond_syscall(compat_sys_sysctl); | 127 | cond_syscall(compat_sys_sysctl); |
128 | cond_syscall(sys_flock); | ||
128 | 129 | ||
129 | /* arch-specific weak syscall entries */ | 130 | /* arch-specific weak syscall entries */ |
130 | cond_syscall(sys_pciconfig_read); | 131 | cond_syscall(sys_pciconfig_read); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 50ec0886fa3d..cfc5295f1e82 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -80,7 +80,6 @@ extern int pid_max_min, pid_max_max; | |||
80 | extern int sysctl_drop_caches; | 80 | extern int sysctl_drop_caches; |
81 | extern int percpu_pagelist_fraction; | 81 | extern int percpu_pagelist_fraction; |
82 | extern int compat_log; | 82 | extern int compat_log; |
83 | extern int maps_protect; | ||
84 | extern int latencytop_enabled; | 83 | extern int latencytop_enabled; |
85 | extern int sysctl_nr_open_min, sysctl_nr_open_max; | 84 | extern int sysctl_nr_open_min, sysctl_nr_open_max; |
86 | #ifdef CONFIG_RCU_TORTURE_TEST | 85 | #ifdef CONFIG_RCU_TORTURE_TEST |
@@ -97,7 +96,7 @@ static int sixty = 60; | |||
97 | static int neg_one = -1; | 96 | static int neg_one = -1; |
98 | #endif | 97 | #endif |
99 | 98 | ||
100 | #ifdef CONFIG_MMU | 99 | #if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING) |
101 | static int two = 2; | 100 | static int two = 2; |
102 | #endif | 101 | #endif |
103 | 102 | ||
@@ -118,10 +117,8 @@ extern char modprobe_path[]; | |||
118 | extern int sg_big_buff; | 117 | extern int sg_big_buff; |
119 | #endif | 118 | #endif |
120 | 119 | ||
121 | #ifdef __sparc__ | 120 | #ifdef CONFIG_SPARC |
122 | extern char reboot_command []; | 121 | #include <asm/system.h> |
123 | extern int stop_a_enabled; | ||
124 | extern int scons_pwroff; | ||
125 | #endif | 122 | #endif |
126 | 123 | ||
127 | #ifdef __hppa__ | 124 | #ifdef __hppa__ |
@@ -415,7 +412,7 @@ static struct ctl_table kern_table[] = { | |||
415 | .mode = 0644, | 412 | .mode = 0644, |
416 | .proc_handler = &proc_dointvec, | 413 | .proc_handler = &proc_dointvec, |
417 | }, | 414 | }, |
418 | #ifdef __sparc__ | 415 | #ifdef CONFIG_SPARC |
419 | { | 416 | { |
420 | .ctl_name = KERN_SPARC_REBOOT, | 417 | .ctl_name = KERN_SPARC_REBOOT, |
421 | .procname = "reboot-cmd", | 418 | .procname = "reboot-cmd", |
@@ -810,16 +807,6 @@ static struct ctl_table kern_table[] = { | |||
810 | .proc_handler = &proc_dointvec, | 807 | .proc_handler = &proc_dointvec, |
811 | }, | 808 | }, |
812 | #endif | 809 | #endif |
813 | #ifdef CONFIG_PROC_FS | ||
814 | { | ||
815 | .ctl_name = CTL_UNNUMBERED, | ||
816 | .procname = "maps_protect", | ||
817 | .data = &maps_protect, | ||
818 | .maxlen = sizeof(int), | ||
819 | .mode = 0644, | ||
820 | .proc_handler = &proc_dointvec, | ||
821 | }, | ||
822 | #endif | ||
823 | { | 810 | { |
824 | .ctl_name = CTL_UNNUMBERED, | 811 | .ctl_name = CTL_UNNUMBERED, |
825 | .procname = "poweroff_cmd", | 812 | .procname = "poweroff_cmd", |
@@ -1261,6 +1248,7 @@ static struct ctl_table fs_table[] = { | |||
1261 | .extra1 = &minolduid, | 1248 | .extra1 = &minolduid, |
1262 | .extra2 = &maxolduid, | 1249 | .extra2 = &maxolduid, |
1263 | }, | 1250 | }, |
1251 | #ifdef CONFIG_FILE_LOCKING | ||
1264 | { | 1252 | { |
1265 | .ctl_name = FS_LEASES, | 1253 | .ctl_name = FS_LEASES, |
1266 | .procname = "leases-enable", | 1254 | .procname = "leases-enable", |
@@ -1269,6 +1257,7 @@ static struct ctl_table fs_table[] = { | |||
1269 | .mode = 0644, | 1257 | .mode = 0644, |
1270 | .proc_handler = &proc_dointvec, | 1258 | .proc_handler = &proc_dointvec, |
1271 | }, | 1259 | }, |
1260 | #endif | ||
1272 | #ifdef CONFIG_DNOTIFY | 1261 | #ifdef CONFIG_DNOTIFY |
1273 | { | 1262 | { |
1274 | .ctl_name = FS_DIR_NOTIFY, | 1263 | .ctl_name = FS_DIR_NOTIFY, |
@@ -1280,6 +1269,7 @@ static struct ctl_table fs_table[] = { | |||
1280 | }, | 1269 | }, |
1281 | #endif | 1270 | #endif |
1282 | #ifdef CONFIG_MMU | 1271 | #ifdef CONFIG_MMU |
1272 | #ifdef CONFIG_FILE_LOCKING | ||
1283 | { | 1273 | { |
1284 | .ctl_name = FS_LEASE_TIME, | 1274 | .ctl_name = FS_LEASE_TIME, |
1285 | .procname = "lease-break-time", | 1275 | .procname = "lease-break-time", |
@@ -1291,6 +1281,7 @@ static struct ctl_table fs_table[] = { | |||
1291 | .extra1 = &zero, | 1281 | .extra1 = &zero, |
1292 | .extra2 = &two, | 1282 | .extra2 = &two, |
1293 | }, | 1283 | }, |
1284 | #endif | ||
1294 | { | 1285 | { |
1295 | .procname = "aio-nr", | 1286 | .procname = "aio-nr", |
1296 | .data = &aio_nr, | 1287 | .data = &aio_nr, |
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 1876b526c778..f8d968063cea 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c | |||
@@ -72,6 +72,16 @@ void clockevents_set_mode(struct clock_event_device *dev, | |||
72 | } | 72 | } |
73 | 73 | ||
74 | /** | 74 | /** |
75 | * clockevents_shutdown - shutdown the device and clear next_event | ||
76 | * @dev: device to shutdown | ||
77 | */ | ||
78 | void clockevents_shutdown(struct clock_event_device *dev) | ||
79 | { | ||
80 | clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN); | ||
81 | dev->next_event.tv64 = KTIME_MAX; | ||
82 | } | ||
83 | |||
84 | /** | ||
75 | * clockevents_program_event - Reprogram the clock event device. | 85 | * clockevents_program_event - Reprogram the clock event device. |
76 | * @expires: absolute expiry time (monotonic clock) | 86 | * @expires: absolute expiry time (monotonic clock) |
77 | * | 87 | * |
@@ -206,7 +216,7 @@ void clockevents_exchange_device(struct clock_event_device *old, | |||
206 | 216 | ||
207 | if (new) { | 217 | if (new) { |
208 | BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED); | 218 | BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED); |
209 | clockevents_set_mode(new, CLOCK_EVT_MODE_SHUTDOWN); | 219 | clockevents_shutdown(new); |
210 | } | 220 | } |
211 | local_irq_restore(flags); | 221 | local_irq_restore(flags); |
212 | } | 222 | } |
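clockevents_shutdown() simply folds the repeated "set SHUTDOWN mode, clear next_event" sequence into one helper so every caller stays consistent. A trimmed sketch of that refactor; struct clock_event_dev_model is a cut-down stand-in for struct clock_event_device:

/* Sketch of the shutdown helper extracted above. */
#include <stdio.h>
#include <stdint.h>

#define KTIME_MAX_MODEL INT64_MAX

enum clock_event_mode { CLOCK_EVT_MODE_PERIODIC, CLOCK_EVT_MODE_SHUTDOWN };

struct clock_event_dev_model {
	enum clock_event_mode mode;
	int64_t next_event;
};

static void set_mode(struct clock_event_dev_model *dev, enum clock_event_mode m)
{
	dev->mode = m;
}

static void shutdown(struct clock_event_dev_model *dev)
{
	set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	dev->next_event = KTIME_MAX_MODEL;	/* nothing pending anymore */
}

int main(void)
{
	struct clock_event_dev_model dev = { CLOCK_EVT_MODE_PERIODIC, 12345 };

	shutdown(&dev);
	printf("mode=%d next_event=%lld\n", dev.mode, (long long)dev.next_event);
	return 0;
}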
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 2f5a38294bf9..cb01cd8f919b 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -235,9 +235,9 @@ static void tick_do_broadcast_on_off(void *why) | |||
235 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: | 235 | case CLOCK_EVT_NOTIFY_BROADCAST_FORCE: |
236 | if (!cpu_isset(cpu, tick_broadcast_mask)) { | 236 | if (!cpu_isset(cpu, tick_broadcast_mask)) { |
237 | cpu_set(cpu, tick_broadcast_mask); | 237 | cpu_set(cpu, tick_broadcast_mask); |
238 | if (td->mode == TICKDEV_MODE_PERIODIC) | 238 | if (tick_broadcast_device.mode == |
239 | clockevents_set_mode(dev, | 239 | TICKDEV_MODE_PERIODIC) |
240 | CLOCK_EVT_MODE_SHUTDOWN); | 240 | clockevents_shutdown(dev); |
241 | } | 241 | } |
242 | if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) | 242 | if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE) |
243 | tick_broadcast_force = 1; | 243 | tick_broadcast_force = 1; |
@@ -246,7 +246,8 @@ static void tick_do_broadcast_on_off(void *why) | |||
246 | if (!tick_broadcast_force && | 246 | if (!tick_broadcast_force && |
247 | cpu_isset(cpu, tick_broadcast_mask)) { | 247 | cpu_isset(cpu, tick_broadcast_mask)) { |
248 | cpu_clear(cpu, tick_broadcast_mask); | 248 | cpu_clear(cpu, tick_broadcast_mask); |
249 | if (td->mode == TICKDEV_MODE_PERIODIC) | 249 | if (tick_broadcast_device.mode == |
250 | TICKDEV_MODE_PERIODIC) | ||
250 | tick_setup_periodic(dev, 0); | 251 | tick_setup_periodic(dev, 0); |
251 | } | 252 | } |
252 | break; | 253 | break; |
@@ -254,7 +255,7 @@ static void tick_do_broadcast_on_off(void *why) | |||
254 | 255 | ||
255 | if (cpus_empty(tick_broadcast_mask)) { | 256 | if (cpus_empty(tick_broadcast_mask)) { |
256 | if (!bc_stopped) | 257 | if (!bc_stopped) |
257 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | 258 | clockevents_shutdown(bc); |
258 | } else if (bc_stopped) { | 259 | } else if (bc_stopped) { |
259 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) | 260 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) |
260 | tick_broadcast_start_periodic(bc); | 261 | tick_broadcast_start_periodic(bc); |
@@ -306,7 +307,7 @@ void tick_shutdown_broadcast(unsigned int *cpup) | |||
306 | 307 | ||
307 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { | 308 | if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { |
308 | if (bc && cpus_empty(tick_broadcast_mask)) | 309 | if (bc && cpus_empty(tick_broadcast_mask)) |
309 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | 310 | clockevents_shutdown(bc); |
310 | } | 311 | } |
311 | 312 | ||
312 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 313 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
@@ -321,7 +322,7 @@ void tick_suspend_broadcast(void) | |||
321 | 322 | ||
322 | bc = tick_broadcast_device.evtdev; | 323 | bc = tick_broadcast_device.evtdev; |
323 | if (bc) | 324 | if (bc) |
324 | clockevents_set_mode(bc, CLOCK_EVT_MODE_SHUTDOWN); | 325 | clockevents_shutdown(bc); |
325 | 326 | ||
326 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 327 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
327 | } | 328 | } |
@@ -576,4 +577,12 @@ void tick_shutdown_broadcast_oneshot(unsigned int *cpup) | |||
576 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); | 577 | spin_unlock_irqrestore(&tick_broadcast_lock, flags); |
577 | } | 578 | } |
578 | 579 | ||
580 | /* | ||
581 | * Check whether the broadcast device is in oneshot mode | ||
582 | */ | ||
583 | int tick_broadcast_oneshot_active(void) | ||
584 | { | ||
585 | return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; | ||
586 | } | ||
587 | |||
579 | #endif | 588 | #endif |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index c4777193d567..df12434b43ca 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct tick_device, tick_cpu_device); | |||
33 | */ | 33 | */ |
34 | ktime_t tick_next_period; | 34 | ktime_t tick_next_period; |
35 | ktime_t tick_period; | 35 | ktime_t tick_period; |
36 | int tick_do_timer_cpu __read_mostly = -1; | 36 | int tick_do_timer_cpu __read_mostly = TICK_DO_TIMER_BOOT; |
37 | DEFINE_SPINLOCK(tick_device_lock); | 37 | DEFINE_SPINLOCK(tick_device_lock); |
38 | 38 | ||
39 | /* | 39 | /* |
@@ -109,7 +109,8 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) | |||
109 | if (!tick_device_is_functional(dev)) | 109 | if (!tick_device_is_functional(dev)) |
110 | return; | 110 | return; |
111 | 111 | ||
112 | if (dev->features & CLOCK_EVT_FEAT_PERIODIC) { | 112 | if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) && |
113 | !tick_broadcast_oneshot_active()) { | ||
113 | clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); | 114 | clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC); |
114 | } else { | 115 | } else { |
115 | unsigned long seq; | 116 | unsigned long seq; |
@@ -148,7 +149,7 @@ static void tick_setup_device(struct tick_device *td, | |||
148 | * If no cpu took the do_timer update, assign it to | 149 | * If no cpu took the do_timer update, assign it to |
149 | * this cpu: | 150 | * this cpu: |
150 | */ | 151 | */ |
151 | if (tick_do_timer_cpu == -1) { | 152 | if (tick_do_timer_cpu == TICK_DO_TIMER_BOOT) { |
152 | tick_do_timer_cpu = cpu; | 153 | tick_do_timer_cpu = cpu; |
153 | tick_next_period = ktime_get(); | 154 | tick_next_period = ktime_get(); |
154 | tick_period = ktime_set(0, NSEC_PER_SEC / HZ); | 155 | tick_period = ktime_set(0, NSEC_PER_SEC / HZ); |
@@ -249,7 +250,7 @@ static int tick_check_new_device(struct clock_event_device *newdev) | |||
249 | * not give it back to the clockevents layer ! | 250 | * not give it back to the clockevents layer ! |
250 | */ | 251 | */ |
251 | if (tick_is_broadcast_device(curdev)) { | 252 | if (tick_is_broadcast_device(curdev)) { |
252 | clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN); | 253 | clockevents_shutdown(curdev); |
253 | curdev = NULL; | 254 | curdev = NULL; |
254 | } | 255 | } |
255 | clockevents_exchange_device(curdev, newdev); | 256 | clockevents_exchange_device(curdev, newdev); |
@@ -300,7 +301,8 @@ static void tick_shutdown(unsigned int *cpup) | |||
300 | if (*cpup == tick_do_timer_cpu) { | 301 | if (*cpup == tick_do_timer_cpu) { |
301 | int cpu = first_cpu(cpu_online_map); | 302 | int cpu = first_cpu(cpu_online_map); |
302 | 303 | ||
303 | tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : -1; | 304 | tick_do_timer_cpu = (cpu != NR_CPUS) ? cpu : |
305 | TICK_DO_TIMER_NONE; | ||
304 | } | 306 | } |
305 | spin_unlock_irqrestore(&tick_device_lock, flags); | 307 | spin_unlock_irqrestore(&tick_device_lock, flags); |
306 | } | 308 | } |
@@ -311,7 +313,7 @@ static void tick_suspend(void) | |||
311 | unsigned long flags; | 313 | unsigned long flags; |
312 | 314 | ||
313 | spin_lock_irqsave(&tick_device_lock, flags); | 315 | spin_lock_irqsave(&tick_device_lock, flags); |
314 | clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_SHUTDOWN); | 316 | clockevents_shutdown(td->evtdev); |
315 | spin_unlock_irqrestore(&tick_device_lock, flags); | 317 | spin_unlock_irqrestore(&tick_device_lock, flags); |
316 | } | 318 | } |
317 | 319 | ||
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 0ffc2918ea6f..469248782c23 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
@@ -1,6 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * tick internal variable and functions used by low/high res code | 2 | * tick internal variable and functions used by low/high res code |
3 | */ | 3 | */ |
4 | |||
5 | #define TICK_DO_TIMER_NONE -1 | ||
6 | #define TICK_DO_TIMER_BOOT -2 | ||
7 | |||
4 | DECLARE_PER_CPU(struct tick_device, tick_cpu_device); | 8 | DECLARE_PER_CPU(struct tick_device, tick_cpu_device); |
5 | extern spinlock_t tick_device_lock; | 9 | extern spinlock_t tick_device_lock; |
6 | extern ktime_t tick_next_period; | 10 | extern ktime_t tick_next_period; |
@@ -10,6 +14,8 @@ extern int tick_do_timer_cpu __read_mostly; | |||
10 | extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast); | 14 | extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast); |
11 | extern void tick_handle_periodic(struct clock_event_device *dev); | 15 | extern void tick_handle_periodic(struct clock_event_device *dev); |
12 | 16 | ||
17 | extern void clockevents_shutdown(struct clock_event_device *dev); | ||
18 | |||
13 | /* | 19 | /* |
14 | * NO_HZ / high resolution timer shared code | 20 | * NO_HZ / high resolution timer shared code |
15 | */ | 21 | */ |
@@ -29,6 +35,7 @@ extern void tick_broadcast_oneshot_control(unsigned long reason); | |||
29 | extern void tick_broadcast_switch_to_oneshot(void); | 35 | extern void tick_broadcast_switch_to_oneshot(void); |
30 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); | 36 | extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); |
31 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); | 37 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); |
38 | extern int tick_broadcast_oneshot_active(void); | ||
32 | # else /* BROADCAST */ | 39 | # else /* BROADCAST */ |
33 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | 40 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
34 | { | 41 | { |
@@ -37,6 +44,7 @@ static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | |||
37 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } | 44 | static inline void tick_broadcast_oneshot_control(unsigned long reason) { } |
38 | static inline void tick_broadcast_switch_to_oneshot(void) { } | 45 | static inline void tick_broadcast_switch_to_oneshot(void) { } |
39 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } | 46 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } |
47 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | ||
40 | # endif /* !BROADCAST */ | 48 | # endif /* !BROADCAST */ |
41 | 49 | ||
42 | #else /* !ONESHOT */ | 50 | #else /* !ONESHOT */ |
@@ -66,6 +74,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | |||
66 | { | 74 | { |
67 | return 0; | 75 | return 0; |
68 | } | 76 | } |
77 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | ||
69 | #endif /* !TICK_ONESHOT */ | 78 | #endif /* !TICK_ONESHOT */ |
70 | 79 | ||
71 | /* | 80 | /* |
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index a87b0468568b..b711ffcb106c 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/profile.h> | 20 | #include <linux/profile.h> |
21 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
22 | #include <linux/tick.h> | 22 | #include <linux/tick.h> |
23 | #include <linux/module.h> | ||
23 | 24 | ||
24 | #include <asm/irq_regs.h> | 25 | #include <asm/irq_regs.h> |
25 | 26 | ||
@@ -75,6 +76,9 @@ static void tick_do_update_jiffies64(ktime_t now) | |||
75 | incr * ticks); | 76 | incr * ticks); |
76 | } | 77 | } |
77 | do_timer(++ticks); | 78 | do_timer(++ticks); |
79 | |||
80 | /* Keep the tick_next_period variable up to date */ | ||
81 | tick_next_period = ktime_add(last_jiffies_update, tick_period); | ||
78 | } | 82 | } |
79 | write_sequnlock(&xtime_lock); | 83 | write_sequnlock(&xtime_lock); |
80 | } | 84 | } |
@@ -187,9 +191,17 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time) | |||
187 | { | 191 | { |
188 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); | 192 | struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); |
189 | 193 | ||
190 | *last_update_time = ktime_to_us(ts->idle_lastupdate); | 194 | if (!tick_nohz_enabled) |
195 | return -1; | ||
196 | |||
197 | if (ts->idle_active) | ||
198 | *last_update_time = ktime_to_us(ts->idle_lastupdate); | ||
199 | else | ||
200 | *last_update_time = ktime_to_us(ktime_get()); | ||
201 | |||
191 | return ktime_to_us(ts->idle_sleeptime); | 202 | return ktime_to_us(ts->idle_sleeptime); |
192 | } | 203 | } |
204 | EXPORT_SYMBOL_GPL(get_cpu_idle_time_us); | ||
193 | 205 | ||
194 | /** | 206 | /** |
195 | * tick_nohz_stop_sched_tick - stop the idle tick from the idle task | 207 | * tick_nohz_stop_sched_tick - stop the idle tick from the idle task |
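get_cpu_idle_time_us() now bails out with -1 when NO_HZ is not enabled and always fills in *last_update_time, using the last idle update when the CPU is currently idle and "now" otherwise. A userspace model of those guards and of how a caller would check the sentinel; the *_model names are stand-ins:

/* Model of the new guards in get_cpu_idle_time_us(). */
#include <stdio.h>
#include <stdint.h>

struct tick_sched_model {
	int idle_active;
	uint64_t idle_lastupdate_us;
	uint64_t idle_sleeptime_us;
};

static int tick_nohz_enabled_model = 1;

static uint64_t now_us(void) { return 123456789ULL; }	/* stand-in for ktime_get() */

static uint64_t idle_time_us(const struct tick_sched_model *ts,
			     uint64_t *last_update_time)
{
	if (!tick_nohz_enabled_model)
		return (uint64_t)-1;

	if (ts->idle_active)
		*last_update_time = ts->idle_lastupdate_us;
	else
		*last_update_time = now_us();

	return ts->idle_sleeptime_us;
}

int main(void)
{
	struct tick_sched_model ts = { 0, 123000000ULL, 42000000ULL };
	uint64_t last, idle = idle_time_us(&ts, &last);

	if (idle == (uint64_t)-1)
		printf("NO_HZ disabled, no idle accounting\n");
	else
		printf("idle=%llu us (as of %llu us)\n",
		       (unsigned long long)idle, (unsigned long long)last);
	return 0;
}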
@@ -221,7 +233,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
221 | */ | 233 | */ |
222 | if (unlikely(!cpu_online(cpu))) { | 234 | if (unlikely(!cpu_online(cpu))) { |
223 | if (cpu == tick_do_timer_cpu) | 235 | if (cpu == tick_do_timer_cpu) |
224 | tick_do_timer_cpu = -1; | 236 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
225 | } | 237 | } |
226 | 238 | ||
227 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) | 239 | if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) |
@@ -258,7 +270,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
258 | next_jiffies = get_next_timer_interrupt(last_jiffies); | 270 | next_jiffies = get_next_timer_interrupt(last_jiffies); |
259 | delta_jiffies = next_jiffies - last_jiffies; | 271 | delta_jiffies = next_jiffies - last_jiffies; |
260 | 272 | ||
261 | if (rcu_needs_cpu(cpu)) | 273 | if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu)) |
262 | delta_jiffies = 1; | 274 | delta_jiffies = 1; |
263 | /* | 275 | /* |
264 | * Do not stop the tick, if we are only one off | 276 | * Do not stop the tick, if we are only one off |
@@ -303,7 +315,7 @@ void tick_nohz_stop_sched_tick(int inidle) | |||
303 | * invoked. | 315 | * invoked. |
304 | */ | 316 | */ |
305 | if (cpu == tick_do_timer_cpu) | 317 | if (cpu == tick_do_timer_cpu) |
306 | tick_do_timer_cpu = -1; | 318 | tick_do_timer_cpu = TICK_DO_TIMER_NONE; |
307 | 319 | ||
308 | ts->idle_sleeps++; | 320 | ts->idle_sleeps++; |
309 | 321 | ||
@@ -468,7 +480,7 @@ static void tick_nohz_handler(struct clock_event_device *dev) | |||
468 | * this duty, then the jiffies update is still serialized by | 480 | * this duty, then the jiffies update is still serialized by |
469 | * xtime_lock. | 481 | * xtime_lock. |
470 | */ | 482 | */ |
471 | if (unlikely(tick_do_timer_cpu == -1)) | 483 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) |
472 | tick_do_timer_cpu = cpu; | 484 | tick_do_timer_cpu = cpu; |
473 | 485 | ||
474 | /* Check, if the jiffies need an update */ | 486 | /* Check, if the jiffies need an update */ |
@@ -570,7 +582,7 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer) | |||
570 | * this duty, then the jiffies update is still serialized by | 582 | * this duty, then the jiffies update is still serialized by |
571 | * xtime_lock. | 583 | * xtime_lock. |
572 | */ | 584 | */ |
573 | if (unlikely(tick_do_timer_cpu == -1)) | 585 | if (unlikely(tick_do_timer_cpu == TICK_DO_TIMER_NONE)) |
574 | tick_do_timer_cpu = cpu; | 586 | tick_do_timer_cpu = cpu; |
575 | #endif | 587 | #endif |
576 | 588 | ||
@@ -622,7 +634,7 @@ void tick_setup_sched_timer(void) | |||
622 | */ | 634 | */ |
623 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | 635 | hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); |
624 | ts->sched_timer.function = tick_sched_timer; | 636 | ts->sched_timer.function = tick_sched_timer; |
625 | ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 637 | ts->sched_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; |
626 | 638 | ||
627 | /* Get the next period (per cpu) */ | 639 | /* Get the next period (per cpu) */ |
628 | ts->sched_timer.expires = tick_init_jiffy_update(); | 640 | ts->sched_timer.expires = tick_init_jiffy_update(); |
diff --git a/kernel/timer.c b/kernel/timer.c index 03bc7f1f1593..510fe69351ca 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -978,6 +978,7 @@ void update_process_times(int user_tick) | |||
978 | run_local_timers(); | 978 | run_local_timers(); |
979 | if (rcu_pending(cpu)) | 979 | if (rcu_pending(cpu)) |
980 | rcu_check_callbacks(cpu, user_tick); | 980 | rcu_check_callbacks(cpu, user_tick); |
981 | printk_tick(); | ||
981 | scheduler_tick(); | 982 | scheduler_tick(); |
982 | run_posix_cpu_timers(p); | 983 | run_posix_cpu_timers(p); |
983 | } | 984 | } |
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index bb948e52ce20..db58fb66a135 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
@@ -202,7 +202,7 @@ static void start_stack_timer(int cpu) | |||
202 | 202 | ||
203 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); | 203 | hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); |
204 | hrtimer->function = stack_trace_timer_fn; | 204 | hrtimer->function = stack_trace_timer_fn; |
205 | hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_NO_SOFTIRQ; | 205 | hrtimer->cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; |
206 | 206 | ||
207 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); | 207 | hrtimer_start(hrtimer, ns_to_ktime(sample_period), HRTIMER_MODE_REL); |
208 | } | 208 | } |
diff --git a/kernel/user.c b/kernel/user.c index 865ecf57a096..39d6159fae43 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -169,7 +169,7 @@ static ssize_t cpu_rt_runtime_show(struct kobject *kobj, | |||
169 | { | 169 | { |
170 | struct user_struct *up = container_of(kobj, struct user_struct, kobj); | 170 | struct user_struct *up = container_of(kobj, struct user_struct, kobj); |
171 | 171 | ||
172 | return sprintf(buf, "%lu\n", sched_group_rt_runtime(up->tg)); | 172 | return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg)); |
173 | } | 173 | } |
174 | 174 | ||
175 | static ssize_t cpu_rt_runtime_store(struct kobject *kobj, | 175 | static ssize_t cpu_rt_runtime_store(struct kobject *kobj, |
@@ -180,7 +180,7 @@ static ssize_t cpu_rt_runtime_store(struct kobject *kobj, | |||
180 | unsigned long rt_runtime; | 180 | unsigned long rt_runtime; |
181 | int rc; | 181 | int rc; |
182 | 182 | ||
183 | sscanf(buf, "%lu", &rt_runtime); | 183 | sscanf(buf, "%ld", &rt_runtime); |
184 | 184 | ||
185 | rc = sched_group_set_rt_runtime(up->tg, rt_runtime); | 185 | rc = sched_group_set_rt_runtime(up->tg, rt_runtime); |
186 | 186 | ||
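The %lu to %ld switch in user.c matters because an unlimited runtime is reported as a negative value (mirroring the scheduler's RUNTIME_INF convention, which is an assumption here rather than something shown in this hunk), and an unsigned conversion turns that into a huge positive number. A two-line demonstration:

/* Why signed formatting is needed for a value that can legitimately be -1. */
#include <stdio.h>

int main(void)
{
	long rt_runtime = -1;	/* "no limit" */

	printf("%%lu: %lu\n", (unsigned long)rt_runtime);	/* 18446744073709551615 on 64-bit */
	printf("%%ld: %ld\n", rt_runtime);			/* -1 */
	return 0;
}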