Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/irq/spurious.c       |   2
 -rw-r--r--  kernel/kthread.c            |  23
 -rw-r--r--  kernel/rcutree.c            |  16
 -rw-r--r--  kernel/rcutree.h            |   7
 -rw-r--r--  kernel/sched.c              |  43
 -rw-r--r--  kernel/sched_fair.c         |  73
 -rw-r--r--  kernel/trace/ftrace.c       |   6
 -rw-r--r--  kernel/trace/ring_buffer.c  |   2
 -rw-r--r--  kernel/user.c               |   2
 -rw-r--r--  kernel/workqueue.c          |  28

 10 files changed, 121 insertions(+), 81 deletions(-)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 114e704760fe..bd7273e6282e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -121,7 +121,9 @@ static void poll_all_shared_irqs(void)
                 if (!(status & IRQ_SPURIOUS_DISABLED))
                         continue;
 
+                local_irq_disable();
                 try_one_irq(i, desc);
+                local_irq_enable();
         }
 }
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5fe709982caa..ab7ae57773e1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -150,29 +150,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 EXPORT_SYMBOL(kthread_create);
 
 /**
- * kthread_bind - bind a just-created kthread to a cpu.
- * @k: thread created by kthread_create().
- * @cpu: cpu (might not be online, must be possible) for @k to run on.
- *
- * Description: This function is equivalent to set_cpus_allowed(),
- * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create()).
- */
-void kthread_bind(struct task_struct *k, unsigned int cpu)
-{
-        /* Must have done schedule() in kthread() before we set_task_cpu */
-        if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
-                WARN_ON(1);
-                return;
-        }
-        set_task_cpu(k, cpu);
-        k->cpus_allowed = cpumask_of_cpu(cpu);
-        k->rt.nr_cpus_allowed = 1;
-        k->flags |= PF_THREAD_BOUND;
-}
-EXPORT_SYMBOL(kthread_bind);
-
-/**
  * kthread_stop - stop a thread created by kthread_create().
  * @k: thread created by kthread_create().
  *
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 0536125b0497..f3077c0ab181 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -59,7 +59,7 @@
                 NUM_RCU_LVL_2, \
                 NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
         }, \
-        .signaled = RCU_SIGNAL_INIT, \
+        .signaled = RCU_GP_IDLE, \
         .gpnum = -300, \
         .completed = -300, \
         .onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
@@ -657,14 +657,17 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
          * irqs disabled.
          */
         rcu_for_each_node_breadth_first(rsp, rnp) {
                 spin_lock(&rnp->lock);          /* irqs already disabled. */
                 rcu_preempt_check_blocked_tasks(rnp);
                 rnp->qsmask = rnp->qsmaskinit;
                 rnp->gpnum = rsp->gpnum;
-                spin_unlock(&rnp->lock);        /* irqs already disabled. */
+                spin_unlock(&rnp->lock);        /* irqs remain disabled. */
         }
 
+        rnp = rcu_get_root(rsp);
+        spin_lock(&rnp->lock);                  /* irqs already disabled. */
         rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+        spin_unlock(&rnp->lock);                /* irqs remain disabled. */
         spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
@@ -706,6 +709,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 {
         WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
         rsp->completed = rsp->gpnum;
+        rsp->signaled = RCU_GP_IDLE;
         rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
         rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
 }
@@ -1162,9 +1166,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
         }
         spin_unlock(&rnp->lock);
         switch (signaled) {
+        case RCU_GP_IDLE:
         case RCU_GP_INIT:
 
-                break; /* grace period still initializing, ignore. */
+                break; /* grace period idle or initializing, ignore. */
 
         case RCU_SAVE_DYNTICK:
 
@@ -1178,7 +1183,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
                 /* Update state, record completion counter. */
                 spin_lock(&rnp->lock);
-                if (lastcomp == rsp->completed) {
+                if (lastcomp == rsp->completed &&
+                    rsp->signaled == RCU_SAVE_DYNTICK) {
                         rsp->signaled = RCU_FORCE_QS;
                         dyntick_record_completed(rsp, lastcomp);
                 }
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 1823c6e20609..1899023b0962 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -201,9 +201,10 @@ struct rcu_data {
 };
 
 /* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT          0  /* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK     1  /* Need to scan dyntick state. */
-#define RCU_FORCE_QS         2  /* Need to force quiescent state. */
+#define RCU_GP_IDLE          0  /* No grace period in progress. */
+#define RCU_GP_INIT          1  /* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK     2  /* Need to scan dyntick state. */
+#define RCU_FORCE_QS         3  /* Need to force quiescent state. */
 #ifdef CONFIG_NO_HZ
 #define RCU_SIGNAL_INIT      RCU_SAVE_DYNTICK
 #else /* #ifdef CONFIG_NO_HZ */
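Taken together, the rcutree.c and rcutree.h hunks renumber the ->signaled values so that force_quiescent_state() can tell an idle grace period from one that is still being initialized. As a reading aid only (not part of the patch), the progression under CONFIG_NO_HZ, where RCU_SIGNAL_INIT is RCU_SAVE_DYNTICK, looks roughly like this:

/*
 * Editorial sketch of the ->signaled state machine after this change:
 *
 *   RCU_GP_IDLE       no grace period in progress; set by the state
 *                     initializer and again by cpu_quiet_msk_finish()
 *                     when a grace period completes
 *   RCU_GP_INIT       rcu_start_gp() is initializing the rcu_node
 *                     hierarchy; force_quiescent_state() ignores it
 *   RCU_SAVE_DYNTICK  force_quiescent_state() may scan dyntick-idle
 *                     CPUs (this is RCU_SIGNAL_INIT under CONFIG_NO_HZ)
 *   RCU_FORCE_QS      later calls push holdout CPUs toward reporting
 *                     a quiescent state
 */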
diff --git a/kernel/sched.c b/kernel/sched.c
index a455dca884a6..3c11ae0a948d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 #ifdef CONFIG_SMP
 static int root_task_group_empty(void)
 {
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD (2*NICE_0_LOAD)
 #else /* !CONFIG_USER_SCHED */
@@ -1992,6 +1993,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
                 p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+        struct rq *rq = cpu_rq(cpu);
+        unsigned long flags;
+
+        /* Must have done schedule() in kthread() before we set_task_cpu */
+        if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+                WARN_ON(1);
+                return;
+        }
+
+        spin_lock_irqsave(&rq->lock, flags);
+        set_task_cpu(p, cpu);
+        p->cpus_allowed = cpumask_of_cpu(cpu);
+        p->rt.nr_cpus_allowed = 1;
+        p->flags |= PF_THREAD_BOUND;
+        spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
@@ -2004,7 +2037,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
         /*
          * Buddy candidates are cache hot:
          */
-        if (sched_feat(CACHE_HOT_BUDDY) &&
+        if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
                         (&p->se == cfs_rq_of(&p->se)->next ||
                          &p->se == cfs_rq_of(&p->se)->last))
                 return 1;
@@ -9532,13 +9565,13 @@ void __init sched_init(void)
         current->sched_class = &fair_sched_class;
 
         /* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-        alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+        zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-        alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+        zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
         alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-        alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+        zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
         perf_event_init();
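The kthread.c and sched.c hunks move kthread_bind() rather than change its interface, so callers keep the usual create-bind-wake sequence. A minimal usage sketch (illustrative only; my_thread_fn and cpu are placeholders, not part of the patch):

/* Sketch of the usual caller pattern: the thread is bound while it is
 * still stopped inside kthread_create(), which is why kthread_bind()
 * can insist on wait_task_inactive() before touching scheduler state. */
struct task_struct *p;

p = kthread_create(my_thread_fn, NULL, "mythread/%u", cpu);
if (!IS_ERR(p)) {
        kthread_bind(p, cpu);   /* cpu need only be possible, not online */
        wake_up_process(p);     /* from here on the thread runs only on cpu */
}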
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c32c3e643daa..37087a7fac22 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
                  * re-elected due to buddy favours.
                  */
                 clear_buddies(cfs_rq, curr);
+                return;
+        }
+
+        /*
+         * Ensure that a task that missed wakeup preemption by a
+         * narrow margin doesn't have to wait for a full slice.
+         * This also mitigates buddy induced latencies under load.
+         */
+        if (!sched_feat(WAKEUP_PREEMPT))
+                return;
+
+        if (delta_exec < sysctl_sched_min_granularity)
+                return;
+
+        if (cfs_rq->nr_running > 1) {
+                struct sched_entity *se = __pick_next_entity(cfs_rq);
+                s64 delta = curr->vruntime - se->vruntime;
+
+                if (delta > ideal_runtime)
+                        resched_task(rq_of(cfs_rq)->curr);
         }
 }
 
@@ -861,21 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
         struct sched_entity *se = __pick_next_entity(cfs_rq);
-        struct sched_entity *buddy;
+        struct sched_entity *left = se;
 
-        if (cfs_rq->next) {
-                buddy = cfs_rq->next;
-                cfs_rq->next = NULL;
-                if (wakeup_preempt_entity(buddy, se) < 1)
-                        return buddy;
-        }
+        if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+                se = cfs_rq->next;
 
-        if (cfs_rq->last) {
-                buddy = cfs_rq->last;
-                cfs_rq->last = NULL;
-                if (wakeup_preempt_entity(buddy, se) < 1)
-                        return buddy;
-        }
+        /*
+         * Prefer last buddy, try to return the CPU to a preempted task.
+         */
+        if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
+                se = cfs_rq->last;
+
+        clear_buddies(cfs_rq, se);
 
         return se;
 }
@@ -1577,6 +1594,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
         struct sched_entity *se = &curr->se, *pse = &p->se;
         struct cfs_rq *cfs_rq = task_cfs_rq(curr);
         int sync = wake_flags & WF_SYNC;
+        int scale = cfs_rq->nr_running >= sched_nr_latency;
 
         update_curr(cfs_rq);
 
@@ -1591,18 +1609,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
         if (unlikely(se == pse))
                 return;
 
-        /*
-         * Only set the backward buddy when the current task is still on the
-         * rq. This can happen when a wakeup gets interleaved with schedule on
-         * the ->pre_schedule() or idle_balance() point, either of which can
-         * drop the rq lock.
-         *
-         * Also, during early boot the idle thread is in the fair class, for
-         * obvious reasons its a bad idea to schedule back to the idle thread.
-         */
-        if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
-                set_last_buddy(se);
-        if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
+        if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
                 set_next_buddy(pse);
 
         /*
@@ -1648,8 +1655,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
         BUG_ON(!pse);
 
-        if (wakeup_preempt_entity(se, pse) == 1)
+        if (wakeup_preempt_entity(se, pse) == 1) {
                 resched_task(curr);
+                /*
+                 * Only set the backward buddy when the current task is still
+                 * on the rq. This can happen when a wakeup gets interleaved
+                 * with schedule on the ->pre_schedule() or idle_balance()
+                 * point, either of which can * drop the rq lock.
+                 *
+                 * Also, during early boot the idle thread is in the fair class,
+                 * for obvious reasons its a bad idea to schedule back to it.
+                 */
+                if (unlikely(!se->on_rq || curr == rq->idle))
+                        return;
+                if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+                        set_last_buddy(se);
+        }
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9c451a1930b6..6dc4e5ef7a01 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2222,15 +2222,15 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
                 ret = ftrace_process_regex(parser->buffer,
                                            parser->idx, enable);
                 if (ret)
-                        goto out;
+                        goto out_unlock;
 
                 trace_parser_clear(parser);
         }
 
         ret = read;
-
+out_unlock:
         mutex_unlock(&ftrace_regex_lock);
-out:
+
         return ret;
 }
 
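The ftrace_regex_write() hunk exists because the old early-exit path ("goto out") jumped past mutex_unlock() and so left ftrace_regex_lock held on error. A stripped-down sketch of the corrected shape (not the real function body; process_input() is a stand-in for ftrace_process_regex()):

        mutex_lock(&ftrace_regex_lock);

        ret = process_input();          /* stand-in for the real parsing work */
        if (ret)
                goto out_unlock;        /* the error path now drops the lock */

        ret = read;
out_unlock:
        mutex_unlock(&ftrace_regex_lock);
        return ret;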
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3ffa502fb243..5dd017fea6f5 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1193,6 +1193,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
         atomic_inc(&cpu_buffer->record_disabled);
         synchronize_sched();
 
+        spin_lock_irq(&cpu_buffer->reader_lock);
         rb_head_page_deactivate(cpu_buffer);
 
         for (i = 0; i < nr_pages; i++) {
@@ -1207,6 +1208,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
                 return;
 
         rb_reset_cpu(cpu_buffer);
+        spin_unlock_irq(&cpu_buffer->reader_lock);
 
         rb_check_pages(cpu_buffer);
 
diff --git a/kernel/user.c b/kernel/user.c
index 2c000e7132ac..46d0165ca70c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -330,9 +330,9 @@ done:
  */
 static void free_user(struct user_struct *up, unsigned long flags)
 {
-        spin_unlock_irqrestore(&uidhash_lock, flags);
         INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
         schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
+        spin_unlock_irqrestore(&uidhash_lock, flags);
 }
 
 #else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 12328147132c..67e526b6ae81 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -692,31 +692,29 @@ int schedule_on_each_cpu(work_func_t func)
         if (!works)
                 return -ENOMEM;
 
+        get_online_cpus();
+
         /*
-         * when running in keventd don't schedule a work item on itself.
-         * Can just call directly because the work queue is already bound.
-         * This also is faster.
-         * Make this a generic parameter for other workqueues?
+         * When running in keventd don't schedule a work item on
+         * itself. Can just call directly because the work queue is
+         * already bound. This also is faster.
          */
-        if (current_is_keventd()) {
+        if (current_is_keventd())
                 orig = raw_smp_processor_id();
-                INIT_WORK(per_cpu_ptr(works, orig), func);
-                func(per_cpu_ptr(works, orig));
-        }
 
-        get_online_cpus();
         for_each_online_cpu(cpu) {
                 struct work_struct *work = per_cpu_ptr(works, cpu);
 
-                if (cpu == orig)
-                        continue;
                 INIT_WORK(work, func);
-                schedule_work_on(cpu, work);
-        }
-        for_each_online_cpu(cpu) {
                 if (cpu != orig)
-                        flush_work(per_cpu_ptr(works, cpu));
+                        schedule_work_on(cpu, work);
         }
+        if (orig >= 0)
+                func(per_cpu_ptr(works, orig));
+
+        for_each_online_cpu(cpu)
+                flush_work(per_cpu_ptr(works, cpu));
+
         put_online_cpus();
         free_percpu(works);
         return 0;
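After the rework above, schedule_on_each_cpu() keeps its contract: it runs func once on every online CPU and returns only after all invocations have completed, calling the local instance directly when invoked from keventd instead of queueing it. A hedged usage sketch (my_func is a placeholder, not part of the patch):

static void my_func(struct work_struct *work)
{
        pr_info("schedule_on_each_cpu() callback on CPU %d\n",
                smp_processor_id());
}

/* Called from process context; returns 0 on success, -ENOMEM if the
 * per-cpu allocation of work items fails. */
err = schedule_on_each_cpu(my_func);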
