author    Ingo Molnar <mingo@elte.hu>  2009-11-15 03:50:38 -0500
committer Ingo Molnar <mingo@elte.hu>  2009-11-15 03:50:41 -0500
commit    39dc78b6510323848e3356452f7dab9499736978 (patch)
tree      cf8a8fede74e41b203fd00e3ccd21ead2e851442 /kernel
parent    4c49b12853fbb5eff4849b7b6a1e895776f027a1 (diff)
parent    156171c71a0dc4bce12b4408bb1591f8fe32dc1a (diff)
Merge commit 'v2.6.32-rc7' into perf/core
Merge reason: pick up perf fixlets

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/spurious.c         2
-rw-r--r--  kernel/kthread.c             23
-rw-r--r--  kernel/rcutree.c             16
-rw-r--r--  kernel/rcutree.h              7
-rw-r--r--  kernel/sched.c               43
-rw-r--r--  kernel/sched_fair.c          73
-rw-r--r--  kernel/trace/ftrace.c         6
-rw-r--r--  kernel/trace/ring_buffer.c    2
-rw-r--r--  kernel/user.c                 2
9 files changed, 108 insertions, 66 deletions
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 114e704760fe..bd7273e6282e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -121,7 +121,9 @@ static void poll_all_shared_irqs(void)
 		if (!(status & IRQ_SPURIOUS_DISABLED))
 			continue;
 
+		local_irq_disable();
 		try_one_irq(i, desc);
+		local_irq_enable();
 	}
 }
 
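Why the new bracketing is needed, as a sketch (assuming, as the hunk suggests, that try_one_irq() takes desc->lock with a plain spin_lock() and expects hard-irq calling context):

    /*
     * Illustrative sketch, not part of the patch: try_one_irq() locks
     * desc->lock without disabling interrupts itself, because its
     * normal callers already run with irqs off. The spurious poller
     * calls it from timer context with irqs on; an interrupt taken
     * while desc->lock is held could recurse into the same lock and
     * deadlock. Disabling irqs around the call restores the invariant:
     */
    local_irq_disable();
    try_one_irq(i, desc);   /* now runs as if from hard-irq context */
    local_irq_enable();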
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5fe709982caa..ab7ae57773e1 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -150,29 +150,6 @@ struct task_struct *kthread_create(int (*threadfn)(void *data),
 EXPORT_SYMBOL(kthread_create);
 
 /**
- * kthread_bind - bind a just-created kthread to a cpu.
- * @k: thread created by kthread_create().
- * @cpu: cpu (might not be online, must be possible) for @k to run on.
- *
- * Description: This function is equivalent to set_cpus_allowed(),
- * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create()).
- */
-void kthread_bind(struct task_struct *k, unsigned int cpu)
-{
-	/* Must have done schedule() in kthread() before we set_task_cpu */
-	if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
-		WARN_ON(1);
-		return;
-	}
-	set_task_cpu(k, cpu);
-	k->cpus_allowed = cpumask_of_cpu(cpu);
-	k->rt.nr_cpus_allowed = 1;
-	k->flags |= PF_THREAD_BOUND;
-}
-EXPORT_SYMBOL(kthread_bind);
-
-/**
  * kthread_stop - stop a thread created by kthread_create().
  * @k: thread created by kthread_create().
  *
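For context, the caller pattern this function exists for, which the move to sched.c leaves unchanged (an illustrative sketch; my_thread_fn, my_data, and the thread name are hypothetical):

    struct task_struct *t;

    t = kthread_create(my_thread_fn, my_data, "helper/%u", cpu);
    if (!IS_ERR(t)) {
        /* The thread was created stopped, so binding cannot race with
         * it running; @cpu only needs to be possible, not online. */
        kthread_bind(t, cpu);
        wake_up_process(t);
    }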
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 0536125b0497..f3077c0ab181 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -59,7 +59,7 @@
 		NUM_RCU_LVL_2, \
 		NUM_RCU_LVL_3, /* == MAX_RCU_LVLS */ \
 	}, \
-	.signaled = RCU_SIGNAL_INIT, \
+	.signaled = RCU_GP_IDLE, \
 	.gpnum = -300, \
 	.completed = -300, \
 	.onofflock = __SPIN_LOCK_UNLOCKED(&name.onofflock), \
@@ -657,14 +657,17 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	 * irqs disabled.
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		spin_lock(&rnp->lock);	/* irqs already disabled. */
 		rcu_preempt_check_blocked_tasks(rnp);
 		rnp->qsmask = rnp->qsmaskinit;
 		rnp->gpnum = rsp->gpnum;
-		spin_unlock(&rnp->lock); /* irqs already disabled. */
+		spin_unlock(&rnp->lock); /* irqs remain disabled. */
 	}
 
+	rnp = rcu_get_root(rsp);
+	spin_lock(&rnp->lock);		/* irqs already disabled. */
 	rsp->signaled = RCU_SIGNAL_INIT; /* force_quiescent_state now OK. */
+	spin_unlock(&rnp->lock);	/* irqs remain disabled. */
 	spin_unlock_irqrestore(&rsp->onofflock, flags);
 }
 
@@ -706,6 +709,7 @@ static void cpu_quiet_msk_finish(struct rcu_state *rsp, unsigned long flags)
 {
 	WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
 	rsp->completed = rsp->gpnum;
+	rsp->signaled = RCU_GP_IDLE;
 	rcu_process_gp_end(rsp, rsp->rda[smp_processor_id()]);
 	rcu_start_gp(rsp, flags); /* releases root node's rnp->lock. */
 }
@@ -1162,9 +1166,10 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 	}
 	spin_unlock(&rnp->lock);
 	switch (signaled) {
+	case RCU_GP_IDLE:
 	case RCU_GP_INIT:
 
-		break; /* grace period still initializing, ignore. */
+		break; /* grace period idle or initializing, ignore. */
 
 	case RCU_SAVE_DYNTICK:
 
@@ -1178,7 +1183,8 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
 
 		/* Update state, record completion counter. */
 		spin_lock(&rnp->lock);
-		if (lastcomp == rsp->completed) {
+		if (lastcomp == rsp->completed &&
+		    rsp->signaled == RCU_SAVE_DYNTICK) {
 			rsp->signaled = RCU_FORCE_QS;
 			dyntick_record_completed(rsp, lastcomp);
 		}
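Taken together, these hunks move the publication of rsp->signaled under the root rcu_node lock and add a re-check before advancing it. A sketch of the reasoning, inferred from the hunks alone:

    /*
     * Inferred, not authoritative: force_quiescent_state() samples
     * rsp->signaled under the root rnp->lock. Writing RCU_SIGNAL_INIT
     * under that same lock, and advancing RCU_SAVE_DYNTICK ->
     * RCU_FORCE_QS only if the state is still RCU_SAVE_DYNTICK,
     * prevents a late force_quiescent_state() from acting on a grace
     * period that has since completed or been re-initialized.
     */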
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 1823c6e20609..1899023b0962 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -201,9 +201,10 @@ struct rcu_data {
 };
 
 /* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT		0	/* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK	1	/* Need to scan dyntick state. */
-#define RCU_FORCE_QS		2	/* Need to force quiescent state. */
+#define RCU_GP_IDLE		0	/* No grace period in progress. */
+#define RCU_GP_INIT		1	/* Grace period being initialized. */
+#define RCU_SAVE_DYNTICK	2	/* Need to scan dyntick state. */
+#define RCU_FORCE_QS		3	/* Need to force quiescent state. */
 #ifdef CONFIG_NO_HZ
 #define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
 #else /* #ifdef CONFIG_NO_HZ */
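The renumbering exists to make room for a real idle state. A lifecycle sketch of the signaled field as it reads after this merge (assembled from the rcutree.c hunks above; transitions not visible in this diff are inferred):

    /*
     * RCU_GP_IDLE        no grace period in progress; initial state,
     *                    restored by cpu_quiet_msk_finish()
     * RCU_GP_INIT        rcu_start_gp() is initializing the node tree
     * RCU_SAVE_DYNTICK   dyntick state must be scanned (the value of
     *                    RCU_SIGNAL_INIT under CONFIG_NO_HZ)
     * RCU_FORCE_QS       quiescent states are being forced
     *
     * force_quiescent_state() now ignores RCU_GP_IDLE and RCU_GP_INIT.
     */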
diff --git a/kernel/sched.c b/kernel/sched.c
index a455dca884a6..3c11ae0a948d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -309,6 +309,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
  */
 static DEFINE_SPINLOCK(task_group_lock);
 
+#ifdef CONFIG_FAIR_GROUP_SCHED
+
 #ifdef CONFIG_SMP
 static int root_task_group_empty(void)
 {
@@ -316,7 +318,6 @@ static int root_task_group_empty(void)
 }
 #endif
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_USER_SCHED
 # define INIT_TASK_GROUP_LOAD	(2*NICE_0_LOAD)
 #else /* !CONFIG_USER_SCHED */
@@ -1992,6 +1993,38 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+/**
+ * kthread_bind - bind a just-created kthread to a cpu.
+ * @p: thread created by kthread_create().
+ * @cpu: cpu (might not be online, must be possible) for @k to run on.
+ *
+ * Description: This function is equivalent to set_cpus_allowed(),
+ * except that @cpu doesn't need to be online, and the thread must be
+ * stopped (i.e., just returned from kthread_create()).
+ *
+ * Function lives here instead of kthread.c because it messes with
+ * scheduler internals which require locking.
+ */
+void kthread_bind(struct task_struct *p, unsigned int cpu)
+{
+	struct rq *rq = cpu_rq(cpu);
+	unsigned long flags;
+
+	/* Must have done schedule() in kthread() before we set_task_cpu */
+	if (!wait_task_inactive(p, TASK_UNINTERRUPTIBLE)) {
+		WARN_ON(1);
+		return;
+	}
+
+	spin_lock_irqsave(&rq->lock, flags);
+	set_task_cpu(p, cpu);
+	p->cpus_allowed = cpumask_of_cpu(cpu);
+	p->rt.nr_cpus_allowed = 1;
+	p->flags |= PF_THREAD_BOUND;
+	spin_unlock_irqrestore(&rq->lock, flags);
+}
+EXPORT_SYMBOL(kthread_bind);
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
@@ -2004,7 +2037,7 @@ task_hot(struct task_struct *p, u64 now, struct sched_domain *sd)
 	/*
 	 * Buddy candidates are cache hot:
 	 */
-	if (sched_feat(CACHE_HOT_BUDDY) &&
+	if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
 			(&p->se == cfs_rq_of(&p->se)->next ||
 			 &p->se == cfs_rq_of(&p->se)->last))
 		return 1;
@@ -9532,13 +9565,13 @@ void __init sched_init(void)
 	current->sched_class = &fair_sched_class;
 
 	/* Allocate the nohz_cpu_mask if CONFIG_CPUMASK_OFFSTACK */
-	alloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz_cpu_mask, GFP_NOWAIT);
 #ifdef CONFIG_SMP
 #ifdef CONFIG_NO_HZ
-	alloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
+	zalloc_cpumask_var(&nohz.cpu_mask, GFP_NOWAIT);
 	alloc_cpumask_var(&nohz.ilb_grp_nohz_mask, GFP_NOWAIT);
 #endif
-	alloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
+	zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT);
 #endif /* SMP */
 
 	perf_event_init();
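The alloc-to-zalloc conversions only matter with CONFIG_CPUMASK_OFFSTACK, where cpumask_var_t is heap-allocated rather than a zeroed static buffer. An illustrative sketch (boot-time error handling simplified):

    cpumask_var_t mask;

    /* alloc_cpumask_var() leaves the offstack mask uninitialized, so
     * bits tested before the first cpumask_set_cpu() are garbage;
     * zalloc_cpumask_var() returns the mask already cleared. */
    if (!zalloc_cpumask_var(&mask, GFP_NOWAIT))
        panic("cpumask allocation failed");  /* simplified */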
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index c32c3e643daa..37087a7fac22 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -822,6 +822,26 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
 		 * re-elected due to buddy favours.
 		 */
 		clear_buddies(cfs_rq, curr);
+		return;
+	}
+
+	/*
+	 * Ensure that a task that missed wakeup preemption by a
+	 * narrow margin doesn't have to wait for a full slice.
+	 * This also mitigates buddy induced latencies under load.
+	 */
+	if (!sched_feat(WAKEUP_PREEMPT))
+		return;
+
+	if (delta_exec < sysctl_sched_min_granularity)
+		return;
+
+	if (cfs_rq->nr_running > 1) {
+		struct sched_entity *se = __pick_next_entity(cfs_rq);
+		s64 delta = curr->vruntime - se->vruntime;
+
+		if (delta > ideal_runtime)
+			resched_task(rq_of(cfs_rq)->curr);
 	}
 }
 
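The added checks read more easily folded into one condition; this is a restatement of the hunk with identical semantics, not new code:

    if (sched_feat(WAKEUP_PREEMPT) &&
        delta_exec >= sysctl_sched_min_granularity &&
        cfs_rq->nr_running > 1) {
        /* Preempt when current leads the leftmost entity by more
         * than one ideal slice worth of vruntime. */
        s64 delta = curr->vruntime - __pick_next_entity(cfs_rq)->vruntime;

        if (delta > ideal_runtime)
            resched_task(rq_of(cfs_rq)->curr);
    }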
@@ -861,21 +881,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
 static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
 {
 	struct sched_entity *se = __pick_next_entity(cfs_rq);
-	struct sched_entity *buddy;
+	struct sched_entity *left = se;
 
-	if (cfs_rq->next) {
-		buddy = cfs_rq->next;
-		cfs_rq->next = NULL;
-		if (wakeup_preempt_entity(buddy, se) < 1)
-			return buddy;
-	}
+	if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
+		se = cfs_rq->next;
 
-	if (cfs_rq->last) {
-		buddy = cfs_rq->last;
-		cfs_rq->last = NULL;
-		if (wakeup_preempt_entity(buddy, se) < 1)
-			return buddy;
-	}
+	/*
+	 * Prefer last buddy, try to return the CPU to a preempted task.
+	 */
+	if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
+		se = cfs_rq->last;
+
+	clear_buddies(cfs_rq, se);
 
 	return se;
 }
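A behavioural note on the rewrite: the buddy pointers are no longer consumed by being NULLed on inspection; clear_buddies() runs once on whichever entity is finally picked. Reading sketch:

    /*
     * Sketch, not part of the patch: wakeup_preempt_entity(buddy, left)
     * < 1 means running the buddy instead of the leftmost entity stays
     * within the wakeup-granularity fairness bound. ->last is tested
     * after ->next, so when both qualify the last buddy wins, returning
     * the CPU to a preempted task as the new comment says.
     */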
@@ -1577,6 +1594,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	struct sched_entity *se = &curr->se, *pse = &p->se;
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int sync = wake_flags & WF_SYNC;
+	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
 	update_curr(cfs_rq);
 
@@ -1591,18 +1609,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	if (unlikely(se == pse))
 		return;
 
-	/*
-	 * Only set the backward buddy when the current task is still on the
-	 * rq. This can happen when a wakeup gets interleaved with schedule on
-	 * the ->pre_schedule() or idle_balance() point, either of which can
-	 * drop the rq lock.
-	 *
-	 * Also, during early boot the idle thread is in the fair class, for
-	 * obvious reasons its a bad idea to schedule back to the idle thread.
-	 */
-	if (sched_feat(LAST_BUDDY) && likely(se->on_rq && curr != rq->idle))
-		set_last_buddy(se);
-	if (sched_feat(NEXT_BUDDY) && !(wake_flags & WF_FORK))
+	if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK))
 		set_next_buddy(pse);
 
 	/*
@@ -1648,8 +1655,22 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 
 	BUG_ON(!pse);
 
-	if (wakeup_preempt_entity(se, pse) == 1)
+	if (wakeup_preempt_entity(se, pse) == 1) {
 		resched_task(curr);
+		/*
+		 * Only set the backward buddy when the current task is still
+		 * on the rq. This can happen when a wakeup gets interleaved
+		 * with schedule on the ->pre_schedule() or idle_balance()
+		 * point, either of which can * drop the rq lock.
+		 *
+		 * Also, during early boot the idle thread is in the fair class,
+		 * for obvious reasons its a bad idea to schedule back to it.
+		 */
+		if (unlikely(!se->on_rq || curr == rq->idle))
+			return;
+		if (sched_feat(LAST_BUDDY) && scale && entity_is_task(se))
+			set_last_buddy(se);
+	}
 }
 
 static struct task_struct *pick_next_task_fair(struct rq *rq)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1ed514fe3a30..7cb6f1922598 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2199,15 +2199,15 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 		ret = ftrace_process_regex(parser->buffer,
 					   parser->idx, enable);
 		if (ret)
-			goto out;
+			goto out_unlock;
 
 		trace_parser_clear(parser);
 	}
 
 	ret = read;
-
+out_unlock:
 	mutex_unlock(&ftrace_regex_lock);
-out:
+
 	return ret;
 }
 
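The bug was the classic early exit that jumps past the unlock. The idiom the fix restores, in a self-contained form (lock and helper names here are hypothetical):

    static DEFINE_MUTEX(example_lock);

    static int example_write(void)
    {
        int ret;

        mutex_lock(&example_lock);
        ret = do_work();        /* hypothetical helper */
        if (ret)
            goto out_unlock;    /* was "goto out", skipping the unlock */

        ret = 0;
    out_unlock:
        mutex_unlock(&example_lock);
        return ret;
    }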
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 63446f12e470..db223fe8887f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1196,6 +1196,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 	atomic_inc(&cpu_buffer->record_disabled);
 	synchronize_sched();
 
+	spin_lock_irq(&cpu_buffer->reader_lock);
 	rb_head_page_deactivate(cpu_buffer);
 
 	for (i = 0; i < nr_pages; i++) {
@@ -1210,6 +1211,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 		return;
 
 	rb_reset_cpu(cpu_buffer);
+	spin_unlock_irq(&cpu_buffer->reader_lock);
 
 	rb_check_pages(cpu_buffer);
 
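Both hunks together bracket the entire page-removal sequence, presumably because readers traverse the page list under reader_lock. Condensed shape of the fix (early-return paths elided):

    spin_lock_irq(&cpu_buffer->reader_lock);
    rb_head_page_deactivate(cpu_buffer);
    /* ... unlink nr_pages pages from the list ... */
    rb_reset_cpu(cpu_buffer);
    spin_unlock_irq(&cpu_buffer->reader_lock);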
diff --git a/kernel/user.c b/kernel/user.c
index 2c000e7132ac..46d0165ca70c 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -330,9 +330,9 @@ done:
  */
 static void free_user(struct user_struct *up, unsigned long flags)
 {
-	spin_unlock_irqrestore(&uidhash_lock, flags);
 	INIT_DELAYED_WORK(&up->work, cleanup_user_struct);
 	schedule_delayed_work(&up->work, msecs_to_jiffies(1000));
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 }
 
 #else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */
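free_user() is entered with uidhash_lock held by its caller, so the reorder keeps the uid hash locked until the deferred cleanup is fully set up. A sketch of the race presumably being closed (inferred from the hunk, not stated in it):

    /*
     * With the unlock first, another CPU could find_user() the same
     * uid and drop it again before INIT_DELAYED_WORK() ran, touching
     * up->work in a half-initialized state. Queueing the work before
     * releasing uidhash_lock makes init + queue atomic with respect
     * to the hash.
     */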