author    Ralf Baechle <ralf@linux-mips.org>  2013-02-21 06:51:33 -0500
committer Ralf Baechle <ralf@linux-mips.org>  2013-02-21 06:51:33 -0500
commit    8bfc245f9ad7bd4e461179e4e7852ef99b8b6144
tree      0ad091f645fbc8318634599d278966a53d3922ee /kernel
parent    612663a974065c3445e641d046769fe4c55a6438
parent    535237cecab2b078114be712c67e89a0db61965f
Merge branch 'mips-next-3.9' of git://git.linux-mips.org/pub/scm/john/linux-john into mips-for-linux-next
Diffstat (limited to 'kernel')
 kernel/events/core.c    | 20 ++++++++++++++++++--
 kernel/rcutree_plugin.h | 13 ++++++++++---
 kernel/sched/debug.c    |  4 ++--
 kernel/sched/fair.c     |  2 +-
 kernel/sched/rt.c       |  2 +-
 5 files changed, 32 insertions(+), 9 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 301079d06f24..7b6646a8c067 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -908,6 +908,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 }
 
 /*
+ * Initialize event state based on the perf_event_attr::disabled.
+ */
+static inline void perf_event__state_init(struct perf_event *event)
+{
+	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
+					      PERF_EVENT_STATE_INACTIVE;
+}
+
+/*
  * Called at perf_event creation and when events are attached/detached from a
  * group.
  */
@@ -6179,8 +6188,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	event->overflow_handler = overflow_handler;
 	event->overflow_handler_context = context;
 
-	if (attr->disabled)
-		event->state = PERF_EVENT_STATE_OFF;
+	perf_event__state_init(event);
 
 	pmu = NULL;
 
@@ -6609,9 +6617,17 @@ SYSCALL_DEFINE5(perf_event_open,
 
 		mutex_lock(&gctx->mutex);
 		perf_remove_from_context(group_leader);
+
+		/*
+		 * Removing from the context ends up with disabled
+		 * event. What we want here is event in the initial
+		 * startup state, ready to be add into new context.
+		 */
+		perf_event__state_init(group_leader);
 		list_for_each_entry(sibling, &group_leader->sibling_list,
 				    group_entry) {
 			perf_remove_from_context(sibling);
+			perf_event__state_init(sibling);
 			put_ctx(gctx);
 		}
 		mutex_unlock(&gctx->mutex);
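
The new perf_event__state_init() helper gives the creation path and the move-group path in perf_event_open() one shared definition of an event's startup state; perf_remove_from_context() leaves an event disabled, so a group leader and its siblings must be re-initialized before being attached to the new context. A minimal userspace sketch of that pattern follows; all types and names in it are hypothetical stand-ins, not kernel API.

/*
 * Standalone sketch (not kernel code): a single helper derives an
 * object's initial state from its creation attributes, so first-time
 * setup and "detach then re-attach" agree on the result.
 */
#include <stdio.h>

enum ev_state { EV_STATE_OFF = -1, EV_STATE_INACTIVE = 0 };

struct ev_attr { int disabled; };
struct ev      { struct ev_attr attr; enum ev_state state; };

/* Mirrors perf_event__state_init(): one place computes the start state. */
static void ev_state_init(struct ev *e)
{
	e->state = e->attr.disabled ? EV_STATE_OFF : EV_STATE_INACTIVE;
}

/* Detaching disables the object, as perf_remove_from_context() does. */
static void ev_detach(struct ev *e)
{
	e->state = EV_STATE_OFF;
}

int main(void)
{
	struct ev e = { .attr = { .disabled = 0 } };

	ev_state_init(&e);	/* creation path */
	ev_detach(&e);		/* move between contexts */
	ev_state_init(&e);	/* back to the startup state */
	printf("state after re-init: %d\n", e.state); /* prints 0 */
	return 0;
}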
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f6e5ec2932b4..c1cc7e17ff9d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -40,8 +40,7 @@
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
 static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
-static bool rcu_nocb_poll;	    /* Offload kthread are to poll. */
-module_param(rcu_nocb_poll, bool, 0444);
+static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
 static char __initdata nocb_buf[NR_CPUS * 5];
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
@@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str)
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
 
+static int __init parse_rcu_nocb_poll(char *arg)
+{
+	rcu_nocb_poll = 1;
+	return 0;
+}
+early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+
 /* Is the specified CPU a no-CPUs CPU? */
 static bool is_nocb_cpu(int cpu)
 {
@@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg)
 	for (;;) {
 		/* If not polling, wait for next batch of callbacks. */
 		if (!rcu_nocb_poll)
-			wait_event(rdp->nocb_wq, rdp->nocb_head);
+			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
 		list = ACCESS_ONCE(rdp->nocb_head);
 		if (!list) {
 			schedule_timeout_interruptible(1);
+			flush_signals(current);
 			continue;
 		}
 
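
Two changes meet in this file: rcu_nocb_poll is now parsed with early_param() rather than module_param(), so the flag takes effect during early boot before the offload kthreads are spawned, and rcu_nocb_kthread() now waits interruptibly and flushes signals, since a kthread blocked uninterruptibly counts toward the load average. A rough userspace sketch of the early-flag half follows, with simulated command-line scanning; the parsing is deliberately simplified and the names are illustrative only.

/*
 * Userspace sketch of an early_param()-style flag: the bare presence
 * of "rcu_nocb_poll" on a (simulated) boot command line flips a
 * boolean before anything else runs.
 */
#include <stdio.h>
#include <string.h>

static int nocb_poll; /* stands in for rcu_nocb_poll */

static void parse_early_flags(const char *cmdline)
{
	/* Simplified: a real parser would tokenize on whitespace. */
	if (strstr(cmdline, "rcu_nocb_poll"))
		nocb_poll = 1;
}

int main(void)
{
	parse_early_flags("console=ttyS0 rcu_nocbs=1-3 rcu_nocb_poll");
	printf("polling mode: %d\n", nocb_poll); /* prints 1 */
	return 0;
}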
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2cd3c1b4e582..7ae4c4c5420e 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -222,8 +222,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->runnable_load_avg);
 	SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
 			cfs_rq->blocked_load_avg);
-	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
-			atomic64_read(&cfs_rq->tg->load_avg));
+	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
+			(unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
 	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
 			cfs_rq->tg_load_contrib);
 	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
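
This hunk is a printf-format portability fix: the return type of atomic64_read() is spelled differently across architectures (long vs. long long), so the value is cast explicitly before being printed with a %ll format. A standalone illustration of the same idea:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int64_t load_avg = 123456789012345LL; /* stand-in for tg->load_avg */

	/* Portable across 32/64-bit: cast explicitly, use a %ll format. */
	printf("tg_load_avg: %lld\n", (long long)load_avg);

	/* Fixed-width alternative from <inttypes.h>: */
	printf("tg_load_avg: %" PRId64 "\n", load_avg);
	return 0;
}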
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5eea8707234a..81fa53643409 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2663,7 +2663,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-static void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq;
 
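
Here __maybe_unused documents that unthrottle_offline_cfs_rqs() has no caller in some configurations, silencing -Wunused-function without wrapping the definition in #ifdef. A compilable sketch of the attribute, assuming a GCC-compatible compiler:

#include <stdio.h>

#define __maybe_unused __attribute__((unused))

/* Only called in some build configurations; no warning in the others. */
static void __maybe_unused helper_only_used_sometimes(void)
{
	puts("called in some configs only");
}

int main(void)
{
#ifdef USE_HELPER
	helper_only_used_sometimes();
#endif
	return 0; /* builds warning-free with or without -DUSE_HELPER */
}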
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 418feb01344e..4f02b2847357 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -566,7 +566,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
-	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
 	int i, weight, more = 0;
 	u64 rt_period;
 
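
The rt.c fix takes the root domain from the runqueue that owns the rt_rq being balanced instead of from whichever CPU happens to run the code, which matters once the two can differ. rq_of_rt_rq() boils down to the container_of() idiom: recover the enclosing structure from a pointer to one of its members. A standalone sketch with hypothetical stand-in structs:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rt_rq { int rt_time; };
struct rq    { int cpu; struct rt_rq rt; };

/* The rq that owns this rt_rq, independent of the executing CPU. */
static struct rq *rq_of(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

int main(void)
{
	struct rq runqueue = { .cpu = 3 };

	printf("owning cpu: %d\n", rq_of(&runqueue.rt)->cpu); /* prints 3 */
	return 0;
}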