authorDavid S. Miller <davem@davemloft.net>2013-02-08 18:02:14 -0500
committerDavid S. Miller <davem@davemloft.net>2013-02-08 18:02:14 -0500
commitfd5023111cf720db890ef34f305ac5d427e690a0 (patch)
tree4d21e9a02bfbdafe5fc598af0755db791238dbe7 /kernel
parent8b9a4d56866e0dca6ae886ed9bff777e50d0b70c (diff)
parent836dc9e3fbbab0c30aa6e664417225f5c1fb1c39 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Synchronize with 'net' in order to sort out some l2tp, wireless, and ipv6 GRE fixes that will be built on top of in 'net-next'.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
 kernel/events/core.c    | 20
 kernel/printk.c         |  9
 kernel/rcutree_plugin.h | 13
 kernel/sched/debug.c    |  4
 kernel/sched/fair.c     |  2
 kernel/sched/rt.c       |  2
 kernel/smp.c            | 13
 7 files changed, 44 insertions(+), 19 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 301079d06f24..7b6646a8c067 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -908,6 +908,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 }
 
 /*
+ * Initialize event state based on the perf_event_attr::disabled.
+ */
+static inline void perf_event__state_init(struct perf_event *event)
+{
+	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
+					      PERF_EVENT_STATE_INACTIVE;
+}
+
+/*
  * Called at perf_event creation and when events are attached/detached from a
  * group.
  */
@@ -6179,8 +6188,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	event->overflow_handler	= overflow_handler;
 	event->overflow_handler_context = context;
 
-	if (attr->disabled)
-		event->state = PERF_EVENT_STATE_OFF;
+	perf_event__state_init(event);
 
 	pmu = NULL;
 
@@ -6609,9 +6617,17 @@ SYSCALL_DEFINE5(perf_event_open,
 
 		mutex_lock(&gctx->mutex);
 		perf_remove_from_context(group_leader);
+
+		/*
+		 * Removing from the context ends up with disabled
+		 * event. What we want here is event in the initial
+		 * startup state, ready to be add into new context.
+		 */
+		perf_event__state_init(group_leader);
 		list_for_each_entry(sibling, &group_leader->sibling_list,
 				    group_entry) {
 			perf_remove_from_context(sibling);
+			perf_event__state_init(sibling);
 			put_ctx(gctx);
 		}
 		mutex_unlock(&gctx->mutex);
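
For context, a minimal userspace sketch of the behavior the new perf_event__state_init() helper preserves: an event opened with attr.disabled set starts in the OFF state and counts nothing until it is explicitly enabled. This is illustrative only, not part of the patch; it builds on Linux with: cc -o perf-demo perf-demo.c

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;		/* start in PERF_EVENT_STATE_OFF */
	attr.exclude_kernel = 1;	/* allow unprivileged use */
	attr.exclude_hv = 1;

	fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);	/* OFF -> counting */
	for (volatile int i = 0; i < 1000000; i++)
		;
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}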
diff --git a/kernel/printk.c b/kernel/printk.c
index 357f714ddd49..267ce780abe8 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -87,12 +87,6 @@ static DEFINE_SEMAPHORE(console_sem);
 struct console *console_drivers;
 EXPORT_SYMBOL_GPL(console_drivers);
 
-#ifdef CONFIG_LOCKDEP
-static struct lockdep_map console_lock_dep_map = {
-	.name = "console_lock"
-};
-#endif
-
 /*
  * This is used for debugging the mess that is the VT code by
  * keeping track if we have the console semaphore held. It's
@@ -1924,7 +1918,6 @@ void console_lock(void)
 		return;
 	console_locked = 1;
 	console_may_schedule = 1;
-	mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(console_lock);
 
@@ -1946,7 +1939,6 @@ int console_trylock(void)
 	}
 	console_locked = 1;
 	console_may_schedule = 0;
-	mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
 	return 1;
 }
 EXPORT_SYMBOL(console_trylock);
@@ -2107,7 +2099,6 @@ skip:
 		local_irq_restore(flags);
 	}
 	console_locked = 0;
-	mutex_release(&console_lock_dep_map, 1, _RET_IP_);
 
 	/* Release the exclusive_console once it is used */
 	if (unlikely(exclusive_console))
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f6e5ec2932b4..c1cc7e17ff9d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -40,8 +40,7 @@
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
 static bool have_rcu_nocb_mask;	    /* Was rcu_nocb_mask allocated? */
-static bool rcu_nocb_poll;	    /* Offload kthread are to poll. */
-module_param(rcu_nocb_poll, bool, 0444);
+static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
 static char __initdata nocb_buf[NR_CPUS * 5];
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
@@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str)
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
 
+static int __init parse_rcu_nocb_poll(char *arg)
+{
+	rcu_nocb_poll = 1;
+	return 0;
+}
+early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+
 /* Is the specified CPU a no-CBs CPU? */
 static bool is_nocb_cpu(int cpu)
 {
@@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg)
 	for (;;) {
 		/* If not polling, wait for next batch of callbacks. */
 		if (!rcu_nocb_poll)
-			wait_event(rdp->nocb_wq, rdp->nocb_head);
+			wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
 		list = ACCESS_ONCE(rdp->nocb_head);
 		if (!list) {
 			schedule_timeout_interruptible(1);
+			flush_signals(current);
 			continue;
 		}
 
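
The module_param() to early_param() switch above means rcu_nocb_poll is now parsed directly off the boot command line, before any module machinery exists, and takes no '=value' argument. A loose userspace analogue of that parsing (the command-line string and helper name are illustrative only, not kernel API):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool rcu_nocb_poll;

/* Stand-in for early_param(): recognize a bare flag on a boot-style
 * command line and set it during early startup. */
static void parse_early_params(const char *cmdline)
{
	if (strstr(cmdline, "rcu_nocb_poll"))
		rcu_nocb_poll = true;
}

int main(void)
{
	parse_early_params("root=/dev/sda1 rcu_nocbs=1-3 rcu_nocb_poll quiet");
	printf("rcu_nocb_poll = %d\n", rcu_nocb_poll);
	return 0;
}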
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 2cd3c1b4e582..7ae4c4c5420e 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -222,8 +222,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 			cfs_rq->runnable_load_avg);
 	SEQ_printf(m, "  .%-30s: %lld\n", "blocked_load_avg",
 			cfs_rq->blocked_load_avg);
-	SEQ_printf(m, "  .%-30s: %ld\n", "tg_load_avg",
-			atomic64_read(&cfs_rq->tg->load_avg));
+	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_avg",
+			(unsigned long long)atomic64_read(&cfs_rq->tg->load_avg));
 	SEQ_printf(m, "  .%-30s: %lld\n", "tg_load_contrib",
 			cfs_rq->tg_load_contrib);
 	SEQ_printf(m, "  .%-30s: %d\n", "tg_runnable_contrib",
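
The debug.c hunk is a format-string portability fix: "%ld" matches a 64-bit argument only on 64-bit targets, while an explicit cast plus "%lld" is correct everywhere. A standalone sketch of the same pattern:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t tg_load_avg = 1234567890123LL;	/* stand-in for atomic64_read() */

	/* On a 32-bit build, printf("%ld", tg_load_avg) would consume only
	 * half of the 64-bit value; the cast-plus-%lld form is portable,
	 * which is what the patch does with its (unsigned long long) cast. */
	printf("tg_load_avg: %lld\n", (long long)tg_load_avg);
	return 0;
}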
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5eea8707234a..81fa53643409 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2663,7 +2663,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-static void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void __maybe_unused unthrottle_offline_cfs_rqs(struct rq *rq)
 {
 	struct cfs_rq *cfs_rq;
 
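
__maybe_unused expands to the compiler's unused attribute; it keeps -Wunused-function quiet when configuration options compile out every caller of a function that is still built. A self-contained illustration (the helper name is made up; build with: cc -Wall -Wextra -c demo.c):

#define __maybe_unused __attribute__((__unused__))

/* Under some configurations nothing calls this, but it is still
 * compiled; without the attribute, -Wunused-function would warn. */
static void __maybe_unused unthrottle_helper(void)
{
}

int main(void)
{
	return 0;
}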
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 418feb01344e..4f02b2847357 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -566,7 +566,7 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
 static int do_balance_runtime(struct rt_rq *rt_rq)
 {
 	struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
-	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
+	struct root_domain *rd = rq_of_rt_rq(rt_rq)->rd;
 	int i, weight, more = 0;
 	u64 rt_period;
 
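
The rt.c change replaces a "whichever CPU is running this code" lookup with the runqueue that actually owns the rt_rq being balanced, so the correct root domain is used even when the function executes on an unrelated CPU. A toy sketch of the idea (all structs and names here are illustrative stand-ins, not the kernel's definitions):

#include <stdio.h>

struct root_domain { int id; };
struct rq { struct root_domain *rd; };
struct rt_rq { struct rq *owner; };

/* Like rq_of_rt_rq(): derive context from the object itself rather
 * than from global "current CPU" state. */
static struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->owner;
}

int main(void)
{
	struct root_domain rd = { .id = 42 };
	struct rq rq = { .rd = &rd };
	struct rt_rq rt_rq = { .owner = &rq };

	printf("balancing against root domain %d\n",
	       rq_of_rt_rq(&rt_rq)->rd->id);
	return 0;
}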
diff --git a/kernel/smp.c b/kernel/smp.c
index 29dd40a9f2f4..69f38bd98b42 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,6 +33,7 @@ struct call_function_data {
 	struct call_single_data	csd;
 	atomic_t		refs;
 	cpumask_var_t		cpumask;
+	cpumask_var_t		cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@ -56,6 +57,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
 				cpu_to_node(cpu)))
 			return notifier_from_errno(-ENOMEM);
+		if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
+				cpu_to_node(cpu)))
+			return notifier_from_errno(-ENOMEM);
 		break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -65,6 +69,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	case CPU_DEAD:
 	case CPU_DEAD_FROZEN:
 		free_cpumask_var(cfd->cpumask);
+		free_cpumask_var(cfd->cpumask_ipi);
 		break;
 #endif
 	};
@@ -526,6 +531,12 @@ void smp_call_function_many(const struct cpumask *mask,
 		return;
 	}
 
+	/*
+	 * After we put an entry into the list, data->cpumask
+	 * may be cleared again when another CPU sends another IPI for
+	 * a SMP function call, so data->cpumask will be zero.
+	 */
+	cpumask_copy(data->cpumask_ipi, data->cpumask);
 	raw_spin_lock_irqsave(&call_function.lock, flags);
 	/*
 	 * Place entry at the _HEAD_ of the list, so that any cpu still
@@ -549,7 +560,7 @@ void smp_call_function_many(const struct cpumask *mask,
 	smp_mb();
 
 	/* Send a message to all CPUs in the map */
-	arch_send_call_function_ipi_mask(data->cpumask_ipi);
 
 	/* Optionally wait for the CPUs to complete */
 	if (wait)
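
The smp.c fix is a snapshot-before-publish pattern: once the entry is visible on the shared list, other CPUs may clear data->cpumask, so the sender copies it into the private cpumask_ipi first and drives the IPIs from the copy. A loose userspace analogue (threads and names are illustrative; build with: cc -pthread demo.c):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define NCPUS 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char shared_mask[NCPUS];	/* cleared by responders, like data->cpumask */

static void *responder(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	memset(shared_mask, 0, sizeof(shared_mask));	/* request consumed */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	char ipi_mask[NCPUS];	/* private snapshot, like cpumask_ipi */
	pthread_t t;

	memset(shared_mask, 1, sizeof(shared_mask));

	/* Snapshot the shared mask before anyone can clear it. */
	pthread_mutex_lock(&lock);
	memcpy(ipi_mask, shared_mask, sizeof(ipi_mask));
	pthread_mutex_unlock(&lock);

	pthread_create(&t, NULL, responder, NULL);

	/* Even if the responder has already zeroed shared_mask, the
	 * snapshot still records exactly which CPUs to signal. */
	for (int i = 0; i < NCPUS; i++)
		if (ipi_mask[i])
			printf("IPI -> cpu %d\n", i);

	pthread_join(t, NULL);
	return 0;
}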