Diffstat (limited to 'kernel')
-rw-r--r--  kernel/printk.c          |  9 ---------
-rw-r--r--  kernel/rcutree_plugin.h  | 13 ++++++++++---
-rw-r--r--  kernel/smp.c             | 13 ++++++++++++-
3 files changed, 22 insertions(+), 13 deletions(-)
diff --git a/kernel/printk.c b/kernel/printk.c
index 357f714ddd49..267ce780abe8 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -87,12 +87,6 @@ static DEFINE_SEMAPHORE(console_sem);
 struct console *console_drivers;
 EXPORT_SYMBOL_GPL(console_drivers);
 
-#ifdef CONFIG_LOCKDEP
-static struct lockdep_map console_lock_dep_map = {
-        .name = "console_lock"
-};
-#endif
-
 /*
  * This is used for debugging the mess that is the VT code by
  * keeping track if we have the console semaphore held. It's
@@ -1924,7 +1918,6 @@ void console_lock(void)
                 return;
         console_locked = 1;
         console_may_schedule = 1;
-        mutex_acquire(&console_lock_dep_map, 0, 0, _RET_IP_);
 }
 EXPORT_SYMBOL(console_lock);
 
@@ -1946,7 +1939,6 @@ int console_trylock(void)
         }
         console_locked = 1;
         console_may_schedule = 0;
-        mutex_acquire(&console_lock_dep_map, 0, 1, _RET_IP_);
         return 1;
 }
 EXPORT_SYMBOL(console_trylock);
@@ -2107,7 +2099,6 @@ skip:
                 local_irq_restore(flags);
         }
         console_locked = 0;
-        mutex_release(&console_lock_dep_map, 1, _RET_IP_);
 
         /* Release the exclusive_console once it is used */
         if (unlikely(exclusive_console))
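
The printk.c hunks revert the manual lockdep annotation of console_lock: the static lockdep_map and the mutex_acquire()/mutex_release() calls bracketing the semaphore are removed. The underlying pattern is the standard way to teach lockdep about a lock it cannot see directly. A minimal sketch of that pattern, mirroring the 3.8-era mutex_acquire(map, subclass, trylock, ip) and mutex_release(map, nested, ip) signatures; my_sem, my_lock_dep_map, and the wrapper functions are hypothetical names, not kernel API:

#include <linux/lockdep.h>
#include <linux/semaphore.h>

static DEFINE_SEMAPHORE(my_sem);

#ifdef CONFIG_LOCKDEP
static struct lockdep_map my_lock_dep_map = {
        .name = "my_lock"
};
#endif

static void my_lock(void)
{
        down(&my_sem);
        /* subclass 0, trylock 0: we blocked until the lock was ours */
        mutex_acquire(&my_lock_dep_map, 0, 0, _RET_IP_);
}

static int my_trylock(void)
{
        if (down_trylock(&my_sem))
                return 0;
        /* trylock 1: this acquisition could not have deadlocked */
        mutex_acquire(&my_lock_dep_map, 0, 1, _RET_IP_);
        return 1;
}

static void my_unlock(void)
{
        mutex_release(&my_lock_dep_map, 1, _RET_IP_);
        up(&my_sem);
}

When CONFIG_LOCKDEP is disabled, mutex_acquire()/mutex_release() compile away to nothing, so the annotation costs nothing in production builds.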
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index f6e5ec2932b4..c1cc7e17ff9d 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -40,8 +40,7 @@
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
 static bool have_rcu_nocb_mask;     /* Was rcu_nocb_mask allocated? */
-static bool rcu_nocb_poll;          /* Offload kthread are to poll. */
-module_param(rcu_nocb_poll, bool, 0444);
+static bool __read_mostly rcu_nocb_poll;    /* Offload kthread are to poll. */
 static char __initdata nocb_buf[NR_CPUS * 5];
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 
@@ -2159,6 +2158,13 @@ static int __init rcu_nocb_setup(char *str)
 }
 __setup("rcu_nocbs=", rcu_nocb_setup);
 
+static int __init parse_rcu_nocb_poll(char *arg)
+{
+        rcu_nocb_poll = 1;
+        return 0;
+}
+early_param("rcu_nocb_poll", parse_rcu_nocb_poll);
+
 /* Is the specified CPU a no-CPUs CPU? */
 static bool is_nocb_cpu(int cpu)
 {
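
Replacing module_param(rcu_nocb_poll, bool, 0444) with early_param() moves parsing of the flag from the module-parameter machinery (which, for built-in code, expects the option under the compilation unit's namespace and exposes a read-only /sys/module entry) to the early command-line pass, so a bare "rcu_nocb_poll" on the kernel command line takes effect before the offload kthreads start. A generic sketch of the idiom, with my_flag and parse_my_flag as hypothetical names:

#include <linux/init.h>
#include <linux/types.h>

static bool __read_mostly my_flag;

/*
 * Runs from parse_early_param() in start_kernel(), long before
 * initcalls.  "arg" is the text after "my_flag=" on the kernel
 * command line, or NULL when the option is given bare.
 */
static int __init parse_my_flag(char *arg)
{
        my_flag = true;
        return 0;       /* non-zero flags the option as malformed */
}
early_param("my_flag", parse_my_flag);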
@@ -2366,10 +2372,11 @@ static int rcu_nocb_kthread(void *arg)
         for (;;) {
                 /* If not polling, wait for next batch of callbacks. */
                 if (!rcu_nocb_poll)
-                        wait_event(rdp->nocb_wq, rdp->nocb_head);
+                        wait_event_interruptible(rdp->nocb_wq, rdp->nocb_head);
                 list = ACCESS_ONCE(rdp->nocb_head);
                 if (!list) {
                         schedule_timeout_interruptible(1);
+                        flush_signals(current);
                         continue;
                 }
 
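
The kthread hunk swaps wait_event() for wait_event_interruptible(). An uninterruptible sleep (TASK_UNINTERRUPTIBLE) counts toward the load average, so an idle offload kthread parked in wait_event() makes an otherwise idle system look loaded; sleeping interruptibly avoids that, and flush_signals() defensively discards anything that might have interrupted the sleep (kthreads normally ignore signals, but an interruptible sleep can still be ended by one). A standalone sketch of the idiom, all names hypothetical:

#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static struct list_head *my_work;       /* published by producers */

static int my_worker(void *unused)
{
        for (;;) {
                /*
                 * TASK_INTERRUPTIBLE sleep: an idle worker does not
                 * inflate loadavg the way a wait_event() sleeper does.
                 */
                wait_event_interruptible(my_wq, ACCESS_ONCE(my_work));
                if (!ACCESS_ONCE(my_work)) {
                        /* Woken without work (e.g. by a signal): retry. */
                        schedule_timeout_interruptible(1);
                        flush_signals(current);
                        continue;
                }
                /* ... detach and process my_work here ... */
        }
        return 0;
}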
diff --git a/kernel/smp.c b/kernel/smp.c
index 29dd40a9f2f4..69f38bd98b42 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -33,6 +33,7 @@ struct call_function_data {
         struct call_single_data csd;
         atomic_t                refs;
         cpumask_var_t           cpumask;
+        cpumask_var_t           cpumask_ipi;
 };
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
@@ -56,6 +57,9 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
                                 cpu_to_node(cpu)))
                         return notifier_from_errno(-ENOMEM);
+                if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL,
+                                cpu_to_node(cpu)))
+                        return notifier_from_errno(-ENOMEM);
                 break;
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -65,6 +69,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
                 free_cpumask_var(cfd->cpumask);
+                free_cpumask_var(cfd->cpumask_ipi);
                 break;
 #endif
         };
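
The new cpumask_ipi follows the same lifecycle as the existing cpumask: allocated in the CPU_UP_PREPARE leg of the hotplug notifier, freed on CPU_DEAD. The alloc/free pair matters because of CONFIG_CPUMASK_OFFSTACK: with it set, cpumask_var_t is a pointer that must be allocated (here node-local to the incoming CPU); without it, cpumask_var_t is an ordinary array and these calls are no-ops that always succeed. A minimal sketch of the pairing, with hypothetical names:

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/topology.h>

struct my_percpu_state {
        cpumask_var_t mask;
};

static int my_state_init(struct my_percpu_state *st, int cpu)
{
        /* Zeroed, allocated on the CPU's home NUMA node (if off-stack). */
        if (!zalloc_cpumask_var_node(&st->mask, GFP_KERNEL, cpu_to_node(cpu)))
                return -ENOMEM;
        return 0;
}

static void my_state_free(struct my_percpu_state *st)
{
        free_cpumask_var(st->mask);     /* no-op when masks are on-stack */
}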
@@ -526,6 +531,12 @@ void smp_call_function_many(const struct cpumask *mask,
                 return;
         }
 
+        /*
+         * After we put an entry into the list, data->cpumask
+         * may be cleared again when another CPU sends another IPI for
+         * a SMP function call, so data->cpumask will be zero.
+         */
+        cpumask_copy(data->cpumask_ipi, data->cpumask);
         raw_spin_lock_irqsave(&call_function.lock, flags);
         /*
          * Place entry at the _HEAD_ of the list, so that any cpu still
@@ -549,7 +560,7 @@ void smp_call_function_many(const struct cpumask *mask,
         smp_mb();
 
         /* Send a message to all CPUs in the map */
-        arch_send_call_function_ipi_mask(data->cpumask);
+        arch_send_call_function_ipi_mask(data->cpumask_ipi);
 
         /* Optionally wait for the CPUs to complete */
         if (wait)
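
The smp.c change closes a race in smp_call_function_many(). Once the entry is on the call_function queue, every target CPU can see it: a CPU that notices the entry while handling an IPI from some other caller runs the function and clears its own bit in data->cpumask. If that happens before the sender reaches arch_send_call_function_ipi_mask(), the sender IPIs a partially (possibly fully) cleared mask and some CPUs never get poked. Snapshotting the mask into cpumask_ipi before publishing the entry gives the sender a stable copy. A userspace analogy of snapshot-before-publish; everything here is hypothetical illustration, built with -pthread:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint mask;        /* shared: "CPUs" clear their own bit */
static atomic_int published;
static unsigned ipi_snapshot;   /* sender-private copy, taken early */

/* Plays the role of a CPU that answers before the IPI is sent. */
static void *early_responder(void *arg)
{
        unsigned bit = 1u << (uintptr_t)arg;

        while (!atomic_load(&published))
                ;       /* spin until the entry is visible */
        atomic_fetch_and(&mask, ~bit);  /* run callback, clear our bit */
        return NULL;
}

int main(void)
{
        pthread_t t;

        atomic_store(&mask, 0x3);               /* target "CPUs" 0 and 1 */
        ipi_snapshot = atomic_load(&mask);      /* snapshot BEFORE publish */
        pthread_create(&t, NULL, early_responder, (void *)(uintptr_t)1);
        atomic_store(&published, 1);            /* the list insertion */
        pthread_join(&t, NULL);

        /* The live mask may already be cleared; the snapshot still
         * names both targets, so nobody is skipped. */
        printf("IPI mask %#x, live mask now %#x\n",
               ipi_snapshot, atomic_load(&mask));
        return 0;
}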