Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpu.c			15
-rw-r--r--	kernel/gcov/Kconfig		 2
-rw-r--r--	kernel/module.c			 6
-rw-r--r--	kernel/perf_counter.c		 7
-rw-r--r--	kernel/sched.c			 4
-rw-r--r--	kernel/smp.c			40
-rw-r--r--	kernel/sysctl.c			 4
-rw-r--r--	kernel/trace/trace_events.c	 6
8 files changed, 66 insertions, 18 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 8ce10043e4ac..6ba0f1ecb212 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -401,6 +401,7 @@ int disable_nonboot_cpus(void)
 			break;
 		}
 	}
+
 	if (!error) {
 		BUG_ON(num_online_cpus() > 1);
 		/* Make sure the CPUs won't be enabled by someone else */
@@ -413,6 +414,14 @@ int disable_nonboot_cpus(void)
 	return error;
 }
 
+void __weak arch_enable_nonboot_cpus_begin(void)
+{
+}
+
+void __weak arch_enable_nonboot_cpus_end(void)
+{
+}
+
 void __ref enable_nonboot_cpus(void)
 {
 	int cpu, error;
@@ -424,6 +433,9 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
+
+	arch_enable_nonboot_cpus_begin();
+
 	for_each_cpu(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
@@ -432,6 +444,9 @@ void __ref enable_nonboot_cpus(void)
 		}
 		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
 	}
+
+	arch_enable_nonboot_cpus_end();
+
 	cpumask_clear(frozen_cpus);
 out:
 	cpu_maps_update_done();
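
Note on the new hooks: arch_enable_nonboot_cpus_begin() and arch_enable_nonboot_cpus_end() are empty __weak defaults, so an architecture can bracket the resume-time CPU bring-up loop with its own setup and teardown without touching generic code. A minimal sketch of such an override follows; the file path, flag name, and function bodies are illustrative assumptions, not taken from this patch.

/* arch/<arch>/kernel/smpboot.c (hypothetical) */
static bool defer_percpu_sync;	/* illustrative flag, not a real kernel symbol */

void arch_enable_nonboot_cpus_begin(void)
{
	/* Called once, before the first _cpu_up() in enable_nonboot_cpus():
	 * postpone expensive per-CPU synchronization until all CPUs are up. */
	defer_percpu_sync = true;
}

void arch_enable_nonboot_cpus_end(void)
{
	/* Called once, after the bring-up loop: perform the deferred work. */
	defer_percpu_sync = false;
}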
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index 22e9dcfaa3d3..654efd09f6a9 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -34,7 +34,7 @@ config GCOV_KERNEL
 config GCOV_PROFILE_ALL
 	bool "Profile entire Kernel"
 	depends on GCOV_KERNEL
-	depends on S390 || X86
+	depends on S390 || X86 || (PPC && EXPERIMENTAL)
 	default n
 	---help---
 	This options activates profiling for the entire kernel.
diff --git a/kernel/module.c b/kernel/module.c
index 46580edff0cb..05ce49ced8f6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
 
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 
 static void *percpu_modalloc(unsigned long size, unsigned long align,
 			     const char *name)
@@ -394,7 +394,7 @@ static void percpu_modfree(void *freeme)
 	free_percpu(freeme);
 }
 
-#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 /* Number of blocks used and allocated. */
 static unsigned int pcpu_num_used, pcpu_num_allocated;
@@ -540,7 +540,7 @@ static int percpu_modinit(void)
 }
 __initcall(percpu_modinit);
 
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 static unsigned int find_pcpusec(Elf_Ehdr *hdr,
 				 Elf_Shdr *sechdrs,
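
The module.c hunks only rename and invert the config test: the dynamic per-cpu allocator becomes the default path, and CONFIG_HAVE_LEGACY_PER_CPU_AREA now marks architectures still on the old static area, so the former positive #ifdef turns into an #ifndef. A rough skeleton of the resulting structure, bodies elided, as a reading aid only:

#ifdef CONFIG_SMP

#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
/* Default: allocate module per-cpu data from the dynamic allocator. */
static void *percpu_modalloc(unsigned long size, unsigned long align,
			     const char *name)
{
	/* ... __alloc_percpu()-based allocation ... */
}

static void percpu_modfree(void *freeme)
{
	free_percpu(freeme);
}

#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */

/* Legacy: module.c carves blocks out of a static per-cpu area itself
 * (pcpu_num_used, pcpu_num_allocated, percpu_modinit(), ...). */

#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */

#endif /* CONFIG_SMP */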
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e0d91fdf0c3c..8cb94a52d1bb 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -106,16 +106,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,
 
 void __weak perf_counter_print_debug(void)	{ }
 
-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);
 
 void __perf_disable(void)
 {
-	__get_cpu_var(disable_count)++;
+	__get_cpu_var(perf_disable_count)++;
 }
 
 bool __perf_enable(void)
 {
-	return !--__get_cpu_var(disable_count);
+	return !--__get_cpu_var(perf_disable_count);
 }
 
 void perf_disable(void)
@@ -4215,6 +4215,7 @@ static int perf_copy_attr(struct perf_counter_attr __user *uattr,
 			if (val)
 				goto err_size;
 		}
+		size = sizeof(*attr);
 	}
 
 	ret = copy_from_user(attr, uattr, size);
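
Two independent fixes here. The DEFINE_PER_CPU rename gives the counter a subsystem prefix (perf_disable_count) so the per-cpu symbol cannot collide with an identically named per-cpu variable defined elsewhere in the kernel. The perf_copy_attr() hunk restores the clamp that the forward-compatibility check depends on: after verifying that every byte beyond the kernel's struct perf_counter_attr is zero, size must be reset to sizeof(*attr) so the subsequent copy_from_user() cannot overrun the kernel-side structure. A condensed sketch of that pattern, with error handling trimmed and local variable names paraphrased:

	if (size > sizeof(*attr)) {
		unsigned char __user *addr = (void __user *)uattr + sizeof(*attr);
		unsigned char __user *end  = (void __user *)uattr + size;
		unsigned char val;

		/* A newer userspace may hand in a larger struct; accept it
		 * only if all bytes we do not understand are zero. */
		for (; addr < end; addr++) {
			if (get_user(val, addr))
				return -EFAULT;
			if (val)
				goto err_size;	/* unknown non-zero field */
		}
		size = sizeof(*attr);	/* the fix: never copy more than we know */
	}

	ret = copy_from_user(attr, uattr, size);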
diff --git a/kernel/sched.c b/kernel/sched.c
index e27a53685ed9..d9db3fb17573 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -295,12 +295,12 @@ struct task_group root_task_group;
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
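
DEFINE_PER_CPU_SHARED_ALIGNED() is the dedicated helper for per-cpu data that should be cacheline aligned on SMP; using it instead of bolting ____cacheline_aligned_in_smp onto a plain DEFINE_PER_CPU() also lets the variable be grouped into the shared-aligned per-cpu section where the architecture provides one. Before/after for the cfs_rq case, purely as illustration:

/* before */
static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;

/* after: alignment (and, where configured, section placement) comes from the macro */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);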
diff --git a/kernel/smp.c b/kernel/smp.c
index 94188b8ecc33..8e218500ab14 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -177,6 +177,11 @@ void generic_smp_call_function_interrupt(void)
 	int cpu = get_cpu();
 
 	/*
+	 * Shouldn't receive this interrupt on a cpu that is not yet online.
+	 */
+	WARN_ON_ONCE(!cpu_online(cpu));
+
+	/*
 	 * Ensure entry is visible on call_function_queue after we have
 	 * entered the IPI. See comment in smp_call_function_many.
 	 * If we don't have this, then we may miss an entry on the list
@@ -230,6 +235,11 @@ void generic_smp_call_function_single_interrupt(void)
 	unsigned int data_flags;
 	LIST_HEAD(list);
 
+	/*
+	 * Shouldn't receive this interrupt on a cpu that is not yet online.
+	 */
+	WARN_ON_ONCE(!cpu_online(smp_processor_id()));
+
 	spin_lock(&q->lock);
 	list_replace_init(&q->list, &list);
 	spin_unlock(&q->lock);
@@ -285,8 +295,14 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 	 */
 	this_cpu = get_cpu();
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+		     && !oops_in_progress);
 
 	if (cpu == this_cpu) {
 		local_irq_save(flags);
@@ -329,8 +345,14 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 {
 	csd_lock(data);
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(wait && irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
+		     && !oops_in_progress);
 
 	generic_exec_single(cpu, data, wait);
 }
@@ -365,8 +387,14 @@ void smp_call_function_many(const struct cpumask *mask,
 	unsigned long flags;
 	int cpu, next_cpu, this_cpu = smp_processor_id();
 
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON_ONCE(irqs_disabled() && !oops_in_progress);
+	/*
+	 * Can deadlock when called with interrupts disabled.
+	 * We allow cpu's that are not yet online though, as no one else can
+	 * send smp call function interrupt to this cpu and as such deadlocks
+	 * can't happen.
+	 */
+	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
+		     && !oops_in_progress);
 
 	/* So, what's a CPU they want? Ignoring this one. */
 	cpu = cpumask_first_and(mask, cpu_online_mask);
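
All three relaxed WARN_ON_ONCE() checks follow the reasoning spelled out in the new comments: the deadlock (spinning on a call-function request with interrupts off while the other CPU tries to IPI us back) can only involve a CPU that other CPUs are able to send call-function IPIs to, so a CPU not yet in cpu_online_mask is exempt. A sketch of the kind of early bring-up caller this is meant to allow; the functions below are illustrative, not part of this patch:

/* A secondary CPU still coming up (not yet marked online) may call an
 * online CPU with interrupts disabled: nobody can queue call-function
 * IPIs for it yet, so the classic deadlock cannot occur and the relaxed
 * WARN_ON_ONCE() stays silent. */
static void early_bringup_notify(void *info)
{
	/* runs on the boot CPU */
}

static void __cpuinit secondary_cpu_announce(int boot_cpu)
{
	smp_call_function_single(boot_cpu, early_bringup_notify, NULL, 1);
}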
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 6bb59f707402..1a631ba684a4 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -91,7 +91,9 @@ extern int sysctl_nr_trim_pages;
 #ifdef CONFIG_RCU_TORTURE_TEST
 extern int rcutorture_runnable;
 #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */
+#ifdef CONFIG_BLOCK
 extern int blk_iopoll_enabled;
+#endif
 
 /* Constants used for minimum and maximum */
 #ifdef CONFIG_DETECT_SOFTLOCKUP
@@ -998,6 +1000,7 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= &proc_dointvec,
 	},
 #endif
+#ifdef CONFIG_BLOCK
 	{
 		.ctl_name	= CTL_UNNUMBERED,
 		.procname	= "blk_iopoll",
@@ -1006,6 +1009,7 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec,
 	},
+#endif
 /*
  * NOTE: do not add new entries to this table unless you have read
  * Documentation/sysctl/ctl_unnumbered.txt
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 78b1ed230177..97e2c4d2e9eb 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1432,7 +1432,7 @@ static __init void event_trace_self_tests(void)
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1449,7 +1449,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	pc = preempt_count();
 	resched = ftrace_preempt_disable();
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
 	if (disabled != 1)
 		goto out;
@@ -1468,7 +1468,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
-	atomic_dec(&per_cpu(test_event_disable, cpu));
+	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	ftrace_preempt_enable(resched);
 }
 
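
The trace_events.c change mirrors the perf_counter.c one: the per-cpu variable gets a subsystem prefix (ftrace_test_event_disable) to avoid symbol clashes. The surrounding code also illustrates the per-cpu recursion guard used by the self-test callback: only the outermost entry on a CPU (counter transition to 1) records anything. A stripped-down sketch of that guard, with the event recording and preemption handling elided and the callback name made up for illustration:

static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

static void traced_callback(unsigned long ip, unsigned long parent_ip)
{
	int cpu, disabled;

	cpu = raw_smp_processor_id();
	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));

	/* Re-entrant calls (count > 1) from within the traced section
	 * bail out; only the outermost invocation does the work. */
	if (disabled != 1)
		goto out;

	/* ... reserve, fill and commit the trace event ... */

 out:
	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
}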