author    Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 12:39:44 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2009-09-15 12:39:44 -0400
commit    ada3fa15057205b7d3f727bba5cd26b5912e350f (patch)
tree      60962fc9e4021b92f484d1a58e72cd3906d4f3db /kernel
parent    2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff)
parent    5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
  powerpc64: convert to dynamic percpu allocator
  sparc64: use embedding percpu first chunk allocator
  percpu: kill lpage first chunk allocator
  x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
  percpu: update embedding first chunk allocator to handle sparse units
  percpu: use group information to allocate vmap areas sparsely
  vmalloc: implement pcpu_get_vm_areas()
  vmalloc: separate out insert_vmalloc_vm()
  percpu: add chunk->base_addr
  percpu: add pcpu_unit_offsets[]
  percpu: introduce pcpu_alloc_info and pcpu_group_info
  percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
  percpu: add @align to pcpu_fc_alloc_fn_t
  percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
  percpu: drop @static_size from first chunk allocators
  percpu: generalize first chunk allocator selection
  percpu: build first chunk allocators selectively
  percpu: rename 4k first chunk allocator to page
  percpu: improve boot messages
  percpu: fix pcpu_reclaim() locking
  ...

Fix trivial conflict as pointed out by Tejun Heo in kernel/sched.c
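The headline change in this series is making the dynamic percpu allocator the default across architectures. As a reader's aid, here is a minimal sketch of how kernel code uses that allocator; the struct and function names are hypothetical, but alloc_percpu(), per_cpu_ptr() and free_percpu() are the real API exercised by the diffs below.

#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU statistics; every name below is illustrative. */
struct my_stats {
	unsigned long packets;
	unsigned long bytes;
};

static struct my_stats *stats;	/* points into dynamically allocated percpu space */

static int my_stats_init(void)
{
	int cpu;

	stats = alloc_percpu(struct my_stats);	/* one zeroed instance per CPU */
	if (!stats)
		return -ENOMEM;

	/* per_cpu_ptr() yields a given CPU's instance; the memory is
	 * already zeroed, so this loop exists only to show the accessor. */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(stats, cpu)->packets = 0;

	return 0;
}

static void my_stats_exit(void)
{
	free_percpu(stats);
}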
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/module.c              |  6
-rw-r--r--  kernel/perf_counter.c        |  6
-rw-r--r--  kernel/sched.c               |  4
-rw-r--r--  kernel/trace/trace_events.c  |  6
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index 46580edff0cb..05ce49ced8f6 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -369,7 +369,7 @@ EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
 
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 
 static void *percpu_modalloc(unsigned long size, unsigned long align,
 			     const char *name)
@@ -394,7 +394,7 @@ static void percpu_modfree(void *freeme)
 	free_percpu(freeme);
 }
 
-#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 /* Number of blocks used and allocated. */
 static unsigned int pcpu_num_used, pcpu_num_allocated;
@@ -540,7 +540,7 @@ static int percpu_modinit(void)
 }
 __initcall(percpu_modinit);
 
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 static unsigned int find_pcpusec(Elf_Ehdr *hdr,
 				 Elf_Shdr *sechdrs,
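With the #ifdef flipped to #ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA, the dynamic-allocator variants of percpu_modalloc()/percpu_modfree() become the default, and only architectures still selecting the legacy allocator get the block-based fallback. A rough sketch of that dynamic path, assuming it is a thin wrapper over __alloc_percpu()/free_percpu() (the warning text and the exact body are illustrative, not quoted from the tree):

static void *percpu_modalloc(unsigned long size, unsigned long align,
			     const char *name)
{
	void *ptr;

	/* Percpu chunks cannot honor alignment above PAGE_SIZE. */
	if (align > PAGE_SIZE) {
		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
		       name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	ptr = __alloc_percpu(size, align);
	if (!ptr)
		printk(KERN_WARNING
		       "Could not allocate %lu bytes percpu data\n", size);
	return ptr;
}

static void percpu_modfree(void *freeme)
{
	free_percpu(freeme);
}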
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index e7f60f8e31ed..8cb94a52d1bb 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -106,16 +106,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,
 
 void __weak perf_counter_print_debug(void)	{ }
 
-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);
 
 void __perf_disable(void)
 {
-	__get_cpu_var(disable_count)++;
+	__get_cpu_var(perf_disable_count)++;
 }
 
 bool __perf_enable(void)
 {
-	return !--__get_cpu_var(disable_count);
+	return !--__get_cpu_var(perf_disable_count);
 }
 
 void perf_disable(void)
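The disable_count -> perf_disable_count rename (and the matching ftrace rename further down) is fallout from the percpu work: with the updated percpu infrastructure, percpu symbols, including static ones, are expected to be globally unique on some configurations, so bare names like disable_count invite cross-subsystem collisions. A hypothetical sketch of the problem the subsystem prefix avoids (file and variable names invented for illustration):

/* foo.c */
static DEFINE_PER_CPU(int, disable_count);	/* emits a symbol in the percpu section */

/* bar.c */
static DEFINE_PER_CPU(int, disable_count);	/* same percpu symbol name: on
						 * configurations that require
						 * globally unique percpu symbols
						 * this clashes despite 'static' */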
diff --git a/kernel/sched.c b/kernel/sched.c
index e27a53685ed9..d9db3fb17573 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -295,12 +295,12 @@ struct task_group root_task_group;
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_tg_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_tg_cfs_rq);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
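DEFINE_PER_CPU_SHARED_ALIGNED replaces the open-coded ____cacheline_aligned_in_smp attribute: besides aligning the variable, the macro (as I understand it in this era) also places it in a dedicated percpu subsection, so cacheline-aligned variables are grouped together instead of being padded individually among unaligned percpu data. Illustrative usage, with hypothetical names:

#include <linux/percpu.h>

/* Hypothetical example: a per-CPU structure that remote CPUs also
 * write to. DEFINE_PER_CPU_SHARED_ALIGNED cacheline-aligns it so a
 * busy neighbor's percpu variable cannot share its cache line
 * (false sharing). */
struct xfer_queue {
	unsigned long head;
	unsigned long tail;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct xfer_queue, xfer_queues);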
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 78b1ed230177..97e2c4d2e9eb 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1432,7 +1432,7 @@ static __init void event_trace_self_tests(void)
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1449,7 +1449,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	pc = preempt_count();
 	resched = ftrace_preempt_disable();
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
 	if (disabled != 1)
 		goto out;
@@ -1468,7 +1468,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
-	atomic_dec(&per_cpu(test_event_disable, cpu));
+	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	ftrace_preempt_enable(resched);
 }
 
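The trace_events.c hunks all touch the same construct: a per-CPU atomic depth counter used as a recursion guard, so a function-tracer callback that indirectly retriggers itself does no work on reentry. A minimal sketch of the pattern with hypothetical names (the atomic and percpu accessors match those in the diff; preempt_disable() stands in for the original's ftrace_preempt_disable()):

#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(atomic_t, my_callback_depth);	/* hypothetical */

static void my_callback(void)
{
	int cpu;

	preempt_disable();	/* pin to this CPU while the guard is held */
	cpu = smp_processor_id();

	/* Only the outermost entry on this CPU proceeds. */
	if (atomic_inc_return(&per_cpu(my_callback_depth, cpu)) != 1)
		goto out;

	/* ... real work, which may indirectly re-enter my_callback() ... */

out:
	atomic_dec(&per_cpu(my_callback_depth, cpu));
	preempt_enable();
}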