author     Tejun Heo <tj@kernel.org>  2009-09-14 20:57:19 -0400
committer  Tejun Heo <tj@kernel.org>  2009-09-14 20:57:19 -0400
commit     5579fd7e6aed8860ea0c8e3f11897493153b10ad (patch)
tree       8f797ccd0f1a2c88f1605ae9e90b3ac17485de27 /kernel
parent     04a13c7c632e1fe04a5f6e6c83565d2559e37598 (diff)
parent     c2a7e818019f20a5cf7fb26a6eb59e212e6c0cd8 (diff)
Merge branch 'for-next' into for-linus
* pcpu_chunk_page_occupied() doesn't exist in for-next.
* pcpu_chunk_addr_search() updated to use raw_smp_processor_id().

Conflicts:
        mm/percpu.c
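Note on the second conflict-resolution point: smp_processor_id() fires a
debug warning when called from preemptible context, while
raw_smp_processor_id() skips that check and is the right choice when any
CPU's per-cpu slot will do. A minimal, hypothetical sketch of the pattern
(names are illustrative; this is not code from mm/percpu.c):

#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, example_slot);       /* hypothetical variable */

static int *example_any_cpu_slot(void)
{
        /*
         * The caller may be preemptible and does not care which CPU's
         * copy it gets, so the raw accessor avoids the "using
         * smp_processor_id() in preemptible code" warning.
         */
        return &per_cpu(example_slot, raw_smp_processor_id());
}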
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/module.c              | 6 +++---
-rw-r--r--  kernel/perf_counter.c        | 6 +++---
-rw-r--r--  kernel/sched.c               | 4 ++--
-rw-r--r--  kernel/trace/trace_events.c  | 6 +++---
4 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/module.c b/kernel/module.c
index fd1411403558..3a4db71ea494 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -364,7 +364,7 @@ EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
 
-#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 
 static void *percpu_modalloc(unsigned long size, unsigned long align,
                              const char *name)
@@ -389,7 +389,7 @@ static void percpu_modfree(void *freeme)
        free_percpu(freeme);
 }
 
-#else /* ... !CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 /* Number of blocks used and allocated. */
 static unsigned int pcpu_num_used, pcpu_num_allocated;
@@ -535,7 +535,7 @@ static int percpu_modinit(void)
 }
 __initcall(percpu_modinit);
 
-#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
 
 static unsigned int find_pcpusec(Elf_Ehdr *hdr,
                                  Elf_Shdr *sechdrs,
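Note: the module.c hunks only invert the configuration guard; the dynamic
per-cpu allocator path becomes the default and the old block allocator is
kept only under CONFIG_HAVE_LEGACY_PER_CPU_AREA. For readers unfamiliar with
the dynamic allocator that the default path builds on, here is a minimal,
hypothetical usage sketch; only alloc_percpu(), per_cpu_ptr(),
for_each_possible_cpu() and free_percpu() are real APIs, everything named
"example_*" is made up:

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

struct example_stats {                          /* hypothetical per-CPU payload */
        unsigned long packets;
};

static struct example_stats *example_stats;     /* one copy per CPU */

static int example_setup(void)
{
        int cpu;

        /* dynamically allocated per-cpu memory, released with
         * free_percpu() just as percpu_modfree() does above */
        example_stats = alloc_percpu(struct example_stats);
        if (!example_stats)
                return -ENOMEM;

        /* per_cpu_ptr() addresses a specific CPU's copy */
        for_each_possible_cpu(cpu)
                per_cpu_ptr(example_stats, cpu)->packets = 0;

        return 0;
}

static void example_teardown(void)
{
        free_percpu(example_stats);
}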
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 534e20d14d63..b0bdb36ccfc8 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -100,16 +100,16 @@ hw_perf_group_sched_in(struct perf_counter *group_leader,
 
 void __weak perf_counter_print_debug(void)     { }
 
-static DEFINE_PER_CPU(int, disable_count);
+static DEFINE_PER_CPU(int, perf_disable_count);
 
 void __perf_disable(void)
 {
-       __get_cpu_var(disable_count)++;
+       __get_cpu_var(perf_disable_count)++;
 }
 
 bool __perf_enable(void)
 {
-       return !--__get_cpu_var(disable_count);
+       return !--__get_cpu_var(perf_disable_count);
 }
 
 void perf_disable(void)
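The perf_counter.c hunk is a pure rename: with per-cpu variables now living
in a single global namespace, the generic name "disable_count" gets a
"perf_" prefix to avoid collisions, and the nesting logic is untouched. As a
reference point only, the wrappers below this hunk are assumed to consume
the counter roughly like this (an assumption about code outside this diff;
check kernel/perf_counter.c itself):

/* Assumed shape of the wrappers; not part of this diff. */
void perf_disable(void)
{
        __perf_disable();       /* bump this CPU's nesting count */
        hw_perf_disable();      /* weak arch hook, called at every level */
}

void perf_enable(void)
{
        if (__perf_enable())    /* true when the count drops back to zero */
                hw_perf_enable();
}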
diff --git a/kernel/sched.c b/kernel/sched.c
index 1b59e265273b..d3d7e7694da6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -318,12 +318,12 @@ struct task_group root_task_group;
 /* Default task group's sched entity on each cpu */
 static DEFINE_PER_CPU(struct sched_entity, init_sched_entity);
 /* Default task group's cfs_rq on each cpu */
-static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cfs_rq, init_cfs_rq);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 #ifdef CONFIG_RT_GROUP_SCHED
 static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
-static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rt_rq, init_rt_rq);
 #endif /* CONFIG_RT_GROUP_SCHED */
 #else /* !CONFIG_USER_SCHED */
 #define root_task_group init_task_group
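The sched.c hunks swap an open-coded ____cacheline_aligned_in_smp attribute
for DEFINE_PER_CPU_SHARED_ALIGNED, which keeps the same cacheline alignment
but collects such variables into a dedicated per-cpu subsection so the
alignment padding does not fragment the ordinary per-cpu area. A rough
sketch of the effect follows; the macro name and section string here are
illustrative, the real definition lives in include/linux/percpu-defs.h and
differs between kernel versions:

/* Rough sketch only, not the real definition. */
#define DEFINE_PER_CPU_SHARED_ALIGNED_SKETCH(type, name)               \
        __attribute__((__section__(".data.percpu.shared_aligned")))    \
        __typeof__(type) name ____cacheline_aligned_in_smp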
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e75276a49cf5..0db0a41e0079 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1334,7 +1334,7 @@ static __init void event_trace_self_tests(void)
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1350,7 +1350,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
        pc = preempt_count();
        resched = ftrace_preempt_disable();
        cpu = raw_smp_processor_id();
-       disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+       disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
        if (disabled != 1)
                goto out;
@@ -1368,7 +1368,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
        trace_nowake_buffer_unlock_commit(event, flags, pc);
 
  out:
-       atomic_dec(&per_cpu(test_event_disable, cpu));
+       atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
        ftrace_preempt_enable(resched);
 }
 
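The trace_events.c change is the same kind of rename (an "ftrace_" prefix
for a formerly file-local per-cpu name), but the surrounding code also shows
a useful idiom: a per-cpu atomic_t used as a recursion guard, so that
functions called while recording a trace event cannot re-enter the callback
on the same CPU. A stripped-down, hypothetical sketch of that idiom (the
real callback uses ftrace_preempt_disable()/ftrace_preempt_enable() rather
than plain preempt_disable()/preempt_enable()):

static DEFINE_PER_CPU(atomic_t, example_guard);         /* hypothetical */

static void example_callback(void)
{
        int cpu;

        preempt_disable();
        cpu = raw_smp_processor_id();

        /* only the first nesting level on this CPU does any work */
        if (atomic_inc_return(&per_cpu(example_guard, cpu)) != 1)
                goto out;

        /* ... record the event ... */

 out:
        atomic_dec(&per_cpu(example_guard, cpu));
        preempt_enable();
}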