author	Tejun Heo <tj@kernel.org>	2009-06-24 02:13:48 -0400
committer	Tejun Heo <tj@kernel.org>	2009-06-24 02:13:48 -0400
commit	245b2e70eabd797932adb263a65da0bab3711753 (patch)
tree	30f0b790dadd2b70bf06e534abcf66a76e97b05a /drivers
parent	b9bf3121af348d9255f1c917830fe8c2df52efcb (diff)
percpu: clean up percpu variable definitions
Percpu variable definition is about to be updated such that all percpu
symbols including the static ones must be unique.  Update percpu
variable definitions accordingly.

* as,cfq: rename ioc_count uniquely
* cpufreq: rename cpu_dbs_info uniquely
* xen: move nesting_count out of xen_evtchn_do_upcall() and rename it
* mm: move ratelimits out of balance_dirty_pages_ratelimited_nr() and
  rename it
* ipv4,6: rename cookie_scratch uniquely
* x86 perf_counter: rename prev_left to pmc_prev_left, irq_entry to
  pmc_irq_entry and nmi_entry to pmc_nmi_entry
* perf_counter: rename disable_count to perf_disable_count
* ftrace: rename test_event_disable to ftrace_test_event_disable
* kmemleak: rename test_pointer to kmemleak_test_pointer
* mce: rename next_interval to mce_next_interval

[ Impact: percpu usage cleanups, no duplicate static percpu var names ]

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Jens Axboe <jens.axboe@oracle.com>
Cc: Dave Jones <davej@redhat.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: linux-mm <linux-mm@kvack.org>
Cc: David S. Miller <davem@davemloft.net>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Steven Rostedt <srostedt@redhat.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Andi Kleen <andi@firstfloor.org>
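For context, a minimal sketch of the naming rule the message describes.
DEFINE_PER_CPU() creates one instance of a variable per CPU, accessed
via per_cpu(); the struct body and the init_one() helper below are
illustrative stand-ins, not the drivers' actual definitions. The point
is that once all percpu symbols, including static ones, must be unique
kernel-wide, two files each defining a static per-cpu "cpu_dbs_info"
would clash, hence the cs_/od_/xed_ prefixes in this patch.

	#include <linux/percpu.h>

	struct example_dbs_info {	/* illustrative struct, not the driver's */
		int cpu;
	};

	/*
	 * Each DEFINE_PER_CPU() emits a symbol derived from its name, so a
	 * second file defining a static per-cpu variable with the same name
	 * would collide; a subsystem prefix keeps the name unique.
	 */
	static DEFINE_PER_CPU(struct example_dbs_info, cs_example_dbs_info);

	static void init_one(int cpu)
	{
		/* per_cpu(var, cpu) selects the given CPU's instance. */
		per_cpu(cs_example_dbs_info, cpu).cpu = cpu;
	}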
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	12
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	15
-rw-r--r--	drivers/xen/events.c	9
3 files changed, 19 insertions(+), 17 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7fc58af748b4..a7ef465c83b9 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -65,7 +65,7 @@ struct cpu_dbs_info_s {
 	int cpu;
 	unsigned int enable:1;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -138,7 +138,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 				     void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
 							freq->cpu);
 
 	struct cpufreq_policy *policy;
@@ -298,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
@@ -388,7 +388,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		cputime64_t cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -528,7 +528,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -548,7 +548,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1911d1729353..36f292a7bd01 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -73,7 +73,7 @@ struct cpu_dbs_info_s {
 	unsigned int enable:1,
 		     sample_type:1;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -151,7 +151,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 	unsigned int freq_hi, freq_lo;
 	unsigned int index = 0;
 	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+						   policy->cpu);
 
 	if (!dbs_info->freq_table) {
 		dbs_info->freq_lo = 0;
@@ -196,7 +197,7 @@ static void ondemand_powersave_bias_init(void)
 {
 	int i;
 	for_each_online_cpu(i) {
-		struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
+		struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, i);
 		dbs_info->freq_table = cpufreq_frequency_get_table(i);
 		dbs_info->freq_lo = 0;
 	}
@@ -297,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info = &per_cpu(od_cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
@@ -391,7 +392,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		unsigned int load, load_freq;
 		int freq_avg;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -548,7 +549,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -570,7 +571,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index ab581fa62681..7d2987e9b1bb 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -602,6 +602,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+
 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -617,7 +619,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
-	static DEFINE_PER_CPU(unsigned, nesting_count);
 	unsigned count;
 
 	exit_idle();
@@ -628,7 +629,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 	vcpu_info->evtchn_upcall_pending = 0;
 
-	if (__get_cpu_var(nesting_count)++)
+	if (__get_cpu_var(xed_nesting_count)++)
 		goto out;
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -653,8 +654,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 		BUG_ON(!irqs_disabled());
 
-		count = __get_cpu_var(nesting_count);
-		__get_cpu_var(nesting_count) = 0;
+		count = __get_cpu_var(xed_nesting_count);
+		__get_cpu_var(xed_nesting_count) = 0;
 	} while(count != 1);
 
 out:
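An aside on the events.c change: moving the per-cpu counter to file
scope leaves the re-entrancy guard's behavior unchanged. Below is a
minimal sketch of that pattern; process_pending_events() is a
hypothetical stand-in for the pending-event scan in
xen_evtchn_do_upcall(), and sketch_nesting_count is an illustrative
name, not the driver's.

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned, sketch_nesting_count);

	/* Hypothetical stand-in for the pending-event scan. */
	static void process_pending_events(void)
	{
	}

	static void upcall_sketch(void)
	{
		unsigned count;

		do {
			/* A nested upcall only bumps the counter and returns;
			 * the outermost invocation does the actual work. */
			if (__get_cpu_var(sketch_nesting_count)++)
				return;

			process_pending_events();

			/* If nested upcalls arrived meanwhile, count ends up
			 * greater than 1 and we loop to rescan for the events
			 * they signalled. */
			count = __get_cpu_var(sketch_nesting_count);
			__get_cpu_var(sketch_nesting_count) = 0;
		} while (count != 1);
	}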