author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-15 12:39:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-15 12:39:44 -0400
commit		ada3fa15057205b7d3f727bba5cd26b5912e350f (patch)
tree		60962fc9e4021b92f484d1a58e72cd3906d4f3db /drivers
parent		2f82af08fcc7dc01a7e98a49a5995a77e32a2925 (diff)
parent		5579fd7e6aed8860ea0c8e3f11897493153b10ad (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu: (46 commits)
  powerpc64: convert to dynamic percpu allocator
  sparc64: use embedding percpu first chunk allocator
  percpu: kill lpage first chunk allocator
  x86,percpu: use embedding for 64bit NUMA and page for 32bit NUMA
  percpu: update embedding first chunk allocator to handle sparse units
  percpu: use group information to allocate vmap areas sparsely
  vmalloc: implement pcpu_get_vm_areas()
  vmalloc: separate out insert_vmalloc_vm()
  percpu: add chunk->base_addr
  percpu: add pcpu_unit_offsets[]
  percpu: introduce pcpu_alloc_info and pcpu_group_info
  percpu: move pcpu_lpage_build_unit_map() and pcpul_lpage_dump_cfg() upward
  percpu: add @align to pcpu_fc_alloc_fn_t
  percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()
  percpu: drop @static_size from first chunk allocators
  percpu: generalize first chunk allocator selection
  percpu: build first chunk allocators selectively
  percpu: rename 4k first chunk allocator to page
  percpu: improve boot messages
  percpu: fix pcpu_reclaim() locking
  ...

Fix trivial conflict as per Tejun Heo in kernel/sched.c
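For orientation, here is a minimal sketch of the per-CPU variable interface this merge reworks. The variable my_counter and both functions are illustrative, not part of the patch; the accessors shown are the ones used throughout the diffs below (this era's API, before the this_cpu_* ops):

#include <linux/percpu.h>

/* One instance of the counter exists for each possible CPU. */
static DEFINE_PER_CPU(unsigned long, my_counter);

static void bump_this_cpu(void)
{
        /* __get_cpu_var() reaches the current CPU's instance;
         * the caller must keep preemption disabled. */
        __get_cpu_var(my_counter)++;
}

static unsigned long read_cpu(int cpu)
{
        /* per_cpu() indexes a specific CPU's instance by number. */
        return per_cpu(my_counter, cpu);
}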
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/cpufreq/cpufreq_conservative.c	12
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	15
-rw-r--r--	drivers/xen/events.c	13
3 files changed, 21 insertions(+), 19 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index bdea7e2f94ba..bc33ddc9c97c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -71,7 +71,7 @@ struct cpu_dbs_info_s {
  */
 	struct mutex timer_mutex;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -137,7 +137,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 			     void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
 							freq->cpu);
 
 	struct cpufreq_policy *policy;
@@ -297,7 +297,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
@@ -387,7 +387,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		cputime64_t cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -538,7 +538,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
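The only change in this file is the rename of the governor's per-CPU state from cpu_dbs_info to cs_cpu_dbs_info (the ondemand governor below gets od_cpu_dbs_info). The likely motivation, given the percpu series being merged: per-CPU symbols now share one kernel-wide namespace even when declared static, so two governors defining identically named per-CPU variables would collide. A sketch of the pattern, with a stand-in struct; the two definitions are shown together only for illustration, each actually lives in its own file:

#include <linux/percpu.h>

struct cpu_dbs_info_s { int placeholder; };	/* stand-in for the real struct */

/*
 * Both governors used to contain:
 *
 *	static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 *
 * Ordinary statics with the same name can coexist across translation
 * units, but per-CPU symbols must be unique kernel-wide under the
 * converted allocator, so each governor now prefixes its copy:
 */
static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);	/* conservative */
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);	/* ondemand */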
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d6ba14276bb1..d7a528c80de8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -78,7 +78,7 @@ struct cpu_dbs_info_s {
  */
 	struct mutex timer_mutex;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -149,7 +149,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 	unsigned int freq_hi, freq_lo;
 	unsigned int index = 0;
 	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+						   policy->cpu);
 
 	if (!dbs_info->freq_table) {
 		dbs_info->freq_lo = 0;
@@ -192,7 +193,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 
 static void ondemand_powersave_bias_init_cpu(int cpu)
 {
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
 	dbs_info->freq_lo = 0;
 }
@@ -297,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info = &per_cpu(od_cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
@@ -388,7 +389,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		unsigned int load, load_freq;
 		int freq_avg;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -535,7 +536,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -553,7 +554,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		dbs_enable++;
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index abad71b1632b..2f57276e87a2 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -47,10 +47,10 @@
 static DEFINE_SPINLOCK(irq_mapping_update_lock);
 
 /* IRQ <-> VIRQ mapping. */
-static DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
+static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
 
 /* IRQ <-> IPI mapping */
-static DEFINE_PER_CPU(int, ipi_to_irq[XEN_NR_IPIS]) = {[0 ... XEN_NR_IPIS-1] = -1};
+static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};
 
 /* Interrupt types. */
 enum xen_irq_type {
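This hunk moves the array bound from the variable name onto the element type. Both spellings declare a per-CPU array of ints, but the type-first form appears to be the convention the percpu series standardizes on, keeping the macro's second argument a bare identifier. A sketch with an illustrative bound:

#include <linux/percpu.h>

#define NR_SLOTS 8	/* illustrative; stands in for NR_VIRQS/XEN_NR_IPIS */

/* Old spelling: the subscript rides on the identifier.
 *
 *	static DEFINE_PER_CPU(int, my_map[NR_SLOTS]);
 *
 * New spelling: the first argument is the complete array type and the
 * name stays a plain identifier.  Every slot starts out as -1 ("no
 * irq"), using the same designated range initializer as the patch. */
static DEFINE_PER_CPU(int [NR_SLOTS], my_map) = { [0 ... NR_SLOTS-1] = -1 };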
@@ -602,6 +602,8 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static DEFINE_PER_CPU(unsigned, xed_nesting_count);
+
 /*
  * Search the CPUs pending events bitmasks.  For each one found, map
  * the event number to an irq, and feed it into do_IRQ() for
@@ -617,7 +619,6 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct shared_info *s = HYPERVISOR_shared_info;
 	struct vcpu_info *vcpu_info = __get_cpu_var(xen_vcpu);
-	static DEFINE_PER_CPU(unsigned, nesting_count);
 	unsigned count;
 
 	exit_idle();
@@ -628,7 +629,7 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 	vcpu_info->evtchn_upcall_pending = 0;
 
-	if (__get_cpu_var(nesting_count)++)
+	if (__get_cpu_var(xed_nesting_count)++)
 		goto out;
 
 #ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
@@ -653,8 +654,8 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
 
 		BUG_ON(!irqs_disabled());
 
-		count = __get_cpu_var(nesting_count);
-		__get_cpu_var(nesting_count) = 0;
+		count = __get_cpu_var(xed_nesting_count);
+		__get_cpu_var(xed_nesting_count) = 0;
 	} while(count != 1);
 
 out:
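The remaining hunks hoist a per-CPU definition out of a function body: nesting_count was a function-local static inside xen_evtchn_do_upcall(), and it becomes the file-scope xed_nesting_count, the xed_ prefix keeping it unique in the per-CPU namespace. A hedged sketch of the resulting re-entrancy guard, mirroring the loop visible in the hunks above; the function name is an illustrative stand-in:

#include <linux/percpu.h>

/* Per-CPU definitions are placed in a dedicated data section, so they
 * belong at file scope; a subsystem prefix ("xed_" in the patch) avoids
 * clashes now that even static per-CPU symbols share one namespace. */
static DEFINE_PER_CPU(unsigned, xed_nesting_count);

static void handle_upcall(void)	/* illustrative stand-in */
{
        unsigned count;

        do {
                /* Nested invocations on this CPU just bump the counter
                 * and bail; only the outermost pass does the work. */
                if (__get_cpu_var(xed_nesting_count)++)
                        return;

                /* ... scan and dispatch pending event channels ... */

                /* If the counter moved past 1 while we worked, events
                 * arrived re-entrantly: reset and loop to pick them up. */
                count = __get_cpu_var(xed_nesting_count);
                __get_cpu_var(xed_nesting_count) = 0;
        } while (count != 1);
}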