about summary refs log tree commit diff stats
path: root/drivers/cpufreq/cpufreq.c
diff options
context:
space:
mode:
author: Viresh Kumar <viresh.kumar@linaro.org> 2013-02-04 06:38:51 -0500
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 2013-02-08 19:18:34 -0500
commit3361b7b173341fdaa85153e1b322099949c9f8c8 (patch)
treee40cad5ff71df10ff9ce2cb629ae461f2defd14e /drivers/cpufreq/cpufreq.c
parent1dd538f072f0b7ba327613253d41ebb329c6d490 (diff)
cpufreq: Don't check cpu_online(policy->cpu)
policy->cpu or cpus in policy->cpus can't be offline anymore. And so we don't need to check if they are online or not.

Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r-- drivers/cpufreq/cpufreq.c | 17
1 file changed, 3 insertions(+), 14 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b63b3cbfe2c4..0dc9933069c5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -76,10 +76,6 @@ static int lock_policy_rwsem_##mode \
76 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \ 76 int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
77 BUG_ON(policy_cpu == -1); \ 77 BUG_ON(policy_cpu == -1); \
78 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \ 78 down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
79 if (unlikely(!cpu_online(cpu))) { \
80 up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
81 return -1; \
82 } \
83 \ 79 \
84 return 0; \ 80 return 0; \
85} 81}
@@ -720,8 +716,6 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
720 716
721 if (j == cpu) 717 if (j == cpu)
722 continue; 718 continue;
723 if (!cpu_online(j))
724 continue;
725 719
726 pr_debug("CPU %u already managed, adding link\n", j); 720 pr_debug("CPU %u already managed, adding link\n", j);
727 managed_policy = cpufreq_cpu_get(cpu); 721 managed_policy = cpufreq_cpu_get(cpu);
@@ -778,8 +772,6 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
778 772
779 spin_lock_irqsave(&cpufreq_driver_lock, flags); 773 spin_lock_irqsave(&cpufreq_driver_lock, flags);
780 for_each_cpu(j, policy->cpus) { 774 for_each_cpu(j, policy->cpus) {
781 if (!cpu_online(j))
782 continue;
783 per_cpu(cpufreq_cpu_data, j) = policy; 775 per_cpu(cpufreq_cpu_data, j) = policy;
784 per_cpu(cpufreq_policy_cpu, j) = policy->cpu; 776 per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
785 } 777 }
@@ -1006,11 +998,8 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1006 policy->last_cpu = policy->cpu; 998 policy->last_cpu = policy->cpu;
1007 policy->cpu = cpu; 999 policy->cpu = cpu;
1008 1000
1009 for_each_cpu(j, policy->cpus) { 1001 for_each_cpu(j, policy->cpus)
1010 if (!cpu_online(j))
1011 continue;
1012 per_cpu(cpufreq_policy_cpu, j) = cpu; 1002 per_cpu(cpufreq_policy_cpu, j) = cpu;
1013 }
1014 1003
1015#ifdef CONFIG_CPU_FREQ_TABLE 1004#ifdef CONFIG_CPU_FREQ_TABLE
1016 cpufreq_frequency_table_update_policy_cpu(policy); 1005 cpufreq_frequency_table_update_policy_cpu(policy);
@@ -1470,7 +1459,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
1470 if (target_freq == policy->cur) 1459 if (target_freq == policy->cur)
1471 return 0; 1460 return 0;
1472 1461
1473 if (cpu_online(policy->cpu) && cpufreq_driver->target) 1462 if (cpufreq_driver->target)
1474 retval = cpufreq_driver->target(policy, target_freq, relation); 1463 retval = cpufreq_driver->target(policy, target_freq, relation);
1475 1464
1476 return retval; 1465 return retval;
@@ -1508,7 +1497,7 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
1508 if (cpufreq_disabled()) 1497 if (cpufreq_disabled())
1509 return ret; 1498 return ret;
1510 1499
1511 if (!(cpu_online(cpu) && cpufreq_driver->getavg)) 1500 if (!cpufreq_driver->getavg)
1512 return 0; 1501 return 0;
1513 1502
1514 policy = cpufreq_cpu_get(policy->cpu); 1503 policy = cpufreq_cpu_get(policy->cpu);