author     Nishanth Menon <nm@ti.com>                        2014-05-05 09:33:49 -0400
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>    2014-05-06 18:35:51 -0400
commit     0f5c890e9b9754d9aa5bf6ae2fc00cae65780d23 (patch)
tree       e2621d55ce881e6478880a582e15bc37112dcf7c
parent     ca654dc3a93d3b47dddc0c24a98043060bbb256b (diff)
PM / OPP: Remove cpufreq wrapper dependency on internal data organization
CPUFREQ custom functions for OPP (Operating Performance Points) currently
exist inside the OPP library. These custom functions depend on internal
data structures to pick up the OPP information used to create the cpufreq
table. For example, the cpufreq table is created precisely in the same
order in which OPP entries are stored inside the list implementation.

This kind of tight interdependency is purely artificial, since the same
functionality can be achieved using the generic OPP functions meant for
this purpose. The interdependency also limits independent modification of
the cpufreq and OPP libraries.

So use the generic dev_pm_opp_find_freq_ceil() function, which achieves
the same table organization as we currently use. As a result, we don't
need the internal device_opp structure anymore, and hence we can switch
over to the RCU read lock instead of holding the internal list mutex.
Breaking this dependency on internal data structures imposes no change
on users of these functions.

NOTE: This change is a precursor to moving this cpufreq-specific logic
out of the generic library into cpufreq.

Cc: Kevin Hilman <khilman@deeprootsystems.com>
Signed-off-by: Nishanth Menon <nm@ti.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--  drivers/base/power/opp.c | 55
1 file changed, 28 insertions(+), 27 deletions(-)
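The core of the change is the iteration pattern: instead of walking the
internal opp_list directly, the table is built by repeatedly calling
dev_pm_opp_find_freq_ceil() with an increasing rate. As a standalone
sketch (not part of the patch; the walk_opps() helper name and the
dev_info() print are illustrative assumptions), the pattern looks like
this:

/*
 * Illustrative sketch only: enumerate a device's available OPPs in
 * ascending frequency order, as the patched helper does. Starting at
 * rate = 0, dev_pm_opp_find_freq_ceil() returns the lowest available
 * OPP at or above *rate and writes that OPP's rate back through the
 * pointer, so incrementing rate by 1 each pass steps to the next
 * higher OPP. The RCU read lock is held because, with the OPP API of
 * this era, the returned opp pointer is only valid inside the RCU
 * read-side critical section.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

static void walk_opps(struct device *dev)   /* hypothetical helper */
{
        struct dev_pm_opp *opp;
        unsigned long rate;

        rcu_read_lock();
        for (rate = 0; ; rate++) {
                opp = dev_pm_opp_find_freq_ceil(dev, &rate);
                if (IS_ERR(opp))
                        break;  /* no OPP at or above rate: walk is done */
                dev_info(dev, "OPP: %lu Hz\n", rate);
        }
        rcu_read_unlock();
}

Bumping the rate by one after each hit steps through the OPPs in strictly
ascending frequency order, which matches the order the old list walk
produced.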
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 25538675d59e..38b43bb20878 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -617,53 +617,54 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
  * the table if any of the mentioned functions have been invoked in the interim.
  *
  * Locking: The internal device_opp and opp structures are RCU protected.
- * To simplify the logic, we pretend we are updater and hold relevant mutex here
- * Callers should ensure that this function is *NOT* called under RCU protection
- * or in contexts where mutex locking cannot be used.
+ * Since we just use the regular accessor functions to access the internal data
+ * structures, we use RCU read lock inside this function. As a result, users of
+ * this function DONOT need to use explicit locks for invoking.
  */
 int dev_pm_opp_init_cpufreq_table(struct device *dev,
                                   struct cpufreq_frequency_table **table)
 {
-        struct device_opp *dev_opp;
         struct dev_pm_opp *opp;
-        struct cpufreq_frequency_table *freq_table;
-        int i = 0;
+        struct cpufreq_frequency_table *freq_table = NULL;
+        int i, max_opps, ret = 0;
+        unsigned long rate;
 
-        /* Pretend as if I am an updater */
-        mutex_lock(&dev_opp_list_lock);
+        rcu_read_lock();
 
-        dev_opp = find_device_opp(dev);
-        if (IS_ERR(dev_opp)) {
-                int r = PTR_ERR(dev_opp);
-                mutex_unlock(&dev_opp_list_lock);
-                dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
-                return r;
+        max_opps = dev_pm_opp_get_opp_count(dev);
+        if (max_opps <= 0) {
+                ret = max_opps ? max_opps : -ENODATA;
+                goto out;
         }
 
-        freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
-                             (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
+        freq_table = kzalloc(sizeof(*freq_table) * (max_opps + 1), GFP_KERNEL);
         if (!freq_table) {
-                mutex_unlock(&dev_opp_list_lock);
-                dev_warn(dev, "%s: Unable to allocate frequency table\n",
-                        __func__);
-                return -ENOMEM;
+                ret = -ENOMEM;
+                goto out;
         }
 
-        list_for_each_entry(opp, &dev_opp->opp_list, node) {
-                if (opp->available) {
-                        freq_table[i].driver_data = i;
-                        freq_table[i].frequency = opp->rate / 1000;
-                        i++;
+        for (i = 0, rate = 0; i < max_opps; i++, rate++) {
+                /* find next rate */
+                opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+                if (IS_ERR(opp)) {
+                        ret = PTR_ERR(opp);
+                        goto out;
                 }
+                freq_table[i].driver_data = i;
+                freq_table[i].frequency = rate / 1000;
         }
-        mutex_unlock(&dev_opp_list_lock);
 
         freq_table[i].driver_data = i;
         freq_table[i].frequency = CPUFREQ_TABLE_END;
 
         *table = &freq_table[0];
 
-        return 0;
+out:
+        rcu_read_unlock();
+        if (ret)
+                kfree(freq_table);
+
+        return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
 
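For completeness, a typical consumer of this helper is a cpufreq driver's
init path, paired with dev_pm_opp_free_cpufreq_table() on teardown. A
minimal sketch under assumed names (example_build_table() and
example_free_table() are illustrative, not from this patch):

/*
 * Illustrative sketch only: build a cpufreq table from the device's
 * OPPs and free it later with the matching helper. With this patch the
 * init helper takes the RCU read lock internally, so the caller needs
 * no explicit locking around the call.
 */
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/pm_opp.h>

static int example_build_table(struct device *cpu_dev,
                               struct cpufreq_frequency_table **table)
{
        int ret;

        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, table);
        if (ret)
                dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
        return ret;
}

static void example_free_table(struct device *cpu_dev,
                               struct cpufreq_frequency_table **table)
{
        dev_pm_opp_free_cpufreq_table(cpu_dev, table);
}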