author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-05-06 16:05:16 -0400
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2016-05-06 16:05:16 -0400
commit    c8541203a680a63ba7dcc4e50cd25d40e9a13dff (patch)
tree      e33573ab6d220be72986bde903fef61a8fa304e3
parent    21f8a99ce61b2d4b74bd425a5bf7e9efbe162788 (diff)
parent    f47b72a15a9679dd4dc1af681d4d2f1ca2815552 (diff)
Merge back new material for v4.7.
-rw-r--r--  drivers/base/power/opp/Makefile    1
-rw-r--r--  drivers/base/power/opp/core.c    440
-rw-r--r--  drivers/base/power/opp/cpu.c     199
-rw-r--r--  drivers/base/power/opp/of.c      591
-rw-r--r--  drivers/base/power/opp/opp.h      14
-rw-r--r--  include/linux/pm_opp.h            62
6 files changed, 763 insertions, 544 deletions
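
Usage note: the diff below moves the OF parsing helpers into of.c and adds cpumask-aware OPP APIs. As a minimal, hypothetical sketch of how a cpufreq driver might consume the OF-side interface (the function name example_init_cpu_opps() and its error handling are assumptions; only the dev_pm_opp_of_*() calls come from the code in this diff):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_opp.h>

/*
 * Hypothetical helper: populate OPPs for one CPU from DT and find the
 * CPUs sharing its table (per the v2 "opp-shared" property).  The caller
 * is assumed to pass a zeroed @shared mask.
 */
static int example_init_cpu_opps(unsigned int cpu, struct cpumask *shared)
{
	struct device *cpu_dev = get_cpu_device(cpu);
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	/* Parse operating-points-v2, falling back to the legacy v1 binding */
	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (ret)
		return ret;

	/* Fill @shared with the CPUs that share cpu_dev's OPP table */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, shared);
	if (ret)
		dev_pm_opp_of_remove_table(cpu_dev);

	return ret;
}
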
diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile
index 19837ef04d8e..e70ceb406fe9 100644
--- a/drivers/base/power/opp/Makefile
+++ b/drivers/base/power/opp/Makefile
@@ -1,3 +1,4 @@
 ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
 obj-y += core.o cpu.o
+obj-$(CONFIG_OF) += of.o
 obj-$(CONFIG_DEBUG_FS) += debugfs.o
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index d8f4cc22856c..7c04c87738a6 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -18,7 +18,6 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/device.h>
-#include <linux/of.h>
 #include <linux/export.h>
 #include <linux/regulator/consumer.h>
 
@@ -29,7 +28,7 @@
  * from here, with each opp_table containing the list of opps it supports in
  * various states of availability.
  */
-static LIST_HEAD(opp_tables);
+LIST_HEAD(opp_tables);
 /* Lock to allow exclusive modification to the device and opp lists */
 DEFINE_MUTEX(opp_table_lock);
 
@@ -53,26 +52,6 @@ static struct opp_device *_find_opp_dev(const struct device *dev,
 	return NULL;
 }
 
-static struct opp_table *_managed_opp(const struct device_node *np)
-{
-	struct opp_table *opp_table;
-
-	list_for_each_entry_rcu(opp_table, &opp_tables, node) {
-		if (opp_table->np == np) {
-			/*
-			 * Multiple devices can point to the same OPP table and
-			 * so will have same node-pointer, np.
-			 *
-			 * But the OPPs will be considered as shared only if the
-			 * OPP table contains a "opp-shared" property.
-			 */
-			return opp_table->shared_opp ? opp_table : NULL;
-		}
-	}
-
-	return NULL;
-}
-
 /**
  * _find_opp_table() - find opp_table struct using device pointer
  * @dev: device pointer used to lookup OPP table
@@ -757,7 +736,6 @@ static struct opp_table *_add_opp_table(struct device *dev)
 {
 	struct opp_table *opp_table;
 	struct opp_device *opp_dev;
-	struct device_node *np;
 	int ret;
 
 	/* Check for existing table for 'dev' first */
@@ -781,20 +759,7 @@ static struct opp_table *_add_opp_table(struct device *dev)
 		return NULL;
 	}
 
-	/*
-	 * Only required for backward compatibility with v1 bindings, but isn't
-	 * harmful for other cases. And so we do it unconditionally.
-	 */
-	np = of_node_get(dev->of_node);
-	if (np) {
-		u32 val;
-
-		if (!of_property_read_u32(np, "clock-latency", &val))
-			opp_table->clock_latency_ns_max = val;
-		of_property_read_u32(np, "voltage-tolerance",
-				     &opp_table->voltage_tolerance_v1);
-		of_node_put(np);
-	}
+	_of_init_opp_table(opp_table, dev);
 
 	/* Set regulator to a non-NULL error value */
 	opp_table->regulator = ERR_PTR(-ENXIO);
@@ -890,8 +855,8 @@ static void _kfree_opp_rcu(struct rcu_head *head)
  * It is assumed that the caller holds required mutex for an RCU updater
  * strategy.
  */
-static void _opp_remove(struct opp_table *opp_table,
-			struct dev_pm_opp *opp, bool notify)
+void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp,
+		 bool notify)
 {
 	/*
 	 * Notify the changes in the availability of the operable
@@ -952,8 +917,8 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 
-static struct dev_pm_opp *_allocate_opp(struct device *dev,
-					struct opp_table **opp_table)
+struct dev_pm_opp *_allocate_opp(struct device *dev,
+				 struct opp_table **opp_table)
 {
 	struct dev_pm_opp *opp;
 
@@ -989,8 +954,8 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp,
 	return true;
 }
 
-static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
-		    struct opp_table *opp_table)
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
+	     struct opp_table *opp_table)
 {
 	struct dev_pm_opp *opp;
 	struct list_head *head = &opp_table->opp_list;
@@ -1066,8 +1031,8 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
  * Duplicate OPPs (both freq and volt are same) and !opp->available
  * -ENOMEM Memory allocation failure
  */
-static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
-		       bool dynamic)
+int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt,
+		bool dynamic)
 {
 	struct opp_table *opp_table;
 	struct dev_pm_opp *new_opp;
@@ -1112,83 +1077,6 @@ unlock:
1112 return ret; 1077 return ret;
1113} 1078}
1114 1079
1115/* TODO: Support multiple regulators */
1116static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
1117 struct opp_table *opp_table)
1118{
1119 u32 microvolt[3] = {0};
1120 u32 val;
1121 int count, ret;
1122 struct property *prop = NULL;
1123 char name[NAME_MAX];
1124
1125 /* Search for "opp-microvolt-<name>" */
1126 if (opp_table->prop_name) {
1127 snprintf(name, sizeof(name), "opp-microvolt-%s",
1128 opp_table->prop_name);
1129 prop = of_find_property(opp->np, name, NULL);
1130 }
1131
1132 if (!prop) {
1133 /* Search for "opp-microvolt" */
1134 sprintf(name, "opp-microvolt");
1135 prop = of_find_property(opp->np, name, NULL);
1136
1137 /* Missing property isn't a problem, but an invalid entry is */
1138 if (!prop)
1139 return 0;
1140 }
1141
1142 count = of_property_count_u32_elems(opp->np, name);
1143 if (count < 0) {
1144 dev_err(dev, "%s: Invalid %s property (%d)\n",
1145 __func__, name, count);
1146 return count;
1147 }
1148
1149 /* There can be one or three elements here */
1150 if (count != 1 && count != 3) {
1151 dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
1152 __func__, name, count);
1153 return -EINVAL;
1154 }
1155
1156 ret = of_property_read_u32_array(opp->np, name, microvolt, count);
1157 if (ret) {
1158 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
1159 return -EINVAL;
1160 }
1161
1162 opp->u_volt = microvolt[0];
1163
1164 if (count == 1) {
1165 opp->u_volt_min = opp->u_volt;
1166 opp->u_volt_max = opp->u_volt;
1167 } else {
1168 opp->u_volt_min = microvolt[1];
1169 opp->u_volt_max = microvolt[2];
1170 }
1171
1172 /* Search for "opp-microamp-<name>" */
1173 prop = NULL;
1174 if (opp_table->prop_name) {
1175 snprintf(name, sizeof(name), "opp-microamp-%s",
1176 opp_table->prop_name);
1177 prop = of_find_property(opp->np, name, NULL);
1178 }
1179
1180 if (!prop) {
1181 /* Search for "opp-microamp" */
1182 sprintf(name, "opp-microamp");
1183 prop = of_find_property(opp->np, name, NULL);
1184 }
1185
1186 if (prop && !of_property_read_u32(opp->np, name, &val))
1187 opp->u_amp = val;
1188
1189 return 0;
1190}
1191
1192/** 1080/**
1193 * dev_pm_opp_set_supported_hw() - Set supported platforms 1081 * dev_pm_opp_set_supported_hw() - Set supported platforms
1194 * @dev: Device for which supported-hw has to be set. 1082 * @dev: Device for which supported-hw has to be set.
@@ -1517,144 +1405,6 @@ unlock:
1517} 1405}
1518EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator); 1406EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator);
1519 1407
1520static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
1521 struct device_node *np)
1522{
1523 unsigned int count = opp_table->supported_hw_count;
1524 u32 version;
1525 int ret;
1526
1527 if (!opp_table->supported_hw)
1528 return true;
1529
1530 while (count--) {
1531 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
1532 &version);
1533 if (ret) {
1534 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
1535 __func__, count, ret);
1536 return false;
1537 }
1538
1539 /* Both of these are bitwise masks of the versions */
1540 if (!(version & opp_table->supported_hw[count]))
1541 return false;
1542 }
1543
1544 return true;
1545}
1546
1547/**
1548 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
1549 * @dev: device for which we do this operation
1550 * @np: device node
1551 *
1552 * This function adds an opp definition to the opp table and returns status. The
1553 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
1554 * removed by dev_pm_opp_remove.
1555 *
1556 * Locking: The internal opp_table and opp structures are RCU protected.
1557 * Hence this function internally uses RCU updater strategy with mutex locks
1558 * to keep the integrity of the internal data structures. Callers should ensure
1559 * that this function is *NOT* called under RCU protection or in contexts where
1560 * mutex cannot be locked.
1561 *
1562 * Return:
1563 * 0 On success OR
1564 * Duplicate OPPs (both freq and volt are same) and opp->available
1565 * -EEXIST Freq are same and volt are different OR
1566 * Duplicate OPPs (both freq and volt are same) and !opp->available
1567 * -ENOMEM Memory allocation failure
1568 * -EINVAL Failed parsing the OPP node
1569 */
1570static int _opp_add_static_v2(struct device *dev, struct device_node *np)
1571{
1572 struct opp_table *opp_table;
1573 struct dev_pm_opp *new_opp;
1574 u64 rate;
1575 u32 val;
1576 int ret;
1577
1578 /* Hold our table modification lock here */
1579 mutex_lock(&opp_table_lock);
1580
1581 new_opp = _allocate_opp(dev, &opp_table);
1582 if (!new_opp) {
1583 ret = -ENOMEM;
1584 goto unlock;
1585 }
1586
1587 ret = of_property_read_u64(np, "opp-hz", &rate);
1588 if (ret < 0) {
1589 dev_err(dev, "%s: opp-hz not found\n", __func__);
1590 goto free_opp;
1591 }
1592
1593 /* Check if the OPP supports hardware's hierarchy of versions or not */
1594 if (!_opp_is_supported(dev, opp_table, np)) {
1595 dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
1596 goto free_opp;
1597 }
1598
1599 /*
1600 * Rate is defined as an unsigned long in clk API, and so casting
1601 * explicitly to its type. Must be fixed once rate is 64 bit
1602 * guaranteed in clk API.
1603 */
1604 new_opp->rate = (unsigned long)rate;
1605 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
1606
1607 new_opp->np = np;
1608 new_opp->dynamic = false;
1609 new_opp->available = true;
1610
1611 if (!of_property_read_u32(np, "clock-latency-ns", &val))
1612 new_opp->clock_latency_ns = val;
1613
1614 ret = opp_parse_supplies(new_opp, dev, opp_table);
1615 if (ret)
1616 goto free_opp;
1617
1618 ret = _opp_add(dev, new_opp, opp_table);
1619 if (ret)
1620 goto free_opp;
1621
1622 /* OPP to select on device suspend */
1623 if (of_property_read_bool(np, "opp-suspend")) {
1624 if (opp_table->suspend_opp) {
1625 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
1626 __func__, opp_table->suspend_opp->rate,
1627 new_opp->rate);
1628 } else {
1629 new_opp->suspend = true;
1630 opp_table->suspend_opp = new_opp;
1631 }
1632 }
1633
1634 if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
1635 opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
1636
1637 mutex_unlock(&opp_table_lock);
1638
1639 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
1640 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
1641 new_opp->u_volt_min, new_opp->u_volt_max,
1642 new_opp->clock_latency_ns);
1643
1644 /*
1645 * Notify the changes in the availability of the operable
1646 * frequency/voltage list.
1647 */
1648 srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
1649 return 0;
1650
1651free_opp:
1652 _opp_remove(opp_table, new_opp, false);
1653unlock:
1654 mutex_unlock(&opp_table_lock);
1655 return ret;
1656}
1657
1658/** 1408/**
1659 * dev_pm_opp_add() - Add an OPP table from a table definitions 1409 * dev_pm_opp_add() - Add an OPP table from a table definitions
1660 * @dev: device for which we do this operation 1410 * @dev: device for which we do this operation
@@ -1842,21 +1592,11 @@ struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
 
-#ifdef CONFIG_OF
-/**
- * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
- *	entries
- * @dev: device pointer used to lookup OPP table.
- *
- * Free OPPs created using static entries present in DT.
- *
- * Locking: The internal opp_table and opp structures are RCU protected.
- * Hence this function indirectly uses RCU updater strategy with mutex locks
- * to keep the integrity of the internal data structures. Callers should ensure
- * that this function is *NOT* called under RCU protection or in contexts where
- * mutex cannot be locked.
- */
-void dev_pm_opp_of_remove_table(struct device *dev)
+/*
+ * Free OPPs either created using static entries present in DT or even the
+ * dynamically added entries based on remove_all param.
+ */
+void _dev_pm_opp_remove_table(struct device *dev, bool remove_all)
 {
 	struct opp_table *opp_table;
 	struct dev_pm_opp *opp, *tmp;
@@ -1881,7 +1621,7 @@ void dev_pm_opp_of_remove_table(struct device *dev)
 	if (list_is_singular(&opp_table->dev_list)) {
 		/* Free static OPPs */
 		list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) {
-			if (!opp->dynamic)
+			if (remove_all || !opp->dynamic)
 				_opp_remove(opp_table, opp, true);
 		}
 	} else {
@@ -1891,160 +1631,22 @@ void dev_pm_opp_of_remove_table(struct device *dev)
1891unlock: 1631unlock:
1892 mutex_unlock(&opp_table_lock); 1632 mutex_unlock(&opp_table_lock);
1893} 1633}
1894EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
1895
1896/* Returns opp descriptor node for a device, caller must do of_node_put() */
1897struct device_node *_of_get_opp_desc_node(struct device *dev)
1898{
1899 /*
1900 * TODO: Support for multiple OPP tables.
1901 *
1902 * There should be only ONE phandle present in "operating-points-v2"
1903 * property.
1904 */
1905
1906 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
1907}
1908
1909/* Initializes OPP tables based on new bindings */
1910static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
1911{
1912 struct device_node *np;
1913 struct opp_table *opp_table;
1914 int ret = 0, count = 0;
1915
1916 mutex_lock(&opp_table_lock);
1917
1918 opp_table = _managed_opp(opp_np);
1919 if (opp_table) {
1920 /* OPPs are already managed */
1921 if (!_add_opp_dev(dev, opp_table))
1922 ret = -ENOMEM;
1923 mutex_unlock(&opp_table_lock);
1924 return ret;
1925 }
1926 mutex_unlock(&opp_table_lock);
1927
1928 /* We have opp-table node now, iterate over it and add OPPs */
1929 for_each_available_child_of_node(opp_np, np) {
1930 count++;
1931
1932 ret = _opp_add_static_v2(dev, np);
1933 if (ret) {
1934 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
1935 ret);
1936 goto free_table;
1937 }
1938 }
1939
1940 /* There should be one of more OPP defined */
1941 if (WARN_ON(!count))
1942 return -ENOENT;
1943
1944 mutex_lock(&opp_table_lock);
1945
1946 opp_table = _find_opp_table(dev);
1947 if (WARN_ON(IS_ERR(opp_table))) {
1948 ret = PTR_ERR(opp_table);
1949 mutex_unlock(&opp_table_lock);
1950 goto free_table;
1951 }
1952
1953 opp_table->np = opp_np;
1954 opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
1955
1956 mutex_unlock(&opp_table_lock);
1957
1958 return 0;
1959
1960free_table:
1961 dev_pm_opp_of_remove_table(dev);
1962
1963 return ret;
1964}
1965
1966/* Initializes OPP tables based on old-deprecated bindings */
1967static int _of_add_opp_table_v1(struct device *dev)
1968{
1969 const struct property *prop;
1970 const __be32 *val;
1971 int nr;
1972
1973 prop = of_find_property(dev->of_node, "operating-points", NULL);
1974 if (!prop)
1975 return -ENODEV;
1976 if (!prop->value)
1977 return -ENODATA;
1978
1979 /*
1980 * Each OPP is a set of tuples consisting of frequency and
1981 * voltage like <freq-kHz vol-uV>.
1982 */
1983 nr = prop->length / sizeof(u32);
1984 if (nr % 2) {
1985 dev_err(dev, "%s: Invalid OPP table\n", __func__);
1986 return -EINVAL;
1987 }
1988
1989 val = prop->value;
1990 while (nr) {
1991 unsigned long freq = be32_to_cpup(val++) * 1000;
1992 unsigned long volt = be32_to_cpup(val++);
1993
1994 if (_opp_add_v1(dev, freq, volt, false))
1995 dev_warn(dev, "%s: Failed to add OPP %ld\n",
1996 __func__, freq);
1997 nr -= 2;
1998 }
1999
2000 return 0;
2001}
2002 1634
2003/** 1635/**
2004 * dev_pm_opp_of_add_table() - Initialize opp table from device tree 1636 * dev_pm_opp_remove_table() - Free all OPPs associated with the device
2005 * @dev: device pointer used to lookup OPP table. 1637 * @dev: device pointer used to lookup OPP table.
2006 * 1638 *
2007 * Register the initial OPP table with the OPP library for given device. 1639 * Free both OPPs created using static entries present in DT and the
1640 * dynamically added entries.
2008 * 1641 *
2009 * Locking: The internal opp_table and opp structures are RCU protected. 1642 * Locking: The internal opp_table and opp structures are RCU protected.
2010 * Hence this function indirectly uses RCU updater strategy with mutex locks 1643 * Hence this function indirectly uses RCU updater strategy with mutex locks
2011 * to keep the integrity of the internal data structures. Callers should ensure 1644 * to keep the integrity of the internal data structures. Callers should ensure
2012 * that this function is *NOT* called under RCU protection or in contexts where 1645 * that this function is *NOT* called under RCU protection or in contexts where
2013 * mutex cannot be locked. 1646 * mutex cannot be locked.
2014 *
2015 * Return:
2016 * 0 On success OR
2017 * Duplicate OPPs (both freq and volt are same) and opp->available
2018 * -EEXIST Freq are same and volt are different OR
2019 * Duplicate OPPs (both freq and volt are same) and !opp->available
2020 * -ENOMEM Memory allocation failure
2021 * -ENODEV when 'operating-points' property is not found or is invalid data
2022 * in device node.
2023 * -ENODATA when empty 'operating-points' property is found
2024 * -EINVAL when invalid entries are found in opp-v2 table
2025 */ 1647 */
2026int dev_pm_opp_of_add_table(struct device *dev) 1648void dev_pm_opp_remove_table(struct device *dev)
2027{ 1649{
2028 struct device_node *opp_np; 1650 _dev_pm_opp_remove_table(dev, true);
2029 int ret;
2030
2031 /*
2032 * OPPs have two version of bindings now. The older one is deprecated,
2033 * try for the new binding first.
2034 */
2035 opp_np = _of_get_opp_desc_node(dev);
2036 if (!opp_np) {
2037 /*
2038 * Try old-deprecated bindings for backward compatibility with
2039 * older dtbs.
2040 */
2041 return _of_add_opp_table_v1(dev);
2042 }
2043
2044 ret = _of_add_opp_table_v2(dev, opp_np);
2045 of_node_put(opp_np);
2046
2047 return ret;
2048} 1651}
2049EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); 1652EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table);
2050#endif
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index ba2bdbd932ef..83d6e7ba1a34 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -18,7 +18,6 @@
 #include <linux/err.h>
 #include <linux/errno.h>
 #include <linux/export.h>
-#include <linux/of.h>
 #include <linux/slab.h>
 
 #include "opp.h"
@@ -119,8 +118,66 @@ void dev_pm_opp_free_cpufreq_table(struct device *dev,
119EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); 118EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
120#endif /* CONFIG_CPU_FREQ */ 119#endif /* CONFIG_CPU_FREQ */
121 120
122/* Required only for V1 bindings, as v2 can manage it from DT itself */ 121void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of)
123int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) 122{
123 struct device *cpu_dev;
124 int cpu;
125
126 WARN_ON(cpumask_empty(cpumask));
127
128 for_each_cpu(cpu, cpumask) {
129 cpu_dev = get_cpu_device(cpu);
130 if (!cpu_dev) {
131 pr_err("%s: failed to get cpu%d device\n", __func__,
132 cpu);
133 continue;
134 }
135
136 if (of)
137 dev_pm_opp_of_remove_table(cpu_dev);
138 else
139 dev_pm_opp_remove_table(cpu_dev);
140 }
141}
142
143/**
144 * dev_pm_opp_cpumask_remove_table() - Removes OPP table for @cpumask
145 * @cpumask: cpumask for which OPP table needs to be removed
146 *
147 * This removes the OPP tables for CPUs present in the @cpumask.
148 * This should be used to remove all the OPPs entries associated with
149 * the cpus in @cpumask.
150 *
151 * Locking: The internal opp_table and opp structures are RCU protected.
152 * Hence this function internally uses RCU updater strategy with mutex locks
153 * to keep the integrity of the internal data structures. Callers should ensure
154 * that this function is *NOT* called under RCU protection or in contexts where
155 * mutex cannot be locked.
156 */
157void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
158{
159 _dev_pm_opp_cpumask_remove_table(cpumask, false);
160}
161EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table);
162
163/**
164 * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs
165 * @cpu_dev: CPU device for which we do this operation
166 * @cpumask: cpumask of the CPUs which share the OPP table with @cpu_dev
167 *
168 * This marks OPP table of the @cpu_dev as shared by the CPUs present in
169 * @cpumask.
170 *
171 * Returns -ENODEV if OPP table isn't already present.
172 *
173 * Locking: The internal opp_table and opp structures are RCU protected.
174 * Hence this function internally uses RCU updater strategy with mutex locks
175 * to keep the integrity of the internal data structures. Callers should ensure
176 * that this function is *NOT* called under RCU protection or in contexts where
177 * mutex cannot be locked.
178 */
179int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev,
180 const struct cpumask *cpumask)
124{ 181{
125 struct opp_device *opp_dev; 182 struct opp_device *opp_dev;
126 struct opp_table *opp_table; 183 struct opp_table *opp_table;
@@ -131,7 +188,7 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
131 188
132 opp_table = _find_opp_table(cpu_dev); 189 opp_table = _find_opp_table(cpu_dev);
133 if (IS_ERR(opp_table)) { 190 if (IS_ERR(opp_table)) {
134 ret = -EINVAL; 191 ret = PTR_ERR(opp_table);
135 goto unlock; 192 goto unlock;
136 } 193 }
137 194
@@ -152,6 +209,9 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
152 __func__, cpu); 209 __func__, cpu);
153 continue; 210 continue;
154 } 211 }
212
213 /* Mark opp-table as multiple CPUs are sharing it now */
214 opp_table->shared_opp = true;
155 } 215 }
156unlock: 216unlock:
157 mutex_unlock(&opp_table_lock); 217 mutex_unlock(&opp_table_lock);
@@ -160,112 +220,47 @@ unlock:
160} 220}
161EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); 221EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
162 222
163#ifdef CONFIG_OF 223/**
164void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask) 224 * dev_pm_opp_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with @cpu_dev
165{ 225 * @cpu_dev: CPU device for which we do this operation
166 struct device *cpu_dev; 226 * @cpumask: cpumask to update with information of sharing CPUs
167 int cpu; 227 *
168 228 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
169 WARN_ON(cpumask_empty(cpumask));
170
171 for_each_cpu(cpu, cpumask) {
172 cpu_dev = get_cpu_device(cpu);
173 if (!cpu_dev) {
174 pr_err("%s: failed to get cpu%d device\n", __func__,
175 cpu);
176 continue;
177 }
178
179 dev_pm_opp_of_remove_table(cpu_dev);
180 }
181}
182EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
183
184int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
185{
186 struct device *cpu_dev;
187 int cpu, ret = 0;
188
189 WARN_ON(cpumask_empty(cpumask));
190
191 for_each_cpu(cpu, cpumask) {
192 cpu_dev = get_cpu_device(cpu);
193 if (!cpu_dev) {
194 pr_err("%s: failed to get cpu%d device\n", __func__,
195 cpu);
196 continue;
197 }
198
199 ret = dev_pm_opp_of_add_table(cpu_dev);
200 if (ret) {
201 pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
202 __func__, cpu, ret);
203
204 /* Free all other OPPs */
205 dev_pm_opp_of_cpumask_remove_table(cpumask);
206 break;
207 }
208 }
209
210 return ret;
211}
212EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
213
214/*
215 * Works only for OPP v2 bindings.
216 * 229 *
217 * Returns -ENOENT if operating-points-v2 bindings aren't supported. 230 * Returns -ENODEV if OPP table isn't already present.
231 *
232 * Locking: The internal opp_table and opp structures are RCU protected.
233 * Hence this function internally uses RCU updater strategy with mutex locks
234 * to keep the integrity of the internal data structures. Callers should ensure
235 * that this function is *NOT* called under RCU protection or in contexts where
236 * mutex cannot be locked.
218 */ 237 */
219int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) 238int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
220{ 239{
221 struct device_node *np, *tmp_np; 240 struct opp_device *opp_dev;
222 struct device *tcpu_dev; 241 struct opp_table *opp_table;
223 int cpu, ret = 0; 242 int ret = 0;
224
225 /* Get OPP descriptor node */
226 np = _of_get_opp_desc_node(cpu_dev);
227 if (!np) {
228 dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
229 return -ENOENT;
230 }
231
232 cpumask_set_cpu(cpu_dev->id, cpumask);
233
234 /* OPPs are shared ? */
235 if (!of_property_read_bool(np, "opp-shared"))
236 goto put_cpu_node;
237
238 for_each_possible_cpu(cpu) {
239 if (cpu == cpu_dev->id)
240 continue;
241 243
242 tcpu_dev = get_cpu_device(cpu); 244 mutex_lock(&opp_table_lock);
243 if (!tcpu_dev) {
244 dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
245 __func__, cpu);
246 ret = -ENODEV;
247 goto put_cpu_node;
248 }
249 245
250 /* Get OPP descriptor node */ 246 opp_table = _find_opp_table(cpu_dev);
251 tmp_np = _of_get_opp_desc_node(tcpu_dev); 247 if (IS_ERR(opp_table)) {
252 if (!tmp_np) { 248 ret = PTR_ERR(opp_table);
253 dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n", 249 goto unlock;
254 __func__); 250 }
255 ret = -ENOENT;
256 goto put_cpu_node;
257 }
258 251
259 /* CPUs are sharing opp node */ 252 cpumask_clear(cpumask);
260 if (np == tmp_np)
261 cpumask_set_cpu(cpu, cpumask);
262 253
263 of_node_put(tmp_np); 254 if (opp_table->shared_opp) {
255 list_for_each_entry(opp_dev, &opp_table->dev_list, node)
256 cpumask_set_cpu(opp_dev->dev->id, cpumask);
257 } else {
258 cpumask_set_cpu(cpu_dev->id, cpumask);
264 } 259 }
265 260
266put_cpu_node: 261unlock:
267 of_node_put(np); 262 mutex_unlock(&opp_table_lock);
263
268 return ret; 264 return ret;
269} 265}
270EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); 266EXPORT_SYMBOL_GPL(dev_pm_opp_get_sharing_cpus);
271#endif
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
new file mode 100644
index 000000000000..94d2010558e3
--- /dev/null
+++ b/drivers/base/power/opp/of.c
@@ -0,0 +1,591 @@
1/*
2 * Generic OPP OF helpers
3 *
4 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
5 * Nishanth Menon
6 * Romit Dasgupta
7 * Kevin Hilman
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/cpu.h>
17#include <linux/errno.h>
18#include <linux/device.h>
19#include <linux/of.h>
20#include <linux/export.h>
21
22#include "opp.h"
23
24static struct opp_table *_managed_opp(const struct device_node *np)
25{
26 struct opp_table *opp_table;
27
28 list_for_each_entry_rcu(opp_table, &opp_tables, node) {
29 if (opp_table->np == np) {
30 /*
31 * Multiple devices can point to the same OPP table and
32 * so will have same node-pointer, np.
33 *
34 * But the OPPs will be considered as shared only if the
35 * OPP table contains a "opp-shared" property.
36 */
37 return opp_table->shared_opp ? opp_table : NULL;
38 }
39 }
40
41 return NULL;
42}
43
44void _of_init_opp_table(struct opp_table *opp_table, struct device *dev)
45{
46 struct device_node *np;
47
48 /*
49 * Only required for backward compatibility with v1 bindings, but isn't
50 * harmful for other cases. And so we do it unconditionally.
51 */
52 np = of_node_get(dev->of_node);
53 if (np) {
54 u32 val;
55
56 if (!of_property_read_u32(np, "clock-latency", &val))
57 opp_table->clock_latency_ns_max = val;
58 of_property_read_u32(np, "voltage-tolerance",
59 &opp_table->voltage_tolerance_v1);
60 of_node_put(np);
61 }
62}
63
64static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table,
65 struct device_node *np)
66{
67 unsigned int count = opp_table->supported_hw_count;
68 u32 version;
69 int ret;
70
71 if (!opp_table->supported_hw)
72 return true;
73
74 while (count--) {
75 ret = of_property_read_u32_index(np, "opp-supported-hw", count,
76 &version);
77 if (ret) {
78 dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n",
79 __func__, count, ret);
80 return false;
81 }
82
83 /* Both of these are bitwise masks of the versions */
84 if (!(version & opp_table->supported_hw[count]))
85 return false;
86 }
87
88 return true;
89}
90
91/* TODO: Support multiple regulators */
92static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev,
93 struct opp_table *opp_table)
94{
95 u32 microvolt[3] = {0};
96 u32 val;
97 int count, ret;
98 struct property *prop = NULL;
99 char name[NAME_MAX];
100
101 /* Search for "opp-microvolt-<name>" */
102 if (opp_table->prop_name) {
103 snprintf(name, sizeof(name), "opp-microvolt-%s",
104 opp_table->prop_name);
105 prop = of_find_property(opp->np, name, NULL);
106 }
107
108 if (!prop) {
109 /* Search for "opp-microvolt" */
110 sprintf(name, "opp-microvolt");
111 prop = of_find_property(opp->np, name, NULL);
112
113 /* Missing property isn't a problem, but an invalid entry is */
114 if (!prop)
115 return 0;
116 }
117
118 count = of_property_count_u32_elems(opp->np, name);
119 if (count < 0) {
120 dev_err(dev, "%s: Invalid %s property (%d)\n",
121 __func__, name, count);
122 return count;
123 }
124
125 /* There can be one or three elements here */
126 if (count != 1 && count != 3) {
127 dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n",
128 __func__, name, count);
129 return -EINVAL;
130 }
131
132 ret = of_property_read_u32_array(opp->np, name, microvolt, count);
133 if (ret) {
134 dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret);
135 return -EINVAL;
136 }
137
138 opp->u_volt = microvolt[0];
139
140 if (count == 1) {
141 opp->u_volt_min = opp->u_volt;
142 opp->u_volt_max = opp->u_volt;
143 } else {
144 opp->u_volt_min = microvolt[1];
145 opp->u_volt_max = microvolt[2];
146 }
147
148 /* Search for "opp-microamp-<name>" */
149 prop = NULL;
150 if (opp_table->prop_name) {
151 snprintf(name, sizeof(name), "opp-microamp-%s",
152 opp_table->prop_name);
153 prop = of_find_property(opp->np, name, NULL);
154 }
155
156 if (!prop) {
157 /* Search for "opp-microamp" */
158 sprintf(name, "opp-microamp");
159 prop = of_find_property(opp->np, name, NULL);
160 }
161
162 if (prop && !of_property_read_u32(opp->np, name, &val))
163 opp->u_amp = val;
164
165 return 0;
166}
167
168/**
169 * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
170 * entries
171 * @dev: device pointer used to lookup OPP table.
172 *
173 * Free OPPs created using static entries present in DT.
174 *
175 * Locking: The internal opp_table and opp structures are RCU protected.
176 * Hence this function indirectly uses RCU updater strategy with mutex locks
177 * to keep the integrity of the internal data structures. Callers should ensure
178 * that this function is *NOT* called under RCU protection or in contexts where
179 * mutex cannot be locked.
180 */
181void dev_pm_opp_of_remove_table(struct device *dev)
182{
183 _dev_pm_opp_remove_table(dev, false);
184}
185EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
186
187/* Returns opp descriptor node for a device, caller must do of_node_put() */
188struct device_node *_of_get_opp_desc_node(struct device *dev)
189{
190 /*
191 * TODO: Support for multiple OPP tables.
192 *
193 * There should be only ONE phandle present in "operating-points-v2"
194 * property.
195 */
196
197 return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
198}
199
200/**
201 * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings)
202 * @dev: device for which we do this operation
203 * @np: device node
204 *
205 * This function adds an opp definition to the opp table and returns status. The
206 * opp can be controlled using dev_pm_opp_enable/disable functions and may be
207 * removed by dev_pm_opp_remove.
208 *
209 * Locking: The internal opp_table and opp structures are RCU protected.
210 * Hence this function internally uses RCU updater strategy with mutex locks
211 * to keep the integrity of the internal data structures. Callers should ensure
212 * that this function is *NOT* called under RCU protection or in contexts where
213 * mutex cannot be locked.
214 *
215 * Return:
216 * 0 On success OR
217 * Duplicate OPPs (both freq and volt are same) and opp->available
218 * -EEXIST Freq are same and volt are different OR
219 * Duplicate OPPs (both freq and volt are same) and !opp->available
220 * -ENOMEM Memory allocation failure
221 * -EINVAL Failed parsing the OPP node
222 */
223static int _opp_add_static_v2(struct device *dev, struct device_node *np)
224{
225 struct opp_table *opp_table;
226 struct dev_pm_opp *new_opp;
227 u64 rate;
228 u32 val;
229 int ret;
230
231 /* Hold our table modification lock here */
232 mutex_lock(&opp_table_lock);
233
234 new_opp = _allocate_opp(dev, &opp_table);
235 if (!new_opp) {
236 ret = -ENOMEM;
237 goto unlock;
238 }
239
240 ret = of_property_read_u64(np, "opp-hz", &rate);
241 if (ret < 0) {
242 dev_err(dev, "%s: opp-hz not found\n", __func__);
243 goto free_opp;
244 }
245
246 /* Check if the OPP supports hardware's hierarchy of versions or not */
247 if (!_opp_is_supported(dev, opp_table, np)) {
248 dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate);
249 goto free_opp;
250 }
251
252 /*
253 * Rate is defined as an unsigned long in clk API, and so casting
254 * explicitly to its type. Must be fixed once rate is 64 bit
255 * guaranteed in clk API.
256 */
257 new_opp->rate = (unsigned long)rate;
258 new_opp->turbo = of_property_read_bool(np, "turbo-mode");
259
260 new_opp->np = np;
261 new_opp->dynamic = false;
262 new_opp->available = true;
263
264 if (!of_property_read_u32(np, "clock-latency-ns", &val))
265 new_opp->clock_latency_ns = val;
266
267 ret = opp_parse_supplies(new_opp, dev, opp_table);
268 if (ret)
269 goto free_opp;
270
271 ret = _opp_add(dev, new_opp, opp_table);
272 if (ret)
273 goto free_opp;
274
275 /* OPP to select on device suspend */
276 if (of_property_read_bool(np, "opp-suspend")) {
277 if (opp_table->suspend_opp) {
278 dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n",
279 __func__, opp_table->suspend_opp->rate,
280 new_opp->rate);
281 } else {
282 new_opp->suspend = true;
283 opp_table->suspend_opp = new_opp;
284 }
285 }
286
287 if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max)
288 opp_table->clock_latency_ns_max = new_opp->clock_latency_ns;
289
290 mutex_unlock(&opp_table_lock);
291
292 pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n",
293 __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt,
294 new_opp->u_volt_min, new_opp->u_volt_max,
295 new_opp->clock_latency_ns);
296
297 /*
298 * Notify the changes in the availability of the operable
299 * frequency/voltage list.
300 */
301 srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp);
302 return 0;
303
304free_opp:
305 _opp_remove(opp_table, new_opp, false);
306unlock:
307 mutex_unlock(&opp_table_lock);
308 return ret;
309}
310
311/* Initializes OPP tables based on new bindings */
312static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
313{
314 struct device_node *np;
315 struct opp_table *opp_table;
316 int ret = 0, count = 0;
317
318 mutex_lock(&opp_table_lock);
319
320 opp_table = _managed_opp(opp_np);
321 if (opp_table) {
322 /* OPPs are already managed */
323 if (!_add_opp_dev(dev, opp_table))
324 ret = -ENOMEM;
325 mutex_unlock(&opp_table_lock);
326 return ret;
327 }
328 mutex_unlock(&opp_table_lock);
329
330 /* We have opp-table node now, iterate over it and add OPPs */
331 for_each_available_child_of_node(opp_np, np) {
332 count++;
333
334 ret = _opp_add_static_v2(dev, np);
335 if (ret) {
336 dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
337 ret);
338 goto free_table;
339 }
340 }
341
342 /* There should be one of more OPP defined */
343 if (WARN_ON(!count))
344 return -ENOENT;
345
346 mutex_lock(&opp_table_lock);
347
348 opp_table = _find_opp_table(dev);
349 if (WARN_ON(IS_ERR(opp_table))) {
350 ret = PTR_ERR(opp_table);
351 mutex_unlock(&opp_table_lock);
352 goto free_table;
353 }
354
355 opp_table->np = opp_np;
356 opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared");
357
358 mutex_unlock(&opp_table_lock);
359
360 return 0;
361
362free_table:
363 dev_pm_opp_of_remove_table(dev);
364
365 return ret;
366}
367
368/* Initializes OPP tables based on old-deprecated bindings */
369static int _of_add_opp_table_v1(struct device *dev)
370{
371 const struct property *prop;
372 const __be32 *val;
373 int nr;
374
375 prop = of_find_property(dev->of_node, "operating-points", NULL);
376 if (!prop)
377 return -ENODEV;
378 if (!prop->value)
379 return -ENODATA;
380
381 /*
382 * Each OPP is a set of tuples consisting of frequency and
383 * voltage like <freq-kHz vol-uV>.
384 */
385 nr = prop->length / sizeof(u32);
386 if (nr % 2) {
387 dev_err(dev, "%s: Invalid OPP table\n", __func__);
388 return -EINVAL;
389 }
390
391 val = prop->value;
392 while (nr) {
393 unsigned long freq = be32_to_cpup(val++) * 1000;
394 unsigned long volt = be32_to_cpup(val++);
395
396 if (_opp_add_v1(dev, freq, volt, false))
397 dev_warn(dev, "%s: Failed to add OPP %ld\n",
398 __func__, freq);
399 nr -= 2;
400 }
401
402 return 0;
403}
404
405/**
406 * dev_pm_opp_of_add_table() - Initialize opp table from device tree
407 * @dev: device pointer used to lookup OPP table.
408 *
409 * Register the initial OPP table with the OPP library for given device.
410 *
411 * Locking: The internal opp_table and opp structures are RCU protected.
412 * Hence this function indirectly uses RCU updater strategy with mutex locks
413 * to keep the integrity of the internal data structures. Callers should ensure
414 * that this function is *NOT* called under RCU protection or in contexts where
415 * mutex cannot be locked.
416 *
417 * Return:
418 * 0 On success OR
419 * Duplicate OPPs (both freq and volt are same) and opp->available
420 * -EEXIST Freq are same and volt are different OR
421 * Duplicate OPPs (both freq and volt are same) and !opp->available
422 * -ENOMEM Memory allocation failure
423 * -ENODEV when 'operating-points' property is not found or is invalid data
424 * in device node.
425 * -ENODATA when empty 'operating-points' property is found
426 * -EINVAL when invalid entries are found in opp-v2 table
427 */
428int dev_pm_opp_of_add_table(struct device *dev)
429{
430 struct device_node *opp_np;
431 int ret;
432
433 /*
434 * OPPs have two version of bindings now. The older one is deprecated,
435 * try for the new binding first.
436 */
437 opp_np = _of_get_opp_desc_node(dev);
438 if (!opp_np) {
439 /*
440 * Try old-deprecated bindings for backward compatibility with
441 * older dtbs.
442 */
443 return _of_add_opp_table_v1(dev);
444 }
445
446 ret = _of_add_opp_table_v2(dev, opp_np);
447 of_node_put(opp_np);
448
449 return ret;
450}
451EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
452
453/* CPU device specific helpers */
454
455/**
456 * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask
457 * @cpumask: cpumask for which OPP table needs to be removed
458 *
459 * This removes the OPP tables for CPUs present in the @cpumask.
460 * This should be used only to remove static entries created from DT.
461 *
462 * Locking: The internal opp_table and opp structures are RCU protected.
463 * Hence this function internally uses RCU updater strategy with mutex locks
464 * to keep the integrity of the internal data structures. Callers should ensure
465 * that this function is *NOT* called under RCU protection or in contexts where
466 * mutex cannot be locked.
467 */
468void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
469{
470 _dev_pm_opp_cpumask_remove_table(cpumask, true);
471}
472EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
473
474/**
475 * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask
476 * @cpumask: cpumask for which OPP table needs to be added.
477 *
478 * This adds the OPP tables for CPUs present in the @cpumask.
479 *
480 * Locking: The internal opp_table and opp structures are RCU protected.
481 * Hence this function internally uses RCU updater strategy with mutex locks
482 * to keep the integrity of the internal data structures. Callers should ensure
483 * that this function is *NOT* called under RCU protection or in contexts where
484 * mutex cannot be locked.
485 */
486int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
487{
488 struct device *cpu_dev;
489 int cpu, ret = 0;
490
491 WARN_ON(cpumask_empty(cpumask));
492
493 for_each_cpu(cpu, cpumask) {
494 cpu_dev = get_cpu_device(cpu);
495 if (!cpu_dev) {
496 pr_err("%s: failed to get cpu%d device\n", __func__,
497 cpu);
498 continue;
499 }
500
501 ret = dev_pm_opp_of_add_table(cpu_dev);
502 if (ret) {
503 pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
504 __func__, cpu, ret);
505
506 /* Free all other OPPs */
507 dev_pm_opp_of_cpumask_remove_table(cpumask);
508 break;
509 }
510 }
511
512 return ret;
513}
514EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
515
516/*
517 * Works only for OPP v2 bindings.
518 *
519 * Returns -ENOENT if operating-points-v2 bindings aren't supported.
520 */
521/**
522 * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with
523 * @cpu_dev using operating-points-v2
524 * bindings.
525 *
526 * @cpu_dev: CPU device for which we do this operation
527 * @cpumask: cpumask to update with information of sharing CPUs
528 *
529 * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev.
530 *
531 * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev.
532 *
533 * Locking: The internal opp_table and opp structures are RCU protected.
534 * Hence this function internally uses RCU updater strategy with mutex locks
535 * to keep the integrity of the internal data structures. Callers should ensure
536 * that this function is *NOT* called under RCU protection or in contexts where
537 * mutex cannot be locked.
538 */
539int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev,
540 struct cpumask *cpumask)
541{
542 struct device_node *np, *tmp_np;
543 struct device *tcpu_dev;
544 int cpu, ret = 0;
545
546 /* Get OPP descriptor node */
547 np = _of_get_opp_desc_node(cpu_dev);
548 if (!np) {
549 dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
550 return -ENOENT;
551 }
552
553 cpumask_set_cpu(cpu_dev->id, cpumask);
554
555 /* OPPs are shared ? */
556 if (!of_property_read_bool(np, "opp-shared"))
557 goto put_cpu_node;
558
559 for_each_possible_cpu(cpu) {
560 if (cpu == cpu_dev->id)
561 continue;
562
563 tcpu_dev = get_cpu_device(cpu);
564 if (!tcpu_dev) {
565 dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
566 __func__, cpu);
567 ret = -ENODEV;
568 goto put_cpu_node;
569 }
570
571 /* Get OPP descriptor node */
572 tmp_np = _of_get_opp_desc_node(tcpu_dev);
573 if (!tmp_np) {
574 dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
575 __func__);
576 ret = -ENOENT;
577 goto put_cpu_node;
578 }
579
580 /* CPUs are sharing opp node */
581 if (np == tmp_np)
582 cpumask_set_cpu(cpu, cpumask);
583
584 of_node_put(tmp_np);
585 }
586
587put_cpu_node:
588 of_node_put(np);
589 return ret;
590}
591EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index f67f806fcf3a..20f3be22e060 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -28,6 +28,8 @@ struct regulator;
 /* Lock to allow exclusive modification to the device and opp lists */
 extern struct mutex opp_table_lock;
 
+extern struct list_head opp_tables;
+
 /*
  * Internal data structure organization with the OPP layer library is as
  * follows:
@@ -183,6 +185,18 @@ struct opp_table {
 struct opp_table *_find_opp_table(struct device *dev);
 struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table);
 struct device_node *_of_get_opp_desc_node(struct device *dev);
+void _dev_pm_opp_remove_table(struct device *dev, bool remove_all);
+struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table);
+int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table);
+void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, bool notify);
+int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, bool dynamic);
+void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of);
+
+#ifdef CONFIG_OF
+void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
+#else
+static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {}
+#endif
 
 #ifdef CONFIG_DEBUG_FS
 void opp_debug_remove_one(struct dev_pm_opp *opp);
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index cccaf4a29e9f..bca26157f5b6 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -65,6 +65,10 @@ void dev_pm_opp_put_prop_name(struct device *dev);
 int dev_pm_opp_set_regulator(struct device *dev, const char *name);
 void dev_pm_opp_put_regulator(struct device *dev);
 int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq);
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask);
+int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
+void dev_pm_opp_remove_table(struct device *dev);
+void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask);
 #else
69static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) 73static inline unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
70{ 74{
@@ -109,25 +113,25 @@ static inline struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev)
109static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, 113static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
110 unsigned long freq, bool available) 114 unsigned long freq, bool available)
111{ 115{
112 return ERR_PTR(-EINVAL); 116 return ERR_PTR(-ENOTSUPP);
113} 117}
114 118
115static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, 119static inline struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
116 unsigned long *freq) 120 unsigned long *freq)
117{ 121{
118 return ERR_PTR(-EINVAL); 122 return ERR_PTR(-ENOTSUPP);
119} 123}
120 124
121static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, 125static inline struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
122 unsigned long *freq) 126 unsigned long *freq)
123{ 127{
124 return ERR_PTR(-EINVAL); 128 return ERR_PTR(-ENOTSUPP);
125} 129}
126 130
127static inline int dev_pm_opp_add(struct device *dev, unsigned long freq, 131static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
128 unsigned long u_volt) 132 unsigned long u_volt)
129{ 133{
130 return -EINVAL; 134 return -ENOTSUPP;
131} 135}
132 136
133static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) 137static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq)
@@ -147,73 +151,85 @@ static inline int dev_pm_opp_disable(struct device *dev, unsigned long freq)
147static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( 151static inline struct srcu_notifier_head *dev_pm_opp_get_notifier(
148 struct device *dev) 152 struct device *dev)
149{ 153{
150 return ERR_PTR(-EINVAL); 154 return ERR_PTR(-ENOTSUPP);
151} 155}
152 156
153static inline int dev_pm_opp_set_supported_hw(struct device *dev, 157static inline int dev_pm_opp_set_supported_hw(struct device *dev,
154 const u32 *versions, 158 const u32 *versions,
155 unsigned int count) 159 unsigned int count)
156{ 160{
157 return -EINVAL; 161 return -ENOTSUPP;
158} 162}
159 163
160static inline void dev_pm_opp_put_supported_hw(struct device *dev) {} 164static inline void dev_pm_opp_put_supported_hw(struct device *dev) {}
161 165
162static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name) 166static inline int dev_pm_opp_set_prop_name(struct device *dev, const char *name)
163{ 167{
164 return -EINVAL; 168 return -ENOTSUPP;
165} 169}
166 170
167static inline void dev_pm_opp_put_prop_name(struct device *dev) {} 171static inline void dev_pm_opp_put_prop_name(struct device *dev) {}
168 172
169static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name) 173static inline int dev_pm_opp_set_regulator(struct device *dev, const char *name)
170{ 174{
171 return -EINVAL; 175 return -ENOTSUPP;
172} 176}
173 177
174static inline void dev_pm_opp_put_regulator(struct device *dev) {} 178static inline void dev_pm_opp_put_regulator(struct device *dev) {}
175 179
176static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) 180static inline int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
177{ 181{
182 return -ENOTSUPP;
183}
184
185static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, const struct cpumask *cpumask)
186{
187 return -ENOTSUPP;
188}
189
190static inline int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
191{
178 return -EINVAL; 192 return -EINVAL;
179} 193}
180 194
195static inline void dev_pm_opp_remove_table(struct device *dev)
196{
197}
198
199static inline void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask)
200{
201}
202
181#endif /* CONFIG_PM_OPP */ 203#endif /* CONFIG_PM_OPP */
182 204
183#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) 205#if defined(CONFIG_PM_OPP) && defined(CONFIG_OF)
184int dev_pm_opp_of_add_table(struct device *dev); 206int dev_pm_opp_of_add_table(struct device *dev);
185void dev_pm_opp_of_remove_table(struct device *dev); 207void dev_pm_opp_of_remove_table(struct device *dev);
186int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask); 208int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask);
187void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask); 209void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask);
188int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask); 210int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask);
189int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask);
190#else 211#else
191static inline int dev_pm_opp_of_add_table(struct device *dev) 212static inline int dev_pm_opp_of_add_table(struct device *dev)
192{ 213{
193 return -EINVAL; 214 return -ENOTSUPP;
194} 215}
195 216
196static inline void dev_pm_opp_of_remove_table(struct device *dev) 217static inline void dev_pm_opp_of_remove_table(struct device *dev)
197{ 218{
198} 219}
199 220
200static inline int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask) 221static inline int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask)
201{
202 return -ENOSYS;
203}
204
205static inline void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
206{ 222{
223 return -ENOTSUPP;
207} 224}
208 225
209static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) 226static inline void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask)
210{ 227{
211 return -ENOSYS;
212} 228}
213 229
214static inline int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask) 230static inline int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask)
215{ 231{
216 return -ENOSYS; 232 return -ENOTSUPP;
217} 233}
218#endif 234#endif
219 235
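
For the non-DT path that the pm_opp.h additions above enable, a similarly hypothetical sketch (the helper name, frequencies, and voltages are made up; only the dev_pm_opp_*() calls are from this series): register OPPs dynamically, mark the table as shared across a cpumask, and tear it down with the new remove helpers.

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/pm_opp.h>

/* Hypothetical: build a two-entry OPP table at runtime and share it. */
static int example_register_dynamic_opps(struct device *cpu_dev,
					 const struct cpumask *cpus)
{
	int ret;

	/* Frequencies in Hz, voltages in uV; the values are illustrative */
	ret = dev_pm_opp_add(cpu_dev, 500000000, 900000);
	if (!ret)
		ret = dev_pm_opp_add(cpu_dev, 1000000000, 1100000);
	if (ret)
		goto err;

	/* Mark every CPU in @cpus as sharing cpu_dev's OPP table */
	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpus);
	if (ret)
		goto err;

	return 0;

err:
	/*
	 * Drops both static and dynamic entries for this device;
	 * dev_pm_opp_cpumask_remove_table() is the cpumask-wide variant.
	 */
	dev_pm_opp_remove_table(cpu_dev);
	return ret;
}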