| author | Viresh Kumar <viresh.kumar@linaro.org> | 2016-05-05 06:50:33 -0400 |
|---|---|---|
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2016-05-06 07:22:49 -0400 |
| commit | f47b72a15a9679dd4dc1af681d4d2f1ca2815552 | |
| tree | 1e8f306d2aff410c329024c8691a49bac57f716d | |
| parent | 411466c5081d2f649b3583cae0f6c9ad5edec636 | |
PM / OPP: Move CONFIG_OF dependent code in a separate file
Recently, a few issues were noticed where CONFIG_OF wasn't used
consistently across many of the routines. The core file has also grown
large, and the #ifdef hackery makes it harder to read.
Move OF-specific code to another file and compile that only if CONFIG_OF
is enabled.
Compile-tested:
- For ARM (exynos) with CONFIG_OF enabled
- For X86 with CONFIG_OF disabled (have to enable CONFIG_PM_OPP separately)
No functional changes.
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Tested-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r-- | drivers/base/power/opp/Makefile | 1 |
-rw-r--r-- | drivers/base/power/opp/core.c | 448 |
-rw-r--r-- | drivers/base/power/opp/cpu.c | 143 |
-rw-r--r-- | drivers/base/power/opp/of.c | 591 |
-rw-r--r-- | drivers/base/power/opp/opp.h | 14 |
5 files changed, 618 insertions, 579 deletions
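
The mechanism behind the split is visible in the opp.h hunk at the bottom of this patch: the OF-only init helper gets a real prototype when CONFIG_OF is enabled and a static inline no-op stub otherwise, so core.c can call it unconditionally while of.c itself is compiled only when CONFIG_OF is set. Condensed from the opp.h change below:

```c
#ifdef CONFIG_OF
void _of_init_opp_table(struct opp_table *opp_table, struct device *dev);
#else
static inline void _of_init_opp_table(struct opp_table *opp_table,
				      struct device *dev) {}
#endif
```

_add_opp_table() in core.c then calls _of_init_opp_table(opp_table, dev) with no #ifdef at the call site; in a !CONFIG_OF build the stub compiles away and of.o is never built (obj-$(CONFIG_OF) += of.o).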
diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile
index 19837ef04d8e..e70ceb406fe9 100644
--- a/drivers/base/power/opp/Makefile
+++ b/drivers/base/power/opp/Makefile
@@ -1,3 +1,4 @@ | |||
1 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG | 1 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG |
2 | obj-y += core.o cpu.o | 2 | obj-y += core.o cpu.o |
3 | obj-$(CONFIG_OF) += of.o | ||
3 | obj-$(CONFIG_DEBUG_FS) += debugfs.o | 4 | obj-$(CONFIG_DEBUG_FS) += debugfs.o |
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c
index 9f8bf04b4dbe..f98b01a3a4e7 100644
--- a/drivers/base/power/opp/core.c
+++ b/drivers/base/power/opp/core.c
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/of.h> | ||
22 | #include <linux/export.h> | 21 | #include <linux/export.h> |
23 | #include <linux/regulator/consumer.h> | 22 | #include <linux/regulator/consumer.h> |
24 | 23 | ||
@@ -29,7 +28,7 @@ | |||
29 | * from here, with each opp_table containing the list of opps it supports in | 28 | * from here, with each opp_table containing the list of opps it supports in |
30 | * various states of availability. | 29 | * various states of availability. |
31 | */ | 30 | */ |
32 | static LIST_HEAD(opp_tables); | 31 | LIST_HEAD(opp_tables); |
33 | /* Lock to allow exclusive modification to the device and opp lists */ | 32 | /* Lock to allow exclusive modification to the device and opp lists */ |
34 | DEFINE_MUTEX(opp_table_lock); | 33 | DEFINE_MUTEX(opp_table_lock); |
35 | 34 | ||
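
opp_tables loses its static qualifier here because the new of.c has to walk the same list from a separate compilation unit; the matching extern declaration is added to opp.h later in this patch. The three pieces, condensed from this diff:

```c
/* core.c: single definition of the shared table list. */
LIST_HEAD(opp_tables);

/* opp.h: declaration visible to the other files of the driver. */
extern struct list_head opp_tables;

/* of.c: _managed_opp() iterates the same list. */
static struct opp_table *_managed_opp(const struct device_node *np)
{
	struct opp_table *opp_table;

	list_for_each_entry_rcu(opp_table, &opp_tables, node)
		if (opp_table->np == np)
			return opp_table->shared_opp ? opp_table : NULL;

	return NULL;
}
```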
@@ -53,26 +52,6 @@ static struct opp_device *_find_opp_dev(const struct device *dev, | |||
53 | return NULL; | 52 | return NULL; |
54 | } | 53 | } |
55 | 54 | ||
56 | static struct opp_table *_managed_opp(const struct device_node *np) | ||
57 | { | ||
58 | struct opp_table *opp_table; | ||
59 | |||
60 | list_for_each_entry_rcu(opp_table, &opp_tables, node) { | ||
61 | if (opp_table->np == np) { | ||
62 | /* | ||
63 | * Multiple devices can point to the same OPP table and | ||
64 | * so will have same node-pointer, np. | ||
65 | * | ||
66 | * But the OPPs will be considered as shared only if the | ||
67 | * OPP table contains a "opp-shared" property. | ||
68 | */ | ||
69 | return opp_table->shared_opp ? opp_table : NULL; | ||
70 | } | ||
71 | } | ||
72 | |||
73 | return NULL; | ||
74 | } | ||
75 | |||
76 | /** | 55 | /** |
77 | * _find_opp_table() - find opp_table struct using device pointer | 56 | * _find_opp_table() - find opp_table struct using device pointer |
78 | * @dev: device pointer used to lookup OPP table | 57 | * @dev: device pointer used to lookup OPP table |
@@ -760,7 +739,6 @@ static struct opp_table *_add_opp_table(struct device *dev) | |||
760 | { | 739 | { |
761 | struct opp_table *opp_table; | 740 | struct opp_table *opp_table; |
762 | struct opp_device *opp_dev; | 741 | struct opp_device *opp_dev; |
763 | struct device_node *np; | ||
764 | int ret; | 742 | int ret; |
765 | 743 | ||
766 | /* Check for existing table for 'dev' first */ | 744 | /* Check for existing table for 'dev' first */ |
@@ -784,20 +762,7 @@ static struct opp_table *_add_opp_table(struct device *dev) | |||
784 | return NULL; | 762 | return NULL; |
785 | } | 763 | } |
786 | 764 | ||
787 | /* | 765 | _of_init_opp_table(opp_table, dev); |
788 | * Only required for backward compatibility with v1 bindings, but isn't | ||
789 | * harmful for other cases. And so we do it unconditionally. | ||
790 | */ | ||
791 | np = of_node_get(dev->of_node); | ||
792 | if (np) { | ||
793 | u32 val; | ||
794 | |||
795 | if (!of_property_read_u32(np, "clock-latency", &val)) | ||
796 | opp_table->clock_latency_ns_max = val; | ||
797 | of_property_read_u32(np, "voltage-tolerance", | ||
798 | &opp_table->voltage_tolerance_v1); | ||
799 | of_node_put(np); | ||
800 | } | ||
801 | 766 | ||
802 | /* Set regulator to a non-NULL error value */ | 767 | /* Set regulator to a non-NULL error value */ |
803 | opp_table->regulator = ERR_PTR(-ENXIO); | 768 | opp_table->regulator = ERR_PTR(-ENXIO); |
@@ -893,8 +858,8 @@ static void _kfree_opp_rcu(struct rcu_head *head) | |||
893 | * It is assumed that the caller holds required mutex for an RCU updater | 858 | * It is assumed that the caller holds required mutex for an RCU updater |
894 | * strategy. | 859 | * strategy. |
895 | */ | 860 | */ |
896 | static void _opp_remove(struct opp_table *opp_table, | 861 | void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, |
897 | struct dev_pm_opp *opp, bool notify) | 862 | bool notify) |
898 | { | 863 | { |
899 | /* | 864 | /* |
900 | * Notify the changes in the availability of the operable | 865 | * Notify the changes in the availability of the operable |
@@ -955,8 +920,8 @@ unlock: | |||
955 | } | 920 | } |
956 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove); | 921 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove); |
957 | 922 | ||
958 | static struct dev_pm_opp *_allocate_opp(struct device *dev, | 923 | struct dev_pm_opp *_allocate_opp(struct device *dev, |
959 | struct opp_table **opp_table) | 924 | struct opp_table **opp_table) |
960 | { | 925 | { |
961 | struct dev_pm_opp *opp; | 926 | struct dev_pm_opp *opp; |
962 | 927 | ||
@@ -992,8 +957,8 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, | |||
992 | return true; | 957 | return true; |
993 | } | 958 | } |
994 | 959 | ||
995 | static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, | 960 | int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, |
996 | struct opp_table *opp_table) | 961 | struct opp_table *opp_table) |
997 | { | 962 | { |
998 | struct dev_pm_opp *opp; | 963 | struct dev_pm_opp *opp; |
999 | struct list_head *head = &opp_table->opp_list; | 964 | struct list_head *head = &opp_table->opp_list; |
@@ -1069,8 +1034,8 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, | |||
1069 | * Duplicate OPPs (both freq and volt are same) and !opp->available | 1034 | * Duplicate OPPs (both freq and volt are same) and !opp->available |
1070 | * -ENOMEM Memory allocation failure | 1035 | * -ENOMEM Memory allocation failure |
1071 | */ | 1036 | */ |
1072 | static int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, | 1037 | int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, |
1073 | bool dynamic) | 1038 | bool dynamic) |
1074 | { | 1039 | { |
1075 | struct opp_table *opp_table; | 1040 | struct opp_table *opp_table; |
1076 | struct dev_pm_opp *new_opp; | 1041 | struct dev_pm_opp *new_opp; |
@@ -1115,83 +1080,6 @@ unlock: | |||
1115 | return ret; | 1080 | return ret; |
1116 | } | 1081 | } |
1117 | 1082 | ||
1118 | /* TODO: Support multiple regulators */ | ||
1119 | static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, | ||
1120 | struct opp_table *opp_table) | ||
1121 | { | ||
1122 | u32 microvolt[3] = {0}; | ||
1123 | u32 val; | ||
1124 | int count, ret; | ||
1125 | struct property *prop = NULL; | ||
1126 | char name[NAME_MAX]; | ||
1127 | |||
1128 | /* Search for "opp-microvolt-<name>" */ | ||
1129 | if (opp_table->prop_name) { | ||
1130 | snprintf(name, sizeof(name), "opp-microvolt-%s", | ||
1131 | opp_table->prop_name); | ||
1132 | prop = of_find_property(opp->np, name, NULL); | ||
1133 | } | ||
1134 | |||
1135 | if (!prop) { | ||
1136 | /* Search for "opp-microvolt" */ | ||
1137 | sprintf(name, "opp-microvolt"); | ||
1138 | prop = of_find_property(opp->np, name, NULL); | ||
1139 | |||
1140 | /* Missing property isn't a problem, but an invalid entry is */ | ||
1141 | if (!prop) | ||
1142 | return 0; | ||
1143 | } | ||
1144 | |||
1145 | count = of_property_count_u32_elems(opp->np, name); | ||
1146 | if (count < 0) { | ||
1147 | dev_err(dev, "%s: Invalid %s property (%d)\n", | ||
1148 | __func__, name, count); | ||
1149 | return count; | ||
1150 | } | ||
1151 | |||
1152 | /* There can be one or three elements here */ | ||
1153 | if (count != 1 && count != 3) { | ||
1154 | dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n", | ||
1155 | __func__, name, count); | ||
1156 | return -EINVAL; | ||
1157 | } | ||
1158 | |||
1159 | ret = of_property_read_u32_array(opp->np, name, microvolt, count); | ||
1160 | if (ret) { | ||
1161 | dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret); | ||
1162 | return -EINVAL; | ||
1163 | } | ||
1164 | |||
1165 | opp->u_volt = microvolt[0]; | ||
1166 | |||
1167 | if (count == 1) { | ||
1168 | opp->u_volt_min = opp->u_volt; | ||
1169 | opp->u_volt_max = opp->u_volt; | ||
1170 | } else { | ||
1171 | opp->u_volt_min = microvolt[1]; | ||
1172 | opp->u_volt_max = microvolt[2]; | ||
1173 | } | ||
1174 | |||
1175 | /* Search for "opp-microamp-<name>" */ | ||
1176 | prop = NULL; | ||
1177 | if (opp_table->prop_name) { | ||
1178 | snprintf(name, sizeof(name), "opp-microamp-%s", | ||
1179 | opp_table->prop_name); | ||
1180 | prop = of_find_property(opp->np, name, NULL); | ||
1181 | } | ||
1182 | |||
1183 | if (!prop) { | ||
1184 | /* Search for "opp-microamp" */ | ||
1185 | sprintf(name, "opp-microamp"); | ||
1186 | prop = of_find_property(opp->np, name, NULL); | ||
1187 | } | ||
1188 | |||
1189 | if (prop && !of_property_read_u32(opp->np, name, &val)) | ||
1190 | opp->u_amp = val; | ||
1191 | |||
1192 | return 0; | ||
1193 | } | ||
1194 | |||
1195 | /** | 1083 | /** |
1196 | * dev_pm_opp_set_supported_hw() - Set supported platforms | 1084 | * dev_pm_opp_set_supported_hw() - Set supported platforms |
1197 | * @dev: Device for which supported-hw has to be set. | 1085 | * @dev: Device for which supported-hw has to be set. |
@@ -1520,144 +1408,6 @@ unlock: | |||
1520 | } | 1408 | } |
1521 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator); | 1409 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulator); |
1522 | 1410 | ||
1523 | static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, | ||
1524 | struct device_node *np) | ||
1525 | { | ||
1526 | unsigned int count = opp_table->supported_hw_count; | ||
1527 | u32 version; | ||
1528 | int ret; | ||
1529 | |||
1530 | if (!opp_table->supported_hw) | ||
1531 | return true; | ||
1532 | |||
1533 | while (count--) { | ||
1534 | ret = of_property_read_u32_index(np, "opp-supported-hw", count, | ||
1535 | &version); | ||
1536 | if (ret) { | ||
1537 | dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n", | ||
1538 | __func__, count, ret); | ||
1539 | return false; | ||
1540 | } | ||
1541 | |||
1542 | /* Both of these are bitwise masks of the versions */ | ||
1543 | if (!(version & opp_table->supported_hw[count])) | ||
1544 | return false; | ||
1545 | } | ||
1546 | |||
1547 | return true; | ||
1548 | } | ||
1549 | |||
1550 | /** | ||
1551 | * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) | ||
1552 | * @dev: device for which we do this operation | ||
1553 | * @np: device node | ||
1554 | * | ||
1555 | * This function adds an opp definition to the opp table and returns status. The | ||
1556 | * opp can be controlled using dev_pm_opp_enable/disable functions and may be | ||
1557 | * removed by dev_pm_opp_remove. | ||
1558 | * | ||
1559 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
1560 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
1561 | * to keep the integrity of the internal data structures. Callers should ensure | ||
1562 | * that this function is *NOT* called under RCU protection or in contexts where | ||
1563 | * mutex cannot be locked. | ||
1564 | * | ||
1565 | * Return: | ||
1566 | * 0 On success OR | ||
1567 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
1568 | * -EEXIST Freq are same and volt are different OR | ||
1569 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
1570 | * -ENOMEM Memory allocation failure | ||
1571 | * -EINVAL Failed parsing the OPP node | ||
1572 | */ | ||
1573 | static int _opp_add_static_v2(struct device *dev, struct device_node *np) | ||
1574 | { | ||
1575 | struct opp_table *opp_table; | ||
1576 | struct dev_pm_opp *new_opp; | ||
1577 | u64 rate; | ||
1578 | u32 val; | ||
1579 | int ret; | ||
1580 | |||
1581 | /* Hold our table modification lock here */ | ||
1582 | mutex_lock(&opp_table_lock); | ||
1583 | |||
1584 | new_opp = _allocate_opp(dev, &opp_table); | ||
1585 | if (!new_opp) { | ||
1586 | ret = -ENOMEM; | ||
1587 | goto unlock; | ||
1588 | } | ||
1589 | |||
1590 | ret = of_property_read_u64(np, "opp-hz", &rate); | ||
1591 | if (ret < 0) { | ||
1592 | dev_err(dev, "%s: opp-hz not found\n", __func__); | ||
1593 | goto free_opp; | ||
1594 | } | ||
1595 | |||
1596 | /* Check if the OPP supports hardware's hierarchy of versions or not */ | ||
1597 | if (!_opp_is_supported(dev, opp_table, np)) { | ||
1598 | dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate); | ||
1599 | goto free_opp; | ||
1600 | } | ||
1601 | |||
1602 | /* | ||
1603 | * Rate is defined as an unsigned long in clk API, and so casting | ||
1604 | * explicitly to its type. Must be fixed once rate is 64 bit | ||
1605 | * guaranteed in clk API. | ||
1606 | */ | ||
1607 | new_opp->rate = (unsigned long)rate; | ||
1608 | new_opp->turbo = of_property_read_bool(np, "turbo-mode"); | ||
1609 | |||
1610 | new_opp->np = np; | ||
1611 | new_opp->dynamic = false; | ||
1612 | new_opp->available = true; | ||
1613 | |||
1614 | if (!of_property_read_u32(np, "clock-latency-ns", &val)) | ||
1615 | new_opp->clock_latency_ns = val; | ||
1616 | |||
1617 | ret = opp_parse_supplies(new_opp, dev, opp_table); | ||
1618 | if (ret) | ||
1619 | goto free_opp; | ||
1620 | |||
1621 | ret = _opp_add(dev, new_opp, opp_table); | ||
1622 | if (ret) | ||
1623 | goto free_opp; | ||
1624 | |||
1625 | /* OPP to select on device suspend */ | ||
1626 | if (of_property_read_bool(np, "opp-suspend")) { | ||
1627 | if (opp_table->suspend_opp) { | ||
1628 | dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n", | ||
1629 | __func__, opp_table->suspend_opp->rate, | ||
1630 | new_opp->rate); | ||
1631 | } else { | ||
1632 | new_opp->suspend = true; | ||
1633 | opp_table->suspend_opp = new_opp; | ||
1634 | } | ||
1635 | } | ||
1636 | |||
1637 | if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max) | ||
1638 | opp_table->clock_latency_ns_max = new_opp->clock_latency_ns; | ||
1639 | |||
1640 | mutex_unlock(&opp_table_lock); | ||
1641 | |||
1642 | pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", | ||
1643 | __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt, | ||
1644 | new_opp->u_volt_min, new_opp->u_volt_max, | ||
1645 | new_opp->clock_latency_ns); | ||
1646 | |||
1647 | /* | ||
1648 | * Notify the changes in the availability of the operable | ||
1649 | * frequency/voltage list. | ||
1650 | */ | ||
1651 | srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp); | ||
1652 | return 0; | ||
1653 | |||
1654 | free_opp: | ||
1655 | _opp_remove(opp_table, new_opp, false); | ||
1656 | unlock: | ||
1657 | mutex_unlock(&opp_table_lock); | ||
1658 | return ret; | ||
1659 | } | ||
1660 | |||
1661 | /** | 1411 | /** |
1662 | * dev_pm_opp_add() - Add an OPP table from a table definitions | 1412 | * dev_pm_opp_add() - Add an OPP table from a table definitions |
1663 | * @dev: device for which we do this operation | 1413 | * @dev: device for which we do this operation |
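
For configurations without device tree support, such as the X86 build mentioned in the commit message, OPPs are still registered through the dynamic dev_pm_opp_add() interface whose kernel-doc starts in the context above. A hedged usage sketch; the device pointer, frequencies and voltages are invented for illustration:

```c
#include <linux/pm_opp.h>

/* Hypothetical platform code registering two OPPs by hand.
 * Frequencies are in Hz, voltages in microvolts. */
static int example_register_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 500000000, 1000000);	/* 500 MHz @ 1.00 V */
	if (ret)
		return ret;

	ret = dev_pm_opp_add(dev, 1000000000, 1200000);	/* 1 GHz @ 1.20 V */
	if (ret)
		dev_pm_opp_remove(dev, 500000000);	/* roll back the first OPP */

	return ret;
}
```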
@@ -1849,7 +1599,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); | |||
1849 | * Free OPPs either created using static entries present in DT or even the | 1599 | * Free OPPs either created using static entries present in DT or even the |
1850 | * dynamically added entries based on remove_all param. | 1600 | * dynamically added entries based on remove_all param. |
1851 | */ | 1601 | */ |
1852 | static void _dev_pm_opp_remove_table(struct device *dev, bool remove_all) | 1602 | void _dev_pm_opp_remove_table(struct device *dev, bool remove_all) |
1853 | { | 1603 | { |
1854 | struct opp_table *opp_table; | 1604 | struct opp_table *opp_table; |
1855 | struct dev_pm_opp *opp, *tmp; | 1605 | struct dev_pm_opp *opp, *tmp; |
@@ -1903,179 +1653,3 @@ void dev_pm_opp_remove_table(struct device *dev) | |||
1903 | _dev_pm_opp_remove_table(dev, true); | 1653 | _dev_pm_opp_remove_table(dev, true); |
1904 | } | 1654 | } |
1905 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); | 1655 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); |
1906 | |||
1907 | #ifdef CONFIG_OF | ||
1908 | /** | ||
1909 | * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT | ||
1910 | * entries | ||
1911 | * @dev: device pointer used to lookup OPP table. | ||
1912 | * | ||
1913 | * Free OPPs created using static entries present in DT. | ||
1914 | * | ||
1915 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
1916 | * Hence this function indirectly uses RCU updater strategy with mutex locks | ||
1917 | * to keep the integrity of the internal data structures. Callers should ensure | ||
1918 | * that this function is *NOT* called under RCU protection or in contexts where | ||
1919 | * mutex cannot be locked. | ||
1920 | */ | ||
1921 | void dev_pm_opp_of_remove_table(struct device *dev) | ||
1922 | { | ||
1923 | _dev_pm_opp_remove_table(dev, false); | ||
1924 | } | ||
1925 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); | ||
1926 | |||
1927 | /* Returns opp descriptor node for a device, caller must do of_node_put() */ | ||
1928 | struct device_node *_of_get_opp_desc_node(struct device *dev) | ||
1929 | { | ||
1930 | /* | ||
1931 | * TODO: Support for multiple OPP tables. | ||
1932 | * | ||
1933 | * There should be only ONE phandle present in "operating-points-v2" | ||
1934 | * property. | ||
1935 | */ | ||
1936 | |||
1937 | return of_parse_phandle(dev->of_node, "operating-points-v2", 0); | ||
1938 | } | ||
1939 | |||
1940 | /* Initializes OPP tables based on new bindings */ | ||
1941 | static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) | ||
1942 | { | ||
1943 | struct device_node *np; | ||
1944 | struct opp_table *opp_table; | ||
1945 | int ret = 0, count = 0; | ||
1946 | |||
1947 | mutex_lock(&opp_table_lock); | ||
1948 | |||
1949 | opp_table = _managed_opp(opp_np); | ||
1950 | if (opp_table) { | ||
1951 | /* OPPs are already managed */ | ||
1952 | if (!_add_opp_dev(dev, opp_table)) | ||
1953 | ret = -ENOMEM; | ||
1954 | mutex_unlock(&opp_table_lock); | ||
1955 | return ret; | ||
1956 | } | ||
1957 | mutex_unlock(&opp_table_lock); | ||
1958 | |||
1959 | /* We have opp-table node now, iterate over it and add OPPs */ | ||
1960 | for_each_available_child_of_node(opp_np, np) { | ||
1961 | count++; | ||
1962 | |||
1963 | ret = _opp_add_static_v2(dev, np); | ||
1964 | if (ret) { | ||
1965 | dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, | ||
1966 | ret); | ||
1967 | goto free_table; | ||
1968 | } | ||
1969 | } | ||
1970 | |||
1971 | /* There should be one of more OPP defined */ | ||
1972 | if (WARN_ON(!count)) | ||
1973 | return -ENOENT; | ||
1974 | |||
1975 | mutex_lock(&opp_table_lock); | ||
1976 | |||
1977 | opp_table = _find_opp_table(dev); | ||
1978 | if (WARN_ON(IS_ERR(opp_table))) { | ||
1979 | ret = PTR_ERR(opp_table); | ||
1980 | mutex_unlock(&opp_table_lock); | ||
1981 | goto free_table; | ||
1982 | } | ||
1983 | |||
1984 | opp_table->np = opp_np; | ||
1985 | opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared"); | ||
1986 | |||
1987 | mutex_unlock(&opp_table_lock); | ||
1988 | |||
1989 | return 0; | ||
1990 | |||
1991 | free_table: | ||
1992 | dev_pm_opp_of_remove_table(dev); | ||
1993 | |||
1994 | return ret; | ||
1995 | } | ||
1996 | |||
1997 | /* Initializes OPP tables based on old-deprecated bindings */ | ||
1998 | static int _of_add_opp_table_v1(struct device *dev) | ||
1999 | { | ||
2000 | const struct property *prop; | ||
2001 | const __be32 *val; | ||
2002 | int nr; | ||
2003 | |||
2004 | prop = of_find_property(dev->of_node, "operating-points", NULL); | ||
2005 | if (!prop) | ||
2006 | return -ENODEV; | ||
2007 | if (!prop->value) | ||
2008 | return -ENODATA; | ||
2009 | |||
2010 | /* | ||
2011 | * Each OPP is a set of tuples consisting of frequency and | ||
2012 | * voltage like <freq-kHz vol-uV>. | ||
2013 | */ | ||
2014 | nr = prop->length / sizeof(u32); | ||
2015 | if (nr % 2) { | ||
2016 | dev_err(dev, "%s: Invalid OPP table\n", __func__); | ||
2017 | return -EINVAL; | ||
2018 | } | ||
2019 | |||
2020 | val = prop->value; | ||
2021 | while (nr) { | ||
2022 | unsigned long freq = be32_to_cpup(val++) * 1000; | ||
2023 | unsigned long volt = be32_to_cpup(val++); | ||
2024 | |||
2025 | if (_opp_add_v1(dev, freq, volt, false)) | ||
2026 | dev_warn(dev, "%s: Failed to add OPP %ld\n", | ||
2027 | __func__, freq); | ||
2028 | nr -= 2; | ||
2029 | } | ||
2030 | |||
2031 | return 0; | ||
2032 | } | ||
2033 | |||
2034 | /** | ||
2035 | * dev_pm_opp_of_add_table() - Initialize opp table from device tree | ||
2036 | * @dev: device pointer used to lookup OPP table. | ||
2037 | * | ||
2038 | * Register the initial OPP table with the OPP library for given device. | ||
2039 | * | ||
2040 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
2041 | * Hence this function indirectly uses RCU updater strategy with mutex locks | ||
2042 | * to keep the integrity of the internal data structures. Callers should ensure | ||
2043 | * that this function is *NOT* called under RCU protection or in contexts where | ||
2044 | * mutex cannot be locked. | ||
2045 | * | ||
2046 | * Return: | ||
2047 | * 0 On success OR | ||
2048 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
2049 | * -EEXIST Freq are same and volt are different OR | ||
2050 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
2051 | * -ENOMEM Memory allocation failure | ||
2052 | * -ENODEV when 'operating-points' property is not found or is invalid data | ||
2053 | * in device node. | ||
2054 | * -ENODATA when empty 'operating-points' property is found | ||
2055 | * -EINVAL when invalid entries are found in opp-v2 table | ||
2056 | */ | ||
2057 | int dev_pm_opp_of_add_table(struct device *dev) | ||
2058 | { | ||
2059 | struct device_node *opp_np; | ||
2060 | int ret; | ||
2061 | |||
2062 | /* | ||
2063 | * OPPs have two version of bindings now. The older one is deprecated, | ||
2064 | * try for the new binding first. | ||
2065 | */ | ||
2066 | opp_np = _of_get_opp_desc_node(dev); | ||
2067 | if (!opp_np) { | ||
2068 | /* | ||
2069 | * Try old-deprecated bindings for backward compatibility with | ||
2070 | * older dtbs. | ||
2071 | */ | ||
2072 | return _of_add_opp_table_v1(dev); | ||
2073 | } | ||
2074 | |||
2075 | ret = _of_add_opp_table_v2(dev, opp_np); | ||
2076 | of_node_put(opp_np); | ||
2077 | |||
2078 | return ret; | ||
2079 | } | ||
2080 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); | ||
2081 | #endif | ||
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
index 357781e0b791..83d6e7ba1a34 100644
--- a/drivers/base/power/opp/cpu.c
+++ b/drivers/base/power/opp/cpu.c
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
20 | #include <linux/export.h> | 20 | #include <linux/export.h> |
21 | #include <linux/of.h> | ||
22 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
23 | 22 | ||
24 | #include "opp.h" | 23 | #include "opp.h" |
@@ -119,8 +118,7 @@ void dev_pm_opp_free_cpufreq_table(struct device *dev, | |||
119 | EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); | 118 | EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table); |
120 | #endif /* CONFIG_CPU_FREQ */ | 119 | #endif /* CONFIG_CPU_FREQ */ |
121 | 120 | ||
122 | static void | 121 | void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of) |
123 | _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of) | ||
124 | { | 122 | { |
125 | struct device *cpu_dev; | 123 | struct device *cpu_dev; |
126 | int cpu; | 124 | int cpu; |
@@ -162,145 +160,6 @@ void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) | |||
162 | } | 160 | } |
163 | EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table); | 161 | EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table); |
164 | 162 | ||
165 | #ifdef CONFIG_OF | ||
166 | /** | ||
167 | * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask | ||
168 | * @cpumask: cpumask for which OPP table needs to be removed | ||
169 | * | ||
170 | * This removes the OPP tables for CPUs present in the @cpumask. | ||
171 | * This should be used only to remove static entries created from DT. | ||
172 | * | ||
173 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
174 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
175 | * to keep the integrity of the internal data structures. Callers should ensure | ||
176 | * that this function is *NOT* called under RCU protection or in contexts where | ||
177 | * mutex cannot be locked. | ||
178 | */ | ||
179 | void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) | ||
180 | { | ||
181 | _dev_pm_opp_cpumask_remove_table(cpumask, true); | ||
182 | } | ||
183 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table); | ||
184 | |||
185 | /** | ||
186 | * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask | ||
187 | * @cpumask: cpumask for which OPP table needs to be added. | ||
188 | * | ||
189 | * This adds the OPP tables for CPUs present in the @cpumask. | ||
190 | * | ||
191 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
192 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
193 | * to keep the integrity of the internal data structures. Callers should ensure | ||
194 | * that this function is *NOT* called under RCU protection or in contexts where | ||
195 | * mutex cannot be locked. | ||
196 | */ | ||
197 | int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) | ||
198 | { | ||
199 | struct device *cpu_dev; | ||
200 | int cpu, ret = 0; | ||
201 | |||
202 | WARN_ON(cpumask_empty(cpumask)); | ||
203 | |||
204 | for_each_cpu(cpu, cpumask) { | ||
205 | cpu_dev = get_cpu_device(cpu); | ||
206 | if (!cpu_dev) { | ||
207 | pr_err("%s: failed to get cpu%d device\n", __func__, | ||
208 | cpu); | ||
209 | continue; | ||
210 | } | ||
211 | |||
212 | ret = dev_pm_opp_of_add_table(cpu_dev); | ||
213 | if (ret) { | ||
214 | pr_err("%s: couldn't find opp table for cpu:%d, %d\n", | ||
215 | __func__, cpu, ret); | ||
216 | |||
217 | /* Free all other OPPs */ | ||
218 | dev_pm_opp_of_cpumask_remove_table(cpumask); | ||
219 | break; | ||
220 | } | ||
221 | } | ||
222 | |||
223 | return ret; | ||
224 | } | ||
225 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); | ||
226 | |||
227 | /* | ||
228 | * Works only for OPP v2 bindings. | ||
229 | * | ||
230 | * Returns -ENOENT if operating-points-v2 bindings aren't supported. | ||
231 | */ | ||
232 | /** | ||
233 | * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with | ||
234 | * @cpu_dev using operating-points-v2 | ||
235 | * bindings. | ||
236 | * | ||
237 | * @cpu_dev: CPU device for which we do this operation | ||
238 | * @cpumask: cpumask to update with information of sharing CPUs | ||
239 | * | ||
240 | * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. | ||
241 | * | ||
242 | * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev. | ||
243 | * | ||
244 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
245 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
246 | * to keep the integrity of the internal data structures. Callers should ensure | ||
247 | * that this function is *NOT* called under RCU protection or in contexts where | ||
248 | * mutex cannot be locked. | ||
249 | */ | ||
250 | int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) | ||
251 | { | ||
252 | struct device_node *np, *tmp_np; | ||
253 | struct device *tcpu_dev; | ||
254 | int cpu, ret = 0; | ||
255 | |||
256 | /* Get OPP descriptor node */ | ||
257 | np = _of_get_opp_desc_node(cpu_dev); | ||
258 | if (!np) { | ||
259 | dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__); | ||
260 | return -ENOENT; | ||
261 | } | ||
262 | |||
263 | cpumask_set_cpu(cpu_dev->id, cpumask); | ||
264 | |||
265 | /* OPPs are shared ? */ | ||
266 | if (!of_property_read_bool(np, "opp-shared")) | ||
267 | goto put_cpu_node; | ||
268 | |||
269 | for_each_possible_cpu(cpu) { | ||
270 | if (cpu == cpu_dev->id) | ||
271 | continue; | ||
272 | |||
273 | tcpu_dev = get_cpu_device(cpu); | ||
274 | if (!tcpu_dev) { | ||
275 | dev_err(cpu_dev, "%s: failed to get cpu%d device\n", | ||
276 | __func__, cpu); | ||
277 | ret = -ENODEV; | ||
278 | goto put_cpu_node; | ||
279 | } | ||
280 | |||
281 | /* Get OPP descriptor node */ | ||
282 | tmp_np = _of_get_opp_desc_node(tcpu_dev); | ||
283 | if (!tmp_np) { | ||
284 | dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n", | ||
285 | __func__); | ||
286 | ret = -ENOENT; | ||
287 | goto put_cpu_node; | ||
288 | } | ||
289 | |||
290 | /* CPUs are sharing opp node */ | ||
291 | if (np == tmp_np) | ||
292 | cpumask_set_cpu(cpu, cpumask); | ||
293 | |||
294 | of_node_put(tmp_np); | ||
295 | } | ||
296 | |||
297 | put_cpu_node: | ||
298 | of_node_put(np); | ||
299 | return ret; | ||
300 | } | ||
301 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); | ||
302 | #endif | ||
303 | |||
304 | /** | 163 | /** |
305 | * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs | 164 | * dev_pm_opp_set_sharing_cpus() - Mark OPP table as shared by few CPUs |
306 | * @cpu_dev: CPU device for which we do this operation | 165 | * @cpu_dev: CPU device for which we do this operation |
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
new file mode 100644
index 000000000000..94d2010558e3
--- /dev/null
+++ b/drivers/base/power/opp/of.c
@@ -0,0 +1,591 @@ | |||
1 | /* | ||
2 | * Generic OPP OF helpers | ||
3 | * | ||
4 | * Copyright (C) 2009-2010 Texas Instruments Incorporated. | ||
5 | * Nishanth Menon | ||
6 | * Romit Dasgupta | ||
7 | * Kevin Hilman | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
16 | #include <linux/cpu.h> | ||
17 | #include <linux/errno.h> | ||
18 | #include <linux/device.h> | ||
19 | #include <linux/of.h> | ||
20 | #include <linux/export.h> | ||
21 | |||
22 | #include "opp.h" | ||
23 | |||
24 | static struct opp_table *_managed_opp(const struct device_node *np) | ||
25 | { | ||
26 | struct opp_table *opp_table; | ||
27 | |||
28 | list_for_each_entry_rcu(opp_table, &opp_tables, node) { | ||
29 | if (opp_table->np == np) { | ||
30 | /* | ||
31 | * Multiple devices can point to the same OPP table and | ||
32 | * so will have same node-pointer, np. | ||
33 | * | ||
34 | * But the OPPs will be considered as shared only if the | ||
35 | * OPP table contains a "opp-shared" property. | ||
36 | */ | ||
37 | return opp_table->shared_opp ? opp_table : NULL; | ||
38 | } | ||
39 | } | ||
40 | |||
41 | return NULL; | ||
42 | } | ||
43 | |||
44 | void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) | ||
45 | { | ||
46 | struct device_node *np; | ||
47 | |||
48 | /* | ||
49 | * Only required for backward compatibility with v1 bindings, but isn't | ||
50 | * harmful for other cases. And so we do it unconditionally. | ||
51 | */ | ||
52 | np = of_node_get(dev->of_node); | ||
53 | if (np) { | ||
54 | u32 val; | ||
55 | |||
56 | if (!of_property_read_u32(np, "clock-latency", &val)) | ||
57 | opp_table->clock_latency_ns_max = val; | ||
58 | of_property_read_u32(np, "voltage-tolerance", | ||
59 | &opp_table->voltage_tolerance_v1); | ||
60 | of_node_put(np); | ||
61 | } | ||
62 | } | ||
63 | |||
64 | static bool _opp_is_supported(struct device *dev, struct opp_table *opp_table, | ||
65 | struct device_node *np) | ||
66 | { | ||
67 | unsigned int count = opp_table->supported_hw_count; | ||
68 | u32 version; | ||
69 | int ret; | ||
70 | |||
71 | if (!opp_table->supported_hw) | ||
72 | return true; | ||
73 | |||
74 | while (count--) { | ||
75 | ret = of_property_read_u32_index(np, "opp-supported-hw", count, | ||
76 | &version); | ||
77 | if (ret) { | ||
78 | dev_warn(dev, "%s: failed to read opp-supported-hw property at index %d: %d\n", | ||
79 | __func__, count, ret); | ||
80 | return false; | ||
81 | } | ||
82 | |||
83 | /* Both of these are bitwise masks of the versions */ | ||
84 | if (!(version & opp_table->supported_hw[count])) | ||
85 | return false; | ||
86 | } | ||
87 | |||
88 | return true; | ||
89 | } | ||
90 | |||
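
The check above is a per-index bitwise AND between the opp-supported-hw values read from the OPP node and the version masks the platform registered via dev_pm_opp_set_supported_hw(); an OPP is usable only if every index overlaps. A standalone sketch of just that rule, with invented masks:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Mirrors the matching loop in _opp_is_supported(): every version field
 * advertised by the OPP node must share at least one bit with the value
 * the platform registered for that field. */
static bool hw_versions_match(const uint32_t *platform, const uint32_t *node,
			      unsigned int count)
{
	while (count--)
		if (!(node[count] & platform[count]))
			return false;
	return true;
}

int main(void)
{
	const uint32_t platform[] = { 0x1, 0x4 };	/* e.g. silicon cut 1, speed bin 4 */
	const uint32_t node[]     = { 0x3, 0x4 };	/* OPP valid for cuts 1-2, bin 4 */

	printf("supported: %d\n", hw_versions_match(platform, node, 2));
	return 0;
}
```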
91 | /* TODO: Support multiple regulators */ | ||
92 | static int opp_parse_supplies(struct dev_pm_opp *opp, struct device *dev, | ||
93 | struct opp_table *opp_table) | ||
94 | { | ||
95 | u32 microvolt[3] = {0}; | ||
96 | u32 val; | ||
97 | int count, ret; | ||
98 | struct property *prop = NULL; | ||
99 | char name[NAME_MAX]; | ||
100 | |||
101 | /* Search for "opp-microvolt-<name>" */ | ||
102 | if (opp_table->prop_name) { | ||
103 | snprintf(name, sizeof(name), "opp-microvolt-%s", | ||
104 | opp_table->prop_name); | ||
105 | prop = of_find_property(opp->np, name, NULL); | ||
106 | } | ||
107 | |||
108 | if (!prop) { | ||
109 | /* Search for "opp-microvolt" */ | ||
110 | sprintf(name, "opp-microvolt"); | ||
111 | prop = of_find_property(opp->np, name, NULL); | ||
112 | |||
113 | /* Missing property isn't a problem, but an invalid entry is */ | ||
114 | if (!prop) | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | count = of_property_count_u32_elems(opp->np, name); | ||
119 | if (count < 0) { | ||
120 | dev_err(dev, "%s: Invalid %s property (%d)\n", | ||
121 | __func__, name, count); | ||
122 | return count; | ||
123 | } | ||
124 | |||
125 | /* There can be one or three elements here */ | ||
126 | if (count != 1 && count != 3) { | ||
127 | dev_err(dev, "%s: Invalid number of elements in %s property (%d)\n", | ||
128 | __func__, name, count); | ||
129 | return -EINVAL; | ||
130 | } | ||
131 | |||
132 | ret = of_property_read_u32_array(opp->np, name, microvolt, count); | ||
133 | if (ret) { | ||
134 | dev_err(dev, "%s: error parsing %s: %d\n", __func__, name, ret); | ||
135 | return -EINVAL; | ||
136 | } | ||
137 | |||
138 | opp->u_volt = microvolt[0]; | ||
139 | |||
140 | if (count == 1) { | ||
141 | opp->u_volt_min = opp->u_volt; | ||
142 | opp->u_volt_max = opp->u_volt; | ||
143 | } else { | ||
144 | opp->u_volt_min = microvolt[1]; | ||
145 | opp->u_volt_max = microvolt[2]; | ||
146 | } | ||
147 | |||
148 | /* Search for "opp-microamp-<name>" */ | ||
149 | prop = NULL; | ||
150 | if (opp_table->prop_name) { | ||
151 | snprintf(name, sizeof(name), "opp-microamp-%s", | ||
152 | opp_table->prop_name); | ||
153 | prop = of_find_property(opp->np, name, NULL); | ||
154 | } | ||
155 | |||
156 | if (!prop) { | ||
157 | /* Search for "opp-microamp" */ | ||
158 | sprintf(name, "opp-microamp"); | ||
159 | prop = of_find_property(opp->np, name, NULL); | ||
160 | } | ||
161 | |||
162 | if (prop && !of_property_read_u32(opp->np, name, &val)) | ||
163 | opp->u_amp = val; | ||
164 | |||
165 | return 0; | ||
166 | } | ||
167 | |||
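
opp_parse_supplies() accepts either a single target voltage or a <target min max> triplet in opp-microvolt. A small standalone sketch of how the two forms map onto the opp fields (values are invented):

```c
#include <stdint.h>
#include <stdio.h>

struct supply { uint32_t target_uv, min_uv, max_uv; };

/* Mirrors the 1-vs-3 element handling above: a single entry means
 * target == min == max. */
static struct supply parse_microvolt(const uint32_t *uv, int count)
{
	struct supply s = { uv[0], uv[0], uv[0] };

	if (count == 3) {
		s.min_uv = uv[1];
		s.max_uv = uv[2];
	}
	return s;
}

int main(void)
{
	const uint32_t triplet[] = { 1100000, 1050000, 1150000 };
	struct supply s = parse_microvolt(triplet, 3);

	printf("target %u, min %u, max %u\n", s.target_uv, s.min_uv, s.max_uv);
	return 0;
}
```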
168 | /** | ||
169 | * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT | ||
170 | * entries | ||
171 | * @dev: device pointer used to lookup OPP table. | ||
172 | * | ||
173 | * Free OPPs created using static entries present in DT. | ||
174 | * | ||
175 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
176 | * Hence this function indirectly uses RCU updater strategy with mutex locks | ||
177 | * to keep the integrity of the internal data structures. Callers should ensure | ||
178 | * that this function is *NOT* called under RCU protection or in contexts where | ||
179 | * mutex cannot be locked. | ||
180 | */ | ||
181 | void dev_pm_opp_of_remove_table(struct device *dev) | ||
182 | { | ||
183 | _dev_pm_opp_remove_table(dev, false); | ||
184 | } | ||
185 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); | ||
186 | |||
187 | /* Returns opp descriptor node for a device, caller must do of_node_put() */ | ||
188 | struct device_node *_of_get_opp_desc_node(struct device *dev) | ||
189 | { | ||
190 | /* | ||
191 | * TODO: Support for multiple OPP tables. | ||
192 | * | ||
193 | * There should be only ONE phandle present in "operating-points-v2" | ||
194 | * property. | ||
195 | */ | ||
196 | |||
197 | return of_parse_phandle(dev->of_node, "operating-points-v2", 0); | ||
198 | } | ||
199 | |||
200 | /** | ||
201 | * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) | ||
202 | * @dev: device for which we do this operation | ||
203 | * @np: device node | ||
204 | * | ||
205 | * This function adds an opp definition to the opp table and returns status. The | ||
206 | * opp can be controlled using dev_pm_opp_enable/disable functions and may be | ||
207 | * removed by dev_pm_opp_remove. | ||
208 | * | ||
209 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
210 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
211 | * to keep the integrity of the internal data structures. Callers should ensure | ||
212 | * that this function is *NOT* called under RCU protection or in contexts where | ||
213 | * mutex cannot be locked. | ||
214 | * | ||
215 | * Return: | ||
216 | * 0 On success OR | ||
217 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
218 | * -EEXIST Freq are same and volt are different OR | ||
219 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
220 | * -ENOMEM Memory allocation failure | ||
221 | * -EINVAL Failed parsing the OPP node | ||
222 | */ | ||
223 | static int _opp_add_static_v2(struct device *dev, struct device_node *np) | ||
224 | { | ||
225 | struct opp_table *opp_table; | ||
226 | struct dev_pm_opp *new_opp; | ||
227 | u64 rate; | ||
228 | u32 val; | ||
229 | int ret; | ||
230 | |||
231 | /* Hold our table modification lock here */ | ||
232 | mutex_lock(&opp_table_lock); | ||
233 | |||
234 | new_opp = _allocate_opp(dev, &opp_table); | ||
235 | if (!new_opp) { | ||
236 | ret = -ENOMEM; | ||
237 | goto unlock; | ||
238 | } | ||
239 | |||
240 | ret = of_property_read_u64(np, "opp-hz", &rate); | ||
241 | if (ret < 0) { | ||
242 | dev_err(dev, "%s: opp-hz not found\n", __func__); | ||
243 | goto free_opp; | ||
244 | } | ||
245 | |||
246 | /* Check if the OPP supports hardware's hierarchy of versions or not */ | ||
247 | if (!_opp_is_supported(dev, opp_table, np)) { | ||
248 | dev_dbg(dev, "OPP not supported by hardware: %llu\n", rate); | ||
249 | goto free_opp; | ||
250 | } | ||
251 | |||
252 | /* | ||
253 | * Rate is defined as an unsigned long in clk API, and so casting | ||
254 | * explicitly to its type. Must be fixed once rate is 64 bit | ||
255 | * guaranteed in clk API. | ||
256 | */ | ||
257 | new_opp->rate = (unsigned long)rate; | ||
258 | new_opp->turbo = of_property_read_bool(np, "turbo-mode"); | ||
259 | |||
260 | new_opp->np = np; | ||
261 | new_opp->dynamic = false; | ||
262 | new_opp->available = true; | ||
263 | |||
264 | if (!of_property_read_u32(np, "clock-latency-ns", &val)) | ||
265 | new_opp->clock_latency_ns = val; | ||
266 | |||
267 | ret = opp_parse_supplies(new_opp, dev, opp_table); | ||
268 | if (ret) | ||
269 | goto free_opp; | ||
270 | |||
271 | ret = _opp_add(dev, new_opp, opp_table); | ||
272 | if (ret) | ||
273 | goto free_opp; | ||
274 | |||
275 | /* OPP to select on device suspend */ | ||
276 | if (of_property_read_bool(np, "opp-suspend")) { | ||
277 | if (opp_table->suspend_opp) { | ||
278 | dev_warn(dev, "%s: Multiple suspend OPPs found (%lu %lu)\n", | ||
279 | __func__, opp_table->suspend_opp->rate, | ||
280 | new_opp->rate); | ||
281 | } else { | ||
282 | new_opp->suspend = true; | ||
283 | opp_table->suspend_opp = new_opp; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max) | ||
288 | opp_table->clock_latency_ns_max = new_opp->clock_latency_ns; | ||
289 | |||
290 | mutex_unlock(&opp_table_lock); | ||
291 | |||
292 | pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", | ||
293 | __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt, | ||
294 | new_opp->u_volt_min, new_opp->u_volt_max, | ||
295 | new_opp->clock_latency_ns); | ||
296 | |||
297 | /* | ||
298 | * Notify the changes in the availability of the operable | ||
299 | * frequency/voltage list. | ||
300 | */ | ||
301 | srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp); | ||
302 | return 0; | ||
303 | |||
304 | free_opp: | ||
305 | _opp_remove(opp_table, new_opp, false); | ||
306 | unlock: | ||
307 | mutex_unlock(&opp_table_lock); | ||
308 | return ret; | ||
309 | } | ||
310 | |||
311 | /* Initializes OPP tables based on new bindings */ | ||
312 | static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) | ||
313 | { | ||
314 | struct device_node *np; | ||
315 | struct opp_table *opp_table; | ||
316 | int ret = 0, count = 0; | ||
317 | |||
318 | mutex_lock(&opp_table_lock); | ||
319 | |||
320 | opp_table = _managed_opp(opp_np); | ||
321 | if (opp_table) { | ||
322 | /* OPPs are already managed */ | ||
323 | if (!_add_opp_dev(dev, opp_table)) | ||
324 | ret = -ENOMEM; | ||
325 | mutex_unlock(&opp_table_lock); | ||
326 | return ret; | ||
327 | } | ||
328 | mutex_unlock(&opp_table_lock); | ||
329 | |||
330 | /* We have opp-table node now, iterate over it and add OPPs */ | ||
331 | for_each_available_child_of_node(opp_np, np) { | ||
332 | count++; | ||
333 | |||
334 | ret = _opp_add_static_v2(dev, np); | ||
335 | if (ret) { | ||
336 | dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, | ||
337 | ret); | ||
338 | goto free_table; | ||
339 | } | ||
340 | } | ||
341 | |||
342 | /* There should be one of more OPP defined */ | ||
343 | if (WARN_ON(!count)) | ||
344 | return -ENOENT; | ||
345 | |||
346 | mutex_lock(&opp_table_lock); | ||
347 | |||
348 | opp_table = _find_opp_table(dev); | ||
349 | if (WARN_ON(IS_ERR(opp_table))) { | ||
350 | ret = PTR_ERR(opp_table); | ||
351 | mutex_unlock(&opp_table_lock); | ||
352 | goto free_table; | ||
353 | } | ||
354 | |||
355 | opp_table->np = opp_np; | ||
356 | opp_table->shared_opp = of_property_read_bool(opp_np, "opp-shared"); | ||
357 | |||
358 | mutex_unlock(&opp_table_lock); | ||
359 | |||
360 | return 0; | ||
361 | |||
362 | free_table: | ||
363 | dev_pm_opp_of_remove_table(dev); | ||
364 | |||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | /* Initializes OPP tables based on old-deprecated bindings */ | ||
369 | static int _of_add_opp_table_v1(struct device *dev) | ||
370 | { | ||
371 | const struct property *prop; | ||
372 | const __be32 *val; | ||
373 | int nr; | ||
374 | |||
375 | prop = of_find_property(dev->of_node, "operating-points", NULL); | ||
376 | if (!prop) | ||
377 | return -ENODEV; | ||
378 | if (!prop->value) | ||
379 | return -ENODATA; | ||
380 | |||
381 | /* | ||
382 | * Each OPP is a set of tuples consisting of frequency and | ||
383 | * voltage like <freq-kHz vol-uV>. | ||
384 | */ | ||
385 | nr = prop->length / sizeof(u32); | ||
386 | if (nr % 2) { | ||
387 | dev_err(dev, "%s: Invalid OPP table\n", __func__); | ||
388 | return -EINVAL; | ||
389 | } | ||
390 | |||
391 | val = prop->value; | ||
392 | while (nr) { | ||
393 | unsigned long freq = be32_to_cpup(val++) * 1000; | ||
394 | unsigned long volt = be32_to_cpup(val++); | ||
395 | |||
396 | if (_opp_add_v1(dev, freq, volt, false)) | ||
397 | dev_warn(dev, "%s: Failed to add OPP %ld\n", | ||
398 | __func__, freq); | ||
399 | nr -= 2; | ||
400 | } | ||
401 | |||
402 | return 0; | ||
403 | } | ||
404 | |||
405 | /** | ||
406 | * dev_pm_opp_of_add_table() - Initialize opp table from device tree | ||
407 | * @dev: device pointer used to lookup OPP table. | ||
408 | * | ||
409 | * Register the initial OPP table with the OPP library for given device. | ||
410 | * | ||
411 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
412 | * Hence this function indirectly uses RCU updater strategy with mutex locks | ||
413 | * to keep the integrity of the internal data structures. Callers should ensure | ||
414 | * that this function is *NOT* called under RCU protection or in contexts where | ||
415 | * mutex cannot be locked. | ||
416 | * | ||
417 | * Return: | ||
418 | * 0 On success OR | ||
419 | * Duplicate OPPs (both freq and volt are same) and opp->available | ||
420 | * -EEXIST Freq are same and volt are different OR | ||
421 | * Duplicate OPPs (both freq and volt are same) and !opp->available | ||
422 | * -ENOMEM Memory allocation failure | ||
423 | * -ENODEV when 'operating-points' property is not found or is invalid data | ||
424 | * in device node. | ||
425 | * -ENODATA when empty 'operating-points' property is found | ||
426 | * -EINVAL when invalid entries are found in opp-v2 table | ||
427 | */ | ||
428 | int dev_pm_opp_of_add_table(struct device *dev) | ||
429 | { | ||
430 | struct device_node *opp_np; | ||
431 | int ret; | ||
432 | |||
433 | /* | ||
434 | * OPPs have two version of bindings now. The older one is deprecated, | ||
435 | * try for the new binding first. | ||
436 | */ | ||
437 | opp_np = _of_get_opp_desc_node(dev); | ||
438 | if (!opp_np) { | ||
439 | /* | ||
440 | * Try old-deprecated bindings for backward compatibility with | ||
441 | * older dtbs. | ||
442 | */ | ||
443 | return _of_add_opp_table_v1(dev); | ||
444 | } | ||
445 | |||
446 | ret = _of_add_opp_table_v2(dev, opp_np); | ||
447 | of_node_put(opp_np); | ||
448 | |||
449 | return ret; | ||
450 | } | ||
451 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); | ||
452 | |||
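
A hedged sketch of how a consumer driver typically pairs the two exported calls above; the platform driver skeleton is invented, only dev_pm_opp_of_add_table()/dev_pm_opp_of_remove_table() come from this file:

```c
#include <linux/platform_device.h>
#include <linux/pm_opp.h>

static int example_probe(struct platform_device *pdev)
{
	int ret;

	/* Build the OPP table from operating-points / operating-points-v2. */
	ret = dev_pm_opp_of_add_table(&pdev->dev);
	if (ret)
		return ret;

	/* Clocks, regulators and the rest of the setup would follow here. */
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	dev_pm_opp_of_remove_table(&pdev->dev);
	return 0;
}
```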
453 | /* CPU device specific helpers */ | ||
454 | |||
455 | /** | ||
456 | * dev_pm_opp_of_cpumask_remove_table() - Removes OPP table for @cpumask | ||
457 | * @cpumask: cpumask for which OPP table needs to be removed | ||
458 | * | ||
459 | * This removes the OPP tables for CPUs present in the @cpumask. | ||
460 | * This should be used only to remove static entries created from DT. | ||
461 | * | ||
462 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
463 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
464 | * to keep the integrity of the internal data structures. Callers should ensure | ||
465 | * that this function is *NOT* called under RCU protection or in contexts where | ||
466 | * mutex cannot be locked. | ||
467 | */ | ||
468 | void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) | ||
469 | { | ||
470 | _dev_pm_opp_cpumask_remove_table(cpumask, true); | ||
471 | } | ||
472 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table); | ||
473 | |||
474 | /** | ||
475 | * dev_pm_opp_of_cpumask_add_table() - Adds OPP table for @cpumask | ||
476 | * @cpumask: cpumask for which OPP table needs to be added. | ||
477 | * | ||
478 | * This adds the OPP tables for CPUs present in the @cpumask. | ||
479 | * | ||
480 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
481 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
482 | * to keep the integrity of the internal data structures. Callers should ensure | ||
483 | * that this function is *NOT* called under RCU protection or in contexts where | ||
484 | * mutex cannot be locked. | ||
485 | */ | ||
486 | int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) | ||
487 | { | ||
488 | struct device *cpu_dev; | ||
489 | int cpu, ret = 0; | ||
490 | |||
491 | WARN_ON(cpumask_empty(cpumask)); | ||
492 | |||
493 | for_each_cpu(cpu, cpumask) { | ||
494 | cpu_dev = get_cpu_device(cpu); | ||
495 | if (!cpu_dev) { | ||
496 | pr_err("%s: failed to get cpu%d device\n", __func__, | ||
497 | cpu); | ||
498 | continue; | ||
499 | } | ||
500 | |||
501 | ret = dev_pm_opp_of_add_table(cpu_dev); | ||
502 | if (ret) { | ||
503 | pr_err("%s: couldn't find opp table for cpu:%d, %d\n", | ||
504 | __func__, cpu, ret); | ||
505 | |||
506 | /* Free all other OPPs */ | ||
507 | dev_pm_opp_of_cpumask_remove_table(cpumask); | ||
508 | break; | ||
509 | } | ||
510 | } | ||
511 | |||
512 | return ret; | ||
513 | } | ||
514 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); | ||
515 | |||
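
A hedged sketch of the cpumask pair above, roughly as a CPU frequency driver would use it; the fixed CPU numbers are invented, the two dev_pm_opp_of_cpumask_* calls are the ones defined in this file:

```c
#include <linux/cpumask.h>
#include <linux/pm_opp.h>

/* Hypothetical init path: add OPP tables for CPUs 0-3. */
static int example_add_cluster_opps(void)
{
	struct cpumask cpus;
	int ret;

	cpumask_clear(&cpus);
	cpumask_set_cpu(0, &cpus);
	cpumask_set_cpu(1, &cpus);
	cpumask_set_cpu(2, &cpus);
	cpumask_set_cpu(3, &cpus);

	ret = dev_pm_opp_of_cpumask_add_table(&cpus);
	if (ret)
		return ret;

	/* Remaining driver setup would go here; on a later failure the
	 * tables are dropped with dev_pm_opp_of_cpumask_remove_table(&cpus). */
	return 0;
}
```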
516 | /* | ||
517 | * Works only for OPP v2 bindings. | ||
518 | * | ||
519 | * Returns -ENOENT if operating-points-v2 bindings aren't supported. | ||
520 | */ | ||
521 | /** | ||
522 | * dev_pm_opp_of_get_sharing_cpus() - Get cpumask of CPUs sharing OPPs with | ||
523 | * @cpu_dev using operating-points-v2 | ||
524 | * bindings. | ||
525 | * | ||
526 | * @cpu_dev: CPU device for which we do this operation | ||
527 | * @cpumask: cpumask to update with information of sharing CPUs | ||
528 | * | ||
529 | * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. | ||
530 | * | ||
531 | * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev. | ||
532 | * | ||
533 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
534 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
535 | * to keep the integrity of the internal data structures. Callers should ensure | ||
536 | * that this function is *NOT* called under RCU protection or in contexts where | ||
537 | * mutex cannot be locked. | ||
538 | */ | ||
539 | int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, | ||
540 | struct cpumask *cpumask) | ||
541 | { | ||
542 | struct device_node *np, *tmp_np; | ||
543 | struct device *tcpu_dev; | ||
544 | int cpu, ret = 0; | ||
545 | |||
546 | /* Get OPP descriptor node */ | ||
547 | np = _of_get_opp_desc_node(cpu_dev); | ||
548 | if (!np) { | ||
549 | dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__); | ||
550 | return -ENOENT; | ||
551 | } | ||
552 | |||
553 | cpumask_set_cpu(cpu_dev->id, cpumask); | ||
554 | |||
555 | /* OPPs are shared ? */ | ||
556 | if (!of_property_read_bool(np, "opp-shared")) | ||
557 | goto put_cpu_node; | ||
558 | |||
559 | for_each_possible_cpu(cpu) { | ||
560 | if (cpu == cpu_dev->id) | ||
561 | continue; | ||
562 | |||
563 | tcpu_dev = get_cpu_device(cpu); | ||
564 | if (!tcpu_dev) { | ||
565 | dev_err(cpu_dev, "%s: failed to get cpu%d device\n", | ||
566 | __func__, cpu); | ||
567 | ret = -ENODEV; | ||
568 | goto put_cpu_node; | ||
569 | } | ||
570 | |||
571 | /* Get OPP descriptor node */ | ||
572 | tmp_np = _of_get_opp_desc_node(tcpu_dev); | ||
573 | if (!tmp_np) { | ||
574 | dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n", | ||
575 | __func__); | ||
576 | ret = -ENOENT; | ||
577 | goto put_cpu_node; | ||
578 | } | ||
579 | |||
580 | /* CPUs are sharing opp node */ | ||
581 | if (np == tmp_np) | ||
582 | cpumask_set_cpu(cpu, cpumask); | ||
583 | |||
584 | of_node_put(tmp_np); | ||
585 | } | ||
586 | |||
587 | put_cpu_node: | ||
588 | of_node_put(np); | ||
589 | return ret; | ||
590 | } | ||
591 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus); | ||
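
Finally, a hedged sketch that ties the helpers together the way a cpufreq driver might at policy initialisation: discover which CPUs share cpu0's OPP table and log them; everything except the dev_pm_opp_of_get_sharing_cpus() call is invented:

```c
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/pm_opp.h>

static int example_report_sharing(void)
{
	struct device *cpu_dev = get_cpu_device(0);
	struct cpumask shared;
	int ret;

	if (!cpu_dev)
		return -ENODEV;

	cpumask_clear(&shared);

	/* Fills 'shared' with cpu0 plus every CPU whose operating-points-v2
	 * table is the same node and is marked "opp-shared". */
	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, &shared);
	if (ret)	/* -ENOENT when no v2 table exists for this CPU */
		return ret;

	pr_info("cpu0 shares OPPs with CPUs %*pbl\n", cpumask_pr_args(&shared));
	return 0;
}
```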
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
index f67f806fcf3a..20f3be22e060 100644
--- a/drivers/base/power/opp/opp.h
+++ b/drivers/base/power/opp/opp.h
@@ -28,6 +28,8 @@ struct regulator; | |||
28 | /* Lock to allow exclusive modification to the device and opp lists */ | 28 | /* Lock to allow exclusive modification to the device and opp lists */ |
29 | extern struct mutex opp_table_lock; | 29 | extern struct mutex opp_table_lock; |
30 | 30 | ||
31 | extern struct list_head opp_tables; | ||
32 | |||
31 | /* | 33 | /* |
32 | * Internal data structure organization with the OPP layer library is as | 34 | * Internal data structure organization with the OPP layer library is as |
33 | * follows: | 35 | * follows: |
@@ -183,6 +185,18 @@ struct opp_table { | |||
183 | struct opp_table *_find_opp_table(struct device *dev); | 185 | struct opp_table *_find_opp_table(struct device *dev); |
184 | struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table); | 186 | struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table); |
185 | struct device_node *_of_get_opp_desc_node(struct device *dev); | 187 | struct device_node *_of_get_opp_desc_node(struct device *dev); |
188 | void _dev_pm_opp_remove_table(struct device *dev, bool remove_all); | ||
189 | struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table); | ||
190 | int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table); | ||
191 | void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, bool notify); | ||
192 | int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, bool dynamic); | ||
193 | void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of); | ||
194 | |||
195 | #ifdef CONFIG_OF | ||
196 | void _of_init_opp_table(struct opp_table *opp_table, struct device *dev); | ||
197 | #else | ||
198 | static inline void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) {} | ||
199 | #endif | ||
186 | 200 | ||
187 | #ifdef CONFIG_DEBUG_FS | 201 | #ifdef CONFIG_DEBUG_FS |
188 | void opp_debug_remove_one(struct dev_pm_opp *opp); | 202 | void opp_debug_remove_one(struct dev_pm_opp *opp); |