commit 64f758a07a8cdb5c2a08e0d3dfec323af1d2bac3
tree   1eeae3c431523b85076884c4b893cf3337314fb2 /drivers/base
parent 7089db84e356562f8ba737c29e472cc42d530dbc
parent 0764c604c8128f17fd740ff8b1701d0a1301eb7e
author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2017-02-20 08:22:50 -0500
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2017-02-20 08:22:50 -0500
Merge branch 'pm-opp'
* pm-opp: (24 commits)
PM / OPP: Expose _of_get_opp_desc_node as dev_pm_opp API
PM / OPP: Make _find_opp_table_unlocked() static
PM / OPP: Update Documentation to remove RCU specific bits
PM / OPP: Simplify dev_pm_opp_get_max_volt_latency()
PM / OPP: Simplify _opp_set_availability()
PM / OPP: Move away from RCU locking
PM / OPP: Take kref from _find_opp_table()
PM / OPP: Update OPP users to put reference
PM / OPP: Add 'struct kref' to struct dev_pm_opp
PM / OPP: Use dev_pm_opp_get_opp_table() instead of _add_opp_table()
PM / OPP: Take reference of the OPP table while adding/removing OPPs
PM / OPP: Return opp_table from dev_pm_opp_set_*() routines
PM / OPP: Add 'struct kref' to OPP table
PM / OPP: Add per OPP table mutex
PM / OPP: Split out part of _add_opp_table() and _remove_opp_table()
PM / OPP: Don't expose srcu_head to register notifiers
PM / OPP: Rename dev_pm_opp_get_suspend_opp() and return OPP rate
PM / OPP: Don't allocate OPP table from _opp_allocate()
PM / OPP: Rename and split _dev_pm_opp_remove_table()
PM / OPP: Add light weight _opp_free() routine
...
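The theme of the series is visible in the titles above: the OPP core stops protecting its tables and OPPs with RCU and instead reference-counts them with krefs. A minimal sketch of what that means for a caller; the `dev_pm_opp_*` calls are the real API introduced or kept by this series, while the surrounding function and its names are hypothetical driver code, not part of this merge:

```c
#include <linux/device.h>
#include <linux/err.h>
#include <linux/pm_opp.h>

/* Hypothetical helper showing the caller-side pattern after this series. */
static int example_report_opp(struct device *dev, unsigned long *freq)
{
	struct dev_pm_opp *opp;
	unsigned long volt;

	/*
	 * Old pattern (before this series): the returned OPP was only valid
	 * inside an RCU read-side section, so the lookup and every
	 * dereference had to sit between rcu_read_lock()/rcu_read_unlock().
	 *
	 * New pattern: the lookup takes a reference on the OPP, which stays
	 * valid until the caller drops it with dev_pm_opp_put().
	 */
	opp = dev_pm_opp_find_freq_ceil(dev, freq);
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	volt = dev_pm_opp_get_voltage(opp);
	dev_info(dev, "OPP: %lu Hz at %lu uV\n", *freq, volt);

	dev_pm_opp_put(opp);	/* drop the reference taken by the lookup */
	return 0;
}
```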
Diffstat (limited to 'drivers/base')
 drivers/base/power/opp/core.c | 1011 +-
 drivers/base/power/opp/cpu.c  |   66 +-
 drivers/base/power/opp/of.c   |  154 +-
 drivers/base/power/opp/opp.h  |   40 +-
 4 files changed, 476 insertions(+), 795 deletions(-)
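One user-visible consequence (see "Don't expose srcu_head to register notifiers" in the list above): OPP availability notifications now go through a blocking notifier registered with dev_pm_opp_register_notifier() instead of a raw srcu_notifier_head fetched via dev_pm_opp_get_notifier(). A hedged sketch of the new registration pattern; the callback body and all `example_*` names are illustrative:

```c
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_opp.h>
#include <linux/printk.h>

/* Illustrative callback; data is the dev_pm_opp the event refers to. */
static int example_opp_notifier(struct notifier_block *nb,
				unsigned long event, void *data)
{
	struct dev_pm_opp *opp = data;

	if (event == OPP_EVENT_ADD)
		pr_info("OPP added: %lu Hz\n", dev_pm_opp_get_freq(opp));

	return NOTIFY_OK;
}

static struct notifier_block example_opp_nb = {
	.notifier_call = example_opp_notifier,
};

static int example_register(struct device *dev)
{
	/* Replaces grabbing the table's srcu_notifier_head directly. */
	return dev_pm_opp_register_notifier(dev, &example_opp_nb);
}
```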
diff --git a/drivers/base/power/opp/core.c b/drivers/base/power/opp/core.c index 35ff06283738..91ec3232d630 100644 --- a/drivers/base/power/opp/core.c +++ b/drivers/base/power/opp/core.c | |||
| @@ -32,13 +32,7 @@ LIST_HEAD(opp_tables); | |||
| 32 | /* Lock to allow exclusive modification to the device and opp lists */ | 32 | /* Lock to allow exclusive modification to the device and opp lists */ |
| 33 | DEFINE_MUTEX(opp_table_lock); | 33 | DEFINE_MUTEX(opp_table_lock); |
| 34 | 34 | ||
| 35 | #define opp_rcu_lockdep_assert() \ | 35 | static void dev_pm_opp_get(struct dev_pm_opp *opp); |
| 36 | do { \ | ||
| 37 | RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ | ||
| 38 | !lockdep_is_held(&opp_table_lock), \ | ||
| 39 | "Missing rcu_read_lock() or " \ | ||
| 40 | "opp_table_lock protection"); \ | ||
| 41 | } while (0) | ||
| 42 | 36 | ||
| 43 | static struct opp_device *_find_opp_dev(const struct device *dev, | 37 | static struct opp_device *_find_opp_dev(const struct device *dev, |
| 44 | struct opp_table *opp_table) | 38 | struct opp_table *opp_table) |
| @@ -52,38 +46,46 @@ static struct opp_device *_find_opp_dev(const struct device *dev, | |||
| 52 | return NULL; | 46 | return NULL; |
| 53 | } | 47 | } |
| 54 | 48 | ||
| 49 | static struct opp_table *_find_opp_table_unlocked(struct device *dev) | ||
| 50 | { | ||
| 51 | struct opp_table *opp_table; | ||
| 52 | |||
| 53 | list_for_each_entry(opp_table, &opp_tables, node) { | ||
| 54 | if (_find_opp_dev(dev, opp_table)) { | ||
| 55 | _get_opp_table_kref(opp_table); | ||
| 56 | |||
| 57 | return opp_table; | ||
| 58 | } | ||
| 59 | } | ||
| 60 | |||
| 61 | return ERR_PTR(-ENODEV); | ||
| 62 | } | ||
| 63 | |||
| 55 | /** | 64 | /** |
| 56 | * _find_opp_table() - find opp_table struct using device pointer | 65 | * _find_opp_table() - find opp_table struct using device pointer |
| 57 | * @dev: device pointer used to lookup OPP table | 66 | * @dev: device pointer used to lookup OPP table |
| 58 | * | 67 | * |
| 59 | * Search OPP table for one containing matching device. Does a RCU reader | 68 | * Search OPP table for one containing matching device. |
| 60 | * operation to grab the pointer needed. | ||
| 61 | * | 69 | * |
| 62 | * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or | 70 | * Return: pointer to 'struct opp_table' if found, otherwise -ENODEV or |
| 63 | * -EINVAL based on type of error. | 71 | * -EINVAL based on type of error. |
| 64 | * | 72 | * |
| 65 | * Locking: For readers, this function must be called under rcu_read_lock(). | 73 | * The callers must call dev_pm_opp_put_opp_table() after the table is used. |
| 66 | * opp_table is a RCU protected pointer, which means that opp_table is valid | ||
| 67 | * as long as we are under RCU lock. | ||
| 68 | * | ||
| 69 | * For Writers, this function must be called with opp_table_lock held. | ||
| 70 | */ | 74 | */ |
| 71 | struct opp_table *_find_opp_table(struct device *dev) | 75 | struct opp_table *_find_opp_table(struct device *dev) |
| 72 | { | 76 | { |
| 73 | struct opp_table *opp_table; | 77 | struct opp_table *opp_table; |
| 74 | 78 | ||
| 75 | opp_rcu_lockdep_assert(); | ||
| 76 | |||
| 77 | if (IS_ERR_OR_NULL(dev)) { | 79 | if (IS_ERR_OR_NULL(dev)) { |
| 78 | pr_err("%s: Invalid parameters\n", __func__); | 80 | pr_err("%s: Invalid parameters\n", __func__); |
| 79 | return ERR_PTR(-EINVAL); | 81 | return ERR_PTR(-EINVAL); |
| 80 | } | 82 | } |
| 81 | 83 | ||
| 82 | list_for_each_entry_rcu(opp_table, &opp_tables, node) | 84 | mutex_lock(&opp_table_lock); |
| 83 | if (_find_opp_dev(dev, opp_table)) | 85 | opp_table = _find_opp_table_unlocked(dev); |
| 84 | return opp_table; | 86 | mutex_unlock(&opp_table_lock); |
| 85 | 87 | ||
| 86 | return ERR_PTR(-ENODEV); | 88 | return opp_table; |
| 87 | } | 89 | } |
| 88 | 90 | ||
| 89 | /** | 91 | /** |
| @@ -94,29 +96,15 @@ struct opp_table *_find_opp_table(struct device *dev) | |||
| 94 | * return 0 | 96 | * return 0 |
| 95 | * | 97 | * |
| 96 | * This is useful only for devices with single power supply. | 98 | * This is useful only for devices with single power supply. |
| 97 | * | ||
| 98 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | ||
| 99 | * protected pointer. This means that opp which could have been fetched by | ||
| 100 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | ||
| 101 | * under RCU lock. The pointer returned by the opp_find_freq family must be | ||
| 102 | * used in the same section as the usage of this function with the pointer | ||
| 103 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | ||
| 104 | * pointer. | ||
| 105 | */ | 99 | */ |
| 106 | unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) | 100 | unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp) |
| 107 | { | 101 | { |
| 108 | struct dev_pm_opp *tmp_opp; | 102 | if (IS_ERR_OR_NULL(opp)) { |
| 109 | unsigned long v = 0; | ||
| 110 | |||
| 111 | opp_rcu_lockdep_assert(); | ||
| 112 | |||
| 113 | tmp_opp = rcu_dereference(opp); | ||
| 114 | if (IS_ERR_OR_NULL(tmp_opp)) | ||
| 115 | pr_err("%s: Invalid parameters\n", __func__); | 103 | pr_err("%s: Invalid parameters\n", __func__); |
| 116 | else | 104 | return 0; |
| 117 | v = tmp_opp->supplies[0].u_volt; | 105 | } |
| 118 | 106 | ||
| 119 | return v; | 107 | return opp->supplies[0].u_volt; |
| 120 | } | 108 | } |
| 121 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); | 109 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); |
| 122 | 110 | ||
| @@ -126,29 +114,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage); | |||
| 126 | * | 114 | * |
| 127 | * Return: frequency in hertz corresponding to the opp, else | 115 | * Return: frequency in hertz corresponding to the opp, else |
| 128 | * return 0 | 116 | * return 0 |
| 129 | * | ||
| 130 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | ||
| 131 | * protected pointer. This means that opp which could have been fetched by | ||
| 132 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | ||
| 133 | * under RCU lock. The pointer returned by the opp_find_freq family must be | ||
| 134 | * used in the same section as the usage of this function with the pointer | ||
| 135 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | ||
| 136 | * pointer. | ||
| 137 | */ | 117 | */ |
| 138 | unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) | 118 | unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) |
| 139 | { | 119 | { |
| 140 | struct dev_pm_opp *tmp_opp; | 120 | if (IS_ERR_OR_NULL(opp) || !opp->available) { |
| 141 | unsigned long f = 0; | ||
| 142 | |||
| 143 | opp_rcu_lockdep_assert(); | ||
| 144 | |||
| 145 | tmp_opp = rcu_dereference(opp); | ||
| 146 | if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) | ||
| 147 | pr_err("%s: Invalid parameters\n", __func__); | 121 | pr_err("%s: Invalid parameters\n", __func__); |
| 148 | else | 122 | return 0; |
| 149 | f = tmp_opp->rate; | 123 | } |
| 150 | 124 | ||
| 151 | return f; | 125 | return opp->rate; |
| 152 | } | 126 | } |
| 153 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); | 127 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); |
| 154 | 128 | ||
| @@ -161,28 +135,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); | |||
| 161 | * quickly. Running on them for longer times may overheat the chip. | 135 | * quickly. Running on them for longer times may overheat the chip. |
| 162 | * | 136 | * |
| 163 | * Return: true if opp is turbo opp, else false. | 137 | * Return: true if opp is turbo opp, else false. |
| 164 | * | ||
| 165 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | ||
| 166 | * protected pointer. This means that opp which could have been fetched by | ||
| 167 | * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are | ||
| 168 | * under RCU lock. The pointer returned by the opp_find_freq family must be | ||
| 169 | * used in the same section as the usage of this function with the pointer | ||
| 170 | * prior to unlocking with rcu_read_unlock() to maintain the integrity of the | ||
| 171 | * pointer. | ||
| 172 | */ | 138 | */ |
| 173 | bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) | 139 | bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) |
| 174 | { | 140 | { |
| 175 | struct dev_pm_opp *tmp_opp; | 141 | if (IS_ERR_OR_NULL(opp) || !opp->available) { |
| 176 | |||
| 177 | opp_rcu_lockdep_assert(); | ||
| 178 | |||
| 179 | tmp_opp = rcu_dereference(opp); | ||
| 180 | if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { | ||
| 181 | pr_err("%s: Invalid parameters\n", __func__); | 142 | pr_err("%s: Invalid parameters\n", __func__); |
| 182 | return false; | 143 | return false; |
| 183 | } | 144 | } |
| 184 | 145 | ||
| 185 | return tmp_opp->turbo; | 146 | return opp->turbo; |
| 186 | } | 147 | } |
| 187 | EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); | 148 | EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); |
| 188 | 149 | ||
| @@ -191,52 +152,29 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); | |||
| 191 | * @dev: device for which we do this operation | 152 | * @dev: device for which we do this operation |
| 192 | * | 153 | * |
| 193 | * Return: This function returns the max clock latency in nanoseconds. | 154 | * Return: This function returns the max clock latency in nanoseconds. |
| 194 | * | ||
| 195 | * Locking: This function takes rcu_read_lock(). | ||
| 196 | */ | 155 | */ |
| 197 | unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) | 156 | unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) |
| 198 | { | 157 | { |
| 199 | struct opp_table *opp_table; | 158 | struct opp_table *opp_table; |
| 200 | unsigned long clock_latency_ns; | 159 | unsigned long clock_latency_ns; |
| 201 | 160 | ||
| 202 | rcu_read_lock(); | ||
| 203 | |||
| 204 | opp_table = _find_opp_table(dev); | 161 | opp_table = _find_opp_table(dev); |
| 205 | if (IS_ERR(opp_table)) | 162 | if (IS_ERR(opp_table)) |
| 206 | clock_latency_ns = 0; | 163 | return 0; |
| 207 | else | ||
| 208 | clock_latency_ns = opp_table->clock_latency_ns_max; | ||
| 209 | |||
| 210 | rcu_read_unlock(); | ||
| 211 | return clock_latency_ns; | ||
| 212 | } | ||
| 213 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); | ||
| 214 | |||
| 215 | static int _get_regulator_count(struct device *dev) | ||
| 216 | { | ||
| 217 | struct opp_table *opp_table; | ||
| 218 | int count; | ||
| 219 | 164 | ||
| 220 | rcu_read_lock(); | 165 | clock_latency_ns = opp_table->clock_latency_ns_max; |
| 221 | 166 | ||
| 222 | opp_table = _find_opp_table(dev); | 167 | dev_pm_opp_put_opp_table(opp_table); |
| 223 | if (!IS_ERR(opp_table)) | ||
| 224 | count = opp_table->regulator_count; | ||
| 225 | else | ||
| 226 | count = 0; | ||
| 227 | 168 | ||
| 228 | rcu_read_unlock(); | 169 | return clock_latency_ns; |
| 229 | |||
| 230 | return count; | ||
| 231 | } | 170 | } |
| 171 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); | ||
| 232 | 172 | ||
| 233 | /** | 173 | /** |
| 234 | * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds | 174 | * dev_pm_opp_get_max_volt_latency() - Get max voltage latency in nanoseconds |
| 235 | * @dev: device for which we do this operation | 175 | * @dev: device for which we do this operation |
| 236 | * | 176 | * |
| 237 | * Return: This function returns the max voltage latency in nanoseconds. | 177 | * Return: This function returns the max voltage latency in nanoseconds. |
| 238 | * | ||
| 239 | * Locking: This function takes rcu_read_lock(). | ||
| 240 | */ | 178 | */ |
| 241 | unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) | 179 | unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) |
| 242 | { | 180 | { |
| @@ -250,35 +188,33 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) | |||
| 250 | unsigned long max; | 188 | unsigned long max; |
| 251 | } *uV; | 189 | } *uV; |
| 252 | 190 | ||
| 253 | count = _get_regulator_count(dev); | 191 | opp_table = _find_opp_table(dev); |
| 192 | if (IS_ERR(opp_table)) | ||
| 193 | return 0; | ||
| 194 | |||
| 195 | count = opp_table->regulator_count; | ||
| 254 | 196 | ||
| 255 | /* Regulator may not be required for the device */ | 197 | /* Regulator may not be required for the device */ |
| 256 | if (!count) | 198 | if (!count) |
| 257 | return 0; | 199 | goto put_opp_table; |
| 258 | 200 | ||
| 259 | regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL); | 201 | regulators = kmalloc_array(count, sizeof(*regulators), GFP_KERNEL); |
| 260 | if (!regulators) | 202 | if (!regulators) |
| 261 | return 0; | 203 | goto put_opp_table; |
| 262 | 204 | ||
| 263 | uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); | 205 | uV = kmalloc_array(count, sizeof(*uV), GFP_KERNEL); |
| 264 | if (!uV) | 206 | if (!uV) |
| 265 | goto free_regulators; | 207 | goto free_regulators; |
| 266 | 208 | ||
| 267 | rcu_read_lock(); | ||
| 268 | |||
| 269 | opp_table = _find_opp_table(dev); | ||
| 270 | if (IS_ERR(opp_table)) { | ||
| 271 | rcu_read_unlock(); | ||
| 272 | goto free_uV; | ||
| 273 | } | ||
| 274 | |||
| 275 | memcpy(regulators, opp_table->regulators, count * sizeof(*regulators)); | 209 | memcpy(regulators, opp_table->regulators, count * sizeof(*regulators)); |
| 276 | 210 | ||
| 211 | mutex_lock(&opp_table->lock); | ||
| 212 | |||
| 277 | for (i = 0; i < count; i++) { | 213 | for (i = 0; i < count; i++) { |
| 278 | uV[i].min = ~0; | 214 | uV[i].min = ~0; |
| 279 | uV[i].max = 0; | 215 | uV[i].max = 0; |
| 280 | 216 | ||
| 281 | list_for_each_entry_rcu(opp, &opp_table->opp_list, node) { | 217 | list_for_each_entry(opp, &opp_table->opp_list, node) { |
| 282 | if (!opp->available) | 218 | if (!opp->available) |
| 283 | continue; | 219 | continue; |
| 284 | 220 | ||
| @@ -289,7 +225,7 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) | |||
| 289 | } | 225 | } |
| 290 | } | 226 | } |
| 291 | 227 | ||
| 292 | rcu_read_unlock(); | 228 | mutex_unlock(&opp_table->lock); |
| 293 | 229 | ||
| 294 | /* | 230 | /* |
| 295 | * The caller needs to ensure that opp_table (and hence the regulator) | 231 | * The caller needs to ensure that opp_table (and hence the regulator) |
| @@ -301,10 +237,11 @@ unsigned long dev_pm_opp_get_max_volt_latency(struct device *dev) | |||
| 301 | latency_ns += ret * 1000; | 237 | latency_ns += ret * 1000; |
| 302 | } | 238 | } |
| 303 | 239 | ||
| 304 | free_uV: | ||
| 305 | kfree(uV); | 240 | kfree(uV); |
| 306 | free_regulators: | 241 | free_regulators: |
| 307 | kfree(regulators); | 242 | kfree(regulators); |
| 243 | put_opp_table: | ||
| 244 | dev_pm_opp_put_opp_table(opp_table); | ||
| 308 | 245 | ||
| 309 | return latency_ns; | 246 | return latency_ns; |
| 310 | } | 247 | } |
| @@ -317,8 +254,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_volt_latency); | |||
| 317 | * | 254 | * |
| 318 | * Return: This function returns the max transition latency, in nanoseconds, to | 255 | * Return: This function returns the max transition latency, in nanoseconds, to |
| 319 | * switch from one OPP to other. | 256 | * switch from one OPP to other. |
| 320 | * | ||
| 321 | * Locking: This function takes rcu_read_lock(). | ||
| 322 | */ | 257 | */ |
| 323 | unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev) | 258 | unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev) |
| 324 | { | 259 | { |
| @@ -328,32 +263,29 @@ unsigned long dev_pm_opp_get_max_transition_latency(struct device *dev) | |||
| 328 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency); | 263 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_transition_latency); |
| 329 | 264 | ||
| 330 | /** | 265 | /** |
| 331 | * dev_pm_opp_get_suspend_opp() - Get suspend opp | 266 | * dev_pm_opp_get_suspend_opp_freq() - Get frequency of suspend opp in Hz |
| 332 | * @dev: device for which we do this operation | 267 | * @dev: device for which we do this operation |
| 333 | * | 268 | * |
| 334 | * Return: This function returns pointer to the suspend opp if it is | 269 | * Return: This function returns the frequency of the OPP marked as suspend_opp |
| 335 | * defined and available, otherwise it returns NULL. | 270 | * if one is available, else returns 0; |
| 336 | * | ||
| 337 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | ||
| 338 | * protected pointer. The reason for the same is that the opp pointer which is | ||
| 339 | * returned will remain valid for use with opp_get_{voltage, freq} only while | ||
| 340 | * under the locked area. The pointer returned must be used prior to unlocking | ||
| 341 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
| 342 | */ | 271 | */ |
| 343 | struct dev_pm_opp *dev_pm_opp_get_suspend_opp(struct device *dev) | 272 | unsigned long dev_pm_opp_get_suspend_opp_freq(struct device *dev) |
| 344 | { | 273 | { |
| 345 | struct opp_table *opp_table; | 274 | struct opp_table *opp_table; |
| 346 | 275 | unsigned long freq = 0; | |
| 347 | opp_rcu_lockdep_assert(); | ||
| 348 | 276 | ||
| 349 | opp_table = _find_opp_table(dev); | 277 | opp_table = _find_opp_table(dev); |
| 350 | if (IS_ERR(opp_table) || !opp_table->suspend_opp || | 278 | if (IS_ERR(opp_table)) |
| 351 | !opp_table->suspend_opp->available) | 279 | return 0; |
| 352 | return NULL; | 280 | |
| 281 | if (opp_table->suspend_opp && opp_table->suspend_opp->available) | ||
| 282 | freq = dev_pm_opp_get_freq(opp_table->suspend_opp); | ||
| 353 | 283 | ||
| 354 | return opp_table->suspend_opp; | 284 | dev_pm_opp_put_opp_table(opp_table); |
| 285 | |||
| 286 | return freq; | ||
| 355 | } | 287 | } |
| 356 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); | 288 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp_freq); |
| 357 | 289 | ||
| 358 | /** | 290 | /** |
| 359 | * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table | 291 | * dev_pm_opp_get_opp_count() - Get number of opps available in the opp table |
| @@ -361,8 +293,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_suspend_opp); | |||
| 361 | * | 293 | * |
| 362 | * Return: This function returns the number of available opps if there are any, | 294 | * Return: This function returns the number of available opps if there are any, |
| 363 | * else returns 0 if none or the corresponding error value. | 295 | * else returns 0 if none or the corresponding error value. |
| 364 | * | ||
| 365 | * Locking: This function takes rcu_read_lock(). | ||
| 366 | */ | 296 | */ |
| 367 | int dev_pm_opp_get_opp_count(struct device *dev) | 297 | int dev_pm_opp_get_opp_count(struct device *dev) |
| 368 | { | 298 | { |
| @@ -370,23 +300,24 @@ int dev_pm_opp_get_opp_count(struct device *dev) | |||
| 370 | struct dev_pm_opp *temp_opp; | 300 | struct dev_pm_opp *temp_opp; |
| 371 | int count = 0; | 301 | int count = 0; |
| 372 | 302 | ||
| 373 | rcu_read_lock(); | ||
| 374 | |||
| 375 | opp_table = _find_opp_table(dev); | 303 | opp_table = _find_opp_table(dev); |
| 376 | if (IS_ERR(opp_table)) { | 304 | if (IS_ERR(opp_table)) { |
| 377 | count = PTR_ERR(opp_table); | 305 | count = PTR_ERR(opp_table); |
| 378 | dev_err(dev, "%s: OPP table not found (%d)\n", | 306 | dev_err(dev, "%s: OPP table not found (%d)\n", |
| 379 | __func__, count); | 307 | __func__, count); |
| 380 | goto out_unlock; | 308 | return count; |
| 381 | } | 309 | } |
| 382 | 310 | ||
| 383 | list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { | 311 | mutex_lock(&opp_table->lock); |
| 312 | |||
| 313 | list_for_each_entry(temp_opp, &opp_table->opp_list, node) { | ||
| 384 | if (temp_opp->available) | 314 | if (temp_opp->available) |
| 385 | count++; | 315 | count++; |
| 386 | } | 316 | } |
| 387 | 317 | ||
| 388 | out_unlock: | 318 | mutex_unlock(&opp_table->lock); |
| 389 | rcu_read_unlock(); | 319 | dev_pm_opp_put_opp_table(opp_table); |
| 320 | |||
| 390 | return count; | 321 | return count; |
| 391 | } | 322 | } |
| 392 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); | 323 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); |
| @@ -411,11 +342,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count); | |||
| 411 | * This provides a mechanism to enable an opp which is not available currently | 342 | * This provides a mechanism to enable an opp which is not available currently |
| 412 | * or the opposite as well. | 343 | * or the opposite as well. |
| 413 | * | 344 | * |
| 414 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | 345 | * The callers are required to call dev_pm_opp_put() for the returned OPP after |
| 415 | * protected pointer. The reason for the same is that the opp pointer which is | 346 | * use. |
| 416 | * returned will remain valid for use with opp_get_{voltage, freq} only while | ||
| 417 | * under the locked area. The pointer returned must be used prior to unlocking | ||
| 418 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
| 419 | */ | 347 | */ |
| 420 | struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, | 348 | struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, |
| 421 | unsigned long freq, | 349 | unsigned long freq, |
| @@ -424,8 +352,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, | |||
| 424 | struct opp_table *opp_table; | 352 | struct opp_table *opp_table; |
| 425 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 353 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
| 426 | 354 | ||
| 427 | opp_rcu_lockdep_assert(); | ||
| 428 | |||
| 429 | opp_table = _find_opp_table(dev); | 355 | opp_table = _find_opp_table(dev); |
| 430 | if (IS_ERR(opp_table)) { | 356 | if (IS_ERR(opp_table)) { |
| 431 | int r = PTR_ERR(opp_table); | 357 | int r = PTR_ERR(opp_table); |
| @@ -434,14 +360,22 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, | |||
| 434 | return ERR_PTR(r); | 360 | return ERR_PTR(r); |
| 435 | } | 361 | } |
| 436 | 362 | ||
| 437 | list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { | 363 | mutex_lock(&opp_table->lock); |
| 364 | |||
| 365 | list_for_each_entry(temp_opp, &opp_table->opp_list, node) { | ||
| 438 | if (temp_opp->available == available && | 366 | if (temp_opp->available == available && |
| 439 | temp_opp->rate == freq) { | 367 | temp_opp->rate == freq) { |
| 440 | opp = temp_opp; | 368 | opp = temp_opp; |
| 369 | |||
| 370 | /* Increment the reference count of OPP */ | ||
| 371 | dev_pm_opp_get(opp); | ||
| 441 | break; | 372 | break; |
| 442 | } | 373 | } |
| 443 | } | 374 | } |
| 444 | 375 | ||
| 376 | mutex_unlock(&opp_table->lock); | ||
| 377 | dev_pm_opp_put_opp_table(opp_table); | ||
| 378 | |||
| 445 | return opp; | 379 | return opp; |
| 446 | } | 380 | } |
| 447 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); | 381 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact); |
| @@ -451,14 +385,21 @@ static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, | |||
| 451 | { | 385 | { |
| 452 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 386 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
| 453 | 387 | ||
| 454 | list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { | 388 | mutex_lock(&opp_table->lock); |
| 389 | |||
| 390 | list_for_each_entry(temp_opp, &opp_table->opp_list, node) { | ||
| 455 | if (temp_opp->available && temp_opp->rate >= *freq) { | 391 | if (temp_opp->available && temp_opp->rate >= *freq) { |
| 456 | opp = temp_opp; | 392 | opp = temp_opp; |
| 457 | *freq = opp->rate; | 393 | *freq = opp->rate; |
| 394 | |||
| 395 | /* Increment the reference count of OPP */ | ||
| 396 | dev_pm_opp_get(opp); | ||
| 458 | break; | 397 | break; |
| 459 | } | 398 | } |
| 460 | } | 399 | } |
| 461 | 400 | ||
| 401 | mutex_unlock(&opp_table->lock); | ||
| 402 | |||
| 462 | return opp; | 403 | return opp; |
| 463 | } | 404 | } |
| 464 | 405 | ||
| @@ -477,18 +418,14 @@ static noinline struct dev_pm_opp *_find_freq_ceil(struct opp_table *opp_table, | |||
| 477 | * ERANGE: no match found for search | 418 | * ERANGE: no match found for search |
| 478 | * ENODEV: if device not found in list of registered devices | 419 | * ENODEV: if device not found in list of registered devices |
| 479 | * | 420 | * |
| 480 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | 421 | * The callers are required to call dev_pm_opp_put() for the returned OPP after |
| 481 | * protected pointer. The reason for the same is that the opp pointer which is | 422 | * use. |
| 482 | * returned will remain valid for use with opp_get_{voltage, freq} only while | ||
| 483 | * under the locked area. The pointer returned must be used prior to unlocking | ||
| 484 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
| 485 | */ | 423 | */ |
| 486 | struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, | 424 | struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, |
| 487 | unsigned long *freq) | 425 | unsigned long *freq) |
| 488 | { | 426 | { |
| 489 | struct opp_table *opp_table; | 427 | struct opp_table *opp_table; |
| 490 | 428 | struct dev_pm_opp *opp; | |
| 491 | opp_rcu_lockdep_assert(); | ||
| 492 | 429 | ||
| 493 | if (!dev || !freq) { | 430 | if (!dev || !freq) { |
| 494 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 431 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
| @@ -499,7 +436,11 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev, | |||
| 499 | if (IS_ERR(opp_table)) | 436 | if (IS_ERR(opp_table)) |
| 500 | return ERR_CAST(opp_table); | 437 | return ERR_CAST(opp_table); |
| 501 | 438 | ||
| 502 | return _find_freq_ceil(opp_table, freq); | 439 | opp = _find_freq_ceil(opp_table, freq); |
| 440 | |||
| 441 | dev_pm_opp_put_opp_table(opp_table); | ||
| 442 | |||
| 443 | return opp; | ||
| 503 | } | 444 | } |
| 504 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); | 445 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); |
| 505 | 446 | ||
| @@ -518,11 +459,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil); | |||
| 518 | * ERANGE: no match found for search | 459 | * ERANGE: no match found for search |
| 519 | * ENODEV: if device not found in list of registered devices | 460 | * ENODEV: if device not found in list of registered devices |
| 520 | * | 461 | * |
| 521 | * Locking: This function must be called under rcu_read_lock(). opp is a rcu | 462 | * The callers are required to call dev_pm_opp_put() for the returned OPP after |
| 522 | * protected pointer. The reason for the same is that the opp pointer which is | 463 | * use. |
| 523 | * returned will remain valid for use with opp_get_{voltage, freq} only while | ||
| 524 | * under the locked area. The pointer returned must be used prior to unlocking | ||
| 525 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
| 526 | */ | 464 | */ |
| 527 | struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, | 465 | struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, |
| 528 | unsigned long *freq) | 466 | unsigned long *freq) |
| @@ -530,8 +468,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, | |||
| 530 | struct opp_table *opp_table; | 468 | struct opp_table *opp_table; |
| 531 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); | 469 | struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE); |
| 532 | 470 | ||
| 533 | opp_rcu_lockdep_assert(); | ||
| 534 | |||
| 535 | if (!dev || !freq) { | 471 | if (!dev || !freq) { |
| 536 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); | 472 | dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); |
| 537 | return ERR_PTR(-EINVAL); | 473 | return ERR_PTR(-EINVAL); |
| @@ -541,7 +477,9 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, | |||
| 541 | if (IS_ERR(opp_table)) | 477 | if (IS_ERR(opp_table)) |
| 542 | return ERR_CAST(opp_table); | 478 | return ERR_CAST(opp_table); |
| 543 | 479 | ||
| 544 | list_for_each_entry_rcu(temp_opp, &opp_table->opp_list, node) { | 480 | mutex_lock(&opp_table->lock); |
| 481 | |||
| 482 | list_for_each_entry(temp_opp, &opp_table->opp_list, node) { | ||
| 545 | if (temp_opp->available) { | 483 | if (temp_opp->available) { |
| 546 | /* go to the next node, before choosing prev */ | 484 | /* go to the next node, before choosing prev */ |
| 547 | if (temp_opp->rate > *freq) | 485 | if (temp_opp->rate > *freq) |
| @@ -550,6 +488,13 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, | |||
| 550 | opp = temp_opp; | 488 | opp = temp_opp; |
| 551 | } | 489 | } |
| 552 | } | 490 | } |
| 491 | |||
| 492 | /* Increment the reference count of OPP */ | ||
| 493 | if (!IS_ERR(opp)) | ||
| 494 | dev_pm_opp_get(opp); | ||
| 495 | mutex_unlock(&opp_table->lock); | ||
| 496 | dev_pm_opp_put_opp_table(opp_table); | ||
| 497 | |||
| 553 | if (!IS_ERR(opp)) | 498 | if (!IS_ERR(opp)) |
| 554 | *freq = opp->rate; | 499 | *freq = opp->rate; |
| 555 | 500 | ||
| @@ -557,34 +502,6 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev, | |||
| 557 | } | 502 | } |
| 558 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); | 503 | EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor); |
| 559 | 504 | ||
| 560 | /* | ||
| 561 | * The caller needs to ensure that opp_table (and hence the clk) isn't freed, | ||
| 562 | * while clk returned here is used. | ||
| 563 | */ | ||
| 564 | static struct clk *_get_opp_clk(struct device *dev) | ||
| 565 | { | ||
| 566 | struct opp_table *opp_table; | ||
| 567 | struct clk *clk; | ||
| 568 | |||
| 569 | rcu_read_lock(); | ||
| 570 | |||
| 571 | opp_table = _find_opp_table(dev); | ||
| 572 | if (IS_ERR(opp_table)) { | ||
| 573 | dev_err(dev, "%s: device opp doesn't exist\n", __func__); | ||
| 574 | clk = ERR_CAST(opp_table); | ||
| 575 | goto unlock; | ||
| 576 | } | ||
| 577 | |||
| 578 | clk = opp_table->clk; | ||
| 579 | if (IS_ERR(clk)) | ||
| 580 | dev_err(dev, "%s: No clock available for the device\n", | ||
| 581 | __func__); | ||
| 582 | |||
| 583 | unlock: | ||
| 584 | rcu_read_unlock(); | ||
| 585 | return clk; | ||
| 586 | } | ||
| 587 | |||
| 588 | static int _set_opp_voltage(struct device *dev, struct regulator *reg, | 505 | static int _set_opp_voltage(struct device *dev, struct regulator *reg, |
| 589 | struct dev_pm_opp_supply *supply) | 506 | struct dev_pm_opp_supply *supply) |
| 590 | { | 507 | { |
| @@ -680,8 +597,6 @@ restore_voltage: | |||
| 680 | * | 597 | * |
| 681 | * This configures the power-supplies and clock source to the levels specified | 598 | * This configures the power-supplies and clock source to the levels specified |
| 682 | * by the OPP corresponding to the target_freq. | 599 | * by the OPP corresponding to the target_freq. |
| 683 | * | ||
| 684 | * Locking: This function takes rcu_read_lock(). | ||
| 685 | */ | 600 | */ |
| 686 | int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) | 601 | int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) |
| 687 | { | 602 | { |
| @@ -700,9 +615,19 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) | |||
| 700 | return -EINVAL; | 615 | return -EINVAL; |
| 701 | } | 616 | } |
| 702 | 617 | ||
| 703 | clk = _get_opp_clk(dev); | 618 | opp_table = _find_opp_table(dev); |
| 704 | if (IS_ERR(clk)) | 619 | if (IS_ERR(opp_table)) { |
| 705 | return PTR_ERR(clk); | 620 | dev_err(dev, "%s: device opp doesn't exist\n", __func__); |
| 621 | return PTR_ERR(opp_table); | ||
| 622 | } | ||
| 623 | |||
| 624 | clk = opp_table->clk; | ||
| 625 | if (IS_ERR(clk)) { | ||
| 626 | dev_err(dev, "%s: No clock available for the device\n", | ||
| 627 | __func__); | ||
| 628 | ret = PTR_ERR(clk); | ||
| 629 | goto put_opp_table; | ||
| 630 | } | ||
| 706 | 631 | ||
| 707 | freq = clk_round_rate(clk, target_freq); | 632 | freq = clk_round_rate(clk, target_freq); |
| 708 | if ((long)freq <= 0) | 633 | if ((long)freq <= 0) |
| @@ -714,16 +639,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) | |||
| 714 | if (old_freq == freq) { | 639 | if (old_freq == freq) { |
| 715 | dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n", | 640 | dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n", |
| 716 | __func__, freq); | 641 | __func__, freq); |
| 717 | return 0; | 642 | ret = 0; |
| 718 | } | 643 | goto put_opp_table; |
| 719 | |||
| 720 | rcu_read_lock(); | ||
| 721 | |||
| 722 | opp_table = _find_opp_table(dev); | ||
| 723 | if (IS_ERR(opp_table)) { | ||
| 724 | dev_err(dev, "%s: device opp doesn't exist\n", __func__); | ||
| 725 | rcu_read_unlock(); | ||
| 726 | return PTR_ERR(opp_table); | ||
| 727 | } | 644 | } |
| 728 | 645 | ||
| 729 | old_opp = _find_freq_ceil(opp_table, &old_freq); | 646 | old_opp = _find_freq_ceil(opp_table, &old_freq); |
| @@ -737,8 +654,7 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) | |||
| 737 | ret = PTR_ERR(opp); | 654 | ret = PTR_ERR(opp); |
| 738 | dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", | 655 | dev_err(dev, "%s: failed to find OPP for freq %lu (%d)\n", |
| 739 | __func__, freq, ret); | 656 | __func__, freq, ret); |
| 740 | rcu_read_unlock(); | 657 | goto put_old_opp; |
| 741 | return ret; | ||
| 742 | } | 658 | } |
| 743 | 659 | ||
| 744 | dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__, | 660 | dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", __func__, |
| @@ -748,8 +664,8 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) | |||
| 748 | 664 | ||
| 749 | /* Only frequency scaling */ | 665 | /* Only frequency scaling */ |
| 750 | if (!regulators) { | 666 | if (!regulators) { |
| 751 | rcu_read_unlock(); | 667 | ret = _generic_set_opp_clk_only(dev, clk, old_freq, freq); |
| 752 | return _generic_set_opp_clk_only(dev, clk, old_freq, freq); | 668 | goto put_opps; |
| 753 | } | 669 | } |
| 754 | 670 | ||
| 755 | if (opp_table->set_opp) | 671 | if (opp_table->set_opp) |
| @@ -773,28 +689,26 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq) | |||
| 773 | data->new_opp.rate = freq; | 689 | data->new_opp.rate = freq; |
| 774 | memcpy(data->new_opp.supplies, opp->supplies, size); | 690 | memcpy(data->new_opp.supplies, opp->supplies, size); |
| 775 | 691 | ||
| 776 | rcu_read_unlock(); | 692 | ret = set_opp(data); |
| 777 | 693 | ||
| 778 | return set_opp(data); | 694 | put_opps: |
| 695 | dev_pm_opp_put(opp); | ||
| 696 | put_old_opp: | ||
| 697 | if (!IS_ERR(old_opp)) | ||
| 698 | dev_pm_opp_put(old_opp); | ||
| 699 | put_opp_table: | ||
| 700 | dev_pm_opp_put_opp_table(opp_table); | ||
| 701 | return ret; | ||
| 779 | } | 702 | } |
| 780 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); | 703 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_rate); |
| 781 | 704 | ||
| 782 | /* OPP-dev Helpers */ | 705 | /* OPP-dev Helpers */ |
| 783 | static void _kfree_opp_dev_rcu(struct rcu_head *head) | ||
| 784 | { | ||
| 785 | struct opp_device *opp_dev; | ||
| 786 | |||
| 787 | opp_dev = container_of(head, struct opp_device, rcu_head); | ||
| 788 | kfree_rcu(opp_dev, rcu_head); | ||
| 789 | } | ||
| 790 | |||
| 791 | static void _remove_opp_dev(struct opp_device *opp_dev, | 706 | static void _remove_opp_dev(struct opp_device *opp_dev, |
| 792 | struct opp_table *opp_table) | 707 | struct opp_table *opp_table) |
| 793 | { | 708 | { |
| 794 | opp_debug_unregister(opp_dev, opp_table); | 709 | opp_debug_unregister(opp_dev, opp_table); |
| 795 | list_del(&opp_dev->node); | 710 | list_del(&opp_dev->node); |
| 796 | call_srcu(&opp_table->srcu_head.srcu, &opp_dev->rcu_head, | 711 | kfree(opp_dev); |
| 797 | _kfree_opp_dev_rcu); | ||
| 798 | } | 712 | } |
| 799 | 713 | ||
| 800 | struct opp_device *_add_opp_dev(const struct device *dev, | 714 | struct opp_device *_add_opp_dev(const struct device *dev, |
| @@ -809,7 +723,7 @@ struct opp_device *_add_opp_dev(const struct device *dev, | |||
| 809 | 723 | ||
| 810 | /* Initialize opp-dev */ | 724 | /* Initialize opp-dev */ |
| 811 | opp_dev->dev = dev; | 725 | opp_dev->dev = dev; |
| 812 | list_add_rcu(&opp_dev->node, &opp_table->dev_list); | 726 | list_add(&opp_dev->node, &opp_table->dev_list); |
| 813 | 727 | ||
| 814 | /* Create debugfs entries for the opp_table */ | 728 | /* Create debugfs entries for the opp_table */ |
| 815 | ret = opp_debug_register(opp_dev, opp_table); | 729 | ret = opp_debug_register(opp_dev, opp_table); |
| @@ -820,26 +734,12 @@ struct opp_device *_add_opp_dev(const struct device *dev, | |||
| 820 | return opp_dev; | 734 | return opp_dev; |
| 821 | } | 735 | } |
| 822 | 736 | ||
| 823 | /** | 737 | static struct opp_table *_allocate_opp_table(struct device *dev) |
| 824 | * _add_opp_table() - Find OPP table or allocate a new one | ||
| 825 | * @dev: device for which we do this operation | ||
| 826 | * | ||
| 827 | * It tries to find an existing table first, if it couldn't find one, it | ||
| 828 | * allocates a new OPP table and returns that. | ||
| 829 | * | ||
| 830 | * Return: valid opp_table pointer if success, else NULL. | ||
| 831 | */ | ||
| 832 | static struct opp_table *_add_opp_table(struct device *dev) | ||
| 833 | { | 738 | { |
| 834 | struct opp_table *opp_table; | 739 | struct opp_table *opp_table; |
| 835 | struct opp_device *opp_dev; | 740 | struct opp_device *opp_dev; |
| 836 | int ret; | 741 | int ret; |
| 837 | 742 | ||
| 838 | /* Check for existing table for 'dev' first */ | ||
| 839 | opp_table = _find_opp_table(dev); | ||
| 840 | if (!IS_ERR(opp_table)) | ||
| 841 | return opp_table; | ||
| 842 | |||
| 843 | /* | 743 | /* |
| 844 | * Allocate a new OPP table. In the infrequent case where a new | 744 | * Allocate a new OPP table. In the infrequent case where a new |
| 845 | * device is needed to be added, we pay this penalty. | 745 | * device is needed to be added, we pay this penalty. |
| @@ -867,50 +767,45 @@ static struct opp_table *_add_opp_table(struct device *dev) | |||
| 867 | ret); | 767 | ret); |
| 868 | } | 768 | } |
| 869 | 769 | ||
| 870 | srcu_init_notifier_head(&opp_table->srcu_head); | 770 | BLOCKING_INIT_NOTIFIER_HEAD(&opp_table->head); |
| 871 | INIT_LIST_HEAD(&opp_table->opp_list); | 771 | INIT_LIST_HEAD(&opp_table->opp_list); |
| 772 | mutex_init(&opp_table->lock); | ||
| 773 | kref_init(&opp_table->kref); | ||
| 872 | 774 | ||
| 873 | /* Secure the device table modification */ | 775 | /* Secure the device table modification */ |
| 874 | list_add_rcu(&opp_table->node, &opp_tables); | 776 | list_add(&opp_table->node, &opp_tables); |
| 875 | return opp_table; | 777 | return opp_table; |
| 876 | } | 778 | } |
| 877 | 779 | ||
| 878 | /** | 780 | void _get_opp_table_kref(struct opp_table *opp_table) |
| 879 | * _kfree_device_rcu() - Free opp_table RCU handler | ||
| 880 | * @head: RCU head | ||
| 881 | */ | ||
| 882 | static void _kfree_device_rcu(struct rcu_head *head) | ||
| 883 | { | 781 | { |
| 884 | struct opp_table *opp_table = container_of(head, struct opp_table, | 782 | kref_get(&opp_table->kref); |
| 885 | rcu_head); | ||
| 886 | |||
| 887 | kfree_rcu(opp_table, rcu_head); | ||
| 888 | } | 783 | } |
| 889 | 784 | ||
| 890 | /** | 785 | struct opp_table *dev_pm_opp_get_opp_table(struct device *dev) |
| 891 | * _remove_opp_table() - Removes a OPP table | ||
| 892 | * @opp_table: OPP table to be removed. | ||
| 893 | * | ||
| 894 | * Removes/frees OPP table if it doesn't contain any OPPs. | ||
| 895 | */ | ||
| 896 | static void _remove_opp_table(struct opp_table *opp_table) | ||
| 897 | { | 786 | { |
| 898 | struct opp_device *opp_dev; | 787 | struct opp_table *opp_table; |
| 899 | 788 | ||
| 900 | if (!list_empty(&opp_table->opp_list)) | 789 | /* Hold our table modification lock here */ |
| 901 | return; | 790 | mutex_lock(&opp_table_lock); |
| 902 | 791 | ||
| 903 | if (opp_table->supported_hw) | 792 | opp_table = _find_opp_table_unlocked(dev); |
| 904 | return; | 793 | if (!IS_ERR(opp_table)) |
| 794 | goto unlock; | ||
| 905 | 795 | ||
| 906 | if (opp_table->prop_name) | 796 | opp_table = _allocate_opp_table(dev); |
| 907 | return; | ||
| 908 | 797 | ||
| 909 | if (opp_table->regulators) | 798 | unlock: |
| 910 | return; | 799 | mutex_unlock(&opp_table_lock); |
| 911 | 800 | ||
| 912 | if (opp_table->set_opp) | 801 | return opp_table; |
| 913 | return; | 802 | } |
| 803 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_table); | ||
| 804 | |||
| 805 | static void _opp_table_kref_release(struct kref *kref) | ||
| 806 | { | ||
| 807 | struct opp_table *opp_table = container_of(kref, struct opp_table, kref); | ||
| 808 | struct opp_device *opp_dev; | ||
| 914 | 809 | ||
| 915 | /* Release clk */ | 810 | /* Release clk */ |
| 916 | if (!IS_ERR(opp_table->clk)) | 811 | if (!IS_ERR(opp_table->clk)) |
| @@ -924,63 +819,60 @@ static void _remove_opp_table(struct opp_table *opp_table) | |||
| 924 | /* dev_list must be empty now */ | 819 | /* dev_list must be empty now */ |
| 925 | WARN_ON(!list_empty(&opp_table->dev_list)); | 820 | WARN_ON(!list_empty(&opp_table->dev_list)); |
| 926 | 821 | ||
| 927 | list_del_rcu(&opp_table->node); | 822 | mutex_destroy(&opp_table->lock); |
| 928 | call_srcu(&opp_table->srcu_head.srcu, &opp_table->rcu_head, | 823 | list_del(&opp_table->node); |
| 929 | _kfree_device_rcu); | 824 | kfree(opp_table); |
| 825 | |||
| 826 | mutex_unlock(&opp_table_lock); | ||
| 930 | } | 827 | } |
| 931 | 828 | ||
| 932 | /** | 829 | void dev_pm_opp_put_opp_table(struct opp_table *opp_table) |
| 933 | * _kfree_opp_rcu() - Free OPP RCU handler | ||
| 934 | * @head: RCU head | ||
| 935 | */ | ||
| 936 | static void _kfree_opp_rcu(struct rcu_head *head) | ||
| 937 | { | 830 | { |
| 938 | struct dev_pm_opp *opp = container_of(head, struct dev_pm_opp, rcu_head); | 831 | kref_put_mutex(&opp_table->kref, _opp_table_kref_release, |
| 832 | &opp_table_lock); | ||
| 833 | } | ||
| 834 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_opp_table); | ||
| 939 | 835 | ||
| 940 | kfree_rcu(opp, rcu_head); | 836 | void _opp_free(struct dev_pm_opp *opp) |
| 837 | { | ||
| 838 | kfree(opp); | ||
| 941 | } | 839 | } |
| 942 | 840 | ||
| 943 | /** | 841 | static void _opp_kref_release(struct kref *kref) |
| 944 | * _opp_remove() - Remove an OPP from a table definition | ||
| 945 | * @opp_table: points back to the opp_table struct this opp belongs to | ||
| 946 | * @opp: pointer to the OPP to remove | ||
| 947 | * @notify: OPP_EVENT_REMOVE notification should be sent or not | ||
| 948 | * | ||
| 949 | * This function removes an opp definition from the opp table. | ||
| 950 | * | ||
| 951 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 952 | * It is assumed that the caller holds required mutex for an RCU updater | ||
| 953 | * strategy. | ||
| 954 | */ | ||
| 955 | void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, | ||
| 956 | bool notify) | ||
| 957 | { | 842 | { |
| 843 | struct dev_pm_opp *opp = container_of(kref, struct dev_pm_opp, kref); | ||
| 844 | struct opp_table *opp_table = opp->opp_table; | ||
| 845 | |||
| 958 | /* | 846 | /* |
| 959 | * Notify the changes in the availability of the operable | 847 | * Notify the changes in the availability of the operable |
| 960 | * frequency/voltage list. | 848 | * frequency/voltage list. |
| 961 | */ | 849 | */ |
| 962 | if (notify) | 850 | blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_REMOVE, opp); |
| 963 | srcu_notifier_call_chain(&opp_table->srcu_head, | ||
| 964 | OPP_EVENT_REMOVE, opp); | ||
| 965 | opp_debug_remove_one(opp); | 851 | opp_debug_remove_one(opp); |
| 966 | list_del_rcu(&opp->node); | 852 | list_del(&opp->node); |
| 967 | call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); | 853 | kfree(opp); |
| 968 | 854 | ||
| 969 | _remove_opp_table(opp_table); | 855 | mutex_unlock(&opp_table->lock); |
| 856 | dev_pm_opp_put_opp_table(opp_table); | ||
| 857 | } | ||
| 858 | |||
| 859 | static void dev_pm_opp_get(struct dev_pm_opp *opp) | ||
| 860 | { | ||
| 861 | kref_get(&opp->kref); | ||
| 970 | } | 862 | } |
| 971 | 863 | ||
| 864 | void dev_pm_opp_put(struct dev_pm_opp *opp) | ||
| 865 | { | ||
| 866 | kref_put_mutex(&opp->kref, _opp_kref_release, &opp->opp_table->lock); | ||
| 867 | } | ||
| 868 | EXPORT_SYMBOL_GPL(dev_pm_opp_put); | ||
| 869 | |||
| 972 | /** | 870 | /** |
| 973 | * dev_pm_opp_remove() - Remove an OPP from OPP table | 871 | * dev_pm_opp_remove() - Remove an OPP from OPP table |
| 974 | * @dev: device for which we do this operation | 872 | * @dev: device for which we do this operation |
| 975 | * @freq: OPP to remove with matching 'freq' | 873 | * @freq: OPP to remove with matching 'freq' |
| 976 | * | 874 | * |
| 977 | * This function removes an opp from the opp table. | 875 | * This function removes an opp from the opp table. |
| 978 | * | ||
| 979 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 980 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 981 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 982 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 983 | * mutex cannot be locked. | ||
| 984 | */ | 876 | */ |
| 985 | void dev_pm_opp_remove(struct device *dev, unsigned long freq) | 877 | void dev_pm_opp_remove(struct device *dev, unsigned long freq) |
| 986 | { | 878 | { |
| @@ -988,12 +880,11 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) | |||
| 988 | struct opp_table *opp_table; | 880 | struct opp_table *opp_table; |
| 989 | bool found = false; | 881 | bool found = false; |
| 990 | 882 | ||
| 991 | /* Hold our table modification lock here */ | ||
| 992 | mutex_lock(&opp_table_lock); | ||
| 993 | |||
| 994 | opp_table = _find_opp_table(dev); | 883 | opp_table = _find_opp_table(dev); |
| 995 | if (IS_ERR(opp_table)) | 884 | if (IS_ERR(opp_table)) |
| 996 | goto unlock; | 885 | return; |
| 886 | |||
| 887 | mutex_lock(&opp_table->lock); | ||
| 997 | 888 | ||
| 998 | list_for_each_entry(opp, &opp_table->opp_list, node) { | 889 | list_for_each_entry(opp, &opp_table->opp_list, node) { |
| 999 | if (opp->rate == freq) { | 890 | if (opp->rate == freq) { |
| @@ -1002,28 +893,23 @@ void dev_pm_opp_remove(struct device *dev, unsigned long freq) | |||
| 1002 | } | 893 | } |
| 1003 | } | 894 | } |
| 1004 | 895 | ||
| 1005 | if (!found) { | 896 | mutex_unlock(&opp_table->lock); |
| 897 | |||
| 898 | if (found) { | ||
| 899 | dev_pm_opp_put(opp); | ||
| 900 | } else { | ||
| 1006 | dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", | 901 | dev_warn(dev, "%s: Couldn't find OPP with freq: %lu\n", |
| 1007 | __func__, freq); | 902 | __func__, freq); |
| 1008 | goto unlock; | ||
| 1009 | } | 903 | } |
| 1010 | 904 | ||
| 1011 | _opp_remove(opp_table, opp, true); | 905 | dev_pm_opp_put_opp_table(opp_table); |
| 1012 | unlock: | ||
| 1013 | mutex_unlock(&opp_table_lock); | ||
| 1014 | } | 906 | } |
| 1015 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove); | 907 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove); |
| 1016 | 908 | ||
| 1017 | struct dev_pm_opp *_allocate_opp(struct device *dev, | 909 | struct dev_pm_opp *_opp_allocate(struct opp_table *table) |
| 1018 | struct opp_table **opp_table) | ||
| 1019 | { | 910 | { |
| 1020 | struct dev_pm_opp *opp; | 911 | struct dev_pm_opp *opp; |
| 1021 | int count, supply_size; | 912 | int count, supply_size; |
| 1022 | struct opp_table *table; | ||
| 1023 | |||
| 1024 | table = _add_opp_table(dev); | ||
| 1025 | if (!table) | ||
| 1026 | return NULL; | ||
| 1027 | 913 | ||
| 1028 | /* Allocate space for at least one supply */ | 914 | /* Allocate space for at least one supply */ |
| 1029 | count = table->regulator_count ? table->regulator_count : 1; | 915 | count = table->regulator_count ? table->regulator_count : 1; |
| @@ -1031,17 +917,13 @@ struct dev_pm_opp *_allocate_opp(struct device *dev, | |||
| 1031 | 917 | ||
| 1032 | /* allocate new OPP node and supplies structures */ | 918 | /* allocate new OPP node and supplies structures */ |
| 1033 | opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL); | 919 | opp = kzalloc(sizeof(*opp) + supply_size, GFP_KERNEL); |
| 1034 | if (!opp) { | 920 | if (!opp) |
| 1035 | kfree(table); | ||
| 1036 | return NULL; | 921 | return NULL; |
| 1037 | } | ||
| 1038 | 922 | ||
| 1039 | /* Put the supplies at the end of the OPP structure as an empty array */ | 923 | /* Put the supplies at the end of the OPP structure as an empty array */ |
| 1040 | opp->supplies = (struct dev_pm_opp_supply *)(opp + 1); | 924 | opp->supplies = (struct dev_pm_opp_supply *)(opp + 1); |
| 1041 | INIT_LIST_HEAD(&opp->node); | 925 | INIT_LIST_HEAD(&opp->node); |
| 1042 | 926 | ||
| 1043 | *opp_table = table; | ||
| 1044 | |||
| 1045 | return opp; | 927 | return opp; |
| 1046 | } | 928 | } |
| 1047 | 929 | ||
| @@ -1067,11 +949,21 @@ static bool _opp_supported_by_regulators(struct dev_pm_opp *opp, | |||
| 1067 | return true; | 949 | return true; |
| 1068 | } | 950 | } |
| 1069 | 951 | ||
| 952 | /* | ||
| 953 | * Returns: | ||
| 954 | * 0: On success. And appropriate error message for duplicate OPPs. | ||
| 955 | * -EBUSY: For OPP with same freq/volt and is available. The callers of | ||
| 956 | * _opp_add() must return 0 if they receive -EBUSY from it. This is to make | ||
| 957 | * sure we don't print error messages unnecessarily if different parts of | ||
| 958 | * kernel try to initialize the OPP table. | ||
| 959 | * -EEXIST: For OPP with same freq but different volt or is unavailable. This | ||
| 960 | * should be considered an error by the callers of _opp_add(). | ||
| 961 | */ | ||
| 1070 | int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, | 962 | int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, |
| 1071 | struct opp_table *opp_table) | 963 | struct opp_table *opp_table) |
| 1072 | { | 964 | { |
| 1073 | struct dev_pm_opp *opp; | 965 | struct dev_pm_opp *opp; |
| 1074 | struct list_head *head = &opp_table->opp_list; | 966 | struct list_head *head; |
| 1075 | int ret; | 967 | int ret; |
| 1076 | 968 | ||
| 1077 | /* | 969 | /* |
| @@ -1082,7 +974,10 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, | |||
| 1082 | * loop, don't replace it with head otherwise it will become an infinite | 974 | * loop, don't replace it with head otherwise it will become an infinite |
| 1083 | * loop. | 975 | * loop. |
| 1084 | */ | 976 | */ |
| 1085 | list_for_each_entry_rcu(opp, &opp_table->opp_list, node) { | 977 | mutex_lock(&opp_table->lock); |
| 978 | head = &opp_table->opp_list; | ||
| 979 | |||
| 980 | list_for_each_entry(opp, &opp_table->opp_list, node) { | ||
| 1086 | if (new_opp->rate > opp->rate) { | 981 | if (new_opp->rate > opp->rate) { |
| 1087 | head = &opp->node; | 982 | head = &opp->node; |
| 1088 | continue; | 983 | continue; |
| @@ -1098,12 +993,21 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, | |||
| 1098 | new_opp->supplies[0].u_volt, new_opp->available); | 993 | new_opp->supplies[0].u_volt, new_opp->available); |
| 1099 | 994 | ||
| 1100 | /* Should we compare voltages for all regulators here ? */ | 995 | /* Should we compare voltages for all regulators here ? */ |
| 1101 | return opp->available && | 996 | ret = opp->available && |
| 1102 | new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? 0 : -EEXIST; | 997 | new_opp->supplies[0].u_volt == opp->supplies[0].u_volt ? -EBUSY : -EEXIST; |
| 998 | |||
| 999 | mutex_unlock(&opp_table->lock); | ||
| 1000 | return ret; | ||
| 1103 | } | 1001 | } |
| 1104 | 1002 | ||
| 1003 | list_add(&new_opp->node, head); | ||
| 1004 | mutex_unlock(&opp_table->lock); | ||
| 1005 | |||
| 1105 | new_opp->opp_table = opp_table; | 1006 | new_opp->opp_table = opp_table; |
| 1106 | list_add_rcu(&new_opp->node, head); | 1007 | kref_init(&new_opp->kref); |
| 1008 | |||
| 1009 | /* Get a reference to the OPP table */ | ||
| 1010 | _get_opp_table_kref(opp_table); | ||
| 1107 | 1011 | ||
| 1108 | ret = opp_debug_create_one(new_opp, opp_table); | 1012 | ret = opp_debug_create_one(new_opp, opp_table); |
| 1109 | if (ret) | 1013 | if (ret) |
| @@ -1121,6 +1025,7 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, | |||
| 1121 | 1025 | ||
| 1122 | /** | 1026 | /** |
| 1123 | * _opp_add_v1() - Allocate a OPP based on v1 bindings. | 1027 | * _opp_add_v1() - Allocate a OPP based on v1 bindings. |
| 1028 | * @opp_table: OPP table | ||
| 1124 | * @dev: device for which we do this operation | 1029 | * @dev: device for which we do this operation |
| 1125 | * @freq: Frequency in Hz for this OPP | 1030 | * @freq: Frequency in Hz for this OPP |
| 1126 | * @u_volt: Voltage in uVolts for this OPP | 1031 | * @u_volt: Voltage in uVolts for this OPP |
| @@ -1133,12 +1038,6 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, | |||
| 1133 | * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table | 1038 | * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table |
| 1134 | * and freed by dev_pm_opp_of_remove_table. | 1039 | * and freed by dev_pm_opp_of_remove_table. |
| 1135 | * | 1040 | * |
| 1136 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1137 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 1138 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 1139 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1140 | * mutex cannot be locked. | ||
| 1141 | * | ||
| 1142 | * Return: | 1041 | * Return: |
| 1143 | * 0 On success OR | 1042 | * 0 On success OR |
| 1144 | * Duplicate OPPs (both freq and volt are same) and opp->available | 1043 | * Duplicate OPPs (both freq and volt are same) and opp->available |
| @@ -1146,22 +1045,16 @@ int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, | |||
| 1146 | * Duplicate OPPs (both freq and volt are same) and !opp->available | 1045 | * Duplicate OPPs (both freq and volt are same) and !opp->available |
| 1147 | * -ENOMEM Memory allocation failure | 1046 | * -ENOMEM Memory allocation failure |
| 1148 | */ | 1047 | */ |
| 1149 | int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, | 1048 | int _opp_add_v1(struct opp_table *opp_table, struct device *dev, |
| 1150 | bool dynamic) | 1049 | unsigned long freq, long u_volt, bool dynamic) |
| 1151 | { | 1050 | { |
| 1152 | struct opp_table *opp_table; | ||
| 1153 | struct dev_pm_opp *new_opp; | 1051 | struct dev_pm_opp *new_opp; |
| 1154 | unsigned long tol; | 1052 | unsigned long tol; |
| 1155 | int ret; | 1053 | int ret; |
| 1156 | 1054 | ||
| 1157 | /* Hold our table modification lock here */ | 1055 | new_opp = _opp_allocate(opp_table); |
| 1158 | mutex_lock(&opp_table_lock); | 1056 | if (!new_opp) |
| 1159 | 1057 | return -ENOMEM; | |
| 1160 | new_opp = _allocate_opp(dev, &opp_table); | ||
| 1161 | if (!new_opp) { | ||
| 1162 | ret = -ENOMEM; | ||
| 1163 | goto unlock; | ||
| 1164 | } | ||
| 1165 | 1058 | ||
| 1166 | /* populate the opp table */ | 1059 | /* populate the opp table */ |
| 1167 | new_opp->rate = freq; | 1060 | new_opp->rate = freq; |
| @@ -1173,22 +1066,23 @@ int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, | |||
| 1173 | new_opp->dynamic = dynamic; | 1066 | new_opp->dynamic = dynamic; |
| 1174 | 1067 | ||
| 1175 | ret = _opp_add(dev, new_opp, opp_table); | 1068 | ret = _opp_add(dev, new_opp, opp_table); |
| 1176 | if (ret) | 1069 | if (ret) { |
| 1070 | /* Don't return error for duplicate OPPs */ | ||
| 1071 | if (ret == -EBUSY) | ||
| 1072 | ret = 0; | ||
| 1177 | goto free_opp; | 1073 | goto free_opp; |
| 1178 | 1074 | } | |
| 1179 | mutex_unlock(&opp_table_lock); | ||
| 1180 | 1075 | ||
| 1181 | /* | 1076 | /* |
| 1182 | * Notify the changes in the availability of the operable | 1077 | * Notify the changes in the availability of the operable |
| 1183 | * frequency/voltage list. | 1078 | * frequency/voltage list. |
| 1184 | */ | 1079 | */ |
| 1185 | srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp); | 1080 | blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); |
| 1186 | return 0; | 1081 | return 0; |
| 1187 | 1082 | ||
| 1188 | free_opp: | 1083 | free_opp: |
| 1189 | _opp_remove(opp_table, new_opp, false); | 1084 | _opp_free(new_opp); |
| 1190 | unlock: | 1085 | |
| 1191 | mutex_unlock(&opp_table_lock); | ||
| 1192 | return ret; | 1086 | return ret; |
| 1193 | } | 1087 | } |
| 1194 | 1088 | ||
| @@ -1202,27 +1096,16 @@ unlock: | |||
| 1202 | * specify the hierarchy of versions it supports. OPP layer will then enable | 1096 | * specify the hierarchy of versions it supports. OPP layer will then enable |
| 1203 | * OPPs that are available for those versions, based on its 'opp-supported-hw' | 1097 | * OPPs that are available for those versions, based on its 'opp-supported-hw' |
| 1204 | * property. | 1098 | * property. |
| 1205 | * | ||
| 1206 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1207 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 1208 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 1209 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1210 | * mutex cannot be locked. | ||
| 1211 | */ | 1099 | */ |
| 1212 | int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, | 1100 | struct opp_table *dev_pm_opp_set_supported_hw(struct device *dev, |
| 1213 | unsigned int count) | 1101 | const u32 *versions, unsigned int count) |
| 1214 | { | 1102 | { |
| 1215 | struct opp_table *opp_table; | 1103 | struct opp_table *opp_table; |
| 1216 | int ret = 0; | 1104 | int ret; |
| 1217 | |||
| 1218 | /* Hold our table modification lock here */ | ||
| 1219 | mutex_lock(&opp_table_lock); | ||
| 1220 | 1105 | ||
| 1221 | opp_table = _add_opp_table(dev); | 1106 | opp_table = dev_pm_opp_get_opp_table(dev); |
| 1222 | if (!opp_table) { | 1107 | if (!opp_table) |
| 1223 | ret = -ENOMEM; | 1108 | return ERR_PTR(-ENOMEM); |
| 1224 | goto unlock; | ||
| 1225 | } | ||
| 1226 | 1109 | ||
| 1227 | /* Make sure there are no concurrent readers while updating opp_table */ | 1110 | /* Make sure there are no concurrent readers while updating opp_table */ |
| 1228 | WARN_ON(!list_empty(&opp_table->opp_list)); | 1111 | WARN_ON(!list_empty(&opp_table->opp_list)); |
| @@ -1243,65 +1126,40 @@ int dev_pm_opp_set_supported_hw(struct device *dev, const u32 *versions, | |||
| 1243 | } | 1126 | } |
| 1244 | 1127 | ||
| 1245 | opp_table->supported_hw_count = count; | 1128 | opp_table->supported_hw_count = count; |
| 1246 | mutex_unlock(&opp_table_lock); | 1129 | |
| 1247 | return 0; | 1130 | return opp_table; |
| 1248 | 1131 | ||
| 1249 | err: | 1132 | err: |
| 1250 | _remove_opp_table(opp_table); | 1133 | dev_pm_opp_put_opp_table(opp_table); |
| 1251 | unlock: | ||
| 1252 | mutex_unlock(&opp_table_lock); | ||
| 1253 | 1134 | ||
| 1254 | return ret; | 1135 | return ERR_PTR(ret); |
| 1255 | } | 1136 | } |
| 1256 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw); | 1137 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_supported_hw); |
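The new contract is visible in the signature: instead of an int, the caller gets back the reference-counted table and hands that same handle to the put side later. A minimal caller sketch against these signatures (my_driver_opp_init and the version values are hypothetical, not from this patch):

	static int my_driver_opp_init(struct device *dev)
	{
		const u32 versions[] = { 0x1, 0x2 };	/* illustrative hw versions */
		struct opp_table *opp_table;

		opp_table = dev_pm_opp_set_supported_hw(dev, versions,
							ARRAY_SIZE(versions));
		if (IS_ERR(opp_table))
			return PTR_ERR(opp_table);

		/* add or parse OPPs here; teardown must call
		 * dev_pm_opp_put_supported_hw(opp_table) to drop the reference */
		return 0;
	}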
| 1257 | 1138 | ||
| 1258 | /** | 1139 | /** |
| 1259 | * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw | 1140 | * dev_pm_opp_put_supported_hw() - Releases resources blocked for supported hw |
| 1260 | * @dev: Device for which supported-hw has to be put. | 1141 | * @opp_table: OPP table returned by dev_pm_opp_set_supported_hw(). |
| 1261 | * | 1142 | * |
| 1262 | * This is required only for the V2 bindings, and is called for a matching | 1143 | * This is required only for the V2 bindings, and is called for a matching |
| 1263 | * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure | 1144 | * dev_pm_opp_set_supported_hw(). Until this is called, the opp_table structure |
| 1264 | * will not be freed. | 1145 | * will not be freed. |
| 1265 | * | ||
| 1266 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1267 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 1268 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 1269 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1270 | * mutex cannot be locked. | ||
| 1271 | */ | 1146 | */ |
| 1272 | void dev_pm_opp_put_supported_hw(struct device *dev) | 1147 | void dev_pm_opp_put_supported_hw(struct opp_table *opp_table) |
| 1273 | { | 1148 | { |
| 1274 | struct opp_table *opp_table; | ||
| 1275 | |||
| 1276 | /* Hold our table modification lock here */ | ||
| 1277 | mutex_lock(&opp_table_lock); | ||
| 1278 | |||
| 1279 | /* Check for existing table for 'dev' first */ | ||
| 1280 | opp_table = _find_opp_table(dev); | ||
| 1281 | if (IS_ERR(opp_table)) { | ||
| 1282 | dev_err(dev, "Failed to find opp_table: %ld\n", | ||
| 1283 | PTR_ERR(opp_table)); | ||
| 1284 | goto unlock; | ||
| 1285 | } | ||
| 1286 | |||
| 1287 | /* Make sure there are no concurrent readers while updating opp_table */ | 1149 | /* Make sure there are no concurrent readers while updating opp_table */ |
| 1288 | WARN_ON(!list_empty(&opp_table->opp_list)); | 1150 | WARN_ON(!list_empty(&opp_table->opp_list)); |
| 1289 | 1151 | ||
| 1290 | if (!opp_table->supported_hw) { | 1152 | if (!opp_table->supported_hw) { |
| 1291 | dev_err(dev, "%s: Doesn't have supported hardware list\n", | 1153 | pr_err("%s: Doesn't have supported hardware list\n", |
| 1292 | __func__); | 1154 | __func__); |
| 1293 | goto unlock; | 1155 | return; |
| 1294 | } | 1156 | } |
| 1295 | 1157 | ||
| 1296 | kfree(opp_table->supported_hw); | 1158 | kfree(opp_table->supported_hw); |
| 1297 | opp_table->supported_hw = NULL; | 1159 | opp_table->supported_hw = NULL; |
| 1298 | opp_table->supported_hw_count = 0; | 1160 | opp_table->supported_hw_count = 0; |
| 1299 | 1161 | ||
| 1300 | /* Try freeing opp_table if this was the last blocking resource */ | 1162 | dev_pm_opp_put_opp_table(opp_table); |
| 1301 | _remove_opp_table(opp_table); | ||
| 1302 | |||
| 1303 | unlock: | ||
| 1304 | mutex_unlock(&opp_table_lock); | ||
| 1305 | } | 1163 | } |
| 1306 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw); | 1164 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw); |
| 1307 | 1165 | ||
| @@ -1314,26 +1172,15 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_supported_hw); | |||
| 1314 | * specify the extension to be used for certain property names. The properties to | 1172 | * specify the extension to be used for certain property names. The properties to |
| 1315 | * which the extension will apply are opp-microvolt and opp-microamp. OPP core | 1173 | * which the extension will apply are opp-microvolt and opp-microamp. OPP core |
| 1316 | * should postfix the property name with -<name> while looking for them. | 1174 | * should postfix the property name with -<name> while looking for them. |
| 1317 | * | ||
| 1318 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1319 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 1320 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 1321 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1322 | * mutex cannot be locked. | ||
| 1323 | */ | 1175 | */ |
| 1324 | int dev_pm_opp_set_prop_name(struct device *dev, const char *name) | 1176 | struct opp_table *dev_pm_opp_set_prop_name(struct device *dev, const char *name) |
| 1325 | { | 1177 | { |
| 1326 | struct opp_table *opp_table; | 1178 | struct opp_table *opp_table; |
| 1327 | int ret = 0; | 1179 | int ret; |
| 1328 | |||
| 1329 | /* Hold our table modification lock here */ | ||
| 1330 | mutex_lock(&opp_table_lock); | ||
| 1331 | 1180 | ||
| 1332 | opp_table = _add_opp_table(dev); | 1181 | opp_table = dev_pm_opp_get_opp_table(dev); |
| 1333 | if (!opp_table) { | 1182 | if (!opp_table) |
| 1334 | ret = -ENOMEM; | 1183 | return ERR_PTR(-ENOMEM); |
| 1335 | goto unlock; | ||
| 1336 | } | ||
| 1337 | 1184 | ||
| 1338 | /* Make sure there are no concurrent readers while updating opp_table */ | 1185 | /* Make sure there are no concurrent readers while updating opp_table */ |
| 1339 | WARN_ON(!list_empty(&opp_table->opp_list)); | 1186 | WARN_ON(!list_empty(&opp_table->opp_list)); |
| @@ -1352,63 +1199,37 @@ int dev_pm_opp_set_prop_name(struct device *dev, const char *name) | |||
| 1352 | goto err; | 1199 | goto err; |
| 1353 | } | 1200 | } |
| 1354 | 1201 | ||
| 1355 | mutex_unlock(&opp_table_lock); | 1202 | return opp_table; |
| 1356 | return 0; | ||
| 1357 | 1203 | ||
| 1358 | err: | 1204 | err: |
| 1359 | _remove_opp_table(opp_table); | 1205 | dev_pm_opp_put_opp_table(opp_table); |
| 1360 | unlock: | ||
| 1361 | mutex_unlock(&opp_table_lock); | ||
| 1362 | 1206 | ||
| 1363 | return ret; | 1207 | return ERR_PTR(ret); |
| 1364 | } | 1208 | } |
| 1365 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name); | 1209 | EXPORT_SYMBOL_GPL(dev_pm_opp_set_prop_name); |
| 1366 | 1210 | ||
| 1367 | /** | 1211 | /** |
| 1368 | * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name | 1212 | * dev_pm_opp_put_prop_name() - Releases resources blocked for prop-name |
| 1369 | * @dev: Device for which the prop-name has to be put. | 1213 | * @opp_table: OPP table returned by dev_pm_opp_set_prop_name(). |
| 1370 | * | 1214 | * |
| 1371 | * This is required only for the V2 bindings, and is called for a matching | 1215 | * This is required only for the V2 bindings, and is called for a matching |
| 1372 | * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure | 1216 | * dev_pm_opp_set_prop_name(). Until this is called, the opp_table structure |
| 1373 | * will not be freed. | 1217 | * will not be freed. |
| 1374 | * | ||
| 1375 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1376 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 1377 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 1378 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1379 | * mutex cannot be locked. | ||
| 1380 | */ | 1218 | */ |
| 1381 | void dev_pm_opp_put_prop_name(struct device *dev) | 1219 | void dev_pm_opp_put_prop_name(struct opp_table *opp_table) |
| 1382 | { | 1220 | { |
| 1383 | struct opp_table *opp_table; | ||
| 1384 | |||
| 1385 | /* Hold our table modification lock here */ | ||
| 1386 | mutex_lock(&opp_table_lock); | ||
| 1387 | |||
| 1388 | /* Check for existing table for 'dev' first */ | ||
| 1389 | opp_table = _find_opp_table(dev); | ||
| 1390 | if (IS_ERR(opp_table)) { | ||
| 1391 | dev_err(dev, "Failed to find opp_table: %ld\n", | ||
| 1392 | PTR_ERR(opp_table)); | ||
| 1393 | goto unlock; | ||
| 1394 | } | ||
| 1395 | |||
| 1396 | /* Make sure there are no concurrent readers while updating opp_table */ | 1221 | /* Make sure there are no concurrent readers while updating opp_table */ |
| 1397 | WARN_ON(!list_empty(&opp_table->opp_list)); | 1222 | WARN_ON(!list_empty(&opp_table->opp_list)); |
| 1398 | 1223 | ||
| 1399 | if (!opp_table->prop_name) { | 1224 | if (!opp_table->prop_name) { |
| 1400 | dev_err(dev, "%s: Doesn't have a prop-name\n", __func__); | 1225 | pr_err("%s: Doesn't have a prop-name\n", __func__); |
| 1401 | goto unlock; | 1226 | return; |
| 1402 | } | 1227 | } |
| 1403 | 1228 | ||
| 1404 | kfree(opp_table->prop_name); | 1229 | kfree(opp_table->prop_name); |
| 1405 | opp_table->prop_name = NULL; | 1230 | opp_table->prop_name = NULL; |
| 1406 | 1231 | ||
| 1407 | /* Try freeing opp_table if this was the last blocking resource */ | 1232 | dev_pm_opp_put_opp_table(opp_table); |
| 1408 | _remove_opp_table(opp_table); | ||
| 1409 | |||
| 1410 | unlock: | ||
| 1411 | mutex_unlock(&opp_table_lock); | ||
| 1412 | } | 1233 | } |
| 1413 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); | 1234 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_prop_name); |
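dev_pm_opp_set_prop_name() follows the same handle-returning pattern, and its put side now takes the table directly instead of re-finding it via the device. A hedged usage sketch (the "samsung" suffix is only an example value):

	struct opp_table *opp_table;

	opp_table = dev_pm_opp_set_prop_name(dev, "samsung");	/* example suffix */
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* OPP core will now look for opp-microvolt-samsung, opp-microamp-samsung */

	dev_pm_opp_put_prop_name(opp_table);	/* drops the reference taken by set */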
| 1414 | 1235 | ||
| @@ -1455,12 +1276,6 @@ static void _free_set_opp_data(struct opp_table *opp_table) | |||
| 1455 | * well. | 1276 | * well. |
| 1456 | * | 1277 | * |
| 1457 | * This must be called before any OPPs are initialized for the device. | 1278 | * This must be called before any OPPs are initialized for the device. |
| 1458 | * | ||
| 1459 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1460 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 1461 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 1462 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1463 | * mutex cannot be locked. | ||
| 1464 | */ | 1279 | */ |
| 1465 | struct opp_table *dev_pm_opp_set_regulators(struct device *dev, | 1280 | struct opp_table *dev_pm_opp_set_regulators(struct device *dev, |
| 1466 | const char * const names[], | 1281 | const char * const names[], |
| @@ -1470,13 +1285,9 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev, | |||
| 1470 | struct regulator *reg; | 1285 | struct regulator *reg; |
| 1471 | int ret, i; | 1286 | int ret, i; |
| 1472 | 1287 | ||
| 1473 | mutex_lock(&opp_table_lock); | 1288 | opp_table = dev_pm_opp_get_opp_table(dev); |
| 1474 | 1289 | if (!opp_table) | |
| 1475 | opp_table = _add_opp_table(dev); | 1290 | return ERR_PTR(-ENOMEM); |
| 1476 | if (!opp_table) { | ||
| 1477 | ret = -ENOMEM; | ||
| 1478 | goto unlock; | ||
| 1479 | } | ||
| 1480 | 1291 | ||
| 1481 | /* This should be called before OPPs are initialized */ | 1292 | /* This should be called before OPPs are initialized */ |
| 1482 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { | 1293 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { |
| @@ -1518,7 +1329,6 @@ struct opp_table *dev_pm_opp_set_regulators(struct device *dev, | |||
| 1518 | if (ret) | 1329 | if (ret) |
| 1519 | goto free_regulators; | 1330 | goto free_regulators; |
| 1520 | 1331 | ||
| 1521 | mutex_unlock(&opp_table_lock); | ||
| 1522 | return opp_table; | 1332 | return opp_table; |
| 1523 | 1333 | ||
| 1524 | free_regulators: | 1334 | free_regulators: |
| @@ -1529,9 +1339,7 @@ free_regulators: | |||
| 1529 | opp_table->regulators = NULL; | 1339 | opp_table->regulators = NULL; |
| 1530 | opp_table->regulator_count = 0; | 1340 | opp_table->regulator_count = 0; |
| 1531 | err: | 1341 | err: |
| 1532 | _remove_opp_table(opp_table); | 1342 | dev_pm_opp_put_opp_table(opp_table); |
| 1533 | unlock: | ||
| 1534 | mutex_unlock(&opp_table_lock); | ||
| 1535 | 1343 | ||
| 1536 | return ERR_PTR(ret); | 1344 | return ERR_PTR(ret); |
| 1537 | } | 1345 | } |
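dev_pm_opp_set_regulators() already returned the table before this series; what changes is where the reference comes from (dev_pm_opp_get_opp_table()) and how the error path drops it (dev_pm_opp_put_opp_table()). A caller sketch under those semantics (the supply names are illustrative):

	const char * const names[] = { "vdd", "vddio" };	/* illustrative supplies */
	struct opp_table *opp_table;

	opp_table = dev_pm_opp_set_regulators(dev, names, ARRAY_SIZE(names));
	if (IS_ERR(opp_table))
		return PTR_ERR(opp_table);

	/* use OPPs; teardown pairs with dev_pm_opp_put_regulators(opp_table) */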
| @@ -1540,22 +1348,14 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_regulators); | |||
| 1540 | /** | 1348 | /** |
| 1541 | * dev_pm_opp_put_regulators() - Releases resources blocked for regulator | 1349 | * dev_pm_opp_put_regulators() - Releases resources blocked for regulator |
| 1542 | * @opp_table: OPP table returned from dev_pm_opp_set_regulators(). | 1350 | * @opp_table: OPP table returned from dev_pm_opp_set_regulators(). |
| 1543 | * | ||
| 1544 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1545 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 1546 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 1547 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1548 | * mutex cannot be locked. | ||
| 1549 | */ | 1351 | */ |
| 1550 | void dev_pm_opp_put_regulators(struct opp_table *opp_table) | 1352 | void dev_pm_opp_put_regulators(struct opp_table *opp_table) |
| 1551 | { | 1353 | { |
| 1552 | int i; | 1354 | int i; |
| 1553 | 1355 | ||
| 1554 | mutex_lock(&opp_table_lock); | ||
| 1555 | |||
| 1556 | if (!opp_table->regulators) { | 1356 | if (!opp_table->regulators) { |
| 1557 | pr_err("%s: Doesn't have regulators set\n", __func__); | 1357 | pr_err("%s: Doesn't have regulators set\n", __func__); |
| 1558 | goto unlock; | 1358 | return; |
| 1559 | } | 1359 | } |
| 1560 | 1360 | ||
| 1561 | /* Make sure there are no concurrent readers while updating opp_table */ | 1361 | /* Make sure there are no concurrent readers while updating opp_table */ |
| @@ -1570,11 +1370,7 @@ void dev_pm_opp_put_regulators(struct opp_table *opp_table) | |||
| 1570 | opp_table->regulators = NULL; | 1370 | opp_table->regulators = NULL; |
| 1571 | opp_table->regulator_count = 0; | 1371 | opp_table->regulator_count = 0; |
| 1572 | 1372 | ||
| 1573 | /* Try freeing opp_table if this was the last blocking resource */ | 1373 | dev_pm_opp_put_opp_table(opp_table); |
| 1574 | _remove_opp_table(opp_table); | ||
| 1575 | |||
| 1576 | unlock: | ||
| 1577 | mutex_unlock(&opp_table_lock); | ||
| 1578 | } | 1374 | } |
| 1579 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators); | 1375 | EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators); |
| 1580 | 1376 | ||
| @@ -1587,29 +1383,19 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_put_regulators); | |||
| 1587 | * regulators per device), instead of the generic OPP set rate helper. | 1383 | * regulators per device), instead of the generic OPP set rate helper. |
| 1588 | * | 1384 | * |
| 1589 | * This must be called before any OPPs are initialized for the device. | 1385 | * This must be called before any OPPs are initialized for the device. |
| 1590 | * | ||
| 1591 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1592 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 1593 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 1594 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1595 | * mutex cannot be locked. | ||
| 1596 | */ | 1386 | */ |
| 1597 | int dev_pm_opp_register_set_opp_helper(struct device *dev, | 1387 | struct opp_table *dev_pm_opp_register_set_opp_helper(struct device *dev, |
| 1598 | int (*set_opp)(struct dev_pm_set_opp_data *data)) | 1388 | int (*set_opp)(struct dev_pm_set_opp_data *data)) |
| 1599 | { | 1389 | { |
| 1600 | struct opp_table *opp_table; | 1390 | struct opp_table *opp_table; |
| 1601 | int ret; | 1391 | int ret; |
| 1602 | 1392 | ||
| 1603 | if (!set_opp) | 1393 | if (!set_opp) |
| 1604 | return -EINVAL; | 1394 | return ERR_PTR(-EINVAL); |
| 1605 | |||
| 1606 | mutex_lock(&opp_table_lock); | ||
| 1607 | 1395 | ||
| 1608 | opp_table = _add_opp_table(dev); | 1396 | opp_table = dev_pm_opp_get_opp_table(dev); |
| 1609 | if (!opp_table) { | 1397 | if (!opp_table) |
| 1610 | ret = -ENOMEM; | 1398 | return ERR_PTR(-ENOMEM); |
| 1611 | goto unlock; | ||
| 1612 | } | ||
| 1613 | 1399 | ||
| 1614 | /* This should be called before OPPs are initialized */ | 1400 | /* This should be called before OPPs are initialized */ |
| 1615 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { | 1401 | if (WARN_ON(!list_empty(&opp_table->opp_list))) { |
| @@ -1625,47 +1411,28 @@ int dev_pm_opp_register_set_opp_helper(struct device *dev, | |||
| 1625 | 1411 | ||
| 1626 | opp_table->set_opp = set_opp; | 1412 | opp_table->set_opp = set_opp; |
| 1627 | 1413 | ||
| 1628 | mutex_unlock(&opp_table_lock); | 1414 | return opp_table; |
| 1629 | return 0; | ||
| 1630 | 1415 | ||
| 1631 | err: | 1416 | err: |
| 1632 | _remove_opp_table(opp_table); | 1417 | dev_pm_opp_put_opp_table(opp_table); |
| 1633 | unlock: | ||
| 1634 | mutex_unlock(&opp_table_lock); | ||
| 1635 | 1418 | ||
| 1636 | return ret; | 1419 | return ERR_PTR(ret); |
| 1637 | } | 1420 | } |
| 1638 | EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper); | 1421 | EXPORT_SYMBOL_GPL(dev_pm_opp_register_set_opp_helper); |
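The helper registration now also returns the table handle. A sketch of a platform hook under the new signature (my_set_opp and my_probe_register_helper are hypothetical; dev_pm_set_opp_data is the layout this API takes as given):

	static int my_set_opp(struct dev_pm_set_opp_data *data)
	{
		/* sequence clock and regulator updates as the platform requires */
		return 0;
	}

	static int my_probe_register_helper(struct device *dev)
	{
		struct opp_table *opp_table;

		opp_table = dev_pm_opp_register_set_opp_helper(dev, my_set_opp);
		if (IS_ERR(opp_table))
			return PTR_ERR(opp_table);

		/* later: dev_pm_opp_register_put_opp_helper(opp_table) */
		return 0;
	}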
| 1639 | 1422 | ||
| 1640 | /** | 1423 | /** |
| 1641 | * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for | 1424 | * dev_pm_opp_register_put_opp_helper() - Releases resources blocked for |
| 1642 | * set_opp helper | 1425 | * set_opp helper |
| 1643 | * @dev: Device for which custom set_opp helper has to be cleared. | 1426 | * @opp_table: OPP table returned from dev_pm_opp_register_set_opp_helper(). |
| 1644 | * | 1427 | * |
| 1645 | * Locking: The internal opp_table and opp structures are RCU protected. | 1428 | * Release resources blocked for platform specific set_opp helper. |
| 1646 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 1647 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 1648 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1649 | * mutex cannot be locked. | ||
| 1650 | */ | 1429 | */ |
| 1651 | void dev_pm_opp_register_put_opp_helper(struct device *dev) | 1430 | void dev_pm_opp_register_put_opp_helper(struct opp_table *opp_table) |
| 1652 | { | 1431 | { |
| 1653 | struct opp_table *opp_table; | ||
| 1654 | |||
| 1655 | mutex_lock(&opp_table_lock); | ||
| 1656 | |||
| 1657 | /* Check for existing table for 'dev' first */ | ||
| 1658 | opp_table = _find_opp_table(dev); | ||
| 1659 | if (IS_ERR(opp_table)) { | ||
| 1660 | dev_err(dev, "Failed to find opp_table: %ld\n", | ||
| 1661 | PTR_ERR(opp_table)); | ||
| 1662 | goto unlock; | ||
| 1663 | } | ||
| 1664 | |||
| 1665 | if (!opp_table->set_opp) { | 1432 | if (!opp_table->set_opp) { |
| 1666 | dev_err(dev, "%s: Doesn't have custom set_opp helper set\n", | 1433 | pr_err("%s: Doesn't have custom set_opp helper set\n", |
| 1667 | __func__); | 1434 | __func__); |
| 1668 | goto unlock; | 1435 | return; |
| 1669 | } | 1436 | } |
| 1670 | 1437 | ||
| 1671 | /* Make sure there are no concurrent readers while updating opp_table */ | 1438 | /* Make sure there are no concurrent readers while updating opp_table */ |
| @@ -1673,11 +1440,7 @@ void dev_pm_opp_register_put_opp_helper(struct device *dev) | |||
| 1673 | 1440 | ||
| 1674 | opp_table->set_opp = NULL; | 1441 | opp_table->set_opp = NULL; |
| 1675 | 1442 | ||
| 1676 | /* Try freeing opp_table if this was the last blocking resource */ | 1443 | dev_pm_opp_put_opp_table(opp_table); |
| 1677 | _remove_opp_table(opp_table); | ||
| 1678 | |||
| 1679 | unlock: | ||
| 1680 | mutex_unlock(&opp_table_lock); | ||
| 1681 | } | 1444 | } |
| 1682 | EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper); | 1445 | EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper); |
| 1683 | 1446 | ||
| @@ -1691,12 +1454,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper); | |||
| 1691 | * The opp is made available by default and it can be controlled using | 1454 | * The opp is made available by default and it can be controlled using |
| 1692 | * dev_pm_opp_enable/disable functions. | 1455 | * dev_pm_opp_enable/disable functions. |
| 1693 | * | 1456 | * |
| 1694 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1695 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 1696 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 1697 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1698 | * mutex cannot be locked. | ||
| 1699 | * | ||
| 1700 | * Return: | 1457 | * Return: |
| 1701 | * 0 On success OR | 1458 | * 0 On success OR |
| 1702 | * Duplicate OPPs (both freq and volt are same) and opp->available | 1459 | * Duplicate OPPs (both freq and volt are same) and opp->available |
| @@ -1706,7 +1463,17 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_register_put_opp_helper); | |||
| 1706 | */ | 1463 | */ |
| 1707 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) | 1464 | int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) |
| 1708 | { | 1465 | { |
| 1709 | return _opp_add_v1(dev, freq, u_volt, true); | 1466 | struct opp_table *opp_table; |
| 1467 | int ret; | ||
| 1468 | |||
| 1469 | opp_table = dev_pm_opp_get_opp_table(dev); | ||
| 1470 | if (!opp_table) | ||
| 1471 | return -ENOMEM; | ||
| 1472 | |||
| 1473 | ret = _opp_add_v1(opp_table, dev, freq, u_volt, true); | ||
| 1474 | |||
| 1475 | dev_pm_opp_put_opp_table(opp_table); | ||
| 1476 | return ret; | ||
| 1710 | } | 1477 | } |
| 1711 | EXPORT_SYMBOL_GPL(dev_pm_opp_add); | 1478 | EXPORT_SYMBOL_GPL(dev_pm_opp_add); |
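dev_pm_opp_add() is now a thin wrapper: it pins the table, delegates to _opp_add_v1(), and drops its own reference, while the OPP keeps the table alive through the reference _opp_add() took above. Nothing changes for callers; a sketch with illustrative numbers:

	/* dynamic OPP: 1.2 GHz at 1.10 V (values are illustrative) */
	ret = dev_pm_opp_add(dev, 1200000000, 1100000);
	if (ret)
		dev_err(dev, "failed to add OPP: %d\n", ret);
	/* a duplicate of an existing available OPP is silently tolerated (-EBUSY
	 * is swallowed in _opp_add_v1 above) */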
| 1712 | 1479 | ||
| @@ -1716,41 +1483,30 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_add); | |||
| 1716 | * @freq: OPP frequency to modify availability | 1483 | * @freq: OPP frequency to modify availability |
| 1717 | * @availability_req: availability status requested for this opp | 1484 | * @availability_req: availability status requested for this opp |
| 1718 | * | 1485 | * |
| 1719 | * Set the availability of an OPP with an RCU operation, opp_{enable,disable} | 1486 | * Set the availability of an OPP; opp_{enable,disable} share common logic |
| 1720 | * share a common logic which is isolated here. | 1487 | * that is isolated here. |
| 1721 | * | 1488 | * |
| 1722 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | 1489 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
| 1723 | * copy operation, returns 0 if no modification was done OR modification was | 1490 | * copy operation, returns 0 if no modification was done OR modification was |
| 1724 | * successful. | 1491 | * successful. |
| 1725 | * | ||
| 1726 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1727 | * Hence this function internally uses RCU updater strategy with mutex locks to | ||
| 1728 | * keep the integrity of the internal data structures. Callers should ensure | ||
| 1729 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1730 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | ||
| 1731 | */ | 1492 | */ |
| 1732 | static int _opp_set_availability(struct device *dev, unsigned long freq, | 1493 | static int _opp_set_availability(struct device *dev, unsigned long freq, |
| 1733 | bool availability_req) | 1494 | bool availability_req) |
| 1734 | { | 1495 | { |
| 1735 | struct opp_table *opp_table; | 1496 | struct opp_table *opp_table; |
| 1736 | struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); | 1497 | struct dev_pm_opp *tmp_opp, *opp = ERR_PTR(-ENODEV); |
| 1737 | int r = 0; | 1498 | int r = 0; |
| 1738 | 1499 | ||
| 1739 | /* keep the node allocated */ | ||
| 1740 | new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL); | ||
| 1741 | if (!new_opp) | ||
| 1742 | return -ENOMEM; | ||
| 1743 | |||
| 1744 | mutex_lock(&opp_table_lock); | ||
| 1745 | |||
| 1746 | /* Find the opp_table */ | 1500 | /* Find the opp_table */ |
| 1747 | opp_table = _find_opp_table(dev); | 1501 | opp_table = _find_opp_table(dev); |
| 1748 | if (IS_ERR(opp_table)) { | 1502 | if (IS_ERR(opp_table)) { |
| 1749 | r = PTR_ERR(opp_table); | 1503 | r = PTR_ERR(opp_table); |
| 1750 | dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); | 1504 | dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r); |
| 1751 | goto unlock; | 1505 | return r; |
| 1752 | } | 1506 | } |
| 1753 | 1507 | ||
| 1508 | mutex_lock(&opp_table->lock); | ||
| 1509 | |||
| 1754 | /* Do we have the frequency? */ | 1510 | /* Do we have the frequency? */ |
| 1755 | list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { | 1511 | list_for_each_entry(tmp_opp, &opp_table->opp_list, node) { |
| 1756 | if (tmp_opp->rate == freq) { | 1512 | if (tmp_opp->rate == freq) { |
| @@ -1758,6 +1514,7 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, | |||
| 1758 | break; | 1514 | break; |
| 1759 | } | 1515 | } |
| 1760 | } | 1516 | } |
| 1517 | |||
| 1761 | if (IS_ERR(opp)) { | 1518 | if (IS_ERR(opp)) { |
| 1762 | r = PTR_ERR(opp); | 1519 | r = PTR_ERR(opp); |
| 1763 | goto unlock; | 1520 | goto unlock; |
| @@ -1766,29 +1523,20 @@ static int _opp_set_availability(struct device *dev, unsigned long freq, | |||
| 1766 | /* Is update really needed? */ | 1523 | /* Is update really needed? */ |
| 1767 | if (opp->available == availability_req) | 1524 | if (opp->available == availability_req) |
| 1768 | goto unlock; | 1525 | goto unlock; |
| 1769 | /* copy the old data over */ | ||
| 1770 | *new_opp = *opp; | ||
| 1771 | 1526 | ||
| 1772 | /* plug in new node */ | 1527 | opp->available = availability_req; |
| 1773 | new_opp->available = availability_req; | ||
| 1774 | |||
| 1775 | list_replace_rcu(&opp->node, &new_opp->node); | ||
| 1776 | mutex_unlock(&opp_table_lock); | ||
| 1777 | call_srcu(&opp_table->srcu_head.srcu, &opp->rcu_head, _kfree_opp_rcu); | ||
| 1778 | 1528 | ||
| 1779 | /* Notify the change of the OPP availability */ | 1529 | /* Notify the change of the OPP availability */ |
| 1780 | if (availability_req) | 1530 | if (availability_req) |
| 1781 | srcu_notifier_call_chain(&opp_table->srcu_head, | 1531 | blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ENABLE, |
| 1782 | OPP_EVENT_ENABLE, new_opp); | 1532 | opp); |
| 1783 | else | 1533 | else |
| 1784 | srcu_notifier_call_chain(&opp_table->srcu_head, | 1534 | blocking_notifier_call_chain(&opp_table->head, |
| 1785 | OPP_EVENT_DISABLE, new_opp); | 1535 | OPP_EVENT_DISABLE, opp); |
| 1786 | |||
| 1787 | return 0; | ||
| 1788 | 1536 | ||
| 1789 | unlock: | 1537 | unlock: |
| 1790 | mutex_unlock(&opp_table_lock); | 1538 | mutex_unlock(&opp_table->lock); |
| 1791 | kfree(new_opp); | 1539 | dev_pm_opp_put_opp_table(opp_table); |
| 1792 | return r; | 1540 | return r; |
| 1793 | } | 1541 | } |
| 1794 | 1542 | ||
| @@ -1801,12 +1549,6 @@ unlock: | |||
| 1801 | * corresponding error value. It is meant to be used to make an OPP available | 1549 | * corresponding error value. It is meant to be used to make an OPP available |
| 1802 | * after being temporarily made unavailable with dev_pm_opp_disable. | 1550 | * after being temporarily made unavailable with dev_pm_opp_disable. |
| 1803 | * | 1551 | * |
| 1804 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1805 | * Hence this function indirectly uses RCU and mutex locks to keep the | ||
| 1806 | * integrity of the internal data structures. Callers should ensure that | ||
| 1807 | * this function is *NOT* called under RCU protection or in contexts where | ||
| 1808 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | ||
| 1809 | * | ||
| 1810 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | 1552 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
| 1811 | * copy operation, returns 0 if no modification was done OR modification was | 1553 | * copy operation, returns 0 if no modification was done OR modification was |
| 1812 | * successful. | 1554 | * successful. |
| @@ -1827,12 +1569,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_enable); | |||
| 1827 | * control by users to make this OPP not available until the circumstances are | 1569 | * control by users to make this OPP not available until the circumstances are |
| 1828 | * right to make it available again (with a call to dev_pm_opp_enable). | 1570 | * right to make it available again (with a call to dev_pm_opp_enable). |
| 1829 | * | 1571 | * |
| 1830 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1831 | * Hence this function indirectly uses RCU and mutex locks to keep the | ||
| 1832 | * integrity of the internal data structures. Callers should ensure that | ||
| 1833 | * this function is *NOT* called under RCU protection or in contexts where | ||
| 1834 | * mutex locking or synchronize_rcu() blocking calls cannot be used. | ||
| 1835 | * | ||
| 1836 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the | 1572 | * Return: -EINVAL for bad pointers, -ENOMEM if no memory available for the |
| 1837 | * copy operation, returns 0 if no modification was done OR modification was | 1573 | * copy operation, returns 0 if no modification was done OR modification was |
| 1838 | * successful. | 1574 | * successful. |
| @@ -1844,41 +1580,78 @@ int dev_pm_opp_disable(struct device *dev, unsigned long freq) | |||
| 1844 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); | 1580 | EXPORT_SYMBOL_GPL(dev_pm_opp_disable); |
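Both wrappers funnel into _opp_set_availability() above, which after this change flips opp->available in place under the per-table mutex instead of RCU-replacing the whole node. Typical use (the frequency value is illustrative):

	/* park the 1.2 GHz OPP, e.g. on a thermal event ... */
	ret = dev_pm_opp_disable(dev, 1200000000);

	/* ... and offer it again once conditions allow */
	ret = dev_pm_opp_enable(dev, 1200000000);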
| 1845 | 1581 | ||
| 1846 | /** | 1582 | /** |
| 1847 | * dev_pm_opp_get_notifier() - find notifier_head of the device with opp | 1583 | * dev_pm_opp_register_notifier() - Register OPP notifier for the device |
| 1848 | * @dev: device pointer used to lookup OPP table. | 1584 | * @dev: Device for which notifier needs to be registered |
| 1585 | * @nb: Notifier block to be registered | ||
| 1849 | * | 1586 | * |
| 1850 | * Return: pointer to notifier head if found, otherwise -ENODEV or | 1587 | * Return: 0 on success or a negative error value. |
| 1851 | * -EINVAL based on type of error casted as pointer. value must be checked | 1588 | */ |
| 1852 | * with IS_ERR to determine valid pointer or error result. | 1589 | int dev_pm_opp_register_notifier(struct device *dev, struct notifier_block *nb) |
| 1590 | { | ||
| 1591 | struct opp_table *opp_table; | ||
| 1592 | int ret; | ||
| 1593 | |||
| 1594 | opp_table = _find_opp_table(dev); | ||
| 1595 | if (IS_ERR(opp_table)) | ||
| 1596 | return PTR_ERR(opp_table); | ||
| 1597 | |||
| 1598 | ret = blocking_notifier_chain_register(&opp_table->head, nb); | ||
| 1599 | |||
| 1600 | dev_pm_opp_put_opp_table(opp_table); | ||
| 1601 | |||
| 1602 | return ret; | ||
| 1603 | } | ||
| 1604 | EXPORT_SYMBOL(dev_pm_opp_register_notifier); | ||
| 1605 | |||
| 1606 | /** | ||
| 1607 | * dev_pm_opp_unregister_notifier() - Unregister OPP notifier for the device | ||
| 1608 | * @dev: Device for which notifier needs to be unregistered | ||
| 1609 | * @nb: Notifier block to be unregistered | ||
| 1853 | * | 1610 | * |
| 1854 | * Locking: This function must be called under rcu_read_lock(). opp_table is a | 1611 | * Return: 0 on success or a negative error value. |
| 1855 | * RCU protected pointer. The reason for the same is that the opp pointer which | ||
| 1856 | * is returned will remain valid for use with opp_get_{voltage, freq} only while | ||
| 1857 | * under the locked area. The pointer returned must be used prior to unlocking | ||
| 1858 | * with rcu_read_unlock() to maintain the integrity of the pointer. | ||
| 1859 | */ | 1612 | */ |
| 1860 | struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev) | 1613 | int dev_pm_opp_unregister_notifier(struct device *dev, |
| 1614 | struct notifier_block *nb) | ||
| 1861 | { | 1615 | { |
| 1862 | struct opp_table *opp_table = _find_opp_table(dev); | 1616 | struct opp_table *opp_table; |
| 1617 | int ret; | ||
| 1863 | 1618 | ||
| 1619 | opp_table = _find_opp_table(dev); | ||
| 1864 | if (IS_ERR(opp_table)) | 1620 | if (IS_ERR(opp_table)) |
| 1865 | return ERR_CAST(opp_table); /* matching type */ | 1621 | return PTR_ERR(opp_table); |
| 1622 | |||
| 1623 | ret = blocking_notifier_chain_unregister(&opp_table->head, nb); | ||
| 1866 | 1624 | ||
| 1867 | return &opp_table->srcu_head; | 1625 | dev_pm_opp_put_opp_table(opp_table); |
| 1626 | |||
| 1627 | return ret; | ||
| 1868 | } | 1628 | } |
| 1869 | EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier); | 1629 | EXPORT_SYMBOL(dev_pm_opp_unregister_notifier); |
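Since the head is now a blocking_notifier_head rather than an SRCU one, registered callbacks run in process context and may sleep. A registration sketch (my_opp_notifier and my_opp_nb are hypothetical names):

	static int my_opp_notifier(struct notifier_block *nb, unsigned long event,
				   void *data)
	{
		struct dev_pm_opp *opp = data;

		/* event is e.g. OPP_EVENT_ADD/ENABLE/DISABLE, as raised in this
		 * patch; sleeping is allowed here */
		return NOTIFY_OK;
	}

	static struct notifier_block my_opp_nb = {
		.notifier_call = my_opp_notifier,
	};

	ret = dev_pm_opp_register_notifier(dev, &my_opp_nb);
	/* ... */
	dev_pm_opp_unregister_notifier(dev, &my_opp_nb);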
| 1870 | 1630 | ||
| 1871 | /* | 1631 | /* |
| 1872 | * Free OPPs either created using static entries present in DT or even the | 1632 | * Free OPPs either created using static entries present in DT or even the |
| 1873 | * dynamically added entries based on remove_all param. | 1633 | * dynamically added entries based on remove_all param. |
| 1874 | */ | 1634 | */ |
| 1875 | void _dev_pm_opp_remove_table(struct device *dev, bool remove_all) | 1635 | void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, |
| 1636 | bool remove_all) | ||
| 1876 | { | 1637 | { |
| 1877 | struct opp_table *opp_table; | ||
| 1878 | struct dev_pm_opp *opp, *tmp; | 1638 | struct dev_pm_opp *opp, *tmp; |
| 1879 | 1639 | ||
| 1880 | /* Hold our table modification lock here */ | 1640 | /* Find if opp_table manages a single device */ |
| 1881 | mutex_lock(&opp_table_lock); | 1641 | if (list_is_singular(&opp_table->dev_list)) { |
| 1642 | /* Free static OPPs */ | ||
| 1643 | list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { | ||
| 1644 | if (remove_all || !opp->dynamic) | ||
| 1645 | dev_pm_opp_put(opp); | ||
| 1646 | } | ||
| 1647 | } else { | ||
| 1648 | _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table); | ||
| 1649 | } | ||
| 1650 | } | ||
| 1651 | |||
| 1652 | void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all) | ||
| 1653 | { | ||
| 1654 | struct opp_table *opp_table; | ||
| 1882 | 1655 | ||
| 1883 | /* Check for existing table for 'dev' */ | 1656 | /* Check for existing table for 'dev' */ |
| 1884 | opp_table = _find_opp_table(dev); | 1657 | opp_table = _find_opp_table(dev); |
| @@ -1890,22 +1663,12 @@ void _dev_pm_opp_remove_table(struct device *dev, bool remove_all) | |||
| 1890 | IS_ERR_OR_NULL(dev) ? | 1663 | IS_ERR_OR_NULL(dev) ? |
| 1891 | "Invalid device" : dev_name(dev), | 1664 | "Invalid device" : dev_name(dev), |
| 1892 | error); | 1665 | error); |
| 1893 | goto unlock; | 1666 | return; |
| 1894 | } | 1667 | } |
| 1895 | 1668 | ||
| 1896 | /* Find if opp_table manages a single device */ | 1669 | _dev_pm_opp_remove_table(opp_table, dev, remove_all); |
| 1897 | if (list_is_singular(&opp_table->dev_list)) { | ||
| 1898 | /* Free static OPPs */ | ||
| 1899 | list_for_each_entry_safe(opp, tmp, &opp_table->opp_list, node) { | ||
| 1900 | if (remove_all || !opp->dynamic) | ||
| 1901 | _opp_remove(opp_table, opp, true); | ||
| 1902 | } | ||
| 1903 | } else { | ||
| 1904 | _remove_opp_dev(_find_opp_dev(dev, opp_table), opp_table); | ||
| 1905 | } | ||
| 1906 | 1670 | ||
| 1907 | unlock: | 1671 | dev_pm_opp_put_opp_table(opp_table); |
| 1908 | mutex_unlock(&opp_table_lock); | ||
| 1909 | } | 1672 | } |
| 1910 | 1673 | ||
| 1911 | /** | 1674 | /** |
| @@ -1914,15 +1677,9 @@ unlock: | |||
| 1914 | * | 1677 | * |
| 1915 | * Free both OPPs created using static entries present in DT and the | 1678 | * Free both OPPs created using static entries present in DT and the |
| 1916 | * dynamically added entries. | 1679 | * dynamically added entries. |
| 1917 | * | ||
| 1918 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 1919 | * Hence this function indirectly uses RCU updater strategy with mutex locks | ||
| 1920 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 1921 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 1922 | * mutex cannot be locked. | ||
| 1923 | */ | 1680 | */ |
| 1924 | void dev_pm_opp_remove_table(struct device *dev) | 1681 | void dev_pm_opp_remove_table(struct device *dev) |
| 1925 | { | 1682 | { |
| 1926 | _dev_pm_opp_remove_table(dev, true); | 1683 | _dev_pm_opp_find_and_remove_table(dev, true); |
| 1927 | } | 1684 | } |
| 1928 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); | 1685 | EXPORT_SYMBOL_GPL(dev_pm_opp_remove_table); |
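The split leaves _dev_pm_opp_find_and_remove_table() as the lookup-then-remove wrapper, while _dev_pm_opp_remove_table() serves callers that already hold a table. Driver teardown stays a one-liner; a sketch (my_platform_remove is hypothetical):

	static int my_platform_remove(struct platform_device *pdev)
	{
		/* drop static and dynamic OPPs for this device */
		dev_pm_opp_remove_table(&pdev->dev);
		return 0;
	}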
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c index 8c3434bdb26d..2d87bc1adf38 100644 --- a/drivers/base/power/opp/cpu.c +++ b/drivers/base/power/opp/cpu.c | |||
| @@ -42,11 +42,6 @@ | |||
| 42 | * | 42 | * |
| 43 | * WARNING: It is important for the callers to ensure refreshing their copy of | 43 | * WARNING: It is important for the callers to ensure refreshing their copy of |
| 44 | * the table if any of the mentioned functions have been invoked in the interim. | 44 | * the table if any of the mentioned functions have been invoked in the interim. |
| 45 | * | ||
| 46 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 47 | * Since we just use the regular accessor functions to access the internal data | ||
| 48 | * structures, we use RCU read lock inside this function. As a result, users of | ||
| 49 | * this function DONOT need to use explicit locks for invoking. | ||
| 50 | */ | 45 | */ |
| 51 | int dev_pm_opp_init_cpufreq_table(struct device *dev, | 46 | int dev_pm_opp_init_cpufreq_table(struct device *dev, |
| 52 | struct cpufreq_frequency_table **table) | 47 | struct cpufreq_frequency_table **table) |
| @@ -56,19 +51,13 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, | |||
| 56 | int i, max_opps, ret = 0; | 51 | int i, max_opps, ret = 0; |
| 57 | unsigned long rate; | 52 | unsigned long rate; |
| 58 | 53 | ||
| 59 | rcu_read_lock(); | ||
| 60 | |||
| 61 | max_opps = dev_pm_opp_get_opp_count(dev); | 54 | max_opps = dev_pm_opp_get_opp_count(dev); |
| 62 | if (max_opps <= 0) { | 55 | if (max_opps <= 0) |
| 63 | ret = max_opps ? max_opps : -ENODATA; | 56 | return max_opps ? max_opps : -ENODATA; |
| 64 | goto out; | ||
| 65 | } | ||
| 66 | 57 | ||
| 67 | freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC); | 58 | freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC); |
| 68 | if (!freq_table) { | 59 | if (!freq_table) |
| 69 | ret = -ENOMEM; | 60 | return -ENOMEM; |
| 70 | goto out; | ||
| 71 | } | ||
| 72 | 61 | ||
| 73 | for (i = 0, rate = 0; i < max_opps; i++, rate++) { | 62 | for (i = 0, rate = 0; i < max_opps; i++, rate++) { |
| 74 | /* find next rate */ | 63 | /* find next rate */ |
| @@ -83,6 +72,8 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, | |||
| 83 | /* Is Boost/turbo opp ? */ | 72 | /* Is Boost/turbo opp ? */ |
| 84 | if (dev_pm_opp_is_turbo(opp)) | 73 | if (dev_pm_opp_is_turbo(opp)) |
| 85 | freq_table[i].flags = CPUFREQ_BOOST_FREQ; | 74 | freq_table[i].flags = CPUFREQ_BOOST_FREQ; |
| 75 | |||
| 76 | dev_pm_opp_put(opp); | ||
| 86 | } | 77 | } |
| 87 | 78 | ||
| 88 | freq_table[i].driver_data = i; | 79 | freq_table[i].driver_data = i; |
| @@ -91,7 +82,6 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev, | |||
| 91 | *table = &freq_table[0]; | 82 | *table = &freq_table[0]; |
| 92 | 83 | ||
| 93 | out: | 84 | out: |
| 94 | rcu_read_unlock(); | ||
| 95 | if (ret) | 85 | if (ret) |
| 96 | kfree(freq_table); | 86 | kfree(freq_table); |
| 97 | 87 | ||
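The dev_pm_opp_put(opp) added in the loop above is the visible cost of leaving RCU: every dev_pm_opp_find_*() result is now a counted reference instead of an RCU-protected pointer valid only inside a read-side section. The expected caller pattern, sketched (assuming the usual find/get accessors):

	unsigned long rate = 0;
	struct dev_pm_opp *opp;

	opp = dev_pm_opp_find_freq_ceil(dev, &rate);	/* takes a reference */
	if (IS_ERR(opp))
		return PTR_ERR(opp);

	rate = dev_pm_opp_get_freq(opp);
	dev_pm_opp_put(opp);				/* release when done */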
| @@ -147,12 +137,6 @@ void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of) | |||
| 147 | * This removes the OPP tables for CPUs present in the @cpumask. | 137 | * This removes the OPP tables for CPUs present in the @cpumask. |
| 148 | * This should be used to remove all the OPPs entries associated with | 138 | * This should be used to remove all the OPPs entries associated with |
| 149 | * the cpus in @cpumask. | 139 | * the cpus in @cpumask. |
| 150 | * | ||
| 151 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 152 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 153 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 154 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 155 | * mutex cannot be locked. | ||
| 156 | */ | 140 | */ |
| 157 | void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) | 141 | void dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask) |
| 158 | { | 142 | { |
| @@ -169,12 +153,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_cpumask_remove_table); | |||
| 169 | * @cpumask. | 153 | * @cpumask. |
| 170 | * | 154 | * |
| 171 | * Returns -ENODEV if OPP table isn't already present. | 155 | * Returns -ENODEV if OPP table isn't already present. |
| 172 | * | ||
| 173 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 174 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 175 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 176 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 177 | * mutex cannot be locked. | ||
| 178 | */ | 156 | */ |
| 179 | int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, | 157 | int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, |
| 180 | const struct cpumask *cpumask) | 158 | const struct cpumask *cpumask) |
| @@ -184,13 +162,9 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, | |||
| 184 | struct device *dev; | 162 | struct device *dev; |
| 185 | int cpu, ret = 0; | 163 | int cpu, ret = 0; |
| 186 | 164 | ||
| 187 | mutex_lock(&opp_table_lock); | ||
| 188 | |||
| 189 | opp_table = _find_opp_table(cpu_dev); | 165 | opp_table = _find_opp_table(cpu_dev); |
| 190 | if (IS_ERR(opp_table)) { | 166 | if (IS_ERR(opp_table)) |
| 191 | ret = PTR_ERR(opp_table); | 167 | return PTR_ERR(opp_table); |
| 192 | goto unlock; | ||
| 193 | } | ||
| 194 | 168 | ||
| 195 | for_each_cpu(cpu, cpumask) { | 169 | for_each_cpu(cpu, cpumask) { |
| 196 | if (cpu == cpu_dev->id) | 170 | if (cpu == cpu_dev->id) |
| @@ -213,8 +187,8 @@ int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, | |||
| 213 | /* Mark opp-table as multiple CPUs are sharing it now */ | 187 | /* Mark opp-table as multiple CPUs are sharing it now */ |
| 214 | opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; | 188 | opp_table->shared_opp = OPP_TABLE_ACCESS_SHARED; |
| 215 | } | 189 | } |
| 216 | unlock: | 190 | |
| 217 | mutex_unlock(&opp_table_lock); | 191 | dev_pm_opp_put_opp_table(opp_table); |
| 218 | 192 | ||
| 219 | return ret; | 193 | return ret; |
| 220 | } | 194 | } |
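With the table pinned by _find_opp_table(), the global opp_table_lock is no longer needed here; the function simply drops its reference on exit. A usage sketch for a CPU driver (the CPU number is illustrative):

	struct cpumask shared_cpus;

	cpumask_clear(&shared_cpus);
	cpumask_set_cpu(1, &shared_cpus);	/* illustrative sibling of cpu_dev */

	ret = dev_pm_opp_set_sharing_cpus(cpu_dev, &shared_cpus);
	if (ret)
		return ret;

	/* read the sharing mask back later, if needed */
	ret = dev_pm_opp_get_sharing_cpus(cpu_dev, &shared_cpus);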
| @@ -229,12 +203,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus); | |||
| 229 | * | 203 | * |
| 230 | * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP | 204 | * Returns -ENODEV if OPP table isn't already present and -EINVAL if the OPP |
| 231 | * table's status is access-unknown. | 205 | * table's status is access-unknown. |
| 232 | * | ||
| 233 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 234 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 235 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 236 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 237 | * mutex cannot be locked. | ||
| 238 | */ | 206 | */ |
| 239 | int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) | 207 | int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) |
| 240 | { | 208 | { |
| @@ -242,17 +210,13 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) | |||
| 242 | struct opp_table *opp_table; | 210 | struct opp_table *opp_table; |
| 243 | int ret = 0; | 211 | int ret = 0; |
| 244 | 212 | ||
| 245 | mutex_lock(&opp_table_lock); | ||
| 246 | |||
| 247 | opp_table = _find_opp_table(cpu_dev); | 213 | opp_table = _find_opp_table(cpu_dev); |
| 248 | if (IS_ERR(opp_table)) { | 214 | if (IS_ERR(opp_table)) |
| 249 | ret = PTR_ERR(opp_table); | 215 | return PTR_ERR(opp_table); |
| 250 | goto unlock; | ||
| 251 | } | ||
| 252 | 216 | ||
| 253 | if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) { | 217 | if (opp_table->shared_opp == OPP_TABLE_ACCESS_UNKNOWN) { |
| 254 | ret = -EINVAL; | 218 | ret = -EINVAL; |
| 255 | goto unlock; | 219 | goto put_opp_table; |
| 256 | } | 220 | } |
| 257 | 221 | ||
| 258 | cpumask_clear(cpumask); | 222 | cpumask_clear(cpumask); |
| @@ -264,8 +228,8 @@ int dev_pm_opp_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) | |||
| 264 | cpumask_set_cpu(cpu_dev->id, cpumask); | 228 | cpumask_set_cpu(cpu_dev->id, cpumask); |
| 265 | } | 229 | } |
| 266 | 230 | ||
| 267 | unlock: | 231 | put_opp_table: |
| 268 | mutex_unlock(&opp_table_lock); | 232 | dev_pm_opp_put_opp_table(opp_table); |
| 269 | 233 | ||
| 270 | return ret; | 234 | return ret; |
| 271 | } | 235 | } |
diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c index 3f7d2591b173..779428676f63 100644 --- a/drivers/base/power/opp/of.c +++ b/drivers/base/power/opp/of.c | |||
| @@ -24,9 +24,11 @@ | |||
| 24 | 24 | ||
| 25 | static struct opp_table *_managed_opp(const struct device_node *np) | 25 | static struct opp_table *_managed_opp(const struct device_node *np) |
| 26 | { | 26 | { |
| 27 | struct opp_table *opp_table; | 27 | struct opp_table *opp_table, *managed_table = NULL; |
| 28 | |||
| 29 | mutex_lock(&opp_table_lock); | ||
| 28 | 30 | ||
| 29 | list_for_each_entry_rcu(opp_table, &opp_tables, node) { | 31 | list_for_each_entry(opp_table, &opp_tables, node) { |
| 30 | if (opp_table->np == np) { | 32 | if (opp_table->np == np) { |
| 31 | /* | 33 | /* |
| 32 | * Multiple devices can point to the same OPP table and | 34 | * Multiple devices can point to the same OPP table and |
| @@ -35,14 +37,18 @@ static struct opp_table *_managed_opp(const struct device_node *np) | |||
| 35 | * But the OPPs will be considered as shared only if the | 37 | * But the OPPs will be considered as shared only if the |
| 36 | * OPP table contains a "opp-shared" property. | 38 | * OPP table contains a "opp-shared" property. |
| 37 | */ | 39 | */ |
| 38 | if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) | 40 | if (opp_table->shared_opp == OPP_TABLE_ACCESS_SHARED) { |
| 39 | return opp_table; | 41 | _get_opp_table_kref(opp_table); |
| 42 | managed_table = opp_table; | ||
| 43 | } | ||
| 40 | 44 | ||
| 41 | return NULL; | 45 | break; |
| 42 | } | 46 | } |
| 43 | } | 47 | } |
| 44 | 48 | ||
| 45 | return NULL; | 49 | mutex_unlock(&opp_table_lock); |
| 50 | |||
| 51 | return managed_table; | ||
| 46 | } | 52 | } |
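_managed_opp() now takes the kref while still holding opp_table_lock, so the pointer it returns cannot be freed before the caller's matching dev_pm_opp_put_opp_table(). The helper is presumably a thin wrapper along these lines (a sketch, not quoted from this patch, assuming the 'kref' member this series adds to struct opp_table):

	void _get_opp_table_kref(struct opp_table *opp_table)
	{
		kref_get(&opp_table->kref);
	}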
| 47 | 53 | ||
| 48 | void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) | 54 | void _of_init_opp_table(struct opp_table *opp_table, struct device *dev) |
| @@ -229,34 +235,28 @@ free_microvolt: | |||
| 229 | * @dev: device pointer used to lookup OPP table. | 235 | * @dev: device pointer used to lookup OPP table. |
| 230 | * | 236 | * |
| 231 | * Free OPPs created using static entries present in DT. | 237 | * Free OPPs created using static entries present in DT. |
| 232 | * | ||
| 233 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 234 | * Hence this function indirectly uses RCU updater strategy with mutex locks | ||
| 235 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 236 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 237 | * mutex cannot be locked. | ||
| 238 | */ | 238 | */ |
| 239 | void dev_pm_opp_of_remove_table(struct device *dev) | 239 | void dev_pm_opp_of_remove_table(struct device *dev) |
| 240 | { | 240 | { |
| 241 | _dev_pm_opp_remove_table(dev, false); | 241 | _dev_pm_opp_find_and_remove_table(dev, false); |
| 242 | } | 242 | } |
| 243 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); | 243 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table); |
| 244 | 244 | ||
| 245 | /* Returns opp descriptor node for a device, caller must do of_node_put() */ | 245 | /* Returns opp descriptor node for a device, caller must do of_node_put() */ |
| 246 | static struct device_node *_of_get_opp_desc_node(struct device *dev) | 246 | struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev) |
| 247 | { | 247 | { |
| 248 | /* | 248 | /* |
| 249 | * TODO: Support for multiple OPP tables. | ||
| 250 | * | ||
| 251 | * There should be only ONE phandle present in "operating-points-v2" | 249 | * There should be only ONE phandle present in "operating-points-v2" |
| 252 | * property. | 250 | * property. |
| 253 | */ | 251 | */ |
| 254 | 252 | ||
| 255 | return of_parse_phandle(dev->of_node, "operating-points-v2", 0); | 253 | return of_parse_phandle(dev->of_node, "operating-points-v2", 0); |
| 256 | } | 254 | } |
| 255 | EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_opp_desc_node); | ||
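Exposing the descriptor-node lookup lets platform code inspect the operating-points-v2 table directly; the of_parse_phandle() result is a counted node reference, so callers must put it, as the comment above says. A sketch:

	struct device_node *np;

	np = dev_pm_opp_of_get_opp_desc_node(dev);
	if (np) {
		/* examine the OPP table node, e.g. for platform properties */
		of_node_put(np);	/* caller owns the reference */
	}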
| 257 | 256 | ||
| 258 | /** | 257 | /** |
| 259 | * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) | 258 | * _opp_add_static_v2() - Allocate static OPPs (As per 'v2' DT bindings) |
| 259 | * @opp_table: OPP table | ||
| 260 | * @dev: device for which we do this operation | 260 | * @dev: device for which we do this operation |
| 261 | * @np: device node | 261 | * @np: device node |
| 262 | * | 262 | * |
| @@ -264,12 +264,6 @@ static struct device_node *_of_get_opp_desc_node(struct device *dev) | |||
| 264 | * opp can be controlled using dev_pm_opp_enable/disable functions and may be | 264 | * opp can be controlled using dev_pm_opp_enable/disable functions and may be |
| 265 | * removed by dev_pm_opp_remove. | 265 | * removed by dev_pm_opp_remove. |
| 266 | * | 266 | * |
| 267 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 268 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 269 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 270 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 271 | * mutex cannot be locked. | ||
| 272 | * | ||
| 273 | * Return: | 267 | * Return: |
| 274 | * 0 On success OR | 268 | * 0 On success OR |
| 275 | * Duplicate OPPs (both freq and volt are same) and opp->available | 269 | * Duplicate OPPs (both freq and volt are same) and opp->available |
| @@ -278,22 +272,17 @@ static struct device_node *_of_get_opp_desc_node(struct device *dev) | |||
| 278 | * -ENOMEM Memory allocation failure | 272 | * -ENOMEM Memory allocation failure |
| 279 | * -EINVAL Failed parsing the OPP node | 273 | * -EINVAL Failed parsing the OPP node |
| 280 | */ | 274 | */ |
| 281 | static int _opp_add_static_v2(struct device *dev, struct device_node *np) | 275 | static int _opp_add_static_v2(struct opp_table *opp_table, struct device *dev, |
| 276 | struct device_node *np) | ||
| 282 | { | 277 | { |
| 283 | struct opp_table *opp_table; | ||
| 284 | struct dev_pm_opp *new_opp; | 278 | struct dev_pm_opp *new_opp; |
| 285 | u64 rate; | 279 | u64 rate; |
| 286 | u32 val; | 280 | u32 val; |
| 287 | int ret; | 281 | int ret; |
| 288 | 282 | ||
| 289 | /* Hold our table modification lock here */ | 283 | new_opp = _opp_allocate(opp_table); |
| 290 | mutex_lock(&opp_table_lock); | 284 | if (!new_opp) |
| 291 | 285 | return -ENOMEM; | |
| 292 | new_opp = _allocate_opp(dev, &opp_table); | ||
| 293 | if (!new_opp) { | ||
| 294 | ret = -ENOMEM; | ||
| 295 | goto unlock; | ||
| 296 | } | ||
| 297 | 286 | ||
| 298 | ret = of_property_read_u64(np, "opp-hz", &rate); | 287 | ret = of_property_read_u64(np, "opp-hz", &rate); |
| 299 | if (ret < 0) { | 288 | if (ret < 0) { |
| @@ -327,8 +316,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) | |||
| 327 | goto free_opp; | 316 | goto free_opp; |
| 328 | 317 | ||
| 329 | ret = _opp_add(dev, new_opp, opp_table); | 318 | ret = _opp_add(dev, new_opp, opp_table); |
| 330 | if (ret) | 319 | if (ret) { |
| 320 | /* Don't return error for duplicate OPPs */ | ||
| 321 | if (ret == -EBUSY) | ||
| 322 | ret = 0; | ||
| 331 | goto free_opp; | 323 | goto free_opp; |
| 324 | } | ||
| 332 | 325 | ||
| 333 | /* OPP to select on device suspend */ | 326 | /* OPP to select on device suspend */ |
| 334 | if (of_property_read_bool(np, "opp-suspend")) { | 327 | if (of_property_read_bool(np, "opp-suspend")) { |
| @@ -345,8 +338,6 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) | |||
| 345 | if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max) | 338 | if (new_opp->clock_latency_ns > opp_table->clock_latency_ns_max) |
| 346 | opp_table->clock_latency_ns_max = new_opp->clock_latency_ns; | 339 | opp_table->clock_latency_ns_max = new_opp->clock_latency_ns; |
| 347 | 340 | ||
| 348 | mutex_unlock(&opp_table_lock); | ||
| 349 | |||
| 350 | pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", | 341 | pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", |
| 351 | __func__, new_opp->turbo, new_opp->rate, | 342 | __func__, new_opp->turbo, new_opp->rate, |
| 352 | new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min, | 343 | new_opp->supplies[0].u_volt, new_opp->supplies[0].u_volt_min, |
| @@ -356,13 +347,12 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) | |||
| 356 | * Notify the changes in the availability of the operable | 347 | * Notify the changes in the availability of the operable |
| 357 | * frequency/voltage list. | 348 | * frequency/voltage list. |
| 358 | */ | 349 | */ |
| 359 | srcu_notifier_call_chain(&opp_table->srcu_head, OPP_EVENT_ADD, new_opp); | 350 | blocking_notifier_call_chain(&opp_table->head, OPP_EVENT_ADD, new_opp); |
| 360 | return 0; | 351 | return 0; |
| 361 | 352 | ||
| 362 | free_opp: | 353 | free_opp: |
| 363 | _opp_remove(opp_table, new_opp, false); | 354 | _opp_free(new_opp); |
| 364 | unlock: | 355 | |
| 365 | mutex_unlock(&opp_table_lock); | ||
| 366 | return ret; | 356 | return ret; |
| 367 | } | 357 | } |
| 368 | 358 | ||
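
Editor's note: the hunk above also swaps the SRCU notifier chain for a plain blocking notifier (`srcu_notifier_call_chain()` → `blocking_notifier_call_chain()`), which becomes possible once readers no longer sit under RCU. A minimal consumer sketch, assuming the `dev_pm_opp_register_notifier()`/`dev_pm_opp_unregister_notifier()` helpers that wrap this chain; `my_opp_notifier` and `my_opp_nb` are hypothetical names:

```c
#include <linux/notifier.h>
#include <linux/pm_opp.h>

/* Hypothetical consumer callback; @data is the affected dev_pm_opp. */
static int my_opp_notifier(struct notifier_block *nb, unsigned long event,
			   void *data)
{
	switch (event) {
	case OPP_EVENT_ADD:
	case OPP_EVENT_REMOVE:
	case OPP_EVENT_ENABLE:
	case OPP_EVENT_DISABLE:
		/* Safe to sleep here: the chain is now a blocking
		 * (mutex-based) notifier rather than an SRCU one. */
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block my_opp_nb = {
	.notifier_call = my_opp_notifier,
};

/* Register/unregister against the device's OPP table:        */
/*   ret = dev_pm_opp_register_notifier(dev, &my_opp_nb);     */
/*   ...                                                      */
/*   dev_pm_opp_unregister_notifier(dev, &my_opp_nb);         */
```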
| @@ -373,41 +363,35 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) | |||
| 373 | struct opp_table *opp_table; | 363 | struct opp_table *opp_table; |
| 374 | int ret = 0, count = 0; | 364 | int ret = 0, count = 0; |
| 375 | 365 | ||
| 376 | mutex_lock(&opp_table_lock); | ||
| 377 | |||
| 378 | opp_table = _managed_opp(opp_np); | 366 | opp_table = _managed_opp(opp_np); |
| 379 | if (opp_table) { | 367 | if (opp_table) { |
| 380 | /* OPPs are already managed */ | 368 | /* OPPs are already managed */ |
| 381 | if (!_add_opp_dev(dev, opp_table)) | 369 | if (!_add_opp_dev(dev, opp_table)) |
| 382 | ret = -ENOMEM; | 370 | ret = -ENOMEM; |
| 383 | mutex_unlock(&opp_table_lock); | 371 | goto put_opp_table; |
| 384 | return ret; | ||
| 385 | } | 372 | } |
| 386 | mutex_unlock(&opp_table_lock); | 373 | |
| 374 | opp_table = dev_pm_opp_get_opp_table(dev); | ||
| 375 | if (!opp_table) | ||
| 376 | return -ENOMEM; | ||
| 387 | 377 | ||
| 388 | /* We have opp-table node now, iterate over it and add OPPs */ | 378 | /* We have opp-table node now, iterate over it and add OPPs */ |
| 389 | for_each_available_child_of_node(opp_np, np) { | 379 | for_each_available_child_of_node(opp_np, np) { |
| 390 | count++; | 380 | count++; |
| 391 | 381 | ||
| 392 | ret = _opp_add_static_v2(dev, np); | 382 | ret = _opp_add_static_v2(opp_table, dev, np); |
| 393 | if (ret) { | 383 | if (ret) { |
| 394 | dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, | 384 | dev_err(dev, "%s: Failed to add OPP, %d\n", __func__, |
| 395 | ret); | 385 | ret); |
| 396 | goto free_table; | 386 | _dev_pm_opp_remove_table(opp_table, dev, false); |
| 387 | goto put_opp_table; | ||
| 397 | } | 388 | } |
| 398 | } | 389 | } |
| 399 | 390 | ||
| 400 | /* There should be one or more OPPs defined */ | 391 | /* There should be one or more OPPs defined */ |
| 401 | if (WARN_ON(!count)) | 392 | if (WARN_ON(!count)) { |
| 402 | return -ENOENT; | 393 | ret = -ENOENT; |
| 403 | 394 | goto put_opp_table; | |
| 404 | mutex_lock(&opp_table_lock); | ||
| 405 | |||
| 406 | opp_table = _find_opp_table(dev); | ||
| 407 | if (WARN_ON(IS_ERR(opp_table))) { | ||
| 408 | ret = PTR_ERR(opp_table); | ||
| 409 | mutex_unlock(&opp_table_lock); | ||
| 410 | goto free_table; | ||
| 411 | } | 395 | } |
| 412 | 396 | ||
| 413 | opp_table->np = opp_np; | 397 | opp_table->np = opp_np; |
| @@ -416,12 +400,8 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np) | |||
| 416 | else | 400 | else |
| 417 | opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE; | 401 | opp_table->shared_opp = OPP_TABLE_ACCESS_EXCLUSIVE; |
| 418 | 402 | ||
| 419 | mutex_unlock(&opp_table_lock); | 403 | put_opp_table: |
| 420 | 404 | dev_pm_opp_put_opp_table(opp_table); | |
| 421 | return 0; | ||
| 422 | |||
| 423 | free_table: | ||
| 424 | dev_pm_opp_of_remove_table(dev); | ||
| 425 | 405 | ||
| 426 | return ret; | 406 | return ret; |
| 427 | } | 407 | } |
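
Editor's note: with RCU gone, table lifetime is tied to a kref here. `dev_pm_opp_get_opp_table()` finds or creates the table with a reference held, and every exit path drops it through the single `put_opp_table` label. A sketch of the same pattern from a caller's perspective; `example_populate_opps` is a hypothetical name and the OPP-adding step is elided:

```c
#include <linux/device.h>
#include <linux/pm_opp.h>

/* Hypothetical caller, mirroring the single-exit pattern above. */
static int example_populate_opps(struct device *dev)
{
	struct opp_table *opp_table;
	int ret = 0;

	/* Find-or-create the table; returns with a reference held. */
	opp_table = dev_pm_opp_get_opp_table(dev);
	if (!opp_table)
		return -ENOMEM;

	/* ... add OPPs against opp_table here, setting ret on failure ... */

	/* Drop the reference on every path, success or failure. */
	dev_pm_opp_put_opp_table(opp_table);

	return ret;
}
```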
| @@ -429,9 +409,10 @@ free_table: | |||
| 429 | /* Initializes OPP tables based on old-deprecated bindings */ | 409 | /* Initializes OPP tables based on old-deprecated bindings */ |
| 430 | static int _of_add_opp_table_v1(struct device *dev) | 410 | static int _of_add_opp_table_v1(struct device *dev) |
| 431 | { | 411 | { |
| 412 | struct opp_table *opp_table; | ||
| 432 | const struct property *prop; | 413 | const struct property *prop; |
| 433 | const __be32 *val; | 414 | const __be32 *val; |
| 434 | int nr; | 415 | int nr, ret = 0; |
| 435 | 416 | ||
| 436 | prop = of_find_property(dev->of_node, "operating-points", NULL); | 417 | prop = of_find_property(dev->of_node, "operating-points", NULL); |
| 437 | if (!prop) | 418 | if (!prop) |
| @@ -449,18 +430,27 @@ static int _of_add_opp_table_v1(struct device *dev) | |||
| 449 | return -EINVAL; | 430 | return -EINVAL; |
| 450 | } | 431 | } |
| 451 | 432 | ||
| 433 | opp_table = dev_pm_opp_get_opp_table(dev); | ||
| 434 | if (!opp_table) | ||
| 435 | return -ENOMEM; | ||
| 436 | |||
| 452 | val = prop->value; | 437 | val = prop->value; |
| 453 | while (nr) { | 438 | while (nr) { |
| 454 | unsigned long freq = be32_to_cpup(val++) * 1000; | 439 | unsigned long freq = be32_to_cpup(val++) * 1000; |
| 455 | unsigned long volt = be32_to_cpup(val++); | 440 | unsigned long volt = be32_to_cpup(val++); |
| 456 | 441 | ||
| 457 | if (_opp_add_v1(dev, freq, volt, false)) | 442 | ret = _opp_add_v1(opp_table, dev, freq, volt, false); |
| 458 | dev_warn(dev, "%s: Failed to add OPP %ld\n", | 443 | if (ret) { |
| 459 | __func__, freq); | 444 | dev_err(dev, "%s: Failed to add OPP %ld (%d)\n", |
| 445 | __func__, freq, ret); | ||
| 446 | _dev_pm_opp_remove_table(opp_table, dev, false); | ||
| 447 | break; | ||
| 448 | } | ||
| 460 | nr -= 2; | 449 | nr -= 2; |
| 461 | } | 450 | } |
| 462 | 451 | ||
| 463 | return 0; | 452 | dev_pm_opp_put_opp_table(opp_table); |
| 453 | return ret; | ||
| 464 | } | 454 | } |
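
Editor's note: the v1 parser now fails hard on the first bad entry (previously it only warned and carried on) and passes the table into `_opp_add_v1()`, the same helper that appears to back the public `dev_pm_opp_add()` API in this series. A hedged sketch of a driver adding equivalent pairs dynamically; `my_opps` and `example_add_opps` are hypothetical:

```c
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

/* Hypothetical OPPs: frequency in Hz and voltage in uV, i.e. the same
 * values the deprecated <kHz uV> pairs yield after the *1000 scaling. */
static const struct { unsigned long freq; unsigned long u_volt; } my_opps[] = {
	{  500000000,  900000 },
	{ 1000000000, 1100000 },
};

static int example_add_opps(struct device *dev)
{
	int i, ret;

	for (i = 0; i < ARRAY_SIZE(my_opps); i++) {
		ret = dev_pm_opp_add(dev, my_opps[i].freq, my_opps[i].u_volt);
		if (ret) {
			dev_err(dev, "failed to add OPP %lu (%d)\n",
				my_opps[i].freq, ret);
			return ret;
		}
	}

	return 0;
}
```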
| 465 | 455 | ||
| 466 | /** | 456 | /** |
| @@ -469,12 +459,6 @@ static int _of_add_opp_table_v1(struct device *dev) | |||
| 469 | * | 459 | * |
| 470 | * Register the initial OPP table with the OPP library for given device. | 460 | * Register the initial OPP table with the OPP library for given device. |
| 471 | * | 461 | * |
| 472 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 473 | * Hence this function indirectly uses RCU updater strategy with mutex locks | ||
| 474 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 475 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 476 | * mutex cannot be locked. | ||
| 477 | * | ||
| 478 | * Return: | 462 | * Return: |
| 479 | * 0 On success OR | 463 | * 0 On success OR |
| 480 | * Duplicate OPPs (both freq and volt are same) and opp->available | 464 | * Duplicate OPPs (both freq and volt are same) and opp->available |
| @@ -495,7 +479,7 @@ int dev_pm_opp_of_add_table(struct device *dev) | |||
| 495 | * OPPs have two versions of bindings now. The older one is deprecated, | 479 | * OPPs have two versions of bindings now. The older one is deprecated, |
| 496 | * try for the new binding first. | 480 | * try for the new binding first. |
| 497 | */ | 481 | */ |
| 498 | opp_np = _of_get_opp_desc_node(dev); | 482 | opp_np = dev_pm_opp_of_get_opp_desc_node(dev); |
| 499 | if (!opp_np) { | 483 | if (!opp_np) { |
| 500 | /* | 484 | /* |
| 501 | * Try old-deprecated bindings for backward compatibility with | 485 | * Try old-deprecated bindings for backward compatibility with |
| @@ -519,12 +503,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table); | |||
| 519 | * | 503 | * |
| 520 | * This removes the OPP tables for CPUs present in the @cpumask. | 504 | * This removes the OPP tables for CPUs present in the @cpumask. |
| 521 | * This should be used only to remove static entries created from DT. | 505 | * This should be used only to remove static entries created from DT. |
| 522 | * | ||
| 523 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 524 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 525 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 526 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 527 | * mutex cannot be locked. | ||
| 528 | */ | 506 | */ |
| 529 | void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) | 507 | void dev_pm_opp_of_cpumask_remove_table(const struct cpumask *cpumask) |
| 530 | { | 508 | { |
| @@ -537,12 +515,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table); | |||
| 537 | * @cpumask: cpumask for which OPP table needs to be added. | 515 | * @cpumask: cpumask for which OPP table needs to be added. |
| 538 | * | 516 | * |
| 539 | * This adds the OPP tables for CPUs present in the @cpumask. | 517 | * This adds the OPP tables for CPUs present in the @cpumask. |
| 540 | * | ||
| 541 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 542 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 543 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 544 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 545 | * mutex cannot be locked. | ||
| 546 | */ | 518 | */ |
| 547 | int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) | 519 | int dev_pm_opp_of_cpumask_add_table(const struct cpumask *cpumask) |
| 548 | { | 520 | { |
| @@ -590,12 +562,6 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table); | |||
| 590 | * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. | 562 | * This updates the @cpumask with CPUs that are sharing OPPs with @cpu_dev. |
| 591 | * | 563 | * |
| 592 | * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev. | 564 | * Returns -ENOENT if operating-points-v2 isn't present for @cpu_dev. |
| 593 | * | ||
| 594 | * Locking: The internal opp_table and opp structures are RCU protected. | ||
| 595 | * Hence this function internally uses RCU updater strategy with mutex locks | ||
| 596 | * to keep the integrity of the internal data structures. Callers should ensure | ||
| 597 | * that this function is *NOT* called under RCU protection or in contexts where | ||
| 598 | * mutex cannot be locked. | ||
| 599 | */ | 565 | */ |
| 600 | int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, | 566 | int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, |
| 601 | struct cpumask *cpumask) | 567 | struct cpumask *cpumask) |
| @@ -605,7 +571,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, | |||
| 605 | int cpu, ret = 0; | 571 | int cpu, ret = 0; |
| 606 | 572 | ||
| 607 | /* Get OPP descriptor node */ | 573 | /* Get OPP descriptor node */ |
| 608 | np = _of_get_opp_desc_node(cpu_dev); | 574 | np = dev_pm_opp_of_get_opp_desc_node(cpu_dev); |
| 609 | if (!np) { | 575 | if (!np) { |
| 610 | dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__); | 576 | dev_dbg(cpu_dev, "%s: Couldn't find opp node.\n", __func__); |
| 611 | return -ENOENT; | 577 | return -ENOENT; |
| @@ -630,7 +596,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, | |||
| 630 | } | 596 | } |
| 631 | 597 | ||
| 632 | /* Get OPP descriptor node */ | 598 | /* Get OPP descriptor node */ |
| 633 | tmp_np = _of_get_opp_desc_node(tcpu_dev); | 599 | tmp_np = dev_pm_opp_of_get_opp_desc_node(tcpu_dev); |
| 634 | if (!tmp_np) { | 600 | if (!tmp_np) { |
| 635 | dev_err(tcpu_dev, "%s: Couldn't find opp node.\n", | 601 | dev_err(tcpu_dev, "%s: Couldn't find opp node.\n", |
| 636 | __func__); | 602 | __func__); |
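
Editor's note: the call sites above switch from the private `_of_get_opp_desc_node()` to the exported `dev_pm_opp_of_get_opp_desc_node()`, so code outside the OPP core can resolve a device's operating-points-v2 node as well. A minimal sketch, assuming the usual `of_node_put()` balance on the returned node; `example_has_opp_v2_table` is a hypothetical name:

```c
#include <linux/device.h>
#include <linux/of.h>
#include <linux/pm_opp.h>

/* Hypothetical check for an operating-points-v2 table on @dev. */
static bool example_has_opp_v2_table(struct device *dev)
{
	struct device_node *np;

	np = dev_pm_opp_of_get_opp_desc_node(dev);
	if (!np)
		return false;

	/* Balance the reference taken on the returned node. */
	of_node_put(np);

	return true;
}
```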
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h index af9f2b849a66..166eef990599 100644 --- a/drivers/base/power/opp/opp.h +++ b/drivers/base/power/opp/opp.h | |||
| @@ -16,11 +16,11 @@ | |||
| 16 | 16 | ||
| 17 | #include <linux/device.h> | 17 | #include <linux/device.h> |
| 18 | #include <linux/kernel.h> | 18 | #include <linux/kernel.h> |
| 19 | #include <linux/kref.h> | ||
| 19 | #include <linux/list.h> | 20 | #include <linux/list.h> |
| 20 | #include <linux/limits.h> | 21 | #include <linux/limits.h> |
| 21 | #include <linux/pm_opp.h> | 22 | #include <linux/pm_opp.h> |
| 22 | #include <linux/rculist.h> | 23 | #include <linux/notifier.h> |
| 23 | #include <linux/rcupdate.h> | ||
| 24 | 24 | ||
| 25 | struct clk; | 25 | struct clk; |
| 26 | struct regulator; | 26 | struct regulator; |
| @@ -51,11 +51,9 @@ extern struct list_head opp_tables; | |||
| 51 | * @node: opp table node. The nodes are maintained throughout the lifetime | 51 | * @node: opp table node. The nodes are maintained throughout the lifetime |
| 52 | * of boot. It is expected only an optimal set of OPPs are | 52 | * of boot. It is expected only an optimal set of OPPs are |
| 53 | * added to the library by the SoC framework. | 53 | * added to the library by the SoC framework. |
| 54 | * RCU usage: opp table is traversed with RCU locks. node | ||
| 55 | * modification is possible realtime, hence the modifications | ||
| 56 | * are protected by the opp_table_lock for integrity. | ||
| 57 | * IMPORTANT: the opp nodes should be maintained in increasing | 54 | * IMPORTANT: the opp nodes should be maintained in increasing |
| 58 | * order. | 55 | * order. |
| 56 | * @kref: for reference count of the OPP. | ||
| 59 | * @available: true/false - marks if this OPP as available or not | 57 | * @available: true/false - marks if this OPP as available or not |
| 60 | * @dynamic: not-created from static DT entries. | 58 | * @dynamic: not-created from static DT entries. |
| 61 | * @turbo: true if turbo (boost) OPP | 59 | * @turbo: true if turbo (boost) OPP |
| @@ -65,7 +63,6 @@ extern struct list_head opp_tables; | |||
| 65 | * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's | 63 | * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's |
| 66 | * frequency from any other OPP's frequency. | 64 | * frequency from any other OPP's frequency. |
| 67 | * @opp_table: points back to the opp_table struct this opp belongs to | 65 | * @opp_table: points back to the opp_table struct this opp belongs to |
| 68 | * @rcu_head: RCU callback head used for deferred freeing | ||
| 69 | * @np: OPP's device node. | 66 | * @np: OPP's device node. |
| 70 | * @dentry: debugfs dentry pointer (per opp) | 67 | * @dentry: debugfs dentry pointer (per opp) |
| 71 | * | 68 | * |
| @@ -73,6 +70,7 @@ extern struct list_head opp_tables; | |||
| 73 | */ | 70 | */ |
| 74 | struct dev_pm_opp { | 71 | struct dev_pm_opp { |
| 75 | struct list_head node; | 72 | struct list_head node; |
| 73 | struct kref kref; | ||
| 76 | 74 | ||
| 77 | bool available; | 75 | bool available; |
| 78 | bool dynamic; | 76 | bool dynamic; |
| @@ -85,7 +83,6 @@ struct dev_pm_opp { | |||
| 85 | unsigned long clock_latency_ns; | 83 | unsigned long clock_latency_ns; |
| 86 | 84 | ||
| 87 | struct opp_table *opp_table; | 85 | struct opp_table *opp_table; |
| 88 | struct rcu_head rcu_head; | ||
| 89 | 86 | ||
| 90 | struct device_node *np; | 87 | struct device_node *np; |
| 91 | 88 | ||
| @@ -98,7 +95,6 @@ struct dev_pm_opp { | |||
| 98 | * struct opp_device - devices managed by 'struct opp_table' | 95 | * struct opp_device - devices managed by 'struct opp_table' |
| 99 | * @node: list node | 96 | * @node: list node |
| 100 | * @dev: device to which the struct object belongs | 97 | * @dev: device to which the struct object belongs |
| 101 | * @rcu_head: RCU callback head used for deferred freeing | ||
| 102 | * @dentry: debugfs dentry pointer (per device) | 98 | * @dentry: debugfs dentry pointer (per device) |
| 103 | * | 99 | * |
| 104 | * This is an internal data structure maintaining the devices that are managed | 100 | * This is an internal data structure maintaining the devices that are managed |
| @@ -107,7 +103,6 @@ struct dev_pm_opp { | |||
| 107 | struct opp_device { | 103 | struct opp_device { |
| 108 | struct list_head node; | 104 | struct list_head node; |
| 109 | const struct device *dev; | 105 | const struct device *dev; |
| 110 | struct rcu_head rcu_head; | ||
| 111 | 106 | ||
| 112 | #ifdef CONFIG_DEBUG_FS | 107 | #ifdef CONFIG_DEBUG_FS |
| 113 | struct dentry *dentry; | 108 | struct dentry *dentry; |
| @@ -125,12 +120,11 @@ enum opp_table_access { | |||
| 125 | * @node: table node - contains the devices with OPPs that | 120 | * @node: table node - contains the devices with OPPs that |
| 126 | * have been registered. Nodes once added are not modified in this | 121 | * have been registered. Nodes once added are not modified in this |
| 127 | * table. | 122 | * table. |
| 128 | * RCU usage: nodes are not modified in the table of opp_table, | 123 | * @head: notifier head to notify the OPP availability changes. |
| 129 | * however addition is possible and is secured by opp_table_lock | ||
| 130 | * @srcu_head: notifier head to notify the OPP availability changes. | ||
| 131 | * @rcu_head: RCU callback head used for deferred freeing | ||
| 132 | * @dev_list: list of devices that share these OPPs | 124 | * @dev_list: list of devices that share these OPPs |
| 133 | * @opp_list: table of opps | 125 | * @opp_list: table of opps |
| 126 | * @kref: for reference count of the table. | ||
| 127 | * @lock: mutex protecting the opp_list. | ||
| 134 | * @np: struct device_node pointer for opp's DT node. | 128 | * @np: struct device_node pointer for opp's DT node. |
| 135 | * @clock_latency_ns_max: Max clock latency in nanoseconds. | 129 | * @clock_latency_ns_max: Max clock latency in nanoseconds. |
| 136 | * @shared_opp: OPP is shared between multiple devices. | 130 | * @shared_opp: OPP is shared between multiple devices. |
| @@ -151,18 +145,15 @@ enum opp_table_access { | |||
| 151 | * This is an internal data structure maintaining the link to opps attached to | 145 | * This is an internal data structure maintaining the link to opps attached to |
| 152 | * a device. This structure is not meant to be shared to users as it is | 146 | * a device. This structure is not meant to be shared to users as it is |
| 153 | * meant for book keeping and private to OPP library. | 147 | * meant for book keeping and private to OPP library. |
| 154 | * | ||
| 155 | * Because the opp structures can be used from both rcu and srcu readers, we | ||
| 156 | * need to wait for the grace period of both of them before freeing any | ||
| 157 | * resources. And so we have used kfree_rcu() from within call_srcu() handlers. | ||
| 158 | */ | 148 | */ |
| 159 | struct opp_table { | 149 | struct opp_table { |
| 160 | struct list_head node; | 150 | struct list_head node; |
| 161 | 151 | ||
| 162 | struct srcu_notifier_head srcu_head; | 152 | struct blocking_notifier_head head; |
| 163 | struct rcu_head rcu_head; | ||
| 164 | struct list_head dev_list; | 153 | struct list_head dev_list; |
| 165 | struct list_head opp_list; | 154 | struct list_head opp_list; |
| 155 | struct kref kref; | ||
| 156 | struct mutex lock; | ||
| 166 | 157 | ||
| 167 | struct device_node *np; | 158 | struct device_node *np; |
| 168 | unsigned long clock_latency_ns_max; | 159 | unsigned long clock_latency_ns_max; |
| @@ -190,14 +181,17 @@ struct opp_table { | |||
| 190 | }; | 181 | }; |
| 191 | 182 | ||
| 192 | /* Routines internal to opp core */ | 183 | /* Routines internal to opp core */ |
| 184 | void _get_opp_table_kref(struct opp_table *opp_table); | ||
| 193 | struct opp_table *_find_opp_table(struct device *dev); | 185 | struct opp_table *_find_opp_table(struct device *dev); |
| 194 | struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table); | 186 | struct opp_device *_add_opp_dev(const struct device *dev, struct opp_table *opp_table); |
| 195 | void _dev_pm_opp_remove_table(struct device *dev, bool remove_all); | 187 | void _dev_pm_opp_remove_table(struct opp_table *opp_table, struct device *dev, bool remove_all); |
| 196 | struct dev_pm_opp *_allocate_opp(struct device *dev, struct opp_table **opp_table); | 188 | void _dev_pm_opp_find_and_remove_table(struct device *dev, bool remove_all); |
| 189 | struct dev_pm_opp *_opp_allocate(struct opp_table *opp_table); | ||
| 190 | void _opp_free(struct dev_pm_opp *opp); | ||
| 197 | int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table); | 191 | int _opp_add(struct device *dev, struct dev_pm_opp *new_opp, struct opp_table *opp_table); |
| 198 | void _opp_remove(struct opp_table *opp_table, struct dev_pm_opp *opp, bool notify); | 192 | int _opp_add_v1(struct opp_table *opp_table, struct device *dev, unsigned long freq, long u_volt, bool dynamic); |
| 199 | int _opp_add_v1(struct device *dev, unsigned long freq, long u_volt, bool dynamic); | ||
| 200 | void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of); | 193 | void _dev_pm_opp_cpumask_remove_table(const struct cpumask *cpumask, bool of); |
| 194 | struct opp_table *_add_opp_table(struct device *dev); | ||
| 201 | 195 | ||
| 202 | #ifdef CONFIG_OF | 196 | #ifdef CONFIG_OF |
| 203 | void _of_init_opp_table(struct opp_table *opp_table, struct device *dev); | 197 | void _of_init_opp_table(struct opp_table *opp_table, struct device *dev); |
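
Editor's note: the header now pairs each `struct opp_table` with a `kref` and a per-table `mutex` in place of the RCU/SRCU machinery, exposing `_get_opp_table_kref()` for the lookup paths. A sketch of the release side this implies, assuming the standard `kref_put_mutex()` pattern against the global `opp_table_lock`; the body of `_opp_table_kref_release()` is an assumption, not necessarily the exact implementation:

```c
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

/* Hypothetical release helper: kref_put_mutex() acquires opp_table_lock
 * before invoking it, and the release must drop that lock itself. */
static void _opp_table_kref_release(struct kref *kref)
{
	struct opp_table *opp_table = container_of(kref, struct opp_table, kref);

	/* Last reference is gone: unlink from the global list and free. */
	list_del(&opp_table->node);
	mutex_destroy(&opp_table->lock);
	kfree(opp_table);

	mutex_unlock(&opp_table_lock);
}

void dev_pm_opp_put_opp_table(struct opp_table *opp_table)
{
	kref_put_mutex(&opp_table->kref, _opp_table_kref_release,
		       &opp_table_lock);
}
```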
