author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2014-12-17 19:43:16 -0500
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2014-12-17 19:43:16 -0500
commit     437574c9cb523d9f329f22fd04877cdde793d1d5 (patch)
tree       2b9f4d0960b24bbf302872f4f8bf04b2df26233d
parent     7c1ac18dc02c105a199167ecc495944bd0e14d5a (diff)
parent     b4718c02f49ab5e1452353f0fae78beabe81467c (diff)
Merge branch 'pm-opp' into pm-cpufreq
-rw-r--r--  drivers/base/power/opp.c | 117
1 file changed, 75 insertions(+), 42 deletions(-)
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 2d195f3a1998..106c69359306 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -84,7 +84,11 @@ struct dev_pm_opp {
  *
  * This is an internal data structure maintaining the link to opps attached to
  * a device. This structure is not meant to be shared to users as it is
- * meant for book keeping and private to OPP library
+ * meant for book keeping and private to OPP library.
+ *
+ * Because the opp structures can be used from both rcu and srcu readers, we
+ * need to wait for the grace period of both of them before freeing any
+ * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
  */
 struct device_opp {
 	struct list_head node;
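The new comment documents the freeing rule this series relies on: an OPP object can be reached both by RCU readers (the opp lists) and by SRCU readers (the notifier chain), so it may only be kfree()d after both grace periods have elapsed. A minimal sketch of that two-stage free, mirroring the kfree_device_rcu() change later in this diff; the call_srcu() queueing line is an assumption inferred from the comment, not code shown here:

/* Sketch only: free an object that is visible to both SRCU and RCU readers. */
static void example_srcu_free_cb(struct rcu_head *head)
{
	struct device_opp *dev_opp = container_of(head, struct device_opp, rcu_head);

	/* The SRCU grace period has elapsed; now wait out plain RCU readers too. */
	kfree_rcu(dev_opp, rcu_head);
}

/*
 * Assumed queueing site (per the comment above), reusing the same rcu_head:
 *	call_srcu(&dev_opp->srcu_head.srcu, &dev_opp->rcu_head, example_srcu_free_cb);
 */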
@@ -104,6 +108,14 @@ static LIST_HEAD(dev_opp_list);
 /* Lock to allow exclusive modification to the device and opp lists */
 static DEFINE_MUTEX(dev_opp_list_lock);
 
+#define opp_rcu_lockdep_assert()					\
+do {									\
+	rcu_lockdep_assert(rcu_read_lock_held() ||			\
+				lockdep_is_held(&dev_opp_list_lock),	\
+			   "Missing rcu_read_lock() or "		\
+			   "dev_opp_list_lock protection");		\
+} while (0)
+
 /**
  * find_device_opp() - find device_opp struct using device pointer
  * @dev: device pointer used to lookup device OPPs
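The new opp_rcu_lockdep_assert() macro turns the documented locking rule into a runtime check: OPP lookups must run under rcu_read_lock() or with dev_opp_list_lock held. A hedged caller-side sketch of the pattern the assertion expects (the helper name and rate are illustrative):

#include <linux/pm_opp.h>
#include <linux/rcupdate.h>

/* Sketch only: an OPP lookup done under the RCU read lock. */
static unsigned long example_volt_for_rate(struct device *dev, unsigned long rate)
{
	struct dev_pm_opp *opp;
	unsigned long volt = 0;

	rcu_read_lock();		/* what opp_rcu_lockdep_assert() checks for */
	opp = dev_pm_opp_find_freq_ceil(dev, &rate);
	if (!IS_ERR(opp))
		volt = dev_pm_opp_get_voltage(opp);
	rcu_read_unlock();		/* opp must not be dereferenced after this */

	return volt;
}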
@@ -204,9 +216,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  * This function returns the number of available opps if there are any,
  * else returns 0 if none or the corresponding error value.
  *
- * Locking: This function must be called under rcu_read_lock(). This function
- * internally references two RCU protected structures: device_opp and opp which
- * are safe as long as we are under a common RCU locked section.
+ * Locking: This function takes rcu_read_lock().
  */
 int dev_pm_opp_get_opp_count(struct device *dev)
 {
@@ -214,11 +224,14 @@ int dev_pm_opp_get_opp_count(struct device *dev)
 	struct dev_pm_opp *temp_opp;
 	int count = 0;
 
+	rcu_read_lock();
+
 	dev_opp = find_device_opp(dev);
 	if (IS_ERR(dev_opp)) {
-		int r = PTR_ERR(dev_opp);
-		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
-		return r;
+		count = PTR_ERR(dev_opp);
+		dev_err(dev, "%s: device OPP not found (%d)\n",
+			__func__, count);
+		goto out_unlock;
 	}
 
 	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
@@ -226,6 +239,8 @@ int dev_pm_opp_get_opp_count(struct device *dev)
 			count++;
 	}
 
+out_unlock:
+	rcu_read_unlock();
 	return count;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
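Since dev_pm_opp_get_opp_count() now takes rcu_read_lock() itself and funnels errors through the return value, callers no longer need their own RCU protection and should treat a negative result as an error code. A hedged usage sketch:

/* Sketch only: counting OPPs after this change; no caller-side RCU lock needed. */
static int example_count_opps(struct device *dev)
{
	int count = dev_pm_opp_get_opp_count(dev);

	if (count < 0)		/* e.g. no OPP table registered for this device */
		return count;

	dev_info(dev, "%d OPPs available\n", count);
	return count;
}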
@@ -263,6 +278,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
 	struct device_opp *dev_opp;
 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+	opp_rcu_lockdep_assert();
+
 	dev_opp = find_device_opp(dev);
 	if (IS_ERR(dev_opp)) {
 		int r = PTR_ERR(dev_opp);
@@ -309,6 +326,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
 	struct device_opp *dev_opp;
 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+	opp_rcu_lockdep_assert();
+
 	if (!dev || !freq) {
 		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
 		return ERR_PTR(-EINVAL);
@@ -357,6 +376,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 	struct device_opp *dev_opp;
 	struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+	opp_rcu_lockdep_assert();
+
 	if (!dev || !freq) {
 		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
 		return ERR_PTR(-EINVAL);
@@ -382,12 +403,34 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
 
+static struct device_opp *add_device_opp(struct device *dev)
+{
+	struct device_opp *dev_opp;
+
+	/*
+	 * Allocate a new device OPP table. In the infrequent case where a new
+	 * device is needed to be added, we pay this penalty.
+	 */
+	dev_opp = kzalloc(sizeof(*dev_opp), GFP_KERNEL);
+	if (!dev_opp)
+		return NULL;
+
+	dev_opp->dev = dev;
+	srcu_init_notifier_head(&dev_opp->srcu_head);
+	INIT_LIST_HEAD(&dev_opp->opp_list);
+
+	/* Secure the device list modification */
+	list_add_rcu(&dev_opp->node, &dev_opp_list);
+	return dev_opp;
+}
+
 static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
 				  unsigned long u_volt, bool dynamic)
 {
 	struct device_opp *dev_opp = NULL;
 	struct dev_pm_opp *opp, *new_opp;
 	struct list_head *head;
+	int ret;
 
 	/* allocate new OPP node */
 	new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
@@ -400,7 +443,6 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
 	mutex_lock(&dev_opp_list_lock);
 
 	/* populate the opp table */
-	new_opp->dev_opp = dev_opp;
 	new_opp->rate = freq;
 	new_opp->u_volt = u_volt;
 	new_opp->available = true;
@@ -409,27 +451,12 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
 	/* Check for existing list for 'dev' */
 	dev_opp = find_device_opp(dev);
 	if (IS_ERR(dev_opp)) {
-		/*
-		 * Allocate a new device OPP table. In the infrequent case
-		 * where a new device is needed to be added, we pay this
-		 * penalty.
-		 */
-		dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
+		dev_opp = add_device_opp(dev);
 		if (!dev_opp) {
-			mutex_unlock(&dev_opp_list_lock);
-			kfree(new_opp);
-			dev_warn(dev,
-				 "%s: Unable to create device OPP structure\n",
-				 __func__);
-			return -ENOMEM;
+			ret = -ENOMEM;
+			goto free_opp;
 		}
 
-		dev_opp->dev = dev;
-		srcu_init_notifier_head(&dev_opp->srcu_head);
-		INIT_LIST_HEAD(&dev_opp->opp_list);
-
-		/* Secure the device list modification */
-		list_add_rcu(&dev_opp->node, &dev_opp_list);
 		head = &dev_opp->opp_list;
 		goto list_add;
 	}
@@ -448,18 +475,17 @@ static int dev_pm_opp_add_dynamic(struct device *dev, unsigned long freq,
 
 	/* Duplicate OPPs ? */
 	if (new_opp->rate == opp->rate) {
-		int ret = opp->available && new_opp->u_volt == opp->u_volt ?
+		ret = opp->available && new_opp->u_volt == opp->u_volt ?
 			0 : -EEXIST;
 
 		dev_warn(dev, "%s: duplicate OPPs detected. Existing: freq: %lu, volt: %lu, enabled: %d. New: freq: %lu, volt: %lu, enabled: %d\n",
 			 __func__, opp->rate, opp->u_volt, opp->available,
 			 new_opp->rate, new_opp->u_volt, new_opp->available);
-		mutex_unlock(&dev_opp_list_lock);
-		kfree(new_opp);
-		return ret;
+		goto free_opp;
 	}
 
 list_add:
+	new_opp->dev_opp = dev_opp;
 	list_add_rcu(&new_opp->node, head);
 	mutex_unlock(&dev_opp_list_lock);
 
@@ -469,6 +495,11 @@ list_add:
 	 */
 	srcu_notifier_call_chain(&dev_opp->srcu_head, OPP_EVENT_ADD, new_opp);
 	return 0;
+
+free_opp:
+	mutex_unlock(&dev_opp_list_lock);
+	kfree(new_opp);
+	return ret;
 }
 
 /**
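Taken together, the dev_pm_opp_add_dynamic() hunks above replace the open-coded table allocation with add_device_opp() and funnel every error exit through the single free_opp label. A hedged sketch of a caller of the public dev_pm_opp_add() entry point, which is expected to land in this path (frequencies and voltages are illustrative):

/* Sketch only: registering OPPs; the first call for a device creates its
 * device_opp table via add_device_opp(). */
static int example_register_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_add(dev, 500000000, 900000);	/* 500 MHz @ 0.9 V */
	if (ret)
		return ret;

	/* A duplicate rate with a different voltage would fail with -EEXIST,
	 * per the duplicate-OPP check above. */
	return dev_pm_opp_add(dev, 1000000000, 1100000);	/* 1 GHz @ 1.1 V */
}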
@@ -511,10 +542,11 @@ static void kfree_device_rcu(struct rcu_head *head)
 {
 	struct device_opp *device_opp = container_of(head, struct device_opp, rcu_head);
 
-	kfree(device_opp);
+	kfree_rcu(device_opp, rcu_head);
 }
 
-void __dev_pm_opp_remove(struct device_opp *dev_opp, struct dev_pm_opp *opp)
+static void __dev_pm_opp_remove(struct device_opp *dev_opp,
+				struct dev_pm_opp *opp)
 {
 	/*
 	 * Notify the changes in the availability of the operable
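kfree_device_rcu() is the SRCU callback used when a device_opp goes away, and switching it to kfree_rcu() implements the double-grace-period rule described at the top of the file. The consumers of that notifier chain are the "srcu readers" the comment mentions; a hedged sketch of such a listener, using dev_pm_opp_get_notifier() from the existing OPP API (the callback and variable names are illustrative):

/* Sketch only: listening for OPP add/remove events on the per-device SRCU chain. */
static int example_opp_notifier(struct notifier_block *nb, unsigned long event,
				void *data)
{
	if (event == OPP_EVENT_ADD || event == OPP_EVENT_REMOVE)
		pr_debug("OPP list changed (event %lu)\n", event);
	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_opp_notifier,
};

static int example_register_listener(struct device *dev)
{
	struct srcu_notifier_head *nh = dev_pm_opp_get_notifier(dev);

	if (IS_ERR(nh))
		return PTR_ERR(nh);
	return srcu_notifier_chain_register(nh, &example_nb);
}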
@@ -592,7 +624,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_remove);
 static int opp_set_availability(struct device *dev, unsigned long freq,
 				bool availability_req)
 {
-	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
+	struct device_opp *dev_opp;
 	struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
 	int r = 0;
 
@@ -606,12 +638,7 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
 	mutex_lock(&dev_opp_list_lock);
 
 	/* Find the device_opp */
-	list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
-		if (dev == tmp_dev_opp->dev) {
-			dev_opp = tmp_dev_opp;
-			break;
-		}
-	}
+	dev_opp = find_device_opp(dev);
 	if (IS_ERR(dev_opp)) {
 		r = PTR_ERR(dev_opp);
 		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
@@ -768,14 +795,20 @@ EXPORT_SYMBOL_GPL(of_init_opp_table);
  */
 void of_free_opp_table(struct device *dev)
 {
-	struct device_opp *dev_opp = find_device_opp(dev);
+	struct device_opp *dev_opp;
 	struct dev_pm_opp *opp, *tmp;
 
 	/* Check for existing list for 'dev' */
 	dev_opp = find_device_opp(dev);
-	if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
-		 PTR_ERR(dev_opp)))
+	if (IS_ERR(dev_opp)) {
+		int error = PTR_ERR(dev_opp);
+
+		if (error != -ENODEV)
+			WARN(1, "%s: dev_opp: %d\n",
+			     IS_ERR_OR_NULL(dev) ?
+					"Invalid device" : dev_name(dev),
+			     error);
 		return;
+	}
 
 	/* Hold our list modification lock here */
 	mutex_lock(&dev_opp_list_lock);
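With the reworked check, of_free_opp_table() only warns about genuinely unexpected errors; a missing table (-ENODEV) is ignored, so teardown paths can call it unconditionally. A hedged sketch of such a caller (the function name is illustrative):

/* Sketch only: unconditional OPP cleanup in a driver's teardown path. Safe
 * even if of_init_opp_table() was never called or failed, since -ENODEV no
 * longer triggers a WARN. */
static void example_teardown(struct device *dev)
{
	of_free_opp_table(dev);
	/* ... remaining device teardown ... */
}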