Diffstat (limited to 'drivers/base')

 -rw-r--r--  drivers/base/bus.c               82
 -rw-r--r--  drivers/base/class.c             29
 -rw-r--r--  drivers/base/core.c              88
 -rw-r--r--  drivers/base/cpu.c               39
 -rw-r--r--  drivers/base/dd.c                 2
 -rw-r--r--  drivers/base/devres.c            31
 -rw-r--r--  drivers/base/devtmpfs.c           6
 -rw-r--r--  drivers/base/dma-contiguous.c     2
 -rw-r--r--  drivers/base/firmware_class.c    38
 -rw-r--r--  drivers/base/platform.c          21
 -rw-r--r--  drivers/base/power/main.c        80
 -rw-r--r--  drivers/base/power/opp.c        115
 -rw-r--r--  drivers/base/power/runtime.c      5

 13 files changed, 247 insertions(+), 291 deletions(-)
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 4c289ab91357..73f6c2925281 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -591,37 +591,6 @@ void bus_remove_device(struct device *dev)
591 bus_put(dev->bus); 591 bus_put(dev->bus);
592} 592}
593 593
594static int driver_add_attrs(struct bus_type *bus, struct device_driver *drv)
595{
596 int error = 0;
597 int i;
598
599 if (bus->drv_attrs) {
600 for (i = 0; bus->drv_attrs[i].attr.name; i++) {
601 error = driver_create_file(drv, &bus->drv_attrs[i]);
602 if (error)
603 goto err;
604 }
605 }
606done:
607 return error;
608err:
609 while (--i >= 0)
610 driver_remove_file(drv, &bus->drv_attrs[i]);
611 goto done;
612}
613
614static void driver_remove_attrs(struct bus_type *bus,
615 struct device_driver *drv)
616{
617 int i;
618
619 if (bus->drv_attrs) {
620 for (i = 0; bus->drv_attrs[i].attr.name; i++)
621 driver_remove_file(drv, &bus->drv_attrs[i]);
622 }
623}
624
625static int __must_check add_bind_files(struct device_driver *drv) 594static int __must_check add_bind_files(struct device_driver *drv)
626{ 595{
627 int ret; 596 int ret;
@@ -720,16 +689,12 @@ int bus_add_driver(struct device_driver *drv)
720 printk(KERN_ERR "%s: uevent attr (%s) failed\n", 689 printk(KERN_ERR "%s: uevent attr (%s) failed\n",
721 __func__, drv->name); 690 __func__, drv->name);
722 } 691 }
723 error = driver_add_attrs(bus, drv); 692 error = driver_add_groups(drv, bus->drv_groups);
724 if (error) { 693 if (error) {
725 /* How the hell do we get out of this pickle? Give up */ 694 /* How the hell do we get out of this pickle? Give up */
726 printk(KERN_ERR "%s: driver_add_attrs(%s) failed\n",
727 __func__, drv->name);
728 }
729 error = driver_add_groups(drv, bus->drv_groups);
730 if (error)
731 printk(KERN_ERR "%s: driver_create_groups(%s) failed\n", 695 printk(KERN_ERR "%s: driver_create_groups(%s) failed\n",
732 __func__, drv->name); 696 __func__, drv->name);
697 }
733 698
734 if (!drv->suppress_bind_attrs) { 699 if (!drv->suppress_bind_attrs) {
735 error = add_bind_files(drv); 700 error = add_bind_files(drv);
@@ -766,7 +731,6 @@ void bus_remove_driver(struct device_driver *drv)
766 731
767 if (!drv->suppress_bind_attrs) 732 if (!drv->suppress_bind_attrs)
768 remove_bind_files(drv); 733 remove_bind_files(drv);
769 driver_remove_attrs(drv->bus, drv);
770 driver_remove_groups(drv, drv->bus->drv_groups); 734 driver_remove_groups(drv, drv->bus->drv_groups);
771 driver_remove_file(drv, &driver_attr_uevent); 735 driver_remove_file(drv, &driver_attr_uevent);
772 klist_remove(&drv->p->knode_bus); 736 klist_remove(&drv->p->knode_bus);
@@ -846,42 +810,6 @@ struct bus_type *find_bus(char *name)
846} 810}
847#endif /* 0 */ 811#endif /* 0 */
848 812
849
850/**
851 * bus_add_attrs - Add default attributes for this bus.
852 * @bus: Bus that has just been registered.
853 */
854
855static int bus_add_attrs(struct bus_type *bus)
856{
857 int error = 0;
858 int i;
859
860 if (bus->bus_attrs) {
861 for (i = 0; bus->bus_attrs[i].attr.name; i++) {
862 error = bus_create_file(bus, &bus->bus_attrs[i]);
863 if (error)
864 goto err;
865 }
866 }
867done:
868 return error;
869err:
870 while (--i >= 0)
871 bus_remove_file(bus, &bus->bus_attrs[i]);
872 goto done;
873}
874
875static void bus_remove_attrs(struct bus_type *bus)
876{
877 int i;
878
879 if (bus->bus_attrs) {
880 for (i = 0; bus->bus_attrs[i].attr.name; i++)
881 bus_remove_file(bus, &bus->bus_attrs[i]);
882 }
883}
884
885static int bus_add_groups(struct bus_type *bus, 813static int bus_add_groups(struct bus_type *bus,
886 const struct attribute_group **groups) 814 const struct attribute_group **groups)
887{ 815{
@@ -983,9 +911,6 @@ int bus_register(struct bus_type *bus)
983 if (retval) 911 if (retval)
984 goto bus_probe_files_fail; 912 goto bus_probe_files_fail;
985 913
986 retval = bus_add_attrs(bus);
987 if (retval)
988 goto bus_attrs_fail;
989 retval = bus_add_groups(bus, bus->bus_groups); 914 retval = bus_add_groups(bus, bus->bus_groups);
990 if (retval) 915 if (retval)
991 goto bus_groups_fail; 916 goto bus_groups_fail;
@@ -994,8 +919,6 @@ int bus_register(struct bus_type *bus)
994 return 0; 919 return 0;
995 920
996bus_groups_fail: 921bus_groups_fail:
997 bus_remove_attrs(bus);
998bus_attrs_fail:
999 remove_probe_files(bus); 922 remove_probe_files(bus);
1000bus_probe_files_fail: 923bus_probe_files_fail:
1001 kset_unregister(bus->p->drivers_kset); 924 kset_unregister(bus->p->drivers_kset);
@@ -1024,7 +947,6 @@ void bus_unregister(struct bus_type *bus)
1024 pr_debug("bus: '%s': unregistering\n", bus->name); 947 pr_debug("bus: '%s': unregistering\n", bus->name);
1025 if (bus->dev_root) 948 if (bus->dev_root)
1026 device_unregister(bus->dev_root); 949 device_unregister(bus->dev_root);
1027 bus_remove_attrs(bus);
1028 bus_remove_groups(bus, bus->bus_groups); 950 bus_remove_groups(bus, bus->bus_groups);
1029 remove_probe_files(bus); 951 remove_probe_files(bus);
1030 kset_unregister(bus->p->drivers_kset); 952 kset_unregister(bus->p->drivers_kset);
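
The bus.c hunks above drop the open-coded bus_attrs/drv_attrs loops in favour of the attribute-group helpers, so buses are expected to publish their default files through .bus_groups/.drv_groups. As a rough, hypothetical illustration of the consumer side (the "demo" names are invented, not from this patch), a bus that used to fill .drv_attrs would now do something like:

#include <linux/device.h>

static ssize_t demo_show(struct device_driver *drv, char *buf)
{
	return sprintf(buf, "%s\n", drv->name);
}
static DRIVER_ATTR_RO(demo);

static struct attribute *demo_drv_attrs[] = {
	&driver_attr_demo.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo_drv);		/* generates demo_drv_groups[] */

static struct bus_type demo_bus_type = {
	.name	    = "demo",
	.drv_groups = demo_drv_groups,
};

The NULL-terminated attribute array stays the single point of declaration; ATTRIBUTE_GROUPS() builds the group array that bus_add_driver() now walks via driver_add_groups().
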
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 8b7818b80056..f96f70419a78 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -47,18 +47,6 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
47 return ret; 47 return ret;
48} 48}
49 49
50static const void *class_attr_namespace(struct kobject *kobj,
51 const struct attribute *attr)
52{
53 struct class_attribute *class_attr = to_class_attr(attr);
54 struct subsys_private *cp = to_subsys_private(kobj);
55 const void *ns = NULL;
56
57 if (class_attr->namespace)
58 ns = class_attr->namespace(cp->class, class_attr);
59 return ns;
60}
61
62static void class_release(struct kobject *kobj) 50static void class_release(struct kobject *kobj)
63{ 51{
64 struct subsys_private *cp = to_subsys_private(kobj); 52 struct subsys_private *cp = to_subsys_private(kobj);
@@ -86,7 +74,6 @@ static const struct kobj_ns_type_operations *class_child_ns_type(struct kobject
86static const struct sysfs_ops class_sysfs_ops = { 74static const struct sysfs_ops class_sysfs_ops = {
87 .show = class_attr_show, 75 .show = class_attr_show,
88 .store = class_attr_store, 76 .store = class_attr_store,
89 .namespace = class_attr_namespace,
90}; 77};
91 78
92static struct kobj_type class_ktype = { 79static struct kobj_type class_ktype = {
@@ -99,21 +86,23 @@ static struct kobj_type class_ktype = {
99static struct kset *class_kset; 86static struct kset *class_kset;
100 87
101 88
102int class_create_file(struct class *cls, const struct class_attribute *attr) 89int class_create_file_ns(struct class *cls, const struct class_attribute *attr,
90 const void *ns)
103{ 91{
104 int error; 92 int error;
105 if (cls) 93 if (cls)
106 error = sysfs_create_file(&cls->p->subsys.kobj, 94 error = sysfs_create_file_ns(&cls->p->subsys.kobj,
107 &attr->attr); 95 &attr->attr, ns);
108 else 96 else
109 error = -EINVAL; 97 error = -EINVAL;
110 return error; 98 return error;
111} 99}
112 100
113void class_remove_file(struct class *cls, const struct class_attribute *attr) 101void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
102 const void *ns)
114{ 103{
115 if (cls) 104 if (cls)
116 sysfs_remove_file(&cls->p->subsys.kobj, &attr->attr); 105 sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
117} 106}
118 107
119static struct class *class_get(struct class *cls) 108static struct class *class_get(struct class *cls)
@@ -600,8 +589,8 @@ int __init classes_init(void)
600 return 0; 589 return 0;
601} 590}
602 591
603EXPORT_SYMBOL_GPL(class_create_file); 592EXPORT_SYMBOL_GPL(class_create_file_ns);
604EXPORT_SYMBOL_GPL(class_remove_file); 593EXPORT_SYMBOL_GPL(class_remove_file_ns);
605EXPORT_SYMBOL_GPL(class_unregister); 594EXPORT_SYMBOL_GPL(class_unregister);
606EXPORT_SYMBOL_GPL(class_destroy); 595EXPORT_SYMBOL_GPL(class_destroy);
607 596
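
Existing class_create_file()/class_remove_file() callers are unaffected by the rename: the non-namespaced entry points survive as thin header wrappers that pass a NULL tag. A sketch of what those wrappers look like (along the lines of include/linux/device.h, not text from this diff):

static inline int class_create_file(struct class *class,
				    const struct class_attribute *attr)
{
	return class_create_file_ns(class, attr, NULL);
}

static inline void class_remove_file(struct class *class,
				     const struct class_attribute *attr)
{
	class_remove_file_ns(class, attr, NULL);
}
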
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 34abf4d8a45f..67b180d855b2 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -455,64 +455,6 @@ static ssize_t online_store(struct device *dev, struct device_attribute *attr,
455} 455}
456static DEVICE_ATTR_RW(online); 456static DEVICE_ATTR_RW(online);
457 457
458static int device_add_attributes(struct device *dev,
459 struct device_attribute *attrs)
460{
461 int error = 0;
462 int i;
463
464 if (attrs) {
465 for (i = 0; attrs[i].attr.name; i++) {
466 error = device_create_file(dev, &attrs[i]);
467 if (error)
468 break;
469 }
470 if (error)
471 while (--i >= 0)
472 device_remove_file(dev, &attrs[i]);
473 }
474 return error;
475}
476
477static void device_remove_attributes(struct device *dev,
478 struct device_attribute *attrs)
479{
480 int i;
481
482 if (attrs)
483 for (i = 0; attrs[i].attr.name; i++)
484 device_remove_file(dev, &attrs[i]);
485}
486
487static int device_add_bin_attributes(struct device *dev,
488 struct bin_attribute *attrs)
489{
490 int error = 0;
491 int i;
492
493 if (attrs) {
494 for (i = 0; attrs[i].attr.name; i++) {
495 error = device_create_bin_file(dev, &attrs[i]);
496 if (error)
497 break;
498 }
499 if (error)
500 while (--i >= 0)
501 device_remove_bin_file(dev, &attrs[i]);
502 }
503 return error;
504}
505
506static void device_remove_bin_attributes(struct device *dev,
507 struct bin_attribute *attrs)
508{
509 int i;
510
511 if (attrs)
512 for (i = 0; attrs[i].attr.name; i++)
513 device_remove_bin_file(dev, &attrs[i]);
514}
515
516int device_add_groups(struct device *dev, const struct attribute_group **groups) 458int device_add_groups(struct device *dev, const struct attribute_group **groups)
517{ 459{
518 return sysfs_create_groups(&dev->kobj, groups); 460 return sysfs_create_groups(&dev->kobj, groups);
@@ -534,18 +476,12 @@ static int device_add_attrs(struct device *dev)
534 error = device_add_groups(dev, class->dev_groups); 476 error = device_add_groups(dev, class->dev_groups);
535 if (error) 477 if (error)
536 return error; 478 return error;
537 error = device_add_attributes(dev, class->dev_attrs);
538 if (error)
539 goto err_remove_class_groups;
540 error = device_add_bin_attributes(dev, class->dev_bin_attrs);
541 if (error)
542 goto err_remove_class_attrs;
543 } 479 }
544 480
545 if (type) { 481 if (type) {
546 error = device_add_groups(dev, type->groups); 482 error = device_add_groups(dev, type->groups);
547 if (error) 483 if (error)
548 goto err_remove_class_bin_attrs; 484 goto err_remove_class_groups;
549 } 485 }
550 486
551 error = device_add_groups(dev, dev->groups); 487 error = device_add_groups(dev, dev->groups);
@@ -563,12 +499,6 @@ static int device_add_attrs(struct device *dev)
563 err_remove_type_groups: 499 err_remove_type_groups:
564 if (type) 500 if (type)
565 device_remove_groups(dev, type->groups); 501 device_remove_groups(dev, type->groups);
566 err_remove_class_bin_attrs:
567 if (class)
568 device_remove_bin_attributes(dev, class->dev_bin_attrs);
569 err_remove_class_attrs:
570 if (class)
571 device_remove_attributes(dev, class->dev_attrs);
572 err_remove_class_groups: 502 err_remove_class_groups:
573 if (class) 503 if (class)
574 device_remove_groups(dev, class->dev_groups); 504 device_remove_groups(dev, class->dev_groups);
@@ -587,11 +517,8 @@ static void device_remove_attrs(struct device *dev)
587 if (type) 517 if (type)
588 device_remove_groups(dev, type->groups); 518 device_remove_groups(dev, type->groups);
589 519
590 if (class) { 520 if (class)
591 device_remove_attributes(dev, class->dev_attrs);
592 device_remove_bin_attributes(dev, class->dev_bin_attrs);
593 device_remove_groups(dev, class->dev_groups); 521 device_remove_groups(dev, class->dev_groups);
594 }
595} 522}
596 523
597static ssize_t dev_show(struct device *dev, struct device_attribute *attr, 524static ssize_t dev_show(struct device *dev, struct device_attribute *attr,
@@ -1881,6 +1808,7 @@ EXPORT_SYMBOL_GPL(device_destroy);
1881 */ 1808 */
1882int device_rename(struct device *dev, const char *new_name) 1809int device_rename(struct device *dev, const char *new_name)
1883{ 1810{
1811 struct kobject *kobj = &dev->kobj;
1884 char *old_device_name = NULL; 1812 char *old_device_name = NULL;
1885 int error; 1813 int error;
1886 1814
@@ -1888,8 +1816,7 @@ int device_rename(struct device *dev, const char *new_name)
1888 if (!dev) 1816 if (!dev)
1889 return -EINVAL; 1817 return -EINVAL;
1890 1818
1891 pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev), 1819 dev_dbg(dev, "renaming to %s\n", new_name);
1892 __func__, new_name);
1893 1820
1894 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL); 1821 old_device_name = kstrdup(dev_name(dev), GFP_KERNEL);
1895 if (!old_device_name) { 1822 if (!old_device_name) {
@@ -1898,13 +1825,14 @@ int device_rename(struct device *dev, const char *new_name)
1898 } 1825 }
1899 1826
1900 if (dev->class) { 1827 if (dev->class) {
1901 error = sysfs_rename_link(&dev->class->p->subsys.kobj, 1828 error = sysfs_rename_link_ns(&dev->class->p->subsys.kobj,
1902 &dev->kobj, old_device_name, new_name); 1829 kobj, old_device_name,
1830 new_name, kobject_namespace(kobj));
1903 if (error) 1831 if (error)
1904 goto out; 1832 goto out;
1905 } 1833 }
1906 1834
1907 error = kobject_rename(&dev->kobj, new_name); 1835 error = kobject_rename(kobj, new_name);
1908 if (error) 1836 if (error)
1909 goto out; 1837 goto out;
1910 1838
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 848ebbd25717..f48370dfc908 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -44,13 +44,11 @@ static int __ref cpu_subsys_online(struct device *dev)
44 struct cpu *cpu = container_of(dev, struct cpu, dev); 44 struct cpu *cpu = container_of(dev, struct cpu, dev);
45 int cpuid = dev->id; 45 int cpuid = dev->id;
46 int from_nid, to_nid; 46 int from_nid, to_nid;
47 int ret = -ENODEV; 47 int ret;
48
49 cpu_hotplug_driver_lock();
50 48
51 from_nid = cpu_to_node(cpuid); 49 from_nid = cpu_to_node(cpuid);
52 if (from_nid == NUMA_NO_NODE) 50 if (from_nid == NUMA_NO_NODE)
53 goto out; 51 return -ENODEV;
54 52
55 ret = cpu_up(cpuid); 53 ret = cpu_up(cpuid);
56 /* 54 /*
@@ -61,19 +59,12 @@ static int __ref cpu_subsys_online(struct device *dev)
61 if (from_nid != to_nid) 59 if (from_nid != to_nid)
62 change_cpu_under_node(cpu, from_nid, to_nid); 60 change_cpu_under_node(cpu, from_nid, to_nid);
63 61
64 out:
65 cpu_hotplug_driver_unlock();
66 return ret; 62 return ret;
67} 63}
68 64
69static int cpu_subsys_offline(struct device *dev) 65static int cpu_subsys_offline(struct device *dev)
70{ 66{
71 int ret; 67 return cpu_down(dev->id);
72
73 cpu_hotplug_driver_lock();
74 ret = cpu_down(dev->id);
75 cpu_hotplug_driver_unlock();
76 return ret;
77} 68}
78 69
79void unregister_cpu(struct cpu *cpu) 70void unregister_cpu(struct cpu *cpu)
@@ -93,7 +84,17 @@ static ssize_t cpu_probe_store(struct device *dev,
93 const char *buf, 84 const char *buf,
94 size_t count) 85 size_t count)
95{ 86{
96 return arch_cpu_probe(buf, count); 87 ssize_t cnt;
88 int ret;
89
90 ret = lock_device_hotplug_sysfs();
91 if (ret)
92 return ret;
93
94 cnt = arch_cpu_probe(buf, count);
95
96 unlock_device_hotplug();
97 return cnt;
97} 98}
98 99
99static ssize_t cpu_release_store(struct device *dev, 100static ssize_t cpu_release_store(struct device *dev,
@@ -101,7 +102,17 @@ static ssize_t cpu_release_store(struct device *dev,
101 const char *buf, 102 const char *buf,
102 size_t count) 103 size_t count)
103{ 104{
104 return arch_cpu_release(buf, count); 105 ssize_t cnt;
106 int ret;
107
108 ret = lock_device_hotplug_sysfs();
109 if (ret)
110 return ret;
111
112 cnt = arch_cpu_release(buf, count);
113
114 unlock_device_hotplug();
115 return cnt;
105} 116}
106 117
107static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); 118static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 35fa36898916..06051767393f 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -499,7 +499,7 @@ static void __device_release_driver(struct device *dev)
499 BUS_NOTIFY_UNBIND_DRIVER, 499 BUS_NOTIFY_UNBIND_DRIVER,
500 dev); 500 dev);
501 501
502 pm_runtime_put(dev); 502 pm_runtime_put_sync(dev);
503 503
504 if (dev->bus && dev->bus->remove) 504 if (dev->bus && dev->bus->remove)
505 dev->bus->remove(dev); 505 dev->bus->remove(dev);
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 507379e7b763..545c4de412c3 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -91,7 +91,8 @@ static __always_inline struct devres * alloc_dr(dr_release_t release,
91 if (unlikely(!dr)) 91 if (unlikely(!dr))
92 return NULL; 92 return NULL;
93 93
94 memset(dr, 0, tot_size); 94 memset(dr, 0, offsetof(struct devres, data));
95
95 INIT_LIST_HEAD(&dr->node.entry); 96 INIT_LIST_HEAD(&dr->node.entry);
96 dr->node.release = release; 97 dr->node.release = release;
97 return dr; 98 return dr;
@@ -110,7 +111,7 @@ void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
110{ 111{
111 struct devres *dr; 112 struct devres *dr;
112 113
113 dr = alloc_dr(release, size, gfp); 114 dr = alloc_dr(release, size, gfp | __GFP_ZERO);
114 if (unlikely(!dr)) 115 if (unlikely(!dr))
115 return NULL; 116 return NULL;
116 set_node_dbginfo(&dr->node, name, size); 117 set_node_dbginfo(&dr->node, name, size);
@@ -135,7 +136,7 @@ void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
135{ 136{
136 struct devres *dr; 137 struct devres *dr;
137 138
138 dr = alloc_dr(release, size, gfp); 139 dr = alloc_dr(release, size, gfp | __GFP_ZERO);
139 if (unlikely(!dr)) 140 if (unlikely(!dr))
140 return NULL; 141 return NULL;
141 return dr->data; 142 return dr->data;
@@ -745,58 +746,62 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
745EXPORT_SYMBOL_GPL(devm_remove_action); 746EXPORT_SYMBOL_GPL(devm_remove_action);
746 747
747/* 748/*
748 * Managed kzalloc/kfree 749 * Managed kmalloc/kfree
749 */ 750 */
750static void devm_kzalloc_release(struct device *dev, void *res) 751static void devm_kmalloc_release(struct device *dev, void *res)
751{ 752{
752 /* noop */ 753 /* noop */
753} 754}
754 755
755static int devm_kzalloc_match(struct device *dev, void *res, void *data) 756static int devm_kmalloc_match(struct device *dev, void *res, void *data)
756{ 757{
757 return res == data; 758 return res == data;
758} 759}
759 760
760/** 761/**
761 * devm_kzalloc - Resource-managed kzalloc 762 * devm_kmalloc - Resource-managed kmalloc
762 * @dev: Device to allocate memory for 763 * @dev: Device to allocate memory for
763 * @size: Allocation size 764 * @size: Allocation size
764 * @gfp: Allocation gfp flags 765 * @gfp: Allocation gfp flags
765 * 766 *
766 * Managed kzalloc. Memory allocated with this function is 767 * Managed kmalloc. Memory allocated with this function is
767 * automatically freed on driver detach. Like all other devres 768 * automatically freed on driver detach. Like all other devres
768 * resources, guaranteed alignment is unsigned long long. 769 * resources, guaranteed alignment is unsigned long long.
769 * 770 *
770 * RETURNS: 771 * RETURNS:
771 * Pointer to allocated memory on success, NULL on failure. 772 * Pointer to allocated memory on success, NULL on failure.
772 */ 773 */
773void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) 774void * devm_kmalloc(struct device *dev, size_t size, gfp_t gfp)
774{ 775{
775 struct devres *dr; 776 struct devres *dr;
776 777
777 /* use raw alloc_dr for kmalloc caller tracing */ 778 /* use raw alloc_dr for kmalloc caller tracing */
778 dr = alloc_dr(devm_kzalloc_release, size, gfp); 779 dr = alloc_dr(devm_kmalloc_release, size, gfp);
779 if (unlikely(!dr)) 780 if (unlikely(!dr))
780 return NULL; 781 return NULL;
781 782
783 /*
784 * This is named devm_kzalloc_release for historical reasons
785 * The initial implementation did not support kmalloc, only kzalloc
786 */
782 set_node_dbginfo(&dr->node, "devm_kzalloc_release", size); 787 set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
783 devres_add(dev, dr->data); 788 devres_add(dev, dr->data);
784 return dr->data; 789 return dr->data;
785} 790}
786EXPORT_SYMBOL_GPL(devm_kzalloc); 791EXPORT_SYMBOL_GPL(devm_kmalloc);
787 792
788/** 793/**
789 * devm_kfree - Resource-managed kfree 794 * devm_kfree - Resource-managed kfree
790 * @dev: Device this memory belongs to 795 * @dev: Device this memory belongs to
791 * @p: Memory to free 796 * @p: Memory to free
792 * 797 *
793 * Free memory allocated with devm_kzalloc(). 798 * Free memory allocated with devm_kmalloc().
794 */ 799 */
795void devm_kfree(struct device *dev, void *p) 800void devm_kfree(struct device *dev, void *p)
796{ 801{
797 int rc; 802 int rc;
798 803
799 rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p); 804 rc = devres_destroy(dev, devm_kmalloc_release, devm_kmalloc_match, p);
800 WARN_ON(rc); 805 WARN_ON(rc);
801} 806}
802EXPORT_SYMBOL_GPL(devm_kfree); 807EXPORT_SYMBOL_GPL(devm_kfree);
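
With the underlying allocator renamed to devm_kmalloc(), devm_kzalloc() itself becomes a header-side wrapper that simply forces zeroing. The expected shape (a sketch matching include/linux/device.h, not part of this diff) is:

static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
{
	return devm_kmalloc(dev, size, gfp | __GFP_ZERO);
}
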
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 7413d065906b..0f3820121e02 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -216,7 +216,7 @@ static int handle_create(const char *nodename, umode_t mode, kuid_t uid,
216 newattrs.ia_gid = gid; 216 newattrs.ia_gid = gid;
217 newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID; 217 newattrs.ia_valid = ATTR_MODE|ATTR_UID|ATTR_GID;
218 mutex_lock(&dentry->d_inode->i_mutex); 218 mutex_lock(&dentry->d_inode->i_mutex);
219 notify_change(dentry, &newattrs); 219 notify_change(dentry, &newattrs, NULL);
220 mutex_unlock(&dentry->d_inode->i_mutex); 220 mutex_unlock(&dentry->d_inode->i_mutex);
221 221
222 /* mark as kernel-created inode */ 222 /* mark as kernel-created inode */
@@ -322,9 +322,9 @@ static int handle_remove(const char *nodename, struct device *dev)
322 newattrs.ia_valid = 322 newattrs.ia_valid =
323 ATTR_UID|ATTR_GID|ATTR_MODE; 323 ATTR_UID|ATTR_GID|ATTR_MODE;
324 mutex_lock(&dentry->d_inode->i_mutex); 324 mutex_lock(&dentry->d_inode->i_mutex);
325 notify_change(dentry, &newattrs); 325 notify_change(dentry, &newattrs, NULL);
326 mutex_unlock(&dentry->d_inode->i_mutex); 326 mutex_unlock(&dentry->d_inode->i_mutex);
327 err = vfs_unlink(parent.dentry->d_inode, dentry); 327 err = vfs_unlink(parent.dentry->d_inode, dentry, NULL);
328 if (!err || err == -ENOENT) 328 if (!err || err == -ENOENT)
329 deleted = 1; 329 deleted = 1;
330 } 330 }
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 99802d6f3c60..165c2c299e57 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -49,7 +49,7 @@ struct cma *dma_contiguous_default_area;
49 49
50/* 50/*
51 * Default global CMA area size can be defined in kernel's .config. 51 * Default global CMA area size can be defined in kernel's .config.
52 * This is usefull mainly for distro maintainers to create a kernel 52 * This is useful mainly for distro maintainers to create a kernel
53 * that works correctly for most supported systems. 53 * that works correctly for most supported systems.
54 * The size can be set in bytes or as a percentage of the total memory 54 * The size can be set in bytes or as a percentage of the total memory
55 * in the system. 55 * in the system.
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 10a4467c63f1..eb8fb94ae2c5 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -282,31 +282,35 @@ static noinline_for_stack long fw_file_size(struct file *file)
282 return st.size; 282 return st.size;
283} 283}
284 284
285static bool fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf) 285static int fw_read_file_contents(struct file *file, struct firmware_buf *fw_buf)
286{ 286{
287 long size; 287 long size;
288 char *buf; 288 char *buf;
289 int rc;
289 290
290 size = fw_file_size(file); 291 size = fw_file_size(file);
291 if (size <= 0) 292 if (size <= 0)
292 return false; 293 return -EINVAL;
293 buf = vmalloc(size); 294 buf = vmalloc(size);
294 if (!buf) 295 if (!buf)
295 return false; 296 return -ENOMEM;
296 if (kernel_read(file, 0, buf, size) != size) { 297 rc = kernel_read(file, 0, buf, size);
298 if (rc != size) {
299 if (rc > 0)
300 rc = -EIO;
297 vfree(buf); 301 vfree(buf);
298 return false; 302 return rc;
299 } 303 }
300 fw_buf->data = buf; 304 fw_buf->data = buf;
301 fw_buf->size = size; 305 fw_buf->size = size;
302 return true; 306 return 0;
303} 307}
304 308
305static bool fw_get_filesystem_firmware(struct device *device, 309static int fw_get_filesystem_firmware(struct device *device,
306 struct firmware_buf *buf) 310 struct firmware_buf *buf)
307{ 311{
308 int i; 312 int i;
309 bool success = false; 313 int rc = -ENOENT;
310 char *path = __getname(); 314 char *path = __getname();
311 315
312 for (i = 0; i < ARRAY_SIZE(fw_path); i++) { 316 for (i = 0; i < ARRAY_SIZE(fw_path); i++) {
@@ -321,14 +325,17 @@ static bool fw_get_filesystem_firmware(struct device *device,
321 file = filp_open(path, O_RDONLY, 0); 325 file = filp_open(path, O_RDONLY, 0);
322 if (IS_ERR(file)) 326 if (IS_ERR(file))
323 continue; 327 continue;
324 success = fw_read_file_contents(file, buf); 328 rc = fw_read_file_contents(file, buf);
325 fput(file); 329 fput(file);
326 if (success) 330 if (rc)
331 dev_warn(device, "firmware, attempted to load %s, but failed with error %d\n",
332 path, rc);
333 else
327 break; 334 break;
328 } 335 }
329 __putname(path); 336 __putname(path);
330 337
331 if (success) { 338 if (!rc) {
332 dev_dbg(device, "firmware: direct-loading firmware %s\n", 339 dev_dbg(device, "firmware: direct-loading firmware %s\n",
333 buf->fw_id); 340 buf->fw_id);
334 mutex_lock(&fw_lock); 341 mutex_lock(&fw_lock);
@@ -337,7 +344,7 @@ static bool fw_get_filesystem_firmware(struct device *device,
337 mutex_unlock(&fw_lock); 344 mutex_unlock(&fw_lock);
338 } 345 }
339 346
340 return success; 347 return rc;
341} 348}
342 349
343/* firmware holds the ownership of pages */ 350/* firmware holds the ownership of pages */
@@ -1086,9 +1093,14 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
1086 } 1093 }
1087 } 1094 }
1088 1095
1089 if (!fw_get_filesystem_firmware(device, fw->priv)) 1096 ret = fw_get_filesystem_firmware(device, fw->priv);
1097 if (ret) {
1098 dev_warn(device, "Direct firmware load failed with error %d\n",
1099 ret);
1100 dev_warn(device, "Falling back to user helper\n");
1090 ret = fw_load_from_user_helper(fw, name, device, 1101 ret = fw_load_from_user_helper(fw, name, device,
1091 uevent, nowait, timeout); 1102 uevent, nowait, timeout);
1103 }
1092 1104
1093 /* don't cache firmware handled without uevent */ 1105 /* don't cache firmware handled without uevent */
1094 if (!ret) 1106 if (!ret)
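
None of this changes the consumer API: drivers still call request_firmware() and see a single error code, with the direct-load failure now logged before the user-helper fallback is attempted. A minimal, hypothetical usage sketch (firmware path and variables invented for illustration):

const struct firmware *fw;
int err;

err = request_firmware(&fw, "demo/board.bin", &pdev->dev);
if (err)
	return err;	/* both direct load and usermode helper failed */

/* ... program fw->data (fw->size bytes) into the device ... */

release_firmware(fw);
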
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 4f8bef3eb5a8..3a94b799f166 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -432,7 +432,7 @@ struct platform_device *platform_device_register_full(
432 goto err_alloc; 432 goto err_alloc;
433 433
434 pdev->dev.parent = pdevinfo->parent; 434 pdev->dev.parent = pdevinfo->parent;
435 ACPI_HANDLE_SET(&pdev->dev, pdevinfo->acpi_node.handle); 435 ACPI_COMPANION_SET(&pdev->dev, pdevinfo->acpi_node.companion);
436 436
437 if (pdevinfo->dma_mask) { 437 if (pdevinfo->dma_mask) {
438 /* 438 /*
@@ -463,7 +463,7 @@ struct platform_device *platform_device_register_full(
463 ret = platform_device_add(pdev); 463 ret = platform_device_add(pdev);
464 if (ret) { 464 if (ret) {
465err: 465err:
466 ACPI_HANDLE_SET(&pdev->dev, NULL); 466 ACPI_COMPANION_SET(&pdev->dev, NULL);
467 kfree(pdev->dev.dma_mask); 467 kfree(pdev->dev.dma_mask);
468 468
469err_alloc: 469err_alloc:
@@ -488,6 +488,11 @@ static int platform_drv_probe(struct device *_dev)
488 if (ret && ACPI_HANDLE(_dev)) 488 if (ret && ACPI_HANDLE(_dev))
489 acpi_dev_pm_detach(_dev, true); 489 acpi_dev_pm_detach(_dev, true);
490 490
491 if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
492 dev_warn(_dev, "probe deferral not supported\n");
493 ret = -ENXIO;
494 }
495
491 return ret; 496 return ret;
492} 497}
493 498
@@ -553,8 +558,7 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
553/** 558/**
554 * platform_driver_probe - register driver for non-hotpluggable device 559 * platform_driver_probe - register driver for non-hotpluggable device
555 * @drv: platform driver structure 560 * @drv: platform driver structure
556 * @probe: the driver probe routine, probably from an __init section, 561 * @probe: the driver probe routine, probably from an __init section
557 * must not return -EPROBE_DEFER.
558 * 562 *
559 * Use this instead of platform_driver_register() when you know the device 563 * Use this instead of platform_driver_register() when you know the device
560 * is not hotpluggable and has already been registered, and you want to 564 * is not hotpluggable and has already been registered, and you want to
@@ -565,8 +569,7 @@ EXPORT_SYMBOL_GPL(platform_driver_unregister);
565 * into system-on-chip processors, where the controller devices have been 569 * into system-on-chip processors, where the controller devices have been
566 * configured as part of board setup. 570 * configured as part of board setup.
567 * 571 *
568 * This is incompatible with deferred probing so probe() must not 572 * Note that this is incompatible with deferred probing.
569 * return -EPROBE_DEFER.
570 * 573 *
571 * Returns zero if the driver registered and bound to a device, else returns 574 * Returns zero if the driver registered and bound to a device, else returns
572 * a negative error code and with the driver not registered. 575 * a negative error code and with the driver not registered.
@@ -576,6 +579,12 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv,
576{ 579{
577 int retval, code; 580 int retval, code;
578 581
582 /*
583 * Prevent driver from requesting probe deferral to avoid further
584 * futile probe attempts.
585 */
586 drv->prevent_deferred_probe = true;
587
579 /* make sure driver won't have bind/unbind attributes */ 588 /* make sure driver won't have bind/unbind attributes */
580 drv->driver.suppress_bind_attrs = true; 589 drv->driver.suppress_bind_attrs = true;
581 590
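
For users of platform_driver_probe() the practical rule is unchanged: the probe routine may live in __init memory and must succeed or fail outright, since a deferral request is now rejected with -ENXIO. A hedged sketch of that pattern (driver and function names invented):

static int __init demo_probe(struct platform_device *pdev)
{
	/* Must not return -EPROBE_DEFER; resources are assumed present. */
	return 0;
}

static struct platform_driver demo_driver = {
	.driver = {
		.name  = "demo",
		.owner = THIS_MODULE,
	},
};

static int __init demo_init(void)
{
	/* Registers the driver and binds it exactly once, at boot. */
	return platform_driver_probe(&demo_driver, demo_probe);
}
module_init(demo_init);
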
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 9f098a82cf04..1b41fca3d65a 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -30,6 +30,8 @@
30#include <linux/suspend.h> 30#include <linux/suspend.h>
31#include <trace/events/power.h> 31#include <trace/events/power.h>
32#include <linux/cpuidle.h> 32#include <linux/cpuidle.h>
33#include <linux/timer.h>
34
33#include "../base.h" 35#include "../base.h"
34#include "power.h" 36#include "power.h"
35 37
@@ -390,6 +392,71 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
390 return error; 392 return error;
391} 393}
392 394
395#ifdef CONFIG_DPM_WATCHDOG
396struct dpm_watchdog {
397 struct device *dev;
398 struct task_struct *tsk;
399 struct timer_list timer;
400};
401
402#define DECLARE_DPM_WATCHDOG_ON_STACK(wd) \
403 struct dpm_watchdog wd
404
405/**
406 * dpm_watchdog_handler - Driver suspend / resume watchdog handler.
407 * @data: Watchdog object address.
408 *
409 * Called when a driver has timed out suspending or resuming.
410 * There's not much we can do here to recover so panic() to
411 * capture a crash-dump in pstore.
412 */
413static void dpm_watchdog_handler(unsigned long data)
414{
415 struct dpm_watchdog *wd = (void *)data;
416
417 dev_emerg(wd->dev, "**** DPM device timeout ****\n");
418 show_stack(wd->tsk, NULL);
419 panic("%s %s: unrecoverable failure\n",
420 dev_driver_string(wd->dev), dev_name(wd->dev));
421}
422
423/**
424 * dpm_watchdog_set - Enable pm watchdog for given device.
425 * @wd: Watchdog. Must be allocated on the stack.
426 * @dev: Device to handle.
427 */
428static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev)
429{
430 struct timer_list *timer = &wd->timer;
431
432 wd->dev = dev;
433 wd->tsk = current;
434
435 init_timer_on_stack(timer);
436 /* use same timeout value for both suspend and resume */
437 timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT;
438 timer->function = dpm_watchdog_handler;
439 timer->data = (unsigned long)wd;
440 add_timer(timer);
441}
442
443/**
444 * dpm_watchdog_clear - Disable suspend/resume watchdog.
445 * @wd: Watchdog to disable.
446 */
447static void dpm_watchdog_clear(struct dpm_watchdog *wd)
448{
449 struct timer_list *timer = &wd->timer;
450
451 del_timer_sync(timer);
452 destroy_timer_on_stack(timer);
453}
454#else
455#define DECLARE_DPM_WATCHDOG_ON_STACK(wd)
456#define dpm_watchdog_set(x, y)
457#define dpm_watchdog_clear(x)
458#endif
459
393/*------------------------- Resume routines -------------------------*/ 460/*------------------------- Resume routines -------------------------*/
394 461
395/** 462/**
@@ -576,6 +643,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
576 pm_callback_t callback = NULL; 643 pm_callback_t callback = NULL;
577 char *info = NULL; 644 char *info = NULL;
578 int error = 0; 645 int error = 0;
646 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
579 647
580 TRACE_DEVICE(dev); 648 TRACE_DEVICE(dev);
581 TRACE_RESUME(0); 649 TRACE_RESUME(0);
@@ -584,6 +652,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
584 goto Complete; 652 goto Complete;
585 653
586 dpm_wait(dev->parent, async); 654 dpm_wait(dev->parent, async);
655 dpm_watchdog_set(&wd, dev);
587 device_lock(dev); 656 device_lock(dev);
588 657
589 /* 658 /*
@@ -642,6 +711,7 @@ static int device_resume(struct device *dev, pm_message_t state, bool async)
642 711
643 Unlock: 712 Unlock:
644 device_unlock(dev); 713 device_unlock(dev);
714 dpm_watchdog_clear(&wd);
645 715
646 Complete: 716 Complete:
647 complete_all(&dev->power.completion); 717 complete_all(&dev->power.completion);
@@ -687,7 +757,7 @@ void dpm_resume(pm_message_t state)
687 async_error = 0; 757 async_error = 0;
688 758
689 list_for_each_entry(dev, &dpm_suspended_list, power.entry) { 759 list_for_each_entry(dev, &dpm_suspended_list, power.entry) {
690 INIT_COMPLETION(dev->power.completion); 760 reinit_completion(&dev->power.completion);
691 if (is_async(dev)) { 761 if (is_async(dev)) {
692 get_device(dev); 762 get_device(dev);
693 async_schedule(async_resume, dev); 763 async_schedule(async_resume, dev);
@@ -1060,6 +1130,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1060 pm_callback_t callback = NULL; 1130 pm_callback_t callback = NULL;
1061 char *info = NULL; 1131 char *info = NULL;
1062 int error = 0; 1132 int error = 0;
1133 DECLARE_DPM_WATCHDOG_ON_STACK(wd);
1063 1134
1064 dpm_wait_for_children(dev, async); 1135 dpm_wait_for_children(dev, async);
1065 1136
@@ -1083,6 +1154,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1083 if (dev->power.syscore) 1154 if (dev->power.syscore)
1084 goto Complete; 1155 goto Complete;
1085 1156
1157 dpm_watchdog_set(&wd, dev);
1086 device_lock(dev); 1158 device_lock(dev);
1087 1159
1088 if (dev->pm_domain) { 1160 if (dev->pm_domain) {
@@ -1139,6 +1211,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1139 } 1211 }
1140 1212
1141 device_unlock(dev); 1213 device_unlock(dev);
1214 dpm_watchdog_clear(&wd);
1142 1215
1143 Complete: 1216 Complete:
1144 complete_all(&dev->power.completion); 1217 complete_all(&dev->power.completion);
@@ -1164,7 +1237,7 @@ static void async_suspend(void *data, async_cookie_t cookie)
1164 1237
1165static int device_suspend(struct device *dev) 1238static int device_suspend(struct device *dev)
1166{ 1239{
1167 INIT_COMPLETION(dev->power.completion); 1240 reinit_completion(&dev->power.completion);
1168 1241
1169 if (pm_async_enabled && dev->power.async_suspend) { 1242 if (pm_async_enabled && dev->power.async_suspend) {
1170 get_device(dev); 1243 get_device(dev);
@@ -1277,6 +1350,9 @@ static int device_prepare(struct device *dev, pm_message_t state)
1277 1350
1278 device_unlock(dev); 1351 device_unlock(dev);
1279 1352
1353 if (error)
1354 pm_runtime_put(dev);
1355
1280 return error; 1356 return error;
1281} 1357}
1282 1358
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index ef89897c6043..fa4187418440 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -21,7 +21,7 @@
21#include <linux/list.h> 21#include <linux/list.h>
22#include <linux/rculist.h> 22#include <linux/rculist.h>
23#include <linux/rcupdate.h> 23#include <linux/rcupdate.h>
24#include <linux/opp.h> 24#include <linux/pm_opp.h>
25#include <linux/of.h> 25#include <linux/of.h>
26#include <linux/export.h> 26#include <linux/export.h>
27 27
@@ -42,7 +42,7 @@
42 */ 42 */
43 43
44/** 44/**
45 * struct opp - Generic OPP description structure 45 * struct dev_pm_opp - Generic OPP description structure
46 * @node: opp list node. The nodes are maintained throughout the lifetime 46 * @node: opp list node. The nodes are maintained throughout the lifetime
47 * of boot. It is expected only an optimal set of OPPs are 47 * of boot. It is expected only an optimal set of OPPs are
48 * added to the library by the SoC framework. 48 * added to the library by the SoC framework.
@@ -59,7 +59,7 @@
59 * 59 *
60 * This structure stores the OPP information for a given device. 60 * This structure stores the OPP information for a given device.
61 */ 61 */
62struct opp { 62struct dev_pm_opp {
63 struct list_head node; 63 struct list_head node;
64 64
65 bool available; 65 bool available;
@@ -136,7 +136,7 @@ static struct device_opp *find_device_opp(struct device *dev)
136} 136}
137 137
138/** 138/**
139 * opp_get_voltage() - Gets the voltage corresponding to an available opp 139 * dev_pm_opp_get_voltage() - Gets the voltage corresponding to an available opp
140 * @opp: opp for which voltage has to be returned for 140 * @opp: opp for which voltage has to be returned for
141 * 141 *
142 * Return voltage in micro volt corresponding to the opp, else 142 * Return voltage in micro volt corresponding to the opp, else
@@ -150,9 +150,9 @@ static struct device_opp *find_device_opp(struct device *dev)
150 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the 150 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
151 * pointer. 151 * pointer.
152 */ 152 */
153unsigned long opp_get_voltage(struct opp *opp) 153unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp)
154{ 154{
155 struct opp *tmp_opp; 155 struct dev_pm_opp *tmp_opp;
156 unsigned long v = 0; 156 unsigned long v = 0;
157 157
158 tmp_opp = rcu_dereference(opp); 158 tmp_opp = rcu_dereference(opp);
@@ -163,10 +163,10 @@ unsigned long opp_get_voltage(struct opp *opp)
163 163
164 return v; 164 return v;
165} 165}
166EXPORT_SYMBOL_GPL(opp_get_voltage); 166EXPORT_SYMBOL_GPL(dev_pm_opp_get_voltage);
167 167
168/** 168/**
169 * opp_get_freq() - Gets the frequency corresponding to an available opp 169 * dev_pm_opp_get_freq() - Gets the frequency corresponding to an available opp
170 * @opp: opp for which frequency has to be returned for 170 * @opp: opp for which frequency has to be returned for
171 * 171 *
172 * Return frequency in hertz corresponding to the opp, else 172 * Return frequency in hertz corresponding to the opp, else
@@ -180,9 +180,9 @@ EXPORT_SYMBOL_GPL(opp_get_voltage);
180 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the 180 * prior to unlocking with rcu_read_unlock() to maintain the integrity of the
181 * pointer. 181 * pointer.
182 */ 182 */
183unsigned long opp_get_freq(struct opp *opp) 183unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp)
184{ 184{
185 struct opp *tmp_opp; 185 struct dev_pm_opp *tmp_opp;
186 unsigned long f = 0; 186 unsigned long f = 0;
187 187
188 tmp_opp = rcu_dereference(opp); 188 tmp_opp = rcu_dereference(opp);
@@ -193,10 +193,10 @@ unsigned long opp_get_freq(struct opp *opp)
193 193
194 return f; 194 return f;
195} 195}
196EXPORT_SYMBOL_GPL(opp_get_freq); 196EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
197 197
198/** 198/**
199 * opp_get_opp_count() - Get number of opps available in the opp list 199 * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list
200 * @dev: device for which we do this operation 200 * @dev: device for which we do this operation
201 * 201 *
202 * This function returns the number of available opps if there are any, 202 * This function returns the number of available opps if there are any,
@@ -206,10 +206,10 @@ EXPORT_SYMBOL_GPL(opp_get_freq);
206 * internally references two RCU protected structures: device_opp and opp which 206 * internally references two RCU protected structures: device_opp and opp which
207 * are safe as long as we are under a common RCU locked section. 207 * are safe as long as we are under a common RCU locked section.
208 */ 208 */
209int opp_get_opp_count(struct device *dev) 209int dev_pm_opp_get_opp_count(struct device *dev)
210{ 210{
211 struct device_opp *dev_opp; 211 struct device_opp *dev_opp;
212 struct opp *temp_opp; 212 struct dev_pm_opp *temp_opp;
213 int count = 0; 213 int count = 0;
214 214
215 dev_opp = find_device_opp(dev); 215 dev_opp = find_device_opp(dev);
@@ -226,10 +226,10 @@ int opp_get_opp_count(struct device *dev)
226 226
227 return count; 227 return count;
228} 228}
229EXPORT_SYMBOL_GPL(opp_get_opp_count); 229EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
230 230
231/** 231/**
232 * opp_find_freq_exact() - search for an exact frequency 232 * dev_pm_opp_find_freq_exact() - search for an exact frequency
233 * @dev: device for which we do this operation 233 * @dev: device for which we do this operation
234 * @freq: frequency to search for 234 * @freq: frequency to search for
235 * @available: true/false - match for available opp 235 * @available: true/false - match for available opp
@@ -254,11 +254,12 @@ EXPORT_SYMBOL_GPL(opp_get_opp_count);
254 * under the locked area. The pointer returned must be used prior to unlocking 254 * under the locked area. The pointer returned must be used prior to unlocking
255 * with rcu_read_unlock() to maintain the integrity of the pointer. 255 * with rcu_read_unlock() to maintain the integrity of the pointer.
256 */ 256 */
257struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, 257struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
258 bool available) 258 unsigned long freq,
259 bool available)
259{ 260{
260 struct device_opp *dev_opp; 261 struct device_opp *dev_opp;
261 struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); 262 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
262 263
263 dev_opp = find_device_opp(dev); 264 dev_opp = find_device_opp(dev);
264 if (IS_ERR(dev_opp)) { 265 if (IS_ERR(dev_opp)) {
@@ -277,10 +278,10 @@ struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
277 278
278 return opp; 279 return opp;
279} 280}
280EXPORT_SYMBOL_GPL(opp_find_freq_exact); 281EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_exact);
281 282
282/** 283/**
283 * opp_find_freq_ceil() - Search for an rounded ceil freq 284 * dev_pm_opp_find_freq_ceil() - Search for an rounded ceil freq
284 * @dev: device for which we do this operation 285 * @dev: device for which we do this operation
285 * @freq: Start frequency 286 * @freq: Start frequency
286 * 287 *
@@ -300,10 +301,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_exact);
300 * under the locked area. The pointer returned must be used prior to unlocking 301 * under the locked area. The pointer returned must be used prior to unlocking
301 * with rcu_read_unlock() to maintain the integrity of the pointer. 302 * with rcu_read_unlock() to maintain the integrity of the pointer.
302 */ 303 */
303struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq) 304struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
305 unsigned long *freq)
304{ 306{
305 struct device_opp *dev_opp; 307 struct device_opp *dev_opp;
306 struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); 308 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
307 309
308 if (!dev || !freq) { 310 if (!dev || !freq) {
309 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); 311 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -324,10 +326,10 @@ struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
324 326
325 return opp; 327 return opp;
326} 328}
327EXPORT_SYMBOL_GPL(opp_find_freq_ceil); 329EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_ceil);
328 330
329/** 331/**
330 * opp_find_freq_floor() - Search for a rounded floor freq 332 * dev_pm_opp_find_freq_floor() - Search for a rounded floor freq
331 * @dev: device for which we do this operation 333 * @dev: device for which we do this operation
332 * @freq: Start frequency 334 * @freq: Start frequency
333 * 335 *
@@ -347,10 +349,11 @@ EXPORT_SYMBOL_GPL(opp_find_freq_ceil);
347 * under the locked area. The pointer returned must be used prior to unlocking 349 * under the locked area. The pointer returned must be used prior to unlocking
348 * with rcu_read_unlock() to maintain the integrity of the pointer. 350 * with rcu_read_unlock() to maintain the integrity of the pointer.
349 */ 351 */
350struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq) 352struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
353 unsigned long *freq)
351{ 354{
352 struct device_opp *dev_opp; 355 struct device_opp *dev_opp;
353 struct opp *temp_opp, *opp = ERR_PTR(-ERANGE); 356 struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
354 357
355 if (!dev || !freq) { 358 if (!dev || !freq) {
356 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq); 359 dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
@@ -375,17 +378,17 @@ struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
375 378
376 return opp; 379 return opp;
377} 380}
378EXPORT_SYMBOL_GPL(opp_find_freq_floor); 381EXPORT_SYMBOL_GPL(dev_pm_opp_find_freq_floor);
379 382
380/** 383/**
381 * opp_add() - Add an OPP table from a table definitions 384 * dev_pm_opp_add() - Add an OPP table from a table definitions
382 * @dev: device for which we do this operation 385 * @dev: device for which we do this operation
383 * @freq: Frequency in Hz for this OPP 386 * @freq: Frequency in Hz for this OPP
384 * @u_volt: Voltage in uVolts for this OPP 387 * @u_volt: Voltage in uVolts for this OPP
385 * 388 *
386 * This function adds an opp definition to the opp list and returns status. 389 * This function adds an opp definition to the opp list and returns status.
387 * The opp is made available by default and it can be controlled using 390 * The opp is made available by default and it can be controlled using
388 * opp_enable/disable functions. 391 * dev_pm_opp_enable/disable functions.
389 * 392 *
390 * Locking: The internal device_opp and opp structures are RCU protected. 393 * Locking: The internal device_opp and opp structures are RCU protected.
391 * Hence this function internally uses RCU updater strategy with mutex locks 394 * Hence this function internally uses RCU updater strategy with mutex locks
@@ -393,14 +396,14 @@ EXPORT_SYMBOL_GPL(opp_find_freq_floor);
393 * that this function is *NOT* called under RCU protection or in contexts where 396 * that this function is *NOT* called under RCU protection or in contexts where
394 * mutex cannot be locked. 397 * mutex cannot be locked.
395 */ 398 */
396int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt) 399int dev_pm_opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
397{ 400{
398 struct device_opp *dev_opp = NULL; 401 struct device_opp *dev_opp = NULL;
399 struct opp *opp, *new_opp; 402 struct dev_pm_opp *opp, *new_opp;
400 struct list_head *head; 403 struct list_head *head;
401 404
402 /* allocate new OPP node */ 405 /* allocate new OPP node */
403 new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL); 406 new_opp = kzalloc(sizeof(*new_opp), GFP_KERNEL);
404 if (!new_opp) { 407 if (!new_opp) {
405 dev_warn(dev, "%s: Unable to create new OPP node\n", __func__); 408 dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
406 return -ENOMEM; 409 return -ENOMEM;
@@ -460,7 +463,7 @@ int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
460 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp); 463 srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
461 return 0; 464 return 0;
462} 465}
463EXPORT_SYMBOL_GPL(opp_add); 466EXPORT_SYMBOL_GPL(dev_pm_opp_add);
464 467
465/** 468/**
466 * opp_set_availability() - helper to set the availability of an opp 469 * opp_set_availability() - helper to set the availability of an opp
@@ -485,11 +488,11 @@ static int opp_set_availability(struct device *dev, unsigned long freq,
485 bool availability_req) 488 bool availability_req)
486{ 489{
487 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV); 490 struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
488 struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV); 491 struct dev_pm_opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
489 int r = 0; 492 int r = 0;
490 493
491 /* keep the node allocated */ 494 /* keep the node allocated */
492 new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL); 495 new_opp = kmalloc(sizeof(*new_opp), GFP_KERNEL);
493 if (!new_opp) { 496 if (!new_opp) {
494 dev_warn(dev, "%s: Unable to create OPP\n", __func__); 497 dev_warn(dev, "%s: Unable to create OPP\n", __func__);
495 return -ENOMEM; 498 return -ENOMEM;
@@ -552,13 +555,13 @@ unlock:
552} 555}
553 556
554/** 557/**
555 * opp_enable() - Enable a specific OPP 558 * dev_pm_opp_enable() - Enable a specific OPP
556 * @dev: device for which we do this operation 559 * @dev: device for which we do this operation
557 * @freq: OPP frequency to enable 560 * @freq: OPP frequency to enable
558 * 561 *
559 * Enables a provided opp. If the operation is valid, this returns 0, else the 562 * Enables a provided opp. If the operation is valid, this returns 0, else the
560 * corresponding error value. It is meant to be used for users an OPP available 563 * corresponding error value. It is meant to be used for users an OPP available
561 * after being temporarily made unavailable with opp_disable. 564 * after being temporarily made unavailable with dev_pm_opp_disable.
562 * 565 *
563 * Locking: The internal device_opp and opp structures are RCU protected. 566 * Locking: The internal device_opp and opp structures are RCU protected.
564 * Hence this function indirectly uses RCU and mutex locks to keep the 567 * Hence this function indirectly uses RCU and mutex locks to keep the
@@ -566,21 +569,21 @@ unlock:
566 * this function is *NOT* called under RCU protection or in contexts where 569 * this function is *NOT* called under RCU protection or in contexts where
567 * mutex locking or synchronize_rcu() blocking calls cannot be used. 570 * mutex locking or synchronize_rcu() blocking calls cannot be used.
568 */ 571 */
569int opp_enable(struct device *dev, unsigned long freq) 572int dev_pm_opp_enable(struct device *dev, unsigned long freq)
570{ 573{
571 return opp_set_availability(dev, freq, true); 574 return opp_set_availability(dev, freq, true);
572} 575}
573EXPORT_SYMBOL_GPL(opp_enable); 576EXPORT_SYMBOL_GPL(dev_pm_opp_enable);
574 577
575/** 578/**
576 * opp_disable() - Disable a specific OPP 579 * dev_pm_opp_disable() - Disable a specific OPP
577 * @dev: device for which we do this operation 580 * @dev: device for which we do this operation
578 * @freq: OPP frequency to disable 581 * @freq: OPP frequency to disable
579 * 582 *
580 * Disables a provided opp. If the operation is valid, this returns 583 * Disables a provided opp. If the operation is valid, this returns
581 * 0, else the corresponding error value. It is meant to be a temporary 584 * 0, else the corresponding error value. It is meant to be a temporary
582 * control by users to make this OPP not available until the circumstances are 585 * control by users to make this OPP not available until the circumstances are
583 * right to make it available again (with a call to opp_enable). 586 * right to make it available again (with a call to dev_pm_opp_enable).
584 * 587 *
585 * Locking: The internal device_opp and opp structures are RCU protected. 588 * Locking: The internal device_opp and opp structures are RCU protected.
586 * Hence this function indirectly uses RCU and mutex locks to keep the 589 * Hence this function indirectly uses RCU and mutex locks to keep the
@@ -588,15 +591,15 @@ EXPORT_SYMBOL_GPL(opp_enable);
588 * this function is *NOT* called under RCU protection or in contexts where 591 * this function is *NOT* called under RCU protection or in contexts where
589 * mutex locking or synchronize_rcu() blocking calls cannot be used. 592 * mutex locking or synchronize_rcu() blocking calls cannot be used.
590 */ 593 */
591int opp_disable(struct device *dev, unsigned long freq) 594int dev_pm_opp_disable(struct device *dev, unsigned long freq)
592{ 595{
593 return opp_set_availability(dev, freq, false); 596 return opp_set_availability(dev, freq, false);
594} 597}
595EXPORT_SYMBOL_GPL(opp_disable); 598EXPORT_SYMBOL_GPL(dev_pm_opp_disable);
596 599
597#ifdef CONFIG_CPU_FREQ 600#ifdef CONFIG_CPU_FREQ
598/** 601/**
599 * opp_init_cpufreq_table() - create a cpufreq table for a device 602 * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
600 * @dev: device for which we do this operation 603 * @dev: device for which we do this operation
601 * @table: Cpufreq table returned back to caller 604 * @table: Cpufreq table returned back to caller
602 * 605 *
@@ -619,11 +622,11 @@ EXPORT_SYMBOL_GPL(opp_disable);
619 * Callers should ensure that this function is *NOT* called under RCU protection 622 * Callers should ensure that this function is *NOT* called under RCU protection
620 * or in contexts where mutex locking cannot be used. 623 * or in contexts where mutex locking cannot be used.
621 */ 624 */
622int opp_init_cpufreq_table(struct device *dev, 625int dev_pm_opp_init_cpufreq_table(struct device *dev,
623 struct cpufreq_frequency_table **table) 626 struct cpufreq_frequency_table **table)
624{ 627{
625 struct device_opp *dev_opp; 628 struct device_opp *dev_opp;
626 struct opp *opp; 629 struct dev_pm_opp *opp;
627 struct cpufreq_frequency_table *freq_table; 630 struct cpufreq_frequency_table *freq_table;
628 int i = 0; 631 int i = 0;
629 632
@@ -639,7 +642,7 @@ int opp_init_cpufreq_table(struct device *dev,
639 } 642 }
640 643
641 freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) * 644 freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
642 (opp_get_opp_count(dev) + 1), GFP_KERNEL); 645 (dev_pm_opp_get_opp_count(dev) + 1), GFP_KERNEL);
643 if (!freq_table) { 646 if (!freq_table) {
644 mutex_unlock(&dev_opp_list_lock); 647 mutex_unlock(&dev_opp_list_lock);
645 dev_warn(dev, "%s: Unable to allocate frequency table\n", 648 dev_warn(dev, "%s: Unable to allocate frequency table\n",
@@ -663,16 +666,16 @@ int opp_init_cpufreq_table(struct device *dev,
663 666
664 return 0; 667 return 0;
665} 668}
666EXPORT_SYMBOL_GPL(opp_init_cpufreq_table); 669EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
667 670
668/** 671/**
669 * opp_free_cpufreq_table() - free the cpufreq table 672 * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
670 * @dev: device for which we do this operation 673 * @dev: device for which we do this operation
671 * @table: table to free 674 * @table: table to free
672 * 675 *
673 * Free up the table allocated by opp_init_cpufreq_table 676 * Free up the table allocated by dev_pm_opp_init_cpufreq_table
674 */ 677 */
675void opp_free_cpufreq_table(struct device *dev, 678void dev_pm_opp_free_cpufreq_table(struct device *dev,
676 struct cpufreq_frequency_table **table) 679 struct cpufreq_frequency_table **table)
677{ 680{
678 if (!table) 681 if (!table)
@@ -681,14 +684,14 @@ void opp_free_cpufreq_table(struct device *dev,
681 kfree(*table); 684 kfree(*table);
682 *table = NULL; 685 *table = NULL;
683} 686}
684EXPORT_SYMBOL_GPL(opp_free_cpufreq_table); 687EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
685#endif /* CONFIG_CPU_FREQ */ 688#endif /* CONFIG_CPU_FREQ */
686 689
687/** 690/**
688 * opp_get_notifier() - find notifier_head of the device with opp 691 * dev_pm_opp_get_notifier() - find notifier_head of the device with opp
689 * @dev: device pointer used to lookup device OPPs. 692 * @dev: device pointer used to lookup device OPPs.
690 */ 693 */
691struct srcu_notifier_head *opp_get_notifier(struct device *dev) 694struct srcu_notifier_head *dev_pm_opp_get_notifier(struct device *dev)
692{ 695{
693 struct device_opp *dev_opp = find_device_opp(dev); 696 struct device_opp *dev_opp = find_device_opp(dev);
694 697
@@ -732,7 +735,7 @@ int of_init_opp_table(struct device *dev)
732 unsigned long freq = be32_to_cpup(val++) * 1000; 735 unsigned long freq = be32_to_cpup(val++) * 1000;
733 unsigned long volt = be32_to_cpup(val++); 736 unsigned long volt = be32_to_cpup(val++);
734 737
735 if (opp_add(dev, freq, volt)) { 738 if (dev_pm_opp_add(dev, freq, volt)) {
736 dev_warn(dev, "%s: Failed to add OPP %ld\n", 739 dev_warn(dev, "%s: Failed to add OPP %ld\n",
737 __func__, freq); 740 __func__, freq);
738 continue; 741 continue;
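
After the rename, OPP consumers use the dev_pm_opp_* names under the same RCU rules spelled out in the kerneldoc above. A rough usage sketch (cpu_dev, freq and the error handling style are illustrative, not taken from this patch):

struct dev_pm_opp *opp;
unsigned long volt;

rcu_read_lock();
opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq);
if (IS_ERR(opp)) {
	rcu_read_unlock();
	return PTR_ERR(opp);
}
volt = dev_pm_opp_get_voltage(opp);	/* opp is only valid inside the RCU section */
rcu_read_unlock();
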
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 268a35097578..72e00e66ecc5 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -258,7 +258,8 @@ static int __rpm_callback(int (*cb)(struct device *), struct device *dev)
258 * Check if the device's runtime PM status allows it to be suspended. If 258 * Check if the device's runtime PM status allows it to be suspended. If
259 * another idle notification has been started earlier, return immediately. If 259 * another idle notification has been started earlier, return immediately. If
260 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise 260 * the RPM_ASYNC flag is set then queue an idle-notification request; otherwise
261 * run the ->runtime_idle() callback directly. 261 * run the ->runtime_idle() callback directly. If the ->runtime_idle callback
262 * doesn't exist or if it returns 0, call rpm_suspend with the RPM_AUTO flag.
262 * 263 *
263 * This function must be called under dev->power.lock with interrupts disabled. 264 * This function must be called under dev->power.lock with interrupts disabled.
264 */ 265 */
@@ -331,7 +332,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
331 332
332 out: 333 out:
333 trace_rpm_return_int(dev, _THIS_IP_, retval); 334 trace_rpm_return_int(dev, _THIS_IP_, retval);
334 return retval ? retval : rpm_suspend(dev, rpmflags); 335 return retval ? retval : rpm_suspend(dev, rpmflags | RPM_AUTO);
335} 336}
336 337
337/** 338/**
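
The rpm_idle() change means a ->runtime_idle() callback now only has to veto or allow suspend: returning 0 (or having no callback at all) lets the core proceed directly to rpm_suspend() with RPM_AUTO, so autosuspend delays are honoured without the callback invoking a suspend itself. A hedged sketch of the resulting callback style (demo_device_busy() is an invented helper):

static int demo_runtime_idle(struct device *dev)
{
	if (demo_device_busy(dev))
		return -EBUSY;	/* veto: device stays active */

	return 0;		/* core continues with rpm_suspend(dev, RPM_AUTO) */
}
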