Diffstat (limited to 'drivers/base')
| -rw-r--r-- | drivers/base/Kconfig | 51 |
| -rw-r--r-- | drivers/base/bus.c | 26 |
| -rw-r--r-- | drivers/base/class.c | 16 |
| -rw-r--r-- | drivers/base/core.c | 46 |
| -rw-r--r-- | drivers/base/cpu.c | 105 |
| -rw-r--r-- | drivers/base/dd.c | 38 |
| -rw-r--r-- | drivers/base/devtmpfs.c | 13 |
| -rw-r--r-- | drivers/base/firmware_class.c | 11 |
| -rw-r--r-- | drivers/base/memory.c | 35 |
| -rw-r--r-- | drivers/base/node.c | 81 |
| -rw-r--r-- | drivers/base/platform.c | 76 |
| -rw-r--r-- | drivers/base/power/Makefile | 1 |
| -rw-r--r-- | drivers/base/power/generic_ops.c | 233 |
| -rw-r--r-- | drivers/base/power/main.c | 163 |
| -rw-r--r-- | drivers/base/power/power.h | 6 |
| -rw-r--r-- | drivers/base/power/runtime.c | 45 |
| -rw-r--r-- | drivers/base/power/sysfs.c | 100 |
| -rw-r--r-- | drivers/base/sys.c | 17 |
18 files changed, 823 insertions, 240 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index ee377270beb9..fd52c48ee762 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig | |||
| @@ -3,35 +3,50 @@ menu "Generic Driver Options" | |||
| 3 | config UEVENT_HELPER_PATH | 3 | config UEVENT_HELPER_PATH |
| 4 | string "path to uevent helper" | 4 | string "path to uevent helper" |
| 5 | depends on HOTPLUG | 5 | depends on HOTPLUG |
| 6 | default "/sbin/hotplug" | 6 | default "" |
| 7 | help | 7 | help |
| 8 | Path to uevent helper program forked by the kernel for | 8 | Path to uevent helper program forked by the kernel for |
| 9 | every uevent. | 9 | every uevent. |
| 10 | Before the switch to the netlink-based uevent source, this was | ||
| 11 | used to hook hotplug scripts into kernel device events. It | ||
| 12 | usually pointed to a shell script at /sbin/hotplug. | ||
| 13 | This should not be used today, because usual systems create | ||
| 14 | many events at bootup or device discovery in a very short time | ||
| 15 | frame. One forked process per event can create so many processes | ||
| 16 | that it creates a high system load, or on smaller systems | ||
| 17 | it is known to create out-of-memory situations during bootup. | ||
| 10 | 18 | ||
| 11 | config DEVTMPFS | 19 | config DEVTMPFS |
| 12 | bool "Create a kernel maintained /dev tmpfs (EXPERIMENTAL)" | 20 | bool "Maintain a devtmpfs filesystem to mount at /dev" |
| 13 | depends on HOTPLUG && SHMEM && TMPFS | 21 | depends on HOTPLUG && SHMEM && TMPFS |
| 14 | help | 22 | help |
| 15 | This creates a tmpfs filesystem, and mounts it at bootup | 23 | This creates a tmpfs filesystem instance early at bootup. |
| 16 | and mounts it at /dev. The kernel driver core creates device | 24 | In this filesystem, the kernel driver core maintains device |
| 17 | nodes for all registered devices in that filesystem. All device | 25 | nodes with their default names and permissions for all |
| 18 | nodes are owned by root and have the default mode of 0600. | 26 | registered devices with an assigned major/minor number. |
| 19 | Userspace can add and delete the nodes as needed. This is | 27 | Userspace can modify the filesystem content as needed, add |
| 20 | intended to simplify bootup, and make it possible to delay | 28 | symlinks, and apply needed permissions. |
| 21 | the initial coldplug at bootup done by udev in userspace. | 29 | It provides a fully functional /dev directory, where usually |
| 22 | It should also provide a simpler way for rescue systems | 30 | udev runs on top, managing permissions and adding meaningful |
| 23 | to bring up a kernel with dynamic major/minor numbers. | 31 | symlinks. |
| 24 | Meaningful symlinks, permissions and device ownership must | 32 | In very limited environments, it may provide a sufficient |
| 25 | still be handled by userspace. | 33 | functional /dev without any further help. It also allows simple |
| 26 | If unsure, say N here. | 34 | rescue systems, and reliably handles dynamic major/minor numbers. |
| 27 | 35 | ||
| 28 | config DEVTMPFS_MOUNT | 36 | config DEVTMPFS_MOUNT |
| 29 | bool "Automount devtmpfs at /dev" | 37 | bool "Automount devtmpfs at /dev, after the kernel mounted the rootfs" |
| 30 | depends on DEVTMPFS | 38 | depends on DEVTMPFS |
| 31 | help | 39 | help |
| 32 | This will mount devtmpfs at /dev if the kernel mounts the root | 40 | This will instruct the kernel to automatically mount the |
| 33 | filesystem. It will not affect initramfs based mounting. | 41 | devtmpfs filesystem at /dev, directly after the kernel has |
| 34 | If unsure, say N here. | 42 | mounted the root filesystem. The behavior can be overridden |
| 43 | with the commandline parameter: devtmpfs.mount=0|1. | ||
| 44 | This option does not affect initramfs based booting, here | ||
| 45 | the devtmpfs filesystem always needs to be mounted manually | ||
| 46 | after the roots is mounted. | ||
| 47 | With this option enabled, it allows to bring up a system in | ||
| 48 | rescue mode with init=/bin/sh, even when the /dev directory | ||
| 49 | on the rootfs is completely empty. | ||
| 35 | 50 | ||
| 36 | config STANDALONE | 51 | config STANDALONE |
| 37 | bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL | 52 | bool "Select only drivers that don't need compile-time external firmware" if EXPERIMENTAL |
diff --git a/drivers/base/bus.c b/drivers/base/bus.c index c0c5a43d9fb3..71f6af5c8b0b 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c | |||
| @@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr, | |||
| 70 | return ret; | 70 | return ret; |
| 71 | } | 71 | } |
| 72 | 72 | ||
| 73 | static struct sysfs_ops driver_sysfs_ops = { | 73 | static const struct sysfs_ops driver_sysfs_ops = { |
| 74 | .show = drv_attr_show, | 74 | .show = drv_attr_show, |
| 75 | .store = drv_attr_store, | 75 | .store = drv_attr_store, |
| 76 | }; | 76 | }; |
| @@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr, | |||
| 115 | return ret; | 115 | return ret; |
| 116 | } | 116 | } |
| 117 | 117 | ||
| 118 | static struct sysfs_ops bus_sysfs_ops = { | 118 | static const struct sysfs_ops bus_sysfs_ops = { |
| 119 | .show = bus_attr_show, | 119 | .show = bus_attr_show, |
| 120 | .store = bus_attr_store, | 120 | .store = bus_attr_store, |
| 121 | }; | 121 | }; |
| @@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj) | |||
| 154 | return 0; | 154 | return 0; |
| 155 | } | 155 | } |
| 156 | 156 | ||
| 157 | static struct kset_uevent_ops bus_uevent_ops = { | 157 | static const struct kset_uevent_ops bus_uevent_ops = { |
| 158 | .filter = bus_uevent_filter, | 158 | .filter = bus_uevent_filter, |
| 159 | }; | 159 | }; |
| 160 | 160 | ||
| @@ -173,10 +173,10 @@ static ssize_t driver_unbind(struct device_driver *drv, | |||
| 173 | dev = bus_find_device_by_name(bus, NULL, buf); | 173 | dev = bus_find_device_by_name(bus, NULL, buf); |
| 174 | if (dev && dev->driver == drv) { | 174 | if (dev && dev->driver == drv) { |
| 175 | if (dev->parent) /* Needed for USB */ | 175 | if (dev->parent) /* Needed for USB */ |
| 176 | down(&dev->parent->sem); | 176 | device_lock(dev->parent); |
| 177 | device_release_driver(dev); | 177 | device_release_driver(dev); |
| 178 | if (dev->parent) | 178 | if (dev->parent) |
| 179 | up(&dev->parent->sem); | 179 | device_unlock(dev->parent); |
| 180 | err = count; | 180 | err = count; |
| 181 | } | 181 | } |
| 182 | put_device(dev); | 182 | put_device(dev); |
| @@ -200,12 +200,12 @@ static ssize_t driver_bind(struct device_driver *drv, | |||
| 200 | dev = bus_find_device_by_name(bus, NULL, buf); | 200 | dev = bus_find_device_by_name(bus, NULL, buf); |
| 201 | if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { | 201 | if (dev && dev->driver == NULL && driver_match_device(drv, dev)) { |
| 202 | if (dev->parent) /* Needed for USB */ | 202 | if (dev->parent) /* Needed for USB */ |
| 203 | down(&dev->parent->sem); | 203 | device_lock(dev->parent); |
| 204 | down(&dev->sem); | 204 | device_lock(dev); |
| 205 | err = driver_probe_device(drv, dev); | 205 | err = driver_probe_device(drv, dev); |
| 206 | up(&dev->sem); | 206 | device_unlock(dev); |
| 207 | if (dev->parent) | 207 | if (dev->parent) |
| 208 | up(&dev->parent->sem); | 208 | device_unlock(dev->parent); |
| 209 | 209 | ||
| 210 | if (err > 0) { | 210 | if (err > 0) { |
| 211 | /* success */ | 211 | /* success */ |
| @@ -744,10 +744,10 @@ static int __must_check bus_rescan_devices_helper(struct device *dev, | |||
| 744 | 744 | ||
| 745 | if (!dev->driver) { | 745 | if (!dev->driver) { |
| 746 | if (dev->parent) /* Needed for USB */ | 746 | if (dev->parent) /* Needed for USB */ |
| 747 | down(&dev->parent->sem); | 747 | device_lock(dev->parent); |
| 748 | ret = device_attach(dev); | 748 | ret = device_attach(dev); |
| 749 | if (dev->parent) | 749 | if (dev->parent) |
| 750 | up(&dev->parent->sem); | 750 | device_unlock(dev->parent); |
| 751 | } | 751 | } |
| 752 | return ret < 0 ? ret : 0; | 752 | return ret < 0 ? ret : 0; |
| 753 | } | 753 | } |
| @@ -779,10 +779,10 @@ int device_reprobe(struct device *dev) | |||
| 779 | { | 779 | { |
| 780 | if (dev->driver) { | 780 | if (dev->driver) { |
| 781 | if (dev->parent) /* Needed for USB */ | 781 | if (dev->parent) /* Needed for USB */ |
| 782 | down(&dev->parent->sem); | 782 | device_lock(dev->parent); |
| 783 | device_release_driver(dev); | 783 | device_release_driver(dev); |
| 784 | if (dev->parent) | 784 | if (dev->parent) |
| 785 | up(&dev->parent->sem); | 785 | device_unlock(dev->parent); |
| 786 | } | 786 | } |
| 787 | return bus_rescan_devices_helper(dev, NULL); | 787 | return bus_rescan_devices_helper(dev, NULL); |
| 788 | } | 788 | } |
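
The bus.c hunks above convert the old down(&dev->sem)/up(&dev->sem) calls to the device_lock()/device_unlock() helpers. A minimal sketch of the resulting calling pattern, not taken from the patch, for a hypothetical caller that wants to force a driver off a device:

    #include <linux/device.h>

    /* Hypothetical helper mirroring the driver_unbind() pattern above. */
    static void example_force_unbind(struct device *dev)
    {
        if (dev->parent)        /* needed for USB interfaces */
            device_lock(dev->parent);
        /* device_release_driver() takes device_lock(dev) internally */
        device_release_driver(dev);
        if (dev->parent)
            device_unlock(dev->parent);
    }

The helpers hide the underlying locking primitive, so callers no longer touch dev->sem directly; the parent-before-child ordering stays exactly as before.
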
diff --git a/drivers/base/class.c b/drivers/base/class.c index 6e2c3b064f53..0147f476b8a9 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c | |||
| @@ -31,7 +31,7 @@ static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr, | |||
| 31 | ssize_t ret = -EIO; | 31 | ssize_t ret = -EIO; |
| 32 | 32 | ||
| 33 | if (class_attr->show) | 33 | if (class_attr->show) |
| 34 | ret = class_attr->show(cp->class, buf); | 34 | ret = class_attr->show(cp->class, class_attr, buf); |
| 35 | return ret; | 35 | return ret; |
| 36 | } | 36 | } |
| 37 | 37 | ||
| @@ -43,7 +43,7 @@ static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr, | |||
| 43 | ssize_t ret = -EIO; | 43 | ssize_t ret = -EIO; |
| 44 | 44 | ||
| 45 | if (class_attr->store) | 45 | if (class_attr->store) |
| 46 | ret = class_attr->store(cp->class, buf, count); | 46 | ret = class_attr->store(cp->class, class_attr, buf, count); |
| 47 | return ret; | 47 | return ret; |
| 48 | } | 48 | } |
| 49 | 49 | ||
| @@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj) | |||
| 63 | kfree(cp); | 63 | kfree(cp); |
| 64 | } | 64 | } |
| 65 | 65 | ||
| 66 | static struct sysfs_ops class_sysfs_ops = { | 66 | static const struct sysfs_ops class_sysfs_ops = { |
| 67 | .show = class_attr_show, | 67 | .show = class_attr_show, |
| 68 | .store = class_attr_store, | 68 | .store = class_attr_store, |
| 69 | }; | 69 | }; |
| @@ -490,6 +490,16 @@ void class_interface_unregister(struct class_interface *class_intf) | |||
| 490 | class_put(parent); | 490 | class_put(parent); |
| 491 | } | 491 | } |
| 492 | 492 | ||
| 493 | ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr, | ||
| 494 | char *buf) | ||
| 495 | { | ||
| 496 | struct class_attribute_string *cs; | ||
| 497 | cs = container_of(attr, struct class_attribute_string, attr); | ||
| 498 | return snprintf(buf, PAGE_SIZE, "%s\n", cs->str); | ||
| 499 | } | ||
| 500 | |||
| 501 | EXPORT_SYMBOL_GPL(show_class_attr_string); | ||
| 502 | |||
| 493 | struct class_compat { | 503 | struct class_compat { |
| 494 | struct kobject *kobj; | 504 | struct kobject *kobj; |
| 495 | }; | 505 | }; |
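
The class.c hunks change the class attribute ->show()/->store() prototypes to also receive the struct class_attribute pointer, which is what makes container_of()-based helpers such as show_class_attr_string() above possible. A sketch of a class attribute written against the new prototype (the attribute name and value are made up, not part of the patch):

    #include <linux/device.h>

    static ssize_t version_show(struct class *class,
                                struct class_attribute *attr, char *buf)
    {
        return sprintf(buf, "%s\n", "1.0");
    }
    static CLASS_ATTR(version, 0444, version_show, NULL);

It would still be published with class_create_file(); only the callback signatures change.
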
diff --git a/drivers/base/core.c b/drivers/base/core.c index 282025770429..ef55df34ddd0 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
| @@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr, | |||
| 100 | return ret; | 100 | return ret; |
| 101 | } | 101 | } |
| 102 | 102 | ||
| 103 | static struct sysfs_ops dev_sysfs_ops = { | 103 | static const struct sysfs_ops dev_sysfs_ops = { |
| 104 | .show = dev_attr_show, | 104 | .show = dev_attr_show, |
| 105 | .store = dev_attr_store, | 105 | .store = dev_attr_store, |
| 106 | }; | 106 | }; |
| @@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj, | |||
| 252 | return retval; | 252 | return retval; |
| 253 | } | 253 | } |
| 254 | 254 | ||
| 255 | static struct kset_uevent_ops device_uevent_ops = { | 255 | static const struct kset_uevent_ops device_uevent_ops = { |
| 256 | .filter = dev_uevent_filter, | 256 | .filter = dev_uevent_filter, |
| 257 | .name = dev_uevent_name, | 257 | .name = dev_uevent_name, |
| 258 | .uevent = dev_uevent, | 258 | .uevent = dev_uevent, |
| @@ -306,15 +306,10 @@ static ssize_t store_uevent(struct device *dev, struct device_attribute *attr, | |||
| 306 | { | 306 | { |
| 307 | enum kobject_action action; | 307 | enum kobject_action action; |
| 308 | 308 | ||
| 309 | if (kobject_action_type(buf, count, &action) == 0) { | 309 | if (kobject_action_type(buf, count, &action) == 0) |
| 310 | kobject_uevent(&dev->kobj, action); | 310 | kobject_uevent(&dev->kobj, action); |
| 311 | goto out; | 311 | else |
| 312 | } | 312 | dev_err(dev, "uevent: unknown action-string\n"); |
| 313 | |||
| 314 | dev_err(dev, "uevent: unsupported action-string; this will " | ||
| 315 | "be ignored in a future kernel version\n"); | ||
| 316 | kobject_uevent(&dev->kobj, KOBJ_ADD); | ||
| 317 | out: | ||
| 318 | return count; | 313 | return count; |
| 319 | } | 314 | } |
| 320 | 315 | ||
| @@ -607,6 +602,7 @@ static struct kobject *get_device_parent(struct device *dev, | |||
| 607 | int retval; | 602 | int retval; |
| 608 | 603 | ||
| 609 | if (dev->class) { | 604 | if (dev->class) { |
| 605 | static DEFINE_MUTEX(gdp_mutex); | ||
| 610 | struct kobject *kobj = NULL; | 606 | struct kobject *kobj = NULL; |
| 611 | struct kobject *parent_kobj; | 607 | struct kobject *parent_kobj; |
| 612 | struct kobject *k; | 608 | struct kobject *k; |
| @@ -623,6 +619,8 @@ static struct kobject *get_device_parent(struct device *dev, | |||
| 623 | else | 619 | else |
| 624 | parent_kobj = &parent->kobj; | 620 | parent_kobj = &parent->kobj; |
| 625 | 621 | ||
| 622 | mutex_lock(&gdp_mutex); | ||
| 623 | |||
| 626 | /* find our class-directory at the parent and reference it */ | 624 | /* find our class-directory at the parent and reference it */ |
| 627 | spin_lock(&dev->class->p->class_dirs.list_lock); | 625 | spin_lock(&dev->class->p->class_dirs.list_lock); |
| 628 | list_for_each_entry(k, &dev->class->p->class_dirs.list, entry) | 626 | list_for_each_entry(k, &dev->class->p->class_dirs.list, entry) |
| @@ -631,20 +629,26 @@ static struct kobject *get_device_parent(struct device *dev, | |||
| 631 | break; | 629 | break; |
| 632 | } | 630 | } |
| 633 | spin_unlock(&dev->class->p->class_dirs.list_lock); | 631 | spin_unlock(&dev->class->p->class_dirs.list_lock); |
| 634 | if (kobj) | 632 | if (kobj) { |
| 633 | mutex_unlock(&gdp_mutex); | ||
| 635 | return kobj; | 634 | return kobj; |
| 635 | } | ||
| 636 | 636 | ||
| 637 | /* or create a new class-directory at the parent device */ | 637 | /* or create a new class-directory at the parent device */ |
| 638 | k = kobject_create(); | 638 | k = kobject_create(); |
| 639 | if (!k) | 639 | if (!k) { |
| 640 | mutex_unlock(&gdp_mutex); | ||
| 640 | return NULL; | 641 | return NULL; |
| 642 | } | ||
| 641 | k->kset = &dev->class->p->class_dirs; | 643 | k->kset = &dev->class->p->class_dirs; |
| 642 | retval = kobject_add(k, parent_kobj, "%s", dev->class->name); | 644 | retval = kobject_add(k, parent_kobj, "%s", dev->class->name); |
| 643 | if (retval < 0) { | 645 | if (retval < 0) { |
| 646 | mutex_unlock(&gdp_mutex); | ||
| 644 | kobject_put(k); | 647 | kobject_put(k); |
| 645 | return NULL; | 648 | return NULL; |
| 646 | } | 649 | } |
| 647 | /* do not emit an uevent for this simple "glue" directory */ | 650 | /* do not emit an uevent for this simple "glue" directory */ |
| 651 | mutex_unlock(&gdp_mutex); | ||
| 648 | return k; | 652 | return k; |
| 649 | } | 653 | } |
| 650 | 654 | ||
| @@ -1574,22 +1578,16 @@ int device_rename(struct device *dev, char *new_name) | |||
| 1574 | if (old_class_name) { | 1578 | if (old_class_name) { |
| 1575 | new_class_name = make_class_name(dev->class->name, &dev->kobj); | 1579 | new_class_name = make_class_name(dev->class->name, &dev->kobj); |
| 1576 | if (new_class_name) { | 1580 | if (new_class_name) { |
| 1577 | error = sysfs_create_link_nowarn(&dev->parent->kobj, | 1581 | error = sysfs_rename_link(&dev->parent->kobj, |
| 1578 | &dev->kobj, | 1582 | &dev->kobj, |
| 1579 | new_class_name); | 1583 | old_class_name, |
| 1580 | if (error) | 1584 | new_class_name); |
| 1581 | goto out; | ||
| 1582 | sysfs_remove_link(&dev->parent->kobj, old_class_name); | ||
| 1583 | } | 1585 | } |
| 1584 | } | 1586 | } |
| 1585 | #else | 1587 | #else |
| 1586 | if (dev->class) { | 1588 | if (dev->class) { |
| 1587 | error = sysfs_create_link_nowarn(&dev->class->p->class_subsys.kobj, | 1589 | error = sysfs_rename_link(&dev->class->p->class_subsys.kobj, |
| 1588 | &dev->kobj, dev_name(dev)); | 1590 | &dev->kobj, old_device_name, new_name); |
| 1589 | if (error) | ||
| 1590 | goto out; | ||
| 1591 | sysfs_remove_link(&dev->class->p->class_subsys.kobj, | ||
| 1592 | old_device_name); | ||
| 1593 | } | 1591 | } |
| 1594 | #endif | 1592 | #endif |
| 1595 | 1593 | ||
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 958bd1540c30..7036e8e96ab8 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c | |||
| @@ -13,8 +13,11 @@ | |||
| 13 | 13 | ||
| 14 | #include "base.h" | 14 | #include "base.h" |
| 15 | 15 | ||
| 16 | static struct sysdev_class_attribute *cpu_sysdev_class_attrs[]; | ||
| 17 | |||
| 16 | struct sysdev_class cpu_sysdev_class = { | 18 | struct sysdev_class cpu_sysdev_class = { |
| 17 | .name = "cpu", | 19 | .name = "cpu", |
| 20 | .attrs = cpu_sysdev_class_attrs, | ||
| 18 | }; | 21 | }; |
| 19 | EXPORT_SYMBOL(cpu_sysdev_class); | 22 | EXPORT_SYMBOL(cpu_sysdev_class); |
| 20 | 23 | ||
| @@ -76,34 +79,24 @@ void unregister_cpu(struct cpu *cpu) | |||
| 76 | } | 79 | } |
| 77 | 80 | ||
| 78 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | 81 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE |
| 79 | static ssize_t cpu_probe_store(struct class *class, const char *buf, | 82 | static ssize_t cpu_probe_store(struct sys_device *dev, |
| 83 | struct sysdev_attribute *attr, | ||
| 84 | const char *buf, | ||
| 80 | size_t count) | 85 | size_t count) |
| 81 | { | 86 | { |
| 82 | return arch_cpu_probe(buf, count); | 87 | return arch_cpu_probe(buf, count); |
| 83 | } | 88 | } |
| 84 | 89 | ||
| 85 | static ssize_t cpu_release_store(struct class *class, const char *buf, | 90 | static ssize_t cpu_release_store(struct sys_device *dev, |
| 91 | struct sysdev_attribute *attr, | ||
| 92 | const char *buf, | ||
| 86 | size_t count) | 93 | size_t count) |
| 87 | { | 94 | { |
| 88 | return arch_cpu_release(buf, count); | 95 | return arch_cpu_release(buf, count); |
| 89 | } | 96 | } |
| 90 | 97 | ||
| 91 | static CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); | 98 | static SYSDEV_ATTR(probe, S_IWUSR, NULL, cpu_probe_store); |
| 92 | static CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store); | 99 | static SYSDEV_ATTR(release, S_IWUSR, NULL, cpu_release_store); |
| 93 | |||
| 94 | int __init cpu_probe_release_init(void) | ||
| 95 | { | ||
| 96 | int rc; | ||
| 97 | |||
| 98 | rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
| 99 | &class_attr_probe.attr); | ||
| 100 | if (!rc) | ||
| 101 | rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
| 102 | &class_attr_release.attr); | ||
| 103 | |||
| 104 | return rc; | ||
| 105 | } | ||
| 106 | device_initcall(cpu_probe_release_init); | ||
| 107 | #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ | 100 | #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */ |
| 108 | 101 | ||
| 109 | #else /* ... !CONFIG_HOTPLUG_CPU */ | 102 | #else /* ... !CONFIG_HOTPLUG_CPU */ |
| @@ -141,31 +134,39 @@ static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL); | |||
| 141 | /* | 134 | /* |
| 142 | * Print cpu online, possible, present, and system maps | 135 | * Print cpu online, possible, present, and system maps |
| 143 | */ | 136 | */ |
| 144 | static ssize_t print_cpus_map(char *buf, const struct cpumask *map) | 137 | |
| 138 | struct cpu_attr { | ||
| 139 | struct sysdev_class_attribute attr; | ||
| 140 | const struct cpumask *const * const map; | ||
| 141 | }; | ||
| 142 | |||
| 143 | static ssize_t show_cpus_attr(struct sysdev_class *class, | ||
| 144 | struct sysdev_class_attribute *attr, | ||
| 145 | char *buf) | ||
| 145 | { | 146 | { |
| 146 | int n = cpulist_scnprintf(buf, PAGE_SIZE-2, map); | 147 | struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr); |
| 148 | int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map)); | ||
| 147 | 149 | ||
| 148 | buf[n++] = '\n'; | 150 | buf[n++] = '\n'; |
| 149 | buf[n] = '\0'; | 151 | buf[n] = '\0'; |
| 150 | return n; | 152 | return n; |
| 151 | } | 153 | } |
| 152 | 154 | ||
| 153 | #define print_cpus_func(type) \ | 155 | #define _CPU_ATTR(name, map) \ |
| 154 | static ssize_t print_cpus_##type(struct sysdev_class *class, char *buf) \ | 156 | { _SYSDEV_CLASS_ATTR(name, 0444, show_cpus_attr, NULL), map } |
| 155 | { \ | ||
| 156 | return print_cpus_map(buf, cpu_##type##_mask); \ | ||
| 157 | } \ | ||
| 158 | static struct sysdev_class_attribute attr_##type##_map = \ | ||
| 159 | _SYSDEV_CLASS_ATTR(type, 0444, print_cpus_##type, NULL) | ||
| 160 | 157 | ||
| 161 | print_cpus_func(online); | 158 | /* Keep in sync with cpu_sysdev_class_attrs */ |
| 162 | print_cpus_func(possible); | 159 | static struct cpu_attr cpu_attrs[] = { |
| 163 | print_cpus_func(present); | 160 | _CPU_ATTR(online, &cpu_online_mask), |
| 161 | _CPU_ATTR(possible, &cpu_possible_mask), | ||
| 162 | _CPU_ATTR(present, &cpu_present_mask), | ||
| 163 | }; | ||
| 164 | 164 | ||
| 165 | /* | 165 | /* |
| 166 | * Print values for NR_CPUS and offlined cpus | 166 | * Print values for NR_CPUS and offlined cpus |
| 167 | */ | 167 | */ |
| 168 | static ssize_t print_cpus_kernel_max(struct sysdev_class *class, char *buf) | 168 | static ssize_t print_cpus_kernel_max(struct sysdev_class *class, |
| 169 | struct sysdev_class_attribute *attr, char *buf) | ||
| 169 | { | 170 | { |
| 170 | int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); | 171 | int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1); |
| 171 | return n; | 172 | return n; |
| @@ -175,7 +176,8 @@ static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL); | |||
| 175 | /* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */ | 176 | /* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */ |
| 176 | unsigned int total_cpus; | 177 | unsigned int total_cpus; |
| 177 | 178 | ||
| 178 | static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf) | 179 | static ssize_t print_cpus_offline(struct sysdev_class *class, |
| 180 | struct sysdev_class_attribute *attr, char *buf) | ||
| 179 | { | 181 | { |
| 180 | int n = 0, len = PAGE_SIZE-2; | 182 | int n = 0, len = PAGE_SIZE-2; |
| 181 | cpumask_var_t offline; | 183 | cpumask_var_t offline; |
| @@ -204,29 +206,6 @@ static ssize_t print_cpus_offline(struct sysdev_class *class, char *buf) | |||
| 204 | } | 206 | } |
| 205 | static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL); | 207 | static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL); |
| 206 | 208 | ||
| 207 | static struct sysdev_class_attribute *cpu_state_attr[] = { | ||
| 208 | &attr_online_map, | ||
| 209 | &attr_possible_map, | ||
| 210 | &attr_present_map, | ||
| 211 | &attr_kernel_max, | ||
| 212 | &attr_offline, | ||
| 213 | }; | ||
| 214 | |||
| 215 | static int cpu_states_init(void) | ||
| 216 | { | ||
| 217 | int i; | ||
| 218 | int err = 0; | ||
| 219 | |||
| 220 | for (i = 0; i < ARRAY_SIZE(cpu_state_attr); i++) { | ||
| 221 | int ret; | ||
| 222 | ret = sysdev_class_create_file(&cpu_sysdev_class, | ||
| 223 | cpu_state_attr[i]); | ||
| 224 | if (!err) | ||
| 225 | err = ret; | ||
| 226 | } | ||
| 227 | return err; | ||
| 228 | } | ||
| 229 | |||
| 230 | /* | 209 | /* |
| 231 | * register_cpu - Setup a sysfs device for a CPU. | 210 | * register_cpu - Setup a sysfs device for a CPU. |
| 232 | * @cpu - cpu->hotpluggable field set to 1 will generate a control file in | 211 | * @cpu - cpu->hotpluggable field set to 1 will generate a control file in |
| @@ -272,9 +251,6 @@ int __init cpu_dev_init(void) | |||
| 272 | int err; | 251 | int err; |
| 273 | 252 | ||
| 274 | err = sysdev_class_register(&cpu_sysdev_class); | 253 | err = sysdev_class_register(&cpu_sysdev_class); |
| 275 | if (!err) | ||
| 276 | err = cpu_states_init(); | ||
| 277 | |||
| 278 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) | 254 | #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) |
| 279 | if (!err) | 255 | if (!err) |
| 280 | err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class); | 256 | err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class); |
| @@ -282,3 +258,16 @@ int __init cpu_dev_init(void) | |||
| 282 | 258 | ||
| 283 | return err; | 259 | return err; |
| 284 | } | 260 | } |
| 261 | |||
| 262 | static struct sysdev_class_attribute *cpu_sysdev_class_attrs[] = { | ||
| 263 | #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE | ||
| 264 | &attr_probe, | ||
| 265 | &attr_release, | ||
| 266 | #endif | ||
| 267 | &cpu_attrs[0].attr, | ||
| 268 | &cpu_attrs[1].attr, | ||
| 269 | &cpu_attrs[2].attr, | ||
| 270 | &attr_kernel_max, | ||
| 271 | &attr_offline, | ||
| 272 | NULL | ||
| 273 | }; | ||
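
The cpu.c rework drops the open-coded sysdev_class_create_file() loops in favour of a NULL-terminated attribute array wired into sysdev_class.attrs, with per-attribute data recovered via container_of(). A sketch of that registration pattern for a made-up "foo" sysdev class, assuming the new show() prototype introduced here:

    #include <linux/sysdev.h>

    static ssize_t foo_version_show(struct sysdev_class *class,
                                    struct sysdev_class_attribute *attr,
                                    char *buf)
    {
        return sprintf(buf, "1\n");
    }
    static SYSDEV_CLASS_ATTR(version, 0444, foo_version_show, NULL);

    static struct sysdev_class_attribute *foo_sysdev_class_attrs[] = {
        &attr_version,
        NULL
    };

    static struct sysdev_class foo_sysdev_class = {
        .name  = "foo",
        .attrs = foo_sysdev_class_attrs,
    };

Registering the class with sysdev_class_register(&foo_sysdev_class) then creates every file in the array, which is what lets cpu.c drop its cpu_states_init() loop above.
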
diff --git a/drivers/base/dd.c b/drivers/base/dd.c index ee95c76bfd3d..c89291f8a16b 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c | |||
| @@ -85,7 +85,7 @@ static void driver_sysfs_remove(struct device *dev) | |||
| 85 | * for before calling this. (It is ok to call with no other effort | 85 | * for before calling this. (It is ok to call with no other effort |
| 86 | * from a driver's probe() method.) | 86 | * from a driver's probe() method.) |
| 87 | * | 87 | * |
| 88 | * This function must be called with @dev->sem held. | 88 | * This function must be called with the device lock held. |
| 89 | */ | 89 | */ |
| 90 | int device_bind_driver(struct device *dev) | 90 | int device_bind_driver(struct device *dev) |
| 91 | { | 91 | { |
| @@ -190,8 +190,8 @@ EXPORT_SYMBOL_GPL(wait_for_device_probe); | |||
| 190 | * This function returns -ENODEV if the device is not registered, | 190 | * This function returns -ENODEV if the device is not registered, |
| 191 | * 1 if the device is bound successfully and 0 otherwise. | 191 | * 1 if the device is bound successfully and 0 otherwise. |
| 192 | * | 192 | * |
| 193 | * This function must be called with @dev->sem held. When called for a | 193 | * This function must be called with @dev lock held. When called for a |
| 194 | * USB interface, @dev->parent->sem must be held as well. | 194 | * USB interface, @dev->parent lock must be held as well. |
| 195 | */ | 195 | */ |
| 196 | int driver_probe_device(struct device_driver *drv, struct device *dev) | 196 | int driver_probe_device(struct device_driver *drv, struct device *dev) |
| 197 | { | 197 | { |
| @@ -233,13 +233,13 @@ static int __device_attach(struct device_driver *drv, void *data) | |||
| 233 | * 0 if no matching driver was found; | 233 | * 0 if no matching driver was found; |
| 234 | * -ENODEV if the device is not registered. | 234 | * -ENODEV if the device is not registered. |
| 235 | * | 235 | * |
| 236 | * When called for a USB interface, @dev->parent->sem must be held. | 236 | * When called for a USB interface, @dev->parent lock must be held. |
| 237 | */ | 237 | */ |
| 238 | int device_attach(struct device *dev) | 238 | int device_attach(struct device *dev) |
| 239 | { | 239 | { |
| 240 | int ret = 0; | 240 | int ret = 0; |
| 241 | 241 | ||
| 242 | down(&dev->sem); | 242 | device_lock(dev); |
| 243 | if (dev->driver) { | 243 | if (dev->driver) { |
| 244 | ret = device_bind_driver(dev); | 244 | ret = device_bind_driver(dev); |
| 245 | if (ret == 0) | 245 | if (ret == 0) |
| @@ -253,7 +253,7 @@ int device_attach(struct device *dev) | |||
| 253 | ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); | 253 | ret = bus_for_each_drv(dev->bus, NULL, dev, __device_attach); |
| 254 | pm_runtime_put_sync(dev); | 254 | pm_runtime_put_sync(dev); |
| 255 | } | 255 | } |
| 256 | up(&dev->sem); | 256 | device_unlock(dev); |
| 257 | return ret; | 257 | return ret; |
| 258 | } | 258 | } |
| 259 | EXPORT_SYMBOL_GPL(device_attach); | 259 | EXPORT_SYMBOL_GPL(device_attach); |
| @@ -276,13 +276,13 @@ static int __driver_attach(struct device *dev, void *data) | |||
| 276 | return 0; | 276 | return 0; |
| 277 | 277 | ||
| 278 | if (dev->parent) /* Needed for USB */ | 278 | if (dev->parent) /* Needed for USB */ |
| 279 | down(&dev->parent->sem); | 279 | device_lock(dev->parent); |
| 280 | down(&dev->sem); | 280 | device_lock(dev); |
| 281 | if (!dev->driver) | 281 | if (!dev->driver) |
| 282 | driver_probe_device(drv, dev); | 282 | driver_probe_device(drv, dev); |
| 283 | up(&dev->sem); | 283 | device_unlock(dev); |
| 284 | if (dev->parent) | 284 | if (dev->parent) |
| 285 | up(&dev->parent->sem); | 285 | device_unlock(dev->parent); |
| 286 | 286 | ||
| 287 | return 0; | 287 | return 0; |
| 288 | } | 288 | } |
| @@ -303,8 +303,8 @@ int driver_attach(struct device_driver *drv) | |||
| 303 | EXPORT_SYMBOL_GPL(driver_attach); | 303 | EXPORT_SYMBOL_GPL(driver_attach); |
| 304 | 304 | ||
| 305 | /* | 305 | /* |
| 306 | * __device_release_driver() must be called with @dev->sem held. | 306 | * __device_release_driver() must be called with @dev lock held. |
| 307 | * When called for a USB interface, @dev->parent->sem must be held as well. | 307 | * When called for a USB interface, @dev->parent lock must be held as well. |
| 308 | */ | 308 | */ |
| 309 | static void __device_release_driver(struct device *dev) | 309 | static void __device_release_driver(struct device *dev) |
| 310 | { | 310 | { |
| @@ -343,7 +343,7 @@ static void __device_release_driver(struct device *dev) | |||
| 343 | * @dev: device. | 343 | * @dev: device. |
| 344 | * | 344 | * |
| 345 | * Manually detach device from driver. | 345 | * Manually detach device from driver. |
| 346 | * When called for a USB interface, @dev->parent->sem must be held. | 346 | * When called for a USB interface, @dev->parent lock must be held. |
| 347 | */ | 347 | */ |
| 348 | void device_release_driver(struct device *dev) | 348 | void device_release_driver(struct device *dev) |
| 349 | { | 349 | { |
| @@ -352,9 +352,9 @@ void device_release_driver(struct device *dev) | |||
| 352 | * within their ->remove callback for the same device, they | 352 | * within their ->remove callback for the same device, they |
| 353 | * will deadlock right here. | 353 | * will deadlock right here. |
| 354 | */ | 354 | */ |
| 355 | down(&dev->sem); | 355 | device_lock(dev); |
| 356 | __device_release_driver(dev); | 356 | __device_release_driver(dev); |
| 357 | up(&dev->sem); | 357 | device_unlock(dev); |
| 358 | } | 358 | } |
| 359 | EXPORT_SYMBOL_GPL(device_release_driver); | 359 | EXPORT_SYMBOL_GPL(device_release_driver); |
| 360 | 360 | ||
| @@ -381,13 +381,13 @@ void driver_detach(struct device_driver *drv) | |||
| 381 | spin_unlock(&drv->p->klist_devices.k_lock); | 381 | spin_unlock(&drv->p->klist_devices.k_lock); |
| 382 | 382 | ||
| 383 | if (dev->parent) /* Needed for USB */ | 383 | if (dev->parent) /* Needed for USB */ |
| 384 | down(&dev->parent->sem); | 384 | device_lock(dev->parent); |
| 385 | down(&dev->sem); | 385 | device_lock(dev); |
| 386 | if (dev->driver == drv) | 386 | if (dev->driver == drv) |
| 387 | __device_release_driver(dev); | 387 | __device_release_driver(dev); |
| 388 | up(&dev->sem); | 388 | device_unlock(dev); |
| 389 | if (dev->parent) | 389 | if (dev->parent) |
| 390 | up(&dev->parent->sem); | 390 | device_unlock(dev->parent); |
| 391 | put_device(dev); | 391 | put_device(dev); |
| 392 | } | 392 | } |
| 393 | } | 393 | } |
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 42ae452b36b0..dac478c6e460 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c | |||
| @@ -301,6 +301,19 @@ int devtmpfs_delete_node(struct device *dev) | |||
| 301 | if (dentry->d_inode) { | 301 | if (dentry->d_inode) { |
| 302 | err = vfs_getattr(nd.path.mnt, dentry, &stat); | 302 | err = vfs_getattr(nd.path.mnt, dentry, &stat); |
| 303 | if (!err && dev_mynode(dev, dentry->d_inode, &stat)) { | 303 | if (!err && dev_mynode(dev, dentry->d_inode, &stat)) { |
| 304 | struct iattr newattrs; | ||
| 305 | /* | ||
| 306 | * before unlinking this node, reset permissions | ||
| 307 | * of possible references like hardlinks | ||
| 308 | */ | ||
| 309 | newattrs.ia_uid = 0; | ||
| 310 | newattrs.ia_gid = 0; | ||
| 311 | newattrs.ia_mode = stat.mode & ~0777; | ||
| 312 | newattrs.ia_valid = | ||
| 313 | ATTR_UID|ATTR_GID|ATTR_MODE; | ||
| 314 | mutex_lock(&dentry->d_inode->i_mutex); | ||
| 315 | notify_change(dentry, &newattrs); | ||
| 316 | mutex_unlock(&dentry->d_inode->i_mutex); | ||
| 304 | err = vfs_unlink(nd.path.dentry->d_inode, | 317 | err = vfs_unlink(nd.path.dentry->d_inode, |
| 305 | dentry); | 318 | dentry); |
| 306 | if (!err || err == -ENOENT) | 319 | if (!err || err == -ENOENT) |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index a95024166b66..d0dc26ad5387 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
| @@ -19,7 +19,6 @@ | |||
| 19 | #include <linux/kthread.h> | 19 | #include <linux/kthread.h> |
| 20 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
| 21 | #include <linux/firmware.h> | 21 | #include <linux/firmware.h> |
| 22 | #include "base.h" | ||
| 23 | 22 | ||
| 24 | #define to_dev(obj) container_of(obj, struct device, kobj) | 23 | #define to_dev(obj) container_of(obj, struct device, kobj) |
| 25 | 24 | ||
| @@ -69,7 +68,9 @@ fw_load_abort(struct firmware_priv *fw_priv) | |||
| 69 | } | 68 | } |
| 70 | 69 | ||
| 71 | static ssize_t | 70 | static ssize_t |
| 72 | firmware_timeout_show(struct class *class, char *buf) | 71 | firmware_timeout_show(struct class *class, |
| 72 | struct class_attribute *attr, | ||
| 73 | char *buf) | ||
| 73 | { | 74 | { |
| 74 | return sprintf(buf, "%d\n", loading_timeout); | 75 | return sprintf(buf, "%d\n", loading_timeout); |
| 75 | } | 76 | } |
| @@ -87,7 +88,9 @@ firmware_timeout_show(struct class *class, char *buf) | |||
| 87 | * Note: zero means 'wait forever'. | 88 | * Note: zero means 'wait forever'. |
| 88 | **/ | 89 | **/ |
| 89 | static ssize_t | 90 | static ssize_t |
| 90 | firmware_timeout_store(struct class *class, const char *buf, size_t count) | 91 | firmware_timeout_store(struct class *class, |
| 92 | struct class_attribute *attr, | ||
| 93 | const char *buf, size_t count) | ||
| 91 | { | 94 | { |
| 92 | loading_timeout = simple_strtol(buf, NULL, 10); | 95 | loading_timeout = simple_strtol(buf, NULL, 10); |
| 93 | if (loading_timeout < 0) | 96 | if (loading_timeout < 0) |
| @@ -610,7 +613,7 @@ request_firmware_work_func(void *arg) | |||
| 610 | } | 613 | } |
| 611 | 614 | ||
| 612 | /** | 615 | /** |
| 613 | * request_firmware_nowait: asynchronous version of request_firmware | 616 | * request_firmware_nowait - asynchronous version of request_firmware |
| 614 | * @module: module requesting the firmware | 617 | * @module: module requesting the firmware |
| 615 | * @uevent: sends uevent to copy the firmware image if this flag | 618 | * @uevent: sends uevent to copy the firmware image if this flag |
| 616 | * is non-zero else the firmware copy must be done manually. | 619 | * is non-zero else the firmware copy must be done manually. |
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index bd025059711f..db0848e54cc6 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
| @@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev | |||
| 44 | return retval; | 44 | return retval; |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | static struct kset_uevent_ops memory_uevent_ops = { | 47 | static const struct kset_uevent_ops memory_uevent_ops = { |
| 48 | .name = memory_uevent_name, | 48 | .name = memory_uevent_name, |
| 49 | .uevent = memory_uevent, | 49 | .uevent = memory_uevent, |
| 50 | }; | 50 | }; |
| @@ -309,17 +309,18 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL); | |||
| 309 | * Block size attribute stuff | 309 | * Block size attribute stuff |
| 310 | */ | 310 | */ |
| 311 | static ssize_t | 311 | static ssize_t |
| 312 | print_block_size(struct class *class, char *buf) | 312 | print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr, |
| 313 | char *buf) | ||
| 313 | { | 314 | { |
| 314 | return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); | 315 | return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); |
| 315 | } | 316 | } |
| 316 | 317 | ||
| 317 | static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); | 318 | static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); |
| 318 | 319 | ||
| 319 | static int block_size_init(void) | 320 | static int block_size_init(void) |
| 320 | { | 321 | { |
| 321 | return sysfs_create_file(&memory_sysdev_class.kset.kobj, | 322 | return sysfs_create_file(&memory_sysdev_class.kset.kobj, |
| 322 | &class_attr_block_size_bytes.attr); | 323 | &attr_block_size_bytes.attr); |
| 323 | } | 324 | } |
| 324 | 325 | ||
| 325 | /* | 326 | /* |
| @@ -330,7 +331,8 @@ static int block_size_init(void) | |||
| 330 | */ | 331 | */ |
| 331 | #ifdef CONFIG_ARCH_MEMORY_PROBE | 332 | #ifdef CONFIG_ARCH_MEMORY_PROBE |
| 332 | static ssize_t | 333 | static ssize_t |
| 333 | memory_probe_store(struct class *class, const char *buf, size_t count) | 334 | memory_probe_store(struct class *class, struct class_attribute *attr, |
| 335 | const char *buf, size_t count) | ||
| 334 | { | 336 | { |
| 335 | u64 phys_addr; | 337 | u64 phys_addr; |
| 336 | int nid; | 338 | int nid; |
| @@ -367,7 +369,9 @@ static inline int memory_probe_init(void) | |||
| 367 | 369 | ||
| 368 | /* Soft offline a page */ | 370 | /* Soft offline a page */ |
| 369 | static ssize_t | 371 | static ssize_t |
| 370 | store_soft_offline_page(struct class *class, const char *buf, size_t count) | 372 | store_soft_offline_page(struct class *class, |
| 373 | struct class_attribute *attr, | ||
| 374 | const char *buf, size_t count) | ||
| 371 | { | 375 | { |
| 372 | int ret; | 376 | int ret; |
| 373 | u64 pfn; | 377 | u64 pfn; |
| @@ -384,7 +388,9 @@ store_soft_offline_page(struct class *class, const char *buf, size_t count) | |||
| 384 | 388 | ||
| 385 | /* Forcibly offline a page, including killing processes. */ | 389 | /* Forcibly offline a page, including killing processes. */ |
| 386 | static ssize_t | 390 | static ssize_t |
| 387 | store_hard_offline_page(struct class *class, const char *buf, size_t count) | 391 | store_hard_offline_page(struct class *class, |
| 392 | struct class_attribute *attr, | ||
| 393 | const char *buf, size_t count) | ||
| 388 | { | 394 | { |
| 389 | int ret; | 395 | int ret; |
| 390 | u64 pfn; | 396 | u64 pfn; |
| @@ -423,12 +429,16 @@ static inline int memory_fail_init(void) | |||
| 423 | * differentiation between which *physical* devices each | 429 | * differentiation between which *physical* devices each |
| 424 | * section belongs to... | 430 | * section belongs to... |
| 425 | */ | 431 | */ |
| 432 | int __weak arch_get_memory_phys_device(unsigned long start_pfn) | ||
| 433 | { | ||
| 434 | return 0; | ||
| 435 | } | ||
| 426 | 436 | ||
| 427 | static int add_memory_block(int nid, struct mem_section *section, | 437 | static int add_memory_block(int nid, struct mem_section *section, |
| 428 | unsigned long state, int phys_device, | 438 | unsigned long state, enum mem_add_context context) |
| 429 | enum mem_add_context context) | ||
| 430 | { | 439 | { |
| 431 | struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL); | 440 | struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL); |
| 441 | unsigned long start_pfn; | ||
| 432 | int ret = 0; | 442 | int ret = 0; |
| 433 | 443 | ||
| 434 | if (!mem) | 444 | if (!mem) |
| @@ -437,7 +447,8 @@ static int add_memory_block(int nid, struct mem_section *section, | |||
| 437 | mem->phys_index = __section_nr(section); | 447 | mem->phys_index = __section_nr(section); |
| 438 | mem->state = state; | 448 | mem->state = state; |
| 439 | mutex_init(&mem->state_mutex); | 449 | mutex_init(&mem->state_mutex); |
| 440 | mem->phys_device = phys_device; | 450 | start_pfn = section_nr_to_pfn(mem->phys_index); |
| 451 | mem->phys_device = arch_get_memory_phys_device(start_pfn); | ||
| 441 | 452 | ||
| 442 | ret = register_memory(mem, section); | 453 | ret = register_memory(mem, section); |
| 443 | if (!ret) | 454 | if (!ret) |
| @@ -509,7 +520,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, | |||
| 509 | */ | 520 | */ |
| 510 | int register_new_memory(int nid, struct mem_section *section) | 521 | int register_new_memory(int nid, struct mem_section *section) |
| 511 | { | 522 | { |
| 512 | return add_memory_block(nid, section, MEM_OFFLINE, 0, HOTPLUG); | 523 | return add_memory_block(nid, section, MEM_OFFLINE, HOTPLUG); |
| 513 | } | 524 | } |
| 514 | 525 | ||
| 515 | int unregister_memory_section(struct mem_section *section) | 526 | int unregister_memory_section(struct mem_section *section) |
| @@ -542,7 +553,7 @@ int __init memory_dev_init(void) | |||
| 542 | if (!present_section_nr(i)) | 553 | if (!present_section_nr(i)) |
| 543 | continue; | 554 | continue; |
| 544 | err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, | 555 | err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, |
| 545 | 0, BOOT); | 556 | BOOT); |
| 546 | if (!ret) | 557 | if (!ret) |
| 547 | ret = err; | 558 | ret = err; |
| 548 | } | 559 | } |
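
memory.c now derives phys_device from the new arch_get_memory_phys_device() hook, whose __weak default above returns 0, instead of having callers pass the value in. A hypothetical architecture override would simply supply a non-weak definition, for example:

    #include <linux/memory.h>
    #include <linux/mmzone.h>

    int arch_get_memory_phys_device(unsigned long start_pfn)
    {
        /*
         * Made-up mapping for illustration only; a real architecture
         * would consult its firmware or platform description here.
         */
        return pfn_to_section_nr(start_pfn);
    }
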
diff --git a/drivers/base/node.c b/drivers/base/node.c index 70122791683d..ad43185ec15a 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
| @@ -16,8 +16,11 @@ | |||
| 16 | #include <linux/device.h> | 16 | #include <linux/device.h> |
| 17 | #include <linux/swap.h> | 17 | #include <linux/swap.h> |
| 18 | 18 | ||
| 19 | static struct sysdev_class_attribute *node_state_attrs[]; | ||
| 20 | |||
| 19 | static struct sysdev_class node_class = { | 21 | static struct sysdev_class node_class = { |
| 20 | .name = "node", | 22 | .name = "node", |
| 23 | .attrs = node_state_attrs, | ||
| 21 | }; | 24 | }; |
| 22 | 25 | ||
| 23 | 26 | ||
| @@ -544,76 +547,52 @@ static ssize_t print_nodes_state(enum node_states state, char *buf) | |||
| 544 | return n; | 547 | return n; |
| 545 | } | 548 | } |
| 546 | 549 | ||
| 547 | static ssize_t print_nodes_possible(struct sysdev_class *class, char *buf) | 550 | struct node_attr { |
| 548 | { | 551 | struct sysdev_class_attribute attr; |
| 549 | return print_nodes_state(N_POSSIBLE, buf); | 552 | enum node_states state; |
| 550 | } | 553 | }; |
| 551 | |||
| 552 | static ssize_t print_nodes_online(struct sysdev_class *class, char *buf) | ||
| 553 | { | ||
| 554 | return print_nodes_state(N_ONLINE, buf); | ||
| 555 | } | ||
| 556 | |||
| 557 | static ssize_t print_nodes_has_normal_memory(struct sysdev_class *class, | ||
| 558 | char *buf) | ||
| 559 | { | ||
| 560 | return print_nodes_state(N_NORMAL_MEMORY, buf); | ||
| 561 | } | ||
| 562 | 554 | ||
| 563 | static ssize_t print_nodes_has_cpu(struct sysdev_class *class, char *buf) | 555 | static ssize_t show_node_state(struct sysdev_class *class, |
| 556 | struct sysdev_class_attribute *attr, char *buf) | ||
| 564 | { | 557 | { |
| 565 | return print_nodes_state(N_CPU, buf); | 558 | struct node_attr *na = container_of(attr, struct node_attr, attr); |
| 559 | return print_nodes_state(na->state, buf); | ||
| 566 | } | 560 | } |
| 567 | 561 | ||
| 568 | static SYSDEV_CLASS_ATTR(possible, 0444, print_nodes_possible, NULL); | 562 | #define _NODE_ATTR(name, state) \ |
| 569 | static SYSDEV_CLASS_ATTR(online, 0444, print_nodes_online, NULL); | 563 | { _SYSDEV_CLASS_ATTR(name, 0444, show_node_state, NULL), state } |
| 570 | static SYSDEV_CLASS_ATTR(has_normal_memory, 0444, print_nodes_has_normal_memory, | ||
| 571 | NULL); | ||
| 572 | static SYSDEV_CLASS_ATTR(has_cpu, 0444, print_nodes_has_cpu, NULL); | ||
| 573 | 564 | ||
| 565 | static struct node_attr node_state_attr[] = { | ||
| 566 | _NODE_ATTR(possible, N_POSSIBLE), | ||
| 567 | _NODE_ATTR(online, N_ONLINE), | ||
| 568 | _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY), | ||
| 569 | _NODE_ATTR(has_cpu, N_CPU), | ||
| 574 | #ifdef CONFIG_HIGHMEM | 570 | #ifdef CONFIG_HIGHMEM |
| 575 | static ssize_t print_nodes_has_high_memory(struct sysdev_class *class, | 571 | _NODE_ATTR(has_high_memory, N_HIGH_MEMORY), |
| 576 | char *buf) | ||
| 577 | { | ||
| 578 | return print_nodes_state(N_HIGH_MEMORY, buf); | ||
| 579 | } | ||
| 580 | |||
| 581 | static SYSDEV_CLASS_ATTR(has_high_memory, 0444, print_nodes_has_high_memory, | ||
| 582 | NULL); | ||
| 583 | #endif | 572 | #endif |
| 573 | }; | ||
| 584 | 574 | ||
| 585 | struct sysdev_class_attribute *node_state_attr[] = { | 575 | static struct sysdev_class_attribute *node_state_attrs[] = { |
| 586 | &attr_possible, | 576 | &node_state_attr[0].attr, |
| 587 | &attr_online, | 577 | &node_state_attr[1].attr, |
| 588 | &attr_has_normal_memory, | 578 | &node_state_attr[2].attr, |
| 579 | &node_state_attr[3].attr, | ||
| 589 | #ifdef CONFIG_HIGHMEM | 580 | #ifdef CONFIG_HIGHMEM |
| 590 | &attr_has_high_memory, | 581 | &node_state_attr[4].attr, |
| 591 | #endif | 582 | #endif |
| 592 | &attr_has_cpu, | 583 | NULL |
| 593 | }; | 584 | }; |
| 594 | 585 | ||
| 595 | static int node_states_init(void) | ||
| 596 | { | ||
| 597 | int i; | ||
| 598 | int err = 0; | ||
| 599 | |||
| 600 | for (i = 0; i < NR_NODE_STATES; i++) { | ||
| 601 | int ret; | ||
| 602 | ret = sysdev_class_create_file(&node_class, node_state_attr[i]); | ||
| 603 | if (!err) | ||
| 604 | err = ret; | ||
| 605 | } | ||
| 606 | return err; | ||
| 607 | } | ||
| 608 | |||
| 609 | #define NODE_CALLBACK_PRI 2 /* lower than SLAB */ | 586 | #define NODE_CALLBACK_PRI 2 /* lower than SLAB */ |
| 610 | static int __init register_node_type(void) | 587 | static int __init register_node_type(void) |
| 611 | { | 588 | { |
| 612 | int ret; | 589 | int ret; |
| 613 | 590 | ||
| 591 | BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES); | ||
| 592 | BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES); | ||
| 593 | |||
| 614 | ret = sysdev_class_register(&node_class); | 594 | ret = sysdev_class_register(&node_class); |
| 615 | if (!ret) { | 595 | if (!ret) { |
| 616 | ret = node_states_init(); | ||
| 617 | hotplug_memory_notifier(node_memory_callback, | 596 | hotplug_memory_notifier(node_memory_callback, |
| 618 | NODE_CALLBACK_PRI); | 597 | NODE_CALLBACK_PRI); |
| 619 | } | 598 | } |
diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 58efaf2f1259..1ba9d617d241 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c | |||
| @@ -128,7 +128,7 @@ struct platform_object { | |||
| 128 | }; | 128 | }; |
| 129 | 129 | ||
| 130 | /** | 130 | /** |
| 131 | * platform_device_put | 131 | * platform_device_put - destroy a platform device |
| 132 | * @pdev: platform device to free | 132 | * @pdev: platform device to free |
| 133 | * | 133 | * |
| 134 | * Free all memory associated with a platform device. This function must | 134 | * Free all memory associated with a platform device. This function must |
| @@ -152,7 +152,7 @@ static void platform_device_release(struct device *dev) | |||
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | /** | 154 | /** |
| 155 | * platform_device_alloc | 155 | * platform_device_alloc - create a platform device |
| 156 | * @name: base name of the device we're adding | 156 | * @name: base name of the device we're adding |
| 157 | * @id: instance id | 157 | * @id: instance id |
| 158 | * | 158 | * |
| @@ -177,7 +177,7 @@ struct platform_device *platform_device_alloc(const char *name, int id) | |||
| 177 | EXPORT_SYMBOL_GPL(platform_device_alloc); | 177 | EXPORT_SYMBOL_GPL(platform_device_alloc); |
| 178 | 178 | ||
| 179 | /** | 179 | /** |
| 180 | * platform_device_add_resources | 180 | * platform_device_add_resources - add resources to a platform device |
| 181 | * @pdev: platform device allocated by platform_device_alloc to add resources to | 181 | * @pdev: platform device allocated by platform_device_alloc to add resources to |
| 182 | * @res: set of resources that needs to be allocated for the device | 182 | * @res: set of resources that needs to be allocated for the device |
| 183 | * @num: number of resources | 183 | * @num: number of resources |
| @@ -202,7 +202,7 @@ int platform_device_add_resources(struct platform_device *pdev, | |||
| 202 | EXPORT_SYMBOL_GPL(platform_device_add_resources); | 202 | EXPORT_SYMBOL_GPL(platform_device_add_resources); |
| 203 | 203 | ||
| 204 | /** | 204 | /** |
| 205 | * platform_device_add_data | 205 | * platform_device_add_data - add platform-specific data to a platform device |
| 206 | * @pdev: platform device allocated by platform_device_alloc to add resources to | 206 | * @pdev: platform device allocated by platform_device_alloc to add resources to |
| 207 | * @data: platform specific data for this platform device | 207 | * @data: platform specific data for this platform device |
| 208 | * @size: size of platform specific data | 208 | * @size: size of platform specific data |
| @@ -344,7 +344,7 @@ void platform_device_unregister(struct platform_device *pdev) | |||
| 344 | EXPORT_SYMBOL_GPL(platform_device_unregister); | 344 | EXPORT_SYMBOL_GPL(platform_device_unregister); |
| 345 | 345 | ||
| 346 | /** | 346 | /** |
| 347 | * platform_device_register_simple | 347 | * platform_device_register_simple - add a platform-level device and its resources |
| 348 | * @name: base name of the device we're adding | 348 | * @name: base name of the device we're adding |
| 349 | * @id: instance id | 349 | * @id: instance id |
| 350 | * @res: set of resources that needs to be allocated for the device | 350 | * @res: set of resources that needs to be allocated for the device |
| @@ -396,7 +396,7 @@ error: | |||
| 396 | EXPORT_SYMBOL_GPL(platform_device_register_simple); | 396 | EXPORT_SYMBOL_GPL(platform_device_register_simple); |
| 397 | 397 | ||
| 398 | /** | 398 | /** |
| 399 | * platform_device_register_data | 399 | * platform_device_register_data - add a platform-level device with platform-specific data |
| 400 | * @parent: parent device for the device we're adding | 400 | * @parent: parent device for the device we're adding |
| 401 | * @name: base name of the device we're adding | 401 | * @name: base name of the device we're adding |
| 402 | * @id: instance id | 402 | * @id: instance id |
| @@ -473,7 +473,7 @@ static void platform_drv_shutdown(struct device *_dev) | |||
| 473 | } | 473 | } |
| 474 | 474 | ||
| 475 | /** | 475 | /** |
| 476 | * platform_driver_register | 476 | * platform_driver_register - register a driver for platform-level devices |
| 477 | * @drv: platform driver structure | 477 | * @drv: platform driver structure |
| 478 | */ | 478 | */ |
| 479 | int platform_driver_register(struct platform_driver *drv) | 479 | int platform_driver_register(struct platform_driver *drv) |
| @@ -491,7 +491,7 @@ int platform_driver_register(struct platform_driver *drv) | |||
| 491 | EXPORT_SYMBOL_GPL(platform_driver_register); | 491 | EXPORT_SYMBOL_GPL(platform_driver_register); |
| 492 | 492 | ||
| 493 | /** | 493 | /** |
| 494 | * platform_driver_unregister | 494 | * platform_driver_unregister - unregister a driver for platform-level devices |
| 495 | * @drv: platform driver structure | 495 | * @drv: platform driver structure |
| 496 | */ | 496 | */ |
| 497 | void platform_driver_unregister(struct platform_driver *drv) | 497 | void platform_driver_unregister(struct platform_driver *drv) |
| @@ -548,6 +548,64 @@ int __init_or_module platform_driver_probe(struct platform_driver *drv, | |||
| 548 | } | 548 | } |
| 549 | EXPORT_SYMBOL_GPL(platform_driver_probe); | 549 | EXPORT_SYMBOL_GPL(platform_driver_probe); |
| 550 | 550 | ||
| 551 | /** | ||
| 552 | * platform_create_bundle - register driver and create corresponding device | ||
| 553 | * @driver: platform driver structure | ||
| 554 | * @probe: the driver probe routine, probably from an __init section | ||
| 555 | * @res: set of resources that needs to be allocated for the device | ||
| 556 | * @n_res: number of resources | ||
| 557 | * @data: platform specific data for this platform device | ||
| 558 | * @size: size of platform specific data | ||
| 559 | * | ||
| 560 | * Use this in legacy-style modules that probe hardware directly and | ||
| 561 | * register a single platform device and corresponding platform driver. | ||
| 562 | */ | ||
| 563 | struct platform_device * __init_or_module platform_create_bundle( | ||
| 564 | struct platform_driver *driver, | ||
| 565 | int (*probe)(struct platform_device *), | ||
| 566 | struct resource *res, unsigned int n_res, | ||
| 567 | const void *data, size_t size) | ||
| 568 | { | ||
| 569 | struct platform_device *pdev; | ||
| 570 | int error; | ||
| 571 | |||
| 572 | pdev = platform_device_alloc(driver->driver.name, -1); | ||
| 573 | if (!pdev) { | ||
| 574 | error = -ENOMEM; | ||
| 575 | goto err_out; | ||
| 576 | } | ||
| 577 | |||
| 578 | if (res) { | ||
| 579 | error = platform_device_add_resources(pdev, res, n_res); | ||
| 580 | if (error) | ||
| 581 | goto err_pdev_put; | ||
| 582 | } | ||
| 583 | |||
| 584 | if (data) { | ||
| 585 | error = platform_device_add_data(pdev, data, size); | ||
| 586 | if (error) | ||
| 587 | goto err_pdev_put; | ||
| 588 | } | ||
| 589 | |||
| 590 | error = platform_device_add(pdev); | ||
| 591 | if (error) | ||
| 592 | goto err_pdev_put; | ||
| 593 | |||
| 594 | error = platform_driver_probe(driver, probe); | ||
| 595 | if (error) | ||
| 596 | goto err_pdev_del; | ||
| 597 | |||
| 598 | return pdev; | ||
| 599 | |||
| 600 | err_pdev_del: | ||
| 601 | platform_device_del(pdev); | ||
| 602 | err_pdev_put: | ||
| 603 | platform_device_put(pdev); | ||
| 604 | err_out: | ||
| 605 | return ERR_PTR(error); | ||
| 606 | } | ||
| 607 | EXPORT_SYMBOL_GPL(platform_create_bundle); | ||
| 608 | |||
| 551 | /* modalias support enables more hands-off userspace setup: | 609 | /* modalias support enables more hands-off userspace setup: |
| 552 | * (a) environment variable lets new-style hotplug events work once system is | 610 | * (a) environment variable lets new-style hotplug events work once system is |
| 553 | * fully running: "modprobe $MODALIAS" | 611 | * fully running: "modprobe $MODALIAS" |
| @@ -578,7 +636,7 @@ static int platform_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
| 578 | } | 636 | } |
| 579 | 637 | ||
| 580 | static const struct platform_device_id *platform_match_id( | 638 | static const struct platform_device_id *platform_match_id( |
| 581 | struct platform_device_id *id, | 639 | const struct platform_device_id *id, |
| 582 | struct platform_device *pdev) | 640 | struct platform_device *pdev) |
| 583 | { | 641 | { |
| 584 | while (id->name[0]) { | 642 | while (id->name[0]) { |
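
platform_create_bundle() folds platform_device_alloc/add and platform_driver_probe into a single call for legacy-style modules. A sketch of how such a module might use it; all names and the empty probe body are illustrative, not from the patch:

    #include <linux/err.h>
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/platform_device.h>

    static struct platform_device *example_pdev;

    static int __init example_probe(struct platform_device *pdev)
    {
        return 0;    /* set up the hardware probed for earlier */
    }

    static struct platform_driver example_driver = {
        .driver = {
            .name  = "example",
            .owner = THIS_MODULE,
        },
    };

    static int __init example_init(void)
    {
        example_pdev = platform_create_bundle(&example_driver, example_probe,
                                              NULL, 0, NULL, 0);
        return IS_ERR(example_pdev) ? PTR_ERR(example_pdev) : 0;
    }
    module_init(example_init);

Teardown would mirror it with platform_device_unregister(example_pdev) followed by platform_driver_unregister(&example_driver) in the module exit path.
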
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index 3ce3519e8f30..89de75325cea 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile | |||
| @@ -1,6 +1,7 @@ | |||
| 1 | obj-$(CONFIG_PM) += sysfs.o | 1 | obj-$(CONFIG_PM) += sysfs.o |
| 2 | obj-$(CONFIG_PM_SLEEP) += main.o | 2 | obj-$(CONFIG_PM_SLEEP) += main.o |
| 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o | 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o |
| 4 | obj-$(CONFIG_PM_OPS) += generic_ops.o | ||
| 4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o | 5 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o |
| 5 | 6 | ||
| 6 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG | 7 | ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG |
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c new file mode 100644 index 000000000000..4b29d4981253 --- /dev/null +++ b/drivers/base/power/generic_ops.c | |||
| @@ -0,0 +1,233 @@ | |||
| 1 | /* | ||
| 2 | * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems | ||
| 3 | * | ||
| 4 | * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
| 5 | * | ||
| 6 | * This file is released under the GPLv2. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/pm.h> | ||
| 10 | #include <linux/pm_runtime.h> | ||
| 11 | |||
| 12 | #ifdef CONFIG_PM_RUNTIME | ||
| 13 | /** | ||
| 14 | * pm_generic_runtime_idle - Generic runtime idle callback for subsystems. | ||
| 15 | * @dev: Device to handle. | ||
| 16 | * | ||
| 17 | * If PM operations are defined for the @dev's driver and they include | ||
| 18 | * ->runtime_idle(), execute it and return its error code, if nonzero. | ||
| 19 | * Otherwise, execute pm_runtime_suspend() for the device and return 0. | ||
| 20 | */ | ||
| 21 | int pm_generic_runtime_idle(struct device *dev) | ||
| 22 | { | ||
| 23 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
| 24 | |||
| 25 | if (pm && pm->runtime_idle) { | ||
| 26 | int ret = pm->runtime_idle(dev); | ||
| 27 | if (ret) | ||
| 28 | return ret; | ||
| 29 | } | ||
| 30 | |||
| 31 | pm_runtime_suspend(dev); | ||
| 32 | return 0; | ||
| 33 | } | ||
| 34 | EXPORT_SYMBOL_GPL(pm_generic_runtime_idle); | ||
| 35 | |||
| 36 | /** | ||
| 37 | * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems. | ||
| 38 | * @dev: Device to suspend. | ||
| 39 | * | ||
| 40 | * If PM operations are defined for the @dev's driver and they include | ||
| 41 | * ->runtime_suspend(), execute it and return its error code. Otherwise, | ||
| 42 | * return -EINVAL. | ||
| 43 | */ | ||
| 44 | int pm_generic_runtime_suspend(struct device *dev) | ||
| 45 | { | ||
| 46 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
| 47 | int ret; | ||
| 48 | |||
| 49 | ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : -EINVAL; | ||
| 50 | |||
| 51 | return ret; | ||
| 52 | } | ||
| 53 | EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend); | ||
| 54 | |||
| 55 | /** | ||
| 56 | * pm_generic_runtime_resume - Generic runtime resume callback for subsystems. | ||
| 57 | * @dev: Device to resume. | ||
| 58 | * | ||
| 59 | * If PM operations are defined for the @dev's driver and they include | ||
| 60 | * ->runtime_resume(), execute it and return its error code. Otherwise, | ||
| 61 | * return -EINVAL. | ||
| 62 | */ | ||
| 63 | int pm_generic_runtime_resume(struct device *dev) | ||
| 64 | { | ||
| 65 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
| 66 | int ret; | ||
| 67 | |||
| 68 | ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : -EINVAL; | ||
| 69 | |||
| 70 | return ret; | ||
| 71 | } | ||
| 72 | EXPORT_SYMBOL_GPL(pm_generic_runtime_resume); | ||
| 73 | #endif /* CONFIG_PM_RUNTIME */ | ||
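These generic callbacks only forward to whatever the driver itself provides, so on a subsystem that uses them a driver's dev_pm_ops can stay minimal; a sketch (the foo_* names are illustrative):

	static int foo_runtime_suspend(struct device *dev)
	{
		/* quiesce the hardware, gate clocks, etc. */
		return 0;
	}

	static int foo_runtime_resume(struct device *dev)
	{
		/* ungate clocks and restore context */
		return 0;
	}

	static const struct dev_pm_ops foo_pm_ops = {
		.runtime_suspend	= foo_runtime_suspend,
		.runtime_resume		= foo_runtime_resume,
		/*
		 * No .runtime_idle here: pm_generic_runtime_idle() then falls
		 * through to pm_runtime_suspend() for the device.
		 */
	};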
| 74 | |||
| 75 | #ifdef CONFIG_PM_SLEEP | ||
| 76 | /** | ||
| 77 | * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback. | ||
| 78 | * @dev: Device to handle. | ||
| 79 | * @event: PM transition of the system under way. | ||
| 80 | * | ||
| 81 | * If the device has not been suspended at run time, execute the | ||
| 82 | * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and | ||
| 83 | * return its error code. Otherwise, return zero. | ||
| 84 | */ | ||
| 85 | static int __pm_generic_call(struct device *dev, int event) | ||
| 86 | { | ||
| 87 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
| 88 | int (*callback)(struct device *); | ||
| 89 | |||
| 90 | if (!pm || pm_runtime_suspended(dev)) | ||
| 91 | return 0; | ||
| 92 | |||
| 93 | switch (event) { | ||
| 94 | case PM_EVENT_SUSPEND: | ||
| 95 | callback = pm->suspend; | ||
| 96 | break; | ||
| 97 | case PM_EVENT_FREEZE: | ||
| 98 | callback = pm->freeze; | ||
| 99 | break; | ||
| 100 | case PM_EVENT_HIBERNATE: | ||
| 101 | callback = pm->poweroff; | ||
| 102 | break; | ||
| 103 | case PM_EVENT_THAW: | ||
| 104 | callback = pm->thaw; | ||
| 105 | break; | ||
| 106 | default: | ||
| 107 | callback = NULL; | ||
| 108 | break; | ||
| 109 | } | ||
| 110 | |||
| 111 | return callback ? callback(dev) : 0; | ||
| 112 | } | ||
| 113 | |||
| 114 | /** | ||
| 115 | * pm_generic_suspend - Generic suspend callback for subsystems. | ||
| 116 | * @dev: Device to suspend. | ||
| 117 | */ | ||
| 118 | int pm_generic_suspend(struct device *dev) | ||
| 119 | { | ||
| 120 | return __pm_generic_call(dev, PM_EVENT_SUSPEND); | ||
| 121 | } | ||
| 122 | EXPORT_SYMBOL_GPL(pm_generic_suspend); | ||
| 123 | |||
| 124 | /** | ||
| 125 | * pm_generic_freeze - Generic freeze callback for subsystems. | ||
| 126 | * @dev: Device to freeze. | ||
| 127 | */ | ||
| 128 | int pm_generic_freeze(struct device *dev) | ||
| 129 | { | ||
| 130 | return __pm_generic_call(dev, PM_EVENT_FREEZE); | ||
| 131 | } | ||
| 132 | EXPORT_SYMBOL_GPL(pm_generic_freeze); | ||
| 133 | |||
| 134 | /** | ||
| 135 | * pm_generic_poweroff - Generic poweroff callback for subsystems. | ||
| 136 | * @dev: Device to handle. | ||
| 137 | */ | ||
| 138 | int pm_generic_poweroff(struct device *dev) | ||
| 139 | { | ||
| 140 | return __pm_generic_call(dev, PM_EVENT_HIBERNATE); | ||
| 141 | } | ||
| 142 | EXPORT_SYMBOL_GPL(pm_generic_poweroff); | ||
| 143 | |||
| 144 | /** | ||
| 145 | * pm_generic_thaw - Generic thaw callback for subsystems. | ||
| 146 | * @dev: Device to thaw. | ||
| 147 | */ | ||
| 148 | int pm_generic_thaw(struct device *dev) | ||
| 149 | { | ||
| 150 | return __pm_generic_call(dev, PM_EVENT_THAW); | ||
| 151 | } | ||
| 152 | EXPORT_SYMBOL_GPL(pm_generic_thaw); | ||
| 153 | |||
| 154 | /** | ||
| 155 | * __pm_generic_resume - Generic resume/restore callback for subsystems. | ||
| 156 | * @dev: Device to handle. | ||
| 157 | * @event: PM transition of the system under way. | ||
| 158 | * | ||
| 159 | * Execute the resume/restore callback provided by the @dev's driver, if | ||
| 160 | * defined. If it returns 0, change the device's runtime PM status to 'active'. | ||
| 161 | * Return the callback's error code. | ||
| 162 | */ | ||
| 163 | static int __pm_generic_resume(struct device *dev, int event) | ||
| 164 | { | ||
| 165 | const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; | ||
| 166 | int (*callback)(struct device *); | ||
| 167 | int ret; | ||
| 168 | |||
| 169 | if (!pm) | ||
| 170 | return 0; | ||
| 171 | |||
| 172 | switch (event) { | ||
| 173 | case PM_EVENT_RESUME: | ||
| 174 | callback = pm->resume; | ||
| 175 | break; | ||
| 176 | case PM_EVENT_RESTORE: | ||
| 177 | callback = pm->restore; | ||
| 178 | break; | ||
| 179 | default: | ||
| 180 | callback = NULL; | ||
| 181 | break; | ||
| 182 | } | ||
| 183 | |||
| 184 | if (!callback) | ||
| 185 | return 0; | ||
| 186 | |||
| 187 | ret = callback(dev); | ||
| 188 | if (!ret) { | ||
| 189 | pm_runtime_disable(dev); | ||
| 190 | pm_runtime_set_active(dev); | ||
| 191 | pm_runtime_enable(dev); | ||
| 192 | } | ||
| 193 | |||
| 194 | return ret; | ||
| 195 | } | ||
| 196 | |||
| 197 | /** | ||
| 198 | * pm_generic_resume - Generic resume callback for subsystems. | ||
| 199 | * @dev: Device to resume. | ||
| 200 | */ | ||
| 201 | int pm_generic_resume(struct device *dev) | ||
| 202 | { | ||
| 203 | return __pm_generic_resume(dev, PM_EVENT_RESUME); | ||
| 204 | } | ||
| 205 | EXPORT_SYMBOL_GPL(pm_generic_resume); | ||
| 206 | |||
| 207 | /** | ||
| 208 | * pm_generic_restore - Generic restore callback for subsystems. | ||
| 209 | * @dev: Device to restore. | ||
| 210 | */ | ||
| 211 | int pm_generic_restore(struct device *dev) | ||
| 212 | { | ||
| 213 | return __pm_generic_resume(dev, PM_EVENT_RESTORE); | ||
| 214 | } | ||
| 215 | EXPORT_SYMBOL_GPL(pm_generic_restore); | ||
| 216 | #endif /* CONFIG_PM_SLEEP */ | ||
| 217 | |||
| 218 | struct dev_pm_ops generic_subsys_pm_ops = { | ||
| 219 | #ifdef CONFIG_PM_SLEEP | ||
| 220 | .suspend = pm_generic_suspend, | ||
| 221 | .resume = pm_generic_resume, | ||
| 222 | .freeze = pm_generic_freeze, | ||
| 223 | .thaw = pm_generic_thaw, | ||
| 224 | .poweroff = pm_generic_poweroff, | ||
| 225 | .restore = pm_generic_restore, | ||
| 226 | #endif | ||
| 227 | #ifdef CONFIG_PM_RUNTIME | ||
| 228 | .runtime_suspend = pm_generic_runtime_suspend, | ||
| 229 | .runtime_resume = pm_generic_runtime_resume, | ||
| 230 | .runtime_idle = pm_generic_runtime_idle, | ||
| 231 | #endif | ||
| 232 | }; | ||
| 233 | EXPORT_SYMBOL_GPL(generic_subsys_pm_ops); | ||
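A bus type with no PM requirements of its own can now point at this structure instead of duplicating the callbacks; a sketch (foo_bus_type and foo_bus_match are illustrative, not part of this change):

	static int foo_bus_match(struct device *dev, struct device_driver *drv)
	{
		return !strcmp(dev_name(dev), drv->name);	/* trivial example */
	}

	struct bus_type foo_bus_type = {
		.name	= "foo",
		.match	= foo_bus_match,
		.pm	= &generic_subsys_pm_ops,
	};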
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index a5142bddef41..d477f4dc5e51 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/resume-trace.h> | 25 | #include <linux/resume-trace.h> |
| 26 | #include <linux/interrupt.h> | 26 | #include <linux/interrupt.h> |
| 27 | #include <linux/sched.h> | 27 | #include <linux/sched.h> |
| 28 | #include <linux/async.h> | ||
| 28 | 29 | ||
| 29 | #include "../base.h" | 30 | #include "../base.h" |
| 30 | #include "power.h" | 31 | #include "power.h" |
| @@ -34,14 +35,15 @@ | |||
| 34 | * because children are guaranteed to be discovered after parents, and | 35 | * because children are guaranteed to be discovered after parents, and |
| 35 | * are inserted at the back of the list on discovery. | 36 | * are inserted at the back of the list on discovery. |
| 36 | * | 37 | * |
| 37 | * Since device_pm_add() may be called with a device semaphore held, | 38 | * Since device_pm_add() may be called with a device lock held, |
| 38 | * we must never try to acquire a device semaphore while holding | 39 | * we must never try to acquire a device lock while holding |
| 39 | * dpm_list_mutex. | 40 | * dpm_list_mutex. |
| 40 | */ | 41 | */ |
| 41 | 42 | ||
| 42 | LIST_HEAD(dpm_list); | 43 | LIST_HEAD(dpm_list); |
| 43 | 44 | ||
| 44 | static DEFINE_MUTEX(dpm_list_mtx); | 45 | static DEFINE_MUTEX(dpm_list_mtx); |
| 46 | static pm_message_t pm_transition; | ||
| 45 | 47 | ||
| 46 | /* | 48 | /* |
| 47 | * Set once the preparation of devices for a PM transition has started, reset | 49 | * Set once the preparation of devices for a PM transition has started, reset |
| @@ -56,6 +58,7 @@ static bool transition_started; | |||
| 56 | void device_pm_init(struct device *dev) | 58 | void device_pm_init(struct device *dev) |
| 57 | { | 59 | { |
| 58 | dev->power.status = DPM_ON; | 60 | dev->power.status = DPM_ON; |
| 61 | init_completion(&dev->power.completion); | ||
| 59 | pm_runtime_init(dev); | 62 | pm_runtime_init(dev); |
| 60 | } | 63 | } |
| 61 | 64 | ||
| @@ -111,6 +114,7 @@ void device_pm_remove(struct device *dev) | |||
| 111 | pr_debug("PM: Removing info for %s:%s\n", | 114 | pr_debug("PM: Removing info for %s:%s\n", |
| 112 | dev->bus ? dev->bus->name : "No Bus", | 115 | dev->bus ? dev->bus->name : "No Bus", |
| 113 | kobject_name(&dev->kobj)); | 116 | kobject_name(&dev->kobj)); |
| 117 | complete_all(&dev->power.completion); | ||
| 114 | mutex_lock(&dpm_list_mtx); | 118 | mutex_lock(&dpm_list_mtx); |
| 115 | list_del_init(&dev->power.entry); | 119 | list_del_init(&dev->power.entry); |
| 116 | mutex_unlock(&dpm_list_mtx); | 120 | mutex_unlock(&dpm_list_mtx); |
| @@ -188,6 +192,31 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime, | |||
| 188 | } | 192 | } |
| 189 | 193 | ||
| 190 | /** | 194 | /** |
| 195 | * dpm_wait - Wait for a PM operation to complete. | ||
| 196 | * @dev: Device to wait for. | ||
| 197 | * @async: If unset, wait only if the device's power.async_suspend flag is set. | ||
| 198 | */ | ||
| 199 | static void dpm_wait(struct device *dev, bool async) | ||
| 200 | { | ||
| 201 | if (!dev) | ||
| 202 | return; | ||
| 203 | |||
| 204 | if (async || (pm_async_enabled && dev->power.async_suspend)) | ||
| 205 | wait_for_completion(&dev->power.completion); | ||
| 206 | } | ||
| 207 | |||
| 208 | static int dpm_wait_fn(struct device *dev, void *async_ptr) | ||
| 209 | { | ||
| 210 | dpm_wait(dev, *((bool *)async_ptr)); | ||
| 211 | return 0; | ||
| 212 | } | ||
| 213 | |||
| 214 | static void dpm_wait_for_children(struct device *dev, bool async) | ||
| 215 | { | ||
| 216 | device_for_each_child(dev, &async, dpm_wait_fn); | ||
| 217 | } | ||
| 218 | |||
| 219 | /** | ||
| 191 | * pm_op - Execute the PM operation appropriate for given PM event. | 220 | * pm_op - Execute the PM operation appropriate for given PM event. |
| 192 | * @dev: Device to handle. | 221 | * @dev: Device to handle. |
| 193 | * @ops: PM operations to choose from. | 222 | * @ops: PM operations to choose from. |
| @@ -271,8 +300,9 @@ static int pm_noirq_op(struct device *dev, | |||
| 271 | ktime_t calltime, delta, rettime; | 300 | ktime_t calltime, delta, rettime; |
| 272 | 301 | ||
| 273 | if (initcall_debug) { | 302 | if (initcall_debug) { |
| 274 | pr_info("calling %s_i+ @ %i\n", | 303 | pr_info("calling %s+ @ %i, parent: %s\n", |
| 275 | dev_name(dev), task_pid_nr(current)); | 304 | dev_name(dev), task_pid_nr(current), |
| 305 | dev->parent ? dev_name(dev->parent) : "none"); | ||
| 276 | calltime = ktime_get(); | 306 | calltime = ktime_get(); |
| 277 | } | 307 | } |
| 278 | 308 | ||
| @@ -468,15 +498,19 @@ static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) | |||
| 468 | * device_resume - Execute "resume" callbacks for given device. | 498 | * device_resume - Execute "resume" callbacks for given device. |
| 469 | * @dev: Device to handle. | 499 | * @dev: Device to handle. |
| 470 | * @state: PM transition of the system being carried out. | 500 | * @state: PM transition of the system being carried out. |
| 501 | * @async: If true, the device is being resumed asynchronously. | ||
| 471 | */ | 502 | */ |
| 472 | static int device_resume(struct device *dev, pm_message_t state) | 503 | static int device_resume(struct device *dev, pm_message_t state, bool async) |
| 473 | { | 504 | { |
| 474 | int error = 0; | 505 | int error = 0; |
| 475 | 506 | ||
| 476 | TRACE_DEVICE(dev); | 507 | TRACE_DEVICE(dev); |
| 477 | TRACE_RESUME(0); | 508 | TRACE_RESUME(0); |
| 478 | 509 | ||
| 479 | down(&dev->sem); | 510 | dpm_wait(dev->parent, async); |
| 511 | device_lock(dev); | ||
| 512 | |||
| 513 | dev->power.status = DPM_RESUMING; | ||
| 480 | 514 | ||
| 481 | if (dev->bus) { | 515 | if (dev->bus) { |
| 482 | if (dev->bus->pm) { | 516 | if (dev->bus->pm) { |
| @@ -509,12 +543,30 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
| 509 | } | 543 | } |
| 510 | } | 544 | } |
| 511 | End: | 545 | End: |
| 512 | up(&dev->sem); | 546 | device_unlock(dev); |
| 547 | complete_all(&dev->power.completion); | ||
| 513 | 548 | ||
| 514 | TRACE_RESUME(error); | 549 | TRACE_RESUME(error); |
| 515 | return error; | 550 | return error; |
| 516 | } | 551 | } |
| 517 | 552 | ||
| 553 | static void async_resume(void *data, async_cookie_t cookie) | ||
| 554 | { | ||
| 555 | struct device *dev = (struct device *)data; | ||
| 556 | int error; | ||
| 557 | |||
| 558 | error = device_resume(dev, pm_transition, true); | ||
| 559 | if (error) | ||
| 560 | pm_dev_err(dev, pm_transition, " async", error); | ||
| 561 | put_device(dev); | ||
| 562 | } | ||
| 563 | |||
| 564 | static bool is_async(struct device *dev) | ||
| 565 | { | ||
| 566 | return dev->power.async_suspend && pm_async_enabled | ||
| 567 | && !pm_trace_is_enabled(); | ||
| 568 | } | ||
| 569 | |||
| 518 | /** | 570 | /** |
| 519 | * dpm_resume - Execute "resume" callbacks for non-sysdev devices. | 571 | * dpm_resume - Execute "resume" callbacks for non-sysdev devices. |
| 520 | * @state: PM transition of the system being carried out. | 572 | * @state: PM transition of the system being carried out. |
| @@ -525,21 +577,33 @@ static int device_resume(struct device *dev, pm_message_t state) | |||
| 525 | static void dpm_resume(pm_message_t state) | 577 | static void dpm_resume(pm_message_t state) |
| 526 | { | 578 | { |
| 527 | struct list_head list; | 579 | struct list_head list; |
| 580 | struct device *dev; | ||
| 528 | ktime_t starttime = ktime_get(); | 581 | ktime_t starttime = ktime_get(); |
| 529 | 582 | ||
| 530 | INIT_LIST_HEAD(&list); | 583 | INIT_LIST_HEAD(&list); |
| 531 | mutex_lock(&dpm_list_mtx); | 584 | mutex_lock(&dpm_list_mtx); |
| 532 | while (!list_empty(&dpm_list)) { | 585 | pm_transition = state; |
| 533 | struct device *dev = to_device(dpm_list.next); | 586 | |
| 587 | list_for_each_entry(dev, &dpm_list, power.entry) { | ||
| 588 | if (dev->power.status < DPM_OFF) | ||
| 589 | continue; | ||
| 590 | |||
| 591 | INIT_COMPLETION(dev->power.completion); | ||
| 592 | if (is_async(dev)) { | ||
| 593 | get_device(dev); | ||
| 594 | async_schedule(async_resume, dev); | ||
| 595 | } | ||
| 596 | } | ||
| 534 | 597 | ||
| 598 | while (!list_empty(&dpm_list)) { | ||
| 599 | dev = to_device(dpm_list.next); | ||
| 535 | get_device(dev); | 600 | get_device(dev); |
| 536 | if (dev->power.status >= DPM_OFF) { | 601 | if (dev->power.status >= DPM_OFF && !is_async(dev)) { |
| 537 | int error; | 602 | int error; |
| 538 | 603 | ||
| 539 | dev->power.status = DPM_RESUMING; | ||
| 540 | mutex_unlock(&dpm_list_mtx); | 604 | mutex_unlock(&dpm_list_mtx); |
| 541 | 605 | ||
| 542 | error = device_resume(dev, state); | 606 | error = device_resume(dev, state, false); |
| 543 | 607 | ||
| 544 | mutex_lock(&dpm_list_mtx); | 608 | mutex_lock(&dpm_list_mtx); |
| 545 | if (error) | 609 | if (error) |
| @@ -554,6 +618,7 @@ static void dpm_resume(pm_message_t state) | |||
| 554 | } | 618 | } |
| 555 | list_splice(&list, &dpm_list); | 619 | list_splice(&list, &dpm_list); |
| 556 | mutex_unlock(&dpm_list_mtx); | 620 | mutex_unlock(&dpm_list_mtx); |
| 621 | async_synchronize_full(); | ||
| 557 | dpm_show_time(starttime, state, NULL); | 622 | dpm_show_time(starttime, state, NULL); |
| 558 | } | 623 | } |
| 559 | 624 | ||
| @@ -564,7 +629,7 @@ static void dpm_resume(pm_message_t state) | |||
| 564 | */ | 629 | */ |
| 565 | static void device_complete(struct device *dev, pm_message_t state) | 630 | static void device_complete(struct device *dev, pm_message_t state) |
| 566 | { | 631 | { |
| 567 | down(&dev->sem); | 632 | device_lock(dev); |
| 568 | 633 | ||
| 569 | if (dev->class && dev->class->pm && dev->class->pm->complete) { | 634 | if (dev->class && dev->class->pm && dev->class->pm->complete) { |
| 570 | pm_dev_dbg(dev, state, "completing class "); | 635 | pm_dev_dbg(dev, state, "completing class "); |
| @@ -581,7 +646,7 @@ static void device_complete(struct device *dev, pm_message_t state) | |||
| 581 | dev->bus->pm->complete(dev); | 646 | dev->bus->pm->complete(dev); |
| 582 | } | 647 | } |
| 583 | 648 | ||
| 584 | up(&dev->sem); | 649 | device_unlock(dev); |
| 585 | } | 650 | } |
| 586 | 651 | ||
| 587 | /** | 652 | /** |
| @@ -731,16 +796,23 @@ static int legacy_suspend(struct device *dev, pm_message_t state, | |||
| 731 | return error; | 796 | return error; |
| 732 | } | 797 | } |
| 733 | 798 | ||
| 799 | static int async_error; | ||
| 800 | |||
| 734 | /** | 801 | /** |
| 735 | * device_suspend - Execute "suspend" callbacks for given device. | 802 | * device_suspend - Execute "suspend" callbacks for given device. |
| 736 | * @dev: Device to handle. | 803 | * @dev: Device to handle. |
| 737 | * @state: PM transition of the system being carried out. | 804 | * @state: PM transition of the system being carried out. |
| 805 | * @async: If true, the device is being suspended asynchronously. | ||
| 738 | */ | 806 | */ |
| 739 | static int device_suspend(struct device *dev, pm_message_t state) | 807 | static int __device_suspend(struct device *dev, pm_message_t state, bool async) |
| 740 | { | 808 | { |
| 741 | int error = 0; | 809 | int error = 0; |
| 742 | 810 | ||
| 743 | down(&dev->sem); | 811 | dpm_wait_for_children(dev, async); |
| 812 | device_lock(dev); | ||
| 813 | |||
| 814 | if (async_error) | ||
| 815 | goto End; | ||
| 744 | 816 | ||
| 745 | if (dev->class) { | 817 | if (dev->class) { |
| 746 | if (dev->class->pm) { | 818 | if (dev->class->pm) { |
| @@ -772,12 +844,44 @@ static int device_suspend(struct device *dev, pm_message_t state) | |||
| 772 | error = legacy_suspend(dev, state, dev->bus->suspend); | 844 | error = legacy_suspend(dev, state, dev->bus->suspend); |
| 773 | } | 845 | } |
| 774 | } | 846 | } |
| 847 | |||
| 848 | if (!error) | ||
| 849 | dev->power.status = DPM_OFF; | ||
| 850 | |||
| 775 | End: | 851 | End: |
| 776 | up(&dev->sem); | 852 | device_unlock(dev); |
| 853 | complete_all(&dev->power.completion); | ||
| 777 | 854 | ||
| 778 | return error; | 855 | return error; |
| 779 | } | 856 | } |
| 780 | 857 | ||
| 858 | static void async_suspend(void *data, async_cookie_t cookie) | ||
| 859 | { | ||
| 860 | struct device *dev = (struct device *)data; | ||
| 861 | int error; | ||
| 862 | |||
| 863 | error = __device_suspend(dev, pm_transition, true); | ||
| 864 | if (error) { | ||
| 865 | pm_dev_err(dev, pm_transition, " async", error); | ||
| 866 | async_error = error; | ||
| 867 | } | ||
| 868 | |||
| 869 | put_device(dev); | ||
| 870 | } | ||
| 871 | |||
| 872 | static int device_suspend(struct device *dev) | ||
| 873 | { | ||
| 874 | INIT_COMPLETION(dev->power.completion); | ||
| 875 | |||
| 876 | if (pm_async_enabled && dev->power.async_suspend) { | ||
| 877 | get_device(dev); | ||
| 878 | async_schedule(async_suspend, dev); | ||
| 879 | return 0; | ||
| 880 | } | ||
| 881 | |||
| 882 | return __device_suspend(dev, pm_transition, false); | ||
| 883 | } | ||
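A device is suspended on this asynchronous path only if its power.async_suspend flag has been set, which bus or driver code can request once the device's PM dependencies are fully described to the core; a sketch (foo_probe is illustrative):

	static int foo_probe(struct platform_device *pdev)
	{
		/* ...normal device setup... */

		/*
		 * Opt into asynchronous suspend/resume; dpm_wait() and
		 * dpm_wait_for_children() still enforce parent/child ordering.
		 */
		device_enable_async_suspend(&pdev->dev);
		return 0;
	}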
| 884 | |||
| 781 | /** | 885 | /** |
| 782 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. | 886 | * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. |
| 783 | * @state: PM transition of the system being carried out. | 887 | * @state: PM transition of the system being carried out. |
| @@ -790,13 +894,15 @@ static int dpm_suspend(pm_message_t state) | |||
| 790 | 894 | ||
| 791 | INIT_LIST_HEAD(&list); | 895 | INIT_LIST_HEAD(&list); |
| 792 | mutex_lock(&dpm_list_mtx); | 896 | mutex_lock(&dpm_list_mtx); |
| 897 | pm_transition = state; | ||
| 898 | async_error = 0; | ||
| 793 | while (!list_empty(&dpm_list)) { | 899 | while (!list_empty(&dpm_list)) { |
| 794 | struct device *dev = to_device(dpm_list.prev); | 900 | struct device *dev = to_device(dpm_list.prev); |
| 795 | 901 | ||
| 796 | get_device(dev); | 902 | get_device(dev); |
| 797 | mutex_unlock(&dpm_list_mtx); | 903 | mutex_unlock(&dpm_list_mtx); |
| 798 | 904 | ||
| 799 | error = device_suspend(dev, state); | 905 | error = device_suspend(dev); |
| 800 | 906 | ||
| 801 | mutex_lock(&dpm_list_mtx); | 907 | mutex_lock(&dpm_list_mtx); |
| 802 | if (error) { | 908 | if (error) { |
| @@ -804,13 +910,17 @@ static int dpm_suspend(pm_message_t state) | |||
| 804 | put_device(dev); | 910 | put_device(dev); |
| 805 | break; | 911 | break; |
| 806 | } | 912 | } |
| 807 | dev->power.status = DPM_OFF; | ||
| 808 | if (!list_empty(&dev->power.entry)) | 913 | if (!list_empty(&dev->power.entry)) |
| 809 | list_move(&dev->power.entry, &list); | 914 | list_move(&dev->power.entry, &list); |
| 810 | put_device(dev); | 915 | put_device(dev); |
| 916 | if (async_error) | ||
| 917 | break; | ||
| 811 | } | 918 | } |
| 812 | list_splice(&list, dpm_list.prev); | 919 | list_splice(&list, dpm_list.prev); |
| 813 | mutex_unlock(&dpm_list_mtx); | 920 | mutex_unlock(&dpm_list_mtx); |
| 921 | async_synchronize_full(); | ||
| 922 | if (!error) | ||
| 923 | error = async_error; | ||
| 814 | if (!error) | 924 | if (!error) |
| 815 | dpm_show_time(starttime, state, NULL); | 925 | dpm_show_time(starttime, state, NULL); |
| 816 | return error; | 926 | return error; |
| @@ -828,7 +938,7 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
| 828 | { | 938 | { |
| 829 | int error = 0; | 939 | int error = 0; |
| 830 | 940 | ||
| 831 | down(&dev->sem); | 941 | device_lock(dev); |
| 832 | 942 | ||
| 833 | if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { | 943 | if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { |
| 834 | pm_dev_dbg(dev, state, "preparing "); | 944 | pm_dev_dbg(dev, state, "preparing "); |
| @@ -852,7 +962,7 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
| 852 | suspend_report_result(dev->class->pm->prepare, error); | 962 | suspend_report_result(dev->class->pm->prepare, error); |
| 853 | } | 963 | } |
| 854 | End: | 964 | End: |
| 855 | up(&dev->sem); | 965 | device_unlock(dev); |
| 856 | 966 | ||
| 857 | return error; | 967 | return error; |
| 858 | } | 968 | } |
| @@ -936,3 +1046,14 @@ void __suspend_report_result(const char *function, void *fn, int ret) | |||
| 936 | printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); | 1046 | printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); |
| 937 | } | 1047 | } |
| 938 | EXPORT_SYMBOL_GPL(__suspend_report_result); | 1048 | EXPORT_SYMBOL_GPL(__suspend_report_result); |
| 1049 | |||
| 1050 | /** | ||
| 1051 | * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. | ||
| 1052 | * @dev: Device to wait for. | ||
| 1053 | * @subordinate: Device that needs to wait for @dev. | ||
| 1054 | */ | ||
| 1055 | void device_pm_wait_for_dev(struct device *subordinate, struct device *dev) | ||
| 1056 | { | ||
| 1057 | dpm_wait(dev, subordinate->power.async_suspend); | ||
| 1058 | } | ||
| 1059 | EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); | ||
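A driver whose device depends on another device that is not its parent (so the PM core cannot infer the ordering on its own) can call this from its own callbacks; a sketch, with the foo structure and helpers purely illustrative:

	struct foo {
		struct device *controller;	/* the device we must wait for */
		/* ... */
	};

	static int foo_hw_reinit(struct foo *foo)
	{
		/* reprogram the hardware */
		return 0;
	}

	static int foo_resume(struct device *dev)
	{
		struct foo *foo = dev_get_drvdata(dev);

		/* Wait until the controller has finished resuming before we
		 * touch our own registers. */
		device_pm_wait_for_dev(dev, foo->controller);

		return foo_hw_reinit(foo);
	}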
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index b8fa1aa5225a..c0bd03c83b9c 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
| @@ -12,10 +12,10 @@ static inline void pm_runtime_remove(struct device *dev) {} | |||
| 12 | 12 | ||
| 13 | #ifdef CONFIG_PM_SLEEP | 13 | #ifdef CONFIG_PM_SLEEP |
| 14 | 14 | ||
| 15 | /* | 15 | /* kernel/power/main.c */ |
| 16 | * main.c | 16 | extern int pm_async_enabled; |
| 17 | */ | ||
| 18 | 17 | ||
| 18 | /* drivers/base/power/main.c */ | ||
| 19 | extern struct list_head dpm_list; /* The active device list */ | 19 | extern struct list_head dpm_list; /* The active device list */ |
| 20 | 20 | ||
| 21 | static inline struct device *to_device(struct list_head *entry) | 21 | static inline struct device *to_device(struct list_head *entry) |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index f8b044e8aef7..626dd147b75f 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
| @@ -1011,6 +1011,50 @@ void pm_runtime_enable(struct device *dev) | |||
| 1011 | EXPORT_SYMBOL_GPL(pm_runtime_enable); | 1011 | EXPORT_SYMBOL_GPL(pm_runtime_enable); |
| 1012 | 1012 | ||
| 1013 | /** | 1013 | /** |
| 1014 | * pm_runtime_forbid - Block run-time PM of a device. | ||
| 1015 | * @dev: Device to handle. | ||
| 1016 | * | ||
| 1017 | * Increase the device's usage count and clear its power.runtime_auto flag, | ||
| 1018 | * so that it cannot be suspended at run time until pm_runtime_allow() is called | ||
| 1019 | * for it. | ||
| 1020 | */ | ||
| 1021 | void pm_runtime_forbid(struct device *dev) | ||
| 1022 | { | ||
| 1023 | spin_lock_irq(&dev->power.lock); | ||
| 1024 | if (!dev->power.runtime_auto) | ||
| 1025 | goto out; | ||
| 1026 | |||
| 1027 | dev->power.runtime_auto = false; | ||
| 1028 | atomic_inc(&dev->power.usage_count); | ||
| 1029 | __pm_runtime_resume(dev, false); | ||
| 1030 | |||
| 1031 | out: | ||
| 1032 | spin_unlock_irq(&dev->power.lock); | ||
| 1033 | } | ||
| 1034 | EXPORT_SYMBOL_GPL(pm_runtime_forbid); | ||
| 1035 | |||
| 1036 | /** | ||
| 1037 | * pm_runtime_allow - Unblock run-time PM of a device. | ||
| 1038 | * @dev: Device to handle. | ||
| 1039 | * | ||
| 1040 | * Decrease the device's usage count and set its power.runtime_auto flag. | ||
| 1041 | */ | ||
| 1042 | void pm_runtime_allow(struct device *dev) | ||
| 1043 | { | ||
| 1044 | spin_lock_irq(&dev->power.lock); | ||
| 1045 | if (dev->power.runtime_auto) | ||
| 1046 | goto out; | ||
| 1047 | |||
| 1048 | dev->power.runtime_auto = true; | ||
| 1049 | if (atomic_dec_and_test(&dev->power.usage_count)) | ||
| 1050 | __pm_runtime_idle(dev); | ||
| 1051 | |||
| 1052 | out: | ||
| 1053 | spin_unlock_irq(&dev->power.lock); | ||
| 1054 | } | ||
| 1055 | EXPORT_SYMBOL_GPL(pm_runtime_allow); | ||
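Apart from backing the power/control attribute added in sysfs.c below, these calls let bus code pick a conservative default, i.e. keep a device out of runtime suspend until userspace explicitly allows it; a sketch (foo_setup_runtime_pm is illustrative):

	static void foo_setup_runtime_pm(struct device *dev)
	{
		pm_runtime_enable(dev);
		/*
		 * Stay "on" until userspace writes "auto" to power/control,
		 * which calls pm_runtime_allow() and drops this usage count.
		 */
		pm_runtime_forbid(dev);
	}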
| 1056 | |||
| 1057 | /** | ||
| 1014 | * pm_runtime_init - Initialize run-time PM fields in given device object. | 1058 | * pm_runtime_init - Initialize run-time PM fields in given device object. |
| 1015 | * @dev: Device object to initialize. | 1059 | * @dev: Device object to initialize. |
| 1016 | */ | 1060 | */ |
| @@ -1028,6 +1072,7 @@ void pm_runtime_init(struct device *dev) | |||
| 1028 | 1072 | ||
| 1029 | atomic_set(&dev->power.child_count, 0); | 1073 | atomic_set(&dev->power.child_count, 0); |
| 1030 | pm_suspend_ignore_children(dev, false); | 1074 | pm_suspend_ignore_children(dev, false); |
| 1075 | dev->power.runtime_auto = true; | ||
| 1031 | 1076 | ||
| 1032 | dev->power.request_pending = false; | 1077 | dev->power.request_pending = false; |
| 1033 | dev->power.request = RPM_REQ_NONE; | 1078 | dev->power.request = RPM_REQ_NONE; |
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 596aeecfdffe..86fd9373447e 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
| @@ -4,9 +4,25 @@ | |||
| 4 | 4 | ||
| 5 | #include <linux/device.h> | 5 | #include <linux/device.h> |
| 6 | #include <linux/string.h> | 6 | #include <linux/string.h> |
| 7 | #include <linux/pm_runtime.h> | ||
| 7 | #include "power.h" | 8 | #include "power.h" |
| 8 | 9 | ||
| 9 | /* | 10 | /* |
| 11 | * control - Report/change current runtime PM setting of the device | ||
| 12 | * | ||
| 13 | * Runtime power management of a device can be blocked with the help of | ||
| 14 | * this attribute. All devices have one of the following two values for | ||
| 15 | * the power/control file: | ||
| 16 | * | ||
| 17 | * + "auto\n" to allow the device to be power managed at run time; | ||
| 18 | * + "on\n" to prevent the device from being power managed at run time; | ||
| 19 | * | ||
| 20 | * The default for all devices is "auto", which means that devices may be | ||
| 21 | * subject to automatic power management, depending on their drivers. | ||
| 22 | * Changing this attribute to "on" prevents the driver from power managing | ||
| 23 | * the device at run time. Doing that while the device is suspended causes | ||
| 24 | * it to be woken up. | ||
| 25 | * | ||
| 10 | * wakeup - Report/change current wakeup option for device | 26 | * wakeup - Report/change current wakeup option for device |
| 11 | * | 27 | * |
| 12 | * Some devices support "wakeup" events, which are hardware signals | 28 | * Some devices support "wakeup" events, which are hardware signals |
| @@ -38,11 +54,61 @@ | |||
| 38 | * wakeup events internally (unless they are disabled), keeping | 54 | * wakeup events internally (unless they are disabled), keeping |
| 39 | * their hardware in low power modes whenever they're unused. This | 55 | * their hardware in low power modes whenever they're unused. This |
| 40 | * saves runtime power, without requiring system-wide sleep states. | 56 | * saves runtime power, without requiring system-wide sleep states. |
| 57 | * | ||
| 58 | * async - Report/change current async suspend setting for the device | ||
| 59 | * | ||
| 60 | * Asynchronous suspend and resume of the device during system-wide power | ||
| 61 | * state transitions can be enabled by writing "enabled" to this file. | ||
| 62 | * Analogously, if "disabled" is written to this file, the device will be | ||
| 63 | * suspended and resumed synchronously. | ||
| 64 | * | ||
| 65 | * All devices have one of the following two values for power/async: | ||
| 66 | * | ||
| 67 | * + "enabled\n" to permit the asynchronous suspend/resume of the device; | ||
| 68 | * + "disabled\n" to forbid it; | ||
| 69 | * | ||
| 70 | * NOTE: It generally is unsafe to permit the asynchronous suspend/resume | ||
| 71 | * of a device unless it is certain that all of the PM dependencies of the | ||
| 72 | * device are known to the PM core. However, for some devices this | ||
| 73 | * attribute is set to "enabled" by bus type code or device drivers, and in | ||
| 74 | * those cases it should be safe to leave the default value. | ||
| 41 | */ | 75 | */ |
| 42 | 76 | ||
| 43 | static const char enabled[] = "enabled"; | 77 | static const char enabled[] = "enabled"; |
| 44 | static const char disabled[] = "disabled"; | 78 | static const char disabled[] = "disabled"; |
| 45 | 79 | ||
| 80 | #ifdef CONFIG_PM_RUNTIME | ||
| 81 | static const char ctrl_auto[] = "auto"; | ||
| 82 | static const char ctrl_on[] = "on"; | ||
| 83 | |||
| 84 | static ssize_t control_show(struct device *dev, struct device_attribute *attr, | ||
| 85 | char *buf) | ||
| 86 | { | ||
| 87 | return sprintf(buf, "%s\n", | ||
| 88 | dev->power.runtime_auto ? ctrl_auto : ctrl_on); | ||
| 89 | } | ||
| 90 | |||
| 91 | static ssize_t control_store(struct device * dev, struct device_attribute *attr, | ||
| 92 | const char * buf, size_t n) | ||
| 93 | { | ||
| 94 | char *cp; | ||
| 95 | int len = n; | ||
| 96 | |||
| 97 | cp = memchr(buf, '\n', n); | ||
| 98 | if (cp) | ||
| 99 | len = cp - buf; | ||
| 100 | if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0) | ||
| 101 | pm_runtime_allow(dev); | ||
| 102 | else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0) | ||
| 103 | pm_runtime_forbid(dev); | ||
| 104 | else | ||
| 105 | return -EINVAL; | ||
| 106 | return n; | ||
| 107 | } | ||
| 108 | |||
| 109 | static DEVICE_ATTR(control, 0644, control_show, control_store); | ||
| 110 | #endif | ||
| 111 | |||
| 46 | static ssize_t | 112 | static ssize_t |
| 47 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) | 113 | wake_show(struct device * dev, struct device_attribute *attr, char * buf) |
| 48 | { | 114 | { |
| @@ -77,9 +143,43 @@ wake_store(struct device * dev, struct device_attribute *attr, | |||
| 77 | 143 | ||
| 78 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); | 144 | static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); |
| 79 | 145 | ||
| 146 | #ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG | ||
| 147 | static ssize_t async_show(struct device *dev, struct device_attribute *attr, | ||
| 148 | char *buf) | ||
| 149 | { | ||
| 150 | return sprintf(buf, "%s\n", | ||
| 151 | device_async_suspend_enabled(dev) ? enabled : disabled); | ||
| 152 | } | ||
| 153 | |||
| 154 | static ssize_t async_store(struct device *dev, struct device_attribute *attr, | ||
| 155 | const char *buf, size_t n) | ||
| 156 | { | ||
| 157 | char *cp; | ||
| 158 | int len = n; | ||
| 159 | |||
| 160 | cp = memchr(buf, '\n', n); | ||
| 161 | if (cp) | ||
| 162 | len = cp - buf; | ||
| 163 | if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0) | ||
| 164 | device_enable_async_suspend(dev); | ||
| 165 | else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0) | ||
| 166 | device_disable_async_suspend(dev); | ||
| 167 | else | ||
| 168 | return -EINVAL; | ||
| 169 | return n; | ||
| 170 | } | ||
| 171 | |||
| 172 | static DEVICE_ATTR(async, 0644, async_show, async_store); | ||
| 173 | #endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */ | ||
| 80 | 174 | ||
| 81 | static struct attribute * power_attrs[] = { | 175 | static struct attribute * power_attrs[] = { |
| 176 | #ifdef CONFIG_PM_RUNTIME | ||
| 177 | &dev_attr_control.attr, | ||
| 178 | #endif | ||
| 82 | &dev_attr_wakeup.attr, | 179 | &dev_attr_wakeup.attr, |
| 180 | #ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG | ||
| 181 | &dev_attr_async.attr, | ||
| 182 | #endif | ||
| 83 | NULL, | 183 | NULL, |
| 84 | }; | 184 | }; |
| 85 | static struct attribute_group pm_attr_group = { | 185 | static struct attribute_group pm_attr_group = { |
diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 0d903909af7e..8980feec5d14 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c | |||
| @@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr, | |||
| 54 | return -EIO; | 54 | return -EIO; |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static struct sysfs_ops sysfs_ops = { | 57 | static const struct sysfs_ops sysfs_ops = { |
| 58 | .show = sysdev_show, | 58 | .show = sysdev_show, |
| 59 | .store = sysdev_store, | 59 | .store = sysdev_store, |
| 60 | }; | 60 | }; |
| @@ -89,7 +89,7 @@ static ssize_t sysdev_class_show(struct kobject *kobj, struct attribute *attr, | |||
| 89 | struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr); | 89 | struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr); |
| 90 | 90 | ||
| 91 | if (class_attr->show) | 91 | if (class_attr->show) |
| 92 | return class_attr->show(class, buffer); | 92 | return class_attr->show(class, class_attr, buffer); |
| 93 | return -EIO; | 93 | return -EIO; |
| 94 | } | 94 | } |
| 95 | 95 | ||
| @@ -100,11 +100,11 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr, | |||
| 100 | struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr); | 100 | struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr); |
| 101 | 101 | ||
| 102 | if (class_attr->store) | 102 | if (class_attr->store) |
| 103 | return class_attr->store(class, buffer, count); | 103 | return class_attr->store(class, class_attr, buffer, count); |
| 104 | return -EIO; | 104 | return -EIO; |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | static struct sysfs_ops sysfs_class_ops = { | 107 | static const struct sysfs_ops sysfs_class_ops = { |
| 108 | .show = sysdev_class_show, | 108 | .show = sysdev_class_show, |
| 109 | .store = sysdev_class_store, | 109 | .store = sysdev_class_store, |
| 110 | }; | 110 | }; |
| @@ -145,13 +145,20 @@ int sysdev_class_register(struct sysdev_class *cls) | |||
| 145 | if (retval) | 145 | if (retval) |
| 146 | return retval; | 146 | return retval; |
| 147 | 147 | ||
| 148 | return kset_register(&cls->kset); | 148 | retval = kset_register(&cls->kset); |
| 149 | if (!retval && cls->attrs) | ||
| 150 | retval = sysfs_create_files(&cls->kset.kobj, | ||
| 151 | (const struct attribute **)cls->attrs); | ||
| 152 | return retval; | ||
| 149 | } | 153 | } |
| 150 | 154 | ||
| 151 | void sysdev_class_unregister(struct sysdev_class *cls) | 155 | void sysdev_class_unregister(struct sysdev_class *cls) |
| 152 | { | 156 | { |
| 153 | pr_debug("Unregistering sysdev class '%s'\n", | 157 | pr_debug("Unregistering sysdev class '%s'\n", |
| 154 | kobject_name(&cls->kset.kobj)); | 158 | kobject_name(&cls->kset.kobj)); |
| 159 | if (cls->attrs) | ||
| 160 | sysfs_remove_files(&cls->kset.kobj, | ||
| 161 | (const struct attribute **)cls->attrs); | ||
| 155 | kset_unregister(&cls->kset); | 162 | kset_unregister(&cls->kset); |
| 156 | } | 163 | } |
| 157 | 164 | ||
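With this change a sysdev class can hang default attributes off the class itself and have them created and removed automatically; a sketch, assuming the matching sysdev.h update gives struct sysdev_class a NULL-terminated attrs array and the new three-argument show prototype (the foo names are illustrative):

	static ssize_t foo_show(struct sysdev_class *class,
				struct sysdev_class_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 42);
	}

	static SYSDEV_CLASS_ATTR(foo, 0444, foo_show, NULL);

	static struct sysdev_class_attribute *foo_class_attrs[] = {
		&attr_foo,
		NULL
	};

	static struct sysdev_class foo_sysdev_class = {
		.name	= "foo",
		.attrs	= foo_class_attrs,	/* created by sysdev_class_register() */
	};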
