diff options
Diffstat (limited to 'drivers/base')
| -rw-r--r-- | drivers/base/Kconfig | 2 | ||||
| -rw-r--r-- | drivers/base/Makefile | 2 | ||||
| -rw-r--r-- | drivers/base/core.c | 34 | ||||
| -rw-r--r-- | drivers/base/firmware_class.c | 7 | ||||
| -rw-r--r-- | drivers/base/memory.c | 197 | ||||
| -rw-r--r-- | drivers/base/node.c | 12 | ||||
| -rw-r--r-- | drivers/base/power/Makefile | 3 | ||||
| -rw-r--r-- | drivers/base/power/main.c | 175 | ||||
| -rw-r--r-- | drivers/base/power/opp.c | 2 | ||||
| -rw-r--r-- | drivers/base/power/power.h | 21 | ||||
| -rw-r--r-- | drivers/base/power/runtime.c | 46 | ||||
| -rw-r--r-- | drivers/base/power/sysfs.c | 78 | ||||
| -rw-r--r-- | drivers/base/power/trace.c | 6 | ||||
| -rw-r--r-- | drivers/base/power/wakeup.c | 109 | ||||
| -rw-r--r-- | drivers/base/sys.c | 65 | ||||
| -rw-r--r-- | drivers/base/syscore.c | 117 |
16 files changed, 618 insertions, 258 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index fd96345bc35c..d57e8d0fb823 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig | |||
| @@ -70,7 +70,7 @@ config PREVENT_FIRMWARE_BUILD | |||
| 70 | If unsure say Y here. | 70 | If unsure say Y here. |
| 71 | 71 | ||
| 72 | config FW_LOADER | 72 | config FW_LOADER |
| 73 | tristate "Userspace firmware loading support" if EMBEDDED | 73 | tristate "Userspace firmware loading support" if EXPERT |
| 74 | default y | 74 | default y |
| 75 | ---help--- | 75 | ---help--- |
| 76 | This option is provided for the case where no in-kernel-tree modules | 76 | This option is provided for the case where no in-kernel-tree modules |
diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 5f51c3b4451e..4c5701c15f53 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile | |||
| @@ -1,6 +1,6 @@ | |||
| 1 | # Makefile for the Linux device tree | 1 | # Makefile for the Linux device tree |
| 2 | 2 | ||
| 3 | obj-y := core.o sys.o bus.o dd.o \ | 3 | obj-y := core.o sys.o bus.o dd.o syscore.o \ |
| 4 | driver.o class.o platform.o \ | 4 | driver.o class.o platform.o \ |
| 5 | cpu.o firmware.o init.o map.o devres.o \ | 5 | cpu.o firmware.o init.o map.o devres.o \ |
| 6 | attribute_container.o transport_class.o | 6 | attribute_container.o transport_class.o |
diff --git a/drivers/base/core.c b/drivers/base/core.c index 080e9ca11017..81b78ede37c4 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c | |||
| @@ -1320,7 +1320,10 @@ struct root_device | |||
| 1320 | struct module *owner; | 1320 | struct module *owner; |
| 1321 | }; | 1321 | }; |
| 1322 | 1322 | ||
| 1323 | #define to_root_device(dev) container_of(dev, struct root_device, dev) | 1323 | inline struct root_device *to_root_device(struct device *d) |
| 1324 | { | ||
| 1325 | return container_of(d, struct root_device, dev); | ||
| 1326 | } | ||
| 1324 | 1327 | ||
| 1325 | static void root_device_release(struct device *dev) | 1328 | static void root_device_release(struct device *dev) |
| 1326 | { | 1329 | { |
| @@ -1551,7 +1554,34 @@ EXPORT_SYMBOL_GPL(device_destroy); | |||
| 1551 | * on the same device to ensure that new_name is valid and | 1554 | * on the same device to ensure that new_name is valid and |
| 1552 | * won't conflict with other devices. | 1555 | * won't conflict with other devices. |
| 1553 | * | 1556 | * |
| 1554 | * "Never use this function, bad things will happen" - gregkh | 1557 | * Note: Don't call this function. Currently, the networking layer calls this |
| 1558 | * function, but that will change. The following text from Kay Sievers offers | ||
| 1559 | * some insight: | ||
| 1560 | * | ||
| 1561 | * Renaming devices is racy at many levels, symlinks and other stuff are not | ||
| 1562 | * replaced atomically, and you get a "move" uevent, but it's not easy to | ||
| 1563 | * connect the event to the old and new device. Device nodes are not renamed at | ||
| 1564 | * all, there isn't even support for that in the kernel now. | ||
| 1565 | * | ||
| 1566 | * In the meantime, during renaming, your target name might be taken by another | ||
| 1567 | * driver, creating conflicts. Or the old name is taken directly after you | ||
| 1568 | * renamed it -- then you get events for the same DEVPATH, before you even see | ||
| 1569 | * the "move" event. It's just a mess, and nothing new should ever rely on | ||
| 1570 | * kernel device renaming. Besides that, it's not even implemented now for | ||
| 1571 | * other things than (driver-core wise very simple) network devices. | ||
| 1572 | * | ||
| 1573 | * We are currently about to change network renaming in udev to completely | ||
| 1574 | * disallow renaming of devices in the same namespace as the kernel uses, | ||
| 1575 | * because we can't solve the problems properly, that arise with swapping names | ||
| 1576 | * of multiple interfaces without races. Means, renaming of eth[0-9]* will only | ||
| 1577 | * be allowed to some other name than eth[0-9]*, for the aforementioned | ||
| 1578 | * reasons. | ||
| 1579 | * | ||
| 1580 | * Make up a "real" name in the driver before you register anything, or add | ||
| 1581 | * some other attributes for userspace to find the device, or use udev to add | ||
| 1582 | * symlinks -- but never rename kernel devices later, it's a complete mess. We | ||
| 1583 | * don't even want to get into that and try to implement the missing pieces in | ||
| 1584 | * the core. We really have other pieces to fix in the driver core mess. :) | ||
| 1555 | */ | 1585 | */ |
| 1556 | int device_rename(struct device *dev, const char *new_name) | 1586 | int device_rename(struct device *dev, const char *new_name) |
| 1557 | { | 1587 | { |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 40af43ebd92d..8c798ef7f13f 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
| @@ -593,8 +593,7 @@ int | |||
| 593 | request_firmware(const struct firmware **firmware_p, const char *name, | 593 | request_firmware(const struct firmware **firmware_p, const char *name, |
| 594 | struct device *device) | 594 | struct device *device) |
| 595 | { | 595 | { |
| 596 | int uevent = 1; | 596 | return _request_firmware(firmware_p, name, device, true, false); |
| 597 | return _request_firmware(firmware_p, name, device, uevent, false); | ||
| 598 | } | 597 | } |
| 599 | 598 | ||
| 600 | /** | 599 | /** |
| @@ -618,7 +617,7 @@ struct firmware_work { | |||
| 618 | struct device *device; | 617 | struct device *device; |
| 619 | void *context; | 618 | void *context; |
| 620 | void (*cont)(const struct firmware *fw, void *context); | 619 | void (*cont)(const struct firmware *fw, void *context); |
| 621 | int uevent; | 620 | bool uevent; |
| 622 | }; | 621 | }; |
| 623 | 622 | ||
| 624 | static int request_firmware_work_func(void *arg) | 623 | static int request_firmware_work_func(void *arg) |
| @@ -661,7 +660,7 @@ static int request_firmware_work_func(void *arg) | |||
| 661 | **/ | 660 | **/ |
| 662 | int | 661 | int |
| 663 | request_firmware_nowait( | 662 | request_firmware_nowait( |
| 664 | struct module *module, int uevent, | 663 | struct module *module, bool uevent, |
| 665 | const char *name, struct device *device, gfp_t gfp, void *context, | 664 | const char *name, struct device *device, gfp_t gfp, void *context, |
| 666 | void (*cont)(const struct firmware *fw, void *context)) | 665 | void (*cont)(const struct firmware *fw, void *context)) |
| 667 | { | 666 | { |
diff --git a/drivers/base/memory.c b/drivers/base/memory.c index cafeaaf0428f..3da6a43b7756 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c | |||
| @@ -30,6 +30,14 @@ | |||
| 30 | static DEFINE_MUTEX(mem_sysfs_mutex); | 30 | static DEFINE_MUTEX(mem_sysfs_mutex); |
| 31 | 31 | ||
| 32 | #define MEMORY_CLASS_NAME "memory" | 32 | #define MEMORY_CLASS_NAME "memory" |
| 33 | #define MIN_MEMORY_BLOCK_SIZE (1 << SECTION_SIZE_BITS) | ||
| 34 | |||
| 35 | static int sections_per_block; | ||
| 36 | |||
| 37 | static inline int base_memory_block_id(int section_nr) | ||
| 38 | { | ||
| 39 | return section_nr / sections_per_block; | ||
| 40 | } | ||
| 33 | 41 | ||
| 34 | static struct sysdev_class memory_sysdev_class = { | 42 | static struct sysdev_class memory_sysdev_class = { |
| 35 | .name = MEMORY_CLASS_NAME, | 43 | .name = MEMORY_CLASS_NAME, |
| @@ -84,39 +92,72 @@ EXPORT_SYMBOL(unregister_memory_isolate_notifier); | |||
| 84 | * register_memory - Setup a sysfs device for a memory block | 92 | * register_memory - Setup a sysfs device for a memory block |
| 85 | */ | 93 | */ |
| 86 | static | 94 | static |
| 87 | int register_memory(struct memory_block *memory, struct mem_section *section) | 95 | int register_memory(struct memory_block *memory) |
| 88 | { | 96 | { |
| 89 | int error; | 97 | int error; |
| 90 | 98 | ||
| 91 | memory->sysdev.cls = &memory_sysdev_class; | 99 | memory->sysdev.cls = &memory_sysdev_class; |
| 92 | memory->sysdev.id = __section_nr(section); | 100 | memory->sysdev.id = memory->start_section_nr / sections_per_block; |
| 93 | 101 | ||
| 94 | error = sysdev_register(&memory->sysdev); | 102 | error = sysdev_register(&memory->sysdev); |
| 95 | return error; | 103 | return error; |
| 96 | } | 104 | } |
| 97 | 105 | ||
| 98 | static void | 106 | static void |
| 99 | unregister_memory(struct memory_block *memory, struct mem_section *section) | 107 | unregister_memory(struct memory_block *memory) |
| 100 | { | 108 | { |
| 101 | BUG_ON(memory->sysdev.cls != &memory_sysdev_class); | 109 | BUG_ON(memory->sysdev.cls != &memory_sysdev_class); |
| 102 | BUG_ON(memory->sysdev.id != __section_nr(section)); | ||
| 103 | 110 | ||
| 104 | /* drop the ref. we got in remove_memory_block() */ | 111 | /* drop the ref. we got in remove_memory_block() */ |
| 105 | kobject_put(&memory->sysdev.kobj); | 112 | kobject_put(&memory->sysdev.kobj); |
| 106 | sysdev_unregister(&memory->sysdev); | 113 | sysdev_unregister(&memory->sysdev); |
| 107 | } | 114 | } |
| 108 | 115 | ||
| 116 | unsigned long __weak memory_block_size_bytes(void) | ||
| 117 | { | ||
| 118 | return MIN_MEMORY_BLOCK_SIZE; | ||
| 119 | } | ||
| 120 | |||
| 121 | static unsigned long get_memory_block_size(void) | ||
| 122 | { | ||
| 123 | unsigned long block_sz; | ||
| 124 | |||
| 125 | block_sz = memory_block_size_bytes(); | ||
| 126 | |||
| 127 | /* Validate blk_sz is a power of 2 and not less than section size */ | ||
| 128 | if ((block_sz & (block_sz - 1)) || (block_sz < MIN_MEMORY_BLOCK_SIZE)) { | ||
| 129 | WARN_ON(1); | ||
| 130 | block_sz = MIN_MEMORY_BLOCK_SIZE; | ||
| 131 | } | ||
| 132 | |||
| 133 | return block_sz; | ||
| 134 | } | ||
| 135 | |||
| 109 | /* | 136 | /* |
| 110 | * use this as the physical section index that this memsection | 137 | * use this as the physical section index that this memsection |
| 111 | * uses. | 138 | * uses. |
| 112 | */ | 139 | */ |
| 113 | 140 | ||
| 114 | static ssize_t show_mem_phys_index(struct sys_device *dev, | 141 | static ssize_t show_mem_start_phys_index(struct sys_device *dev, |
| 115 | struct sysdev_attribute *attr, char *buf) | 142 | struct sysdev_attribute *attr, char *buf) |
| 116 | { | 143 | { |
| 117 | struct memory_block *mem = | 144 | struct memory_block *mem = |
| 118 | container_of(dev, struct memory_block, sysdev); | 145 | container_of(dev, struct memory_block, sysdev); |
| 119 | return sprintf(buf, "%08lx\n", mem->phys_index); | 146 | unsigned long phys_index; |
| 147 | |||
| 148 | phys_index = mem->start_section_nr / sections_per_block; | ||
| 149 | return sprintf(buf, "%08lx\n", phys_index); | ||
| 150 | } | ||
| 151 | |||
| 152 | static ssize_t show_mem_end_phys_index(struct sys_device *dev, | ||
| 153 | struct sysdev_attribute *attr, char *buf) | ||
| 154 | { | ||
| 155 | struct memory_block *mem = | ||
| 156 | container_of(dev, struct memory_block, sysdev); | ||
| 157 | unsigned long phys_index; | ||
| 158 | |||
| 159 | phys_index = mem->end_section_nr / sections_per_block; | ||
| 160 | return sprintf(buf, "%08lx\n", phys_index); | ||
| 120 | } | 161 | } |
| 121 | 162 | ||
| 122 | /* | 163 | /* |
| @@ -125,13 +166,16 @@ static ssize_t show_mem_phys_index(struct sys_device *dev, | |||
| 125 | static ssize_t show_mem_removable(struct sys_device *dev, | 166 | static ssize_t show_mem_removable(struct sys_device *dev, |
| 126 | struct sysdev_attribute *attr, char *buf) | 167 | struct sysdev_attribute *attr, char *buf) |
| 127 | { | 168 | { |
| 128 | unsigned long start_pfn; | 169 | unsigned long i, pfn; |
| 129 | int ret; | 170 | int ret = 1; |
| 130 | struct memory_block *mem = | 171 | struct memory_block *mem = |
| 131 | container_of(dev, struct memory_block, sysdev); | 172 | container_of(dev, struct memory_block, sysdev); |
| 132 | 173 | ||
| 133 | start_pfn = section_nr_to_pfn(mem->phys_index); | 174 | for (i = 0; i < sections_per_block; i++) { |
| 134 | ret = is_mem_section_removable(start_pfn, PAGES_PER_SECTION); | 175 | pfn = section_nr_to_pfn(mem->start_section_nr + i); |
| 176 | ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); | ||
| 177 | } | ||
| 178 | |||
| 135 | return sprintf(buf, "%d\n", ret); | 179 | return sprintf(buf, "%d\n", ret); |
| 136 | } | 180 | } |
| 137 | 181 | ||
| @@ -184,17 +228,14 @@ int memory_isolate_notify(unsigned long val, void *v) | |||
| 184 | * OK to have direct references to sparsemem variables in here. | 228 | * OK to have direct references to sparsemem variables in here. |
| 185 | */ | 229 | */ |
| 186 | static int | 230 | static int |
| 187 | memory_block_action(struct memory_block *mem, unsigned long action) | 231 | memory_section_action(unsigned long phys_index, unsigned long action) |
| 188 | { | 232 | { |
| 189 | int i; | 233 | int i; |
| 190 | unsigned long psection; | ||
| 191 | unsigned long start_pfn, start_paddr; | 234 | unsigned long start_pfn, start_paddr; |
| 192 | struct page *first_page; | 235 | struct page *first_page; |
| 193 | int ret; | 236 | int ret; |
| 194 | int old_state = mem->state; | ||
| 195 | 237 | ||
| 196 | psection = mem->phys_index; | 238 | first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT); |
| 197 | first_page = pfn_to_page(psection << PFN_SECTION_SHIFT); | ||
| 198 | 239 | ||
| 199 | /* | 240 | /* |
| 200 | * The probe routines leave the pages reserved, just | 241 | * The probe routines leave the pages reserved, just |
| @@ -207,8 +248,8 @@ memory_block_action(struct memory_block *mem, unsigned long action) | |||
| 207 | continue; | 248 | continue; |
| 208 | 249 | ||
| 209 | printk(KERN_WARNING "section number %ld page number %d " | 250 | printk(KERN_WARNING "section number %ld page number %d " |
| 210 | "not reserved, was it already online? \n", | 251 | "not reserved, was it already online?\n", |
| 211 | psection, i); | 252 | phys_index, i); |
| 212 | return -EBUSY; | 253 | return -EBUSY; |
| 213 | } | 254 | } |
| 214 | } | 255 | } |
| @@ -219,18 +260,13 @@ memory_block_action(struct memory_block *mem, unsigned long action) | |||
| 219 | ret = online_pages(start_pfn, PAGES_PER_SECTION); | 260 | ret = online_pages(start_pfn, PAGES_PER_SECTION); |
| 220 | break; | 261 | break; |
| 221 | case MEM_OFFLINE: | 262 | case MEM_OFFLINE: |
| 222 | mem->state = MEM_GOING_OFFLINE; | ||
| 223 | start_paddr = page_to_pfn(first_page) << PAGE_SHIFT; | 263 | start_paddr = page_to_pfn(first_page) << PAGE_SHIFT; |
| 224 | ret = remove_memory(start_paddr, | 264 | ret = remove_memory(start_paddr, |
| 225 | PAGES_PER_SECTION << PAGE_SHIFT); | 265 | PAGES_PER_SECTION << PAGE_SHIFT); |
| 226 | if (ret) { | ||
| 227 | mem->state = old_state; | ||
| 228 | break; | ||
| 229 | } | ||
| 230 | break; | 266 | break; |
| 231 | default: | 267 | default: |
| 232 | WARN(1, KERN_WARNING "%s(%p, %ld) unknown action: %ld\n", | 268 | WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: " |
| 233 | __func__, mem, action, action); | 269 | "%ld\n", __func__, phys_index, action, action); |
| 234 | ret = -EINVAL; | 270 | ret = -EINVAL; |
| 235 | } | 271 | } |
| 236 | 272 | ||
| @@ -240,7 +276,8 @@ memory_block_action(struct memory_block *mem, unsigned long action) | |||
| 240 | static int memory_block_change_state(struct memory_block *mem, | 276 | static int memory_block_change_state(struct memory_block *mem, |
| 241 | unsigned long to_state, unsigned long from_state_req) | 277 | unsigned long to_state, unsigned long from_state_req) |
| 242 | { | 278 | { |
| 243 | int ret = 0; | 279 | int i, ret = 0; |
| 280 | |||
| 244 | mutex_lock(&mem->state_mutex); | 281 | mutex_lock(&mem->state_mutex); |
| 245 | 282 | ||
| 246 | if (mem->state != from_state_req) { | 283 | if (mem->state != from_state_req) { |
| @@ -248,8 +285,23 @@ static int memory_block_change_state(struct memory_block *mem, | |||
| 248 | goto out; | 285 | goto out; |
| 249 | } | 286 | } |
| 250 | 287 | ||
| 251 | ret = memory_block_action(mem, to_state); | 288 | if (to_state == MEM_OFFLINE) |
| 252 | if (!ret) | 289 | mem->state = MEM_GOING_OFFLINE; |
| 290 | |||
| 291 | for (i = 0; i < sections_per_block; i++) { | ||
| 292 | ret = memory_section_action(mem->start_section_nr + i, | ||
| 293 | to_state); | ||
| 294 | if (ret) | ||
| 295 | break; | ||
| 296 | } | ||
| 297 | |||
| 298 | if (ret) { | ||
| 299 | for (i = 0; i < sections_per_block; i++) | ||
| 300 | memory_section_action(mem->start_section_nr + i, | ||
| 301 | from_state_req); | ||
| 302 | |||
| 303 | mem->state = from_state_req; | ||
| 304 | } else | ||
| 253 | mem->state = to_state; | 305 | mem->state = to_state; |
| 254 | 306 | ||
| 255 | out: | 307 | out: |
| @@ -262,20 +314,15 @@ store_mem_state(struct sys_device *dev, | |||
| 262 | struct sysdev_attribute *attr, const char *buf, size_t count) | 314 | struct sysdev_attribute *attr, const char *buf, size_t count) |
| 263 | { | 315 | { |
| 264 | struct memory_block *mem; | 316 | struct memory_block *mem; |
| 265 | unsigned int phys_section_nr; | ||
| 266 | int ret = -EINVAL; | 317 | int ret = -EINVAL; |
| 267 | 318 | ||
| 268 | mem = container_of(dev, struct memory_block, sysdev); | 319 | mem = container_of(dev, struct memory_block, sysdev); |
| 269 | phys_section_nr = mem->phys_index; | ||
| 270 | |||
| 271 | if (!present_section_nr(phys_section_nr)) | ||
| 272 | goto out; | ||
| 273 | 320 | ||
| 274 | if (!strncmp(buf, "online", min((int)count, 6))) | 321 | if (!strncmp(buf, "online", min((int)count, 6))) |
| 275 | ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); | 322 | ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE); |
| 276 | else if(!strncmp(buf, "offline", min((int)count, 7))) | 323 | else if(!strncmp(buf, "offline", min((int)count, 7))) |
| 277 | ret = memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); | 324 | ret = memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE); |
| 278 | out: | 325 | |
| 279 | if (ret) | 326 | if (ret) |
| 280 | return ret; | 327 | return ret; |
| 281 | return count; | 328 | return count; |
| @@ -298,7 +345,8 @@ static ssize_t show_phys_device(struct sys_device *dev, | |||
| 298 | return sprintf(buf, "%d\n", mem->phys_device); | 345 | return sprintf(buf, "%d\n", mem->phys_device); |
| 299 | } | 346 | } |
| 300 | 347 | ||
| 301 | static SYSDEV_ATTR(phys_index, 0444, show_mem_phys_index, NULL); | 348 | static SYSDEV_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); |
| 349 | static SYSDEV_ATTR(end_phys_index, 0444, show_mem_end_phys_index, NULL); | ||
| 302 | static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state); | 350 | static SYSDEV_ATTR(state, 0644, show_mem_state, store_mem_state); |
| 303 | static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL); | 351 | static SYSDEV_ATTR(phys_device, 0444, show_phys_device, NULL); |
| 304 | static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL); | 352 | static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL); |
| @@ -315,7 +363,7 @@ static ssize_t | |||
| 315 | print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr, | 363 | print_block_size(struct sysdev_class *class, struct sysdev_class_attribute *attr, |
| 316 | char *buf) | 364 | char *buf) |
| 317 | { | 365 | { |
| 318 | return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); | 366 | return sprintf(buf, "%lx\n", get_memory_block_size()); |
| 319 | } | 367 | } |
| 320 | 368 | ||
| 321 | static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); | 369 | static SYSDEV_CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); |
| @@ -339,12 +387,19 @@ memory_probe_store(struct class *class, struct class_attribute *attr, | |||
| 339 | { | 387 | { |
| 340 | u64 phys_addr; | 388 | u64 phys_addr; |
| 341 | int nid; | 389 | int nid; |
| 342 | int ret; | 390 | int i, ret; |
| 343 | 391 | ||
| 344 | phys_addr = simple_strtoull(buf, NULL, 0); | 392 | phys_addr = simple_strtoull(buf, NULL, 0); |
| 345 | 393 | ||
| 346 | nid = memory_add_physaddr_to_nid(phys_addr); | 394 | for (i = 0; i < sections_per_block; i++) { |
| 347 | ret = add_memory(nid, phys_addr, PAGES_PER_SECTION << PAGE_SHIFT); | 395 | nid = memory_add_physaddr_to_nid(phys_addr); |
| 396 | ret = add_memory(nid, phys_addr, | ||
| 397 | PAGES_PER_SECTION << PAGE_SHIFT); | ||
| 398 | if (ret) | ||
| 399 | break; | ||
| 400 | |||
| 401 | phys_addr += MIN_MEMORY_BLOCK_SIZE; | ||
| 402 | } | ||
| 348 | 403 | ||
| 349 | if (ret) | 404 | if (ret) |
| 350 | count = ret; | 405 | count = ret; |
| @@ -444,6 +499,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section, | |||
| 444 | struct sys_device *sysdev; | 499 | struct sys_device *sysdev; |
| 445 | struct memory_block *mem; | 500 | struct memory_block *mem; |
| 446 | char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1]; | 501 | char name[sizeof(MEMORY_CLASS_NAME) + 9 + 1]; |
| 502 | int block_id = base_memory_block_id(__section_nr(section)); | ||
| 447 | 503 | ||
| 448 | kobj = hint ? &hint->sysdev.kobj : NULL; | 504 | kobj = hint ? &hint->sysdev.kobj : NULL; |
| 449 | 505 | ||
| @@ -451,7 +507,7 @@ struct memory_block *find_memory_block_hinted(struct mem_section *section, | |||
| 451 | * This only works because we know that section == sysdev->id | 507 | * This only works because we know that section == sysdev->id |
| 452 | * slightly redundant with sysdev_register() | 508 | * slightly redundant with sysdev_register() |
| 453 | */ | 509 | */ |
| 454 | sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, __section_nr(section)); | 510 | sprintf(&name[0], "%s%d", MEMORY_CLASS_NAME, block_id); |
| 455 | 511 | ||
| 456 | kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj); | 512 | kobj = kset_find_obj_hinted(&memory_sysdev_class.kset, name, kobj); |
| 457 | if (!kobj) | 513 | if (!kobj) |
| @@ -476,36 +532,62 @@ struct memory_block *find_memory_block(struct mem_section *section) | |||
| 476 | return find_memory_block_hinted(section, NULL); | 532 | return find_memory_block_hinted(section, NULL); |
| 477 | } | 533 | } |
| 478 | 534 | ||
| 479 | static int add_memory_block(int nid, struct mem_section *section, | 535 | static int init_memory_block(struct memory_block **memory, |
| 480 | unsigned long state, enum mem_add_context context) | 536 | struct mem_section *section, unsigned long state) |
| 481 | { | 537 | { |
| 482 | struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL); | 538 | struct memory_block *mem; |
| 483 | unsigned long start_pfn; | 539 | unsigned long start_pfn; |
| 540 | int scn_nr; | ||
| 484 | int ret = 0; | 541 | int ret = 0; |
| 485 | 542 | ||
| 543 | mem = kzalloc(sizeof(*mem), GFP_KERNEL); | ||
| 486 | if (!mem) | 544 | if (!mem) |
| 487 | return -ENOMEM; | 545 | return -ENOMEM; |
| 488 | 546 | ||
| 489 | mutex_lock(&mem_sysfs_mutex); | 547 | scn_nr = __section_nr(section); |
| 490 | 548 | mem->start_section_nr = | |
| 491 | mem->phys_index = __section_nr(section); | 549 | base_memory_block_id(scn_nr) * sections_per_block; |
| 550 | mem->end_section_nr = mem->start_section_nr + sections_per_block - 1; | ||
| 492 | mem->state = state; | 551 | mem->state = state; |
| 493 | mem->section_count++; | 552 | mem->section_count++; |
| 494 | mutex_init(&mem->state_mutex); | 553 | mutex_init(&mem->state_mutex); |
| 495 | start_pfn = section_nr_to_pfn(mem->phys_index); | 554 | start_pfn = section_nr_to_pfn(mem->start_section_nr); |
| 496 | mem->phys_device = arch_get_memory_phys_device(start_pfn); | 555 | mem->phys_device = arch_get_memory_phys_device(start_pfn); |
| 497 | 556 | ||
| 498 | ret = register_memory(mem, section); | 557 | ret = register_memory(mem); |
| 499 | if (!ret) | 558 | if (!ret) |
| 500 | ret = mem_create_simple_file(mem, phys_index); | 559 | ret = mem_create_simple_file(mem, phys_index); |
| 501 | if (!ret) | 560 | if (!ret) |
| 561 | ret = mem_create_simple_file(mem, end_phys_index); | ||
| 562 | if (!ret) | ||
| 502 | ret = mem_create_simple_file(mem, state); | 563 | ret = mem_create_simple_file(mem, state); |
| 503 | if (!ret) | 564 | if (!ret) |
| 504 | ret = mem_create_simple_file(mem, phys_device); | 565 | ret = mem_create_simple_file(mem, phys_device); |
| 505 | if (!ret) | 566 | if (!ret) |
| 506 | ret = mem_create_simple_file(mem, removable); | 567 | ret = mem_create_simple_file(mem, removable); |
| 568 | |||
| 569 | *memory = mem; | ||
| 570 | return ret; | ||
| 571 | } | ||
| 572 | |||
| 573 | static int add_memory_section(int nid, struct mem_section *section, | ||
| 574 | unsigned long state, enum mem_add_context context) | ||
| 575 | { | ||
| 576 | struct memory_block *mem; | ||
| 577 | int ret = 0; | ||
| 578 | |||
| 579 | mutex_lock(&mem_sysfs_mutex); | ||
| 580 | |||
| 581 | mem = find_memory_block(section); | ||
| 582 | if (mem) { | ||
| 583 | mem->section_count++; | ||
| 584 | kobject_put(&mem->sysdev.kobj); | ||
| 585 | } else | ||
| 586 | ret = init_memory_block(&mem, section, state); | ||
| 587 | |||
| 507 | if (!ret) { | 588 | if (!ret) { |
| 508 | if (context == HOTPLUG) | 589 | if (context == HOTPLUG && |
| 590 | mem->section_count == sections_per_block) | ||
| 509 | ret = register_mem_sect_under_node(mem, nid); | 591 | ret = register_mem_sect_under_node(mem, nid); |
| 510 | } | 592 | } |
| 511 | 593 | ||
| @@ -520,16 +602,19 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, | |||
| 520 | 602 | ||
| 521 | mutex_lock(&mem_sysfs_mutex); | 603 | mutex_lock(&mem_sysfs_mutex); |
| 522 | mem = find_memory_block(section); | 604 | mem = find_memory_block(section); |
| 605 | unregister_mem_sect_under_nodes(mem, __section_nr(section)); | ||
| 523 | 606 | ||
| 524 | mem->section_count--; | 607 | mem->section_count--; |
| 525 | if (mem->section_count == 0) { | 608 | if (mem->section_count == 0) { |
| 526 | unregister_mem_sect_under_nodes(mem); | ||
| 527 | mem_remove_simple_file(mem, phys_index); | 609 | mem_remove_simple_file(mem, phys_index); |
| 610 | mem_remove_simple_file(mem, end_phys_index); | ||
| 528 | mem_remove_simple_file(mem, state); | 611 | mem_remove_simple_file(mem, state); |
| 529 | mem_remove_simple_file(mem, phys_device); | 612 | mem_remove_simple_file(mem, phys_device); |
| 530 | mem_remove_simple_file(mem, removable); | 613 | mem_remove_simple_file(mem, removable); |
| 531 | unregister_memory(mem, section); | 614 | unregister_memory(mem); |
| 532 | } | 615 | kfree(mem); |
| 616 | } else | ||
| 617 | kobject_put(&mem->sysdev.kobj); | ||
| 533 | 618 | ||
| 534 | mutex_unlock(&mem_sysfs_mutex); | 619 | mutex_unlock(&mem_sysfs_mutex); |
| 535 | return 0; | 620 | return 0; |
| @@ -541,7 +626,7 @@ int remove_memory_block(unsigned long node_id, struct mem_section *section, | |||
| 541 | */ | 626 | */ |
| 542 | int register_new_memory(int nid, struct mem_section *section) | 627 | int register_new_memory(int nid, struct mem_section *section) |
| 543 | { | 628 | { |
| 544 | return add_memory_block(nid, section, MEM_OFFLINE, HOTPLUG); | 629 | return add_memory_section(nid, section, MEM_OFFLINE, HOTPLUG); |
| 545 | } | 630 | } |
| 546 | 631 | ||
| 547 | int unregister_memory_section(struct mem_section *section) | 632 | int unregister_memory_section(struct mem_section *section) |
| @@ -560,12 +645,16 @@ int __init memory_dev_init(void) | |||
| 560 | unsigned int i; | 645 | unsigned int i; |
| 561 | int ret; | 646 | int ret; |
| 562 | int err; | 647 | int err; |
| 648 | unsigned long block_sz; | ||
| 563 | 649 | ||
| 564 | memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops; | 650 | memory_sysdev_class.kset.uevent_ops = &memory_uevent_ops; |
| 565 | ret = sysdev_class_register(&memory_sysdev_class); | 651 | ret = sysdev_class_register(&memory_sysdev_class); |
| 566 | if (ret) | 652 | if (ret) |
| 567 | goto out; | 653 | goto out; |
| 568 | 654 | ||
| 655 | block_sz = get_memory_block_size(); | ||
| 656 | sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE; | ||
| 657 | |||
| 569 | /* | 658 | /* |
| 570 | * Create entries for memory sections that were found | 659 | * Create entries for memory sections that were found |
| 571 | * during boot and have been initialized | 660 | * during boot and have been initialized |
| @@ -573,8 +662,8 @@ int __init memory_dev_init(void) | |||
| 573 | for (i = 0; i < NR_MEM_SECTIONS; i++) { | 662 | for (i = 0; i < NR_MEM_SECTIONS; i++) { |
| 574 | if (!present_section_nr(i)) | 663 | if (!present_section_nr(i)) |
| 575 | continue; | 664 | continue; |
| 576 | err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE, | 665 | err = add_memory_section(0, __nr_to_section(i), MEM_ONLINE, |
| 577 | BOOT); | 666 | BOOT); |
| 578 | if (!ret) | 667 | if (!ret) |
| 579 | ret = err; | 668 | ret = err; |
| 580 | } | 669 | } |
diff --git a/drivers/base/node.c b/drivers/base/node.c index 36b43052001d..b3b72d64e805 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c | |||
| @@ -375,8 +375,10 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) | |||
| 375 | return -EFAULT; | 375 | return -EFAULT; |
| 376 | if (!node_online(nid)) | 376 | if (!node_online(nid)) |
| 377 | return 0; | 377 | return 0; |
| 378 | sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); | 378 | |
| 379 | sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; | 379 | sect_start_pfn = section_nr_to_pfn(mem_blk->start_section_nr); |
| 380 | sect_end_pfn = section_nr_to_pfn(mem_blk->end_section_nr); | ||
| 381 | sect_end_pfn += PAGES_PER_SECTION - 1; | ||
| 380 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { | 382 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { |
| 381 | int page_nid; | 383 | int page_nid; |
| 382 | 384 | ||
| @@ -400,7 +402,8 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid) | |||
| 400 | } | 402 | } |
| 401 | 403 | ||
| 402 | /* unregister memory section under all nodes that it spans */ | 404 | /* unregister memory section under all nodes that it spans */ |
| 403 | int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) | 405 | int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, |
| 406 | unsigned long phys_index) | ||
| 404 | { | 407 | { |
| 405 | NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL); | 408 | NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL); |
| 406 | unsigned long pfn, sect_start_pfn, sect_end_pfn; | 409 | unsigned long pfn, sect_start_pfn, sect_end_pfn; |
| @@ -412,7 +415,8 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) | |||
| 412 | if (!unlinked_nodes) | 415 | if (!unlinked_nodes) |
| 413 | return -ENOMEM; | 416 | return -ENOMEM; |
| 414 | nodes_clear(*unlinked_nodes); | 417 | nodes_clear(*unlinked_nodes); |
| 415 | sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); | 418 | |
| 419 | sect_start_pfn = section_nr_to_pfn(phys_index); | ||
| 416 | sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; | 420 | sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; |
| 417 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { | 421 | for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { |
| 418 | int nid; | 422 | int nid; |
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile index abe46edfe5b4..118c1b92a511 100644 --- a/drivers/base/power/Makefile +++ b/drivers/base/power/Makefile | |||
| @@ -1,7 +1,6 @@ | |||
| 1 | obj-$(CONFIG_PM) += sysfs.o | 1 | obj-$(CONFIG_PM) += sysfs.o generic_ops.o |
| 2 | obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o | 2 | obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o |
| 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o | 3 | obj-$(CONFIG_PM_RUNTIME) += runtime.o |
| 4 | obj-$(CONFIG_PM_OPS) += generic_ops.o | ||
| 5 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o | 4 | obj-$(CONFIG_PM_TRACE_RTC) += trace.o |
| 6 | obj-$(CONFIG_PM_OPP) += opp.o | 5 | obj-$(CONFIG_PM_OPP) += opp.o |
| 7 | 6 | ||
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 83404973f97a..052dc53eef38 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c | |||
| @@ -423,26 +423,22 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) | |||
| 423 | TRACE_DEVICE(dev); | 423 | TRACE_DEVICE(dev); |
| 424 | TRACE_RESUME(0); | 424 | TRACE_RESUME(0); |
| 425 | 425 | ||
| 426 | if (dev->bus && dev->bus->pm) { | 426 | if (dev->pwr_domain) { |
| 427 | pm_dev_dbg(dev, state, "EARLY "); | 427 | pm_dev_dbg(dev, state, "EARLY power domain "); |
| 428 | error = pm_noirq_op(dev, dev->bus->pm, state); | 428 | pm_noirq_op(dev, &dev->pwr_domain->ops, state); |
| 429 | if (error) | ||
| 430 | goto End; | ||
| 431 | } | 429 | } |
| 432 | 430 | ||
| 433 | if (dev->type && dev->type->pm) { | 431 | if (dev->type && dev->type->pm) { |
| 434 | pm_dev_dbg(dev, state, "EARLY type "); | 432 | pm_dev_dbg(dev, state, "EARLY type "); |
| 435 | error = pm_noirq_op(dev, dev->type->pm, state); | 433 | error = pm_noirq_op(dev, dev->type->pm, state); |
| 436 | if (error) | 434 | } else if (dev->class && dev->class->pm) { |
| 437 | goto End; | ||
| 438 | } | ||
| 439 | |||
| 440 | if (dev->class && dev->class->pm) { | ||
| 441 | pm_dev_dbg(dev, state, "EARLY class "); | 435 | pm_dev_dbg(dev, state, "EARLY class "); |
| 442 | error = pm_noirq_op(dev, dev->class->pm, state); | 436 | error = pm_noirq_op(dev, dev->class->pm, state); |
| 437 | } else if (dev->bus && dev->bus->pm) { | ||
| 438 | pm_dev_dbg(dev, state, "EARLY "); | ||
| 439 | error = pm_noirq_op(dev, dev->bus->pm, state); | ||
| 443 | } | 440 | } |
| 444 | 441 | ||
| 445 | End: | ||
| 446 | TRACE_RESUME(error); | 442 | TRACE_RESUME(error); |
| 447 | return error; | 443 | return error; |
| 448 | } | 444 | } |
| @@ -518,36 +514,39 @@ static int device_resume(struct device *dev, pm_message_t state, bool async) | |||
| 518 | 514 | ||
| 519 | dev->power.in_suspend = false; | 515 | dev->power.in_suspend = false; |
| 520 | 516 | ||
| 521 | if (dev->bus) { | 517 | if (dev->pwr_domain) { |
| 522 | if (dev->bus->pm) { | 518 | pm_dev_dbg(dev, state, "power domain "); |
| 523 | pm_dev_dbg(dev, state, ""); | 519 | pm_op(dev, &dev->pwr_domain->ops, state); |
| 524 | error = pm_op(dev, dev->bus->pm, state); | ||
| 525 | } else if (dev->bus->resume) { | ||
| 526 | pm_dev_dbg(dev, state, "legacy "); | ||
| 527 | error = legacy_resume(dev, dev->bus->resume); | ||
| 528 | } | ||
| 529 | if (error) | ||
| 530 | goto End; | ||
| 531 | } | 520 | } |
| 532 | 521 | ||
| 533 | if (dev->type) { | 522 | if (dev->type && dev->type->pm) { |
| 534 | if (dev->type->pm) { | 523 | pm_dev_dbg(dev, state, "type "); |
| 535 | pm_dev_dbg(dev, state, "type "); | 524 | error = pm_op(dev, dev->type->pm, state); |
| 536 | error = pm_op(dev, dev->type->pm, state); | 525 | goto End; |
| 537 | } | ||
| 538 | if (error) | ||
| 539 | goto End; | ||
| 540 | } | 526 | } |
| 541 | 527 | ||
| 542 | if (dev->class) { | 528 | if (dev->class) { |
| 543 | if (dev->class->pm) { | 529 | if (dev->class->pm) { |
| 544 | pm_dev_dbg(dev, state, "class "); | 530 | pm_dev_dbg(dev, state, "class "); |
| 545 | error = pm_op(dev, dev->class->pm, state); | 531 | error = pm_op(dev, dev->class->pm, state); |
| 532 | goto End; | ||
| 546 | } else if (dev->class->resume) { | 533 | } else if (dev->class->resume) { |
| 547 | pm_dev_dbg(dev, state, "legacy class "); | 534 | pm_dev_dbg(dev, state, "legacy class "); |
| 548 | error = legacy_resume(dev, dev->class->resume); | 535 | error = legacy_resume(dev, dev->class->resume); |
| 536 | goto End; | ||
| 549 | } | 537 | } |
| 550 | } | 538 | } |
| 539 | |||
| 540 | if (dev->bus) { | ||
| 541 | if (dev->bus->pm) { | ||
| 542 | pm_dev_dbg(dev, state, ""); | ||
| 543 | error = pm_op(dev, dev->bus->pm, state); | ||
| 544 | } else if (dev->bus->resume) { | ||
| 545 | pm_dev_dbg(dev, state, "legacy "); | ||
| 546 | error = legacy_resume(dev, dev->bus->resume); | ||
| 547 | } | ||
| 548 | } | ||
| 549 | |||
| 551 | End: | 550 | End: |
| 552 | device_unlock(dev); | 551 | device_unlock(dev); |
| 553 | complete_all(&dev->power.completion); | 552 | complete_all(&dev->power.completion); |
| @@ -629,19 +628,23 @@ static void device_complete(struct device *dev, pm_message_t state) | |||
| 629 | { | 628 | { |
| 630 | device_lock(dev); | 629 | device_lock(dev); |
| 631 | 630 | ||
| 632 | if (dev->class && dev->class->pm && dev->class->pm->complete) { | 631 | if (dev->pwr_domain && dev->pwr_domain->ops.complete) { |
| 633 | pm_dev_dbg(dev, state, "completing class "); | 632 | pm_dev_dbg(dev, state, "completing power domain "); |
| 634 | dev->class->pm->complete(dev); | 633 | dev->pwr_domain->ops.complete(dev); |
| 635 | } | 634 | } |
| 636 | 635 | ||
| 637 | if (dev->type && dev->type->pm && dev->type->pm->complete) { | 636 | if (dev->type && dev->type->pm) { |
| 638 | pm_dev_dbg(dev, state, "completing type "); | 637 | pm_dev_dbg(dev, state, "completing type "); |
| 639 | dev->type->pm->complete(dev); | 638 | if (dev->type->pm->complete) |
| 640 | } | 639 | dev->type->pm->complete(dev); |
| 641 | 640 | } else if (dev->class && dev->class->pm) { | |
| 642 | if (dev->bus && dev->bus->pm && dev->bus->pm->complete) { | 641 | pm_dev_dbg(dev, state, "completing class "); |
| 642 | if (dev->class->pm->complete) | ||
| 643 | dev->class->pm->complete(dev); | ||
| 644 | } else if (dev->bus && dev->bus->pm) { | ||
| 643 | pm_dev_dbg(dev, state, "completing "); | 645 | pm_dev_dbg(dev, state, "completing "); |
| 644 | dev->bus->pm->complete(dev); | 646 | if (dev->bus->pm->complete) |
| 647 | dev->bus->pm->complete(dev); | ||
| 645 | } | 648 | } |
| 646 | 649 | ||
| 647 | device_unlock(dev); | 650 | device_unlock(dev); |
| @@ -669,7 +672,6 @@ static void dpm_complete(pm_message_t state) | |||
| 669 | mutex_unlock(&dpm_list_mtx); | 672 | mutex_unlock(&dpm_list_mtx); |
| 670 | 673 | ||
| 671 | device_complete(dev, state); | 674 | device_complete(dev, state); |
| 672 | pm_runtime_put_sync(dev); | ||
| 673 | 675 | ||
| 674 | mutex_lock(&dpm_list_mtx); | 676 | mutex_lock(&dpm_list_mtx); |
| 675 | put_device(dev); | 677 | put_device(dev); |
| @@ -727,29 +729,31 @@ static pm_message_t resume_event(pm_message_t sleep_state) | |||
| 727 | */ | 729 | */ |
| 728 | static int device_suspend_noirq(struct device *dev, pm_message_t state) | 730 | static int device_suspend_noirq(struct device *dev, pm_message_t state) |
| 729 | { | 731 | { |
| 730 | int error = 0; | 732 | int error; |
| 731 | |||
| 732 | if (dev->class && dev->class->pm) { | ||
| 733 | pm_dev_dbg(dev, state, "LATE class "); | ||
| 734 | error = pm_noirq_op(dev, dev->class->pm, state); | ||
| 735 | if (error) | ||
| 736 | goto End; | ||
| 737 | } | ||
| 738 | 733 | ||
| 739 | if (dev->type && dev->type->pm) { | 734 | if (dev->type && dev->type->pm) { |
| 740 | pm_dev_dbg(dev, state, "LATE type "); | 735 | pm_dev_dbg(dev, state, "LATE type "); |
| 741 | error = pm_noirq_op(dev, dev->type->pm, state); | 736 | error = pm_noirq_op(dev, dev->type->pm, state); |
| 742 | if (error) | 737 | if (error) |
| 743 | goto End; | 738 | return error; |
| 744 | } | 739 | } else if (dev->class && dev->class->pm) { |
| 745 | 740 | pm_dev_dbg(dev, state, "LATE class "); | |
| 746 | if (dev->bus && dev->bus->pm) { | 741 | error = pm_noirq_op(dev, dev->class->pm, state); |
| 742 | if (error) | ||
| 743 | return error; | ||
| 744 | } else if (dev->bus && dev->bus->pm) { | ||
| 747 | pm_dev_dbg(dev, state, "LATE "); | 745 | pm_dev_dbg(dev, state, "LATE "); |
| 748 | error = pm_noirq_op(dev, dev->bus->pm, state); | 746 | error = pm_noirq_op(dev, dev->bus->pm, state); |
| 747 | if (error) | ||
| 748 | return error; | ||
| 749 | } | 749 | } |
| 750 | 750 | ||
| 751 | End: | 751 | if (dev->pwr_domain) { |
| 752 | return error; | 752 | pm_dev_dbg(dev, state, "LATE power domain "); |
| 753 | pm_noirq_op(dev, &dev->pwr_domain->ops, state); | ||
| 754 | } | ||
| 755 | |||
| 756 | return 0; | ||
| 753 | } | 757 | } |
| 754 | 758 | ||
| 755 | /** | 759 | /** |
| @@ -836,25 +840,22 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
| 836 | goto End; | 840 | goto End; |
| 837 | } | 841 | } |
| 838 | 842 | ||
| 843 | if (dev->type && dev->type->pm) { | ||
| 844 | pm_dev_dbg(dev, state, "type "); | ||
| 845 | error = pm_op(dev, dev->type->pm, state); | ||
| 846 | goto Domain; | ||
| 847 | } | ||
| 848 | |||
| 839 | if (dev->class) { | 849 | if (dev->class) { |
| 840 | if (dev->class->pm) { | 850 | if (dev->class->pm) { |
| 841 | pm_dev_dbg(dev, state, "class "); | 851 | pm_dev_dbg(dev, state, "class "); |
| 842 | error = pm_op(dev, dev->class->pm, state); | 852 | error = pm_op(dev, dev->class->pm, state); |
| 853 | goto Domain; | ||
| 843 | } else if (dev->class->suspend) { | 854 | } else if (dev->class->suspend) { |
| 844 | pm_dev_dbg(dev, state, "legacy class "); | 855 | pm_dev_dbg(dev, state, "legacy class "); |
| 845 | error = legacy_suspend(dev, state, dev->class->suspend); | 856 | error = legacy_suspend(dev, state, dev->class->suspend); |
| 857 | goto Domain; | ||
| 846 | } | 858 | } |
| 847 | if (error) | ||
| 848 | goto End; | ||
| 849 | } | ||
| 850 | |||
| 851 | if (dev->type) { | ||
| 852 | if (dev->type->pm) { | ||
| 853 | pm_dev_dbg(dev, state, "type "); | ||
| 854 | error = pm_op(dev, dev->type->pm, state); | ||
| 855 | } | ||
| 856 | if (error) | ||
| 857 | goto End; | ||
| 858 | } | 859 | } |
| 859 | 860 | ||
| 860 | if (dev->bus) { | 861 | if (dev->bus) { |
| @@ -867,6 +868,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) | |||
| 867 | } | 868 | } |
| 868 | } | 869 | } |
| 869 | 870 | ||
| 871 | Domain: | ||
| 872 | if (!error && dev->pwr_domain) { | ||
| 873 | pm_dev_dbg(dev, state, "power domain "); | ||
| 874 | pm_op(dev, &dev->pwr_domain->ops, state); | ||
| 875 | } | ||
| 876 | |||
| 870 | End: | 877 | End: |
| 871 | device_unlock(dev); | 878 | device_unlock(dev); |
| 872 | complete_all(&dev->power.completion); | 879 | complete_all(&dev->power.completion); |
| @@ -957,27 +964,34 @@ static int device_prepare(struct device *dev, pm_message_t state) | |||
| 957 | 964 | ||
| 958 | device_lock(dev); | 965 | device_lock(dev); |
| 959 | 966 | ||
| 960 | if (dev->bus && dev->bus->pm && dev->bus->pm->prepare) { | 967 | if (dev->type && dev->type->pm) { |
| 968 | pm_dev_dbg(dev, state, "preparing type "); | ||
| 969 | if (dev->type->pm->prepare) | ||
| 970 | error = dev->type->pm->prepare(dev); | ||
| 971 | suspend_report_result(dev->type->pm->prepare, error); | ||
| 972 | if (error) | ||
| 973 | goto End; | ||
| 974 | } else if (dev->class && dev->class->pm) { | ||
| 975 | pm_dev_dbg(dev, state, "preparing class "); | ||
| 976 | if (dev->class->pm->prepare) | ||
| 977 | error = dev->class->pm->prepare(dev); | ||
| 978 | suspend_report_result(dev->class->pm->prepare, error); | ||
| 979 | if (error) | ||
| 980 | goto End; | ||
| 981 | } else if (dev->bus && dev->bus->pm) { | ||
| 961 | pm_dev_dbg(dev, state, "preparing "); | 982 | pm_dev_dbg(dev, state, "preparing "); |
| 962 | error = dev->bus->pm->prepare(dev); | 983 | if (dev->bus->pm->prepare) |
| 984 | error = dev->bus->pm->prepare(dev); | ||
| 963 | suspend_report_result(dev->bus->pm->prepare, error); | 985 | suspend_report_result(dev->bus->pm->prepare, error); |
| 964 | if (error) | 986 | if (error) |
| 965 | goto End; | 987 | goto End; |
| 966 | } | 988 | } |
| 967 | 989 | ||
| 968 | if (dev->type && dev->type->pm && dev->type->pm->prepare) { | 990 | if (dev->pwr_domain && dev->pwr_domain->ops.prepare) { |
| 969 | pm_dev_dbg(dev, state, "preparing type "); | 991 | pm_dev_dbg(dev, state, "preparing power domain "); |
| 970 | error = dev->type->pm->prepare(dev); | 992 | dev->pwr_domain->ops.prepare(dev); |
| 971 | suspend_report_result(dev->type->pm->prepare, error); | ||
| 972 | if (error) | ||
| 973 | goto End; | ||
| 974 | } | 993 | } |
| 975 | 994 | ||
| 976 | if (dev->class && dev->class->pm && dev->class->pm->prepare) { | ||
| 977 | pm_dev_dbg(dev, state, "preparing class "); | ||
| 978 | error = dev->class->pm->prepare(dev); | ||
| 979 | suspend_report_result(dev->class->pm->prepare, error); | ||
| 980 | } | ||
| 981 | End: | 995 | End: |
| 982 | device_unlock(dev); | 996 | device_unlock(dev); |
| 983 | 997 | ||
| @@ -1005,12 +1019,9 @@ static int dpm_prepare(pm_message_t state) | |||
| 1005 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) | 1019 | if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) |
| 1006 | pm_wakeup_event(dev, 0); | 1020 | pm_wakeup_event(dev, 0); |
| 1007 | 1021 | ||
| 1008 | if (pm_wakeup_pending()) { | 1022 | pm_runtime_put_sync(dev); |
| 1009 | pm_runtime_put_sync(dev); | 1023 | error = pm_wakeup_pending() ? |
| 1010 | error = -EBUSY; | 1024 | -EBUSY : device_prepare(dev, state); |
| 1011 | } else { | ||
| 1012 | error = device_prepare(dev, state); | ||
| 1013 | } | ||
| 1014 | 1025 | ||
| 1015 | mutex_lock(&dpm_list_mtx); | 1026 | mutex_lock(&dpm_list_mtx); |
| 1016 | if (error) { | 1027 | if (error) { |
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 2bb9b4cf59d7..56a6899f5e9e 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c | |||
| @@ -222,7 +222,7 @@ int opp_get_opp_count(struct device *dev) | |||
| 222 | * opp_find_freq_exact() - search for an exact frequency | 222 | * opp_find_freq_exact() - search for an exact frequency |
| 223 | * @dev: device for which we do this operation | 223 | * @dev: device for which we do this operation |
| 224 | * @freq: frequency to search for | 224 | * @freq: frequency to search for |
| 225 | * @is_available: true/false - match for available opp | 225 | * @available: true/false - match for available opp |
| 226 | * | 226 | * |
| 227 | * Searches for exact match in the opp list and returns pointer to the matching | 227 | * Searches for exact match in the opp list and returns pointer to the matching |
| 228 | * opp if found, else returns ERR_PTR in case of error and should be handled | 228 | * opp if found, else returns ERR_PTR in case of error and should be handled |
diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 698dde742587..f2a25f18fde7 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h | |||
| @@ -58,19 +58,18 @@ static inline void device_pm_move_last(struct device *dev) {} | |||
| 58 | * sysfs.c | 58 | * sysfs.c |
| 59 | */ | 59 | */ |
| 60 | 60 | ||
| 61 | extern int dpm_sysfs_add(struct device *); | 61 | extern int dpm_sysfs_add(struct device *dev); |
| 62 | extern void dpm_sysfs_remove(struct device *); | 62 | extern void dpm_sysfs_remove(struct device *dev); |
| 63 | extern void rpm_sysfs_remove(struct device *); | 63 | extern void rpm_sysfs_remove(struct device *dev); |
| 64 | extern int wakeup_sysfs_add(struct device *dev); | ||
| 65 | extern void wakeup_sysfs_remove(struct device *dev); | ||
| 64 | 66 | ||
| 65 | #else /* CONFIG_PM */ | 67 | #else /* CONFIG_PM */ |
| 66 | 68 | ||
| 67 | static inline int dpm_sysfs_add(struct device *dev) | 69 | static inline int dpm_sysfs_add(struct device *dev) { return 0; } |
| 68 | { | 70 | static inline void dpm_sysfs_remove(struct device *dev) {} |
| 69 | return 0; | 71 | static inline void rpm_sysfs_remove(struct device *dev) {} |
| 70 | } | 72 | static inline int wakeup_sysfs_add(struct device *dev) { return 0; } |
| 71 | 73 | static inline void wakeup_sysfs_remove(struct device *dev) {} | |
| 72 | static inline void dpm_sysfs_remove(struct device *dev) | ||
| 73 | { | ||
| 74 | } | ||
| 75 | 74 | ||
| 76 | #endif | 75 | #endif |
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 656493a5e073..54597c859ecb 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c | |||
| @@ -168,6 +168,7 @@ static int rpm_check_suspend_allowed(struct device *dev) | |||
| 168 | static int rpm_idle(struct device *dev, int rpmflags) | 168 | static int rpm_idle(struct device *dev, int rpmflags) |
| 169 | { | 169 | { |
| 170 | int (*callback)(struct device *); | 170 | int (*callback)(struct device *); |
| 171 | int (*domain_callback)(struct device *); | ||
| 171 | int retval; | 172 | int retval; |
| 172 | 173 | ||
| 173 | retval = rpm_check_suspend_allowed(dev); | 174 | retval = rpm_check_suspend_allowed(dev); |
| @@ -213,19 +214,28 @@ static int rpm_idle(struct device *dev, int rpmflags) | |||
| 213 | 214 | ||
| 214 | dev->power.idle_notification = true; | 215 | dev->power.idle_notification = true; |
| 215 | 216 | ||
| 216 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_idle) | 217 | if (dev->type && dev->type->pm) |
| 217 | callback = dev->bus->pm->runtime_idle; | ||
| 218 | else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) | ||
| 219 | callback = dev->type->pm->runtime_idle; | 218 | callback = dev->type->pm->runtime_idle; |
| 220 | else if (dev->class && dev->class->pm) | 219 | else if (dev->class && dev->class->pm) |
| 221 | callback = dev->class->pm->runtime_idle; | 220 | callback = dev->class->pm->runtime_idle; |
| 221 | else if (dev->bus && dev->bus->pm) | ||
| 222 | callback = dev->bus->pm->runtime_idle; | ||
| 222 | else | 223 | else |
| 223 | callback = NULL; | 224 | callback = NULL; |
| 224 | 225 | ||
| 225 | if (callback) { | 226 | if (dev->pwr_domain) |
| 227 | domain_callback = dev->pwr_domain->ops.runtime_idle; | ||
| 228 | else | ||
| 229 | domain_callback = NULL; | ||
| 230 | |||
| 231 | if (callback || domain_callback) { | ||
| 226 | spin_unlock_irq(&dev->power.lock); | 232 | spin_unlock_irq(&dev->power.lock); |
| 227 | 233 | ||
| 228 | callback(dev); | 234 | if (domain_callback) |
| 235 | retval = domain_callback(dev); | ||
| 236 | |||
| 237 | if (!retval && callback) | ||
| 238 | callback(dev); | ||
| 229 | 239 | ||
| 230 | spin_lock_irq(&dev->power.lock); | 240 | spin_lock_irq(&dev->power.lock); |
| 231 | } | 241 | } |
| @@ -372,12 +382,12 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
| 372 | 382 | ||
| 373 | __update_runtime_status(dev, RPM_SUSPENDING); | 383 | __update_runtime_status(dev, RPM_SUSPENDING); |
| 374 | 384 | ||
| 375 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) | 385 | if (dev->type && dev->type->pm) |
| 376 | callback = dev->bus->pm->runtime_suspend; | ||
| 377 | else if (dev->type && dev->type->pm && dev->type->pm->runtime_suspend) | ||
| 378 | callback = dev->type->pm->runtime_suspend; | 386 | callback = dev->type->pm->runtime_suspend; |
| 379 | else if (dev->class && dev->class->pm) | 387 | else if (dev->class && dev->class->pm) |
| 380 | callback = dev->class->pm->runtime_suspend; | 388 | callback = dev->class->pm->runtime_suspend; |
| 389 | else if (dev->bus && dev->bus->pm) | ||
| 390 | callback = dev->bus->pm->runtime_suspend; | ||
| 381 | else | 391 | else |
| 382 | callback = NULL; | 392 | callback = NULL; |
| 383 | 393 | ||
| @@ -390,6 +400,8 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
| 390 | else | 400 | else |
| 391 | pm_runtime_cancel_pending(dev); | 401 | pm_runtime_cancel_pending(dev); |
| 392 | } else { | 402 | } else { |
| 403 | if (dev->pwr_domain) | ||
| 404 | rpm_callback(dev->pwr_domain->ops.runtime_suspend, dev); | ||
| 393 | no_callback: | 405 | no_callback: |
| 394 | __update_runtime_status(dev, RPM_SUSPENDED); | 406 | __update_runtime_status(dev, RPM_SUSPENDED); |
| 395 | pm_runtime_deactivate_timer(dev); | 407 | pm_runtime_deactivate_timer(dev); |
| @@ -407,12 +419,15 @@ static int rpm_suspend(struct device *dev, int rpmflags) | |||
| 407 | goto out; | 419 | goto out; |
| 408 | } | 420 | } |
| 409 | 421 | ||
| 422 | /* Maybe the parent is now able to suspend. */ | ||
| 410 | if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { | 423 | if (parent && !parent->power.ignore_children && !dev->power.irq_safe) { |
| 411 | spin_unlock_irq(&dev->power.lock); | 424 | spin_unlock(&dev->power.lock); |
| 412 | 425 | ||
| 413 | pm_request_idle(parent); | 426 | spin_lock(&parent->power.lock); |
| 427 | rpm_idle(parent, RPM_ASYNC); | ||
| 428 | spin_unlock(&parent->power.lock); | ||
| 414 | 429 | ||
| 415 | spin_lock_irq(&dev->power.lock); | 430 | spin_lock(&dev->power.lock); |
| 416 | } | 431 | } |
| 417 | 432 | ||
| 418 | out: | 433 | out: |
| @@ -566,12 +581,15 @@ static int rpm_resume(struct device *dev, int rpmflags) | |||
| 566 | 581 | ||
| 567 | __update_runtime_status(dev, RPM_RESUMING); | 582 | __update_runtime_status(dev, RPM_RESUMING); |
| 568 | 583 | ||
| 569 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) | 584 | if (dev->pwr_domain) |
| 570 | callback = dev->bus->pm->runtime_resume; | 585 | rpm_callback(dev->pwr_domain->ops.runtime_resume, dev); |
| 571 | else if (dev->type && dev->type->pm && dev->type->pm->runtime_resume) | 586 | |
| 587 | if (dev->type && dev->type->pm) | ||
| 572 | callback = dev->type->pm->runtime_resume; | 588 | callback = dev->type->pm->runtime_resume; |
| 573 | else if (dev->class && dev->class->pm) | 589 | else if (dev->class && dev->class->pm) |
| 574 | callback = dev->class->pm->runtime_resume; | 590 | callback = dev->class->pm->runtime_resume; |
| 591 | else if (dev->bus && dev->bus->pm) | ||
| 592 | callback = dev->bus->pm->runtime_resume; | ||
| 575 | else | 593 | else |
| 576 | callback = NULL; | 594 | callback = NULL; |
| 577 | 595 | ||
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 0b1e46bf3e56..fff49bee781d 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c | |||
| @@ -431,26 +431,18 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr, | |||
| 431 | static DEVICE_ATTR(async, 0644, async_show, async_store); | 431 | static DEVICE_ATTR(async, 0644, async_show, async_store); |
| 432 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ | 432 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ |
| 433 | 433 | ||
| 434 | static struct attribute * power_attrs[] = { | 434 | static struct attribute *power_attrs[] = { |
| 435 | &dev_attr_wakeup.attr, | ||
| 436 | #ifdef CONFIG_PM_SLEEP | ||
| 437 | &dev_attr_wakeup_count.attr, | ||
| 438 | &dev_attr_wakeup_active_count.attr, | ||
| 439 | &dev_attr_wakeup_hit_count.attr, | ||
| 440 | &dev_attr_wakeup_active.attr, | ||
| 441 | &dev_attr_wakeup_total_time_ms.attr, | ||
| 442 | &dev_attr_wakeup_max_time_ms.attr, | ||
| 443 | &dev_attr_wakeup_last_time_ms.attr, | ||
| 444 | #endif | ||
| 445 | #ifdef CONFIG_PM_ADVANCED_DEBUG | 435 | #ifdef CONFIG_PM_ADVANCED_DEBUG |
| 436 | #ifdef CONFIG_PM_SLEEP | ||
| 446 | &dev_attr_async.attr, | 437 | &dev_attr_async.attr, |
| 438 | #endif | ||
| 447 | #ifdef CONFIG_PM_RUNTIME | 439 | #ifdef CONFIG_PM_RUNTIME |
| 448 | &dev_attr_runtime_status.attr, | 440 | &dev_attr_runtime_status.attr, |
| 449 | &dev_attr_runtime_usage.attr, | 441 | &dev_attr_runtime_usage.attr, |
| 450 | &dev_attr_runtime_active_kids.attr, | 442 | &dev_attr_runtime_active_kids.attr, |
| 451 | &dev_attr_runtime_enabled.attr, | 443 | &dev_attr_runtime_enabled.attr, |
| 452 | #endif | 444 | #endif |
| 453 | #endif | 445 | #endif /* CONFIG_PM_ADVANCED_DEBUG */ |
| 454 | NULL, | 446 | NULL, |
| 455 | }; | 447 | }; |
| 456 | static struct attribute_group pm_attr_group = { | 448 | static struct attribute_group pm_attr_group = { |
| @@ -458,9 +450,26 @@ static struct attribute_group pm_attr_group = { | |||
| 458 | .attrs = power_attrs, | 450 | .attrs = power_attrs, |
| 459 | }; | 451 | }; |
| 460 | 452 | ||
| 461 | #ifdef CONFIG_PM_RUNTIME | 453 | static struct attribute *wakeup_attrs[] = { |
| 454 | #ifdef CONFIG_PM_SLEEP | ||
| 455 | &dev_attr_wakeup.attr, | ||
| 456 | &dev_attr_wakeup_count.attr, | ||
| 457 | &dev_attr_wakeup_active_count.attr, | ||
| 458 | &dev_attr_wakeup_hit_count.attr, | ||
| 459 | &dev_attr_wakeup_active.attr, | ||
| 460 | &dev_attr_wakeup_total_time_ms.attr, | ||
| 461 | &dev_attr_wakeup_max_time_ms.attr, | ||
| 462 | &dev_attr_wakeup_last_time_ms.attr, | ||
| 463 | #endif | ||
| 464 | NULL, | ||
| 465 | }; | ||
| 466 | static struct attribute_group pm_wakeup_attr_group = { | ||
| 467 | .name = power_group_name, | ||
| 468 | .attrs = wakeup_attrs, | ||
| 469 | }; | ||
| 462 | 470 | ||
| 463 | static struct attribute *runtime_attrs[] = { | 471 | static struct attribute *runtime_attrs[] = { |
| 472 | #ifdef CONFIG_PM_RUNTIME | ||
| 464 | #ifndef CONFIG_PM_ADVANCED_DEBUG | 473 | #ifndef CONFIG_PM_ADVANCED_DEBUG |
| 465 | &dev_attr_runtime_status.attr, | 474 | &dev_attr_runtime_status.attr, |
| 466 | #endif | 475 | #endif |
| @@ -468,6 +477,7 @@ static struct attribute *runtime_attrs[] = { | |||
| 468 | &dev_attr_runtime_suspended_time.attr, | 477 | &dev_attr_runtime_suspended_time.attr, |
| 469 | &dev_attr_runtime_active_time.attr, | 478 | &dev_attr_runtime_active_time.attr, |
| 470 | &dev_attr_autosuspend_delay_ms.attr, | 479 | &dev_attr_autosuspend_delay_ms.attr, |
| 480 | #endif /* CONFIG_PM_RUNTIME */ | ||
| 471 | NULL, | 481 | NULL, |
| 472 | }; | 482 | }; |
| 473 | static struct attribute_group pm_runtime_attr_group = { | 483 | static struct attribute_group pm_runtime_attr_group = { |
| @@ -480,35 +490,49 @@ int dpm_sysfs_add(struct device *dev) | |||
| 480 | int rc; | 490 | int rc; |
| 481 | 491 | ||
| 482 | rc = sysfs_create_group(&dev->kobj, &pm_attr_group); | 492 | rc = sysfs_create_group(&dev->kobj, &pm_attr_group); |
| 483 | if (rc == 0 && !dev->power.no_callbacks) { | 493 | if (rc) |
| 494 | return rc; | ||
| 495 | |||
| 496 | if (pm_runtime_callbacks_present(dev)) { | ||
| 484 | rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); | 497 | rc = sysfs_merge_group(&dev->kobj, &pm_runtime_attr_group); |
| 485 | if (rc) | 498 | if (rc) |
| 486 | sysfs_remove_group(&dev->kobj, &pm_attr_group); | 499 | goto err_out; |
| 500 | } | ||
| 501 | |||
| 502 | if (device_can_wakeup(dev)) { | ||
| 503 | rc = sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); | ||
| 504 | if (rc) { | ||
| 505 | if (pm_runtime_callbacks_present(dev)) | ||
| 506 | sysfs_unmerge_group(&dev->kobj, | ||
| 507 | &pm_runtime_attr_group); | ||
| 508 | goto err_out; | ||
| 509 | } | ||
| 487 | } | 510 | } |
| 511 | return 0; | ||
| 512 | |||
| 513 | err_out: | ||
| 514 | sysfs_remove_group(&dev->kobj, &pm_attr_group); | ||
| 488 | return rc; | 515 | return rc; |
| 489 | } | 516 | } |
| 490 | 517 | ||
| 491 | void rpm_sysfs_remove(struct device *dev) | 518 | int wakeup_sysfs_add(struct device *dev) |
| 492 | { | 519 | { |
| 493 | sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); | 520 | return sysfs_merge_group(&dev->kobj, &pm_wakeup_attr_group); |
| 494 | } | 521 | } |
| 495 | 522 | ||
| 496 | void dpm_sysfs_remove(struct device *dev) | 523 | void wakeup_sysfs_remove(struct device *dev) |
| 497 | { | 524 | { |
| 498 | rpm_sysfs_remove(dev); | 525 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); |
| 499 | sysfs_remove_group(&dev->kobj, &pm_attr_group); | ||
| 500 | } | 526 | } |
| 501 | 527 | ||
| 502 | #else /* CONFIG_PM_RUNTIME */ | 528 | void rpm_sysfs_remove(struct device *dev) |
| 503 | |||
| 504 | int dpm_sysfs_add(struct device * dev) | ||
| 505 | { | 529 | { |
| 506 | return sysfs_create_group(&dev->kobj, &pm_attr_group); | 530 | sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); |
| 507 | } | 531 | } |
| 508 | 532 | ||
| 509 | void dpm_sysfs_remove(struct device * dev) | 533 | void dpm_sysfs_remove(struct device *dev) |
| 510 | { | 534 | { |
| 535 | rpm_sysfs_remove(dev); | ||
| 536 | sysfs_unmerge_group(&dev->kobj, &pm_wakeup_attr_group); | ||
| 511 | sysfs_remove_group(&dev->kobj, &pm_attr_group); | 537 | sysfs_remove_group(&dev->kobj, &pm_attr_group); |
| 512 | } | 538 | } |
| 513 | |||
| 514 | #endif | ||
diff --git a/drivers/base/power/trace.c b/drivers/base/power/trace.c index 9f4258df4cfd..c80e138b62fe 100644 --- a/drivers/base/power/trace.c +++ b/drivers/base/power/trace.c | |||
| @@ -112,7 +112,7 @@ static unsigned int read_magic_time(void) | |||
| 112 | unsigned int val; | 112 | unsigned int val; |
| 113 | 113 | ||
| 114 | get_rtc_time(&time); | 114 | get_rtc_time(&time); |
| 115 | printk("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n", | 115 | pr_info("Time: %2d:%02d:%02d Date: %02d/%02d/%02d\n", |
| 116 | time.tm_hour, time.tm_min, time.tm_sec, | 116 | time.tm_hour, time.tm_min, time.tm_sec, |
| 117 | time.tm_mon + 1, time.tm_mday, time.tm_year % 100); | 117 | time.tm_mon + 1, time.tm_mday, time.tm_year % 100); |
| 118 | val = time.tm_year; /* 100 years */ | 118 | val = time.tm_year; /* 100 years */ |
| @@ -179,7 +179,7 @@ static int show_file_hash(unsigned int value) | |||
| 179 | unsigned int hash = hash_string(lineno, file, FILEHASH); | 179 | unsigned int hash = hash_string(lineno, file, FILEHASH); |
| 180 | if (hash != value) | 180 | if (hash != value) |
| 181 | continue; | 181 | continue; |
| 182 | printk(" hash matches %s:%u\n", file, lineno); | 182 | pr_info(" hash matches %s:%u\n", file, lineno); |
| 183 | match++; | 183 | match++; |
| 184 | } | 184 | } |
| 185 | return match; | 185 | return match; |
| @@ -255,7 +255,7 @@ static int late_resume_init(void) | |||
| 255 | val = val / FILEHASH; | 255 | val = val / FILEHASH; |
| 256 | dev = val /* % DEVHASH */; | 256 | dev = val /* % DEVHASH */; |
| 257 | 257 | ||
| 258 | printk(" Magic number: %d:%d:%d\n", user, file, dev); | 258 | pr_info(" Magic number: %d:%d:%d\n", user, file, dev); |
| 259 | show_file_hash(file); | 259 | show_file_hash(file); |
| 260 | show_dev_hash(dev); | 260 | show_dev_hash(dev); |
| 261 | return 0; | 261 | return 0; |
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 8ec406d8f548..4573c83df6dd 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c | |||
| @@ -24,12 +24,26 @@ | |||
| 24 | */ | 24 | */ |
| 25 | bool events_check_enabled; | 25 | bool events_check_enabled; |
| 26 | 26 | ||
| 27 | /* The counter of registered wakeup events. */ | 27 | /* |
| 28 | static atomic_t event_count = ATOMIC_INIT(0); | 28 | * Combined counters of registered wakeup events and wakeup events in progress. |
| 29 | /* A preserved old value of event_count. */ | 29 | * They need to be modified together atomically, so it's better to use one |
| 30 | * atomic variable to hold them both. | ||
| 31 | */ | ||
| 32 | static atomic_t combined_event_count = ATOMIC_INIT(0); | ||
| 33 | |||
| 34 | #define IN_PROGRESS_BITS (sizeof(int) * 4) | ||
| 35 | #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1) | ||
| 36 | |||
| 37 | static void split_counters(unsigned int *cnt, unsigned int *inpr) | ||
| 38 | { | ||
| 39 | unsigned int comb = atomic_read(&combined_event_count); | ||
| 40 | |||
| 41 | *cnt = (comb >> IN_PROGRESS_BITS); | ||
| 42 | *inpr = comb & MAX_IN_PROGRESS; | ||
| 43 | } | ||
| 44 | |||
| 45 | /* A preserved old value of the events counter. */ | ||
| 30 | static unsigned int saved_count; | 46 | static unsigned int saved_count; |
| 31 | /* The counter of wakeup events being processed. */ | ||
| 32 | static atomic_t events_in_progress = ATOMIC_INIT(0); | ||
| 33 | 47 | ||
| 34 | static DEFINE_SPINLOCK(events_lock); | 48 | static DEFINE_SPINLOCK(events_lock); |
| 35 | 49 | ||
| @@ -228,6 +242,35 @@ int device_wakeup_disable(struct device *dev) | |||
| 228 | EXPORT_SYMBOL_GPL(device_wakeup_disable); | 242 | EXPORT_SYMBOL_GPL(device_wakeup_disable); |
| 229 | 243 | ||
| 230 | /** | 244 | /** |
| 245 | * device_set_wakeup_capable - Set/reset device wakeup capability flag. | ||
| 246 | * @dev: Device to handle. | ||
| 247 | * @capable: Whether or not @dev is capable of waking up the system from sleep. | ||
| 248 | * | ||
| 249 | * If @capable is set, set the @dev's power.can_wakeup flag and add its | ||
| 250 | * wakeup-related attributes to sysfs. Otherwise, unset the @dev's | ||
| 251 | * power.can_wakeup flag and remove its wakeup-related attributes from sysfs. | ||
| 252 | * | ||
| 253 | * This function may sleep and it can't be called from any context where | ||
| 254 | * sleeping is not allowed. | ||
| 255 | */ | ||
| 256 | void device_set_wakeup_capable(struct device *dev, bool capable) | ||
| 257 | { | ||
| 258 | if (!!dev->power.can_wakeup == !!capable) | ||
| 259 | return; | ||
| 260 | |||
| 261 | if (device_is_registered(dev)) { | ||
| 262 | if (capable) { | ||
| 263 | if (wakeup_sysfs_add(dev)) | ||
| 264 | return; | ||
| 265 | } else { | ||
| 266 | wakeup_sysfs_remove(dev); | ||
| 267 | } | ||
| 268 | } | ||
| 269 | dev->power.can_wakeup = capable; | ||
| 270 | } | ||
| 271 | EXPORT_SYMBOL_GPL(device_set_wakeup_capable); | ||
| 272 | |||
| 273 | /** | ||
| 231 | * device_init_wakeup - Device wakeup initialization. | 274 | * device_init_wakeup - Device wakeup initialization. |
| 232 | * @dev: Device to handle. | 275 | * @dev: Device to handle. |
| 233 | * @enable: Whether or not to enable @dev as a wakeup device. | 276 | * @enable: Whether or not to enable @dev as a wakeup device. |
| @@ -307,7 +350,8 @@ static void wakeup_source_activate(struct wakeup_source *ws) | |||
| 307 | ws->timer_expires = jiffies; | 350 | ws->timer_expires = jiffies; |
| 308 | ws->last_time = ktime_get(); | 351 | ws->last_time = ktime_get(); |
| 309 | 352 | ||
| 310 | atomic_inc(&events_in_progress); | 353 | /* Increment the counter of events in progress. */ |
| 354 | atomic_inc(&combined_event_count); | ||
| 311 | } | 355 | } |
| 312 | 356 | ||
| 313 | /** | 357 | /** |
| @@ -394,14 +438,10 @@ static void wakeup_source_deactivate(struct wakeup_source *ws) | |||
| 394 | del_timer(&ws->timer); | 438 | del_timer(&ws->timer); |
| 395 | 439 | ||
| 396 | /* | 440 | /* |
| 397 | * event_count has to be incremented before events_in_progress is | 441 | * Increment the counter of registered wakeup events and decrement the |
| 398 | * modified, so that the callers of pm_check_wakeup_events() and | 442 | * counter of wakeup events in progress simultaneously. |
| 399 | * pm_save_wakeup_count() don't see the old value of event_count and | ||
| 400 | * events_in_progress equal to zero at the same time. | ||
| 401 | */ | 443 | */ |
| 402 | atomic_inc(&event_count); | 444 | atomic_add(MAX_IN_PROGRESS, &combined_event_count); |
| 403 | smp_mb__before_atomic_dec(); | ||
| 404 | atomic_dec(&events_in_progress); | ||
| 405 | } | 445 | } |
| 406 | 446 | ||
| 407 | /** | 447 | /** |
| @@ -556,8 +596,10 @@ bool pm_wakeup_pending(void) | |||
| 556 | 596 | ||
| 557 | spin_lock_irqsave(&events_lock, flags); | 597 | spin_lock_irqsave(&events_lock, flags); |
| 558 | if (events_check_enabled) { | 598 | if (events_check_enabled) { |
| 559 | ret = ((unsigned int)atomic_read(&event_count) != saved_count) | 599 | unsigned int cnt, inpr; |
| 560 | || atomic_read(&events_in_progress); | 600 | |
| 601 | split_counters(&cnt, &inpr); | ||
| 602 | ret = (cnt != saved_count || inpr > 0); | ||
| 561 | events_check_enabled = !ret; | 603 | events_check_enabled = !ret; |
| 562 | } | 604 | } |
| 563 | spin_unlock_irqrestore(&events_lock, flags); | 605 | spin_unlock_irqrestore(&events_lock, flags); |
| @@ -573,25 +615,25 @@ bool pm_wakeup_pending(void) | |||
| 573 | * Store the number of registered wakeup events at the address in @count. Block | 615 | * Store the number of registered wakeup events at the address in @count. Block |
| 574 | * if the current number of wakeup events being processed is nonzero. | 616 | * if the current number of wakeup events being processed is nonzero. |
| 575 | * | 617 | * |
| 576 | * Return false if the wait for the number of wakeup events being processed to | 618 | * Return 'false' if the wait for the number of wakeup events being processed to |
| 577 | * drop down to zero has been interrupted by a signal (and the current number | 619 | * drop down to zero has been interrupted by a signal (and the current number |
| 578 | * of wakeup events being processed is still nonzero). Otherwise return true. | 620 | * of wakeup events being processed is still nonzero). Otherwise return 'true'. |
| 579 | */ | 621 | */ |
| 580 | bool pm_get_wakeup_count(unsigned int *count) | 622 | bool pm_get_wakeup_count(unsigned int *count) |
| 581 | { | 623 | { |
| 582 | bool ret; | 624 | unsigned int cnt, inpr; |
| 583 | |||
| 584 | if (capable(CAP_SYS_ADMIN)) | ||
| 585 | events_check_enabled = false; | ||
| 586 | 625 | ||
| 587 | while (atomic_read(&events_in_progress) && !signal_pending(current)) { | 626 | for (;;) { |
| 627 | split_counters(&cnt, &inpr); | ||
| 628 | if (inpr == 0 || signal_pending(current)) | ||
| 629 | break; | ||
| 588 | pm_wakeup_update_hit_counts(); | 630 | pm_wakeup_update_hit_counts(); |
| 589 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); | 631 | schedule_timeout_interruptible(msecs_to_jiffies(TIMEOUT)); |
| 590 | } | 632 | } |
| 591 | 633 | ||
| 592 | ret = !atomic_read(&events_in_progress); | 634 | split_counters(&cnt, &inpr); |
| 593 | *count = atomic_read(&event_count); | 635 | *count = cnt; |
| 594 | return ret; | 636 | return !inpr; |
| 595 | } | 637 | } |
| 596 | 638 | ||
| 597 | /** | 639 | /** |
| @@ -600,24 +642,25 @@ bool pm_get_wakeup_count(unsigned int *count) | |||
| 600 | * | 642 | * |
| 601 | * If @count is equal to the current number of registered wakeup events and the | 643 | * If @count is equal to the current number of registered wakeup events and the |
| 602 | * current number of wakeup events being processed is zero, store @count as the | 644 | * current number of wakeup events being processed is zero, store @count as the |
| 603 | * old number of registered wakeup events to be used by pm_check_wakeup_events() | 645 | * old number of registered wakeup events for pm_check_wakeup_events(), enable |
| 604 | * and return true. Otherwise return false. | 646 | * wakeup events detection and return 'true'. Otherwise disable wakeup events |
| 647 | * detection and return 'false'. | ||
| 605 | */ | 648 | */ |
| 606 | bool pm_save_wakeup_count(unsigned int count) | 649 | bool pm_save_wakeup_count(unsigned int count) |
| 607 | { | 650 | { |
| 608 | bool ret = false; | 651 | unsigned int cnt, inpr; |
| 609 | 652 | ||
| 653 | events_check_enabled = false; | ||
| 610 | spin_lock_irq(&events_lock); | 654 | spin_lock_irq(&events_lock); |
| 611 | if (count == (unsigned int)atomic_read(&event_count) | 655 | split_counters(&cnt, &inpr); |
| 612 | && !atomic_read(&events_in_progress)) { | 656 | if (cnt == count && inpr == 0) { |
| 613 | saved_count = count; | 657 | saved_count = count; |
| 614 | events_check_enabled = true; | 658 | events_check_enabled = true; |
| 615 | ret = true; | ||
| 616 | } | 659 | } |
| 617 | spin_unlock_irq(&events_lock); | 660 | spin_unlock_irq(&events_lock); |
| 618 | if (!ret) | 661 | if (!events_check_enabled) |
| 619 | pm_wakeup_update_hit_counts(); | 662 | pm_wakeup_update_hit_counts(); |
| 620 | return ret; | 663 | return events_check_enabled; |
| 621 | } | 664 | } |
| 622 | 665 | ||
| 623 | static struct dentry *wakeup_sources_stats_dentry; | 666 | static struct dentry *wakeup_sources_stats_dentry; |
diff --git a/drivers/base/sys.c b/drivers/base/sys.c index 1667aaf4fde6..f6fb54741602 100644 --- a/drivers/base/sys.c +++ b/drivers/base/sys.c | |||
| @@ -166,6 +166,36 @@ EXPORT_SYMBOL_GPL(sysdev_class_unregister); | |||
| 166 | 166 | ||
| 167 | static DEFINE_MUTEX(sysdev_drivers_lock); | 167 | static DEFINE_MUTEX(sysdev_drivers_lock); |
| 168 | 168 | ||
| 169 | /* | ||
| 170 | * @dev != NULL means that we're unwinding because some drv->add() | ||
| 171 | * failed for some reason. You need to grab sysdev_drivers_lock before | ||
| 172 | * calling this. | ||
| 173 | */ | ||
| 174 | static void __sysdev_driver_remove(struct sysdev_class *cls, | ||
| 175 | struct sysdev_driver *drv, | ||
| 176 | struct sys_device *from_dev) | ||
| 177 | { | ||
| 178 | struct sys_device *dev = from_dev; | ||
| 179 | |||
| 180 | list_del_init(&drv->entry); | ||
| 181 | if (!cls) | ||
| 182 | return; | ||
| 183 | |||
| 184 | if (!drv->remove) | ||
| 185 | goto kset_put; | ||
| 186 | |||
| 187 | if (dev) | ||
| 188 | list_for_each_entry_continue_reverse(dev, &cls->kset.list, | ||
| 189 | kobj.entry) | ||
| 190 | drv->remove(dev); | ||
| 191 | else | ||
| 192 | list_for_each_entry(dev, &cls->kset.list, kobj.entry) | ||
| 193 | drv->remove(dev); | ||
| 194 | |||
| 195 | kset_put: | ||
| 196 | kset_put(&cls->kset); | ||
| 197 | } | ||
| 198 | |||
| 169 | /** | 199 | /** |
| 170 | * sysdev_driver_register - Register auxillary driver | 200 | * sysdev_driver_register - Register auxillary driver |
| 171 | * @cls: Device class driver belongs to. | 201 | * @cls: Device class driver belongs to. |
| @@ -175,14 +205,14 @@ static DEFINE_MUTEX(sysdev_drivers_lock); | |||
| 175 | * called on each operation on devices of that class. The refcount | 205 | * called on each operation on devices of that class. The refcount |
| 176 | * of @cls is incremented. | 206 | * of @cls is incremented. |
| 177 | */ | 207 | */ |
| 178 | |||
| 179 | int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) | 208 | int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) |
| 180 | { | 209 | { |
| 210 | struct sys_device *dev = NULL; | ||
| 181 | int err = 0; | 211 | int err = 0; |
| 182 | 212 | ||
| 183 | if (!cls) { | 213 | if (!cls) { |
| 184 | WARN(1, KERN_WARNING "sysdev: invalid class passed to " | 214 | WARN(1, KERN_WARNING "sysdev: invalid class passed to %s!\n", |
| 185 | "sysdev_driver_register!\n"); | 215 | __func__); |
| 186 | return -EINVAL; | 216 | return -EINVAL; |
| 187 | } | 217 | } |
| 188 | 218 | ||
| @@ -198,19 +228,27 @@ int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv) | |||
| 198 | 228 | ||
| 199 | /* If devices of this class already exist, tell the driver */ | 229 | /* If devices of this class already exist, tell the driver */ |
| 200 | if (drv->add) { | 230 | if (drv->add) { |
| 201 | struct sys_device *dev; | 231 | list_for_each_entry(dev, &cls->kset.list, kobj.entry) { |
| 202 | list_for_each_entry(dev, &cls->kset.list, kobj.entry) | 232 | err = drv->add(dev); |
| 203 | drv->add(dev); | 233 | if (err) |
| 234 | goto unwind; | ||
| 235 | } | ||
| 204 | } | 236 | } |
| 205 | } else { | 237 | } else { |
| 206 | err = -EINVAL; | 238 | err = -EINVAL; |
| 207 | WARN(1, KERN_ERR "%s: invalid device class\n", __func__); | 239 | WARN(1, KERN_ERR "%s: invalid device class\n", __func__); |
| 208 | } | 240 | } |
| 241 | |||
| 242 | goto unlock; | ||
| 243 | |||
| 244 | unwind: | ||
| 245 | __sysdev_driver_remove(cls, drv, dev); | ||
| 246 | |||
| 247 | unlock: | ||
| 209 | mutex_unlock(&sysdev_drivers_lock); | 248 | mutex_unlock(&sysdev_drivers_lock); |
| 210 | return err; | 249 | return err; |
| 211 | } | 250 | } |
| 212 | 251 | ||
| 213 | |||
| 214 | /** | 252 | /** |
| 215 | * sysdev_driver_unregister - Remove an auxillary driver. | 253 | * sysdev_driver_unregister - Remove an auxillary driver. |
| 216 | * @cls: Class driver belongs to. | 254 | * @cls: Class driver belongs to. |
| @@ -220,23 +258,12 @@ void sysdev_driver_unregister(struct sysdev_class *cls, | |||
| 220 | struct sysdev_driver *drv) | 258 | struct sysdev_driver *drv) |
| 221 | { | 259 | { |
| 222 | mutex_lock(&sysdev_drivers_lock); | 260 | mutex_lock(&sysdev_drivers_lock); |
| 223 | list_del_init(&drv->entry); | 261 | __sysdev_driver_remove(cls, drv, NULL); |
| 224 | if (cls) { | ||
| 225 | if (drv->remove) { | ||
| 226 | struct sys_device *dev; | ||
| 227 | list_for_each_entry(dev, &cls->kset.list, kobj.entry) | ||
| 228 | drv->remove(dev); | ||
| 229 | } | ||
| 230 | kset_put(&cls->kset); | ||
| 231 | } | ||
| 232 | mutex_unlock(&sysdev_drivers_lock); | 262 | mutex_unlock(&sysdev_drivers_lock); |
| 233 | } | 263 | } |
| 234 | |||
| 235 | EXPORT_SYMBOL_GPL(sysdev_driver_register); | 264 | EXPORT_SYMBOL_GPL(sysdev_driver_register); |
| 236 | EXPORT_SYMBOL_GPL(sysdev_driver_unregister); | 265 | EXPORT_SYMBOL_GPL(sysdev_driver_unregister); |
| 237 | 266 | ||
| 238 | |||
| 239 | |||
| 240 | /** | 267 | /** |
| 241 | * sysdev_register - add a system device to the tree | 268 | * sysdev_register - add a system device to the tree |
| 242 | * @sysdev: device in question | 269 | * @sysdev: device in question |
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c new file mode 100644 index 000000000000..90af2943f9e4 --- /dev/null +++ b/drivers/base/syscore.c | |||
| @@ -0,0 +1,117 @@ | |||
| 1 | /* | ||
| 2 | * syscore.c - Execution of system core operations. | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc. | ||
| 5 | * | ||
| 6 | * This file is released under the GPLv2. | ||
| 7 | */ | ||
| 8 | |||
| 9 | #include <linux/syscore_ops.h> | ||
| 10 | #include <linux/mutex.h> | ||
| 11 | #include <linux/module.h> | ||
| 12 | |||
| 13 | static LIST_HEAD(syscore_ops_list); | ||
| 14 | static DEFINE_MUTEX(syscore_ops_lock); | ||
| 15 | |||
| 16 | /** | ||
| 17 | * register_syscore_ops - Register a set of system core operations. | ||
| 18 | * @ops: System core operations to register. | ||
| 19 | */ | ||
| 20 | void register_syscore_ops(struct syscore_ops *ops) | ||
| 21 | { | ||
| 22 | mutex_lock(&syscore_ops_lock); | ||
| 23 | list_add_tail(&ops->node, &syscore_ops_list); | ||
| 24 | mutex_unlock(&syscore_ops_lock); | ||
| 25 | } | ||
| 26 | EXPORT_SYMBOL_GPL(register_syscore_ops); | ||
| 27 | |||
| 28 | /** | ||
| 29 | * unregister_syscore_ops - Unregister a set of system core operations. | ||
| 30 | * @ops: System core operations to unregister. | ||
| 31 | */ | ||
| 32 | void unregister_syscore_ops(struct syscore_ops *ops) | ||
| 33 | { | ||
| 34 | mutex_lock(&syscore_ops_lock); | ||
| 35 | list_del(&ops->node); | ||
| 36 | mutex_unlock(&syscore_ops_lock); | ||
| 37 | } | ||
| 38 | EXPORT_SYMBOL_GPL(unregister_syscore_ops); | ||
| 39 | |||
| 40 | #ifdef CONFIG_PM_SLEEP | ||
| 41 | /** | ||
| 42 | * syscore_suspend - Execute all the registered system core suspend callbacks. | ||
| 43 | * | ||
| 44 | * This function is executed with one CPU on-line and disabled interrupts. | ||
| 45 | */ | ||
| 46 | int syscore_suspend(void) | ||
| 47 | { | ||
| 48 | struct syscore_ops *ops; | ||
| 49 | int ret = 0; | ||
| 50 | |||
| 51 | WARN_ONCE(!irqs_disabled(), | ||
| 52 | "Interrupts enabled before system core suspend.\n"); | ||
| 53 | |||
| 54 | list_for_each_entry_reverse(ops, &syscore_ops_list, node) | ||
| 55 | if (ops->suspend) { | ||
| 56 | if (initcall_debug) | ||
| 57 | pr_info("PM: Calling %pF\n", ops->suspend); | ||
| 58 | ret = ops->suspend(); | ||
| 59 | if (ret) | ||
| 60 | goto err_out; | ||
| 61 | WARN_ONCE(!irqs_disabled(), | ||
| 62 | "Interrupts enabled after %pF\n", ops->suspend); | ||
| 63 | } | ||
| 64 | |||
| 65 | return 0; | ||
| 66 | |||
| 67 | err_out: | ||
| 68 | pr_err("PM: System core suspend callback %pF failed.\n", ops->suspend); | ||
| 69 | |||
| 70 | list_for_each_entry_continue(ops, &syscore_ops_list, node) | ||
| 71 | if (ops->resume) | ||
| 72 | ops->resume(); | ||
| 73 | |||
| 74 | return ret; | ||
| 75 | } | ||
| 76 | |||
| 77 | /** | ||
| 78 | * syscore_resume - Execute all the registered system core resume callbacks. | ||
| 79 | * | ||
| 80 | * This function is executed with one CPU on-line and disabled interrupts. | ||
| 81 | */ | ||
| 82 | void syscore_resume(void) | ||
| 83 | { | ||
| 84 | struct syscore_ops *ops; | ||
| 85 | |||
| 86 | WARN_ONCE(!irqs_disabled(), | ||
| 87 | "Interrupts enabled before system core resume.\n"); | ||
| 88 | |||
| 89 | list_for_each_entry(ops, &syscore_ops_list, node) | ||
| 90 | if (ops->resume) { | ||
| 91 | if (initcall_debug) | ||
| 92 | pr_info("PM: Calling %pF\n", ops->resume); | ||
| 93 | ops->resume(); | ||
| 94 | WARN_ONCE(!irqs_disabled(), | ||
| 95 | "Interrupts enabled after %pF\n", ops->resume); | ||
| 96 | } | ||
| 97 | } | ||
| 98 | #endif /* CONFIG_PM_SLEEP */ | ||
| 99 | |||
| 100 | /** | ||
| 101 | * syscore_shutdown - Execute all the registered system core shutdown callbacks. | ||
| 102 | */ | ||
| 103 | void syscore_shutdown(void) | ||
| 104 | { | ||
| 105 | struct syscore_ops *ops; | ||
| 106 | |||
| 107 | mutex_lock(&syscore_ops_lock); | ||
| 108 | |||
| 109 | list_for_each_entry_reverse(ops, &syscore_ops_list, node) | ||
| 110 | if (ops->shutdown) { | ||
| 111 | if (initcall_debug) | ||
| 112 | pr_info("PM: Calling %pF\n", ops->shutdown); | ||
| 113 | ops->shutdown(); | ||
| 114 | } | ||
| 115 | |||
| 116 | mutex_unlock(&syscore_ops_lock); | ||
| 117 | } | ||
