path: root/drivers/base
author    Dmitry Torokhov <dmitry.torokhov@gmail.com>    2010-03-01 02:55:20 -0500
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>    2010-03-01 02:55:20 -0500
commit    35858adbfca13678af99fb31618ef4428d6dedb0 (patch)
tree      3336feaa61324486945816cb52c347733e7c0821 /drivers/base
parent    197d4db752e67160d79fed09968c2140376a80a3 (diff)
parent    4b70858ba8d4537daf782defebe5f2ff80ccef2b (diff)

Merge branch 'next' into for-linus
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/bus.c            |   2
-rw-r--r--  drivers/base/core.c           |  16
-rw-r--r--  drivers/base/devtmpfs.c       |  22
-rw-r--r--  drivers/base/driver.c         |   4
-rw-r--r--  drivers/base/memory.c         |  82
-rw-r--r--  drivers/base/node.c           | 196
-rw-r--r--  drivers/base/platform.c       |   1
-rw-r--r--  drivers/base/power/main.c     | 145
-rw-r--r--  drivers/base/power/runtime.c  |  55
9 files changed, 458 insertions(+), 65 deletions(-)
diff --git a/drivers/base/bus.c b/drivers/base/bus.c
index 63c143e54a57..c0c5a43d9fb3 100644
--- a/drivers/base/bus.c
+++ b/drivers/base/bus.c
@@ -703,9 +703,9 @@ int bus_add_driver(struct device_driver *drv)
         return 0;

 out_unregister:
+        kobject_put(&priv->kobj);
         kfree(drv->p);
         drv->p = NULL;
-        kobject_put(&priv->kobj);
 out_put_bus:
         bus_put(bus);
         return error;
diff --git a/drivers/base/core.c b/drivers/base/core.c
index f1290cbd1350..282025770429 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -446,7 +446,8 @@ struct kset *devices_kset;
  * @dev: device.
  * @attr: device attribute descriptor.
  */
-int device_create_file(struct device *dev, struct device_attribute *attr)
+int device_create_file(struct device *dev,
+                       const struct device_attribute *attr)
 {
         int error = 0;
         if (dev)
@@ -459,7 +460,8 @@ int device_create_file(struct device *dev, struct device_attribute *attr)
  * @dev: device.
  * @attr: device attribute descriptor.
  */
-void device_remove_file(struct device *dev, struct device_attribute *attr)
+void device_remove_file(struct device *dev,
+                        const struct device_attribute *attr)
 {
         if (dev)
                 sysfs_remove_file(&dev->kobj, &attr->attr);
@@ -470,7 +472,8 @@ void device_remove_file(struct device *dev, struct device_attribute *attr)
  * @dev: device.
  * @attr: device binary attribute descriptor.
  */
-int device_create_bin_file(struct device *dev, struct bin_attribute *attr)
+int device_create_bin_file(struct device *dev,
+                           const struct bin_attribute *attr)
 {
         int error = -EINVAL;
         if (dev)
@@ -484,7 +487,8 @@ EXPORT_SYMBOL_GPL(device_create_bin_file);
  * @dev: device.
  * @attr: device binary attribute descriptor.
  */
-void device_remove_bin_file(struct device *dev, struct bin_attribute *attr)
+void device_remove_bin_file(struct device *dev,
+                            const struct bin_attribute *attr)
 {
         if (dev)
                 sysfs_remove_bin_file(&dev->kobj, attr);
@@ -905,8 +909,10 @@ int device_add(struct device *dev)
                 dev->init_name = NULL;
         }

-        if (!dev_name(dev))
+        if (!dev_name(dev)) {
+                error = -EINVAL;
                 goto name_error;
+        }

         pr_debug("device: '%s': %s\n", dev_name(dev), __func__);

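The core.c hunks above only constify the attribute arguments of the sysfs helpers; existing callers keep working. For context, a minimal sketch of how such an attribute is typically defined and registered (the "foo" driver, attribute, and value are hypothetical, not part of this patch):

#include <linux/device.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
{
        return sprintf(buf, "%d\n", 42);        /* placeholder value */
}

/* DEVICE_ATTR() emits a struct device_attribute named dev_attr_foo */
static DEVICE_ATTR(foo, 0444, foo_show, NULL);

static int foo_add_attr_example(struct device *dev)
{
        /* the const parameter accepts const and non-const attributes alike */
        return device_create_file(dev, &dev_attr_foo);
}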
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index 50375bb8e51d..42ae452b36b0 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -32,7 +32,7 @@ static int dev_mount = 1;
 static int dev_mount;
 #endif

-static rwlock_t dirlock;
+static DEFINE_MUTEX(dirlock);

 static int __init mount_param(char *str)
 {
@@ -93,7 +93,7 @@ static int create_path(const char *nodepath)
 {
         int err;

-        read_lock(&dirlock);
+        mutex_lock(&dirlock);
         err = dev_mkdir(nodepath, 0755);
         if (err == -ENOENT) {
                 char *path;
@@ -101,8 +101,10 @@ static int create_path(const char *nodepath)

                 /* parent directories do not exist, create them */
                 path = kstrdup(nodepath, GFP_KERNEL);
-                if (!path)
-                        return -ENOMEM;
+                if (!path) {
+                        err = -ENOMEM;
+                        goto out;
+                }
                 s = path;
                 for (;;) {
                         s = strchr(s, '/');
@@ -117,7 +119,8 @@ static int create_path(const char *nodepath)
                 }
                 kfree(path);
         }
-        read_unlock(&dirlock);
+out:
+        mutex_unlock(&dirlock);
         return err;
 }

@@ -229,7 +232,7 @@ static int delete_path(const char *nodepath)
         if (!path)
                 return -ENOMEM;

-        write_lock(&dirlock);
+        mutex_lock(&dirlock);
         for (;;) {
                 char *base;

@@ -241,7 +244,7 @@ static int delete_path(const char *nodepath)
                 if (err)
                         break;
         }
-        write_unlock(&dirlock);
+        mutex_unlock(&dirlock);

         kfree(path);
         return err;
@@ -351,8 +354,7 @@ int __init devtmpfs_init(void)
 {
         int err;
         struct vfsmount *mnt;
-
-        rwlock_init(&dirlock);
+        char options[] = "mode=0755";

         err = register_filesystem(&dev_fs_type);
         if (err) {
@@ -361,7 +363,7 @@ int __init devtmpfs_init(void)
                 return err;
         }

-        mnt = kern_mount_data(&dev_fs_type, "mode=0755");
+        mnt = kern_mount_data(&dev_fs_type, options);
         if (IS_ERR(mnt)) {
                 err = PTR_ERR(mnt);
                 printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err);
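The devtmpfs change swaps the spinning rwlock for a mutex, presumably because dev_mkdir()/dev_rmdir() go through the VFS and may sleep, and it funnels the new early-exit path through a single unlock site. A minimal sketch of that lock-and-goto pattern (all names hypothetical):

#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);

static int example_create(const char *name)
{
        int err;

        mutex_lock(&example_lock);
        err = do_first_sleeping_step(name);     /* hypothetical helper */
        if (err)
                goto out;                       /* single unlock site */
        err = do_second_sleeping_step(name);    /* hypothetical helper */
out:
        mutex_unlock(&example_lock);
        return err;
}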
diff --git a/drivers/base/driver.c b/drivers/base/driver.c
index f367885a7646..90c9fff09ead 100644
--- a/drivers/base/driver.c
+++ b/drivers/base/driver.c
@@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(driver_find_device);
  * @attr: driver attribute descriptor.
  */
 int driver_create_file(struct device_driver *drv,
-                       struct driver_attribute *attr)
+                       const struct driver_attribute *attr)
 {
         int error;
         if (drv)
@@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_create_file);
  * @attr: driver attribute descriptor.
  */
 void driver_remove_file(struct device_driver *drv,
-                        struct driver_attribute *attr)
+                        const struct driver_attribute *attr)
 {
         if (drv)
                 sysfs_remove_file(&drv->p->kobj, &attr->attr);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 989429cfed88..bd025059711f 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -63,6 +63,20 @@ void unregister_memory_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_memory_notifier);

+static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain);
+
+int register_memory_isolate_notifier(struct notifier_block *nb)
+{
+        return atomic_notifier_chain_register(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(register_memory_isolate_notifier);
+
+void unregister_memory_isolate_notifier(struct notifier_block *nb)
+{
+        atomic_notifier_chain_unregister(&memory_isolate_chain, nb);
+}
+EXPORT_SYMBOL(unregister_memory_isolate_notifier);
+
 /*
  * register_memory - Setup a sysfs device for a memory block
  */
@@ -157,6 +171,11 @@ int memory_notify(unsigned long val, void *v)
         return blocking_notifier_call_chain(&memory_chain, val, v);
 }

+int memory_isolate_notify(unsigned long val, void *v)
+{
+        return atomic_notifier_call_chain(&memory_isolate_chain, val, v);
+}
+
 /*
  * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
  * OK to have direct references to sparsemem variables in here.
@@ -292,7 +311,7 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL);
 static ssize_t
 print_block_size(struct class *class, char *buf)
 {
-        return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
+        return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE);
 }

 static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL);
@@ -341,6 +360,64 @@ static inline int memory_probe_init(void)
 }
 #endif

+#ifdef CONFIG_MEMORY_FAILURE
+/*
+ * Support for offlining pages of memory
+ */
+
+/* Soft offline a page */
+static ssize_t
+store_soft_offline_page(struct class *class, const char *buf, size_t count)
+{
+        int ret;
+        u64 pfn;
+        if (!capable(CAP_SYS_ADMIN))
+                return -EPERM;
+        if (strict_strtoull(buf, 0, &pfn) < 0)
+                return -EINVAL;
+        pfn >>= PAGE_SHIFT;
+        if (!pfn_valid(pfn))
+                return -ENXIO;
+        ret = soft_offline_page(pfn_to_page(pfn), 0);
+        return ret == 0 ? count : ret;
+}
+
+/* Forcibly offline a page, including killing processes. */
+static ssize_t
+store_hard_offline_page(struct class *class, const char *buf, size_t count)
+{
+        int ret;
+        u64 pfn;
+        if (!capable(CAP_SYS_ADMIN))
+                return -EPERM;
+        if (strict_strtoull(buf, 0, &pfn) < 0)
+                return -EINVAL;
+        pfn >>= PAGE_SHIFT;
+        ret = __memory_failure(pfn, 0, 0);
+        return ret ? ret : count;
+}
+
+static CLASS_ATTR(soft_offline_page, 0644, NULL, store_soft_offline_page);
+static CLASS_ATTR(hard_offline_page, 0644, NULL, store_hard_offline_page);
+
+static __init int memory_fail_init(void)
+{
+        int err;
+
+        err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
+                                &class_attr_soft_offline_page.attr);
+        if (!err)
+                err = sysfs_create_file(&memory_sysdev_class.kset.kobj,
+                                &class_attr_hard_offline_page.attr);
+        return err;
+}
+#else
+static inline int memory_fail_init(void)
+{
+        return 0;
+}
+#endif
+
 /*
  * Note that phys_device is optional. It is here to allow for
  * differentiation between which *physical* devices each
@@ -473,6 +550,9 @@ int __init memory_dev_init(void)
         err = memory_probe_init();
         if (!ret)
                 ret = err;
+        err = memory_fail_init();
+        if (!ret)
+                ret = err;
         err = block_size_init();
         if (!ret)
                 ret = err;
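memory.c now carries a second, atomic notifier chain for page-isolation events alongside the existing blocking hotplug chain. A minimal sketch of how another subsystem might subscribe to it, using the register function exported above (the callback and its name are hypothetical):

#include <linux/memory.h>
#include <linux/notifier.h>

static int example_isolate_event(struct notifier_block *nb,
                                 unsigned long action, void *arg)
{
        /* invoked via atomic_notifier_call_chain(), so it must not sleep */
        return NOTIFY_OK;
}

static struct notifier_block example_isolate_nb = {
        .notifier_call = example_isolate_event,
};

static int __init example_isolate_init(void)
{
        return register_memory_isolate_notifier(&example_isolate_nb);
}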
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 1fe5536d404f..70122791683d 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -173,6 +173,47 @@ static ssize_t node_read_distance(struct sys_device * dev,
 }
 static SYSDEV_ATTR(distance, S_IRUGO, node_read_distance, NULL);

+#ifdef CONFIG_HUGETLBFS
+/*
+ * hugetlbfs per node attributes registration interface:
+ * When/if hugetlb[fs] subsystem initializes [sometime after this module],
+ * it will register its per node attributes for all online nodes with
+ * memory.  It will also call register_hugetlbfs_with_node(), below, to
+ * register its attribute registration functions with this node driver.
+ * Once these hooks have been initialized, the node driver will call into
+ * the hugetlb module to [un]register attributes for hot-plugged nodes.
+ */
+static node_registration_func_t __hugetlb_register_node;
+static node_registration_func_t __hugetlb_unregister_node;
+
+static inline bool hugetlb_register_node(struct node *node)
+{
+        if (__hugetlb_register_node &&
+                        node_state(node->sysdev.id, N_HIGH_MEMORY)) {
+                __hugetlb_register_node(node);
+                return true;
+        }
+        return false;
+}
+
+static inline void hugetlb_unregister_node(struct node *node)
+{
+        if (__hugetlb_unregister_node)
+                __hugetlb_unregister_node(node);
+}
+
+void register_hugetlbfs_with_node(node_registration_func_t doregister,
+                                  node_registration_func_t unregister)
+{
+        __hugetlb_register_node   = doregister;
+        __hugetlb_unregister_node = unregister;
+}
+#else
+static inline void hugetlb_register_node(struct node *node) {}
+
+static inline void hugetlb_unregister_node(struct node *node) {}
+#endif
+

 /*
  * register_node - Setup a sysfs device for a node.
@@ -196,6 +237,8 @@ int register_node(struct node *node, int num, struct node *parent)
                 sysdev_create_file(&node->sysdev, &attr_distance);

                 scan_unevictable_register_node(node);
+
+                hugetlb_register_node(node);
         }
         return error;
 }
@@ -216,6 +259,7 @@ void unregister_node(struct node *node)
         sysdev_remove_file(&node->sysdev, &attr_distance);

         scan_unevictable_unregister_node(node);
+        hugetlb_unregister_node(node);          /* no-op, if memoryless node */

         sysdev_unregister(&node->sysdev);
 }
@@ -227,26 +271,43 @@ struct node node_devices[MAX_NUMNODES];
  */
 int register_cpu_under_node(unsigned int cpu, unsigned int nid)
 {
-        if (node_online(nid)) {
-                struct sys_device *obj = get_cpu_sysdev(cpu);
-                if (!obj)
-                        return 0;
-                return sysfs_create_link(&node_devices[nid].sysdev.kobj,
-                                         &obj->kobj,
-                                         kobject_name(&obj->kobj));
-        }
+        int ret;
+        struct sys_device *obj;

-        return 0;
+        if (!node_online(nid))
+                return 0;
+
+        obj = get_cpu_sysdev(cpu);
+        if (!obj)
+                return 0;
+
+        ret = sysfs_create_link(&node_devices[nid].sysdev.kobj,
+                                &obj->kobj,
+                                kobject_name(&obj->kobj));
+        if (ret)
+                return ret;
+
+        return sysfs_create_link(&obj->kobj,
+                                 &node_devices[nid].sysdev.kobj,
+                                 kobject_name(&node_devices[nid].sysdev.kobj));
 }

 int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 {
-        if (node_online(nid)) {
-                struct sys_device *obj = get_cpu_sysdev(cpu);
-                if (obj)
-                        sysfs_remove_link(&node_devices[nid].sysdev.kobj,
-                                          kobject_name(&obj->kobj));
-        }
+        struct sys_device *obj;
+
+        if (!node_online(nid))
+                return 0;
+
+        obj = get_cpu_sysdev(cpu);
+        if (!obj)
+                return 0;
+
+        sysfs_remove_link(&node_devices[nid].sysdev.kobj,
+                          kobject_name(&obj->kobj));
+        sysfs_remove_link(&obj->kobj,
+                          kobject_name(&node_devices[nid].sysdev.kobj));
+
         return 0;
 }

@@ -268,6 +329,7 @@ static int get_nid_for_pfn(unsigned long pfn)
 /* register memory section under specified node if it spans that node */
 int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
 {
+        int ret;
         unsigned long pfn, sect_start_pfn, sect_end_pfn;

         if (!mem_blk)
@@ -284,9 +346,15 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
                         continue;
                 if (page_nid != nid)
                         continue;
-                return sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
+                ret = sysfs_create_link_nowarn(&node_devices[nid].sysdev.kobj,
                                         &mem_blk->sysdev.kobj,
                                         kobject_name(&mem_blk->sysdev.kobj));
+                if (ret)
+                        return ret;
+
+                return sysfs_create_link_nowarn(&mem_blk->sysdev.kobj,
+                                &node_devices[nid].sysdev.kobj,
+                                kobject_name(&node_devices[nid].sysdev.kobj));
         }
         /* mem section does not span the specified node */
         return 0;
@@ -295,12 +363,16 @@ int register_mem_sect_under_node(struct memory_block *mem_blk, int nid)
 /* unregister memory section under all nodes that it spans */
 int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
 {
-        nodemask_t unlinked_nodes;
+        NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL);
         unsigned long pfn, sect_start_pfn, sect_end_pfn;

-        if (!mem_blk)
+        if (!mem_blk) {
+                NODEMASK_FREE(unlinked_nodes);
                 return -EFAULT;
-        nodes_clear(unlinked_nodes);
+        }
+        if (!unlinked_nodes)
+                return -ENOMEM;
+        nodes_clear(*unlinked_nodes);
         sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index);
         sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1;
         for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) {
@@ -311,11 +383,14 @@ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk)
                         continue;
                 if (!node_online(nid))
                         continue;
-                if (node_test_and_set(nid, unlinked_nodes))
+                if (node_test_and_set(nid, *unlinked_nodes))
                         continue;
                 sysfs_remove_link(&node_devices[nid].sysdev.kobj,
                         kobject_name(&mem_blk->sysdev.kobj));
+                sysfs_remove_link(&mem_blk->sysdev.kobj,
+                        kobject_name(&node_devices[nid].sysdev.kobj));
         }
+        NODEMASK_FREE(unlinked_nodes);
         return 0;
 }

@@ -345,9 +420,77 @@ static int link_mem_sections(int nid)
         }
         return err;
 }
-#else
+
+#ifdef CONFIG_HUGETLBFS
+/*
+ * Handle per node hstate attribute [un]registration on transistions
+ * to/from memoryless state.
+ */
+static void node_hugetlb_work(struct work_struct *work)
+{
+        struct node *node = container_of(work, struct node, node_work);
+
+        /*
+         * We only get here when a node transitions to/from memoryless state.
+         * We can detect which transition occurred by examining whether the
+         * node has memory now.  hugetlb_register_node() already check this
+         * so we try to register the attributes.  If that fails, then the
+         * node has transitioned to memoryless, try to unregister the
+         * attributes.
+         */
+        if (!hugetlb_register_node(node))
+                hugetlb_unregister_node(node);
+}
+
+static void init_node_hugetlb_work(int nid)
+{
+        INIT_WORK(&node_devices[nid].node_work, node_hugetlb_work);
+}
+
+static int node_memory_callback(struct notifier_block *self,
+                                unsigned long action, void *arg)
+{
+        struct memory_notify *mnb = arg;
+        int nid = mnb->status_change_nid;
+
+        switch (action) {
+        case MEM_ONLINE:
+        case MEM_OFFLINE:
+                /*
+                 * offload per node hstate [un]registration to a work thread
+                 * when transitioning to/from memoryless state.
+                 */
+                if (nid != NUMA_NO_NODE)
+                        schedule_work(&node_devices[nid].node_work);
+                break;
+
+        case MEM_GOING_ONLINE:
+        case MEM_GOING_OFFLINE:
+        case MEM_CANCEL_ONLINE:
+        case MEM_CANCEL_OFFLINE:
+        default:
+                break;
+        }
+
+        return NOTIFY_OK;
+}
+#endif  /* CONFIG_HUGETLBFS */
+#else   /* !CONFIG_MEMORY_HOTPLUG_SPARSE */
+
 static int link_mem_sections(int nid) { return 0; }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
+
+#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
+        !defined(CONFIG_HUGETLBFS)
+static inline int node_memory_callback(struct notifier_block *self,
+                                unsigned long action, void *arg)
+{
+        return NOTIFY_OK;
+}
+
+static void init_node_hugetlb_work(int nid) { }
+
+#endif

 int register_one_node(int nid)
 {
@@ -371,6 +514,9 @@ int register_one_node(int nid)

                 /* link memory sections under this node */
                 error = link_mem_sections(nid);
+
+                /* initialize work queue for memory hot plug */
+                init_node_hugetlb_work(nid);
         }

         return error;
@@ -460,13 +606,17 @@ static int node_states_init(void)
         return err;
 }

+#define NODE_CALLBACK_PRI       2       /* lower than SLAB */
 static int __init register_node_type(void)
 {
         int ret;

         ret = sysdev_class_register(&node_class);
-        if (!ret)
+        if (!ret) {
                 ret = node_states_init();
+                hotplug_memory_notifier(node_memory_callback,
+                                        NODE_CALLBACK_PRI);
+        }

         /*
          * Note: we're not going to unregister the node class if we fail
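The node.c comment block above describes a two-step handshake: hugetlbfs hands its per-node attribute register/unregister functions to the node driver, which then calls them for hot-plugged (or memoryless-transitioning) nodes. A minimal sketch of the hugetlbfs side of that handshake (function names hypothetical; node_registration_func_t is assumed to take a struct node *, matching the calls in the hunk above):

#include <linux/node.h>

/* hypothetical per-node hstate attribute handlers on the hugetlb side */
static void example_hugetlb_register_node(struct node *node)
{
        /* create the per-node hstate sysfs attributes for this node */
}

static void example_hugetlb_unregister_node(struct node *node)
{
        /* remove them again when the node goes memoryless or offline */
}

static void __init example_hugetlb_sysfs_init(void)
{
        /* let the node driver call back into hugetlb for hot-plugged nodes */
        register_hugetlbfs_with_node(example_hugetlb_register_node,
                                     example_hugetlb_unregister_node);
}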
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9d2ee25deaf5..58efaf2f1259 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -441,6 +441,7 @@ error:
         platform_device_put(pdev);
         return ERR_PTR(retval);
 }
+EXPORT_SYMBOL_GPL(platform_device_register_data);

 static int platform_drv_probe(struct device *_dev)
 {
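platform.c only gains the missing export of platform_device_register_data(). A minimal sketch of a caller (device name and data are hypothetical; the parent/name/id/data/size signature is assumed to match this kernel version):

#include <linux/platform_device.h>

struct example_pdata {
        int mode;
};

static struct example_pdata example_pdata = {
        .mode = 1,
};

static struct platform_device *example_register(struct device *parent)
{
        /* allocates the device, copies the platform data, and adds it */
        return platform_device_register_data(parent, "example-dev", -1,
                                             &example_pdata,
                                             sizeof(example_pdata));
}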
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 8aa2443182d5..a5142bddef41 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -23,8 +23,8 @@
 #include <linux/pm.h>
 #include <linux/pm_runtime.h>
 #include <linux/resume-trace.h>
-#include <linux/rwsem.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>

 #include "../base.h"
 #include "power.h"
@@ -161,6 +161,32 @@ void device_pm_move_last(struct device *dev)
         list_move_tail(&dev->power.entry, &dpm_list);
 }

+static ktime_t initcall_debug_start(struct device *dev)
+{
+        ktime_t calltime = ktime_set(0, 0);
+
+        if (initcall_debug) {
+                pr_info("calling  %s+ @ %i\n",
+                        dev_name(dev), task_pid_nr(current));
+                calltime = ktime_get();
+        }
+
+        return calltime;
+}
+
+static void initcall_debug_report(struct device *dev, ktime_t calltime,
+                                  int error)
+{
+        ktime_t delta, rettime;
+
+        if (initcall_debug) {
+                rettime = ktime_get();
+                delta = ktime_sub(rettime, calltime);
+                pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
+                        error, (unsigned long long)ktime_to_ns(delta) >> 10);
+        }
+}
+
 /**
  * pm_op - Execute the PM operation appropriate for given PM event.
  * @dev: Device to handle.
@@ -172,6 +198,9 @@ static int pm_op(struct device *dev,
                  pm_message_t state)
 {
         int error = 0;
+        ktime_t calltime;
+
+        calltime = initcall_debug_start(dev);

         switch (state.event) {
 #ifdef CONFIG_SUSPEND
@@ -219,6 +248,9 @@ static int pm_op(struct device *dev,
         default:
                 error = -EINVAL;
         }
+
+        initcall_debug_report(dev, calltime, error);
+
         return error;
 }

@@ -236,6 +268,13 @@ static int pm_noirq_op(struct device *dev,
                        pm_message_t state)
 {
         int error = 0;
+        ktime_t calltime, delta, rettime;
+
+        if (initcall_debug) {
+                pr_info("calling  %s_i+ @ %i\n",
+                        dev_name(dev), task_pid_nr(current));
+                calltime = ktime_get();
+        }

         switch (state.event) {
 #ifdef CONFIG_SUSPEND
@@ -283,6 +322,15 @@ static int pm_noirq_op(struct device *dev,
         default:
                 error = -EINVAL;
         }
+
+        if (initcall_debug) {
+                rettime = ktime_get();
+                delta = ktime_sub(rettime, calltime);
+                printk("initcall %s_i+ returned %d after %Ld usecs\n",
+                        dev_name(dev), error,
+                        (unsigned long long)ktime_to_ns(delta) >> 10);
+        }
+
         return error;
 }

@@ -324,6 +372,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info,
                 kobject_name(&dev->kobj), pm_verb(state.event), info, error);
 }

+static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info)
+{
+        ktime_t calltime;
+        s64 usecs64;
+        int usecs;
+
+        calltime = ktime_get();
+        usecs64 = ktime_to_ns(ktime_sub(calltime, starttime));
+        do_div(usecs64, NSEC_PER_USEC);
+        usecs = usecs64;
+        if (usecs == 0)
+                usecs = 1;
+        pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n",
+                info ?: "", info ? " " : "", pm_verb(state.event),
+                usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC);
+}
+
 /*------------------------- Resume routines -------------------------*/

 /**
@@ -341,14 +406,11 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
         TRACE_DEVICE(dev);
         TRACE_RESUME(0);

-        if (!dev->bus)
-                goto End;
-
-        if (dev->bus->pm) {
+        if (dev->bus && dev->bus->pm) {
                 pm_dev_dbg(dev, state, "EARLY ");
                 error = pm_noirq_op(dev, dev->bus->pm, state);
         }
- End:
+
         TRACE_RESUME(error);
         return error;
 }
@@ -363,6 +425,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
 void dpm_resume_noirq(pm_message_t state)
 {
         struct device *dev;
+        ktime_t starttime = ktime_get();

         mutex_lock(&dpm_list_mtx);
         transition_started = false;
@@ -376,11 +439,32 @@ void dpm_resume_noirq(pm_message_t state)
                                 pm_dev_err(dev, state, " early", error);
                 }
         mutex_unlock(&dpm_list_mtx);
+        dpm_show_time(starttime, state, "early");
         resume_device_irqs();
 }
 EXPORT_SYMBOL_GPL(dpm_resume_noirq);

 /**
+ * legacy_resume - Execute a legacy (bus or class) resume callback for device.
+ * @dev: Device to resume.
+ * @cb: Resume callback to execute.
+ */
+static int legacy_resume(struct device *dev, int (*cb)(struct device *dev))
+{
+        int error;
+        ktime_t calltime;
+
+        calltime = initcall_debug_start(dev);
+
+        error = cb(dev);
+        suspend_report_result(cb, error);
+
+        initcall_debug_report(dev, calltime, error);
+
+        return error;
+}
+
+/**
  * device_resume - Execute "resume" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
@@ -400,7 +484,7 @@ static int device_resume(struct device *dev, pm_message_t state)
                         error = pm_op(dev, dev->bus->pm, state);
                 } else if (dev->bus->resume) {
                         pm_dev_dbg(dev, state, "legacy ");
-                        error = dev->bus->resume(dev);
+                        error = legacy_resume(dev, dev->bus->resume);
                 }
                 if (error)
                         goto End;
@@ -421,7 +505,7 @@ static int device_resume(struct device *dev, pm_message_t state)
                         error = pm_op(dev, dev->class->pm, state);
                 } else if (dev->class->resume) {
                         pm_dev_dbg(dev, state, "legacy class ");
-                        error = dev->class->resume(dev);
+                        error = legacy_resume(dev, dev->class->resume);
                 }
         }
  End:
@@ -441,6 +525,7 @@ static int device_resume(struct device *dev, pm_message_t state)
 static void dpm_resume(pm_message_t state)
 {
         struct list_head list;
+        ktime_t starttime = ktime_get();

         INIT_LIST_HEAD(&list);
         mutex_lock(&dpm_list_mtx);
@@ -469,6 +554,7 @@ static void dpm_resume(pm_message_t state)
         }
         list_splice(&list, &dpm_list);
         mutex_unlock(&dpm_list_mtx);
+        dpm_show_time(starttime, state, NULL);
 }

 /**
@@ -521,7 +607,7 @@ static void dpm_complete(pm_message_t state)
                 mutex_unlock(&dpm_list_mtx);

                 device_complete(dev, state);
-                pm_runtime_put_noidle(dev);
+                pm_runtime_put_sync(dev);

                 mutex_lock(&dpm_list_mtx);
         }
@@ -584,10 +670,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 {
         int error = 0;

-        if (!dev->bus)
-                return 0;
-
-        if (dev->bus->pm) {
+        if (dev->bus && dev->bus->pm) {
                 pm_dev_dbg(dev, state, "LATE ");
                 error = pm_noirq_op(dev, dev->bus->pm, state);
         }
@@ -604,6 +687,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
 int dpm_suspend_noirq(pm_message_t state)
 {
         struct device *dev;
+        ktime_t starttime = ktime_get();
         int error = 0;

         suspend_device_irqs();
@@ -619,11 +703,35 @@ int dpm_suspend_noirq(pm_message_t state)
         mutex_unlock(&dpm_list_mtx);
         if (error)
                 dpm_resume_noirq(resume_event(state));
+        else
+                dpm_show_time(starttime, state, "late");
         return error;
 }
 EXPORT_SYMBOL_GPL(dpm_suspend_noirq);

 /**
+ * legacy_suspend - Execute a legacy (bus or class) suspend callback for device.
+ * @dev: Device to suspend.
+ * @state: PM transition of the system being carried out.
+ * @cb: Suspend callback to execute.
+ */
+static int legacy_suspend(struct device *dev, pm_message_t state,
+                          int (*cb)(struct device *dev, pm_message_t state))
+{
+        int error;
+        ktime_t calltime;
+
+        calltime = initcall_debug_start(dev);
+
+        error = cb(dev, state);
+        suspend_report_result(cb, error);
+
+        initcall_debug_report(dev, calltime, error);
+
+        return error;
+}
+
+/**
  * device_suspend - Execute "suspend" callbacks for given device.
  * @dev: Device to handle.
  * @state: PM transition of the system being carried out.
@@ -640,8 +748,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
                         error = pm_op(dev, dev->class->pm, state);
                 } else if (dev->class->suspend) {
                         pm_dev_dbg(dev, state, "legacy class ");
-                        error = dev->class->suspend(dev, state);
-                        suspend_report_result(dev->class->suspend, error);
+                        error = legacy_suspend(dev, state, dev->class->suspend);
                 }
                 if (error)
                         goto End;
@@ -662,8 +769,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
                         error = pm_op(dev, dev->bus->pm, state);
                 } else if (dev->bus->suspend) {
                         pm_dev_dbg(dev, state, "legacy ");
-                        error = dev->bus->suspend(dev, state);
-                        suspend_report_result(dev->bus->suspend, error);
+                        error = legacy_suspend(dev, state, dev->bus->suspend);
                 }
         }
  End:
@@ -679,6 +785,7 @@ static int device_suspend(struct device *dev, pm_message_t state)
 static int dpm_suspend(pm_message_t state)
 {
         struct list_head list;
+        ktime_t starttime = ktime_get();
         int error = 0;

         INIT_LIST_HEAD(&list);
@@ -704,6 +811,8 @@ static int dpm_suspend(pm_message_t state)
         }
         list_splice(&list, dpm_list.prev);
         mutex_unlock(&dpm_list_mtx);
+        if (!error)
+                dpm_show_time(starttime, state, NULL);
         return error;
 }

@@ -772,7 +881,7 @@ static int dpm_prepare(pm_message_t state)
                 pm_runtime_get_noresume(dev);
                 if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) {
                         /* Wake-up requested during system sleep transition. */
-                        pm_runtime_put_noidle(dev);
+                        pm_runtime_put_sync(dev);
                         error = -EBUSY;
                 } else {
                         error = device_prepare(dev, state);
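The main.c hunks route legacy bus and class suspend/resume callbacks through legacy_suspend()/legacy_resume() so they get the same initcall_debug timing as dev_pm_ops-based drivers. For contrast, a minimal sketch of the non-legacy form those wrappers sit beside (driver and callback names hypothetical):

#include <linux/pm.h>

static int example_suspend(struct device *dev)
{
        /* quiesce the hypothetical device before the system transition */
        return 0;
}

static int example_resume(struct device *dev)
{
        /* re-initialize it afterwards */
        return 0;
}

/*
 * Picked up through dev->bus->pm (or dev->class->pm) rather than the
 * legacy ->suspend()/->resume() members handled by legacy_suspend()
 * and legacy_resume() above.
 */
static const struct dev_pm_ops example_pm_ops = {
        .suspend        = example_suspend,
        .resume         = example_resume,
};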
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 5a01ecef4af3..f8b044e8aef7 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -85,6 +85,19 @@ static int __pm_runtime_idle(struct device *dev)
                 dev->bus->pm->runtime_idle(dev);

                 spin_lock_irq(&dev->power.lock);
+        } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) {
+                spin_unlock_irq(&dev->power.lock);
+
+                dev->type->pm->runtime_idle(dev);
+
+                spin_lock_irq(&dev->power.lock);
+        } else if (dev->class && dev->class->pm
+            && dev->class->pm->runtime_idle) {
+                spin_unlock_irq(&dev->power.lock);
+
+                dev->class->pm->runtime_idle(dev);
+
+                spin_lock_irq(&dev->power.lock);
         }

         dev->power.idle_notification = false;
@@ -194,6 +207,22 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq)

                 spin_lock_irq(&dev->power.lock);
                 dev->power.runtime_error = retval;
+        } else if (dev->type && dev->type->pm
+            && dev->type->pm->runtime_suspend) {
+                spin_unlock_irq(&dev->power.lock);
+
+                retval = dev->type->pm->runtime_suspend(dev);
+
+                spin_lock_irq(&dev->power.lock);
+                dev->power.runtime_error = retval;
+        } else if (dev->class && dev->class->pm
+            && dev->class->pm->runtime_suspend) {
+                spin_unlock_irq(&dev->power.lock);
+
+                retval = dev->class->pm->runtime_suspend(dev);
+
+                spin_lock_irq(&dev->power.lock);
+                dev->power.runtime_error = retval;
         } else {
                 retval = -ENOSYS;
         }
@@ -359,6 +388,22 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)

                 spin_lock_irq(&dev->power.lock);
                 dev->power.runtime_error = retval;
+        } else if (dev->type && dev->type->pm
+            && dev->type->pm->runtime_resume) {
+                spin_unlock_irq(&dev->power.lock);
+
+                retval = dev->type->pm->runtime_resume(dev);
+
+                spin_lock_irq(&dev->power.lock);
+                dev->power.runtime_error = retval;
+        } else if (dev->class && dev->class->pm
+            && dev->class->pm->runtime_resume) {
+                spin_unlock_irq(&dev->power.lock);
+
+                retval = dev->class->pm->runtime_resume(dev);
+
+                spin_lock_irq(&dev->power.lock);
+                dev->power.runtime_error = retval;
         } else {
                 retval = -ENOSYS;
         }
@@ -701,15 +746,15 @@ EXPORT_SYMBOL_GPL(pm_request_resume);
  * @dev: Device to handle.
  * @sync: If set and the device is suspended, resume it synchronously.
  *
- * Increment the usage count of the device and if it was zero previously,
- * resume it or submit a resume request for it, depending on the value of @sync.
+ * Increment the usage count of the device and resume it or submit a resume
+ * request for it, depending on the value of @sync.
  */
 int __pm_runtime_get(struct device *dev, bool sync)
 {
-        int retval = 1;
+        int retval;

-        if (atomic_add_return(1, &dev->power.usage_count) == 1)
-                retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);
+        atomic_inc(&dev->power.usage_count);
+        retval = sync ? pm_runtime_resume(dev) : pm_request_resume(dev);

         return retval;
 }
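The runtime.c hunks extend the idle/suspend/resume dispatch from bus callbacks to device-type and class callbacks, and make __pm_runtime_get() bump the usage count unconditionally. A minimal sketch of a class supplying such callbacks, and of the matching get/put usage in a driver (all names hypothetical):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
        /* power down the otherwise idle device */
        return 0;
}

static int example_runtime_resume(struct device *dev)
{
        /* power it back up before it is used again */
        return 0;
}

/* dispatched through dev->class->pm by the __pm_runtime_* helpers above */
static const struct dev_pm_ops example_class_pm_ops = {
        .runtime_suspend = example_runtime_suspend,
        .runtime_resume  = example_runtime_resume,
};

static struct class example_class = {
        .name   = "example",
        .pm     = &example_class_pm_ops,
};

static void example_do_io(struct device *dev)
{
        pm_runtime_get_sync(dev);       /* usage count++, resume if suspended */
        /* ... talk to the hardware ... */
        pm_runtime_put(dev);            /* usage count--, request idle check */
}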