-rw-r--r--  drivers/base/firmware_class.c  208
-rw-r--r--  drivers/base/power/runtime.c     3
-rw-r--r--  include/linux/kmod.h            27
-rw-r--r--  include/linux/pm_qos.h           4
-rw-r--r--  kernel/kmod.c                  117
-rw-r--r--  kernel/power/hibernate.c        18
-rw-r--r--  kernel/power/process.c           8
-rw-r--r--  kernel/power/qos.c              50
-rw-r--r--  kernel/power/suspend.c           7
-rw-r--r--  kernel/power/user.c             10
10 files changed, 284 insertions, 168 deletions
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 6c9387d646ec..5401814c874d 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -16,10 +16,11 @@
 #include <linux/interrupt.h>
 #include <linux/bitops.h>
 #include <linux/mutex.h>
-#include <linux/kthread.h>
+#include <linux/workqueue.h>
 #include <linux/highmem.h>
 #include <linux/firmware.h>
 #include <linux/slab.h>
+#include <linux/sched.h>
 
 #define to_dev(obj) container_of(obj, struct device, kobj)
 
@@ -81,6 +82,11 @@ enum {
 
 static int loading_timeout = 60;	/* In seconds */
 
+static inline long firmware_loading_timeout(void)
+{
+	return loading_timeout > 0 ? loading_timeout * HZ : MAX_SCHEDULE_TIMEOUT;
+}
+
 /* fw_lock could be moved to 'struct firmware_priv' but since it is just
  * guarding for corner cases a global lock should be OK */
 static DEFINE_MUTEX(fw_lock);
@@ -440,13 +446,11 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
 {
 	struct firmware_priv *fw_priv;
 	struct device *f_dev;
-	int error;
 
 	fw_priv = kzalloc(sizeof(*fw_priv) + strlen(fw_name) + 1 , GFP_KERNEL);
 	if (!fw_priv) {
 		dev_err(device, "%s: kmalloc failed\n", __func__);
-		error = -ENOMEM;
-		goto err_out;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	fw_priv->fw = firmware;
@@ -463,98 +467,80 @@ fw_create_instance(struct firmware *firmware, const char *fw_name,
 	f_dev->parent = device;
 	f_dev->class = &firmware_class;
 
-	dev_set_uevent_suppress(f_dev, true);
-
-	/* Need to pin this module until class device is destroyed */
-	__module_get(THIS_MODULE);
-
-	error = device_add(f_dev);
-	if (error) {
-		dev_err(device, "%s: device_register failed\n", __func__);
-		goto err_put_dev;
-	}
-
-	error = device_create_bin_file(f_dev, &firmware_attr_data);
-	if (error) {
-		dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__);
-		goto err_del_dev;
-	}
-
-	error = device_create_file(f_dev, &dev_attr_loading);
-	if (error) {
-		dev_err(device, "%s: device_create_file failed\n", __func__);
-		goto err_del_bin_attr;
-	}
-
-	if (uevent)
-		dev_set_uevent_suppress(f_dev, false);
-
 	return fw_priv;
-
-err_del_bin_attr:
-	device_remove_bin_file(f_dev, &firmware_attr_data);
-err_del_dev:
-	device_del(f_dev);
-err_put_dev:
-	put_device(f_dev);
-err_out:
-	return ERR_PTR(error);
 }
 
-static void fw_destroy_instance(struct firmware_priv *fw_priv)
-{
-	struct device *f_dev = &fw_priv->dev;
-
-	device_remove_file(f_dev, &dev_attr_loading);
-	device_remove_bin_file(f_dev, &firmware_attr_data);
-	device_unregister(f_dev);
-}
-
-static int _request_firmware(const struct firmware **firmware_p,
-			     const char *name, struct device *device,
-			     bool uevent, bool nowait)
+static struct firmware_priv *
+_request_firmware_prepare(const struct firmware **firmware_p, const char *name,
+			  struct device *device, bool uevent, bool nowait)
 {
-	struct firmware_priv *fw_priv;
 	struct firmware *firmware;
-	int retval = 0;
+	struct firmware_priv *fw_priv;
 
 	if (!firmware_p)
-		return -EINVAL;
+		return ERR_PTR(-EINVAL);
 
 	*firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL);
 	if (!firmware) {
 		dev_err(device, "%s: kmalloc(struct firmware) failed\n",
 			__func__);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	if (fw_get_builtin_firmware(firmware, name)) {
 		dev_dbg(device, "firmware: using built-in firmware %s\n", name);
-		return 0;
+		return NULL;
+	}
+
+	fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
+	if (IS_ERR(fw_priv)) {
+		release_firmware(firmware);
+		*firmware_p = NULL;
 	}
+	return fw_priv;
+}
 
-	read_lock_usermodehelper();
+static void _request_firmware_cleanup(const struct firmware **firmware_p)
+{
+	release_firmware(*firmware_p);
+	*firmware_p = NULL;
+}
 
-	if (WARN_ON(usermodehelper_is_disabled())) {
-		dev_err(device, "firmware: %s will not be loaded\n", name);
-		retval = -EBUSY;
-		goto out;
+static int _request_firmware_load(struct firmware_priv *fw_priv, bool uevent,
+				  long timeout)
+{
+	int retval = 0;
+	struct device *f_dev = &fw_priv->dev;
+
+	dev_set_uevent_suppress(f_dev, true);
+
+	/* Need to pin this module until class device is destroyed */
+	__module_get(THIS_MODULE);
+
+	retval = device_add(f_dev);
+	if (retval) {
+		dev_err(f_dev, "%s: device_register failed\n", __func__);
+		goto err_put_dev;
 	}
 
-	if (uevent)
-		dev_dbg(device, "firmware: requesting %s\n", name);
+	retval = device_create_bin_file(f_dev, &firmware_attr_data);
+	if (retval) {
+		dev_err(f_dev, "%s: sysfs_create_bin_file failed\n", __func__);
+		goto err_del_dev;
+	}
 
-	fw_priv = fw_create_instance(firmware, name, device, uevent, nowait);
-	if (IS_ERR(fw_priv)) {
-		retval = PTR_ERR(fw_priv);
-		goto out;
+	retval = device_create_file(f_dev, &dev_attr_loading);
+	if (retval) {
+		dev_err(f_dev, "%s: device_create_file failed\n", __func__);
+		goto err_del_bin_attr;
 	}
 
 	if (uevent) {
-		if (loading_timeout > 0)
+		dev_set_uevent_suppress(f_dev, false);
+		dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_id);
+		if (timeout != MAX_SCHEDULE_TIMEOUT)
 			mod_timer(&fw_priv->timeout,
-				  round_jiffies_up(jiffies +
-						   loading_timeout * HZ));
+				  round_jiffies_up(jiffies + timeout));
 
 		kobject_uevent(&fw_priv->dev.kobj, KOBJ_ADD);
 	}
@@ -570,16 +556,13 @@ static int _request_firmware(const struct firmware **firmware_p,
 	fw_priv->fw = NULL;
 	mutex_unlock(&fw_lock);
 
-	fw_destroy_instance(fw_priv);
-
-out:
-	read_unlock_usermodehelper();
-
-	if (retval) {
-		release_firmware(firmware);
-		*firmware_p = NULL;
-	}
-
+	device_remove_file(f_dev, &dev_attr_loading);
+err_del_bin_attr:
+	device_remove_bin_file(f_dev, &firmware_attr_data);
+err_del_dev:
+	device_del(f_dev);
+err_put_dev:
+	put_device(f_dev);
 	return retval;
 }
 
@@ -602,7 +585,26 @@ int
 request_firmware(const struct firmware **firmware_p, const char *name,
                  struct device *device)
 {
-	return _request_firmware(firmware_p, name, device, true, false);
+	struct firmware_priv *fw_priv;
+	int ret;
+
+	fw_priv = _request_firmware_prepare(firmware_p, name, device, true,
+					    false);
+	if (IS_ERR_OR_NULL(fw_priv))
+		return PTR_RET(fw_priv);
+
+	ret = usermodehelper_read_trylock();
+	if (WARN_ON(ret)) {
+		dev_err(device, "firmware: %s will not be loaded\n", name);
+	} else {
+		ret = _request_firmware_load(fw_priv, true,
+					     firmware_loading_timeout());
+		usermodehelper_read_unlock();
+	}
+	if (ret)
+		_request_firmware_cleanup(firmware_p);
+
+	return ret;
 }
 
 /**
@@ -629,25 +631,39 @@ struct firmware_work {
 	bool uevent;
 };
 
-static int request_firmware_work_func(void *arg)
+static void request_firmware_work_func(struct work_struct *work)
 {
-	struct firmware_work *fw_work = arg;
+	struct firmware_work *fw_work;
 	const struct firmware *fw;
+	struct firmware_priv *fw_priv;
+	long timeout;
 	int ret;
 
-	if (!arg) {
-		WARN_ON(1);
-		return 0;
+	fw_work = container_of(work, struct firmware_work, work);
+	fw_priv = _request_firmware_prepare(&fw, fw_work->name, fw_work->device,
+					    fw_work->uevent, true);
+	if (IS_ERR_OR_NULL(fw_priv)) {
+		ret = PTR_RET(fw_priv);
+		goto out;
+	}
+
+	timeout = usermodehelper_read_lock_wait(firmware_loading_timeout());
+	if (timeout) {
+		ret = _request_firmware_load(fw_priv, fw_work->uevent, timeout);
+		usermodehelper_read_unlock();
+	} else {
+		dev_dbg(fw_work->device, "firmware: %s loading timed out\n",
+			fw_work->name);
+		ret = -EAGAIN;
 	}
+	if (ret)
+		_request_firmware_cleanup(&fw);
 
-	ret = _request_firmware(&fw, fw_work->name, fw_work->device,
-				fw_work->uevent, true);
+ out:
 	fw_work->cont(fw, fw_work->context);
 
 	module_put(fw_work->module);
 	kfree(fw_work);
-
-	return ret;
 }
 
 /**
@@ -673,7 +689,6 @@ request_firmware_nowait(
 	const char *name, struct device *device, gfp_t gfp, void *context,
 	void (*cont)(const struct firmware *fw, void *context))
 {
-	struct task_struct *task;
 	struct firmware_work *fw_work;
 
 	fw_work = kzalloc(sizeof (struct firmware_work), gfp);
@@ -692,15 +707,8 @@ request_firmware_nowait(
 		return -EFAULT;
 	}
 
-	task = kthread_run(request_firmware_work_func, fw_work,
-			   "firmware/%s", name);
-	if (IS_ERR(task)) {
-		fw_work->cont(NULL, fw_work->context);
-		module_put(fw_work->module);
-		kfree(fw_work);
-		return PTR_ERR(task);
-	}
-
+	INIT_WORK(&fw_work->work, request_firmware_work_func);
+	schedule_work(&fw_work->work);
 	return 0;
 }
 
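Illustrative driver-side usage (not part of this patch): with the asynchronous path now run from a work item instead of a kthread, consumers keep calling the same API. The device, callback, and firmware name below are hypothetical.

	static void example_fw_cont(const struct firmware *fw, void *context)
	{
		struct device *dev = context;

		if (!fw) {
			dev_err(dev, "firmware load failed or timed out\n");
			return;
		}
		/* ... program fw->data (fw->size bytes) into the hardware ... */
		release_firmware(fw);
	}

	/* e.g. from probe(): only a work item is queued, no kthread is spawned */
	err = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				      "example/fw.bin", dev, GFP_KERNEL,
				      dev, example_fw_cont);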
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 541f821d4ea6..bd0f3949bcf9 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -532,6 +532,8 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	dev->power.suspend_time = ktime_set(0, 0);
 	dev->power.max_time_suspended_ns = -1;
 	dev->power.deferred_resume = false;
+	wake_up_all(&dev->power.wait_queue);
+
 	if (retval == -EAGAIN || retval == -EBUSY) {
 		dev->power.runtime_error = 0;
 
@@ -547,7 +549,6 @@ static int rpm_suspend(struct device *dev, int rpmflags)
 	} else {
 		pm_runtime_cancel_pending(dev);
 	}
-	wake_up_all(&dev->power.wait_queue);
 	goto out;
 }
 
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index 9efeae679106..dd99c329e161 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -110,12 +110,29 @@ call_usermodehelper(char *path, char **argv, char **envp, int wait)
 
 extern struct ctl_table usermodehelper_table[];
 
+enum umh_disable_depth {
+	UMH_ENABLED = 0,
+	UMH_FREEZING,
+	UMH_DISABLED,
+};
+
 extern void usermodehelper_init(void);
 
-extern int usermodehelper_disable(void);
-extern void usermodehelper_enable(void);
-extern bool usermodehelper_is_disabled(void);
-extern void read_lock_usermodehelper(void);
-extern void read_unlock_usermodehelper(void);
+extern int __usermodehelper_disable(enum umh_disable_depth depth);
+extern void __usermodehelper_set_disable_depth(enum umh_disable_depth depth);
+
+static inline int usermodehelper_disable(void)
+{
+	return __usermodehelper_disable(UMH_DISABLED);
+}
+
+static inline void usermodehelper_enable(void)
+{
+	__usermodehelper_set_disable_depth(UMH_ENABLED);
+}
+
+extern int usermodehelper_read_trylock(void);
+extern long usermodehelper_read_lock_wait(long timeout);
+extern void usermodehelper_read_unlock(void);
 
 #endif /* __LINUX_KMOD_H__ */
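A minimal sketch of the intended calling convention for the new read-side API (illustrative, not from this patch; path/argv/envp are placeholders):

	ret = usermodehelper_read_trylock();
	if (ret)
		return ret;	/* -EAGAIN: helpers disabled, e.g. during suspend */

	ret = call_usermodehelper(path, argv, envp, UMH_WAIT_EXEC);

	usermodehelper_read_unlock();

Callers that can sleep until the freezer re-enables helpers use usermodehelper_read_lock_wait() instead; in both cases usermodehelper_read_unlock() is only called when the lock was actually obtained.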
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 2e9191a712f3..233149cb19f4 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -8,6 +8,7 @@
 #include <linux/notifier.h>
 #include <linux/miscdevice.h>
 #include <linux/device.h>
+#include <linux/workqueue.h>
 
 enum {
 	PM_QOS_RESERVED = 0,
@@ -29,6 +30,7 @@ enum {
 struct pm_qos_request {
 	struct plist_node node;
 	int pm_qos_class;
+	struct delayed_work work; /* for pm_qos_update_request_timeout */
 };
 
 struct dev_pm_qos_request {
@@ -73,6 +75,8 @@ void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
 			s32 value);
 void pm_qos_update_request(struct pm_qos_request *req,
 			   s32 new_value);
+void pm_qos_update_request_timeout(struct pm_qos_request *req,
+				   s32 new_value, unsigned long timeout_us);
 void pm_qos_remove_request(struct pm_qos_request *req);
 
 int pm_qos_request(int pm_qos_class);
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 957a7aab8ebc..05698a7415fe 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -322,7 +322,7 @@ static void __call_usermodehelper(struct work_struct *work)
  * land has been frozen during a system-wide hibernation or suspend operation).
  * Should always be manipulated under umhelper_sem acquired for write.
  */
-static int usermodehelper_disabled = 1;
+static enum umh_disable_depth usermodehelper_disabled = UMH_DISABLED;
 
 /* Number of helpers running */
 static atomic_t running_helpers = ATOMIC_INIT(0);
@@ -334,32 +334,110 @@ static atomic_t running_helpers = ATOMIC_INIT(0);
 static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
 
 /*
+ * Used by usermodehelper_read_lock_wait() to wait for usermodehelper_disabled
+ * to become 'false'.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(usermodehelper_disabled_waitq);
+
+/*
  * Time to wait for running_helpers to become zero before the setting of
  * usermodehelper_disabled in usermodehelper_disable() fails
  */
 #define RUNNING_HELPERS_TIMEOUT	(5 * HZ)
 
-void read_lock_usermodehelper(void)
+int usermodehelper_read_trylock(void)
 {
+	DEFINE_WAIT(wait);
+	int ret = 0;
+
 	down_read(&umhelper_sem);
+	for (;;) {
+		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
+				TASK_INTERRUPTIBLE);
+		if (!usermodehelper_disabled)
+			break;
+
+		if (usermodehelper_disabled == UMH_DISABLED)
+			ret = -EAGAIN;
+
+		up_read(&umhelper_sem);
+
+		if (ret)
+			break;
+
+		schedule();
+		try_to_freeze();
+
+		down_read(&umhelper_sem);
+	}
+	finish_wait(&usermodehelper_disabled_waitq, &wait);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(usermodehelper_read_trylock);
+
+long usermodehelper_read_lock_wait(long timeout)
+{
+	DEFINE_WAIT(wait);
+
+	if (timeout < 0)
+		return -EINVAL;
+
+	down_read(&umhelper_sem);
+	for (;;) {
+		prepare_to_wait(&usermodehelper_disabled_waitq, &wait,
+				TASK_UNINTERRUPTIBLE);
+		if (!usermodehelper_disabled)
+			break;
+
+		up_read(&umhelper_sem);
+
+		timeout = schedule_timeout(timeout);
+		if (!timeout)
+			break;
+
+		down_read(&umhelper_sem);
+	}
+	finish_wait(&usermodehelper_disabled_waitq, &wait);
+	return timeout;
 }
-EXPORT_SYMBOL_GPL(read_lock_usermodehelper);
+EXPORT_SYMBOL_GPL(usermodehelper_read_lock_wait);
 
-void read_unlock_usermodehelper(void)
+void usermodehelper_read_unlock(void)
 {
 	up_read(&umhelper_sem);
 }
-EXPORT_SYMBOL_GPL(read_unlock_usermodehelper);
+EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
 
 /**
- * usermodehelper_disable - prevent new helpers from being started
+ * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
+ * depth: New value to assign to usermodehelper_disabled.
+ *
+ * Change the value of usermodehelper_disabled (under umhelper_sem locked for
+ * writing) and wakeup tasks waiting for it to change.
  */
-int usermodehelper_disable(void)
+void __usermodehelper_set_disable_depth(enum umh_disable_depth depth)
+{
+	down_write(&umhelper_sem);
+	usermodehelper_disabled = depth;
+	wake_up(&usermodehelper_disabled_waitq);
+	up_write(&umhelper_sem);
+}
+
+/**
+ * __usermodehelper_disable - Prevent new helpers from being started.
+ * @depth: New value to assign to usermodehelper_disabled.
+ *
+ * Set usermodehelper_disabled to @depth and wait for running helpers to exit.
+ */
+int __usermodehelper_disable(enum umh_disable_depth depth)
 {
 	long retval;
 
+	if (!depth)
+		return -EINVAL;
+
 	down_write(&umhelper_sem);
-	usermodehelper_disabled = 1;
+	usermodehelper_disabled = depth;
 	up_write(&umhelper_sem);
 
 	/*
@@ -374,31 +452,10 @@ int usermodehelper_disable(void)
 	if (retval)
 		return 0;
 
-	down_write(&umhelper_sem);
-	usermodehelper_disabled = 0;
-	up_write(&umhelper_sem);
+	__usermodehelper_set_disable_depth(UMH_ENABLED);
 	return -EAGAIN;
 }
 
-/**
- * usermodehelper_enable - allow new helpers to be started again
- */
-void usermodehelper_enable(void)
-{
-	down_write(&umhelper_sem);
-	usermodehelper_disabled = 0;
-	up_write(&umhelper_sem);
-}
-
-/**
- * usermodehelper_is_disabled - check if new helpers are allowed to be started
- */
-bool usermodehelper_is_disabled(void)
-{
-	return usermodehelper_disabled;
-}
-EXPORT_SYMBOL_GPL(usermodehelper_is_disabled);
-
 static void helper_lock(void)
 {
 	atomic_inc(&running_helpers);
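For reference, a sketch of the blocking variant (illustrative; do_helper_work() is a placeholder). A positive return value (time remaining) means the read lock was obtained and must be dropped; zero means the wait timed out and the lock is not held:

	long timeout = usermodehelper_read_lock_wait(5 * HZ);

	if (timeout) {
		do_helper_work();
		usermodehelper_read_unlock();
	} else {
		ret = -EAGAIN;	/* helpers still disabled after the timeout */
	}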
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 0a186cfde788..e09dfbfeecee 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -16,7 +16,6 @@
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/async.h>
-#include <linux/kmod.h>
 #include <linux/delay.h>
 #include <linux/fs.h>
 #include <linux/mount.h>
@@ -611,14 +610,10 @@ int hibernate(void)
 	if (error)
 		goto Exit;
 
-	error = usermodehelper_disable();
-	if (error)
-		goto Exit;
-
 	/* Allocate memory management structures */
 	error = create_basic_memory_bitmaps();
 	if (error)
-		goto Enable_umh;
+		goto Exit;
 
 	printk(KERN_INFO "PM: Syncing filesystems ... ");
 	sys_sync();
@@ -661,8 +656,6 @@ int hibernate(void)
 
  Free_bitmaps:
 	free_basic_memory_bitmaps();
- Enable_umh:
-	usermodehelper_enable();
  Exit:
 	pm_notifier_call_chain(PM_POST_HIBERNATION);
 	pm_restore_console();
@@ -777,15 +770,9 @@ static int software_resume(void)
 	if (error)
 		goto close_finish;
 
-	error = usermodehelper_disable();
-	if (error)
-		goto close_finish;
-
 	error = create_basic_memory_bitmaps();
-	if (error) {
-		usermodehelper_enable();
+	if (error)
 		goto close_finish;
-	}
 
 	pr_debug("PM: Preparing processes for restore.\n");
 	error = freeze_processes();
@@ -806,7 +793,6 @@ static int software_resume(void)
 	thaw_processes();
  Done:
 	free_basic_memory_bitmaps();
-	usermodehelper_enable();
  Finish:
 	pm_notifier_call_chain(PM_POST_RESTORE);
 	pm_restore_console();
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0d2aeb226108..19db29f67558 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -16,6 +16,7 @@
 #include <linux/freezer.h>
 #include <linux/delay.h>
 #include <linux/workqueue.h>
+#include <linux/kmod.h>
 
 /*
  * Timeout for stopping processes
@@ -122,6 +123,10 @@ int freeze_processes(void)
 {
 	int error;
 
+	error = __usermodehelper_disable(UMH_FREEZING);
+	if (error)
+		return error;
+
 	if (!pm_freezing)
 		atomic_inc(&system_freezing_cnt);
 
@@ -130,6 +135,7 @@ int freeze_processes(void)
 	error = try_to_freeze_tasks(true);
 	if (!error) {
 		printk("done.");
+		__usermodehelper_set_disable_depth(UMH_DISABLED);
 		oom_killer_disable();
 	}
 	printk("\n");
@@ -187,6 +193,8 @@ void thaw_processes(void)
 	} while_each_thread(g, p);
 	read_unlock(&tasklist_lock);
 
+	usermodehelper_enable();
+
 	schedule();
 	printk("done.\n");
 }
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index d6d6dbd1ecc0..6a031e684026 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -230,6 +230,21 @@ int pm_qos_request_active(struct pm_qos_request *req)
 EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
 /**
+ * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
+ * @work: work struct for the delayed work (timeout)
+ *
+ * This cancels the timeout request by falling back to the default at timeout.
+ */
+static void pm_qos_work_fn(struct work_struct *work)
+{
+	struct pm_qos_request *req = container_of(to_delayed_work(work),
+						  struct pm_qos_request,
+						  work);
+
+	pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
+}
+
+/**
  * pm_qos_add_request - inserts new qos request into the list
  * @req: pointer to a preallocated handle
  * @pm_qos_class: identifies which list of qos request to use
@@ -253,6 +268,7 @@ void pm_qos_add_request(struct pm_qos_request *req,
 		return;
 	}
 	req->pm_qos_class = pm_qos_class;
+	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
 	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
 			     &req->node, PM_QOS_ADD_REQ, value);
 }
@@ -279,6 +295,9 @@ void pm_qos_update_request(struct pm_qos_request *req,
 		return;
 	}
 
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
 	if (new_value != req->node.prio)
 		pm_qos_update_target(
 			pm_qos_array[req->pm_qos_class]->constraints,
@@ -287,6 +306,34 @@ void pm_qos_update_request(struct pm_qos_request *req,
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
 
 /**
+ * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
+ * @req : handle to list element holding a pm_qos request to use
+ * @new_value: defines the temporal qos request
+ * @timeout_us: the effective duration of this qos request in usecs.
+ *
+ * After timeout_us, this qos request is cancelled automatically.
+ */
+void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
+				   unsigned long timeout_us)
+{
+	if (!req)
+		return;
+	if (WARN(!pm_qos_request_active(req),
+		 "%s called for unknown object.", __func__))
+		return;
+
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
+	if (new_value != req->node.prio)
+		pm_qos_update_target(
+			pm_qos_array[req->pm_qos_class]->constraints,
+			&req->node, PM_QOS_UPDATE_REQ, new_value);
+
+	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
+}
+
+/**
  * pm_qos_remove_request - modifies an existing qos request
  * @req: handle to request list element
  *
@@ -305,6 +352,9 @@ void pm_qos_remove_request(struct pm_qos_request *req)
 		return;
 	}
 
+	if (delayed_work_pending(&req->work))
+		cancel_delayed_work_sync(&req->work);
+
 	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
 			     &req->node, PM_QOS_REMOVE_REQ,
 			     PM_QOS_DEFAULT_VALUE);
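Illustrative driver usage of the new timed constraint (not part of this patch; the request object and values are made up). When the delayed work fires, the request falls back to PM_QOS_DEFAULT_VALUE, so no explicit restore call is needed:

	static struct pm_qos_request example_qos_req;

	/* once, e.g. at probe time */
	pm_qos_add_request(&example_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	/* on a burst of activity: cap CPU/DMA latency at 50 us for 100 ms */
	pm_qos_update_request_timeout(&example_qos_req, 50, 100 * USEC_PER_MSEC);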
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 88e5c967370d..396d262b8fd0 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -12,7 +12,6 @@
 #include <linux/delay.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/kmod.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
 #include <linux/syscalls.h>
@@ -102,17 +101,12 @@ static int suspend_prepare(void)
 	if (error)
 		goto Finish;
 
-	error = usermodehelper_disable();
-	if (error)
-		goto Finish;
-
 	error = suspend_freeze_processes();
 	if (!error)
 		return 0;
 
 	suspend_stats.failed_freeze++;
 	dpm_save_failed_step(SUSPEND_FREEZE);
-	usermodehelper_enable();
  Finish:
 	pm_notifier_call_chain(PM_POST_SUSPEND);
 	pm_restore_console();
@@ -259,7 +253,6 @@ int suspend_devices_and_enter(suspend_state_t state)
 static void suspend_finish(void)
 {
 	suspend_thaw_processes();
-	usermodehelper_enable();
 	pm_notifier_call_chain(PM_POST_SUSPEND);
 	pm_restore_console();
 }
diff --git a/kernel/power/user.c b/kernel/power/user.c
index 33c4329205af..91b0fd021a95 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -12,7 +12,6 @@
 #include <linux/suspend.h>
 #include <linux/syscalls.h>
 #include <linux/reboot.h>
-#include <linux/kmod.h>
 #include <linux/string.h>
 #include <linux/device.h>
 #include <linux/miscdevice.h>
@@ -222,14 +221,8 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 		sys_sync();
 		printk("done.\n");
 
-		error = usermodehelper_disable();
-		if (error)
-			break;
-
 		error = freeze_processes();
-		if (error)
-			usermodehelper_enable();
-		else
+		if (!error)
 			data->frozen = 1;
 		break;
 
@@ -238,7 +231,6 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
 			break;
 		pm_restore_gfp_mask();
 		thaw_processes();
-		usermodehelper_enable();
 		data->frozen = 0;
 		break;
 