path: root/drivers/base
Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/Kconfig          19
-rw-r--r--  drivers/base/core.c            4
-rw-r--r--  drivers/base/power/domain.c   42
3 files changed, 48 insertions, 17 deletions
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 61a33f4ba608..df04227d00cf 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -171,20 +171,23 @@ config WANT_DEV_COREDUMP
 	  Drivers should "select" this option if they desire to use the
 	  device coredump mechanism.
 
-config DISABLE_DEV_COREDUMP
-	bool "Disable device coredump" if EXPERT
+config ALLOW_DEV_COREDUMP
+	bool "Allow device coredump" if EXPERT
+	default y
 	help
-	  Disable the device coredump mechanism despite drivers wanting to
-	  use it; this allows for more sensitive systems or systems that
-	  don't want to ever access the information to not have the code,
-	  nor keep any data.
+	  This option controls if the device coredump mechanism is available or
+	  not; if disabled, the mechanism will be omitted even if drivers that
+	  can use it are enabled.
+	  Say 'N' for more sensitive systems or systems that don't want
+	  to ever access the information to not have the code, nor keep any
+	  data.
 
-	  If unsure, say N.
+	  If unsure, say Y.
 
 config DEV_COREDUMP
 	bool
 	default y if WANT_DEV_COREDUMP
-	depends on !DISABLE_DEV_COREDUMP
+	depends on ALLOW_DEV_COREDUMP
 
 config DEBUG_DRIVER
 	bool "Driver Core verbose debug messages"
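For reference: a driver opts into this mechanism by selecting WANT_DEV_COREDUMP in its own Kconfig entry, and DEV_COREDUMP is then built only while ALLOW_DEV_COREDUMP stays enabled. The sketch below is a hypothetical driver-side use of the real dev_coredumpv() helper from <linux/devcoredump.h>; the function and buffer names are made up. When DEV_COREDUMP is compiled out, dev_coredumpv() reduces to a stub that just frees the buffer, so no dump data is ever retained.

/*
 * Hypothetical example: a driver that "select"s WANT_DEV_COREDUMP handing
 * a crashed-firmware snapshot to the devcoredump core.  The buffer must be
 * vmalloc'd because the core releases it with vfree() once the dump has
 * been read or has timed out.
 */
#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void example_report_fw_crash(struct device *dev,
				    const void *fw_state, size_t len)
{
	void *dump = vmalloc(len);

	if (!dump)
		return;
	memcpy(dump, fw_state, len);
	dev_coredumpv(dev, dump, len, GFP_KERNEL);	/* ownership passes here */
}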
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 14d162952c3b..842d04707de6 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -724,12 +724,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
 	return &dir->kobj;
 }
 
+static DEFINE_MUTEX(gdp_mutex);
 
 static struct kobject *get_device_parent(struct device *dev,
 					  struct device *parent)
 {
 	if (dev->class) {
-		static DEFINE_MUTEX(gdp_mutex);
 		struct kobject *kobj = NULL;
 		struct kobject *parent_kobj;
 		struct kobject *k;
@@ -793,7 +793,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
 	    glue_dir->kset != &dev->class->p->glue_dirs)
 		return;
 
+	mutex_lock(&gdp_mutex);
 	kobject_put(glue_dir);
+	mutex_unlock(&gdp_mutex);
 }
 
 static void cleanup_device_parent(struct device *dev)
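The two core.c hunks are a locking fix that can be read directly from the diff: gdp_mutex, previously a function-local static inside get_device_parent(), is hoisted to file scope so that cleanup_glue_dir() can take the same lock around its final kobject_put(). The plausible intent is to keep the drop of the last glue-directory reference from racing with a concurrent get_device_parent() that has just found that directory in the class's glue_dirs kset. A condensed sketch of the resulting pattern (helper name is invented, error handling omitted):

static DEFINE_MUTEX(gdp_mutex);

static struct kobject *get_glue_dir(struct device *dev)
{
	struct kobject *kobj;

	mutex_lock(&gdp_mutex);
	kobj = find_or_create_glue_dir(dev);	/* hypothetical helper */
	mutex_unlock(&gdp_mutex);
	return kobj;
}

static void put_glue_dir(struct kobject *glue_dir)
{
	/* Dropping the reference under the same mutex means a concurrent
	 * lookup cannot pick up a glue dir that is being torn down. */
	mutex_lock(&gdp_mutex);
	kobject_put(glue_dir);
	mutex_unlock(&gdp_mutex);
}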
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 40bc2f4072cc..fb83d4acd400 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -361,9 +361,19 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 	struct device *dev = pdd->dev;
 	int ret = 0;
 
-	if (gpd_data->need_restore)
+	if (gpd_data->need_restore > 0)
 		return 0;
 
+	/*
+	 * If the value of the need_restore flag is still unknown at this point,
+	 * we trust that pm_genpd_poweroff() has verified that the device is
+	 * already runtime PM suspended.
+	 */
+	if (gpd_data->need_restore < 0) {
+		gpd_data->need_restore = 1;
+		return 0;
+	}
+
 	mutex_unlock(&genpd->lock);
 
 	genpd_start_dev(genpd, dev);
@@ -373,7 +383,7 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
 	mutex_lock(&genpd->lock);
 
 	if (!ret)
-		gpd_data->need_restore = true;
+		gpd_data->need_restore = 1;
 
 	return ret;
 }
@@ -389,12 +399,17 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
 {
 	struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
 	struct device *dev = pdd->dev;
-	bool need_restore = gpd_data->need_restore;
+	int need_restore = gpd_data->need_restore;
 
-	gpd_data->need_restore = false;
+	gpd_data->need_restore = 0;
 	mutex_unlock(&genpd->lock);
 
 	genpd_start_dev(genpd, dev);
+
+	/*
+	 * Call genpd_restore_dev() for recently added devices too (need_restore
+	 * is negative then).
+	 */
 	if (need_restore)
 		genpd_restore_dev(genpd, dev);
 
@@ -603,6 +618,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
 static int pm_genpd_runtime_suspend(struct device *dev)
 {
 	struct generic_pm_domain *genpd;
+	struct generic_pm_domain_data *gpd_data;
 	bool (*stop_ok)(struct device *__dev);
 	int ret;
 
@@ -628,6 +644,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
 		return 0;
 
 	mutex_lock(&genpd->lock);
+
+	/*
+	 * If we have an unknown state of the need_restore flag, it means none
+	 * of the runtime PM callbacks has been invoked yet. Let's update the
+	 * flag to reflect that the current state is active.
+	 */
+	gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+	if (gpd_data->need_restore < 0)
+		gpd_data->need_restore = 0;
+
 	genpd->in_progress++;
 	pm_genpd_poweroff(genpd);
 	genpd->in_progress--;
@@ -1437,12 +1463,12 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
 	spin_unlock_irq(&dev->power.lock);
 
 	if (genpd->attach_dev)
-		genpd->attach_dev(dev);
+		genpd->attach_dev(genpd, dev);
 
 	mutex_lock(&gpd_data->lock);
 	gpd_data->base.dev = dev;
 	list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
-	gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+	gpd_data->need_restore = -1;
 	gpd_data->td.constraint_changed = true;
 	gpd_data->td.effective_constraint_ns = -1;
 	mutex_unlock(&gpd_data->lock);
@@ -1499,7 +1525,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
 	genpd->max_off_time_changed = true;
 
 	if (genpd->detach_dev)
-		genpd->detach_dev(dev);
+		genpd->detach_dev(genpd, dev);
 
 	spin_lock_irq(&dev->power.lock);
 
@@ -1546,7 +1572,7 @@ void pm_genpd_dev_need_restore(struct device *dev, bool val)
 
 	psd = dev_to_psd(dev);
 	if (psd && psd->domain_data)
-		to_gpd_data(psd->domain_data)->need_restore = val;
+		to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
 
 	spin_unlock_irqrestore(&dev->power.lock, flags);
 }
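Taken together, the domain.c hunks turn need_restore from a bool into a tri-state integer: -1 means no runtime PM callback has run since __pm_genpd_add_device() attached the device, 0 means the device state is active and there is nothing to restore, and a positive value means the state was saved and must be restored on the next power-up. The restore path deliberately treats any non-zero value, including -1, as "restore". Below is a small stand-alone C model of those transitions, written as userspace code purely to make the state machine explicit; it is not kernel code.

/*
 * Model of the need_restore tri-state introduced by this patch:
 *   -1  unknown: freshly added device, no runtime PM callback seen yet
 *    0  device state is active, nothing to restore
 *    1  device state was saved, restore it before next use
 */
#include <stdio.h>

static int need_restore = -1;		/* set in __pm_genpd_add_device() */

static void runtime_suspend(void)	/* cf. pm_genpd_runtime_suspend() */
{
	if (need_restore < 0)		/* first callback: device is active */
		need_restore = 0;
}

static void save_device(void)		/* cf. __pm_genpd_save_device() */
{
	if (need_restore > 0)		/* already saved */
		return;
	if (need_restore < 0) {		/* trust it is already suspended */
		need_restore = 1;
		return;
	}
	/* ... invoke the domain's save callback here ... */
	need_restore = 1;
}

static void restore_device(void)	/* cf. __pm_genpd_restore_device() */
{
	int restore = need_restore;

	need_restore = 0;
	if (restore)			/* non-zero, including -1, restores */
		printf("restoring device state\n");
}

int main(void)
{
	runtime_suspend();		/* -1 -> 0 */
	save_device();			/*  0 -> 1, save callback runs */
	restore_device();		/*  1 -> 0, restore runs */
	return 0;
}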