Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/osl.c                       12
-rw-r--r--  drivers/acpi/sleep.c                      3
-rw-r--r--  drivers/acpi/video.c                      2
-rw-r--r--  drivers/base/power/main.c                30
-rw-r--r--  drivers/base/syscore.c                    5
-rw-r--r--  drivers/cpufreq/Kconfig                   2
-rw-r--r--  drivers/cpufreq/Kconfig.arm               3
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c            2
-rw-r--r--  drivers/cpufreq/cpufreq.c                67
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c       67
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h        7
-rw-r--r--  drivers/cpufreq/intel_pstate.c            6
-rw-r--r--  drivers/cpufreq/ppc-corenet-cpufreq.c     9
-rw-r--r--  drivers/cpufreq/tegra-cpufreq.c         100
-rw-r--r--  drivers/pci/hotplug/acpiphp.h            10
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c       60
16 files changed, 303 insertions, 82 deletions
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 147bc1b91b42..3f2bdc812d23 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1810,6 +1810,16 @@ acpi_status __init acpi_os_initialize(void)
 	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
 	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
 	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
+	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
+		/*
+		 * Use acpi_os_map_generic_address to pre-map the reset
+		 * register if it's in system memory.
+		 */
+		int rv;
+
+		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
+		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
+	}
 
 	return AE_OK;
 }
@@ -1838,6 +1848,8 @@ acpi_status acpi_os_terminate(void)
 	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
 	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
 	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
+	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
+		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);
 
 	destroy_workqueue(kacpid_wq);
 	destroy_workqueue(kacpi_notify_wq);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index c11e3795431b..b3e3cc73ba79 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -19,6 +19,7 @@
 #include <linux/acpi.h>
 #include <linux/module.h>
 #include <asm/io.h>
+#include <trace/events/power.h>
 
 #include "internal.h"
 #include "sleep.h"
@@ -501,6 +502,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
 
 	ACPI_FLUSH_CPU_CACHE();
 
+	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, true);
 	switch (acpi_state) {
 	case ACPI_STATE_S1:
 		barrier();
@@ -516,6 +518,7 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
 		pr_info(PREFIX "Low-level resume complete\n");
 		break;
 	}
+	trace_suspend_resume(TPS("acpi_suspend"), acpi_state, false);
 
 	/* This violates the spec but is required for bug compatibility. */
 	acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
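
The trace_suspend_resume() calls added in this hunk, and repeated throughout the rest of this series, follow a single begin/end idiom: each suspend/resume phase is bracketed by a pair of events, with the final boolean argument marking entry versus exit, so tracing tools can attribute elapsed time to the phase. A minimal sketch of the idiom, with the hypothetical my_phase() and do_phase_work() standing in for real code:

	#include <trace/events/power.h>

	static int do_phase_work(void);	/* illustrative stand-in for the real work */

	static int my_phase(void)
	{
		int error;

		trace_suspend_resume(TPS("my_phase"), 0, true);	/* phase begins */
		error = do_phase_work();
		trace_suspend_resume(TPS("my_phase"), 0, false);	/* phase ends */
		return error;
	}
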
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 101fb090dcb9..fb9ffe9adc64 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -82,7 +82,7 @@ module_param(allow_duplicates, bool, 0644);
  * For Windows 8 systems: used to decide if video module
  * should skip registering backlight interface of its own.
  */
-static int use_native_backlight_param = -1;
+static int use_native_backlight_param = 1;
 module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
 static bool use_native_backlight_dmi = false;
 
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 343ffad59377..bf412961a934 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -214,9 +214,6 @@ static void initcall_debug_report(struct device *dev, ktime_t calltime,
 		pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev),
 			error, (unsigned long long)nsecs >> 10);
 	}
-
-	trace_device_pm_report_time(dev, info, nsecs, pm_verb(state.event),
-				    error);
 }
 
 /**
@@ -387,7 +384,9 @@ static int dpm_run_callback(pm_callback_t cb, struct device *dev,
 	calltime = initcall_debug_start(dev);
 
 	pm_dev_dbg(dev, state, info);
+	trace_device_pm_callback_start(dev, info, state.event);
 	error = cb(dev);
+	trace_device_pm_callback_end(dev, error);
 	suspend_report_result(cb, error);
 
 	initcall_debug_report(dev, calltime, error, state, info);
@@ -545,6 +544,7 @@ static void dpm_resume_noirq(pm_message_t state)
 	struct device *dev;
 	ktime_t starttime = ktime_get();
 
+	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, true);
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 
@@ -587,6 +587,7 @@ static void dpm_resume_noirq(pm_message_t state)
 	dpm_show_time(starttime, state, "noirq");
 	resume_device_irqs();
 	cpuidle_resume();
+	trace_suspend_resume(TPS("dpm_resume_noirq"), state.event, false);
 }
 
 /**
@@ -664,6 +665,7 @@ static void dpm_resume_early(pm_message_t state)
 	struct device *dev;
 	ktime_t starttime = ktime_get();
 
+	trace_suspend_resume(TPS("dpm_resume_early"), state.event, true);
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 
@@ -703,6 +705,7 @@ static void dpm_resume_early(pm_message_t state)
 	mutex_unlock(&dpm_list_mtx);
 	async_synchronize_full();
 	dpm_show_time(starttime, state, "early");
+	trace_suspend_resume(TPS("dpm_resume_early"), state.event, false);
 }
 
 /**
@@ -834,6 +837,7 @@ void dpm_resume(pm_message_t state)
 	struct device *dev;
 	ktime_t starttime = ktime_get();
 
+	trace_suspend_resume(TPS("dpm_resume"), state.event, true);
 	might_sleep();
 
 	mutex_lock(&dpm_list_mtx);
@@ -875,6 +879,7 @@ void dpm_resume(pm_message_t state)
 	dpm_show_time(starttime, state, NULL);
 
 	cpufreq_resume();
+	trace_suspend_resume(TPS("dpm_resume"), state.event, false);
 }
 
 /**
@@ -913,7 +918,9 @@ static void device_complete(struct device *dev, pm_message_t state)
 
 	if (callback) {
 		pm_dev_dbg(dev, state, info);
+		trace_device_pm_callback_start(dev, info, state.event);
 		callback(dev);
+		trace_device_pm_callback_end(dev, 0);
 	}
 
 	device_unlock(dev);
@@ -932,6 +939,7 @@ void dpm_complete(pm_message_t state)
 {
 	struct list_head list;
 
+	trace_suspend_resume(TPS("dpm_complete"), state.event, true);
 	might_sleep();
 
 	INIT_LIST_HEAD(&list);
@@ -951,6 +959,7 @@ void dpm_complete(pm_message_t state)
 	}
 	list_splice(&list, &dpm_list);
 	mutex_unlock(&dpm_list_mtx);
+	trace_suspend_resume(TPS("dpm_complete"), state.event, false);
 }
 
 /**
@@ -1086,6 +1095,7 @@ static int dpm_suspend_noirq(pm_message_t state)
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
+	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, true);
 	cpuidle_pause();
 	suspend_device_irqs();
 	mutex_lock(&dpm_list_mtx);
@@ -1126,6 +1136,7 @@ static int dpm_suspend_noirq(pm_message_t state)
 	} else {
 		dpm_show_time(starttime, state, "noirq");
 	}
+	trace_suspend_resume(TPS("dpm_suspend_noirq"), state.event, false);
 	return error;
 }
 
@@ -1222,6 +1233,7 @@ static int dpm_suspend_late(pm_message_t state)
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
+	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, true);
 	mutex_lock(&dpm_list_mtx);
 	pm_transition = state;
 	async_error = 0;
@@ -1257,6 +1269,7 @@ static int dpm_suspend_late(pm_message_t state)
 	} else {
 		dpm_show_time(starttime, state, "late");
 	}
+	trace_suspend_resume(TPS("dpm_suspend_late"), state.event, false);
 	return error;
 }
 
@@ -1295,7 +1308,9 @@ static int legacy_suspend(struct device *dev, pm_message_t state,
 
 	calltime = initcall_debug_start(dev);
 
+	trace_device_pm_callback_start(dev, info, state.event);
 	error = cb(dev, state);
+	trace_device_pm_callback_end(dev, error);
 	suspend_report_result(cb, error);
 
 	initcall_debug_report(dev, calltime, error, state, info);
@@ -1461,6 +1476,7 @@ int dpm_suspend(pm_message_t state)
 	ktime_t starttime = ktime_get();
 	int error = 0;
 
+	trace_suspend_resume(TPS("dpm_suspend"), state.event, true);
 	might_sleep();
 
 	cpufreq_suspend();
@@ -1498,6 +1514,7 @@ int dpm_suspend(pm_message_t state)
 		dpm_save_failed_step(SUSPEND_SUSPEND);
 	} else
 		dpm_show_time(starttime, state, NULL);
+	trace_suspend_resume(TPS("dpm_suspend"), state.event, false);
 	return error;
 }
 
@@ -1549,8 +1566,11 @@ static int device_prepare(struct device *dev, pm_message_t state)
 		callback = dev->driver->pm->prepare;
 	}
 
-	if (callback)
+	if (callback) {
+		trace_device_pm_callback_start(dev, info, state.event);
 		ret = callback(dev);
+		trace_device_pm_callback_end(dev, ret);
+	}
 
 	device_unlock(dev);
 
@@ -1582,6 +1602,7 @@ int dpm_prepare(pm_message_t state)
 {
 	int error = 0;
 
+	trace_suspend_resume(TPS("dpm_prepare"), state.event, true);
 	might_sleep();
 
 	mutex_lock(&dpm_list_mtx);
@@ -1612,6 +1633,7 @@ int dpm_prepare(pm_message_t state)
 		put_device(dev);
 	}
 	mutex_unlock(&dpm_list_mtx);
+	trace_suspend_resume(TPS("dpm_prepare"), state.event, false);
 	return error;
 }
 
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index e8d11b6630ee..dbb8350ea8dc 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -10,6 +10,7 @@
 #include <linux/mutex.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <trace/events/power.h>
 
 static LIST_HEAD(syscore_ops_list);
 static DEFINE_MUTEX(syscore_ops_lock);
@@ -49,6 +50,7 @@ int syscore_suspend(void)
 	struct syscore_ops *ops;
 	int ret = 0;
 
+	trace_suspend_resume(TPS("syscore_suspend"), 0, true);
 	pr_debug("Checking wakeup interrupts\n");
 
 	/* Return error code if there are any wakeup interrupts pending. */
@@ -70,6 +72,7 @@ int syscore_suspend(void)
 				"Interrupts enabled after %pF\n", ops->suspend);
 	}
 
+	trace_suspend_resume(TPS("syscore_suspend"), 0, false);
 	return 0;
 
  err_out:
@@ -92,6 +95,7 @@ void syscore_resume(void)
 {
 	struct syscore_ops *ops;
 
+	trace_suspend_resume(TPS("syscore_resume"), 0, true);
 	WARN_ONCE(!irqs_disabled(),
 		"Interrupts enabled before system core resume.\n");
 
@@ -103,6 +107,7 @@ void syscore_resume(void)
 		WARN_ONCE(!irqs_disabled(),
 			"Interrupts enabled after %pF\n", ops->resume);
 	}
+	trace_suspend_resume(TPS("syscore_resume"), 0, false);
 }
 EXPORT_SYMBOL_GPL(syscore_resume);
 #endif /* CONFIG_PM_SLEEP */
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 1fbe11f2a146..e473d6555f96 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -185,7 +185,7 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
 config GENERIC_CPUFREQ_CPU0
 	tristate "Generic CPU0 cpufreq driver"
-	depends on HAVE_CLK && REGULATOR && OF && THERMAL && CPU_THERMAL
+	depends on HAVE_CLK && OF
 	select PM_OPP
 	help
 	  This adds a generic cpufreq driver for CPU0 frequency management.
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 36d20d0fce27..ebac67115009 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -5,8 +5,7 @@
 # big LITTLE core layer and glue drivers
 config ARM_BIG_LITTLE_CPUFREQ
 	tristate "Generic ARM big LITTLE CPUfreq driver"
-	depends on (BIG_LITTLE && ARM_CPU_TOPOLOGY) || (ARM64 && SMP)
-	depends on HAVE_CLK
+	depends on ARM && BIG_LITTLE && ARM_CPU_TOPOLOGY && HAVE_CLK
 	select PM_OPP
 	help
 	  This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 09b9129c7bd3..ee1ae303a07c 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -104,7 +104,7 @@ static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver cpu0_cpufreq_driver = {
-	.flags = CPUFREQ_STICKY,
+	.flags = CPUFREQ_STICKY | CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 	.verify = cpufreq_generic_frequency_table_verify,
 	.target_index = cpu0_set_target,
 	.get = cpufreq_generic_get,
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ae11dd51f81d..aed2b0cb83dc 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1816,20 +1816,55 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *                              GOVERNORS                            *
  *********************************************************************/
 
+/* Must set freqs->new to intermediate frequency */
+static int __target_intermediate(struct cpufreq_policy *policy,
+				 struct cpufreq_freqs *freqs, int index)
+{
+	int ret;
+
+	freqs->new = cpufreq_driver->get_intermediate(policy, index);
+
+	/* We don't need to switch to intermediate freq */
+	if (!freqs->new)
+		return 0;
+
+	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
+		 __func__, policy->cpu, freqs->old, freqs->new);
+
+	cpufreq_freq_transition_begin(policy, freqs);
+	ret = cpufreq_driver->target_intermediate(policy, index);
+	cpufreq_freq_transition_end(policy, freqs, ret);
+
+	if (ret)
+		pr_err("%s: Failed to change to intermediate frequency: %d\n",
+		       __func__, ret);
+
+	return ret;
+}
+
 static int __target_index(struct cpufreq_policy *policy,
 			  struct cpufreq_frequency_table *freq_table, int index)
 {
-	struct cpufreq_freqs freqs;
+	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
+	unsigned int intermediate_freq = 0;
 	int retval = -EINVAL;
 	bool notify;
 
 	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
-
 	if (notify) {
-		freqs.old = policy->cur;
-		freqs.new = freq_table[index].frequency;
-		freqs.flags = 0;
+		/* Handle switching to intermediate frequency */
+		if (cpufreq_driver->get_intermediate) {
+			retval = __target_intermediate(policy, &freqs, index);
+			if (retval)
+				return retval;
+
+			intermediate_freq = freqs.new;
+			/* Set old freq to intermediate */
+			if (intermediate_freq)
+				freqs.old = freqs.new;
+		}
 
+		freqs.new = freq_table[index].frequency;
 		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
 			 __func__, policy->cpu, freqs.old, freqs.new);
 
@@ -1841,9 +1876,23 @@ static int __target_index(struct cpufreq_policy *policy,
 		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
 		       retval);
 
-	if (notify)
+	if (notify) {
 		cpufreq_freq_transition_end(policy, &freqs, retval);
 
+		/*
+		 * Failed after setting to intermediate freq? Driver should have
+		 * reverted back to initial frequency and so should we. Check
+		 * here for intermediate_freq instead of get_intermediate, in
+		 * case we haven't switched to intermediate freq at all.
+		 */
+		if (unlikely(retval && intermediate_freq)) {
+			freqs.old = intermediate_freq;
+			freqs.new = policy->restore_freq;
+			cpufreq_freq_transition_begin(policy, &freqs);
+			cpufreq_freq_transition_end(policy, &freqs, 0);
+		}
+	}
+
 	return retval;
 }
 
@@ -1875,6 +1924,9 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (target_freq == policy->cur)
 		return 0;
 
+	/* Save last value to restore later on errors */
+	policy->restore_freq = policy->cur;
+
 	if (cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 	else if (cpufreq_driver->target_index) {
@@ -2361,7 +2413,8 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
 	    !(driver_data->setpolicy || driver_data->target_index ||
 		    driver_data->target) ||
 	    (driver_data->setpolicy && (driver_data->target_index ||
-		    driver_data->target)))
+		    driver_data->target)) ||
+	    (!!driver_data->get_intermediate != !!driver_data->target_intermediate))
 		return -EINVAL;
 
 	pr_debug("trying to register driver %s\n", driver_data->name);
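
Taken together, the core changes above define a new driver contract: get_intermediate() names a stable frequency to pass through first (returning 0 skips the step), target_intermediate() performs that first switch, cpufreq_register_driver() now refuses drivers that supply one callback without the other, and on a later failure the core rolls back to policy->restore_freq. A skeletal sketch of a driver using the pair, where every foo_* name is an illustrative stand-in (the tegra-cpufreq.c changes further down are a real implementation):

	#include <linux/cpufreq.h>

	/* foo_* helpers are hypothetical stand-ins, not a real driver */
	static unsigned int foo_stable_freq_khz(unsigned int index);
	static int foo_switch_to_stable_clock(void);
	static int foo_target_index(struct cpufreq_policy *policy,
				    unsigned int index);

	/* Return an intermediate frequency in kHz, or 0 to skip the step */
	static unsigned int foo_get_intermediate(struct cpufreq_policy *policy,
						 unsigned int index)
	{
		return foo_stable_freq_khz(index);
	}

	/*
	 * Switch to the frequency foo_get_intermediate() returned; the core
	 * wraps this call in freq-transition notifications.
	 */
	static int foo_target_intermediate(struct cpufreq_policy *policy,
					   unsigned int index)
	{
		return foo_switch_to_stable_clock();
	}

	static struct cpufreq_driver foo_cpufreq_driver = {
		.verify			= cpufreq_generic_frequency_table_verify,
		/* set as a pair, or cpufreq_register_driver() returns -EINVAL */
		.get_intermediate	= foo_get_intermediate,
		.target_intermediate	= foo_target_intermediate,
		.target_index		= foo_target_index,
	};
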
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index e1c6433b16e0..1b44496b2d2b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -36,14 +36,29 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	struct cpufreq_policy *policy;
+	unsigned int sampling_rate;
 	unsigned int max_load = 0;
 	unsigned int ignore_nice;
 	unsigned int j;
 
-	if (dbs_data->cdata->governor == GOV_ONDEMAND)
+	if (dbs_data->cdata->governor == GOV_ONDEMAND) {
+		struct od_cpu_dbs_info_s *od_dbs_info =
+				dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+
+		/*
+		 * Sometimes, the ondemand governor uses an additional
+		 * multiplier to give long delays. So apply this multiplier to
+		 * the 'sampling_rate', so as to keep the wake-up-from-idle
+		 * detection logic a bit conservative.
+		 */
+		sampling_rate = od_tuners->sampling_rate;
+		sampling_rate *= od_dbs_info->rate_mult;
+
 		ignore_nice = od_tuners->ignore_nice_load;
-	else
+	} else {
+		sampling_rate = cs_tuners->sampling_rate;
 		ignore_nice = cs_tuners->ignore_nice_load;
+	}
 
 	policy = cdbs->cur_policy;
 
@@ -96,7 +111,46 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 		if (unlikely(!wall_time || wall_time < idle_time))
 			continue;
 
-		load = 100 * (wall_time - idle_time) / wall_time;
+		/*
+		 * If the CPU had gone completely idle, and a task just woke up
+		 * on this CPU now, it would be unfair to calculate 'load' the
+		 * usual way for this elapsed time-window, because it will show
+		 * near-zero load, irrespective of how CPU intensive that task
+		 * actually is. This is undesirable for latency-sensitive bursty
+		 * workloads.
+		 *
+		 * To avoid this, we reuse the 'load' from the previous
+		 * time-window and give this task a chance to start with a
+		 * reasonably high CPU frequency. (However, we shouldn't over-do
+		 * this copy, lest we get stuck at a high load (high frequency)
+		 * for too long, even when the current system load has actually
+		 * dropped down. So we perform the copy only once, upon the
+		 * first wake-up from idle.)
+		 *
+		 * Detecting this situation is easy: the governor's deferrable
+		 * timer would not have fired during CPU-idle periods. Hence
+		 * an unusually large 'wall_time' (as compared to the sampling
+		 * rate) indicates this scenario.
+		 *
+		 * prev_load can be zero in two cases and we must recalculate it
+		 * for both cases:
+		 * - during long idle intervals
+		 * - explicitly set to zero
+		 */
+		if (unlikely(wall_time > (2 * sampling_rate) &&
+			     j_cdbs->prev_load)) {
+			load = j_cdbs->prev_load;
+
+			/*
+			 * Perform a destructive copy, to ensure that we copy
+			 * the previous load only once, upon the first wake-up
+			 * from idle.
+			 */
+			j_cdbs->prev_load = 0;
+		} else {
+			load = 100 * (wall_time - idle_time) / wall_time;
+			j_cdbs->prev_load = load;
+		}
 
 		if (load > max_load)
 			max_load = load;
@@ -318,11 +372,18 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_common_info *j_cdbs =
 				dbs_data->cdata->get_cpu_cdbs(j);
+			unsigned int prev_load;
 
 			j_cdbs->cpu = j;
 			j_cdbs->cur_policy = policy;
 			j_cdbs->prev_cpu_idle = get_cpu_idle_time(j,
 				&j_cdbs->prev_cpu_wall, io_busy);
+
+			prev_load = (unsigned int)
+				(j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
+			j_cdbs->prev_load = 100 * prev_load /
+					(unsigned int) j_cdbs->prev_cpu_wall;
+
 			if (ignore_nice)
 				j_cdbs->prev_cpu_nice =
 					kcpustat_cpu(j).cpustat[CPUTIME_NICE];
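
A worked example of the wake-up-from-idle branch added above, using illustrative numbers (not from the patch): take sampling_rate = 10000 us, a CPU that just slept for about 200000 us, and prev_load = 85 from before it idled. Since wall_time exceeds twice the sampling rate and prev_load is non-zero, the governor reports the old load of 85 for exactly one sample, letting a bursty task resume at a high frequency, and then zeroes prev_load so the copy cannot repeat:

	/* Illustrative values, all times in microseconds */
	static unsigned int example_load(void)
	{
		unsigned int wall_time = 200000, idle_time = 199500;
		unsigned int sampling_rate = 10000, prev_load = 85;
		unsigned int load;

		if (wall_time > 2 * sampling_rate && prev_load) {
			load = prev_load;	/* first sample after idle: reuse 85 */
			prev_load = 0;		/* destructive copy: only once */
		} else {
			load = 100 * (wall_time - idle_time) / wall_time;
			prev_load = load;	/* normal path: remember for next time */
		}
		return load;
	}
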
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index bfb9ae14142c..cc401d147e72 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -134,6 +134,13 @@ struct cpu_dbs_common_info {
 	u64 prev_cpu_idle;
 	u64 prev_cpu_wall;
 	u64 prev_cpu_nice;
+	/*
+	 * Used to keep track of load in the previous interval. However, when
+	 * explicitly set to zero, it is used as a flag to ensure that we copy
+	 * the previous load to the current interval only once, upon the first
+	 * wake-up from idle.
+	 */
+	unsigned int prev_load;
 	struct cpufreq_policy *cur_policy;
 	struct delayed_work work;
 	/*
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index aebd4572eb6d..4e7f492ad583 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -691,14 +691,8 @@ MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
 
 static int intel_pstate_init_cpu(unsigned int cpunum)
 {
-
-	const struct x86_cpu_id *id;
 	struct cpudata *cpu;
 
-	id = x86_match_cpu(intel_pstate_cpu_ids);
-	if (!id)
-		return -ENODEV;
-
 	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
 	if (!all_cpu_data[cpunum])
 		return -ENOMEM;
diff --git a/drivers/cpufreq/ppc-corenet-cpufreq.c b/drivers/cpufreq/ppc-corenet-cpufreq.c
index 0af618abebaf..3607070797af 100644
--- a/drivers/cpufreq/ppc-corenet-cpufreq.c
+++ b/drivers/cpufreq/ppc-corenet-cpufreq.c
@@ -138,7 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	struct cpufreq_frequency_table *table;
 	struct cpu_data *data;
 	unsigned int cpu = policy->cpu;
-	u64 transition_latency_hz;
+	u64 u64temp;
 
 	np = of_get_cpu_node(cpu, NULL);
 	if (!np)
@@ -206,9 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	for_each_cpu(i, per_cpu(cpu_mask, cpu))
 		per_cpu(cpu_data, i) = data;
 
-	transition_latency_hz = 12ULL * NSEC_PER_SEC;
-	policy->cpuinfo.transition_latency =
-		do_div(transition_latency_hz, fsl_get_sys_freq());
+	/* Minimum transition latency is 12 platform clocks */
+	u64temp = 12ULL * NSEC_PER_SEC;
+	do_div(u64temp, fsl_get_sys_freq());
+	policy->cpuinfo.transition_latency = u64temp + 1;
 
 	of_node_put(np);
 
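
The rewritten computation fixes a subtle bug worth spelling out: do_div(x, y) divides in place, leaving the quotient in x and returning the remainder, so the old code stored the remainder in transition_latency rather than the intended 12-clock delay. A sketch of the fixed arithmetic, assuming an illustrative 400 MHz return from fsl_get_sys_freq():

	u64 u64temp = 12ULL * NSEC_PER_SEC;	/* 12 clock periods, scaled to ns */

	do_div(u64temp, 400000000);		/* quotient: u64temp = 30 */
	/* do_div() returns the *remainder* (0 here); the old code assigned
	 * that return value to transition_latency instead of the quotient. */
	policy->cpuinfo.transition_latency = u64temp + 1;	/* 31 ns, rounded up */
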
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index 6e774c6ac20b..8084c7f7e206 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -45,46 +45,54 @@ static struct clk *cpu_clk;
 static struct clk *pll_x_clk;
 static struct clk *pll_p_clk;
 static struct clk *emc_clk;
+static bool pll_x_prepared;
 
-static int tegra_cpu_clk_set_rate(unsigned long rate)
+static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy,
+					   unsigned int index)
+{
+	unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
+
+	/*
+	 * Don't switch to intermediate freq if:
+	 * - we are already at it, i.e. policy->cur == ifreq
+	 * - index corresponds to ifreq
+	 */
+	if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq))
+		return 0;
+
+	return ifreq;
+}
+
+static int tegra_target_intermediate(struct cpufreq_policy *policy,
+				     unsigned int index)
 {
 	int ret;
 
 	/*
 	 * Take an extra reference to the main pll so it doesn't turn
-	 * off when we move the cpu off of it
+	 * off when we move the cpu off of it as enabling it again while we
+	 * switch to it from tegra_target() would take additional time.
+	 *
+	 * When target-freq is equal to intermediate freq we don't need to
+	 * switch to an intermediate freq and so this routine isn't called.
+	 * Also, we wouldn't be using pll_x anymore and must not take extra
+	 * reference to it, as it can be disabled now to save some power.
 	 */
 	clk_prepare_enable(pll_x_clk);
 
 	ret = clk_set_parent(cpu_clk, pll_p_clk);
-	if (ret) {
-		pr_err("Failed to switch cpu to clock pll_p\n");
-		goto out;
-	}
-
-	if (rate == clk_get_rate(pll_p_clk))
-		goto out;
-
-	ret = clk_set_rate(pll_x_clk, rate);
-	if (ret) {
-		pr_err("Failed to change pll_x to %lu\n", rate);
-		goto out;
-	}
-
-	ret = clk_set_parent(cpu_clk, pll_x_clk);
-	if (ret) {
-		pr_err("Failed to switch cpu to clock pll_x\n");
-		goto out;
-	}
+	if (ret)
+		clk_disable_unprepare(pll_x_clk);
+	else
+		pll_x_prepared = true;
 
-out:
-	clk_disable_unprepare(pll_x_clk);
 	return ret;
 }
 
 static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
 {
 	unsigned long rate = freq_table[index].frequency;
+	unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;
 	int ret = 0;
 
 	/*
@@ -98,10 +106,30 @@ static int tegra_target(struct cpufreq_policy *policy, unsigned int index)
 	else
 		clk_set_rate(emc_clk, 100000000);	/* emc 50Mhz */
 
-	ret = tegra_cpu_clk_set_rate(rate * 1000);
+	/*
+	 * target freq == pll_p, don't need to take extra reference to pll_x_clk
+	 * as it isn't used anymore.
+	 */
+	if (rate == ifreq)
+		return clk_set_parent(cpu_clk, pll_p_clk);
+
+	ret = clk_set_rate(pll_x_clk, rate * 1000);
+	/* Restore to earlier frequency on error, i.e. pll_x */
 	if (ret)
-		pr_err("cpu-tegra: Failed to set cpu frequency to %lu kHz\n",
-		       rate);
+		pr_err("Failed to change pll_x to %lu\n", rate);
+
+	ret = clk_set_parent(cpu_clk, pll_x_clk);
+	/* This shouldn't fail while changing or restoring */
+	WARN_ON(ret);
+
+	/*
+	 * Drop count to pll_x clock only if we switched to intermediate freq
+	 * earlier while transitioning to a target frequency.
+	 */
+	if (pll_x_prepared) {
+		clk_disable_unprepare(pll_x_clk);
+		pll_x_prepared = false;
+	}
 
 	return ret;
 }
@@ -137,16 +165,18 @@ static int tegra_cpu_exit(struct cpufreq_policy *policy)
 }
 
 static struct cpufreq_driver tegra_cpufreq_driver = {
 	.flags			= CPUFREQ_NEED_INITIAL_FREQ_CHECK,
 	.verify			= cpufreq_generic_frequency_table_verify,
-	.target_index		= tegra_target,
-	.get			= cpufreq_generic_get,
-	.init			= tegra_cpu_init,
-	.exit			= tegra_cpu_exit,
-	.name			= "tegra",
-	.attr			= cpufreq_generic_attr,
+	.get_intermediate	= tegra_get_intermediate,
+	.target_intermediate	= tegra_target_intermediate,
+	.target_index		= tegra_target,
+	.get			= cpufreq_generic_get,
+	.init			= tegra_cpu_init,
+	.exit			= tegra_cpu_exit,
+	.name			= "tegra",
+	.attr			= cpufreq_generic_attr,
 #ifdef CONFIG_PM
 	.suspend		= cpufreq_generic_suspend,
 #endif
 };
 
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index 2b859249303b..b0e61bf261a7 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -142,6 +142,16 @@ static inline acpi_handle func_to_handle(struct acpiphp_func *func)
 	return func_to_acpi_device(func)->handle;
 }
 
+struct acpiphp_root_context {
+	struct acpi_hotplug_context hp;
+	struct acpiphp_bridge *root_bridge;
+};
+
+static inline struct acpiphp_root_context *to_acpiphp_root_context(struct acpi_hotplug_context *hp)
+{
+	return container_of(hp, struct acpiphp_root_context, hp);
+}
+
 /*
  * struct acpiphp_attention_info - device specific attention registration
  *
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 75e178330215..91aa3d780138 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -373,17 +373,13 @@ static acpi_status acpiphp_add_context(acpi_handle handle, u32 lvl, void *data,
 
 static struct acpiphp_bridge *acpiphp_dev_to_bridge(struct acpi_device *adev)
 {
-	struct acpiphp_context *context;
 	struct acpiphp_bridge *bridge = NULL;
 
 	acpi_lock_hp_context();
-	context = acpiphp_get_context(adev);
-	if (context) {
-		bridge = context->bridge;
+	if (adev->hp) {
+		bridge = to_acpiphp_root_context(adev->hp)->root_bridge;
 		if (bridge)
 			get_bridge(bridge);
-
-		acpiphp_put_context(context);
 	}
 	acpi_unlock_hp_context();
 	return bridge;
@@ -881,7 +877,17 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
 	 */
 	get_device(&bus->dev);
 
-	if (!pci_is_root_bus(bridge->pci_bus)) {
+	acpi_lock_hp_context();
+	if (pci_is_root_bus(bridge->pci_bus)) {
+		struct acpiphp_root_context *root_context;
+
+		root_context = kzalloc(sizeof(*root_context), GFP_KERNEL);
+		if (!root_context)
+			goto err;
+
+		root_context->root_bridge = bridge;
+		acpi_set_hp_context(adev, &root_context->hp, NULL, NULL, NULL);
+	} else {
 		struct acpiphp_context *context;
 
 		/*
@@ -890,21 +896,16 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
 		 * parent is going to be handled by pciehp, in which case this
 		 * bridge is not interesting to us either.
 		 */
-		acpi_lock_hp_context();
 		context = acpiphp_get_context(adev);
-		if (!context) {
-			acpi_unlock_hp_context();
-			put_device(&bus->dev);
-			pci_dev_put(bridge->pci_dev);
-			kfree(bridge);
-			return;
-		}
+		if (!context)
+			goto err;
+
 		bridge->context = context;
 		context->bridge = bridge;
 		/* Get a reference to the parent bridge. */
 		get_bridge(context->func.parent);
-		acpi_unlock_hp_context();
 	}
+	acpi_unlock_hp_context();
 
 	/* Must be added to the list prior to calling acpiphp_add_context(). */
 	mutex_lock(&bridge_mutex);
@@ -919,6 +920,30 @@ void acpiphp_enumerate_slots(struct pci_bus *bus)
 		cleanup_bridge(bridge);
 		put_bridge(bridge);
 	}
+	return;
+
+ err:
+	acpi_unlock_hp_context();
+	put_device(&bus->dev);
+	pci_dev_put(bridge->pci_dev);
+	kfree(bridge);
+}
+
+void acpiphp_drop_bridge(struct acpiphp_bridge *bridge)
+{
+	if (pci_is_root_bus(bridge->pci_bus)) {
+		struct acpiphp_root_context *root_context;
+		struct acpi_device *adev;
+
+		acpi_lock_hp_context();
+		adev = ACPI_COMPANION(bridge->pci_bus->bridge);
+		root_context = to_acpiphp_root_context(adev->hp);
+		adev->hp = NULL;
+		acpi_unlock_hp_context();
+		kfree(root_context);
+	}
+	cleanup_bridge(bridge);
+	put_bridge(bridge);
 }
 
 /**
@@ -936,8 +961,7 @@ void acpiphp_remove_slots(struct pci_bus *bus)
 	list_for_each_entry(bridge, &bridge_list, list)
 		if (bridge->pci_bus == bus) {
 			mutex_unlock(&bridge_mutex);
-			cleanup_bridge(bridge);
-			put_bridge(bridge);
+			acpiphp_drop_bridge(bridge);
 			return;
 		}
 