author	Rafael J. Wysocki <rjw@sisk.pl>	2012-07-03 13:07:42 -0400
committer	Rafael J. Wysocki <rjw@sisk.pl>	2012-07-03 13:07:42 -0400
commit	cbc9ef0287ab764d3da0129efa673808df641fe3 (patch)
tree	43f128194beaeeda86dcc962a28af8c03f924404 /drivers/base
parent	e3b8cdd8e4ea51e46d3ff54d7e3568afc24654ec (diff)
PM / Domains: Add preliminary support for cpuidle, v2
On some systems there are CPU cores located in the same power domains as I/O devices. In that case, power can only be removed from the domain if all I/O devices in it are not in use and the CPU core is idle. Add preliminary support for that to the generic PM domains framework.

First, the platform is expected to provide a cpuidle driver with one extra state designated for use with the generic PM domains code. This state should be initially disabled and its exit_latency value should be set to whatever time is needed to bring up the CPU core itself after restoring power to it, not including the domain's power on latency. Its .enter() callback should point to a procedure that will remove power from the domain containing the CPU core at the end of the CPU power transition.

The remaining characteristics of the extra cpuidle state, referred to as the "domain" cpuidle state below (e.g. power usage, target residency), should be populated in accordance with the properties of the hardware.

Next, the platform should execute genpd_attach_cpuidle() on the PM domain containing the CPU core. That will cause the generic PM domains framework to treat that domain in a special way such that:

 * When all devices in the domain have been suspended and it is about to be turned off, the states of the devices will be saved, but power will not be removed from the domain. Instead, the "domain" cpuidle state will be enabled so that power can be removed from the domain when the CPU core is idle and the state has been chosen as the target by the cpuidle governor.

 * When the first I/O device in the domain is resumed and __pm_genpd_poweron() is called for the first time after power has been removed from the domain, the "domain" cpuidle state will be disabled to avoid subsequent surprise power removals via cpuidle.

The effective exit_latency value of the "domain" cpuidle state depends on the time needed to bring up the CPU core itself after restoring power to it as well as on the power on latency of the domain containing the CPU core. Thus the "domain" cpuidle state's exit_latency has to be recomputed every time the domain's power on latency is updated, which may happen every time power is restored to the domain, if the measured power on latency is greater than the latency stored in the corresponding generic_pm_domain structure.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Reviewed-by: Kevin Hilman <khilman@ti.com>
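For illustration, below is a minimal sketch (not part of this patch) of the platform-side wiring described above: one extra, initially disabled cpuidle state whose exit_latency covers only the CPU bring-up time and whose .enter() callback removes power from the domain, handed over to the framework with genpd_attach_cpuidle(). All my_* names, the state index and the latency numbers are hypothetical; only genpd_attach_cpuidle(), struct cpuidle_driver/cpuidle_state and the .disabled flag come from this patch and the existing cpuidle code.

/* Illustrative sketch only; all my_* names and numbers are hypothetical. */
#include <linux/module.h>
#include <linux/cpuidle.h>
#include <linux/pm_domain.h>

#define MY_DOMAIN_STATE		1	/* index of the extra "domain" state */

/* Hypothetical platform helper that cuts power to the CPU core's domain. */
extern void my_cpu_and_domain_power_down(void);

static int my_domain_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	/*
	 * Platform-specific: idle the CPU core and remove power from the
	 * domain containing it at the end of the CPU power transition.
	 */
	my_cpu_and_domain_power_down();
	return index;
}

static struct cpuidle_driver my_idle_driver = {
	.name			= "my_idle",
	.owner			= THIS_MODULE,
	/* states[0] (the platform's regular C-state) omitted for brevity. */
	.states[MY_DOMAIN_STATE] = {
		.name		= "C1-domain",
		.desc		= "CPU and domain power down",
		.exit_latency	= 300,	/* CPU bring-up time only, in us */
		.target_residency = 1000,
		.flags		= CPUIDLE_FLAG_TIME_VALID,
		.enter		= my_domain_enter,
		.disabled	= true,	/* must start out disabled */
	},
	.state_count		= MY_DOMAIN_STATE + 1,
};

/*
 * Called from the platform's PM domain setup code, after the cpuidle
 * driver has been registered.  From then on the framework enables the
 * "domain" state instead of powering the domain off directly, and
 * disables it again in __pm_genpd_poweron().
 */
static int my_domain_setup_cpuidle(struct generic_pm_domain *genpd)
{
	return genpd_attach_cpuidle(genpd, MY_DOMAIN_STATE);
}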
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/power/domain.c | 117
1 file changed, 117 insertions(+), 0 deletions(-)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index da1d52576ec9..4b5f090fccb6 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -139,6 +139,19 @@ static void genpd_set_active(struct generic_pm_domain *genpd)
 	genpd->status = GPD_STATE_ACTIVE;
 }
 
+static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
+{
+	s64 usecs64;
+
+	if (!genpd->cpu_data)
+		return;
+
+	usecs64 = genpd->power_on_latency_ns;
+	do_div(usecs64, NSEC_PER_USEC);
+	usecs64 += genpd->cpu_data->saved_exit_latency;
+	genpd->cpu_data->idle_state->exit_latency = usecs64;
+}
+
 /**
  * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
  * @genpd: PM domain to power up.
@@ -176,6 +189,13 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		return 0;
 	}
 
+	if (genpd->cpu_data) {
+		cpuidle_pause_and_lock();
+		genpd->cpu_data->idle_state->disabled = true;
+		cpuidle_resume_and_unlock();
+		goto out;
+	}
+
 	/*
 	 * The list is guaranteed not to change while the loop below is being
 	 * executed, unless one of the masters' .power_on() callbacks fiddles
@@ -215,6 +235,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		if (elapsed_ns > genpd->power_on_latency_ns) {
 			genpd->power_on_latency_ns = elapsed_ns;
 			genpd->max_off_time_changed = true;
+			genpd_recalc_cpu_exit_latency(genpd);
 			if (genpd->name)
 				pr_warning("%s: Power-on latency exceeded, "
 					"new value %lld ns\n", genpd->name,
@@ -222,6 +243,7 @@ int __pm_genpd_poweron(struct generic_pm_domain *genpd)
 		}
 	}
 
+ out:
 	genpd_set_active(genpd);
 
 	return 0;
@@ -455,6 +477,21 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
 		}
 	}
 
+	if (genpd->cpu_data) {
+		/*
+		 * If cpu_data is set, cpuidle should turn the domain off when
+		 * the CPU in it is idle.  In that case we don't decrement the
+		 * subdomain counts of the master domains, so that power is not
+		 * removed from the current domain prematurely as a result of
+		 * cutting off the masters' power.
+		 */
+		genpd->status = GPD_STATE_POWER_OFF;
+		cpuidle_pause_and_lock();
+		genpd->cpu_data->idle_state->disabled = false;
+		cpuidle_resume_and_unlock();
+		goto out;
+	}
+
 	if (genpd->power_off) {
 		ktime_t time_start;
 		s64 elapsed_ns;
@@ -1600,6 +1637,86 @@ int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
 }
 EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
 
+int genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
+{
+	struct cpuidle_driver *cpuidle_drv;
+	struct gpd_cpu_data *cpu_data;
+	struct cpuidle_state *idle_state;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd) || state < 0)
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	if (genpd->cpu_data) {
+		ret = -EEXIST;
+		goto out;
+	}
+	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
+	if (!cpu_data) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	cpuidle_drv = cpuidle_driver_ref();
+	if (!cpuidle_drv) {
+		ret = -ENODEV;
+		goto out;
+	}
+	if (cpuidle_drv->state_count <= state) {
+		ret = -EINVAL;
+		goto err;
+	}
+	idle_state = &cpuidle_drv->states[state];
+	if (!idle_state->disabled) {
+		ret = -EAGAIN;
+		goto err;
+	}
+	cpu_data->idle_state = idle_state;
+	cpu_data->saved_exit_latency = idle_state->exit_latency;
+	genpd->cpu_data = cpu_data;
+	genpd_recalc_cpu_exit_latency(genpd);
+
+ out:
+	genpd_release_lock(genpd);
+	return ret;
+
+ err:
+	cpuidle_driver_unref();
+	goto out;
+}
+
+int genpd_detach_cpuidle(struct generic_pm_domain *genpd)
+{
+	struct gpd_cpu_data *cpu_data;
+	struct cpuidle_state *idle_state;
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(genpd))
+		return -EINVAL;
+
+	genpd_acquire_lock(genpd);
+
+	cpu_data = genpd->cpu_data;
+	if (!cpu_data) {
+		ret = -ENODEV;
+		goto out;
+	}
+	idle_state = cpu_data->idle_state;
+	if (!idle_state->disabled) {
+		ret = -EAGAIN;
+		goto out;
+	}
+	idle_state->exit_latency = cpu_data->saved_exit_latency;
+	cpuidle_driver_unref();
+	genpd->cpu_data = NULL;
+	kfree(cpu_data);
+
+ out:
+	genpd_release_lock(genpd);
+	return ret;
+}
+
 /* Default device callbacks for generic PM domains. */
 
 /**