author      Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2014-02-10 18:35:38 -0500
committer   Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2014-02-10 18:35:38 -0500
commit      2d984ad132a87ca2112f81f21039493176a8bca0 (patch)
tree        5bcec9039870a698baf6febef19742c1c3622d50 /drivers/base/power/qos.c
parent      327adaedf2218b0e318eb393aa79cf2be64c199f (diff)
PM / QoS: Introduce latency tolerance device PM QoS type
Add a new latency tolerance device PM QoS type to be used for specifying active state (RPM_ACTIVE) memory access (DMA) latency tolerance requirements for devices. It may be used to prevent hardware from choosing overly aggressive energy-saving operation modes (causing too much latency to appear) for the whole platform.

This feature requires hardware support, so it will only be available for devices having a new .set_latency_tolerance() callback in struct dev_pm_info populated, in which case the routine pointed to by it should implement whatever is necessary to transfer the effective requirement value to the hardware.

Whenever the effective latency tolerance changes for the device, its .set_latency_tolerance() callback will be executed and the effective value will be passed to it. If that value is negative, which means that the list of latency tolerance requirements for the device is empty, the callback is expected to switch the underlying hardware latency tolerance control mechanism to an autonomous mode if available. If that value is PM_QOS_LATENCY_ANY, in turn, and the hardware supports a special "no requirement" setting, the callback is expected to use it. That allows software to prevent the hardware from automatically updating the device's latency tolerance in response to its power state changes (e.g. during transitions from D3cold to D0), which generally may be done in the autonomous latency tolerance control mode.

If .set_latency_tolerance() is present for the device, a new pm_qos_latency_tolerance_us attribute will be present in the device's power directory in sysfs. Then, user space can use that attribute to specify its latency tolerance requirement for the device, if any. Writing "any" to it means "no requirement, but do not let the hardware control latency tolerance" and writing "auto" to it allows the hardware to be switched to the autonomous mode if there are no other requirements from the kernel side in the device's list.

This changeset includes a fix from Mika Westerberg.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
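[Editorial sketch] To make the callback contract above concrete, here is a minimal driver-side sketch. Everything named foo_* below, including the register layout and bit encodings, is a hypothetical assumption for illustration; only the .set_latency_tolerance() hook in struct dev_pm_info, PM_QOS_LATENCY_ANY, and the negative-value convention come from this patch.

#include <linux/device.h>
#include <linux/io.h>
#include <linux/pm_qos.h>

/* Hypothetical register encodings, for illustration only. */
#define FOO_LTR_AUTO		0x0		/* autonomous hardware control */
#define FOO_LTR_SW_CONTROL	(1U << 31)	/* software-controlled mode */
#define FOO_LTR_NO_REQ		0x3ff		/* "no requirement" encoding */

struct foo_device {
	void __iomem *ltr_reg;	/* latency tolerance control register */
};

static void foo_set_latency_tolerance(struct device *dev, s32 val)
{
	struct foo_device *fdev = dev_get_drvdata(dev);

	if (val < 0) {
		/* Requirement list is empty: hand control back to the
		 * hardware's autonomous mechanism, as described above. */
		writel(FOO_LTR_AUTO, fdev->ltr_reg);
	} else if (val == PM_QOS_LATENCY_ANY) {
		/* "No requirement", but keep software in control so the
		 * hardware does not change the value on power state
		 * transitions (e.g. D3cold -> D0). */
		writel(FOO_LTR_SW_CONTROL | FOO_LTR_NO_REQ, fdev->ltr_reg);
	} else {
		/* Program the effective tolerance, in microseconds. */
		writel(FOO_LTR_SW_CONTROL | (u32)val, fdev->ltr_reg);
	}
}

A driver would point dev->power.set_latency_tolerance at this routine, typically before the device is registered so that the sysfs attribute gets created. After that, writes of a microsecond value, "any", or "auto" to the device's power/pm_qos_latency_tolerance_us attribute end up in the dev_pm_qos_update_user_latency_tolerance() function added in the diff below.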
Diffstat (limited to 'drivers/base/power/qos.c')
-rw-r--r--   drivers/base/power/qos.c   144
1 file changed, 122 insertions(+), 22 deletions(-)
diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c
index c754e55f9dcb..84756f7f09d9 100644
--- a/drivers/base/power/qos.c
+++ b/drivers/base/power/qos.c
@@ -151,6 +151,14 @@ static int apply_constraint(struct dev_pm_qos_request *req,
 					     req);
 		}
 		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		ret = pm_qos_update_target(&qos->latency_tolerance,
+					   &req->data.pnode, action, value);
+		if (ret) {
+			value = pm_qos_read_value(&qos->latency_tolerance);
+			req->dev->power.set_latency_tolerance(req->dev, value);
+		}
+		break;
 	case DEV_PM_QOS_FLAGS:
 		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
 					  action, value);
@@ -194,6 +202,13 @@ static int dev_pm_qos_constraints_allocate(struct device *dev)
 	c->type = PM_QOS_MIN;
 	c->notifiers = n;
 
+	c = &qos->latency_tolerance;
+	plist_head_init(&c->list);
+	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
+	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
+	c->type = PM_QOS_MIN;
+
 	INIT_LIST_HEAD(&qos->flags.list);
 
 	spin_lock_irq(&dev->power.lock);
@@ -247,6 +262,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
 		memset(req, 0, sizeof(*req));
 	}
+	c = &qos->latency_tolerance;
+	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
+		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
+		memset(req, 0, sizeof(*req));
+	}
 	f = &qos->flags;
 	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
 		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
@@ -266,6 +286,40 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 	mutex_unlock(&dev_pm_qos_sysfs_mtx);
 }
 
+static bool dev_pm_qos_invalid_request(struct device *dev,
+				       struct dev_pm_qos_request *req)
+{
+	return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE
+			&& !dev->power.set_latency_tolerance);
+}
+
+static int __dev_pm_qos_add_request(struct device *dev,
+				    struct dev_pm_qos_request *req,
+				    enum dev_pm_qos_req_type type, s32 value)
+{
+	int ret = 0;
+
+	if (!dev || dev_pm_qos_invalid_request(dev, req))
+		return -EINVAL;
+
+	if (WARN(dev_pm_qos_request_active(req),
+		 "%s() called for already added request\n", __func__))
+		return -EINVAL;
+
+	if (IS_ERR(dev->power.qos))
+		ret = -ENODEV;
+	else if (!dev->power.qos)
+		ret = dev_pm_qos_constraints_allocate(dev);
+
+	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
+	if (!ret) {
+		req->dev = dev;
+		req->type = type;
+		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
+	}
+	return ret;
+}
+
 /**
  * dev_pm_qos_add_request - inserts new qos request into the list
  * @dev: target device for the constraint
@@ -291,31 +345,11 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
 			   enum dev_pm_qos_req_type type, s32 value)
 {
-	int ret = 0;
-
-	if (!dev || !req) /*guard against callers passing in null */
-		return -EINVAL;
-
-	if (WARN(dev_pm_qos_request_active(req),
-		 "%s() called for already added request\n", __func__))
-		return -EINVAL;
+	int ret;
 
 	mutex_lock(&dev_pm_qos_mtx);
-
-	if (IS_ERR(dev->power.qos))
-		ret = -ENODEV;
-	else if (!dev->power.qos)
-		ret = dev_pm_qos_constraints_allocate(dev);
-
-	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
-	if (!ret) {
-		req->dev = dev;
-		req->type = type;
-		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);
-	}
-
+	ret = __dev_pm_qos_add_request(dev, req, type, value);
 	mutex_unlock(&dev_pm_qos_mtx);
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
@@ -343,6 +377,7 @@ static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
 
 	switch(req->type) {
 	case DEV_PM_QOS_RESUME_LATENCY:
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
 		curr_value = req->data.pnode.prio;
 		break;
 	case DEV_PM_QOS_FLAGS:
@@ -563,6 +598,10 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
 		req = dev->power.qos->resume_latency_req;
 		dev->power.qos->resume_latency_req = NULL;
 		break;
+	case DEV_PM_QOS_LATENCY_TOLERANCE:
+		req = dev->power.qos->latency_tolerance_req;
+		dev->power.qos->latency_tolerance_req = NULL;
+		break;
 	case DEV_PM_QOS_FLAGS:
 		req = dev->power.qos->flags_req;
 		dev->power.qos->flags_req = NULL;
@@ -768,6 +807,67 @@ int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
 	pm_runtime_put(dev);
 	return ret;
 }
+
+/**
+ * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
+ * @dev: Device to obtain the user space latency tolerance for.
+ */
+s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
+{
+	s32 ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+	ret = IS_ERR_OR_NULL(dev->power.qos)
+		|| !dev->power.qos->latency_tolerance_req ?
+			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
+			dev->power.qos->latency_tolerance_req->data.pnode.prio;
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
+
+/**
+ * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
+ * @dev: Device to update the user space latency tolerance for.
+ * @val: New user space latency tolerance for @dev (negative values disable).
+ */
+int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
+{
+	int ret;
+
+	mutex_lock(&dev_pm_qos_mtx);
+
+	if (IS_ERR_OR_NULL(dev->power.qos)
+	    || !dev->power.qos->latency_tolerance_req) {
+		struct dev_pm_qos_request *req;
+
+		if (val < 0) {
+			ret = -EINVAL;
+			goto out;
+		}
+		req = kzalloc(sizeof(*req), GFP_KERNEL);
+		if (!req) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
+		if (ret < 0) {
+			kfree(req);
+			goto out;
+		}
+		dev->power.qos->latency_tolerance_req = req;
+	} else {
+		if (val < 0) {
+			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
+			ret = 0;
+		} else {
+			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
+		}
+	}
+
+ out:
+	mutex_unlock(&dev_pm_qos_mtx);
+	return ret;
+}
 #else /* !CONFIG_PM_RUNTIME */
 static void __dev_pm_qos_hide_latency_limit(struct device *dev) {}
 static void __dev_pm_qos_hide_flags(struct device *dev) {}