Diffstat (limited to 'kernel')
 kernel/pm_qos_params.c | 37 +++++++++++++++++++++++++------------
 1 file changed, 25 insertions(+), 12 deletions(-)
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index aeaa7f846821..6a8fad82a3ad 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -53,11 +53,17 @@ enum pm_qos_type {
 	PM_QOS_MIN		/* return the smallest value */
 };
 
+/*
+ * Note: The lockless read path depends on the CPU accessing
+ * target_value atomically.  Atomic access is only guaranteed on all CPU
+ * types Linux supports for 32-bit quantities.
+ */
 struct pm_qos_object {
 	struct plist_head requests;
 	struct blocking_notifier_head *notifiers;
 	struct miscdevice pm_qos_power_miscdev;
 	char *name;
+	s32 target_value;	/* Do not change to 64 bit */
 	s32 default_value;
 	enum pm_qos_type type;
 };
@@ -70,7 +76,8 @@ static struct pm_qos_object cpu_dma_pm_qos = {
 	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
 	.notifiers = &cpu_dma_lat_notifier,
 	.name = "cpu_dma_latency",
-	.default_value = 2000 * USEC_PER_SEC,
+	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN,
 };
 
@@ -79,7 +86,8 @@ static struct pm_qos_object network_lat_pm_qos = {
 	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
 	.notifiers = &network_lat_notifier,
 	.name = "network_latency",
-	.default_value = 2000 * USEC_PER_SEC,
+	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN
 };
 
@@ -89,7 +97,8 @@ static struct pm_qos_object network_throughput_pm_qos = {
 	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
 	.notifiers = &network_throughput_notifier,
 	.name = "network_throughput",
-	.default_value = 0,
+	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
 	.type = PM_QOS_MAX,
 };
 
@@ -132,6 +141,16 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
 	}
 }
 
+static inline s32 pm_qos_read_value(struct pm_qos_object *o)
+{
+	return o->target_value;
+}
+
+static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
+{
+	o->target_value = value;
+}
+
 static void update_target(struct pm_qos_object *o, struct plist_node *node,
 		int del, int value)
 {
@@ -156,6 +175,7 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
 		plist_add(node, &o->requests);
 	}
 	curr_value = pm_qos_get_value(o);
+	pm_qos_set_value(o, curr_value);
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	if (prev_value != curr_value)
@@ -190,18 +210,11 @@ static int find_pm_qos_object_by_minor(int minor)
  * pm_qos_request - returns current system wide qos expectation
  * @pm_qos_class: identification of which qos value is requested
  *
- * This function returns the current target value in an atomic manner.
+ * This function returns the current target value.
  */
 int pm_qos_request(int pm_qos_class)
 {
-	unsigned long flags;
-	int value;
-
-	spin_lock_irqsave(&pm_qos_lock, flags);
-	value = pm_qos_get_value(pm_qos_array[pm_qos_class]);
-	spin_unlock_irqrestore(&pm_qos_lock, flags);
-
-	return value;
+	return pm_qos_read_value(pm_qos_array[pm_qos_class]);
 }
 EXPORT_SYMBOL_GPL(pm_qos_request);
 
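The core of the change is visible in update_target() and pm_qos_request(): writers still recompute the aggregate constraint under pm_qos_lock, but they now also publish the result into the 32-bit target_value field, which readers load without taking the lock. The standalone C sketch below models that pattern only; it is not kernel code. The names qos_cache, qos_update() and qos_read() are illustrative, and C11 relaxed atomics stand in for the kernel's assumption that aligned 32-bit accesses are naturally atomic on every supported CPU.

/*
 * Minimal model of the lockless-read pattern introduced by this patch.
 * Writers serialize with a mutex while recomputing the aggregate, then
 * publish it as a single 32-bit store; readers only load that value.
 * Build with: cc -std=c11 -pthread qos_sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct qos_cache {
	pthread_mutex_t lock;      /* serializes writers, like pm_qos_lock  */
	int32_t values[8];         /* the constraint list, much simplified  */
	int nvalues;
	_Atomic int32_t target;    /* lock-free snapshot for readers        */
};

/* Writer path: recompute the minimum under the lock, then publish it. */
static void qos_update(struct qos_cache *c, int slot, int32_t value)
{
	int32_t min;
	int i;

	pthread_mutex_lock(&c->lock);
	c->values[slot] = value;
	min = c->values[0];
	for (i = 1; i < c->nvalues; i++)
		if (c->values[i] < min)
			min = c->values[i];
	/* Analogue of pm_qos_set_value(): one 32-bit store. */
	atomic_store_explicit(&c->target, min, memory_order_relaxed);
	pthread_mutex_unlock(&c->lock);
}

/* Reader path: analogue of pm_qos_read_value()/pm_qos_request().
 * In real use this would run on other threads, without the lock. */
static int32_t qos_read(struct qos_cache *c)
{
	return atomic_load_explicit(&c->target, memory_order_relaxed);
}

int main(void)
{
	struct qos_cache c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.values = { 100, 25, 60 },
		.nvalues = 3,
	};

	atomic_init(&c.target, 25);
	qos_update(&c, 1, 80);                        /* relax one constraint */
	printf("target = %d\n", (int)qos_read(&c));   /* prints 60            */
	return 0;
}

The trade-off mirrors the patch: a read of the current constraint becomes a single load on the hot path, paid for by one extra store on the update path, which already holds the lock.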