author     Linus Torvalds <torvalds@linux-foundation.org>   2011-05-29 14:18:09 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-05-29 14:18:09 -0400
commit     f310642123e0d32d919c60ca3fab5acd130c4ba3 (patch)
tree       f3844152e2e8c0fdd01621a400f84c8a159252a0 /kernel
parent     ef1d57599dc904fdb31b8e9b5336350d21a1fde1 (diff)
parent     5d4c47e0195b989f284907358bd5c268a44b91c7 (diff)
Merge branch 'idle-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6
* 'idle-release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-idle-2.6:
  x86 idle: deprecate mwait_idle() and "idle=mwait" cmdline param
  x86 idle: deprecate "no-hlt" cmdline param
  x86 idle APM: deprecate CONFIG_APM_CPU_IDLE
  x86 idle floppy: deprecate disable_hlt()
  x86 idle: EXPORT_SYMBOL(default_idle, pm_idle) only when APM demands it
  x86 idle: clarify AMD erratum 400 workaround
  idle governor: Avoid lock acquisition to read pm_qos before entering idle
  cpuidle: menu: fixed wrapping timers at 4.294 seconds
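The "idle governor" entry is the one that touches kernel/pm_qos_params.c below: update_target() now mirrors the aggregated constraint into a plain 32-bit target_value while still holding pm_qos_lock, and pm_qos_request() reads that mirror without taking the lock. The following is a rough userspace sketch of the same pattern, not kernel code: the names qos_update_request()/qos_read_target() and the fixed-size request array are made up for this illustration, and C11 relaxed atomics stand in for the kernel's assumption that aligned 32-bit loads and stores are naturally atomic on all supported CPUs.

/* Sketch only: writer recomputes the target under a lock and publishes it
 * as a single 32-bit value; readers on the hot path take no lock at all. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t qos_lock = PTHREAD_MUTEX_INITIALIZER;
static int32_t qos_requests[8];                 /* toy request list; 0 = unused slot */
static _Atomic int32_t qos_target = INT32_MAX;  /* mirrored aggregate target */

/* Writer side: recompute the minimum under the lock, then publish it
 * (analogous to pm_qos_set_value() inside update_target()). */
static void qos_update_request(int slot, int32_t value)
{
	int32_t min = INT32_MAX;

	pthread_mutex_lock(&qos_lock);
	qos_requests[slot] = value;
	for (int i = 0; i < 8; i++)
		if (qos_requests[i] && qos_requests[i] < min)
			min = qos_requests[i];
	atomic_store_explicit(&qos_target, min, memory_order_relaxed);
	pthread_mutex_unlock(&qos_lock);
}

/* Reader side (idle path): a single 32-bit load, no lock,
 * analogous to pm_qos_read_value()/pm_qos_request(). */
static int32_t qos_read_target(void)
{
	return atomic_load_explicit(&qos_target, memory_order_relaxed);
}

int main(void)
{
	qos_update_request(0, 200);   /* e.g. a 200 usec latency request */
	qos_update_request(1, 50);
	printf("target = %d\n", qos_read_target());   /* prints 50 */
	return 0;
}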
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/pm_qos_params.c  37
1 file changed, 25 insertions, 12 deletions
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index fd8d1e035df9..6824ca7d4d0c 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -54,11 +54,17 @@ enum pm_qos_type {
 	PM_QOS_MIN		/* return the smallest value */
 };
 
+/*
+ * Note: The lockless read path depends on the CPU accessing
+ * target_value atomically. Atomic access is only guaranteed on all CPU
+ * types linux supports for 32 bit quantites
+ */
 struct pm_qos_object {
 	struct plist_head requests;
 	struct blocking_notifier_head *notifiers;
 	struct miscdevice pm_qos_power_miscdev;
 	char *name;
+	s32 target_value;	/* Do not change to 64 bit */
 	s32 default_value;
 	enum pm_qos_type type;
 };
@@ -71,7 +77,8 @@ static struct pm_qos_object cpu_dma_pm_qos = {
 	.requests = PLIST_HEAD_INIT(cpu_dma_pm_qos.requests, pm_qos_lock),
 	.notifiers = &cpu_dma_lat_notifier,
 	.name = "cpu_dma_latency",
-	.default_value = 2000 * USEC_PER_SEC,
+	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
+	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN,
 };
 
@@ -80,7 +87,8 @@ static struct pm_qos_object network_lat_pm_qos = {
 	.requests = PLIST_HEAD_INIT(network_lat_pm_qos.requests, pm_qos_lock),
 	.notifiers = &network_lat_notifier,
 	.name = "network_latency",
-	.default_value = 2000 * USEC_PER_SEC,
+	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
+	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
 	.type = PM_QOS_MIN
 };
 
@@ -90,7 +98,8 @@ static struct pm_qos_object network_throughput_pm_qos = {
 	.requests = PLIST_HEAD_INIT(network_throughput_pm_qos.requests, pm_qos_lock),
 	.notifiers = &network_throughput_notifier,
 	.name = "network_throughput",
-	.default_value = 0,
+	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
+	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
 	.type = PM_QOS_MAX,
 };
 
@@ -136,6 +145,16 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
 	}
 }
 
+static inline s32 pm_qos_read_value(struct pm_qos_object *o)
+{
+	return o->target_value;
+}
+
+static inline void pm_qos_set_value(struct pm_qos_object *o, s32 value)
+{
+	o->target_value = value;
+}
+
 static void update_target(struct pm_qos_object *o, struct plist_node *node,
 			  int del, int value)
 {
@@ -160,6 +179,7 @@ static void update_target(struct pm_qos_object *o, struct plist_node *node,
 		plist_add(node, &o->requests);
 	}
 	curr_value = pm_qos_get_value(o);
+	pm_qos_set_value(o, curr_value);
 	spin_unlock_irqrestore(&pm_qos_lock, flags);
 
 	if (prev_value != curr_value)
@@ -194,18 +214,11 @@ static int find_pm_qos_object_by_minor(int minor)
  * pm_qos_request - returns current system wide qos expectation
  * @pm_qos_class: identification of which qos value is requested
  *
- * This function returns the current target value in an atomic manner.
+ * This function returns the current target value.
  */
 int pm_qos_request(int pm_qos_class)
 {
-	unsigned long flags;
-	int value;
-
-	spin_lock_irqsave(&pm_qos_lock, flags);
-	value = pm_qos_get_value(pm_qos_array[pm_qos_class]);
-	spin_unlock_irqrestore(&pm_qos_lock, flags);
-
-	return value;
+	return pm_qos_read_value(pm_qos_array[pm_qos_class]);
 }
 EXPORT_SYMBOL_GPL(pm_qos_request);
 
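The payoff is on the idle-entry hot path: per the "idle governor" commit above, a caller such as the cpuidle menu governor, which reads the CPU/DMA latency constraint via pm_qos_request(PM_QOS_CPU_DMA_LATENCY) before every idle entry, now performs a single 32-bit load of target_value instead of a spin_lock_irqsave()/spin_unlock_irqrestore() round trip on the global pm_qos_lock. Writers pay one extra store (pm_qos_set_value()) inside the already-locked update_target() path, which is why target_value is deliberately kept at 32 bits, the width for which the new comment notes atomic access is guaranteed on all supported CPUs.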