 drivers/acpi/processor_idle.c  |  38 ++++-
 drivers/net/wireless/ipw2100.c |  10 +
 include/linux/latency.h        |  25 +
 kernel/Makefile                |   2 +-
 kernel/latency.c               | 279 +++++++
 5 files changed, 349 insertions(+), 5 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 71066066d626..0a395fca843b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -38,6 +38,7 @@
 #include <linux/dmi.h>
 #include <linux/moduleparam.h>
 #include <linux/sched.h>	/* need_resched() */
+#include <linux/latency.h>
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -453,7 +454,8 @@ static void acpi_processor_idle(void)
 	 */
 	if (cx->promotion.state &&
 	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
-		if (sleep_ticks > cx->promotion.threshold.ticks) {
+		if (sleep_ticks > cx->promotion.threshold.ticks &&
+		    cx->promotion.state->latency <= system_latency_constraint()) {
 			cx->promotion.count++;
 			cx->demotion.count = 0;
 			if (cx->promotion.count >=
@@ -494,8 +496,10 @@ static void acpi_processor_idle(void)
       end:
 	/*
 	 * Demote if current state exceeds max_cstate
+	 * or if the latency of the current state is unacceptable
 	 */
-	if ((pr->power.state - pr->power.states) > max_cstate) {
+	if ((pr->power.state - pr->power.states) > max_cstate ||
+	    pr->power.state->latency > system_latency_constraint()) {
 		if (cx->demotion.state)
 			next_state = cx->demotion.state;
 	}
@@ -1009,9 +1013,11 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
 
 	seq_printf(seq, "active state:            C%zd\n"
 		   "max_cstate:              C%d\n"
-		   "bus master activity:     %08x\n",
+		   "bus master activity:     %08x\n"
+		   "maximum allowed latency: %d usec\n",
 		   pr->power.state ? pr->power.state - pr->power.states : 0,
-		   max_cstate, (unsigned)pr->power.bm_activity);
+		   max_cstate, (unsigned)pr->power.bm_activity,
+		   system_latency_constraint());
 
 	seq_puts(seq, "states:\n");
 
@@ -1077,6 +1083,28 @@ static const struct file_operations acpi_processor_power_fops = {
 	.release = single_release,
 };
 
+static void smp_callback(void *v)
+{
+	/* we already woke the CPU up, nothing more to do */
+}
+
+/*
+ * This function gets called when a part of the kernel has a new latency
+ * requirement. This means we need to get all processors out of their C-state,
+ * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
+ * wakes them all right up.
+ */
+static int acpi_processor_latency_notify(struct notifier_block *b,
+		unsigned long l, void *v)
+{
+	smp_call_function(smp_callback, NULL, 0, 1);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block acpi_processor_latency_notifier = {
+	.notifier_call = acpi_processor_latency_notify,
+};
+
 int acpi_processor_power_init(struct acpi_processor *pr,
 		struct acpi_device *device)
 {
@@ -1093,6 +1121,7 @@ int acpi_processor_power_init(struct acpi_processor *pr,
 		       "ACPI: processor limited to max C-state %d\n",
 		       max_cstate);
 		first_run++;
+		register_latency_notifier(&acpi_processor_latency_notifier);
 	}
 
 	if (!pr)
@@ -1164,6 +1193,7 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
 		 * copies of pm_idle before proceeding.
 		 */
 		cpu_idle_wait();
+		unregister_latency_notifier(&acpi_processor_latency_notifier);
 	}
 
 	return 0;
diff --git a/drivers/net/wireless/ipw2100.c b/drivers/net/wireless/ipw2100.c
index 6c5add701a6f..97937809de09 100644
--- a/drivers/net/wireless/ipw2100.c
+++ b/drivers/net/wireless/ipw2100.c
@@ -163,6 +163,7 @@ that only one external action is invoked at a time.
 #include <linux/firmware.h>
 #include <linux/acpi.h>
 #include <linux/ctype.h>
+#include <linux/latency.h>
 
 #include "ipw2100.h"
 
@@ -1697,6 +1698,11 @@ static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
 		return 0;
 	}
 
+	/* the ipw2100 hardware really doesn't want power management delays
+	 * longer than 175usec
+	 */
+	modify_acceptable_latency("ipw2100", 175);
+
 	/* If the interrupt is enabled, turn it off... */
 	spin_lock_irqsave(&priv->low_lock, flags);
 	ipw2100_disable_interrupts(priv);
@@ -1849,6 +1855,8 @@ static void ipw2100_down(struct ipw2100_priv *priv)
 	ipw2100_disable_interrupts(priv);
 	spin_unlock_irqrestore(&priv->low_lock, flags);
 
+	modify_acceptable_latency("ipw2100", INFINITE_LATENCY);
+
 #ifdef ACPI_CSTATE_LIMIT_DEFINED
 	if (priv->config & CFG_C3_DISABLED) {
 		IPW_DEBUG_INFO(": Resetting C3 transitions.\n");
@@ -6534,6 +6542,7 @@ static int __init ipw2100_init(void)
 
 	ret = pci_register_driver(&ipw2100_pci_driver);
 
+	set_acceptable_latency("ipw2100", INFINITE_LATENCY);
 #ifdef CONFIG_IPW2100_DEBUG
 	ipw2100_debug_level = debug;
 	driver_create_file(&ipw2100_pci_driver.driver,
@@ -6554,6 +6563,7 @@ static void __exit ipw2100_exit(void)
 			   &driver_attr_debug_level);
 #endif
 	pci_unregister_driver(&ipw2100_pci_driver);
+	remove_acceptable_latency("ipw2100");
 }
 
 module_init(ipw2100_init);
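
For reference, the general driver-side pattern that the ipw2100 changes above follow is sketched here: register an initially unbounded latency entry at load time, tighten it while the hardware is active, relax it again when idle, and remove it on unload. This is a minimal illustration only; the "mydrv" identifier and the surrounding functions are hypothetical and not part of this patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/latency.h>

static int __init mydrv_init(void)
{
	/* set_acceptable_latency() may sleep; process context only */
	set_acceptable_latency("mydrv", INFINITE_LATENCY);
	return 0;
}

static void mydrv_start(void)
{
	/* hardware buffers roughly 200 usec of data, so ask for
	 * wakeup latencies no longer than 150 usec while running */
	modify_acceptable_latency("mydrv", 150);
}

static void mydrv_stop(void)
{
	/* no constraint needed while the hardware is idle */
	modify_acceptable_latency("mydrv", INFINITE_LATENCY);
}

static void __exit mydrv_exit(void)
{
	remove_acceptable_latency("mydrv");
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");
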
diff --git a/include/linux/latency.h b/include/linux/latency.h
new file mode 100644
index 000000000000..c08b52bb55b0
--- /dev/null
+++ b/include/linux/latency.h
@@ -0,0 +1,25 @@
+/*
+ * latency.h: Explicit system-wide latency-expectation infrastructure
+ *
+ * (C) Copyright 2006 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ */
+
+#ifndef _INCLUDE_GUARD_LATENCY_H_
+#define _INCLUDE_GUARD_LATENCY_H_
+
+#include <linux/notifier.h>
+
+void set_acceptable_latency(char *identifier, int usecs);
+void modify_acceptable_latency(char *identifier, int usecs);
+void remove_acceptable_latency(char *identifier);
+void synchronize_acceptable_latency(void);
+int system_latency_constraint(void);
+
+int register_latency_notifier(struct notifier_block * nb);
+int unregister_latency_notifier(struct notifier_block * nb);
+
+#define INFINITE_LATENCY 1000000
+
+#endif
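
A consumer of this header, such as a power-management policy along the lines of the ACPI idle changes earlier in this patch, combines system_latency_constraint() with the notifier chain declared above: it reads the current bound when making a decision, and registers a notifier so it can back out of overlong states as soon as the bound tightens. The sketch below shows the shape of such a consumer; the mypm_* names are illustrative assumptions, not part of this patch.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/latency.h>

static int mypm_latency_notify(struct notifier_block *nb,
			       unsigned long new_max_usecs, void *v)
{
	/* a tighter bound was just set: leave any hardware state whose
	 * exit latency exceeds new_max_usecs before returning */
	return NOTIFY_OK;
}

static struct notifier_block mypm_latency_nb = {
	.notifier_call = mypm_latency_notify,
};

static int __init mypm_init(void)
{
	/* decisions elsewhere would compare a state's exit latency
	 * against system_latency_constraint() before entering it */
	register_latency_notifier(&mypm_latency_nb);
	return 0;
}

module_init(mypm_init);
MODULE_LICENSE("GPL");
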
diff --git a/kernel/Makefile b/kernel/Makefile
index d62ec66c1af2..e210e8cf7237 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -8,7 +8,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
-	    hrtimer.o rwsem.o
+	    hrtimer.o rwsem.o latency.o
 
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
diff --git a/kernel/latency.c b/kernel/latency.c
new file mode 100644
index 000000000000..258f2555abbc
--- /dev/null
+++ b/kernel/latency.c
@@ -0,0 +1,279 @@
+/*
+ * latency.c: Explicit system-wide latency-expectation infrastructure
+ *
+ * The purpose of this infrastructure is to allow device drivers to set
+ * the latency constraints they have and to collect and summarize these
+ * expectations globally. The accumulated result can then be used by
+ * power management and similar users to make decisions that have
+ * tradeoffs with a latency component.
+ *
+ * An example user of this is the x86 C-states; each higher C-state saves
+ * more power, but has a higher exit latency. For the idle loop power
+ * code to make a good decision about which C-state to use, information about
+ * acceptable latencies is required.
+ *
+ * An example announcer of latency is an audio driver that knows it
+ * will get an interrupt when the hardware has 200 usec of samples
+ * left in the DMA buffer; in that case the driver can set a latency
+ * constraint of, say, 150 usec.
+ *
+ * Multiple drivers can each announce their maximum accepted latency;
+ * to keep these apart, a string based identifier is used.
+ *
+ *
+ * (C) Copyright 2006 Intel Corporation
+ * Author: Arjan van de Ven <arjan@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/latency.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
+#include <asm/atomic.h>
+
+struct latency_info {
+	struct list_head list;
+	int usecs;
+	char *identifier;
+};
+
+/*
+ * locking rule: all modifications to current_max_latency and
+ * latency_list need to be done while holding the latency_lock.
+ * latency_lock needs to be taken _irqsave.
+ */
+static atomic_t current_max_latency;
+static DEFINE_SPINLOCK(latency_lock);
+
+static LIST_HEAD(latency_list);
+static BLOCKING_NOTIFIER_HEAD(latency_notifier);
+
+/*
+ * This function returns the maximum latency allowed, which
+ * happens to be the minimum of all maximum latencies on the
+ * list.
+ */
+static int __find_max_latency(void)
+{
+	int min = INFINITE_LATENCY;
+	struct latency_info *info;
+
+	list_for_each_entry(info, &latency_list, list) {
+		if (info->usecs < min)
+			min = info->usecs;
+	}
+	return min;
+}
+
+/**
+ * set_acceptable_latency - sets the maximum latency acceptable
+ * @identifier: string that identifies this driver
+ * @usecs: maximum acceptable latency for this driver
+ *
+ * This function informs the kernel that this device (driver)
+ * can accept at most @usecs latency. This setting is used for
+ * power management and similar tradeoffs.
+ *
+ * This function sleeps and can only be called from process
+ * context.
+ * Calling this function with an existing identifier is valid
+ * and will cause the existing latency setting to be changed.
+ */
+void set_acceptable_latency(char *identifier, int usecs)
+{
+	struct latency_info *info, *iter;
+	unsigned long flags;
+	int found_old = 0;
+
+	info = kzalloc(sizeof(struct latency_info), GFP_KERNEL);
+	if (!info)
+		return;
+	info->usecs = usecs;
+	info->identifier = kstrdup(identifier, GFP_KERNEL);
+	if (!info->identifier)
+		goto free_info;
+
+	spin_lock_irqsave(&latency_lock, flags);
+	list_for_each_entry(iter, &latency_list, list) {
+		if (strcmp(iter->identifier, identifier) == 0) {
+			found_old = 1;
+			iter->usecs = usecs;
+			break;
+		}
+	}
+	if (!found_old)
+		list_add(&info->list, &latency_list);
+
+	if (usecs < atomic_read(&current_max_latency))
+		atomic_set(&current_max_latency, usecs);
+
+	spin_unlock_irqrestore(&latency_lock, flags);
+
+	blocking_notifier_call_chain(&latency_notifier,
+		atomic_read(&current_max_latency), NULL);
+
+	/*
+	 * if we inserted the new one, we're done; otherwise there was
+	 * an existing one so we need to free the redundant data
+	 */
+	if (!found_old)
+		return;
+
+	kfree(info->identifier);
+free_info:
+	kfree(info);
+}
+EXPORT_SYMBOL_GPL(set_acceptable_latency);
+
+/**
+ * modify_acceptable_latency - changes the maximum latency acceptable
+ * @identifier: string that identifies this driver
+ * @usecs: maximum acceptable latency for this driver
+ *
+ * This function informs the kernel that this device (driver)
+ * can accept at most @usecs latency. This setting is used for
+ * power management and similar tradeoffs.
+ *
+ * This function does not sleep and can be called in any context.
+ * Trying to use a non-existent identifier is silently ignored.
+ *
+ * Due to the atomic nature of this function, the modified latency
+ * value will only be used for future decisions; past decisions
+ * can still lead to longer latencies in the near future.
+ */
+void modify_acceptable_latency(char *identifier, int usecs)
+{
+	struct latency_info *iter;
+	unsigned long flags;
+
+	spin_lock_irqsave(&latency_lock, flags);
+	list_for_each_entry(iter, &latency_list, list) {
+		if (strcmp(iter->identifier, identifier) == 0) {
+			iter->usecs = usecs;
+			break;
+		}
+	}
+	if (usecs < atomic_read(&current_max_latency))
+		atomic_set(&current_max_latency, usecs);
+	spin_unlock_irqrestore(&latency_lock, flags);
+}
+EXPORT_SYMBOL_GPL(modify_acceptable_latency);
+
+/**
+ * remove_acceptable_latency - removes the maximum latency acceptable
+ * @identifier: string that identifies this driver
+ *
+ * This function removes a previously set maximum latency setting
+ * for the driver and frees up any resources associated with the
+ * bookkeeping needed for this.
+ *
+ * This function does not sleep and can be called in any context.
+ * Trying to use a non-existent identifier is silently ignored.
+ */
+void remove_acceptable_latency(char *identifier)
+{
+	unsigned long flags;
+	int newmax = 0;
+	struct latency_info *iter, *temp;
+
+	spin_lock_irqsave(&latency_lock, flags);
+
+	list_for_each_entry_safe(iter, temp, &latency_list, list) {
+		if (strcmp(iter->identifier, identifier) == 0) {
+			list_del(&iter->list);
+			newmax = iter->usecs;
+			kfree(iter->identifier);
+			kfree(iter);
+			break;
+		}
+	}
+
+	/* If we just deleted the system wide value, we need to
+	 * recalculate with a full search
+	 */
+	if (newmax == atomic_read(&current_max_latency)) {
+		newmax = __find_max_latency();
+		atomic_set(&current_max_latency, newmax);
+	}
+	spin_unlock_irqrestore(&latency_lock, flags);
+}
+EXPORT_SYMBOL_GPL(remove_acceptable_latency);
+
+/**
+ * system_latency_constraint - queries the system wide latency maximum
+ *
+ * This function returns the system wide maximum latency in
+ * microseconds.
+ *
+ * This function does not sleep and can be called in any context.
+ */
+int system_latency_constraint(void)
+{
+	return atomic_read(&current_max_latency);
+}
+EXPORT_SYMBOL_GPL(system_latency_constraint);
+
+/**
+ * synchronize_acceptable_latency - recalculates all latency decisions
+ *
+ * This function will cause a callback to various kernel pieces that
+ * will make those pieces rethink their latency decisions. This implies
+ * that if there are overlong latencies in hardware state already, those
+ * latencies get taken right now. When this call completes no overlong
+ * latency decisions should be active anymore.
+ *
+ * The typical use case for this is after a modify_acceptable_latency() call,
+ * which in itself is non-blocking and non-synchronizing.
+ *
+ * This function blocks and should not be called with locks held.
+ */
+
+void synchronize_acceptable_latency(void)
+{
+	blocking_notifier_call_chain(&latency_notifier,
+		atomic_read(&current_max_latency), NULL);
+}
+EXPORT_SYMBOL_GPL(synchronize_acceptable_latency);
+
+/*
+ * Latency notifier: this notifier gets called when a non-atomic new
+ * latency value gets set. The expectation of the caller of the
+ * non-atomic set is that when the call returns, future latencies
+ * are within bounds, so the functions on the notifier list are
+ * expected to take the overlong latencies immediately, inside the
+ * callback, and not make an overlong latency decision anymore.
+ *
+ * The callback gets called when the new latency value is made
+ * active so system_latency_constraint() returns the new latency.
+ */
+int register_latency_notifier(struct notifier_block * nb)
+{
+	return blocking_notifier_chain_register(&latency_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(register_latency_notifier);
+
+int unregister_latency_notifier(struct notifier_block * nb)
+{
+	return blocking_notifier_chain_unregister(&latency_notifier, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_latency_notifier);
+
+static __init int latency_init(void)
+{
+	atomic_set(&current_max_latency, INFINITE_LATENCY);
+	/*
+	 * we don't want by default to have longer latencies than 2 ticks,
+	 * since that would cause lost ticks
+	 */
+	set_acceptable_latency("kernel", 2*1000000/HZ);
+	return 0;
+}
+
+module_init(latency_init);
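
One usage note grounded in the kerneldoc above: modify_acceptable_latency() is atomic and non-synchronizing, so a caller that needs the tighter bound to actually be in force before continuing can pair it with synchronize_acceptable_latency(). A hypothetical helper, with placeholder identifier and function name, might look like this:

#include <linux/latency.h>

static void mydrv_prepare_low_latency(void)
{
	/* tighten the constraint; takes effect for future decisions only */
	modify_acceptable_latency("mydrv", 150);
	/* force all registered consumers to re-evaluate now; this blocks
	 * and must not be called with locks held */
	synchronize_acceptable_latency();
}
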