Diffstat (limited to 'drivers/cpuidle/governors')
-rw-r--r--  drivers/cpuidle/governors/Makefile |   6
-rw-r--r--  drivers/cpuidle/governors/ladder.c | 166
-rw-r--r--  drivers/cpuidle/governors/menu.c   | 137
3 files changed, 309 insertions, 0 deletions
diff --git a/drivers/cpuidle/governors/Makefile b/drivers/cpuidle/governors/Makefile
new file mode 100644
index 000000000000..1b512722689f
--- /dev/null
+++ b/drivers/cpuidle/governors/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for cpuidle governors.
+#
+
+obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
+obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
new file mode 100644
index 000000000000..eb666ecae7c9
--- /dev/null
+++ b/drivers/cpuidle/governors/ladder.c
@@ -0,0 +1,166 @@
+/*
+ * ladder.c - the residency ladder algorithm
+ *
+ * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
+ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
+ *
+ * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+ *               Shaohua Li <shaohua.li@intel.com>
+ *               Adam Belay <abelay@novell.com>
+ *
+ * This code is licenced under the GPL.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpuidle.h>
+#include <linux/latency.h>
+#include <linux/moduleparam.h>
+#include <linux/jiffies.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#define PROMOTION_COUNT 4
+#define DEMOTION_COUNT 1
+
+struct ladder_device_state {
+        struct {
+                u32 promotion_count;
+                u32 demotion_count;
+                u32 promotion_time;
+                u32 demotion_time;
+        } threshold;
+        struct {
+                int promotion_count;
+                int demotion_count;
+        } stats;
+};
+
+struct ladder_device {
+        struct ladder_device_state states[CPUIDLE_STATE_MAX];
+        int last_state_idx;
+};
+
+static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
+
+/**
+ * ladder_do_selection - prepares private data for a state change
+ * @ldev: the ladder device
+ * @old_idx: the current state index
+ * @new_idx: the new target state index
+ */
+static inline void ladder_do_selection(struct ladder_device *ldev,
+                                       int old_idx, int new_idx)
+{
+        ldev->states[old_idx].stats.promotion_count = 0;
+        ldev->states[old_idx].stats.demotion_count = 0;
+        ldev->last_state_idx = new_idx;
+}
+
+/**
+ * ladder_select_state - selects the next state to enter
+ * @dev: the CPU
+ */
+static int ladder_select_state(struct cpuidle_device *dev)
+{
+        struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
+        struct ladder_device_state *last_state;
+        int last_residency, last_idx = ldev->last_state_idx;
+
+        if (unlikely(!ldev))
+                return 0;
+
+        last_state = &ldev->states[last_idx];
+
+        if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
+                last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
+        else
+                last_residency = last_state->threshold.promotion_time + 1;
+
+        /* consider promotion */
+        if (last_idx < dev->state_count - 1 &&
+            last_residency > last_state->threshold.promotion_time &&
+            dev->states[last_idx + 1].exit_latency <= system_latency_constraint()) {
+                last_state->stats.promotion_count++;
+                last_state->stats.demotion_count = 0;
+                if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
+                        ladder_do_selection(ldev, last_idx, last_idx + 1);
+                        return last_idx + 1;
+                }
+        }
+
+        /* consider demotion */
+        if (last_idx > 0 &&
+            last_residency < last_state->threshold.demotion_time) {
+                last_state->stats.demotion_count++;
+                last_state->stats.promotion_count = 0;
+                if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
+                        ladder_do_selection(ldev, last_idx, last_idx - 1);
+                        return last_idx - 1;
+                }
+        }
+
+        /* otherwise remain at the current state */
+        return last_idx;
+}
+
+/**
+ * ladder_enable_device - setup for the governor
+ * @dev: the CPU
+ */
+static int ladder_enable_device(struct cpuidle_device *dev)
+{
+        int i;
+        struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
+        struct ladder_device_state *lstate;
+        struct cpuidle_state *state;
+
+        ldev->last_state_idx = 0;
+
+        for (i = 0; i < dev->state_count; i++) {
+                state = &dev->states[i];
+                lstate = &ldev->states[i];
+
+                lstate->stats.promotion_count = 0;
+                lstate->stats.demotion_count = 0;
+
+                lstate->threshold.promotion_count = PROMOTION_COUNT;
+                lstate->threshold.demotion_count = DEMOTION_COUNT;
+
+                if (i < dev->state_count - 1)
+                        lstate->threshold.promotion_time = state->exit_latency;
+                if (i > 0)
+                        lstate->threshold.demotion_time = state->exit_latency;
+        }
+
+        return 0;
+}
+
+static struct cpuidle_governor ladder_governor = {
+        .name = "ladder",
+        .rating = 10,
+        .enable = ladder_enable_device,
+        .select = ladder_select_state,
+        .owner = THIS_MODULE,
+};
+
+/**
+ * init_ladder - initializes the governor
+ */
+static int __init init_ladder(void)
+{
+        return cpuidle_register_governor(&ladder_governor);
+}
+
+/**
+ * exit_ladder - exits the governor
+ */
+static void __exit exit_ladder(void)
+{
+        cpuidle_unregister_governor(&ladder_governor);
+}
+
+MODULE_LICENSE("GPL");
+module_init(init_ladder);
+module_exit(exit_ladder);
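
Editor's note: the promotion/demotion bookkeeping in ladder_select_state() above can be read on its own: stay on the current rung, count consecutive residencies that overshoot the promotion threshold or undershoot the demotion threshold, and only move one rung up or down once PROMOTION_COUNT (4) or DEMOTION_COUNT (1) hits have accumulated. The standalone C sketch below replays that logic in userspace; the sim_state table, its threshold values, and the select_next() helper are made up for illustration, and the per-CPU data, CPUIDLE_FLAG_TIME_VALID handling, and system_latency_constraint() check from the real governor are deliberately omitted.

#include <stdio.h>

#define PROMOTION_COUNT 4
#define DEMOTION_COUNT  1
#define NSTATES         3

/* Simplified stand-in for struct ladder_device_state: one pair of residency
 * thresholds (in microseconds) plus the running hit counters. */
struct sim_state {
        unsigned promotion_time, demotion_time;  /* residency thresholds (us) */
        int promotion_count, demotion_count;     /* consecutive hits so far */
};

/* Illustrative thresholds only; the kernel derives them from exit_latency. */
static struct sim_state states[NSTATES] = {
        { .promotion_time = 1,   .demotion_time = 0  },
        { .promotion_time = 10,  .demotion_time = 1  },
        { .promotion_time = 100, .demotion_time = 10 },
};

/* Same shape as ladder_select_state(): promote after PROMOTION_COUNT long
 * residencies, demote after DEMOTION_COUNT short ones, otherwise stay put.
 * Both counters of the rung we leave are reset, as ladder_do_selection() does. */
static int select_next(int cur, unsigned last_residency_us)
{
        struct sim_state *s = &states[cur];

        if (cur < NSTATES - 1 && last_residency_us > s->promotion_time) {
                s->promotion_count++;
                s->demotion_count = 0;
                if (s->promotion_count >= PROMOTION_COUNT) {
                        s->promotion_count = s->demotion_count = 0;
                        return cur + 1;
                }
        }
        if (cur > 0 && last_residency_us < s->demotion_time) {
                s->demotion_count++;
                s->promotion_count = 0;
                if (s->demotion_count >= DEMOTION_COUNT) {
                        s->promotion_count = s->demotion_count = 0;
                        return cur - 1;
                }
        }
        return cur;
}

int main(void)
{
        /* Four long residencies climb one rung; a single short one drops back. */
        unsigned residencies[] = { 50, 50, 50, 50, 200, 200, 200, 200, 5 };
        int idx = 0;

        for (unsigned i = 0; i < sizeof(residencies) / sizeof(residencies[0]); i++) {
                idx = select_next(idx, residencies[i]);
                printf("residency %3u us -> state %d\n", residencies[i], idx);
        }
        return 0;
}
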
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
new file mode 100644
index 000000000000..299d45c3bdd2
--- /dev/null
+++ b/drivers/cpuidle/governors/menu.c
@@ -0,0 +1,137 @@
+/*
+ * menu.c - the menu idle governor
+ *
+ * Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
+ *
+ * This code is licenced under the GPL.
+ */
+
+#include <linux/kernel.h>
+#include <linux/cpuidle.h>
+#include <linux/latency.h>
+#include <linux/time.h>
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+
+#define BREAK_FUZZ 4 /* 4 us */
+
+struct menu_device {
+        int last_state_idx;
+
+        unsigned int expected_us;
+        unsigned int predicted_us;
+        unsigned int last_measured_us;
+        unsigned int elapsed_us;
+};
+
+static DEFINE_PER_CPU(struct menu_device, menu_devices);
+
+/**
+ * menu_select - selects the next idle state to enter
+ * @dev: the CPU
+ */
+static int menu_select(struct cpuidle_device *dev)
+{
+        struct menu_device *data = &__get_cpu_var(menu_devices);
+        int i;
+
+        /* determine the expected residency time */
+        data->expected_us =
+                (u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
+
+        /* find the deepest idle state that satisfies our constraints */
+        for (i = 1; i < dev->state_count; i++) {
+                struct cpuidle_state *s = &dev->states[i];
+
+                if (s->target_residency > data->expected_us)
+                        break;
+                if (s->target_residency > data->predicted_us)
+                        break;
+                if (s->exit_latency > system_latency_constraint())
+                        break;
+        }
+
+        data->last_state_idx = i - 1;
+        return i - 1;
+}
+
+/**
+ * menu_reflect - attempts to guess what happened after entry
+ * @dev: the CPU
+ *
+ * NOTE: it's important to be fast here because this operation will add to
+ * the overall exit latency.
+ */
+static void menu_reflect(struct cpuidle_device *dev)
+{
+        struct menu_device *data = &__get_cpu_var(menu_devices);
+        int last_idx = data->last_state_idx;
+        unsigned int measured_us =
+                cpuidle_get_last_residency(dev) + data->elapsed_us;
+        struct cpuidle_state *target = &dev->states[last_idx];
+
+        /*
+         * Ugh, this idle state doesn't support residency measurements, so we
+         * are basically lost in the dark.  As a compromise, assume we slept
+         * for one full standard timer tick.  However, be aware that this
+         * could potentially result in a suboptimal state transition.
+         */
+        if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
+                measured_us = USEC_PER_SEC / HZ;
+
+        /* Predict time remaining until next break event */
+        if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
+                data->predicted_us = max(measured_us, data->last_measured_us);
+                data->last_measured_us = measured_us;
+                data->elapsed_us = 0;
+        } else {
+                if (data->elapsed_us < data->elapsed_us + measured_us)
+                        data->elapsed_us = measured_us;
+                else
+                        data->elapsed_us = -1;
+                data->predicted_us = max(measured_us, data->last_measured_us);
+        }
+}
+
+/**
+ * menu_enable_device - scans a CPU's states and does setup
+ * @dev: the CPU
+ */
+static int menu_enable_device(struct cpuidle_device *dev)
+{
+        struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
+
+        memset(data, 0, sizeof(struct menu_device));
+
+        return 0;
+}
+
+static struct cpuidle_governor menu_governor = {
+        .name = "menu",
+        .rating = 20,
+        .enable = menu_enable_device,
+        .select = menu_select,
+        .reflect = menu_reflect,
+        .owner = THIS_MODULE,
+};
+
+/**
+ * init_menu - initializes the governor
+ */
+static int __init init_menu(void)
+{
+        return cpuidle_register_governor(&menu_governor);
+}
+
+/**
+ * exit_menu - exits the governor
+ */
+static void __exit exit_menu(void)
+{
+        cpuidle_unregister_governor(&menu_governor);
+}
+
+MODULE_LICENSE("GPL");
+module_init(init_menu);
+module_exit(exit_menu);
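
Editor's note: the selection loop in menu_select() above amounts to "walk the states from shallowest to deepest and stop at the first one whose target residency or exit latency no longer fits; the previous index wins." The sketch below is a hypothetical userspace rendering of that walk, compiled with an ordinary C compiler: the idle_state table, the latency limit, and the pick_state() helper are invented for illustration, and the predicted_us feedback that menu_reflect() maintains in the kernel is reduced to a plain argument.

#include <stdio.h>

struct idle_state {
        const char *name;
        unsigned exit_latency;     /* us */
        unsigned target_residency; /* us */
};

/* Made-up state table, roughly shaped like a C1/C2/C3 ladder. */
static const struct idle_state table[] = {
        { "C1", 1,   2   },
        { "C2", 20,  80  },
        { "C3", 100, 400 },
};

/*
 * Mirror of the menu_select() loop: start at the shallowest state beyond
 * index 0 and stop at the first one whose requirements are not met.
 */
static int pick_state(unsigned expected_us, unsigned predicted_us,
                      unsigned latency_limit_us)
{
        int i;

        for (i = 1; i < (int)(sizeof(table) / sizeof(table[0])); i++) {
                if (table[i].target_residency > expected_us)
                        break;
                if (table[i].target_residency > predicted_us)
                        break;
                if (table[i].exit_latency > latency_limit_us)
                        break;
        }
        return i - 1;
}

int main(void)
{
        /* A long expected sleep but a pessimistic prediction keeps us in C2. */
        printf("picked %s\n", table[pick_state(1000, 150, 500)].name);
        /* Everything lines up, so the deepest state is chosen. */
        printf("picked %s\n", table[pick_state(1000, 1000, 500)].name);
        return 0;
}
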