author     Deepthi Dharwar <deepthi@linux.vnet.ibm.com>  2011-10-28 06:50:42 -0400
committer  Len Brown <len.brown@intel.com>               2011-11-06 21:13:58 -0500
commit     46bcfad7a819bd17ac4e831b04405152d59784ab (patch)
tree       20041e788154d103edff2699f88d4a30320e3ee2 /drivers/cpuidle/governors
parent     4202735e8ab6ecfb0381631a0d0b58fefe0bd4e2 (diff)
cpuidle: Single/Global registration of idle states
This patch makes the cpuidle_states structure global (a single copy) instead of
per-cpu. The statistics needed on a per-cpu basis by the governor are kept
per-cpu. This simplifies the cpuidle subsystem, as state registration is done
by a single cpu only. Having a single copy of cpuidle_states saves memory. The
rare case of asymmetric C-states can be handled within the cpuidle driver, and
architectures such as POWER do not have asymmetric C-states.

With single/global registration of all the idle states, dynamic C-state
transitions on x86 are handled by the boot cpu. Here, the boot cpu disables all
the devices, re-populates the states and later enables all the devices,
irrespective of the cpu that receives the notification first.

Reference: https://lkml.org/lkml/2011/4/25/83

Signed-off-by: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
Signed-off-by: Trinabh Gupta <g.trinabh@gmail.com>
Tested-by: Jean Pihet <j-pihet@ti.com>
Reviewed-by: Kevin Hilman <khilman@ti.com>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Kevin Hilman <khilman@ti.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/cpuidle/governors')
-rw-r--r--  drivers/cpuidle/governors/ladder.c  28
-rw-r--r--  drivers/cpuidle/governors/menu.c    20
2 files changed, 29 insertions(+), 19 deletions(-)
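
For context before the diffs below: governor hooks that previously took only the
per-cpu cpuidle_device now also receive the single, global cpuidle_driver and
read idle-state properties from it, while per-cpu statistics stay on the device.
A minimal sketch of the shape of that change (the example_select_* names are
hypothetical, not symbols from this patch):

/* Before this series: only the per-cpu device, which carried its own state table */
static int example_select_old(struct cpuidle_device *dev)
{
        /* state data had to come from dev->states[], the per-cpu copy */
        return CPUIDLE_DRIVER_STATE_START;
}

/* After: the driver holds the single global state table */
static int example_select_new(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev)
{
        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
        int i;

        /* state properties come from drv->states[], the global copy ... */
        for (i = drv->state_count - 1; i > CPUIDLE_DRIVER_STATE_START; i--)
                if (drv->states[i].exit_latency <= latency_req)
                        break;

        /* ... while residency/usage statistics remain per-cpu on dev */
        return i;
}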
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 6a686a76711f..ef6b9e4727a7 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -60,9 +60,11 @@ static inline void ladder_do_selection(struct ladder_device *ldev,
 
 /**
  * ladder_select_state - selects the next state to enter
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int ladder_select_state(struct cpuidle_device *dev)
+static int ladder_select_state(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
 	struct ladder_device_state *last_state;
@@ -77,15 +79,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
 
 	last_state = &ldev->states[last_idx];
 
-	if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
-		last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
+	if (drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID) {
+		last_residency = cpuidle_get_last_residency(dev) - \
+			drv->states[last_idx].exit_latency;
+	}
 	else
 		last_residency = last_state->threshold.promotion_time + 1;
 
 	/* consider promotion */
-	if (last_idx < dev->state_count - 1 &&
+	if (last_idx < drv->state_count - 1 &&
 	    last_residency > last_state->threshold.promotion_time &&
-	    dev->states[last_idx + 1].exit_latency <= latency_req) {
+	    drv->states[last_idx + 1].exit_latency <= latency_req) {
 		last_state->stats.promotion_count++;
 		last_state->stats.demotion_count = 0;
 		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -96,11 +100,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
 
 	/* consider demotion */
 	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
-	    dev->states[last_idx].exit_latency > latency_req) {
+	    drv->states[last_idx].exit_latency > latency_req) {
 		int i;
 
 		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
-			if (dev->states[i].exit_latency <= latency_req)
+			if (drv->states[i].exit_latency <= latency_req)
 				break;
 		}
 		ladder_do_selection(ldev, last_idx, i);
@@ -123,9 +127,11 @@ static int ladder_select_state(struct cpuidle_device *dev)
 
 /**
  * ladder_enable_device - setup for the governor
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int ladder_enable_device(struct cpuidle_device *dev)
+static int ladder_enable_device(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	int i;
 	struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
@@ -134,8 +140,8 @@ static int ladder_enable_device(struct cpuidle_device *dev)
 
 	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 
-	for (i = 0; i < dev->state_count; i++) {
-		state = &dev->states[i];
+	for (i = 0; i < drv->state_count; i++) {
+		state = &drv->states[i];
 		lstate = &ldev->states[i];
 
 		lstate->stats.promotion_count = 0;
@@ -144,7 +150,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
 		lstate->threshold.promotion_count = PROMOTION_COUNT;
 		lstate->threshold.demotion_count = DEMOTION_COUNT;
 
-		if (i < dev->state_count - 1)
+		if (i < drv->state_count - 1)
 			lstate->threshold.promotion_time = state->exit_latency;
 		if (i > 0)
 			lstate->threshold.demotion_time = state->exit_latency;
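
Not part of these hunks, but for orientation: elsewhere in ladder.c the governor
ops table simply points at the callbacks whose prototypes changed above. A
sketch of how it reads with the new signatures (field values are believed to
match the mainline ladder governor and are shown only for illustration):

static struct cpuidle_governor ladder_governor = {
        .name    = "ladder",
        .rating  = 10,
        .enable  = ladder_enable_device,   /* now takes (drv, dev) */
        .select  = ladder_select_state,    /* now takes (drv, dev) */
        .reflect = ladder_reflect,
        .owner   = THIS_MODULE,
};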
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index af724e823c8e..bcbe88142135 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -182,7 +182,7 @@ static inline int performance_multiplier(void)
 
 static DEFINE_PER_CPU(struct menu_device, menu_devices);
 
-static void menu_update(struct cpuidle_device *dev);
+static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev);
 
 /* This implements DIV_ROUND_CLOSEST but avoids 64 bit division */
 static u64 div_round64(u64 dividend, u32 divisor)
@@ -228,9 +228,10 @@ static void detect_repeating_patterns(struct menu_device *data)
 
 /**
  * menu_select - selects the next idle state to enter
+ * @drv: cpuidle driver containing state data
  * @dev: the CPU
  */
-static int menu_select(struct cpuidle_device *dev)
+static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
@@ -240,7 +241,7 @@ static int menu_select(struct cpuidle_device *dev)
 	struct timespec t;
 
 	if (data->needs_update) {
-		menu_update(dev);
+		menu_update(drv, dev);
 		data->needs_update = 0;
 	}
 
@@ -285,8 +286,8 @@ static int menu_select(struct cpuidle_device *dev)
 	 * Find the idle state with the lowest power while satisfying
 	 * our constraints.
 	 */
-	for (i = CPUIDLE_DRIVER_STATE_START; i < dev->state_count; i++) {
-		struct cpuidle_state *s = &dev->states[i];
+	for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+		struct cpuidle_state *s = &drv->states[i];
 
 		if (s->target_residency > data->predicted_us)
 			continue;
@@ -323,14 +324,15 @@ static void menu_reflect(struct cpuidle_device *dev, int index)
 
 /**
  * menu_update - attempts to guess what happened after entry
+ * @drv: cpuidle driver containing state data
  * @dev: the CPU
  */
-static void menu_update(struct cpuidle_device *dev)
+static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int last_idx = data->last_state_idx;
 	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
-	struct cpuidle_state *target = &dev->states[last_idx];
+	struct cpuidle_state *target = &drv->states[last_idx];
 	unsigned int measured_us;
 	u64 new_factor;
 
@@ -384,9 +386,11 @@ static void menu_update(struct cpuidle_device *dev)
 
 /**
  * menu_enable_device - scans a CPU's states and does setup
+ * @drv: cpuidle driver
  * @dev: the CPU
  */
-static int menu_enable_device(struct cpuidle_device *dev)
+static int menu_enable_device(struct cpuidle_driver *drv,
+				struct cpuidle_device *dev)
 {
 	struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
 
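
Beyond the governors, the point of the series is that a platform driver now
describes its idle states once, in the cpuidle_driver, and registers them from a
single cpu; the per-cpu cpuidle_device no longer carries a state table. A
hypothetical driver-side sketch (the my_idle_* names and the enter() prototype
are assumptions for illustration, not code from this patch):

static int my_idle_enter(struct cpuidle_device *dev,
                         struct cpuidle_driver *drv, int index)
{
        /* enter the hardware idle state chosen by the governor */
        return index;   /* report the state actually entered */
}

static struct cpuidle_driver my_idle_driver = {
        .name        = "my_idle",
        .owner       = THIS_MODULE,
        .state_count = 1,
        .states[0]   = {
                .name             = "C1",
                .desc             = "hypothetical shallow state",
                .flags            = CPUIDLE_FLAG_TIME_VALID,
                .exit_latency     = 1,  /* us */
                .target_residency = 2,  /* us */
                .enter            = my_idle_enter,
        },
};

static int __init my_idle_init(void)
{
        /* single, global registration of the state table (e.g. from the boot cpu) */
        return cpuidle_register_driver(&my_idle_driver);
}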