author	Andi Kleen <ak@linux.intel.com>	2008-08-15 15:26:12 -0400
committer	Andi Kleen <ak@linux.intel.com>	2008-08-15 15:26:12 -0400
commit	22d9aac23562ea0e921db152f11cf39e3c2b9945 (patch)
tree	b7f83149a5ea0e17ac492fe4bb2f75871603252b
parent	a6006834866229aa40393ae1dac7ecf05b11e6e0 (diff)
parent	06d9e908b2248f983b186aaf569c58e1430db85d (diff)
Merge branch 'cpuidle' into release-2.6.27
-rw-r--r--	drivers/cpuidle/governors/ladder.c	26
-rw-r--r--	drivers/cpuidle/governors/menu.c	42
2 files changed, 50 insertions, 18 deletions
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index ba7b9a6b17a1..a4bec3f919aa 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,10 +67,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
 	struct ladder_device_state *last_state;
 	int last_residency, last_idx = ldev->last_state_idx;
+	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
 
 	if (unlikely(!ldev))
 		return 0;
 
+	/* Special case when user has set very strict latency requirement */
+	if (unlikely(latency_req == 0)) {
+		ladder_do_selection(ldev, last_idx, 0);
+		return 0;
+	}
+
 	last_state = &ldev->states[last_idx];
 
 	if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
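The new special case at the top of ladder_select_state() reacts to a PM QoS CPU/DMA latency requirement of zero by dropping straight to state 0 and staying there. For context, here is a minimal userspace sketch (not part of this patch) of how such a requirement typically gets registered: writing a 32-bit microsecond value to /dev/cpu_dma_latency holds the constraint for as long as the file descriptor stays open.

	#include <fcntl.h>
	#include <stdint.h>
	#include <unistd.h>

	int main(void)
	{
		int32_t latency_us = 0;	/* tolerate zero wakeup latency */
		int fd = open("/dev/cpu_dma_latency", O_WRONLY);

		if (fd < 0)
			return 1;
		if (write(fd, &latency_us, sizeof(latency_us)) != sizeof(latency_us))
			return 1;
		pause();	/* constraint is dropped when fd is closed */
		close(fd);
		return 0;
	}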
@@ -81,8 +88,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	/* consider promotion */
 	if (last_idx < dev->state_count - 1 &&
 	    last_residency > last_state->threshold.promotion_time &&
-	    dev->states[last_idx + 1].exit_latency <=
-			pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
+	    dev->states[last_idx + 1].exit_latency <= latency_req) {
 		last_state->stats.promotion_count++;
 		last_state->stats.demotion_count = 0;
 		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -92,7 +98,19 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	}
 
 	/* consider demotion */
-	if (last_idx > 0 &&
+	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+	    dev->states[last_idx].exit_latency > latency_req) {
+		int i;
+
+		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
+			if (dev->states[i].exit_latency <= latency_req)
+				break;
+		}
+		ladder_do_selection(ldev, last_idx, i);
+		return i;
+	}
+
+	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
 	    last_residency < last_state->threshold.demotion_time) {
 		last_state->stats.demotion_count++;
 		last_state->stats.promotion_count = 0;
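This hunk adds a second demotion path: when the current state's exit latency exceeds the QoS constraint, the governor walks down the ladder to the deepest state that still fits, instead of stepping down one rung per invocation. A standalone sketch of that scan, using simplified types rather than the kernel's structures: if no state below last_idx satisfies the constraint, the loop bottoms out at the floor index and that state is used regardless.

	/* Hypothetical simplified model of the scan in the hunk above. */
	static int deepest_state_within_latency(const unsigned int *exit_latency,
						int last_idx, int latency_req,
						int floor_idx)
	{
		int i;

		for (i = last_idx - 1; i > floor_idx; i--) {
			if (exit_latency[i] <= latency_req)
				break;	/* first state honoring the constraint */
		}
		return i;	/* falls back to floor_idx if nothing fits */
	}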
@@ -117,7 +135,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
 	struct ladder_device_state *lstate;
 	struct cpuidle_state *state;
 
-	ldev->last_state_idx = 0;
+	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 
 	for (i = 0; i < dev->state_count; i++) {
 		state = &dev->states[i];
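Several hunks in both governors replace hard-coded state indices with CPUIDLE_DRIVER_STATE_START. If memory serves for this kernel era, include/linux/cpuidle.h defines it so that the dedicated polling state installed at index 0 on CONFIG_ARCH_HAS_CPU_RELAX architectures is skipped, while other architectures keep starting at index 0:

	#ifdef CONFIG_ARCH_HAS_CPU_RELAX
	#define CPUIDLE_DRIVER_STATE_START	1
	#else
	#define CPUIDLE_DRIVER_STATE_START	0
	#endif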
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 78d77c5dc35c..8d7cf3f31450 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -34,21 +34,28 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
 static int menu_select(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
+	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
 	int i;
 
+	/* Special case when user has set very strict latency requirement */
+	if (unlikely(latency_req == 0)) {
+		data->last_state_idx = 0;
+		return 0;
+	}
+
 	/* determine the expected residency time */
 	data->expected_us =
 		(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
 
 	/* find the deepest idle state that satisfies our constraints */
-	for (i = 1; i < dev->state_count; i++) {
+	for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
 		struct cpuidle_state *s = &dev->states[i];
 
 		if (s->target_residency > data->expected_us)
 			break;
 		if (s->target_residency > data->predicted_us)
 			break;
-		if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
+		if (s->exit_latency > latency_req)
 			break;
 	}
 
@@ -67,9 +74,9 @@ static void menu_reflect(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int last_idx = data->last_state_idx;
-	unsigned int measured_us =
-		cpuidle_get_last_residency(dev) + data->elapsed_us;
+	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
 	struct cpuidle_state *target = &dev->states[last_idx];
+	unsigned int measured_us;
 
 	/*
 	 * Ugh, this idle state doesn't support residency measurements, so we
@@ -77,20 +84,27 @@ static void menu_reflect(struct cpuidle_device *dev)
 	 * for one full standard timer tick. However, be aware that this
 	 * could potentially result in a suboptimal state transition.
 	 */
-	if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
-		measured_us = USEC_PER_SEC / HZ;
+	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
+		last_idle_us = USEC_PER_SEC / HZ;
+
+	/*
+	 * measured_us and elapsed_us are the cumulative idle time, since the
+	 * last time we were woken out of idle by an interrupt.
+	 */
+	if (data->elapsed_us <= data->elapsed_us + last_idle_us)
+		measured_us = data->elapsed_us + last_idle_us;
+	else
+		measured_us = -1;
+
+	/* Predict time until next break event */
+	data->predicted_us = max(measured_us, data->last_measured_us);
 
-	/* Predict time remaining until next break event */
-	if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
-		data->predicted_us = max(measured_us, data->last_measured_us);
+	if (last_idle_us + BREAK_FUZZ <
+	    data->expected_us - target->exit_latency) {
 		data->last_measured_us = measured_us;
 		data->elapsed_us = 0;
 	} else {
-		if (data->elapsed_us < data->elapsed_us + measured_us)
-			data->elapsed_us = measured_us;
-		else
-			data->elapsed_us = -1;
-		data->predicted_us = max(measured_us, data->last_measured_us);
+		data->elapsed_us = measured_us;
 	}
 }
 
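The rewritten accumulation in menu_reflect() uses the classic unsigned-overflow guard: for unsigned arithmetic, a + b compares less than a exactly when the addition wrapped, so elapsed_us + last_idle_us saturates at the maximum value (the unsigned reading of -1) instead of wrapping back toward zero. An illustrative sketch of the same idiom in isolation:

	#include <assert.h>
	#include <limits.h>

	/* Saturating add mirroring the guard in the hunk above. */
	static unsigned int sat_add(unsigned int acc, unsigned int delta)
	{
		return (acc <= acc + delta) ? acc + delta : UINT_MAX;
	}

	int main(void)
	{
		assert(sat_add(10, 20) == 30);
		assert(sat_add(UINT_MAX - 1, 5) == UINT_MAX);	/* would wrap */
		return 0;
	}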