| author | H. Peter Anvin <hpa@zytor.com> | 2008-08-27 22:17:07 -0400 |
| --- | --- | --- |
| committer | H. Peter Anvin <hpa@zytor.com> | 2008-08-27 22:17:07 -0400 |
| commit | b30a72a7edfc64c8929104d5c2178aca489aa559 (patch) | |
| tree | 2b26c155c318d08d946bd2d70cf815dcdb29a243 /drivers/cpuidle/governors/menu.c | |
| parent | f1c5d30e1d79bbfb60eaf189db862d3cb2bcac92 (diff) | |
| parent | c1b362e3b4d331a63915b268a33207311a439d60 (diff) | |
Merge branch 'x86/urgent' into x86/cpu
Conflicts:
	arch/x86/kernel/cpu/cyrix.c
Diffstat (limited to 'drivers/cpuidle/governors/menu.c')
-rw-r--r-- drivers/cpuidle/governors/menu.c | 42
1 file changed, 28 insertions(+), 14 deletions(-)
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 78d77c5dc35c..8d7cf3f31450 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -34,21 +34,28 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
 static int menu_select(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
+	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
 	int i;
 
+	/* Special case when user has set very strict latency requirement */
+	if (unlikely(latency_req == 0)) {
+		data->last_state_idx = 0;
+		return 0;
+	}
+
 	/* determine the expected residency time */
 	data->expected_us =
 		(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
 
 	/* find the deepest idle state that satisfies our constraints */
-	for (i = 1; i < dev->state_count; i++) {
+	for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
 		struct cpuidle_state *s = &dev->states[i];
 
 		if (s->target_residency > data->expected_us)
 			break;
 		if (s->target_residency > data->predicted_us)
 			break;
-		if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
+		if (s->exit_latency > latency_req)
 			break;
 	}
 
@@ -67,9 +74,9 @@ static void menu_reflect(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int last_idx = data->last_state_idx;
-	unsigned int measured_us =
-		cpuidle_get_last_residency(dev) + data->elapsed_us;
+	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
 	struct cpuidle_state *target = &dev->states[last_idx];
+	unsigned int measured_us;
 
 	/*
 	 * Ugh, this idle state doesn't support residency measurements, so we
@@ -77,20 +84,27 @@ static void menu_reflect(struct cpuidle_device *dev)
 	 * for one full standard timer tick. However, be aware that this
 	 * could potentially result in a suboptimal state transition.
 	 */
-	if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
-		measured_us = USEC_PER_SEC / HZ;
+	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
+		last_idle_us = USEC_PER_SEC / HZ;
+
+	/*
+	 * measured_us and elapsed_us are the cumulative idle time, since the
+	 * last time we were woken out of idle by an interrupt.
+	 */
+	if (data->elapsed_us <= data->elapsed_us + last_idle_us)
+		measured_us = data->elapsed_us + last_idle_us;
+	else
+		measured_us = -1;
+
+	/* Predict time until next break event */
+	data->predicted_us = max(measured_us, data->last_measured_us);
 
-	/* Predict time remaining until next break event */
-	if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
-		data->predicted_us = max(measured_us, data->last_measured_us);
+	if (last_idle_us + BREAK_FUZZ <
+	    data->expected_us - target->exit_latency) {
 		data->last_measured_us = measured_us;
 		data->elapsed_us = 0;
 	} else {
-		if (data->elapsed_us < data->elapsed_us + measured_us)
-			data->elapsed_us = measured_us;
-		else
-			data->elapsed_us = -1;
-		data->predicted_us = max(measured_us, data->last_measured_us);
+		data->elapsed_us = measured_us;
 	}
 }
 
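To make the selection logic above easier to follow outside the kernel, here is a small user-space model of `menu_select()` as it stands after the patch. The `struct state` table, the numbers, and the `select_state()` helper are invented for illustration; only the `latency_req == 0` fast path and the three break conditions mirror the patched code.

```c
/*
 * User-space sketch (made-up numbers, not the kernel API) of the menu
 * governor's state walk after this patch: read the latency bound once,
 * bail out to the shallowest state for a zero bound, then walk the
 * states shallow-to-deep and stop at the first one that violates a
 * constraint.
 */
#include <stdio.h>

struct state {
	unsigned int exit_latency;	/* worst-case wakeup cost, us */
	unsigned int target_residency;	/* break-even sleep length, us */
};

static int select_state(const struct state *s, int count,
			unsigned int expected_us, unsigned int predicted_us,
			unsigned int latency_req)
{
	int i, last = 0;

	/* Strict latency requirement: pick state 0, skip the walk. */
	if (latency_req == 0)
		return 0;

	for (i = 1; i < count; i++) {
		if (s[i].target_residency > expected_us)
			break;		/* won't sleep long enough */
		if (s[i].target_residency > predicted_us)
			break;		/* history says we'll wake early */
		if (s[i].exit_latency > latency_req)
			break;		/* wakeup too slow for PM QoS */
		last = i;
	}
	return last;
}

int main(void)
{
	struct state states[] = {
		{ 1, 1 }, { 10, 20 }, { 100, 400 }, { 1000, 5000 },
	};

	/* Expect state 2: state 3 needs 5000us residency, we have 800. */
	printf("selected state %d\n", select_state(states, 4, 800, 700, 500));
	return 0;
}
```

Hoisting `pm_qos_requirement()` into `latency_req` also means the QoS value is read once per invocation instead of once per loop iteration, so the whole walk sees one consistent bound.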
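The `elapsed_us <= elapsed_us + last_idle_us` test the patch adds in `menu_reflect()` is a standard unsigned-overflow guard: unsigned addition wraps modulo 2^N, so a sum that comes out smaller than one of its operands must have wrapped, and the accumulator is saturated at `-1` (which converts to `UINT_MAX`) rather than restarting from a small bogus value. A standalone sketch of the same idiom, where `saturating_add()` is a made-up helper rather than kernel code:

```c
/*
 * Sketch of the overflow guard used when accumulating idle time: if
 * a + b wrapped around, the sum is smaller than a, so saturate at the
 * maximum representable value instead of reporting a tiny total.
 */
#include <stdio.h>
#include <limits.h>

static unsigned int saturating_add(unsigned int a, unsigned int b)
{
	/* Same test as the patch: the sum only shrinks if it wrapped. */
	if (a <= a + b)
		return a + b;
	return -1;	/* -1 converts to UINT_MAX for an unsigned int */
}

int main(void)
{
	printf("%u\n", saturating_add(100, 200));		/* 300 */
	printf("%u\n", saturating_add(UINT_MAX - 5, 10));	/* UINT_MAX */
	return 0;
}
```

The old code applied this guard only on the else branch and after `measured_us` had already been computed with an unchecked addition; moving the check to the point where `elapsed_us` and the last residency are combined closes that gap.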