author	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-15 15:47:16 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-08-15 15:47:16 -0400
commit	40a342664078ebcb4006a89af96f7982fac36032 (patch)
tree	e8bcaac5a116ab9a95ffa5d04046b0aa2159d59e
parent	4ad193b43f6da6f15e19eda338c71e5b41383912 (diff)
parent	22d9aac23562ea0e921db152f11cf39e3c2b9945 (diff)
Merge branch 'release-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-acpi-2.6
* 'release-2.6.27' of git://git.kernel.org/pub/scm/linux/kernel/git/ak/linux-acpi-2.6:
  cpuidle: Make ladder governor honor latency requirements fully
  cpuidle: Menu governor fix wrong usage of measured_us
  cpuidle: Do not use poll_idle unless user asks for it
  x86: Fix ioremap off by one BUG
arch/x86/mm/ioremap.c              |  2
drivers/cpuidle/governors/ladder.c | 26
drivers/cpuidle/governors/menu.c   | 42
3 files changed, 51 insertions(+), 19 deletions(-)
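Both governor fixes share one pattern: the PM QoS CPU/DMA latency bound is read once per invocation via pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY), and a bound of exactly 0 is treated as a demand for the zero-latency polling state rather than compared against every state's exit latency. A minimal standalone sketch of that pattern (plain C; idle_state and select_state_sketch are illustrative names, not kernel identifiers):

	struct idle_state { unsigned int exit_latency; };

	/* Sketch only: the kernel reads the bound from PM QoS; here it
	 * is a parameter. Returns the index of the chosen idle state. */
	static int select_state_sketch(const struct idle_state *states,
				       int state_count, unsigned int latency_req)
	{
		int i;

		/* Bound of 0: the user tolerates no wakeup latency at
		 * all, so short-circuit to the polling state (index 0). */
		if (latency_req == 0)
			return 0;

		/* Compare each deeper state against the bound cached up
		 * front, instead of re-reading PM QoS for every state. */
		for (i = 1; i < state_count; i++)
			if (states[i].exit_latency > latency_req)
				break;
		return i - 1;
	}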
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 016f335bbeea..6ba6f889c79d 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -170,7 +170,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
 	phys_addr &= PAGE_MASK;
 	size = PAGE_ALIGN(last_addr+1) - phys_addr;
 
-	retval = reserve_memtype(phys_addr, phys_addr + size,
+	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
 						prot_val, &new_prot_val);
 	if (retval) {
 		pr_debug("Warning: reserve_memtype returned %d\n", retval);
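The "off by one" here is an unsigned wrap at the very top of the physical address space: with a 32-bit resource_size_t, mapping the last page makes phys_addr + size overflow to 0, so reserve_memtype() sees a range that ends below its start. A standalone illustration of the arithmetic (userspace C, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t phys_addr = 0xfffff000u; /* last 4 KiB page of a 32-bit space */
		uint32_t size = 0x1000u;          /* PAGE_SIZE */

		/* Old expression: the 32-bit addition wraps to 0. */
		uint64_t end_old = phys_addr + size;

		/* Patched expression: widen before adding, end becomes 2^32. */
		uint64_t end_new = (uint64_t)phys_addr + size;

		printf("old end: %#llx\n", (unsigned long long)end_old); /* 0 */
		printf("new end: %#llx\n", (unsigned long long)end_new); /* 0x100000000 */
		return 0;
	}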
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index ba7b9a6b17a1..a4bec3f919aa 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,10 +67,17 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
 	struct ladder_device_state *last_state;
 	int last_residency, last_idx = ldev->last_state_idx;
+	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
 
 	if (unlikely(!ldev))
 		return 0;
 
+	/* Special case when user has set very strict latency requirement */
+	if (unlikely(latency_req == 0)) {
+		ladder_do_selection(ldev, last_idx, 0);
+		return 0;
+	}
+
 	last_state = &ldev->states[last_idx];
 
 	if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
@@ -81,8 +88,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	/* consider promotion */
 	if (last_idx < dev->state_count - 1 &&
 	    last_residency > last_state->threshold.promotion_time &&
-	    dev->states[last_idx + 1].exit_latency <=
-			pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
+	    dev->states[last_idx + 1].exit_latency <= latency_req) {
 		last_state->stats.promotion_count++;
 		last_state->stats.demotion_count = 0;
 		if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
@@ -92,7 +98,19 @@ static int ladder_select_state(struct cpuidle_device *dev)
 	}
 
 	/* consider demotion */
-	if (last_idx > 0 &&
+	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
+	    dev->states[last_idx].exit_latency > latency_req) {
+		int i;
+
+		for (i = last_idx - 1; i > CPUIDLE_DRIVER_STATE_START; i--) {
+			if (dev->states[i].exit_latency <= latency_req)
+				break;
+		}
+		ladder_do_selection(ldev, last_idx, i);
+		return i;
+	}
+
+	if (last_idx > CPUIDLE_DRIVER_STATE_START &&
 	    last_residency < last_state->threshold.demotion_time) {
 		last_state->stats.demotion_count++;
 		last_state->stats.promotion_count = 0;
@@ -117,7 +135,7 @@ static int ladder_enable_device(struct cpuidle_device *dev)
 	struct ladder_device_state *lstate;
 	struct cpuidle_state *state;
 
-	ldev->last_state_idx = 0;
+	ldev->last_state_idx = CPUIDLE_DRIVER_STATE_START;
 
 	for (i = 0; i < dev->state_count; i++) {
 		state = &dev->states[i];
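Besides caching the latency bound, the ladder change adds a demotion path that did not exist before: when the current rung's exit latency exceeds the QoS bound, the governor now steps down to the deepest rung that still fits instead of merely refusing promotion. A simplified standalone sketch of that scan (STATE_START stands in for CPUIDLE_DRIVER_STATE_START; the types are illustrative):

	#define STATE_START 0 /* 1 in the kernel when a polling state exists */

	struct rung_sketch { unsigned int exit_latency; };

	/* Called only when last_idx > STATE_START and the current rung's
	 * exit latency exceeds latency_req; returns the deepest shallower
	 * rung that fits, bottoming out at STATE_START if none does. */
	static int demote_for_latency(const struct rung_sketch *states,
				      int last_idx, unsigned int latency_req)
	{
		int i;

		for (i = last_idx - 1; i > STATE_START; i--)
			if (states[i].exit_latency <= latency_req)
				break;
		return i;
	}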
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 78d77c5dc35c..8d7cf3f31450 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -34,21 +34,28 @@ static DEFINE_PER_CPU(struct menu_device, menu_devices);
 static int menu_select(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
+	int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
 	int i;
 
+	/* Special case when user has set very strict latency requirement */
+	if (unlikely(latency_req == 0)) {
+		data->last_state_idx = 0;
+		return 0;
+	}
+
 	/* determine the expected residency time */
 	data->expected_us =
 		(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
 
 	/* find the deepest idle state that satisfies our constraints */
-	for (i = 1; i < dev->state_count; i++) {
+	for (i = CPUIDLE_DRIVER_STATE_START + 1; i < dev->state_count; i++) {
 		struct cpuidle_state *s = &dev->states[i];
 
 		if (s->target_residency > data->expected_us)
 			break;
 		if (s->target_residency > data->predicted_us)
 			break;
-		if (s->exit_latency > pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY))
+		if (s->exit_latency > latency_req)
 			break;
 	}
 
@@ -67,9 +74,9 @@ static void menu_reflect(struct cpuidle_device *dev)
 {
 	struct menu_device *data = &__get_cpu_var(menu_devices);
 	int last_idx = data->last_state_idx;
-	unsigned int measured_us =
-		cpuidle_get_last_residency(dev) + data->elapsed_us;
+	unsigned int last_idle_us = cpuidle_get_last_residency(dev);
 	struct cpuidle_state *target = &dev->states[last_idx];
+	unsigned int measured_us;
 
 	/*
 	 * Ugh, this idle state doesn't support residency measurements, so we
@@ -77,20 +84,27 @@ static void menu_reflect(struct cpuidle_device *dev)
 	 * for one full standard timer tick. However, be aware that this
 	 * could potentially result in a suboptimal state transition.
 	 */
-	if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
-		measured_us = USEC_PER_SEC / HZ;
+	if (unlikely(!(target->flags & CPUIDLE_FLAG_TIME_VALID)))
+		last_idle_us = USEC_PER_SEC / HZ;
 
-	/* Predict time remaining until next break event */
-	if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
-		data->predicted_us = max(measured_us, data->last_measured_us);
+	/*
+	 * measured_us and elapsed_us are the cumulative idle time, since the
+	 * last time we were woken out of idle by an interrupt.
+	 */
+	if (data->elapsed_us <= data->elapsed_us + last_idle_us)
+		measured_us = data->elapsed_us + last_idle_us;
+	else
+		measured_us = -1;
+
+	/* Predict time until next break event */
+	data->predicted_us = max(measured_us, data->last_measured_us);
+
+	if (last_idle_us + BREAK_FUZZ <
+		data->expected_us - target->exit_latency) {
 		data->last_measured_us = measured_us;
 		data->elapsed_us = 0;
 	} else {
-		if (data->elapsed_us < data->elapsed_us + measured_us)
-			data->elapsed_us = measured_us;
-		else
-			data->elapsed_us = -1;
-		data->predicted_us = max(measured_us, data->last_measured_us);
+		data->elapsed_us = measured_us;
 	}
 }
 
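The core of the measured_us fix is visible in the last hunk: the raw last residency (last_idle_us) and the carried-over elapsed_us are summed with an explicit wrap check before feeding the prediction, and predicted_us is now updated on every reflect rather than only on the early-wakeup branch. A self-contained sketch of the saturating sum, mirroring the patched logic:

	#include <limits.h>

	/* If adding the latest idle residency to the running total would
	 * wrap the unsigned counter, clamp to UINT_MAX (the kernel writes
	 * this as (unsigned int)-1) instead of producing a small bogus
	 * value that would distort predicted_us. */
	static unsigned int accumulate_idle_us(unsigned int elapsed_us,
					       unsigned int last_idle_us)
	{
		if (elapsed_us <= elapsed_us + last_idle_us) /* no wrap */
			return elapsed_us + last_idle_us;
		return UINT_MAX;
	}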