author		Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 10:52:36 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-06-23 10:52:36 -0400
commit		37224470c8c6d90a4062e76a08d4dc1fcf91fc89 (patch)
tree		627f537177bf8e951c12bec04c4a85f0125f5ece /arch
parent		e83319510b04dd51a60da8a0b4ccf8b92b3ab1ad (diff)
parent		ae6c859b7dcd708efadf1c76279c33db213e3506 (diff)
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (65 commits)
  ACPI: suppress power button event on S3 resume
  ACPI: resolve merge conflict between sem2mutex and processor_perflib.c
  ACPI: use for_each_possible_cpu() instead of for_each_cpu()
  ACPI: delete newly added debugging macros in processor_perflib.c
  ACPI: UP build fix for bugzilla-5737
  Enable P-state software coordination via _PDC
  P-state software coordination for speedstep-centrino
  P-state software coordination for acpi-cpufreq
  P-state software coordination for ACPI core
  ACPI: create acpi_thermal_resume()
  ACPI: create acpi_fan_suspend()/acpi_fan_resume()
  ACPI: pass pm_message_t from acpi_device_suspend() to root_suspend()
  ACPI: create acpi_device_suspend()/acpi_device_resume()
  ACPI: replace spin_lock_irq with mutex for ec poll mode
  ACPI: Allow a WAN module enable/disable on a Thinkpad X60.
  sem2mutex: acpi, acpi_link_lock
  ACPI: delete unused acpi_bus_drivers_lock
  sem2mutex: drivers/acpi/processor_perflib.c
  ACPI add ia64 exports to build acpi_memhotplug as a module
  ACPI: asus_acpi_init(): propagate correct return value
  ...

Manual resolve of conflicts in:

	arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
	arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
	include/acpi/processor.h
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/acpi/boot.c                       |   8
-rw-r--r--  arch/i386/kernel/acpi/processor.c                  |   2
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c        | 291
-rw-r--r--  arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c  | 254
-rw-r--r--  arch/ia64/Kconfig                                  |   1
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c                    |   2
-rw-r--r--  arch/ia64/kernel/acpi.c                            |   4
-rw-r--r--  arch/ia64/mm/init.c                                |   2
-rw-r--r--  arch/x86_64/Kconfig                                |   1
-rw-r--r--  arch/x86_64/kernel/acpi/Makefile                   |   1
-rw-r--r--  arch/x86_64/kernel/acpi/processor.c                |  72
11 files changed, 401 insertions(+), 237 deletions(-)
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index fbe93084244c..97ca17189af5 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -217,7 +217,7 @@ static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
 {
 	struct acpi_table_madt *madt = NULL;
 
-	if (!phys_addr || !size)
+	if (!phys_addr || !size || !cpu_has_apic)
 		return -EINVAL;
 
 	madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
@@ -623,9 +623,9 @@ extern u32 pmtmr_ioport;
 
 static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
 {
-	struct fadt_descriptor_rev2 *fadt = NULL;
+	struct fadt_descriptor *fadt = NULL;
 
-	fadt = (struct fadt_descriptor_rev2 *)__acpi_map_table(phys, size);
+	fadt = (struct fadt_descriptor *)__acpi_map_table(phys, size);
 	if (!fadt) {
 		printk(KERN_WARNING PREFIX "Unable to map FADT\n");
 		return 0;
@@ -756,7 +756,7 @@ static int __init acpi_parse_madt_ioapic_entries(void)
 		return -ENODEV;
 	}
 
-	if (!cpu_has_apic)
+	if (!cpu_has_apic)
 		return -ENODEV;
 
 	/*
diff --git a/arch/i386/kernel/acpi/processor.c b/arch/i386/kernel/acpi/processor.c
index 9f4cc02717ec..b54fded49834 100644
--- a/arch/i386/kernel/acpi/processor.c
+++ b/arch/i386/kernel/acpi/processor.c
@@ -47,7 +47,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
 	buf[2] = ACPI_PDC_C_CAPABILITY_SMP;
 
 	if (cpu_has(c, X86_FEATURE_EST))
-		buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+		buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP;
 
 	obj->type = ACPI_TYPE_BUFFER;
 	obj->buffer.length = 12;
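
Note: this one-line change switches the EST capability bit advertised through _PDC from ACPI_PDC_EST_CAPABILITY_SMP to ACPI_PDC_EST_CAPABILITY_SWSMP, i.e. the OS now tells the firmware it will software-coordinate Enhanced SpeedStep across dependent CPUs. For reference, a minimal sketch of how the surrounding 12-byte capability buffer is assembled (the helper name here is hypothetical; the layout follows init_intel_pdc() in the x86_64 file deleted at the end of this patch):

	/* Sketch: fill the 3-dword _PDC capability buffer (hypothetical helper). */
	static void fill_pdc_buf(u32 *buf, struct cpuinfo_x86 *c)
	{
		buf[0] = ACPI_PDC_REVISION_ID;		/* _PDC revision */
		buf[1] = 1;				/* one capability dword follows */
		buf[2] = ACPI_PDC_C_CAPABILITY_SMP;	/* C-state capabilities */
		if (cpu_has(c, X86_FEATURE_EST))
			buf[2] |= ACPI_PDC_EST_CAPABILITY_SWSMP; /* SW-coordinated EST */
	}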
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 1a7bdcef1926..05668e3598c0 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -48,12 +48,13 @@ MODULE_LICENSE("GPL");
 
 
 struct cpufreq_acpi_io {
-	struct acpi_processor_performance	acpi_data;
+	struct acpi_processor_performance	*acpi_data;
 	struct cpufreq_frequency_table		*freq_table;
 	unsigned int				resume;
 };
 
 static struct cpufreq_acpi_io	*acpi_io_data[NR_CPUS];
+static struct acpi_processor_performance	*acpi_perf_data[NR_CPUS];
 
 static struct cpufreq_driver	acpi_cpufreq_driver;
 
@@ -104,64 +105,43 @@ acpi_processor_set_performance (
 {
 	u16			port = 0;
 	u8			bit_width = 0;
-	int			ret;
-	u32			value = 0;
 	int			i = 0;
-	struct cpufreq_freqs    cpufreq_freqs;
-	cpumask_t		saved_mask;
+	int			ret = 0;
+	u32			value = 0;
 	int			retval;
+	struct acpi_processor_performance	*perf;
 
 	dprintk("acpi_processor_set_performance\n");
 
-	/*
-	 * TBD: Use something other than set_cpus_allowed.
-	 * As set_cpus_allowed is a bit racy,
-	 * with any other set_cpus_allowed for this process.
-	 */
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, cpumask_of_cpu(cpu));
-	if (smp_processor_id() != cpu) {
-		return (-EAGAIN);
-	}
-
-	if (state == data->acpi_data.state) {
+	retval = 0;
+	perf = data->acpi_data;
+	if (state == perf->state) {
 		if (unlikely(data->resume)) {
 			dprintk("Called after resume, resetting to P%d\n", state);
 			data->resume = 0;
 		} else {
 			dprintk("Already at target state (P%d)\n", state);
-			retval = 0;
-			goto migrate_end;
+			return (retval);
 		}
 	}
 
-	dprintk("Transitioning from P%d to P%d\n",
-		data->acpi_data.state, state);
-
-	/* cpufreq frequency struct */
-	cpufreq_freqs.cpu = cpu;
-	cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency;
-	cpufreq_freqs.new = data->freq_table[state].frequency;
-
-	/* notify cpufreq */
-	cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
+	dprintk("Transitioning from P%d to P%d\n", perf->state, state);
 
 	/*
 	 * First we write the target state's 'control' value to the
 	 * control_register.
 	 */
 
-	port = data->acpi_data.control_register.address;
-	bit_width = data->acpi_data.control_register.bit_width;
-	value = (u32) data->acpi_data.states[state].control;
+	port = perf->control_register.address;
+	bit_width = perf->control_register.bit_width;
+	value = (u32) perf->states[state].control;
 
 	dprintk("Writing 0x%08x to port 0x%04x\n", value, port);
 
 	ret = acpi_processor_write_port(port, bit_width, value);
 	if (ret) {
 		dprintk("Invalid port width 0x%04x\n", bit_width);
-		retval = ret;
-		goto migrate_end;
+		return (ret);
 	}
 
 	/*
@@ -177,48 +157,35 @@ acpi_processor_set_performance (
 	 * before giving up.
 	 */
 
-	port = data->acpi_data.status_register.address;
-	bit_width = data->acpi_data.status_register.bit_width;
+	port = perf->status_register.address;
+	bit_width = perf->status_register.bit_width;
 
 	dprintk("Looking for 0x%08x from port 0x%04x\n",
-		(u32) data->acpi_data.states[state].status, port);
+		(u32) perf->states[state].status, port);
 
-	for (i=0; i<100; i++) {
+	for (i = 0; i < 100; i++) {
 		ret = acpi_processor_read_port(port, bit_width, &value);
 		if (ret) {
 			dprintk("Invalid port width 0x%04x\n", bit_width);
-			retval = ret;
-			goto migrate_end;
+			return (ret);
 		}
-		if (value == (u32) data->acpi_data.states[state].status)
+		if (value == (u32) perf->states[state].status)
 			break;
 		udelay(10);
 	}
 	} else {
-		value = (u32) data->acpi_data.states[state].status;
+		value = (u32) perf->states[state].status;
 	}
 
-	/* notify cpufreq */
-	cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
-
-	if (unlikely(value != (u32) data->acpi_data.states[state].status)) {
-		unsigned int tmp = cpufreq_freqs.new;
-		cpufreq_freqs.new = cpufreq_freqs.old;
-		cpufreq_freqs.old = tmp;
-		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE);
-		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE);
+	if (unlikely(value != (u32) perf->states[state].status)) {
 		printk(KERN_WARNING "acpi-cpufreq: Transition failed\n");
 		retval = -ENODEV;
-		goto migrate_end;
+		return (retval);
 	}
 
 	dprintk("Transition successful after %d microseconds\n", i * 10);
 
-	data->acpi_data.state = state;
-
-	retval = 0;
-migrate_end:
-	set_cpus_allowed(current, saved_mask);
+	perf->state = state;
 	return (retval);
 }
 
224 191
@@ -230,8 +197,17 @@ acpi_cpufreq_target (
 	unsigned int relation)
 {
 	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu];
+	struct acpi_processor_performance *perf;
+	struct cpufreq_freqs freqs;
+	cpumask_t online_policy_cpus;
+	cpumask_t saved_mask;
+	cpumask_t set_mask;
+	cpumask_t covered_cpus;
+	unsigned int cur_state = 0;
 	unsigned int next_state = 0;
 	unsigned int result = 0;
+	unsigned int j;
+	unsigned int tmp;
 
 	dprintk("acpi_cpufreq_setpolicy\n");
 
@@ -240,11 +216,95 @@ acpi_cpufreq_target (
 			target_freq,
 			relation,
 			&next_state);
-	if (result)
+	if (unlikely(result))
 		return (result);
 
-	result = acpi_processor_set_performance (data, policy->cpu, next_state);
+	perf = data->acpi_data;
+	cur_state = perf->state;
+	freqs.old = data->freq_table[cur_state].frequency;
+	freqs.new = data->freq_table[next_state].frequency;
+
+#ifdef CONFIG_HOTPLUG_CPU
+	/* cpufreq holds the hotplug lock, so we are safe from here on */
+	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+#else
+	online_policy_cpus = policy->cpus;
+#endif
+
+	for_each_cpu_mask(j, online_policy_cpus) {
+		freqs.cpu = j;
+		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	}
+
+	/*
+	 * We need to call driver->target() on all or any CPU in
+	 * policy->cpus, depending on policy->shared_type.
+	 */
+	saved_mask = current->cpus_allowed;
+	cpus_clear(covered_cpus);
+	for_each_cpu_mask(j, online_policy_cpus) {
+		/*
+		 * Support for SMP systems.
+		 * Make sure we are running on CPU that wants to change freq
+		 */
+		cpus_clear(set_mask);
+		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+			cpus_or(set_mask, set_mask, online_policy_cpus);
+		else
+			cpu_set(j, set_mask);
+
+		set_cpus_allowed(current, set_mask);
+		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+			dprintk("couldn't limit to CPUs in this domain\n");
+			result = -EAGAIN;
+			break;
+		}
+
+		result = acpi_processor_set_performance (data, j, next_state);
+		if (result) {
+			result = -EAGAIN;
+			break;
+		}
+
+		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+			break;
+
+		cpu_set(j, covered_cpus);
+	}
+
+	for_each_cpu_mask(j, online_policy_cpus) {
+		freqs.cpu = j;
+		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	}
 
+	if (unlikely(result)) {
+		/*
+		 * We have failed halfway through the frequency change.
+		 * We have sent callbacks to online_policy_cpus and
+		 * acpi_processor_set_performance() has been called on
+		 * coverd_cpus. Best effort undo..
+		 */
+
+		if (!cpus_empty(covered_cpus)) {
+			for_each_cpu_mask(j, covered_cpus) {
+				policy->cpu = j;
+				acpi_processor_set_performance (data,
+								j,
+								cur_state);
+			}
+		}
+
+		tmp = freqs.new;
+		freqs.new = freqs.old;
+		freqs.old = tmp;
+		for_each_cpu_mask(j, online_policy_cpus) {
+			freqs.cpu = j;
+			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+		}
+	}
+
+	set_cpus_allowed(current, saved_mask);
 	return (result);
 }
 
@@ -270,30 +330,65 @@ acpi_cpufreq_guess_freq (
 	struct cpufreq_acpi_io	*data,
 	unsigned int		cpu)
 {
+	struct acpi_processor_performance	*perf = data->acpi_data;
+
 	if (cpu_khz) {
 		/* search the closest match to cpu_khz */
 		unsigned int i;
 		unsigned long freq;
-		unsigned long freqn = data->acpi_data.states[0].core_frequency * 1000;
+		unsigned long freqn = perf->states[0].core_frequency * 1000;
 
-		for (i=0; i < (data->acpi_data.state_count - 1); i++) {
+		for (i = 0; i < (perf->state_count - 1); i++) {
 			freq = freqn;
-			freqn = data->acpi_data.states[i+1].core_frequency * 1000;
+			freqn = perf->states[i+1].core_frequency * 1000;
 			if ((2 * cpu_khz) > (freqn + freq)) {
-				data->acpi_data.state = i;
+				perf->state = i;
 				return (freq);
 			}
 		}
-		data->acpi_data.state = data->acpi_data.state_count - 1;
+		perf->state = perf->state_count - 1;
 		return (freqn);
-	} else
+	} else {
 		/* assume CPU is at P0... */
-		data->acpi_data.state = 0;
-		return data->acpi_data.states[0].core_frequency * 1000;
-
+		perf->state = 0;
+		return perf->states[0].core_frequency * 1000;
+	}
 }
 
 
+/*
+ * acpi_cpufreq_early_init - initialize ACPI P-States library
+ *
+ * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
+ * in order to determine correct frequency and voltage pairings. We can
+ * do _PDC and _PSD and find out the processor dependency for the
+ * actual init that will happen later...
+ */
+static int acpi_cpufreq_early_init_acpi(void)
+{
+	struct acpi_processor_performance	*data;
+	unsigned int				i, j;
+
+	dprintk("acpi_cpufreq_early_init\n");
+
+	for_each_cpu(i) {
+		data = kzalloc(sizeof(struct acpi_processor_performance),
+			GFP_KERNEL);
+		if (!data) {
+			for_each_cpu(j) {
+				kfree(acpi_perf_data[j]);
+				acpi_perf_data[j] = NULL;
+			}
+			return (-ENOMEM);
+		}
+		acpi_perf_data[i] = data;
+	}
+
+	/* Do initialization in ACPI core */
+	acpi_processor_preregister_performance(acpi_perf_data);
+	return 0;
+}
+
 static int
 acpi_cpufreq_cpu_init (
 	struct cpufreq_policy *policy)
@@ -303,41 +398,51 @@ acpi_cpufreq_cpu_init (
 	struct cpufreq_acpi_io	*data;
 	unsigned int		result = 0;
 	struct cpuinfo_x86 *c = &cpu_data[policy->cpu];
+	struct acpi_processor_performance	*perf;
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
+	if (!acpi_perf_data[cpu])
+		return (-ENODEV);
+
 	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL);
 	if (!data)
 		return (-ENOMEM);
 
+	data->acpi_data = acpi_perf_data[cpu];
 	acpi_io_data[cpu] = data;
 
-	result = acpi_processor_register_performance(&data->acpi_data, cpu);
+	result = acpi_processor_register_performance(data->acpi_data, cpu);
 
 	if (result)
 		goto err_free;
 
+	perf = data->acpi_data;
+	policy->cpus = perf->shared_cpu_map;
+	policy->shared_type = perf->shared_type;
+
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
 	}
 
 	/* capability check */
-	if (data->acpi_data.state_count <= 1) {
+	if (perf->state_count <= 1) {
 		dprintk("No P-States\n");
 		result = -ENODEV;
 		goto err_unreg;
 	}
-	if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
-	    (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+
+	if ((perf->control_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO) ||
+	    (perf->status_register.space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
 		dprintk("Unsupported address space [%d, %d]\n",
-			(u32) (data->acpi_data.control_register.space_id),
-			(u32) (data->acpi_data.status_register.space_id));
+			(u32) (perf->control_register.space_id),
+			(u32) (perf->status_register.space_id));
 		result = -ENODEV;
 		goto err_unreg;
 	}
 
 	/* alloc freq_table */
-	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (data->acpi_data.state_count + 1), GFP_KERNEL);
+	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * (perf->state_count + 1), GFP_KERNEL);
 	if (!data->freq_table) {
 		result = -ENOMEM;
 		goto err_unreg;
@@ -345,9 +450,9 @@ acpi_cpufreq_cpu_init (
 
 	/* detect transition latency */
 	policy->cpuinfo.transition_latency = 0;
-	for (i=0; i<data->acpi_data.state_count; i++) {
-		if ((data->acpi_data.states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
-			policy->cpuinfo.transition_latency = data->acpi_data.states[i].transition_latency * 1000;
+	for (i=0; i<perf->state_count; i++) {
+		if ((perf->states[i].transition_latency * 1000) > policy->cpuinfo.transition_latency)
+			policy->cpuinfo.transition_latency = perf->states[i].transition_latency * 1000;
 	}
 	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 
@@ -355,11 +460,11 @@ acpi_cpufreq_cpu_init (
 	policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
 
 	/* table init */
-	for (i=0; i<=data->acpi_data.state_count; i++)
+	for (i=0; i<=perf->state_count; i++)
 	{
 		data->freq_table[i].index = i;
-		if (i<data->acpi_data.state_count)
-			data->freq_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000;
+		if (i<perf->state_count)
+			data->freq_table[i].frequency = perf->states[i].core_frequency * 1000;
 		else
 			data->freq_table[i].frequency = CPUFREQ_TABLE_END;
 	}
@@ -374,12 +479,12 @@ acpi_cpufreq_cpu_init (
 
 	printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management activated.\n",
 	       cpu);
-	for (i = 0; i < data->acpi_data.state_count; i++)
+	for (i = 0; i < perf->state_count; i++)
 		dprintk("     %cP%d: %d MHz, %d mW, %d uS\n",
-			(i == data->acpi_data.state?'*':' '), i,
-			(u32) data->acpi_data.states[i].core_frequency,
-			(u32) data->acpi_data.states[i].power,
-			(u32) data->acpi_data.states[i].transition_latency);
+			(i == perf->state?'*':' '), i,
+			(u32) perf->states[i].core_frequency,
+			(u32) perf->states[i].power,
+			(u32) perf->states[i].transition_latency);
 
 	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);
 
@@ -394,7 +499,7 @@ acpi_cpufreq_cpu_init (
 err_freqfree:
 	kfree(data->freq_table);
 err_unreg:
-	acpi_processor_unregister_performance(&data->acpi_data, cpu);
+	acpi_processor_unregister_performance(perf, cpu);
 err_free:
 	kfree(data);
 	acpi_io_data[cpu] = NULL;
@@ -415,7 +520,7 @@ acpi_cpufreq_cpu_exit (
 	if (data) {
 		cpufreq_frequency_table_put_attr(policy->cpu);
 		acpi_io_data[policy->cpu] = NULL;
-		acpi_processor_unregister_performance(&data->acpi_data, policy->cpu);
+		acpi_processor_unregister_performance(data->acpi_data, policy->cpu);
 		kfree(data);
 	}
 
@@ -462,7 +567,10 @@ acpi_cpufreq_init (void)
 
 	dprintk("acpi_cpufreq_init\n");
 
-	result = cpufreq_register_driver(&acpi_cpufreq_driver);
+	result = acpi_cpufreq_early_init_acpi();
+
+	if (!result)
+		result = cpufreq_register_driver(&acpi_cpufreq_driver);
 
 	return (result);
 }
@@ -471,10 +579,15 @@ acpi_cpufreq_init (void)
 static void __exit
 acpi_cpufreq_exit (void)
 {
+	unsigned int	i;
 	dprintk("acpi_cpufreq_exit\n");
 
 	cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
+	for_each_cpu(i) {
+		kfree(acpi_perf_data[i]);
+		acpi_perf_data[i] = NULL;
+	}
 	return;
 }
 
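
Note: the rewritten acpi_cpufreq_target() above is where P-state software coordination happens: PRECHANGE notifications are sent to every online CPU in the policy, the calling task is then migrated onto each affected CPU (or onto any one of them when policy->shared_type is CPUFREQ_SHARED_TYPE_ANY) to perform the port write, and on failure the covered CPUs are walked back to cur_state. A distilled sketch of that loop, with the hypothetical do_transition() standing in for acpi_processor_set_performance() and the rollback elided:

	/* Sketch of the coordination loop; do_transition() is hypothetical. */
	static int coordinate(struct cpufreq_policy *policy, unsigned int state)
	{
		cpumask_t saved_mask = current->cpus_allowed;
		cpumask_t set_mask;
		unsigned int j;
		int ret = 0;

		for_each_cpu_mask(j, policy->cpus) {
			cpus_clear(set_mask);
			if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
				set_mask = policy->cpus;	/* any CPU may do the write */
			else
				cpu_set(j, set_mask);		/* each CPU writes for itself */

			set_cpus_allowed(current, set_mask);	/* migrate onto target CPU */
			ret = do_transition(j, state);
			if (ret || policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
				break;
		}
		set_cpus_allowed(current, saved_mask);	/* restore caller affinity */
		return ret;
	}

The set_cpus_allowed() dance is needed because the P-state write is port I/O that must execute on a CPU inside the coordination domain.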
diff --git a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
index f1a82c5de1ba..31c3a5baaa7f 100644
--- a/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/i386/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -347,7 +347,36 @@ static unsigned int get_cur_freq(unsigned int cpu)
 
 #ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
 
-static struct acpi_processor_performance p;
+static struct acpi_processor_performance *acpi_perf_data[NR_CPUS];
+
+/*
+ * centrino_cpu_early_init_acpi - Do the preregistering with ACPI P-States
+ * library
+ *
+ * Before doing the actual init, we need to do _PSD related setup whenever
+ * supported by the BIOS. These are handled by this early_init routine.
+ */
+static int centrino_cpu_early_init_acpi(void)
+{
+	unsigned int	i, j;
+	struct acpi_processor_performance	*data;
+
+	for_each_cpu(i) {
+		data = kzalloc(sizeof(struct acpi_processor_performance),
+				GFP_KERNEL);
+		if (!data) {
+			for_each_cpu(j) {
+				kfree(acpi_perf_data[j]);
+				acpi_perf_data[j] = NULL;
+			}
+			return (-ENOMEM);
+		}
+		acpi_perf_data[i] = data;
+	}
+
+	acpi_processor_preregister_performance(acpi_perf_data);
+	return 0;
+}
 
 /*
  * centrino_cpu_init_acpi - register with ACPI P-States library
@@ -361,46 +390,51 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 	unsigned long			cur_freq;
 	int				result = 0, i;
 	unsigned int			cpu = policy->cpu;
+	struct acpi_processor_performance	*p;
+
+	p = acpi_perf_data[cpu];
 
 	/* register with ACPI core */
-	if (acpi_processor_register_performance(&p, cpu)) {
-		dprintk("obtaining ACPI data failed\n");
+	if (acpi_processor_register_performance(p, cpu)) {
+		dprintk(PFX "obtaining ACPI data failed\n");
 		return -EIO;
 	}
+	policy->cpus = p->shared_cpu_map;
+	policy->shared_type = p->shared_type;
 
 	/* verify the acpi_data */
-	if (p.state_count <= 1) {
+	if (p->state_count <= 1) {
 		dprintk("No P-States\n");
 		result = -ENODEV;
 		goto err_unreg;
 	}
 
-	if ((p.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
-	    (p.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+	if ((p->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) ||
+	    (p->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
 		dprintk("Invalid control/status registers (%x - %x)\n",
-			p.control_register.space_id, p.status_register.space_id);
+			p->control_register.space_id, p->status_register.space_id);
 		result = -EIO;
 		goto err_unreg;
 	}
 
-	for (i=0; i<p.state_count; i++) {
-		if (p.states[i].control != p.states[i].status) {
+	for (i=0; i<p->state_count; i++) {
+		if (p->states[i].control != p->states[i].status) {
 			dprintk("Different control (%llu) and status values (%llu)\n",
-				p.states[i].control, p.states[i].status);
+				p->states[i].control, p->states[i].status);
 			result = -EINVAL;
 			goto err_unreg;
 		}
 
-		if (!p.states[i].core_frequency) {
+		if (!p->states[i].core_frequency) {
 			dprintk("Zero core frequency for state %u\n", i);
 			result = -EINVAL;
 			goto err_unreg;
 		}
 
-		if (p.states[i].core_frequency > p.states[0].core_frequency) {
+		if (p->states[i].core_frequency > p->states[0].core_frequency) {
 			dprintk("P%u has larger frequency (%llu) than P0 (%llu), skipping\n", i,
-				p.states[i].core_frequency, p.states[0].core_frequency);
-			p.states[i].core_frequency = 0;
+				p->states[i].core_frequency, p->states[0].core_frequency);
+			p->states[i].core_frequency = 0;
 			continue;
 		}
 	}
@@ -412,26 +446,26 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 	}
 
 	centrino_model[cpu]->model_name=NULL;
-	centrino_model[cpu]->max_freq = p.states[0].core_frequency * 1000;
+	centrino_model[cpu]->max_freq = p->states[0].core_frequency * 1000;
 	centrino_model[cpu]->op_points = kmalloc(sizeof(struct cpufreq_frequency_table) *
-					     (p.state_count + 1), GFP_KERNEL);
+					     (p->state_count + 1), GFP_KERNEL);
 	if (!centrino_model[cpu]->op_points) {
 		result = -ENOMEM;
 		goto err_kfree;
 	}
 
-	for (i=0; i<p.state_count; i++) {
-		centrino_model[cpu]->op_points[i].index = p.states[i].control;
-		centrino_model[cpu]->op_points[i].frequency = p.states[i].core_frequency * 1000;
+	for (i=0; i<p->state_count; i++) {
+		centrino_model[cpu]->op_points[i].index = p->states[i].control;
+		centrino_model[cpu]->op_points[i].frequency = p->states[i].core_frequency * 1000;
 		dprintk("adding state %i with frequency %u and control value %04x\n",
 			i, centrino_model[cpu]->op_points[i].frequency, centrino_model[cpu]->op_points[i].index);
 	}
-	centrino_model[cpu]->op_points[p.state_count].frequency = CPUFREQ_TABLE_END;
+	centrino_model[cpu]->op_points[p->state_count].frequency = CPUFREQ_TABLE_END;
 
 	cur_freq = get_cur_freq(cpu);
 
-	for (i=0; i<p.state_count; i++) {
-		if (!p.states[i].core_frequency) {
+	for (i=0; i<p->state_count; i++) {
+		if (!p->states[i].core_frequency) {
 			dprintk("skipping state %u\n", i);
 			centrino_model[cpu]->op_points[i].frequency = CPUFREQ_ENTRY_INVALID;
 			continue;
@@ -447,7 +481,7 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 		}
 
 		if (cur_freq == centrino_model[cpu]->op_points[i].frequency)
-			p.state = i;
+			p->state = i;
 	}
 
 	/* notify BIOS that we exist */
@@ -460,12 +494,13 @@ static int centrino_cpu_init_acpi(struct cpufreq_policy *policy)
 err_kfree:
 	kfree(centrino_model[cpu]);
 err_unreg:
-	acpi_processor_unregister_performance(&p, cpu);
-	dprintk("invalid ACPI data\n");
+	acpi_processor_unregister_performance(p, cpu);
+	dprintk(PFX "invalid ACPI data\n");
 	return (result);
 }
 #else
 static inline int centrino_cpu_init_acpi(struct cpufreq_policy *policy) { return -ENODEV; }
+static inline int centrino_cpu_early_init_acpi(void) { return 0; }
 #endif
 
 static int centrino_cpu_init(struct cpufreq_policy *policy)
@@ -551,10 +586,15 @@ static int centrino_cpu_exit(struct cpufreq_policy *policy)
 
 #ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
 	if (!centrino_model[cpu]->model_name) {
-		dprintk("unregistering and freeing ACPI data\n");
-		acpi_processor_unregister_performance(&p, cpu);
-		kfree(centrino_model[cpu]->op_points);
-		kfree(centrino_model[cpu]);
+		static struct acpi_processor_performance *p;
+
+		if (acpi_perf_data[cpu]) {
+			p = acpi_perf_data[cpu];
+			dprintk("unregistering and freeing ACPI data\n");
+			acpi_processor_unregister_performance(p, cpu);
+			kfree(centrino_model[cpu]->op_points);
+			kfree(centrino_model[cpu]);
+		}
 	}
 #endif
 
@@ -588,63 +628,128 @@ static int centrino_target (struct cpufreq_policy *policy,
 			    unsigned int relation)
 {
 	unsigned int	newstate = 0;
-	unsigned int	msr, oldmsr, h, cpu = policy->cpu;
+	unsigned int	msr, oldmsr = 0, h = 0, cpu = policy->cpu;
 	struct cpufreq_freqs	freqs;
+	cpumask_t		online_policy_cpus;
 	cpumask_t		saved_mask;
-	int			retval;
+	cpumask_t		set_mask;
+	cpumask_t		covered_cpus;
+	int			retval = 0;
+	unsigned int		j, k, first_cpu, tmp;
 
-	if (centrino_model[cpu] == NULL)
+	if (unlikely(centrino_model[cpu] == NULL))
 		return -ENODEV;
 
-	/*
-	 * Support for SMP systems.
-	 * Make sure we are running on the CPU that wants to change frequency
-	 */
-	saved_mask = current->cpus_allowed;
-	set_cpus_allowed(current, policy->cpus);
-	if (!cpu_isset(smp_processor_id(), policy->cpus)) {
-		dprintk("couldn't limit to CPUs in this domain\n");
-		return(-EAGAIN);
+	if (unlikely(cpufreq_frequency_table_target(policy,
+			centrino_model[cpu]->op_points,
+			target_freq,
+			relation,
+			&newstate))) {
+		return -EINVAL;
 	}
 
-	if (cpufreq_frequency_table_target(policy, centrino_model[cpu]->op_points, target_freq,
-					   relation, &newstate)) {
-		retval = -EINVAL;
-		goto migrate_end;
-	}
+#ifdef CONFIG_HOTPLUG_CPU
+	/* cpufreq holds the hotplug lock, so we are safe from here on */
+	cpus_and(online_policy_cpus, cpu_online_map, policy->cpus);
+#else
+	online_policy_cpus = policy->cpus;
+#endif
 
-	msr = centrino_model[cpu]->op_points[newstate].index;
-	rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+	saved_mask = current->cpus_allowed;
+	first_cpu = 1;
+	cpus_clear(covered_cpus);
+	for_each_cpu_mask(j, online_policy_cpus) {
+		/*
+		 * Support for SMP systems.
+		 * Make sure we are running on CPU that wants to change freq
+		 */
+		cpus_clear(set_mask);
+		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+			cpus_or(set_mask, set_mask, online_policy_cpus);
+		else
+			cpu_set(j, set_mask);
+
+		set_cpus_allowed(current, set_mask);
+		if (unlikely(!cpu_isset(smp_processor_id(), set_mask))) {
+			dprintk("couldn't limit to CPUs in this domain\n");
+			retval = -EAGAIN;
+			if (first_cpu) {
+				/* We haven't started the transition yet. */
+				goto migrate_end;
+			}
+			break;
+		}
 
-	if (msr == (oldmsr & 0xffff)) {
-		retval = 0;
-		dprintk("no change needed - msr was and needs to be %x\n", oldmsr);
-		goto migrate_end;
-	}
+		msr = centrino_model[cpu]->op_points[newstate].index;
+
+		if (first_cpu) {
+			rdmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+			if (msr == (oldmsr & 0xffff)) {
+				dprintk("no change needed - msr was and needs "
+					"to be %x\n", oldmsr);
+				retval = 0;
+				goto migrate_end;
+			}
+
+			freqs.old = extract_clock(oldmsr, cpu, 0);
+			freqs.new = extract_clock(msr, cpu, 0);
+
+			dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
+				target_freq, freqs.old, freqs.new, msr);
+
+			for_each_cpu_mask(k, online_policy_cpus) {
+				freqs.cpu = k;
+				cpufreq_notify_transition(&freqs,
+					CPUFREQ_PRECHANGE);
+			}
+
+			first_cpu = 0;
+			/* all but 16 LSB are reserved, treat them with care */
+			oldmsr &= ~0xffff;
+			msr &= 0xffff;
+			oldmsr |= msr;
+		}
 
-	freqs.cpu = cpu;
-	freqs.old = extract_clock(oldmsr, cpu, 0);
-	freqs.new = extract_clock(msr, cpu, 0);
+		wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
+			break;
 
-	dprintk("target=%dkHz old=%d new=%d msr=%04x\n",
-		target_freq, freqs.old, freqs.new, msr);
+		cpu_set(j, covered_cpus);
+	}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	for_each_cpu_mask(k, online_policy_cpus) {
+		freqs.cpu = k;
+		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	}
 
-	/* all but 16 LSB are "reserved", so treat them with
-	   care */
-	oldmsr &= ~0xffff;
-	msr &= 0xffff;
-	oldmsr |= msr;
+	if (unlikely(retval)) {
+		/*
+		 * We have failed halfway through the frequency change.
+		 * We have sent callbacks to policy->cpus and
+		 * MSRs have already been written on coverd_cpus.
+		 * Best effort undo..
+		 */
 
-	wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+		if (!cpus_empty(covered_cpus)) {
+			for_each_cpu_mask(j, covered_cpus) {
+				set_cpus_allowed(current, cpumask_of_cpu(j));
+				wrmsr(MSR_IA32_PERF_CTL, oldmsr, h);
+			}
+		}
 
-	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+		tmp = freqs.new;
+		freqs.new = freqs.old;
+		freqs.old = tmp;
+		for_each_cpu_mask(j, online_policy_cpus) {
+			freqs.cpu = j;
+			cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+			cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+		}
+	}
 
-	retval = 0;
 migrate_end:
 	set_cpus_allowed(current, saved_mask);
-	return (retval);
+	return 0;
 }
 
 static struct freq_attr* centrino_attr[] = {
@@ -686,12 +791,25 @@ static int __init centrino_init(void)
 	if (!cpu_has(cpu, X86_FEATURE_EST))
 		return -ENODEV;
 
+	centrino_cpu_early_init_acpi();
+
 	return cpufreq_register_driver(&centrino_driver);
 }
 
 static void __exit centrino_exit(void)
 {
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
+	unsigned int j;
+#endif
+
 	cpufreq_unregister_driver(&centrino_driver);
+
+#ifdef CONFIG_X86_SPEEDSTEP_CENTRINO_ACPI
+	for_each_cpu(j) {
+		kfree(acpi_perf_data[j]);
+		acpi_perf_data[j] = NULL;
+	}
+#endif
 }
 
 MODULE_AUTHOR ("Jeremy Fitzhardinge <jeremy@goop.org>");
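
Note: centrino_target() above programs the operating point through MSR_IA32_PERF_CTL, where only the low 16 bits select the P-state and the remaining bits are reserved and must be preserved. The read-modify-write it performs reduces to roughly this fragment (assuming msr already holds the target index taken from op_points[]):

	unsigned int lo, hi;

	rdmsr(MSR_IA32_PERF_CTL, lo, hi);	/* read current control value */
	lo = (lo & ~0xffff) | (msr & 0xffff);	/* splice in the new P-state bits */
	wrmsr(MSR_IA32_PERF_CTL, lo, hi);	/* upper half written back unchanged */

With _PSD-style coordination, the first CPU in the domain does the rdmsr and the cpufreq notifications; the remaining CPUs only repeat the wrmsr.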
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index c1c9b4224081..fbb25b00629b 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -77,6 +77,7 @@ choice
 config IA64_GENERIC
 	bool "generic"
 	select ACPI
+	select PCI
 	select NUMA
 	select ACPI_NUMA
 	help
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 3ce443e6c016..5825ddee58d6 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -1999,7 +1999,7 @@ acpi_sba_ioc_add(struct acpi_device *device)
 		if (!iovp_shift)
 			iovp_shift = min(PAGE_SHIFT, 16);
 	}
-	ACPI_MEM_FREE(dev_info);
+	kfree(dev_info);
 
 	/*
 	 * default anything not caught above or specified on cmdline to 4k
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index d1c52cf67882..6ea642beaaee 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -68,8 +68,6 @@ EXPORT_SYMBOL(pm_power_off);
 unsigned char acpi_kbd_controller_present = 1;
 unsigned char acpi_legacy_devices;
 
-static unsigned int __initdata acpi_madt_rev;
-
 unsigned int acpi_cpei_override;
 unsigned int acpi_cpei_phys_cpuid;
 
@@ -243,6 +241,8 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
 	return iosapic_init(iosapic->address, iosapic->global_irq_base);
 }
 
+static unsigned int __initdata acpi_madt_rev;
+
 static int __init
 acpi_parse_plat_int_src(acpi_table_entry_header * header,
 			const unsigned long end)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index cafa8776a53d..11f08001f8c2 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -671,9 +671,11 @@ int add_memory(u64 start, u64 size)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(add_memory);
 
 int remove_memory(u64 start, u64 size)
 {
 	return -EINVAL;
 }
+EXPORT_SYMBOL_GPL(remove_memory);
 #endif
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 7d3bc5ac5db0..af44130f0d65 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -299,6 +299,7 @@ config X86_64_ACPI_NUMA
 	bool "ACPI NUMA detection"
 	depends on NUMA
 	select ACPI
+	select PCI
 	select ACPI_NUMA
 	default y
 	help
diff --git a/arch/x86_64/kernel/acpi/Makefile b/arch/x86_64/kernel/acpi/Makefile
index 4fe97071f297..080b9963f1bc 100644
--- a/arch/x86_64/kernel/acpi/Makefile
+++ b/arch/x86_64/kernel/acpi/Makefile
@@ -4,5 +4,6 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup.o
 
 ifneq ($(CONFIG_ACPI_PROCESSOR),)
 obj-y			+= processor.o
+processor-y		:= ../../../i386/kernel/acpi/processor.o ../../../i386/kernel/acpi/cstate.o
 endif
 
diff --git a/arch/x86_64/kernel/acpi/processor.c b/arch/x86_64/kernel/acpi/processor.c
deleted file mode 100644
index 3bdc2baa5bb1..000000000000
--- a/arch/x86_64/kernel/acpi/processor.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * arch/x86_64/kernel/acpi/processor.c
- *
- * Copyright (C) 2005 Intel Corporation
- * 	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * 	- Added _PDC for platforms with Intel CPUs
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-
-#include <acpi/processor.h>
-#include <asm/acpi.h>
-
-static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
-{
-	struct acpi_object_list *obj_list;
-	union acpi_object *obj;
-	u32 *buf;
-
-	/* allocate and initialize pdc. It will be used later. */
-	obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
-	if (!obj_list) {
-		printk(KERN_ERR "Memory allocation error\n");
-		return;
-	}
-
-	obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
-	if (!obj) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj_list);
-		return;
-	}
-
-	buf = kmalloc(12, GFP_KERNEL);
-	if (!buf) {
-		printk(KERN_ERR "Memory allocation error\n");
-		kfree(obj);
-		kfree(obj_list);
-		return;
-	}
-
-	buf[0] = ACPI_PDC_REVISION_ID;
-	buf[1] = 1;
-	buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
-
-	obj->type = ACPI_TYPE_BUFFER;
-	obj->buffer.length = 12;
-	obj->buffer.pointer = (u8 *) buf;
-	obj_list->count = 1;
-	obj_list->pointer = obj;
-	pr->pdc = obj_list;
-
-	return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
-{
-	unsigned int cpu = pr->id;
-	struct cpuinfo_x86 *c = cpu_data + cpu;
-
-	pr->pdc = NULL;
-	if (c->x86_vendor == X86_VENDOR_INTEL && cpu_has(c, X86_FEATURE_EST))
-		init_intel_pdc(pr, c);
-
-	return;
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_init_pdc);