Diffstat (limited to 'drivers/acpi')
-rw-r--r--  drivers/acpi/Makefile             |   1
-rw-r--r--  drivers/acpi/acpi_pad.c           |  37
-rw-r--r--  drivers/acpi/blacklist.c          |  14
-rw-r--r--  drivers/acpi/bus.c                |  35
-rw-r--r--  drivers/acpi/ec.c                 | 136
-rw-r--r--  drivers/acpi/internal.h           |   1
-rw-r--r--  drivers/acpi/pci_link.c           |   2
-rw-r--r--  drivers/acpi/pci_root.c           |   2
-rw-r--r--  drivers/acpi/power.c              |   2
-rw-r--r--  drivers/acpi/power_meter.c        |   4
-rw-r--r--  drivers/acpi/processor_core.c     |  71
-rw-r--r--  drivers/acpi/processor_idle.c     |  64
-rw-r--r--  drivers/acpi/processor_pdc.c      | 195
-rw-r--r--  drivers/acpi/processor_thermal.c  |   3
-rw-r--r--  drivers/acpi/sbs.c                |   3
-rw-r--r--  drivers/acpi/sbshc.c              |   2
-rw-r--r--  drivers/acpi/sleep.c              |  29
-rw-r--r--  drivers/acpi/video.c              |  51
18 files changed, 449 insertions, 203 deletions
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index c7b10b4298e9..66cc3f36a954 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -32,6 +32,7 @@ acpi-$(CONFIG_ACPI_SLEEP) += proc.o
 #
 acpi-y += bus.o glue.o
 acpi-y += scan.o
+acpi-y += processor_pdc.o
 acpi-y += ec.o
 acpi-$(CONFIG_ACPI_DOCK) += dock.o
 acpi-y += pci_root.o pci_link.o pci_irq.o pci_bind.o
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 97991ac6f5fc..7e52295f1ecc 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -208,7 +208,7 @@ static int power_saving_thread(void *data)
 * the mechanism only works when all CPUs have RT task running,
 * as if one CPU hasn't RT task, RT task from other CPUs will
 * borrow CPU time from this CPU and cause RT task use > 95%
-* CPU time. To make 'avoid staration' work, takes a nap here.
+* CPU time. To make 'avoid starvation' work, takes a nap here.
 */
 if (do_sleep)
 schedule_timeout_killable(HZ * idle_pct / 100);
@@ -222,14 +222,18 @@ static struct task_struct *ps_tsks[NR_CPUS];
 static unsigned int ps_tsk_num;
 static int create_power_saving_task(void)
 {
+int rc = -ENOMEM;
+
 ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
 (void *)(unsigned long)ps_tsk_num,
 "power_saving/%d", ps_tsk_num);
-if (ps_tsks[ps_tsk_num]) {
+rc = IS_ERR(ps_tsks[ps_tsk_num]) ? PTR_ERR(ps_tsks[ps_tsk_num]) : 0;
+if (!rc)
 ps_tsk_num++;
-return 0;
-}
-return -EINVAL;
+else
+ps_tsks[ps_tsk_num] = NULL;
+
+return rc;
 }
 
 static void destroy_power_saving_task(void)
@@ -237,6 +241,7 @@ static void destroy_power_saving_task(void)
 if (ps_tsk_num > 0) {
 ps_tsk_num--;
 kthread_stop(ps_tsks[ps_tsk_num]);
+ps_tsks[ps_tsk_num] = NULL;
 }
 }
 
@@ -253,7 +258,7 @@ static void set_power_saving_task_num(unsigned int num)
 }
 
 
-static int acpi_pad_idle_cpus(unsigned int num_cpus)
+static void acpi_pad_idle_cpus(unsigned int num_cpus)
 {
 get_online_cpus();
 
@@ -261,7 +266,6 @@ static int acpi_pad_idle_cpus(unsigned int num_cpus)
 set_power_saving_task_num(num_cpus);
 
 put_online_cpus();
-return 0;
 }
 
 static uint32_t acpi_pad_idle_cpus_num(void)
@@ -369,19 +373,21 @@ static void acpi_pad_remove_sysfs(struct acpi_device *device)
 static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
 {
 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
-acpi_status status;
 union acpi_object *package;
 int rev, num, ret = -EINVAL;
 
-status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
-if (ACPI_FAILURE(status))
+if (ACPI_FAILURE(acpi_evaluate_object(handle, "_PUR", NULL, &buffer)))
+return -EINVAL;
+
+if (!buffer.length || !buffer.pointer)
 return -EINVAL;
+
 package = buffer.pointer;
 if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
 goto out;
 rev = package->package.elements[0].integer.value;
 num = package->package.elements[1].integer.value;
-if (rev != 1)
+if (rev != 1 || num < 0)
 goto out;
 *num_cpus = num;
 ret = 0;
@@ -410,7 +416,7 @@ static void acpi_pad_ost(acpi_handle handle, int stat,
 
 static void acpi_pad_handle_notify(acpi_handle handle)
 {
-int num_cpus, ret;
+int num_cpus;
 uint32_t idle_cpus;
 
 mutex_lock(&isolated_cpus_lock);
@@ -418,12 +424,9 @@ static void acpi_pad_handle_notify(acpi_handle handle)
 mutex_unlock(&isolated_cpus_lock);
 return;
 }
-ret = acpi_pad_idle_cpus(num_cpus);
+acpi_pad_idle_cpus(num_cpus);
 idle_cpus = acpi_pad_idle_cpus_num();
-if (!ret)
-acpi_pad_ost(handle, 0, idle_cpus);
-else
-acpi_pad_ost(handle, 1, 0);
+acpi_pad_ost(handle, 0, idle_cpus);
 mutex_unlock(&isolated_cpus_lock);
 }
 
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 23e5a0519af5..2815df66f6f7 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -185,6 +185,12 @@ static int __init dmi_disable_osi_vista(const struct dmi_system_id *d)
 acpi_osi_setup("!Windows 2006");
 return 0;
 }
+static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
+{
+printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);
+acpi_osi_setup("!Windows 2009");
+return 0;
+}
 
 static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 {
@@ -211,6 +217,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
 DMI_MATCH(DMI_PRODUCT_NAME, "Sony VGN-SR290J"),
 },
 },
+{
+.callback = dmi_disable_osi_win7,
+.ident = "ASUS K50IJ",
+.matches = {
+DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
+DMI_MATCH(DMI_PRODUCT_NAME, "K50IJ"),
+},
+},
 
 /*
 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 65f7e335f122..a52126e46307 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -397,6 +397,7 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
 union acpi_object *out_obj;
 u8 uuid[16];
 u32 errors;
+struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
 
 if (!context)
 return AE_ERROR;
@@ -419,16 +420,16 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
 in_params[3].buffer.length = context->cap.length;
 in_params[3].buffer.pointer = context->cap.pointer;
 
-status = acpi_evaluate_object(handle, "_OSC", &input, &context->ret);
+status = acpi_evaluate_object(handle, "_OSC", &input, &output);
 if (ACPI_FAILURE(status))
 return status;
 
-/* return buffer should have the same length as cap buffer */
-if (context->ret.length != context->cap.length)
+if (!output.length)
 return AE_NULL_OBJECT;
 
-out_obj = context->ret.pointer;
-if (out_obj->type != ACPI_TYPE_BUFFER) {
+out_obj = output.pointer;
+if (out_obj->type != ACPI_TYPE_BUFFER
+|| out_obj->buffer.length != context->cap.length) {
 acpi_print_osc_error(handle, context,
 "_OSC evaluation returned wrong type");
 status = AE_TYPE;
@@ -457,11 +458,20 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context)
 goto out_kfree;
 }
 out_success:
-return AE_OK;
+context->ret.length = out_obj->buffer.length;
+context->ret.pointer = kmalloc(context->ret.length, GFP_KERNEL);
+if (!context->ret.pointer) {
+status = AE_NO_MEMORY;
+goto out_kfree;
+}
+memcpy(context->ret.pointer, out_obj->buffer.pointer,
+context->ret.length);
+status = AE_OK;
 
 out_kfree:
-kfree(context->ret.pointer);
-context->ret.pointer = NULL;
+kfree(output.pointer);
+if (status != AE_OK)
+context->ret.pointer = NULL;
 return status;
 }
 EXPORT_SYMBOL(acpi_run_osc);
@@ -480,9 +490,14 @@ static void acpi_bus_osc_support(void)
 
 capbuf[OSC_QUERY_TYPE] = OSC_QUERY_ENABLE;
 capbuf[OSC_SUPPORT_TYPE] = OSC_SB_PR3_SUPPORT; /* _PR3 is in use */
-#ifdef CONFIG_ACPI_PROCESSOR_AGGREGATOR
+#if defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR) ||\
+defined(CONFIG_ACPI_PROCESSOR_AGGREGATOR_MODULE)
 capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PAD_SUPPORT;
 #endif
+
+#if defined(CONFIG_ACPI_PROCESSOR) || defined(CONFIG_ACPI_PROCESSOR_MODULE)
+capbuf[OSC_SUPPORT_TYPE] |= OSC_SB_PPC_OST_SUPPORT;
+#endif
 if (ACPI_FAILURE(acpi_get_handle(NULL, "\\_SB", &handle)))
 return;
 if (ACPI_SUCCESS(acpi_run_osc(handle, &context)))
@@ -888,6 +903,8 @@ static int __init acpi_bus_init(void)
 goto error1;
 }
 
+acpi_early_processor_set_pdc();
+
 /*
 * Maybe EC region is required at bus_scan/acpi_get_devices. So it
 * is necessary to enable it as early as possible.
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 75b147f5c8fd..d6471bb6852f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -201,14 +201,13 @@ unlock:
 spin_unlock_irqrestore(&ec->curr_lock, flags);
 }
 
-static void acpi_ec_gpe_query(void *ec_cxt);
+static int acpi_ec_sync_query(struct acpi_ec *ec);
 
-static int ec_check_sci(struct acpi_ec *ec, u8 state)
+static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 {
 if (state & ACPI_EC_FLAG_SCI) {
 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
-return acpi_os_execute(OSL_EC_BURST_HANDLER,
-acpi_ec_gpe_query, ec);
+return acpi_ec_sync_query(ec);
 }
 return 0;
 }
@@ -249,11 +248,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
 {
 unsigned long tmp;
 int ret = 0;
-pr_debug(PREFIX "transaction start\n");
-/* disable GPE during transaction if storm is detected */
-if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
-acpi_disable_gpe(NULL, ec->gpe);
-}
 if (EC_FLAGS_MSI)
 udelay(ACPI_EC_MSI_UDELAY);
 /* start transaction */
@@ -265,20 +259,9 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
 spin_unlock_irqrestore(&ec->curr_lock, tmp);
 ret = ec_poll(ec);
-pr_debug(PREFIX "transaction end\n");
 spin_lock_irqsave(&ec->curr_lock, tmp);
 ec->curr = NULL;
 spin_unlock_irqrestore(&ec->curr_lock, tmp);
-if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
-/* check if we received SCI during transaction */
-ec_check_sci(ec, acpi_ec_read_status(ec));
-/* it is safe to enable GPE outside of transaction */
-acpi_enable_gpe(NULL, ec->gpe);
-} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
-pr_info(PREFIX "GPE storm detected, "
-"transactions will use polling mode\n");
-set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
-}
 return ret;
 }
 
@@ -321,7 +304,26 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
 status = -ETIME;
 goto end;
 }
+pr_debug(PREFIX "transaction start\n");
+/* disable GPE during transaction if storm is detected */
+if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+acpi_disable_gpe(NULL, ec->gpe);
+}
+
 status = acpi_ec_transaction_unlocked(ec, t);
+
+/* check if we received SCI during transaction */
+ec_check_sci_sync(ec, acpi_ec_read_status(ec));
+if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
+msleep(1);
+/* it is safe to enable GPE outside of transaction */
+acpi_enable_gpe(NULL, ec->gpe);
+} else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) {
+pr_info(PREFIX "GPE storm detected, "
+"transactions will use polling mode\n");
+set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
+}
+pr_debug(PREFIX "transaction end\n");
 end:
 if (ec->global_lock)
 acpi_release_global_lock(glk);
@@ -443,7 +445,7 @@ int ec_transaction(u8 command,
 
 EXPORT_SYMBOL(ec_transaction);
 
-static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data)
 {
 int result;
 u8 d;
@@ -452,20 +454,16 @@ static int acpi_ec_query(struct acpi_ec *ec, u8 * data)
 .wlen = 0, .rlen = 1};
 if (!ec || !data)
 return -EINVAL;
-
 /*
 * Query the EC to find out which _Qxx method we need to evaluate.
 * Note that successful completion of the query causes the ACPI_EC_SCI
 * bit to be cleared (and thus clearing the interrupt source).
 */
-
-result = acpi_ec_transaction(ec, &t);
+result = acpi_ec_transaction_unlocked(ec, &t);
 if (result)
 return result;
-
 if (!d)
 return -ENODATA;
-
 *data = d;
 return 0;
 }
@@ -509,43 +507,79 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
 
 EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
 
-static void acpi_ec_gpe_query(void *ec_cxt)
+static void acpi_ec_run(void *cxt)
 {
-struct acpi_ec *ec = ec_cxt;
-u8 value = 0;
-struct acpi_ec_query_handler *handler, copy;
-
-if (!ec || acpi_ec_query(ec, &value))
+struct acpi_ec_query_handler *handler = cxt;
+if (!handler)
 return;
-mutex_lock(&ec->lock);
+pr_debug(PREFIX "start query execution\n");
+if (handler->func)
+handler->func(handler->data);
+else if (handler->handle)
+acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
+pr_debug(PREFIX "stop query execution\n");
+kfree(handler);
+}
+
+static int acpi_ec_sync_query(struct acpi_ec *ec)
+{
+u8 value = 0;
+int status;
+struct acpi_ec_query_handler *handler, *copy;
+if ((status = acpi_ec_query_unlocked(ec, &value)))
+return status;
 list_for_each_entry(handler, &ec->list, node) {
 if (value == handler->query_bit) {
 /* have custom handler for this bit */
-memcpy(&copy, handler, sizeof(copy));
-mutex_unlock(&ec->lock);
-if (copy.func) {
-copy.func(copy.data);
-} else if (copy.handle) {
-acpi_evaluate_object(copy.handle, NULL, NULL, NULL);
-}
-return;
+copy = kmalloc(sizeof(*handler), GFP_KERNEL);
+if (!copy)
+return -ENOMEM;
+memcpy(copy, handler, sizeof(*copy));
+pr_debug(PREFIX "push query execution (0x%2x) on queue\n", value);
+return acpi_os_execute((copy->func) ?
+OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
+acpi_ec_run, copy);
 }
 }
+return 0;
+}
+
+static void acpi_ec_gpe_query(void *ec_cxt)
+{
+struct acpi_ec *ec = ec_cxt;
+if (!ec)
+return;
+mutex_lock(&ec->lock);
+acpi_ec_sync_query(ec);
 mutex_unlock(&ec->lock);
 }
 
+static void acpi_ec_gpe_query(void *ec_cxt);
+
+static int ec_check_sci(struct acpi_ec *ec, u8 state)
+{
+if (state & ACPI_EC_FLAG_SCI) {
+if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
+pr_debug(PREFIX "push gpe query to the queue\n");
+return acpi_os_execute(OSL_NOTIFY_HANDLER,
+acpi_ec_gpe_query, ec);
+}
+}
+return 0;
+}
+
 static u32 acpi_ec_gpe_handler(void *data)
 {
 struct acpi_ec *ec = data;
-u8 status;
 
 pr_debug(PREFIX "~~~> interrupt\n");
-status = acpi_ec_read_status(ec);
 
-advance_transaction(ec, status);
-if (ec_transaction_done(ec) && (status & ACPI_EC_FLAG_IBF) == 0)
+advance_transaction(ec, acpi_ec_read_status(ec));
+if (ec_transaction_done(ec) &&
+(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
 wake_up(&ec->wait);
-ec_check_sci(ec, status);
+ec_check_sci(ec, acpi_ec_read_status(ec));
+}
 return ACPI_INTERRUPT_HANDLED;
 }
 
@@ -916,6 +950,7 @@ static int ec_validate_ecdt(const struct dmi_system_id *id)
 /* MSI EC needs special treatment, enable it */
 static int ec_flag_msi(const struct dmi_system_id *id)
 {
+printk(KERN_DEBUG PREFIX "Detected MSI hardware, enabling workarounds.\n");
 EC_FLAGS_MSI = 1;
 EC_FLAGS_VALIDATE_ECDT = 1;
 return 0;
@@ -928,8 +963,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = {
 DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
 {
 ec_flag_msi, "MSI hardware", {
-DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
-DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
+DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star")}, NULL},
+{
+ec_flag_msi, "MSI hardware", {
+DMI_MATCH(DMI_SYS_VENDOR, "Micro-Star")}, NULL},
+{
+ec_flag_msi, "MSI hardware", {
+DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star")}, NULL},
 {
 ec_validate_ecdt, "ASUS hardware", {
 DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 074cf8682d52..cb28e0502acc 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -43,6 +43,7 @@ int acpi_power_transition(struct acpi_device *device, int state);
 extern int acpi_power_nocheck;
 
 int acpi_wakeup_device_init(void);
+void acpi_early_processor_set_pdc(void);
 
 /* --------------------------------------------------------------------------
 Embedded Controller
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index 394ae89409c2..04b0f007c9b7 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -56,7 +56,7 @@ ACPI_MODULE_NAME("pci_link");
 static int acpi_pci_link_add(struct acpi_device *device);
 static int acpi_pci_link_remove(struct acpi_device *device, int type);
 
-static struct acpi_device_id link_device_ids[] = {
+static const struct acpi_device_id link_device_ids[] = {
 {"PNP0C0F", 0},
 {"", 0},
 };
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 101cce3681d1..64f55b6db73c 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -46,7 +46,7 @@ static int acpi_pci_root_add(struct acpi_device *device);
 static int acpi_pci_root_remove(struct acpi_device *device, int type);
 static int acpi_pci_root_start(struct acpi_device *device);
 
-static struct acpi_device_id root_device_ids[] = {
+static const struct acpi_device_id root_device_ids[] = {
 {"PNP0A03", 0},
 {"", 0},
 };
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 22b297916519..0f30c3c1eea4 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -65,7 +65,7 @@ static int acpi_power_remove(struct acpi_device *device, int type);
 static int acpi_power_resume(struct acpi_device *device);
 static int acpi_power_open_fs(struct inode *inode, struct file *file);
 
-static struct acpi_device_id power_device_ids[] = {
+static const struct acpi_device_id power_device_ids[] = {
 {ACPI_POWER_HID, 0},
 {"", 0},
 };
diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
index 2ef7030a0c28..dc4ffadf8122 100644
--- a/drivers/acpi/power_meter.c
+++ b/drivers/acpi/power_meter.c
@@ -64,7 +64,7 @@ static int can_cap_in_hardware(void)
 return force_cap_on || cap_in_hardware;
 }
 
-static struct acpi_device_id power_meter_ids[] = {
+static const struct acpi_device_id power_meter_ids[] = {
 {"ACPI000D", 0},
 {"", 0},
 };
@@ -534,6 +534,7 @@ static void remove_domain_devices(struct acpi_power_meter_resource *resource)
 
 kfree(resource->domain_devices);
 kobject_put(resource->holders_dir);
+resource->num_domain_devices = 0;
 }
 
 static int read_domain_devices(struct acpi_power_meter_resource *resource)
@@ -740,7 +741,6 @@ skip_unsafe_cap:
 
 return res;
 error:
-remove_domain_devices(resource);
 remove_attrs(resource);
 return res;
 }
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 41731236f9a1..9863c98c81ba 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -124,29 +124,6 @@ static const struct file_operations acpi_processor_info_fops = {
 
 DEFINE_PER_CPU(struct acpi_processor *, processors);
 struct acpi_processor_errata errata __read_mostly;
-static int set_no_mwait(const struct dmi_system_id *id)
-{
-printk(KERN_NOTICE PREFIX "%s detected - "
-"disabling mwait for CPU C-states\n", id->ident);
-idle_nomwait = 1;
-return 0;
-}
-
-static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
-{
-set_no_mwait, "IFL91 board", {
-DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
-DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
-DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
-DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
-{
-set_no_mwait, "Extensa 5220", {
-DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
-DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
-DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
-DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
-{},
-};
 
 /* --------------------------------------------------------------------------
 Errata Handling
@@ -277,45 +254,6 @@ static int acpi_processor_errata(struct acpi_processor *pr)
 }
 
 /* --------------------------------------------------------------------------
-Common ACPI processor functions
--------------------------------------------------------------------------- */
-
-/*
-* _PDC is required for a BIOS-OS handshake for most of the newer
-* ACPI processor features.
-*/
-static int acpi_processor_set_pdc(struct acpi_processor *pr)
-{
-struct acpi_object_list *pdc_in = pr->pdc;
-acpi_status status = AE_OK;
-
-
-if (!pdc_in)
-return status;
-if (idle_nomwait) {
-/*
-* If mwait is disabled for CPU C-states, the C2C3_FFH access
-* mode will be disabled in the parameter of _PDC object.
-* Of course C1_FFH access mode will also be disabled.
-*/
-union acpi_object *obj;
-u32 *buffer = NULL;
-
-obj = pdc_in->pointer;
-buffer = (u32 *)(obj->buffer.pointer);
-buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
-
-}
-status = acpi_evaluate_object(pr->handle, "_PDC", pdc_in, NULL);
-
-if (ACPI_FAILURE(status))
-ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-"Could not evaluate _PDC, using legacy perf. control...\n"));
-
-return status;
-}
-
-/* --------------------------------------------------------------------------
 FS Interface (/proc)
 -------------------------------------------------------------------------- */
 
@@ -825,9 +763,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
 }
 
 /* _PDC call should be done before doing anything else (if reqd.). */
-arch_acpi_processor_init_pdc(pr);
-acpi_processor_set_pdc(pr);
-arch_acpi_processor_cleanup_pdc(pr);
+acpi_processor_set_pdc(pr->handle);
 
 #ifdef CONFIG_CPU_FREQ
 acpi_processor_ppc_has_changed(pr, 0);
@@ -1145,11 +1081,6 @@ static int __init acpi_processor_init(void)
 if (!acpi_processor_dir)
 return -ENOMEM;
 #endif
-/*
-* Check whether the system is DMI table. If yes, OSPM
-* should not use mwait for CPU-states.
-*/
-dmi_check_system(processor_idle_dmi_table);
 result = cpuidle_register_driver(&acpi_idle_driver);
 if (result < 0)
 goto out_proc;
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index d1676b1754d9..7c0441f63b39 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -305,6 +305,28 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
 pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
 pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;
 
+/*
+* FADT specified C2 latency must be less than or equal to
+* 100 microseconds.
+*/
+if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
+ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+"C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
+/* invalidate C2 */
+pr->power.states[ACPI_STATE_C2].address = 0;
+}
+
+/*
+* FADT supplied C3 latency must be less than or equal to
+* 1000 microseconds.
+*/
+if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
+ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+"C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency));
+/* invalidate C3 */
+pr->power.states[ACPI_STATE_C3].address = 0;
+}
+
 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 "lvl2[0x%08x] lvl3[0x%08x]\n",
 pr->power.states[ACPI_STATE_C2].address,
@@ -494,33 +516,6 @@ static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
 return status;
 }
 
-static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
-{
-
-if (!cx->address)
-return;
-
-/*
-* C2 latency must be less than or equal to 100
-* microseconds.
-*/
-else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
-ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-"latency too large [%d]\n", cx->latency));
-return;
-}
-
-/*
-* Otherwise we've met all of our C2 requirements.
-* Normalize the C2 latency to expidite policy
-*/
-cx->valid = 1;
-
-cx->latency_ticks = cx->latency;
-
-return;
-}
-
 static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 struct acpi_processor_cx *cx)
 {
@@ -532,16 +527,6 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
 return;
 
 /*
-* C3 latency must be less than or equal to 1000
-* microseconds.
-*/
-else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
-ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-"latency too large [%d]\n", cx->latency));
-return;
-}
-
-/*
 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
 * DMA transfers are used by any ISA device to avoid livelock.
 * Note that we could disable Type-F DMA (as recommended by
@@ -629,7 +614,10 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
 break;
 
 case ACPI_STATE_C2:
-acpi_processor_power_verify_c2(cx);
+if (!cx->address)
+break;
+cx->valid = 1;
+cx->latency_ticks = cx->latency; /* Normalize latency */
 break;
 
 case ACPI_STATE_C3:
diff --git a/drivers/acpi/processor_pdc.c b/drivers/acpi/processor_pdc.c
new file mode 100644
index 000000000000..7247819dbd80
--- /dev/null
+++ b/drivers/acpi/processor_pdc.c
@@ -0,0 +1,195 @@
+/*
+* Copyright (C) 2005 Intel Corporation
+* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
+*
+* Alex Chiang <achiang@hp.com>
+* - Unified x86/ia64 implementations
+* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
+* - Added _PDC for platforms with Intel CPUs
+*/
+#include <linux/dmi.h>
+
+#include <acpi/acpi_drivers.h>
+#include <acpi/processor.h>
+
+#include "internal.h"
+
+#define PREFIX "ACPI: "
+#define _COMPONENT ACPI_PROCESSOR_COMPONENT
+ACPI_MODULE_NAME("processor_pdc");
+
+static int set_no_mwait(const struct dmi_system_id *id)
+{
+printk(KERN_NOTICE PREFIX "%s detected - "
+"disabling mwait for CPU C-states\n", id->ident);
+idle_nomwait = 1;
+return 0;
+}
+
+static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
+{
+set_no_mwait, "IFL91 board", {
+DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
+DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
+DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
+DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
+{
+set_no_mwait, "Extensa 5220", {
+DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
+DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
+DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
+{},
+};
+
+static void acpi_set_pdc_bits(u32 *buf)
+{
+buf[0] = ACPI_PDC_REVISION_ID;
+buf[1] = 1;
+
+/* Enable coordination with firmware's _TSD info */
+buf[2] = ACPI_PDC_SMP_T_SWCOORD;
+
+/* Twiddle arch-specific bits needed for _PDC */
+arch_acpi_set_pdc_bits(buf);
+}
+
+static struct acpi_object_list *acpi_processor_alloc_pdc(void)
+{
+struct acpi_object_list *obj_list;
+union acpi_object *obj;
+u32 *buf;
+
+/* allocate and initialize pdc. It will be used later. */
+obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
+if (!obj_list) {
+printk(KERN_ERR "Memory allocation error\n");
+return NULL;
+}
+
+obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
+if (!obj) {
+printk(KERN_ERR "Memory allocation error\n");
+kfree(obj_list);
+return NULL;
+}
+
+buf = kmalloc(12, GFP_KERNEL);
+if (!buf) {
+printk(KERN_ERR "Memory allocation error\n");
+kfree(obj);
+kfree(obj_list);
+return NULL;
+}
+
+acpi_set_pdc_bits(buf);
+
+obj->type = ACPI_TYPE_BUFFER;
+obj->buffer.length = 12;
+obj->buffer.pointer = (u8 *) buf;
+obj_list->count = 1;
+obj_list->pointer = obj;
+
+return obj_list;
+}
+
+/*
+* _PDC is required for a BIOS-OS handshake for most of the newer
+* ACPI processor features.
+*/
+static int
+acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
+{
+acpi_status status = AE_OK;
+
+if (idle_nomwait) {
+/*
+* If mwait is disabled for CPU C-states, the C2C3_FFH access
+* mode will be disabled in the parameter of _PDC object.
+* Of course C1_FFH access mode will also be disabled.
+*/
+union acpi_object *obj;
+u32 *buffer = NULL;
+
+obj = pdc_in->pointer;
+buffer = (u32 *)(obj->buffer.pointer);
+buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
+
+}
+status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);
+
+if (ACPI_FAILURE(status))
+ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+"Could not evaluate _PDC, using legacy perf. control.\n"));
+
+return status;
+}
+
+void acpi_processor_set_pdc(acpi_handle handle)
+{
+struct acpi_object_list *obj_list;
+
+if (arch_has_acpi_pdc() == false)
+return;
+
+obj_list = acpi_processor_alloc_pdc();
+if (!obj_list)
+return;
+
+acpi_processor_eval_pdc(handle, obj_list);
+
+kfree(obj_list->pointer->buffer.pointer);
+kfree(obj_list->pointer);
+kfree(obj_list);
+}
+EXPORT_SYMBOL_GPL(acpi_processor_set_pdc);
+
+static int early_pdc_optin;
+static int set_early_pdc_optin(const struct dmi_system_id *id)
+{
+early_pdc_optin = 1;
+return 0;
+}
+
+static struct dmi_system_id __cpuinitdata early_pdc_optin_table[] = {
+{
+set_early_pdc_optin, "HP Envy", {
+DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
+DMI_MATCH(DMI_PRODUCT_NAME, "HP Envy") }, NULL},
+{
+set_early_pdc_optin, "HP Pavilion dv6", {
+DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
+DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6") }, NULL},
+{
+set_early_pdc_optin, "HP Pavilion dv7", {
+DMI_MATCH(DMI_BIOS_VENDOR, "Hewlett-Packard"),
+DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7") }, NULL},
+{},
+};
+
+static acpi_status
+early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
+{
+acpi_processor_set_pdc(handle);
+return AE_OK;
+}
+
+void __init acpi_early_processor_set_pdc(void)
+{
+/*
+* Check whether the system is DMI table. If yes, OSPM
+* should not use mwait for CPU-states.
+*/
+dmi_check_system(processor_idle_dmi_table);
+
+/*
+* Allow systems to opt-in to early _PDC evaluation.
+*/
+dmi_check_system(early_pdc_optin_table);
+if (!early_pdc_optin)
+return;
+
+acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
+ACPI_UINT32_MAX,
+early_init_pdc, NULL, NULL, NULL);
+}
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c
index 140c5c5b423c..6deafb4aa0da 100644
--- a/drivers/acpi/processor_thermal.c
+++ b/drivers/acpi/processor_thermal.c
@@ -443,8 +443,7 @@ struct thermal_cooling_device_ops processor_cooling_ops = {
 #ifdef CONFIG_ACPI_PROCFS
 static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
 {
-struct acpi_processor *pr = (struct acpi_processor *)seq->private;
-
+struct acpi_processor *pr = seq->private;
 
 if (!pr)
 goto end;
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index 52b9db8afc20..b16ddbf23a9c 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -822,7 +822,10 @@ static int acpi_battery_add(struct acpi_sbs *sbs, int id)
 
 static void acpi_battery_remove(struct acpi_sbs *sbs, int id)
 {
+#if defined(CONFIG_ACPI_SYSFS_POWER) || defined(CONFIG_ACPI_PROCFS_POWER)
 struct acpi_battery *battery = &sbs->battery[id];
+#endif
+
 #ifdef CONFIG_ACPI_SYSFS_POWER
 if (battery->bat.dev) {
 if (battery->have_sysfs_alarm)
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index d9339806df45..fd09229282ea 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -242,7 +242,7 @@ static int smbus_alarm(void *context)
 case ACPI_SBS_CHARGER:
 case ACPI_SBS_MANAGER:
 case ACPI_SBS_BATTERY:
-acpi_os_execute(OSL_GPE_HANDLER,
+acpi_os_execute(OSL_NOTIFY_HANDLER,
 acpi_smbus_callback, hc);
 default:;
 }
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 5f2c379ab7bf..79d33d908b5a 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -81,6 +81,23 @@ static int acpi_sleep_prepare(u32 acpi_state)
 #ifdef CONFIG_ACPI_SLEEP
 static u32 acpi_target_sleep_state = ACPI_STATE_S0;
 /*
+* According to the ACPI specification the BIOS should make sure that ACPI is
+* enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
+* some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
+* on such systems during resume. Unfortunately that doesn't help in
+* particularly pathological cases in which SCI_EN has to be set directly on
+* resume, although the specification states very clearly that this flag is
+* owned by the hardware. The set_sci_en_on_resume variable will be set in such
+* cases.
+*/
+static bool set_sci_en_on_resume;
+
+void __init acpi_set_sci_en_on_resume(void)
+{
+set_sci_en_on_resume = true;
+}
+
+/*
 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
 * user to request that behavior by using the 'acpi_old_suspend_ordering'
 * kernel command line option that causes the following variable to be set.
@@ -170,18 +187,6 @@ static void acpi_pm_end(void)
 #endif /* CONFIG_ACPI_SLEEP */
 
 #ifdef CONFIG_SUSPEND
-/*
-* According to the ACPI specification the BIOS should make sure that ACPI is
-* enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
-* some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
-* on such systems during resume. Unfortunately that doesn't help in
-* particularly pathological cases in which SCI_EN has to be set directly on
-* resume, although the specification states very clearly that this flag is
-* owned by the hardware. The set_sci_en_on_resume variable will be set in such
-* cases.
-*/
-static bool set_sci_en_on_resume;
-
 extern void do_suspend_lowlevel(void);
 
 static u32 acpi_suspend_states[] = {
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 05dff631591c..b765790b32be 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -78,6 +78,13 @@ MODULE_LICENSE("GPL");
 static int brightness_switch_enabled = 1;
 module_param(brightness_switch_enabled, bool, 0644);
 
+/*
+* By default, we don't allow duplicate ACPI video bus devices
+* under the same VGA controller
+*/
+static int allow_duplicates;
+module_param(allow_duplicates, bool, 0644);
+
 static int register_count = 0;
 static int acpi_video_bus_add(struct acpi_device *device);
 static int acpi_video_bus_remove(struct acpi_device *device, int type);
@@ -999,8 +1006,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
 sprintf(name, "acpi_video%d", count++);
 device->backlight = backlight_device_register(name,
 NULL, device, &acpi_backlight_ops);
-device->backlight->props.max_brightness = device->brightness->count-3;
 kfree(name);
+if (IS_ERR(device->backlight))
+return;
+device->backlight->props.max_brightness = device->brightness->count-3;
 
 result = sysfs_create_link(&device->backlight->dev.kobj,
 &device->dev->dev.kobj, "device");
@@ -1979,6 +1988,10 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
 unsigned long long level_current, level_next;
 int result = -EINVAL;
 
+/* no warning message if acpi_backlight=vendor is used */
+if (!acpi_video_backlight_support())
+return 0;
+
 if (!device->brightness)
 goto out;
 
@@ -2233,11 +2246,47 @@ static int acpi_video_resume(struct acpi_device *device)
 return AE_OK;
 }
 
+static acpi_status
+acpi_video_bus_match(acpi_handle handle, u32 level, void *context,
+void **return_value)
+{
+struct acpi_device *device = context;
+struct acpi_device *sibling;
+int result;
+
+if (handle == device->handle)
+return AE_CTRL_TERMINATE;
+
+result = acpi_bus_get_device(handle, &sibling);
+if (result)
+return AE_OK;
+
+if (!strcmp(acpi_device_name(sibling), ACPI_VIDEO_BUS_NAME))
+return AE_ALREADY_EXISTS;
+
+return AE_OK;
+}
+
 static int acpi_video_bus_add(struct acpi_device *device)
 {
 struct acpi_video_bus *video;
 struct input_dev *input;
 int error;
+acpi_status status;
+
+status = acpi_walk_namespace(ACPI_TYPE_DEVICE,
+device->parent->handle, 1,
+acpi_video_bus_match, NULL,
+device, NULL);
+if (status == AE_ALREADY_EXISTS) {
+printk(KERN_WARNING FW_BUG
+"Duplicate ACPI video bus devices for the"
+" same VGA controller, please try module "
+"parameter \"video.allow_duplicates=1\""
+"if the current driver doesn't work.\n");
+if (!allow_duplicates)
+return -ENODEV;
+}
 
 video = kzalloc(sizeof(struct acpi_video_bus), GFP_KERNEL);
 if (!video)