Diffstat (limited to 'drivers/acpi')
 drivers/acpi/dispatcher/dsobject.c  | 91 ++++++++++++++++++++++++++++++++---
 drivers/acpi/processor_idle.c       | 19 ++++++-----
 drivers/acpi/processor_throttling.c | 36 ++++++++++-----
 3 files changed, 127 insertions(+), 19 deletions(-)
diff --git a/drivers/acpi/dispatcher/dsobject.c b/drivers/acpi/dispatcher/dsobject.c
index a474ca2334d5..954ac8ce958a 100644
--- a/drivers/acpi/dispatcher/dsobject.c
+++ b/drivers/acpi/dispatcher/dsobject.c
@@ -137,6 +137,71 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
 				return_ACPI_STATUS(status);
 			}
 		}
+
+		/* Special object resolution for elements of a package */
+
+		if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) ||
+		    (op->common.parent->common.aml_opcode ==
+		     AML_VAR_PACKAGE_OP)) {
+			/*
+			 * Attempt to resolve the node to a value before we insert it into
+			 * the package. If this is a reference to a common data type,
+			 * resolve it immediately. According to the ACPI spec, package
+			 * elements can only be "data objects" or method references.
+			 * Attempt to resolve to an Integer, Buffer, String or Package.
+			 * If cannot, return the named reference (for things like Devices,
+			 * Methods, etc.) Buffer Fields and Fields will resolve to simple
+			 * objects (int/buf/str/pkg).
+			 *
+			 * NOTE: References to things like Devices, Methods, Mutexes, etc.
+			 * will remain as named references. This behavior is not described
+			 * in the ACPI spec, but it appears to be an oversight.
+			 */
+			obj_desc = (union acpi_operand_object *)op->common.node;
+
+			status =
+			    acpi_ex_resolve_node_to_value(ACPI_CAST_INDIRECT_PTR
+							  (struct
+							   acpi_namespace_node,
+							   &obj_desc),
+							  walk_state);
+			if (ACPI_FAILURE(status)) {
+				return_ACPI_STATUS(status);
+			}
+
+			switch (op->common.node->type) {
+				/*
+				 * For these types, we need the actual node, not the subobject.
+				 * However, the subobject got an extra reference count above.
+				 */
+			case ACPI_TYPE_MUTEX:
+			case ACPI_TYPE_METHOD:
+			case ACPI_TYPE_POWER:
+			case ACPI_TYPE_PROCESSOR:
+			case ACPI_TYPE_EVENT:
+			case ACPI_TYPE_REGION:
+			case ACPI_TYPE_DEVICE:
+			case ACPI_TYPE_THERMAL:
+
+				obj_desc =
+				    (union acpi_operand_object *)op->common.
+				    node;
+				break;
+
+			default:
+				break;
+			}
+
+			/*
+			 * If above resolved to an operand object, we are done. Otherwise,
+			 * we have a NS node, we must create the package entry as a named
+			 * reference.
+			 */
+			if (ACPI_GET_DESCRIPTOR_TYPE(obj_desc) !=
+			    ACPI_DESC_TYPE_NAMED) {
+				goto exit;
+			}
+		}
 	}
 
 	/* Create and init a new internal ACPI object */
@@ -156,6 +221,7 @@ acpi_ds_build_internal_object(struct acpi_walk_state *walk_state,
 		return_ACPI_STATUS(status);
 	}
 
+      exit:
 	*obj_desc_ptr = obj_desc;
 	return_ACPI_STATUS(AE_OK);
 }
@@ -356,12 +422,25 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
 	arg = arg->common.next;
 	for (i = 0; arg && (i < element_count); i++) {
 		if (arg->common.aml_opcode == AML_INT_RETURN_VALUE_OP) {
-
-			/* This package element is already built, just get it */
-
-			obj_desc->package.elements[i] =
-			    ACPI_CAST_PTR(union acpi_operand_object,
-					  arg->common.node);
+			if (arg->common.node->type == ACPI_TYPE_METHOD) {
+				/*
+				 * A method reference "looks" to the parser to be a method
+				 * invocation, so we special case it here
+				 */
+				arg->common.aml_opcode = AML_INT_NAMEPATH_OP;
+				status =
+				    acpi_ds_build_internal_object(walk_state,
+								  arg,
+								  &obj_desc->
+								  package.
+								  elements[i]);
+			} else {
+				/* This package element is already built, just get it */
+
+				obj_desc->package.elements[i] =
+				    ACPI_CAST_PTR(union acpi_operand_object,
+						  arg->common.node);
+			}
 		} else {
 			status = acpi_ds_build_internal_object(walk_state, arg,
 							       &obj_desc->
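
The dsobject.c hunks implement the rule spelled out in the new comment block: package elements that are common data types (Integer, Buffer, String, Package) are resolved to values before insertion, while references to Devices, Methods, Mutexes and the like are kept as named references. The userspace sketch below models only that decision; the enum values and the resolves_to_data() helper are hypothetical stand-ins, not ACPICA API (the real code checks op->common.node->type against ACPI_TYPE_* constants).

#include <stdio.h>

/* Hypothetical stand-ins for the ACPICA object types */
enum node_type {
	TYPE_INTEGER, TYPE_BUFFER, TYPE_STRING, TYPE_PACKAGE,
	TYPE_DEVICE, TYPE_METHOD, TYPE_MUTEX
};

/* Data objects resolve to values; everything else stays a named reference */
static int resolves_to_data(enum node_type t)
{
	switch (t) {
	case TYPE_INTEGER:
	case TYPE_BUFFER:
	case TYPE_STRING:
	case TYPE_PACKAGE:
		return 1;	/* store the resolved value in the package */
	default:
		return 0;	/* Device, Method, Mutex, ...: keep named reference */
	}
}

int main(void)
{
	enum node_type elems[] = { TYPE_INTEGER, TYPE_DEVICE, TYPE_METHOD };
	unsigned int i;

	for (i = 0; i < sizeof(elems) / sizeof(elems[0]); i++)
		printf("element %u: %s\n", i,
		       resolves_to_data(elems[i]) ?
		       "resolved to a data object" : "kept as a named reference");
	return 0;
}
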
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index b1fbee3f7fe1..2fe34cc73c13 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -531,6 +531,11 @@ static void acpi_processor_idle(void)
 
 	case ACPI_STATE_C3:
 		/*
+		 * Must be done before busmaster disable as we might
+		 * need to access HPET !
+		 */
+		acpi_state_timer_broadcast(pr, cx, 1);
+		/*
 		 * disable bus master
 		 * bm_check implies we need ARB_DIS
 		 * !bm_check implies we need cache flush
@@ -557,7 +562,6 @@ static void acpi_processor_idle(void)
 		/* Get start time (ticks) */
 		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 		/* Invoke C3 */
-		acpi_state_timer_broadcast(pr, cx, 1);
 		/* Tell the scheduler that we are going deep-idle: */
 		sched_clock_idle_sleep_event();
 		acpi_cstate_enter(cx);
@@ -1401,9 +1405,6 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	if (acpi_idle_suspend)
 		return(acpi_idle_enter_c1(dev, state));
 
-	if (pr->flags.bm_check)
-		acpi_idle_update_bm_rld(pr, cx);
-
 	local_irq_disable();
 	current_thread_info()->status &= ~TS_POLLING;
 	/*
@@ -1418,13 +1419,21 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 		return 0;
 	}
 
+	/*
+	 * Must be done before busmaster disable as we might need to
+	 * access HPET !
+	 */
+	acpi_state_timer_broadcast(pr, cx, 1);
+
+	if (pr->flags.bm_check)
+		acpi_idle_update_bm_rld(pr, cx);
+
 	if (cx->type == ACPI_STATE_C3)
 		ACPI_FLUSH_CPU_CACHE();
 
 	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 	/* Tell the scheduler that we are going deep-idle: */
 	sched_clock_idle_sleep_event();
-	acpi_state_timer_broadcast(pr, cx, 1);
 	acpi_idle_do_entry(cx);
 	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
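
Both processor_idle.c hunks enforce the same ordering constraint stated in the new comments: the handoff to the broadcast timer may need to touch the HPET, so acpi_state_timer_broadcast() must run before bus mastering is disabled for C3 entry, not after. A compilable sketch of the resulting sequence; the stub functions are placeholders for the kernel helpers, only the ordering is the point.

#include <stdio.h>

/* Stubs standing in for the kernel helpers; only the ordering matters. */
static void timer_broadcast_enter(void) { puts("hand timer over to broadcast device (may touch HPET)"); }
static void busmaster_disable(void)     { puts("set ARB_DIS: bus master (and HPET) access gated off"); }
static void enter_c3(void)              { puts("enter C3"); }
static void busmaster_enable(void)      { puts("clear ARB_DIS"); }
static void timer_broadcast_exit(void)  { puts("take per-CPU timer back"); }

int main(void)
{
	timer_broadcast_enter();	/* must run first, while the HPET is reachable */
	busmaster_disable();
	enter_c3();
	busmaster_enable();
	timer_broadcast_exit();
	return 0;
}
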
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index c26c61fb36c3..6742d7bc4777 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -29,6 +29,7 @@
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
+#include <linux/sched.h>
 #include <linux/cpufreq.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
@@ -413,7 +414,7 @@ static int acpi_throttling_rdmsr(struct acpi_processor *pr,
 	} else {
 		msr_low = 0;
 		msr_high = 0;
-		rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL,
+		rdmsr_safe(MSR_IA32_THERM_CONTROL,
 			(u32 *)&msr_low , (u32 *) &msr_high);
 		msr = (msr_high << 32) | msr_low;
 		*value = (acpi_integer) msr;
@@ -438,7 +439,7 @@ static int acpi_throttling_wrmsr(struct acpi_processor *pr, acpi_integer value)
438 "HARDWARE addr space,NOT supported yet\n"); 439 "HARDWARE addr space,NOT supported yet\n");
439 } else { 440 } else {
440 msr = value; 441 msr = value;
441 wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, 442 wrmsr_safe(MSR_IA32_THERM_CONTROL,
442 msr & 0xffffffff, msr >> 32); 443 msr & 0xffffffff, msr >> 32);
443 ret = 0; 444 ret = 0;
444 } 445 }
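
The rdmsr_on_cpu()/wrmsr_on_cpu() calls become plain rdmsr_safe()/wrmsr_safe() because, with the set_cpus_allowed() wrappers added below, the caller is already running on the target CPU, so a local, fault-safe MSR access suffices. For reference, the same register can be inspected from userspace through the msr driver; a sketch assuming /dev/cpu/0/msr exists (modprobe msr) and root privileges, where 0x19a is IA32_THERM_CONTROL as in the patch.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/cpu/0/msr");
		return 1;
	}
	/* pread at offset <msr number> reads that MSR on that CPU */
	if (pread(fd, &val, sizeof(val), 0x19a) != sizeof(val)) {
		perror("rdmsr");
		close(fd);
		return 1;
	}
	printf("IA32_THERM_CONTROL = %#llx\n", (unsigned long long)val);
	close(fd);
	return 0;
}
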
@@ -572,21 +573,32 @@ static int acpi_processor_get_throttling_ptc(struct acpi_processor *pr)
 		return -ENODEV;
 
 	pr->throttling.state = 0;
-	local_irq_disable();
+
 	value = 0;
 	ret = acpi_read_throttling_status(pr, &value);
 	if (ret >= 0) {
 		state = acpi_get_throttling_state(pr, value);
 		pr->throttling.state = state;
 	}
-	local_irq_enable();
 
 	return 0;
 }
 
 static int acpi_processor_get_throttling(struct acpi_processor *pr)
 {
-	return pr->throttling.acpi_processor_get_throttling(pr);
+	cpumask_t saved_mask;
+	int ret;
+
+	/*
+	 * Migrate task to the cpu pointed by pr.
+	 */
+	saved_mask = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+	ret = pr->throttling.acpi_processor_get_throttling(pr);
+	/* restore the previous state */
+	set_cpus_allowed(current, saved_mask);
+
+	return ret;
 }
 
 static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
@@ -717,21 +729,29 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
 	if (state < pr->throttling_platform_limit)
 		return -EPERM;
 
-	local_irq_disable();
 	value = 0;
 	ret = acpi_get_throttling_value(pr, state, &value);
 	if (ret >= 0) {
 		acpi_write_throttling_state(pr, value);
 		pr->throttling.state = state;
 	}
-	local_irq_enable();
 
 	return 0;
 }
 
 int acpi_processor_set_throttling(struct acpi_processor *pr, int state)
 {
-	return pr->throttling.acpi_processor_set_throttling(pr, state);
+	cpumask_t saved_mask;
+	int ret;
+	/*
+	 * Migrate task to the cpu pointed by pr.
+	 */
+	saved_mask = current->cpus_allowed;
+	set_cpus_allowed(current, cpumask_of_cpu(pr->id));
+	ret = pr->throttling.acpi_processor_set_throttling(pr, state);
+	/* restore the previous state */
+	set_cpus_allowed(current, saved_mask);
+	return ret;
 }
 
 int acpi_processor_get_throttling_info(struct acpi_processor *pr)
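
The save/pin/restore pattern used in both wrappers above is the standard way to run a code path on one specific CPU: remember cpus_allowed, bind the task to the target CPU, do the work, then restore the old mask. The userspace analogue uses sched_setaffinity(); a self-contained sketch follows, where run_on_cpu() is an illustrative helper, not a kernel API.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

static int run_on_cpu(int cpu, int (*fn)(void))
{
	cpu_set_t saved, target;
	int ret;

	/* save the current affinity (cf. saved_mask = current->cpus_allowed) */
	if (sched_getaffinity(0, sizeof(saved), &saved))
		return -1;

	CPU_ZERO(&target);
	CPU_SET(cpu, &target);
	/* migrate to the target cpu (cf. set_cpus_allowed(current, cpumask_of_cpu(pr->id))) */
	if (sched_setaffinity(0, sizeof(target), &target))
		return -1;

	ret = fn();

	/* restore the previous state */
	sched_setaffinity(0, sizeof(saved), &saved);
	return ret;
}

static int work(void)
{
	printf("running on CPU %d\n", sched_getcpu());
	return 0;
}

int main(void)
{
	return run_on_cpu(0, work) < 0;
}
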