author     Bob Moore <robert.moore@intel.com>        2006-06-23 17:04:00 -0400
committer  Len Brown <len.brown@intel.com>           2006-06-28 03:11:38 -0400
commit     967440e3be1af06ad4dc7bb18d2e3c16130fe067
tree       c9bbf70475333f4f169838ed88233f8410010677
parent     95b38b3f453c16de0f8cddcde3e71050bbfb37b9
ACPI: ACPICA 20060623
Implemented a new acpi_spinlock type for the OSL lock
interfaces. This allows the type to be customized to
the host OS for improved efficiency, since a spinlock
is usually a very small object.
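As an illustration only (this is not the Linux port touched by this
patch), a host OSL could define the new lock type and interfaces
roughly as follows; the pthread-based layout and the *_sketch names
are assumptions made for this example:

/* Minimal user-space sketch of an acpi_spinlock definition; illustrative only */
#include <pthread.h>
#include <stdlib.h>

typedef pthread_mutex_t *acpi_spinlock_sketch;	/* host picks its own small lock type */
typedef unsigned long acpi_cpu_flags_sketch;

static acpi_spinlock_sketch os_create_lock_sketch(void)
{
	pthread_mutex_t *lock = malloc(sizeof(*lock));

	if (lock)
		pthread_mutex_init(lock, NULL);
	return lock;
}

static acpi_cpu_flags_sketch os_acquire_lock_sketch(acpi_spinlock_sketch handle)
{
	pthread_mutex_lock(handle);
	return 0;		/* no interrupt flags to save in user space */
}

static void os_release_lock_sketch(acpi_spinlock_sketch handle,
				   acpi_cpu_flags_sketch flags)
{
	(void)flags;
	pthread_mutex_unlock(handle);
}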
Implemented support for "ignored" bits in the ACPI
registers. According to the ACPI specification, these
bits should be preserved when writing the registers via
a read/modify/write cycle. There are 3 bits preserved
in this manner: PM1_CONTROL[0] (SCI_EN), PM1_CONTROL[9],
and PM1_STATUS[11].
http://bugzilla.kernel.org/show_bug.cgi?id=3691
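For the PM1 control register, preserving an ignored bit simply means
a read/modify/write that never lets a caller modify that bit
position. A simplified sketch of the idea (the accessor names and the
32-bit width are assumptions for this example, not the ACPICA
hardware layer):

#include <stdint.h>

#define PM1_CONTROL_IGNORED_BITS  ((1u << 0) | (1u << 9))	/* SCI_EN and bit 9 */

/* Hypothetical low-level accessors standing in for the real hardware code */
extern uint32_t read_pm1_control(void);
extern void write_pm1_control(uint32_t value);

static void write_pm1_control_preserved(uint32_t new_bits, uint32_t mask)
{
	uint32_t value = read_pm1_control();

	mask &= ~PM1_CONTROL_IGNORED_BITS;	/* callers can never touch preserved bits */
	value = (value & ~mask) | (new_bits & mask);
	write_pm1_control(value);
}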
Implemented the initial deployment of the new OSL mutex
interfaces. This feature was requested because some host
operating systems have separate mutex and semaphore
objects. The base code now uses mutexes (and the new
mutex interfaces) wherever a binary semaphore was used
previously. However, for the current release, the mutex
interfaces are defined as macros that map them to the
existing semaphore interfaces.
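A sketch of what such a macro mapping can look like; the interface
names appear in this patch, but these exact macro bodies are an
assumption for illustration:

/* Map the new mutex interfaces onto binary semaphores for this release */
#define acpi_os_create_mutex(out_handle) \
	acpi_os_create_semaphore(1, 1, (out_handle))
#define acpi_os_delete_mutex(handle) \
	(void) acpi_os_delete_semaphore(handle)
#define acpi_os_acquire_mutex(handle, timeout) \
	acpi_os_wait_semaphore((handle), 1, (timeout))
#define acpi_os_release_mutex(handle) \
	(void) acpi_os_signal_semaphore((handle), 1)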
Fixed several problems with the support for the control
method SyncLevel parameter. The SyncLevel now works
according to the ACPI specification and in concert with the
Mutex SyncLevel parameter, since the current SyncLevel is
a property of the executing thread. Mutual exclusion for
control methods is now implemented with a mutex instead
of a semaphore.
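In short: a thread may only enter a serialized method (or acquire a
mutex) whose SyncLevel is at least the thread's current SyncLevel;
the thread then runs at the method's level, and the previous level is
restored when the method exits. A self-contained toy model of that
rule (all names here are illustrative, not ACPICA code):

#include <stdio.h>

/* Toy model of the per-thread SyncLevel rule described above */
struct toy_thread { unsigned char current_sync_level; };
struct toy_method { unsigned char sync_level; unsigned char saved_level; };

static int toy_enter_serialized_method(struct toy_thread *t, struct toy_method *m)
{
	if (t->current_sync_level > m->sync_level)
		return -1;			/* AE_AML_MUTEX_ORDER in ACPICA terms */

	m->saved_level = t->current_sync_level;	/* remember the caller's level */
	t->current_sync_level = m->sync_level;	/* thread now runs at the method's level */
	return 0;
}

static void toy_exit_serialized_method(struct toy_thread *t, struct toy_method *m)
{
	t->current_sync_level = m->saved_level;	/* restore on the outermost exit */
}

int main(void)
{
	struct toy_thread t = { .current_sync_level = 0 };
	struct toy_method m = { .sync_level = 2 };

	if (toy_enter_serialized_method(&t, &m) == 0) {
		printf("entered at SyncLevel %u\n", (unsigned)t.current_sync_level);
		toy_exit_serialized_method(&t, &m);
	}
	return 0;
}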
Fixed three instances of the use of the C shift operator
in the bitfield support code (exfldio.c) to avoid shift
values greater than or equal to the width of the target
data type. C compiler behavior is undefined in this case
and can produce unpredictable results, so the case must
be detected and avoided. (Fiodor Suietov)
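The pattern used in the exfldio.c hunks below is to test the shift
count against the integer width before shifting. A small
self-contained illustration of the guard (the 64-bit constant is an
assumption standing in for ACPI_INTEGER_BIT_SIZE):

#include <stdint.h>

#define INTEGER_BIT_SIZE 64	/* stand-in for ACPI_INTEGER_BIT_SIZE */

/*
 * datum << count is undefined in C when count >= the operand width, and real
 * compilers disagree (x86 hardware masks a 64-bit shift count to 6 bits, for
 * example), so a full-width shift must be detected and treated as zero.
 */
static uint64_t shift_left_guarded(uint64_t datum, unsigned int count)
{
	if (count >= INTEGER_BIT_SIZE)
		return 0;
	return datum << count;
}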
Added an info message whenever an SSDT or OEM table
is loaded dynamically via the Load() or LoadTable()
ASL operators. This should improve debugging capability,
since it shows exactly which tables have been loaded
(beyond the tables present in the RSDT/XSDT).
Signed-off-by: Bob Moore <robert.moore@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
28 files changed, 563 insertions, 470 deletions
diff --git a/drivers/acpi/dispatcher/dsinit.c b/drivers/acpi/dispatcher/dsinit.c
index bbdf990e9f65..daf51b5b5875 100644
--- a/drivers/acpi/dispatcher/dsinit.c
+++ b/drivers/acpi/dispatcher/dsinit.c
@@ -125,37 +125,7 @@ acpi_ds_init_one_object(acpi_handle obj_handle, | |||
125 | if (info->table_desc->pointer->revision == 1) { | 125 | if (info->table_desc->pointer->revision == 1) { |
126 | node->flags |= ANOBJ_DATA_WIDTH_32; | 126 | node->flags |= ANOBJ_DATA_WIDTH_32; |
127 | } | 127 | } |
128 | #ifdef ACPI_INIT_PARSE_METHODS | ||
129 | /* | ||
130 | * Note 11/2005: Removed this code to parse all methods during table | ||
131 | * load because it causes problems if there are any errors during the | ||
132 | * parse. Also, it seems like overkill and we probably don't want to | ||
133 | * abort a table load because of an issue with a single method. | ||
134 | */ | ||
135 | |||
136 | /* | ||
137 | * Print a dot for each method unless we are going to print | ||
138 | * the entire pathname | ||
139 | */ | ||
140 | if (!(acpi_dbg_level & ACPI_LV_INIT_NAMES)) { | ||
141 | ACPI_DEBUG_PRINT_RAW((ACPI_DB_INIT, ".")); | ||
142 | } | ||
143 | 128 | ||
144 | /* | ||
145 | * Always parse methods to detect errors, we will delete | ||
146 | * the parse tree below | ||
147 | */ | ||
148 | status = acpi_ds_parse_method(obj_handle); | ||
149 | if (ACPI_FAILURE(status)) { | ||
150 | ACPI_ERROR((AE_INFO, | ||
151 | "Method %p [%4.4s] - parse failure, %s", | ||
152 | obj_handle, | ||
153 | acpi_ut_get_node_name(obj_handle), | ||
154 | acpi_format_exception(status))); | ||
155 | |||
156 | /* This parse failed, but we will continue parsing more methods */ | ||
157 | } | ||
158 | #endif | ||
159 | info->method_count++; | 129 | info->method_count++; |
160 | break; | 130 | break; |
161 | 131 | ||
diff --git a/drivers/acpi/dispatcher/dsmethod.c b/drivers/acpi/dispatcher/dsmethod.c
index bc9aca4e7401..a39a33f4847a 100644
--- a/drivers/acpi/dispatcher/dsmethod.c
+++ b/drivers/acpi/dispatcher/dsmethod.c
@@ -52,6 +52,10 @@ | |||
52 | #define _COMPONENT ACPI_DISPATCHER | 52 | #define _COMPONENT ACPI_DISPATCHER |
53 | ACPI_MODULE_NAME("dsmethod") | 53 | ACPI_MODULE_NAME("dsmethod") |
54 | 54 | ||
55 | /* Local prototypes */ | ||
56 | static acpi_status | ||
57 | acpi_ds_create_method_mutex(union acpi_operand_object *method_desc); | ||
58 | |||
55 | /******************************************************************************* | 59 | /******************************************************************************* |
56 | * | 60 | * |
57 | * FUNCTION: acpi_ds_method_error | 61 | * FUNCTION: acpi_ds_method_error |
@@ -67,6 +71,7 @@ ACPI_MODULE_NAME("dsmethod") | |||
67 | * Note: Allows the exception handler to change the status code | 71 | * Note: Allows the exception handler to change the status code |
68 | * | 72 | * |
69 | ******************************************************************************/ | 73 | ******************************************************************************/ |
74 | |||
70 | acpi_status | 75 | acpi_status |
71 | acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state) | 76 | acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state) |
72 | { | 77 | { |
@@ -113,11 +118,51 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state) | |||
113 | 118 | ||
114 | /******************************************************************************* | 119 | /******************************************************************************* |
115 | * | 120 | * |
121 | * FUNCTION: acpi_ds_create_method_mutex | ||
122 | * | ||
123 | * PARAMETERS: obj_desc - The method object | ||
124 | * | ||
125 | * RETURN: Status | ||
126 | * | ||
127 | * DESCRIPTION: Create a mutex object for a serialized control method | ||
128 | * | ||
129 | ******************************************************************************/ | ||
130 | |||
131 | static acpi_status | ||
132 | acpi_ds_create_method_mutex(union acpi_operand_object *method_desc) | ||
133 | { | ||
134 | union acpi_operand_object *mutex_desc; | ||
135 | acpi_status status; | ||
136 | |||
137 | ACPI_FUNCTION_NAME(ds_create_method_mutex); | ||
138 | |||
139 | /* Create the new mutex object */ | ||
140 | |||
141 | mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX); | ||
142 | if (!mutex_desc) { | ||
143 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
144 | } | ||
145 | |||
146 | /* Create the actual OS Mutex */ | ||
147 | |||
148 | status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex); | ||
149 | if (ACPI_FAILURE(status)) { | ||
150 | return_ACPI_STATUS(status); | ||
151 | } | ||
152 | |||
153 | mutex_desc->mutex.sync_level = method_desc->method.sync_level; | ||
154 | method_desc->method.mutex = mutex_desc; | ||
155 | return_ACPI_STATUS(AE_OK); | ||
156 | } | ||
157 | |||
158 | /******************************************************************************* | ||
159 | * | ||
116 | * FUNCTION: acpi_ds_begin_method_execution | 160 | * FUNCTION: acpi_ds_begin_method_execution |
117 | * | 161 | * |
118 | * PARAMETERS: method_node - Node of the method | 162 | * PARAMETERS: method_node - Node of the method |
119 | * obj_desc - The method object | 163 | * obj_desc - The method object |
120 | * calling_method_node - Caller of this method (if non-null) | 164 | * walk_state - current state, NULL if not yet executing |
165 | * a method. | ||
121 | * | 166 | * |
122 | * RETURN: Status | 167 | * RETURN: Status |
123 | * | 168 | * |
@@ -128,9 +173,9 @@ acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state) | |||
128 | ******************************************************************************/ | 173 | ******************************************************************************/ |
129 | 174 | ||
130 | acpi_status | 175 | acpi_status |
131 | acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node, | 176 | acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, |
132 | union acpi_operand_object * obj_desc, | 177 | union acpi_operand_object *obj_desc, |
133 | struct acpi_namespace_node * calling_method_node) | 178 | struct acpi_walk_state *walk_state) |
134 | { | 179 | { |
135 | acpi_status status = AE_OK; | 180 | acpi_status status = AE_OK; |
136 | 181 | ||
@@ -149,35 +194,80 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node, | |||
149 | } | 194 | } |
150 | 195 | ||
151 | /* | 196 | /* |
152 | * If there is a concurrency limit on this method, we need to | 197 | * If this method is serialized, we need to acquire the method mutex. |
153 | * obtain a unit from the method semaphore. | ||
154 | */ | 198 | */ |
155 | if (obj_desc->method.semaphore) { | 199 | if (obj_desc->method.method_flags & AML_METHOD_SERIALIZED) { |
156 | /* | 200 | /* |
157 | * Allow recursive method calls, up to the reentrancy/concurrency | 201 | * Create a mutex for the method if it is defined to be Serialized |
158 | * limit imposed by the SERIALIZED rule and the sync_level method | 202 | * and a mutex has not already been created. We defer the mutex creation |
159 | * parameter. | 203 | * until a method is actually executed, to minimize the object count |
160 | * | ||
161 | * The point of this code is to avoid permanently blocking a | ||
162 | * thread that is making recursive method calls. | ||
163 | */ | 204 | */ |
164 | if (method_node == calling_method_node) { | 205 | if (!obj_desc->method.mutex) { |
165 | if (obj_desc->method.thread_count >= | 206 | status = acpi_ds_create_method_mutex(obj_desc); |
166 | obj_desc->method.concurrency) { | 207 | if (ACPI_FAILURE(status)) { |
167 | return_ACPI_STATUS(AE_AML_METHOD_LIMIT); | 208 | return_ACPI_STATUS(status); |
168 | } | 209 | } |
169 | } | 210 | } |
170 | 211 | ||
171 | /* | 212 | /* |
172 | * Get a unit from the method semaphore. This releases the | 213 | * The current_sync_level (per-thread) must be less than or equal to |
173 | * interpreter if we block (then reacquires it) | 214 | * the sync level of the method. This mechanism provides some |
215 | * deadlock prevention | ||
216 | * | ||
217 | * Top-level method invocation has no walk state at this point | ||
174 | */ | 218 | */ |
175 | status = | 219 | if (walk_state && |
176 | acpi_ex_system_wait_semaphore(obj_desc->method.semaphore, | 220 | (walk_state->thread->current_sync_level > |
177 | ACPI_WAIT_FOREVER); | 221 | obj_desc->method.mutex->mutex.sync_level)) { |
178 | if (ACPI_FAILURE(status)) { | 222 | ACPI_ERROR((AE_INFO, |
179 | return_ACPI_STATUS(status); | 223 | "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)", |
224 | acpi_ut_get_node_name(method_node), | ||
225 | walk_state->thread->current_sync_level)); | ||
226 | |||
227 | return_ACPI_STATUS(AE_AML_MUTEX_ORDER); | ||
180 | } | 228 | } |
229 | |||
230 | /* | ||
231 | * Obtain the method mutex if necessary. Do not acquire mutex for a | ||
232 | * recursive call. | ||
233 | */ | ||
234 | if (!walk_state || | ||
235 | !obj_desc->method.mutex->mutex.owner_thread || | ||
236 | (walk_state->thread != | ||
237 | obj_desc->method.mutex->mutex.owner_thread)) { | ||
238 | /* | ||
239 | * Acquire the method mutex. This releases the interpreter if we | ||
240 | * block (and reacquires it before it returns) | ||
241 | */ | ||
242 | status = | ||
243 | acpi_ex_system_wait_mutex(obj_desc->method.mutex-> | ||
244 | mutex.os_mutex, | ||
245 | ACPI_WAIT_FOREVER); | ||
246 | if (ACPI_FAILURE(status)) { | ||
247 | return_ACPI_STATUS(status); | ||
248 | } | ||
249 | |||
250 | /* Update the mutex and walk info and save the original sync_level */ | ||
251 | |||
252 | if (walk_state) { | ||
253 | obj_desc->method.mutex->mutex. | ||
254 | original_sync_level = | ||
255 | walk_state->thread->current_sync_level; | ||
256 | |||
257 | obj_desc->method.mutex->mutex.owner_thread = | ||
258 | walk_state->thread; | ||
259 | walk_state->thread->current_sync_level = | ||
260 | obj_desc->method.sync_level; | ||
261 | } else { | ||
262 | obj_desc->method.mutex->mutex. | ||
263 | original_sync_level = | ||
264 | obj_desc->method.mutex->mutex.sync_level; | ||
265 | } | ||
266 | } | ||
267 | |||
268 | /* Always increase acquisition depth */ | ||
269 | |||
270 | obj_desc->method.mutex->mutex.acquisition_depth++; | ||
181 | } | 271 | } |
182 | 272 | ||
183 | /* | 273 | /* |
@@ -200,10 +290,10 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node * method_node, | |||
200 | return_ACPI_STATUS(status); | 290 | return_ACPI_STATUS(status); |
201 | 291 | ||
202 | cleanup: | 292 | cleanup: |
203 | /* On error, must signal the method semaphore if present */ | 293 | /* On error, must release the method mutex (if present) */ |
204 | 294 | ||
205 | if (obj_desc->method.semaphore) { | 295 | if (obj_desc->method.mutex) { |
206 | (void)acpi_os_signal_semaphore(obj_desc->method.semaphore, 1); | 296 | acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex); |
207 | } | 297 | } |
208 | return_ACPI_STATUS(status); | 298 | return_ACPI_STATUS(status); |
209 | } | 299 | } |
@@ -253,10 +343,10 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, | |||
253 | return_ACPI_STATUS(AE_NULL_OBJECT); | 343 | return_ACPI_STATUS(AE_NULL_OBJECT); |
254 | } | 344 | } |
255 | 345 | ||
256 | /* Init for new method, possibly wait on concurrency semaphore */ | 346 | /* Init for new method, possibly wait on method mutex */ |
257 | 347 | ||
258 | status = acpi_ds_begin_method_execution(method_node, obj_desc, | 348 | status = acpi_ds_begin_method_execution(method_node, obj_desc, |
259 | this_walk_state->method_node); | 349 | this_walk_state); |
260 | if (ACPI_FAILURE(status)) { | 350 | if (ACPI_FAILURE(status)) { |
261 | return_ACPI_STATUS(status); | 351 | return_ACPI_STATUS(status); |
262 | } | 352 | } |
@@ -478,6 +568,8 @@ acpi_ds_restart_control_method(struct acpi_walk_state *walk_state, | |||
478 | * created, delete all locals and arguments, and delete the parse | 568 | * created, delete all locals and arguments, and delete the parse |
479 | * tree if requested. | 569 | * tree if requested. |
480 | * | 570 | * |
571 | * MUTEX: Interpreter is locked | ||
572 | * | ||
481 | ******************************************************************************/ | 573 | ******************************************************************************/ |
482 | 574 | ||
483 | void | 575 | void |
@@ -503,26 +595,21 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, | |||
503 | } | 595 | } |
504 | 596 | ||
505 | /* | 597 | /* |
506 | * Lock the parser while we terminate this method. | 598 | * If method is serialized, release the mutex and restore the |
507 | * If this is the last thread executing the method, | 599 | * current sync level for this thread |
508 | * we have additional cleanup to perform | ||
509 | */ | 600 | */ |
510 | status = acpi_ut_acquire_mutex(ACPI_MTX_CONTROL_METHOD); | 601 | if (method_desc->method.mutex) { |
511 | if (ACPI_FAILURE(status)) { | ||
512 | return_VOID; | ||
513 | } | ||
514 | 602 | ||
515 | /* Signal completion of the execution of this method if necessary */ | 603 | /* Acquisition Depth handles recursive calls */ |
516 | 604 | ||
517 | if (method_desc->method.semaphore) { | 605 | method_desc->method.mutex->mutex.acquisition_depth--; |
518 | status = | 606 | if (!method_desc->method.mutex->mutex.acquisition_depth) { |
519 | acpi_os_signal_semaphore(method_desc->method.semaphore, 1); | 607 | walk_state->thread->current_sync_level = |
520 | if (ACPI_FAILURE(status)) { | 608 | method_desc->method.mutex->mutex. |
521 | 609 | original_sync_level; | |
522 | /* Ignore error and continue */ | ||
523 | 610 | ||
524 | ACPI_EXCEPTION((AE_INFO, status, | 611 | acpi_os_release_mutex(method_desc->method.mutex->mutex. |
525 | "Could not signal method semaphore")); | 612 | os_mutex); |
526 | } | 613 | } |
527 | } | 614 | } |
528 | 615 | ||
@@ -537,7 +624,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, | |||
537 | 624 | ||
538 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); | 625 | status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); |
539 | if (ACPI_FAILURE(status)) { | 626 | if (ACPI_FAILURE(status)) { |
540 | goto exit; | 627 | return_VOID; |
541 | } | 628 | } |
542 | 629 | ||
543 | /* | 630 | /* |
@@ -580,18 +667,16 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, | |||
580 | /* | 667 | /* |
581 | * Support to dynamically change a method from not_serialized to | 668 | * Support to dynamically change a method from not_serialized to |
582 | * Serialized if it appears that the method is incorrectly written and | 669 | * Serialized if it appears that the method is incorrectly written and |
583 | * does not support multiple thread execution. The best example of this | 670 | * does not support multiple thread execution. The best example of this |
584 | * is if such a method creates namespace objects and blocks. A second | 671 | * is if such a method creates namespace objects and blocks. A second |
585 | * thread will fail with an AE_ALREADY_EXISTS exception | 672 | * thread will fail with an AE_ALREADY_EXISTS exception |
586 | * | 673 | * |
587 | * This code is here because we must wait until the last thread exits | 674 | * This code is here because we must wait until the last thread exits |
588 | * before creating the synchronization semaphore. | 675 | * before creating the synchronization semaphore. |
589 | */ | 676 | */ |
590 | if ((method_desc->method.concurrency == 1) && | 677 | if ((method_desc->method.method_flags & AML_METHOD_SERIALIZED) |
591 | (!method_desc->method.semaphore)) { | 678 | && (!method_desc->method.mutex)) { |
592 | status = acpi_os_create_semaphore(1, 1, | 679 | status = acpi_ds_create_method_mutex(method_desc); |
593 | &method_desc->method. | ||
594 | semaphore); | ||
595 | } | 680 | } |
596 | 681 | ||
597 | /* No more threads, we can free the owner_id */ | 682 | /* No more threads, we can free the owner_id */ |
@@ -599,144 +684,5 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, | |||
599 | acpi_ut_release_owner_id(&method_desc->method.owner_id); | 684 | acpi_ut_release_owner_id(&method_desc->method.owner_id); |
600 | } | 685 | } |
601 | 686 | ||
602 | exit: | ||
603 | (void)acpi_ut_release_mutex(ACPI_MTX_CONTROL_METHOD); | ||
604 | return_VOID; | 687 | return_VOID; |
605 | } | 688 | } |
606 | |||
607 | #ifdef ACPI_INIT_PARSE_METHODS | ||
608 | /* | ||
609 | * Note 11/2005: Removed this code to parse all methods during table | ||
610 | * load because it causes problems if there are any errors during the | ||
611 | * parse. Also, it seems like overkill and we probably don't want to | ||
612 | * abort a table load because of an issue with a single method. | ||
613 | */ | ||
614 | |||
615 | /******************************************************************************* | ||
616 | * | ||
617 | * FUNCTION: acpi_ds_parse_method | ||
618 | * | ||
619 | * PARAMETERS: Node - Method node | ||
620 | * | ||
621 | * RETURN: Status | ||
622 | * | ||
623 | * DESCRIPTION: Parse the AML that is associated with the method. | ||
624 | * | ||
625 | * MUTEX: Assumes parser is locked | ||
626 | * | ||
627 | ******************************************************************************/ | ||
628 | |||
629 | acpi_status acpi_ds_parse_method(struct acpi_namespace_node *node) | ||
630 | { | ||
631 | acpi_status status; | ||
632 | union acpi_operand_object *obj_desc; | ||
633 | union acpi_parse_object *op; | ||
634 | struct acpi_walk_state *walk_state; | ||
635 | |||
636 | ACPI_FUNCTION_TRACE_PTR(ds_parse_method, node); | ||
637 | |||
638 | /* Parameter Validation */ | ||
639 | |||
640 | if (!node) { | ||
641 | return_ACPI_STATUS(AE_NULL_ENTRY); | ||
642 | } | ||
643 | |||
644 | ACPI_DEBUG_PRINT((ACPI_DB_PARSE, | ||
645 | "**** Parsing [%4.4s] **** NamedObj=%p\n", | ||
646 | acpi_ut_get_node_name(node), node)); | ||
647 | |||
648 | /* Extract the method object from the method Node */ | ||
649 | |||
650 | obj_desc = acpi_ns_get_attached_object(node); | ||
651 | if (!obj_desc) { | ||
652 | return_ACPI_STATUS(AE_NULL_OBJECT); | ||
653 | } | ||
654 | |||
655 | /* Create a mutex for the method if there is a concurrency limit */ | ||
656 | |||
657 | if ((obj_desc->method.concurrency != ACPI_INFINITE_CONCURRENCY) && | ||
658 | (!obj_desc->method.semaphore)) { | ||
659 | status = acpi_os_create_semaphore(obj_desc->method.concurrency, | ||
660 | obj_desc->method.concurrency, | ||
661 | &obj_desc->method.semaphore); | ||
662 | if (ACPI_FAILURE(status)) { | ||
663 | return_ACPI_STATUS(status); | ||
664 | } | ||
665 | } | ||
666 | |||
667 | /* | ||
668 | * Allocate a new parser op to be the root of the parsed | ||
669 | * method tree | ||
670 | */ | ||
671 | op = acpi_ps_alloc_op(AML_METHOD_OP); | ||
672 | if (!op) { | ||
673 | return_ACPI_STATUS(AE_NO_MEMORY); | ||
674 | } | ||
675 | |||
676 | /* Init new op with the method name and pointer back to the Node */ | ||
677 | |||
678 | acpi_ps_set_name(op, node->name.integer); | ||
679 | op->common.node = node; | ||
680 | |||
681 | /* | ||
682 | * Get a new owner_id for objects created by this method. Namespace | ||
683 | * objects (such as Operation Regions) can be created during the | ||
684 | * first pass parse. | ||
685 | */ | ||
686 | status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id); | ||
687 | if (ACPI_FAILURE(status)) { | ||
688 | goto cleanup; | ||
689 | } | ||
690 | |||
691 | /* Create and initialize a new walk state */ | ||
692 | |||
693 | walk_state = | ||
694 | acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, NULL, | ||
695 | NULL); | ||
696 | if (!walk_state) { | ||
697 | status = AE_NO_MEMORY; | ||
698 | goto cleanup2; | ||
699 | } | ||
700 | |||
701 | status = acpi_ds_init_aml_walk(walk_state, op, node, | ||
702 | obj_desc->method.aml_start, | ||
703 | obj_desc->method.aml_length, NULL, 1); | ||
704 | if (ACPI_FAILURE(status)) { | ||
705 | acpi_ds_delete_walk_state(walk_state); | ||
706 | goto cleanup2; | ||
707 | } | ||
708 | |||
709 | /* | ||
710 | * Parse the method, first pass | ||
711 | * | ||
712 | * The first pass load is where newly declared named objects are added into | ||
713 | * the namespace. Actual evaluation of the named objects (what would be | ||
714 | * called a "second pass") happens during the actual execution of the | ||
715 | * method so that operands to the named objects can take on dynamic | ||
716 | * run-time values. | ||
717 | */ | ||
718 | status = acpi_ps_parse_aml(walk_state); | ||
719 | if (ACPI_FAILURE(status)) { | ||
720 | goto cleanup2; | ||
721 | } | ||
722 | |||
723 | ACPI_DEBUG_PRINT((ACPI_DB_PARSE, | ||
724 | "**** [%4.4s] Parsed **** NamedObj=%p Op=%p\n", | ||
725 | acpi_ut_get_node_name(node), node, op)); | ||
726 | |||
727 | /* | ||
728 | * Delete the parse tree. We simply re-parse the method for every | ||
729 | * execution since there isn't much overhead (compared to keeping lots | ||
730 | * of parse trees around) | ||
731 | */ | ||
732 | acpi_ns_delete_namespace_subtree(node); | ||
733 | acpi_ns_delete_namespace_by_owner(obj_desc->method.owner_id); | ||
734 | |||
735 | cleanup2: | ||
736 | acpi_ut_release_owner_id(&obj_desc->method.owner_id); | ||
737 | |||
738 | cleanup: | ||
739 | acpi_ps_delete_parse_tree(op); | ||
740 | return_ACPI_STATUS(status); | ||
741 | } | ||
742 | #endif | ||
diff --git a/drivers/acpi/dispatcher/dswexec.c b/drivers/acpi/dispatcher/dswexec.c
index 3acbd9145d72..b1ded62d0df1 100644
--- a/drivers/acpi/dispatcher/dswexec.c
+++ b/drivers/acpi/dispatcher/dswexec.c
@@ -472,7 +472,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) | |||
472 | acpi_ds_result_push(walk_state->result_obj, | 472 | acpi_ds_result_push(walk_state->result_obj, |
473 | walk_state); | 473 | walk_state); |
474 | } | 474 | } |
475 | |||
476 | break; | 475 | break; |
477 | 476 | ||
478 | default: | 477 | default: |
@@ -510,6 +509,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) | |||
510 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, | 509 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, |
511 | "Method Reference in a Package, Op=%p\n", | 510 | "Method Reference in a Package, Op=%p\n", |
512 | op)); | 511 | op)); |
512 | |||
513 | op->common.node = | 513 | op->common.node = |
514 | (struct acpi_namespace_node *)op->asl.value. | 514 | (struct acpi_namespace_node *)op->asl.value. |
515 | arg->asl.node->object; | 515 | arg->asl.node->object; |
@@ -670,7 +670,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) | |||
670 | 670 | ||
671 | status = acpi_ds_result_stack_pop(walk_state); | 671 | status = acpi_ds_result_stack_pop(walk_state); |
672 | } | 672 | } |
673 | |||
674 | break; | 673 | break; |
675 | 674 | ||
676 | case AML_TYPE_UNDEFINED: | 675 | case AML_TYPE_UNDEFINED: |
@@ -708,7 +707,6 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state) | |||
708 | * Check if we just completed the evaluation of a | 707 | * Check if we just completed the evaluation of a |
709 | * conditional predicate | 708 | * conditional predicate |
710 | */ | 709 | */ |
711 | |||
712 | if ((ACPI_SUCCESS(status)) && | 710 | if ((ACPI_SUCCESS(status)) && |
713 | (walk_state->control_state) && | 711 | (walk_state->control_state) && |
714 | (walk_state->control_state->common.state == | 712 | (walk_state->control_state->common.state == |
diff --git a/drivers/acpi/dispatcher/dswload.c b/drivers/acpi/dispatcher/dswload.c
index 35074399c617..e3ca7f6539c1 100644
--- a/drivers/acpi/dispatcher/dswload.c
+++ b/drivers/acpi/dispatcher/dswload.c
@@ -175,7 +175,7 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, | |||
175 | if (status == AE_NOT_FOUND) { | 175 | if (status == AE_NOT_FOUND) { |
176 | /* | 176 | /* |
177 | * Table disassembly: | 177 | * Table disassembly: |
178 | * Target of Scope() not found. Generate an External for it, and | 178 | * Target of Scope() not found. Generate an External for it, and |
179 | * insert the name into the namespace. | 179 | * insert the name into the namespace. |
180 | */ | 180 | */ |
181 | acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0); | 181 | acpi_dm_add_to_external_list(path, ACPI_TYPE_DEVICE, 0); |
@@ -210,16 +210,15 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, | |||
210 | case ACPI_TYPE_BUFFER: | 210 | case ACPI_TYPE_BUFFER: |
211 | 211 | ||
212 | /* | 212 | /* |
213 | * These types we will allow, but we will change the type. This | 213 | * These types we will allow, but we will change the type. This |
214 | * enables some existing code of the form: | 214 | * enables some existing code of the form: |
215 | * | 215 | * |
216 | * Name (DEB, 0) | 216 | * Name (DEB, 0) |
217 | * Scope (DEB) { ... } | 217 | * Scope (DEB) { ... } |
218 | * | 218 | * |
219 | * Note: silently change the type here. On the second pass, we will report | 219 | * Note: silently change the type here. On the second pass, we will report |
220 | * a warning | 220 | * a warning |
221 | */ | 221 | */ |
222 | |||
223 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, | 222 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
224 | "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n", | 223 | "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)\n", |
225 | path, | 224 | path, |
@@ -242,7 +241,6 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, | |||
242 | break; | 241 | break; |
243 | 242 | ||
244 | default: | 243 | default: |
245 | |||
246 | /* | 244 | /* |
247 | * For all other named opcodes, we will enter the name into | 245 | * For all other named opcodes, we will enter the name into |
248 | * the namespace. | 246 | * the namespace. |
@@ -259,7 +257,6 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, | |||
259 | * buffer_field, or Package), the name of the object is already | 257 | * buffer_field, or Package), the name of the object is already |
260 | * in the namespace. | 258 | * in the namespace. |
261 | */ | 259 | */ |
262 | |||
263 | if (walk_state->deferred_node) { | 260 | if (walk_state->deferred_node) { |
264 | 261 | ||
265 | /* This name is already in the namespace, get the node */ | 262 | /* This name is already in the namespace, get the node */ |
@@ -293,8 +290,8 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, | |||
293 | } | 290 | } |
294 | 291 | ||
295 | /* | 292 | /* |
296 | * Enter the named type into the internal namespace. We enter the name | 293 | * Enter the named type into the internal namespace. We enter the name |
297 | * as we go downward in the parse tree. Any necessary subobjects that | 294 | * as we go downward in the parse tree. Any necessary subobjects that |
298 | * involve arguments to the opcode must be created as we go back up the | 295 | * involve arguments to the opcode must be created as we go back up the |
299 | * parse tree later. | 296 | * parse tree later. |
300 | */ | 297 | */ |
@@ -327,12 +324,12 @@ acpi_ds_load1_begin_op(struct acpi_walk_state * walk_state, | |||
327 | (status); | 324 | (status); |
328 | } | 325 | } |
329 | } | 326 | } |
327 | |||
330 | status = AE_OK; | 328 | status = AE_OK; |
331 | } | 329 | } |
332 | } | 330 | } |
333 | 331 | ||
334 | if (ACPI_FAILURE(status)) { | 332 | if (ACPI_FAILURE(status)) { |
335 | |||
336 | ACPI_ERROR_NAMESPACE(path, status); | 333 | ACPI_ERROR_NAMESPACE(path, status); |
337 | return_ACPI_STATUS(status); | 334 | return_ACPI_STATUS(status); |
338 | } | 335 | } |
@@ -434,9 +431,13 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) | |||
434 | status = | 431 | status = |
435 | acpi_ex_create_region(op->named.data, | 432 | acpi_ex_create_region(op->named.data, |
436 | op->named.length, | 433 | op->named.length, |
437 | (acpi_adr_space_type) | 434 | (acpi_adr_space_type) ((op-> |
438 | ((op->common.value.arg)-> | 435 | common. |
439 | common.value.integer), | 436 | value. |
437 | arg)-> | ||
438 | common. | ||
439 | value. | ||
440 | integer), | ||
440 | walk_state); | 441 | walk_state); |
441 | if (ACPI_FAILURE(status)) { | 442 | if (ACPI_FAILURE(status)) { |
442 | return_ACPI_STATUS(status); | 443 | return_ACPI_STATUS(status); |
@@ -474,7 +475,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) | |||
474 | * method_op pkg_length name_string method_flags term_list | 475 | * method_op pkg_length name_string method_flags term_list |
475 | * | 476 | * |
476 | * Note: We must create the method node/object pair as soon as we | 477 | * Note: We must create the method node/object pair as soon as we |
477 | * see the method declaration. This allows later pass1 parsing | 478 | * see the method declaration. This allows later pass1 parsing |
478 | * of invocations of the method (need to know the number of | 479 | * of invocations of the method (need to know the number of |
479 | * arguments.) | 480 | * arguments.) |
480 | */ | 481 | */ |
@@ -499,6 +500,7 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state) | |||
499 | length, | 500 | length, |
500 | walk_state); | 501 | walk_state); |
501 | } | 502 | } |
503 | |||
502 | walk_state->operands[0] = NULL; | 504 | walk_state->operands[0] = NULL; |
503 | walk_state->num_operands = 0; | 505 | walk_state->num_operands = 0; |
504 | 506 | ||
@@ -570,7 +572,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, | |||
570 | #ifdef ACPI_ENABLE_MODULE_LEVEL_CODE | 572 | #ifdef ACPI_ENABLE_MODULE_LEVEL_CODE |
571 | if ((walk_state->op_info->class == AML_CLASS_EXECUTE) || | 573 | if ((walk_state->op_info->class == AML_CLASS_EXECUTE) || |
572 | (walk_state->op_info->class == AML_CLASS_CONTROL)) { | 574 | (walk_state->op_info->class == AML_CLASS_CONTROL)) { |
573 | |||
574 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, | 575 | ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, |
575 | "Begin/EXEC: %s (fl %8.8X)\n", | 576 | "Begin/EXEC: %s (fl %8.8X)\n", |
576 | walk_state->op_info->name, | 577 | walk_state->op_info->name, |
@@ -602,7 +603,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, | |||
602 | } else { | 603 | } else { |
603 | /* Get name from the op */ | 604 | /* Get name from the op */ |
604 | 605 | ||
605 | buffer_ptr = (char *)&op->named.name; | 606 | buffer_ptr = ACPI_CAST_PTR(char, &op->named.name); |
606 | } | 607 | } |
607 | } else { | 608 | } else { |
608 | /* Get the namestring from the raw AML */ | 609 | /* Get the namestring from the raw AML */ |
@@ -629,7 +630,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, | |||
629 | break; | 630 | break; |
630 | 631 | ||
631 | case AML_INT_NAMEPATH_OP: | 632 | case AML_INT_NAMEPATH_OP: |
632 | |||
633 | /* | 633 | /* |
634 | * The name_path is an object reference to an existing object. | 634 | * The name_path is an object reference to an existing object. |
635 | * Don't enter the name into the namespace, but look it up | 635 | * Don't enter the name into the namespace, but look it up |
@@ -642,7 +642,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, | |||
642 | break; | 642 | break; |
643 | 643 | ||
644 | case AML_SCOPE_OP: | 644 | case AML_SCOPE_OP: |
645 | |||
646 | /* | 645 | /* |
647 | * The Path is an object reference to an existing object. | 646 | * The Path is an object reference to an existing object. |
648 | * Don't enter the name into the namespace, but look it up | 647 | * Don't enter the name into the namespace, but look it up |
@@ -664,6 +663,7 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, | |||
664 | #endif | 663 | #endif |
665 | return_ACPI_STATUS(status); | 664 | return_ACPI_STATUS(status); |
666 | } | 665 | } |
666 | |||
667 | /* | 667 | /* |
668 | * We must check to make sure that the target is | 668 | * We must check to make sure that the target is |
669 | * one of the opcodes that actually opens a scope | 669 | * one of the opcodes that actually opens a scope |
@@ -683,13 +683,12 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, | |||
683 | case ACPI_TYPE_BUFFER: | 683 | case ACPI_TYPE_BUFFER: |
684 | 684 | ||
685 | /* | 685 | /* |
686 | * These types we will allow, but we will change the type. This | 686 | * These types we will allow, but we will change the type. This |
687 | * enables some existing code of the form: | 687 | * enables some existing code of the form: |
688 | * | 688 | * |
689 | * Name (DEB, 0) | 689 | * Name (DEB, 0) |
690 | * Scope (DEB) { ... } | 690 | * Scope (DEB) { ... } |
691 | */ | 691 | */ |
692 | |||
693 | ACPI_WARNING((AE_INFO, | 692 | ACPI_WARNING((AE_INFO, |
694 | "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)", | 693 | "Type override - [%4.4s] had invalid type (%s) for Scope operator, changed to (Scope)", |
695 | buffer_ptr, | 694 | buffer_ptr, |
@@ -729,14 +728,14 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, | |||
729 | if (ACPI_FAILURE(status)) { | 728 | if (ACPI_FAILURE(status)) { |
730 | return_ACPI_STATUS(status); | 729 | return_ACPI_STATUS(status); |
731 | } | 730 | } |
732 | |||
733 | } | 731 | } |
732 | |||
734 | return_ACPI_STATUS(AE_OK); | 733 | return_ACPI_STATUS(AE_OK); |
735 | } | 734 | } |
736 | 735 | ||
737 | /* | 736 | /* |
738 | * Enter the named type into the internal namespace. We enter the name | 737 | * Enter the named type into the internal namespace. We enter the name |
739 | * as we go downward in the parse tree. Any necessary subobjects that | 738 | * as we go downward in the parse tree. Any necessary subobjects that |
740 | * involve arguments to the opcode must be created as we go back up the | 739 | * involve arguments to the opcode must be created as we go back up the |
741 | * parse tree later. | 740 | * parse tree later. |
742 | * | 741 | * |
@@ -787,7 +786,6 @@ acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state, | |||
787 | * can get it again quickly when this scope is closed | 786 | * can get it again quickly when this scope is closed |
788 | */ | 787 | */ |
789 | op->common.node = node; | 788 | op->common.node = node; |
790 | |||
791 | return_ACPI_STATUS(status); | 789 | return_ACPI_STATUS(status); |
792 | } | 790 | } |
793 | 791 | ||
@@ -922,7 +920,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) | |||
922 | #ifndef ACPI_NO_METHOD_EXECUTION | 920 | #ifndef ACPI_NO_METHOD_EXECUTION |
923 | 921 | ||
924 | case AML_TYPE_CREATE_FIELD: | 922 | case AML_TYPE_CREATE_FIELD: |
925 | |||
926 | /* | 923 | /* |
927 | * Create the field object, but the field buffer and index must | 924 | * Create the field object, but the field buffer and index must |
928 | * be evaluated later during the execution phase | 925 | * be evaluated later during the execution phase |
@@ -931,7 +928,6 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) | |||
931 | break; | 928 | break; |
932 | 929 | ||
933 | case AML_TYPE_NAMED_FIELD: | 930 | case AML_TYPE_NAMED_FIELD: |
934 | |||
935 | /* | 931 | /* |
936 | * If we are executing a method, initialize the field | 932 | * If we are executing a method, initialize the field |
937 | */ | 933 | */ |
@@ -1051,6 +1047,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) | |||
1051 | * argument is the space_id. (We must save the address of the | 1047 | * argument is the space_id. (We must save the address of the |
1052 | * AML of the address and length operands) | 1048 | * AML of the address and length operands) |
1053 | */ | 1049 | */ |
1050 | |||
1054 | /* | 1051 | /* |
1055 | * If we have a valid region, initialize it | 1052 | * If we have a valid region, initialize it |
1056 | * Namespace is NOT locked at this point. | 1053 | * Namespace is NOT locked at this point. |
@@ -1080,7 +1077,7 @@ acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state) | |||
1080 | * method_op pkg_length name_string method_flags term_list | 1077 | * method_op pkg_length name_string method_flags term_list |
1081 | * | 1078 | * |
1082 | * Note: We must create the method node/object pair as soon as we | 1079 | * Note: We must create the method node/object pair as soon as we |
1083 | * see the method declaration. This allows later pass1 parsing | 1080 | * see the method declaration. This allows later pass1 parsing |
1084 | * of invocations of the method (need to know the number of | 1081 | * of invocations of the method (need to know the number of |
1085 | * arguments.) | 1082 | * arguments.) |
1086 | */ | 1083 | */ |
diff --git a/drivers/acpi/events/evgpe.c b/drivers/acpi/events/evgpe.c
index f01d339407f8..c76c0583ca6a 100644
--- a/drivers/acpi/events/evgpe.c
+++ b/drivers/acpi/events/evgpe.c
@@ -382,7 +382,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
382 | u32 status_reg; | 382 | u32 status_reg; |
383 | u32 enable_reg; | 383 | u32 enable_reg; |
384 | acpi_cpu_flags flags; | 384 | acpi_cpu_flags flags; |
385 | acpi_cpu_flags hw_flags; | ||
386 | acpi_native_uint i; | 385 | acpi_native_uint i; |
387 | acpi_native_uint j; | 386 | acpi_native_uint j; |
388 | 387 | ||
@@ -394,8 +393,11 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
394 | return (int_status); | 393 | return (int_status); |
395 | } | 394 | } |
396 | 395 | ||
397 | /* We need to hold the GPE lock now, hardware lock in the loop */ | 396 | /* |
398 | 397 | * We need to obtain the GPE lock for both the data structs and registers | |
398 | * Note: Not necessary to obtain the hardware lock, since the GPE registers | ||
399 | * are owned by the gpe_lock. | ||
400 | */ | ||
399 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); | 401 | flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); |
400 | 402 | ||
401 | /* Examine all GPE blocks attached to this interrupt level */ | 403 | /* Examine all GPE blocks attached to this interrupt level */ |
@@ -413,8 +415,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
413 | 415 | ||
414 | gpe_register_info = &gpe_block->register_info[i]; | 416 | gpe_register_info = &gpe_block->register_info[i]; |
415 | 417 | ||
416 | hw_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); | ||
417 | |||
418 | /* Read the Status Register */ | 418 | /* Read the Status Register */ |
419 | 419 | ||
420 | status = | 420 | status = |
@@ -423,8 +423,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
423 | &gpe_register_info-> | 423 | &gpe_register_info-> |
424 | status_address); | 424 | status_address); |
425 | if (ACPI_FAILURE(status)) { | 425 | if (ACPI_FAILURE(status)) { |
426 | acpi_os_release_lock(acpi_gbl_hardware_lock, | ||
427 | hw_flags); | ||
428 | goto unlock_and_exit; | 426 | goto unlock_and_exit; |
429 | } | 427 | } |
430 | 428 | ||
@@ -435,8 +433,6 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) | |||
435 | &enable_reg, | 433 | &enable_reg, |
436 | &gpe_register_info-> | 434 | &gpe_register_info-> |
437 | enable_address); | 435 | enable_address); |
438 | acpi_os_release_lock(acpi_gbl_hardware_lock, hw_flags); | ||
439 | |||
440 | if (ACPI_FAILURE(status)) { | 436 | if (ACPI_FAILURE(status)) { |
441 | goto unlock_and_exit; | 437 | goto unlock_and_exit; |
442 | } | 438 | } |
diff --git a/drivers/acpi/executer/exconfig.c b/drivers/acpi/executer/exconfig.c
index 823352435e08..83fed079a276 100644
--- a/drivers/acpi/executer/exconfig.c
+++ b/drivers/acpi/executer/exconfig.c
@@ -266,6 +266,10 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state, | |||
266 | } | 266 | } |
267 | } | 267 | } |
268 | 268 | ||
269 | ACPI_INFO((AE_INFO, | ||
270 | "Dynamic OEM Table Load - [%4.4s] OemId [%6.6s] OemTableId [%8.8s]", | ||
271 | table->signature, table->oem_id, table->oem_table_id)); | ||
272 | |||
269 | *return_desc = ddb_handle; | 273 | *return_desc = ddb_handle; |
270 | return_ACPI_STATUS(status); | 274 | return_ACPI_STATUS(status); |
271 | } | 275 | } |
@@ -446,6 +450,10 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc, | |||
446 | return_ACPI_STATUS(status); | 450 | return_ACPI_STATUS(status); |
447 | } | 451 | } |
448 | 452 | ||
453 | ACPI_INFO((AE_INFO, | ||
454 | "Dynamic SSDT Load - OemId [%6.6s] OemTableId [%8.8s]", | ||
455 | table_ptr->oem_id, table_ptr->oem_table_id)); | ||
456 | |||
449 | cleanup: | 457 | cleanup: |
450 | if (ACPI_FAILURE(status)) { | 458 | if (ACPI_FAILURE(status)) { |
451 | ACPI_FREE(table_ptr); | 459 | ACPI_FREE(table_ptr); |
diff --git a/drivers/acpi/executer/excreate.c b/drivers/acpi/executer/excreate.c
index 106dc7219df7..34eec82c1b1e 100644
--- a/drivers/acpi/executer/excreate.c
+++ b/drivers/acpi/executer/excreate.c
@@ -177,7 +177,7 @@ acpi_status acpi_ex_create_event(struct acpi_walk_state *walk_state) | |||
177 | * that the event is created in an unsignalled state | 177 | * that the event is created in an unsignalled state |
178 | */ | 178 | */ |
179 | status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, | 179 | status = acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, |
180 | &obj_desc->event.semaphore); | 180 | &obj_desc->event.os_semaphore); |
181 | if (ACPI_FAILURE(status)) { | 181 | if (ACPI_FAILURE(status)) { |
182 | goto cleanup; | 182 | goto cleanup; |
183 | } | 183 | } |
@@ -226,12 +226,9 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state) | |||
226 | goto cleanup; | 226 | goto cleanup; |
227 | } | 227 | } |
228 | 228 | ||
229 | /* | 229 | /* Create the actual OS Mutex */ |
230 | * Create the actual OS semaphore. | 230 | |
231 | * One unit max to make it a mutex, with one initial unit to allow | 231 | status = acpi_os_create_mutex(&obj_desc->mutex.os_mutex); |
232 | * the mutex to be acquired. | ||
233 | */ | ||
234 | status = acpi_os_create_semaphore(1, 1, &obj_desc->mutex.semaphore); | ||
235 | if (ACPI_FAILURE(status)) { | 232 | if (ACPI_FAILURE(status)) { |
236 | goto cleanup; | 233 | goto cleanup; |
237 | } | 234 | } |
@@ -565,7 +562,7 @@ acpi_ex_create_method(u8 * aml_start, | |||
565 | obj_desc->method.aml_length = aml_length; | 562 | obj_desc->method.aml_length = aml_length; |
566 | 563 | ||
567 | /* | 564 | /* |
568 | * Disassemble the method flags. Split off the Arg Count | 565 | * Disassemble the method flags. Split off the Arg Count |
569 | * for efficiency | 566 | * for efficiency |
570 | */ | 567 | */ |
571 | method_flags = (u8) operand[1]->integer.value; | 568 | method_flags = (u8) operand[1]->integer.value; |
@@ -576,21 +573,19 @@ acpi_ex_create_method(u8 * aml_start, | |||
576 | (u8) (method_flags & AML_METHOD_ARG_COUNT); | 573 | (u8) (method_flags & AML_METHOD_ARG_COUNT); |
577 | 574 | ||
578 | /* | 575 | /* |
579 | * Get the concurrency count. If required, a semaphore will be | 576 | * Get the sync_level. If method is serialized, a mutex will be |
580 | * created for this method when it is parsed. | 577 | * created for this method when it is parsed. |
581 | */ | 578 | */ |
582 | if (acpi_gbl_all_methods_serialized) { | 579 | if (acpi_gbl_all_methods_serialized) { |
583 | obj_desc->method.concurrency = 1; | 580 | obj_desc->method.sync_level = 0; |
584 | obj_desc->method.method_flags |= AML_METHOD_SERIALIZED; | 581 | obj_desc->method.method_flags |= AML_METHOD_SERIALIZED; |
585 | } else if (method_flags & AML_METHOD_SERIALIZED) { | 582 | } else if (method_flags & AML_METHOD_SERIALIZED) { |
586 | /* | 583 | /* |
587 | * ACPI 1.0: Concurrency = 1 | 584 | * ACPI 1.0: sync_level = 0 |
588 | * ACPI 2.0: Concurrency = (sync_level (in method declaration) + 1) | 585 | * ACPI 2.0: sync_level = sync_level in method declaration |
589 | */ | 586 | */ |
590 | obj_desc->method.concurrency = (u8) | 587 | obj_desc->method.sync_level = (u8) |
591 | (((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4) + 1); | 588 | ((method_flags & AML_METHOD_SYNCH_LEVEL) >> 4); |
592 | } else { | ||
593 | obj_desc->method.concurrency = ACPI_INFINITE_CONCURRENCY; | ||
594 | } | 589 | } |
595 | 590 | ||
596 | /* Attach the new object to the method Node */ | 591 | /* Attach the new object to the method Node */ |
diff --git a/drivers/acpi/executer/exdump.c b/drivers/acpi/executer/exdump.c
index 7b9718e976bf..2450943add33 100644
--- a/drivers/acpi/executer/exdump.c
+++ b/drivers/acpi/executer/exdump.c
@@ -118,14 +118,14 @@ static struct acpi_exdump_info acpi_ex_dump_device[4] = { | |||
118 | 118 | ||
119 | static struct acpi_exdump_info acpi_ex_dump_event[2] = { | 119 | static struct acpi_exdump_info acpi_ex_dump_event[2] = { |
120 | {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_event), NULL}, | 120 | {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_event), NULL}, |
121 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.semaphore), "Semaphore"} | 121 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(event.os_semaphore), "OsSemaphore"} |
122 | }; | 122 | }; |
123 | 123 | ||
124 | static struct acpi_exdump_info acpi_ex_dump_method[8] = { | 124 | static struct acpi_exdump_info acpi_ex_dump_method[8] = { |
125 | {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL}, | 125 | {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_method), NULL}, |
126 | {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"}, | 126 | {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.param_count), "ParamCount"}, |
127 | {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.concurrency), "Concurrency"}, | 127 | {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.sync_level), "Sync Level"}, |
128 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.semaphore), "Semaphore"}, | 128 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(method.mutex), "Mutex"}, |
129 | {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"}, | 129 | {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.owner_id), "Owner Id"}, |
130 | {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"}, | 130 | {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(method.thread_count), "Thread Count"}, |
131 | {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"}, | 131 | {ACPI_EXD_UINT32, ACPI_EXD_OFFSET(method.aml_length), "Aml Length"}, |
@@ -138,7 +138,7 @@ static struct acpi_exdump_info acpi_ex_dump_mutex[5] = { | |||
138 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"}, | 138 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.owner_thread), "Owner Thread"}, |
139 | {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth), | 139 | {ACPI_EXD_UINT16, ACPI_EXD_OFFSET(mutex.acquisition_depth), |
140 | "Acquire Depth"}, | 140 | "Acquire Depth"}, |
141 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.semaphore), "Semaphore"} | 141 | {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(mutex.os_mutex), "OsMutex"} |
142 | }; | 142 | }; |
143 | 143 | ||
144 | static struct acpi_exdump_info acpi_ex_dump_region[7] = { | 144 | static struct acpi_exdump_info acpi_ex_dump_region[7] = { |
diff --git a/drivers/acpi/executer/exfldio.c b/drivers/acpi/executer/exfldio.c
index 051053f7cccb..40f0bee6faa5 100644
--- a/drivers/acpi/executer/exfldio.c
+++ b/drivers/acpi/executer/exfldio.c
@@ -727,11 +727,23 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc, | |||
727 | return_ACPI_STATUS(status); | 727 | return_ACPI_STATUS(status); |
728 | } | 728 | } |
729 | 729 | ||
730 | /* Merge with previous datum if necessary */ | 730 | /* |
731 | 731 | * Merge with previous datum if necessary. | |
732 | merged_datum |= raw_datum << | 732 | * |
733 | (obj_desc->common_field.access_bit_width - | 733 | * Note: Before the shift, check if the shift value will be larger than |
734 | obj_desc->common_field.start_field_bit_offset); | 734 | * the integer size. If so, there is no need to perform the operation. |
735 | * This avoids the differences in behavior between different compilers | ||
736 | * concerning shift values larger than the target data width. | ||
737 | */ | ||
738 | if ((obj_desc->common_field.access_bit_width - | ||
739 | obj_desc->common_field.start_field_bit_offset) < | ||
740 | ACPI_INTEGER_BIT_SIZE) { | ||
741 | merged_datum |= | ||
742 | raw_datum << (obj_desc->common_field. | ||
743 | access_bit_width - | ||
744 | obj_desc->common_field. | ||
745 | start_field_bit_offset); | ||
746 | } | ||
735 | 747 | ||
736 | if (i == datum_count) { | 748 | if (i == datum_count) { |
737 | break; | 749 | break; |
@@ -808,13 +820,23 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, | |||
808 | return_ACPI_STATUS(AE_BUFFER_OVERFLOW); | 820 | return_ACPI_STATUS(AE_BUFFER_OVERFLOW); |
809 | } | 821 | } |
810 | 822 | ||
811 | /* Compute the number of datums (access width data items) */ | 823 | /* |
824 | * Create the bitmasks used for bit insertion. | ||
825 | * Note: This if/else is used to bypass compiler differences with the | ||
826 | * shift operator | ||
827 | */ | ||
828 | if (obj_desc->common_field.access_bit_width == ACPI_INTEGER_BIT_SIZE) { | ||
829 | width_mask = ACPI_INTEGER_MAX; | ||
830 | } else { | ||
831 | width_mask = | ||
832 | ACPI_MASK_BITS_ABOVE(obj_desc->common_field. | ||
833 | access_bit_width); | ||
834 | } | ||
812 | 835 | ||
813 | width_mask = | 836 | mask = width_mask & |
814 | ACPI_MASK_BITS_ABOVE(obj_desc->common_field.access_bit_width); | 837 | ACPI_MASK_BITS_BELOW(obj_desc->common_field.start_field_bit_offset); |
815 | mask = | 838 | |
816 | width_mask & ACPI_MASK_BITS_BELOW(obj_desc->common_field. | 839 | /* Compute the number of datums (access width data items) */ |
817 | start_field_bit_offset); | ||
818 | 840 | ||
819 | datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length, | 841 | datum_count = ACPI_ROUND_UP_TO(obj_desc->common_field.bit_length, |
820 | obj_desc->common_field.access_bit_width); | 842 | obj_desc->common_field.access_bit_width); |
@@ -848,12 +870,29 @@ acpi_ex_insert_into_field(union acpi_operand_object *obj_desc, | |||
848 | return_ACPI_STATUS(status); | 870 | return_ACPI_STATUS(status); |
849 | } | 871 | } |
850 | 872 | ||
851 | /* Start new output datum by merging with previous input datum */ | ||
852 | |||
853 | field_offset += obj_desc->common_field.access_byte_width; | 873 | field_offset += obj_desc->common_field.access_byte_width; |
854 | merged_datum = raw_datum >> | 874 | |
855 | (obj_desc->common_field.access_bit_width - | 875 | /* |
856 | obj_desc->common_field.start_field_bit_offset); | 876 | * Start new output datum by merging with previous input datum |
877 | * if necessary. | ||
878 | * | ||
879 | * Note: Before the shift, check if the shift value will be larger than | ||
880 | * the integer size. If so, there is no need to perform the operation. | ||
881 | * This avoids the differences in behavior between different compilers | ||
882 | * concerning shift values larger than the target data width. | ||
883 | */ | ||
884 | if ((obj_desc->common_field.access_bit_width - | ||
885 | obj_desc->common_field.start_field_bit_offset) < | ||
886 | ACPI_INTEGER_BIT_SIZE) { | ||
887 | merged_datum = | ||
888 | raw_datum >> (obj_desc->common_field. | ||
889 | access_bit_width - | ||
890 | obj_desc->common_field. | ||
891 | start_field_bit_offset); | ||
892 | } else { | ||
893 | merged_datum = 0; | ||
894 | } | ||
895 | |||
857 | mask = width_mask; | 896 | mask = width_mask; |
858 | 897 | ||
859 | if (i == datum_count) { | 898 | if (i == datum_count) { |
diff --git a/drivers/acpi/executer/exmutex.c b/drivers/acpi/executer/exmutex.c
index 93098d68cadf..d8ac2877cf05 100644
--- a/drivers/acpi/executer/exmutex.c
+++ b/drivers/acpi/executer/exmutex.c
@@ -161,12 +161,13 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, | |||
161 | 161 | ||
162 | /* | 162 | /* |
163 | * Current Sync must be less than or equal to the sync level of the | 163 | * Current Sync must be less than or equal to the sync level of the |
164 | * mutex. This mechanism provides some deadlock prevention | 164 | * mutex. This mechanism provides some deadlock prevention |
165 | */ | 165 | */ |
166 | if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) { | 166 | if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) { |
167 | ACPI_ERROR((AE_INFO, | 167 | ACPI_ERROR((AE_INFO, |
168 | "Cannot acquire Mutex [%4.4s], incorrect SyncLevel", | 168 | "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)", |
169 | acpi_ut_get_node_name(obj_desc->mutex.node))); | 169 | acpi_ut_get_node_name(obj_desc->mutex.node), |
170 | walk_state->thread->current_sync_level)); | ||
170 | return_ACPI_STATUS(AE_AML_MUTEX_ORDER); | 171 | return_ACPI_STATUS(AE_AML_MUTEX_ORDER); |
171 | } | 172 | } |
172 | 173 | ||
@@ -178,8 +179,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc, | |||
178 | 179 | ||
179 | if ((obj_desc->mutex.owner_thread->thread_id == | 180 | if ((obj_desc->mutex.owner_thread->thread_id == |
180 | walk_state->thread->thread_id) || | 181 | walk_state->thread->thread_id) || |
181 | (obj_desc->mutex.semaphore == | 182 | (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK)) { |
182 | acpi_gbl_global_lock_semaphore)) { | ||
183 | /* | 183 | /* |
184 | * The mutex is already owned by this thread, | 184 | * The mutex is already owned by this thread, |
185 | * just increment the acquisition depth | 185 | * just increment the acquisition depth |
@@ -264,7 +264,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc, | |||
264 | */ | 264 | */ |
265 | if ((obj_desc->mutex.owner_thread->thread_id != | 265 | if ((obj_desc->mutex.owner_thread->thread_id != |
266 | walk_state->thread->thread_id) | 266 | walk_state->thread->thread_id) |
267 | && (obj_desc->mutex.semaphore != acpi_gbl_global_lock_semaphore)) { | 267 | && (obj_desc->mutex.os_mutex != ACPI_GLOBAL_LOCK)) { |
268 | ACPI_ERROR((AE_INFO, | 268 | ACPI_ERROR((AE_INFO, |
269 | "Thread %X cannot release Mutex [%4.4s] acquired by thread %X", | 269 | "Thread %X cannot release Mutex [%4.4s] acquired by thread %X", |
270 | walk_state->thread->thread_id, | 270 | walk_state->thread->thread_id, |
diff --git a/drivers/acpi/executer/exsystem.c b/drivers/acpi/executer/exsystem.c
index 52beee3674a0..6b5d1e6ce94b 100644
--- a/drivers/acpi/executer/exsystem.c
+++ b/drivers/acpi/executer/exsystem.c
@@ -63,14 +63,14 @@ ACPI_MODULE_NAME("exsystem") | |||
63 | * interpreter is released. | 63 | * interpreter is released. |
64 | * | 64 | * |
65 | ******************************************************************************/ | 65 | ******************************************************************************/ |
66 | acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout) | 66 | acpi_status acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout) |
67 | { | 67 | { |
68 | acpi_status status; | 68 | acpi_status status; |
69 | acpi_status status2; | 69 | acpi_status status2; |
70 | 70 | ||
71 | ACPI_FUNCTION_TRACE(ex_system_wait_semaphore); | 71 | ACPI_FUNCTION_TRACE(ex_system_wait_semaphore); |
72 | 72 | ||
73 | status = acpi_os_wait_semaphore(semaphore, 1, 0); | 73 | status = acpi_os_wait_semaphore(semaphore, 1, ACPI_DO_NOT_WAIT); |
74 | if (ACPI_SUCCESS(status)) { | 74 | if (ACPI_SUCCESS(status)) { |
75 | return_ACPI_STATUS(status); | 75 | return_ACPI_STATUS(status); |
76 | } | 76 | } |
@@ -103,6 +103,59 @@ acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout) | |||
103 | 103 | ||
104 | /******************************************************************************* | 104 | /******************************************************************************* |
105 | * | 105 | * |
106 | * FUNCTION: acpi_ex_system_wait_mutex | ||
107 | * | ||
108 | * PARAMETERS: Mutex - Mutex to wait on | ||
109 | * Timeout - Max time to wait | ||
110 | * | ||
111 | * RETURN: Status | ||
112 | * | ||
113 | * DESCRIPTION: Implements a mutex wait with a check to see if the | ||
114 | * mutex is available immediately. If it is not, the | ||
115 | * interpreter is released. | ||
116 | * | ||
117 | ******************************************************************************/ | ||
118 | |||
119 | acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout) | ||
120 | { | ||
121 | acpi_status status; | ||
122 | acpi_status status2; | ||
123 | |||
124 | ACPI_FUNCTION_TRACE(ex_system_wait_mutex); | ||
125 | |||
126 | status = acpi_os_acquire_mutex(mutex, ACPI_DO_NOT_WAIT); | ||
127 | if (ACPI_SUCCESS(status)) { | ||
128 | return_ACPI_STATUS(status); | ||
129 | } | ||
130 | |||
131 | if (status == AE_TIME) { | ||
132 | |||
133 | /* We must wait, so unlock the interpreter */ | ||
134 | |||
135 | acpi_ex_exit_interpreter(); | ||
136 | |||
137 | status = acpi_os_acquire_mutex(mutex, timeout); | ||
138 | |||
139 | ACPI_DEBUG_PRINT((ACPI_DB_EXEC, | ||
140 | "*** Thread awake after blocking, %s\n", | ||
141 | acpi_format_exception(status))); | ||
142 | |||
143 | /* Reacquire the interpreter */ | ||
144 | |||
145 | status2 = acpi_ex_enter_interpreter(); | ||
146 | if (ACPI_FAILURE(status2)) { | ||
147 | |||
148 | /* Report fatal error, could not acquire interpreter */ | ||
149 | |||
150 | return_ACPI_STATUS(status2); | ||
151 | } | ||
152 | } | ||
153 | |||
154 | return_ACPI_STATUS(status); | ||
155 | } | ||
156 | |||
157 | /******************************************************************************* | ||
158 | * | ||
106 | * FUNCTION: acpi_ex_system_do_stall | 159 | * FUNCTION: acpi_ex_system_do_stall |
107 | * | 160 | * |
108 | * PARAMETERS: how_long - The amount of time to stall, | 161 | * PARAMETERS: how_long - The amount of time to stall, |
@@ -176,7 +229,7 @@ acpi_status acpi_ex_system_do_suspend(acpi_integer how_long) | |||
176 | * | 229 | * |
177 | * FUNCTION: acpi_ex_system_acquire_mutex | 230 | * FUNCTION: acpi_ex_system_acquire_mutex |
178 | * | 231 | * |
179 | * PARAMETERS: time_desc - The 'time to delay' object descriptor | 232 | * PARAMETERS: time_desc - Maximum time to wait for the mutex |
180 | * obj_desc - The object descriptor for this op | 233 | * obj_desc - The object descriptor for this op |
181 | * | 234 | * |
182 | * RETURN: Status | 235 | * RETURN: Status |
@@ -201,14 +254,14 @@ acpi_ex_system_acquire_mutex(union acpi_operand_object * time_desc, | |||
201 | 254 | ||
202 | /* Support for the _GL_ Mutex object -- go get the global lock */ | 255 | /* Support for the _GL_ Mutex object -- go get the global lock */ |
203 | 256 | ||
204 | if (obj_desc->mutex.semaphore == acpi_gbl_global_lock_semaphore) { | 257 | if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) { |
205 | status = | 258 | status = |
206 | acpi_ev_acquire_global_lock((u16) time_desc->integer.value); | 259 | acpi_ev_acquire_global_lock((u16) time_desc->integer.value); |
207 | return_ACPI_STATUS(status); | 260 | return_ACPI_STATUS(status); |
208 | } | 261 | } |
209 | 262 | ||
210 | status = acpi_ex_system_wait_semaphore(obj_desc->mutex.semaphore, | 263 | status = acpi_ex_system_wait_mutex(obj_desc->mutex.os_mutex, |
211 | (u16) time_desc->integer.value); | 264 | (u16) time_desc->integer.value); |
212 | return_ACPI_STATUS(status); | 265 | return_ACPI_STATUS(status); |
213 | } | 266 | } |
214 | 267 | ||
@@ -239,13 +292,13 @@ acpi_status acpi_ex_system_release_mutex(union acpi_operand_object *obj_desc) | |||
239 | 292 | ||
240 | /* Support for the _GL_ Mutex object -- release the global lock */ | 293 | /* Support for the _GL_ Mutex object -- release the global lock */ |
241 | 294 | ||
242 | if (obj_desc->mutex.semaphore == acpi_gbl_global_lock_semaphore) { | 295 | if (obj_desc->mutex.os_mutex == ACPI_GLOBAL_LOCK) { |
243 | status = acpi_ev_release_global_lock(); | 296 | status = acpi_ev_release_global_lock(); |
244 | return_ACPI_STATUS(status); | 297 | return_ACPI_STATUS(status); |
245 | } | 298 | } |
246 | 299 | ||
247 | status = acpi_os_signal_semaphore(obj_desc->mutex.semaphore, 1); | 300 | acpi_os_release_mutex(obj_desc->mutex.os_mutex); |
248 | return_ACPI_STATUS(status); | 301 | return_ACPI_STATUS(AE_OK); |
249 | } | 302 | } |
250 | 303 | ||
251 | /******************************************************************************* | 304 | /******************************************************************************* |
@@ -268,7 +321,8 @@ acpi_status acpi_ex_system_signal_event(union acpi_operand_object *obj_desc) | |||
268 | ACPI_FUNCTION_TRACE(ex_system_signal_event); | 321 | ACPI_FUNCTION_TRACE(ex_system_signal_event); |
269 | 322 | ||
270 | if (obj_desc) { | 323 | if (obj_desc) { |
271 | status = acpi_os_signal_semaphore(obj_desc->event.semaphore, 1); | 324 | status = |
325 | acpi_os_signal_semaphore(obj_desc->event.os_semaphore, 1); | ||
272 | } | 326 | } |
273 | 327 | ||
274 | return_ACPI_STATUS(status); | 328 | return_ACPI_STATUS(status); |
@@ -299,7 +353,7 @@ acpi_ex_system_wait_event(union acpi_operand_object *time_desc, | |||
299 | 353 | ||
300 | if (obj_desc) { | 354 | if (obj_desc) { |
301 | status = | 355 | status = |
302 | acpi_ex_system_wait_semaphore(obj_desc->event.semaphore, | 356 | acpi_ex_system_wait_semaphore(obj_desc->event.os_semaphore, |
303 | (u16) time_desc->integer. | 357 | (u16) time_desc->integer. |
304 | value); | 358 | value); |
305 | } | 359 | } |
@@ -322,7 +376,7 @@ acpi_ex_system_wait_event(union acpi_operand_object *time_desc, | |||
322 | acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc) | 376 | acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc) |
323 | { | 377 | { |
324 | acpi_status status = AE_OK; | 378 | acpi_status status = AE_OK; |
325 | void *temp_semaphore; | 379 | acpi_semaphore temp_semaphore; |
326 | 380 | ||
327 | ACPI_FUNCTION_ENTRY(); | 381 | ACPI_FUNCTION_ENTRY(); |
328 | 382 | ||
@@ -333,8 +387,8 @@ acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc) | |||
333 | status = | 387 | status = |
334 | acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &temp_semaphore); | 388 | acpi_os_create_semaphore(ACPI_NO_UNIT_LIMIT, 0, &temp_semaphore); |
335 | if (ACPI_SUCCESS(status)) { | 389 | if (ACPI_SUCCESS(status)) { |
336 | (void)acpi_os_delete_semaphore(obj_desc->event.semaphore); | 390 | (void)acpi_os_delete_semaphore(obj_desc->event.os_semaphore); |
337 | obj_desc->event.semaphore = temp_semaphore; | 391 | obj_desc->event.os_semaphore = temp_semaphore; |
338 | } | 392 | } |
339 | 393 | ||
340 | return (status); | 394 | return (status); |
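The new acpi_ex_system_wait_mutex follows the same shape as the semaphore variant above it: try a no-wait acquire first, and only if that fails with AE_TIME release the interpreter, block with the caller's timeout, then re-enter the interpreter. A minimal pthread sketch of that try-then-block pattern follows; the interpreter lock and helper names are stand-ins, not the ACPICA interfaces.

/* Minimal pthread sketch of "try first, then release the big lock and block".
 * The interpreter mutex and helper below are stand-ins, not the ACPICA
 * interfaces; build with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t interpreter = PTHREAD_MUTEX_INITIALIZER;  /* stands in for the AML interpreter lock */

static void wait_mutex(pthread_mutex_t *m)
{
    /* Fast path: the mutex is free, no need to give up the interpreter */
    if (pthread_mutex_trylock(m) == 0)
        return;

    /* Slow path: let other threads run AML while we block, then re-enter */
    pthread_mutex_unlock(&interpreter);
    pthread_mutex_lock(m);
    pthread_mutex_lock(&interpreter);
}

int main(void)
{
    pthread_mutex_t target = PTHREAD_MUTEX_INITIALIZER;

    pthread_mutex_lock(&interpreter);   /* the caller already holds the interpreter */
    wait_mutex(&target);
    printf("acquired the target mutex, interpreter still held\n");
    pthread_mutex_unlock(&target);
    pthread_mutex_unlock(&interpreter);
    return 0;
}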
diff --git a/drivers/acpi/hardware/hwregs.c b/drivers/acpi/hardware/hwregs.c index ae142de19507..3143f36fcec9 100644 --- a/drivers/acpi/hardware/hwregs.c +++ b/drivers/acpi/hardware/hwregs.c | |||
@@ -172,9 +172,9 @@ acpi_get_sleep_type_data(u8 sleep_state, u8 * sleep_type_a, u8 * sleep_type_b) | |||
172 | } | 172 | } |
173 | 173 | ||
174 | /* | 174 | /* |
175 | * The package must have at least two elements. NOTE (March 2005): This | 175 | * The package must have at least two elements. NOTE (March 2005): This |
176 | * goes against the current ACPI spec which defines this object as a | 176 | * goes against the current ACPI spec which defines this object as a |
177 | * package with one encoded DWORD element. However, existing practice | 177 | * package with one encoded DWORD element. However, existing practice |
178 | * by BIOS vendors seems to be to have 2 or more elements, at least | 178 | * by BIOS vendors seems to be to have 2 or more elements, at least |
179 | * one per sleep type (A/B). | 179 | * one per sleep type (A/B). |
180 | */ | 180 | */ |
@@ -255,7 +255,7 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id) | |||
255 | * return_value - Value that was read from the register | 255 | * return_value - Value that was read from the register |
256 | * Flags - Lock the hardware or not | 256 | * Flags - Lock the hardware or not |
257 | * | 257 | * |
258 | * RETURN: Status and the value read from specified Register. Value | 258 | * RETURN: Status and the value read from specified Register. Value |
259 | * returned is normalized to bit0 (is shifted all the way right) | 259 | * returned is normalized to bit0 (is shifted all the way right) |
260 | * | 260 | * |
261 | * DESCRIPTION: ACPI bit_register read function. | 261 | * DESCRIPTION: ACPI bit_register read function. |
@@ -361,8 +361,8 @@ acpi_status acpi_set_register(u32 register_id, u32 value, u32 flags) | |||
361 | case ACPI_REGISTER_PM1_STATUS: | 361 | case ACPI_REGISTER_PM1_STATUS: |
362 | 362 | ||
363 | /* | 363 | /* |
364 | * Status Registers are different from the rest. Clear by | 364 | * Status Registers are different from the rest. Clear by |
365 | * writing 1, and writing 0 has no effect. So, the only relevant | 365 | * writing 1, and writing 0 has no effect. So, the only relevant |
366 | * information is the single bit we're interested in, all others should | 366 | * information is the single bit we're interested in, all others should |
367 | * be written as 0 so they will be left unchanged. | 367 | * be written as 0 so they will be left unchanged. |
368 | */ | 368 | */ |
@@ -467,14 +467,13 @@ ACPI_EXPORT_SYMBOL(acpi_set_register) | |||
467 | * | 467 | * |
468 | * FUNCTION: acpi_hw_register_read | 468 | * FUNCTION: acpi_hw_register_read |
469 | * | 469 | * |
470 | * PARAMETERS: use_lock - Mutex hw access | 470 | * PARAMETERS: use_lock - Lock hardware? True/False |
471 | * register_id - register_iD + Offset | 471 | * register_id - ACPI Register ID |
472 | * return_value - Where the register value is returned | 472 | * return_value - Where the register value is returned |
473 | * | 473 | * |
474 | * RETURN: Status and the value read. | 474 | * RETURN: Status and the value read. |
475 | * | 475 | * |
476 | * DESCRIPTION: Acpi register read function. Registers are read at the | 476 | * DESCRIPTION: Read from the specified ACPI register |
477 | * given offset. | ||
478 | * | 477 | * |
479 | ******************************************************************************/ | 478 | ******************************************************************************/ |
480 | acpi_status | 479 | acpi_status |
@@ -580,14 +579,26 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value) | |||
580 | * | 579 | * |
581 | * FUNCTION: acpi_hw_register_write | 580 | * FUNCTION: acpi_hw_register_write |
582 | * | 581 | * |
583 | * PARAMETERS: use_lock - Mutex hw access | 582 | * PARAMETERS: use_lock - Lock hardware? True/False |
584 | * register_id - register_iD + Offset | 583 | * register_id - ACPI Register ID |
585 | * Value - The value to write | 584 | * Value - The value to write |
586 | * | 585 | * |
587 | * RETURN: Status | 586 | * RETURN: Status |
588 | * | 587 | * |
589 | * DESCRIPTION: Acpi register Write function. Registers are written at the | 588 | * DESCRIPTION: Write to the specified ACPI register |
590 | * given offset. | 589 | * |
590 | * NOTE: In accordance with the ACPI specification, this function automatically | ||
591 | * preserves the value of the following bits, meaning that these bits cannot be | ||
592 | * changed via this interface: | ||
593 | * | ||
594 | * PM1_CONTROL[0] = SCI_EN | ||
595 | * PM1_CONTROL[9] | ||
596 | * PM1_STATUS[11] | ||
597 | * | ||
598 | * ACPI References: | ||
599 | * 1) Hardware Ignored Bits: When software writes to a register with ignored | ||
600 | * bit fields, it preserves the ignored bit fields | ||
601 | * 2) SCI_EN: OSPM always preserves this bit position | ||
591 | * | 602 | * |
592 | ******************************************************************************/ | 603 | ******************************************************************************/ |
593 | 604 | ||
@@ -595,6 +606,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value) | |||
595 | { | 606 | { |
596 | acpi_status status; | 607 | acpi_status status; |
597 | acpi_cpu_flags lock_flags = 0; | 608 | acpi_cpu_flags lock_flags = 0; |
609 | u32 read_value; | ||
598 | 610 | ||
599 | ACPI_FUNCTION_TRACE(hw_register_write); | 611 | ACPI_FUNCTION_TRACE(hw_register_write); |
600 | 612 | ||
@@ -605,6 +617,22 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value) | |||
605 | switch (register_id) { | 617 | switch (register_id) { |
606 | case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ | 618 | case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */ |
607 | 619 | ||
620 | /* Perform a read first to preserve certain bits (per ACPI spec) */ | ||
621 | |||
622 | status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK, | ||
623 | ACPI_REGISTER_PM1_STATUS, | ||
624 | &read_value); | ||
625 | if (ACPI_FAILURE(status)) { | ||
626 | goto unlock_and_exit; | ||
627 | } | ||
628 | |||
629 | /* Insert the bits to be preserved */ | ||
630 | |||
631 | ACPI_INSERT_BITS(value, ACPI_PM1_STATUS_PRESERVED_BITS, | ||
632 | read_value); | ||
633 | |||
634 | /* Now we can write the data */ | ||
635 | |||
608 | status = | 636 | status = |
609 | acpi_hw_low_level_write(16, value, | 637 | acpi_hw_low_level_write(16, value, |
610 | &acpi_gbl_FADT->xpm1a_evt_blk); | 638 | &acpi_gbl_FADT->xpm1a_evt_blk); |
@@ -635,6 +663,25 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value) | |||
635 | 663 | ||
636 | case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ | 664 | case ACPI_REGISTER_PM1_CONTROL: /* 16-bit access */ |
637 | 665 | ||
666 | /* | ||
667 | * Perform a read first to preserve certain bits (per ACPI spec) | ||
668 | * | ||
669 | * Note: This includes SCI_EN, we never want to change this bit | ||
670 | */ | ||
671 | status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK, | ||
672 | ACPI_REGISTER_PM1_CONTROL, | ||
673 | &read_value); | ||
674 | if (ACPI_FAILURE(status)) { | ||
675 | goto unlock_and_exit; | ||
676 | } | ||
677 | |||
678 | /* Insert the bits to be preserved */ | ||
679 | |||
680 | ACPI_INSERT_BITS(value, ACPI_PM1_CONTROL_PRESERVED_BITS, | ||
681 | read_value); | ||
682 | |||
683 | /* Now we can write the data */ | ||
684 | |||
638 | status = | 685 | status = |
639 | acpi_hw_low_level_write(16, value, | 686 | acpi_hw_low_level_write(16, value, |
640 | &acpi_gbl_FADT->xpm1a_cnt_blk); | 687 | &acpi_gbl_FADT->xpm1a_cnt_blk); |
@@ -726,7 +773,7 @@ acpi_hw_low_level_read(u32 width, u32 * value, struct acpi_generic_address *reg) | |||
726 | return (AE_OK); | 773 | return (AE_OK); |
727 | } | 774 | } |
728 | 775 | ||
729 | /* Get a local copy of the address. Handles possible alignment issues */ | 776 | /* Get a local copy of the address. Handles possible alignment issues */ |
730 | 777 | ||
731 | ACPI_MOVE_64_TO_64(&address, ®->address); | 778 | ACPI_MOVE_64_TO_64(&address, ®->address); |
732 | if (!address) { | 779 | if (!address) { |
@@ -798,7 +845,7 @@ acpi_hw_low_level_write(u32 width, u32 value, struct acpi_generic_address * reg) | |||
798 | return (AE_OK); | 845 | return (AE_OK); |
799 | } | 846 | } |
800 | 847 | ||
801 | /* Get a local copy of the address. Handles possible alignment issues */ | 848 | /* Get a local copy of the address. Handles possible alignment issues */ |
802 | 849 | ||
803 | ACPI_MOVE_64_TO_64(&address, ®->address); | 850 | ACPI_MOVE_64_TO_64(&address, ®->address); |
804 | if (!address) { | 851 | if (!address) { |
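The PM1 write paths above now perform a read/modify/write so that the bits the spec calls "ignored" keep whatever value was last read from hardware. Below is a self-contained sketch of that flow using the PM1_CONTROL mask added later in aclocal.h (0x0201, bits 9 and 0); hw_read, hw_write and the simplified INSERT_BITS macro are placeholders for the real accessors.

/* Self-contained sketch of the preserved-bits write. hw_read()/hw_write() and
 * the simplified INSERT_BITS macro are placeholders for the real accessors;
 * the mask value matches ACPI_PM1_CONTROL_PRESERVED_BITS from aclocal.h.
 */
#include <stdio.h>
#include <stdint.h>

#define PM1_CONTROL_PRESERVED_BITS 0x0201u   /* bit 9 and bit 0 (SCI_EN) */

#define INSERT_BITS(target, mask, source) \
    ((target) = (uint16_t)(((target) & ~(mask)) | ((source) & (mask))))

static uint16_t pm1_control = 0x0001;        /* pretend hardware: SCI_EN is set */

static uint16_t hw_read(void)        { return pm1_control; }
static void     hw_write(uint16_t v) { pm1_control = v; }

static void write_pm1_control(uint16_t value)
{
    uint16_t current = hw_read();                            /* read first            */
    INSERT_BITS(value, PM1_CONTROL_PRESERVED_BITS, current); /* keep the ignored bits */
    hw_write(value);                                         /* then write            */
}

int main(void)
{
    write_pm1_control(0x2000);               /* caller's value has SCI_EN clear */
    printf("PM1_CONTROL = 0x%04X (SCI_EN still %u)\n",
           (unsigned)pm1_control, (unsigned)(pm1_control & 1u));
    return 0;
}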
diff --git a/drivers/acpi/namespace/nsaccess.c b/drivers/acpi/namespace/nsaccess.c index 48fadade52e2..c1c6c236df9a 100644 --- a/drivers/acpi/namespace/nsaccess.c +++ b/drivers/acpi/namespace/nsaccess.c | |||
@@ -196,33 +196,30 @@ acpi_status acpi_ns_root_initialize(void) | |||
196 | (u8) (ACPI_TO_INTEGER(val) - 1); | 196 | (u8) (ACPI_TO_INTEGER(val) - 1); |
197 | 197 | ||
198 | if (ACPI_STRCMP(init_val->name, "_GL_") == 0) { | 198 | if (ACPI_STRCMP(init_val->name, "_GL_") == 0) { |
199 | /* | 199 | |
200 | * Create a counting semaphore for the | 200 | /* Create a counting semaphore for the global lock */ |
201 | * global lock | 201 | |
202 | */ | ||
203 | status = | 202 | status = |
204 | acpi_os_create_semaphore | 203 | acpi_os_create_semaphore |
205 | (ACPI_NO_UNIT_LIMIT, 1, | 204 | (ACPI_NO_UNIT_LIMIT, 1, |
206 | &obj_desc->mutex.semaphore); | 205 | &acpi_gbl_global_lock_semaphore); |
207 | if (ACPI_FAILURE(status)) { | 206 | if (ACPI_FAILURE(status)) { |
208 | acpi_ut_remove_reference | 207 | acpi_ut_remove_reference |
209 | (obj_desc); | 208 | (obj_desc); |
210 | goto unlock_and_exit; | 209 | goto unlock_and_exit; |
211 | } | 210 | } |
212 | 211 | ||
213 | /* | 212 | /* Mark this mutex as very special */ |
214 | * We just created the mutex for the | 213 | |
215 | * global lock, save it | 214 | obj_desc->mutex.os_mutex = |
216 | */ | 215 | ACPI_GLOBAL_LOCK; |
217 | acpi_gbl_global_lock_semaphore = | ||
218 | obj_desc->mutex.semaphore; | ||
219 | } else { | 216 | } else { |
220 | /* Create a mutex */ | 217 | /* Create a mutex */ |
221 | 218 | ||
222 | status = acpi_os_create_semaphore(1, 1, | 219 | status = |
223 | &obj_desc-> | 220 | acpi_os_create_mutex(&obj_desc-> |
224 | mutex. | 221 | mutex. |
225 | semaphore); | 222 | os_mutex); |
226 | if (ACPI_FAILURE(status)) { | 223 | if (ACPI_FAILURE(status)) { |
227 | acpi_ut_remove_reference | 224 | acpi_ut_remove_reference |
228 | (obj_desc); | 225 | (obj_desc); |
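Because the global lock is still backed by a counting semaphore while ordinary Mutex objects now use OS mutexes, the _GL_ object's os_mutex field is set to the sentinel ACPI_GLOBAL_LOCK (defined as (acpi_semaphore)(-1) in aclocal.h) and the acquire, release and delete paths compare against it before touching the OS mutex. A tiny sketch of that sentinel-handle pattern, with invented names:

/* Sketch of the sentinel-handle pattern used for the _GL_ mutex object.
 * Types, names and values here are illustrative, not the ACPICA definitions.
 */
#include <stdio.h>

typedef void *demo_handle;
#define DEMO_GLOBAL_LOCK ((demo_handle)(-1))    /* never a valid OS handle */

static void demo_acquire(demo_handle h)
{
    if (h == DEMO_GLOBAL_LOCK) {
        /* Special path: run the HW global lock protocol (counting semaphore) */
        printf("acquire the global lock via its dedicated semaphore\n");
        return;
    }
    /* Normal path: an ordinary OS mutex */
    printf("acquire ordinary OS mutex %p\n", h);
}

int main(void)
{
    int ordinary_mutex_storage = 0;
    demo_acquire(&ordinary_mutex_storage);
    demo_acquire(DEMO_GLOBAL_LOCK);
    return 0;
}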
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index 8e46d1b39491..afd937b158b3 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -688,18 +688,9 @@ EXPORT_SYMBOL(acpi_os_wait_events_complete); | |||
688 | /* | 688 | /* |
689 | * Allocate the memory for a spinlock and initialize it. | 689 | * Allocate the memory for a spinlock and initialize it. |
690 | */ | 690 | */ |
691 | acpi_status acpi_os_create_lock(acpi_handle * out_handle) | 691 | acpi_status acpi_os_create_lock(acpi_spinlock * handle) |
692 | { | 692 | { |
693 | spinlock_t *lock_ptr; | 693 | spin_lock_init(*handle); |
694 | |||
695 | |||
696 | lock_ptr = acpi_os_allocate(sizeof(spinlock_t)); | ||
697 | |||
698 | spin_lock_init(lock_ptr); | ||
699 | |||
700 | ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating spinlock[%p].\n", lock_ptr)); | ||
701 | |||
702 | *out_handle = lock_ptr; | ||
703 | 694 | ||
704 | return AE_OK; | 695 | return AE_OK; |
705 | } | 696 | } |
@@ -707,13 +698,8 @@ acpi_status acpi_os_create_lock(acpi_handle * out_handle) | |||
707 | /* | 698 | /* |
708 | * Deallocate the memory for a spinlock. | 699 | * Deallocate the memory for a spinlock. |
709 | */ | 700 | */ |
710 | void acpi_os_delete_lock(acpi_handle handle) | 701 | void acpi_os_delete_lock(acpi_spinlock handle) |
711 | { | 702 | { |
712 | |||
713 | ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting spinlock[%p].\n", handle)); | ||
714 | |||
715 | acpi_os_free(handle); | ||
716 | |||
717 | return; | 703 | return; |
718 | } | 704 | } |
719 | 705 | ||
@@ -1037,10 +1023,10 @@ EXPORT_SYMBOL(max_cstate); | |||
1037 | * handle is a pointer to the spinlock_t. | 1023 | * handle is a pointer to the spinlock_t. |
1038 | */ | 1024 | */ |
1039 | 1025 | ||
1040 | acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle) | 1026 | acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp) |
1041 | { | 1027 | { |
1042 | acpi_cpu_flags flags; | 1028 | acpi_cpu_flags flags; |
1043 | spin_lock_irqsave((spinlock_t *) handle, flags); | 1029 | spin_lock_irqsave(lockp, flags); |
1044 | return flags; | 1030 | return flags; |
1045 | } | 1031 | } |
1046 | 1032 | ||
@@ -1048,9 +1034,9 @@ acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle) | |||
1048 | * Release a spinlock. See above. | 1034 | * Release a spinlock. See above. |
1049 | */ | 1035 | */ |
1050 | 1036 | ||
1051 | void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags) | 1037 | void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags) |
1052 | { | 1038 | { |
1053 | spin_unlock_irqrestore((spinlock_t *) handle, flags); | 1039 | spin_unlock_irqrestore(lockp, flags); |
1054 | } | 1040 | } |
1055 | 1041 | ||
1056 | #ifndef ACPI_USE_LOCAL_CACHE | 1042 | #ifndef ACPI_USE_LOCAL_CACHE |
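With the new acpi_spinlock type (spinlock_t * on Linux, see aclinux.h at the end of this patch), the OSL no longer allocates lock storage: ACPICA supplies the spinlock_t and the OSL only initializes it, so create and delete collapse to almost nothing and acquire/release lose their casts. The sketch below shows the caller-provides-storage idea in portable C; the demo_* types and the GCC __sync builtins stand in for spinlock_t and the kernel spinlock API.

/* Sketch of the caller-provides-storage pattern behind acpi_spinlock. The
 * demo_* types and the GCC __sync builtins stand in for spinlock_t and the
 * kernel spinlock API; on Linux the real handle type is spinlock_t *.
 */
#include <stdio.h>

typedef struct { volatile int taken; } demo_lock;   /* storage lives with the caller */
typedef demo_lock *demo_spinlock;                   /* the handle is just a pointer  */

static void demo_create_lock(demo_spinlock handle)  /* was: allocate and return      */
{
    handle->taken = 0;                              /* now: only initialize          */
}

static void demo_acquire(demo_spinlock handle)
{
    while (__sync_lock_test_and_set(&handle->taken, 1))
        ;                                           /* spin until the lock is free   */
}

static void demo_release(demo_spinlock handle)
{
    __sync_lock_release(&handle->taken);
}

int main(void)
{
    static demo_lock gpe_lock;                      /* static storage, as in acglobal.h */
    demo_spinlock lock = &gpe_lock;

    demo_create_lock(lock);
    demo_acquire(lock);
    puts("in the critical section");
    demo_release(lock);
    return 0;
}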
diff --git a/drivers/acpi/parser/psparse.c b/drivers/acpi/parser/psparse.c index 7ee2f2e77525..a02aa62fe1e5 100644 --- a/drivers/acpi/parser/psparse.c +++ b/drivers/acpi/parser/psparse.c | |||
@@ -469,6 +469,16 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) | |||
469 | } | 469 | } |
470 | 470 | ||
471 | walk_state->thread = thread; | 471 | walk_state->thread = thread; |
472 | |||
473 | /* | ||
474 | * If executing a method, the starting sync_level is this method's | ||
475 | * sync_level | ||
476 | */ | ||
477 | if (walk_state->method_desc) { | ||
478 | walk_state->thread->current_sync_level = | ||
479 | walk_state->method_desc->method.sync_level; | ||
480 | } | ||
481 | |||
472 | acpi_ds_push_walk_state(walk_state, thread); | 482 | acpi_ds_push_walk_state(walk_state, thread); |
473 | 483 | ||
474 | /* | 484 | /* |
@@ -505,6 +515,10 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) | |||
505 | status = | 515 | status = |
506 | acpi_ds_call_control_method(thread, walk_state, | 516 | acpi_ds_call_control_method(thread, walk_state, |
507 | NULL); | 517 | NULL); |
518 | if (ACPI_FAILURE(status)) { | ||
519 | status = | ||
520 | acpi_ds_method_error(status, walk_state); | ||
521 | } | ||
508 | 522 | ||
509 | /* | 523 | /* |
510 | * If the transfer to the new method call worked, a new walk | 524 |
@@ -525,7 +539,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) | |||
525 | /* Check for possible multi-thread reentrancy problem */ | 539 | /* Check for possible multi-thread reentrancy problem */ |
526 | 540 | ||
527 | if ((status == AE_ALREADY_EXISTS) && | 541 | if ((status == AE_ALREADY_EXISTS) && |
528 | (!walk_state->method_desc->method.semaphore)) { | 542 | (!walk_state->method_desc->method.mutex)) { |
529 | /* | 543 | /* |
530 | * Method tried to create an object twice. The probable cause is | 544 | * Method tried to create an object twice. The probable cause is |
531 | * that the method cannot handle reentrancy. | 545 | * that the method cannot handle reentrancy. |
@@ -537,7 +551,7 @@ acpi_status acpi_ps_parse_aml(struct acpi_walk_state *walk_state) | |||
537 | */ | 551 | */ |
538 | walk_state->method_desc->method.method_flags |= | 552 | walk_state->method_desc->method.method_flags |= |
539 | AML_METHOD_SERIALIZED; | 553 | AML_METHOD_SERIALIZED; |
540 | walk_state->method_desc->method.concurrency = 1; | 554 | walk_state->method_desc->method.sync_level = 0; |
541 | } | 555 | } |
542 | } | 556 | } |
543 | 557 | ||
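Two behavior changes sit in the psparse.c hunks: a method's declared sync_level now seeds the thread's current_sync_level when execution starts, and a method that fails with AE_ALREADY_EXISTS while unserialized is retroactively marked serialized with sync_level 0 (it used to get concurrency = 1). A small sketch of that fallback; the struct and the flag value are assumptions for illustration only.

/* Sketch of the reentrancy fallback: an unserialized method that fails with
 * "object already exists" is assumed non-reentrant and serialized from then on.
 * The struct is a stand-in and the flag value is an assumption for illustration.
 */
#include <stdio.h>

#define DEMO_METHOD_SERIALIZED 0x08   /* assumed value, for the demo only */

struct demo_method {
    unsigned char method_flags;
    unsigned char sync_level;
    int has_mutex;                    /* stands in for a non-NULL method.mutex */
};

static void on_already_exists(struct demo_method *m)
{
    if (!m->has_mutex) {              /* only unserialized methods get the fallback */
        m->method_flags |= DEMO_METHOD_SERIALIZED;
        m->sync_level = 0;            /* was "concurrency = 1" before this patch */
    }
}

int main(void)
{
    struct demo_method m = { 0, 0, 0 };
    on_already_exists(&m);
    printf("flags=0x%02X sync_level=%u\n",
           (unsigned)m.method_flags, (unsigned)m.sync_level);
    return 0;
}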
diff --git a/drivers/acpi/utilities/utdelete.c b/drivers/acpi/utilities/utdelete.c index 67b9f325c6fa..38ebe1c54330 100644 --- a/drivers/acpi/utilities/utdelete.c +++ b/drivers/acpi/utilities/utdelete.c | |||
@@ -155,21 +155,30 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) | |||
155 | case ACPI_TYPE_MUTEX: | 155 | case ACPI_TYPE_MUTEX: |
156 | 156 | ||
157 | ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, | 157 | ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, |
158 | "***** Mutex %p, Semaphore %p\n", | 158 | "***** Mutex %p, OS Mutex %p\n", |
159 | object, object->mutex.semaphore)); | 159 | object, object->mutex.os_mutex)); |
160 | 160 | ||
161 | acpi_ex_unlink_mutex(object); | 161 | if (object->mutex.os_mutex != ACPI_GLOBAL_LOCK) { |
162 | (void)acpi_os_delete_semaphore(object->mutex.semaphore); | 162 | acpi_ex_unlink_mutex(object); |
163 | acpi_os_delete_mutex(object->mutex.os_mutex); | ||
164 | } else { | ||
165 | /* Global Lock "mutex" is actually a counting semaphore */ | ||
166 | |||
167 | (void) | ||
168 | acpi_os_delete_semaphore | ||
169 | (acpi_gbl_global_lock_semaphore); | ||
170 | acpi_gbl_global_lock_semaphore = NULL; | ||
171 | } | ||
163 | break; | 172 | break; |
164 | 173 | ||
165 | case ACPI_TYPE_EVENT: | 174 | case ACPI_TYPE_EVENT: |
166 | 175 | ||
167 | ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, | 176 | ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, |
168 | "***** Event %p, Semaphore %p\n", | 177 | "***** Event %p, OS Semaphore %p\n", |
169 | object, object->event.semaphore)); | 178 | object, object->event.os_semaphore)); |
170 | 179 | ||
171 | (void)acpi_os_delete_semaphore(object->event.semaphore); | 180 | (void)acpi_os_delete_semaphore(object->event.os_semaphore); |
172 | object->event.semaphore = NULL; | 181 | object->event.os_semaphore = NULL; |
173 | break; | 182 | break; |
174 | 183 | ||
175 | case ACPI_TYPE_METHOD: | 184 | case ACPI_TYPE_METHOD: |
@@ -177,12 +186,13 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object) | |||
177 | ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, | 186 | ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, |
178 | "***** Method %p\n", object)); | 187 | "***** Method %p\n", object)); |
179 | 188 | ||
180 | /* Delete the method semaphore if it exists */ | 189 | /* Delete the method mutex if it exists */ |
181 | 190 | ||
182 | if (object->method.semaphore) { | 191 | if (object->method.mutex) { |
183 | (void)acpi_os_delete_semaphore(object->method. | 192 | acpi_os_delete_mutex(object->method.mutex->mutex. |
184 | semaphore); | 193 | os_mutex); |
185 | object->method.semaphore = NULL; | 194 | acpi_ut_delete_object_desc(object->method.mutex); |
195 | object->method.mutex = NULL; | ||
186 | } | 196 | } |
187 | break; | 197 | break; |
188 | 198 | ||
diff --git a/drivers/acpi/utilities/utglobal.c b/drivers/acpi/utilities/utglobal.c index e5999c65c0b8..014030af8b50 100644 --- a/drivers/acpi/utilities/utglobal.c +++ b/drivers/acpi/utilities/utglobal.c | |||
@@ -794,6 +794,7 @@ void acpi_ut_init_globals(void) | |||
794 | 794 | ||
795 | /* Global Lock support */ | 795 | /* Global Lock support */ |
796 | 796 | ||
797 | acpi_gbl_global_lock_semaphore = NULL; | ||
797 | acpi_gbl_global_lock_acquired = FALSE; | 798 | acpi_gbl_global_lock_acquired = FALSE; |
798 | acpi_gbl_global_lock_thread_count = 0; | 799 | acpi_gbl_global_lock_thread_count = 0; |
799 | acpi_gbl_global_lock_handle = 0; | 800 | acpi_gbl_global_lock_handle = 0; |
diff --git a/drivers/acpi/utilities/utmutex.c b/drivers/acpi/utilities/utmutex.c index 25eb34369afa..dfc8f30ca892 100644 --- a/drivers/acpi/utilities/utmutex.c +++ b/drivers/acpi/utilities/utmutex.c | |||
@@ -82,12 +82,9 @@ acpi_status acpi_ut_mutex_initialize(void) | |||
82 | 82 | ||
83 | /* Create the spinlocks for use at interrupt level */ | 83 | /* Create the spinlocks for use at interrupt level */ |
84 | 84 | ||
85 | status = acpi_os_create_lock(&acpi_gbl_gpe_lock); | 85 | spin_lock_init(acpi_gbl_gpe_lock); |
86 | if (ACPI_FAILURE(status)) { | 86 | spin_lock_init(acpi_gbl_hardware_lock); |
87 | return_ACPI_STATUS(status); | ||
88 | } | ||
89 | 87 | ||
90 | status = acpi_os_create_lock(&acpi_gbl_hardware_lock); | ||
91 | return_ACPI_STATUS(status); | 88 | return_ACPI_STATUS(status); |
92 | } | 89 | } |
93 | 90 | ||
@@ -146,9 +143,8 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id) | |||
146 | } | 143 | } |
147 | 144 | ||
148 | if (!acpi_gbl_mutex_info[mutex_id].mutex) { | 145 | if (!acpi_gbl_mutex_info[mutex_id].mutex) { |
149 | status = acpi_os_create_semaphore(1, 1, | 146 | status = |
150 | &acpi_gbl_mutex_info | 147 | acpi_os_create_mutex(&acpi_gbl_mutex_info[mutex_id].mutex); |
151 | [mutex_id].mutex); | ||
152 | acpi_gbl_mutex_info[mutex_id].thread_id = | 148 | acpi_gbl_mutex_info[mutex_id].thread_id = |
153 | ACPI_MUTEX_NOT_ACQUIRED; | 149 | ACPI_MUTEX_NOT_ACQUIRED; |
154 | acpi_gbl_mutex_info[mutex_id].use_count = 0; | 150 | acpi_gbl_mutex_info[mutex_id].use_count = 0; |
@@ -171,7 +167,6 @@ static acpi_status acpi_ut_create_mutex(acpi_mutex_handle mutex_id) | |||
171 | 167 | ||
172 | static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id) | 168 | static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id) |
173 | { | 169 | { |
174 | acpi_status status; | ||
175 | 170 | ||
176 | ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id); | 171 | ACPI_FUNCTION_TRACE_U32(ut_delete_mutex, mutex_id); |
177 | 172 | ||
@@ -179,12 +174,12 @@ static acpi_status acpi_ut_delete_mutex(acpi_mutex_handle mutex_id) | |||
179 | return_ACPI_STATUS(AE_BAD_PARAMETER); | 174 | return_ACPI_STATUS(AE_BAD_PARAMETER); |
180 | } | 175 | } |
181 | 176 | ||
182 | status = acpi_os_delete_semaphore(acpi_gbl_mutex_info[mutex_id].mutex); | 177 | acpi_os_delete_mutex(acpi_gbl_mutex_info[mutex_id].mutex); |
183 | 178 | ||
184 | acpi_gbl_mutex_info[mutex_id].mutex = NULL; | 179 | acpi_gbl_mutex_info[mutex_id].mutex = NULL; |
185 | acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; | 180 | acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; |
186 | 181 | ||
187 | return_ACPI_STATUS(status); | 182 | return_ACPI_STATUS(AE_OK); |
188 | } | 183 | } |
189 | 184 | ||
190 | /******************************************************************************* | 185 | /******************************************************************************* |
@@ -251,8 +246,8 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) | |||
251 | "Thread %X attempting to acquire Mutex [%s]\n", | 246 | "Thread %X attempting to acquire Mutex [%s]\n", |
252 | this_thread_id, acpi_ut_get_mutex_name(mutex_id))); | 247 | this_thread_id, acpi_ut_get_mutex_name(mutex_id))); |
253 | 248 | ||
254 | status = acpi_os_wait_semaphore(acpi_gbl_mutex_info[mutex_id].mutex, | 249 | status = acpi_os_acquire_mutex(acpi_gbl_mutex_info[mutex_id].mutex, |
255 | 1, ACPI_WAIT_FOREVER); | 250 | ACPI_WAIT_FOREVER); |
256 | if (ACPI_SUCCESS(status)) { | 251 | if (ACPI_SUCCESS(status)) { |
257 | ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, | 252 | ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, |
258 | "Thread %X acquired Mutex [%s]\n", | 253 | "Thread %X acquired Mutex [%s]\n", |
@@ -284,7 +279,6 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id) | |||
284 | 279 | ||
285 | acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) | 280 | acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) |
286 | { | 281 | { |
287 | acpi_status status; | ||
288 | acpi_thread_id this_thread_id; | 282 | acpi_thread_id this_thread_id; |
289 | 283 | ||
290 | ACPI_FUNCTION_NAME(ut_release_mutex); | 284 | ACPI_FUNCTION_NAME(ut_release_mutex); |
@@ -340,19 +334,6 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id) | |||
340 | 334 | ||
341 | acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; | 335 | acpi_gbl_mutex_info[mutex_id].thread_id = ACPI_MUTEX_NOT_ACQUIRED; |
342 | 336 | ||
343 | status = | 337 | acpi_os_release_mutex(acpi_gbl_mutex_info[mutex_id].mutex); |
344 | acpi_os_signal_semaphore(acpi_gbl_mutex_info[mutex_id].mutex, 1); | 338 | return (AE_OK); |
345 | |||
346 | if (ACPI_FAILURE(status)) { | ||
347 | ACPI_EXCEPTION((AE_INFO, status, | ||
348 | "Thread %X could not release Mutex [%X]", | ||
349 | this_thread_id, mutex_id)); | ||
350 | } else { | ||
351 | ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, | ||
352 | "Thread %X released Mutex [%s]\n", | ||
353 | this_thread_id, | ||
354 | acpi_ut_get_mutex_name(mutex_id))); | ||
355 | } | ||
356 | |||
357 | return (status); | ||
358 | } | 339 | } |
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h index b9beceb33141..b492857fe721 100644 --- a/include/acpi/acconfig.h +++ b/include/acpi/acconfig.h | |||
@@ -63,7 +63,7 @@ | |||
63 | 63 | ||
64 | /* Current ACPICA subsystem version in YYYYMMDD format */ | 64 | /* Current ACPICA subsystem version in YYYYMMDD format */ |
65 | 65 | ||
66 | #define ACPI_CA_VERSION 0x20060608 | 66 | #define ACPI_CA_VERSION 0x20060623 |
67 | 67 | ||
68 | /* | 68 | /* |
69 | * OS name, used for the _OS object. The _OS object is essentially obsolete, | 69 | * OS name, used for the _OS object. The _OS object is essentially obsolete, |
diff --git a/include/acpi/acdispat.h b/include/acpi/acdispat.h index 288f84903af7..a22fe9cf8493 100644 --- a/include/acpi/acdispat.h +++ b/include/acpi/acdispat.h | |||
@@ -201,7 +201,7 @@ acpi_ds_terminate_control_method(union acpi_operand_object *method_desc, | |||
201 | acpi_status | 201 | acpi_status |
202 | acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, | 202 | acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node, |
203 | union acpi_operand_object *obj_desc, | 203 | union acpi_operand_object *obj_desc, |
204 | struct acpi_namespace_node *calling_method_node); | 204 | struct acpi_walk_state *walk_state); |
205 | 205 | ||
206 | acpi_status | 206 | acpi_status |
207 | acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state); | 207 | acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state); |
diff --git a/include/acpi/acglobal.h b/include/acpi/acglobal.h index 14531d48f6b6..06972e6637de 100644 --- a/include/acpi/acglobal.h +++ b/include/acpi/acglobal.h | |||
@@ -181,6 +181,12 @@ ACPI_EXTERN u8 acpi_gbl_integer_nybble_width; | |||
181 | extern struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1]; | 181 | extern struct acpi_table_list acpi_gbl_table_lists[ACPI_TABLE_ID_MAX + 1]; |
182 | extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1]; | 182 | extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1]; |
183 | 183 | ||
184 | /***************************************************************************** | ||
185 | * | ||
186 | * Mutual exclusion within ACPICA subsystem | ||
187 | * | ||
188 | ****************************************************************************/ | ||
189 | |||
184 | /* | 190 | /* |
185 | * Predefined mutex objects. This array contains the | 191 | * Predefined mutex objects. This array contains the |
186 | * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs. | 192 | * actual OS mutex handles, indexed by the local ACPI_MUTEX_HANDLEs. |
@@ -188,6 +194,20 @@ extern struct acpi_table_support acpi_gbl_table_data[ACPI_TABLE_ID_MAX + 1]; | |||
188 | */ | 194 | */ |
189 | ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX]; | 195 | ACPI_EXTERN struct acpi_mutex_info acpi_gbl_mutex_info[ACPI_NUM_MUTEX]; |
190 | 196 | ||
197 | /* | ||
198 | * Global lock semaphore works in conjunction with the actual HW global lock | ||
199 | */ | ||
200 | ACPI_EXTERN acpi_semaphore acpi_gbl_global_lock_semaphore; | ||
201 | |||
202 | /* | ||
203 | * Spinlocks are used for interfaces that can be possibly called at | ||
204 | * interrupt level | ||
205 | */ | ||
206 | ACPI_EXTERN spinlock_t _acpi_gbl_gpe_lock; /* For GPE data structs and registers */ | ||
207 | ACPI_EXTERN spinlock_t _acpi_gbl_hardware_lock; /* For ACPI H/W except GPE registers */ | ||
208 | #define acpi_gbl_gpe_lock &_acpi_gbl_gpe_lock | ||
209 | #define acpi_gbl_hardware_lock &_acpi_gbl_hardware_lock | ||
210 | |||
191 | /***************************************************************************** | 211 | /***************************************************************************** |
192 | * | 212 | * |
193 | * Miscellaneous globals | 213 | * Miscellaneous globals |
@@ -217,7 +237,6 @@ ACPI_EXTERN struct acpi_object_notify_handler acpi_gbl_system_notify; | |||
217 | ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler; | 237 | ACPI_EXTERN acpi_exception_handler acpi_gbl_exception_handler; |
218 | ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler; | 238 | ACPI_EXTERN acpi_init_handler acpi_gbl_init_handler; |
219 | ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk; | 239 | ACPI_EXTERN struct acpi_walk_state *acpi_gbl_breakpoint_walk; |
220 | ACPI_EXTERN acpi_handle acpi_gbl_global_lock_semaphore; | ||
221 | 240 | ||
222 | /* Misc */ | 241 | /* Misc */ |
223 | 242 | ||
@@ -315,11 +334,6 @@ ACPI_EXTERN struct acpi_gpe_xrupt_info *acpi_gbl_gpe_xrupt_list_head; | |||
315 | ACPI_EXTERN struct acpi_gpe_block_info | 334 | ACPI_EXTERN struct acpi_gpe_block_info |
316 | *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; | 335 | *acpi_gbl_gpe_fadt_blocks[ACPI_MAX_GPE_BLOCKS]; |
317 | 336 | ||
318 | /* Spinlocks */ | ||
319 | |||
320 | ACPI_EXTERN acpi_handle acpi_gbl_gpe_lock; | ||
321 | ACPI_EXTERN acpi_handle acpi_gbl_hardware_lock; | ||
322 | |||
323 | /***************************************************************************** | 337 | /***************************************************************************** |
324 | * | 338 | * |
325 | * Debugger globals | 339 | * Debugger globals |
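The two interrupt-level locks in acglobal.h become static spinlock_t storage with underscore-prefixed names, and the old names turn into macros that expand to the address of that storage, so existing callers keep passing "the handle" unchanged while the allocation in osl.c disappears. The trick in miniature, with hypothetical names:

/* The storage-plus-address-macro trick in miniature; demo_* names are
 * hypothetical, the real code uses spinlock_t and ACPI_EXTERN.
 */
#include <stdio.h>

typedef struct { int dummy; } demo_lock_t;

static demo_lock_t _demo_gpe_lock;              /* the actual storage              */
#define demo_gpe_lock (&_demo_gpe_lock)         /* old name now yields its address */

static void demo_take(demo_lock_t *lock)
{
    printf("locking %p\n", (void *)lock);
}

int main(void)
{
    demo_take(demo_gpe_lock);   /* callers are unchanged: they still pass "the handle" */
    return 0;
}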
diff --git a/include/acpi/acinterp.h b/include/acpi/acinterp.h index 9f22cfcb624b..216339a8f1f6 100644 --- a/include/acpi/acinterp.h +++ b/include/acpi/acinterp.h | |||
@@ -287,7 +287,10 @@ acpi_ex_system_wait_event(union acpi_operand_object *time, | |||
287 | 287 | ||
288 | acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc); | 288 | acpi_status acpi_ex_system_reset_event(union acpi_operand_object *obj_desc); |
289 | 289 | ||
290 | acpi_status acpi_ex_system_wait_semaphore(acpi_handle semaphore, u16 timeout); | 290 | acpi_status |
291 | acpi_ex_system_wait_semaphore(acpi_semaphore semaphore, u16 timeout); | ||
292 | |||
293 | acpi_status acpi_ex_system_wait_mutex(acpi_mutex mutex, u16 timeout); | ||
291 | 294 | ||
292 | /* | 295 | /* |
293 | * exoparg1 - ACPI AML execution, 1 operand | 296 | * exoparg1 - ACPI AML execution, 1 operand |
diff --git a/include/acpi/aclocal.h b/include/acpi/aclocal.h index 1eeca7adca95..56b802486161 100644 --- a/include/acpi/aclocal.h +++ b/include/acpi/aclocal.h | |||
@@ -47,10 +47,11 @@ | |||
47 | /* acpisrc:struct_defs -- for acpisrc conversion */ | 47 | /* acpisrc:struct_defs -- for acpisrc conversion */ |
48 | 48 | ||
49 | #define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ | 49 | #define ACPI_WAIT_FOREVER 0xFFFF /* u16, as per ACPI spec */ |
50 | #define ACPI_INFINITE_CONCURRENCY 0xFF | 50 | #define ACPI_DO_NOT_WAIT 0 |
51 | #define ACPI_SERIALIZED 0xFF | ||
51 | 52 | ||
52 | typedef void *acpi_mutex; | ||
53 | typedef u32 acpi_mutex_handle; | 53 | typedef u32 acpi_mutex_handle; |
54 | #define ACPI_GLOBAL_LOCK (acpi_semaphore) (-1) | ||
54 | 55 | ||
55 | /* Total number of aml opcodes defined */ | 56 | /* Total number of aml opcodes defined */ |
56 | 57 | ||
@@ -79,16 +80,15 @@ union acpi_parse_object; | |||
79 | * table below also! | 80 | * table below also! |
80 | */ | 81 | */ |
81 | #define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */ | 82 | #define ACPI_MTX_INTERPRETER 0 /* AML Interpreter, main lock */ |
82 | #define ACPI_MTX_CONTROL_METHOD 1 /* Control method termination [TBD: may no longer be necessary] */ | 83 | #define ACPI_MTX_TABLES 1 /* Data for ACPI tables */ |
83 | #define ACPI_MTX_TABLES 2 /* Data for ACPI tables */ | 84 | #define ACPI_MTX_NAMESPACE 2 /* ACPI Namespace */ |
84 | #define ACPI_MTX_NAMESPACE 3 /* ACPI Namespace */ | 85 | #define ACPI_MTX_EVENTS 3 /* Data for ACPI events */ |
85 | #define ACPI_MTX_EVENTS 4 /* Data for ACPI events */ | 86 | #define ACPI_MTX_CACHES 4 /* Internal caches, general purposes */ |
86 | #define ACPI_MTX_CACHES 5 /* Internal caches, general purposes */ | 87 | #define ACPI_MTX_MEMORY 5 /* Debug memory tracking lists */ |
87 | #define ACPI_MTX_MEMORY 6 /* Debug memory tracking lists */ | 88 | #define ACPI_MTX_DEBUG_CMD_COMPLETE 6 /* AML debugger */ |
88 | #define ACPI_MTX_DEBUG_CMD_COMPLETE 7 /* AML debugger */ | 89 | #define ACPI_MTX_DEBUG_CMD_READY 7 /* AML debugger */ |
89 | #define ACPI_MTX_DEBUG_CMD_READY 8 /* AML debugger */ | 90 | |
90 | 91 | #define ACPI_MAX_MUTEX 7 | |
91 | #define ACPI_MAX_MUTEX 8 | ||
92 | #define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1 | 92 | #define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1 |
93 | 93 | ||
94 | #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) | 94 | #if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER) |
@@ -98,14 +98,13 @@ union acpi_parse_object; | |||
98 | 98 | ||
99 | static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = { | 99 | static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = { |
100 | "ACPI_MTX_Interpreter", | 100 | "ACPI_MTX_Interpreter", |
101 | "ACPI_MTX_Method", | ||
102 | "ACPI_MTX_Tables", | 101 | "ACPI_MTX_Tables", |
103 | "ACPI_MTX_Namespace", | 102 | "ACPI_MTX_Namespace", |
104 | "ACPI_MTX_Events", | 103 | "ACPI_MTX_Events", |
105 | "ACPI_MTX_Caches", | 104 | "ACPI_MTX_Caches", |
106 | "ACPI_MTX_Memory", | 105 | "ACPI_MTX_Memory", |
107 | "ACPI_MTX_DebugCmdComplete", | 106 | "ACPI_MTX_CommandComplete", |
108 | "ACPI_MTX_DebugCmdReady" | 107 | "ACPI_MTX_CommandReady" |
109 | }; | 108 | }; |
110 | 109 | ||
111 | #endif | 110 | #endif |
@@ -705,6 +704,13 @@ struct acpi_bit_register_info { | |||
705 | }; | 704 | }; |
706 | 705 | ||
707 | /* | 706 | /* |
707 | * Some ACPI registers have bits that must be ignored -- meaning that they | ||
708 | * must be preserved. | ||
709 | */ | ||
710 | #define ACPI_PM1_STATUS_PRESERVED_BITS 0x0800 /* Bit 11 */ | ||
711 | #define ACPI_PM1_CONTROL_PRESERVED_BITS 0x0201 /* Bit 9, Bit 0 (SCI_EN) */ | ||
712 | |||
713 | /* | ||
708 | * Register IDs | 714 | * Register IDs |
709 | * These are the full ACPI registers | 715 | * These are the full ACPI registers |
710 | */ | 716 | */ |
diff --git a/include/acpi/acmacros.h b/include/acpi/acmacros.h index 38f9aa4bef00..4bb38068f40d 100644 --- a/include/acpi/acmacros.h +++ b/include/acpi/acmacros.h | |||
@@ -394,6 +394,8 @@ | |||
394 | #define ACPI_REGISTER_PREPARE_BITS(val, pos, mask) ((val << pos) & mask) | 394 | #define ACPI_REGISTER_PREPARE_BITS(val, pos, mask) ((val << pos) & mask) |
395 | #define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask) | 395 | #define ACPI_REGISTER_INSERT_VALUE(reg, pos, mask, val) reg = (reg & (~(mask))) | ACPI_REGISTER_PREPARE_BITS(val, pos, mask) |
396 | 396 | ||
397 | #define ACPI_INSERT_BITS(target, mask, source) target = ((target & (~(mask))) | (source & mask)) | ||
398 | |||
397 | /* Generate a UUID */ | 399 | /* Generate a UUID */ |
398 | 400 | ||
399 | #define ACPI_INIT_UUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ | 401 | #define ACPI_INIT_UUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ |
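ACPI_INSERT_BITS differs from ACPI_REGISTER_INSERT_VALUE just above it in that no shift is applied: the source is already aligned with the mask, so the masked bits are copied straight across. A short numeric check, using the macro exactly as added above:

/* Numeric check of ACPI_INSERT_BITS (macro copied from the hunk above):
 * no shift is applied, the source is already aligned with the mask.
 */
#include <stdio.h>

#define ACPI_INSERT_BITS(target, mask, source) target = ((target & (~(mask))) | (source & mask))

int main(void)
{
    unsigned int value = 0x0001;       /* what the caller asked to write          */
    unsigned int read_value = 0x0800;  /* hardware currently has bit 11 set       */

    ACPI_INSERT_BITS(value, 0x0800, read_value);  /* PM1_STATUS preserved-bit mask */
    printf("0x%04X\n", value);         /* prints 0x0801: bit 11 kept from the read */
    return 0;
}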
diff --git a/include/acpi/acobject.h b/include/acpi/acobject.h index 1747d94084d8..8fdee31119f3 100644 --- a/include/acpi/acobject.h +++ b/include/acpi/acobject.h | |||
@@ -140,14 +140,14 @@ struct acpi_object_package { | |||
140 | *****************************************************************************/ | 140 | *****************************************************************************/ |
141 | 141 | ||
142 | struct acpi_object_event { | 142 | struct acpi_object_event { |
143 | ACPI_OBJECT_COMMON_HEADER void *semaphore; | 143 | ACPI_OBJECT_COMMON_HEADER acpi_semaphore os_semaphore; /* Actual OS synchronization object */ |
144 | }; | 144 | }; |
145 | 145 | ||
146 | struct acpi_object_mutex { | 146 | struct acpi_object_mutex { |
147 | ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */ | 147 | ACPI_OBJECT_COMMON_HEADER u8 sync_level; /* 0-15, specified in Mutex() call */ |
148 | u16 acquisition_depth; /* Allow multiple Acquires, same thread */ | 148 | u16 acquisition_depth; /* Allow multiple Acquires, same thread */ |
149 | struct acpi_thread_state *owner_thread; /* Current owner of the mutex */ | 149 | struct acpi_thread_state *owner_thread; /* Current owner of the mutex */ |
150 | void *semaphore; /* Actual OS synchronization object */ | 150 | acpi_mutex os_mutex; /* Actual OS synchronization object */ |
151 | union acpi_operand_object *prev; /* Link for list of acquired mutexes */ | 151 | union acpi_operand_object *prev; /* Link for list of acquired mutexes */ |
152 | union acpi_operand_object *next; /* Link for list of acquired mutexes */ | 152 | union acpi_operand_object *next; /* Link for list of acquired mutexes */ |
153 | struct acpi_namespace_node *node; /* Containing namespace node */ | 153 | struct acpi_namespace_node *node; /* Containing namespace node */ |
@@ -166,8 +166,8 @@ struct acpi_object_region { | |||
166 | struct acpi_object_method { | 166 | struct acpi_object_method { |
167 | ACPI_OBJECT_COMMON_HEADER u8 method_flags; | 167 | ACPI_OBJECT_COMMON_HEADER u8 method_flags; |
168 | u8 param_count; | 168 | u8 param_count; |
169 | u8 concurrency; | 169 | u8 sync_level; |
170 | void *semaphore; | 170 | union acpi_operand_object *mutex; |
171 | u8 *aml_start; | 171 | u8 *aml_start; |
172 | ACPI_INTERNAL_METHOD implementation; | 172 | ACPI_INTERNAL_METHOD implementation; |
173 | u32 aml_length; | 173 | u32 aml_length; |
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h index 8f473c83b7c4..89bc4a16c2e8 100644 --- a/include/acpi/acpiosxf.h +++ b/include/acpi/acpiosxf.h | |||
@@ -96,25 +96,47 @@ acpi_os_table_override(struct acpi_table_header *existing_table, | |||
96 | struct acpi_table_header **new_table); | 96 | struct acpi_table_header **new_table); |
97 | 97 | ||
98 | /* | 98 | /* |
99 | * Synchronization primitives | 99 | * Spinlock primitives |
100 | */ | ||
101 | acpi_status acpi_os_create_lock(acpi_spinlock * out_handle); | ||
102 | |||
103 | void acpi_os_delete_lock(acpi_spinlock handle); | ||
104 | |||
105 | acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock handle); | ||
106 | |||
107 | void acpi_os_release_lock(acpi_spinlock handle, acpi_cpu_flags flags); | ||
108 | |||
109 | /* | ||
110 | * Semaphore primitives | ||
100 | */ | 111 | */ |
101 | acpi_status | 112 | acpi_status |
102 | acpi_os_create_semaphore(u32 max_units, | 113 | acpi_os_create_semaphore(u32 max_units, |
103 | u32 initial_units, acpi_handle * out_handle); | 114 | u32 initial_units, acpi_semaphore * out_handle); |
104 | 115 | ||
105 | acpi_status acpi_os_delete_semaphore(acpi_handle handle); | 116 | acpi_status acpi_os_delete_semaphore(acpi_semaphore handle); |
106 | 117 | ||
107 | acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout); | 118 | acpi_status |
119 | acpi_os_wait_semaphore(acpi_semaphore handle, u32 units, u16 timeout); | ||
120 | |||
121 | acpi_status acpi_os_signal_semaphore(acpi_semaphore handle, u32 units); | ||
122 | |||
123 | /* | ||
124 | * Mutex primitives | ||
125 | */ | ||
126 | acpi_status acpi_os_create_mutex(acpi_mutex * out_handle); | ||
108 | 127 | ||
109 | acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units); | 128 | void acpi_os_delete_mutex(acpi_mutex handle); |
110 | 129 | ||
111 | acpi_status acpi_os_create_lock(acpi_handle * out_handle); | 130 | acpi_status acpi_os_acquire_mutex(acpi_mutex handle, u16 timeout); |
112 | 131 | ||
113 | void acpi_os_delete_lock(acpi_handle handle); | 132 | void acpi_os_release_mutex(acpi_mutex handle); |
114 | 133 | ||
115 | acpi_cpu_flags acpi_os_acquire_lock(acpi_handle handle); | 134 | /* Temporary macros for Mutex* interfaces, map to existing semaphore xfaces */ |
116 | 135 | ||
117 | void acpi_os_release_lock(acpi_handle handle, acpi_cpu_flags flags); | 136 | #define acpi_os_create_mutex(out_handle) acpi_os_create_semaphore (1, 1, out_handle) |
137 | #define acpi_os_delete_mutex(handle) (void) acpi_os_delete_semaphore (handle) | ||
138 | #define acpi_os_acquire_mutex(handle,time) acpi_os_wait_semaphore (handle, 1, time) | ||
139 | #define acpi_os_release_mutex(handle) (void) acpi_os_signal_semaphore (handle, 1) | ||
118 | 140 | ||
119 | /* | 141 | /* |
120 | * Memory allocation and mapping | 142 | * Memory allocation and mapping |
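For this release the four mutex OSL calls are declared and then immediately remapped onto the semaphore interfaces, so hosts that have not yet implemented acpi_os_create_mutex and friends keep building; a binary semaphore (1 unit, initially 1) behaves as a mutex. The sketch below shows what the macros expand to, with stub semaphore functions standing in for a real OSL implementation.

/* Sketch of the temporary mutex-to-semaphore mapping. The stub functions below
 * stand in for a host's semaphore OSL; the macro bodies mirror the hunk above.
 */
#include <stdio.h>

typedef void *acpi_semaphore;
typedef void *acpi_mutex;

static int acpi_os_create_semaphore(unsigned max_units, unsigned initial_units,
                                    acpi_semaphore *out_handle)
{
    static int token;                       /* stub: any non-NULL value will do */
    *out_handle = &token;
    printf("create_semaphore(max=%u, initial=%u)\n", max_units, initial_units);
    return 0;
}

static int acpi_os_wait_semaphore(acpi_semaphore handle, unsigned units, unsigned timeout)
{
    (void)handle;
    printf("wait_semaphore(units=%u, timeout=0x%X)\n", units, timeout);
    return 0;
}

static int acpi_os_signal_semaphore(acpi_semaphore handle, unsigned units)
{
    (void)handle;
    printf("signal_semaphore(units=%u)\n", units);
    return 0;
}

/* The temporary macros, as added to acpiosxf.h */
#define acpi_os_create_mutex(out_handle)    acpi_os_create_semaphore(1, 1, out_handle)
#define acpi_os_acquire_mutex(handle, time) acpi_os_wait_semaphore(handle, 1, time)
#define acpi_os_release_mutex(handle)       (void) acpi_os_signal_semaphore(handle, 1)

int main(void)
{
    acpi_mutex m;

    acpi_os_create_mutex(&m);          /* becomes a binary semaphore: 1 unit, 1 initial */
    acpi_os_acquire_mutex(m, 0xFFFF);  /* becomes wait_semaphore(handle, 1, timeout)    */
    acpi_os_release_mutex(m);          /* becomes signal_semaphore(handle, 1)           */
    return 0;
}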
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h index 77cf1236b05a..64b603cfe92e 100644 --- a/include/acpi/actypes.h +++ b/include/acpi/actypes.h | |||
@@ -241,7 +241,7 @@ typedef acpi_native_uint acpi_size; | |||
241 | 241 | ||
242 | /******************************************************************************* | 242 | /******************************************************************************* |
243 | * | 243 | * |
244 | * OS- or compiler-dependent types | 244 | * OS-dependent and compiler-dependent types |
245 | * | 245 | * |
246 | * If the defaults below are not appropriate for the host system, they can | 246 | * If the defaults below are not appropriate for the host system, they can |
247 | * be defined in the compiler-specific or OS-specific header, and this will | 247 | * be defined in the compiler-specific or OS-specific header, and this will |
@@ -249,29 +249,36 @@ typedef acpi_native_uint acpi_size; | |||
249 | * | 249 | * |
250 | ******************************************************************************/ | 250 | ******************************************************************************/ |
251 | 251 | ||
252 | /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ | 252 | /* Value returned by acpi_os_get_thread_id */ |
253 | 253 | ||
254 | #ifndef acpi_uintptr_t | 254 | #ifndef acpi_thread_id |
255 | #define acpi_uintptr_t void * | 255 | #define acpi_thread_id acpi_native_uint |
256 | #endif | 256 | #endif |
257 | 257 | ||
258 | /* | 258 | /* Object returned from acpi_os_create_lock */ |
259 | * If acpi_cache_t was not defined in the OS-dependent header, | 259 | |
260 | * define it now. This is typically the case where the local cache | 260 | #ifndef acpi_spinlock |
261 | * manager implementation is to be used (ACPI_USE_LOCAL_CACHE) | 261 | #define acpi_spinlock void * |
262 | */ | ||
263 | #ifndef acpi_cache_t | ||
264 | #define acpi_cache_t struct acpi_memory_list | ||
265 | #endif | 262 | #endif |
266 | 263 | ||
267 | /* | 264 | /* Flags for acpi_os_acquire_lock/acpi_os_release_lock */ |
268 | * Allow the CPU flags word to be defined per-OS to simplify the use of the | 265 | |
269 | * lock and unlock OSL interfaces. | ||
270 | */ | ||
271 | #ifndef acpi_cpu_flags | 266 | #ifndef acpi_cpu_flags |
272 | #define acpi_cpu_flags acpi_native_uint | 267 | #define acpi_cpu_flags acpi_native_uint |
273 | #endif | 268 | #endif |
274 | 269 | ||
270 | /* Object returned from acpi_os_create_cache */ | ||
271 | |||
272 | #ifndef acpi_cache_t | ||
273 | #define acpi_cache_t struct acpi_memory_list | ||
274 | #endif | ||
275 | |||
276 | /* Use C99 uintptr_t for pointer casting if available, "void *" otherwise */ | ||
277 | |||
278 | #ifndef acpi_uintptr_t | ||
279 | #define acpi_uintptr_t void * | ||
280 | #endif | ||
281 | |||
275 | /* | 282 | /* |
276 | * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because | 283 | * ACPI_PRINTF_LIKE is used to tag functions as "printf-like" because |
277 | * some compilers can catch printf format string problems | 284 | * some compilers can catch printf format string problems |
@@ -298,13 +305,6 @@ typedef acpi_native_uint acpi_size; | |||
298 | #define ACPI_EXPORT_SYMBOL(symbol) | 305 | #define ACPI_EXPORT_SYMBOL(symbol) |
299 | #endif | 306 | #endif |
300 | 307 | ||
301 | /* | ||
302 | * thread_id is returned by acpi_os_get_thread_id. | ||
303 | */ | ||
304 | #ifndef acpi_thread_id | ||
305 | #define acpi_thread_id acpi_native_uint | ||
306 | #endif | ||
307 | |||
308 | /******************************************************************************* | 308 | /******************************************************************************* |
309 | * | 309 | * |
310 | * Independent types | 310 | * Independent types |
@@ -380,6 +380,11 @@ struct uint32_struct { | |||
380 | u32 hi; | 380 | u32 hi; |
381 | }; | 381 | }; |
382 | 382 | ||
383 | /* Synchronization objects */ | ||
384 | |||
385 | #define acpi_mutex void * | ||
386 | #define acpi_semaphore void * | ||
387 | |||
383 | /* | 388 | /* |
384 | * Acpi integer width. In ACPI version 1, integers are | 389 | * Acpi integer width. In ACPI version 1, integers are |
385 | * 32 bits. In ACPI version 2, integers are 64 bits. | 390 | * 32 bits. In ACPI version 2, integers are 64 bits. |
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index 277d35bced03..3f853cabbd41 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h | |||
@@ -58,11 +58,13 @@ | |||
58 | #include <asm/div64.h> | 58 | #include <asm/div64.h> |
59 | #include <asm/acpi.h> | 59 | #include <asm/acpi.h> |
60 | #include <linux/slab.h> | 60 | #include <linux/slab.h> |
61 | #include <linux/spinlock_types.h> | ||
61 | 62 | ||
62 | /* Host-dependent types and defines */ | 63 | /* Host-dependent types and defines */ |
63 | 64 | ||
64 | #define ACPI_MACHINE_WIDTH BITS_PER_LONG | 65 | #define ACPI_MACHINE_WIDTH BITS_PER_LONG |
65 | #define acpi_cache_t kmem_cache_t | 66 | #define acpi_cache_t kmem_cache_t |
67 | #define acpi_spinlock spinlock_t * | ||
66 | #define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol); | 68 | #define ACPI_EXPORT_SYMBOL(symbol) EXPORT_SYMBOL(symbol); |
67 | #define strtoul simple_strtoul | 69 | #define strtoul simple_strtoul |
68 | 70 | ||