path: root/drivers
author     Len Brown <len.brown@intel.com>    2011-03-18 18:06:08 -0400
committer  Len Brown <len.brown@intel.com>    2011-03-18 18:06:08 -0400
commit     05534c9ffc9d5d950b14de8ba49a7609dc59b0b8 (patch)
tree       65a01a1e0bc0e28c64fb5105cc763949f5412b4b /drivers
parent     dd87cc53c42f3260b7eb7f60822de0fa9e58af59 (diff)
parent     589c7a39ae2f2b74fd13ae344ca1dcca61da6bca (diff)
Merge branch 'acpica' into release
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/acpica/Makefile | 4
-rw-r--r--  drivers/acpi/acpica/acdispat.h | 38
-rw-r--r--  drivers/acpi/acpica/acglobal.h | 4
-rw-r--r--  drivers/acpi/acpica/aclocal.h | 19
-rw-r--r--  drivers/acpi/acpica/dsargs.c | 391
-rw-r--r--  drivers/acpi/acpica/dscontrol.c | 410
-rw-r--r--  drivers/acpi/acpica/dsopcode.c | 725
-rw-r--r--  drivers/acpi/acpica/dswload.c | 670
-rw-r--r--  drivers/acpi/acpica/dswload2.c | 720
-rw-r--r--  drivers/acpi/acpica/evgpe.c | 9
-rw-r--r--  drivers/acpi/acpica/evregion.c | 2
-rw-r--r--  drivers/acpi/acpica/evxfregn.c | 34
-rw-r--r--  drivers/acpi/acpica/exfldio.c | 4
-rw-r--r--  drivers/acpi/acpica/tbfadt.c | 5
-rw-r--r--  drivers/acpi/acpica/utdecode.c | 548
-rw-r--r--  drivers/acpi/acpica/utglobal.c | 484
-rw-r--r--  drivers/block/floppy.c | 2
-rw-r--r--  drivers/char/agp/intel-agp.h | 1
-rw-r--r--  drivers/char/agp/intel-gtt.c | 56
-rw-r--r--  drivers/char/tpm/tpm.c | 10
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c | 16
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c | 6
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 95
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 5
-rw-r--r--  drivers/hwmon/ad7414.c | 1
-rw-r--r--  drivers/hwmon/adt7411.c | 1
-rw-r--r--  drivers/md/linear.c | 1
-rw-r--r--  drivers/md/md.c | 31
-rw-r--r--  drivers/md/md.h | 2
-rw-r--r--  drivers/md/multipath.c | 1
-rw-r--r--  drivers/md/raid0.c | 2
-rw-r--r--  drivers/md/raid1.c | 6
-rw-r--r--  drivers/md/raid10.c | 7
-rw-r--r--  drivers/md/raid5.c | 1
-rw-r--r--  drivers/nfc/Kconfig | 2
-rw-r--r--  drivers/nfc/pn544.c | 4
-rw-r--r--  drivers/pps/kapi.c | 2
-rw-r--r--  drivers/rapidio/rio-sysfs.c | 12
-rw-r--r--  drivers/regulator/mc13xxx-regulator-core.c | 2
-rw-r--r--  drivers/regulator/wm831x-dcdc.c | 1
-rw-r--r--  drivers/rtc/rtc-at91sam9.c | 2
-rw-r--r--  drivers/rtc/rtc-ds3232.c | 14
-rw-r--r--  drivers/thermal/Kconfig | 1
-rw-r--r--  drivers/thermal/thermal_sys.c | 40
-rw-r--r--  drivers/usb/core/hub.c | 18
-rw-r--r--  drivers/usb/host/xhci-dbg.c | 9
-rw-r--r--  drivers/usb/host/xhci-mem.c | 10
-rw-r--r--  drivers/usb/host/xhci-ring.c | 40
-rw-r--r--  drivers/usb/host/xhci.c | 14
-rw-r--r--  drivers/usb/host/xhci.h | 2
-rw-r--r--  drivers/usb/musb/musb_core.c | 1
52 files changed, 2423 insertions, 2065 deletions
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index eec2eadd2431..a1224712fd0c 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -10,7 +10,7 @@ obj-y += acpi.o
 
 acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
 	dsmethod.o dsobject.o dsutils.o dswload.o dswstate.o \
-	dsinit.o
+	dsinit.o dsargs.o dscontrol.o dswload2.o
 
 acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
 	evmisc.o evrgnini.o evxface.o evxfregn.o \
@@ -45,4 +45,4 @@ acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
 acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
 	utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
 	utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
-	utosi.o utxferror.o
+	utosi.o utxferror.o utdecode.o
diff --git a/drivers/acpi/acpica/acdispat.h b/drivers/acpi/acpica/acdispat.h
index 666271b65418..2d1b7ffa377a 100644
--- a/drivers/acpi/acpica/acdispat.h
+++ b/drivers/acpi/acpica/acdispat.h
@@ -48,7 +48,7 @@
 #define NAMEOF_ARG_NTE "__A0"
 
 /*
- * dsopcode - support for late evaluation
+ * dsargs - execution of dynamic arguments for static objects
  */
 acpi_status
 acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc);
@@ -62,6 +62,20 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc);
 
 acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc);
 
+/*
+ * dscontrol - support for execution control opcodes
+ */
+acpi_status
+acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
+			      union acpi_parse_object *op);
+
+acpi_status
+acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
+			    union acpi_parse_object *op);
+
+/*
+ * dsopcode - support for late operand evaluation
+ */
 acpi_status
 acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
 				   union acpi_parse_object *op);
@@ -86,17 +100,6 @@ acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
 acpi_status acpi_ds_initialize_region(acpi_handle obj_handle);
 
 /*
- * dsctrl - Parser/Interpreter interface, control stack routines
- */
-acpi_status
-acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
-			      union acpi_parse_object *op);
-
-acpi_status
-acpi_ds_exec_end_control_op(struct acpi_walk_state *walk_state,
-			    union acpi_parse_object *op);
-
-/*
  * dsexec - Parser/Interpreter interface, method execution callbacks
  */
 acpi_status
@@ -136,23 +139,26 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
 			   struct acpi_walk_state *walk_state);
 
 /*
- * dsload - Parser/Interpreter interface, namespace load callbacks
+ * dsload - Parser/Interpreter interface, pass 1 namespace load callbacks
  */
 acpi_status
+acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
+
+acpi_status
 acpi_ds_load1_begin_op(struct acpi_walk_state *walk_state,
 		       union acpi_parse_object **out_op);
 
 acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state);
 
+/*
+ * dsload - Parser/Interpreter interface, pass 2 namespace load callbacks
+ */
 acpi_status
 acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
 		       union acpi_parse_object **out_op);
 
 acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state);
 
-acpi_status
-acpi_ds_init_callbacks(struct acpi_walk_state *walk_state, u32 pass_number);
-
 /*
  * dsmthdat - method data (locals/args)
  */
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index 82a1bd283db8..d69750b83b36 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -273,6 +273,10 @@ ACPI_EXTERN u32 acpi_gbl_owner_id_mask[ACPI_NUM_OWNERID_MASKS];
 ACPI_EXTERN u8 acpi_gbl_last_owner_id_index;
 ACPI_EXTERN u8 acpi_gbl_next_owner_id_offset;
 
+/* Initialization sequencing */
+
+ACPI_EXTERN u8 acpi_gbl_reg_methods_executed;
+
 /* Misc */
 
 ACPI_EXTERN u32 acpi_gbl_original_mode;
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index edc25867ad9d..c7f743ca395b 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -89,25 +89,6 @@ union acpi_parse_object;
 #define ACPI_MAX_MUTEX 7
 #define ACPI_NUM_MUTEX ACPI_MAX_MUTEX+1
 
-#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
-#ifdef DEFINE_ACPI_GLOBALS
-
-/* Debug names for the mutexes above */
-
-static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
-	"ACPI_MTX_Interpreter",
-	"ACPI_MTX_Namespace",
-	"ACPI_MTX_Tables",
-	"ACPI_MTX_Events",
-	"ACPI_MTX_Caches",
-	"ACPI_MTX_Memory",
-	"ACPI_MTX_CommandComplete",
-	"ACPI_MTX_CommandReady"
-};
-
-#endif
-#endif
-
 /* Lock structure for reader/writer interfaces */
 
 struct acpi_rw_lock {
diff --git a/drivers/acpi/acpica/dsargs.c b/drivers/acpi/acpica/dsargs.c
new file mode 100644
index 000000000000..8c7b99728aa2
--- /dev/null
+++ b/drivers/acpi/acpica/dsargs.c
@@ -0,0 +1,391 @@
1/******************************************************************************
2 *
3 * Module Name: dsargs - Support for execution of dynamic arguments for static
4 * objects (regions, fields, buffer fields, etc.)
5 *
6 *****************************************************************************/
7
8/*
9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions, and the following disclaimer,
17 * without modification.
18 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
19 * substantially similar to the "NO WARRANTY" disclaimer below
20 * ("Disclaimer") and any redistribution must be conditioned upon
21 * including a substantially similar Disclaimer requirement for further
22 * binary redistribution.
23 * 3. Neither the names of the above-listed copyright holders nor the names
24 * of any contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * Alternatively, this software may be distributed under the terms of the
28 * GNU General Public License ("GPL") version 2 as published by the Free
29 * Software Foundation.
30 *
31 * NO WARRANTY
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
35 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
36 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
41 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGES.
43 */
44
45#include <acpi/acpi.h>
46#include "accommon.h"
47#include "acparser.h"
48#include "amlcode.h"
49#include "acdispat.h"
50#include "acnamesp.h"
51
52#define _COMPONENT ACPI_DISPATCHER
53ACPI_MODULE_NAME("dsargs")
54
55/* Local prototypes */
56static acpi_status
57acpi_ds_execute_arguments(struct acpi_namespace_node *node,
58 struct acpi_namespace_node *scope_node,
59 u32 aml_length, u8 *aml_start);
60
61/*******************************************************************************
62 *
63 * FUNCTION: acpi_ds_execute_arguments
64 *
65 * PARAMETERS: Node - Object NS node
66 * scope_node - Parent NS node
67 * aml_length - Length of executable AML
68 * aml_start - Pointer to the AML
69 *
70 * RETURN: Status.
71 *
72 * DESCRIPTION: Late (deferred) execution of region or field arguments
73 *
74 ******************************************************************************/
75
76static acpi_status
77acpi_ds_execute_arguments(struct acpi_namespace_node *node,
78 struct acpi_namespace_node *scope_node,
79 u32 aml_length, u8 *aml_start)
80{
81 acpi_status status;
82 union acpi_parse_object *op;
83 struct acpi_walk_state *walk_state;
84
85 ACPI_FUNCTION_TRACE(ds_execute_arguments);
86
87 /* Allocate a new parser op to be the root of the parsed tree */
88
89 op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
90 if (!op) {
91 return_ACPI_STATUS(AE_NO_MEMORY);
92 }
93
94 /* Save the Node for use in acpi_ps_parse_aml */
95
96 op->common.node = scope_node;
97
98 /* Create and initialize a new parser state */
99
100 walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
101 if (!walk_state) {
102 status = AE_NO_MEMORY;
103 goto cleanup;
104 }
105
106 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
107 aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
108 if (ACPI_FAILURE(status)) {
109 acpi_ds_delete_walk_state(walk_state);
110 goto cleanup;
111 }
112
113 /* Mark this parse as a deferred opcode */
114
115 walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
116 walk_state->deferred_node = node;
117
118 /* Pass1: Parse the entire declaration */
119
120 status = acpi_ps_parse_aml(walk_state);
121 if (ACPI_FAILURE(status)) {
122 goto cleanup;
123 }
124
125 /* Get and init the Op created above */
126
127 op->common.node = node;
128 acpi_ps_delete_parse_tree(op);
129
130 /* Evaluate the deferred arguments */
131
132 op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
133 if (!op) {
134 return_ACPI_STATUS(AE_NO_MEMORY);
135 }
136
137 op->common.node = scope_node;
138
139 /* Create and initialize a new parser state */
140
141 walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
142 if (!walk_state) {
143 status = AE_NO_MEMORY;
144 goto cleanup;
145 }
146
147 /* Execute the opcode and arguments */
148
149 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
150 aml_length, NULL, ACPI_IMODE_EXECUTE);
151 if (ACPI_FAILURE(status)) {
152 acpi_ds_delete_walk_state(walk_state);
153 goto cleanup;
154 }
155
156 /* Mark this execution as a deferred opcode */
157
158 walk_state->deferred_node = node;
159 status = acpi_ps_parse_aml(walk_state);
160
161 cleanup:
162 acpi_ps_delete_parse_tree(op);
163 return_ACPI_STATUS(status);
164}
165
166/*******************************************************************************
167 *
168 * FUNCTION: acpi_ds_get_buffer_field_arguments
169 *
170 * PARAMETERS: obj_desc - A valid buffer_field object
171 *
172 * RETURN: Status.
173 *
174 * DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
175 * evaluation of these field attributes.
176 *
177 ******************************************************************************/
178
179acpi_status
180acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
181{
182 union acpi_operand_object *extra_desc;
183 struct acpi_namespace_node *node;
184 acpi_status status;
185
186 ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
187
188 if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
189 return_ACPI_STATUS(AE_OK);
190 }
191
192 /* Get the AML pointer (method object) and buffer_field node */
193
194 extra_desc = acpi_ns_get_secondary_object(obj_desc);
195 node = obj_desc->buffer_field.node;
196
197 ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname(ACPI_TYPE_BUFFER_FIELD,
198 node, NULL));
199
200 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
201 acpi_ut_get_node_name(node)));
202
203 /* Execute the AML code for the term_arg arguments */
204
205 status = acpi_ds_execute_arguments(node, node->parent,
206 extra_desc->extra.aml_length,
207 extra_desc->extra.aml_start);
208 return_ACPI_STATUS(status);
209}
210
211/*******************************************************************************
212 *
213 * FUNCTION: acpi_ds_get_bank_field_arguments
214 *
215 * PARAMETERS: obj_desc - A valid bank_field object
216 *
217 * RETURN: Status.
218 *
219 * DESCRIPTION: Get bank_field bank_value. This implements the late
220 * evaluation of these field attributes.
221 *
222 ******************************************************************************/
223
224acpi_status
225acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
226{
227 union acpi_operand_object *extra_desc;
228 struct acpi_namespace_node *node;
229 acpi_status status;
230
231 ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
232
233 if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
234 return_ACPI_STATUS(AE_OK);
235 }
236
237 /* Get the AML pointer (method object) and bank_field node */
238
239 extra_desc = acpi_ns_get_secondary_object(obj_desc);
240 node = obj_desc->bank_field.node;
241
242 ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
243 (ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
244
245 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
246 acpi_ut_get_node_name(node)));
247
248 /* Execute the AML code for the term_arg arguments */
249
250 status = acpi_ds_execute_arguments(node, node->parent,
251 extra_desc->extra.aml_length,
252 extra_desc->extra.aml_start);
253 return_ACPI_STATUS(status);
254}
255
256/*******************************************************************************
257 *
258 * FUNCTION: acpi_ds_get_buffer_arguments
259 *
260 * PARAMETERS: obj_desc - A valid Buffer object
261 *
262 * RETURN: Status.
263 *
264 * DESCRIPTION: Get Buffer length and initializer byte list. This implements
265 * the late evaluation of these attributes.
266 *
267 ******************************************************************************/
268
269acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
270{
271 struct acpi_namespace_node *node;
272 acpi_status status;
273
274 ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
275
276 if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
277 return_ACPI_STATUS(AE_OK);
278 }
279
280 /* Get the Buffer node */
281
282 node = obj_desc->buffer.node;
283 if (!node) {
284 ACPI_ERROR((AE_INFO,
285 "No pointer back to namespace node in buffer object %p",
286 obj_desc));
287 return_ACPI_STATUS(AE_AML_INTERNAL);
288 }
289
290 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));
291
292 /* Execute the AML code for the term_arg arguments */
293
294 status = acpi_ds_execute_arguments(node, node,
295 obj_desc->buffer.aml_length,
296 obj_desc->buffer.aml_start);
297 return_ACPI_STATUS(status);
298}
299
300/*******************************************************************************
301 *
302 * FUNCTION: acpi_ds_get_package_arguments
303 *
304 * PARAMETERS: obj_desc - A valid Package object
305 *
306 * RETURN: Status.
307 *
308 * DESCRIPTION: Get Package length and initializer byte list. This implements
309 * the late evaluation of these attributes.
310 *
311 ******************************************************************************/
312
313acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
314{
315 struct acpi_namespace_node *node;
316 acpi_status status;
317
318 ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
319
320 if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
321 return_ACPI_STATUS(AE_OK);
322 }
323
324 /* Get the Package node */
325
326 node = obj_desc->package.node;
327 if (!node) {
328 ACPI_ERROR((AE_INFO,
329 "No pointer back to namespace node in package %p",
330 obj_desc));
331 return_ACPI_STATUS(AE_AML_INTERNAL);
332 }
333
334 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
335
336 /* Execute the AML code for the term_arg arguments */
337
338 status = acpi_ds_execute_arguments(node, node,
339 obj_desc->package.aml_length,
340 obj_desc->package.aml_start);
341 return_ACPI_STATUS(status);
342}
343
344/*******************************************************************************
345 *
346 * FUNCTION: acpi_ds_get_region_arguments
347 *
348 * PARAMETERS: obj_desc - A valid region object
349 *
350 * RETURN: Status.
351 *
352 * DESCRIPTION: Get region address and length. This implements the late
353 * evaluation of these region attributes.
354 *
355 ******************************************************************************/
356
357acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
358{
359 struct acpi_namespace_node *node;
360 acpi_status status;
361 union acpi_operand_object *extra_desc;
362
363 ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
364
365 if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
366 return_ACPI_STATUS(AE_OK);
367 }
368
369 extra_desc = acpi_ns_get_secondary_object(obj_desc);
370 if (!extra_desc) {
371 return_ACPI_STATUS(AE_NOT_EXIST);
372 }
373
374 /* Get the Region node */
375
376 node = obj_desc->region.node;
377
378 ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
379 (ACPI_TYPE_REGION, node, NULL));
380
381 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
382 acpi_ut_get_node_name(node),
383 extra_desc->extra.aml_start));
384
385 /* Execute the argument AML */
386
387 status = acpi_ds_execute_arguments(node, node->parent,
388 extra_desc->extra.aml_length,
389 extra_desc->extra.aml_start);
390 return_ACPI_STATUS(status);
391}
diff --git a/drivers/acpi/acpica/dscontrol.c b/drivers/acpi/acpica/dscontrol.c
new file mode 100644
index 000000000000..26c49fff58da
--- /dev/null
+++ b/drivers/acpi/acpica/dscontrol.c
@@ -0,0 +1,410 @@
1/******************************************************************************
2 *
3 * Module Name: dscontrol - Support for execution control opcodes -
4 * if/else/while/return
5 *
6 *****************************************************************************/
7
8/*
9 * Copyright (C) 2000 - 2011, Intel Corp.
10 * All rights reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions, and the following disclaimer,
17 * without modification.
18 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
19 * substantially similar to the "NO WARRANTY" disclaimer below
20 * ("Disclaimer") and any redistribution must be conditioned upon
21 * including a substantially similar Disclaimer requirement for further
22 * binary redistribution.
23 * 3. Neither the names of the above-listed copyright holders nor the names
24 * of any contributors may be used to endorse or promote products derived
25 * from this software without specific prior written permission.
26 *
27 * Alternatively, this software may be distributed under the terms of the
28 * GNU General Public License ("GPL") version 2 as published by the Free
29 * Software Foundation.
30 *
31 * NO WARRANTY
32 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
33 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
34 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
35 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
36 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
37 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
38 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
39 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
40 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
41 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
42 * POSSIBILITY OF SUCH DAMAGES.
43 */
44
45#include <acpi/acpi.h>
46#include "accommon.h"
47#include "amlcode.h"
48#include "acdispat.h"
49#include "acinterp.h"
50
51#define _COMPONENT ACPI_DISPATCHER
52ACPI_MODULE_NAME("dscontrol")
53
54/*******************************************************************************
55 *
56 * FUNCTION: acpi_ds_exec_begin_control_op
57 *
58 * PARAMETERS: walk_list - The list that owns the walk stack
59 * Op - The control Op
60 *
61 * RETURN: Status
62 *
63 * DESCRIPTION: Handles all control ops encountered during control method
64 * execution.
65 *
66 ******************************************************************************/
67acpi_status
68acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
69 union acpi_parse_object *op)
70{
71 acpi_status status = AE_OK;
72 union acpi_generic_state *control_state;
73
74 ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
75
76 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n",
77 op, op->common.aml_opcode, walk_state));
78
79 switch (op->common.aml_opcode) {
80 case AML_WHILE_OP:
81
82 /*
83 * If this is an additional iteration of a while loop, continue.
84 * There is no need to allocate a new control state.
85 */
86 if (walk_state->control_state) {
87 if (walk_state->control_state->control.
88 aml_predicate_start ==
89 (walk_state->parser_state.aml - 1)) {
90
91 /* Reset the state to start-of-loop */
92
93 walk_state->control_state->common.state =
94 ACPI_CONTROL_CONDITIONAL_EXECUTING;
95 break;
96 }
97 }
98
99 /*lint -fallthrough */
100
101 case AML_IF_OP:
102
103 /*
104 * IF/WHILE: Create a new control state to manage these
105 * constructs. We need to manage these as a stack, in order
106 * to handle nesting.
107 */
108 control_state = acpi_ut_create_control_state();
109 if (!control_state) {
110 status = AE_NO_MEMORY;
111 break;
112 }
113 /*
114 * Save a pointer to the predicate for multiple executions
115 * of a loop
116 */
117 control_state->control.aml_predicate_start =
118 walk_state->parser_state.aml - 1;
119 control_state->control.package_end =
120 walk_state->parser_state.pkg_end;
121 control_state->control.opcode = op->common.aml_opcode;
122
123 /* Push the control state on this walk's control stack */
124
125 acpi_ut_push_generic_state(&walk_state->control_state,
126 control_state);
127 break;
128
129 case AML_ELSE_OP:
130
131 /* Predicate is in the state object */
132 /* If predicate is true, the IF was executed, ignore ELSE part */
133
134 if (walk_state->last_predicate) {
135 status = AE_CTRL_TRUE;
136 }
137
138 break;
139
140 case AML_RETURN_OP:
141
142 break;
143
144 default:
145 break;
146 }
147
148 return (status);
149}
150
151/*******************************************************************************
152 *
153 * FUNCTION: acpi_ds_exec_end_control_op
154 *
155 * PARAMETERS: walk_list - The list that owns the walk stack
156 * Op - The control Op
157 *
158 * RETURN: Status
159 *
160 * DESCRIPTION: Handles all control ops encountered during control method
161 * execution.
162 *
163 ******************************************************************************/
164
165acpi_status
166acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
167 union acpi_parse_object * op)
168{
169 acpi_status status = AE_OK;
170 union acpi_generic_state *control_state;
171
172 ACPI_FUNCTION_NAME(ds_exec_end_control_op);
173
174 switch (op->common.aml_opcode) {
175 case AML_IF_OP:
176
177 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));
178
179 /*
180 * Save the result of the predicate in case there is an
181 * ELSE to come
182 */
183 walk_state->last_predicate =
184 (u8)walk_state->control_state->common.value;
185
186 /*
187 * Pop the control state that was created at the start
188 * of the IF and free it
189 */
190 control_state =
191 acpi_ut_pop_generic_state(&walk_state->control_state);
192 acpi_ut_delete_generic_state(control_state);
193 break;
194
195 case AML_ELSE_OP:
196
197 break;
198
199 case AML_WHILE_OP:
200
201 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
202
203 control_state = walk_state->control_state;
204 if (control_state->common.value) {
205
206 /* Predicate was true, the body of the loop was just executed */
207
208 /*
209 * This loop counter mechanism allows the interpreter to escape
210 * possibly infinite loops. This can occur in poorly written AML
211 * when the hardware does not respond within a while loop and the
212 * loop does not implement a timeout.
213 */
214 control_state->control.loop_count++;
215 if (control_state->control.loop_count >
216 ACPI_MAX_LOOP_ITERATIONS) {
217 status = AE_AML_INFINITE_LOOP;
218 break;
219 }
220
221 /*
222 * Go back and evaluate the predicate and maybe execute the loop
223 * another time
224 */
225 status = AE_CTRL_PENDING;
226 walk_state->aml_last_while =
227 control_state->control.aml_predicate_start;
228 break;
229 }
230
231 /* Predicate was false, terminate this while loop */
232
233 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
234 "[WHILE_OP] termination! Op=%p\n", op));
235
236 /* Pop this control state and free it */
237
238 control_state =
239 acpi_ut_pop_generic_state(&walk_state->control_state);
240 acpi_ut_delete_generic_state(control_state);
241 break;
242
243 case AML_RETURN_OP:
244
245 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
246 "[RETURN_OP] Op=%p Arg=%p\n", op,
247 op->common.value.arg));
248
249 /*
250 * One optional operand -- the return value
251 * It can be either an immediate operand or a result that
252 * has been bubbled up the tree
253 */
254 if (op->common.value.arg) {
255
256 /* Since we have a real Return(), delete any implicit return */
257
258 acpi_ds_clear_implicit_return(walk_state);
259
260 /* Return statement has an immediate operand */
261
262 status =
263 acpi_ds_create_operands(walk_state,
264 op->common.value.arg);
265 if (ACPI_FAILURE(status)) {
266 return (status);
267 }
268
269 /*
270 * If value being returned is a Reference (such as
271 * an arg or local), resolve it now because it may
272 * cease to exist at the end of the method.
273 */
274 status =
275 acpi_ex_resolve_to_value(&walk_state->operands[0],
276 walk_state);
277 if (ACPI_FAILURE(status)) {
278 return (status);
279 }
280
281 /*
282 * Get the return value and save as the last result
283 * value. This is the only place where walk_state->return_desc
284 * is set to anything other than zero!
285 */
286 walk_state->return_desc = walk_state->operands[0];
287 } else if (walk_state->result_count) {
288
289 /* Since we have a real Return(), delete any implicit return */
290
291 acpi_ds_clear_implicit_return(walk_state);
292
293 /*
294 * The return value has come from a previous calculation.
295 *
296 * If value being returned is a Reference (such as
297 * an arg or local), resolve it now because it may
298 * cease to exist at the end of the method.
299 *
300 * Allow references created by the Index operator to return
301 * unchanged.
302 */
303 if ((ACPI_GET_DESCRIPTOR_TYPE
304 (walk_state->results->results.obj_desc[0]) ==
305 ACPI_DESC_TYPE_OPERAND)
306 && ((walk_state->results->results.obj_desc[0])->
307 common.type == ACPI_TYPE_LOCAL_REFERENCE)
308 && ((walk_state->results->results.obj_desc[0])->
309 reference.class != ACPI_REFCLASS_INDEX)) {
310 status =
311 acpi_ex_resolve_to_value(&walk_state->
312 results->results.
313 obj_desc[0],
314 walk_state);
315 if (ACPI_FAILURE(status)) {
316 return (status);
317 }
318 }
319
320 walk_state->return_desc =
321 walk_state->results->results.obj_desc[0];
322 } else {
323 /* No return operand */
324
325 if (walk_state->num_operands) {
326 acpi_ut_remove_reference(walk_state->
327 operands[0]);
328 }
329
330 walk_state->operands[0] = NULL;
331 walk_state->num_operands = 0;
332 walk_state->return_desc = NULL;
333 }
334
335 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
336 "Completed RETURN_OP State=%p, RetVal=%p\n",
337 walk_state, walk_state->return_desc));
338
339 /* End the control method execution right now */
340
341 status = AE_CTRL_TERMINATE;
342 break;
343
344 case AML_NOOP_OP:
345
346 /* Just do nothing! */
347 break;
348
349 case AML_BREAK_POINT_OP:
350
351 /*
352 * Set the single-step flag. This will cause the debugger (if present)
353 * to break to the console within the AML debugger at the start of the
354 * next AML instruction.
355 */
356 ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
357 ACPI_DEBUGGER_EXEC(acpi_os_printf
358 ("**break** Executed AML BreakPoint opcode\n"));
359
360 /* Call to the OSL in case OS wants a piece of the action */
361
362 status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
363 "Executed AML Breakpoint opcode");
364 break;
365
366 case AML_BREAK_OP:
367 case AML_CONTINUE_OP: /* ACPI 2.0 */
368
369 /* Pop and delete control states until we find a while */
370
371 while (walk_state->control_state &&
372 (walk_state->control_state->control.opcode !=
373 AML_WHILE_OP)) {
374 control_state =
375 acpi_ut_pop_generic_state(&walk_state->
376 control_state);
377 acpi_ut_delete_generic_state(control_state);
378 }
379
380 /* No while found? */
381
382 if (!walk_state->control_state) {
383 return (AE_AML_NO_WHILE);
384 }
385
386 /* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */
387
388 walk_state->aml_last_while =
389 walk_state->control_state->control.package_end;
390
391 /* Return status depending on opcode */
392
393 if (op->common.aml_opcode == AML_BREAK_OP) {
394 status = AE_CTRL_BREAK;
395 } else {
396 status = AE_CTRL_CONTINUE;
397 }
398 break;
399
400 default:
401
402 ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
403 op->common.aml_opcode, op));
404
405 status = AE_AML_BAD_OPCODE;
406 break;
407 }
408
409 return (status);
410}
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index bbecf293aeeb..c627a288e027 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -1,7 +1,6 @@
 /******************************************************************************
  *
- * Module Name: dsopcode - Dispatcher Op Region support and handling of
- *              "control" opcodes
+ * Module Name: dsopcode - Dispatcher suport for regions and fields
  *
  *****************************************************************************/
 
@@ -57,11 +56,6 @@ ACPI_MODULE_NAME("dsopcode")
 
 /* Local prototypes */
 static acpi_status
-acpi_ds_execute_arguments(struct acpi_namespace_node *node,
-			  struct acpi_namespace_node *scope_node,
-			  u32 aml_length, u8 * aml_start);
-
-static acpi_status
 acpi_ds_init_buffer_field(u16 aml_opcode,
 			  union acpi_operand_object *obj_desc,
 			  union acpi_operand_object *buffer_desc,
@@ -71,361 +65,6 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
 
 /*******************************************************************************
  *
74 * FUNCTION: acpi_ds_execute_arguments
75 *
76 * PARAMETERS: Node - Object NS node
77 * scope_node - Parent NS node
78 * aml_length - Length of executable AML
79 * aml_start - Pointer to the AML
80 *
81 * RETURN: Status.
82 *
83 * DESCRIPTION: Late (deferred) execution of region or field arguments
84 *
85 ******************************************************************************/
86
87static acpi_status
88acpi_ds_execute_arguments(struct acpi_namespace_node *node,
89 struct acpi_namespace_node *scope_node,
90 u32 aml_length, u8 * aml_start)
91{
92 acpi_status status;
93 union acpi_parse_object *op;
94 struct acpi_walk_state *walk_state;
95
96 ACPI_FUNCTION_TRACE(ds_execute_arguments);
97
98 /*
99 * Allocate a new parser op to be the root of the parsed tree
100 */
101 op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
102 if (!op) {
103 return_ACPI_STATUS(AE_NO_MEMORY);
104 }
105
106 /* Save the Node for use in acpi_ps_parse_aml */
107
108 op->common.node = scope_node;
109
110 /* Create and initialize a new parser state */
111
112 walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
113 if (!walk_state) {
114 status = AE_NO_MEMORY;
115 goto cleanup;
116 }
117
118 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
119 aml_length, NULL, ACPI_IMODE_LOAD_PASS1);
120 if (ACPI_FAILURE(status)) {
121 acpi_ds_delete_walk_state(walk_state);
122 goto cleanup;
123 }
124
125 /* Mark this parse as a deferred opcode */
126
127 walk_state->parse_flags = ACPI_PARSE_DEFERRED_OP;
128 walk_state->deferred_node = node;
129
130 /* Pass1: Parse the entire declaration */
131
132 status = acpi_ps_parse_aml(walk_state);
133 if (ACPI_FAILURE(status)) {
134 goto cleanup;
135 }
136
137 /* Get and init the Op created above */
138
139 op->common.node = node;
140 acpi_ps_delete_parse_tree(op);
141
142 /* Evaluate the deferred arguments */
143
144 op = acpi_ps_alloc_op(AML_INT_EVAL_SUBTREE_OP);
145 if (!op) {
146 return_ACPI_STATUS(AE_NO_MEMORY);
147 }
148
149 op->common.node = scope_node;
150
151 /* Create and initialize a new parser state */
152
153 walk_state = acpi_ds_create_walk_state(0, NULL, NULL, NULL);
154 if (!walk_state) {
155 status = AE_NO_MEMORY;
156 goto cleanup;
157 }
158
159 /* Execute the opcode and arguments */
160
161 status = acpi_ds_init_aml_walk(walk_state, op, NULL, aml_start,
162 aml_length, NULL, ACPI_IMODE_EXECUTE);
163 if (ACPI_FAILURE(status)) {
164 acpi_ds_delete_walk_state(walk_state);
165 goto cleanup;
166 }
167
168 /* Mark this execution as a deferred opcode */
169
170 walk_state->deferred_node = node;
171 status = acpi_ps_parse_aml(walk_state);
172
173 cleanup:
174 acpi_ps_delete_parse_tree(op);
175 return_ACPI_STATUS(status);
176}
177
178/*******************************************************************************
179 *
180 * FUNCTION: acpi_ds_get_buffer_field_arguments
181 *
182 * PARAMETERS: obj_desc - A valid buffer_field object
183 *
184 * RETURN: Status.
185 *
186 * DESCRIPTION: Get buffer_field Buffer and Index. This implements the late
187 * evaluation of these field attributes.
188 *
189 ******************************************************************************/
190
191acpi_status
192acpi_ds_get_buffer_field_arguments(union acpi_operand_object *obj_desc)
193{
194 union acpi_operand_object *extra_desc;
195 struct acpi_namespace_node *node;
196 acpi_status status;
197
198 ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_field_arguments, obj_desc);
199
200 if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
201 return_ACPI_STATUS(AE_OK);
202 }
203
204 /* Get the AML pointer (method object) and buffer_field node */
205
206 extra_desc = acpi_ns_get_secondary_object(obj_desc);
207 node = obj_desc->buffer_field.node;
208
209 ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
210 (ACPI_TYPE_BUFFER_FIELD, node, NULL));
211 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BufferField Arg Init\n",
212 acpi_ut_get_node_name(node)));
213
214 /* Execute the AML code for the term_arg arguments */
215
216 status = acpi_ds_execute_arguments(node, node->parent,
217 extra_desc->extra.aml_length,
218 extra_desc->extra.aml_start);
219 return_ACPI_STATUS(status);
220}
221
222/*******************************************************************************
223 *
224 * FUNCTION: acpi_ds_get_bank_field_arguments
225 *
226 * PARAMETERS: obj_desc - A valid bank_field object
227 *
228 * RETURN: Status.
229 *
230 * DESCRIPTION: Get bank_field bank_value. This implements the late
231 * evaluation of these field attributes.
232 *
233 ******************************************************************************/
234
235acpi_status
236acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
237{
238 union acpi_operand_object *extra_desc;
239 struct acpi_namespace_node *node;
240 acpi_status status;
241
242 ACPI_FUNCTION_TRACE_PTR(ds_get_bank_field_arguments, obj_desc);
243
244 if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
245 return_ACPI_STATUS(AE_OK);
246 }
247
248 /* Get the AML pointer (method object) and bank_field node */
249
250 extra_desc = acpi_ns_get_secondary_object(obj_desc);
251 node = obj_desc->bank_field.node;
252
253 ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
254 (ACPI_TYPE_LOCAL_BANK_FIELD, node, NULL));
255 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] BankField Arg Init\n",
256 acpi_ut_get_node_name(node)));
257
258 /* Execute the AML code for the term_arg arguments */
259
260 status = acpi_ds_execute_arguments(node, node->parent,
261 extra_desc->extra.aml_length,
262 extra_desc->extra.aml_start);
263 return_ACPI_STATUS(status);
264}
265
266/*******************************************************************************
267 *
268 * FUNCTION: acpi_ds_get_buffer_arguments
269 *
270 * PARAMETERS: obj_desc - A valid Buffer object
271 *
272 * RETURN: Status.
273 *
274 * DESCRIPTION: Get Buffer length and initializer byte list. This implements
275 * the late evaluation of these attributes.
276 *
277 ******************************************************************************/
278
279acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
280{
281 struct acpi_namespace_node *node;
282 acpi_status status;
283
284 ACPI_FUNCTION_TRACE_PTR(ds_get_buffer_arguments, obj_desc);
285
286 if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
287 return_ACPI_STATUS(AE_OK);
288 }
289
290 /* Get the Buffer node */
291
292 node = obj_desc->buffer.node;
293 if (!node) {
294 ACPI_ERROR((AE_INFO,
295 "No pointer back to namespace node in buffer object %p",
296 obj_desc));
297 return_ACPI_STATUS(AE_AML_INTERNAL);
298 }
299
300 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Buffer Arg Init\n"));
301
302 /* Execute the AML code for the term_arg arguments */
303
304 status = acpi_ds_execute_arguments(node, node,
305 obj_desc->buffer.aml_length,
306 obj_desc->buffer.aml_start);
307 return_ACPI_STATUS(status);
308}
309
310/*******************************************************************************
311 *
312 * FUNCTION: acpi_ds_get_package_arguments
313 *
314 * PARAMETERS: obj_desc - A valid Package object
315 *
316 * RETURN: Status.
317 *
318 * DESCRIPTION: Get Package length and initializer byte list. This implements
319 * the late evaluation of these attributes.
320 *
321 ******************************************************************************/
322
323acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
324{
325 struct acpi_namespace_node *node;
326 acpi_status status;
327
328 ACPI_FUNCTION_TRACE_PTR(ds_get_package_arguments, obj_desc);
329
330 if (obj_desc->common.flags & AOPOBJ_DATA_VALID) {
331 return_ACPI_STATUS(AE_OK);
332 }
333
334 /* Get the Package node */
335
336 node = obj_desc->package.node;
337 if (!node) {
338 ACPI_ERROR((AE_INFO,
339 "No pointer back to namespace node in package %p",
340 obj_desc));
341 return_ACPI_STATUS(AE_AML_INTERNAL);
342 }
343
344 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Package Arg Init\n"));
345
346 /* Execute the AML code for the term_arg arguments */
347
348 status = acpi_ds_execute_arguments(node, node,
349 obj_desc->package.aml_length,
350 obj_desc->package.aml_start);
351 return_ACPI_STATUS(status);
352}
353
354/*****************************************************************************
355 *
356 * FUNCTION: acpi_ds_get_region_arguments
357 *
358 * PARAMETERS: obj_desc - A valid region object
359 *
360 * RETURN: Status.
361 *
362 * DESCRIPTION: Get region address and length. This implements the late
363 * evaluation of these region attributes.
364 *
365 ****************************************************************************/
366
367acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
368{
369 struct acpi_namespace_node *node;
370 acpi_status status;
371 union acpi_operand_object *extra_desc;
372
373 ACPI_FUNCTION_TRACE_PTR(ds_get_region_arguments, obj_desc);
374
375 if (obj_desc->region.flags & AOPOBJ_DATA_VALID) {
376 return_ACPI_STATUS(AE_OK);
377 }
378
379 extra_desc = acpi_ns_get_secondary_object(obj_desc);
380 if (!extra_desc) {
381 return_ACPI_STATUS(AE_NOT_EXIST);
382 }
383
384 /* Get the Region node */
385
386 node = obj_desc->region.node;
387
388 ACPI_DEBUG_EXEC(acpi_ut_display_init_pathname
389 (ACPI_TYPE_REGION, node, NULL));
390
391 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "[%4.4s] OpRegion Arg Init at AML %p\n",
392 acpi_ut_get_node_name(node),
393 extra_desc->extra.aml_start));
394
395 /* Execute the argument AML */
396
397 status = acpi_ds_execute_arguments(node, node->parent,
398 extra_desc->extra.aml_length,
399 extra_desc->extra.aml_start);
400 if (ACPI_FAILURE(status)) {
401 return_ACPI_STATUS(status);
402 }
403
404 /* Validate the region address/length via the host OS */
405
406 status = acpi_os_validate_address(obj_desc->region.space_id,
407 obj_desc->region.address,
408 (acpi_size) obj_desc->region.length,
409 acpi_ut_get_node_name(node));
410
411 if (ACPI_FAILURE(status)) {
412 /*
413 * Invalid address/length. We will emit an error message and mark
414 * the region as invalid, so that it will cause an additional error if
415 * it is ever used. Then return AE_OK.
416 */
417 ACPI_EXCEPTION((AE_INFO, status,
418 "During address validation of OpRegion [%4.4s]",
419 node->name.ascii));
420 obj_desc->common.flags |= AOPOBJ_INVALID;
421 status = AE_OK;
422 }
423
424 return_ACPI_STATUS(status);
425}
426
427/*******************************************************************************
428 *
429 * FUNCTION: acpi_ds_initialize_region 68 * FUNCTION: acpi_ds_initialize_region
430 * 69 *
431 * PARAMETERS: obj_handle - Region namespace node 70 * PARAMETERS: obj_handle - Region namespace node
@@ -826,8 +465,9 @@ acpi_ds_eval_region_operands(struct acpi_walk_state *walk_state,
  *
  * RETURN: Status
  *
- * DESCRIPTION: Get region address and length
- *              Called from acpi_ds_exec_end_op during data_table_region parse tree walk
+ * DESCRIPTION: Get region address and length.
+ *              Called from acpi_ds_exec_end_op during data_table_region parse
+ *              tree walk.
  *
  ******************************************************************************/
 
@@ -1114,360 +754,3 @@ acpi_ds_eval_bank_field_operands(struct acpi_walk_state *walk_state,
 	acpi_ut_remove_reference(operand_desc);
 	return_ACPI_STATUS(status);
 }
1117
1118/*******************************************************************************
1119 *
1120 * FUNCTION: acpi_ds_exec_begin_control_op
1121 *
1122 * PARAMETERS: walk_list - The list that owns the walk stack
1123 * Op - The control Op
1124 *
1125 * RETURN: Status
1126 *
1127 * DESCRIPTION: Handles all control ops encountered during control method
1128 * execution.
1129 *
1130 ******************************************************************************/
1131
1132acpi_status
1133acpi_ds_exec_begin_control_op(struct acpi_walk_state *walk_state,
1134 union acpi_parse_object *op)
1135{
1136 acpi_status status = AE_OK;
1137 union acpi_generic_state *control_state;
1138
1139 ACPI_FUNCTION_NAME(ds_exec_begin_control_op);
1140
1141 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p Opcode=%2.2X State=%p\n", op,
1142 op->common.aml_opcode, walk_state));
1143
1144 switch (op->common.aml_opcode) {
1145 case AML_WHILE_OP:
1146
1147 /*
1148 * If this is an additional iteration of a while loop, continue.
1149 * There is no need to allocate a new control state.
1150 */
1151 if (walk_state->control_state) {
1152 if (walk_state->control_state->control.aml_predicate_start
1153 == (walk_state->parser_state.aml - 1)) {
1154
1155 /* Reset the state to start-of-loop */
1156
1157 walk_state->control_state->common.state =
1158 ACPI_CONTROL_CONDITIONAL_EXECUTING;
1159 break;
1160 }
1161 }
1162
1163 /*lint -fallthrough */
1164
1165 case AML_IF_OP:
1166
1167 /*
1168 * IF/WHILE: Create a new control state to manage these
1169 * constructs. We need to manage these as a stack, in order
1170 * to handle nesting.
1171 */
1172 control_state = acpi_ut_create_control_state();
1173 if (!control_state) {
1174 status = AE_NO_MEMORY;
1175 break;
1176 }
1177 /*
1178 * Save a pointer to the predicate for multiple executions
1179 * of a loop
1180 */
1181 control_state->control.aml_predicate_start =
1182 walk_state->parser_state.aml - 1;
1183 control_state->control.package_end =
1184 walk_state->parser_state.pkg_end;
1185 control_state->control.opcode = op->common.aml_opcode;
1186
1187 /* Push the control state on this walk's control stack */
1188
1189 acpi_ut_push_generic_state(&walk_state->control_state,
1190 control_state);
1191 break;
1192
1193 case AML_ELSE_OP:
1194
1195 /* Predicate is in the state object */
1196 /* If predicate is true, the IF was executed, ignore ELSE part */
1197
1198 if (walk_state->last_predicate) {
1199 status = AE_CTRL_TRUE;
1200 }
1201
1202 break;
1203
1204 case AML_RETURN_OP:
1205
1206 break;
1207
1208 default:
1209 break;
1210 }
1211
1212 return (status);
1213}
1214
1215/*******************************************************************************
1216 *
1217 * FUNCTION: acpi_ds_exec_end_control_op
1218 *
1219 * PARAMETERS: walk_list - The list that owns the walk stack
1220 * Op - The control Op
1221 *
1222 * RETURN: Status
1223 *
1224 * DESCRIPTION: Handles all control ops encountered during control method
1225 * execution.
1226 *
1227 ******************************************************************************/
1228
1229acpi_status
1230acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
1231 union acpi_parse_object * op)
1232{
1233 acpi_status status = AE_OK;
1234 union acpi_generic_state *control_state;
1235
1236 ACPI_FUNCTION_NAME(ds_exec_end_control_op);
1237
1238 switch (op->common.aml_opcode) {
1239 case AML_IF_OP:
1240
1241 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[IF_OP] Op=%p\n", op));
1242
1243 /*
1244 * Save the result of the predicate in case there is an
1245 * ELSE to come
1246 */
1247 walk_state->last_predicate =
1248 (u8) walk_state->control_state->common.value;
1249
1250 /*
1251 * Pop the control state that was created at the start
1252 * of the IF and free it
1253 */
1254 control_state =
1255 acpi_ut_pop_generic_state(&walk_state->control_state);
1256 acpi_ut_delete_generic_state(control_state);
1257 break;
1258
1259 case AML_ELSE_OP:
1260
1261 break;
1262
1263 case AML_WHILE_OP:
1264
1265 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "[WHILE_OP] Op=%p\n", op));
1266
1267 control_state = walk_state->control_state;
1268 if (control_state->common.value) {
1269
1270 /* Predicate was true, the body of the loop was just executed */
1271
1272 /*
1273 * This loop counter mechanism allows the interpreter to escape
1274 * possibly infinite loops. This can occur in poorly written AML
1275 * when the hardware does not respond within a while loop and the
1276 * loop does not implement a timeout.
1277 */
1278 control_state->control.loop_count++;
1279 if (control_state->control.loop_count >
1280 ACPI_MAX_LOOP_ITERATIONS) {
1281 status = AE_AML_INFINITE_LOOP;
1282 break;
1283 }
1284
1285 /*
1286 * Go back and evaluate the predicate and maybe execute the loop
1287 * another time
1288 */
1289 status = AE_CTRL_PENDING;
1290 walk_state->aml_last_while =
1291 control_state->control.aml_predicate_start;
1292 break;
1293 }
1294
1295 /* Predicate was false, terminate this while loop */
1296
1297 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
1298 "[WHILE_OP] termination! Op=%p\n", op));
1299
1300 /* Pop this control state and free it */
1301
1302 control_state =
1303 acpi_ut_pop_generic_state(&walk_state->control_state);
1304 acpi_ut_delete_generic_state(control_state);
1305 break;
1306
1307 case AML_RETURN_OP:
1308
1309 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
1310 "[RETURN_OP] Op=%p Arg=%p\n", op,
1311 op->common.value.arg));
1312
1313 /*
1314 * One optional operand -- the return value
1315 * It can be either an immediate operand or a result that
1316 * has been bubbled up the tree
1317 */
1318 if (op->common.value.arg) {
1319
1320 /* Since we have a real Return(), delete any implicit return */
1321
1322 acpi_ds_clear_implicit_return(walk_state);
1323
1324 /* Return statement has an immediate operand */
1325
1326 status =
1327 acpi_ds_create_operands(walk_state,
1328 op->common.value.arg);
1329 if (ACPI_FAILURE(status)) {
1330 return (status);
1331 }
1332
1333 /*
1334 * If value being returned is a Reference (such as
1335 * an arg or local), resolve it now because it may
1336 * cease to exist at the end of the method.
1337 */
1338 status =
1339 acpi_ex_resolve_to_value(&walk_state->operands[0],
1340 walk_state);
1341 if (ACPI_FAILURE(status)) {
1342 return (status);
1343 }
1344
1345 /*
1346 * Get the return value and save as the last result
1347 * value. This is the only place where walk_state->return_desc
1348 * is set to anything other than zero!
1349 */
1350 walk_state->return_desc = walk_state->operands[0];
1351 } else if (walk_state->result_count) {
1352
1353 /* Since we have a real Return(), delete any implicit return */
1354
1355 acpi_ds_clear_implicit_return(walk_state);
1356
1357 /*
1358 * The return value has come from a previous calculation.
1359 *
1360 * If value being returned is a Reference (such as
1361 * an arg or local), resolve it now because it may
1362 * cease to exist at the end of the method.
1363 *
1364 * Allow references created by the Index operator to return unchanged.
1365 */
1366 if ((ACPI_GET_DESCRIPTOR_TYPE
1367 (walk_state->results->results.obj_desc[0]) ==
1368 ACPI_DESC_TYPE_OPERAND)
1369 && ((walk_state->results->results.obj_desc[0])->
1370 common.type == ACPI_TYPE_LOCAL_REFERENCE)
1371 && ((walk_state->results->results.obj_desc[0])->
1372 reference.class != ACPI_REFCLASS_INDEX)) {
1373 status =
1374 acpi_ex_resolve_to_value(&walk_state->
1375 results->results.
1376 obj_desc[0],
1377 walk_state);
1378 if (ACPI_FAILURE(status)) {
1379 return (status);
1380 }
1381 }
1382
1383 walk_state->return_desc =
1384 walk_state->results->results.obj_desc[0];
1385 } else {
1386 /* No return operand */
1387
1388 if (walk_state->num_operands) {
1389 acpi_ut_remove_reference(walk_state->
1390 operands[0]);
1391 }
1392
1393 walk_state->operands[0] = NULL;
1394 walk_state->num_operands = 0;
1395 walk_state->return_desc = NULL;
1396 }
1397
1398 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
1399 "Completed RETURN_OP State=%p, RetVal=%p\n",
1400 walk_state, walk_state->return_desc));
1401
1402 /* End the control method execution right now */
1403
1404 status = AE_CTRL_TERMINATE;
1405 break;
1406
1407 case AML_NOOP_OP:
1408
1409 /* Just do nothing! */
1410 break;
1411
1412 case AML_BREAK_POINT_OP:
1413
1414 /*
1415 * Set the single-step flag. This will cause the debugger (if present)
1416 * to break to the console within the AML debugger at the start of the
1417 * next AML instruction.
1418 */
1419 ACPI_DEBUGGER_EXEC(acpi_gbl_cm_single_step = TRUE);
1420 ACPI_DEBUGGER_EXEC(acpi_os_printf
1421 ("**break** Executed AML BreakPoint opcode\n"));
1422
1423 /* Call to the OSL in case OS wants a piece of the action */
1424
1425 status = acpi_os_signal(ACPI_SIGNAL_BREAKPOINT,
1426 "Executed AML Breakpoint opcode");
1427 break;
1428
1429 case AML_BREAK_OP:
1430 case AML_CONTINUE_OP: /* ACPI 2.0 */
1431
1432 /* Pop and delete control states until we find a while */
1433
1434 while (walk_state->control_state &&
1435 (walk_state->control_state->control.opcode !=
1436 AML_WHILE_OP)) {
1437 control_state =
1438 acpi_ut_pop_generic_state(&walk_state->
1439 control_state);
1440 acpi_ut_delete_generic_state(control_state);
1441 }
1442
1443 /* No while found? */
1444
1445 if (!walk_state->control_state) {
1446 return (AE_AML_NO_WHILE);
1447 }
1448
1449 /* Was: walk_state->aml_last_while = walk_state->control_state->Control.aml_predicate_start; */
1450
1451 walk_state->aml_last_while =
1452 walk_state->control_state->control.package_end;
1453
1454 /* Return status depending on opcode */
1455
1456 if (op->common.aml_opcode == AML_BREAK_OP) {
1457 status = AE_CTRL_BREAK;
1458 } else {
1459 status = AE_CTRL_CONTINUE;
1460 }
1461 break;
1462
1463 default:
1464
1465 ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
1466 op->common.aml_opcode, op));
1467
1468 status = AE_AML_BAD_OPCODE;
1469 break;
1470 }
1471
1472 return (status);
1473}
diff --git a/drivers/acpi/acpica/dswload.c b/drivers/acpi/acpica/dswload.c
index 52566ff5e903..23a3b1ab20c1 100644
--- a/drivers/acpi/acpica/dswload.c
+++ b/drivers/acpi/acpica/dswload.c
@@ -1,6 +1,6 @@
 /******************************************************************************
  *
- * Module Name: dswload - Dispatcher namespace load callbacks
+ * Module Name: dswload - Dispatcher first pass namespace load callbacks
  *
  *****************************************************************************/
 
@@ -48,7 +48,6 @@
 #include "acdispat.h"
 #include "acinterp.h"
 #include "acnamesp.h"
-#include "acevents.h"
 
 #ifdef ACPI_ASL_COMPILER
 #include <acpi/acdisasm.h>
@@ -537,670 +536,3 @@ acpi_status acpi_ds_load1_end_op(struct acpi_walk_state *walk_state)
537 536
538 return_ACPI_STATUS(status); 537 return_ACPI_STATUS(status);
539} 538}
540
541/*******************************************************************************
542 *
543 * FUNCTION: acpi_ds_load2_begin_op
544 *
545 * PARAMETERS: walk_state - Current state of the parse tree walk
546 * out_op - Where to return op if a new one is created
547 *
548 * RETURN: Status
549 *
550 * DESCRIPTION: Descending callback used during the loading of ACPI tables.
551 *
552 ******************************************************************************/
553
554acpi_status
555acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
556 union acpi_parse_object **out_op)
557{
558 union acpi_parse_object *op;
559 struct acpi_namespace_node *node;
560 acpi_status status;
561 acpi_object_type object_type;
562 char *buffer_ptr;
563 u32 flags;
564
565 ACPI_FUNCTION_TRACE(ds_load2_begin_op);
566
567 op = walk_state->op;
568 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
569 walk_state));
570
571 if (op) {
572 if ((walk_state->control_state) &&
573 (walk_state->control_state->common.state ==
574 ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
575
576 /* We are executing a while loop outside of a method */
577
578 status = acpi_ds_exec_begin_op(walk_state, out_op);
579 return_ACPI_STATUS(status);
580 }
581
582 /* We only care about Namespace opcodes here */
583
584 if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
585 (walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
586 (!(walk_state->op_info->flags & AML_NAMED))) {
587 return_ACPI_STATUS(AE_OK);
588 }
589
590 /* Get the name we are going to enter or lookup in the namespace */
591
592 if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
593
594 /* For Namepath op, get the path string */
595
596 buffer_ptr = op->common.value.string;
597 if (!buffer_ptr) {
598
599 /* No name, just exit */
600
601 return_ACPI_STATUS(AE_OK);
602 }
603 } else {
604 /* Get name from the op */
605
606 buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
607 }
608 } else {
609 /* Get the namestring from the raw AML */
610
611 buffer_ptr =
612 acpi_ps_get_next_namestring(&walk_state->parser_state);
613 }
614
615 /* Map the opcode into an internal object type */
616
617 object_type = walk_state->op_info->object_type;
618
619 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
620 "State=%p Op=%p Type=%X\n", walk_state, op,
621 object_type));
622
623 switch (walk_state->opcode) {
624 case AML_FIELD_OP:
625 case AML_BANK_FIELD_OP:
626 case AML_INDEX_FIELD_OP:
627
628 node = NULL;
629 status = AE_OK;
630 break;
631
632 case AML_INT_NAMEPATH_OP:
633 /*
634 * The name_path is an object reference to an existing object.
635 * Don't enter the name into the namespace, but look it up
636 * for use later.
637 */
638 status =
639 acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
640 object_type, ACPI_IMODE_EXECUTE,
641 ACPI_NS_SEARCH_PARENT, walk_state, &(node));
642 break;
643
644 case AML_SCOPE_OP:
645
646 /* Special case for Scope(\) -> refers to the Root node */
647
648 if (op && (op->named.node == acpi_gbl_root_node)) {
649 node = op->named.node;
650
651 status =
652 acpi_ds_scope_stack_push(node, object_type,
653 walk_state);
654 if (ACPI_FAILURE(status)) {
655 return_ACPI_STATUS(status);
656 }
657 } else {
658 /*
659 * The Path is an object reference to an existing object.
660 * Don't enter the name into the namespace, but look it up
661 * for use later.
662 */
663 status =
664 acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
665 object_type, ACPI_IMODE_EXECUTE,
666 ACPI_NS_SEARCH_PARENT, walk_state,
667 &(node));
668 if (ACPI_FAILURE(status)) {
669#ifdef ACPI_ASL_COMPILER
670 if (status == AE_NOT_FOUND) {
671 status = AE_OK;
672 } else {
673 ACPI_ERROR_NAMESPACE(buffer_ptr,
674 status);
675 }
676#else
677 ACPI_ERROR_NAMESPACE(buffer_ptr, status);
678#endif
679 return_ACPI_STATUS(status);
680 }
681 }
682
683 /*
684 * We must check to make sure that the target is
685 * one of the opcodes that actually opens a scope
686 */
687 switch (node->type) {
688 case ACPI_TYPE_ANY:
689 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
690 case ACPI_TYPE_DEVICE:
691 case ACPI_TYPE_POWER:
692 case ACPI_TYPE_PROCESSOR:
693 case ACPI_TYPE_THERMAL:
694
695 /* These are acceptable types */
696 break;
697
698 case ACPI_TYPE_INTEGER:
699 case ACPI_TYPE_STRING:
700 case ACPI_TYPE_BUFFER:
701
702 /*
703 * These types we will allow, but we will change the type.
704 * This enables some existing code of the form:
705 *
706 * Name (DEB, 0)
707 * Scope (DEB) { ... }
708 */
709 ACPI_WARNING((AE_INFO,
710 "Type override - [%4.4s] had invalid type (%s) "
711 "for Scope operator, changed to type ANY\n",
712 acpi_ut_get_node_name(node),
713 acpi_ut_get_type_name(node->type)));
714
715 node->type = ACPI_TYPE_ANY;
716 walk_state->scope_info->common.value = ACPI_TYPE_ANY;
717 break;
718
719 default:
720
721 /* All other types are an error */
722
723 ACPI_ERROR((AE_INFO,
724 "Invalid type (%s) for target of "
725 "Scope operator [%4.4s] (Cannot override)",
726 acpi_ut_get_type_name(node->type),
727 acpi_ut_get_node_name(node)));
728
729 return (AE_AML_OPERAND_TYPE);
730 }
731 break;
732
733 default:
734
735 /* All other opcodes */
736
737 if (op && op->common.node) {
738
739 /* This op/node was previously entered into the namespace */
740
741 node = op->common.node;
742
743 if (acpi_ns_opens_scope(object_type)) {
744 status =
745 acpi_ds_scope_stack_push(node, object_type,
746 walk_state);
747 if (ACPI_FAILURE(status)) {
748 return_ACPI_STATUS(status);
749 }
750 }
751
752 return_ACPI_STATUS(AE_OK);
753 }
754
755 /*
756 * Enter the named type into the internal namespace. We enter the name
757 * as we go downward in the parse tree. Any necessary subobjects that
758 * involve arguments to the opcode must be created as we go back up the
759 * parse tree later.
760 *
761 * Note: Name may already exist if we are executing a deferred opcode.
762 */
763 if (walk_state->deferred_node) {
764
765 /* This name is already in the namespace, get the node */
766
767 node = walk_state->deferred_node;
768 status = AE_OK;
769 break;
770 }
771
772 flags = ACPI_NS_NO_UPSEARCH;
773 if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
774
775 /* Execution mode, node cannot already exist, node is temporary */
776
777 flags |= ACPI_NS_ERROR_IF_FOUND;
778
779 if (!
780 (walk_state->
781 parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
782 flags |= ACPI_NS_TEMPORARY;
783 }
784 }
785
786 /* Add new entry or lookup existing entry */
787
788 status =
789 acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
790 object_type, ACPI_IMODE_LOAD_PASS2, flags,
791 walk_state, &node);
792
793 if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
794 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
795 "***New Node [%4.4s] %p is temporary\n",
796 acpi_ut_get_node_name(node), node));
797 }
798 break;
799 }
800
801 if (ACPI_FAILURE(status)) {
802 ACPI_ERROR_NAMESPACE(buffer_ptr, status);
803 return_ACPI_STATUS(status);
804 }
805
806 if (!op) {
807
808 /* Create a new op */
809
810 op = acpi_ps_alloc_op(walk_state->opcode);
811 if (!op) {
812 return_ACPI_STATUS(AE_NO_MEMORY);
813 }
814
815 /* Initialize the new op */
816
817 if (node) {
818 op->named.name = node->name.integer;
819 }
820 *out_op = op;
821 }
822
823 /*
824 * Put the Node in the "op" object that the parser uses, so we
825 * can get it again quickly when this scope is closed
826 */
827 op->common.node = node;
828 return_ACPI_STATUS(status);
829}
830
831/*******************************************************************************
832 *
833 * FUNCTION: acpi_ds_load2_end_op
834 *
835 * PARAMETERS: walk_state - Current state of the parse tree walk
836 *
837 * RETURN: Status
838 *
839 * DESCRIPTION: Ascending callback used during the loading of the namespace,
840 * both control methods and everything else.
841 *
842 ******************************************************************************/
843
844acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
845{
846 union acpi_parse_object *op;
847 acpi_status status = AE_OK;
848 acpi_object_type object_type;
849 struct acpi_namespace_node *node;
850 union acpi_parse_object *arg;
851 struct acpi_namespace_node *new_node;
852#ifndef ACPI_NO_METHOD_EXECUTION
853 u32 i;
854 u8 region_space;
855#endif
856
857 ACPI_FUNCTION_TRACE(ds_load2_end_op);
858
859 op = walk_state->op;
860 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
861 walk_state->op_info->name, op, walk_state));
862
863 /* Check if opcode had an associated namespace object */
864
865 if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
866 return_ACPI_STATUS(AE_OK);
867 }
868
869 if (op->common.aml_opcode == AML_SCOPE_OP) {
870 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
871 "Ending scope Op=%p State=%p\n", op,
872 walk_state));
873 }
874
875 object_type = walk_state->op_info->object_type;
876
877 /*
878 * Get the Node/name from the earlier lookup
879 * (It was saved in the *op structure)
880 */
881 node = op->common.node;
882
883 /*
884 * Put the Node on the object stack (Contains the ACPI Name of
885 * this object)
886 */
887 walk_state->operands[0] = (void *)node;
888 walk_state->num_operands = 1;
889
890 /* Pop the scope stack */
891
892 if (acpi_ns_opens_scope(object_type) &&
893 (op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
894 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
895 "(%s) Popping scope for Op %p\n",
896 acpi_ut_get_type_name(object_type), op));
897
898 status = acpi_ds_scope_stack_pop(walk_state);
899 if (ACPI_FAILURE(status)) {
900 goto cleanup;
901 }
902 }
903
904 /*
905 * Named operations are as follows:
906 *
907 * AML_ALIAS
908 * AML_BANKFIELD
909 * AML_CREATEBITFIELD
910 * AML_CREATEBYTEFIELD
911 * AML_CREATEDWORDFIELD
912 * AML_CREATEFIELD
913 * AML_CREATEQWORDFIELD
914 * AML_CREATEWORDFIELD
915 * AML_DATA_REGION
916 * AML_DEVICE
917 * AML_EVENT
918 * AML_FIELD
919 * AML_INDEXFIELD
920 * AML_METHOD
921 * AML_METHODCALL
922 * AML_MUTEX
923 * AML_NAME
924 * AML_NAMEDFIELD
925 * AML_OPREGION
926 * AML_POWERRES
927 * AML_PROCESSOR
928 * AML_SCOPE
929 * AML_THERMALZONE
930 */
931
932 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
933 "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
934 acpi_ps_get_opcode_name(op->common.aml_opcode),
935 walk_state, op, node));
936
937 /* Decode the opcode */
938
939 arg = op->common.value.arg;
940
941 switch (walk_state->op_info->type) {
942#ifndef ACPI_NO_METHOD_EXECUTION
943
944 case AML_TYPE_CREATE_FIELD:
945 /*
946 * Create the field object, but the field buffer and index must
947 * be evaluated later during the execution phase
948 */
949 status = acpi_ds_create_buffer_field(op, walk_state);
950 break;
951
952 case AML_TYPE_NAMED_FIELD:
953 /*
954 * If we are executing a method, initialize the field
955 */
956 if (walk_state->method_node) {
957 status = acpi_ds_init_field_objects(op, walk_state);
958 }
959
960 switch (op->common.aml_opcode) {
961 case AML_INDEX_FIELD_OP:
962
963 status =
964 acpi_ds_create_index_field(op,
965 (acpi_handle) arg->
966 common.node, walk_state);
967 break;
968
969 case AML_BANK_FIELD_OP:
970
971 status =
972 acpi_ds_create_bank_field(op, arg->common.node,
973 walk_state);
974 break;
975
976 case AML_FIELD_OP:
977
978 status =
979 acpi_ds_create_field(op, arg->common.node,
980 walk_state);
981 break;
982
983 default:
984 /* All NAMED_FIELD opcodes must be handled above */
985 break;
986 }
987 break;
988
989 case AML_TYPE_NAMED_SIMPLE:
990
991 status = acpi_ds_create_operands(walk_state, arg);
992 if (ACPI_FAILURE(status)) {
993 goto cleanup;
994 }
995
996 switch (op->common.aml_opcode) {
997 case AML_PROCESSOR_OP:
998
999 status = acpi_ex_create_processor(walk_state);
1000 break;
1001
1002 case AML_POWER_RES_OP:
1003
1004 status = acpi_ex_create_power_resource(walk_state);
1005 break;
1006
1007 case AML_MUTEX_OP:
1008
1009 status = acpi_ex_create_mutex(walk_state);
1010 break;
1011
1012 case AML_EVENT_OP:
1013
1014 status = acpi_ex_create_event(walk_state);
1015 break;
1016
1017 case AML_ALIAS_OP:
1018
1019 status = acpi_ex_create_alias(walk_state);
1020 break;
1021
1022 default:
1023 /* Unknown opcode */
1024
1025 status = AE_OK;
1026 goto cleanup;
1027 }
1028
1029 /* Delete operands */
1030
1031 for (i = 1; i < walk_state->num_operands; i++) {
1032 acpi_ut_remove_reference(walk_state->operands[i]);
1033 walk_state->operands[i] = NULL;
1034 }
1035
1036 break;
1037#endif /* ACPI_NO_METHOD_EXECUTION */
1038
1039 case AML_TYPE_NAMED_COMPLEX:
1040
1041 switch (op->common.aml_opcode) {
1042#ifndef ACPI_NO_METHOD_EXECUTION
1043 case AML_REGION_OP:
1044 case AML_DATA_REGION_OP:
1045
1046 if (op->common.aml_opcode == AML_REGION_OP) {
1047 region_space = (acpi_adr_space_type)
1048 ((op->common.value.arg)->common.value.
1049 integer);
1050 } else {
1051 region_space = REGION_DATA_TABLE;
1052 }
1053
1054 /*
1055 * The op_region is not fully parsed at this time. The only valid
1056 * argument is the space_id. (We must save the address of the
1057 * AML for the address and length operands)
1058 *
1059 * If we have a valid region, initialize it. The namespace is
1060 * unlocked at this point.
1061 *
1062 * Need to unlock interpreter if it is locked (if we are running
1063 * a control method), in order to allow _REG methods to be run
1064 * during acpi_ev_initialize_region.
1065 */
1066 if (walk_state->method_node) {
1067 /*
1068 * Executing a method: initialize the region and unlock
1069 * the interpreter
1070 */
1071 status =
1072 acpi_ex_create_region(op->named.data,
1073 op->named.length,
1074 region_space,
1075 walk_state);
1076 if (ACPI_FAILURE(status)) {
1077 return (status);
1078 }
1079
1080 acpi_ex_exit_interpreter();
1081 }
1082
1083 status =
1084 acpi_ev_initialize_region
1085 (acpi_ns_get_attached_object(node), FALSE);
1086 if (walk_state->method_node) {
1087 acpi_ex_enter_interpreter();
1088 }
1089
1090 if (ACPI_FAILURE(status)) {
1091 /*
1092 * If AE_NOT_EXIST is returned, it is not fatal
1093 * because many regions get created before a handler
1094 * is installed for said region.
1095 */
1096 if (AE_NOT_EXIST == status) {
1097 status = AE_OK;
1098 }
1099 }
1100 break;
1101
1102 case AML_NAME_OP:
1103
1104 status = acpi_ds_create_node(walk_state, node, op);
1105 break;
1106
1107 case AML_METHOD_OP:
1108 /*
1109 * method_op pkg_length name_string method_flags term_list
1110 *
1111 * Note: We must create the method node/object pair as soon as we
1112 * see the method declaration. This allows later pass1 parsing
1113 * of invocations of the method (need to know the number of
1114 * arguments.)
1115 */
1116 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
1117 "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
1118 walk_state, op, op->named.node));
1119
1120 if (!acpi_ns_get_attached_object(op->named.node)) {
1121 walk_state->operands[0] =
1122 ACPI_CAST_PTR(void, op->named.node);
1123 walk_state->num_operands = 1;
1124
1125 status =
1126 acpi_ds_create_operands(walk_state,
1127 op->common.value.
1128 arg);
1129 if (ACPI_SUCCESS(status)) {
1130 status =
1131 acpi_ex_create_method(op->named.
1132 data,
1133 op->named.
1134 length,
1135 walk_state);
1136 }
1137 walk_state->operands[0] = NULL;
1138 walk_state->num_operands = 0;
1139
1140 if (ACPI_FAILURE(status)) {
1141 return_ACPI_STATUS(status);
1142 }
1143 }
1144 break;
1145
1146#endif /* ACPI_NO_METHOD_EXECUTION */
1147
1148 default:
1149 /* All NAMED_COMPLEX opcodes must be handled above */
1150 break;
1151 }
1152 break;
1153
1154 case AML_CLASS_INTERNAL:
1155
1156 /* case AML_INT_NAMEPATH_OP: */
1157 break;
1158
1159 case AML_CLASS_METHOD_CALL:
1160
1161 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
1162 "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
1163 walk_state, op, node));
1164
1165 /*
1166 * Lookup the method name and save the Node
1167 */
1168 status =
1169 acpi_ns_lookup(walk_state->scope_info,
1170 arg->common.value.string, ACPI_TYPE_ANY,
1171 ACPI_IMODE_LOAD_PASS2,
1172 ACPI_NS_SEARCH_PARENT |
1173 ACPI_NS_DONT_OPEN_SCOPE, walk_state,
1174 &(new_node));
1175 if (ACPI_SUCCESS(status)) {
1176 /*
1177 * Make sure that what we found is indeed a method
1178 * We didn't search for a method on purpose, to see if the name
1179 * would resolve
1180 */
1181 if (new_node->type != ACPI_TYPE_METHOD) {
1182 status = AE_AML_OPERAND_TYPE;
1183 }
1184
1185 /* We could put the returned object (Node) on the object stack for
1186 * later, but for now, we will put it in the "op" object that the
1187 * parser uses, so we can get it again at the end of this scope
1188 */
1189 op->common.node = new_node;
1190 } else {
1191 ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
1192 }
1193 break;
1194
1195 default:
1196 break;
1197 }
1198
1199 cleanup:
1200
1201 /* Remove the Node pushed at the very beginning */
1202
1203 walk_state->operands[0] = NULL;
1204 walk_state->num_operands = 0;
1205 return_ACPI_STATUS(status);
1206}
diff --git a/drivers/acpi/acpica/dswload2.c b/drivers/acpi/acpica/dswload2.c
new file mode 100644
index 000000000000..4be4e921dfe1
--- /dev/null
+++ b/drivers/acpi/acpica/dswload2.c
@@ -0,0 +1,720 @@
1/******************************************************************************
2 *
3 * Module Name: dswload2 - Dispatcher second pass namespace load callbacks
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acparser.h"
47#include "amlcode.h"
48#include "acdispat.h"
49#include "acinterp.h"
50#include "acnamesp.h"
51#include "acevents.h"
52
53#define _COMPONENT ACPI_DISPATCHER
54ACPI_MODULE_NAME("dswload2")
55
56/*******************************************************************************
57 *
58 * FUNCTION: acpi_ds_load2_begin_op
59 *
60 * PARAMETERS: walk_state - Current state of the parse tree walk
61 * out_op - Where to return op if a new one is created
62 *
63 * RETURN: Status
64 *
65 * DESCRIPTION: Descending callback used during the loading of ACPI tables.
66 *
67 ******************************************************************************/
68acpi_status
69acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
70 union acpi_parse_object **out_op)
71{
72 union acpi_parse_object *op;
73 struct acpi_namespace_node *node;
74 acpi_status status;
75 acpi_object_type object_type;
76 char *buffer_ptr;
77 u32 flags;
78
79 ACPI_FUNCTION_TRACE(ds_load2_begin_op);
80
81 op = walk_state->op;
82 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
83 walk_state));
84
85 if (op) {
86 if ((walk_state->control_state) &&
87 (walk_state->control_state->common.state ==
88 ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
89
90 /* We are executing a while loop outside of a method */
91
92 status = acpi_ds_exec_begin_op(walk_state, out_op);
93 return_ACPI_STATUS(status);
94 }
95
96 /* We only care about Namespace opcodes here */
97
98 if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
99 (walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
100 (!(walk_state->op_info->flags & AML_NAMED))) {
101 return_ACPI_STATUS(AE_OK);
102 }
103
104 /* Get the name we are going to enter or lookup in the namespace */
105
106 if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
107
108 /* For Namepath op, get the path string */
109
110 buffer_ptr = op->common.value.string;
111 if (!buffer_ptr) {
112
113 /* No name, just exit */
114
115 return_ACPI_STATUS(AE_OK);
116 }
117 } else {
118 /* Get name from the op */
119
120 buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
121 }
122 } else {
123 /* Get the namestring from the raw AML */
124
125 buffer_ptr =
126 acpi_ps_get_next_namestring(&walk_state->parser_state);
127 }
128
129 /* Map the opcode into an internal object type */
130
131 object_type = walk_state->op_info->object_type;
132
133 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
134 "State=%p Op=%p Type=%X\n", walk_state, op,
135 object_type));
136
137 switch (walk_state->opcode) {
138 case AML_FIELD_OP:
139 case AML_BANK_FIELD_OP:
140 case AML_INDEX_FIELD_OP:
141
142 node = NULL;
143 status = AE_OK;
144 break;
145
146 case AML_INT_NAMEPATH_OP:
147 /*
148 * The name_path is an object reference to an existing object.
149 * Don't enter the name into the namespace, but look it up
150 * for use later.
151 */
152 status =
153 acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
154 object_type, ACPI_IMODE_EXECUTE,
155 ACPI_NS_SEARCH_PARENT, walk_state, &(node));
156 break;
157
158 case AML_SCOPE_OP:
159
160 /* Special case for Scope(\) -> refers to the Root node */
161
162 if (op && (op->named.node == acpi_gbl_root_node)) {
163 node = op->named.node;
164
165 status =
166 acpi_ds_scope_stack_push(node, object_type,
167 walk_state);
168 if (ACPI_FAILURE(status)) {
169 return_ACPI_STATUS(status);
170 }
171 } else {
172 /*
173 * The Path is an object reference to an existing object.
174 * Don't enter the name into the namespace, but look it up
175 * for use later.
176 */
177 status =
178 acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
179 object_type, ACPI_IMODE_EXECUTE,
180 ACPI_NS_SEARCH_PARENT, walk_state,
181 &(node));
182 if (ACPI_FAILURE(status)) {
183#ifdef ACPI_ASL_COMPILER
184 if (status == AE_NOT_FOUND) {
185 status = AE_OK;
186 } else {
187 ACPI_ERROR_NAMESPACE(buffer_ptr,
188 status);
189 }
190#else
191 ACPI_ERROR_NAMESPACE(buffer_ptr, status);
192#endif
193 return_ACPI_STATUS(status);
194 }
195 }
196
197 /*
198 * We must check to make sure that the target is
199 * one of the opcodes that actually opens a scope
200 */
201 switch (node->type) {
202 case ACPI_TYPE_ANY:
203 case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
204 case ACPI_TYPE_DEVICE:
205 case ACPI_TYPE_POWER:
206 case ACPI_TYPE_PROCESSOR:
207 case ACPI_TYPE_THERMAL:
208
209 /* These are acceptable types */
210 break;
211
212 case ACPI_TYPE_INTEGER:
213 case ACPI_TYPE_STRING:
214 case ACPI_TYPE_BUFFER:
215
216 /*
217 * These types we will allow, but we will change the type.
218 * This enables some existing code of the form:
219 *
220 * Name (DEB, 0)
221 * Scope (DEB) { ... }
222 */
223 ACPI_WARNING((AE_INFO,
224 "Type override - [%4.4s] had invalid type (%s) "
225 "for Scope operator, changed to type ANY\n",
226 acpi_ut_get_node_name(node),
227 acpi_ut_get_type_name(node->type)));
228
229 node->type = ACPI_TYPE_ANY;
230 walk_state->scope_info->common.value = ACPI_TYPE_ANY;
231 break;
232
233 default:
234
235 /* All other types are an error */
236
237 ACPI_ERROR((AE_INFO,
238 "Invalid type (%s) for target of "
239 "Scope operator [%4.4s] (Cannot override)",
240 acpi_ut_get_type_name(node->type),
241 acpi_ut_get_node_name(node)));
242
243 return (AE_AML_OPERAND_TYPE);
244 }
245 break;
246
247 default:
248
249 /* All other opcodes */
250
251 if (op && op->common.node) {
252
253 /* This op/node was previously entered into the namespace */
254
255 node = op->common.node;
256
257 if (acpi_ns_opens_scope(object_type)) {
258 status =
259 acpi_ds_scope_stack_push(node, object_type,
260 walk_state);
261 if (ACPI_FAILURE(status)) {
262 return_ACPI_STATUS(status);
263 }
264 }
265
266 return_ACPI_STATUS(AE_OK);
267 }
268
269 /*
270 * Enter the named type into the internal namespace. We enter the name
271 * as we go downward in the parse tree. Any necessary subobjects that
272 * involve arguments to the opcode must be created as we go back up the
273 * parse tree later.
274 *
275 * Note: Name may already exist if we are executing a deferred opcode.
276 */
277 if (walk_state->deferred_node) {
278
279 /* This name is already in the namespace, get the node */
280
281 node = walk_state->deferred_node;
282 status = AE_OK;
283 break;
284 }
285
286 flags = ACPI_NS_NO_UPSEARCH;
287 if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
288
289 /* Execution mode, node cannot already exist, node is temporary */
290
291 flags |= ACPI_NS_ERROR_IF_FOUND;
292
293 if (!
294 (walk_state->
295 parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
296 flags |= ACPI_NS_TEMPORARY;
297 }
298 }
299
300 /* Add new entry or lookup existing entry */
301
302 status =
303 acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
304 object_type, ACPI_IMODE_LOAD_PASS2, flags,
305 walk_state, &node);
306
307 if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
308 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
309 "***New Node [%4.4s] %p is temporary\n",
310 acpi_ut_get_node_name(node), node));
311 }
312 break;
313 }
314
315 if (ACPI_FAILURE(status)) {
316 ACPI_ERROR_NAMESPACE(buffer_ptr, status);
317 return_ACPI_STATUS(status);
318 }
319
320 if (!op) {
321
322 /* Create a new op */
323
324 op = acpi_ps_alloc_op(walk_state->opcode);
325 if (!op) {
326 return_ACPI_STATUS(AE_NO_MEMORY);
327 }
328
329 /* Initialize the new op */
330
331 if (node) {
332 op->named.name = node->name.integer;
333 }
334 *out_op = op;
335 }
336
337 /*
338 * Put the Node in the "op" object that the parser uses, so we
339 * can get it again quickly when this scope is closed
340 */
341 op->common.node = node;
342 return_ACPI_STATUS(status);
343}
344
345/*******************************************************************************
346 *
347 * FUNCTION: acpi_ds_load2_end_op
348 *
349 * PARAMETERS: walk_state - Current state of the parse tree walk
350 *
351 * RETURN: Status
352 *
353 * DESCRIPTION: Ascending callback used during the loading of the namespace,
354 * both control methods and everything else.
355 *
356 ******************************************************************************/
357
358acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
359{
360 union acpi_parse_object *op;
361 acpi_status status = AE_OK;
362 acpi_object_type object_type;
363 struct acpi_namespace_node *node;
364 union acpi_parse_object *arg;
365 struct acpi_namespace_node *new_node;
366#ifndef ACPI_NO_METHOD_EXECUTION
367 u32 i;
368 u8 region_space;
369#endif
370
371 ACPI_FUNCTION_TRACE(ds_load2_end_op);
372
373 op = walk_state->op;
374 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
375 walk_state->op_info->name, op, walk_state));
376
377 /* Check if opcode had an associated namespace object */
378
379 if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
380 return_ACPI_STATUS(AE_OK);
381 }
382
383 if (op->common.aml_opcode == AML_SCOPE_OP) {
384 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
385 "Ending scope Op=%p State=%p\n", op,
386 walk_state));
387 }
388
389 object_type = walk_state->op_info->object_type;
390
391 /*
392 * Get the Node/name from the earlier lookup
393 * (It was saved in the *op structure)
394 */
395 node = op->common.node;
396
397 /*
398 * Put the Node on the object stack (Contains the ACPI Name of
399 * this object)
400 */
401 walk_state->operands[0] = (void *)node;
402 walk_state->num_operands = 1;
403
404 /* Pop the scope stack */
405
406 if (acpi_ns_opens_scope(object_type) &&
407 (op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
408 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
409 "(%s) Popping scope for Op %p\n",
410 acpi_ut_get_type_name(object_type), op));
411
412 status = acpi_ds_scope_stack_pop(walk_state);
413 if (ACPI_FAILURE(status)) {
414 goto cleanup;
415 }
416 }
417
418 /*
419 * Named operations are as follows:
420 *
421 * AML_ALIAS
422 * AML_BANKFIELD
423 * AML_CREATEBITFIELD
424 * AML_CREATEBYTEFIELD
425 * AML_CREATEDWORDFIELD
426 * AML_CREATEFIELD
427 * AML_CREATEQWORDFIELD
428 * AML_CREATEWORDFIELD
429 * AML_DATA_REGION
430 * AML_DEVICE
431 * AML_EVENT
432 * AML_FIELD
433 * AML_INDEXFIELD
434 * AML_METHOD
435 * AML_METHODCALL
436 * AML_MUTEX
437 * AML_NAME
438 * AML_NAMEDFIELD
439 * AML_OPREGION
440 * AML_POWERRES
441 * AML_PROCESSOR
442 * AML_SCOPE
443 * AML_THERMALZONE
444 */
445
446 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
447 "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
448 acpi_ps_get_opcode_name(op->common.aml_opcode),
449 walk_state, op, node));
450
451 /* Decode the opcode */
452
453 arg = op->common.value.arg;
454
455 switch (walk_state->op_info->type) {
456#ifndef ACPI_NO_METHOD_EXECUTION
457
458 case AML_TYPE_CREATE_FIELD:
459 /*
460 * Create the field object, but the field buffer and index must
461 * be evaluated later during the execution phase
462 */
463 status = acpi_ds_create_buffer_field(op, walk_state);
464 break;
465
466 case AML_TYPE_NAMED_FIELD:
467 /*
468 * If we are executing a method, initialize the field
469 */
470 if (walk_state->method_node) {
471 status = acpi_ds_init_field_objects(op, walk_state);
472 }
473
474 switch (op->common.aml_opcode) {
475 case AML_INDEX_FIELD_OP:
476
477 status =
478 acpi_ds_create_index_field(op,
479 (acpi_handle) arg->
480 common.node, walk_state);
481 break;
482
483 case AML_BANK_FIELD_OP:
484
485 status =
486 acpi_ds_create_bank_field(op, arg->common.node,
487 walk_state);
488 break;
489
490 case AML_FIELD_OP:
491
492 status =
493 acpi_ds_create_field(op, arg->common.node,
494 walk_state);
495 break;
496
497 default:
498 /* All NAMED_FIELD opcodes must be handled above */
499 break;
500 }
501 break;
502
503 case AML_TYPE_NAMED_SIMPLE:
504
505 status = acpi_ds_create_operands(walk_state, arg);
506 if (ACPI_FAILURE(status)) {
507 goto cleanup;
508 }
509
510 switch (op->common.aml_opcode) {
511 case AML_PROCESSOR_OP:
512
513 status = acpi_ex_create_processor(walk_state);
514 break;
515
516 case AML_POWER_RES_OP:
517
518 status = acpi_ex_create_power_resource(walk_state);
519 break;
520
521 case AML_MUTEX_OP:
522
523 status = acpi_ex_create_mutex(walk_state);
524 break;
525
526 case AML_EVENT_OP:
527
528 status = acpi_ex_create_event(walk_state);
529 break;
530
531 case AML_ALIAS_OP:
532
533 status = acpi_ex_create_alias(walk_state);
534 break;
535
536 default:
537 /* Unknown opcode */
538
539 status = AE_OK;
540 goto cleanup;
541 }
542
543 /* Delete operands */
544
545 for (i = 1; i < walk_state->num_operands; i++) {
546 acpi_ut_remove_reference(walk_state->operands[i]);
547 walk_state->operands[i] = NULL;
548 }
549
550 break;
551#endif /* ACPI_NO_METHOD_EXECUTION */
552
553 case AML_TYPE_NAMED_COMPLEX:
554
555 switch (op->common.aml_opcode) {
556#ifndef ACPI_NO_METHOD_EXECUTION
557 case AML_REGION_OP:
558 case AML_DATA_REGION_OP:
559
560 if (op->common.aml_opcode == AML_REGION_OP) {
561 region_space = (acpi_adr_space_type)
562 ((op->common.value.arg)->common.value.
563 integer);
564 } else {
565 region_space = REGION_DATA_TABLE;
566 }
567
568 /*
569 * The op_region is not fully parsed at this time. The only valid
570 * argument is the space_id. (We must save the address of the
571 * AML for the address and length operands)
572 *
573 * If we have a valid region, initialize it. The namespace is
574 * unlocked at this point.
575 *
576 * Need to unlock interpreter if it is locked (if we are running
577 * a control method), in order to allow _REG methods to be run
578 * during acpi_ev_initialize_region.
579 */
580 if (walk_state->method_node) {
581 /*
582 * Executing a method: initialize the region and unlock
583 * the interpreter
584 */
585 status =
586 acpi_ex_create_region(op->named.data,
587 op->named.length,
588 region_space,
589 walk_state);
590 if (ACPI_FAILURE(status)) {
591 return (status);
592 }
593
594 acpi_ex_exit_interpreter();
595 }
596
597 status =
598 acpi_ev_initialize_region
599 (acpi_ns_get_attached_object(node), FALSE);
600 if (walk_state->method_node) {
601 acpi_ex_enter_interpreter();
602 }
603
604 if (ACPI_FAILURE(status)) {
605 /*
606 * If AE_NOT_EXIST is returned, it is not fatal
607 * because many regions get created before a handler
608 * is installed for said region.
609 */
610 if (AE_NOT_EXIST == status) {
611 status = AE_OK;
612 }
613 }
614 break;
615
616 case AML_NAME_OP:
617
618 status = acpi_ds_create_node(walk_state, node, op);
619 break;
620
621 case AML_METHOD_OP:
622 /*
623 * method_op pkg_length name_string method_flags term_list
624 *
625 * Note: We must create the method node/object pair as soon as we
626 * see the method declaration. This allows later pass1 parsing
627 * of invocations of the method (need to know the number of
628 * arguments.)
629 */
630 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
631 "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
632 walk_state, op, op->named.node));
633
634 if (!acpi_ns_get_attached_object(op->named.node)) {
635 walk_state->operands[0] =
636 ACPI_CAST_PTR(void, op->named.node);
637 walk_state->num_operands = 1;
638
639 status =
640 acpi_ds_create_operands(walk_state,
641 op->common.value.
642 arg);
643 if (ACPI_SUCCESS(status)) {
644 status =
645 acpi_ex_create_method(op->named.
646 data,
647 op->named.
648 length,
649 walk_state);
650 }
651 walk_state->operands[0] = NULL;
652 walk_state->num_operands = 0;
653
654 if (ACPI_FAILURE(status)) {
655 return_ACPI_STATUS(status);
656 }
657 }
658 break;
659
660#endif /* ACPI_NO_METHOD_EXECUTION */
661
662 default:
663 /* All NAMED_COMPLEX opcodes must be handled above */
664 break;
665 }
666 break;
667
668 case AML_CLASS_INTERNAL:
669
670 /* case AML_INT_NAMEPATH_OP: */
671 break;
672
673 case AML_CLASS_METHOD_CALL:
674
675 ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
676 "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
677 walk_state, op, node));
678
679 /*
680 * Lookup the method name and save the Node
681 */
682 status =
683 acpi_ns_lookup(walk_state->scope_info,
684 arg->common.value.string, ACPI_TYPE_ANY,
685 ACPI_IMODE_LOAD_PASS2,
686 ACPI_NS_SEARCH_PARENT |
687 ACPI_NS_DONT_OPEN_SCOPE, walk_state,
688 &(new_node));
689 if (ACPI_SUCCESS(status)) {
690 /*
691 * Make sure that what we found is indeed a method
692 * We didn't search for a method on purpose, to see if the name
693 * would resolve
694 */
695 if (new_node->type != ACPI_TYPE_METHOD) {
696 status = AE_AML_OPERAND_TYPE;
697 }
698
699 /* We could put the returned object (Node) on the object stack for
700 * later, but for now, we will put it in the "op" object that the
701 * parser uses, so we can get it again at the end of this scope
702 */
703 op->common.node = new_node;
704 } else {
705 ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
706 }
707 break;
708
709 default:
710 break;
711 }
712
713 cleanup:
714
715 /* Remove the Node pushed at the very beginning */
716
717 walk_state->operands[0] = NULL;
718 walk_state->num_operands = 0;
719 return_ACPI_STATUS(status);
720}
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index f4725212eb48..65c79add3b19 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -373,6 +373,15 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
373 373
374 gpe_register_info = &gpe_block->register_info[i]; 374 gpe_register_info = &gpe_block->register_info[i];
375 375
376 /*
377 * Optimization: If there are no GPEs enabled within this
378 * register, we can safely ignore the entire register.
379 */
380 if (!(gpe_register_info->enable_for_run |
381 gpe_register_info->enable_for_wake)) {
382 continue;
383 }
384
376 /* Read the Status Register */ 385 /* Read the Status Register */
377 386
378 status = 387 status =
diff --git a/drivers/acpi/acpica/evregion.c b/drivers/acpi/acpica/evregion.c
index 785a5ee64585..bea7223d7a71 100644
--- a/drivers/acpi/acpica/evregion.c
+++ b/drivers/acpi/acpica/evregion.c
@@ -231,6 +231,8 @@ acpi_status acpi_ev_initialize_op_regions(void)
231 } 231 }
232 } 232 }
233 233
234 acpi_gbl_reg_methods_executed = TRUE;
235
234 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 236 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
235 return_ACPI_STATUS(status); 237 return_ACPI_STATUS(status);
236} 238}
diff --git a/drivers/acpi/acpica/evxfregn.c b/drivers/acpi/acpica/evxfregn.c
index eb7386763712..c85c8c45599d 100644
--- a/drivers/acpi/acpica/evxfregn.c
+++ b/drivers/acpi/acpica/evxfregn.c
@@ -110,9 +110,39 @@ acpi_install_address_space_handler(acpi_handle device,
110 goto unlock_and_exit; 110 goto unlock_and_exit;
111 } 111 }
112 112
113 /* Run all _REG methods for this address space */ 113 /*
114 * For the default space_iDs, (the IDs for which there are default region handlers
115 * installed), only execute the _REG methods if the global initialization _REG
116 * methods have already been run (via acpi_initialize_objects). In other words,
117 * we will defer the execution of the _REG methods for these space_iDs until
118 * execution of acpi_initialize_objects. This is done because we need the handlers
119 * for the default spaces (mem/io/pci/table) to be installed before we can run
120 * any control methods (or _REG methods). There is known BIOS code that depends
121 * on this.
122 *
123 * For all other space_iDs, we can safely execute the _REG methods immediately.
124 * This means that for IDs like embedded_controller, this function should be called
125 * only after acpi_enable_subsystem has been called.
126 */
127 switch (space_id) {
128 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
129 case ACPI_ADR_SPACE_SYSTEM_IO:
130 case ACPI_ADR_SPACE_PCI_CONFIG:
131 case ACPI_ADR_SPACE_DATA_TABLE:
132
133 if (acpi_gbl_reg_methods_executed) {
134
135 /* Run all _REG methods for this address space */
136
137 status = acpi_ev_execute_reg_methods(node, space_id);
138 }
139 break;
140
141 default:
114 142
115 status = acpi_ev_execute_reg_methods(node, space_id); 143 status = acpi_ev_execute_reg_methods(node, space_id);
144 break;
145 }
116 146
117 unlock_and_exit: 147 unlock_and_exit:
118 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 148 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index 6c79c29f082d..f915a7f3f921 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -280,13 +280,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
280 if (ACPI_FAILURE(status)) { 280 if (ACPI_FAILURE(status)) {
281 if (status == AE_NOT_IMPLEMENTED) { 281 if (status == AE_NOT_IMPLEMENTED) {
282 ACPI_ERROR((AE_INFO, 282 ACPI_ERROR((AE_INFO,
283 "Region %s(0x%X) not implemented", 283 "Region %s (ID=%u) not implemented",
284 acpi_ut_get_region_name(rgn_desc->region. 284 acpi_ut_get_region_name(rgn_desc->region.
285 space_id), 285 space_id),
286 rgn_desc->region.space_id)); 286 rgn_desc->region.space_id));
287 } else if (status == AE_NOT_EXIST) { 287 } else if (status == AE_NOT_EXIST) {
288 ACPI_ERROR((AE_INFO, 288 ACPI_ERROR((AE_INFO,
289 "Region %s(0x%X) has no handler", 289 "Region %s (ID=%u) has no handler",
290 acpi_ut_get_region_name(rgn_desc->region. 290 acpi_ut_get_region_name(rgn_desc->region.
291 space_id), 291 space_id),
292 rgn_desc->region.space_id)); 292 rgn_desc->region.space_id));
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index 428d44e2d162..6f5588e62c0a 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -384,8 +384,11 @@ static void acpi_tb_convert_fadt(void)
384 * 384 *
385 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at 385 * The ACPI 1.0 reserved fields that will be zeroed are the bytes located at
386 * offset 45, 55, 95, and the word located at offset 109, 110. 386 * offset 45, 55, 95, and the word located at offset 109, 110.
387 *
388 * Note: The FADT revision value is unreliable. Only the length can be
389 * trusted.
387 */ 390 */
388 if (acpi_gbl_FADT.header.revision < FADT2_REVISION_ID) { 391 if (acpi_gbl_FADT.header.length <= ACPI_FADT_V2_SIZE) {
389 acpi_gbl_FADT.preferred_profile = 0; 392 acpi_gbl_FADT.preferred_profile = 0;
390 acpi_gbl_FADT.pstate_control = 0; 393 acpi_gbl_FADT.pstate_control = 0;
391 acpi_gbl_FADT.cst_control = 0; 394 acpi_gbl_FADT.cst_control = 0;
diff --git a/drivers/acpi/acpica/utdecode.c b/drivers/acpi/acpica/utdecode.c
new file mode 100644
index 000000000000..136a814cec69
--- /dev/null
+++ b/drivers/acpi/acpica/utdecode.c
@@ -0,0 +1,548 @@
1/******************************************************************************
2 *
3 * Module Name: utdecode - Utility decoding routines (value-to-string)
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2011, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acnamesp.h"
47
48#define _COMPONENT ACPI_UTILITIES
49ACPI_MODULE_NAME("utdecode")
50
51/*******************************************************************************
52 *
53 * FUNCTION: acpi_format_exception
54 *
55 * PARAMETERS: Status - The acpi_status code to be formatted
56 *
57 * RETURN: A string containing the exception text. A valid pointer is
58 * always returned.
59 *
60 * DESCRIPTION: This function translates an ACPI exception into an ASCII string
61 * It is here instead of utxface.c so it is always present.
62 *
63 ******************************************************************************/
64const char *acpi_format_exception(acpi_status status)
65{
66 const char *exception = NULL;
67
68 ACPI_FUNCTION_ENTRY();
69
70 exception = acpi_ut_validate_exception(status);
71 if (!exception) {
72
73 /* Exception code was not recognized */
74
75 ACPI_ERROR((AE_INFO,
76 "Unknown exception code: 0x%8.8X", status));
77
78 exception = "UNKNOWN_STATUS_CODE";
79 }
80
81 return (ACPI_CAST_PTR(const char, exception));
82}
83
84ACPI_EXPORT_SYMBOL(acpi_format_exception)
85
86/*
87 * Properties of the ACPI Object Types, both internal and external.
88 * The table is indexed by values of acpi_object_type
89 */
90const u8 acpi_gbl_ns_properties[ACPI_NUM_NS_TYPES] = {
91 ACPI_NS_NORMAL, /* 00 Any */
92 ACPI_NS_NORMAL, /* 01 Number */
93 ACPI_NS_NORMAL, /* 02 String */
94 ACPI_NS_NORMAL, /* 03 Buffer */
95 ACPI_NS_NORMAL, /* 04 Package */
96 ACPI_NS_NORMAL, /* 05 field_unit */
97 ACPI_NS_NEWSCOPE, /* 06 Device */
98 ACPI_NS_NORMAL, /* 07 Event */
99 ACPI_NS_NEWSCOPE, /* 08 Method */
100 ACPI_NS_NORMAL, /* 09 Mutex */
101 ACPI_NS_NORMAL, /* 10 Region */
102 ACPI_NS_NEWSCOPE, /* 11 Power */
103 ACPI_NS_NEWSCOPE, /* 12 Processor */
104 ACPI_NS_NEWSCOPE, /* 13 Thermal */
105 ACPI_NS_NORMAL, /* 14 buffer_field */
106 ACPI_NS_NORMAL, /* 15 ddb_handle */
107 ACPI_NS_NORMAL, /* 16 Debug Object */
108 ACPI_NS_NORMAL, /* 17 def_field */
109 ACPI_NS_NORMAL, /* 18 bank_field */
110 ACPI_NS_NORMAL, /* 19 index_field */
111 ACPI_NS_NORMAL, /* 20 Reference */
112 ACPI_NS_NORMAL, /* 21 Alias */
113 ACPI_NS_NORMAL, /* 22 method_alias */
114 ACPI_NS_NORMAL, /* 23 Notify */
115 ACPI_NS_NORMAL, /* 24 Address Handler */
116 ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 25 Resource Desc */
117 ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 26 Resource Field */
118 ACPI_NS_NEWSCOPE, /* 27 Scope */
119 ACPI_NS_NORMAL, /* 28 Extra */
120 ACPI_NS_NORMAL, /* 29 Data */
121 ACPI_NS_NORMAL /* 30 Invalid */
122};
123
124/*******************************************************************************
125 *
126 * FUNCTION: acpi_ut_hex_to_ascii_char
127 *
128 * PARAMETERS: Integer - Contains the hex digit
129 * Position - bit position of the digit within the
130 * integer (multiple of 4)
131 *
132 * RETURN: The converted Ascii character
133 *
134 * DESCRIPTION: Convert a hex digit to an Ascii character
135 *
136 ******************************************************************************/
137
138/* Hex to ASCII conversion table */
139
140static const char acpi_gbl_hex_to_ascii[] = {
141 '0', '1', '2', '3', '4', '5', '6', '7',
142 '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
143};
144
145char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
146{
147
148 return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
149}
150
151/*******************************************************************************
152 *
153 * FUNCTION: acpi_ut_get_region_name
154 *
155 * PARAMETERS: Space ID - ID for the region
156 *
157 * RETURN: Decoded region space_id name
158 *
159 * DESCRIPTION: Translate a Space ID into a name string (Debug only)
160 *
161 ******************************************************************************/
162
163/* Region type decoding */
164
165const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
166 "SystemMemory",
167 "SystemIO",
168 "PCI_Config",
169 "EmbeddedControl",
170 "SMBus",
171 "SystemCMOS",
172 "PCIBARTarget",
173 "IPMI",
174 "DataTable"
175};
176
177char *acpi_ut_get_region_name(u8 space_id)
178{
179
180 if (space_id >= ACPI_USER_REGION_BEGIN) {
181 return ("UserDefinedRegion");
182 } else if (space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
183 return ("FunctionalFixedHW");
184 } else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
185 return ("InvalidSpaceId");
186 }
187
188 return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
189}
190
191/*******************************************************************************
192 *
193 * FUNCTION: acpi_ut_get_event_name
194 *
195 * PARAMETERS: event_id - Fixed event ID
196 *
197 * RETURN: Decoded event ID name
198 *
199 * DESCRIPTION: Translate an Event ID into a name string (Debug only)
200 *
201 ******************************************************************************/
202
203/* Event type decoding */
204
205static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
206 "PM_Timer",
207 "GlobalLock",
208 "PowerButton",
209 "SleepButton",
210 "RealTimeClock",
211};
212
213char *acpi_ut_get_event_name(u32 event_id)
214{
215
216 if (event_id > ACPI_EVENT_MAX) {
217 return ("InvalidEventID");
218 }
219
220 return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
221}
222
223/*******************************************************************************
224 *
225 * FUNCTION: acpi_ut_get_type_name
226 *
227 * PARAMETERS: Type - An ACPI object type
228 *
229 * RETURN: Decoded ACPI object type name
230 *
231 * DESCRIPTION: Translate a Type ID into a name string (Debug only)
232 *
233 ******************************************************************************/
234
235/*
236 * Elements of acpi_gbl_ns_type_names below must match
237 * one-to-one with values of acpi_object_type
238 *
239 * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
240 * when stored in a table it really means that we have thus far seen no
241 * evidence to indicate what type is actually going to be stored for this entry.
242 */
243static const char acpi_gbl_bad_type[] = "UNDEFINED";
244
245/* Printable names of the ACPI object types */
246
247static const char *acpi_gbl_ns_type_names[] = {
248 /* 00 */ "Untyped",
249 /* 01 */ "Integer",
250 /* 02 */ "String",
251 /* 03 */ "Buffer",
252 /* 04 */ "Package",
253 /* 05 */ "FieldUnit",
254 /* 06 */ "Device",
255 /* 07 */ "Event",
256 /* 08 */ "Method",
257 /* 09 */ "Mutex",
258 /* 10 */ "Region",
259 /* 11 */ "Power",
260 /* 12 */ "Processor",
261 /* 13 */ "Thermal",
262 /* 14 */ "BufferField",
263 /* 15 */ "DdbHandle",
264 /* 16 */ "DebugObject",
265 /* 17 */ "RegionField",
266 /* 18 */ "BankField",
267 /* 19 */ "IndexField",
268 /* 20 */ "Reference",
269 /* 21 */ "Alias",
270 /* 22 */ "MethodAlias",
271 /* 23 */ "Notify",
272 /* 24 */ "AddrHandler",
273 /* 25 */ "ResourceDesc",
274 /* 26 */ "ResourceFld",
275 /* 27 */ "Scope",
276 /* 28 */ "Extra",
277 /* 29 */ "Data",
278 /* 30 */ "Invalid"
279};
280
281char *acpi_ut_get_type_name(acpi_object_type type)
282{
283
284 if (type > ACPI_TYPE_INVALID) {
285 return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
286 }
287
288 return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
289}
290
291char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
292{
293
294 if (!obj_desc) {
295 return ("[NULL Object Descriptor]");
296 }
297
298 return (acpi_ut_get_type_name(obj_desc->common.type));
299}
300
301/*******************************************************************************
302 *
303 * FUNCTION: acpi_ut_get_node_name
304 *
305 * PARAMETERS: Object - A namespace node
306 *
307 * RETURN: ASCII name of the node
308 *
309 * DESCRIPTION: Validate the node and return the node's ACPI name.
310 *
311 ******************************************************************************/
312
313char *acpi_ut_get_node_name(void *object)
314{
315 struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
316
317 /* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
318
319 if (!object) {
320 return ("NULL");
321 }
322
323 /* Check for Root node */
324
325 if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
326 return ("\"\\\" ");
327 }
328
329 /* Descriptor must be a namespace node */
330
331 if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
332 return ("####");
333 }
334
335 /*
336 * Ensure name is valid. The name was validated/repaired when the node
337 * was created, but make sure it has not been corrupted.
338 */
339 acpi_ut_repair_name(node->name.ascii);
340
341 /* Return the name */
342
343 return (node->name.ascii);
344}
345
346/*******************************************************************************
347 *
348 * FUNCTION: acpi_ut_get_descriptor_name
349 *
350 * PARAMETERS: Object - An ACPI object
351 *
352 * RETURN: Decoded name of the descriptor type
353 *
354 * DESCRIPTION: Validate object and return the descriptor type
355 *
356 ******************************************************************************/
357
358/* Printable names of object descriptor types */
359
360static const char *acpi_gbl_desc_type_names[] = {
361 /* 00 */ "Not a Descriptor",
362 /* 01 */ "Cached",
363 /* 02 */ "State-Generic",
364 /* 03 */ "State-Update",
365 /* 04 */ "State-Package",
366 /* 05 */ "State-Control",
367 /* 06 */ "State-RootParseScope",
368 /* 07 */ "State-ParseScope",
369 /* 08 */ "State-WalkScope",
370 /* 09 */ "State-Result",
371 /* 10 */ "State-Notify",
372 /* 11 */ "State-Thread",
373 /* 12 */ "Walk",
374 /* 13 */ "Parser",
375 /* 14 */ "Operand",
376 /* 15 */ "Node"
377};
378
379char *acpi_ut_get_descriptor_name(void *object)
380{
381
382 if (!object) {
383 return ("NULL OBJECT");
384 }
385
386 if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
387 return ("Not a Descriptor");
388 }
389
390 return (ACPI_CAST_PTR(char,
391 acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
392 (object)]));
393
394}
395
396/*******************************************************************************
397 *
398 * FUNCTION: acpi_ut_get_reference_name
399 *
400 * PARAMETERS: Object - An ACPI reference object
401 *
402 * RETURN: Decoded name of the type of reference
403 *
404 * DESCRIPTION: Decode a reference object sub-type to a string.
405 *
406 ******************************************************************************/
407
408/* Printable names of reference object sub-types */
409
410static const char *acpi_gbl_ref_class_names[] = {
411 /* 00 */ "Local",
412 /* 01 */ "Argument",
413 /* 02 */ "RefOf",
414 /* 03 */ "Index",
415 /* 04 */ "DdbHandle",
416 /* 05 */ "Named Object",
417 /* 06 */ "Debug"
418};
419
420const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
421{
422
423 if (!object) {
424 return ("NULL Object");
425 }
426
427 if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND) {
428 return ("Not an Operand object");
429 }
430
431 if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE) {
432 return ("Not a Reference object");
433 }
434
435 if (object->reference.class > ACPI_REFCLASS_MAX) {
436 return ("Unknown Reference class");
437 }
438
439 return (acpi_gbl_ref_class_names[object->reference.class]);
440}
441
442#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
443/*
444 * Strings and procedures used for debug only
445 */
446
447/*******************************************************************************
448 *
449 * FUNCTION: acpi_ut_get_mutex_name
450 *
451 * PARAMETERS: mutex_id - The predefined ID for this mutex.
452 *
453 * RETURN: Decoded name of the internal mutex
454 *
455 * DESCRIPTION: Translate a mutex ID into a name string (Debug only)
456 *
457 ******************************************************************************/
458
459/* Names for internal mutex objects, used for debug output */
460
461static char *acpi_gbl_mutex_names[ACPI_NUM_MUTEX] = {
462 "ACPI_MTX_Interpreter",
463 "ACPI_MTX_Namespace",
464 "ACPI_MTX_Tables",
465 "ACPI_MTX_Events",
466 "ACPI_MTX_Caches",
467 "ACPI_MTX_Memory",
468 "ACPI_MTX_CommandComplete",
469 "ACPI_MTX_CommandReady"
470};
471
472char *acpi_ut_get_mutex_name(u32 mutex_id)
473{
474
475 if (mutex_id > ACPI_MAX_MUTEX) {
476 return ("Invalid Mutex ID");
477 }
478
479 return (acpi_gbl_mutex_names[mutex_id]);
480}
481
482/*******************************************************************************
483 *
484 * FUNCTION: acpi_ut_get_notify_name
485 *
486 * PARAMETERS: notify_value - Value from the Notify() request
487 *
488 * RETURN: Decoded name for the notify value
489 *
490 * DESCRIPTION: Translate a Notify Value to a notify namestring.
491 *
492 ******************************************************************************/
493
494/* Names for Notify() values, used for debug output */
495
496static const char *acpi_gbl_notify_value_names[] = {
497 "Bus Check",
498 "Device Check",
499 "Device Wake",
500 "Eject Request",
501 "Device Check Light",
502 "Frequency Mismatch",
503 "Bus Mode Mismatch",
504 "Power Fault",
505 "Capabilities Check",
506 "Device PLD Check",
507 "Reserved",
508 "System Locality Update"
509};
510
511const char *acpi_ut_get_notify_name(u32 notify_value)
512{
513
514 if (notify_value <= ACPI_NOTIFY_MAX) {
515 return (acpi_gbl_notify_value_names[notify_value]);
516 } else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
517 return ("Reserved");
518 } else { /* Greater or equal to 0x80 */
519
520 return ("**Device Specific**");
521 }
522}
523#endif
524
525/*******************************************************************************
526 *
527 * FUNCTION: acpi_ut_valid_object_type
528 *
529 * PARAMETERS: Type - Object type to be validated
530 *
531 * RETURN: TRUE if valid object type, FALSE otherwise
532 *
533 * DESCRIPTION: Validate an object type
534 *
535 ******************************************************************************/
536
537u8 acpi_ut_valid_object_type(acpi_object_type type)
538{
539
540 if (type > ACPI_TYPE_LOCAL_MAX) {
541
542 /* Note: Assumes all TYPEs are contiguous (external/local) */
543
544 return (FALSE);
545 }
546
547 return (TRUE);
548}
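Side note on the notify decoder shown in the new utdecode.c above: it splits the value space into three ranges — table-driven standard values up to ACPI_NOTIFY_MAX, reserved system notifies up to ACPI_MAX_SYS_NOTIFY, and device-specific values from 0x80 upward. A standalone sketch of that three-way split, using illustrative constants (0x0B and 0x7F) in place of the real ACPICA macros:

#include <stdio.h>

/* Illustrative stand-ins for the ACPICA limits; not the real macro values. */
#define DEMO_NOTIFY_MAX      0x0B   /* last entry in the standard name table */
#define DEMO_MAX_SYS_NOTIFY  0x7F   /* highest reserved system notify value */

static const char *demo_notify_name(unsigned int value)
{
	static const char *names[] = {
		"Bus Check", "Device Check", "Device Wake", "Eject Request",
		"Device Check Light", "Frequency Mismatch", "Bus Mode Mismatch",
		"Power Fault", "Capabilities Check", "Device PLD Check",
		"Reserved", "System Locality Update"
	};

	if (value <= DEMO_NOTIFY_MAX)
		return names[value];           /* standard, table-driven */
	if (value <= DEMO_MAX_SYS_NOTIFY)
		return "Reserved";             /* reserved system range */
	return "**Device Specific**";          /* 0x80 and above */
}

int main(void)
{
	printf("%s\n", demo_notify_name(0x02)); /* Device Wake */
	printf("%s\n", demo_notify_name(0x20)); /* Reserved */
	printf("%s\n", demo_notify_name(0x81)); /* **Device Specific** */
	return 0;
}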
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index 97dd9bbf055a..833a38a9c905 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -45,7 +45,6 @@
45 45
46#include <acpi/acpi.h> 46#include <acpi/acpi.h>
47#include "accommon.h" 47#include "accommon.h"
48#include "acnamesp.h"
49 48
50#define _COMPONENT ACPI_UTILITIES 49#define _COMPONENT ACPI_UTILITIES
51ACPI_MODULE_NAME("utglobal") 50ACPI_MODULE_NAME("utglobal")
@@ -107,43 +106,6 @@ const char *acpi_gbl_highest_dstate_names[ACPI_NUM_sx_d_METHODS] = {
107 106
108/******************************************************************************* 107/*******************************************************************************
109 * 108 *
110 * FUNCTION: acpi_format_exception
111 *
112 * PARAMETERS: Status - The acpi_status code to be formatted
113 *
114 * RETURN: A string containing the exception text. A valid pointer is
115 * always returned.
116 *
117 * DESCRIPTION: This function translates an ACPI exception into an ASCII string
118 * It is here instead of utxface.c so it is always present.
119 *
120 ******************************************************************************/
121
122const char *acpi_format_exception(acpi_status status)
123{
124 const char *exception = NULL;
125
126 ACPI_FUNCTION_ENTRY();
127
128 exception = acpi_ut_validate_exception(status);
129 if (!exception) {
130
131 /* Exception code was not recognized */
132
133 ACPI_ERROR((AE_INFO,
134 "Unknown exception code: 0x%8.8X", status));
135
136 exception = "UNKNOWN_STATUS_CODE";
137 dump_stack();
138 }
139
140 return (ACPI_CAST_PTR(const char, exception));
141}
142
143ACPI_EXPORT_SYMBOL(acpi_format_exception)
144
145/*******************************************************************************
146 *
147 * Namespace globals 109 * Namespace globals
148 * 110 *
149 ******************************************************************************/ 111 ******************************************************************************/
@@ -177,71 +139,6 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
177 {NULL, ACPI_TYPE_ANY, NULL} 139 {NULL, ACPI_TYPE_ANY, NULL}
178}; 140};
179 141
180/*
181 * Properties of the ACPI Object Types, both internal and external.
182 * The table is indexed by values of acpi_object_type
183 */
184const u8 acpi_gbl_ns_properties[] = {
185 ACPI_NS_NORMAL, /* 00 Any */
186 ACPI_NS_NORMAL, /* 01 Number */
187 ACPI_NS_NORMAL, /* 02 String */
188 ACPI_NS_NORMAL, /* 03 Buffer */
189 ACPI_NS_NORMAL, /* 04 Package */
190 ACPI_NS_NORMAL, /* 05 field_unit */
191 ACPI_NS_NEWSCOPE, /* 06 Device */
192 ACPI_NS_NORMAL, /* 07 Event */
193 ACPI_NS_NEWSCOPE, /* 08 Method */
194 ACPI_NS_NORMAL, /* 09 Mutex */
195 ACPI_NS_NORMAL, /* 10 Region */
196 ACPI_NS_NEWSCOPE, /* 11 Power */
197 ACPI_NS_NEWSCOPE, /* 12 Processor */
198 ACPI_NS_NEWSCOPE, /* 13 Thermal */
199 ACPI_NS_NORMAL, /* 14 buffer_field */
200 ACPI_NS_NORMAL, /* 15 ddb_handle */
201 ACPI_NS_NORMAL, /* 16 Debug Object */
202 ACPI_NS_NORMAL, /* 17 def_field */
203 ACPI_NS_NORMAL, /* 18 bank_field */
204 ACPI_NS_NORMAL, /* 19 index_field */
205 ACPI_NS_NORMAL, /* 20 Reference */
206 ACPI_NS_NORMAL, /* 21 Alias */
207 ACPI_NS_NORMAL, /* 22 method_alias */
208 ACPI_NS_NORMAL, /* 23 Notify */
209 ACPI_NS_NORMAL, /* 24 Address Handler */
210 ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 25 Resource Desc */
211 ACPI_NS_NEWSCOPE | ACPI_NS_LOCAL, /* 26 Resource Field */
212 ACPI_NS_NEWSCOPE, /* 27 Scope */
213 ACPI_NS_NORMAL, /* 28 Extra */
214 ACPI_NS_NORMAL, /* 29 Data */
215 ACPI_NS_NORMAL /* 30 Invalid */
216};
217
218/* Hex to ASCII conversion table */
219
220static const char acpi_gbl_hex_to_ascii[] = {
221 '0', '1', '2', '3', '4', '5', '6', '7',
222 '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
223};
224
225/*******************************************************************************
226 *
227 * FUNCTION: acpi_ut_hex_to_ascii_char
228 *
229 * PARAMETERS: Integer - Contains the hex digit
230 * Position - bit position of the digit within the
231 * integer (multiple of 4)
232 *
233 * RETURN: The converted Ascii character
234 *
235 * DESCRIPTION: Convert a hex digit to an Ascii character
236 *
237 ******************************************************************************/
238
239char acpi_ut_hex_to_ascii_char(u64 integer, u32 position)
240{
241
242 return (acpi_gbl_hex_to_ascii[(integer >> position) & 0xF]);
243}
244
245/****************************************************************************** 142/******************************************************************************
246 * 143 *
247 * Event and Hardware globals 144 * Event and Hardware globals
@@ -341,386 +238,6 @@ struct acpi_fixed_event_info acpi_gbl_fixed_event_info[ACPI_NUM_FIXED_EVENTS] =
341 238
342/******************************************************************************* 239/*******************************************************************************
343 * 240 *
344 * FUNCTION: acpi_ut_get_region_name
345 *
346 * PARAMETERS: None.
347 *
348 * RETURN: Status
349 *
350 * DESCRIPTION: Translate a Space ID into a name string (Debug only)
351 *
352 ******************************************************************************/
353
354/* Region type decoding */
355
356const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
357 "SystemMemory",
358 "SystemIO",
359 "PCI_Config",
360 "EmbeddedControl",
361 "SMBus",
362 "SystemCMOS",
363 "PCIBARTarget",
364 "IPMI",
365 "DataTable"
366};
367
368char *acpi_ut_get_region_name(u8 space_id)
369{
370
371 if (space_id >= ACPI_USER_REGION_BEGIN) {
372 return ("UserDefinedRegion");
373 } else if (space_id >= ACPI_NUM_PREDEFINED_REGIONS) {
374 return ("InvalidSpaceId");
375 }
376
377 return (ACPI_CAST_PTR(char, acpi_gbl_region_types[space_id]));
378}
379
380/*******************************************************************************
381 *
382 * FUNCTION: acpi_ut_get_event_name
383 *
384 * PARAMETERS: None.
385 *
386 * RETURN: Status
387 *
388 * DESCRIPTION: Translate a Event ID into a name string (Debug only)
389 *
390 ******************************************************************************/
391
392/* Event type decoding */
393
394static const char *acpi_gbl_event_types[ACPI_NUM_FIXED_EVENTS] = {
395 "PM_Timer",
396 "GlobalLock",
397 "PowerButton",
398 "SleepButton",
399 "RealTimeClock",
400};
401
402char *acpi_ut_get_event_name(u32 event_id)
403{
404
405 if (event_id > ACPI_EVENT_MAX) {
406 return ("InvalidEventID");
407 }
408
409 return (ACPI_CAST_PTR(char, acpi_gbl_event_types[event_id]));
410}
411
412/*******************************************************************************
413 *
414 * FUNCTION: acpi_ut_get_type_name
415 *
416 * PARAMETERS: None.
417 *
418 * RETURN: Status
419 *
420 * DESCRIPTION: Translate a Type ID into a name string (Debug only)
421 *
422 ******************************************************************************/
423
424/*
425 * Elements of acpi_gbl_ns_type_names below must match
426 * one-to-one with values of acpi_object_type
427 *
428 * The type ACPI_TYPE_ANY (Untyped) is used as a "don't care" when searching;
429 * when stored in a table it really means that we have thus far seen no
430 * evidence to indicate what type is actually going to be stored for this entry.
431 */
432static const char acpi_gbl_bad_type[] = "UNDEFINED";
433
434/* Printable names of the ACPI object types */
435
436static const char *acpi_gbl_ns_type_names[] = {
437 /* 00 */ "Untyped",
438 /* 01 */ "Integer",
439 /* 02 */ "String",
440 /* 03 */ "Buffer",
441 /* 04 */ "Package",
442 /* 05 */ "FieldUnit",
443 /* 06 */ "Device",
444 /* 07 */ "Event",
445 /* 08 */ "Method",
446 /* 09 */ "Mutex",
447 /* 10 */ "Region",
448 /* 11 */ "Power",
449 /* 12 */ "Processor",
450 /* 13 */ "Thermal",
451 /* 14 */ "BufferField",
452 /* 15 */ "DdbHandle",
453 /* 16 */ "DebugObject",
454 /* 17 */ "RegionField",
455 /* 18 */ "BankField",
456 /* 19 */ "IndexField",
457 /* 20 */ "Reference",
458 /* 21 */ "Alias",
459 /* 22 */ "MethodAlias",
460 /* 23 */ "Notify",
461 /* 24 */ "AddrHandler",
462 /* 25 */ "ResourceDesc",
463 /* 26 */ "ResourceFld",
464 /* 27 */ "Scope",
465 /* 28 */ "Extra",
466 /* 29 */ "Data",
467 /* 30 */ "Invalid"
468};
469
470char *acpi_ut_get_type_name(acpi_object_type type)
471{
472
473 if (type > ACPI_TYPE_INVALID) {
474 return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
475 }
476
477 return (ACPI_CAST_PTR(char, acpi_gbl_ns_type_names[type]));
478}
479
480char *acpi_ut_get_object_type_name(union acpi_operand_object *obj_desc)
481{
482
483 if (!obj_desc) {
484 return ("[NULL Object Descriptor]");
485 }
486
487 return (acpi_ut_get_type_name(obj_desc->common.type));
488}
489
490/*******************************************************************************
491 *
492 * FUNCTION: acpi_ut_get_node_name
493 *
494 * PARAMETERS: Object - A namespace node
495 *
496 * RETURN: Pointer to a string
497 *
498 * DESCRIPTION: Validate the node and return the node's ACPI name.
499 *
500 ******************************************************************************/
501
502char *acpi_ut_get_node_name(void *object)
503{
504 struct acpi_namespace_node *node = (struct acpi_namespace_node *)object;
505
506 /* Must return a string of exactly 4 characters == ACPI_NAME_SIZE */
507
508 if (!object) {
509 return ("NULL");
510 }
511
512 /* Check for Root node */
513
514 if ((object == ACPI_ROOT_OBJECT) || (object == acpi_gbl_root_node)) {
515 return ("\"\\\" ");
516 }
517
518 /* Descriptor must be a namespace node */
519
520 if (ACPI_GET_DESCRIPTOR_TYPE(node) != ACPI_DESC_TYPE_NAMED) {
521 return ("####");
522 }
523
524 /* Name must be a valid ACPI name */
525
526 if (!acpi_ut_valid_acpi_name(node->name.integer)) {
527 node->name.integer = acpi_ut_repair_name(node->name.ascii);
528 }
529
530 /* Return the name */
531
532 return (node->name.ascii);
533}
534
535/*******************************************************************************
536 *
537 * FUNCTION: acpi_ut_get_descriptor_name
538 *
539 * PARAMETERS: Object - An ACPI object
540 *
541 * RETURN: Pointer to a string
542 *
543 * DESCRIPTION: Validate object and return the descriptor type
544 *
545 ******************************************************************************/
546
547/* Printable names of object descriptor types */
548
549static const char *acpi_gbl_desc_type_names[] = {
550 /* 00 */ "Invalid",
551 /* 01 */ "Cached",
552 /* 02 */ "State-Generic",
553 /* 03 */ "State-Update",
554 /* 04 */ "State-Package",
555 /* 05 */ "State-Control",
556 /* 06 */ "State-RootParseScope",
557 /* 07 */ "State-ParseScope",
558 /* 08 */ "State-WalkScope",
559 /* 09 */ "State-Result",
560 /* 10 */ "State-Notify",
561 /* 11 */ "State-Thread",
562 /* 12 */ "Walk",
563 /* 13 */ "Parser",
564 /* 14 */ "Operand",
565 /* 15 */ "Node"
566};
567
568char *acpi_ut_get_descriptor_name(void *object)
569{
570
571 if (!object) {
572 return ("NULL OBJECT");
573 }
574
575 if (ACPI_GET_DESCRIPTOR_TYPE(object) > ACPI_DESC_TYPE_MAX) {
576 return (ACPI_CAST_PTR(char, acpi_gbl_bad_type));
577 }
578
579 return (ACPI_CAST_PTR(char,
580 acpi_gbl_desc_type_names[ACPI_GET_DESCRIPTOR_TYPE
581 (object)]));
582
583}
584
585/*******************************************************************************
586 *
587 * FUNCTION: acpi_ut_get_reference_name
588 *
589 * PARAMETERS: Object - An ACPI reference object
590 *
591 * RETURN: Pointer to a string
592 *
593 * DESCRIPTION: Decode a reference object sub-type to a string.
594 *
595 ******************************************************************************/
596
597/* Printable names of reference object sub-types */
598
599static const char *acpi_gbl_ref_class_names[] = {
600 /* 00 */ "Local",
601 /* 01 */ "Argument",
602 /* 02 */ "RefOf",
603 /* 03 */ "Index",
604 /* 04 */ "DdbHandle",
605 /* 05 */ "Named Object",
606 /* 06 */ "Debug"
607};
608
609const char *acpi_ut_get_reference_name(union acpi_operand_object *object)
610{
611 if (!object)
612 return "NULL Object";
613
614 if (ACPI_GET_DESCRIPTOR_TYPE(object) != ACPI_DESC_TYPE_OPERAND)
615 return "Not an Operand object";
616
617 if (object->common.type != ACPI_TYPE_LOCAL_REFERENCE)
618 return "Not a Reference object";
619
620 if (object->reference.class > ACPI_REFCLASS_MAX)
621 return "Unknown Reference class";
622
623 return acpi_gbl_ref_class_names[object->reference.class];
624}
625
626#if defined(ACPI_DEBUG_OUTPUT) || defined(ACPI_DEBUGGER)
627/*
628 * Strings and procedures used for debug only
629 */
630
631/*******************************************************************************
632 *
633 * FUNCTION: acpi_ut_get_mutex_name
634 *
635 * PARAMETERS: mutex_id - The predefined ID for this mutex.
636 *
637 * RETURN: String containing the name of the mutex. Always returns a valid
638 * pointer.
639 *
640 * DESCRIPTION: Translate a mutex ID into a name string (Debug only)
641 *
642 ******************************************************************************/
643
644char *acpi_ut_get_mutex_name(u32 mutex_id)
645{
646
647 if (mutex_id > ACPI_MAX_MUTEX) {
648 return ("Invalid Mutex ID");
649 }
650
651 return (acpi_gbl_mutex_names[mutex_id]);
652}
653
654/*******************************************************************************
655 *
656 * FUNCTION: acpi_ut_get_notify_name
657 *
658 * PARAMETERS: notify_value - Value from the Notify() request
659 *
660 * RETURN: String corresponding to the Notify Value.
661 *
662 * DESCRIPTION: Translate a Notify Value to a notify namestring.
663 *
664 ******************************************************************************/
665
666/* Names for Notify() values, used for debug output */
667
668static const char *acpi_gbl_notify_value_names[] = {
669 "Bus Check",
670 "Device Check",
671 "Device Wake",
672 "Eject Request",
673 "Device Check Light",
674 "Frequency Mismatch",
675 "Bus Mode Mismatch",
676 "Power Fault",
677 "Capabilities Check",
678 "Device PLD Check",
679 "Reserved",
680 "System Locality Update"
681};
682
683const char *acpi_ut_get_notify_name(u32 notify_value)
684{
685
686 if (notify_value <= ACPI_NOTIFY_MAX) {
687 return (acpi_gbl_notify_value_names[notify_value]);
688 } else if (notify_value <= ACPI_MAX_SYS_NOTIFY) {
689 return ("Reserved");
690 } else { /* Greater or equal to 0x80 */
691
692 return ("**Device Specific**");
693 }
694}
695#endif
696
697/*******************************************************************************
698 *
699 * FUNCTION: acpi_ut_valid_object_type
700 *
701 * PARAMETERS: Type - Object type to be validated
702 *
703 * RETURN: TRUE if valid object type, FALSE otherwise
704 *
705 * DESCRIPTION: Validate an object type
706 *
707 ******************************************************************************/
708
709u8 acpi_ut_valid_object_type(acpi_object_type type)
710{
711
712 if (type > ACPI_TYPE_LOCAL_MAX) {
713
714 /* Note: Assumes all TYPEs are contiguous (external/local) */
715
716 return (FALSE);
717 }
718
719 return (TRUE);
720}
721
722/*******************************************************************************
723 *
724 * FUNCTION: acpi_ut_init_globals 241 * FUNCTION: acpi_ut_init_globals
725 * 242 *
726 * PARAMETERS: None 243 * PARAMETERS: None
@@ -806,6 +323,7 @@ acpi_status acpi_ut_init_globals(void)
806 acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT; 323 acpi_gbl_db_output_flags = ACPI_DB_CONSOLE_OUTPUT;
807 acpi_gbl_osi_data = 0; 324 acpi_gbl_osi_data = 0;
808 acpi_gbl_osi_mutex = NULL; 325 acpi_gbl_osi_mutex = NULL;
326 acpi_gbl_reg_methods_executed = FALSE;
809 327
810 /* Hardware oriented */ 328 /* Hardware oriented */
811 329
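The hex-to-ASCII helper dropped from utglobal.c above extracts one nibble at a bit position that is a multiple of 4 and indexes a 16-entry character table. A small standalone illustration of the same indexing with a worked value (the helper name is hypothetical, not the ACPICA one):

#include <stdio.h>
#include <stdint.h>

static const char hex_chars[] = "0123456789ABCDEF";

/* Return the ASCII hex digit at the given bit position (multiple of 4). */
static char hex_digit(uint64_t value, unsigned int position)
{
	return hex_chars[(value >> position) & 0xF];
}

int main(void)
{
	uint64_t value = 0x1A2B;
	unsigned int pos;

	/* Position 0 is the least significant nibble: prints B, 2, A, 1. */
	for (pos = 0; pos < 16; pos += 4)
		printf("bit position %2u -> %c\n", pos, hex_digit(value, pos));
	return 0;
}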
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index b9ba04fc2b34..77fc76f8aea9 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g,
3281 struct block_device *bdev = opened_bdev[cnt]; 3281 struct block_device *bdev = opened_bdev[cnt];
3282 if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) 3282 if (!bdev || ITYPE(drive_state[cnt].fd_device) != type)
3283 continue; 3283 continue;
3284 __invalidate_device(bdev); 3284 __invalidate_device(bdev, true);
3285 } 3285 }
3286 mutex_unlock(&open_lock); 3286 mutex_unlock(&open_lock);
3287 } else { 3287 } else {
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index c195bfeade11..5feebe2800e9 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -130,6 +130,7 @@
130#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) 130#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
131 131
132#define I915_IFPADDR 0x60 132#define I915_IFPADDR 0x60
133#define I830_HIC 0x70
133 134
134/* Intel 965G registers */ 135/* Intel 965G registers */
135#define I965_MSAC 0x62 136#define I965_MSAC 0x62
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index fab3d3265adb..0d09b537bb9a 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -21,6 +21,7 @@
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/pagemap.h> 22#include <linux/pagemap.h>
23#include <linux/agp_backend.h> 23#include <linux/agp_backend.h>
24#include <linux/delay.h>
24#include <asm/smp.h> 25#include <asm/smp.h>
25#include "agp.h" 26#include "agp.h"
26#include "intel-agp.h" 27#include "intel-agp.h"
@@ -70,12 +71,8 @@ static struct _intel_private {
70 u32 __iomem *gtt; /* I915G */ 71 u32 __iomem *gtt; /* I915G */
71 bool clear_fake_agp; /* on first access via agp, fill with scratch */ 72 bool clear_fake_agp; /* on first access via agp, fill with scratch */
72 int num_dcache_entries; 73 int num_dcache_entries;
73 union { 74 void __iomem *i9xx_flush_page;
74 void __iomem *i9xx_flush_page;
75 void *i8xx_flush_page;
76 };
77 char *i81x_gtt_table; 75 char *i81x_gtt_table;
78 struct page *i8xx_page;
79 struct resource ifp_resource; 76 struct resource ifp_resource;
80 int resource_valid; 77 int resource_valid;
81 struct page *scratch_page; 78 struct page *scratch_page;
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void)
722 719
723static void i830_cleanup(void) 720static void i830_cleanup(void)
724{ 721{
725 if (intel_private.i8xx_flush_page) {
726 kunmap(intel_private.i8xx_flush_page);
727 intel_private.i8xx_flush_page = NULL;
728 }
729
730 __free_page(intel_private.i8xx_page);
731 intel_private.i8xx_page = NULL;
732}
733
734static void intel_i830_setup_flush(void)
735{
736 /* return if we've already set the flush mechanism up */
737 if (intel_private.i8xx_page)
738 return;
739
740 intel_private.i8xx_page = alloc_page(GFP_KERNEL);
741 if (!intel_private.i8xx_page)
742 return;
743
744 intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
745 if (!intel_private.i8xx_flush_page)
746 i830_cleanup();
747} 722}
748 723
749/* The chipset_flush interface needs to get data that has already been 724/* The chipset_flush interface needs to get data that has already been
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void)
758 */ 733 */
759static void i830_chipset_flush(void) 734static void i830_chipset_flush(void)
760{ 735{
761 unsigned int *pg = intel_private.i8xx_flush_page; 736 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
737
738 /* Forcibly evict everything from the CPU write buffers.
739 * clflush appears to be insufficient.
740 */
741 wbinvd_on_all_cpus();
742
743 /* Now we've only seen documents for this magic bit on 855GM,
744 * we hope it exists for the other gen2 chipsets...
745 *
746 * Also works as advertised on my 845G.
747 */
748 writel(readl(intel_private.registers+I830_HIC) | (1<<31),
749 intel_private.registers+I830_HIC);
762 750
763 memset(pg, 0, 1024); 751 while (readl(intel_private.registers+I830_HIC) & (1<<31)) {
752 if (time_after(jiffies, timeout))
753 break;
764 754
765 if (cpu_has_clflush) 755 udelay(50);
766 clflush_cache_range(pg, 1024); 756 }
767 else if (wbinvd_on_all_cpus() != 0)
768 printk(KERN_ERR "Timed out waiting for cache flush.\n");
769} 757}
770 758
771static void i830_write_entry(dma_addr_t addr, unsigned int entry, 759static void i830_write_entry(dma_addr_t addr, unsigned int entry,
@@ -849,8 +837,6 @@ static int i830_setup(void)
849 837
850 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; 838 intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
851 839
852 intel_i830_setup_flush();
853
854 return 0; 840 return 0;
855} 841}
856 842
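The reworked i830 flush above sets a self-clearing bit in the I830_HIC register and then polls it with a bounded wait: a jiffies-based 1 s deadline with 50 µs delays between reads, so a wedged chipset can never hang the caller. A userspace sketch of the same wait-with-deadline pattern, with a hypothetical flush_still_pending() standing in for the register read:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical status check standing in for readl() on the flush register. */
static bool flush_still_pending(void)
{
	static int reads_left = 5;   /* pretend the hardware finishes after a few polls */
	return --reads_left > 0;
}

/* Poll until the operation completes or the deadline passes; never spin forever. */
static bool wait_for_flush(double timeout_seconds)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (flush_still_pending()) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		double elapsed = (now.tv_sec - start.tv_sec) +
				 (now.tv_nsec - start.tv_nsec) / 1e9;
		if (elapsed > timeout_seconds)
			return false;   /* give up, mirroring the time_after() break */
		nanosleep(&(struct timespec){ .tv_nsec = 50 * 1000 }, NULL); /* ~50 us */
	}
	return true;
}

int main(void)
{
	printf("flush %s\n", wait_for_flush(1.0) ? "completed" : "timed out");
	return 0;
}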
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
index 36e0fa161c2b..1f46f1cd9225 100644
--- a/drivers/char/tpm/tpm.c
+++ b/drivers/char/tpm/tpm.c
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip,
364 tpm_protected_ordinal_duration[ordinal & 364 tpm_protected_ordinal_duration[ordinal &
365 TPM_PROTECTED_ORDINAL_MASK]; 365 TPM_PROTECTED_ORDINAL_MASK];
366 366
367 if (duration_idx != TPM_UNDEFINED) { 367 if (duration_idx != TPM_UNDEFINED)
368 duration = chip->vendor.duration[duration_idx]; 368 duration = chip->vendor.duration[duration_idx];
369 /* if duration is 0, it's because chip->vendor.duration wasn't */ 369 if (duration <= 0)
370 /* filled yet, so we set the lowest timeout just to give enough */
371 /* time for tpm_get_timeouts() to succeed */
372 return (duration <= 0 ? HZ : duration);
373 } else
374 return 2 * 60 * HZ; 370 return 2 * 60 * HZ;
371 else
372 return duration;
375} 373}
376EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); 374EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration);
377 375
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 53120a72a48c..28d1d3c24d65 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1012,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data,
1012 struct drm_file *file_priv) 1012 struct drm_file *file_priv)
1013{ 1013{
1014 struct drm_modeset_ctl *modeset = data; 1014 struct drm_modeset_ctl *modeset = data;
1015 int crtc, ret = 0; 1015 int ret = 0;
1016 unsigned int crtc;
1016 1017
1017 /* If drm_vblank_init() hasn't been called yet, just no-op */ 1018 /* If drm_vblank_init() hasn't been called yet, just no-op */
1018 if (!dev->num_crtcs) 1019 if (!dev->num_crtcs)
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 22a32b9932c5..79a04fde69b5 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
184static bool 184static bool
185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) 185i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
186{ 186{
187 int tile_width; 187 int tile_width, tile_height;
188 188
189 /* Linear is always fine */ 189 /* Linear is always fine */
190 if (tiling_mode == I915_TILING_NONE) 190 if (tiling_mode == I915_TILING_NONE)
@@ -215,6 +215,20 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
215 } 215 }
216 } 216 }
217 217
218 if (IS_GEN2(dev) ||
219 (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
220 tile_height = 32;
221 else
222 tile_height = 8;
223 /* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even
224 * number of tile rows. */
225 if (IS_GEN2(dev))
226 tile_height *= 2;
227
228 /* Size needs to be aligned to a full tile row */
229 if (size & (tile_height * stride - 1))
230 return false;
231
218 /* 965+ just needs multiples of tile width */ 232 /* 965+ just needs multiples of tile width */
219 if (INTEL_INFO(dev)->gen >= 4) { 233 if (INTEL_INFO(dev)->gen >= 4) {
220 if (stride & (tile_width - 1)) 234 if (stride & (tile_width - 1))
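The new tiling check above requires the object size to cover whole tile rows (tile_height * stride), where tile_height is 32 for gen2 or 128-byte Y tiling and 8 otherwise, doubled again on gen2 for the interleaved rows. A standalone sketch of the same power-of-two alignment test with a worked example (the function name is illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

/* Size must be a multiple of one tile row; tile_height * stride is a power of two here. */
static bool size_is_tile_row_aligned(unsigned int size, unsigned int stride,
				     unsigned int tile_height)
{
	unsigned int row = tile_height * stride;

	return (size & (row - 1)) == 0;
}

int main(void)
{
	/* Assumed case: X tiling on a gen4-style part, tile_height 8, stride 2048 -> 16 KiB rows. */
	printf("%d\n", size_is_tile_row_aligned(3 * 16384, 2048, 8)); /* 1: exactly 3 rows */
	printf("%d\n", size_is_tile_row_aligned(40000, 2048, 8));     /* 0: ends mid-row */
	return 0;
}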
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 97f946dcc1aa..8a9e08bf1cf7 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work)
316 struct drm_mode_config *mode_config = &dev->mode_config; 316 struct drm_mode_config *mode_config = &dev->mode_config;
317 struct intel_encoder *encoder; 317 struct intel_encoder *encoder;
318 318
319 DRM_DEBUG_KMS("running encoder hotplug functions\n");
320
319 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) 321 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
320 if (encoder->hot_plug) 322 if (encoder->hot_plug)
321 encoder->hot_plug(encoder); 323 encoder->hot_plug(encoder);
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
1649 } else { 1651 } else {
1650 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | 1652 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
1651 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; 1653 SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
1652 hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; 1654 hotplug_mask |= SDE_AUX_MASK;
1653 I915_WRITE(FDI_RXA_IMR, 0);
1654 I915_WRITE(FDI_RXB_IMR, 0);
1655 } 1655 }
1656 1656
1657 dev_priv->pch_irq_mask = ~hotplug_mask; 1657 dev_priv->pch_irq_mask = ~hotplug_mask;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3b006536b3d2..e79b25bbee6c 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1630 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; 1630 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
1631 1631
1632 wait_event(dev_priv->pending_flip_queue, 1632 wait_event(dev_priv->pending_flip_queue,
1633 atomic_read(&dev_priv->mm.wedged) ||
1633 atomic_read(&obj->pending_flip) == 0); 1634 atomic_read(&obj->pending_flip) == 0);
1634 1635
1635 /* Big Hammer, we also need to ensure that any pending 1636 /* Big Hammer, we also need to ensure that any pending
1636 * MI_WAIT_FOR_EVENT inside a user batch buffer on the 1637 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
1637 * current scanout is retired before unpinning the old 1638 * current scanout is retired before unpinning the old
1638 * framebuffer. 1639 * framebuffer.
1640 *
1641 * This should only fail upon a hung GPU, in which case we
1642 * can safely continue.
1639 */ 1643 */
1640 ret = i915_gem_object_flush_gpu(obj, false); 1644 ret = i915_gem_object_flush_gpu(obj, false);
1641 if (ret) { 1645 (void) ret;
1642 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
1643 mutex_unlock(&dev->struct_mutex);
1644 return ret;
1645 }
1646 } 1646 }
1647 1647
1648 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, 1648 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2045 atomic_read(&obj->pending_flip) == 0); 2045 atomic_read(&obj->pending_flip) == 0);
2046} 2046}
2047 2047
2048static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2049{
2050 struct drm_device *dev = crtc->dev;
2051 struct drm_mode_config *mode_config = &dev->mode_config;
2052 struct intel_encoder *encoder;
2053
2054 /*
2055 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2056 * must be driven by its own crtc; no sharing is possible.
2057 */
2058 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2059 if (encoder->base.crtc != crtc)
2060 continue;
2061
2062 switch (encoder->type) {
2063 case INTEL_OUTPUT_EDP:
2064 if (!intel_encoder_is_pch_edp(&encoder->base))
2065 return false;
2066 continue;
2067 }
2068 }
2069
2070 return true;
2071}
2072
2048static void ironlake_crtc_enable(struct drm_crtc *crtc) 2073static void ironlake_crtc_enable(struct drm_crtc *crtc)
2049{ 2074{
2050 struct drm_device *dev = crtc->dev; 2075 struct drm_device *dev = crtc->dev;
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2053 int pipe = intel_crtc->pipe; 2078 int pipe = intel_crtc->pipe;
2054 int plane = intel_crtc->plane; 2079 int plane = intel_crtc->plane;
2055 u32 reg, temp; 2080 u32 reg, temp;
2081 bool is_pch_port = false;
2056 2082
2057 if (intel_crtc->active) 2083 if (intel_crtc->active)
2058 return; 2084 return;
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2066 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); 2092 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
2067 } 2093 }
2068 2094
2069 ironlake_fdi_enable(crtc); 2095 is_pch_port = intel_crtc_driving_pch(crtc);
2096
2097 if (is_pch_port)
2098 ironlake_fdi_enable(crtc);
2099 else {
2100 /* disable CPU FDI tx and PCH FDI rx */
2101 reg = FDI_TX_CTL(pipe);
2102 temp = I915_READ(reg);
2103 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2104 POSTING_READ(reg);
2105
2106 reg = FDI_RX_CTL(pipe);
2107 temp = I915_READ(reg);
2108 temp &= ~(0x7 << 16);
2109 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2110 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2111
2112 POSTING_READ(reg);
2113 udelay(100);
2114
2115 /* Ironlake workaround, disable clock pointer after downing FDI */
2116 if (HAS_PCH_IBX(dev))
2117 I915_WRITE(FDI_RX_CHICKEN(pipe),
2118 I915_READ(FDI_RX_CHICKEN(pipe) &
2119 ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
2120
2121 /* still set train pattern 1 */
2122 reg = FDI_TX_CTL(pipe);
2123 temp = I915_READ(reg);
2124 temp &= ~FDI_LINK_TRAIN_NONE;
2125 temp |= FDI_LINK_TRAIN_PATTERN_1;
2126 I915_WRITE(reg, temp);
2127
2128 reg = FDI_RX_CTL(pipe);
2129 temp = I915_READ(reg);
2130 if (HAS_PCH_CPT(dev)) {
2131 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2132 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2133 } else {
2134 temp &= ~FDI_LINK_TRAIN_NONE;
2135 temp |= FDI_LINK_TRAIN_PATTERN_1;
2136 }
2137 /* BPC in FDI rx is consistent with that in PIPECONF */
2138 temp &= ~(0x07 << 16);
2139 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2140 I915_WRITE(reg, temp);
2141
2142 POSTING_READ(reg);
2143 udelay(100);
2144 }
2070 2145
2071 /* Enable panel fitting for LVDS */ 2146 /* Enable panel fitting for LVDS */
2072 if (dev_priv->pch_pf_size && 2147 if (dev_priv->pch_pf_size &&
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2100 intel_flush_display_plane(dev, plane); 2175 intel_flush_display_plane(dev, plane);
2101 } 2176 }
2102 2177
2178 /* Skip the PCH stuff if possible */
2179 if (!is_pch_port)
2180 goto done;
2181
2103 /* For PCH output, training FDI link */ 2182 /* For PCH output, training FDI link */
2104 if (IS_GEN6(dev)) 2183 if (IS_GEN6(dev))
2105 gen6_fdi_link_train(crtc); 2184 gen6_fdi_link_train(crtc);
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2184 I915_WRITE(reg, temp | TRANS_ENABLE); 2263 I915_WRITE(reg, temp | TRANS_ENABLE);
2185 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) 2264 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
2186 DRM_ERROR("failed to enable transcoder %d\n", pipe); 2265 DRM_ERROR("failed to enable transcoder %d\n", pipe);
2187 2266done:
2188 intel_crtc_load_lut(crtc); 2267 intel_crtc_load_lut(crtc);
2189 intel_update_fbc(dev); 2268 intel_update_fbc(dev);
2190 intel_crtc_update_cursor(crtc, true); 2269 intel_crtc_update_cursor(crtc, true);
@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev)
6496 POSTING_READ(RSTDBYCTL); 6575 POSTING_READ(RSTDBYCTL);
6497 } 6576 }
6498 6577
6499 ironlake_disable_rc6(dev); 6578 ironlake_teardown_rc6(dev);
6500} 6579}
6501 6580
6502static int ironlake_setup_rc6(struct drm_device *dev) 6581static int ironlake_setup_rc6(struct drm_device *dev)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index d38a4d9f9b0b..a52184007f5f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
49 DRM_ERROR("bo %p still attached to GEM object\n", bo); 49 DRM_ERROR("bo %p still attached to GEM object\n", bo);
50 50
51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL); 51 nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
52 nouveau_vm_put(&nvbo->vma); 52 if (nvbo->vma.node) {
53 nouveau_vm_unmap(&nvbo->vma);
54 nouveau_vm_put(&nvbo->vma);
55 }
53 kfree(nvbo); 56 kfree(nvbo);
54} 57}
55 58
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c
index 86d822aa9bbf..d46c0c758ddf 100644
--- a/drivers/hwmon/ad7414.c
+++ b/drivers/hwmon/ad7414.c
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = {
242 { "ad7414", 0 }, 242 { "ad7414", 0 },
243 {} 243 {}
244}; 244};
245MODULE_DEVICE_TABLE(i2c, ad7414_id);
245 246
246static struct i2c_driver ad7414_driver = { 247static struct i2c_driver ad7414_driver = {
247 .driver = { 248 .driver = {
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
index f13c843a2964..5cc3e3784b42 100644
--- a/drivers/hwmon/adt7411.c
+++ b/drivers/hwmon/adt7411.c
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = {
334 { "adt7411", 0 }, 334 { "adt7411", 0 },
335 { } 335 { }
336}; 336};
337MODULE_DEVICE_TABLE(i2c, adt7411_id);
337 338
338static struct i2c_driver adt7411_driver = { 339static struct i2c_driver adt7411_driver = {
339 .driver = { 340 .driver = {
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 8a2f767f26d8..0ed7f6bc2a7f 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev)
216 216
217 if (md_check_no_bitmap(mddev)) 217 if (md_check_no_bitmap(mddev))
218 return -EINVAL; 218 return -EINVAL;
219 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
220 conf = linear_conf(mddev, mddev->raid_disks); 219 conf = linear_conf(mddev, mddev->raid_disks);
221 220
222 if (!conf) 221 if (!conf)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 0cc30ecda4c1..818313e277e7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit)
553{ 553{
554 mddev_t *mddev, *new = NULL; 554 mddev_t *mddev, *new = NULL;
555 555
556 if (unit && MAJOR(unit) != MD_MAJOR)
557 unit &= ~((1<<MdpMinorShift)-1);
558
556 retry: 559 retry:
557 spin_lock(&all_mddevs_lock); 560 spin_lock(&all_mddevs_lock);
558 561
@@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len)
4138 } 4141 }
4139 4142
4140 mddev->array_sectors = sectors; 4143 mddev->array_sectors = sectors;
4141 set_capacity(mddev->gendisk, mddev->array_sectors); 4144 if (mddev->pers) {
4142 if (mddev->pers) 4145 set_capacity(mddev->gendisk, mddev->array_sectors);
4143 revalidate_disk(mddev->gendisk); 4146 revalidate_disk(mddev->gendisk);
4144 4147 }
4145 return len; 4148 return len;
4146} 4149}
4147 4150
@@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev)
4624 } 4627 }
4625 set_capacity(mddev->gendisk, mddev->array_sectors); 4628 set_capacity(mddev->gendisk, mddev->array_sectors);
4626 revalidate_disk(mddev->gendisk); 4629 revalidate_disk(mddev->gendisk);
4630 mddev->changed = 1;
4627 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4631 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
4628out: 4632out:
4629 return err; 4633 return err;
@@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev)
4712 mddev->sync_speed_min = mddev->sync_speed_max = 0; 4716 mddev->sync_speed_min = mddev->sync_speed_max = 0;
4713 mddev->recovery = 0; 4717 mddev->recovery = 0;
4714 mddev->in_sync = 0; 4718 mddev->in_sync = 0;
4719 mddev->changed = 0;
4715 mddev->degraded = 0; 4720 mddev->degraded = 0;
4716 mddev->safemode = 0; 4721 mddev->safemode = 0;
4717 mddev->bitmap_info.offset = 0; 4722 mddev->bitmap_info.offset = 0;
@@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open)
4827 4832
4828 set_capacity(disk, 0); 4833 set_capacity(disk, 0);
4829 mutex_unlock(&mddev->open_mutex); 4834 mutex_unlock(&mddev->open_mutex);
4835 mddev->changed = 1;
4830 revalidate_disk(disk); 4836 revalidate_disk(disk);
4831 4837
4832 if (mddev->ro) 4838 if (mddev->ro)
@@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode)
6011 atomic_inc(&mddev->openers); 6017 atomic_inc(&mddev->openers);
6012 mutex_unlock(&mddev->open_mutex); 6018 mutex_unlock(&mddev->open_mutex);
6013 6019
6014 check_disk_size_change(mddev->gendisk, bdev); 6020 check_disk_change(bdev);
6015 out: 6021 out:
6016 return err; 6022 return err;
6017} 6023}
@@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode)
6026 6032
6027 return 0; 6033 return 0;
6028} 6034}
6035
6036static int md_media_changed(struct gendisk *disk)
6037{
6038 mddev_t *mddev = disk->private_data;
6039
6040 return mddev->changed;
6041}
6042
6043static int md_revalidate(struct gendisk *disk)
6044{
6045 mddev_t *mddev = disk->private_data;
6046
6047 mddev->changed = 0;
6048 return 0;
6049}
6029static const struct block_device_operations md_fops = 6050static const struct block_device_operations md_fops =
6030{ 6051{
6031 .owner = THIS_MODULE, 6052 .owner = THIS_MODULE,
@@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops =
6036 .compat_ioctl = md_compat_ioctl, 6057 .compat_ioctl = md_compat_ioctl,
6037#endif 6058#endif
6038 .getgeo = md_getgeo, 6059 .getgeo = md_getgeo,
6060 .media_changed = md_media_changed,
6061 .revalidate_disk= md_revalidate,
6039}; 6062};
6040 6063
6041static int md_thread(void * arg) 6064static int md_thread(void * arg)
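The md change above reports media changes through a flag: mddev->changed is set when the array's capacity or identity may have shifted, md_media_changed() reports it, and md_revalidate() clears it so the partition table is reread at most once per change. A minimal userspace sketch of that set/test/clear handshake (the demo_* names are illustrative, not block-layer API):

#include <stdbool.h>
#include <stdio.h>

struct demo_disk {
	bool changed;            /* set whenever size/partitions may be stale */
};

static void demo_array_resized(struct demo_disk *d)
{
	d->changed = true;       /* mirrors mddev->changed = 1 after set_capacity() */
}

static bool demo_media_changed(struct demo_disk *d)
{
	return d->changed;       /* mirrors md_media_changed() */
}

static void demo_revalidate(struct demo_disk *d)
{
	d->changed = false;      /* mirrors md_revalidate() clearing the flag */
}

/* Roughly what a check_disk_change()-style caller does on open. */
static void demo_open(struct demo_disk *d)
{
	if (demo_media_changed(d)) {
		printf("rereading partition table\n");
		demo_revalidate(d);
	} else {
		printf("nothing changed\n");
	}
}

int main(void)
{
	struct demo_disk d = { .changed = false };

	demo_open(&d);           /* nothing changed */
	demo_array_resized(&d);
	demo_open(&d);           /* rereading partition table */
	demo_open(&d);           /* nothing changed again */
	return 0;
}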
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 7e90b8593b2a..12215d437fcc 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -274,6 +274,8 @@ struct mddev_s
274 atomic_t active; /* general refcount */ 274 atomic_t active; /* general refcount */
275 atomic_t openers; /* number of active opens */ 275 atomic_t openers; /* number of active opens */
276 276
277 int changed; /* True if we might need to
278 * reread partition info */
277 int degraded; /* whether md should consider 279 int degraded; /* whether md should consider
278 * adding a spare 280 * adding a spare
279 */ 281 */
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 6d7ddf32ef2e..3a62d440e27b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev)
435 * bookkeeping area. [whatever we allocate in multipath_run(), 435 * bookkeeping area. [whatever we allocate in multipath_run(),
436 * should be freed in multipath_stop()] 436 * should be freed in multipath_stop()]
437 */ 437 */
438 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
439 438
440 conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); 439 conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL);
441 mddev->private = conf; 440 mddev->private = conf;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 637a96855edb..c0ac457f1218 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev)
361 if (md_check_no_bitmap(mddev)) 361 if (md_check_no_bitmap(mddev))
362 return -EINVAL; 362 return -EINVAL;
363 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); 363 blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors);
364 mddev->queue->queue_lock = &mddev->queue->__queue_lock;
365 364
366 /* if private is not null, we are here after takeover */ 365 /* if private is not null, we are here after takeover */
367 if (mddev->private == NULL) { 366 if (mddev->private == NULL) {
@@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev)
670 mddev->new_layout = 0; 669 mddev->new_layout = 0;
671 mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */ 670 mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
672 mddev->delta_disks = 1 - mddev->raid_disks; 671 mddev->delta_disks = 1 - mddev->raid_disks;
672 mddev->raid_disks = 1;
673 /* make sure it will be not marked as dirty */ 673 /* make sure it will be not marked as dirty */
674 mddev->recovery_cp = MaxSector; 674 mddev->recovery_cp = MaxSector;
675 675
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa397ba9..06cd712807d0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf)
593 if (conf->pending_bio_list.head) { 593 if (conf->pending_bio_list.head) {
594 struct bio *bio; 594 struct bio *bio;
595 bio = bio_list_get(&conf->pending_bio_list); 595 bio = bio_list_get(&conf->pending_bio_list);
596 /* Only take the spinlock to quiet a warning */
597 spin_lock(conf->mddev->queue->queue_lock);
596 blk_remove_plug(conf->mddev->queue); 598 blk_remove_plug(conf->mddev->queue);
599 spin_unlock(conf->mddev->queue->queue_lock);
597 spin_unlock_irq(&conf->device_lock); 600 spin_unlock_irq(&conf->device_lock);
598 /* flush any pending bitmap writes to 601 /* flush any pending bitmap writes to
599 * disk before proceeding w/ I/O */ 602 * disk before proceeding w/ I/O */
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
959 atomic_inc(&r1_bio->remaining); 962 atomic_inc(&r1_bio->remaining);
960 spin_lock_irqsave(&conf->device_lock, flags); 963 spin_lock_irqsave(&conf->device_lock, flags);
961 bio_list_add(&conf->pending_bio_list, mbio); 964 bio_list_add(&conf->pending_bio_list, mbio);
962 blk_plug_device(mddev->queue); 965 blk_plug_device_unlocked(mddev->queue);
963 spin_unlock_irqrestore(&conf->device_lock, flags); 966 spin_unlock_irqrestore(&conf->device_lock, flags);
964 } 967 }
965 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); 968 r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev)
2021 if (IS_ERR(conf)) 2024 if (IS_ERR(conf))
2022 return PTR_ERR(conf); 2025 return PTR_ERR(conf);
2023 2026
2024 mddev->queue->queue_lock = &conf->device_lock;
2025 list_for_each_entry(rdev, &mddev->disks, same_set) { 2027 list_for_each_entry(rdev, &mddev->disks, same_set) {
2026 disk_stack_limits(mddev->gendisk, rdev->bdev, 2028 disk_stack_limits(mddev->gendisk, rdev->bdev,
2027 rdev->data_offset << 9); 2029 rdev->data_offset << 9);
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b28741b..747d061d8e05 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf)
662 if (conf->pending_bio_list.head) { 662 if (conf->pending_bio_list.head) {
663 struct bio *bio; 663 struct bio *bio;
664 bio = bio_list_get(&conf->pending_bio_list); 664 bio = bio_list_get(&conf->pending_bio_list);
665 /* Spinlock only taken to quiet a warning */
666 spin_lock(conf->mddev->queue->queue_lock);
665 blk_remove_plug(conf->mddev->queue); 667 blk_remove_plug(conf->mddev->queue);
668 spin_unlock(conf->mddev->queue->queue_lock);
666 spin_unlock_irq(&conf->device_lock); 669 spin_unlock_irq(&conf->device_lock);
667 /* flush any pending bitmap writes to disk 670 /* flush any pending bitmap writes to disk
668 * before proceeding w/ I/O */ 671 * before proceeding w/ I/O */
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
971 atomic_inc(&r10_bio->remaining); 974 atomic_inc(&r10_bio->remaining);
972 spin_lock_irqsave(&conf->device_lock, flags); 975 spin_lock_irqsave(&conf->device_lock, flags);
973 bio_list_add(&conf->pending_bio_list, mbio); 976 bio_list_add(&conf->pending_bio_list, mbio);
974 blk_plug_device(mddev->queue); 977 blk_plug_device_unlocked(mddev->queue);
975 spin_unlock_irqrestore(&conf->device_lock, flags); 978 spin_unlock_irqrestore(&conf->device_lock, flags);
976 } 979 }
977 980
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev)
2304 if (!conf) 2307 if (!conf)
2305 goto out; 2308 goto out;
2306 2309
2307 mddev->queue->queue_lock = &conf->device_lock;
2308
2309 mddev->thread = conf->thread; 2310 mddev->thread = conf->thread;
2310 conf->thread = NULL; 2311 conf->thread = NULL;
2311 2312
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 702812824195..78536fdbd87f 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev)
5204 5204
5205 mddev->queue->backing_dev_info.congested_data = mddev; 5205 mddev->queue->backing_dev_info.congested_data = mddev;
5206 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 5206 mddev->queue->backing_dev_info.congested_fn = raid5_congested;
5207 mddev->queue->queue_lock = &conf->device_lock;
5208 mddev->queue->unplug_fn = raid5_unplug_queue; 5207 mddev->queue->unplug_fn = raid5_unplug_queue;
5209 5208
5210 chunk_size = mddev->chunk_sectors << 9; 5209 chunk_size = mddev->chunk_sectors << 9;
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig
index ffedfd492754..ea1580085347 100644
--- a/drivers/nfc/Kconfig
+++ b/drivers/nfc/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5menuconfig NFC_DEVICES 5menuconfig NFC_DEVICES
6 bool "NFC devices" 6 bool "Near Field Communication (NFC) devices"
7 default n 7 default n
8 ---help--- 8 ---help---
9 You'll have to say Y if your computer contains an NFC device that 9 You'll have to say Y if your computer contains an NFC device that
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c
index bae647264dd6..724f65d8f9e4 100644
--- a/drivers/nfc/pn544.c
+++ b/drivers/nfc/pn544.c
@@ -60,7 +60,7 @@ enum pn544_irq {
60struct pn544_info { 60struct pn544_info {
61 struct miscdevice miscdev; 61 struct miscdevice miscdev;
62 struct i2c_client *i2c_dev; 62 struct i2c_client *i2c_dev;
63 struct regulator_bulk_data regs[2]; 63 struct regulator_bulk_data regs[3];
64 64
65 enum pn544_state state; 65 enum pn544_state state;
66 wait_queue_head_t read_wait; 66 wait_queue_head_t read_wait;
@@ -74,6 +74,7 @@ struct pn544_info {
74 74
75static const char reg_vdd_io[] = "Vdd_IO"; 75static const char reg_vdd_io[] = "Vdd_IO";
76static const char reg_vbat[] = "VBat"; 76static const char reg_vbat[] = "VBat";
77static const char reg_vsim[] = "VSim";
77 78
78/* sysfs interface */ 79/* sysfs interface */
79static ssize_t pn544_test(struct device *dev, 80static ssize_t pn544_test(struct device *dev,
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client,
740 741
741 info->regs[0].supply = reg_vdd_io; 742 info->regs[0].supply = reg_vdd_io;
742 info->regs[1].supply = reg_vbat; 743 info->regs[1].supply = reg_vbat;
744 info->regs[2].supply = reg_vsim;
743 r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs), 745 r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs),
744 info->regs); 746 info->regs);
745 if (r < 0) 747 if (r < 0)
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index cba1b43f7519..a4e8eb9fece6 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
168{ 168{
169 unsigned long flags; 169 unsigned long flags;
170 int captured = 0; 170 int captured = 0;
171 struct pps_ktime ts_real; 171 struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 };
172 172
173 /* check event type */ 173 /* check event type */
174 BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); 174 BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
index 76b41853a877..1269fbd2deca 100644
--- a/drivers/rapidio/rio-sysfs.c
+++ b/drivers/rapidio/rio-sysfs.c
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj,
77 77
78 /* Several chips lock up trying to read undefined config space */ 78 /* Several chips lock up trying to read undefined config space */
79 if (capable(CAP_SYS_ADMIN)) 79 if (capable(CAP_SYS_ADMIN))
80 size = 0x200000; 80 size = RIO_MAINT_SPACE_SZ;
81 81
82 if (off > size) 82 if (off >= size)
83 return 0; 83 return 0;
84 if (off + count > size) { 84 if (off + count > size) {
85 size -= off; 85 size -= off;
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj,
147 loff_t init_off = off; 147 loff_t init_off = off;
148 u8 *data = (u8 *) buf; 148 u8 *data = (u8 *) buf;
149 149
150 if (off > 0x200000) 150 if (off >= RIO_MAINT_SPACE_SZ)
151 return 0; 151 return 0;
152 if (off + count > 0x200000) { 152 if (off + count > RIO_MAINT_SPACE_SZ) {
153 size = 0x200000 - off; 153 size = RIO_MAINT_SPACE_SZ - off;
154 count = size; 154 count = size;
155 } 155 }
156 156
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = {
200 .name = "config", 200 .name = "config",
201 .mode = S_IRUGO | S_IWUSR, 201 .mode = S_IRUGO | S_IWUSR,
202 }, 202 },
203 .size = 0x200000, 203 .size = RIO_MAINT_SPACE_SZ,
204 .read = rio_read_config, 204 .read = rio_read_config,
205 .write = rio_write_config, 205 .write = rio_write_config,
206}; 206};
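The tightened bounds checks above clamp a binary-attribute access to the maintenance space: an offset at or past the end returns 0 bytes, and a request that straddles the end is shortened to what remains. A standalone sketch with a worked example (DEMO_SPACE_SZ stands in for RIO_MAINT_SPACE_SZ):

#include <stdio.h>
#include <stddef.h>

#define DEMO_SPACE_SZ 0x200000u   /* stands in for RIO_MAINT_SPACE_SZ */

/* Return how many bytes of an (off, count) request fall inside the space. */
static size_t clamp_request(size_t off, size_t count)
{
	if (off >= DEMO_SPACE_SZ)
		return 0;                        /* fully out of range */
	if (off + count > DEMO_SPACE_SZ)
		count = DEMO_SPACE_SZ - off;     /* trim the tail */
	return count;
}

int main(void)
{
	printf("%zu\n", clamp_request(0x1FFFF0, 0x20)); /* 16 bytes: trimmed to the end */
	printf("%zu\n", clamp_request(0x200000, 0x10)); /* 0: offset already at the end */
	return 0;
}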
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c
index f53d31b950d4..2bb5de1f2421 100644
--- a/drivers/regulator/mc13xxx-regulator-core.c
+++ b/drivers/regulator/mc13xxx-regulator-core.c
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev)
174 174
175 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); 175 dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
176 176
177 BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages); 177 BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages);
178 178
179 return mc13xxx_regulators[id].voltages[val]; 179 return mc13xxx_regulators[id].voltages[val];
180} 180}
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 8b0d2c4bde91..06df898842c0 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev)
120 return REGULATOR_MODE_IDLE; 120 return REGULATOR_MODE_IDLE;
121 default: 121 default:
122 BUG(); 122 BUG();
123 return -EINVAL;
123 } 124 }
124} 125}
125 126
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index c36749e4c926..5469c52cba3d 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -309,7 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = {
309 .read_alarm = at91_rtc_readalarm, 309 .read_alarm = at91_rtc_readalarm,
310 .set_alarm = at91_rtc_setalarm, 310 .set_alarm = at91_rtc_setalarm,
311 .proc = at91_rtc_proc, 311 .proc = at91_rtc_proc,
312 .alarm_irq_enabled = at91_rtc_alarm_irq_enable, 312 .alarm_irq_enable = at91_rtc_alarm_irq_enable,
313}; 313};
314 314
315/* 315/*
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c
index 23a9ee19764c..950735415a7c 100644
--- a/drivers/rtc/rtc-ds3232.c
+++ b/drivers/rtc/rtc-ds3232.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C 2 * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C
3 * 3 *
4 * Copyright (C) 2009-2010 Freescale Semiconductor. 4 * Copyright (C) 2009-2011 Freescale Semiconductor.
5 * Author: Jack Lan <jack.lan@freescale.com> 5 * Author: Jack Lan <jack.lan@freescale.com>
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify it 7 * This program is free software; you can redistribute it and/or modify it
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time)
141 time->tm_hour = bcd2bin(hour); 141 time->tm_hour = bcd2bin(hour);
142 } 142 }
143 143
144 time->tm_wday = bcd2bin(week); 144 /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
145 time->tm_wday = bcd2bin(week) - 1;
145 time->tm_mday = bcd2bin(day); 146 time->tm_mday = bcd2bin(day);
146 time->tm_mon = bcd2bin(month & 0x7F); 147 /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
148 time->tm_mon = bcd2bin(month & 0x7F) - 1;
147 if (century) 149 if (century)
148 add_century = 100; 150 add_century = 100;
149 151
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time)
162 buf[0] = bin2bcd(time->tm_sec); 164 buf[0] = bin2bcd(time->tm_sec);
163 buf[1] = bin2bcd(time->tm_min); 165 buf[1] = bin2bcd(time->tm_min);
164 buf[2] = bin2bcd(time->tm_hour); 166 buf[2] = bin2bcd(time->tm_hour);
165 buf[3] = bin2bcd(time->tm_wday); /* Day of the week */ 167 /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */
168 buf[3] = bin2bcd(time->tm_wday + 1);
166 buf[4] = bin2bcd(time->tm_mday); /* Date */ 169 buf[4] = bin2bcd(time->tm_mday); /* Date */
167 buf[5] = bin2bcd(time->tm_mon); 170 /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */
171 buf[5] = bin2bcd(time->tm_mon + 1);
168 if (time->tm_year >= 100) { 172 if (time->tm_year >= 100) {
169 buf[5] |= 0x80; 173 buf[5] |= 0x80;
170 buf[6] = bin2bcd(time->tm_year - 100); 174 buf[6] = bin2bcd(time->tm_year - 100);
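The DS3232 fix above converts between the chip's 1-based BCD fields (month 1–12, weekday 1–7) and the 0-based Linux ranges (tm_mon 0–11, tm_wday 0–6) by adding one on write and subtracting one on read. A standalone round-trip sketch with a worked value (the bcd helpers are written out here for illustration):

#include <stdio.h>
#include <stdint.h>

static uint8_t demo_bin2bcd(unsigned int val)
{
	return (uint8_t)(((val / 10) << 4) | (val % 10));
}

static unsigned int demo_bcd2bin(uint8_t val)
{
	return ((val >> 4) * 10) + (val & 0x0F);
}

int main(void)
{
	int tm_mon = 11;                          /* December in Linux's 0..11 range */

	uint8_t reg = demo_bin2bcd(tm_mon + 1);   /* chip wants 1..12: 12 -> BCD 0x12 */
	int back = demo_bcd2bin(reg) - 1;         /* reading back restores 11 */

	printf("register=0x%02X tm_mon=%d\n", reg, back);
	return 0;
}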
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index f7a5dba3ca23..bf7c687519ef 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -4,7 +4,6 @@
4 4
5menuconfig THERMAL 5menuconfig THERMAL
6 tristate "Generic Thermal sysfs driver" 6 tristate "Generic Thermal sysfs driver"
7 depends on NET
8 help 7 help
9 Generic Thermal Sysfs driver offers a generic mechanism for 8 Generic Thermal Sysfs driver offers a generic mechanism for
10 thermal management. Usually it's made up of one or more thermal 9 thermal management. Usually it's made up of one or more thermal
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c
index 7d0e63c79280..713b7ea4a607 100644
--- a/drivers/thermal/thermal_sys.c
+++ b/drivers/thermal/thermal_sys.c
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock);
62 62
63static unsigned int thermal_event_seqnum; 63static unsigned int thermal_event_seqnum;
64 64
65static struct genl_family thermal_event_genl_family = {
66 .id = GENL_ID_GENERATE,
67 .name = THERMAL_GENL_FAMILY_NAME,
68 .version = THERMAL_GENL_VERSION,
69 .maxattr = THERMAL_GENL_ATTR_MAX,
70};
71
72static struct genl_multicast_group thermal_event_mcgrp = {
73 .name = THERMAL_GENL_MCAST_GROUP_NAME,
74};
75
76static int genetlink_init(void);
77static void genetlink_exit(void);
78
79static int get_idr(struct idr *idr, struct mutex *lock, int *id) 65static int get_idr(struct idr *idr, struct mutex *lock, int *id)
80{ 66{
81 int err; 67 int err;
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
1225 1211
1226EXPORT_SYMBOL(thermal_zone_device_unregister); 1212EXPORT_SYMBOL(thermal_zone_device_unregister);
1227 1213
1214#ifdef CONFIG_NET
1215static struct genl_family thermal_event_genl_family = {
1216 .id = GENL_ID_GENERATE,
1217 .name = THERMAL_GENL_FAMILY_NAME,
1218 .version = THERMAL_GENL_VERSION,
1219 .maxattr = THERMAL_GENL_ATTR_MAX,
1220};
1221
1222static struct genl_multicast_group thermal_event_mcgrp = {
1223 .name = THERMAL_GENL_MCAST_GROUP_NAME,
1224};
1225
1228int generate_netlink_event(u32 orig, enum events event) 1226int generate_netlink_event(u32 orig, enum events event)
1229{ 1227{
1230 struct sk_buff *skb; 1228 struct sk_buff *skb;
@@ -1301,6 +1299,15 @@ static int genetlink_init(void)
1301 return result; 1299 return result;
1302} 1300}
1303 1301
1302static void genetlink_exit(void)
1303{
1304 genl_unregister_family(&thermal_event_genl_family);
1305}
1306#else /* !CONFIG_NET */
1307static inline int genetlink_init(void) { return 0; }
1308static inline void genetlink_exit(void) {}
1309#endif /* !CONFIG_NET */
1310
1304static int __init thermal_init(void) 1311static int __init thermal_init(void)
1305{ 1312{
1306 int result = 0; 1313 int result = 0;
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void)
1316 return result; 1323 return result;
1317} 1324}
1318 1325
1319static void genetlink_exit(void)
1320{
1321 genl_unregister_family(&thermal_event_genl_family);
1322}
1323
1324static void __exit thermal_exit(void) 1326static void __exit thermal_exit(void)
1325{ 1327{
1326 class_unregister(&thermal_class); 1328 class_unregister(&thermal_class);
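
The thermal change above drops the hard "depends on NET" and instead builds the generic-netlink event code only when CONFIG_NET is set, providing empty static-inline stubs otherwise so thermal_init() and thermal_exit() can keep calling genetlink_init()/genetlink_exit() unconditionally. A minimal userspace sketch of that stub pattern, with a made-up HAVE_NET macro standing in for the Kconfig symbol and placeholder bodies:

/* Hedged sketch of the "real code or inline stub" pattern used above;
 * HAVE_NET is a stand-in for CONFIG_NET and the bodies are placeholders. */
#include <stdio.h>

#ifdef HAVE_NET
static int genetlink_init(void)
{
        printf("registering netlink event family\n");
        return 0;
}

static void genetlink_exit(void)
{
        printf("unregistering netlink event family\n");
}
#else /* !HAVE_NET */
static inline int genetlink_init(void) { return 0; }
static inline void genetlink_exit(void) { }
#endif

int main(void)
{
        /* Callers stay free of #ifdefs; without HAVE_NET the stubs compile
         * away to nothing. */
        if (genetlink_init())
                return 1;
        genetlink_exit();
        return 0;
}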
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index d041c6826e43..0f299b7aad60 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
2681 2681
2682 mutex_lock(&usb_address0_mutex); 2682 mutex_lock(&usb_address0_mutex);
2683 2683
2684 if (!udev->config && oldspeed == USB_SPEED_SUPER) { 2684 /* Reset the device; full speed may morph to high speed */
2685 /* Don't reset USB 3.0 devices during an initial setup */ 2685 /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */
2686 usb_set_device_state(udev, USB_STATE_DEFAULT); 2686 retval = hub_port_reset(hub, port1, udev, delay);
2687 } else { 2687 if (retval < 0) /* error or disconnect */
2688 /* Reset the device; full speed may morph to high speed */ 2688 goto fail;
2689 /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ 2689 /* success, speed is known */
2690 retval = hub_port_reset(hub, port1, udev, delay); 2690
2691 if (retval < 0) /* error or disconnect */
2692 goto fail;
2693 /* success, speed is known */
2694 }
2695 retval = -ENODEV; 2691 retval = -ENODEV;
2696 2692
2697 if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { 2693 if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) {
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index fcbf4abbf381..0231814a97a5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci)
169 } 169 }
170} 170}
171 171
172void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num) 172void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
173{ 173{
174 void *addr; 174 struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num];
175 void __iomem *addr;
175 u32 temp; 176 u32 temp;
176 u64 temp_64; 177 u64 temp_64;
177 178
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci,
449 } 450 }
450} 451}
451 452
452void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) 453static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
453{ 454{
454 /* Fields are 32 bits wide, DMA addresses are in bytes */ 455 /* Fields are 32 bits wide, DMA addresses are in bytes */
455 int field_size = 32 / 8; 456 int field_size = 32 / 8;
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
488 dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); 489 dbg_rsvd64(xhci, (u64 *)slot_ctx, dma);
489} 490}
490 491
491void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, 492static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
492 struct xhci_container_ctx *ctx, 493 struct xhci_container_ctx *ctx,
493 unsigned int last_ep) 494 unsigned int last_ep)
494{ 495{
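
The xhci-dbg.c hunk above makes xhci_print_ir_set() take only the interrupter index and derive the register pointer itself, so callers cannot hand it a mismatched pointer and the pointer keeps its __iomem annotation for sparse. A hedged sketch of that accessor shape, using simplified stand-in structs rather than the real xHCI register layout:

/* Hedged sketch of deriving a register-set pointer from an index; the
 * struct layouts are simplified stand-ins for the xHCI definitions. */
#include <stdio.h>

struct intr_reg {
        unsigned int irq_pending;
        unsigned int irq_control;
};

struct run_regs {
        unsigned int microframe_index;
        unsigned int rsvd[7];
        struct intr_reg ir_set[128];    /* fixed-size array of interrupter sets */
};

struct host {
        struct run_regs *run_regs;      /* __iomem-annotated in the kernel */
};

static void print_ir_set(struct host *h, int set_num)
{
        /* Compute the pointer from the index instead of trusting the caller. */
        struct intr_reg *ir_set = &h->run_regs->ir_set[set_num];

        /* The kernel would read these fields through readl()-style accessors. */
        printf("ir_set %d: pending=%#x control=%#x\n",
               set_num, ir_set->irq_pending, ir_set->irq_control);
}

int main(void)
{
        static struct run_regs regs;            /* fake, zeroed register block */
        struct host h = { .run_regs = &regs };

        print_ir_set(&h, 0);
        return 0;
}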
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 1d0f45f0e7a6..a9534396e85b 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci,
307 307
308/***************** Streams structures manipulation *************************/ 308/***************** Streams structures manipulation *************************/
309 309
310void xhci_free_stream_ctx(struct xhci_hcd *xhci, 310static void xhci_free_stream_ctx(struct xhci_hcd *xhci,
311 unsigned int num_stream_ctxs, 311 unsigned int num_stream_ctxs,
312 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) 312 struct xhci_stream_ctx *stream_ctx, dma_addr_t dma)
313{ 313{
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci,
335 * The stream context array must be a power of 2, and can be as small as 335 * The stream context array must be a power of 2, and can be as small as
336 * 64 bytes or as large as 1MB. 336 * 64 bytes or as large as 1MB.
337 */ 337 */
338struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, 338static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci,
339 unsigned int num_stream_ctxs, dma_addr_t *dma, 339 unsigned int num_stream_ctxs, dma_addr_t *dma,
340 gfp_t mem_flags) 340 gfp_t mem_flags)
341{ 341{
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
1900 val &= DBOFF_MASK; 1900 val &= DBOFF_MASK;
1901 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" 1901 xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x"
1902 " from cap regs base addr\n", val); 1902 " from cap regs base addr\n", val);
1903 xhci->dba = (void *) xhci->cap_regs + val; 1903 xhci->dba = (void __iomem *) xhci->cap_regs + val;
1904 xhci_dbg_regs(xhci); 1904 xhci_dbg_regs(xhci);
1905 xhci_print_run_regs(xhci); 1905 xhci_print_run_regs(xhci);
1906 /* Set ir_set to interrupt register set 0 */ 1906 /* Set ir_set to interrupt register set 0 */
1907 xhci->ir_set = (void *) xhci->run_regs->ir_set; 1907 xhci->ir_set = &xhci->run_regs->ir_set[0];
1908 1908
1909 /* 1909 /*
1910 * Event ring setup: Allocate a normal ring, but also setup 1910 * Event ring setup: Allocate a normal ring, but also setup
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
1961 /* Set the event ring dequeue address */ 1961 /* Set the event ring dequeue address */
1962 xhci_set_hc_event_deq(xhci); 1962 xhci_set_hc_event_deq(xhci);
1963 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); 1963 xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n");
1964 xhci_print_ir_set(xhci, xhci->ir_set, 0); 1964 xhci_print_ir_set(xhci, 0);
1965 1965
1966 /* 1966 /*
1967 * XXX: Might need to set the Interrupter Moderation Register to 1967 * XXX: Might need to set the Interrupter Moderation Register to
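
The xhci-mem.c hunk above also keeps the doorbell-array lookup honest about pointer types: DBOFF is a byte offset from the capability register base, so the addition has to go through a byte-sized (and, in the kernel, __iomem-annotated) pointer. A hedged userspace sketch of that offset computation; DBOFF_MASK_SKETCH and the fake register window are stand-ins, not the driver's definitions:

/* Hedged sketch of locating the doorbell array from a DBOFF-style offset
 * register, as in the hunk above; constants and types are simplified. */
#include <stdint.h>
#include <stdio.h>

#define DBOFF_MASK_SKETCH  (~0x3u)      /* low bits of the offset are reserved */

static void *doorbell_base(void *cap_regs, uint32_t dboff)
{
        uint32_t val = dboff & DBOFF_MASK_SKETCH;

        /* Byte offset from the capability register base, so step through a
         * char-sized pointer before adding. */
        return (char *)cap_regs + val;
}

int main(void)
{
        static unsigned char mmio[0x1000];      /* fake register window */
        void *dba = doorbell_base(mmio, 0x803); /* reserved bits deliberately set */

        printf("doorbell array at offset %#lx\n",
               (unsigned long)((unsigned char *)dba - mmio));   /* 0x800 */
        return 0;
}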
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 3e8211c1ce5a..3289bf4832c9 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
474 state->new_deq_seg = find_trb_seg(cur_td->start_seg, 474 state->new_deq_seg = find_trb_seg(cur_td->start_seg,
475 dev->eps[ep_index].stopped_trb, 475 dev->eps[ep_index].stopped_trb,
476 &state->new_cycle_state); 476 &state->new_cycle_state);
477 if (!state->new_deq_seg) 477 if (!state->new_deq_seg) {
478 BUG(); 478 WARN_ON(1);
479 return;
480 }
481
479 /* Dig out the cycle state saved by the xHC during the stop ep cmd */ 482 /* Dig out the cycle state saved by the xHC during the stop ep cmd */
480 xhci_dbg(xhci, "Finding endpoint context\n"); 483 xhci_dbg(xhci, "Finding endpoint context\n");
481 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); 484 ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
486 state->new_deq_seg = find_trb_seg(state->new_deq_seg, 489 state->new_deq_seg = find_trb_seg(state->new_deq_seg,
487 state->new_deq_ptr, 490 state->new_deq_ptr,
488 &state->new_cycle_state); 491 &state->new_cycle_state);
489 if (!state->new_deq_seg) 492 if (!state->new_deq_seg) {
490 BUG(); 493 WARN_ON(1);
494 return;
495 }
491 496
492 trb = &state->new_deq_ptr->generic; 497 trb = &state->new_deq_ptr->generic;
493 if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && 498 if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) &&
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2363 2368
2364 /* Scatter gather list entries may cross 64KB boundaries */ 2369 /* Scatter gather list entries may cross 64KB boundaries */
2365 running_total = TRB_MAX_BUFF_SIZE - 2370 running_total = TRB_MAX_BUFF_SIZE -
2366 (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2371 (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1));
2372 running_total &= TRB_MAX_BUFF_SIZE - 1;
2367 if (running_total != 0) 2373 if (running_total != 0)
2368 num_trbs++; 2374 num_trbs++;
2369 2375
2370 /* How many more 64KB chunks to transfer, how many more TRBs? */ 2376 /* How many more 64KB chunks to transfer, how many more TRBs? */
2371 while (running_total < sg_dma_len(sg)) { 2377 while (running_total < sg_dma_len(sg) && running_total < temp) {
2372 num_trbs++; 2378 num_trbs++;
2373 running_total += TRB_MAX_BUFF_SIZE; 2379 running_total += TRB_MAX_BUFF_SIZE;
2374 } 2380 }
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb)
2394static void check_trb_math(struct urb *urb, int num_trbs, int running_total) 2400static void check_trb_math(struct urb *urb, int num_trbs, int running_total)
2395{ 2401{
2396 if (num_trbs != 0) 2402 if (num_trbs != 0)
2397 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " 2403 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of "
2398 "TRBs, %d left\n", __func__, 2404 "TRBs, %d left\n", __func__,
2399 urb->ep->desc.bEndpointAddress, num_trbs); 2405 urb->ep->desc.bEndpointAddress, num_trbs);
2400 if (running_total != urb->transfer_buffer_length) 2406 if (running_total != urb->transfer_buffer_length)
2401 dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " 2407 dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, "
2402 "queued %#x (%d), asked for %#x (%d)\n", 2408 "queued %#x (%d), asked for %#x (%d)\n",
2403 __func__, 2409 __func__,
2404 urb->ep->desc.bEndpointAddress, 2410 urb->ep->desc.bEndpointAddress,
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2533 sg = urb->sg; 2539 sg = urb->sg;
2534 addr = (u64) sg_dma_address(sg); 2540 addr = (u64) sg_dma_address(sg);
2535 this_sg_len = sg_dma_len(sg); 2541 this_sg_len = sg_dma_len(sg);
2536 trb_buff_len = TRB_MAX_BUFF_SIZE - 2542 trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
2537 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1));
2538 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2543 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2539 if (trb_buff_len > urb->transfer_buffer_length) 2544 if (trb_buff_len > urb->transfer_buffer_length)
2540 trb_buff_len = urb->transfer_buffer_length; 2545 trb_buff_len = urb->transfer_buffer_length;
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2572 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2577 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
2573 (unsigned int) addr + trb_buff_len); 2578 (unsigned int) addr + trb_buff_len);
2574 if (TRB_MAX_BUFF_SIZE - 2579 if (TRB_MAX_BUFF_SIZE -
2575 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) { 2580 (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) {
2576 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); 2581 xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n");
2577 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", 2582 xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n",
2578 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), 2583 (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1),
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2616 } 2621 }
2617 2622
2618 trb_buff_len = TRB_MAX_BUFF_SIZE - 2623 trb_buff_len = TRB_MAX_BUFF_SIZE -
2619 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2624 (addr & (TRB_MAX_BUFF_SIZE - 1));
2620 trb_buff_len = min_t(int, trb_buff_len, this_sg_len); 2625 trb_buff_len = min_t(int, trb_buff_len, this_sg_len);
2621 if (running_total + trb_buff_len > urb->transfer_buffer_length) 2626 if (running_total + trb_buff_len > urb->transfer_buffer_length)
2622 trb_buff_len = 2627 trb_buff_len =
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2656 num_trbs = 0; 2661 num_trbs = 0;
2657 /* How much data is (potentially) left before the 64KB boundary? */ 2662 /* How much data is (potentially) left before the 64KB boundary? */
2658 running_total = TRB_MAX_BUFF_SIZE - 2663 running_total = TRB_MAX_BUFF_SIZE -
2659 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2664 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2665 running_total &= TRB_MAX_BUFF_SIZE - 1;
2660 2666
2661 /* If there's some data on this 64KB chunk, or we have to send a 2667 /* If there's some data on this 64KB chunk, or we have to send a
2662 * zero-length transfer, we need at least one TRB 2668 * zero-length transfer, we need at least one TRB
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
2700 /* How much data is in the first TRB? */ 2706 /* How much data is in the first TRB? */
2701 addr = (u64) urb->transfer_dma; 2707 addr = (u64) urb->transfer_dma;
2702 trb_buff_len = TRB_MAX_BUFF_SIZE - 2708 trb_buff_len = TRB_MAX_BUFF_SIZE -
2703 (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2709 (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1));
2704 if (urb->transfer_buffer_length < trb_buff_len) 2710 if (trb_buff_len > urb->transfer_buffer_length)
2705 trb_buff_len = urb->transfer_buffer_length; 2711 trb_buff_len = urb->transfer_buffer_length;
2706 2712
2707 first_trb = true; 2713 first_trb = true;
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci,
2879 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); 2885 addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset);
2880 td_len = urb->iso_frame_desc[i].length; 2886 td_len = urb->iso_frame_desc[i].length;
2881 2887
2882 running_total = TRB_MAX_BUFF_SIZE - 2888 running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1));
2883 (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); 2889 running_total &= TRB_MAX_BUFF_SIZE - 1;
2884 if (running_total != 0) 2890 if (running_total != 0)
2885 num_trbs++; 2891 num_trbs++;
2886 2892
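
The xhci-ring.c hunks above replace the open-coded (1 << TRB_MAX_BUFF_SHIFT) - 1 mask with TRB_MAX_BUFF_SIZE - 1 and, where the value means "bytes left before the next 64KB boundary", mask the result once more so a buffer that already starts on a boundary contributes 0 rather than a full 64KB to the first TRB. A small worked example of that arithmetic (the constant mirrors TRB_MAX_BUFF_SIZE; the addresses are made up):

/* Hedged sketch of the 64KB-boundary bookkeeping changed above. */
#include <stdint.h>
#include <stdio.h>

#define TRB_MAX_BUFF_SIZE_SKETCH  (1u << 16)    /* 64KB per TRB */

/* Bytes that fit in the first TRB before the next 64KB boundary, returning
 * 0 (not 64KB) for an already-aligned address. */
static uint32_t bytes_before_boundary(uint64_t dma_addr)
{
        uint32_t left = TRB_MAX_BUFF_SIZE_SKETCH -
                        (uint32_t)(dma_addr & (TRB_MAX_BUFF_SIZE_SKETCH - 1));

        return left & (TRB_MAX_BUFF_SIZE_SKETCH - 1);
}

int main(void)
{
        /* 0x1f000: 0x1000 bytes remain before the 0x20000 boundary. */
        printf("%#x\n", bytes_before_boundary(0x1f000));
        /* 0x20000: aligned, so no partial first chunk is needed. */
        printf("%#x\n", bytes_before_boundary(0x20000));
        return 0;
}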
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 34cf4e165877..2083fc2179b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci)
109/* 109/*
110 * Set the run bit and wait for the host to be running. 110 * Set the run bit and wait for the host to be running.
111 */ 111 */
112int xhci_start(struct xhci_hcd *xhci) 112static int xhci_start(struct xhci_hcd *xhci)
113{ 113{
114 u32 temp; 114 u32 temp;
115 int ret; 115 int ret;
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd)
329 329
330 330
331#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING 331#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
332void xhci_event_ring_work(unsigned long arg) 332static void xhci_event_ring_work(unsigned long arg)
333{ 333{
334 unsigned long flags; 334 unsigned long flags;
335 int temp; 335 int temp;
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd)
473 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); 473 xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
474 xhci_writel(xhci, ER_IRQ_ENABLE(temp), 474 xhci_writel(xhci, ER_IRQ_ENABLE(temp),
475 &xhci->ir_set->irq_pending); 475 &xhci->ir_set->irq_pending);
476 xhci_print_ir_set(xhci, xhci->ir_set, 0); 476 xhci_print_ir_set(xhci, 0);
477 477
478 if (NUM_TEST_NOOPS > 0) 478 if (NUM_TEST_NOOPS > 0)
479 doorbell = xhci_setup_one_noop(xhci); 479 doorbell = xhci_setup_one_noop(xhci);
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd)
528 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); 528 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
529 xhci_writel(xhci, ER_IRQ_DISABLE(temp), 529 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
530 &xhci->ir_set->irq_pending); 530 &xhci->ir_set->irq_pending);
531 xhci_print_ir_set(xhci, xhci->ir_set, 0); 531 xhci_print_ir_set(xhci, 0);
532 532
533 xhci_dbg(xhci, "cleaning up memory\n"); 533 xhci_dbg(xhci, "cleaning up memory\n");
534 xhci_mem_cleanup(xhci); 534 xhci_mem_cleanup(xhci);
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
755 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); 755 temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
756 xhci_writel(xhci, ER_IRQ_DISABLE(temp), 756 xhci_writel(xhci, ER_IRQ_DISABLE(temp),
757 &xhci->ir_set->irq_pending); 757 &xhci->ir_set->irq_pending);
758 xhci_print_ir_set(xhci, xhci->ir_set, 0); 758 xhci_print_ir_set(xhci, 0);
759 759
760 xhci_dbg(xhci, "cleaning up memory\n"); 760 xhci_dbg(xhci, "cleaning up memory\n");
761 xhci_mem_cleanup(xhci); 761 xhci_mem_cleanup(xhci);
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
857/* Returns 1 if the arguments are OK; 857/* Returns 1 if the arguments are OK;
858 * returns 0 this is a root hub; returns -EINVAL for NULL pointers. 858 * returns 0 this is a root hub; returns -EINVAL for NULL pointers.
859 */ 859 */
860int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, 860static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
861 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, 861 struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
862 const char *func) { 862 const char *func) {
863 struct xhci_hcd *xhci; 863 struct xhci_hcd *xhci;
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
1693 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); 1693 xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
1694} 1694}
1695 1695
1696void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, 1696static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
1697 unsigned int slot_id, unsigned int ep_index, 1697 unsigned int slot_id, unsigned int ep_index,
1698 struct xhci_dequeue_state *deq_state) 1698 struct xhci_dequeue_state *deq_state)
1699{ 1699{
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 7f236fd22015..7f127df6dd55 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
1348} 1348}
1349 1349
1350/* xHCI debugging */ 1350/* xHCI debugging */
1351void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); 1351void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
1352void xhci_print_registers(struct xhci_hcd *xhci); 1352void xhci_print_registers(struct xhci_hcd *xhci);
1353void xhci_dbg_regs(struct xhci_hcd *xhci); 1353void xhci_dbg_regs(struct xhci_hcd *xhci);
1354void xhci_print_run_regs(struct xhci_hcd *xhci); 1354void xhci_print_run_regs(struct xhci_hcd *xhci);
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 54a8bd1047d6..c292d5c499e7 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev,
1864 INIT_LIST_HEAD(&musb->out_bulk); 1864 INIT_LIST_HEAD(&musb->out_bulk);
1865 1865
1866 hcd->uses_new_polling = 1; 1866 hcd->uses_new_polling = 1;
1867 hcd->has_tt = 1;
1867 1868
1868 musb->vbuserr_retry = VBUSERR_RETRY_COUNT; 1869 musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
1869 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; 1870 musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;