aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/acpi
diff options
context:
space:
mode:
authorLen Brown <len.brown@intel.com>2010-06-01 22:53:36 -0400
committerLen Brown <len.brown@intel.com>2010-06-01 22:53:36 -0400
commitb42f5b0f0fd8c1c442c1b29a3fbcb338e8bd7732 (patch)
tree194e13dfa85d2d2af8bd125acd80a445ee0def62 /drivers/acpi
parentfe955682d2153b35dffcf1673dff0491096a3f0a (diff)
parent0a76a34ff0804f1f413807b2e2d12117c2b602ca (diff)
Merge branches 'bugzilla-14668' and 'misc-2.6.35' into release
Diffstat (limited to 'drivers/acpi')
-rw-r--r--drivers/acpi/Kconfig9
-rw-r--r--drivers/acpi/Makefile5
-rw-r--r--drivers/acpi/acpi_pad.c22
-rw-r--r--drivers/acpi/acpica/Makefile4
-rw-r--r--drivers/acpi/acpica/acevents.h51
-rw-r--r--drivers/acpi/acpica/acglobal.h25
-rw-r--r--drivers/acpi/acpica/acinterp.h9
-rw-r--r--drivers/acpi/acpica/aclocal.h19
-rw-r--r--drivers/acpi/acpica/actables.h4
-rw-r--r--drivers/acpi/acpica/dsfield.c2
-rw-r--r--drivers/acpi/acpica/dsmethod.c2
-rw-r--r--drivers/acpi/acpica/dsmthdat.c10
-rw-r--r--drivers/acpi/acpica/dsobject.c14
-rw-r--r--drivers/acpi/acpica/dsopcode.c13
-rw-r--r--drivers/acpi/acpica/dswexec.c6
-rw-r--r--drivers/acpi/acpica/dswstate.c10
-rw-r--r--drivers/acpi/acpica/evevent.c2
-rw-r--r--drivers/acpi/acpica/evgpe.c167
-rw-r--r--drivers/acpi/acpica/evgpeblk.c766
-rw-r--r--drivers/acpi/acpica/evgpeinit.c653
-rw-r--r--drivers/acpi/acpica/evgpeutil.c337
-rw-r--r--drivers/acpi/acpica/evmisc.c2
-rw-r--r--drivers/acpi/acpica/evxface.c24
-rw-r--r--drivers/acpi/acpica/evxfevnt.c224
-rw-r--r--drivers/acpi/acpica/exconfig.c21
-rw-r--r--drivers/acpi/acpica/exconvrt.c4
-rw-r--r--drivers/acpi/acpica/excreate.c4
-rw-r--r--drivers/acpi/acpica/exdebug.c261
-rw-r--r--drivers/acpi/acpica/exfield.c2
-rw-r--r--drivers/acpi/acpica/exfldio.c16
-rw-r--r--drivers/acpi/acpica/exmisc.c8
-rw-r--r--drivers/acpi/acpica/exmutex.c46
-rw-r--r--drivers/acpi/acpica/exnames.c4
-rw-r--r--drivers/acpi/acpica/exoparg1.c18
-rw-r--r--drivers/acpi/acpica/exoparg2.c37
-rw-r--r--drivers/acpi/acpica/exoparg3.c4
-rw-r--r--drivers/acpi/acpica/exoparg6.c4
-rw-r--r--drivers/acpi/acpica/exprep.c4
-rw-r--r--drivers/acpi/acpica/exregion.c17
-rw-r--r--drivers/acpi/acpica/exresnte.c4
-rw-r--r--drivers/acpi/acpica/exresolv.c11
-rw-r--r--drivers/acpi/acpica/exresop.c8
-rw-r--r--drivers/acpi/acpica/exstore.c218
-rw-r--r--drivers/acpi/acpica/exsystem.c10
-rw-r--r--drivers/acpi/acpica/hwacpi.c20
-rw-r--r--drivers/acpi/acpica/hwregs.c6
-rw-r--r--drivers/acpi/acpica/hwsleep.c2
-rw-r--r--drivers/acpi/acpica/hwvalid.c2
-rw-r--r--drivers/acpi/acpica/nsaccess.c2
-rw-r--r--drivers/acpi/acpica/nsdump.c4
-rw-r--r--drivers/acpi/acpica/nsnames.c2
-rw-r--r--drivers/acpi/acpica/nssearch.c2
-rw-r--r--drivers/acpi/acpica/nsutils.c4
-rw-r--r--drivers/acpi/acpica/psargs.c4
-rw-r--r--drivers/acpi/acpica/psloop.c3
-rw-r--r--drivers/acpi/acpica/psxface.c5
-rw-r--r--drivers/acpi/acpica/rscreate.c14
-rw-r--r--drivers/acpi/acpica/rslist.c6
-rw-r--r--drivers/acpi/acpica/rsmisc.c4
-rw-r--r--drivers/acpi/acpica/tbfadt.c16
-rw-r--r--drivers/acpi/acpica/tbfind.c2
-rw-r--r--drivers/acpi/acpica/tbinstal.c69
-rw-r--r--drivers/acpi/acpica/tbutils.c101
-rw-r--r--drivers/acpi/acpica/tbxface.c80
-rw-r--r--drivers/acpi/acpica/tbxfroot.c6
-rw-r--r--drivers/acpi/acpica/utalloc.c2
-rw-r--r--drivers/acpi/acpica/utcopy.c14
-rw-r--r--drivers/acpi/acpica/utdelete.c6
-rw-r--r--drivers/acpi/acpica/uteval.c2
-rw-r--r--drivers/acpi/acpica/utglobal.c1
-rw-r--r--drivers/acpi/acpica/utmisc.c6
-rw-r--r--drivers/acpi/acpica/utmutex.c4
-rw-r--r--drivers/acpi/acpica/utobject.c8
-rw-r--r--drivers/acpi/apei/Kconfig30
-rw-r--r--drivers/acpi/apei/Makefile5
-rw-r--r--drivers/acpi/apei/apei-base.c593
-rw-r--r--drivers/acpi/apei/apei-internal.h114
-rw-r--r--drivers/acpi/apei/cper.c84
-rw-r--r--drivers/acpi/apei/einj.c548
-rw-r--r--drivers/acpi/apei/erst.c855
-rw-r--r--drivers/acpi/apei/ghes.c427
-rw-r--r--drivers/acpi/apei/hest.c173
-rw-r--r--drivers/acpi/atomicio.c360
-rw-r--r--drivers/acpi/bus.c53
-rw-r--r--drivers/acpi/ec.c3
-rw-r--r--drivers/acpi/hed.c112
-rw-r--r--drivers/acpi/hest.c139
-rw-r--r--drivers/acpi/osl.c13
-rw-r--r--drivers/acpi/pci_irq.c8
-rw-r--r--drivers/acpi/pci_root.c67
-rw-r--r--drivers/acpi/power.c1
-rw-r--r--drivers/acpi/processor_driver.c15
-rw-r--r--drivers/acpi/processor_idle.c58
-rw-r--r--drivers/acpi/scan.c2
-rw-r--r--drivers/acpi/sleep.c157
-rw-r--r--drivers/acpi/sleep.h2
-rw-r--r--drivers/acpi/system.c7
-rw-r--r--drivers/acpi/tables.c4
-rw-r--r--drivers/acpi/video.c118
-rw-r--r--drivers/acpi/video_detect.c2
100 files changed, 5623 insertions, 1805 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 93d2c7971df6..746411518802 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -360,4 +360,13 @@ config ACPI_SBS
360 To compile this driver as a module, choose M here: 360 To compile this driver as a module, choose M here:
361 the modules will be called sbs and sbshc. 361 the modules will be called sbs and sbshc.
362 362
363config ACPI_HED
364 tristate "Hardware Error Device"
365 help
366 This driver supports the Hardware Error Device (PNP0C33),
367 which is used to report some hardware errors notified via
368 SCI, mainly the corrected errors.
369
370source "drivers/acpi/apei/Kconfig"
371
363endif # ACPI 372endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a8d8998dd5c5..6ee33169e1dc 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,7 +19,7 @@ obj-y += acpi.o \
19 19
20# All the builtin files are in the "acpi." module_param namespace. 20# All the builtin files are in the "acpi." module_param namespace.
21acpi-y += osl.o utils.o reboot.o 21acpi-y += osl.o utils.o reboot.o
22acpi-y += hest.o 22acpi-y += atomicio.o
23 23
24# sleep related files 24# sleep related files
25acpi-y += wakeup.o 25acpi-y += wakeup.o
@@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o
59obj-$(CONFIG_ACPI_SBS) += sbshc.o 59obj-$(CONFIG_ACPI_SBS) += sbshc.o
60obj-$(CONFIG_ACPI_SBS) += sbs.o 60obj-$(CONFIG_ACPI_SBS) += sbs.o
61obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o 61obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
62obj-$(CONFIG_ACPI_HED) += hed.o
62 63
63# processor has its own "processor." module_param namespace 64# processor has its own "processor." module_param namespace
64processor-y := processor_driver.o processor_throttling.o 65processor-y := processor_driver.o processor_throttling.o
@@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o
66processor-$(CONFIG_CPU_FREQ) += processor_perflib.o 67processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
67 68
68obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o 69obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
70
71obj-$(CONFIG_ACPI_APEI) += apei/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 62122134693b..d269a8f3329c 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -43,6 +43,10 @@ static DEFINE_MUTEX(isolated_cpus_lock);
43#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1) 43#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
44#define CPUID5_ECX_INTERRUPT_BREAK (0x2) 44#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
45static unsigned long power_saving_mwait_eax; 45static unsigned long power_saving_mwait_eax;
46
47static unsigned char tsc_detected_unstable;
48static unsigned char tsc_marked_unstable;
49
46static void power_saving_mwait_init(void) 50static void power_saving_mwait_init(void)
47{ 51{
48 unsigned int eax, ebx, ecx, edx; 52 unsigned int eax, ebx, ecx, edx;
@@ -87,8 +91,8 @@ static void power_saving_mwait_init(void)
87 91
88 /*FALL THROUGH*/ 92 /*FALL THROUGH*/
89 default: 93 default:
90 /* TSC could halt in idle, so notify users */ 94 /* TSC could halt in idle */
91 mark_tsc_unstable("TSC halts in idle"); 95 tsc_detected_unstable = 1;
92 } 96 }
93#endif 97#endif
94} 98}
@@ -168,16 +172,14 @@ static int power_saving_thread(void *data)
168 172
169 do_sleep = 0; 173 do_sleep = 0;
170 174
171 current_thread_info()->status &= ~TS_POLLING;
172 /*
173 * TS_POLLING-cleared state must be visible before we test
174 * NEED_RESCHED:
175 */
176 smp_mb();
177
178 expire_time = jiffies + HZ * (100 - idle_pct) / 100; 175 expire_time = jiffies + HZ * (100 - idle_pct) / 100;
179 176
180 while (!need_resched()) { 177 while (!need_resched()) {
178 if (tsc_detected_unstable && !tsc_marked_unstable) {
179 /* TSC could halt in idle, so notify users */
180 mark_tsc_unstable("TSC halts in idle");
181 tsc_marked_unstable = 1;
182 }
181 local_irq_disable(); 183 local_irq_disable();
182 cpu = smp_processor_id(); 184 cpu = smp_processor_id();
183 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, 185 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
@@ -200,8 +202,6 @@ static int power_saving_thread(void *data)
200 } 202 }
201 } 203 }
202 204
203 current_thread_info()->status |= TS_POLLING;
204
205 /* 205 /*
206 * current sched_rt has threshold for rt task running time. 206 * current sched_rt has threshold for rt task running time.
207 * When a rt task uses 95% CPU time, the rt thread will be 207 * When a rt task uses 95% CPU time, the rt thread will be
diff --git a/drivers/acpi/acpica/Makefile b/drivers/acpi/acpica/Makefile
index 7423052ece5a..d93cc06f4bf8 100644
--- a/drivers/acpi/acpica/Makefile
+++ b/drivers/acpi/acpica/Makefile
@@ -14,12 +14,12 @@ acpi-y := dsfield.o dsmthdat.o dsopcode.o dswexec.o dswscope.o \
14 14
15acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \ 15acpi-y += evevent.o evregion.o evsci.o evxfevnt.o \
16 evmisc.o evrgnini.o evxface.o evxfregn.o \ 16 evmisc.o evrgnini.o evxface.o evxfregn.o \
17 evgpe.o evgpeblk.o 17 evgpe.o evgpeblk.o evgpeinit.o evgpeutil.o
18 18
19acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\ 19acpi-y += exconfig.o exfield.o exnames.o exoparg6.o exresolv.o exstorob.o\
20 exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\ 20 exconvrt.o exfldio.o exoparg1.o exprep.o exresop.o exsystem.o\
21 excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \ 21 excreate.o exmisc.o exoparg2.o exregion.o exstore.o exutils.o \
22 exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o 22 exdump.o exmutex.o exoparg3.o exresnte.o exstoren.o exdebug.o
23 23
24acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o 24acpi-y += hwacpi.o hwgpe.o hwregs.o hwsleep.o hwxface.o hwvalid.o
25 25
diff --git a/drivers/acpi/acpica/acevents.h b/drivers/acpi/acpica/acevents.h
index 3e6ba99e4053..64d1e5c2d4ae 100644
--- a/drivers/acpi/acpica/acevents.h
+++ b/drivers/acpi/acpica/acevents.h
@@ -73,8 +73,10 @@ acpi_ev_queue_notify_request(struct acpi_namespace_node *node,
73 u32 notify_value); 73 u32 notify_value);
74 74
75/* 75/*
76 * evgpe - GPE handling and dispatch 76 * evgpe - Low-level GPE support
77 */ 77 */
78u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list);
79
78acpi_status 80acpi_status
79acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info); 81acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info);
80 82
@@ -85,19 +87,13 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info);
85struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device, 87struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
86 u32 gpe_number); 88 u32 gpe_number);
87 89
90struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
91 struct acpi_gpe_block_info
92 *gpe_block);
93
88/* 94/*
89 * evgpeblk 95 * evgpeblk - Upper-level GPE block support
90 */ 96 */
91u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
92
93acpi_status
94acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
95
96acpi_status
97acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
98 struct acpi_gpe_block_info *gpe_block,
99 void *context);
100
101acpi_status 97acpi_status
102acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device, 98acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
103 struct acpi_generic_address *gpe_block_address, 99 struct acpi_generic_address *gpe_block_address,
@@ -116,12 +112,37 @@ u32
116acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, 112acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info,
117 u32 gpe_number); 113 u32 gpe_number);
118 114
119u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list); 115/*
116 * evgpeinit - GPE initialization and update
117 */
118acpi_status acpi_ev_gpe_initialize(void);
119
120void acpi_ev_update_gpes(acpi_owner_id table_owner_id);
120 121
121acpi_status 122acpi_status
122acpi_ev_check_for_wake_only_gpe(struct acpi_gpe_event_info *gpe_event_info); 123acpi_ev_match_gpe_method(acpi_handle obj_handle,
124 u32 level, void *context, void **return_value);
123 125
124acpi_status acpi_ev_gpe_initialize(void); 126acpi_status
127acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
128 u32 level, void *context, void **return_value);
129
130/*
131 * evgpeutil - GPE utilities
132 */
133acpi_status
134acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context);
135
136u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info);
137
138struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number);
139
140acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
141
142acpi_status
143acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
144 struct acpi_gpe_block_info *gpe_block,
145 void *context);
125 146
126/* 147/*
127 * evregion - Address Space handling 148 * evregion - Address Space handling
diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h
index f8dd8f250ac4..9070f1fe8f17 100644
--- a/drivers/acpi/acpica/acglobal.h
+++ b/drivers/acpi/acpica/acglobal.h
@@ -112,6 +112,19 @@ u8 ACPI_INIT_GLOBAL(acpi_gbl_leave_wake_gpes_disabled, TRUE);
112 */ 112 */
113u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE); 113u8 ACPI_INIT_GLOBAL(acpi_gbl_use_default_register_widths, TRUE);
114 114
115/*
116 * Optionally enable output from the AML Debug Object.
117 */
118u8 ACPI_INIT_GLOBAL(acpi_gbl_enable_aml_debug_object, FALSE);
119
120/*
121 * Optionally copy the entire DSDT to local memory (instead of simply
122 * mapping it.) There are some BIOSs that corrupt or replace the original
123 * DSDT, creating the need for this option. Default is FALSE, do not copy
124 * the DSDT.
125 */
126u8 ACPI_INIT_GLOBAL(acpi_gbl_copy_dsdt_locally, FALSE);
127
115/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */ 128/* acpi_gbl_FADT is a local copy of the FADT, converted to a common format. */
116 129
117struct acpi_table_fadt acpi_gbl_FADT; 130struct acpi_table_fadt acpi_gbl_FADT;
@@ -145,11 +158,10 @@ ACPI_EXTERN u32 acpi_gbl_trace_dbg_layer;
145 ****************************************************************************/ 158 ****************************************************************************/
146 159
147/* 160/*
148 * acpi_gbl_root_table_list is the master list of ACPI tables found in the 161 * acpi_gbl_root_table_list is the master list of ACPI tables that were
149 * RSDT/XSDT. 162 * found in the RSDT/XSDT.
150 *
151 */ 163 */
152ACPI_EXTERN struct acpi_internal_rsdt acpi_gbl_root_table_list; 164ACPI_EXTERN struct acpi_table_list acpi_gbl_root_table_list;
153ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS; 165ACPI_EXTERN struct acpi_table_facs *acpi_gbl_FACS;
154 166
155/* These addresses are calculated from the FADT Event Block addresses */ 167/* These addresses are calculated from the FADT Event Block addresses */
@@ -160,6 +172,11 @@ ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1a_enable;
160ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_status; 172ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_status;
161ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable; 173ACPI_EXTERN struct acpi_generic_address acpi_gbl_xpm1b_enable;
162 174
175/* DSDT information. Used to check for DSDT corruption */
176
177ACPI_EXTERN struct acpi_table_header *acpi_gbl_DSDT;
178ACPI_EXTERN struct acpi_table_header acpi_gbl_original_dsdt_header;
179
163/* 180/*
164 * Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is 181 * Handle both ACPI 1.0 and ACPI 2.0 Integer widths. The integer width is
165 * determined by the revision of the DSDT: If the DSDT revision is less than 182 * determined by the revision of the DSDT: If the DSDT revision is less than
diff --git a/drivers/acpi/acpica/acinterp.h b/drivers/acpi/acpica/acinterp.h
index 6df3f8428168..049e203bd621 100644
--- a/drivers/acpi/acpica/acinterp.h
+++ b/drivers/acpi/acpica/acinterp.h
@@ -121,6 +121,13 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
121 struct acpi_walk_state *walk_state); 121 struct acpi_walk_state *walk_state);
122 122
123/* 123/*
124 * exdebug - AML debug object
125 */
126void
127acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
128 u32 level, u32 index);
129
130/*
124 * exfield - ACPI AML (p-code) execution - field manipulation 131 * exfield - ACPI AML (p-code) execution - field manipulation
125 */ 132 */
126acpi_status 133acpi_status
@@ -274,7 +281,7 @@ acpi_status
274acpi_ex_system_do_notify_op(union acpi_operand_object *value, 281acpi_ex_system_do_notify_op(union acpi_operand_object *value,
275 union acpi_operand_object *obj_desc); 282 union acpi_operand_object *obj_desc);
276 283
277acpi_status acpi_ex_system_do_suspend(u64 time); 284acpi_status acpi_ex_system_do_sleep(u64 time);
278 285
279acpi_status acpi_ex_system_do_stall(u32 time); 286acpi_status acpi_ex_system_do_stall(u32 time);
280 287
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 24b8faa5c395..147a7e6bd38f 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -213,12 +213,12 @@ struct acpi_namespace_node {
213#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */ 213#define ANOBJ_IS_BIT_OFFSET 0x40 /* i_aSL only: Reference is a bit offset */
214#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */ 214#define ANOBJ_IS_REFERENCED 0x80 /* i_aSL only: Object was referenced */
215 215
216/* One internal RSDT for table management */ 216/* Internal ACPI table management - master table list */
217 217
218struct acpi_internal_rsdt { 218struct acpi_table_list {
219 struct acpi_table_desc *tables; 219 struct acpi_table_desc *tables; /* Table descriptor array */
220 u32 count; 220 u32 current_table_count; /* Tables currently in the array */
221 u32 size; 221 u32 max_table_count; /* Max tables array will hold */
222 u8 flags; 222 u8 flags;
223}; 223};
224 224
@@ -427,8 +427,8 @@ struct acpi_gpe_event_info {
427 struct acpi_gpe_register_info *register_info; /* Backpointer to register info */ 427 struct acpi_gpe_register_info *register_info; /* Backpointer to register info */
428 u8 flags; /* Misc info about this GPE */ 428 u8 flags; /* Misc info about this GPE */
429 u8 gpe_number; /* This GPE */ 429 u8 gpe_number; /* This GPE */
430 u8 runtime_count; 430 u8 runtime_count; /* References to a run GPE */
431 u8 wakeup_count; 431 u8 wakeup_count; /* References to a wake GPE */
432}; 432};
433 433
434/* Information about a GPE register pair, one per each status/enable pair in an array */ 434/* Information about a GPE register pair, one per each status/enable pair in an array */
@@ -454,6 +454,7 @@ struct acpi_gpe_block_info {
454 struct acpi_gpe_event_info *event_info; /* One for each GPE */ 454 struct acpi_gpe_event_info *event_info; /* One for each GPE */
455 struct acpi_generic_address block_address; /* Base address of the block */ 455 struct acpi_generic_address block_address; /* Base address of the block */
456 u32 register_count; /* Number of register pairs in block */ 456 u32 register_count; /* Number of register pairs in block */
457 u16 gpe_count; /* Number of individual GPEs in block */
457 u8 block_base_number; /* Base GPE number for this block */ 458 u8 block_base_number; /* Base GPE number for this block */
458}; 459};
459 460
@@ -469,6 +470,10 @@ struct acpi_gpe_xrupt_info {
469struct acpi_gpe_walk_info { 470struct acpi_gpe_walk_info {
470 struct acpi_namespace_node *gpe_device; 471 struct acpi_namespace_node *gpe_device;
471 struct acpi_gpe_block_info *gpe_block; 472 struct acpi_gpe_block_info *gpe_block;
473 u16 count;
474 acpi_owner_id owner_id;
475 u8 enable_this_gpe;
476 u8 execute_by_owner_id;
472}; 477};
473 478
474struct acpi_gpe_device_info { 479struct acpi_gpe_device_info {
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index 8ff3b741df28..62a576e34361 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -107,6 +107,10 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length);
107acpi_status 107acpi_status
108acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length); 108acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length);
109 109
110void acpi_tb_check_dsdt_header(void);
111
112struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index);
113
110void 114void
111acpi_tb_install_table(acpi_physical_address address, 115acpi_tb_install_table(acpi_physical_address address,
112 char *signature, u32 table_index); 116 char *signature, u32 table_index);
diff --git a/drivers/acpi/acpica/dsfield.c b/drivers/acpi/acpica/dsfield.c
index bb13817e0c31..347bee1726f1 100644
--- a/drivers/acpi/acpica/dsfield.c
+++ b/drivers/acpi/acpica/dsfield.c
@@ -323,7 +323,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
323 default: 323 default:
324 324
325 ACPI_ERROR((AE_INFO, 325 ACPI_ERROR((AE_INFO,
326 "Invalid opcode in field list: %X", 326 "Invalid opcode in field list: 0x%X",
327 arg->common.aml_opcode)); 327 arg->common.aml_opcode));
328 return_ACPI_STATUS(AE_AML_BAD_OPCODE); 328 return_ACPI_STATUS(AE_AML_BAD_OPCODE);
329 } 329 }
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c
index 721039233aa7..2a9a561c2f01 100644
--- a/drivers/acpi/acpica/dsmethod.c
+++ b/drivers/acpi/acpica/dsmethod.c
@@ -225,7 +225,7 @@ acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
225 (walk_state->thread->current_sync_level > 225 (walk_state->thread->current_sync_level >
226 obj_desc->method.mutex->mutex.sync_level)) { 226 obj_desc->method.mutex->mutex.sync_level)) {
227 ACPI_ERROR((AE_INFO, 227 ACPI_ERROR((AE_INFO,
228 "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%d)", 228 "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
229 acpi_ut_get_node_name(method_node), 229 acpi_ut_get_node_name(method_node),
230 walk_state->thread->current_sync_level)); 230 walk_state->thread->current_sync_level));
231 231
diff --git a/drivers/acpi/acpica/dsmthdat.c b/drivers/acpi/acpica/dsmthdat.c
index cc343b959540..f3d52f59250b 100644
--- a/drivers/acpi/acpica/dsmthdat.c
+++ b/drivers/acpi/acpica/dsmthdat.c
@@ -262,7 +262,7 @@ acpi_ds_method_data_get_node(u8 type,
262 262
263 if (index > ACPI_METHOD_MAX_LOCAL) { 263 if (index > ACPI_METHOD_MAX_LOCAL) {
264 ACPI_ERROR((AE_INFO, 264 ACPI_ERROR((AE_INFO,
265 "Local index %d is invalid (max %d)", 265 "Local index %u is invalid (max %u)",
266 index, ACPI_METHOD_MAX_LOCAL)); 266 index, ACPI_METHOD_MAX_LOCAL));
267 return_ACPI_STATUS(AE_AML_INVALID_INDEX); 267 return_ACPI_STATUS(AE_AML_INVALID_INDEX);
268 } 268 }
@@ -276,7 +276,7 @@ acpi_ds_method_data_get_node(u8 type,
276 276
277 if (index > ACPI_METHOD_MAX_ARG) { 277 if (index > ACPI_METHOD_MAX_ARG) {
278 ACPI_ERROR((AE_INFO, 278 ACPI_ERROR((AE_INFO,
279 "Arg index %d is invalid (max %d)", 279 "Arg index %u is invalid (max %u)",
280 index, ACPI_METHOD_MAX_ARG)); 280 index, ACPI_METHOD_MAX_ARG));
281 return_ACPI_STATUS(AE_AML_INVALID_INDEX); 281 return_ACPI_STATUS(AE_AML_INVALID_INDEX);
282 } 282 }
@@ -287,7 +287,7 @@ acpi_ds_method_data_get_node(u8 type,
287 break; 287 break;
288 288
289 default: 289 default:
290 ACPI_ERROR((AE_INFO, "Type %d is invalid", type)); 290 ACPI_ERROR((AE_INFO, "Type %u is invalid", type));
291 return_ACPI_STATUS(AE_TYPE); 291 return_ACPI_STATUS(AE_TYPE);
292 } 292 }
293 293
@@ -424,7 +424,7 @@ acpi_ds_method_data_get_value(u8 type,
424 case ACPI_REFCLASS_ARG: 424 case ACPI_REFCLASS_ARG:
425 425
426 ACPI_ERROR((AE_INFO, 426 ACPI_ERROR((AE_INFO,
427 "Uninitialized Arg[%d] at node %p", 427 "Uninitialized Arg[%u] at node %p",
428 index, node)); 428 index, node));
429 429
430 return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG); 430 return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG);
@@ -440,7 +440,7 @@ acpi_ds_method_data_get_value(u8 type,
440 default: 440 default:
441 441
442 ACPI_ERROR((AE_INFO, 442 ACPI_ERROR((AE_INFO,
443 "Not a Arg/Local opcode: %X", 443 "Not a Arg/Local opcode: 0x%X",
444 type)); 444 type));
445 return_ACPI_STATUS(AE_AML_INTERNAL); 445 return_ACPI_STATUS(AE_AML_INTERNAL);
446 } 446 }
diff --git a/drivers/acpi/acpica/dsobject.c b/drivers/acpi/acpica/dsobject.c
index 891e08bf560b..3607adcaf085 100644
--- a/drivers/acpi/acpica/dsobject.c
+++ b/drivers/acpi/acpica/dsobject.c
@@ -288,7 +288,7 @@ acpi_ds_build_internal_buffer_obj(struct acpi_walk_state *walk_state,
288 if (byte_list) { 288 if (byte_list) {
289 if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) { 289 if (byte_list->common.aml_opcode != AML_INT_BYTELIST_OP) {
290 ACPI_ERROR((AE_INFO, 290 ACPI_ERROR((AE_INFO,
291 "Expecting bytelist, got AML opcode %X in op %p", 291 "Expecting bytelist, found AML opcode 0x%X in op %p",
292 byte_list->common.aml_opcode, byte_list)); 292 byte_list->common.aml_opcode, byte_list));
293 293
294 acpi_ut_remove_reference(obj_desc); 294 acpi_ut_remove_reference(obj_desc);
@@ -511,7 +511,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
511 } 511 }
512 512
513 ACPI_INFO((AE_INFO, 513 ACPI_INFO((AE_INFO,
514 "Actual Package length (0x%X) is larger than NumElements field (0x%X), truncated\n", 514 "Actual Package length (%u) is larger than NumElements field (%u), truncated\n",
515 i, element_count)); 515 i, element_count));
516 } else if (i < element_count) { 516 } else if (i < element_count) {
517 /* 517 /*
@@ -519,7 +519,7 @@ acpi_ds_build_internal_package_obj(struct acpi_walk_state *walk_state,
519 * Note: this is not an error, the package is padded out with NULLs. 519 * Note: this is not an error, the package is padded out with NULLs.
520 */ 520 */
521 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 521 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
522 "Package List length (0x%X) smaller than NumElements count (0x%X), padded with null elements\n", 522 "Package List length (%u) smaller than NumElements count (%u), padded with null elements\n",
523 i, element_count)); 523 i, element_count));
524 } 524 }
525 525
@@ -701,7 +701,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
701 default: 701 default:
702 702
703 ACPI_ERROR((AE_INFO, 703 ACPI_ERROR((AE_INFO,
704 "Unknown constant opcode %X", 704 "Unknown constant opcode 0x%X",
705 opcode)); 705 opcode));
706 status = AE_AML_OPERAND_TYPE; 706 status = AE_AML_OPERAND_TYPE;
707 break; 707 break;
@@ -717,7 +717,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
717 break; 717 break;
718 718
719 default: 719 default:
720 ACPI_ERROR((AE_INFO, "Unknown Integer type %X", 720 ACPI_ERROR((AE_INFO, "Unknown Integer type 0x%X",
721 op_info->type)); 721 op_info->type));
722 status = AE_AML_OPERAND_TYPE; 722 status = AE_AML_OPERAND_TYPE;
723 break; 723 break;
@@ -806,7 +806,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
806 default: 806 default:
807 807
808 ACPI_ERROR((AE_INFO, 808 ACPI_ERROR((AE_INFO,
809 "Unimplemented reference type for AML opcode: %4.4X", 809 "Unimplemented reference type for AML opcode: 0x%4.4X",
810 opcode)); 810 opcode));
811 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 811 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
812 } 812 }
@@ -816,7 +816,7 @@ acpi_ds_init_object_from_op(struct acpi_walk_state *walk_state,
816 816
817 default: 817 default:
818 818
819 ACPI_ERROR((AE_INFO, "Unimplemented data type: %X", 819 ACPI_ERROR((AE_INFO, "Unimplemented data type: 0x%X",
820 obj_desc->common.type)); 820 obj_desc->common.type));
821 821
822 status = AE_AML_OPERAND_TYPE; 822 status = AE_AML_OPERAND_TYPE;
diff --git a/drivers/acpi/acpica/dsopcode.c b/drivers/acpi/acpica/dsopcode.c
index bf980cadb1e8..53a7e416f33e 100644
--- a/drivers/acpi/acpica/dsopcode.c
+++ b/drivers/acpi/acpica/dsopcode.c
@@ -292,7 +292,7 @@ acpi_status acpi_ds_get_buffer_arguments(union acpi_operand_object *obj_desc)
292 node = obj_desc->buffer.node; 292 node = obj_desc->buffer.node;
293 if (!node) { 293 if (!node) {
294 ACPI_ERROR((AE_INFO, 294 ACPI_ERROR((AE_INFO,
295 "No pointer back to NS node in buffer obj %p", 295 "No pointer back to namespace node in buffer object %p",
296 obj_desc)); 296 obj_desc));
297 return_ACPI_STATUS(AE_AML_INTERNAL); 297 return_ACPI_STATUS(AE_AML_INTERNAL);
298 } 298 }
@@ -336,7 +336,7 @@ acpi_status acpi_ds_get_package_arguments(union acpi_operand_object *obj_desc)
336 node = obj_desc->package.node; 336 node = obj_desc->package.node;
337 if (!node) { 337 if (!node) {
338 ACPI_ERROR((AE_INFO, 338 ACPI_ERROR((AE_INFO,
339 "No pointer back to NS node in package %p", 339 "No pointer back to namespace node in package %p",
340 obj_desc)); 340 obj_desc));
341 return_ACPI_STATUS(AE_AML_INTERNAL); 341 return_ACPI_STATUS(AE_AML_INTERNAL);
342 } 342 }
@@ -580,7 +580,8 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
580 default: 580 default:
581 581
582 ACPI_ERROR((AE_INFO, 582 ACPI_ERROR((AE_INFO,
583 "Unknown field creation opcode %02x", aml_opcode)); 583 "Unknown field creation opcode 0x%02X",
584 aml_opcode));
584 status = AE_AML_BAD_OPCODE; 585 status = AE_AML_BAD_OPCODE;
585 goto cleanup; 586 goto cleanup;
586 } 587 }
@@ -589,7 +590,7 @@ acpi_ds_init_buffer_field(u16 aml_opcode,
589 590
590 if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) { 591 if ((bit_offset + bit_count) > (8 * (u32) buffer_desc->buffer.length)) {
591 ACPI_ERROR((AE_INFO, 592 ACPI_ERROR((AE_INFO,
592 "Field [%4.4s] at %d exceeds Buffer [%4.4s] size %d (bits)", 593 "Field [%4.4s] at %u exceeds Buffer [%4.4s] size %u (bits)",
593 acpi_ut_get_node_name(result_desc), 594 acpi_ut_get_node_name(result_desc),
594 bit_offset + bit_count, 595 bit_offset + bit_count,
595 acpi_ut_get_node_name(buffer_desc->buffer.node), 596 acpi_ut_get_node_name(buffer_desc->buffer.node),
@@ -693,7 +694,7 @@ acpi_ds_eval_buffer_field_operands(struct acpi_walk_state *walk_state,
693 status = acpi_ex_resolve_operands(op->common.aml_opcode, 694 status = acpi_ex_resolve_operands(op->common.aml_opcode,
694 ACPI_WALK_OPERANDS, walk_state); 695 ACPI_WALK_OPERANDS, walk_state);
695 if (ACPI_FAILURE(status)) { 696 if (ACPI_FAILURE(status)) {
696 ACPI_ERROR((AE_INFO, "(%s) bad operand(s) (%X)", 697 ACPI_ERROR((AE_INFO, "(%s) bad operand(s), status 0x%X",
697 acpi_ps_get_opcode_name(op->common.aml_opcode), 698 acpi_ps_get_opcode_name(op->common.aml_opcode),
698 status)); 699 status));
699 700
@@ -1461,7 +1462,7 @@ acpi_ds_exec_end_control_op(struct acpi_walk_state * walk_state,
1461 1462
1462 default: 1463 default:
1463 1464
1464 ACPI_ERROR((AE_INFO, "Unknown control opcode=%X Op=%p", 1465 ACPI_ERROR((AE_INFO, "Unknown control opcode=0x%X Op=%p",
1465 op->common.aml_opcode, op)); 1466 op->common.aml_opcode, op));
1466 1467
1467 status = AE_AML_BAD_OPCODE; 1468 status = AE_AML_BAD_OPCODE;
diff --git a/drivers/acpi/acpica/dswexec.c b/drivers/acpi/acpica/dswexec.c
index 6b76c486d784..d555b374e314 100644
--- a/drivers/acpi/acpica/dswexec.c
+++ b/drivers/acpi/acpica/dswexec.c
@@ -140,7 +140,7 @@ acpi_ds_get_predicate_value(struct acpi_walk_state *walk_state,
140 140
141 if (local_obj_desc->common.type != ACPI_TYPE_INTEGER) { 141 if (local_obj_desc->common.type != ACPI_TYPE_INTEGER) {
142 ACPI_ERROR((AE_INFO, 142 ACPI_ERROR((AE_INFO,
143 "Bad predicate (not an integer) ObjDesc=%p State=%p Type=%X", 143 "Bad predicate (not an integer) ObjDesc=%p State=%p Type=0x%X",
144 obj_desc, walk_state, obj_desc->common.type)); 144 obj_desc, walk_state, obj_desc->common.type));
145 145
146 status = AE_AML_OPERAND_TYPE; 146 status = AE_AML_OPERAND_TYPE;
@@ -354,7 +354,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
354 op_class = walk_state->op_info->class; 354 op_class = walk_state->op_info->class;
355 355
356 if (op_class == AML_CLASS_UNKNOWN) { 356 if (op_class == AML_CLASS_UNKNOWN) {
357 ACPI_ERROR((AE_INFO, "Unknown opcode %X", 357 ACPI_ERROR((AE_INFO, "Unknown opcode 0x%X",
358 op->common.aml_opcode)); 358 op->common.aml_opcode));
359 return_ACPI_STATUS(AE_NOT_IMPLEMENTED); 359 return_ACPI_STATUS(AE_NOT_IMPLEMENTED);
360 } 360 }
@@ -678,7 +678,7 @@ acpi_status acpi_ds_exec_end_op(struct acpi_walk_state *walk_state)
678 default: 678 default:
679 679
680 ACPI_ERROR((AE_INFO, 680 ACPI_ERROR((AE_INFO,
681 "Unimplemented opcode, class=%X type=%X Opcode=%X Op=%p", 681 "Unimplemented opcode, class=0x%X type=0x%X Opcode=-0x%X Op=%p",
682 op_class, op_type, op->common.aml_opcode, 682 op_class, op_type, op->common.aml_opcode,
683 op)); 683 op));
684 684
diff --git a/drivers/acpi/acpica/dswstate.c b/drivers/acpi/acpica/dswstate.c
index 050df8164165..83155dd8671e 100644
--- a/drivers/acpi/acpica/dswstate.c
+++ b/drivers/acpi/acpica/dswstate.c
@@ -179,7 +179,7 @@ acpi_ds_result_push(union acpi_operand_object * object,
179 179
180 if (!object) { 180 if (!object) {
181 ACPI_ERROR((AE_INFO, 181 ACPI_ERROR((AE_INFO,
182 "Null Object! Obj=%p State=%p Num=%X", 182 "Null Object! Obj=%p State=%p Num=%u",
183 object, walk_state, walk_state->result_count)); 183 object, walk_state, walk_state->result_count));
184 return (AE_BAD_PARAMETER); 184 return (AE_BAD_PARAMETER);
185 } 185 }
@@ -223,7 +223,7 @@ static acpi_status acpi_ds_result_stack_push(struct acpi_walk_state *walk_state)
223 223
224 if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) > 224 if (((u32) walk_state->result_size + ACPI_RESULTS_FRAME_OBJ_NUM) >
225 ACPI_RESULTS_OBJ_NUM_MAX) { 225 ACPI_RESULTS_OBJ_NUM_MAX) {
226 ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%X", 226 ACPI_ERROR((AE_INFO, "Result stack overflow: State=%p Num=%u",
227 walk_state, walk_state->result_size)); 227 walk_state, walk_state->result_size));
228 return (AE_STACK_OVERFLOW); 228 return (AE_STACK_OVERFLOW);
229 } 229 }
@@ -314,7 +314,7 @@ acpi_ds_obj_stack_push(void *object, struct acpi_walk_state * walk_state)
314 314
315 if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) { 315 if (walk_state->num_operands >= ACPI_OBJ_NUM_OPERANDS) {
316 ACPI_ERROR((AE_INFO, 316 ACPI_ERROR((AE_INFO,
317 "Object stack overflow! Obj=%p State=%p #Ops=%X", 317 "Object stack overflow! Obj=%p State=%p #Ops=%u",
318 object, walk_state, walk_state->num_operands)); 318 object, walk_state, walk_state->num_operands));
319 return (AE_STACK_OVERFLOW); 319 return (AE_STACK_OVERFLOW);
320 } 320 }
@@ -365,7 +365,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
365 365
366 if (walk_state->num_operands == 0) { 366 if (walk_state->num_operands == 0) {
367 ACPI_ERROR((AE_INFO, 367 ACPI_ERROR((AE_INFO,
368 "Object stack underflow! Count=%X State=%p #Ops=%X", 368 "Object stack underflow! Count=%X State=%p #Ops=%u",
369 pop_count, walk_state, 369 pop_count, walk_state,
370 walk_state->num_operands)); 370 walk_state->num_operands));
371 return (AE_STACK_UNDERFLOW); 371 return (AE_STACK_UNDERFLOW);
@@ -377,7 +377,7 @@ acpi_ds_obj_stack_pop(u32 pop_count, struct acpi_walk_state * walk_state)
377 walk_state->operands[walk_state->num_operands] = NULL; 377 walk_state->operands[walk_state->num_operands] = NULL;
378 } 378 }
379 379
380 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%X\n", 380 ACPI_DEBUG_PRINT((ACPI_DB_EXEC, "Count=%X State=%p #Ops=%u\n",
381 pop_count, walk_state, walk_state->num_operands)); 381 pop_count, walk_state, walk_state->num_operands));
382 382
383 return (AE_OK); 383 return (AE_OK);
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index c1e6f472d435..f5795915a2e9 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -302,7 +302,7 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
302 ACPI_DISABLE_EVENT); 302 ACPI_DISABLE_EVENT);
303 303
304 ACPI_ERROR((AE_INFO, 304 ACPI_ERROR((AE_INFO,
305 "No installed handler for fixed event [%08X]", 305 "No installed handler for fixed event [0x%08X]",
306 event)); 306 event));
307 307
308 return (ACPI_INTERRUPT_NOT_HANDLED); 308 return (ACPI_INTERRUPT_NOT_HANDLED);
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index 78c55508aff5..a221ad404167 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -60,7 +60,8 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
60 * 60 *
61 * RETURN: Status 61 * RETURN: Status
62 * 62 *
63 * DESCRIPTION: Updates GPE register enable masks based on the GPE type 63 * DESCRIPTION: Updates GPE register enable masks based upon whether there are
64 * references (either wake or run) to this GPE
64 * 65 *
65 ******************************************************************************/ 66 ******************************************************************************/
66 67
@@ -81,14 +82,20 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
81 (1 << 82 (1 <<
82 (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number)); 83 (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));
83 84
85 /* Clear the wake/run bits up front */
86
84 ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit); 87 ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake, register_bit);
85 ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit); 88 ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
86 89
87 if (gpe_event_info->runtime_count) 90 /* Set the mask bits only if there are references to this GPE */
91
92 if (gpe_event_info->runtime_count) {
88 ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit); 93 ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
94 }
89 95
90 if (gpe_event_info->wakeup_count) 96 if (gpe_event_info->wakeup_count) {
91 ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit); 97 ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
98 }
92 99
93 return_ACPI_STATUS(AE_OK); 100 return_ACPI_STATUS(AE_OK);
94} 101}
@@ -101,7 +108,10 @@ acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info)
101 * 108 *
102 * RETURN: Status 109 * RETURN: Status
103 * 110 *
104 * DESCRIPTION: Enable a GPE based on the GPE type 111 * DESCRIPTION: Hardware-enable a GPE. Always enables the GPE, regardless
112 * of type or number of references.
113 *
114 * Note: The GPE lock should be already acquired when this function is called.
105 * 115 *
106 ******************************************************************************/ 116 ******************************************************************************/
107 117
@@ -109,20 +119,36 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
109{ 119{
110 acpi_status status; 120 acpi_status status;
111 121
122
112 ACPI_FUNCTION_TRACE(ev_enable_gpe); 123 ACPI_FUNCTION_TRACE(ev_enable_gpe);
113 124
114 /* Make sure HW enable masks are updated */ 125
126 /*
127 * We will only allow a GPE to be enabled if it has either an
128 * associated method (_Lxx/_Exx) or a handler. Otherwise, the
129 * GPE will be immediately disabled by acpi_ev_gpe_dispatch the
130 * first time it fires.
131 */
132 if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)) {
133 return_ACPI_STATUS(AE_NO_HANDLER);
134 }
135
136 /* Ensure the HW enable masks are current */
115 137
116 status = acpi_ev_update_gpe_enable_masks(gpe_event_info); 138 status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
117 if (ACPI_FAILURE(status)) 139 if (ACPI_FAILURE(status)) {
118 return_ACPI_STATUS(status); 140 return_ACPI_STATUS(status);
141 }
142
143 /* Clear the GPE (of stale events) */
119 144
120 /* Clear the GPE (of stale events), then enable it */
121 status = acpi_hw_clear_gpe(gpe_event_info); 145 status = acpi_hw_clear_gpe(gpe_event_info);
122 if (ACPI_FAILURE(status)) 146 if (ACPI_FAILURE(status)) {
123 return_ACPI_STATUS(status); 147 return_ACPI_STATUS(status);
148 }
124 149
125 /* Enable the requested GPE */ 150 /* Enable the requested GPE */
151
126 status = acpi_hw_write_gpe_enable_reg(gpe_event_info); 152 status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
127 return_ACPI_STATUS(status); 153 return_ACPI_STATUS(status);
128} 154}
@@ -135,7 +161,10 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
135 * 161 *
136 * RETURN: Status 162 * RETURN: Status
137 * 163 *
138 * DESCRIPTION: Disable a GPE based on the GPE type 164 * DESCRIPTION: Hardware-disable a GPE. Always disables the requested GPE,
165 * regardless of the type or number of references.
166 *
167 * Note: The GPE lock should be already acquired when this function is called.
139 * 168 *
140 ******************************************************************************/ 169 ******************************************************************************/
141 170
@@ -145,24 +174,71 @@ acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
145 174
146 ACPI_FUNCTION_TRACE(ev_disable_gpe); 175 ACPI_FUNCTION_TRACE(ev_disable_gpe);
147 176
148 /* Make sure HW enable masks are updated */ 177
178 /*
179 * Note: Always disable the GPE, even if we think that that it is already
180 * disabled. It is possible that the AML or some other code has enabled
181 * the GPE behind our back.
182 */
183
184 /* Ensure the HW enable masks are current */
149 185
150 status = acpi_ev_update_gpe_enable_masks(gpe_event_info); 186 status = acpi_ev_update_gpe_enable_masks(gpe_event_info);
151 if (ACPI_FAILURE(status)) 187 if (ACPI_FAILURE(status)) {
152 return_ACPI_STATUS(status); 188 return_ACPI_STATUS(status);
189 }
153 190
154 /* 191 /*
155 * Even if we don't know the GPE type, make sure that we always 192 * Always H/W disable this GPE, even if we don't know the GPE type.
156 * disable it. low_disable_gpe will just clear the enable bit for this 193 * Simply clear the enable bit for this particular GPE, but do not
157 * GPE and write it. It will not write out the current GPE enable mask, 194 * write out the current GPE enable mask since this may inadvertently
158 * since this may inadvertently enable GPEs too early, if a rogue GPE has 195 * enable GPEs too early. An example is a rogue GPE that has arrived
159 * come in during ACPICA initialization - possibly as a result of AML or 196 * during ACPICA initialization - possibly because AML or other code
160 * other code that has enabled the GPE. 197 * has enabled the GPE.
161 */ 198 */
162 status = acpi_hw_low_disable_gpe(gpe_event_info); 199 status = acpi_hw_low_disable_gpe(gpe_event_info);
163 return_ACPI_STATUS(status); 200 return_ACPI_STATUS(status);
164} 201}
165 202
203
204/*******************************************************************************
205 *
206 * FUNCTION: acpi_ev_low_get_gpe_info
207 *
208 * PARAMETERS: gpe_number - Raw GPE number
209 * gpe_block - A GPE info block
210 *
211 * RETURN: A GPE event_info struct. NULL if not a valid GPE (The gpe_number
212 * is not within the specified GPE block)
213 *
214 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
215 * the low-level implementation of ev_get_gpe_event_info.
216 *
217 ******************************************************************************/
218
219struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
220 struct acpi_gpe_block_info
221 *gpe_block)
222{
223 u32 gpe_index;
224
225 /*
226 * Validate that the gpe_number is within the specified gpe_block.
227 * (Two steps)
228 */
229 if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
230 return (NULL);
231 }
232
233 gpe_index = gpe_number - gpe_block->block_base_number;
234 if (gpe_index >= gpe_block->gpe_count) {
235 return (NULL);
236 }
237
238 return (&gpe_block->event_info[gpe_index]);
239}
240
241
166/******************************************************************************* 242/*******************************************************************************
167 * 243 *
168 * FUNCTION: acpi_ev_get_gpe_event_info 244 * FUNCTION: acpi_ev_get_gpe_event_info
@@ -184,29 +260,23 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
184 u32 gpe_number) 260 u32 gpe_number)
185{ 261{
186 union acpi_operand_object *obj_desc; 262 union acpi_operand_object *obj_desc;
187 struct acpi_gpe_block_info *gpe_block; 263 struct acpi_gpe_event_info *gpe_info;
188 u32 i; 264 u32 i;
189 265
190 ACPI_FUNCTION_ENTRY(); 266 ACPI_FUNCTION_ENTRY();
191 267
192 /* A NULL gpe_block means use the FADT-defined GPE block(s) */ 268 /* A NULL gpe_device means use the FADT-defined GPE block(s) */
193 269
194 if (!gpe_device) { 270 if (!gpe_device) {
195 271
196 /* Examine GPE Block 0 and 1 (These blocks are permanent) */ 272 /* Examine GPE Block 0 and 1 (These blocks are permanent) */
197 273
198 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) { 274 for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
199 gpe_block = acpi_gbl_gpe_fadt_blocks[i]; 275 gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
200 if (gpe_block) { 276 acpi_gbl_gpe_fadt_blocks
201 if ((gpe_number >= gpe_block->block_base_number) 277 [i]);
202 && (gpe_number < 278 if (gpe_info) {
203 gpe_block->block_base_number + 279 return (gpe_info);
204 (gpe_block->register_count * 8))) {
205 return (&gpe_block->
206 event_info[gpe_number -
207 gpe_block->
208 block_base_number]);
209 }
210 } 280 }
211 } 281 }
212 282
@@ -223,16 +293,8 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
223 return (NULL); 293 return (NULL);
224 } 294 }
225 295
226 gpe_block = obj_desc->device.gpe_block; 296 return (acpi_ev_low_get_gpe_info
227 297 (gpe_number, obj_desc->device.gpe_block));
228 if ((gpe_number >= gpe_block->block_base_number) &&
229 (gpe_number <
230 gpe_block->block_base_number + (gpe_block->register_count * 8))) {
231 return (&gpe_block->
232 event_info[gpe_number - gpe_block->block_base_number]);
233 }
234
235 return (NULL);
236} 298}
237 299
238/******************************************************************************* 300/*******************************************************************************
@@ -389,7 +451,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
389 return_VOID; 451 return_VOID;
390 } 452 }
391 453
392 /* Set the GPE flags for return to enabled state */ 454 /* Update the GPE register masks for return to enabled state */
393 455
394 (void)acpi_ev_update_gpe_enable_masks(gpe_event_info); 456 (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
395 457
@@ -499,7 +561,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
499 status = acpi_hw_clear_gpe(gpe_event_info); 561 status = acpi_hw_clear_gpe(gpe_event_info);
500 if (ACPI_FAILURE(status)) { 562 if (ACPI_FAILURE(status)) {
501 ACPI_EXCEPTION((AE_INFO, status, 563 ACPI_EXCEPTION((AE_INFO, status,
502 "Unable to clear GPE[%2X]", 564 "Unable to clear GPE[0x%2X]",
503 gpe_number)); 565 gpe_number));
504 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 566 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
505 } 567 }
@@ -532,7 +594,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
532 status = acpi_hw_clear_gpe(gpe_event_info); 594 status = acpi_hw_clear_gpe(gpe_event_info);
533 if (ACPI_FAILURE(status)) { 595 if (ACPI_FAILURE(status)) {
534 ACPI_EXCEPTION((AE_INFO, status, 596 ACPI_EXCEPTION((AE_INFO, status,
535 "Unable to clear GPE[%2X]", 597 "Unable to clear GPE[0x%2X]",
536 gpe_number)); 598 gpe_number));
537 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 599 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
538 } 600 }
@@ -548,7 +610,7 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
548 status = acpi_ev_disable_gpe(gpe_event_info); 610 status = acpi_ev_disable_gpe(gpe_event_info);
549 if (ACPI_FAILURE(status)) { 611 if (ACPI_FAILURE(status)) {
550 ACPI_EXCEPTION((AE_INFO, status, 612 ACPI_EXCEPTION((AE_INFO, status,
551 "Unable to disable GPE[%2X]", 613 "Unable to disable GPE[0x%2X]",
552 gpe_number)); 614 gpe_number));
553 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 615 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
554 } 616 }
@@ -562,27 +624,30 @@ acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
562 gpe_event_info); 624 gpe_event_info);
563 if (ACPI_FAILURE(status)) { 625 if (ACPI_FAILURE(status)) {
564 ACPI_EXCEPTION((AE_INFO, status, 626 ACPI_EXCEPTION((AE_INFO, status,
565 "Unable to queue handler for GPE[%2X] - event disabled", 627 "Unable to queue handler for GPE[0x%2X] - event disabled",
566 gpe_number)); 628 gpe_number));
567 } 629 }
568 break; 630 break;
569 631
570 default: 632 default:
571 633
572 /* No handler or method to run! */ 634 /*
573 635 * No handler or method to run!
636 * 03/2010: This case should no longer be possible. We will not allow
637 * a GPE to be enabled if it has no handler or method.
638 */
574 ACPI_ERROR((AE_INFO, 639 ACPI_ERROR((AE_INFO,
575 "No handler or method for GPE[%2X], disabling event", 640 "No handler or method for GPE[0x%2X], disabling event",
576 gpe_number)); 641 gpe_number));
577 642
578 /* 643 /*
579 * Disable the GPE. The GPE will remain disabled until the ACPICA 644 * Disable the GPE. The GPE will remain disabled a handler
580 * Core Subsystem is restarted, or a handler is installed. 645 * is installed or ACPICA is restarted.
581 */ 646 */
582 status = acpi_ev_disable_gpe(gpe_event_info); 647 status = acpi_ev_disable_gpe(gpe_event_info);
583 if (ACPI_FAILURE(status)) { 648 if (ACPI_FAILURE(status)) {
584 ACPI_EXCEPTION((AE_INFO, status, 649 ACPI_EXCEPTION((AE_INFO, status,
585 "Unable to disable GPE[%2X]", 650 "Unable to disable GPE[0x%2X]",
586 gpe_number)); 651 gpe_number));
587 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED); 652 return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
588 } 653 }
diff --git a/drivers/acpi/acpica/evgpeblk.c b/drivers/acpi/acpica/evgpeblk.c
index fef721917eaf..7c28f2d9fd35 100644
--- a/drivers/acpi/acpica/evgpeblk.c
+++ b/drivers/acpi/acpica/evgpeblk.c
@@ -51,20 +51,6 @@ ACPI_MODULE_NAME("evgpeblk")
51 51
52/* Local prototypes */ 52/* Local prototypes */
53static acpi_status 53static acpi_status
54acpi_ev_save_method_info(acpi_handle obj_handle,
55 u32 level, void *obj_desc, void **return_value);
56
57static acpi_status
58acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
59 u32 level, void *info, void **return_value);
60
61static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
62 interrupt_number);
63
64static acpi_status
65acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt);
66
67static acpi_status
68acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block, 54acpi_ev_install_gpe_block(struct acpi_gpe_block_info *gpe_block,
69 u32 interrupt_number); 55 u32 interrupt_number);
70 56
@@ -73,527 +59,6 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block);
73 59
74/******************************************************************************* 60/*******************************************************************************
75 * 61 *
76 * FUNCTION: acpi_ev_valid_gpe_event
77 *
78 * PARAMETERS: gpe_event_info - Info for this GPE
79 *
80 * RETURN: TRUE if the gpe_event is valid
81 *
82 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
83 * Should be called only when the GPE lists are semaphore locked
84 * and not subject to change.
85 *
86 ******************************************************************************/
87
88u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
89{
90 struct acpi_gpe_xrupt_info *gpe_xrupt_block;
91 struct acpi_gpe_block_info *gpe_block;
92
93 ACPI_FUNCTION_ENTRY();
94
95 /* No need for spin lock since we are not changing any list elements */
96
97 /* Walk the GPE interrupt levels */
98
99 gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
100 while (gpe_xrupt_block) {
101 gpe_block = gpe_xrupt_block->gpe_block_list_head;
102
103 /* Walk the GPE blocks on this interrupt level */
104
105 while (gpe_block) {
106 if ((&gpe_block->event_info[0] <= gpe_event_info) &&
107 (&gpe_block->event_info[((acpi_size)
108 gpe_block->
109 register_count) * 8] >
110 gpe_event_info)) {
111 return (TRUE);
112 }
113
114 gpe_block = gpe_block->next;
115 }
116
117 gpe_xrupt_block = gpe_xrupt_block->next;
118 }
119
120 return (FALSE);
121}
122
123/*******************************************************************************
124 *
125 * FUNCTION: acpi_ev_walk_gpe_list
126 *
127 * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
128 * Context - Value passed to callback
129 *
130 * RETURN: Status
131 *
132 * DESCRIPTION: Walk the GPE lists.
133 *
134 ******************************************************************************/
135
136acpi_status
137acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
138{
139 struct acpi_gpe_block_info *gpe_block;
140 struct acpi_gpe_xrupt_info *gpe_xrupt_info;
141 acpi_status status = AE_OK;
142 acpi_cpu_flags flags;
143
144 ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
145
146 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
147
148 /* Walk the interrupt level descriptor list */
149
150 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
151 while (gpe_xrupt_info) {
152
153 /* Walk all Gpe Blocks attached to this interrupt level */
154
155 gpe_block = gpe_xrupt_info->gpe_block_list_head;
156 while (gpe_block) {
157
158 /* One callback per GPE block */
159
160 status =
161 gpe_walk_callback(gpe_xrupt_info, gpe_block,
162 context);
163 if (ACPI_FAILURE(status)) {
164 if (status == AE_CTRL_END) { /* Callback abort */
165 status = AE_OK;
166 }
167 goto unlock_and_exit;
168 }
169
170 gpe_block = gpe_block->next;
171 }
172
173 gpe_xrupt_info = gpe_xrupt_info->next;
174 }
175
176 unlock_and_exit:
177 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
178 return_ACPI_STATUS(status);
179}
180
181/*******************************************************************************
182 *
183 * FUNCTION: acpi_ev_delete_gpe_handlers
184 *
185 * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
186 * gpe_block - Gpe Block info
187 *
188 * RETURN: Status
189 *
190 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
191 * Used only prior to termination.
192 *
193 ******************************************************************************/
194
195acpi_status
196acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
197 struct acpi_gpe_block_info *gpe_block,
198 void *context)
199{
200 struct acpi_gpe_event_info *gpe_event_info;
201 u32 i;
202 u32 j;
203
204 ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
205
206 /* Examine each GPE Register within the block */
207
208 for (i = 0; i < gpe_block->register_count; i++) {
209
210 /* Now look at the individual GPEs in this byte register */
211
212 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
213 gpe_event_info = &gpe_block->event_info[((acpi_size) i *
214 ACPI_GPE_REGISTER_WIDTH)
215 + j];
216
217 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
218 ACPI_GPE_DISPATCH_HANDLER) {
219 ACPI_FREE(gpe_event_info->dispatch.handler);
220 gpe_event_info->dispatch.handler = NULL;
221 gpe_event_info->flags &=
222 ~ACPI_GPE_DISPATCH_MASK;
223 }
224 }
225 }
226
227 return_ACPI_STATUS(AE_OK);
228}
229
230/*******************************************************************************
231 *
232 * FUNCTION: acpi_ev_save_method_info
233 *
234 * PARAMETERS: Callback from walk_namespace
235 *
236 * RETURN: Status
237 *
238 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
239 * control method under the _GPE portion of the namespace.
240 * Extract the name and GPE type from the object, saving this
241 * information for quick lookup during GPE dispatch
242 *
243 * The name of each GPE control method is of the form:
244 * "_Lxx" or "_Exx"
245 * Where:
246 * L - means that the GPE is level triggered
247 * E - means that the GPE is edge triggered
248 * xx - is the GPE number [in HEX]
249 *
250 ******************************************************************************/
251
252static acpi_status
253acpi_ev_save_method_info(acpi_handle obj_handle,
254 u32 level, void *obj_desc, void **return_value)
255{
256 struct acpi_gpe_block_info *gpe_block = (void *)obj_desc;
257 struct acpi_gpe_event_info *gpe_event_info;
258 u32 gpe_number;
259 char name[ACPI_NAME_SIZE + 1];
260 u8 type;
261
262 ACPI_FUNCTION_TRACE(ev_save_method_info);
263
264 /*
265 * _Lxx and _Exx GPE method support
266 *
267 * 1) Extract the name from the object and convert to a string
268 */
269 ACPI_MOVE_32_TO_32(name,
270 &((struct acpi_namespace_node *)obj_handle)->name.
271 integer);
272 name[ACPI_NAME_SIZE] = 0;
273
274 /*
275 * 2) Edge/Level determination is based on the 2nd character
276 * of the method name
277 *
278 * NOTE: Default GPE type is RUNTIME. May be changed later to WAKE
279 * if a _PRW object is found that points to this GPE.
280 */
281 switch (name[1]) {
282 case 'L':
283 type = ACPI_GPE_LEVEL_TRIGGERED;
284 break;
285
286 case 'E':
287 type = ACPI_GPE_EDGE_TRIGGERED;
288 break;
289
290 default:
291 /* Unknown method type, just ignore it! */
292
293 ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
294 "Ignoring unknown GPE method type: %s "
295 "(name not of form _Lxx or _Exx)", name));
296 return_ACPI_STATUS(AE_OK);
297 }
298
299 /* Convert the last two characters of the name to the GPE Number */
300
301 gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
302 if (gpe_number == ACPI_UINT32_MAX) {
303
304 /* Conversion failed; invalid method, just ignore it */
305
306 ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
307 "Could not extract GPE number from name: %s "
308 "(name is not of form _Lxx or _Exx)", name));
309 return_ACPI_STATUS(AE_OK);
310 }
311
312 /* Ensure that we have a valid GPE number for this GPE block */
313
314 if ((gpe_number < gpe_block->block_base_number) ||
315 (gpe_number >= (gpe_block->block_base_number +
316 (gpe_block->register_count * 8)))) {
317 /*
318 * Not valid for this GPE block, just ignore it. However, it may be
319 * valid for a different GPE block, since GPE0 and GPE1 methods both
320 * appear under \_GPE.
321 */
322 return_ACPI_STATUS(AE_OK);
323 }
324
325 /*
326 * Now we can add this information to the gpe_event_info block for use
327 * during dispatch of this GPE.
328 */
329 gpe_event_info =
330 &gpe_block->event_info[gpe_number - gpe_block->block_base_number];
331
332 gpe_event_info->flags = (u8) (type | ACPI_GPE_DISPATCH_METHOD);
333
334 gpe_event_info->dispatch.method_node =
335 (struct acpi_namespace_node *)obj_handle;
336
337 ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
338 "Registered GPE method %s as GPE number 0x%.2X\n",
339 name, gpe_number));
340 return_ACPI_STATUS(AE_OK);
341}
342
343/*******************************************************************************
344 *
345 * FUNCTION: acpi_ev_match_prw_and_gpe
346 *
347 * PARAMETERS: Callback from walk_namespace
348 *
349 * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
350 * not aborted on a single _PRW failure.
351 *
352 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
353 * Device. Run the _PRW method. If present, extract the GPE
354 * number and mark the GPE as a WAKE GPE.
355 *
356 ******************************************************************************/
357
358static acpi_status
359acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
360 u32 level, void *info, void **return_value)
361{
362 struct acpi_gpe_walk_info *gpe_info = (void *)info;
363 struct acpi_namespace_node *gpe_device;
364 struct acpi_gpe_block_info *gpe_block;
365 struct acpi_namespace_node *target_gpe_device;
366 struct acpi_gpe_event_info *gpe_event_info;
367 union acpi_operand_object *pkg_desc;
368 union acpi_operand_object *obj_desc;
369 u32 gpe_number;
370 acpi_status status;
371
372 ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
373
374 /* Check for a _PRW method under this device */
375
376 status = acpi_ut_evaluate_object(obj_handle, METHOD_NAME__PRW,
377 ACPI_BTYPE_PACKAGE, &pkg_desc);
378 if (ACPI_FAILURE(status)) {
379
380 /* Ignore all errors from _PRW, we don't want to abort the subsystem */
381
382 return_ACPI_STATUS(AE_OK);
383 }
384
385 /* The returned _PRW package must have at least two elements */
386
387 if (pkg_desc->package.count < 2) {
388 goto cleanup;
389 }
390
391 /* Extract pointers from the input context */
392
393 gpe_device = gpe_info->gpe_device;
394 gpe_block = gpe_info->gpe_block;
395
396 /*
397 * The _PRW object must return a package, we are only interested in the
398 * first element
399 */
400 obj_desc = pkg_desc->package.elements[0];
401
402 if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
403
404 /* Use FADT-defined GPE device (from definition of _PRW) */
405
406 target_gpe_device = acpi_gbl_fadt_gpe_device;
407
408 /* Integer is the GPE number in the FADT described GPE blocks */
409
410 gpe_number = (u32) obj_desc->integer.value;
411 } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
412
413 /* Package contains a GPE reference and GPE number within a GPE block */
414
415 if ((obj_desc->package.count < 2) ||
416 ((obj_desc->package.elements[0])->common.type !=
417 ACPI_TYPE_LOCAL_REFERENCE) ||
418 ((obj_desc->package.elements[1])->common.type !=
419 ACPI_TYPE_INTEGER)) {
420 goto cleanup;
421 }
422
423 /* Get GPE block reference and decode */
424
425 target_gpe_device =
426 obj_desc->package.elements[0]->reference.node;
427 gpe_number = (u32) obj_desc->package.elements[1]->integer.value;
428 } else {
429 /* Unknown type, just ignore it */
430
431 goto cleanup;
432 }
433
434 /*
435 * Is this GPE within this block?
436 *
437 * TRUE if and only if these conditions are true:
438 * 1) The GPE devices match.
439 * 2) The GPE index(number) is within the range of the Gpe Block
440 * associated with the GPE device.
441 */
442 if ((gpe_device == target_gpe_device) &&
443 (gpe_number >= gpe_block->block_base_number) &&
444 (gpe_number < gpe_block->block_base_number +
445 (gpe_block->register_count * 8))) {
446 gpe_event_info = &gpe_block->event_info[gpe_number -
447 gpe_block->
448 block_base_number];
449
450 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
451 }
452
453 cleanup:
454 acpi_ut_remove_reference(pkg_desc);
455 return_ACPI_STATUS(AE_OK);
456}
457
458/*******************************************************************************
459 *
460 * FUNCTION: acpi_ev_get_gpe_xrupt_block
461 *
462 * PARAMETERS: interrupt_number - Interrupt for a GPE block
463 *
464 * RETURN: A GPE interrupt block
465 *
466 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
467 * block per unique interrupt level used for GPEs. Should be
468 * called only when the GPE lists are semaphore locked and not
469 * subject to change.
470 *
471 ******************************************************************************/
472
473static struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32
474 interrupt_number)
475{
476 struct acpi_gpe_xrupt_info *next_gpe_xrupt;
477 struct acpi_gpe_xrupt_info *gpe_xrupt;
478 acpi_status status;
479 acpi_cpu_flags flags;
480
481 ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
482
483 /* No need for lock since we are not changing any list elements here */
484
485 next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
486 while (next_gpe_xrupt) {
487 if (next_gpe_xrupt->interrupt_number == interrupt_number) {
488 return_PTR(next_gpe_xrupt);
489 }
490
491 next_gpe_xrupt = next_gpe_xrupt->next;
492 }
493
494 /* Not found, must allocate a new xrupt descriptor */
495
496 gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
497 if (!gpe_xrupt) {
498 return_PTR(NULL);
499 }
500
501 gpe_xrupt->interrupt_number = interrupt_number;
502
503 /* Install new interrupt descriptor with spin lock */
504
505 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
506 if (acpi_gbl_gpe_xrupt_list_head) {
507 next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
508 while (next_gpe_xrupt->next) {
509 next_gpe_xrupt = next_gpe_xrupt->next;
510 }
511
512 next_gpe_xrupt->next = gpe_xrupt;
513 gpe_xrupt->previous = next_gpe_xrupt;
514 } else {
515 acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
516 }
517 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
518
519 /* Install new interrupt handler if not SCI_INT */
520
521 if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
522 status = acpi_os_install_interrupt_handler(interrupt_number,
523 acpi_ev_gpe_xrupt_handler,
524 gpe_xrupt);
525 if (ACPI_FAILURE(status)) {
526 ACPI_ERROR((AE_INFO,
527 "Could not install GPE interrupt handler at level 0x%X",
528 interrupt_number));
529 return_PTR(NULL);
530 }
531 }
532
533 return_PTR(gpe_xrupt);
534}
535
536/*******************************************************************************
537 *
538 * FUNCTION: acpi_ev_delete_gpe_xrupt
539 *
540 * PARAMETERS: gpe_xrupt - A GPE interrupt info block
541 *
542 * RETURN: Status
543 *
544 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
545 * interrupt handler if not the SCI interrupt.
546 *
547 ******************************************************************************/
548
549static acpi_status
550acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
551{
552 acpi_status status;
553 acpi_cpu_flags flags;
554
555 ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
556
557 /* We never want to remove the SCI interrupt handler */
558
559 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
560 gpe_xrupt->gpe_block_list_head = NULL;
561 return_ACPI_STATUS(AE_OK);
562 }
563
564 /* Disable this interrupt */
565
566 status =
567 acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
568 acpi_ev_gpe_xrupt_handler);
569 if (ACPI_FAILURE(status)) {
570 return_ACPI_STATUS(status);
571 }
572
573 /* Unlink the interrupt block with lock */
574
575 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
576 if (gpe_xrupt->previous) {
577 gpe_xrupt->previous->next = gpe_xrupt->next;
578 } else {
579 /* No previous, update list head */
580
581 acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
582 }
583
584 if (gpe_xrupt->next) {
585 gpe_xrupt->next->previous = gpe_xrupt->previous;
586 }
587 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
588
589 /* Free the block */
590
591 ACPI_FREE(gpe_xrupt);
592 return_ACPI_STATUS(AE_OK);
593}
594
595/*******************************************************************************
596 *
597 * FUNCTION: acpi_ev_install_gpe_block 62 * FUNCTION: acpi_ev_install_gpe_block
598 * 63 *
599 * PARAMETERS: gpe_block - New GPE block 64 * PARAMETERS: gpe_block - New GPE block
@@ -705,8 +170,7 @@ acpi_status acpi_ev_delete_gpe_block(struct acpi_gpe_block_info *gpe_block)
705 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 170 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
706 } 171 }
707 172
708 acpi_current_gpe_count -= 173 acpi_current_gpe_count -= gpe_block->gpe_count;
709 gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH;
710 174
711 /* Free the gpe_block */ 175 /* Free the gpe_block */
712 176
@@ -760,9 +224,7 @@ acpi_ev_create_gpe_info_blocks(struct acpi_gpe_block_info *gpe_block)
760 * Allocate the GPE event_info block. There are eight distinct GPEs 224 * Allocate the GPE event_info block. There are eight distinct GPEs
761 * per register. Initialization to zeros is sufficient. 225 * per register. Initialization to zeros is sufficient.
762 */ 226 */
763 gpe_event_info = ACPI_ALLOCATE_ZEROED(((acpi_size) gpe_block-> 227 gpe_event_info = ACPI_ALLOCATE_ZEROED((acpi_size) gpe_block->gpe_count *
764 register_count *
765 ACPI_GPE_REGISTER_WIDTH) *
766 sizeof(struct 228 sizeof(struct
767 acpi_gpe_event_info)); 229 acpi_gpe_event_info));
768 if (!gpe_event_info) { 230 if (!gpe_event_info) {
@@ -880,6 +342,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
880{ 342{
881 acpi_status status; 343 acpi_status status;
882 struct acpi_gpe_block_info *gpe_block; 344 struct acpi_gpe_block_info *gpe_block;
345 struct acpi_gpe_walk_info walk_info;
883 346
884 ACPI_FUNCTION_TRACE(ev_create_gpe_block); 347 ACPI_FUNCTION_TRACE(ev_create_gpe_block);
885 348
@@ -897,6 +360,7 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
897 /* Initialize the new GPE block */ 360 /* Initialize the new GPE block */
898 361
899 gpe_block->node = gpe_device; 362 gpe_block->node = gpe_device;
363 gpe_block->gpe_count = (u16)(register_count * ACPI_GPE_REGISTER_WIDTH);
900 gpe_block->register_count = register_count; 364 gpe_block->register_count = register_count;
901 gpe_block->block_base_number = gpe_block_base_number; 365 gpe_block->block_base_number = gpe_block_base_number;
902 366
@@ -921,12 +385,17 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
921 return_ACPI_STATUS(status); 385 return_ACPI_STATUS(status);
922 } 386 }
923 387
924 /* Find all GPE methods (_Lxx, _Exx) for this block */ 388 /* Find all GPE methods (_Lxx or_Exx) for this block */
389
390 walk_info.gpe_block = gpe_block;
391 walk_info.gpe_device = gpe_device;
392 walk_info.enable_this_gpe = FALSE;
393 walk_info.execute_by_owner_id = FALSE;
925 394
926 status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device, 395 status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD, gpe_device,
927 ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK, 396 ACPI_UINT32_MAX, ACPI_NS_WALK_NO_UNLOCK,
928 acpi_ev_save_method_info, NULL, 397 acpi_ev_match_gpe_method, NULL,
929 gpe_block, NULL); 398 &walk_info, NULL);
930 399
931 /* Return the new block */ 400 /* Return the new block */
932 401
@@ -938,14 +407,13 @@ acpi_ev_create_gpe_block(struct acpi_namespace_node *gpe_device,
938 "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n", 407 "GPE %02X to %02X [%4.4s] %u regs on int 0x%X\n",
939 (u32) gpe_block->block_base_number, 408 (u32) gpe_block->block_base_number,
940 (u32) (gpe_block->block_base_number + 409 (u32) (gpe_block->block_base_number +
941 ((gpe_block->register_count * 410 (gpe_block->gpe_count - 1)),
942 ACPI_GPE_REGISTER_WIDTH) - 1)),
943 gpe_device->name.ascii, gpe_block->register_count, 411 gpe_device->name.ascii, gpe_block->register_count,
944 interrupt_number)); 412 interrupt_number));
945 413
946 /* Update global count of currently available GPEs */ 414 /* Update global count of currently available GPEs */
947 415
948 acpi_current_gpe_count += register_count * ACPI_GPE_REGISTER_WIDTH; 416 acpi_current_gpe_count += gpe_block->gpe_count;
949 return_ACPI_STATUS(AE_OK); 417 return_ACPI_STATUS(AE_OK);
950} 418}
951 419
@@ -969,10 +437,13 @@ acpi_status
969acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device, 437acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
970 struct acpi_gpe_block_info *gpe_block) 438 struct acpi_gpe_block_info *gpe_block)
971{ 439{
440 acpi_status status;
972 struct acpi_gpe_event_info *gpe_event_info; 441 struct acpi_gpe_event_info *gpe_event_info;
973 struct acpi_gpe_walk_info gpe_info; 442 struct acpi_gpe_walk_info walk_info;
974 u32 wake_gpe_count; 443 u32 wake_gpe_count;
975 u32 gpe_enabled_count; 444 u32 gpe_enabled_count;
445 u32 gpe_index;
446 u32 gpe_number;
976 u32 i; 447 u32 i;
977 u32 j; 448 u32 j;
978 449
@@ -995,210 +466,75 @@ acpi_ev_initialize_gpe_block(struct acpi_namespace_node *gpe_device,
995 * definition a wake GPE and will not be enabled while the machine 466 * definition a wake GPE and will not be enabled while the machine
996 * is running. 467 * is running.
997 */ 468 */
998 gpe_info.gpe_block = gpe_block; 469 walk_info.gpe_block = gpe_block;
999 gpe_info.gpe_device = gpe_device; 470 walk_info.gpe_device = gpe_device;
471 walk_info.execute_by_owner_id = FALSE;
1000 472
1001 acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 473 status =
474 acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
1002 ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK, 475 ACPI_UINT32_MAX, ACPI_NS_WALK_UNLOCK,
1003 acpi_ev_match_prw_and_gpe, NULL, 476 acpi_ev_match_prw_and_gpe, NULL,
1004 &gpe_info, NULL); 477 &walk_info, NULL);
478 if (ACPI_FAILURE(status)) {
479 ACPI_EXCEPTION((AE_INFO, status,
480 "While executing _PRW methods"));
481 }
1005 } 482 }
1006 483
1007 /* 484 /*
1008 * Enable all GPEs that have a corresponding method and aren't 485 * Enable all GPEs that have a corresponding method and are not
1009 * capable of generating wakeups. Any other GPEs within this block 486 * capable of generating wakeups. Any other GPEs within this block
1010 * must be enabled via the acpi_enable_gpe() interface. 487 * must be enabled via the acpi_enable_gpe interface.
1011 */ 488 */
1012 wake_gpe_count = 0; 489 wake_gpe_count = 0;
1013 gpe_enabled_count = 0; 490 gpe_enabled_count = 0;
1014 if (gpe_device == acpi_gbl_fadt_gpe_device) 491
492 if (gpe_device == acpi_gbl_fadt_gpe_device) {
1015 gpe_device = NULL; 493 gpe_device = NULL;
494 }
1016 495
1017 for (i = 0; i < gpe_block->register_count; i++) { 496 for (i = 0; i < gpe_block->register_count; i++) {
1018 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) { 497 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
1019 acpi_status status;
1020 acpi_size gpe_index;
1021 int gpe_number;
1022 498
1023 /* Get the info block for this particular GPE */ 499 /* Get the info block for this particular GPE */
1024 gpe_index = (acpi_size)i * ACPI_GPE_REGISTER_WIDTH + j; 500
501 gpe_index = (i * ACPI_GPE_REGISTER_WIDTH) + j;
1025 gpe_event_info = &gpe_block->event_info[gpe_index]; 502 gpe_event_info = &gpe_block->event_info[gpe_index];
1026 503
1027 if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) { 504 if (gpe_event_info->flags & ACPI_GPE_CAN_WAKE) {
1028 wake_gpe_count++; 505 wake_gpe_count++;
1029 if (acpi_gbl_leave_wake_gpes_disabled) 506 if (acpi_gbl_leave_wake_gpes_disabled) {
1030 continue; 507 continue;
508 }
1031 } 509 }
1032 510
1033 if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) 511 /* Ignore GPEs that have no corresponding _Lxx/_Exx method */
512
513 if (!(gpe_event_info->flags & ACPI_GPE_DISPATCH_METHOD)) {
1034 continue; 514 continue;
515 }
516
517 /* Enable this GPE */
1035 518
1036 gpe_number = gpe_index + gpe_block->block_base_number; 519 gpe_number = gpe_index + gpe_block->block_base_number;
1037 status = acpi_enable_gpe(gpe_device, gpe_number, 520 status = acpi_enable_gpe(gpe_device, gpe_number,
1038 ACPI_GPE_TYPE_RUNTIME); 521 ACPI_GPE_TYPE_RUNTIME);
1039 if (ACPI_FAILURE(status))
1040 ACPI_ERROR((AE_INFO,
1041 "Failed to enable GPE %02X\n",
1042 gpe_number));
1043 else
1044 gpe_enabled_count++;
1045 }
1046 }
1047
1048 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
1049 "Found %u Wake, Enabled %u Runtime GPEs in this block\n",
1050 wake_gpe_count, gpe_enabled_count));
1051
1052 return_ACPI_STATUS(AE_OK);
1053}
1054
1055/*******************************************************************************
1056 *
1057 * FUNCTION: acpi_ev_gpe_initialize
1058 *
1059 * PARAMETERS: None
1060 *
1061 * RETURN: Status
1062 *
1063 * DESCRIPTION: Initialize the GPE data structures
1064 *
1065 ******************************************************************************/
1066
1067acpi_status acpi_ev_gpe_initialize(void)
1068{
1069 u32 register_count0 = 0;
1070 u32 register_count1 = 0;
1071 u32 gpe_number_max = 0;
1072 acpi_status status;
1073
1074 ACPI_FUNCTION_TRACE(ev_gpe_initialize);
1075
1076 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
1077 if (ACPI_FAILURE(status)) {
1078 return_ACPI_STATUS(status);
1079 }
1080
1081 /*
1082 * Initialize the GPE Block(s) defined in the FADT
1083 *
1084 * Why the GPE register block lengths are divided by 2: From the ACPI
1085 * Spec, section "General-Purpose Event Registers", we have:
1086 *
1087 * "Each register block contains two registers of equal length
1088 * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
1089 * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
1090 * The length of the GPE1_STS and GPE1_EN registers is equal to
1091 * half the GPE1_LEN. If a generic register block is not supported
1092 * then its respective block pointer and block length values in the
1093 * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
1094 * to be the same size."
1095 */
1096
1097 /*
1098 * Determine the maximum GPE number for this machine.
1099 *
1100 * Note: both GPE0 and GPE1 are optional, and either can exist without
1101 * the other.
1102 *
1103 * If EITHER the register length OR the block address are zero, then that
1104 * particular block is not supported.
1105 */
1106 if (acpi_gbl_FADT.gpe0_block_length &&
1107 acpi_gbl_FADT.xgpe0_block.address) {
1108
1109 /* GPE block 0 exists (has both length and address > 0) */
1110
1111 register_count0 = (u16) (acpi_gbl_FADT.gpe0_block_length / 2);
1112
1113 gpe_number_max =
1114 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
1115
1116 /* Install GPE Block 0 */
1117
1118 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1119 &acpi_gbl_FADT.xgpe0_block,
1120 register_count0, 0,
1121 acpi_gbl_FADT.sci_interrupt,
1122 &acpi_gbl_gpe_fadt_blocks[0]);
1123
1124 if (ACPI_FAILURE(status)) {
1125 ACPI_EXCEPTION((AE_INFO, status,
1126 "Could not create GPE Block 0"));
1127 }
1128 }
1129
1130 if (acpi_gbl_FADT.gpe1_block_length &&
1131 acpi_gbl_FADT.xgpe1_block.address) {
1132
1133 /* GPE block 1 exists (has both length and address > 0) */
1134
1135 register_count1 = (u16) (acpi_gbl_FADT.gpe1_block_length / 2);
1136
1137 /* Check for GPE0/GPE1 overlap (if both banks exist) */
1138
1139 if ((register_count0) &&
1140 (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
1141 ACPI_ERROR((AE_INFO,
1142 "GPE0 block (GPE 0 to %d) overlaps the GPE1 block "
1143 "(GPE %d to %d) - Ignoring GPE1",
1144 gpe_number_max, acpi_gbl_FADT.gpe1_base,
1145 acpi_gbl_FADT.gpe1_base +
1146 ((register_count1 *
1147 ACPI_GPE_REGISTER_WIDTH) - 1)));
1148
1149 /* Ignore GPE1 block by setting the register count to zero */
1150
1151 register_count1 = 0;
1152 } else {
1153 /* Install GPE Block 1 */
1154
1155 status =
1156 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
1157 &acpi_gbl_FADT.xgpe1_block,
1158 register_count1,
1159 acpi_gbl_FADT.gpe1_base,
1160 acpi_gbl_FADT.
1161 sci_interrupt,
1162 &acpi_gbl_gpe_fadt_blocks
1163 [1]);
1164
1165 if (ACPI_FAILURE(status)) { 522 if (ACPI_FAILURE(status)) {
1166 ACPI_EXCEPTION((AE_INFO, status, 523 ACPI_EXCEPTION((AE_INFO, status,
1167 "Could not create GPE Block 1")); 524 "Could not enable GPE 0x%02X",
525 gpe_number));
526 continue;
1168 } 527 }
1169 528
1170 /* 529 gpe_enabled_count++;
1171 * GPE0 and GPE1 do not have to be contiguous in the GPE number
1172 * space. However, GPE0 always starts at GPE number zero.
1173 */
1174 gpe_number_max = acpi_gbl_FADT.gpe1_base +
1175 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
1176 } 530 }
1177 } 531 }
1178 532
1179 /* Exit if there are no GPE registers */ 533 if (gpe_enabled_count || wake_gpe_count) {
1180
1181 if ((register_count0 + register_count1) == 0) {
1182
1183 /* GPEs are not required by ACPI, this is OK */
1184
1185 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 534 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
1186 "There are no GPE blocks defined in the FADT\n")); 535 "Enabled %u Runtime GPEs, added %u Wake GPEs in this block\n",
1187 status = AE_OK; 536 gpe_enabled_count, wake_gpe_count));
1188 goto cleanup;
1189 }
1190
1191 /* Check for Max GPE number out-of-range */
1192
1193 if (gpe_number_max > ACPI_GPE_MAX) {
1194 ACPI_ERROR((AE_INFO,
1195 "Maximum GPE number from FADT is too large: 0x%X",
1196 gpe_number_max));
1197 status = AE_BAD_VALUE;
1198 goto cleanup;
1199 } 537 }
1200 538
1201 cleanup:
1202 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
1203 return_ACPI_STATUS(AE_OK); 539 return_ACPI_STATUS(AE_OK);
1204} 540}
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
new file mode 100644
index 000000000000..3f6c2d26410d
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -0,0 +1,653 @@
1/******************************************************************************
2 *
3 * Module Name: evgpeinit - System GPE initialization and update
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2010, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acevents.h"
47#include "acnamesp.h"
48#include "acinterp.h"
49
50#define _COMPONENT ACPI_EVENTS
51ACPI_MODULE_NAME("evgpeinit")
52
53/*******************************************************************************
54 *
55 * FUNCTION: acpi_ev_gpe_initialize
56 *
57 * PARAMETERS: None
58 *
59 * RETURN: Status
60 *
61 * DESCRIPTION: Initialize the GPE data structures and the FADT GPE 0/1 blocks
62 *
63 ******************************************************************************/
64acpi_status acpi_ev_gpe_initialize(void)
65{
66 u32 register_count0 = 0;
67 u32 register_count1 = 0;
68 u32 gpe_number_max = 0;
69 acpi_status status;
70
71 ACPI_FUNCTION_TRACE(ev_gpe_initialize);
72
73 status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
74 if (ACPI_FAILURE(status)) {
75 return_ACPI_STATUS(status);
76 }
77
78 /*
79 * Initialize the GPE Block(s) defined in the FADT
80 *
81 * Why the GPE register block lengths are divided by 2: From the ACPI
82 * Spec, section "General-Purpose Event Registers", we have:
83 *
84 * "Each register block contains two registers of equal length
85 * GPEx_STS and GPEx_EN (where x is 0 or 1). The length of the
86 * GPE0_STS and GPE0_EN registers is equal to half the GPE0_LEN
87 * The length of the GPE1_STS and GPE1_EN registers is equal to
88 * half the GPE1_LEN. If a generic register block is not supported
89 * then its respective block pointer and block length values in the
90 * FADT table contain zeros. The GPE0_LEN and GPE1_LEN do not need
91 * to be the same size."
92 */
93
94 /*
95 * Determine the maximum GPE number for this machine.
96 *
97 * Note: both GPE0 and GPE1 are optional, and either can exist without
98 * the other.
99 *
100 * If EITHER the register length OR the block address are zero, then that
101 * particular block is not supported.
102 */
103 if (acpi_gbl_FADT.gpe0_block_length &&
104 acpi_gbl_FADT.xgpe0_block.address) {
105
106 /* GPE block 0 exists (has both length and address > 0) */
107
108 register_count0 = (u16)(acpi_gbl_FADT.gpe0_block_length / 2);
109
110 gpe_number_max =
111 (register_count0 * ACPI_GPE_REGISTER_WIDTH) - 1;
112
113 /* Install GPE Block 0 */
114
115 status = acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
116 &acpi_gbl_FADT.xgpe0_block,
117 register_count0, 0,
118 acpi_gbl_FADT.sci_interrupt,
119 &acpi_gbl_gpe_fadt_blocks[0]);
120
121 if (ACPI_FAILURE(status)) {
122 ACPI_EXCEPTION((AE_INFO, status,
123 "Could not create GPE Block 0"));
124 }
125 }
126
127 if (acpi_gbl_FADT.gpe1_block_length &&
128 acpi_gbl_FADT.xgpe1_block.address) {
129
130 /* GPE block 1 exists (has both length and address > 0) */
131
132 register_count1 = (u16)(acpi_gbl_FADT.gpe1_block_length / 2);
133
134 /* Check for GPE0/GPE1 overlap (if both banks exist) */
135
136 if ((register_count0) &&
137 (gpe_number_max >= acpi_gbl_FADT.gpe1_base)) {
138 ACPI_ERROR((AE_INFO,
139 "GPE0 block (GPE 0 to %u) overlaps the GPE1 block "
140 "(GPE %u to %u) - Ignoring GPE1",
141 gpe_number_max, acpi_gbl_FADT.gpe1_base,
142 acpi_gbl_FADT.gpe1_base +
143 ((register_count1 *
144 ACPI_GPE_REGISTER_WIDTH) - 1)));
145
146 /* Ignore GPE1 block by setting the register count to zero */
147
148 register_count1 = 0;
149 } else {
150 /* Install GPE Block 1 */
151
152 status =
153 acpi_ev_create_gpe_block(acpi_gbl_fadt_gpe_device,
154 &acpi_gbl_FADT.xgpe1_block,
155 register_count1,
156 acpi_gbl_FADT.gpe1_base,
157 acpi_gbl_FADT.
158 sci_interrupt,
159 &acpi_gbl_gpe_fadt_blocks
160 [1]);
161
162 if (ACPI_FAILURE(status)) {
163 ACPI_EXCEPTION((AE_INFO, status,
164 "Could not create GPE Block 1"));
165 }
166
167 /*
168 * GPE0 and GPE1 do not have to be contiguous in the GPE number
169 * space. However, GPE0 always starts at GPE number zero.
170 */
171 gpe_number_max = acpi_gbl_FADT.gpe1_base +
172 ((register_count1 * ACPI_GPE_REGISTER_WIDTH) - 1);
173 }
174 }
175
176 /* Exit if there are no GPE registers */
177
178 if ((register_count0 + register_count1) == 0) {
179
180 /* GPEs are not required by ACPI, this is OK */
181
182 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
183 "There are no GPE blocks defined in the FADT\n"));
184 status = AE_OK;
185 goto cleanup;
186 }
187
188 /* Check for Max GPE number out-of-range */
189
190 if (gpe_number_max > ACPI_GPE_MAX) {
191 ACPI_ERROR((AE_INFO,
192 "Maximum GPE number from FADT is too large: 0x%X",
193 gpe_number_max));
194 status = AE_BAD_VALUE;
195 goto cleanup;
196 }
197
198 cleanup:
199 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
200 return_ACPI_STATUS(AE_OK);
201}
202
203/*******************************************************************************
204 *
205 * FUNCTION: acpi_ev_update_gpes
206 *
207 * PARAMETERS: table_owner_id - ID of the newly-loaded ACPI table
208 *
209 * RETURN: None
210 *
211 * DESCRIPTION: Check for new GPE methods (_Lxx/_Exx) made available as a
212 * result of a Load() or load_table() operation. If new GPE
213 * methods have been installed, register the new methods and
214 * enable and runtime GPEs that are associated with them. Also,
215 * run any newly loaded _PRW methods in order to discover any
216 * new CAN_WAKE GPEs.
217 *
218 ******************************************************************************/
219
220void acpi_ev_update_gpes(acpi_owner_id table_owner_id)
221{
222 struct acpi_gpe_xrupt_info *gpe_xrupt_info;
223 struct acpi_gpe_block_info *gpe_block;
224 struct acpi_gpe_walk_info walk_info;
225 acpi_status status = AE_OK;
226 u32 new_wake_gpe_count = 0;
227
228 /* We will examine only _PRW/_Lxx/_Exx methods owned by this table */
229
230 walk_info.owner_id = table_owner_id;
231 walk_info.execute_by_owner_id = TRUE;
232 walk_info.count = 0;
233
234 if (acpi_gbl_leave_wake_gpes_disabled) {
235 /*
236 * 1) Run any newly-loaded _PRW methods to find any GPEs that
237 * can now be marked as CAN_WAKE GPEs. Note: We must run the
238 * _PRW methods before we process the _Lxx/_Exx methods because
239 * we will enable all runtime GPEs associated with the new
240 * _Lxx/_Exx methods at the time we process those methods.
241 *
242 * Unlock interpreter so that we can run the _PRW methods.
243 */
244 walk_info.gpe_block = NULL;
245 walk_info.gpe_device = NULL;
246
247 acpi_ex_exit_interpreter();
248
249 status =
250 acpi_ns_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
251 ACPI_UINT32_MAX,
252 ACPI_NS_WALK_NO_UNLOCK,
253 acpi_ev_match_prw_and_gpe, NULL,
254 &walk_info, NULL);
255 if (ACPI_FAILURE(status)) {
256 ACPI_EXCEPTION((AE_INFO, status,
257 "While executing _PRW methods"));
258 }
259
260 acpi_ex_enter_interpreter();
261 new_wake_gpe_count = walk_info.count;
262 }
263
264 /*
265 * 2) Find any _Lxx/_Exx GPE methods that have just been loaded.
266 *
267 * Any GPEs that correspond to new _Lxx/_Exx methods and are not
268 * marked as CAN_WAKE are immediately enabled.
269 *
270 * Examine the namespace underneath each gpe_device within the
271 * gpe_block lists.
272 */
273 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
274 if (ACPI_FAILURE(status)) {
275 return;
276 }
277
278 walk_info.count = 0;
279 walk_info.enable_this_gpe = TRUE;
280
281 /* Walk the interrupt level descriptor list */
282
283 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
284 while (gpe_xrupt_info) {
285
286 /* Walk all Gpe Blocks attached to this interrupt level */
287
288 gpe_block = gpe_xrupt_info->gpe_block_list_head;
289 while (gpe_block) {
290 walk_info.gpe_block = gpe_block;
291 walk_info.gpe_device = gpe_block->node;
292
293 status = acpi_ns_walk_namespace(ACPI_TYPE_METHOD,
294 walk_info.gpe_device,
295 ACPI_UINT32_MAX,
296 ACPI_NS_WALK_NO_UNLOCK,
297 acpi_ev_match_gpe_method,
298 NULL, &walk_info, NULL);
299 if (ACPI_FAILURE(status)) {
300 ACPI_EXCEPTION((AE_INFO, status,
301 "While decoding _Lxx/_Exx methods"));
302 }
303
304 gpe_block = gpe_block->next;
305 }
306
307 gpe_xrupt_info = gpe_xrupt_info->next;
308 }
309
310 if (walk_info.count || new_wake_gpe_count) {
311 ACPI_INFO((AE_INFO,
312 "Enabled %u new runtime GPEs, added %u new wakeup GPEs",
313 walk_info.count, new_wake_gpe_count));
314 }
315
316 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
317 return;
318}
319
320/*******************************************************************************
321 *
322 * FUNCTION: acpi_ev_match_gpe_method
323 *
324 * PARAMETERS: Callback from walk_namespace
325 *
326 * RETURN: Status
327 *
328 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
329 * control method under the _GPE portion of the namespace.
330 * Extract the name and GPE type from the object, saving this
331 * information for quick lookup during GPE dispatch. Allows a
332 * per-owner_id evaluation if execute_by_owner_id is TRUE in the
333 * walk_info parameter block.
334 *
335 * The name of each GPE control method is of the form:
336 * "_Lxx" or "_Exx", where:
337 * L - means that the GPE is level triggered
338 * E - means that the GPE is edge triggered
339 * xx - is the GPE number [in HEX]
340 *
341 * If walk_info->execute_by_owner_id is TRUE, we only execute examine GPE methods
342 * with that owner.
343 * If walk_info->enable_this_gpe is TRUE, the GPE that is referred to by a GPE
344 * method is immediately enabled (Used for Load/load_table operators)
345 *
346 ******************************************************************************/
347
348acpi_status
349acpi_ev_match_gpe_method(acpi_handle obj_handle,
350 u32 level, void *context, void **return_value)
351{
352 struct acpi_namespace_node *method_node =
353 ACPI_CAST_PTR(struct acpi_namespace_node, obj_handle);
354 struct acpi_gpe_walk_info *walk_info =
355 ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
356 struct acpi_gpe_event_info *gpe_event_info;
357 struct acpi_namespace_node *gpe_device;
358 acpi_status status;
359 u32 gpe_number;
360 char name[ACPI_NAME_SIZE + 1];
361 u8 type;
362
363 ACPI_FUNCTION_TRACE(ev_match_gpe_method);
364
365 /* Check if requested owner_id matches this owner_id */
366
367 if ((walk_info->execute_by_owner_id) &&
368 (method_node->owner_id != walk_info->owner_id)) {
369 return_ACPI_STATUS(AE_OK);
370 }
371
372 /*
373 * Match and decode the _Lxx and _Exx GPE method names
374 *
375 * 1) Extract the method name and null terminate it
376 */
377 ACPI_MOVE_32_TO_32(name, &method_node->name.integer);
378 name[ACPI_NAME_SIZE] = 0;
379
380 /* 2) Name must begin with an underscore */
381
382 if (name[0] != '_') {
383 return_ACPI_STATUS(AE_OK); /* Ignore this method */
384 }
385
386 /*
387 * 3) Edge/Level determination is based on the 2nd character
388 * of the method name
389 *
390 * NOTE: Default GPE type is RUNTIME only. Later, if a _PRW object is
391 * found that points to this GPE, the ACPI_GPE_CAN_WAKE flag is set.
392 */
393 switch (name[1]) {
394 case 'L':
395 type = ACPI_GPE_LEVEL_TRIGGERED;
396 break;
397
398 case 'E':
399 type = ACPI_GPE_EDGE_TRIGGERED;
400 break;
401
402 default:
403 /* Unknown method type, just ignore it */
404
405 ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
406 "Ignoring unknown GPE method type: %s "
407 "(name not of form _Lxx or _Exx)", name));
408 return_ACPI_STATUS(AE_OK);
409 }
410
411 /* 4) The last two characters of the name are the hex GPE Number */
412
413 gpe_number = ACPI_STRTOUL(&name[2], NULL, 16);
414 if (gpe_number == ACPI_UINT32_MAX) {
415
416 /* Conversion failed; invalid method, just ignore it */
417
418 ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
419 "Could not extract GPE number from name: %s "
420 "(name is not of form _Lxx or _Exx)", name));
421 return_ACPI_STATUS(AE_OK);
422 }
423
424 /* Ensure that we have a valid GPE number for this GPE block */
425
426 gpe_event_info =
427 acpi_ev_low_get_gpe_info(gpe_number, walk_info->gpe_block);
428 if (!gpe_event_info) {
429 /*
430 * This gpe_number is not valid for this GPE block, just ignore it.
431 * However, it may be valid for a different GPE block, since GPE0
432 * and GPE1 methods both appear under \_GPE.
433 */
434 return_ACPI_STATUS(AE_OK);
435 }
436
437 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
438 ACPI_GPE_DISPATCH_HANDLER) {
439
440 /* If there is already a handler, ignore this GPE method */
441
442 return_ACPI_STATUS(AE_OK);
443 }
444
445 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
446 ACPI_GPE_DISPATCH_METHOD) {
447 /*
448 * If there is already a method, ignore this method. But check
449 * for a type mismatch (if both the _Lxx AND _Exx exist)
450 */
451 if (type != (gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
452 ACPI_ERROR((AE_INFO,
453 "For GPE 0x%.2X, found both _L%2.2X and _E%2.2X methods",
454 gpe_number, gpe_number, gpe_number));
455 }
456 return_ACPI_STATUS(AE_OK);
457 }
458
459 /*
460 * Add the GPE information from above to the gpe_event_info block for
461 * use during dispatch of this GPE.
462 */
463 gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_METHOD);
464 gpe_event_info->dispatch.method_node = method_node;
465
466 /*
467 * Enable this GPE if requested. This only happens when during the
468 * execution of a Load or load_table operator. We have found a new
469 * GPE method and want to immediately enable the GPE if it is a
470 * runtime GPE.
471 */
472 if (walk_info->enable_this_gpe) {
473
474 /* Ignore GPEs that can wake the system */
475
476 if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE) ||
477 !acpi_gbl_leave_wake_gpes_disabled) {
478 walk_info->count++;
479 gpe_device = walk_info->gpe_device;
480
481 if (gpe_device == acpi_gbl_fadt_gpe_device) {
482 gpe_device = NULL;
483 }
484
485 status = acpi_enable_gpe(gpe_device, gpe_number,
486 ACPI_GPE_TYPE_RUNTIME);
487 if (ACPI_FAILURE(status)) {
488 ACPI_EXCEPTION((AE_INFO, status,
489 "Could not enable GPE 0x%02X",
490 gpe_number));
491 }
492 }
493 }
494
495 ACPI_DEBUG_PRINT((ACPI_DB_LOAD,
496 "Registered GPE method %s as GPE number 0x%.2X\n",
497 name, gpe_number));
498 return_ACPI_STATUS(AE_OK);
499}
500
501/*******************************************************************************
502 *
503 * FUNCTION: acpi_ev_match_prw_and_gpe
504 *
505 * PARAMETERS: Callback from walk_namespace
506 *
507 * RETURN: Status. NOTE: We ignore errors so that the _PRW walk is
508 * not aborted on a single _PRW failure.
509 *
510 * DESCRIPTION: Called from acpi_walk_namespace. Expects each object to be a
511 * Device. Run the _PRW method. If present, extract the GPE
512 * number and mark the GPE as a CAN_WAKE GPE. Allows a
513 * per-owner_id execution if execute_by_owner_id is TRUE in the
514 * walk_info parameter block.
515 *
516 * If walk_info->execute_by_owner_id is TRUE, we only execute _PRWs with that
517 * owner.
518 * If walk_info->gpe_device is NULL, we execute every _PRW found. Otherwise,
519 * we only execute _PRWs that refer to the input gpe_device.
520 *
521 ******************************************************************************/
522
523acpi_status
524acpi_ev_match_prw_and_gpe(acpi_handle obj_handle,
525 u32 level, void *context, void **return_value)
526{
527 struct acpi_gpe_walk_info *walk_info =
528 ACPI_CAST_PTR(struct acpi_gpe_walk_info, context);
529 struct acpi_namespace_node *gpe_device;
530 struct acpi_gpe_block_info *gpe_block;
531 struct acpi_namespace_node *target_gpe_device;
532 struct acpi_namespace_node *prw_node;
533 struct acpi_gpe_event_info *gpe_event_info;
534 union acpi_operand_object *pkg_desc;
535 union acpi_operand_object *obj_desc;
536 u32 gpe_number;
537 acpi_status status;
538
539 ACPI_FUNCTION_TRACE(ev_match_prw_and_gpe);
540
541 /* Check for a _PRW method under this device */
542
543 status = acpi_ns_get_node(obj_handle, METHOD_NAME__PRW,
544 ACPI_NS_NO_UPSEARCH, &prw_node);
545 if (ACPI_FAILURE(status)) {
546 return_ACPI_STATUS(AE_OK);
547 }
548
549 /* Check if requested owner_id matches this owner_id */
550
551 if ((walk_info->execute_by_owner_id) &&
552 (prw_node->owner_id != walk_info->owner_id)) {
553 return_ACPI_STATUS(AE_OK);
554 }
555
556 /* Execute the _PRW */
557
558 status = acpi_ut_evaluate_object(prw_node, NULL,
559 ACPI_BTYPE_PACKAGE, &pkg_desc);
560 if (ACPI_FAILURE(status)) {
561 return_ACPI_STATUS(AE_OK);
562 }
563
564 /* The returned _PRW package must have at least two elements */
565
566 if (pkg_desc->package.count < 2) {
567 goto cleanup;
568 }
569
570 /* Extract pointers from the input context */
571
572 gpe_device = walk_info->gpe_device;
573 gpe_block = walk_info->gpe_block;
574
575 /*
576 * The _PRW object must return a package, we are only interested
577 * in the first element
578 */
579 obj_desc = pkg_desc->package.elements[0];
580
581 if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
582
583 /* Use FADT-defined GPE device (from definition of _PRW) */
584
585 target_gpe_device = NULL;
586 if (gpe_device) {
587 target_gpe_device = acpi_gbl_fadt_gpe_device;
588 }
589
590 /* Integer is the GPE number in the FADT described GPE blocks */
591
592 gpe_number = (u32)obj_desc->integer.value;
593 } else if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
594
595 /* Package contains a GPE reference and GPE number within a GPE block */
596
597 if ((obj_desc->package.count < 2) ||
598 ((obj_desc->package.elements[0])->common.type !=
599 ACPI_TYPE_LOCAL_REFERENCE) ||
600 ((obj_desc->package.elements[1])->common.type !=
601 ACPI_TYPE_INTEGER)) {
602 goto cleanup;
603 }
604
605 /* Get GPE block reference and decode */
606
607 target_gpe_device =
608 obj_desc->package.elements[0]->reference.node;
609 gpe_number = (u32)obj_desc->package.elements[1]->integer.value;
610 } else {
611 /* Unknown type, just ignore it */
612
613 goto cleanup;
614 }
615
616 /* Get the gpe_event_info for this GPE */
617
618 if (gpe_device) {
619 /*
620 * Is this GPE within this block?
621 *
622 * TRUE if and only if these conditions are true:
623 * 1) The GPE devices match.
624 * 2) The GPE index(number) is within the range of the Gpe Block
625 * associated with the GPE device.
626 */
627 if (gpe_device != target_gpe_device) {
628 goto cleanup;
629 }
630
631 gpe_event_info =
632 acpi_ev_low_get_gpe_info(gpe_number, gpe_block);
633 } else {
634 /* gpe_device is NULL, just match the target_device and gpe_number */
635
636 gpe_event_info =
637 acpi_ev_get_gpe_event_info(target_gpe_device, gpe_number);
638 }
639
640 if (gpe_event_info) {
641 if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
642
643 /* This GPE can wake the system */
644
645 gpe_event_info->flags |= ACPI_GPE_CAN_WAKE;
646 walk_info->count++;
647 }
648 }
649
650 cleanup:
651 acpi_ut_remove_reference(pkg_desc);
652 return_ACPI_STATUS(AE_OK);
653}
diff --git a/drivers/acpi/acpica/evgpeutil.c b/drivers/acpi/acpica/evgpeutil.c
new file mode 100644
index 000000000000..19a0e513ea48
--- /dev/null
+++ b/drivers/acpi/acpica/evgpeutil.c
@@ -0,0 +1,337 @@
1/******************************************************************************
2 *
3 * Module Name: evgpeutil - GPE utilities
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2010, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acevents.h"
47
48#define _COMPONENT ACPI_EVENTS
49ACPI_MODULE_NAME("evgpeutil")
50
51/*******************************************************************************
52 *
53 * FUNCTION: acpi_ev_walk_gpe_list
54 *
55 * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
56 * Context - Value passed to callback
57 *
58 * RETURN: Status
59 *
60 * DESCRIPTION: Walk the GPE lists.
61 *
62 ******************************************************************************/
63acpi_status
64acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
65{
66 struct acpi_gpe_block_info *gpe_block;
67 struct acpi_gpe_xrupt_info *gpe_xrupt_info;
68 acpi_status status = AE_OK;
69 acpi_cpu_flags flags;
70
71 ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
72
73 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
74
75 /* Walk the interrupt level descriptor list */
76
77 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
78 while (gpe_xrupt_info) {
79
80 /* Walk all Gpe Blocks attached to this interrupt level */
81
82 gpe_block = gpe_xrupt_info->gpe_block_list_head;
83 while (gpe_block) {
84
85 /* One callback per GPE block */
86
87 status =
88 gpe_walk_callback(gpe_xrupt_info, gpe_block,
89 context);
90 if (ACPI_FAILURE(status)) {
91 if (status == AE_CTRL_END) { /* Callback abort */
92 status = AE_OK;
93 }
94 goto unlock_and_exit;
95 }
96
97 gpe_block = gpe_block->next;
98 }
99
100 gpe_xrupt_info = gpe_xrupt_info->next;
101 }
102
103 unlock_and_exit:
104 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
105 return_ACPI_STATUS(status);
106}
107
108/*******************************************************************************
109 *
110 * FUNCTION: acpi_ev_valid_gpe_event
111 *
112 * PARAMETERS: gpe_event_info - Info for this GPE
113 *
114 * RETURN: TRUE if the gpe_event is valid
115 *
116 * DESCRIPTION: Validate a GPE event. DO NOT CALL FROM INTERRUPT LEVEL.
117 * Should be called only when the GPE lists are semaphore locked
118 * and not subject to change.
119 *
120 ******************************************************************************/
121
122u8 acpi_ev_valid_gpe_event(struct acpi_gpe_event_info *gpe_event_info)
123{
124 struct acpi_gpe_xrupt_info *gpe_xrupt_block;
125 struct acpi_gpe_block_info *gpe_block;
126
127 ACPI_FUNCTION_ENTRY();
128
129 /* No need for spin lock since we are not changing any list elements */
130
131 /* Walk the GPE interrupt levels */
132
133 gpe_xrupt_block = acpi_gbl_gpe_xrupt_list_head;
134 while (gpe_xrupt_block) {
135 gpe_block = gpe_xrupt_block->gpe_block_list_head;
136
137 /* Walk the GPE blocks on this interrupt level */
138
139 while (gpe_block) {
140 if ((&gpe_block->event_info[0] <= gpe_event_info) &&
141 (&gpe_block->event_info[gpe_block->gpe_count] >
142 gpe_event_info)) {
143 return (TRUE);
144 }
145
146 gpe_block = gpe_block->next;
147 }
148
149 gpe_xrupt_block = gpe_xrupt_block->next;
150 }
151
152 return (FALSE);
153}
154
155/*******************************************************************************
156 *
157 * FUNCTION: acpi_ev_get_gpe_xrupt_block
158 *
159 * PARAMETERS: interrupt_number - Interrupt for a GPE block
160 *
161 * RETURN: A GPE interrupt block
162 *
163 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
164 * block per unique interrupt level used for GPEs. Should be
165 * called only when the GPE lists are semaphore locked and not
166 * subject to change.
167 *
168 ******************************************************************************/
169
170struct acpi_gpe_xrupt_info *acpi_ev_get_gpe_xrupt_block(u32 interrupt_number)
171{
172 struct acpi_gpe_xrupt_info *next_gpe_xrupt;
173 struct acpi_gpe_xrupt_info *gpe_xrupt;
174 acpi_status status;
175 acpi_cpu_flags flags;
176
177 ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
178
179 /* No need for lock since we are not changing any list elements here */
180
181 next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
182 while (next_gpe_xrupt) {
183 if (next_gpe_xrupt->interrupt_number == interrupt_number) {
184 return_PTR(next_gpe_xrupt);
185 }
186
187 next_gpe_xrupt = next_gpe_xrupt->next;
188 }
189
190 /* Not found, must allocate a new xrupt descriptor */
191
192 gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
193 if (!gpe_xrupt) {
194 return_PTR(NULL);
195 }
196
197 gpe_xrupt->interrupt_number = interrupt_number;
198
199 /* Install new interrupt descriptor with spin lock */
200
201 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
202 if (acpi_gbl_gpe_xrupt_list_head) {
203 next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
204 while (next_gpe_xrupt->next) {
205 next_gpe_xrupt = next_gpe_xrupt->next;
206 }
207
208 next_gpe_xrupt->next = gpe_xrupt;
209 gpe_xrupt->previous = next_gpe_xrupt;
210 } else {
211 acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
212 }
213 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
214
215 /* Install new interrupt handler if not SCI_INT */
216
217 if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
218 status = acpi_os_install_interrupt_handler(interrupt_number,
219 acpi_ev_gpe_xrupt_handler,
220 gpe_xrupt);
221 if (ACPI_FAILURE(status)) {
222 ACPI_ERROR((AE_INFO,
223 "Could not install GPE interrupt handler at level 0x%X",
224 interrupt_number));
225 return_PTR(NULL);
226 }
227 }
228
229 return_PTR(gpe_xrupt);
230}
231
232/*******************************************************************************
233 *
234 * FUNCTION: acpi_ev_delete_gpe_xrupt
235 *
236 * PARAMETERS: gpe_xrupt - A GPE interrupt info block
237 *
238 * RETURN: Status
239 *
240 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
241 * interrupt handler if not the SCI interrupt.
242 *
243 ******************************************************************************/
244
245acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
246{
247 acpi_status status;
248 acpi_cpu_flags flags;
249
250 ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
251
252 /* We never want to remove the SCI interrupt handler */
253
254 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
255 gpe_xrupt->gpe_block_list_head = NULL;
256 return_ACPI_STATUS(AE_OK);
257 }
258
259 /* Disable this interrupt */
260
261 status =
262 acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
263 acpi_ev_gpe_xrupt_handler);
264 if (ACPI_FAILURE(status)) {
265 return_ACPI_STATUS(status);
266 }
267
268 /* Unlink the interrupt block with lock */
269
270 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
271 if (gpe_xrupt->previous) {
272 gpe_xrupt->previous->next = gpe_xrupt->next;
273 } else {
274 /* No previous, update list head */
275
276 acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
277 }
278
279 if (gpe_xrupt->next) {
280 gpe_xrupt->next->previous = gpe_xrupt->previous;
281 }
282 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
283
284 /* Free the block */
285
286 ACPI_FREE(gpe_xrupt);
287 return_ACPI_STATUS(AE_OK);
288}
289
290/*******************************************************************************
291 *
292 * FUNCTION: acpi_ev_delete_gpe_handlers
293 *
294 * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
295 * gpe_block - Gpe Block info
296 *
297 * RETURN: Status
298 *
299 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
300 * Used only prior to termination.
301 *
302 ******************************************************************************/
303
304acpi_status
305acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
306 struct acpi_gpe_block_info *gpe_block,
307 void *context)
308{
309 struct acpi_gpe_event_info *gpe_event_info;
310 u32 i;
311 u32 j;
312
313 ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
314
315 /* Examine each GPE Register within the block */
316
317 for (i = 0; i < gpe_block->register_count; i++) {
318
319 /* Now look at the individual GPEs in this byte register */
320
321 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
322 gpe_event_info = &gpe_block->event_info[((acpi_size) i *
323 ACPI_GPE_REGISTER_WIDTH)
324 + j];
325
326 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
327 ACPI_GPE_DISPATCH_HANDLER) {
328 ACPI_FREE(gpe_event_info->dispatch.handler);
329 gpe_event_info->dispatch.handler = NULL;
330 gpe_event_info->flags &=
331 ~ACPI_GPE_DISPATCH_MASK;
332 }
333 }
334 }
335
336 return_ACPI_STATUS(AE_OK);
337}
diff --git a/drivers/acpi/acpica/evmisc.c b/drivers/acpi/acpica/evmisc.c
index 9a3cb7045a32..df0aea9a8cfd 100644
--- a/drivers/acpi/acpica/evmisc.c
+++ b/drivers/acpi/acpica/evmisc.c
@@ -590,7 +590,7 @@ void acpi_ev_terminate(void)
590 status = acpi_disable_event(i, 0); 590 status = acpi_disable_event(i, 0);
591 if (ACPI_FAILURE(status)) { 591 if (ACPI_FAILURE(status)) {
592 ACPI_ERROR((AE_INFO, 592 ACPI_ERROR((AE_INFO,
593 "Could not disable fixed event %d", 593 "Could not disable fixed event %u",
594 (u32) i)); 594 (u32) i));
595 } 595 }
596 } 596 }
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index b40757955f9b..cc825023012a 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -142,7 +142,7 @@ acpi_install_fixed_event_handler(u32 event,
142 if (ACPI_SUCCESS(status)) 142 if (ACPI_SUCCESS(status))
143 status = acpi_enable_event(event, 0); 143 status = acpi_enable_event(event, 0);
144 if (ACPI_FAILURE(status)) { 144 if (ACPI_FAILURE(status)) {
145 ACPI_WARNING((AE_INFO, "Could not enable fixed event %X", 145 ACPI_WARNING((AE_INFO, "Could not enable fixed event 0x%X",
146 event)); 146 event));
147 147
148 /* Remove the handler */ 148 /* Remove the handler */
@@ -203,7 +203,7 @@ acpi_remove_fixed_event_handler(u32 event, acpi_event_handler handler)
203 203
204 if (ACPI_FAILURE(status)) { 204 if (ACPI_FAILURE(status)) {
205 ACPI_WARNING((AE_INFO, 205 ACPI_WARNING((AE_INFO,
206 "Could not write to fixed event enable register %X", 206 "Could not write to fixed event enable register 0x%X",
207 event)); 207 event));
208 } else { 208 } else {
209 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n", 209 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Disabled fixed event %X\n",
@@ -682,14 +682,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
682 682
683 /* Parameter validation */ 683 /* Parameter validation */
684 684
685 if ((!address) || (type > ACPI_GPE_XRUPT_TYPE_MASK)) { 685 if ((!address) || (type & ~ACPI_GPE_XRUPT_TYPE_MASK)) {
686 status = AE_BAD_PARAMETER; 686 return_ACPI_STATUS(AE_BAD_PARAMETER);
687 goto exit;
688 } 687 }
689 688
690 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS); 689 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
691 if (ACPI_FAILURE(status)) { 690 if (ACPI_FAILURE(status)) {
692 goto exit; 691 return_ACPI_STATUS(status);
693 } 692 }
694 693
695 /* Ensure that we have a valid GPE number */ 694 /* Ensure that we have a valid GPE number */
@@ -720,6 +719,13 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
720 handler->context = context; 719 handler->context = context;
721 handler->method_node = gpe_event_info->dispatch.method_node; 720 handler->method_node = gpe_event_info->dispatch.method_node;
722 721
722 /* Disable the GPE before installing the handler */
723
724 status = acpi_ev_disable_gpe(gpe_event_info);
725 if (ACPI_FAILURE (status)) {
726 goto unlock_and_exit;
727 }
728
723 /* Install the handler */ 729 /* Install the handler */
724 730
725 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 731 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
@@ -733,12 +739,8 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
733 739
734 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 740 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
735 741
736 unlock_and_exit: 742unlock_and_exit:
737 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS); 743 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
738 exit:
739 if (ACPI_FAILURE(status))
740 ACPI_EXCEPTION((AE_INFO, status,
741 "Installing notify handler failed"));
742 return_ACPI_STATUS(status); 744 return_ACPI_STATUS(status);
743} 745}
744 746
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 5ff32c78ea2d..d5a5efc043bf 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
69 69
70acpi_status acpi_enable(void) 70acpi_status acpi_enable(void)
71{ 71{
72 acpi_status status = AE_OK; 72 acpi_status status;
73 73
74 ACPI_FUNCTION_TRACE(acpi_enable); 74 ACPI_FUNCTION_TRACE(acpi_enable);
75 75
@@ -84,21 +84,30 @@ acpi_status acpi_enable(void)
84 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { 84 if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
85 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 85 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
86 "System is already in ACPI mode\n")); 86 "System is already in ACPI mode\n"));
87 } else { 87 return_ACPI_STATUS(AE_OK);
88 /* Transition to ACPI mode */ 88 }
89 89
90 status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); 90 /* Transition to ACPI mode */
91 if (ACPI_FAILURE(status)) {
92 ACPI_ERROR((AE_INFO,
93 "Could not transition to ACPI mode"));
94 return_ACPI_STATUS(status);
95 }
96 91
97 ACPI_DEBUG_PRINT((ACPI_DB_INIT, 92 status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
98 "Transition to ACPI mode successful\n")); 93 if (ACPI_FAILURE(status)) {
94 ACPI_ERROR((AE_INFO,
95 "Could not transition to ACPI mode"));
96 return_ACPI_STATUS(status);
99 } 97 }
100 98
101 return_ACPI_STATUS(status); 99 /* Sanity check that transition succeeded */
100
101 if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
102 ACPI_ERROR((AE_INFO,
103 "Hardware did not enter ACPI mode"));
104 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
105 }
106
107 ACPI_DEBUG_PRINT((ACPI_DB_INIT,
108 "Transition to ACPI mode successful\n"));
109
110 return_ACPI_STATUS(AE_OK);
102} 111}
103 112
104ACPI_EXPORT_SYMBOL(acpi_enable) 113ACPI_EXPORT_SYMBOL(acpi_enable)
@@ -203,21 +212,26 @@ ACPI_EXPORT_SYMBOL(acpi_enable_event)
203 * 212 *
204 * FUNCTION: acpi_set_gpe 213 * FUNCTION: acpi_set_gpe
205 * 214 *
206 * PARAMETERS: gpe_device - Parent GPE Device 215 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
207 * gpe_number - GPE level within the GPE block 216 * gpe_number - GPE level within the GPE block
208 * action - Enable or disable 217 * action - ACPI_GPE_ENABLE or ACPI_GPE_DISABLE
209 * Called from ISR or not
210 * 218 *
211 * RETURN: Status 219 * RETURN: Status
212 * 220 *
213 * DESCRIPTION: Enable or disable an ACPI event (general purpose) 221 * DESCRIPTION: Enable or disable an individual GPE. This function bypasses
222 * the reference count mechanism used in the acpi_enable_gpe and
223 * acpi_disable_gpe interfaces -- and should be used with care.
224 *
225 * Note: Typically used to disable a runtime GPE for short period of time,
226 * then re-enable it, without disturbing the existing reference counts. This
227 * is useful, for example, in the Embedded Controller (EC) driver.
214 * 228 *
215 ******************************************************************************/ 229 ******************************************************************************/
216acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action) 230acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
217{ 231{
218 acpi_status status = AE_OK;
219 acpi_cpu_flags flags;
220 struct acpi_gpe_event_info *gpe_event_info; 232 struct acpi_gpe_event_info *gpe_event_info;
233 acpi_status status;
234 acpi_cpu_flags flags;
221 235
222 ACPI_FUNCTION_TRACE(acpi_set_gpe); 236 ACPI_FUNCTION_TRACE(acpi_set_gpe);
223 237
@@ -243,7 +257,6 @@ acpi_status acpi_set_gpe(acpi_handle gpe_device, u32 gpe_number, u8 action)
243 break; 257 break;
244 258
245 default: 259 default:
246 ACPI_ERROR((AE_INFO, "Invalid action\n"));
247 status = AE_BAD_PARAMETER; 260 status = AE_BAD_PARAMETER;
248 break; 261 break;
249 } 262 }
@@ -259,25 +272,31 @@ ACPI_EXPORT_SYMBOL(acpi_set_gpe)
259 * 272 *
260 * FUNCTION: acpi_enable_gpe 273 * FUNCTION: acpi_enable_gpe
261 * 274 *
262 * PARAMETERS: gpe_device - Parent GPE Device 275 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
263 * gpe_number - GPE level within the GPE block 276 * gpe_number - GPE level within the GPE block
264 * type - Purpose the GPE will be used for 277 * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
278 * or both
265 * 279 *
266 * RETURN: Status 280 * RETURN: Status
267 * 281 *
268 * DESCRIPTION: Take a reference to a GPE and enable it if necessary 282 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
283 * hardware-enabled (for runtime GPEs), or the GPE register mask
284 * is updated (for wake GPEs).
269 * 285 *
270 ******************************************************************************/ 286 ******************************************************************************/
271acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) 287acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
272{ 288{
273 acpi_status status = AE_OK; 289 acpi_status status = AE_OK;
274 acpi_cpu_flags flags;
275 struct acpi_gpe_event_info *gpe_event_info; 290 struct acpi_gpe_event_info *gpe_event_info;
291 acpi_cpu_flags flags;
276 292
277 ACPI_FUNCTION_TRACE(acpi_enable_gpe); 293 ACPI_FUNCTION_TRACE(acpi_enable_gpe);
278 294
279 if (type & ~ACPI_GPE_TYPE_WAKE_RUN) 295 /* Parameter validation */
296
297 if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
280 return_ACPI_STATUS(AE_BAD_PARAMETER); 298 return_ACPI_STATUS(AE_BAD_PARAMETER);
299 }
281 300
282 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 301 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
283 302
@@ -289,26 +308,43 @@ acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
289 goto unlock_and_exit; 308 goto unlock_and_exit;
290 } 309 }
291 310
292 if (type & ACPI_GPE_TYPE_RUNTIME) { 311 if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
293 if (++gpe_event_info->runtime_count == 1) { 312 if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
313 status = AE_LIMIT; /* Too many references */
314 goto unlock_and_exit;
315 }
316
317 gpe_event_info->runtime_count++;
318 if (gpe_event_info->runtime_count == 1) {
294 status = acpi_ev_enable_gpe(gpe_event_info); 319 status = acpi_ev_enable_gpe(gpe_event_info);
295 if (ACPI_FAILURE(status)) 320 if (ACPI_FAILURE(status)) {
296 gpe_event_info->runtime_count--; 321 gpe_event_info->runtime_count--;
322 goto unlock_and_exit;
323 }
297 } 324 }
298 } 325 }
299 326
300 if (type & ACPI_GPE_TYPE_WAKE) { 327 if (gpe_type & ACPI_GPE_TYPE_WAKE) {
328 /* The GPE must have the ability to wake the system */
329
301 if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) { 330 if (!(gpe_event_info->flags & ACPI_GPE_CAN_WAKE)) {
302 status = AE_BAD_PARAMETER; 331 status = AE_TYPE;
332 goto unlock_and_exit;
333 }
334
335 if (gpe_event_info->wakeup_count == ACPI_UINT8_MAX) {
336 status = AE_LIMIT; /* Too many references */
303 goto unlock_and_exit; 337 goto unlock_and_exit;
304 } 338 }
305 339
306 /* 340 /*
307 * Wake-up GPEs are only enabled right prior to putting the 341 * Update the enable mask on the first wakeup reference. Wake GPEs
308 * system into a sleep state. 342 * are only hardware-enabled just before sleeping.
309 */ 343 */
310 if (++gpe_event_info->wakeup_count == 1) 344 gpe_event_info->wakeup_count++;
311 acpi_ev_update_gpe_enable_masks(gpe_event_info); 345 if (gpe_event_info->wakeup_count == 1) {
346 (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
347 }
312 } 348 }
313 349
314unlock_and_exit: 350unlock_and_exit:
@@ -321,27 +357,34 @@ ACPI_EXPORT_SYMBOL(acpi_enable_gpe)
321 * 357 *
322 * FUNCTION: acpi_disable_gpe 358 * FUNCTION: acpi_disable_gpe
323 * 359 *
324 * PARAMETERS: gpe_device - Parent GPE Device 360 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
325 * gpe_number - GPE level within the GPE block 361 * gpe_number - GPE level within the GPE block
326 * type - Purpose the GPE won't be used for any more 362 * gpe_type - ACPI_GPE_TYPE_RUNTIME or ACPI_GPE_TYPE_WAKE
363 * or both
327 * 364 *
328 * RETURN: Status 365 * RETURN: Status
329 * 366 *
330 * DESCRIPTION: Release a reference to a GPE and disable it if necessary 367 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
368 * removed, only then is the GPE disabled (for runtime GPEs), or
369 * the GPE mask bit disabled (for wake GPEs)
331 * 370 *
332 ******************************************************************************/ 371 ******************************************************************************/
333acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type) 372acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 gpe_type)
334{ 373{
335 acpi_status status = AE_OK; 374 acpi_status status = AE_OK;
336 acpi_cpu_flags flags;
337 struct acpi_gpe_event_info *gpe_event_info; 375 struct acpi_gpe_event_info *gpe_event_info;
376 acpi_cpu_flags flags;
338 377
339 ACPI_FUNCTION_TRACE(acpi_disable_gpe); 378 ACPI_FUNCTION_TRACE(acpi_disable_gpe);
340 379
341 if (type & ~ACPI_GPE_TYPE_WAKE_RUN) 380 /* Parameter validation */
381
382 if (!gpe_type || (gpe_type & ~ACPI_GPE_TYPE_WAKE_RUN)) {
342 return_ACPI_STATUS(AE_BAD_PARAMETER); 383 return_ACPI_STATUS(AE_BAD_PARAMETER);
384 }
343 385
344 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock); 386 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
387
345 /* Ensure that we have a valid GPE number */ 388 /* Ensure that we have a valid GPE number */
346 389
347 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number); 390 gpe_event_info = acpi_ev_get_gpe_event_info(gpe_device, gpe_number);
@@ -350,18 +393,39 @@ acpi_status acpi_disable_gpe(acpi_handle gpe_device, u32 gpe_number, u8 type)
350 goto unlock_and_exit; 393 goto unlock_and_exit;
351 } 394 }
352 395
353 if ((type & ACPI_GPE_TYPE_RUNTIME) && gpe_event_info->runtime_count) { 396 /* Hardware-disable a runtime GPE on removal of the last reference */
354 if (--gpe_event_info->runtime_count == 0) 397
398 if (gpe_type & ACPI_GPE_TYPE_RUNTIME) {
399 if (!gpe_event_info->runtime_count) {
400 status = AE_LIMIT; /* There are no references to remove */
401 goto unlock_and_exit;
402 }
403
404 gpe_event_info->runtime_count--;
405 if (!gpe_event_info->runtime_count) {
355 status = acpi_ev_disable_gpe(gpe_event_info); 406 status = acpi_ev_disable_gpe(gpe_event_info);
407 if (ACPI_FAILURE(status)) {
408 gpe_event_info->runtime_count++;
409 goto unlock_and_exit;
410 }
411 }
356 } 412 }
357 413
358 if ((type & ACPI_GPE_TYPE_WAKE) && gpe_event_info->wakeup_count) { 414 /*
359 /* 415 * Update masks for wake GPE on removal of the last reference.
360 * Wake-up GPEs are not enabled after leaving system sleep 416 * No need to hardware-disable wake GPEs here, they are not currently
361 * states, so we don't need to disable them here. 417 * enabled.
362 */ 418 */
363 if (--gpe_event_info->wakeup_count == 0) 419 if (gpe_type & ACPI_GPE_TYPE_WAKE) {
364 acpi_ev_update_gpe_enable_masks(gpe_event_info); 420 if (!gpe_event_info->wakeup_count) {
421 status = AE_LIMIT; /* There are no references to remove */
422 goto unlock_and_exit;
423 }
424
425 gpe_event_info->wakeup_count--;
426 if (!gpe_event_info->wakeup_count) {
427 (void)acpi_ev_update_gpe_enable_masks(gpe_event_info);
428 }
365 } 429 }
366 430
367unlock_and_exit: 431unlock_and_exit:
@@ -465,30 +529,23 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
465 * 529 *
466 * FUNCTION: acpi_clear_gpe 530 * FUNCTION: acpi_clear_gpe
467 * 531 *
468 * PARAMETERS: gpe_device - Parent GPE Device 532 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
469 * gpe_number - GPE level within the GPE block 533 * gpe_number - GPE level within the GPE block
470 * Flags - Called from an ISR or not
471 * 534 *
472 * RETURN: Status 535 * RETURN: Status
473 * 536 *
474 * DESCRIPTION: Clear an ACPI event (general purpose) 537 * DESCRIPTION: Clear an ACPI event (general purpose)
475 * 538 *
476 ******************************************************************************/ 539 ******************************************************************************/
477acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags) 540acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number)
478{ 541{
479 acpi_status status = AE_OK; 542 acpi_status status = AE_OK;
480 struct acpi_gpe_event_info *gpe_event_info; 543 struct acpi_gpe_event_info *gpe_event_info;
544 acpi_cpu_flags flags;
481 545
482 ACPI_FUNCTION_TRACE(acpi_clear_gpe); 546 ACPI_FUNCTION_TRACE(acpi_clear_gpe);
483 547
484 /* Use semaphore lock if not executing at interrupt level */ 548 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
485
486 if (flags & ACPI_NOT_ISR) {
487 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
488 if (ACPI_FAILURE(status)) {
489 return_ACPI_STATUS(status);
490 }
491 }
492 549
493 /* Ensure that we have a valid GPE number */ 550 /* Ensure that we have a valid GPE number */
494 551
@@ -501,9 +558,7 @@ acpi_status acpi_clear_gpe(acpi_handle gpe_device, u32 gpe_number, u32 flags)
501 status = acpi_hw_clear_gpe(gpe_event_info); 558 status = acpi_hw_clear_gpe(gpe_event_info);
502 559
503 unlock_and_exit: 560 unlock_and_exit:
504 if (flags & ACPI_NOT_ISR) { 561 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
505 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
506 }
507 return_ACPI_STATUS(status); 562 return_ACPI_STATUS(status);
508} 563}
509 564
@@ -569,9 +624,8 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
569 * 624 *
570 * FUNCTION: acpi_get_gpe_status 625 * FUNCTION: acpi_get_gpe_status
571 * 626 *
572 * PARAMETERS: gpe_device - Parent GPE Device 627 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
573 * gpe_number - GPE level within the GPE block 628 * gpe_number - GPE level within the GPE block
574 * Flags - Called from an ISR or not
575 * event_status - Where the current status of the event will 629 * event_status - Where the current status of the event will
576 * be returned 630 * be returned
577 * 631 *
@@ -582,21 +636,15 @@ ACPI_EXPORT_SYMBOL(acpi_get_event_status)
582 ******************************************************************************/ 636 ******************************************************************************/
583acpi_status 637acpi_status
584acpi_get_gpe_status(acpi_handle gpe_device, 638acpi_get_gpe_status(acpi_handle gpe_device,
585 u32 gpe_number, u32 flags, acpi_event_status * event_status) 639 u32 gpe_number, acpi_event_status *event_status)
586{ 640{
587 acpi_status status = AE_OK; 641 acpi_status status = AE_OK;
588 struct acpi_gpe_event_info *gpe_event_info; 642 struct acpi_gpe_event_info *gpe_event_info;
643 acpi_cpu_flags flags;
589 644
590 ACPI_FUNCTION_TRACE(acpi_get_gpe_status); 645 ACPI_FUNCTION_TRACE(acpi_get_gpe_status);
591 646
592 /* Use semaphore lock if not executing at interrupt level */ 647 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
593
594 if (flags & ACPI_NOT_ISR) {
595 status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
596 if (ACPI_FAILURE(status)) {
597 return_ACPI_STATUS(status);
598 }
599 }
600 648
601 /* Ensure that we have a valid GPE number */ 649 /* Ensure that we have a valid GPE number */
602 650
@@ -614,9 +662,7 @@ acpi_get_gpe_status(acpi_handle gpe_device,
614 *event_status |= ACPI_EVENT_FLAG_HANDLE; 662 *event_status |= ACPI_EVENT_FLAG_HANDLE;
615 663
616 unlock_and_exit: 664 unlock_and_exit:
617 if (flags & ACPI_NOT_ISR) { 665 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
618 (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
619 }
620 return_ACPI_STATUS(status); 666 return_ACPI_STATUS(status);
621} 667}
622 668
@@ -673,20 +719,15 @@ acpi_install_gpe_block(acpi_handle gpe_device,
673 goto unlock_and_exit; 719 goto unlock_and_exit;
674 } 720 }
675 721
676 /* Run the _PRW methods and enable the GPEs */ 722 /* Install block in the device_object attached to the node */
677
678 status = acpi_ev_initialize_gpe_block(node, gpe_block);
679 if (ACPI_FAILURE(status)) {
680 goto unlock_and_exit;
681 }
682
683 /* Get the device_object attached to the node */
684 723
685 obj_desc = acpi_ns_get_attached_object(node); 724 obj_desc = acpi_ns_get_attached_object(node);
686 if (!obj_desc) { 725 if (!obj_desc) {
687 726
688 /* No object, create a new one */ 727 /*
689 728 * No object, create a new one (Device nodes do not always have
729 * an attached object)
730 */
690 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE); 731 obj_desc = acpi_ut_create_internal_object(ACPI_TYPE_DEVICE);
691 if (!obj_desc) { 732 if (!obj_desc) {
692 status = AE_NO_MEMORY; 733 status = AE_NO_MEMORY;
@@ -705,10 +746,14 @@ acpi_install_gpe_block(acpi_handle gpe_device,
705 } 746 }
706 } 747 }
707 748
708 /* Install the GPE block in the device_object */ 749 /* Now install the GPE block in the device_object */
709 750
710 obj_desc->device.gpe_block = gpe_block; 751 obj_desc->device.gpe_block = gpe_block;
711 752
753 /* Run the _PRW methods and enable the runtime GPEs in the new block */
754
755 status = acpi_ev_initialize_gpe_block(node, gpe_block);
756
712 unlock_and_exit: 757 unlock_and_exit:
713 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); 758 (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
714 return_ACPI_STATUS(status); 759 return_ACPI_STATUS(status);
@@ -839,8 +884,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
839 884
840 /* Increment Index by the number of GPEs in this block */ 885 /* Increment Index by the number of GPEs in this block */
841 886
842 info->next_block_base_index += 887 info->next_block_base_index += gpe_block->gpe_count;
843 (gpe_block->register_count * ACPI_GPE_REGISTER_WIDTH);
844 888
845 if (info->index < info->next_block_base_index) { 889 if (info->index < info->next_block_base_index) {
846 /* 890 /*
diff --git a/drivers/acpi/acpica/exconfig.c b/drivers/acpi/acpica/exconfig.c
index 7e8b3bedc376..008621c5ad85 100644
--- a/drivers/acpi/acpica/exconfig.c
+++ b/drivers/acpi/acpica/exconfig.c
@@ -82,8 +82,9 @@ acpi_ex_add_table(u32 table_index,
82 struct acpi_namespace_node *parent_node, 82 struct acpi_namespace_node *parent_node,
83 union acpi_operand_object **ddb_handle) 83 union acpi_operand_object **ddb_handle)
84{ 84{
85 acpi_status status;
86 union acpi_operand_object *obj_desc; 85 union acpi_operand_object *obj_desc;
86 acpi_status status;
87 acpi_owner_id owner_id;
87 88
88 ACPI_FUNCTION_TRACE(ex_add_table); 89 ACPI_FUNCTION_TRACE(ex_add_table);
89 90
@@ -119,7 +120,14 @@ acpi_ex_add_table(u32 table_index,
119 acpi_ns_exec_module_code_list(); 120 acpi_ns_exec_module_code_list();
120 acpi_ex_enter_interpreter(); 121 acpi_ex_enter_interpreter();
121 122
122 return_ACPI_STATUS(status); 123 /* Update GPEs for any new _PRW or _Lxx/_Exx methods. Ignore errors */
124
125 status = acpi_tb_get_owner_id(table_index, &owner_id);
126 if (ACPI_SUCCESS(status)) {
127 acpi_ev_update_gpes(owner_id);
128 }
129
130 return_ACPI_STATUS(AE_OK);
123} 131}
124 132
125/******************************************************************************* 133/*******************************************************************************
@@ -248,10 +256,8 @@ acpi_ex_load_table_op(struct acpi_walk_state *walk_state,
248 256
249 status = acpi_get_table_by_index(table_index, &table); 257 status = acpi_get_table_by_index(table_index, &table);
250 if (ACPI_SUCCESS(status)) { 258 if (ACPI_SUCCESS(status)) {
251 ACPI_INFO((AE_INFO, 259 ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
252 "Dynamic OEM Table Load - [%.4s] OemId [%.6s] OemTableId [%.8s]", 260 acpi_tb_print_table_header(0, table);
253 table->signature, table->oem_id,
254 table->oem_table_id));
255 } 261 }
256 262
257 /* Invoke table handler if present */ 263 /* Invoke table handler if present */
@@ -525,6 +531,9 @@ acpi_ex_load_op(union acpi_operand_object *obj_desc,
525 return_ACPI_STATUS(status); 531 return_ACPI_STATUS(status);
526 } 532 }
527 533
534 ACPI_INFO((AE_INFO, "Dynamic OEM Table Load:"));
535 acpi_tb_print_table_header(0, table_desc.pointer);
536
528 /* Remove the reference by added by acpi_ex_store above */ 537 /* Remove the reference by added by acpi_ex_store above */
529 538
530 acpi_ut_remove_reference(ddb_handle); 539 acpi_ut_remove_reference(ddb_handle);
diff --git a/drivers/acpi/acpica/exconvrt.c b/drivers/acpi/acpica/exconvrt.c
index bda7aed0404b..b73bc50c5b76 100644
--- a/drivers/acpi/acpica/exconvrt.c
+++ b/drivers/acpi/acpica/exconvrt.c
@@ -650,7 +650,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
650 650
651 default: 651 default:
652 ACPI_ERROR((AE_INFO, 652 ACPI_ERROR((AE_INFO,
653 "Bad destination type during conversion: %X", 653 "Bad destination type during conversion: 0x%X",
654 destination_type)); 654 destination_type));
655 status = AE_AML_INTERNAL; 655 status = AE_AML_INTERNAL;
656 break; 656 break;
@@ -665,7 +665,7 @@ acpi_ex_convert_to_target_type(acpi_object_type destination_type,
665 665
666 default: 666 default:
667 ACPI_ERROR((AE_INFO, 667 ACPI_ERROR((AE_INFO,
668 "Unknown Target type ID 0x%X AmlOpcode %X DestType %s", 668 "Unknown Target type ID 0x%X AmlOpcode 0x%X DestType %s",
669 GET_CURRENT_ARG_TYPE(walk_state->op_info-> 669 GET_CURRENT_ARG_TYPE(walk_state->op_info->
670 runtime_args), 670 runtime_args),
671 walk_state->opcode, 671 walk_state->opcode,
diff --git a/drivers/acpi/acpica/excreate.c b/drivers/acpi/acpica/excreate.c
index 0aa57d938698..3c61b48c73f5 100644
--- a/drivers/acpi/acpica/excreate.c
+++ b/drivers/acpi/acpica/excreate.c
@@ -306,12 +306,12 @@ acpi_ex_create_region(u8 * aml_start,
306 */ 306 */
307 if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) && 307 if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
308 (region_space < ACPI_USER_REGION_BEGIN)) { 308 (region_space < ACPI_USER_REGION_BEGIN)) {
309 ACPI_ERROR((AE_INFO, "Invalid AddressSpace type %X", 309 ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
310 region_space)); 310 region_space));
311 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID); 311 return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
312 } 312 }
313 313
314 ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (%X)\n", 314 ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
315 acpi_ut_get_region_name(region_space), region_space)); 315 acpi_ut_get_region_name(region_space), region_space));
316 316
317 /* Create the region descriptor */ 317 /* Create the region descriptor */
diff --git a/drivers/acpi/acpica/exdebug.c b/drivers/acpi/acpica/exdebug.c
new file mode 100644
index 000000000000..be8c98b480d7
--- /dev/null
+++ b/drivers/acpi/acpica/exdebug.c
@@ -0,0 +1,261 @@
1/******************************************************************************
2 *
3 * Module Name: exdebug - Support for stores to the AML Debug Object
4 *
5 *****************************************************************************/
6
7/*
8 * Copyright (C) 2000 - 2010, Intel Corp.
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions, and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * substantially similar to the "NO WARRANTY" disclaimer below
19 * ("Disclaimer") and any redistribution must be conditioned upon
20 * including a substantially similar Disclaimer requirement for further
21 * binary redistribution.
22 * 3. Neither the names of the above-listed copyright holders nor the names
23 * of any contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * Alternatively, this software may be distributed under the terms of the
27 * GNU General Public License ("GPL") version 2 as published by the Free
28 * Software Foundation.
29 *
30 * NO WARRANTY
31 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
32 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
33 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
34 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
35 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
36 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
37 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
38 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
39 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
40 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
41 * POSSIBILITY OF SUCH DAMAGES.
42 */
43
44#include <acpi/acpi.h>
45#include "accommon.h"
46#include "acinterp.h"
47
48#define _COMPONENT ACPI_EXECUTER
49ACPI_MODULE_NAME("exdebug")
50
51#ifndef ACPI_NO_ERROR_MESSAGES
52/*******************************************************************************
53 *
54 * FUNCTION: acpi_ex_do_debug_object
55 *
56 * PARAMETERS: source_desc - Object to be output to "Debug Object"
57 * Level - Indentation level (used for packages)
58 * Index - Current package element, zero if not pkg
59 *
60 * RETURN: None
61 *
62 * DESCRIPTION: Handles stores to the AML Debug Object. For example:
63 * Store(INT1, Debug)
64 *
65 * This function is not compiled if ACPI_NO_ERROR_MESSAGES is set.
66 *
67 * This function is only enabled if acpi_gbl_enable_aml_debug_object is set, or
68 * if ACPI_LV_DEBUG_OBJECT is set in the acpi_dbg_level. Thus, in the normal
69 * operational case, stores to the debug object are ignored but can be easily
70 * enabled if necessary.
71 *
72 ******************************************************************************/
73void
74acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
75 u32 level, u32 index)
76{
77 u32 i;
78
79 ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
80
81 /* Output must be enabled via the debug_object global or the dbg_level */
82
83 if (!acpi_gbl_enable_aml_debug_object &&
84 !(acpi_dbg_level & ACPI_LV_DEBUG_OBJECT)) {
85 return_VOID;
86 }
87
88 /*
89 * Print line header as long as we are not in the middle of an
90 * object display
91 */
92 if (!((level > 0) && index == 0)) {
93 acpi_os_printf("[ACPI Debug] %*s", level, " ");
94 }
95
96 /* Display the index for package output only */
97
98 if (index > 0) {
99 acpi_os_printf("(%.2u) ", index - 1);
100 }
101
102 if (!source_desc) {
103 acpi_os_printf("[Null Object]\n");
104 return_VOID;
105 }
106
107 if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
108 acpi_os_printf("%s ",
109 acpi_ut_get_object_type_name(source_desc));
110
111 if (!acpi_ut_valid_internal_object(source_desc)) {
112 acpi_os_printf("%p, Invalid Internal Object!\n",
113 source_desc);
114 return_VOID;
115 }
116 } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
117 ACPI_DESC_TYPE_NAMED) {
118 acpi_os_printf("%s: %p\n",
119 acpi_ut_get_type_name(((struct
120 acpi_namespace_node *)
121 source_desc)->type),
122 source_desc);
123 return_VOID;
124 } else {
125 return_VOID;
126 }
127
128 /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
129
130 switch (source_desc->common.type) {
131 case ACPI_TYPE_INTEGER:
132
133 /* Output correct integer width */
134
135 if (acpi_gbl_integer_byte_width == 4) {
136 acpi_os_printf("0x%8.8X\n",
137 (u32)source_desc->integer.value);
138 } else {
139 acpi_os_printf("0x%8.8X%8.8X\n",
140 ACPI_FORMAT_UINT64(source_desc->integer.
141 value));
142 }
143 break;
144
145 case ACPI_TYPE_BUFFER:
146
147 acpi_os_printf("[0x%.2X]\n", (u32)source_desc->buffer.length);
148 acpi_ut_dump_buffer2(source_desc->buffer.pointer,
149 (source_desc->buffer.length < 256) ?
150 source_desc->buffer.length : 256,
151 DB_BYTE_DISPLAY);
152 break;
153
154 case ACPI_TYPE_STRING:
155
156 acpi_os_printf("[0x%.2X] \"%s\"\n",
157 source_desc->string.length,
158 source_desc->string.pointer);
159 break;
160
161 case ACPI_TYPE_PACKAGE:
162
163 acpi_os_printf("[Contains 0x%.2X Elements]\n",
164 source_desc->package.count);
165
166 /* Output the entire contents of the package */
167
168 for (i = 0; i < source_desc->package.count; i++) {
169 acpi_ex_do_debug_object(source_desc->package.
170 elements[i], level + 4, i + 1);
171 }
172 break;
173
174 case ACPI_TYPE_LOCAL_REFERENCE:
175
176 acpi_os_printf("[%s] ",
177 acpi_ut_get_reference_name(source_desc));
178
179 /* Decode the reference */
180
181 switch (source_desc->reference.class) {
182 case ACPI_REFCLASS_INDEX:
183
184 acpi_os_printf("0x%X\n", source_desc->reference.value);
185 break;
186
187 case ACPI_REFCLASS_TABLE:
188
189 /* Case for ddb_handle */
190
191 acpi_os_printf("Table Index 0x%X\n",
192 source_desc->reference.value);
193 return;
194
195 default:
196 break;
197 }
198
199 acpi_os_printf(" ");
200
201 /* Check for valid node first, then valid object */
202
203 if (source_desc->reference.node) {
204 if (ACPI_GET_DESCRIPTOR_TYPE
205 (source_desc->reference.node) !=
206 ACPI_DESC_TYPE_NAMED) {
207 acpi_os_printf
208 (" %p - Not a valid namespace node\n",
209 source_desc->reference.node);
210 } else {
211 acpi_os_printf("Node %p [%4.4s] ",
212 source_desc->reference.node,
213 (source_desc->reference.node)->
214 name.ascii);
215
216 switch ((source_desc->reference.node)->type) {
217
218 /* These types have no attached object */
219
220 case ACPI_TYPE_DEVICE:
221 acpi_os_printf("Device\n");
222 break;
223
224 case ACPI_TYPE_THERMAL:
225 acpi_os_printf("Thermal Zone\n");
226 break;
227
228 default:
229 acpi_ex_do_debug_object((source_desc->
230 reference.
231 node)->object,
232 level + 4, 0);
233 break;
234 }
235 }
236 } else if (source_desc->reference.object) {
237 if (ACPI_GET_DESCRIPTOR_TYPE
238 (source_desc->reference.object) ==
239 ACPI_DESC_TYPE_NAMED) {
240 acpi_ex_do_debug_object(((struct
241 acpi_namespace_node *)
242 source_desc->reference.
243 object)->object,
244 level + 4, 0);
245 } else {
246 acpi_ex_do_debug_object(source_desc->reference.
247 object, level + 4, 0);
248 }
249 }
250 break;
251
252 default:
253
254 acpi_os_printf("%p\n", source_desc);
255 break;
256 }
257
258 ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
259 return_VOID;
260}
261#endif
diff --git a/drivers/acpi/acpica/exfield.c b/drivers/acpi/acpica/exfield.c
index 6c79fecbee42..f17d2ff0031b 100644
--- a/drivers/acpi/acpica/exfield.c
+++ b/drivers/acpi/acpica/exfield.c
@@ -281,7 +281,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
281 281
282 if (source_desc->buffer.length < length) { 282 if (source_desc->buffer.length < length) {
283 ACPI_ERROR((AE_INFO, 283 ACPI_ERROR((AE_INFO,
284 "SMBus or IPMI write requires Buffer of length %X, found length %X", 284 "SMBus or IPMI write requires Buffer of length %u, found length %u",
285 length, source_desc->buffer.length)); 285 length, source_desc->buffer.length));
286 286
287 return_ACPI_STATUS(AE_AML_BUFFER_LIMIT); 287 return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c
index f68a216168be..a6dc26f0b3be 100644
--- a/drivers/acpi/acpica/exfldio.c
+++ b/drivers/acpi/acpica/exfldio.c
@@ -94,7 +94,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
94 /* We must have a valid region */ 94 /* We must have a valid region */
95 95
96 if (rgn_desc->common.type != ACPI_TYPE_REGION) { 96 if (rgn_desc->common.type != ACPI_TYPE_REGION) {
97 ACPI_ERROR((AE_INFO, "Needed Region, found type %X (%s)", 97 ACPI_ERROR((AE_INFO, "Needed Region, found type 0x%X (%s)",
98 rgn_desc->common.type, 98 rgn_desc->common.type,
99 acpi_ut_get_object_type_name(rgn_desc))); 99 acpi_ut_get_object_type_name(rgn_desc)));
100 100
@@ -175,7 +175,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
175 * byte, and a field with Dword access specified. 175 * byte, and a field with Dword access specified.
176 */ 176 */
177 ACPI_ERROR((AE_INFO, 177 ACPI_ERROR((AE_INFO,
178 "Field [%4.4s] access width (%d bytes) too large for region [%4.4s] (length %X)", 178 "Field [%4.4s] access width (%u bytes) too large for region [%4.4s] (length %u)",
179 acpi_ut_get_node_name(obj_desc-> 179 acpi_ut_get_node_name(obj_desc->
180 common_field.node), 180 common_field.node),
181 obj_desc->common_field.access_byte_width, 181 obj_desc->common_field.access_byte_width,
@@ -189,7 +189,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
189 * exceeds region length, indicate an error 189 * exceeds region length, indicate an error
190 */ 190 */
191 ACPI_ERROR((AE_INFO, 191 ACPI_ERROR((AE_INFO,
192 "Field [%4.4s] Base+Offset+Width %X+%X+%X is beyond end of region [%4.4s] (length %X)", 192 "Field [%4.4s] Base+Offset+Width %u+%u+%u is beyond end of region [%4.4s] (length %u)",
193 acpi_ut_get_node_name(obj_desc->common_field.node), 193 acpi_ut_get_node_name(obj_desc->common_field.node),
194 obj_desc->common_field.base_byte_offset, 194 obj_desc->common_field.base_byte_offset,
195 field_datum_byte_offset, 195 field_datum_byte_offset,
@@ -281,13 +281,13 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
281 if (ACPI_FAILURE(status)) { 281 if (ACPI_FAILURE(status)) {
282 if (status == AE_NOT_IMPLEMENTED) { 282 if (status == AE_NOT_IMPLEMENTED) {
283 ACPI_ERROR((AE_INFO, 283 ACPI_ERROR((AE_INFO,
284 "Region %s(%X) not implemented", 284 "Region %s(0x%X) not implemented",
285 acpi_ut_get_region_name(rgn_desc->region. 285 acpi_ut_get_region_name(rgn_desc->region.
286 space_id), 286 space_id),
287 rgn_desc->region.space_id)); 287 rgn_desc->region.space_id));
288 } else if (status == AE_NOT_EXIST) { 288 } else if (status == AE_NOT_EXIST) {
289 ACPI_ERROR((AE_INFO, 289 ACPI_ERROR((AE_INFO,
290 "Region %s(%X) has no handler", 290 "Region %s(0x%X) has no handler",
291 acpi_ut_get_region_name(rgn_desc->region. 291 acpi_ut_get_region_name(rgn_desc->region.
292 space_id), 292 space_id),
293 rgn_desc->region.space_id)); 293 rgn_desc->region.space_id));
@@ -525,7 +525,7 @@ acpi_ex_field_datum_io(union acpi_operand_object *obj_desc,
525 525
526 default: 526 default:
527 527
528 ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %X", 528 ACPI_ERROR((AE_INFO, "Wrong object type in field I/O %u",
529 obj_desc->common.type)); 529 obj_desc->common.type));
530 status = AE_AML_INTERNAL; 530 status = AE_AML_INTERNAL;
531 break; 531 break;
@@ -630,7 +630,7 @@ acpi_ex_write_with_update_rule(union acpi_operand_object *obj_desc,
630 default: 630 default:
631 631
632 ACPI_ERROR((AE_INFO, 632 ACPI_ERROR((AE_INFO,
633 "Unknown UpdateRule value: %X", 633 "Unknown UpdateRule value: 0x%X",
634 (obj_desc->common_field. 634 (obj_desc->common_field.
635 field_flags & 635 field_flags &
636 AML_FIELD_UPDATE_RULE_MASK))); 636 AML_FIELD_UPDATE_RULE_MASK)));
@@ -689,7 +689,7 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc,
689 if (buffer_length < 689 if (buffer_length <
690 ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) { 690 ACPI_ROUND_BITS_UP_TO_BYTES(obj_desc->common_field.bit_length)) {
691 ACPI_ERROR((AE_INFO, 691 ACPI_ERROR((AE_INFO,
692 "Field size %X (bits) is too large for buffer (%X)", 692 "Field size %u (bits) is too large for buffer (%u)",
693 obj_desc->common_field.bit_length, buffer_length)); 693 obj_desc->common_field.bit_length, buffer_length));
694 694
695 return_ACPI_STATUS(AE_BUFFER_OVERFLOW); 695 return_ACPI_STATUS(AE_BUFFER_OVERFLOW);
diff --git a/drivers/acpi/acpica/exmisc.c b/drivers/acpi/acpica/exmisc.c
index c5bb1eeed2df..95db4be0877b 100644
--- a/drivers/acpi/acpica/exmisc.c
+++ b/drivers/acpi/acpica/exmisc.c
@@ -99,7 +99,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
99 99
100 default: 100 default:
101 101
102 ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X", 102 ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
103 obj_desc->reference.class)); 103 obj_desc->reference.class));
104 return_ACPI_STATUS(AE_AML_INTERNAL); 104 return_ACPI_STATUS(AE_AML_INTERNAL);
105 } 105 }
@@ -115,7 +115,7 @@ acpi_ex_get_object_reference(union acpi_operand_object *obj_desc,
115 115
116 default: 116 default:
117 117
118 ACPI_ERROR((AE_INFO, "Invalid descriptor type %X", 118 ACPI_ERROR((AE_INFO, "Invalid descriptor type 0x%X",
119 ACPI_GET_DESCRIPTOR_TYPE(obj_desc))); 119 ACPI_GET_DESCRIPTOR_TYPE(obj_desc)));
120 return_ACPI_STATUS(AE_TYPE); 120 return_ACPI_STATUS(AE_TYPE);
121 } 121 }
@@ -276,7 +276,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
276 break; 276 break;
277 277
278 default: 278 default:
279 ACPI_ERROR((AE_INFO, "Invalid object type: %X", 279 ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
280 operand0->common.type)); 280 operand0->common.type));
281 status = AE_AML_INTERNAL; 281 status = AE_AML_INTERNAL;
282 } 282 }
@@ -378,7 +378,7 @@ acpi_ex_do_concatenate(union acpi_operand_object *operand0,
378 378
379 /* Invalid object type, should not happen here */ 379 /* Invalid object type, should not happen here */
380 380
381 ACPI_ERROR((AE_INFO, "Invalid object type: %X", 381 ACPI_ERROR((AE_INFO, "Invalid object type: 0x%X",
382 operand0->common.type)); 382 operand0->common.type));
383 status = AE_AML_INTERNAL; 383 status = AE_AML_INTERNAL;
384 goto cleanup; 384 goto cleanup;
diff --git a/drivers/acpi/acpica/exmutex.c b/drivers/acpi/acpica/exmutex.c
index 7116bc86494d..f73be97043c0 100644
--- a/drivers/acpi/acpica/exmutex.c
+++ b/drivers/acpi/acpica/exmutex.c
@@ -85,10 +85,10 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
85 (obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next; 85 (obj_desc->mutex.prev)->mutex.next = obj_desc->mutex.next;
86 86
87 /* 87 /*
88 * Migrate the previous sync level associated with this mutex to the 88 * Migrate the previous sync level associated with this mutex to
89 * previous mutex on the list so that it may be preserved. This handles 89 * the previous mutex on the list so that it may be preserved.
90 * the case where several mutexes have been acquired at the same level, 90 * This handles the case where several mutexes have been acquired
91 * but are not released in opposite order. 91 * at the same level, but are not released in opposite order.
92 */ 92 */
93 (obj_desc->mutex.prev)->mutex.original_sync_level = 93 (obj_desc->mutex.prev)->mutex.original_sync_level =
94 obj_desc->mutex.original_sync_level; 94 obj_desc->mutex.original_sync_level;
@@ -101,8 +101,8 @@ void acpi_ex_unlink_mutex(union acpi_operand_object *obj_desc)
101 * 101 *
102 * FUNCTION: acpi_ex_link_mutex 102 * FUNCTION: acpi_ex_link_mutex
103 * 103 *
104 * PARAMETERS: obj_desc - The mutex to be linked 104 * PARAMETERS: obj_desc - The mutex to be linked
105 * Thread - Current executing thread object 105 * Thread - Current executing thread object
106 * 106 *
107 * RETURN: None 107 * RETURN: None
108 * 108 *
@@ -138,9 +138,9 @@ acpi_ex_link_mutex(union acpi_operand_object *obj_desc,
138 * 138 *
139 * FUNCTION: acpi_ex_acquire_mutex_object 139 * FUNCTION: acpi_ex_acquire_mutex_object
140 * 140 *
141 * PARAMETERS: time_desc - Timeout in milliseconds 141 * PARAMETERS: Timeout - Timeout in milliseconds
142 * obj_desc - Mutex object 142 * obj_desc - Mutex object
143 * Thread - Current thread state 143 * thread_id - Current thread state
144 * 144 *
145 * RETURN: Status 145 * RETURN: Status
146 * 146 *
@@ -234,7 +234,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
234 return_ACPI_STATUS(AE_BAD_PARAMETER); 234 return_ACPI_STATUS(AE_BAD_PARAMETER);
235 } 235 }
236 236
237 /* Must have a valid thread ID */ 237 /* Must have a valid thread state struct */
238 238
239 if (!walk_state->thread) { 239 if (!walk_state->thread) {
240 ACPI_ERROR((AE_INFO, 240 ACPI_ERROR((AE_INFO,
@@ -249,7 +249,7 @@ acpi_ex_acquire_mutex(union acpi_operand_object *time_desc,
249 */ 249 */
250 if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) { 250 if (walk_state->thread->current_sync_level > obj_desc->mutex.sync_level) {
251 ACPI_ERROR((AE_INFO, 251 ACPI_ERROR((AE_INFO,
252 "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%d)", 252 "Cannot acquire Mutex [%4.4s], current SyncLevel is too large (%u)",
253 acpi_ut_get_node_name(obj_desc->mutex.node), 253 acpi_ut_get_node_name(obj_desc->mutex.node),
254 walk_state->thread->current_sync_level)); 254 walk_state->thread->current_sync_level));
255 return_ACPI_STATUS(AE_AML_MUTEX_ORDER); 255 return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
@@ -359,6 +359,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
359{ 359{
360 acpi_status status = AE_OK; 360 acpi_status status = AE_OK;
361 u8 previous_sync_level; 361 u8 previous_sync_level;
362 struct acpi_thread_state *owner_thread;
362 363
363 ACPI_FUNCTION_TRACE(ex_release_mutex); 364 ACPI_FUNCTION_TRACE(ex_release_mutex);
364 365
@@ -366,9 +367,11 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
366 return_ACPI_STATUS(AE_BAD_PARAMETER); 367 return_ACPI_STATUS(AE_BAD_PARAMETER);
367 } 368 }
368 369
370 owner_thread = obj_desc->mutex.owner_thread;
371
369 /* The mutex must have been previously acquired in order to release it */ 372 /* The mutex must have been previously acquired in order to release it */
370 373
371 if (!obj_desc->mutex.owner_thread) { 374 if (!owner_thread) {
372 ACPI_ERROR((AE_INFO, 375 ACPI_ERROR((AE_INFO,
373 "Cannot release Mutex [%4.4s], not acquired", 376 "Cannot release Mutex [%4.4s], not acquired",
374 acpi_ut_get_node_name(obj_desc->mutex.node))); 377 acpi_ut_get_node_name(obj_desc->mutex.node)));
@@ -387,16 +390,13 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
387 * The Mutex is owned, but this thread must be the owner. 390 * The Mutex is owned, but this thread must be the owner.
388 * Special case for Global Lock, any thread can release 391 * Special case for Global Lock, any thread can release
389 */ 392 */
390 if ((obj_desc->mutex.owner_thread->thread_id != 393 if ((owner_thread->thread_id != walk_state->thread->thread_id) &&
391 walk_state->thread->thread_id) 394 (obj_desc != acpi_gbl_global_lock_mutex)) {
392 && (obj_desc != acpi_gbl_global_lock_mutex)) {
393 ACPI_ERROR((AE_INFO, 395 ACPI_ERROR((AE_INFO,
394 "Thread %p cannot release Mutex [%4.4s] acquired by thread %p", 396 "Thread %p cannot release Mutex [%4.4s] acquired by thread %p",
395 ACPI_CAST_PTR(void, walk_state->thread->thread_id), 397 ACPI_CAST_PTR(void, walk_state->thread->thread_id),
396 acpi_ut_get_node_name(obj_desc->mutex.node), 398 acpi_ut_get_node_name(obj_desc->mutex.node),
397 ACPI_CAST_PTR(void, 399 ACPI_CAST_PTR(void, owner_thread->thread_id)));
398 obj_desc->mutex.owner_thread->
399 thread_id)));
400 return_ACPI_STATUS(AE_AML_NOT_OWNER); 400 return_ACPI_STATUS(AE_AML_NOT_OWNER);
401 } 401 }
402 402
@@ -407,10 +407,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
407 * different level can only mean that the mutex ordering rule is being 407 * different level can only mean that the mutex ordering rule is being
408 * violated. This behavior is clarified in ACPI 4.0 specification. 408 * violated. This behavior is clarified in ACPI 4.0 specification.
409 */ 409 */
410 if (obj_desc->mutex.sync_level != 410 if (obj_desc->mutex.sync_level != owner_thread->current_sync_level) {
411 walk_state->thread->current_sync_level) {
412 ACPI_ERROR((AE_INFO, 411 ACPI_ERROR((AE_INFO,
413 "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %d current %d", 412 "Cannot release Mutex [%4.4s], SyncLevel mismatch: mutex %u current %u",
414 acpi_ut_get_node_name(obj_desc->mutex.node), 413 acpi_ut_get_node_name(obj_desc->mutex.node),
415 obj_desc->mutex.sync_level, 414 obj_desc->mutex.sync_level,
416 walk_state->thread->current_sync_level)); 415 walk_state->thread->current_sync_level));
@@ -423,7 +422,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
423 * acquired, but are not released in reverse order. 422 * acquired, but are not released in reverse order.
424 */ 423 */
425 previous_sync_level = 424 previous_sync_level =
426 walk_state->thread->acquired_mutex_list->mutex.original_sync_level; 425 owner_thread->acquired_mutex_list->mutex.original_sync_level;
427 426
428 status = acpi_ex_release_mutex_object(obj_desc); 427 status = acpi_ex_release_mutex_object(obj_desc);
429 if (ACPI_FAILURE(status)) { 428 if (ACPI_FAILURE(status)) {
@@ -434,8 +433,9 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
434 433
435 /* Restore the previous sync_level */ 434 /* Restore the previous sync_level */
436 435
437 walk_state->thread->current_sync_level = previous_sync_level; 436 owner_thread->current_sync_level = previous_sync_level;
438 } 437 }
438
439 return_ACPI_STATUS(status); 439 return_ACPI_STATUS(status);
440} 440}
441 441
@@ -443,7 +443,7 @@ acpi_ex_release_mutex(union acpi_operand_object *obj_desc,
443 * 443 *
444 * FUNCTION: acpi_ex_release_all_mutexes 444 * FUNCTION: acpi_ex_release_all_mutexes
445 * 445 *
446 * PARAMETERS: Thread - Current executing thread object 446 * PARAMETERS: Thread - Current executing thread object
447 * 447 *
448 * RETURN: Status 448 * RETURN: Status
449 * 449 *
diff --git a/drivers/acpi/acpica/exnames.c b/drivers/acpi/acpica/exnames.c
index 679f308c5a89..d11e539ef763 100644
--- a/drivers/acpi/acpica/exnames.c
+++ b/drivers/acpi/acpica/exnames.c
@@ -102,7 +102,7 @@ static char *acpi_ex_allocate_name_string(u32 prefix_count, u32 num_name_segs)
102 name_string = ACPI_ALLOCATE(size_needed); 102 name_string = ACPI_ALLOCATE(size_needed);
103 if (!name_string) { 103 if (!name_string) {
104 ACPI_ERROR((AE_INFO, 104 ACPI_ERROR((AE_INFO,
105 "Could not allocate size %d", size_needed)); 105 "Could not allocate size %u", size_needed));
106 return_PTR(NULL); 106 return_PTR(NULL);
107 } 107 }
108 108
@@ -216,7 +216,7 @@ static acpi_status acpi_ex_name_segment(u8 ** in_aml_address, char *name_string)
216 */ 216 */
217 status = AE_AML_BAD_NAME; 217 status = AE_AML_BAD_NAME;
218 ACPI_ERROR((AE_INFO, 218 ACPI_ERROR((AE_INFO,
219 "Bad character %02x in name, at %p", 219 "Bad character 0x%02x in name, at %p",
220 *aml_address, aml_address)); 220 *aml_address, aml_address));
221 } 221 }
222 222
diff --git a/drivers/acpi/acpica/exoparg1.c b/drivers/acpi/acpica/exoparg1.c
index 99adbab5acbf..84e4d185aa25 100644
--- a/drivers/acpi/acpica/exoparg1.c
+++ b/drivers/acpi/acpica/exoparg1.c
@@ -110,7 +110,7 @@ acpi_status acpi_ex_opcode_0A_0T_1R(struct acpi_walk_state *walk_state)
110 110
111 default: /* Unknown opcode */ 111 default: /* Unknown opcode */
112 112
113 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 113 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
114 walk_state->opcode)); 114 walk_state->opcode));
115 status = AE_AML_BAD_OPCODE; 115 status = AE_AML_BAD_OPCODE;
116 break; 116 break;
@@ -173,7 +173,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
173 173
174 case AML_SLEEP_OP: /* Sleep (msec_time) */ 174 case AML_SLEEP_OP: /* Sleep (msec_time) */
175 175
176 status = acpi_ex_system_do_suspend(operand[0]->integer.value); 176 status = acpi_ex_system_do_sleep(operand[0]->integer.value);
177 break; 177 break;
178 178
179 case AML_STALL_OP: /* Stall (usec_time) */ 179 case AML_STALL_OP: /* Stall (usec_time) */
@@ -189,7 +189,7 @@ acpi_status acpi_ex_opcode_1A_0T_0R(struct acpi_walk_state *walk_state)
189 189
190 default: /* Unknown opcode */ 190 default: /* Unknown opcode */
191 191
192 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 192 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
193 walk_state->opcode)); 193 walk_state->opcode));
194 status = AE_AML_BAD_OPCODE; 194 status = AE_AML_BAD_OPCODE;
195 break; 195 break;
@@ -229,7 +229,7 @@ acpi_status acpi_ex_opcode_1A_1T_0R(struct acpi_walk_state *walk_state)
229 229
230 default: /* Unknown opcode */ 230 default: /* Unknown opcode */
231 231
232 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 232 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
233 walk_state->opcode)); 233 walk_state->opcode));
234 status = AE_AML_BAD_OPCODE; 234 status = AE_AML_BAD_OPCODE;
235 goto cleanup; 235 goto cleanup;
@@ -399,7 +399,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
399 399
400 if (digit > 0) { 400 if (digit > 0) {
401 ACPI_ERROR((AE_INFO, 401 ACPI_ERROR((AE_INFO,
402 "Integer too large to convert to BCD: %8.8X%8.8X", 402 "Integer too large to convert to BCD: 0x%8.8X%8.8X",
403 ACPI_FORMAT_UINT64(operand[0]-> 403 ACPI_FORMAT_UINT64(operand[0]->
404 integer.value))); 404 integer.value)));
405 status = AE_AML_NUMERIC_OVERFLOW; 405 status = AE_AML_NUMERIC_OVERFLOW;
@@ -540,7 +540,7 @@ acpi_status acpi_ex_opcode_1A_1T_1R(struct acpi_walk_state *walk_state)
540 540
541 default: /* Unknown opcode */ 541 default: /* Unknown opcode */
542 542
543 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 543 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
544 walk_state->opcode)); 544 walk_state->opcode));
545 status = AE_AML_BAD_OPCODE; 545 status = AE_AML_BAD_OPCODE;
546 goto cleanup; 546 goto cleanup;
@@ -979,7 +979,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
979 default: 979 default:
980 980
981 ACPI_ERROR((AE_INFO, 981 ACPI_ERROR((AE_INFO,
982 "Unknown Index TargetType %X in reference object %p", 982 "Unknown Index TargetType 0x%X in reference object %p",
983 operand[0]->reference. 983 operand[0]->reference.
984 target_type, operand[0])); 984 target_type, operand[0]));
985 status = AE_AML_OPERAND_TYPE; 985 status = AE_AML_OPERAND_TYPE;
@@ -1007,7 +1007,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
1007 1007
1008 default: 1008 default:
1009 ACPI_ERROR((AE_INFO, 1009 ACPI_ERROR((AE_INFO,
1010 "Unknown class in reference(%p) - %2.2X", 1010 "Unknown class in reference(%p) - 0x%2.2X",
1011 operand[0], 1011 operand[0],
1012 operand[0]->reference.class)); 1012 operand[0]->reference.class));
1013 1013
@@ -1019,7 +1019,7 @@ acpi_status acpi_ex_opcode_1A_0T_1R(struct acpi_walk_state *walk_state)
1019 1019
1020 default: 1020 default:
1021 1021
1022 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 1022 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
1023 walk_state->opcode)); 1023 walk_state->opcode));
1024 status = AE_AML_BAD_OPCODE; 1024 status = AE_AML_BAD_OPCODE;
1025 goto cleanup; 1025 goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg2.c b/drivers/acpi/acpica/exoparg2.c
index 22841bbbe63c..10e104cf0fb9 100644
--- a/drivers/acpi/acpica/exoparg2.c
+++ b/drivers/acpi/acpica/exoparg2.c
@@ -119,33 +119,6 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
119 status = AE_AML_OPERAND_TYPE; 119 status = AE_AML_OPERAND_TYPE;
120 break; 120 break;
121 } 121 }
122#ifdef ACPI_GPE_NOTIFY_CHECK
123 /*
124 * GPE method wake/notify check. Here, we want to ensure that we
125 * don't receive any "DeviceWake" Notifies from a GPE _Lxx or _Exx
126 * GPE method during system runtime. If we do, the GPE is marked
127 * as "wake-only" and disabled.
128 *
129 * 1) Is the Notify() value == device_wake?
130 * 2) Is this a GPE deferred method? (An _Lxx or _Exx method)
131 * 3) Did the original GPE happen at system runtime?
132 * (versus during wake)
133 *
134 * If all three cases are true, this is a wake-only GPE that should
135 * be disabled at runtime.
136 */
137 if (value == 2) { /* device_wake */
138 status =
139 acpi_ev_check_for_wake_only_gpe(walk_state->
140 gpe_event_info);
141 if (ACPI_FAILURE(status)) {
142
143 /* AE_WAKE_ONLY_GPE only error, means ignore this notify */
144
145 return_ACPI_STATUS(AE_OK)
146 }
147 }
148#endif
149 122
150 /* 123 /*
151 * Dispatch the notify to the appropriate handler 124 * Dispatch the notify to the appropriate handler
@@ -159,7 +132,7 @@ acpi_status acpi_ex_opcode_2A_0T_0R(struct acpi_walk_state *walk_state)
159 132
160 default: 133 default:
161 134
162 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 135 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
163 walk_state->opcode)); 136 walk_state->opcode));
164 status = AE_AML_BAD_OPCODE; 137 status = AE_AML_BAD_OPCODE;
165 } 138 }
@@ -224,7 +197,7 @@ acpi_status acpi_ex_opcode_2A_2T_1R(struct acpi_walk_state *walk_state)
224 197
225 default: 198 default:
226 199
227 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 200 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
228 walk_state->opcode)); 201 walk_state->opcode));
229 status = AE_AML_BAD_OPCODE; 202 status = AE_AML_BAD_OPCODE;
230 goto cleanup; 203 goto cleanup;
@@ -441,7 +414,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
441 414
442 if (ACPI_FAILURE(status)) { 415 if (ACPI_FAILURE(status)) {
443 ACPI_EXCEPTION((AE_INFO, status, 416 ACPI_EXCEPTION((AE_INFO, status,
444 "Index (%X%8.8X) is beyond end of object", 417 "Index (0x%8.8X%8.8X) is beyond end of object",
445 ACPI_FORMAT_UINT64(index))); 418 ACPI_FORMAT_UINT64(index)));
446 goto cleanup; 419 goto cleanup;
447 } 420 }
@@ -464,7 +437,7 @@ acpi_status acpi_ex_opcode_2A_1T_1R(struct acpi_walk_state *walk_state)
464 437
465 default: 438 default:
466 439
467 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 440 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
468 walk_state->opcode)); 441 walk_state->opcode));
469 status = AE_AML_BAD_OPCODE; 442 status = AE_AML_BAD_OPCODE;
470 break; 443 break;
@@ -572,7 +545,7 @@ acpi_status acpi_ex_opcode_2A_0T_1R(struct acpi_walk_state *walk_state)
572 545
573 default: 546 default:
574 547
575 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 548 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
576 walk_state->opcode)); 549 walk_state->opcode));
577 status = AE_AML_BAD_OPCODE; 550 status = AE_AML_BAD_OPCODE;
578 goto cleanup; 551 goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg3.c b/drivers/acpi/acpica/exoparg3.c
index 8bb1012ef44e..7a08d23befcd 100644
--- a/drivers/acpi/acpica/exoparg3.c
+++ b/drivers/acpi/acpica/exoparg3.c
@@ -119,7 +119,7 @@ acpi_status acpi_ex_opcode_3A_0T_0R(struct acpi_walk_state *walk_state)
119 119
120 default: 120 default:
121 121
122 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 122 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
123 walk_state->opcode)); 123 walk_state->opcode));
124 status = AE_AML_BAD_OPCODE; 124 status = AE_AML_BAD_OPCODE;
125 goto cleanup; 125 goto cleanup;
@@ -244,7 +244,7 @@ acpi_status acpi_ex_opcode_3A_1T_1R(struct acpi_walk_state *walk_state)
244 244
245 default: 245 default:
246 246
247 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 247 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
248 walk_state->opcode)); 248 walk_state->opcode));
249 status = AE_AML_BAD_OPCODE; 249 status = AE_AML_BAD_OPCODE;
250 goto cleanup; 250 goto cleanup;
diff --git a/drivers/acpi/acpica/exoparg6.c b/drivers/acpi/acpica/exoparg6.c
index f256b6a25f2e..4b50730cf9a0 100644
--- a/drivers/acpi/acpica/exoparg6.c
+++ b/drivers/acpi/acpica/exoparg6.c
@@ -245,7 +245,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
245 index = operand[5]->integer.value; 245 index = operand[5]->integer.value;
246 if (index >= operand[0]->package.count) { 246 if (index >= operand[0]->package.count) {
247 ACPI_ERROR((AE_INFO, 247 ACPI_ERROR((AE_INFO,
248 "Index (%X%8.8X) beyond package end (%X)", 248 "Index (0x%8.8X%8.8X) beyond package end (0x%X)",
249 ACPI_FORMAT_UINT64(index), 249 ACPI_FORMAT_UINT64(index),
250 operand[0]->package.count)); 250 operand[0]->package.count));
251 status = AE_AML_PACKAGE_LIMIT; 251 status = AE_AML_PACKAGE_LIMIT;
@@ -314,7 +314,7 @@ acpi_status acpi_ex_opcode_6A_0T_1R(struct acpi_walk_state * walk_state)
314 314
315 default: 315 default:
316 316
317 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", 317 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X",
318 walk_state->opcode)); 318 walk_state->opcode));
319 status = AE_AML_BAD_OPCODE; 319 status = AE_AML_BAD_OPCODE;
320 goto cleanup; 320 goto cleanup;
diff --git a/drivers/acpi/acpica/exprep.c b/drivers/acpi/acpica/exprep.c
index 2fbfe51fb141..25059dace0ad 100644
--- a/drivers/acpi/acpica/exprep.c
+++ b/drivers/acpi/acpica/exprep.c
@@ -275,7 +275,7 @@ acpi_ex_decode_field_access(union acpi_operand_object *obj_desc,
275 default: 275 default:
276 /* Invalid field access type */ 276 /* Invalid field access type */
277 277
278 ACPI_ERROR((AE_INFO, "Unknown field access type %X", access)); 278 ACPI_ERROR((AE_INFO, "Unknown field access type 0x%X", access));
279 return_UINT32(0); 279 return_UINT32(0);
280 } 280 }
281 281
@@ -430,7 +430,7 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
430 type = acpi_ns_get_type(info->region_node); 430 type = acpi_ns_get_type(info->region_node);
431 if (type != ACPI_TYPE_REGION) { 431 if (type != ACPI_TYPE_REGION) {
432 ACPI_ERROR((AE_INFO, 432 ACPI_ERROR((AE_INFO,
433 "Needed Region, found type %X (%s)", 433 "Needed Region, found type 0x%X (%s)",
434 type, acpi_ut_get_type_name(type))); 434 type, acpi_ut_get_type_name(type)));
435 435
436 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 436 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
diff --git a/drivers/acpi/acpica/exregion.c b/drivers/acpi/acpica/exregion.c
index 486b2e5661b6..531000fc77d2 100644
--- a/drivers/acpi/acpica/exregion.c
+++ b/drivers/acpi/acpica/exregion.c
@@ -105,7 +105,7 @@ acpi_ex_system_memory_space_handler(u32 function,
105 break; 105 break;
106 106
107 default: 107 default:
108 ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %d", 108 ACPI_ERROR((AE_INFO, "Invalid SystemMemory width %u",
109 bit_width)); 109 bit_width));
110 return_ACPI_STATUS(AE_AML_OPERAND_VALUE); 110 return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
111 } 111 }
@@ -173,7 +173,7 @@ acpi_ex_system_memory_space_handler(u32 function,
173 mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length); 173 mem_info->mapped_logical_address = acpi_os_map_memory((acpi_physical_address) address, map_length);
174 if (!mem_info->mapped_logical_address) { 174 if (!mem_info->mapped_logical_address) {
175 ACPI_ERROR((AE_INFO, 175 ACPI_ERROR((AE_INFO,
176 "Could not map memory at %8.8X%8.8X, size %X", 176 "Could not map memory at 0x%8.8X%8.8X, size %u",
177 ACPI_FORMAT_NATIVE_UINT(address), 177 ACPI_FORMAT_NATIVE_UINT(address),
178 (u32) map_length)); 178 (u32) map_length));
179 mem_info->mapped_length = 0; 179 mem_info->mapped_length = 0;
@@ -491,8 +491,10 @@ acpi_ex_data_table_space_handler(u32 function,
491{ 491{
492 ACPI_FUNCTION_TRACE(ex_data_table_space_handler); 492 ACPI_FUNCTION_TRACE(ex_data_table_space_handler);
493 493
494 /* Perform the memory read or write */ 494 /*
495 495 * Perform the memory read or write. The bit_width was already
496 * validated.
497 */
496 switch (function) { 498 switch (function) {
497 case ACPI_READ: 499 case ACPI_READ:
498 500
@@ -502,9 +504,14 @@ acpi_ex_data_table_space_handler(u32 function,
502 break; 504 break;
503 505
504 case ACPI_WRITE: 506 case ACPI_WRITE:
507
508 ACPI_MEMCPY(ACPI_PHYSADDR_TO_PTR(address),
509 ACPI_CAST_PTR(char, value), ACPI_DIV_8(bit_width));
510 break;
511
505 default: 512 default:
506 513
507 return_ACPI_STATUS(AE_SUPPORT); 514 return_ACPI_STATUS(AE_BAD_PARAMETER);
508 } 515 }
509 516
510 return_ACPI_STATUS(AE_OK); 517 return_ACPI_STATUS(AE_OK);
diff --git a/drivers/acpi/acpica/exresnte.c b/drivers/acpi/acpica/exresnte.c
index fdc1b27999ef..1fa4289a687e 100644
--- a/drivers/acpi/acpica/exresnte.c
+++ b/drivers/acpi/acpica/exresnte.c
@@ -252,7 +252,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
252 /* No named references are allowed here */ 252 /* No named references are allowed here */
253 253
254 ACPI_ERROR((AE_INFO, 254 ACPI_ERROR((AE_INFO,
255 "Unsupported Reference type %X", 255 "Unsupported Reference type 0x%X",
256 source_desc->reference.class)); 256 source_desc->reference.class));
257 257
258 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 258 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -264,7 +264,7 @@ acpi_ex_resolve_node_to_value(struct acpi_namespace_node **object_ptr,
264 /* Default case is for unknown types */ 264 /* Default case is for unknown types */
265 265
266 ACPI_ERROR((AE_INFO, 266 ACPI_ERROR((AE_INFO,
267 "Node %p - Unknown object type %X", 267 "Node %p - Unknown object type 0x%X",
268 node, entry_type)); 268 node, entry_type));
269 269
270 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 270 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
diff --git a/drivers/acpi/acpica/exresolv.c b/drivers/acpi/acpica/exresolv.c
index fdd6a7079b97..7ca35ea8acea 100644
--- a/drivers/acpi/acpica/exresolv.c
+++ b/drivers/acpi/acpica/exresolv.c
@@ -231,7 +231,7 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
231 /* Invalid reference object */ 231 /* Invalid reference object */
232 232
233 ACPI_ERROR((AE_INFO, 233 ACPI_ERROR((AE_INFO,
234 "Unknown TargetType %X in Index/Reference object %p", 234 "Unknown TargetType 0x%X in Index/Reference object %p",
235 stack_desc->reference.target_type, 235 stack_desc->reference.target_type,
236 stack_desc)); 236 stack_desc));
237 status = AE_AML_INTERNAL; 237 status = AE_AML_INTERNAL;
@@ -273,8 +273,8 @@ acpi_ex_resolve_object_to_value(union acpi_operand_object **stack_ptr,
273 default: 273 default:
274 274
275 ACPI_ERROR((AE_INFO, 275 ACPI_ERROR((AE_INFO,
276 "Unknown Reference type %X in %p", ref_type, 276 "Unknown Reference type 0x%X in %p",
277 stack_desc)); 277 ref_type, stack_desc));
278 status = AE_AML_INTERNAL; 278 status = AE_AML_INTERNAL;
279 break; 279 break;
280 } 280 }
@@ -403,7 +403,8 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
403 403
404 if (ACPI_GET_DESCRIPTOR_TYPE(node) != 404 if (ACPI_GET_DESCRIPTOR_TYPE(node) !=
405 ACPI_DESC_TYPE_NAMED) { 405 ACPI_DESC_TYPE_NAMED) {
406 ACPI_ERROR((AE_INFO, "Not a NS node %p [%s]", 406 ACPI_ERROR((AE_INFO,
407 "Not a namespace node %p [%s]",
407 node, 408 node,
408 acpi_ut_get_descriptor_name(node))); 409 acpi_ut_get_descriptor_name(node)));
409 return_ACPI_STATUS(AE_AML_INTERNAL); 410 return_ACPI_STATUS(AE_AML_INTERNAL);
@@ -507,7 +508,7 @@ acpi_ex_resolve_multiple(struct acpi_walk_state *walk_state,
507 default: 508 default:
508 509
509 ACPI_ERROR((AE_INFO, 510 ACPI_ERROR((AE_INFO,
510 "Unknown Reference Class %2.2X", 511 "Unknown Reference Class 0x%2.2X",
511 obj_desc->reference.class)); 512 obj_desc->reference.class));
512 return_ACPI_STATUS(AE_AML_INTERNAL); 513 return_ACPI_STATUS(AE_AML_INTERNAL);
513 } 514 }
diff --git a/drivers/acpi/acpica/exresop.c b/drivers/acpi/acpica/exresop.c
index c5ecd615f145..8c97cfd6a0fd 100644
--- a/drivers/acpi/acpica/exresop.c
+++ b/drivers/acpi/acpica/exresop.c
@@ -153,7 +153,7 @@ acpi_ex_resolve_operands(u16 opcode,
153 153
154 arg_types = op_info->runtime_args; 154 arg_types = op_info->runtime_args;
155 if (arg_types == ARGI_INVALID_OPCODE) { 155 if (arg_types == ARGI_INVALID_OPCODE) {
156 ACPI_ERROR((AE_INFO, "Unknown AML opcode %X", opcode)); 156 ACPI_ERROR((AE_INFO, "Unknown AML opcode 0x%X", opcode));
157 157
158 return_ACPI_STATUS(AE_AML_INTERNAL); 158 return_ACPI_STATUS(AE_AML_INTERNAL);
159 } 159 }
@@ -218,7 +218,7 @@ acpi_ex_resolve_operands(u16 opcode,
218 218
219 if (!acpi_ut_valid_object_type(object_type)) { 219 if (!acpi_ut_valid_object_type(object_type)) {
220 ACPI_ERROR((AE_INFO, 220 ACPI_ERROR((AE_INFO,
221 "Bad operand object type [%X]", 221 "Bad operand object type [0x%X]",
222 object_type)); 222 object_type));
223 223
224 return_ACPI_STATUS(AE_AML_OPERAND_TYPE); 224 return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -253,7 +253,7 @@ acpi_ex_resolve_operands(u16 opcode,
253 default: 253 default:
254 254
255 ACPI_ERROR((AE_INFO, 255 ACPI_ERROR((AE_INFO,
256 "Unknown Reference Class %2.2X in %p", 256 "Unknown Reference Class 0x%2.2X in %p",
257 obj_desc->reference.class, 257 obj_desc->reference.class,
258 obj_desc)); 258 obj_desc));
259 259
@@ -665,7 +665,7 @@ acpi_ex_resolve_operands(u16 opcode,
665 /* Unknown type */ 665 /* Unknown type */
666 666
667 ACPI_ERROR((AE_INFO, 667 ACPI_ERROR((AE_INFO,
668 "Internal - Unknown ARGI (required operand) type %X", 668 "Internal - Unknown ARGI (required operand) type 0x%X",
669 this_arg_type)); 669 this_arg_type));
670 670
671 return_ACPI_STATUS(AE_BAD_PARAMETER); 671 return_ACPI_STATUS(AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/exstore.c b/drivers/acpi/acpica/exstore.c
index 702b9ecfd44b..1624436ba4c5 100644
--- a/drivers/acpi/acpica/exstore.c
+++ b/drivers/acpi/acpica/exstore.c
@@ -1,4 +1,3 @@
1
2/****************************************************************************** 1/******************************************************************************
3 * 2 *
4 * Module Name: exstore - AML Interpreter object store support 3 * Module Name: exstore - AML Interpreter object store support
@@ -53,10 +52,6 @@
53ACPI_MODULE_NAME("exstore") 52ACPI_MODULE_NAME("exstore")
54 53
55/* Local prototypes */ 54/* Local prototypes */
56static void
57acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
58 u32 level, u32 index);
59
60static acpi_status 55static acpi_status
61acpi_ex_store_object_to_index(union acpi_operand_object *val_desc, 56acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
62 union acpi_operand_object *dest_desc, 57 union acpi_operand_object *dest_desc,
@@ -64,215 +59,6 @@ acpi_ex_store_object_to_index(union acpi_operand_object *val_desc,
64 59
65/******************************************************************************* 60/*******************************************************************************
66 * 61 *
67 * FUNCTION: acpi_ex_do_debug_object
68 *
69 * PARAMETERS: source_desc - Value to be stored
70 * Level - Indentation level (used for packages)
71 * Index - Current package element, zero if not pkg
72 *
73 * RETURN: None
74 *
75 * DESCRIPTION: Handles stores to the Debug Object.
76 *
77 ******************************************************************************/
78
79static void
80acpi_ex_do_debug_object(union acpi_operand_object *source_desc,
81 u32 level, u32 index)
82{
83 u32 i;
84
85 ACPI_FUNCTION_TRACE_PTR(ex_do_debug_object, source_desc);
86
87 /* Print line header as long as we are not in the middle of an object display */
88
89 if (!((level > 0) && index == 0)) {
90 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[ACPI Debug] %*s",
91 level, " "));
92 }
93
94 /* Display index for package output only */
95
96 if (index > 0) {
97 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
98 "(%.2u) ", index - 1));
99 }
100
101 if (!source_desc) {
102 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[Null Object]\n"));
103 return_VOID;
104 }
105
106 if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_OPERAND) {
107 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s ",
108 acpi_ut_get_object_type_name
109 (source_desc)));
110
111 if (!acpi_ut_valid_internal_object(source_desc)) {
112 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
113 "%p, Invalid Internal Object!\n",
114 source_desc));
115 return_VOID;
116 }
117 } else if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) ==
118 ACPI_DESC_TYPE_NAMED) {
119 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%s: %p\n",
120 acpi_ut_get_type_name(((struct
121 acpi_namespace_node
122 *)source_desc)->
123 type),
124 source_desc));
125 return_VOID;
126 } else {
127 return_VOID;
128 }
129
130 /* source_desc is of type ACPI_DESC_TYPE_OPERAND */
131
132 switch (source_desc->common.type) {
133 case ACPI_TYPE_INTEGER:
134
135 /* Output correct integer width */
136
137 if (acpi_gbl_integer_byte_width == 4) {
138 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%8.8X\n",
139 (u32) source_desc->integer.
140 value));
141 } else {
142 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
143 "0x%8.8X%8.8X\n",
144 ACPI_FORMAT_UINT64(source_desc->
145 integer.
146 value)));
147 }
148 break;
149
150 case ACPI_TYPE_BUFFER:
151
152 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X]\n",
153 (u32) source_desc->buffer.length));
154 ACPI_DUMP_BUFFER(source_desc->buffer.pointer,
155 (source_desc->buffer.length <
156 256) ? source_desc->buffer.length : 256);
157 break;
158
159 case ACPI_TYPE_STRING:
160
161 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[0x%.2X] \"%s\"\n",
162 source_desc->string.length,
163 source_desc->string.pointer));
164 break;
165
166 case ACPI_TYPE_PACKAGE:
167
168 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
169 "[Contains 0x%.2X Elements]\n",
170 source_desc->package.count));
171
172 /* Output the entire contents of the package */
173
174 for (i = 0; i < source_desc->package.count; i++) {
175 acpi_ex_do_debug_object(source_desc->package.
176 elements[i], level + 4, i + 1);
177 }
178 break;
179
180 case ACPI_TYPE_LOCAL_REFERENCE:
181
182 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "[%s] ",
183 acpi_ut_get_reference_name(source_desc)));
184
185 /* Decode the reference */
186
187 switch (source_desc->reference.class) {
188 case ACPI_REFCLASS_INDEX:
189
190 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "0x%X\n",
191 source_desc->reference.value));
192 break;
193
194 case ACPI_REFCLASS_TABLE:
195
196 /* Case for ddb_handle */
197
198 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
199 "Table Index 0x%X\n",
200 source_desc->reference.value));
201 return;
202
203 default:
204 break;
205 }
206
207 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, " "));
208
209 /* Check for valid node first, then valid object */
210
211 if (source_desc->reference.node) {
212 if (ACPI_GET_DESCRIPTOR_TYPE
213 (source_desc->reference.node) !=
214 ACPI_DESC_TYPE_NAMED) {
215 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
216 " %p - Not a valid namespace node\n",
217 source_desc->reference.
218 node));
219 } else {
220 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT,
221 "Node %p [%4.4s] ",
222 source_desc->reference.
223 node,
224 (source_desc->reference.
225 node)->name.ascii));
226
227 switch ((source_desc->reference.node)->type) {
228
229 /* These types have no attached object */
230
231 case ACPI_TYPE_DEVICE:
232 acpi_os_printf("Device\n");
233 break;
234
235 case ACPI_TYPE_THERMAL:
236 acpi_os_printf("Thermal Zone\n");
237 break;
238
239 default:
240 acpi_ex_do_debug_object((source_desc->
241 reference.
242 node)->object,
243 level + 4, 0);
244 break;
245 }
246 }
247 } else if (source_desc->reference.object) {
248 if (ACPI_GET_DESCRIPTOR_TYPE
249 (source_desc->reference.object) ==
250 ACPI_DESC_TYPE_NAMED) {
251 acpi_ex_do_debug_object(((struct
252 acpi_namespace_node *)
253 source_desc->reference.
254 object)->object,
255 level + 4, 0);
256 } else {
257 acpi_ex_do_debug_object(source_desc->reference.
258 object, level + 4, 0);
259 }
260 }
261 break;
262
263 default:
264
265 ACPI_DEBUG_PRINT_RAW((ACPI_DB_DEBUG_OBJECT, "%p\n",
266 source_desc));
267 break;
268 }
269
270 ACPI_DEBUG_PRINT_RAW((ACPI_DB_EXEC, "\n"));
271 return_VOID;
272}
273
274/*******************************************************************************
275 *
276 * FUNCTION: acpi_ex_store 62 * FUNCTION: acpi_ex_store
277 * 63 *
278 * PARAMETERS: *source_desc - Value to be stored 64 * PARAMETERS: *source_desc - Value to be stored
@@ -402,12 +188,12 @@ acpi_ex_store(union acpi_operand_object *source_desc,
402 source_desc, 188 source_desc,
403 acpi_ut_get_object_type_name(source_desc))); 189 acpi_ut_get_object_type_name(source_desc)));
404 190
405 acpi_ex_do_debug_object(source_desc, 0, 0); 191 ACPI_DEBUG_OBJECT(source_desc, 0, 0);
406 break; 192 break;
407 193
408 default: 194 default:
409 195
410 ACPI_ERROR((AE_INFO, "Unknown Reference Class %2.2X", 196 ACPI_ERROR((AE_INFO, "Unknown Reference Class 0x%2.2X",
411 ref_desc->reference.class)); 197 ref_desc->reference.class));
412 ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO); 198 ACPI_DUMP_ENTRY(ref_desc, ACPI_LV_INFO);
413 199
diff --git a/drivers/acpi/acpica/exsystem.c b/drivers/acpi/acpica/exsystem.c
index e11b6cb42a57..6d32e09327f1 100644
--- a/drivers/acpi/acpica/exsystem.c
+++ b/drivers/acpi/acpica/exsystem.c
@@ -170,7 +170,7 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
170 * (ACPI specifies 100 usec as max, but this gives some slack in 170 * (ACPI specifies 100 usec as max, but this gives some slack in
171 * order to support existing BIOSs) 171 * order to support existing BIOSs)
172 */ 172 */
173 ACPI_ERROR((AE_INFO, "Time parameter is too large (%d)", 173 ACPI_ERROR((AE_INFO, "Time parameter is too large (%u)",
174 how_long)); 174 how_long));
175 status = AE_AML_OPERAND_VALUE; 175 status = AE_AML_OPERAND_VALUE;
176 } else { 176 } else {
@@ -182,18 +182,18 @@ acpi_status acpi_ex_system_do_stall(u32 how_long)
182 182
183/******************************************************************************* 183/*******************************************************************************
184 * 184 *
185 * FUNCTION: acpi_ex_system_do_suspend 185 * FUNCTION: acpi_ex_system_do_sleep
186 * 186 *
187 * PARAMETERS: how_long - The amount of time to suspend, 187 * PARAMETERS: how_long - The amount of time to sleep,
188 * in milliseconds 188 * in milliseconds
189 * 189 *
190 * RETURN: None 190 * RETURN: None
191 * 191 *
192 * DESCRIPTION: Suspend running thread for specified amount of time. 192 * DESCRIPTION: Sleep the running thread for specified amount of time.
193 * 193 *
194 ******************************************************************************/ 194 ******************************************************************************/
195 195
196acpi_status acpi_ex_system_do_suspend(u64 how_long) 196acpi_status acpi_ex_system_do_sleep(u64 how_long)
197{ 197{
198 ACPI_FUNCTION_ENTRY(); 198 ACPI_FUNCTION_ENTRY();
199 199
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 679a112a7d26..b44274a0b62c 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode)
63{ 63{
64 64
65 acpi_status status; 65 acpi_status status;
66 u32 retry;
67 66
68 ACPI_FUNCTION_TRACE(hw_set_mode); 67 ACPI_FUNCTION_TRACE(hw_set_mode);
69 68
@@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
125 return_ACPI_STATUS(status); 124 return_ACPI_STATUS(status);
126 } 125 }
127 126
128 /* 127 return_ACPI_STATUS(AE_OK);
129 * Some hardware takes a LONG time to switch modes. Give them 3 sec to
130 * do so, but allow faster systems to proceed more quickly.
131 */
132 retry = 3000;
133 while (retry) {
134 if (acpi_hw_get_mode() == mode) {
135 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
136 "Mode %X successfully enabled\n",
137 mode));
138 return_ACPI_STATUS(AE_OK);
139 }
140 acpi_os_stall(1000);
141 retry--;
142 }
143
144 ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
145 return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
146} 128}
147 129
148/******************************************************************************* 130/*******************************************************************************
diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c
index ec7fc227b33f..5d1273b660ae 100644
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
@@ -299,7 +299,7 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
299 ACPI_FUNCTION_ENTRY(); 299 ACPI_FUNCTION_ENTRY();
300 300
301 if (register_id > ACPI_BITREG_MAX) { 301 if (register_id > ACPI_BITREG_MAX) {
302 ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: %X", 302 ACPI_ERROR((AE_INFO, "Invalid BitRegister ID: 0x%X",
303 register_id)); 303 register_id));
304 return (NULL); 304 return (NULL);
305 } 305 }
@@ -413,7 +413,7 @@ acpi_hw_register_read(u32 register_id, u32 * return_value)
413 break; 413 break;
414 414
415 default: 415 default:
416 ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id)); 416 ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
417 status = AE_BAD_PARAMETER; 417 status = AE_BAD_PARAMETER;
418 break; 418 break;
419 } 419 }
@@ -549,7 +549,7 @@ acpi_status acpi_hw_register_write(u32 register_id, u32 value)
549 break; 549 break;
550 550
551 default: 551 default:
552 ACPI_ERROR((AE_INFO, "Unknown Register ID: %X", register_id)); 552 ACPI_ERROR((AE_INFO, "Unknown Register ID: 0x%X", register_id));
553 status = AE_BAD_PARAMETER; 553 status = AE_BAD_PARAMETER;
554 break; 554 break;
555 } 555 }
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index 5e6d4dbb8024..36eb803dd9d0 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -245,7 +245,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
245 245
246 if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) || 246 if ((acpi_gbl_sleep_type_a > ACPI_SLEEP_TYPE_MAX) ||
247 (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) { 247 (acpi_gbl_sleep_type_b > ACPI_SLEEP_TYPE_MAX)) {
248 ACPI_ERROR((AE_INFO, "Sleep values out of range: A=%X B=%X", 248 ACPI_ERROR((AE_INFO, "Sleep values out of range: A=0x%X B=0x%X",
249 acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b)); 249 acpi_gbl_sleep_type_a, acpi_gbl_sleep_type_b));
250 return_ACPI_STATUS(AE_AML_OPERAND_VALUE); 250 return_ACPI_STATUS(AE_AML_OPERAND_VALUE);
251 } 251 }
diff --git a/drivers/acpi/acpica/hwvalid.c b/drivers/acpi/acpica/hwvalid.c
index e26c17d4b716..c10d587c1641 100644
--- a/drivers/acpi/acpica/hwvalid.c
+++ b/drivers/acpi/acpica/hwvalid.c
@@ -150,7 +150,7 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
150 150
151 if (last_address > ACPI_UINT16_MAX) { 151 if (last_address > ACPI_UINT16_MAX) {
152 ACPI_ERROR((AE_INFO, 152 ACPI_ERROR((AE_INFO,
153 "Illegal I/O port address/length above 64K: 0x%p/%X", 153 "Illegal I/O port address/length above 64K: %p/0x%X",
154 ACPI_CAST_PTR(void, address), byte_width)); 154 ACPI_CAST_PTR(void, address), byte_width));
155 return_ACPI_STATUS(AE_LIMIT); 155 return_ACPI_STATUS(AE_LIMIT);
156 } 156 }
diff --git a/drivers/acpi/acpica/nsaccess.c b/drivers/acpi/acpica/nsaccess.c
index aa2b80132d0a..3a2814676ac3 100644
--- a/drivers/acpi/acpica/nsaccess.c
+++ b/drivers/acpi/acpica/nsaccess.c
@@ -222,7 +222,7 @@ acpi_status acpi_ns_root_initialize(void)
222 default: 222 default:
223 223
224 ACPI_ERROR((AE_INFO, 224 ACPI_ERROR((AE_INFO,
225 "Unsupported initial type value %X", 225 "Unsupported initial type value 0x%X",
226 init_val->type)); 226 init_val->type));
227 acpi_ut_remove_reference(obj_desc); 227 acpi_ut_remove_reference(obj_desc);
228 obj_desc = NULL; 228 obj_desc = NULL;
diff --git a/drivers/acpi/acpica/nsdump.c b/drivers/acpi/acpica/nsdump.c
index 0689d36638d9..2110cc2360f0 100644
--- a/drivers/acpi/acpica/nsdump.c
+++ b/drivers/acpi/acpica/nsdump.c
@@ -205,8 +205,8 @@ acpi_ns_dump_one_object(acpi_handle obj_handle,
205 /* Check the node type and name */ 205 /* Check the node type and name */
206 206
207 if (type > ACPI_TYPE_LOCAL_MAX) { 207 if (type > ACPI_TYPE_LOCAL_MAX) {
208 ACPI_WARNING((AE_INFO, "Invalid ACPI Object Type %08X", 208 ACPI_WARNING((AE_INFO,
209 type)); 209 "Invalid ACPI Object Type 0x%08X", type));
210 } 210 }
211 211
212 if (!acpi_ut_valid_acpi_name(this_node->name.integer)) { 212 if (!acpi_ut_valid_acpi_name(this_node->name.integer)) {
diff --git a/drivers/acpi/acpica/nsnames.c b/drivers/acpi/acpica/nsnames.c
index 959372451635..7dea0031605c 100644
--- a/drivers/acpi/acpica/nsnames.c
+++ b/drivers/acpi/acpica/nsnames.c
@@ -107,7 +107,7 @@ acpi_ns_build_external_path(struct acpi_namespace_node *node,
107 107
108 if (index != 0) { 108 if (index != 0) {
109 ACPI_ERROR((AE_INFO, 109 ACPI_ERROR((AE_INFO,
110 "Could not construct external pathname; index=%X, size=%X, Path=%s", 110 "Could not construct external pathname; index=%u, size=%u, Path=%s",
111 (u32) index, (u32) size, &name_buffer[size])); 111 (u32) index, (u32) size, &name_buffer[size]));
112 112
113 return (AE_BAD_PARAMETER); 113 return (AE_BAD_PARAMETER);
diff --git a/drivers/acpi/acpica/nssearch.c b/drivers/acpi/acpica/nssearch.c
index 08f8b3f5ccaa..a8e42b5e9463 100644
--- a/drivers/acpi/acpica/nssearch.c
+++ b/drivers/acpi/acpica/nssearch.c
@@ -311,7 +311,7 @@ acpi_ns_search_and_enter(u32 target_name,
311 311
312 if (!node || !target_name || !return_node) { 312 if (!node || !target_name || !return_node) {
313 ACPI_ERROR((AE_INFO, 313 ACPI_ERROR((AE_INFO,
314 "Null parameter: Node %p Name %X ReturnNode %p", 314 "Null parameter: Node %p Name 0x%X ReturnNode %p",
315 node, target_name, return_node)); 315 node, target_name, return_node));
316 return_ACPI_STATUS(AE_BAD_PARAMETER); 316 return_ACPI_STATUS(AE_BAD_PARAMETER);
317 } 317 }
diff --git a/drivers/acpi/acpica/nsutils.c b/drivers/acpi/acpica/nsutils.c
index 24d05a87a2a3..bab559712da1 100644
--- a/drivers/acpi/acpica/nsutils.c
+++ b/drivers/acpi/acpica/nsutils.c
@@ -276,7 +276,7 @@ u32 acpi_ns_local(acpi_object_type type)
276 276
277 /* Type code out of range */ 277 /* Type code out of range */
278 278
279 ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type)); 279 ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
280 return_UINT32(ACPI_NS_NORMAL); 280 return_UINT32(ACPI_NS_NORMAL);
281 } 281 }
282 282
@@ -764,7 +764,7 @@ u32 acpi_ns_opens_scope(acpi_object_type type)
764 764
765 /* type code out of range */ 765 /* type code out of range */
766 766
767 ACPI_WARNING((AE_INFO, "Invalid Object Type %X", type)); 767 ACPI_WARNING((AE_INFO, "Invalid Object Type 0x%X", type));
768 return_UINT32(ACPI_NS_NORMAL); 768 return_UINT32(ACPI_NS_NORMAL);
769 } 769 }
770 770
diff --git a/drivers/acpi/acpica/psargs.c b/drivers/acpi/acpica/psargs.c
index 00493e108a01..7df1a4c95274 100644
--- a/drivers/acpi/acpica/psargs.c
+++ b/drivers/acpi/acpica/psargs.c
@@ -460,7 +460,7 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
460 460
461 default: 461 default:
462 462
463 ACPI_ERROR((AE_INFO, "Invalid ArgType %X", arg_type)); 463 ACPI_ERROR((AE_INFO, "Invalid ArgType 0x%X", arg_type));
464 return_VOID; 464 return_VOID;
465 } 465 }
466 466
@@ -742,7 +742,7 @@ acpi_ps_get_next_arg(struct acpi_walk_state *walk_state,
742 742
743 default: 743 default:
744 744
745 ACPI_ERROR((AE_INFO, "Invalid ArgType: %X", arg_type)); 745 ACPI_ERROR((AE_INFO, "Invalid ArgType: 0x%X", arg_type));
746 status = AE_AML_OPERAND_TYPE; 746 status = AE_AML_OPERAND_TYPE;
747 break; 747 break;
748 } 748 }
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index 59aabaeab1d3..2f2e7760938c 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -136,7 +136,7 @@ static acpi_status acpi_ps_get_aml_opcode(struct acpi_walk_state *walk_state)
136 /* The opcode is unrecognized. Just skip unknown opcodes */ 136 /* The opcode is unrecognized. Just skip unknown opcodes */
137 137
138 ACPI_ERROR((AE_INFO, 138 ACPI_ERROR((AE_INFO,
139 "Found unknown opcode %X at AML address %p offset %X, ignoring", 139 "Found unknown opcode 0x%X at AML address %p offset 0x%X, ignoring",
140 walk_state->opcode, walk_state->parser_state.aml, 140 walk_state->opcode, walk_state->parser_state.aml,
141 walk_state->aml_offset)); 141 walk_state->aml_offset));
142 142
@@ -1021,7 +1021,6 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
1021 if (status == AE_AML_NO_RETURN_VALUE) { 1021 if (status == AE_AML_NO_RETURN_VALUE) {
1022 ACPI_EXCEPTION((AE_INFO, status, 1022 ACPI_EXCEPTION((AE_INFO, status,
1023 "Invoked method did not return a value")); 1023 "Invoked method did not return a value"));
1024
1025 } 1024 }
1026 1025
1027 ACPI_EXCEPTION((AE_INFO, status, 1026 ACPI_EXCEPTION((AE_INFO, status,
diff --git a/drivers/acpi/acpica/psxface.c b/drivers/acpi/acpica/psxface.c
index 6064dd4e94c2..c42f067cff9d 100644
--- a/drivers/acpi/acpica/psxface.c
+++ b/drivers/acpi/acpica/psxface.c
@@ -46,6 +46,7 @@
46#include "acparser.h" 46#include "acparser.h"
47#include "acdispat.h" 47#include "acdispat.h"
48#include "acinterp.h" 48#include "acinterp.h"
49#include "actables.h"
49#include "amlcode.h" 50#include "amlcode.h"
50 51
51#define _COMPONENT ACPI_PARSER 52#define _COMPONENT ACPI_PARSER
@@ -220,6 +221,10 @@ acpi_status acpi_ps_execute_method(struct acpi_evaluate_info *info)
220 221
221 ACPI_FUNCTION_TRACE(ps_execute_method); 222 ACPI_FUNCTION_TRACE(ps_execute_method);
222 223
224 /* Quick validation of DSDT header */
225
226 acpi_tb_check_dsdt_header();
227
223 /* Validate the Info and method Node */ 228 /* Validate the Info and method Node */
224 229
225 if (!info || !info->resolved_node) { 230 if (!info || !info->resolved_node) {
diff --git a/drivers/acpi/acpica/rscreate.c b/drivers/acpi/acpica/rscreate.c
index f2ee3b548609..c80a2eea3a01 100644
--- a/drivers/acpi/acpica/rscreate.c
+++ b/drivers/acpi/acpica/rscreate.c
@@ -212,7 +212,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
212 212
213 if ((*top_object_list)->common.type != ACPI_TYPE_PACKAGE) { 213 if ((*top_object_list)->common.type != ACPI_TYPE_PACKAGE) {
214 ACPI_ERROR((AE_INFO, 214 ACPI_ERROR((AE_INFO,
215 "(PRT[%X]) Need sub-package, found %s", 215 "(PRT[%u]) Need sub-package, found %s",
216 index, 216 index,
217 acpi_ut_get_object_type_name 217 acpi_ut_get_object_type_name
218 (*top_object_list))); 218 (*top_object_list)));
@@ -223,7 +223,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
223 223
224 if ((*top_object_list)->package.count != 4) { 224 if ((*top_object_list)->package.count != 4) {
225 ACPI_ERROR((AE_INFO, 225 ACPI_ERROR((AE_INFO,
226 "(PRT[%X]) Need package of length 4, found length %d", 226 "(PRT[%u]) Need package of length 4, found length %u",
227 index, (*top_object_list)->package.count)); 227 index, (*top_object_list)->package.count));
228 return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT); 228 return_ACPI_STATUS(AE_AML_PACKAGE_LIMIT);
229 } 229 }
@@ -240,7 +240,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
240 obj_desc = sub_object_list[0]; 240 obj_desc = sub_object_list[0];
241 if (obj_desc->common.type != ACPI_TYPE_INTEGER) { 241 if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
242 ACPI_ERROR((AE_INFO, 242 ACPI_ERROR((AE_INFO,
243 "(PRT[%X].Address) Need Integer, found %s", 243 "(PRT[%u].Address) Need Integer, found %s",
244 index, 244 index,
245 acpi_ut_get_object_type_name(obj_desc))); 245 acpi_ut_get_object_type_name(obj_desc)));
246 return_ACPI_STATUS(AE_BAD_DATA); 246 return_ACPI_STATUS(AE_BAD_DATA);
@@ -253,7 +253,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
253 obj_desc = sub_object_list[1]; 253 obj_desc = sub_object_list[1];
254 if (obj_desc->common.type != ACPI_TYPE_INTEGER) { 254 if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
255 ACPI_ERROR((AE_INFO, 255 ACPI_ERROR((AE_INFO,
256 "(PRT[%X].Pin) Need Integer, found %s", 256 "(PRT[%u].Pin) Need Integer, found %s",
257 index, 257 index,
258 acpi_ut_get_object_type_name(obj_desc))); 258 acpi_ut_get_object_type_name(obj_desc)));
259 return_ACPI_STATUS(AE_BAD_DATA); 259 return_ACPI_STATUS(AE_BAD_DATA);
@@ -289,7 +289,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
289 if (obj_desc->reference.class != 289 if (obj_desc->reference.class !=
290 ACPI_REFCLASS_NAME) { 290 ACPI_REFCLASS_NAME) {
291 ACPI_ERROR((AE_INFO, 291 ACPI_ERROR((AE_INFO,
292 "(PRT[%X].Source) Need name, found Reference Class %X", 292 "(PRT[%u].Source) Need name, found Reference Class 0x%X",
293 index, 293 index,
294 obj_desc->reference.class)); 294 obj_desc->reference.class));
295 return_ACPI_STATUS(AE_BAD_DATA); 295 return_ACPI_STATUS(AE_BAD_DATA);
@@ -340,7 +340,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
340 default: 340 default:
341 341
342 ACPI_ERROR((AE_INFO, 342 ACPI_ERROR((AE_INFO,
343 "(PRT[%X].Source) Need Ref/String/Integer, found %s", 343 "(PRT[%u].Source) Need Ref/String/Integer, found %s",
344 index, 344 index,
345 acpi_ut_get_object_type_name 345 acpi_ut_get_object_type_name
346 (obj_desc))); 346 (obj_desc)));
@@ -358,7 +358,7 @@ acpi_rs_create_pci_routing_table(union acpi_operand_object *package_object,
358 obj_desc = sub_object_list[3]; 358 obj_desc = sub_object_list[3];
359 if (obj_desc->common.type != ACPI_TYPE_INTEGER) { 359 if (obj_desc->common.type != ACPI_TYPE_INTEGER) {
360 ACPI_ERROR((AE_INFO, 360 ACPI_ERROR((AE_INFO,
361 "(PRT[%X].SourceIndex) Need Integer, found %s", 361 "(PRT[%u].SourceIndex) Need Integer, found %s",
362 index, 362 index,
363 acpi_ut_get_object_type_name(obj_desc))); 363 acpi_ut_get_object_type_name(obj_desc)));
364 return_ACPI_STATUS(AE_BAD_DATA); 364 return_ACPI_STATUS(AE_BAD_DATA);
diff --git a/drivers/acpi/acpica/rslist.c b/drivers/acpi/acpica/rslist.c
index fd057c72d252..7335f22aac20 100644
--- a/drivers/acpi/acpica/rslist.c
+++ b/drivers/acpi/acpica/rslist.c
@@ -94,7 +94,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
94 [resource_index]); 94 [resource_index]);
95 if (ACPI_FAILURE(status)) { 95 if (ACPI_FAILURE(status)) {
96 ACPI_EXCEPTION((AE_INFO, status, 96 ACPI_EXCEPTION((AE_INFO, status,
97 "Could not convert AML resource (Type %X)", 97 "Could not convert AML resource (Type 0x%X)",
98 *aml)); 98 *aml));
99 return_ACPI_STATUS(status); 99 return_ACPI_STATUS(status);
100 } 100 }
@@ -147,7 +147,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
147 147
148 if (resource->type > ACPI_RESOURCE_TYPE_MAX) { 148 if (resource->type > ACPI_RESOURCE_TYPE_MAX) {
149 ACPI_ERROR((AE_INFO, 149 ACPI_ERROR((AE_INFO,
150 "Invalid descriptor type (%X) in resource list", 150 "Invalid descriptor type (0x%X) in resource list",
151 resource->type)); 151 resource->type));
152 return_ACPI_STATUS(AE_BAD_DATA); 152 return_ACPI_STATUS(AE_BAD_DATA);
153 } 153 }
@@ -161,7 +161,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
161 [resource->type]); 161 [resource->type]);
162 if (ACPI_FAILURE(status)) { 162 if (ACPI_FAILURE(status)) {
163 ACPI_EXCEPTION((AE_INFO, status, 163 ACPI_EXCEPTION((AE_INFO, status,
164 "Could not convert resource (type %X) to AML", 164 "Could not convert resource (type 0x%X) to AML",
165 resource->type)); 165 resource->type));
166 return_ACPI_STATUS(status); 166 return_ACPI_STATUS(status);
167 } 167 }
diff --git a/drivers/acpi/acpica/rsmisc.c b/drivers/acpi/acpica/rsmisc.c
index 07de352fa443..f8cd9e87d987 100644
--- a/drivers/acpi/acpica/rsmisc.c
+++ b/drivers/acpi/acpica/rsmisc.c
@@ -88,7 +88,7 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
88 /* Each internal resource struct is expected to be 32-bit aligned */ 88 /* Each internal resource struct is expected to be 32-bit aligned */
89 89
90 ACPI_WARNING((AE_INFO, 90 ACPI_WARNING((AE_INFO,
91 "Misaligned resource pointer (get): %p Type %2.2X Len %X", 91 "Misaligned resource pointer (get): %p Type 0x%2.2X Length %u",
92 resource, resource->type, resource->length)); 92 resource, resource->type, resource->length));
93 } 93 }
94 94
@@ -541,7 +541,7 @@ if (((aml->irq.flags & 0x09) == 0x00) || ((aml->irq.flags & 0x09) == 0x09)) {
541 * "IRQ Format"), so 0x00 and 0x09 are illegal. 541 * "IRQ Format"), so 0x00 and 0x09 are illegal.
542 */ 542 */
543 ACPI_ERROR((AE_INFO, 543 ACPI_ERROR((AE_INFO,
544 "Invalid interrupt polarity/trigger in resource list, %X", 544 "Invalid interrupt polarity/trigger in resource list, 0x%X",
545 aml->irq.flags)); 545 aml->irq.flags));
546 return_ACPI_STATUS(AE_BAD_DATA); 546 return_ACPI_STATUS(AE_BAD_DATA);
547} 547}
diff --git a/drivers/acpi/acpica/tbfadt.c b/drivers/acpi/acpica/tbfadt.c
index f43fbe0fc3fc..1728cb9bf600 100644
--- a/drivers/acpi/acpica/tbfadt.c
+++ b/drivers/acpi/acpica/tbfadt.c
@@ -283,7 +283,7 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
283 if (length > sizeof(struct acpi_table_fadt)) { 283 if (length > sizeof(struct acpi_table_fadt)) {
284 ACPI_WARNING((AE_INFO, 284 ACPI_WARNING((AE_INFO,
285 "FADT (revision %u) is longer than ACPI 2.0 version, " 285 "FADT (revision %u) is longer than ACPI 2.0 version, "
286 "truncating length 0x%X to 0x%X", 286 "truncating length %u to %u",
287 table->revision, length, 287 table->revision, length,
288 (u32)sizeof(struct acpi_table_fadt))); 288 (u32)sizeof(struct acpi_table_fadt)));
289 } 289 }
@@ -422,7 +422,7 @@ static void acpi_tb_convert_fadt(void)
422 if (address64->address && address32 && 422 if (address64->address && address32 &&
423 (address64->address != (u64) address32)) { 423 (address64->address != (u64) address32)) {
424 ACPI_ERROR((AE_INFO, 424 ACPI_ERROR((AE_INFO,
425 "32/64X address mismatch in %s: %8.8X/%8.8X%8.8X, using 32", 425 "32/64X address mismatch in %s: 0x%8.8X/0x%8.8X%8.8X, using 32",
426 fadt_info_table[i].name, address32, 426 fadt_info_table[i].name, address32,
427 ACPI_FORMAT_UINT64(address64->address))); 427 ACPI_FORMAT_UINT64(address64->address)));
428 } 428 }
@@ -481,7 +481,7 @@ static void acpi_tb_validate_fadt(void)
481 (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) { 481 (acpi_gbl_FADT.Xfacs != (u64) acpi_gbl_FADT.facs)) {
482 ACPI_WARNING((AE_INFO, 482 ACPI_WARNING((AE_INFO,
483 "32/64X FACS address mismatch in FADT - " 483 "32/64X FACS address mismatch in FADT - "
484 "%8.8X/%8.8X%8.8X, using 32", 484 "0x%8.8X/0x%8.8X%8.8X, using 32",
485 acpi_gbl_FADT.facs, 485 acpi_gbl_FADT.facs,
486 ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs))); 486 ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xfacs)));
487 487
@@ -492,7 +492,7 @@ static void acpi_tb_validate_fadt(void)
492 (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) { 492 (acpi_gbl_FADT.Xdsdt != (u64) acpi_gbl_FADT.dsdt)) {
493 ACPI_WARNING((AE_INFO, 493 ACPI_WARNING((AE_INFO,
494 "32/64X DSDT address mismatch in FADT - " 494 "32/64X DSDT address mismatch in FADT - "
495 "%8.8X/%8.8X%8.8X, using 32", 495 "0x%8.8X/0x%8.8X%8.8X, using 32",
496 acpi_gbl_FADT.dsdt, 496 acpi_gbl_FADT.dsdt,
497 ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt))); 497 ACPI_FORMAT_UINT64(acpi_gbl_FADT.Xdsdt)));
498 498
@@ -521,7 +521,7 @@ static void acpi_tb_validate_fadt(void)
521 if (address64->address && 521 if (address64->address &&
522 (address64->bit_width != ACPI_MUL_8(length))) { 522 (address64->bit_width != ACPI_MUL_8(length))) {
523 ACPI_WARNING((AE_INFO, 523 ACPI_WARNING((AE_INFO,
524 "32/64X length mismatch in %s: %d/%d", 524 "32/64X length mismatch in %s: %u/%u",
525 name, ACPI_MUL_8(length), 525 name, ACPI_MUL_8(length),
526 address64->bit_width)); 526 address64->bit_width));
527 } 527 }
@@ -534,7 +534,7 @@ static void acpi_tb_validate_fadt(void)
534 if (!address64->address || !length) { 534 if (!address64->address || !length) {
535 ACPI_ERROR((AE_INFO, 535 ACPI_ERROR((AE_INFO,
536 "Required field %s has zero address and/or length:" 536 "Required field %s has zero address and/or length:"
537 " %8.8X%8.8X/%X", 537 " 0x%8.8X%8.8X/0x%X",
538 name, 538 name,
539 ACPI_FORMAT_UINT64(address64-> 539 ACPI_FORMAT_UINT64(address64->
540 address), 540 address),
@@ -550,7 +550,7 @@ static void acpi_tb_validate_fadt(void)
550 (!address64->address && length)) { 550 (!address64->address && length)) {
551 ACPI_WARNING((AE_INFO, 551 ACPI_WARNING((AE_INFO,
552 "Optional field %s has zero address or length: " 552 "Optional field %s has zero address or length: "
553 "%8.8X%8.8X/%X", 553 "0x%8.8X%8.8X/0x%X",
554 name, 554 name,
555 ACPI_FORMAT_UINT64(address64-> 555 ACPI_FORMAT_UINT64(address64->
556 address), 556 address),
@@ -600,7 +600,7 @@ static void acpi_tb_setup_fadt_registers(void)
600 (fadt_info_table[i].default_length != 600 (fadt_info_table[i].default_length !=
601 target64->bit_width)) { 601 target64->bit_width)) {
602 ACPI_WARNING((AE_INFO, 602 ACPI_WARNING((AE_INFO,
603 "Invalid length for %s: %d, using default %d", 603 "Invalid length for %s: %u, using default %u",
604 fadt_info_table[i].name, 604 fadt_info_table[i].name,
605 target64->bit_width, 605 target64->bit_width,
606 fadt_info_table[i]. 606 fadt_info_table[i].
diff --git a/drivers/acpi/acpica/tbfind.c b/drivers/acpi/acpica/tbfind.c
index e252180ce61c..989d5c867864 100644
--- a/drivers/acpi/acpica/tbfind.c
+++ b/drivers/acpi/acpica/tbfind.c
@@ -83,7 +83,7 @@ acpi_tb_find_table(char *signature,
83 83
84 /* Search for the table */ 84 /* Search for the table */
85 85
86 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) { 86 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
87 if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature), 87 if (ACPI_MEMCMP(&(acpi_gbl_root_table_list.tables[i].signature),
88 header.signature, ACPI_NAME_SIZE)) { 88 header.signature, ACPI_NAME_SIZE)) {
89 89
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 7ec02b0f69e0..83d7af8d0905 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -137,7 +137,7 @@ acpi_tb_add_table(struct acpi_table_desc *table_desc, u32 *table_index)
137 137
138 /* Check if table is already registered */ 138 /* Check if table is already registered */
139 139
140 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) { 140 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
141 if (!acpi_gbl_root_table_list.tables[i].pointer) { 141 if (!acpi_gbl_root_table_list.tables[i].pointer) {
142 status = 142 status =
143 acpi_tb_verify_table(&acpi_gbl_root_table_list. 143 acpi_tb_verify_table(&acpi_gbl_root_table_list.
@@ -273,7 +273,7 @@ acpi_status acpi_tb_resize_root_table_list(void)
273 /* Increase the Table Array size */ 273 /* Increase the Table Array size */
274 274
275 tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list. 275 tables = ACPI_ALLOCATE_ZEROED(((acpi_size) acpi_gbl_root_table_list.
276 size + 276 max_table_count +
277 ACPI_ROOT_TABLE_SIZE_INCREMENT) * 277 ACPI_ROOT_TABLE_SIZE_INCREMENT) *
278 sizeof(struct acpi_table_desc)); 278 sizeof(struct acpi_table_desc));
279 if (!tables) { 279 if (!tables) {
@@ -286,8 +286,8 @@ acpi_status acpi_tb_resize_root_table_list(void)
286 286
287 if (acpi_gbl_root_table_list.tables) { 287 if (acpi_gbl_root_table_list.tables) {
288 ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, 288 ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables,
289 (acpi_size) acpi_gbl_root_table_list.size * 289 (acpi_size) acpi_gbl_root_table_list.
290 sizeof(struct acpi_table_desc)); 290 max_table_count * sizeof(struct acpi_table_desc));
291 291
292 if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) { 292 if (acpi_gbl_root_table_list.flags & ACPI_ROOT_ORIGIN_ALLOCATED) {
293 ACPI_FREE(acpi_gbl_root_table_list.tables); 293 ACPI_FREE(acpi_gbl_root_table_list.tables);
@@ -295,8 +295,9 @@ acpi_status acpi_tb_resize_root_table_list(void)
295 } 295 }
296 296
297 acpi_gbl_root_table_list.tables = tables; 297 acpi_gbl_root_table_list.tables = tables;
298 acpi_gbl_root_table_list.size += ACPI_ROOT_TABLE_SIZE_INCREMENT; 298 acpi_gbl_root_table_list.max_table_count +=
299 acpi_gbl_root_table_list.flags |= (u8) ACPI_ROOT_ORIGIN_ALLOCATED; 299 ACPI_ROOT_TABLE_SIZE_INCREMENT;
300 acpi_gbl_root_table_list.flags |= (u8)ACPI_ROOT_ORIGIN_ALLOCATED;
300 301
301 return_ACPI_STATUS(AE_OK); 302 return_ACPI_STATUS(AE_OK);
302} 303}
@@ -321,38 +322,36 @@ acpi_tb_store_table(acpi_physical_address address,
321 struct acpi_table_header *table, 322 struct acpi_table_header *table,
322 u32 length, u8 flags, u32 *table_index) 323 u32 length, u8 flags, u32 *table_index)
323{ 324{
324 acpi_status status = AE_OK; 325 acpi_status status;
326 struct acpi_table_desc *new_table;
325 327
326 /* Ensure that there is room for the table in the Root Table List */ 328 /* Ensure that there is room for the table in the Root Table List */
327 329
328 if (acpi_gbl_root_table_list.count >= acpi_gbl_root_table_list.size) { 330 if (acpi_gbl_root_table_list.current_table_count >=
331 acpi_gbl_root_table_list.max_table_count) {
329 status = acpi_tb_resize_root_table_list(); 332 status = acpi_tb_resize_root_table_list();
330 if (ACPI_FAILURE(status)) { 333 if (ACPI_FAILURE(status)) {
331 return (status); 334 return (status);
332 } 335 }
333 } 336 }
334 337
338 new_table =
339 &acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
340 current_table_count];
341
335 /* Initialize added table */ 342 /* Initialize added table */
336 343
337 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count]. 344 new_table->address = address;
338 address = address; 345 new_table->pointer = table;
339 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count]. 346 new_table->length = length;
340 pointer = table; 347 new_table->owner_id = 0;
341 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].length = 348 new_table->flags = flags;
342 length; 349
343 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count]. 350 ACPI_MOVE_32_TO_32(&new_table->signature, table->signature);
344 owner_id = 0; 351
345 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count].flags = 352 *table_index = acpi_gbl_root_table_list.current_table_count;
346 flags; 353 acpi_gbl_root_table_list.current_table_count++;
347 354 return (AE_OK);
348 ACPI_MOVE_32_TO_32(&
349 (acpi_gbl_root_table_list.
350 tables[acpi_gbl_root_table_list.count].signature),
351 table->signature);
352
353 *table_index = acpi_gbl_root_table_list.count;
354 acpi_gbl_root_table_list.count++;
355 return (status);
356} 355}
357 356
358/******************************************************************************* 357/*******************************************************************************
@@ -408,7 +407,7 @@ void acpi_tb_terminate(void)
408 407
409 /* Delete the individual tables */ 408 /* Delete the individual tables */
410 409
411 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) { 410 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; i++) {
412 acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]); 411 acpi_tb_delete_table(&acpi_gbl_root_table_list.tables[i]);
413 } 412 }
414 413
@@ -422,7 +421,7 @@ void acpi_tb_terminate(void)
422 421
423 acpi_gbl_root_table_list.tables = NULL; 422 acpi_gbl_root_table_list.tables = NULL;
424 acpi_gbl_root_table_list.flags = 0; 423 acpi_gbl_root_table_list.flags = 0;
425 acpi_gbl_root_table_list.count = 0; 424 acpi_gbl_root_table_list.current_table_count = 0;
426 425
427 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n")); 426 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "ACPI Tables freed\n"));
428 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); 427 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
@@ -452,7 +451,7 @@ acpi_status acpi_tb_delete_namespace_by_owner(u32 table_index)
452 return_ACPI_STATUS(status); 451 return_ACPI_STATUS(status);
453 } 452 }
454 453
455 if (table_index >= acpi_gbl_root_table_list.count) { 454 if (table_index >= acpi_gbl_root_table_list.current_table_count) {
456 455
457 /* The table index does not exist */ 456 /* The table index does not exist */
458 457
@@ -505,7 +504,7 @@ acpi_status acpi_tb_allocate_owner_id(u32 table_index)
505 ACPI_FUNCTION_TRACE(tb_allocate_owner_id); 504 ACPI_FUNCTION_TRACE(tb_allocate_owner_id);
506 505
507 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 506 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
508 if (table_index < acpi_gbl_root_table_list.count) { 507 if (table_index < acpi_gbl_root_table_list.current_table_count) {
509 status = acpi_ut_allocate_owner_id 508 status = acpi_ut_allocate_owner_id
510 (&(acpi_gbl_root_table_list.tables[table_index].owner_id)); 509 (&(acpi_gbl_root_table_list.tables[table_index].owner_id));
511 } 510 }
@@ -533,7 +532,7 @@ acpi_status acpi_tb_release_owner_id(u32 table_index)
533 ACPI_FUNCTION_TRACE(tb_release_owner_id); 532 ACPI_FUNCTION_TRACE(tb_release_owner_id);
534 533
535 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 534 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
536 if (table_index < acpi_gbl_root_table_list.count) { 535 if (table_index < acpi_gbl_root_table_list.current_table_count) {
537 acpi_ut_release_owner_id(& 536 acpi_ut_release_owner_id(&
538 (acpi_gbl_root_table_list. 537 (acpi_gbl_root_table_list.
539 tables[table_index].owner_id)); 538 tables[table_index].owner_id));
@@ -564,7 +563,7 @@ acpi_status acpi_tb_get_owner_id(u32 table_index, acpi_owner_id *owner_id)
564 ACPI_FUNCTION_TRACE(tb_get_owner_id); 563 ACPI_FUNCTION_TRACE(tb_get_owner_id);
565 564
566 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 565 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
567 if (table_index < acpi_gbl_root_table_list.count) { 566 if (table_index < acpi_gbl_root_table_list.current_table_count) {
568 *owner_id = 567 *owner_id =
569 acpi_gbl_root_table_list.tables[table_index].owner_id; 568 acpi_gbl_root_table_list.tables[table_index].owner_id;
570 status = AE_OK; 569 status = AE_OK;
@@ -589,7 +588,7 @@ u8 acpi_tb_is_table_loaded(u32 table_index)
589 u8 is_loaded = FALSE; 588 u8 is_loaded = FALSE;
590 589
591 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 590 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
592 if (table_index < acpi_gbl_root_table_list.count) { 591 if (table_index < acpi_gbl_root_table_list.current_table_count) {
593 is_loaded = (u8) 592 is_loaded = (u8)
594 (acpi_gbl_root_table_list.tables[table_index].flags & 593 (acpi_gbl_root_table_list.tables[table_index].flags &
595 ACPI_TABLE_IS_LOADED); 594 ACPI_TABLE_IS_LOADED);
@@ -616,7 +615,7 @@ void acpi_tb_set_table_loaded_flag(u32 table_index, u8 is_loaded)
616{ 615{
617 616
618 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 617 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
619 if (table_index < acpi_gbl_root_table_list.count) { 618 if (table_index < acpi_gbl_root_table_list.current_table_count) {
620 if (is_loaded) { 619 if (is_loaded) {
621 acpi_gbl_root_table_list.tables[table_index].flags |= 620 acpi_gbl_root_table_list.tables[table_index].flags |=
622 ACPI_TABLE_IS_LOADED; 621 ACPI_TABLE_IS_LOADED;
diff --git a/drivers/acpi/acpica/tbutils.c b/drivers/acpi/acpica/tbutils.c
index 02723a9fb10c..34f9c2bc5e1f 100644
--- a/drivers/acpi/acpica/tbutils.c
+++ b/drivers/acpi/acpica/tbutils.c
@@ -158,7 +158,7 @@ acpi_status acpi_tb_initialize_facs(void)
158u8 acpi_tb_tables_loaded(void) 158u8 acpi_tb_tables_loaded(void)
159{ 159{
160 160
161 if (acpi_gbl_root_table_list.count >= 3) { 161 if (acpi_gbl_root_table_list.current_table_count >= 3) {
162 return (TRUE); 162 return (TRUE);
163 } 163 }
164 164
@@ -309,7 +309,7 @@ acpi_status acpi_tb_verify_checksum(struct acpi_table_header *table, u32 length)
309 309
310 if (checksum) { 310 if (checksum) {
311 ACPI_WARNING((AE_INFO, 311 ACPI_WARNING((AE_INFO,
312 "Incorrect checksum in table [%4.4s] - %2.2X, should be %2.2X", 312 "Incorrect checksum in table [%4.4s] - 0x%2.2X, should be 0x%2.2X",
313 table->signature, table->checksum, 313 table->signature, table->checksum,
314 (u8) (table->checksum - checksum))); 314 (u8) (table->checksum - checksum)));
315 315
@@ -349,6 +349,84 @@ u8 acpi_tb_checksum(u8 *buffer, u32 length)
349 349
350/******************************************************************************* 350/*******************************************************************************
351 * 351 *
352 * FUNCTION: acpi_tb_check_dsdt_header
353 *
354 * PARAMETERS: None
355 *
356 * RETURN: None
357 *
358 * DESCRIPTION: Quick compare to check validity of the DSDT. This will detect
359 * if the DSDT has been replaced from outside the OS and/or if
360 * the DSDT header has been corrupted.
361 *
362 ******************************************************************************/
363
364void acpi_tb_check_dsdt_header(void)
365{
366
367 /* Compare original length and checksum to current values */
368
369 if (acpi_gbl_original_dsdt_header.length != acpi_gbl_DSDT->length ||
370 acpi_gbl_original_dsdt_header.checksum != acpi_gbl_DSDT->checksum) {
371 ACPI_ERROR((AE_INFO,
372 "The DSDT has been corrupted or replaced - old, new headers below"));
373 acpi_tb_print_table_header(0, &acpi_gbl_original_dsdt_header);
374 acpi_tb_print_table_header(0, acpi_gbl_DSDT);
375
376 ACPI_ERROR((AE_INFO,
377 "Please send DMI info to linux-acpi@vger.kernel.org\n"
378 "If system does not work as expected, please boot with acpi=copy_dsdt"));
379
380 /* Disable further error messages */
381
382 acpi_gbl_original_dsdt_header.length = acpi_gbl_DSDT->length;
383 acpi_gbl_original_dsdt_header.checksum =
384 acpi_gbl_DSDT->checksum;
385 }
386}
387
388/*******************************************************************************
389 *
390 * FUNCTION: acpi_tb_copy_dsdt
391 *
392 * PARAMETERS: table_desc - Installed table to copy
393 *
394 * RETURN: None
395 *
396 * DESCRIPTION: Implements a subsystem option to copy the DSDT to local memory.
397 * Some very bad BIOSs are known to either corrupt the DSDT or
398 * install a new, bad DSDT. This copy works around the problem.
399 *
400 ******************************************************************************/
401
402struct acpi_table_header *acpi_tb_copy_dsdt(u32 table_index)
403{
404 struct acpi_table_header *new_table;
405 struct acpi_table_desc *table_desc;
406
407 table_desc = &acpi_gbl_root_table_list.tables[table_index];
408
409 new_table = ACPI_ALLOCATE(table_desc->length);
410 if (!new_table) {
411 ACPI_ERROR((AE_INFO, "Could not copy DSDT of length 0x%X",
412 table_desc->length));
413 return (NULL);
414 }
415
416 ACPI_MEMCPY(new_table, table_desc->pointer, table_desc->length);
417 acpi_tb_delete_table(table_desc);
418 table_desc->pointer = new_table;
419 table_desc->flags = ACPI_TABLE_ORIGIN_ALLOCATED;
420
421 ACPI_INFO((AE_INFO,
422 "Forced DSDT copy: length 0x%05X copied locally, original unmapped",
423 new_table->length));
424
425 return (new_table);
426}
427
428/*******************************************************************************
429 *
352 * FUNCTION: acpi_tb_install_table 430 * FUNCTION: acpi_tb_install_table
353 * 431 *
354 * PARAMETERS: Address - Physical address of DSDT or FACS 432 * PARAMETERS: Address - Physical address of DSDT or FACS
@@ -496,7 +574,7 @@ acpi_tb_get_root_table_entry(u8 *table_entry, u32 table_entry_size)
496 /* Will truncate 64-bit address to 32 bits, issue warning */ 574 /* Will truncate 64-bit address to 32 bits, issue warning */
497 575
498 ACPI_WARNING((AE_INFO, 576 ACPI_WARNING((AE_INFO,
499 "64-bit Physical Address in XSDT is too large (%8.8X%8.8X)," 577 "64-bit Physical Address in XSDT is too large (0x%8.8X%8.8X),"
500 " truncating", 578 " truncating",
501 ACPI_FORMAT_UINT64(address64))); 579 ACPI_FORMAT_UINT64(address64)));
502 } 580 }
@@ -629,14 +707,14 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
629 */ 707 */
630 table_entry = 708 table_entry =
631 ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header); 709 ACPI_CAST_PTR(u8, table) + sizeof(struct acpi_table_header);
632 acpi_gbl_root_table_list.count = 2; 710 acpi_gbl_root_table_list.current_table_count = 2;
633 711
634 /* 712 /*
635 * Initialize the root table array from the RSDT/XSDT 713 * Initialize the root table array from the RSDT/XSDT
636 */ 714 */
637 for (i = 0; i < table_count; i++) { 715 for (i = 0; i < table_count; i++) {
638 if (acpi_gbl_root_table_list.count >= 716 if (acpi_gbl_root_table_list.current_table_count >=
639 acpi_gbl_root_table_list.size) { 717 acpi_gbl_root_table_list.max_table_count) {
640 718
641 /* There is no more room in the root table array, attempt resize */ 719 /* There is no more room in the root table array, attempt resize */
642 720
@@ -646,19 +724,20 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
646 "Truncating %u table entries!", 724 "Truncating %u table entries!",
647 (unsigned) (table_count - 725 (unsigned) (table_count -
648 (acpi_gbl_root_table_list. 726 (acpi_gbl_root_table_list.
649 count - 2)))); 727 current_table_count -
728 2))));
650 break; 729 break;
651 } 730 }
652 } 731 }
653 732
654 /* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */ 733 /* Get the table physical address (32-bit for RSDT, 64-bit for XSDT) */
655 734
656 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.count]. 735 acpi_gbl_root_table_list.tables[acpi_gbl_root_table_list.
657 address = 736 current_table_count].address =
658 acpi_tb_get_root_table_entry(table_entry, table_entry_size); 737 acpi_tb_get_root_table_entry(table_entry, table_entry_size);
659 738
660 table_entry += table_entry_size; 739 table_entry += table_entry_size;
661 acpi_gbl_root_table_list.count++; 740 acpi_gbl_root_table_list.current_table_count++;
662 } 741 }
663 742
664 /* 743 /*
@@ -671,7 +750,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address)
671 * Complete the initialization of the root table array by examining 750 * Complete the initialization of the root table array by examining
672 * the header of each table 751 * the header of each table
673 */ 752 */
674 for (i = 2; i < acpi_gbl_root_table_list.count; i++) { 753 for (i = 2; i < acpi_gbl_root_table_list.current_table_count; i++) {
675 acpi_tb_install_table(acpi_gbl_root_table_list.tables[i]. 754 acpi_tb_install_table(acpi_gbl_root_table_list.tables[i].
676 address, NULL, i); 755 address, NULL, i);
677 756
diff --git a/drivers/acpi/acpica/tbxface.c b/drivers/acpi/acpica/tbxface.c
index 5217a6159a31..4a8b9e6ea57a 100644
--- a/drivers/acpi/acpica/tbxface.c
+++ b/drivers/acpi/acpica/tbxface.c
@@ -72,7 +72,7 @@ static int no_auto_ssdt;
72acpi_status acpi_allocate_root_table(u32 initial_table_count) 72acpi_status acpi_allocate_root_table(u32 initial_table_count)
73{ 73{
74 74
75 acpi_gbl_root_table_list.size = initial_table_count; 75 acpi_gbl_root_table_list.max_table_count = initial_table_count;
76 acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE; 76 acpi_gbl_root_table_list.flags = ACPI_ROOT_ALLOW_RESIZE;
77 77
78 return (acpi_tb_resize_root_table_list()); 78 return (acpi_tb_resize_root_table_list());
@@ -130,7 +130,7 @@ acpi_initialize_tables(struct acpi_table_desc * initial_table_array,
130 sizeof(struct acpi_table_desc)); 130 sizeof(struct acpi_table_desc));
131 131
132 acpi_gbl_root_table_list.tables = initial_table_array; 132 acpi_gbl_root_table_list.tables = initial_table_array;
133 acpi_gbl_root_table_list.size = initial_table_count; 133 acpi_gbl_root_table_list.max_table_count = initial_table_count;
134 acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN; 134 acpi_gbl_root_table_list.flags = ACPI_ROOT_ORIGIN_UNKNOWN;
135 if (allow_resize) { 135 if (allow_resize) {
136 acpi_gbl_root_table_list.flags |= 136 acpi_gbl_root_table_list.flags |=
@@ -172,6 +172,7 @@ acpi_status acpi_reallocate_root_table(void)
172{ 172{
173 struct acpi_table_desc *tables; 173 struct acpi_table_desc *tables;
174 acpi_size new_size; 174 acpi_size new_size;
175 acpi_size current_size;
175 176
176 ACPI_FUNCTION_TRACE(acpi_reallocate_root_table); 177 ACPI_FUNCTION_TRACE(acpi_reallocate_root_table);
177 178
@@ -183,10 +184,17 @@ acpi_status acpi_reallocate_root_table(void)
183 return_ACPI_STATUS(AE_SUPPORT); 184 return_ACPI_STATUS(AE_SUPPORT);
184 } 185 }
185 186
186 new_size = ((acpi_size) acpi_gbl_root_table_list.count + 187 /*
187 ACPI_ROOT_TABLE_SIZE_INCREMENT) * 188 * Get the current size of the root table and add the default
189 * increment to create the new table size.
190 */
191 current_size = (acpi_size)
192 acpi_gbl_root_table_list.current_table_count *
188 sizeof(struct acpi_table_desc); 193 sizeof(struct acpi_table_desc);
189 194
195 new_size = current_size +
196 (ACPI_ROOT_TABLE_SIZE_INCREMENT * sizeof(struct acpi_table_desc));
197
190 /* Create new array and copy the old array */ 198 /* Create new array and copy the old array */
191 199
192 tables = ACPI_ALLOCATE_ZEROED(new_size); 200 tables = ACPI_ALLOCATE_ZEROED(new_size);
@@ -194,10 +202,17 @@ acpi_status acpi_reallocate_root_table(void)
194 return_ACPI_STATUS(AE_NO_MEMORY); 202 return_ACPI_STATUS(AE_NO_MEMORY);
195 } 203 }
196 204
197 ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, new_size); 205 ACPI_MEMCPY(tables, acpi_gbl_root_table_list.tables, current_size);
198 206
199 acpi_gbl_root_table_list.size = acpi_gbl_root_table_list.count; 207 /*
208 * Update the root table descriptor. The new size will be the current
209 * number of tables plus the increment, independent of the reserved
210 * size of the original table list.
211 */
200 acpi_gbl_root_table_list.tables = tables; 212 acpi_gbl_root_table_list.tables = tables;
213 acpi_gbl_root_table_list.max_table_count =
214 acpi_gbl_root_table_list.current_table_count +
215 ACPI_ROOT_TABLE_SIZE_INCREMENT;
201 acpi_gbl_root_table_list.flags = 216 acpi_gbl_root_table_list.flags =
202 ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE; 217 ACPI_ROOT_ORIGIN_ALLOCATED | ACPI_ROOT_ALLOW_RESIZE;
203 218
@@ -278,7 +293,8 @@ acpi_get_table_header(char *signature,
278 293
279 /* Walk the root table list */ 294 /* Walk the root table list */
280 295
281 for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) { 296 for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
297 i++) {
282 if (!ACPI_COMPARE_NAME 298 if (!ACPI_COMPARE_NAME
283 (&(acpi_gbl_root_table_list.tables[i].signature), 299 (&(acpi_gbl_root_table_list.tables[i].signature),
284 signature)) { 300 signature)) {
@@ -341,7 +357,7 @@ acpi_status acpi_unload_table_id(acpi_owner_id id)
341 ACPI_FUNCTION_TRACE(acpi_unload_table_id); 357 ACPI_FUNCTION_TRACE(acpi_unload_table_id);
342 358
343 /* Find table in the global table list */ 359 /* Find table in the global table list */
344 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) { 360 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
345 if (id != acpi_gbl_root_table_list.tables[i].owner_id) { 361 if (id != acpi_gbl_root_table_list.tables[i].owner_id) {
346 continue; 362 continue;
347 } 363 }
@@ -391,7 +407,8 @@ acpi_get_table_with_size(char *signature,
391 407
392 /* Walk the root table list */ 408 /* Walk the root table list */
393 409
394 for (i = 0, j = 0; i < acpi_gbl_root_table_list.count; i++) { 410 for (i = 0, j = 0; i < acpi_gbl_root_table_list.current_table_count;
411 i++) {
395 if (!ACPI_COMPARE_NAME 412 if (!ACPI_COMPARE_NAME
396 (&(acpi_gbl_root_table_list.tables[i].signature), 413 (&(acpi_gbl_root_table_list.tables[i].signature),
397 signature)) { 414 signature)) {
@@ -459,7 +476,7 @@ acpi_get_table_by_index(u32 table_index, struct acpi_table_header **table)
459 476
460 /* Validate index */ 477 /* Validate index */
461 478
462 if (table_index >= acpi_gbl_root_table_list.count) { 479 if (table_index >= acpi_gbl_root_table_list.current_table_count) {
463 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); 480 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
464 return_ACPI_STATUS(AE_BAD_PARAMETER); 481 return_ACPI_STATUS(AE_BAD_PARAMETER);
465 } 482 }
@@ -500,16 +517,17 @@ static acpi_status acpi_tb_load_namespace(void)
500{ 517{
501 acpi_status status; 518 acpi_status status;
502 u32 i; 519 u32 i;
520 struct acpi_table_header *new_dsdt;
503 521
504 ACPI_FUNCTION_TRACE(tb_load_namespace); 522 ACPI_FUNCTION_TRACE(tb_load_namespace);
505 523
506 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 524 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
507 525
508 /* 526 /*
509 * Load the namespace. The DSDT is required, but any SSDT and PSDT tables 527 * Load the namespace. The DSDT is required, but any SSDT and
510 * are optional. 528 * PSDT tables are optional. Verify the DSDT.
511 */ 529 */
512 if (!acpi_gbl_root_table_list.count || 530 if (!acpi_gbl_root_table_list.current_table_count ||
513 !ACPI_COMPARE_NAME(& 531 !ACPI_COMPARE_NAME(&
514 (acpi_gbl_root_table_list. 532 (acpi_gbl_root_table_list.
515 tables[ACPI_TABLE_INDEX_DSDT].signature), 533 tables[ACPI_TABLE_INDEX_DSDT].signature),
@@ -522,17 +540,35 @@ static acpi_status acpi_tb_load_namespace(void)
522 goto unlock_and_exit; 540 goto unlock_and_exit;
523 } 541 }
524 542
525 /* A valid DSDT is required */ 543 /*
526 544 * Save the DSDT pointer for simple access. This is the mapped memory
527 status = 545 * address. We must take care here because the address of the .Tables
528 acpi_tb_verify_table(&acpi_gbl_root_table_list. 546 * array can change dynamically as tables are loaded at run-time. Note:
529 tables[ACPI_TABLE_INDEX_DSDT]); 547 * .Pointer field is not validated until after call to acpi_tb_verify_table.
530 if (ACPI_FAILURE(status)) { 548 */
549 acpi_gbl_DSDT =
550 acpi_gbl_root_table_list.tables[ACPI_TABLE_INDEX_DSDT].pointer;
531 551
532 status = AE_NO_ACPI_TABLES; 552 /*
533 goto unlock_and_exit; 553 * Optionally copy the entire DSDT to local memory (instead of simply
554 * mapping it.) There are some BIOSs that corrupt or replace the original
555 * DSDT, creating the need for this option. Default is FALSE, do not copy
556 * the DSDT.
557 */
558 if (acpi_gbl_copy_dsdt_locally) {
559 new_dsdt = acpi_tb_copy_dsdt(ACPI_TABLE_INDEX_DSDT);
560 if (new_dsdt) {
561 acpi_gbl_DSDT = new_dsdt;
562 }
534 } 563 }
535 564
565 /*
566 * Save the original DSDT header for detection of table corruption
567 * and/or replacement of the DSDT from outside the OS.
568 */
569 ACPI_MEMCPY(&acpi_gbl_original_dsdt_header, acpi_gbl_DSDT,
570 sizeof(struct acpi_table_header));
571
536 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES); 572 (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
537 573
538 /* Load and parse tables */ 574 /* Load and parse tables */
@@ -545,7 +581,7 @@ static acpi_status acpi_tb_load_namespace(void)
545 /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */ 581 /* Load any SSDT or PSDT tables. Note: Loop leaves tables locked */
546 582
547 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES); 583 (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
548 for (i = 0; i < acpi_gbl_root_table_list.count; ++i) { 584 for (i = 0; i < acpi_gbl_root_table_list.current_table_count; ++i) {
549 if ((!ACPI_COMPARE_NAME 585 if ((!ACPI_COMPARE_NAME
550 (&(acpi_gbl_root_table_list.tables[i].signature), 586 (&(acpi_gbl_root_table_list.tables[i].signature),
551 ACPI_SIG_SSDT) 587 ACPI_SIG_SSDT)
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index dda6e8c497d3..fd2c07d1d3ac 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -134,7 +134,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
134 ACPI_EBDA_PTR_LENGTH); 134 ACPI_EBDA_PTR_LENGTH);
135 if (!table_ptr) { 135 if (!table_ptr) {
136 ACPI_ERROR((AE_INFO, 136 ACPI_ERROR((AE_INFO,
137 "Could not map memory at %8.8X for length %X", 137 "Could not map memory at 0x%8.8X for length %u",
138 ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH)); 138 ACPI_EBDA_PTR_LOCATION, ACPI_EBDA_PTR_LENGTH));
139 139
140 return_ACPI_STATUS(AE_NO_MEMORY); 140 return_ACPI_STATUS(AE_NO_MEMORY);
@@ -159,7 +159,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
159 ACPI_EBDA_WINDOW_SIZE); 159 ACPI_EBDA_WINDOW_SIZE);
160 if (!table_ptr) { 160 if (!table_ptr) {
161 ACPI_ERROR((AE_INFO, 161 ACPI_ERROR((AE_INFO,
162 "Could not map memory at %8.8X for length %X", 162 "Could not map memory at 0x%8.8X for length %u",
163 physical_address, ACPI_EBDA_WINDOW_SIZE)); 163 physical_address, ACPI_EBDA_WINDOW_SIZE));
164 164
165 return_ACPI_STATUS(AE_NO_MEMORY); 165 return_ACPI_STATUS(AE_NO_MEMORY);
@@ -191,7 +191,7 @@ acpi_status acpi_find_root_pointer(acpi_size *table_address)
191 191
192 if (!table_ptr) { 192 if (!table_ptr) {
193 ACPI_ERROR((AE_INFO, 193 ACPI_ERROR((AE_INFO,
194 "Could not map memory at %8.8X for length %X", 194 "Could not map memory at 0x%8.8X for length %u",
195 ACPI_HI_RSDP_WINDOW_BASE, 195 ACPI_HI_RSDP_WINDOW_BASE,
196 ACPI_HI_RSDP_WINDOW_SIZE)); 196 ACPI_HI_RSDP_WINDOW_SIZE));
197 197
diff --git a/drivers/acpi/acpica/utalloc.c b/drivers/acpi/acpica/utalloc.c
index 3d706b8fd449..8f0896281567 100644
--- a/drivers/acpi/acpica/utalloc.c
+++ b/drivers/acpi/acpica/utalloc.c
@@ -340,7 +340,7 @@ void *acpi_ut_allocate(acpi_size size,
340 /* Report allocation error */ 340 /* Report allocation error */
341 341
342 ACPI_WARNING((module, line, 342 ACPI_WARNING((module, line,
343 "Could not allocate size %X", (u32) size)); 343 "Could not allocate size %u", (u32) size));
344 344
345 return_PTR(NULL); 345 return_PTR(NULL);
346 } 346 }
diff --git a/drivers/acpi/acpica/utcopy.c b/drivers/acpi/acpica/utcopy.c
index 97ec3621e71d..6fef83f04bcd 100644
--- a/drivers/acpi/acpica/utcopy.c
+++ b/drivers/acpi/acpica/utcopy.c
@@ -677,16 +677,24 @@ acpi_ut_copy_simple_object(union acpi_operand_object *source_desc,
677 u16 reference_count; 677 u16 reference_count;
678 union acpi_operand_object *next_object; 678 union acpi_operand_object *next_object;
679 acpi_status status; 679 acpi_status status;
680 acpi_size copy_size;
680 681
681 /* Save fields from destination that we don't want to overwrite */ 682 /* Save fields from destination that we don't want to overwrite */
682 683
683 reference_count = dest_desc->common.reference_count; 684 reference_count = dest_desc->common.reference_count;
684 next_object = dest_desc->common.next_object; 685 next_object = dest_desc->common.next_object;
685 686
686 /* Copy the entire source object over the destination object */ 687 /*
688 * Copy the entire source object over the destination object.
689 * Note: Source can be either an operand object or namespace node.
690 */
691 copy_size = sizeof(union acpi_operand_object);
692 if (ACPI_GET_DESCRIPTOR_TYPE(source_desc) == ACPI_DESC_TYPE_NAMED) {
693 copy_size = sizeof(struct acpi_namespace_node);
694 }
687 695
688 ACPI_MEMCPY((char *)dest_desc, (char *)source_desc, 696 ACPI_MEMCPY(ACPI_CAST_PTR(char, dest_desc),
689 sizeof(union acpi_operand_object)); 697 ACPI_CAST_PTR(char, source_desc), copy_size);
690 698
691 /* Restore the saved fields */ 699 /* Restore the saved fields */
692 700
diff --git a/drivers/acpi/acpica/utdelete.c b/drivers/acpi/acpica/utdelete.c
index 16b51c69606a..ed794cd033ea 100644
--- a/drivers/acpi/acpica/utdelete.c
+++ b/drivers/acpi/acpica/utdelete.c
@@ -434,7 +434,7 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
434 434
435 default: 435 default:
436 436
437 ACPI_ERROR((AE_INFO, "Unknown action (%X)", action)); 437 ACPI_ERROR((AE_INFO, "Unknown action (0x%X)", action));
438 break; 438 break;
439 } 439 }
440 440
@@ -444,8 +444,8 @@ acpi_ut_update_ref_count(union acpi_operand_object *object, u32 action)
444 */ 444 */
445 if (count > ACPI_MAX_REFERENCE_COUNT) { 445 if (count > ACPI_MAX_REFERENCE_COUNT) {
446 ACPI_WARNING((AE_INFO, 446 ACPI_WARNING((AE_INFO,
447 "Large Reference Count (%X) in object %p", count, 447 "Large Reference Count (0x%X) in object %p",
448 object)); 448 count, object));
449 } 449 }
450} 450}
451 451
diff --git a/drivers/acpi/acpica/uteval.c b/drivers/acpi/acpica/uteval.c
index 7f5e734ce7f7..6dfdeb653490 100644
--- a/drivers/acpi/acpica/uteval.c
+++ b/drivers/acpi/acpica/uteval.c
@@ -307,7 +307,7 @@ acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
307 prefix_node, path, AE_TYPE); 307 prefix_node, path, AE_TYPE);
308 308
309 ACPI_ERROR((AE_INFO, 309 ACPI_ERROR((AE_INFO,
310 "Type returned from %s was incorrect: %s, expected Btypes: %X", 310 "Type returned from %s was incorrect: %s, expected Btypes: 0x%X",
311 path, 311 path,
312 acpi_ut_get_object_type_name(info->return_object), 312 acpi_ut_get_object_type_name(info->return_object),
313 expected_return_btypes)); 313 expected_return_btypes));
diff --git a/drivers/acpi/acpica/utglobal.c b/drivers/acpi/acpica/utglobal.c
index eda3e656c4af..66116750a0f9 100644
--- a/drivers/acpi/acpica/utglobal.c
+++ b/drivers/acpi/acpica/utglobal.c
@@ -785,6 +785,7 @@ acpi_status acpi_ut_init_globals(void)
785 785
786 /* Miscellaneous variables */ 786 /* Miscellaneous variables */
787 787
788 acpi_gbl_DSDT = NULL;
788 acpi_gbl_cm_single_step = FALSE; 789 acpi_gbl_cm_single_step = FALSE;
789 acpi_gbl_db_terminate_threads = FALSE; 790 acpi_gbl_db_terminate_threads = FALSE;
790 acpi_gbl_shutdown = FALSE; 791 acpi_gbl_shutdown = FALSE;
diff --git a/drivers/acpi/acpica/utmisc.c b/drivers/acpi/acpica/utmisc.c
index 32982e2ac384..e8d0724ee403 100644
--- a/drivers/acpi/acpica/utmisc.c
+++ b/drivers/acpi/acpica/utmisc.c
@@ -205,7 +205,7 @@ acpi_status acpi_ut_allocate_owner_id(acpi_owner_id * owner_id)
205 /* Guard against multiple allocations of ID to the same location */ 205 /* Guard against multiple allocations of ID to the same location */
206 206
207 if (*owner_id) { 207 if (*owner_id) {
208 ACPI_ERROR((AE_INFO, "Owner ID [%2.2X] already exists", 208 ACPI_ERROR((AE_INFO, "Owner ID [0x%2.2X] already exists",
209 *owner_id)); 209 *owner_id));
210 return_ACPI_STATUS(AE_ALREADY_EXISTS); 210 return_ACPI_STATUS(AE_ALREADY_EXISTS);
211 } 211 }
@@ -315,7 +315,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
315 /* Zero is not a valid owner_iD */ 315 /* Zero is not a valid owner_iD */
316 316
317 if (owner_id == 0) { 317 if (owner_id == 0) {
318 ACPI_ERROR((AE_INFO, "Invalid OwnerId: %2.2X", owner_id)); 318 ACPI_ERROR((AE_INFO, "Invalid OwnerId: 0x%2.2X", owner_id));
319 return_VOID; 319 return_VOID;
320 } 320 }
321 321
@@ -341,7 +341,7 @@ void acpi_ut_release_owner_id(acpi_owner_id * owner_id_ptr)
341 acpi_gbl_owner_id_mask[index] ^= bit; 341 acpi_gbl_owner_id_mask[index] ^= bit;
342 } else { 342 } else {
343 ACPI_ERROR((AE_INFO, 343 ACPI_ERROR((AE_INFO,
344 "Release of non-allocated OwnerId: %2.2X", 344 "Release of non-allocated OwnerId: 0x%2.2X",
345 owner_id + 1)); 345 owner_id + 1));
346 } 346 }
347 347
diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c
index 55d014ed6d55..058b3df48271 100644
--- a/drivers/acpi/acpica/utmutex.c
+++ b/drivers/acpi/acpica/utmutex.c
@@ -258,7 +258,7 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
258 acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id; 258 acpi_gbl_mutex_info[mutex_id].thread_id = this_thread_id;
259 } else { 259 } else {
260 ACPI_EXCEPTION((AE_INFO, status, 260 ACPI_EXCEPTION((AE_INFO, status,
261 "Thread %p could not acquire Mutex [%X]", 261 "Thread %p could not acquire Mutex [0x%X]",
262 ACPI_CAST_PTR(void, this_thread_id), mutex_id)); 262 ACPI_CAST_PTR(void, this_thread_id), mutex_id));
263 } 263 }
264 264
@@ -297,7 +297,7 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
297 */ 297 */
298 if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) { 298 if (acpi_gbl_mutex_info[mutex_id].thread_id == ACPI_MUTEX_NOT_ACQUIRED) {
299 ACPI_ERROR((AE_INFO, 299 ACPI_ERROR((AE_INFO,
300 "Mutex [%X] is not acquired, cannot release", 300 "Mutex [0x%X] is not acquired, cannot release",
301 mutex_id)); 301 mutex_id));
302 302
303 return (AE_NOT_ACQUIRED); 303 return (AE_NOT_ACQUIRED);
diff --git a/drivers/acpi/acpica/utobject.c b/drivers/acpi/acpica/utobject.c
index 3356f0cb0745..fd1fa2749ea5 100644
--- a/drivers/acpi/acpica/utobject.c
+++ b/drivers/acpi/acpica/utobject.c
@@ -251,7 +251,7 @@ union acpi_operand_object *acpi_ut_create_buffer_object(acpi_size buffer_size)
251 251
252 buffer = ACPI_ALLOCATE_ZEROED(buffer_size); 252 buffer = ACPI_ALLOCATE_ZEROED(buffer_size);
253 if (!buffer) { 253 if (!buffer) {
254 ACPI_ERROR((AE_INFO, "Could not allocate size %X", 254 ACPI_ERROR((AE_INFO, "Could not allocate size %u",
255 (u32) buffer_size)); 255 (u32) buffer_size));
256 acpi_ut_remove_reference(buffer_desc); 256 acpi_ut_remove_reference(buffer_desc);
257 return_PTR(NULL); 257 return_PTR(NULL);
@@ -303,7 +303,7 @@ union acpi_operand_object *acpi_ut_create_string_object(acpi_size string_size)
303 */ 303 */
304 string = ACPI_ALLOCATE_ZEROED(string_size + 1); 304 string = ACPI_ALLOCATE_ZEROED(string_size + 1);
305 if (!string) { 305 if (!string) {
306 ACPI_ERROR((AE_INFO, "Could not allocate size %X", 306 ACPI_ERROR((AE_INFO, "Could not allocate size %u",
307 (u32) string_size)); 307 (u32) string_size));
308 acpi_ut_remove_reference(string_desc); 308 acpi_ut_remove_reference(string_desc);
309 return_PTR(NULL); 309 return_PTR(NULL);
@@ -533,7 +533,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
533 */ 533 */
534 ACPI_ERROR((AE_INFO, 534 ACPI_ERROR((AE_INFO,
535 "Cannot convert to external object - " 535 "Cannot convert to external object - "
536 "unsupported Reference Class [%s] %X in object %p", 536 "unsupported Reference Class [%s] 0x%X in object %p",
537 acpi_ut_get_reference_name(internal_object), 537 acpi_ut_get_reference_name(internal_object),
538 internal_object->reference.class, 538 internal_object->reference.class,
539 internal_object)); 539 internal_object));
@@ -545,7 +545,7 @@ acpi_ut_get_simple_object_size(union acpi_operand_object *internal_object,
545 default: 545 default:
546 546
547 ACPI_ERROR((AE_INFO, "Cannot convert to external object - " 547 ACPI_ERROR((AE_INFO, "Cannot convert to external object - "
548 "unsupported type [%s] %X in object %p", 548 "unsupported type [%s] 0x%X in object %p",
549 acpi_ut_get_object_type_name(internal_object), 549 acpi_ut_get_object_type_name(internal_object),
550 internal_object->common.type, internal_object)); 550 internal_object->common.type, internal_object));
551 status = AE_TYPE; 551 status = AE_TYPE;
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
new file mode 100644
index 000000000000..f8c668f27b5a
--- /dev/null
+++ b/drivers/acpi/apei/Kconfig
@@ -0,0 +1,30 @@
1config ACPI_APEI
2 bool "ACPI Platform Error Interface (APEI)"
3 depends on X86
4 help
5 APEI allows to report errors (for example from the chipset)
6 to the operating system. This improves NMI handling
7 especially. In addition it supports error serialization and
8 error injection.
9
10config ACPI_APEI_GHES
11 tristate "APEI Generic Hardware Error Source"
12 depends on ACPI_APEI && X86
13 select ACPI_HED
14 help
15 Generic Hardware Error Source provides a way to report
16 platform hardware errors (such as that from chipset). It
17 works in so called "Firmware First" mode, that is, hardware
18 errors are reported to firmware firstly, then reported to
19 Linux by firmware. This way, some non-standard hardware
20 error registers or non-standard hardware link can be checked
21 by firmware to produce more valuable hardware error
22 information for Linux.
23
24config ACPI_APEI_EINJ
25 tristate "APEI Error INJection (EINJ)"
26 depends on ACPI_APEI && DEBUG_FS
27 help
28 EINJ provides a hardware error injection mechanism, it is
29 mainly used for debugging and testing the other parts of
30 APEI and some other RAS features.
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
new file mode 100644
index 000000000000..b13b03a17789
--- /dev/null
+++ b/drivers/acpi/apei/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_ACPI_APEI) += apei.o
2obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
3obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
4
5apei-y := apei-base.o hest.o cper.o erst.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
new file mode 100644
index 000000000000..db3946e9c66b
--- /dev/null
+++ b/drivers/acpi/apei/apei-base.c
@@ -0,0 +1,593 @@
1/*
2 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
3 * infrastructure
4 *
5 * APEI allows errors (for example from the chipset) to be reported to
6 * the operating system. This improves NMI handling especially. In
7 * addition it supports error serialization and error injection.
8 *
9 * For more information about APEI, please refer to ACPI Specification
10 * version 4.0, chapter 17.
11 *
12 * This file has Common functions used by more than one APEI table,
13 * including framework of interpreter for ERST and EINJ; resource
14 * management for APEI registers.
15 *
16 * Copyright (C) 2009, Intel Corp.
17 * Author: Huang Ying <ying.huang@intel.com>
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License version
21 * 2 as published by the Free Software Foundation.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/acpi.h>
37#include <linux/io.h>
38#include <linux/kref.h>
39#include <linux/rculist.h>
40#include <linux/interrupt.h>
41#include <linux/debugfs.h>
42#include <acpi/atomicio.h>
43
44#include "apei-internal.h"
45
46#define APEI_PFX "APEI: "
47
48/*
49 * APEI ERST (Error Record Serialization Table) and EINJ (Error
50 * INJection) interpreter framework.
51 */
52
53#define APEI_EXEC_PRESERVE_REGISTER 0x1
54
55void apei_exec_ctx_init(struct apei_exec_context *ctx,
56 struct apei_exec_ins_type *ins_table,
57 u32 instructions,
58 struct acpi_whea_header *action_table,
59 u32 entries)
60{
61 ctx->ins_table = ins_table;
62 ctx->instructions = instructions;
63 ctx->action_table = action_table;
64 ctx->entries = entries;
65}
66EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
67
68int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
69{
70 int rc;
71
72 rc = acpi_atomic_read(val, &entry->register_region);
73 if (rc)
74 return rc;
75 *val >>= entry->register_region.bit_offset;
76 *val &= entry->mask;
77
78 return 0;
79}
80
81int apei_exec_read_register(struct apei_exec_context *ctx,
82 struct acpi_whea_header *entry)
83{
84 int rc;
85 u64 val = 0;
86
87 rc = __apei_exec_read_register(entry, &val);
88 if (rc)
89 return rc;
90 ctx->value = val;
91
92 return 0;
93}
94EXPORT_SYMBOL_GPL(apei_exec_read_register);
95
96int apei_exec_read_register_value(struct apei_exec_context *ctx,
97 struct acpi_whea_header *entry)
98{
99 int rc;
100
101 rc = apei_exec_read_register(ctx, entry);
102 if (rc)
103 return rc;
104 ctx->value = (ctx->value == entry->value);
105
106 return 0;
107}
108EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
109
/*
 * Write @val into the field of the register described by @entry.
 * The value is masked and shifted into field position first.  If the
 * entry carries APEI_EXEC_PRESERVE_REGISTER, the bits outside the
 * field are read back and preserved (read-modify-write); otherwise
 * they are written as zero.  Returns 0 on success or the error from
 * the atomic register access.
 */
int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
{
	int rc;

	val &= entry->mask;
	val <<= entry->register_region.bit_offset;
	if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
		u64 valr = 0;
		rc = acpi_atomic_read(&valr, &entry->register_region);
		if (rc)
			return rc;
		/* Keep everything outside the field's mask/offset window */
		valr &= ~(entry->mask << entry->register_region.bit_offset);
		val |= valr;
	}
	rc = acpi_atomic_write(val, &entry->register_region);

	return rc;
}
128
129int apei_exec_write_register(struct apei_exec_context *ctx,
130 struct acpi_whea_header *entry)
131{
132 return __apei_exec_write_register(entry, ctx->value);
133}
134EXPORT_SYMBOL_GPL(apei_exec_write_register);
135
136int apei_exec_write_register_value(struct apei_exec_context *ctx,
137 struct acpi_whea_header *entry)
138{
139 int rc;
140
141 ctx->value = entry->value;
142 rc = apei_exec_write_register(ctx, entry);
143
144 return rc;
145}
146EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
147
/* NOOP instruction: does nothing, always succeeds. */
int apei_exec_noop(struct apei_exec_context *ctx,
		   struct acpi_whea_header *entry)
{
	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_noop);
154
/*
 * Interpret the specified action.  Go through the whole action table
 * and execute every instruction that belongs to the action.
 *
 * Returns 0 on success, the negative error from a "run" function, or
 * -EINVAL if the table references an unknown instruction type.
 */
int apei_exec_run(struct apei_exec_context *ctx, u8 action)
{
	int rc;
	u32 i, ip;
	struct acpi_whea_header *entry;
	apei_exec_ins_func_t run;

	ctx->ip = 0;

	/*
	 * "ip" is the instruction pointer of current instruction,
	 * "ctx->ip" specifies the next instruction to executed,
	 * instruction "run" function may change the "ctx->ip" to
	 * implement "goto" semantics.
	 */
rewind:
	ip = 0;
	for (i = 0; i < ctx->entries; i++) {
		entry = &ctx->action_table[i];
		/* Only entries belonging to the requested action count */
		if (entry->action != action)
			continue;
		if (ip == ctx->ip) {
			if (entry->instruction >= ctx->instructions ||
			    !ctx->ins_table[entry->instruction].run) {
				pr_warning(FW_WARN APEI_PFX
					   "Invalid action table, unknown instruction type: %d\n",
					   entry->instruction);
				return -EINVAL;
			}
			run = ctx->ins_table[entry->instruction].run;
			rc = run(ctx, entry);
			if (rc < 0)
				return rc;
			else if (rc != APEI_EXEC_SET_IP)
				/* Ordinary instruction: advance to the next */
				ctx->ip++;
		}
		ip++;
		/* A backward jump restarts the scan from the table top */
		if (ctx->ip < ip)
			goto rewind;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_exec_run);
203
204typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
205 struct acpi_whea_header *entry,
206 void *data);
207
208static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
209 apei_exec_entry_func_t func,
210 void *data,
211 int *end)
212{
213 u8 ins;
214 int i, rc;
215 struct acpi_whea_header *entry;
216 struct apei_exec_ins_type *ins_table = ctx->ins_table;
217
218 for (i = 0; i < ctx->entries; i++) {
219 entry = ctx->action_table + i;
220 ins = entry->instruction;
221 if (end)
222 *end = i;
223 if (ins >= ctx->instructions || !ins_table[ins].run) {
224 pr_warning(FW_WARN APEI_PFX
225 "Invalid action table, unknown instruction type: %d\n",
226 ins);
227 return -EINVAL;
228 }
229 rc = func(ctx, entry, data);
230 if (rc)
231 return rc;
232 }
233
234 return 0;
235}
236
237static int pre_map_gar_callback(struct apei_exec_context *ctx,
238 struct acpi_whea_header *entry,
239 void *data)
240{
241 u8 ins = entry->instruction;
242
243 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
244 return acpi_pre_map_gar(&entry->register_region);
245
246 return 0;
247}
248
249/*
250 * Pre-map all GARs in action table to make it possible to access them
251 * in NMI handler.
252 */
253int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
254{
255 int rc, end;
256
257 rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
258 NULL, &end);
259 if (rc) {
260 struct apei_exec_context ctx_unmap;
261 memcpy(&ctx_unmap, ctx, sizeof(*ctx));
262 ctx_unmap.entries = end;
263 apei_exec_post_unmap_gars(&ctx_unmap);
264 }
265
266 return rc;
267}
268EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
269
270static int post_unmap_gar_callback(struct apei_exec_context *ctx,
271 struct acpi_whea_header *entry,
272 void *data)
273{
274 u8 ins = entry->instruction;
275
276 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
277 acpi_post_unmap_gar(&entry->register_region);
278
279 return 0;
280}
281
/* Undo apei_exec_pre_map_gars(): post-unmap all GARs in the action table. */
int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
{
	return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
					NULL, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
289
/*
 * Resource management for GARs in APEI
 */
/* One address range [start, end) on an iomem or ioport resource list */
struct apei_res {
	struct list_head list;
	unsigned long start;
	unsigned long end;
};

/* Collect all resources requested, to avoid conflict */
struct apei_resources apei_resources_all = {
	.iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
	.ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
};
304
305static int apei_res_add(struct list_head *res_list,
306 unsigned long start, unsigned long size)
307{
308 struct apei_res *res, *resn, *res_ins = NULL;
309 unsigned long end = start + size;
310
311 if (end <= start)
312 return 0;
313repeat:
314 list_for_each_entry_safe(res, resn, res_list, list) {
315 if (res->start > end || res->end < start)
316 continue;
317 else if (end <= res->end && start >= res->start) {
318 kfree(res_ins);
319 return 0;
320 }
321 list_del(&res->list);
322 res->start = start = min(res->start, start);
323 res->end = end = max(res->end, end);
324 kfree(res_ins);
325 res_ins = res;
326 goto repeat;
327 }
328
329 if (res_ins)
330 list_add(&res_ins->list, res_list);
331 else {
332 res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
333 if (!res_ins)
334 return -ENOMEM;
335 res_ins->start = start;
336 res_ins->end = end;
337 list_add(&res_ins->list, res_list);
338 }
339
340 return 0;
341}
342
/*
 * Remove from @res_list1 every address covered by some range on
 * @res_list2 (set subtraction: list1 -= list2).  Ranges on list1 may
 * be deleted, truncated, or split in two.  Returns 0 on success or
 * -ENOMEM if a split requires an allocation that fails.
 */
static int apei_res_sub(struct list_head *res_list1,
			struct list_head *res_list2)
{
	struct apei_res *res1, *resn1, *res2, *res;
	/* Open-coded safe iteration: res1 may be freed inside the loop */
	res1 = list_entry(res_list1->next, struct apei_res, list);
	resn1 = list_entry(res1->list.next, struct apei_res, list);
	while (&res1->list != res_list1) {
		list_for_each_entry(res2, res_list2, list) {
			if (res1->start >= res2->end ||
			    res1->end <= res2->start)
				continue;	/* disjoint: nothing to cut */
			else if (res1->end <= res2->end &&
				 res1->start >= res2->start) {
				/* res1 entirely inside res2: drop it */
				list_del(&res1->list);
				kfree(res1);
				break;
			} else if (res1->end > res2->end &&
				   res1->start < res2->start) {
				/* res2 strictly inside res1: split res1 */
				res = kmalloc(sizeof(*res), GFP_KERNEL);
				if (!res)
					return -ENOMEM;
				res->start = res2->end;
				res->end = res1->end;
				res1->end = res2->start;
				list_add(&res->list, &res1->list);
				resn1 = res;
			} else {
				/* Overlap at one end only: trim res1 */
				if (res1->start < res2->start)
					res1->end = res2->start;
				else
					res1->start = res2->end;
			}
		}
		res1 = resn1;
		resn1 = list_entry(resn1->list.next, struct apei_res, list);
	}

	return 0;
}
382
383static void apei_res_clean(struct list_head *res_list)
384{
385 struct apei_res *res, *resn;
386
387 list_for_each_entry_safe(res, resn, res_list, list) {
388 list_del(&res->list);
389 kfree(res);
390 }
391}
392
393void apei_resources_fini(struct apei_resources *resources)
394{
395 apei_res_clean(&resources->iomem);
396 apei_res_clean(&resources->ioport);
397}
398EXPORT_SYMBOL_GPL(apei_resources_fini);
399
400static int apei_resources_merge(struct apei_resources *resources1,
401 struct apei_resources *resources2)
402{
403 int rc;
404 struct apei_res *res;
405
406 list_for_each_entry(res, &resources2->iomem, list) {
407 rc = apei_res_add(&resources1->iomem, res->start,
408 res->end - res->start);
409 if (rc)
410 return rc;
411 }
412 list_for_each_entry(res, &resources2->ioport, list) {
413 rc = apei_res_add(&resources1->ioport, res->start,
414 res->end - res->start);
415 if (rc)
416 return rc;
417 }
418
419 return 0;
420}
421
422/*
423 * EINJ has two groups of GARs (EINJ table entry and trigger table
424 * entry), so common resources are subtracted from the trigger table
425 * resources before the second requesting.
426 */
427int apei_resources_sub(struct apei_resources *resources1,
428 struct apei_resources *resources2)
429{
430 int rc;
431
432 rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
433 if (rc)
434 return rc;
435 return apei_res_sub(&resources1->ioport, &resources2->ioport);
436}
437EXPORT_SYMBOL_GPL(apei_resources_sub);
438
439/*
440 * IO memory/port rersource management mechanism is used to check
441 * whether memory/port area used by GARs conflicts with normal memory
442 * or IO memory/port of devices.
443 */
444int apei_resources_request(struct apei_resources *resources,
445 const char *desc)
446{
447 struct apei_res *res, *res_bak;
448 struct resource *r;
449
450 apei_resources_sub(resources, &apei_resources_all);
451
452 list_for_each_entry(res, &resources->iomem, list) {
453 r = request_mem_region(res->start, res->end - res->start,
454 desc);
455 if (!r) {
456 pr_err(APEI_PFX
457 "Can not request iomem region <%016llx-%016llx> for GARs.\n",
458 (unsigned long long)res->start,
459 (unsigned long long)res->end);
460 res_bak = res;
461 goto err_unmap_iomem;
462 }
463 }
464
465 list_for_each_entry(res, &resources->ioport, list) {
466 r = request_region(res->start, res->end - res->start, desc);
467 if (!r) {
468 pr_err(APEI_PFX
469 "Can not request ioport region <%016llx-%016llx> for GARs.\n",
470 (unsigned long long)res->start,
471 (unsigned long long)res->end);
472 res_bak = res;
473 goto err_unmap_ioport;
474 }
475 }
476
477 apei_resources_merge(&apei_resources_all, resources);
478
479 return 0;
480err_unmap_ioport:
481 list_for_each_entry(res, &resources->ioport, list) {
482 if (res == res_bak)
483 break;
484 release_mem_region(res->start, res->end - res->start);
485 }
486 res_bak = NULL;
487err_unmap_iomem:
488 list_for_each_entry(res, &resources->iomem, list) {
489 if (res == res_bak)
490 break;
491 release_region(res->start, res->end - res->start);
492 }
493 return -EINVAL;
494}
495EXPORT_SYMBOL_GPL(apei_resources_request);
496
497void apei_resources_release(struct apei_resources *resources)
498{
499 struct apei_res *res;
500
501 list_for_each_entry(res, &resources->iomem, list)
502 release_mem_region(res->start, res->end - res->start);
503 list_for_each_entry(res, &resources->ioport, list)
504 release_region(res->start, res->end - res->start);
505
506 apei_resources_sub(&apei_resources_all, resources);
507}
508EXPORT_SYMBOL_GPL(apei_resources_release);
509
510static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
511{
512 u32 width, space_id;
513
514 width = reg->bit_width;
515 space_id = reg->space_id;
516 /* Handle possible alignment issues */
517 memcpy(paddr, &reg->address, sizeof(*paddr));
518 if (!*paddr) {
519 pr_warning(FW_BUG APEI_PFX
520 "Invalid physical address in GAR [0x%llx/%u/%u]\n",
521 *paddr, width, space_id);
522 return -EINVAL;
523 }
524
525 if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
526 pr_warning(FW_BUG APEI_PFX
527 "Invalid bit width in GAR [0x%llx/%u/%u]\n",
528 *paddr, width, space_id);
529 return -EINVAL;
530 }
531
532 if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
533 space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
534 pr_warning(FW_BUG APEI_PFX
535 "Invalid address space type in GAR [0x%llx/%u/%u]\n",
536 *paddr, width, space_id);
537 return -EINVAL;
538 }
539
540 return 0;
541}
542
543static int collect_res_callback(struct apei_exec_context *ctx,
544 struct acpi_whea_header *entry,
545 void *data)
546{
547 struct apei_resources *resources = data;
548 struct acpi_generic_address *reg = &entry->register_region;
549 u8 ins = entry->instruction;
550 u64 paddr;
551 int rc;
552
553 if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
554 return 0;
555
556 rc = apei_check_gar(reg, &paddr);
557 if (rc)
558 return rc;
559
560 switch (reg->space_id) {
561 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
562 return apei_res_add(&resources->iomem, paddr,
563 reg->bit_width / 8);
564 case ACPI_ADR_SPACE_SYSTEM_IO:
565 return apei_res_add(&resources->ioport, paddr,
566 reg->bit_width / 8);
567 default:
568 return -EINVAL;
569 }
570}
571
/*
 * Same register may be used by multiple instructions in GARs, so
 * resources are collected before requesting.
 *
 * Walks every entry of the context's action table and records the IO
 * memory/port region of each register-accessing instruction into
 * @resources.  Returns 0 or the first error from collect_res_callback().
 */
int apei_exec_collect_resources(struct apei_exec_context *ctx,
				struct apei_resources *resources)
{
	return apei_exec_for_each_entry(ctx, collect_res_callback,
					resources, NULL);
}
EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
583
584struct dentry *apei_get_debugfs_dir(void)
585{
586 static struct dentry *dapei;
587
588 if (!dapei)
589 dapei = debugfs_create_dir("apei", NULL);
590
591 return dapei;
592}
593EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
new file mode 100644
index 000000000000..18df1e940276
--- /dev/null
+++ b/drivers/acpi/apei/apei-internal.h
@@ -0,0 +1,114 @@
1/*
 * apei-internal.h - ACPI Platform Error Interface internal
 * definitions.
4 */
5
6#ifndef APEI_INTERNAL_H
7#define APEI_INTERNAL_H
8
9#include <linux/cper.h>
10
11struct apei_exec_context;
12
13typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
14 struct acpi_whea_header *entry);
15
16#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001
17
18struct apei_exec_ins_type {
19 u32 flags;
20 apei_exec_ins_func_t run;
21};
22
23struct apei_exec_context {
24 u32 ip;
25 u64 value;
26 u64 var1;
27 u64 var2;
28 u64 src_base;
29 u64 dst_base;
30 struct apei_exec_ins_type *ins_table;
31 u32 instructions;
32 struct acpi_whea_header *action_table;
33 u32 entries;
34};
35
36void apei_exec_ctx_init(struct apei_exec_context *ctx,
37 struct apei_exec_ins_type *ins_table,
38 u32 instructions,
39 struct acpi_whea_header *action_table,
40 u32 entries);
41
/* Load @input into the context's value slot before running an action. */
static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx,
					   u64 input)
{
	ctx->value = input;
}
47
/* Return the value left in the context by the last executed action. */
static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
{
	return ctx->value;
}
52
53int apei_exec_run(struct apei_exec_context *ctx, u8 action);
54
55/* Common instruction implementation */
56
57/* IP has been set in instruction function */
58#define APEI_EXEC_SET_IP 1
59
60int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
61int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
62int apei_exec_read_register(struct apei_exec_context *ctx,
63 struct acpi_whea_header *entry);
64int apei_exec_read_register_value(struct apei_exec_context *ctx,
65 struct acpi_whea_header *entry);
66int apei_exec_write_register(struct apei_exec_context *ctx,
67 struct acpi_whea_header *entry);
68int apei_exec_write_register_value(struct apei_exec_context *ctx,
69 struct acpi_whea_header *entry);
70int apei_exec_noop(struct apei_exec_context *ctx,
71 struct acpi_whea_header *entry);
72int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
73int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
74
75struct apei_resources {
76 struct list_head iomem;
77 struct list_head ioport;
78};
79
/* Prepare an apei_resources collector with empty iomem/ioport lists. */
static inline void apei_resources_init(struct apei_resources *resources)
{
	INIT_LIST_HEAD(&resources->iomem);
	INIT_LIST_HEAD(&resources->ioport);
}
85
86void apei_resources_fini(struct apei_resources *resources);
87int apei_resources_sub(struct apei_resources *resources1,
88 struct apei_resources *resources2);
89int apei_resources_request(struct apei_resources *resources,
90 const char *desc);
91void apei_resources_release(struct apei_resources *resources);
92int apei_exec_collect_resources(struct apei_exec_context *ctx,
93 struct apei_resources *resources);
94
95struct dentry;
96struct dentry *apei_get_debugfs_dir(void);
97
98#define apei_estatus_for_each_section(estatus, section) \
99 for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
100 (void *)section - (void *)estatus < estatus->data_length; \
101 section = (void *)(section+1) + section->error_data_length)
102
103static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
104{
105 if (estatus->raw_data_length)
106 return estatus->raw_data_offset + \
107 estatus->raw_data_length;
108 else
109 return sizeof(*estatus) + estatus->data_length;
110}
111
112int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
113int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
114#endif
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
new file mode 100644
index 000000000000..f4cf2fc4c8c1
--- /dev/null
+++ b/drivers/acpi/apei/cper.c
@@ -0,0 +1,84 @@
1/*
2 * UEFI Common Platform Error Record (CPER) support
3 *
4 * Copyright (C) 2010, Intel Corp.
5 * Author: Huang Ying <ying.huang@intel.com>
6 *
7 * CPER is the format used to describe platform hardware error by
8 * various APEI tables, such as ERST, BERT and HEST etc.
9 *
10 * For more information about CPER, please refer to Appendix N of UEFI
11 * Specification version 2.3.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License version
15 * 2 as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/time.h>
30#include <linux/cper.h>
31#include <linux/acpi.h>
32
33/*
 * CPER record IDs need to be unique even after reboot, because a record
 * ID is used as the index for ERST storage, while CPER records from
 * multiple boots may co-exist in ERST.
37 */
/*
 * Return the next record ID: the high 32 bits are seeded once per
 * boot from the current time in seconds, the low 32 bits count up.
 *
 * NOTE(review): the read-then-set seeding is not atomic; two
 * concurrent first callers could both seed @seq.  Presumably harmless
 * since both would use the same second-granularity seed -- confirm if
 * strict uniqueness is required.
 */
u64 cper_next_record_id(void)
{
	static atomic64_t seq;

	if (!atomic64_read(&seq))
		atomic64_set(&seq, ((u64)get_seconds()) << 32);

	return atomic64_inc_return(&seq);
}
EXPORT_SYMBOL_GPL(cper_next_record_id);
48
/*
 * Sanity-check a generic error status block header: a non-empty data
 * area must be able to hold at least one section header, and the raw
 * data area (if any) must start after the section data.
 */
int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
{
	if (estatus->data_length &&
	    estatus->data_length < sizeof(struct acpi_hest_generic_data))
		return -EINVAL;
	if (estatus->raw_data_length &&
	    estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_GPL(apei_estatus_check_header);
61
62int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
63{
64 struct acpi_hest_generic_data *gdata;
65 unsigned int data_len, gedata_len;
66 int rc;
67
68 rc = apei_estatus_check_header(estatus);
69 if (rc)
70 return rc;
71 data_len = estatus->data_length;
72 gdata = (struct acpi_hest_generic_data *)(estatus + 1);
73 while (data_len > sizeof(*gdata)) {
74 gedata_len = gdata->error_data_length;
75 if (gedata_len > data_len - sizeof(*gdata))
76 return -EINVAL;
77 data_len -= gedata_len + sizeof(*gdata);
78 }
79 if (data_len)
80 return -EINVAL;
81
82 return 0;
83}
84EXPORT_SYMBOL_GPL(apei_estatus_check);
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
new file mode 100644
index 000000000000..465c885938ee
--- /dev/null
+++ b/drivers/acpi/apei/einj.c
@@ -0,0 +1,548 @@
1/*
2 * APEI Error INJection support
3 *
4 * EINJ provides a hardware error injection mechanism, this is useful
5 * for debugging and testing of other APEI and RAS features.
6 *
7 * For more information about EINJ, please refer to ACPI Specification
8 * version 4.0, section 17.5.
9 *
10 * Copyright 2009-2010 Intel Corp.
11 * Author: Huang Ying <ying.huang@intel.com>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License version
15 * 2 as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/io.h>
31#include <linux/debugfs.h>
32#include <linux/seq_file.h>
33#include <linux/nmi.h>
34#include <linux/delay.h>
35#include <acpi/acpi.h>
36
37#include "apei-internal.h"
38
39#define EINJ_PFX "EINJ: "
40
41#define SPIN_UNIT 100 /* 100ns */
/* Firmware should respond within 1 millisecond */
43#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
44
45/*
46 * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
47 * EINJ table through an unpublished extension. Use with caution as
48 * most will ignore the parameter and make their own choice of address
49 * for error injection.
50 */
51struct einj_parameter {
52 u64 type;
53 u64 reserved1;
54 u64 reserved2;
55 u64 param1;
56 u64 param2;
57};
58
59#define EINJ_OP_BUSY 0x1
60#define EINJ_STATUS_SUCCESS 0x0
61#define EINJ_STATUS_FAIL 0x1
62#define EINJ_STATUS_INVAL 0x2
63
64#define EINJ_TAB_ENTRY(tab) \
65 ((struct acpi_whea_header *)((char *)(tab) + \
66 sizeof(struct acpi_table_einj)))
67
68static struct acpi_table_einj *einj_tab;
69
70static struct apei_resources einj_resources;
71
72static struct apei_exec_ins_type einj_ins_type[] = {
73 [ACPI_EINJ_READ_REGISTER] = {
74 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
75 .run = apei_exec_read_register,
76 },
77 [ACPI_EINJ_READ_REGISTER_VALUE] = {
78 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
79 .run = apei_exec_read_register_value,
80 },
81 [ACPI_EINJ_WRITE_REGISTER] = {
82 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
83 .run = apei_exec_write_register,
84 },
85 [ACPI_EINJ_WRITE_REGISTER_VALUE] = {
86 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
87 .run = apei_exec_write_register_value,
88 },
89 [ACPI_EINJ_NOOP] = {
90 .flags = 0,
91 .run = apei_exec_noop,
92 },
93};
94
95/*
96 * Prevent EINJ interpreter to run simultaneously, because the
97 * corresponding firmware implementation may not work properly when
98 * invoked simultaneously.
99 */
100static DEFINE_MUTEX(einj_mutex);
101
102static struct einj_parameter *einj_param;
103
/* Bind an execution context to the EINJ action table and instruction set. */
static void einj_exec_ctx_init(struct apei_exec_context *ctx)
{
	apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
			   EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
}
109
110static int __einj_get_available_error_type(u32 *type)
111{
112 struct apei_exec_context ctx;
113 int rc;
114
115 einj_exec_ctx_init(&ctx);
116 rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
117 if (rc)
118 return rc;
119 *type = apei_exec_ctx_get_output(&ctx);
120
121 return 0;
122}
123
/* Get error injection capabilities of the platform */
static int einj_get_available_error_type(u32 *type)
{
	int rc;

	/* Serialize EINJ interpreter runs (see einj_mutex above) */
	mutex_lock(&einj_mutex);
	rc = __einj_get_available_error_type(type);
	mutex_unlock(&einj_mutex);

	return rc;
}
135
/*
 * Spin-wait bookkeeping: consume one SPIN_UNIT from the remaining
 * budget *t and busy-wait that long.  Returns 1 (with a warning) once
 * the budget is exhausted, 0 otherwise.
 */
static int einj_timedout(u64 *t)
{
	if ((s64)*t < SPIN_UNIT) {
		pr_warning(FW_WARN EINJ_PFX
			   "Firmware does not respond in time\n");
		return 1;
	}
	*t -= SPIN_UNIT;
	ndelay(SPIN_UNIT);
	/* Long busy-waits must not trip the NMI watchdog */
	touch_nmi_watchdog();
	return 0;
}
148
149static u64 einj_get_parameter_address(void)
150{
151 int i;
152 u64 paddr = 0;
153 struct acpi_whea_header *entry;
154
155 entry = EINJ_TAB_ENTRY(einj_tab);
156 for (i = 0; i < einj_tab->entries; i++) {
157 if (entry->action == ACPI_EINJ_SET_ERROR_TYPE &&
158 entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
159 entry->register_region.space_id ==
160 ACPI_ADR_SPACE_SYSTEM_MEMORY)
161 memcpy(&paddr, &entry->register_region.address,
162 sizeof(paddr));
163 entry++;
164 }
165
166 return paddr;
167}
168
169/* do sanity check to trigger table */
170static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
171{
172 if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
173 return -EINVAL;
174 if (trigger_tab->table_size > PAGE_SIZE ||
175 trigger_tab->table_size <= trigger_tab->header_size)
176 return -EINVAL;
177 if (trigger_tab->entry_count !=
178 (trigger_tab->table_size - trigger_tab->header_size) /
179 sizeof(struct acpi_einj_entry))
180 return -EINVAL;
181
182 return 0;
183}
184
/* Execute instructions in trigger error action table */
static int __einj_error_trigger(u64 trigger_paddr)
{
	struct acpi_einj_trigger *trigger_tab = NULL;
	struct apei_exec_context trigger_ctx;
	struct apei_resources trigger_resources;
	struct acpi_whea_header *trigger_entry;
	struct resource *r;
	u32 table_size;
	int rc = -EIO;

	/* Claim and map just the fixed-size header first */
	r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
			       "APEI EINJ Trigger Table");
	if (!r) {
		pr_err(EINJ_PFX
		"Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
		       (unsigned long long)trigger_paddr,
		       (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
		goto out;
	}
	trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
	if (!trigger_tab) {
		pr_err(EINJ_PFX "Failed to map trigger table!\n");
		goto out_rel_header;
	}
	rc = einj_check_trigger_header(trigger_tab);
	if (rc) {
		pr_warning(FW_BUG EINJ_PFX
			   "The trigger error action table is invalid\n");
		goto out_rel_header;
	}
	rc = -EIO;
	/* Header is valid: claim the remainder and remap the full table */
	table_size = trigger_tab->table_size;
	r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
			       table_size - sizeof(*trigger_tab),
			       "APEI EINJ Trigger Table");
	if (!r) {
		pr_err(EINJ_PFX
"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
		       (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
		       (unsigned long long)trigger_paddr + table_size);
		goto out_rel_header;
	}
	iounmap(trigger_tab);
	trigger_tab = ioremap_cache(trigger_paddr, table_size);
	if (!trigger_tab) {
		pr_err(EINJ_PFX "Failed to map trigger table!\n");
		goto out_rel_entry;
	}
	trigger_entry = (struct acpi_whea_header *)
		((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
	/*
	 * Request the IO regions the trigger instructions touch, minus
	 * those already owned by the EINJ table itself, then run the
	 * TRIGGER_ERROR action.
	 */
	apei_resources_init(&trigger_resources);
	apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
			   ARRAY_SIZE(einj_ins_type),
			   trigger_entry, trigger_tab->entry_count);
	rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
	if (rc)
		goto out_fini;
	rc = apei_resources_sub(&trigger_resources, &einj_resources);
	if (rc)
		goto out_fini;
	rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
	if (rc)
		goto out_fini;
	rc = apei_exec_pre_map_gars(&trigger_ctx);
	if (rc)
		goto out_release;

	rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR);

	/* Unwind strictly in reverse order of acquisition */
	apei_exec_post_unmap_gars(&trigger_ctx);
out_release:
	apei_resources_release(&trigger_resources);
out_fini:
	apei_resources_fini(&trigger_resources);
out_rel_entry:
	release_mem_region(trigger_paddr + sizeof(*trigger_tab),
			   table_size - sizeof(*trigger_tab));
out_rel_header:
	release_mem_region(trigger_paddr, sizeof(*trigger_tab));
out:
	if (trigger_tab)
		iounmap(trigger_tab);

	return rc;
}
271
/*
 * Drive the EINJ action table through one full injection cycle:
 * BEGIN_OPERATION -> SET_ERROR_TYPE -> EXECUTE_OPERATION -> poll
 * CHECK_BUSY_STATUS -> GET_COMMAND_STATUS -> GET_TRIGGER_TABLE and
 * run the trigger table -> END_OPERATION.
 * Serialized by einj_mutex via einj_error_inject().
 */
static int __einj_error_inject(u32 type, u64 param1, u64 param2)
{
	struct apei_exec_context ctx;
	u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
	int rc;

	einj_exec_ctx_init(&ctx);

	rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, type);
	rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
	if (rc)
		return rc;
	/* Optional unpublished extension: pass address hints to firmware */
	if (einj_param) {
		writeq(param1, &einj_param->param1);
		writeq(param2, &einj_param->param2);
	}
	rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
	if (rc)
		return rc;
	/* Busy-poll the firmware, bounded by FIRMWARE_TIMEOUT overall */
	for (;;) {
		rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS);
		if (rc)
			return rc;
		val = apei_exec_ctx_get_output(&ctx);
		if (!(val & EINJ_OP_BUSY))
			break;
		if (einj_timedout(&timeout))
			return -EIO;
	}
	rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS);
	if (rc)
		return rc;
	val = apei_exec_ctx_get_output(&ctx);
	if (val != EINJ_STATUS_SUCCESS)
		return -EBUSY;

	rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
	if (rc)
		return rc;
	trigger_paddr = apei_exec_ctx_get_output(&ctx);
	rc = __einj_error_trigger(trigger_paddr);
	if (rc)
		return rc;
	rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);

	return rc;
}
322
/* Inject the specified hardware error */
static int einj_error_inject(u32 type, u64 param1, u64 param2)
{
	int rc;

	/* Serialize EINJ interpreter runs (see einj_mutex above) */
	mutex_lock(&einj_mutex);
	rc = __einj_error_inject(type, param1, param2);
	mutex_unlock(&einj_mutex);

	return rc;
}
334
335static u32 error_type;
336static u64 error_param1;
337static u64 error_param2;
338static struct dentry *einj_debug_dir;
339
340static int available_error_type_show(struct seq_file *m, void *v)
341{
342 int rc;
343 u32 available_error_type = 0;
344
345 rc = einj_get_available_error_type(&available_error_type);
346 if (rc)
347 return rc;
348 if (available_error_type & 0x0001)
349 seq_printf(m, "0x00000001\tProcessor Correctable\n");
350 if (available_error_type & 0x0002)
351 seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n");
352 if (available_error_type & 0x0004)
353 seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n");
354 if (available_error_type & 0x0008)
355 seq_printf(m, "0x00000008\tMemory Correctable\n");
356 if (available_error_type & 0x0010)
357 seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n");
358 if (available_error_type & 0x0020)
359 seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n");
360 if (available_error_type & 0x0040)
361 seq_printf(m, "0x00000040\tPCI Express Correctable\n");
362 if (available_error_type & 0x0080)
363 seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n");
364 if (available_error_type & 0x0100)
365 seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n");
366 if (available_error_type & 0x0200)
367 seq_printf(m, "0x00000200\tPlatform Correctable\n");
368 if (available_error_type & 0x0400)
369 seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n");
370 if (available_error_type & 0x0800)
371 seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n");
372
373 return 0;
374}
375
/* debugfs open: render available_error_type_show() through seq_file */
static int available_error_type_open(struct inode *inode, struct file *file)
{
	return single_open(file, available_error_type_show, NULL);
}

/* Read-only "available_error_type" debugfs file */
static const struct file_operations available_error_type_fops = {
	.open = available_error_type_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
387
/* debugfs "error_type" read: report the currently selected type */
static int error_type_get(void *data, u64 *val)
{
	*val = error_type;

	return 0;
}
394
/* debugfs "error_type" write: select one firmware-supported type */
static int error_type_set(void *data, u64 val)
{
	int rc;
	u32 available_error_type = 0;

	/* Only one error type can be specified */
	if (val & (val - 1))
		return -EINVAL;
	rc = einj_get_available_error_type(&available_error_type);
	if (rc)
		return rc;
	/* Reject unadvertised types; this also rejects val == 0 */
	if (!(val & available_error_type))
		return -EINVAL;
	error_type = val;

	return 0;
}
412
413DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
414 error_type_set, "0x%llx\n");
415
/* debugfs "error_inject" write: perform an injection of error_type */
static int error_inject_set(void *data, u64 val)
{
	/* A type must have been selected via "error_type" first */
	if (!error_type)
		return -EINVAL;

	return einj_error_inject(error_type, error_param1, error_param2);
}
423
424DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
425 error_inject_set, "%llu\n");
426
427static int einj_check_table(struct acpi_table_einj *einj_tab)
428{
429 if (einj_tab->header_length != sizeof(struct acpi_table_einj))
430 return -EINVAL;
431 if (einj_tab->header.length < sizeof(struct acpi_table_einj))
432 return -EINVAL;
433 if (einj_tab->entries !=
434 (einj_tab->header.length - sizeof(struct acpi_table_einj)) /
435 sizeof(struct acpi_einj_entry))
436 return -EINVAL;
437
438 return 0;
439}
440
/*
 * Locate and validate the EINJ table, expose the debugfs interface,
 * claim/map the GAR regions, and map the optional parameter block.
 */
static int __init einj_init(void)
{
	int rc;
	u64 param_paddr;
	acpi_status status;
	struct dentry *fentry;
	struct apei_exec_context ctx;

	if (acpi_disabled)
		return -ENODEV;

	status = acpi_get_table(ACPI_SIG_EINJ, 0,
				(struct acpi_table_header **)&einj_tab);
	if (status == AE_NOT_FOUND) {
		pr_info(EINJ_PFX "Table is not found!\n");
		return -ENODEV;
	} else if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);
		pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
		return -EINVAL;
	}

	rc = einj_check_table(einj_tab);
	if (rc) {
		pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
		return -EINVAL;
	}

	/* debugfs entries: any creation failure unwinds via err_cleanup */
	rc = -ENOMEM;
	einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
	if (!einj_debug_dir)
		goto err_cleanup;
	fentry = debugfs_create_file("available_error_type", S_IRUSR,
				     einj_debug_dir, NULL,
				     &available_error_type_fops);
	if (!fentry)
		goto err_cleanup;
	fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
				     einj_debug_dir, NULL, &error_type_fops);
	if (!fentry)
		goto err_cleanup;
	fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
				    einj_debug_dir, &error_param1);
	if (!fentry)
		goto err_cleanup;
	fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
				    einj_debug_dir, &error_param2);
	if (!fentry)
		goto err_cleanup;
	fentry = debugfs_create_file("error_inject", S_IWUSR,
				     einj_debug_dir, NULL, &error_inject_fops);
	if (!fentry)
		goto err_cleanup;

	/* Claim the IO regions used by the EINJ GARs, then pre-map them */
	apei_resources_init(&einj_resources);
	einj_exec_ctx_init(&ctx);
	rc = apei_exec_collect_resources(&ctx, &einj_resources);
	if (rc)
		goto err_fini;
	rc = apei_resources_request(&einj_resources, "APEI EINJ");
	if (rc)
		goto err_fini;
	rc = apei_exec_pre_map_gars(&ctx);
	if (rc)
		goto err_release;
	/* Optional SET_ERROR_TYPE parameter extension (see einj_parameter) */
	param_paddr = einj_get_parameter_address();
	if (param_paddr) {
		einj_param = ioremap(param_paddr, sizeof(*einj_param));
		rc = -ENOMEM;
		if (!einj_param)
			goto err_unmap;
	}

	pr_info(EINJ_PFX "Error INJection is initialized.\n");

	return 0;

err_unmap:
	apei_exec_post_unmap_gars(&ctx);
err_release:
	apei_resources_release(&einj_resources);
err_fini:
	apei_resources_fini(&einj_resources);
err_cleanup:
	debugfs_remove_recursive(einj_debug_dir);

	return rc;
}
529
/* Tear down in reverse order of einj_init(). */
static void __exit einj_exit(void)
{
	struct apei_exec_context ctx;

	if (einj_param)
		iounmap(einj_param);
	einj_exec_ctx_init(&ctx);
	apei_exec_post_unmap_gars(&ctx);
	apei_resources_release(&einj_resources);
	apei_resources_fini(&einj_resources);
	debugfs_remove_recursive(einj_debug_dir);
}
542
543module_init(einj_init);
544module_exit(einj_exit);
545
546MODULE_AUTHOR("Huang Ying");
547MODULE_DESCRIPTION("APEI Error INJection support");
548MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
new file mode 100644
index 000000000000..2ebc39115507
--- /dev/null
+++ b/drivers/acpi/apei/erst.c
@@ -0,0 +1,855 @@
1/*
2 * APEI Error Record Serialization Table support
3 *
4 * ERST is a way provided by APEI to save and retrieve hardware error
 * information to and from a persistent store.
6 *
7 * For more information about ERST, please refer to ACPI Specification
8 * version 4.0, section 17.4.
9 *
10 * Copyright 2010 Intel Corp.
11 * Author: Huang Ying <ying.huang@intel.com>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License version
15 * 2 as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/io.h>
32#include <linux/acpi.h>
33#include <linux/uaccess.h>
34#include <linux/cper.h>
35#include <linux/nmi.h>
36#include <acpi/apei.h>
37
38#include "apei-internal.h"
39
40#define ERST_PFX "ERST: "
41
42/* ERST command status */
43#define ERST_STATUS_SUCCESS 0x0
44#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1
45#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2
46#define ERST_STATUS_FAILED 0x3
47#define ERST_STATUS_RECORD_STORE_EMPTY 0x4
48#define ERST_STATUS_RECORD_NOT_FOUND 0x5
49
50#define ERST_TAB_ENTRY(tab) \
51 ((struct acpi_whea_header *)((char *)(tab) + \
52 sizeof(struct acpi_table_erst)))
53
54#define SPIN_UNIT 100 /* 100ns */
/* Firmware should respond within 1 millisecond */
56#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
57#define FIRMWARE_MAX_STALL 50 /* 50us */
58
59int erst_disable;
60EXPORT_SYMBOL_GPL(erst_disable);
61
62static struct acpi_table_erst *erst_tab;
63
64/* ERST Error Log Address Range atrributes */
65#define ERST_RANGE_RESERVED 0x0001
66#define ERST_RANGE_NVRAM 0x0002
67#define ERST_RANGE_SLOW 0x0004
68
69/*
70 * ERST Error Log Address Range, used as buffer for reading/writing
71 * error records.
72 */
73static struct erst_erange {
74 u64 base;
75 u64 size;
76 void __iomem *vaddr;
77 u32 attr;
78} erst_erange;
79
80/*
81 * Prevent ERST interpreter to run simultaneously, because the
82 * corresponding firmware implementation may not work properly when
83 * invoked simultaneously.
84 *
85 * It is used to provide exclusive accessing for ERST Error Log
86 * Address Range too.
87 */
88static DEFINE_SPINLOCK(erst_lock);
89
90static inline int erst_errno(int command_status)
91{
92 switch (command_status) {
93 case ERST_STATUS_SUCCESS:
94 return 0;
95 case ERST_STATUS_HARDWARE_NOT_AVAILABLE:
96 return -ENODEV;
97 case ERST_STATUS_NOT_ENOUGH_SPACE:
98 return -ENOSPC;
99 case ERST_STATUS_RECORD_STORE_EMPTY:
100 case ERST_STATUS_RECORD_NOT_FOUND:
101 return -ENOENT;
102 default:
103 return -EINVAL;
104 }
105}
106
/*
 * Spin-wait bookkeeping: consume @spin_unit nanoseconds from the
 * remaining budget *t and busy-wait that long.  Returns 1 (with a
 * warning) once the budget is exhausted, 0 otherwise.
 */
static int erst_timedout(u64 *t, u64 spin_unit)
{
	if ((s64)*t < spin_unit) {
		pr_warning(FW_WARN ERST_PFX
			   "Firmware does not respond in time\n");
		return 1;
	}
	*t -= spin_unit;
	ndelay(spin_unit);
	/* Long busy-waits must not trip the NMI watchdog */
	touch_nmi_watchdog();
	return 0;
}
119
/* LOAD_VAR1: read the entry's register into ctx->var1 */
static int erst_exec_load_var1(struct apei_exec_context *ctx,
			       struct acpi_whea_header *entry)
{
	return __apei_exec_read_register(entry, &ctx->var1);
}
125
/* LOAD_VAR2: read the entry's register into ctx->var2 */
static int erst_exec_load_var2(struct apei_exec_context *ctx,
			       struct acpi_whea_header *entry)
{
	return __apei_exec_read_register(entry, &ctx->var2);
}
131
/* STORE_VAR1: write ctx->var1 to the entry's register */
static int erst_exec_store_var1(struct apei_exec_context *ctx,
				struct acpi_whea_header *entry)
{
	return __apei_exec_write_register(entry, ctx->var1);
}
137
/* ADD: var1 += var2 (register-free arithmetic instruction) */
static int erst_exec_add(struct apei_exec_context *ctx,
			 struct acpi_whea_header *entry)
{
	ctx->var1 += ctx->var2;
	return 0;
}
144
/* SUBTRACT: var1 -= var2 (register-free arithmetic instruction) */
static int erst_exec_subtract(struct apei_exec_context *ctx,
			      struct acpi_whea_header *entry)
{
	ctx->var1 -= ctx->var2;
	return 0;
}
151
152static int erst_exec_add_value(struct apei_exec_context *ctx,
153 struct acpi_whea_header *entry)
154{
155 int rc;
156 u64 val;
157
158 rc = __apei_exec_read_register(entry, &val);
159 if (rc)
160 return rc;
161 val += ctx->value;
162 rc = __apei_exec_write_register(entry, val);
163 return rc;
164}
165
166static int erst_exec_subtract_value(struct apei_exec_context *ctx,
167 struct acpi_whea_header *entry)
168{
169 int rc;
170 u64 val;
171
172 rc = __apei_exec_read_register(entry, &val);
173 if (rc)
174 return rc;
175 val -= ctx->value;
176 rc = __apei_exec_write_register(entry, val);
177 return rc;
178}
179
/*
 * STALL: busy-delay for ctx->value microseconds, clamped to
 * FIRMWARE_MAX_STALL to stop broken firmware from locking up the CPU.
 * The warning is skipped in NMI context because printk is not NMI-safe.
 */
static int erst_exec_stall(struct apei_exec_context *ctx,
			   struct acpi_whea_header *entry)
{
	u64 stall_time;

	if (ctx->value > FIRMWARE_MAX_STALL) {
		if (!in_nmi())
			pr_warning(FW_WARN ERST_PFX
			"Too long stall time for stall instruction: %llx.\n",
				   ctx->value);
		stall_time = FIRMWARE_MAX_STALL;
	} else
		stall_time = ctx->value;
	udelay(stall_time);
	return 0;
}
196
/*
 * STALL_WHILE_TRUE: poll the instruction's register until it no longer
 * equals ctx->value.  ctx->var1 is the per-iteration stall in
 * microseconds (clamped like STALL); the per-iteration delay itself is
 * performed inside erst_timedout().  Gives up with -EIO after
 * FIRMWARE_TIMEOUT total wait.
 */
static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
				      struct acpi_whea_header *entry)
{
	int rc;
	u64 val;
	u64 timeout = FIRMWARE_TIMEOUT;
	u64 stall_time;

	if (ctx->var1 > FIRMWARE_MAX_STALL) {
		/* printk is not NMI-safe, so stay quiet in NMI context */
		if (!in_nmi())
			pr_warning(FW_WARN ERST_PFX
		"Too long stall time for stall while true instruction: %llx.\n",
				   ctx->var1);
		stall_time = FIRMWARE_MAX_STALL;
	} else
		stall_time = ctx->var1;

	for (;;) {
		rc = __apei_exec_read_register(entry, &val);
		if (rc)
			return rc;
		if (val != ctx->value)
			break;
		if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC))
			return -EIO;
	}
	return 0;
}
225
/*
 * SKIP_NEXT_INSTRUCTION_IF_TRUE: if the register equals ctx->value,
 * advance the instruction pointer past the next instruction (ip += 2:
 * over this one and the next) and tell the interpreter that ip was set
 * explicitly via APEI_EXEC_SET_IP.
 */
static int erst_exec_skip_next_instruction_if_true(
	struct apei_exec_context *ctx,
	struct acpi_whea_header *entry)
{
	int rc;
	u64 val;

	rc = __apei_exec_read_register(entry, &val);
	if (rc)
		return rc;
	if (val == ctx->value) {
		ctx->ip += 2;
		return APEI_EXEC_SET_IP;
	}

	return 0;
}
243
/*
 * GOTO: unconditional jump — set the instruction pointer to ctx->value.
 * NOTE(review): ctx->value comes straight from the firmware table and is
 * not range-checked here; the interpreter is trusted to bound ip.
 */
static int erst_exec_goto(struct apei_exec_context *ctx,
			  struct acpi_whea_header *entry)
{
	ctx->ip = ctx->value;
	return APEI_EXEC_SET_IP;
}
250
/* SET_SRC_ADDRESS_BASE: record the MOVE_DATA source base in the context. */
static int erst_exec_set_src_address_base(struct apei_exec_context *ctx,
					  struct acpi_whea_header *entry)
{
	return __apei_exec_read_register(entry, &ctx->src_base);
}
256
/* SET_DST_ADDRESS_BASE: record the MOVE_DATA destination base in the context. */
static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx,
					  struct acpi_whea_header *entry)
{
	return __apei_exec_read_register(entry, &ctx->dst_base);
}
262
263static int erst_exec_move_data(struct apei_exec_context *ctx,
264 struct acpi_whea_header *entry)
265{
266 int rc;
267 u64 offset;
268
269 rc = __apei_exec_read_register(entry, &offset);
270 if (rc)
271 return rc;
272 memmove((void *)ctx->dst_base + offset,
273 (void *)ctx->src_base + offset,
274 ctx->var2);
275
276 return 0;
277}
278
/*
 * Dispatch table mapping each ERST serialization instruction opcode to
 * its implementation.  Entries flagged APEI_EXEC_INS_ACCESS_REGISTER
 * touch a firmware register, so the APEI core can collect and pre-map
 * their resources before execution.
 */
static struct apei_exec_ins_type erst_ins_type[] = {
	[ACPI_ERST_READ_REGISTER] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = apei_exec_read_register,
	},
	[ACPI_ERST_READ_REGISTER_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = apei_exec_read_register_value,
	},
	[ACPI_ERST_WRITE_REGISTER] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = apei_exec_write_register,
	},
	[ACPI_ERST_WRITE_REGISTER_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = apei_exec_write_register_value,
	},
	[ACPI_ERST_NOOP] = {
		.flags = 0,
		.run = apei_exec_noop,
	},
	[ACPI_ERST_LOAD_VAR1] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_load_var1,
	},
	[ACPI_ERST_LOAD_VAR2] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_load_var2,
	},
	[ACPI_ERST_STORE_VAR1] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_store_var1,
	},
	[ACPI_ERST_ADD] = {
		.flags = 0,
		.run = erst_exec_add,
	},
	[ACPI_ERST_SUBTRACT] = {
		.flags = 0,
		.run = erst_exec_subtract,
	},
	[ACPI_ERST_ADD_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_add_value,
	},
	[ACPI_ERST_SUBTRACT_VALUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_subtract_value,
	},
	[ACPI_ERST_STALL] = {
		.flags = 0,
		.run = erst_exec_stall,
	},
	[ACPI_ERST_STALL_WHILE_TRUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_stall_while_true,
	},
	[ACPI_ERST_SKIP_NEXT_IF_TRUE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_skip_next_instruction_if_true,
	},
	[ACPI_ERST_GOTO] = {
		.flags = 0,
		.run = erst_exec_goto,
	},
	[ACPI_ERST_SET_SRC_ADDRESS_BASE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_set_src_address_base,
	},
	[ACPI_ERST_SET_DST_ADDRESS_BASE] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_set_dst_address_base,
	},
	[ACPI_ERST_MOVE_DATA] = {
		.flags = APEI_EXEC_INS_ACCESS_REGISTER,
		.run = erst_exec_move_data,
	},
};
357
/* Initialize an APEI execution context with the ERST instruction set
 * and the action entries parsed from the ERST table. */
static inline void erst_exec_ctx_init(struct apei_exec_context *ctx)
{
	apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
			   ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
}
363
/*
 * Query the firmware for the Error Log Address Range: its base physical
 * address, size and attributes (e.g. ERST_RANGE_NVRAM).  Returns 0 on
 * success or the error from the first failing firmware action.
 */
static int erst_get_erange(struct erst_erange *range)
{
	struct apei_exec_context ctx;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
	if (rc)
		return rc;
	range->base = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH);
	if (rc)
		return rc;
	range->size = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES);
	if (rc)
		return rc;
	range->attr = apei_exec_ctx_get_output(&ctx);

	return 0;
}
385
/*
 * Ask the firmware how many error records are in persistent storage.
 * Caller must hold erst_lock.  Returns the count, or a negative error
 * code if the firmware action fails.
 */
static ssize_t __erst_get_record_count(void)
{
	struct apei_exec_context ctx;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
	if (rc)
		return rc;
	return apei_exec_ctx_get_output(&ctx);
}
397
/*
 * Public entry: number of error records in persistent storage.
 * Returns -ENODEV when ERST is disabled, otherwise the record count or
 * a negative error code.  Serialized against all other ERST operations
 * by erst_lock.
 */
ssize_t erst_get_record_count(void)
{
	ssize_t count;
	unsigned long flags;

	if (erst_disable)
		return -ENODEV;

	spin_lock_irqsave(&erst_lock, flags);
	count = __erst_get_record_count();
	spin_unlock_irqrestore(&erst_lock, flags);

	return count;
}
EXPORT_SYMBOL_GPL(erst_get_record_count);
413
/*
 * Fetch the ID of an existing record from firmware.  Caller must hold
 * erst_lock.  *record_id is only valid when 0 is returned.
 */
static int __erst_get_next_record_id(u64 *record_id)
{
	struct apei_exec_context ctx;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID);
	if (rc)
		return rc;
	*record_id = apei_exec_ctx_get_output(&ctx);

	return 0;
}
427
428/*
429 * Get the record ID of an existing error record on the persistent
430 * storage. If there is no error record on the persistent storage, the
431 * returned record_id is APEI_ERST_INVALID_RECORD_ID.
432 */
/*
 * Public entry for __erst_get_next_record_id(): takes erst_lock and
 * returns -ENODEV when ERST is disabled.  On success *record_id is an
 * existing record's ID, or APEI_ERST_INVALID_RECORD_ID when storage is
 * empty (see comment above).
 */
int erst_get_next_record_id(u64 *record_id)
{
	int rc;
	unsigned long flags;

	if (erst_disable)
		return -ENODEV;

	spin_lock_irqsave(&erst_lock, flags);
	rc = __erst_get_next_record_id(record_id);
	spin_unlock_irqrestore(&erst_lock, flags);

	return rc;
}
EXPORT_SYMBOL_GPL(erst_get_next_record_id);
448
/*
 * Write the record already staged in the error log address range to
 * persistent storage.  Caller must hold erst_lock and have copied the
 * record into erst_erange.vaddr beforehand; @offset is the record's
 * offset within the range.
 *
 * Sequence mandated by the ERST spec: BEGIN_WRITE, SET_RECORD_OFFSET,
 * EXECUTE_OPERATION, poll CHECK_BUSY_STATUS until clear (bounded by
 * FIRMWARE_TIMEOUT), read GET_COMMAND_STATUS, then END.  The firmware
 * command status is translated to an errno by erst_errno().
 */
static int __erst_write_to_storage(u64 offset)
{
	struct apei_exec_context ctx;
	u64 timeout = FIRMWARE_TIMEOUT;
	u64 val;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, offset);
	rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
	if (rc)
		return rc;
	rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
	if (rc)
		return rc;
	for (;;) {
		rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
		if (rc)
			return rc;
		val = apei_exec_ctx_get_output(&ctx);
		if (!val)
			break;
		if (erst_timedout(&timeout, SPIN_UNIT))
			return -EIO;
	}
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
	if (rc)
		return rc;
	/* save command status before END may clobber the output */
	val = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_END);
	if (rc)
		return rc;

	return erst_errno(val);
}
487
488static int __erst_read_from_storage(u64 record_id, u64 offset)
489{
490 struct apei_exec_context ctx;
491 u64 timeout = FIRMWARE_TIMEOUT;
492 u64 val;
493 int rc;
494
495 erst_exec_ctx_init(&ctx);
496 rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
497 if (rc)
498 return rc;
499 apei_exec_ctx_set_input(&ctx, offset);
500 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
501 if (rc)
502 return rc;
503 apei_exec_ctx_set_input(&ctx, record_id);
504 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
505 if (rc)
506 return rc;
507 rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
508 if (rc)
509 return rc;
510 for (;;) {
511 rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
512 if (rc)
513 return rc;
514 val = apei_exec_ctx_get_output(&ctx);
515 if (!val)
516 break;
517 if (erst_timedout(&timeout, SPIN_UNIT))
518 return -EIO;
519 };
520 rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
521 if (rc)
522 return rc;
523 val = apei_exec_ctx_get_output(&ctx);
524 rc = apei_exec_run(&ctx, ACPI_ERST_END);
525 if (rc)
526 return rc;
527
528 return erst_errno(val);
529}
530
/*
 * Delete record @record_id from persistent storage.  Caller must hold
 * erst_lock.  Same command sequence as read/write: BEGIN_CLEAR,
 * SET_RECORD_ID, EXECUTE_OPERATION, poll CHECK_BUSY_STATUS (bounded by
 * FIRMWARE_TIMEOUT), GET_COMMAND_STATUS, END; firmware status is
 * translated by erst_errno().
 */
static int __erst_clear_from_storage(u64 record_id)
{
	struct apei_exec_context ctx;
	u64 timeout = FIRMWARE_TIMEOUT;
	u64 val;
	int rc;

	erst_exec_ctx_init(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
	if (rc)
		return rc;
	apei_exec_ctx_set_input(&ctx, record_id);
	rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
	if (rc)
		return rc;
	rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
	if (rc)
		return rc;
	for (;;) {
		rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
		if (rc)
			return rc;
		val = apei_exec_ctx_get_output(&ctx);
		if (!val)
			break;
		if (erst_timedout(&timeout, SPIN_UNIT))
			return -EIO;
	}
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
	if (rc)
		return rc;
	/* save command status before END may clobber the output */
	val = apei_exec_ctx_get_output(&ctx);
	rc = apei_exec_run(&ctx, ACPI_ERST_END);
	if (rc)
		return rc;

	return erst_errno(val);
}
569
570/* NVRAM ERST Error Log Address Range is not supported yet */
/* Rate-limited "not implemented" warning for the NVRAM code paths below. */
static void pr_unimpl_nvram(void)
{
	if (printk_ratelimit())
		pr_warning(ERST_PFX
		"NVRAM ERST Log Address Range is not implemented yet\n");
}
577
/* NVRAM write stub; always -ENOSYS.  Deliberately silent: erst_write()
 * may run in NMI context where printk is unsafe. */
static int __erst_write_to_nvram(const struct cper_record_header *record)
{
	/* do not print message, because printk is not safe for NMI */
	return -ENOSYS;
}
583
/* NVRAM read stub; warns (rate-limited) and returns -ENOSYS. */
static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset)
{
	pr_unimpl_nvram();
	return -ENOSYS;
}
589
/* NVRAM clear stub; warns (rate-limited) and returns -ENOSYS. */
static int __erst_clear_from_nvram(u64 record_id)
{
	pr_unimpl_nvram();
	return -ENOSYS;
}
595
/*
 * Write a CPER record to persistent storage.
 *
 * Uses spin_trylock_irqsave() rather than blocking: this path may be
 * called from NMI/panic-like contexts where spinning on a lock held by
 * the interrupted CPU would deadlock, hence -EBUSY on contention.
 * The record must carry the CPER signature; -EINVAL otherwise, or when
 * it does not fit into the error log address range.
 */
int erst_write(const struct cper_record_header *record)
{
	int rc;
	unsigned long flags;
	struct cper_record_header *rcd_erange;

	if (erst_disable)
		return -ENODEV;

	if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE))
		return -EINVAL;

	if (erst_erange.attr & ERST_RANGE_NVRAM) {
		if (!spin_trylock_irqsave(&erst_lock, flags))
			return -EBUSY;
		rc = __erst_write_to_nvram(record);
		spin_unlock_irqrestore(&erst_lock, flags);
		return rc;
	}

	if (record->record_length > erst_erange.size)
		return -EINVAL;

	if (!spin_trylock_irqsave(&erst_lock, flags))
		return -EBUSY;
	/* stage the record in the error log address range ... */
	memcpy(erst_erange.vaddr, record, record->record_length);
	rcd_erange = erst_erange.vaddr;
	/* signature for serialization system */
	memcpy(&rcd_erange->persistence_information, "ER", 2);

	/* ... then ask the firmware to persist it from offset 0 */
	rc = __erst_write_to_storage(0);
	spin_unlock_irqrestore(&erst_lock, flags);

	return rc;
}
EXPORT_SYMBOL_GPL(erst_write);
632
/*
 * Bring record @record_id into the error log address range and report
 * its offset within the range.  Caller must hold erst_lock.  The
 * storage path always reads to offset 0.
 */
static int __erst_read_to_erange(u64 record_id, u64 *offset)
{
	int rc;

	if (erst_erange.attr & ERST_RANGE_NVRAM)
		return __erst_read_to_erange_from_nvram(
			record_id, offset);

	rc = __erst_read_from_storage(record_id, 0);
	if (rc)
		return rc;
	*offset = 0;

	return 0;
}
648
/*
 * Read record @record_id into @record.  Caller must hold erst_lock.
 * Returns the record's full length; the copy into @record only happens
 * when it fits in @buflen, so a return value > buflen means "buffer too
 * small, call again with at least this much".
 */
static ssize_t __erst_read(u64 record_id, struct cper_record_header *record,
			   size_t buflen)
{
	int rc;
	u64 offset, len = 0;
	struct cper_record_header *rcd_tmp;

	rc = __erst_read_to_erange(record_id, &offset);
	if (rc)
		return rc;
	rcd_tmp = erst_erange.vaddr + offset;
	len = rcd_tmp->record_length;
	if (len <= buflen)
		memcpy(record, rcd_tmp, len);

	return len;
}
666
667/*
668 * If return value > buflen, the buffer size is not big enough,
669 * else if return value < 0, something goes wrong,
670 * else everything is OK, and return value is record length
671 */
/* Public entry for __erst_read(); see contract in the comment above.
 * Takes erst_lock; -ENODEV when ERST is disabled. */
ssize_t erst_read(u64 record_id, struct cper_record_header *record,
		  size_t buflen)
{
	ssize_t len;
	unsigned long flags;

	if (erst_disable)
		return -ENODEV;

	spin_lock_irqsave(&erst_lock, flags);
	len = __erst_read(record_id, record, buflen);
	spin_unlock_irqrestore(&erst_lock, flags);
	return len;
}
EXPORT_SYMBOL_GPL(erst_read);
687
688/*
689 * If return value > buflen, the buffer size is not big enough,
690 * else if return value = 0, there is no more record to read,
691 * else if return value < 0, something goes wrong,
692 * else everything is OK, and return value is record length
693 */
/*
 * Read "the next" record: fetch a record ID from firmware, then read
 * that record — both under one erst_lock hold so the ID cannot be
 * cleared by another caller in between.  Return contract documented in
 * the comment above.
 */
ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
{
	int rc;
	ssize_t len;
	unsigned long flags;
	u64 record_id;

	if (erst_disable)
		return -ENODEV;

	spin_lock_irqsave(&erst_lock, flags);
	rc = __erst_get_next_record_id(&record_id);
	if (rc) {
		spin_unlock_irqrestore(&erst_lock, flags);
		return rc;
	}
	/* no more record */
	if (record_id == APEI_ERST_INVALID_RECORD_ID) {
		spin_unlock_irqrestore(&erst_lock, flags);
		return 0;
	}

	len = __erst_read(record_id, record, buflen);
	spin_unlock_irqrestore(&erst_lock, flags);

	return len;
}
EXPORT_SYMBOL_GPL(erst_read_next);
722
/*
 * Delete record @record_id from persistent storage.  Takes erst_lock;
 * -ENODEV when ERST is disabled.  Dispatches to the NVRAM stub or the
 * firmware storage path depending on the range attributes.
 */
int erst_clear(u64 record_id)
{
	int rc;
	unsigned long flags;

	if (erst_disable)
		return -ENODEV;

	spin_lock_irqsave(&erst_lock, flags);
	if (erst_erange.attr & ERST_RANGE_NVRAM)
		rc = __erst_clear_from_nvram(record_id);
	else
		rc = __erst_clear_from_storage(record_id);
	spin_unlock_irqrestore(&erst_lock, flags);

	return rc;
}
EXPORT_SYMBOL_GPL(erst_clear);
741
742static int __init setup_erst_disable(char *str)
743{
744 erst_disable = 1;
745 return 0;
746}
747
748__setup("erst_disable", setup_erst_disable);
749
750static int erst_check_table(struct acpi_table_erst *erst_tab)
751{
752 if (erst_tab->header_length != sizeof(struct acpi_table_erst))
753 return -EINVAL;
754 if (erst_tab->header.length < sizeof(struct acpi_table_erst))
755 return -EINVAL;
756 if (erst_tab->entries !=
757 (erst_tab->header.length - sizeof(struct acpi_table_erst)) /
758 sizeof(struct acpi_erst_entry))
759 return -EINVAL;
760
761 return 0;
762}
763
/*
 * Driver init: locate and validate the ERST table, collect and request
 * the firmware register resources, pre-map them (so register access
 * works in atomic context later), discover the error log address range
 * and map it.  Any failure unwinds in reverse order and sets
 * erst_disable so the public API returns -ENODEV from then on.
 */
static int __init erst_init(void)
{
	int rc = 0;
	acpi_status status;
	struct apei_exec_context ctx;
	struct apei_resources erst_resources;
	struct resource *r;

	if (acpi_disabled)
		goto err;

	if (erst_disable) {
		pr_info(ERST_PFX
	"Error Record Serialization Table (ERST) support is disabled.\n");
		goto err;
	}

	status = acpi_get_table(ACPI_SIG_ERST, 0,
				(struct acpi_table_header **)&erst_tab);
	if (status == AE_NOT_FOUND) {
		/* missing table is not an error (rc stays 0) */
		pr_err(ERST_PFX "Table is not found!\n");
		goto err;
	} else if (ACPI_FAILURE(status)) {
		const char *msg = acpi_format_exception(status);
		pr_err(ERST_PFX "Failed to get table, %s\n", msg);
		rc = -EINVAL;
		goto err;
	}

	rc = erst_check_table(erst_tab);
	if (rc) {
		pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
		goto err;
	}

	apei_resources_init(&erst_resources);
	erst_exec_ctx_init(&ctx);
	rc = apei_exec_collect_resources(&ctx, &erst_resources);
	if (rc)
		goto err_fini;
	rc = apei_resources_request(&erst_resources, "APEI ERST");
	if (rc)
		goto err_fini;
	rc = apei_exec_pre_map_gars(&ctx);
	if (rc)
		goto err_release;
	rc = erst_get_erange(&erst_erange);
	if (rc) {
		if (rc == -ENODEV)
			pr_info(ERST_PFX
	"The corresponding hardware device or firmware implementation "
	"is not available.\n");
		else
			pr_err(ERST_PFX
			       "Failed to get Error Log Address Range.\n");
		goto err_unmap_reg;
	}

	r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
	if (!r) {
		pr_err(ERST_PFX
		"Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
		       (unsigned long long)erst_erange.base,
		       (unsigned long long)erst_erange.base + erst_erange.size);
		rc = -EIO;
		goto err_unmap_reg;
	}
	rc = -ENOMEM;
	/* cacheable mapping: the range is ordinary memory used as a staging buffer */
	erst_erange.vaddr = ioremap_cache(erst_erange.base,
					  erst_erange.size);
	if (!erst_erange.vaddr)
		goto err_release_erange;

	pr_info(ERST_PFX
	"Error Record Serialization Table (ERST) support is initialized.\n");

	return 0;

err_release_erange:
	release_mem_region(erst_erange.base, erst_erange.size);
err_unmap_reg:
	apei_exec_post_unmap_gars(&ctx);
err_release:
	apei_resources_release(&erst_resources);
err_fini:
	apei_resources_fini(&erst_resources);
err:
	erst_disable = 1;
	return rc;
}

device_initcall(erst_init);
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
new file mode 100644
index 000000000000..fd0cc016a099
--- /dev/null
+++ b/drivers/acpi/apei/ghes.c
@@ -0,0 +1,427 @@
1/*
2 * APEI Generic Hardware Error Source support
3 *
4 * Generic Hardware Error Source provides a way to report platform
5 * hardware errors (such as that from chipset). It works in so called
6 * "Firmware First" mode, that is, hardware errors are reported to
7 * firmware firstly, then reported to Linux by firmware. This way,
8 * some non-standard hardware error registers or non-standard hardware
9 * link can be checked by firmware to produce more hardware error
10 * information for Linux.
11 *
12 * For more information about Generic Hardware Error Source, please
13 * refer to ACPI Specification version 4.0, section 17.3.2.6
14 *
15 * Now, only SCI notification type and memory errors are
16 * supported. More notification type and hardware error type will be
17 * added later.
18 *
19 * Copyright 2010 Intel Corp.
20 * Author: Huang Ying <ying.huang@intel.com>
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License version
24 * 2 as published by the Free Software Foundation;
25 *
26 * This program is distributed in the hope that it will be useful,
27 * but WITHOUT ANY WARRANTY; without even the implied warranty of
28 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
29 * GNU General Public License for more details.
30 *
31 * You should have received a copy of the GNU General Public License
32 * along with this program; if not, write to the Free Software
33 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
34 */
35
36#include <linux/kernel.h>
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/acpi.h>
40#include <linux/io.h>
41#include <linux/interrupt.h>
42#include <linux/cper.h>
43#include <linux/kdebug.h>
44#include <acpi/apei.h>
45#include <acpi/atomicio.h>
46#include <acpi/hed.h>
47#include <asm/mce.h>
48
49#include "apei-internal.h"
50
51#define GHES_PFX "GHES: "
52
53#define GHES_ESTATUS_MAX_SIZE 65536
54
55/*
56 * One struct ghes is created for each generic hardware error
57 * source.
58 *
59 * It provides the context for APEI hardware error timer/IRQ/SCI/NMI
60 * handler. Handler for one generic hardware error source is only
 * triggered after the previous one is done. So the handler can use the
62 * struct ghes without locking.
63 *
64 * estatus: memory buffer for error status block, allocated during
65 * HEST parsing.
66 */
67#define GHES_TO_CLEAR 0x0001
68
struct ghes {
	struct acpi_hest_generic *generic;	/* HEST error source descriptor */
	struct acpi_hest_generic_status *estatus; /* kernel copy of the error status block */
	struct list_head list;			/* link on a per-notification-type list */
	u64 buffer_paddr;			/* phys addr of firmware's status block */
	unsigned long flags;			/* GHES_* flags, e.g. GHES_TO_CLEAR */
};
76
77/*
78 * Error source lists, one list for each notification method. The
79 * members in lists are struct ghes.
80 *
81 * The list members are only added in HEST parsing and deleted during
82 * module_exit, that is, single-threaded. So no lock is needed for
83 * that.
84 *
85 * But the mutual exclusion is needed between members adding/deleting
86 * and timer/IRQ/SCI/NMI handler, which may traverse the list. RCU is
87 * used for that.
88 */
89static LIST_HEAD(ghes_sci);
90
/*
 * Allocate and set up a struct ghes for one HEST generic error source:
 * pre-map its error status address register (so it can be read in
 * atomic context later) and allocate the status block buffer, clamped
 * to GHES_ESTATUS_MAX_SIZE against absurd firmware values.
 * Returns the new ghes or an ERR_PTR; on failure everything acquired
 * here is undone.
 */
static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);
	ghes->generic = generic;
	INIT_LIST_HEAD(&ghes->list);
	rc = acpi_pre_map_gar(&generic->error_status_address);
	if (rc)
		goto err_free;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warning(FW_WARN GHES_PFX
			   "Error status block length is too long: %u for "
			   "generic hardware error source: %d.\n",
			   error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap;
	}

	return ghes;

err_unmap:
	acpi_post_unmap_gar(&generic->error_status_address);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}
127
/* Undo ghes_new()'s acquisitions (buffer + register mapping); does not
 * free the struct ghes itself — callers do that. */
static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	acpi_post_unmap_gar(&ghes->generic->error_status_address);
}
133
/* Internal severity scale, ordered from benign to fatal. */
enum {
	GHES_SER_NO = 0x0,
	GHES_SER_CORRECTED = 0x1,
	GHES_SER_RECOVERABLE = 0x2,
	GHES_SER_PANIC = 0x3,
};
140
/* Map a CPER severity value to the internal GHES_SER_* scale;
 * unrecognized values are treated as fatal. */
static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SER_INFORMATIONAL:
		return GHES_SER_NO;
	case CPER_SER_CORRECTED:
		return GHES_SER_CORRECTED;
	case CPER_SER_RECOVERABLE:
		return GHES_SER_RECOVERABLE;
	case CPER_SER_FATAL:
		return GHES_SER_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SER_PANIC;
	}
}
157
158/* SCI handler run in work queue, so ioremap can be used here */
/*
 * Copy @len bytes between @buffer and physical address @paddr, in the
 * direction given by @from_phys.  Maps the region with ioremap_cache
 * for the duration, so this must not be called from atomic context
 * (see the comment above: the SCI handler runs in a work queue).
 */
static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				 int from_phys)
{
	void *vaddr;

	vaddr = ioremap_cache(paddr, len);
	if (!vaddr)
		return -ENOMEM;
	if (from_phys)
		memcpy(buffer, vaddr, len);
	else
		memcpy(vaddr, buffer, len);
	iounmap(vaddr);

	return 0;
}
175
/*
 * Read the error status block for @ghes from firmware into
 * ghes->estatus, in two phases: first the fixed-size header (enough to
 * learn the total length), then the rest.  Validates the length against
 * both the header minimum and the source's declared block length, and
 * runs the APEI consistency checks.
 *
 * Returns 0 and sets GHES_TO_CLEAR (so ghes_clear_estatus() knows there
 * is something to acknowledge) on success; -ENOENT when no error is
 * pending; -EIO on a malformed block.  @silent suppresses warnings
 * (intended for contexts where printk is unsafe).
 */
static int ghes_read_estatus(struct ghes *ghes, int silent)
{
	struct acpi_hest_generic *g = ghes->generic;
	u64 buf_paddr;
	u32 len;
	int rc;

	rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
	if (rc) {
		if (!silent && printk_ratelimit())
			pr_warning(FW_WARN GHES_PFX
"Failed to read error status block address for hardware error source: %d.\n",
				   g->header.source_id);
		return -EIO;
	}
	if (!buf_paddr)
		return -ENOENT;

	/* phase 1: fixed header, to discover the full block length */
	rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
				   sizeof(*ghes->estatus), 1);
	if (rc)
		return rc;
	if (!ghes->estatus->block_status)
		return -ENOENT;

	ghes->buffer_paddr = buf_paddr;
	ghes->flags |= GHES_TO_CLEAR;

	rc = -EIO;
	len = apei_estatus_len(ghes->estatus);
	if (len < sizeof(*ghes->estatus))
		goto err_read_block;
	if (len > ghes->generic->error_block_length)
		goto err_read_block;
	if (apei_estatus_check_header(ghes->estatus))
		goto err_read_block;
	/* phase 2: the variable-length remainder of the block */
	rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
				   buf_paddr + sizeof(*ghes->estatus),
				   len - sizeof(*ghes->estatus), 1);
	if (rc)
		return rc;
	if (apei_estatus_check(ghes->estatus))
		goto err_read_block;
	rc = 0;

err_read_block:
	if (rc && !silent)
		pr_warning(FW_WARN GHES_PFX
			   "Failed to read error status block!\n");
	return rc;
}
227
/*
 * Acknowledge the error to firmware by writing a zero block_status back
 * to the firmware buffer — but only if ghes_read_estatus() actually
 * latched one (GHES_TO_CLEAR).  The local copy is zeroed first so the
 * write pushes a cleared status out.
 */
static void ghes_clear_estatus(struct ghes *ghes)
{
	ghes->estatus->block_status = 0;
	if (!(ghes->flags & GHES_TO_CLEAR))
		return;
	ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
			      sizeof(ghes->estatus->block_status), 0);
	ghes->flags &= ~GHES_TO_CLEAR;
}
237
/*
 * Process a status block already read into ghes->estatus: walk its
 * sections and hand platform-memory-error sections to the x86 MCE
 * layer.  Sections of any other type are currently not handled, and a
 * rate-limited warning is printed if nothing in the block was consumed.
 */
static void ghes_do_proc(struct ghes *ghes)
{
	int ser, processed = 0;
	struct acpi_hest_generic_data *gdata;

	ser = ghes_severity(ghes->estatus->error_severity);
	apei_estatus_for_each_section(ghes->estatus, gdata) {
#ifdef CONFIG_X86_MCE
		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				 CPER_SEC_PLATFORM_MEM)) {
			/* section payload follows the gdata header */
			apei_mce_report_mem_error(
				ser == GHES_SER_CORRECTED,
				(struct cper_sec_mem_err *)(gdata+1));
			processed = 1;
		}
#endif
	}

	if (!processed && printk_ratelimit())
		pr_warning(GHES_PFX
		"Unknown error record from generic hardware error source: %d\n",
			   ghes->generic->header.source_id);
}
261
/*
 * Read, process and acknowledge one error source's status block.
 *
 * Fix: propagate the result of ghes_read_estatus() instead of always
 * returning 0.  ghes_notify_sci() uses this return value to decide
 * whether to answer NOTIFY_OK, so returning 0 unconditionally made
 * every source look as if it had a pending error.
 */
static int ghes_proc(struct ghes *ghes)
{
	int rc;

	rc = ghes_read_estatus(ghes, 0);
	if (rc)
		goto out;
	ghes_do_proc(ghes);

out:
	ghes_clear_estatus(ghes);
	return rc;
}
275
/*
 * HED (hardware error device) notifier callback for SCI-notified
 * sources: poll every source on ghes_sci and report NOTIFY_OK if any of
 * them produced an error.  The list is traversed under RCU because
 * entries are added/removed concurrently by HEST parsing/module exit.
 */
static int ghes_notify_sci(struct notifier_block *this,
			   unsigned long event, void *data)
{
	struct ghes *ghes;
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
		if (!ghes_proc(ghes))
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}
291
/* Notifier registered with the ACPI HED driver while ghes_sci is non-empty. */
static struct notifier_block ghes_notifier_sci = {
	.notifier_call = ghes_notify_sci,
};
295
/*
 * apei_hest_parse() callback: for each enabled HEST generic error
 * source, validate its parameters, create a struct ghes and hook it up
 * according to its notification type (only SCI is supported so far;
 * the rest just warn).
 *
 * Note: invalid firmware parameters make this return 0 (skip the
 * source, keep parsing), not an error — only ghes_new() failure aborts
 * the whole parse.
 */
static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data)
{
	struct acpi_hest_generic *generic;
	struct ghes *ghes = NULL;
	int rc = 0;

	if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
		return 0;

	generic = (struct acpi_hest_generic *)hest_hdr;
	if (!generic->enabled)
		return 0;

	if (generic->error_block_length <
	    sizeof(struct acpi_hest_generic_status)) {
		pr_warning(FW_BUG GHES_PFX
"Invalid error block length: %u for generic hardware error source: %d\n",
			   generic->error_block_length,
			   generic->header.source_id);
		goto err;
	}
	if (generic->records_to_preallocate == 0) {
		pr_warning(FW_BUG GHES_PFX
"Invalid records to preallocate: %u for generic hardware error source: %d\n",
			   generic->records_to_preallocate,
			   generic->header.source_id);
		goto err;
	}
	ghes = ghes_new(generic);
	if (IS_ERR(ghes)) {
		rc = PTR_ERR(ghes);
		ghes = NULL;
		goto err;
	}
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		pr_warning(GHES_PFX
"Generic hardware error source: %d notified via POLL is not supported!\n",
			   generic->header.source_id);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
	case ACPI_HEST_NOTIFY_LOCAL:
		pr_warning(GHES_PFX
"Generic hardware error source: %d notified via IRQ is not supported!\n",
			   generic->header.source_id);
		break;
	case ACPI_HEST_NOTIFY_SCI:
		/* register with HED on first SCI source only */
		if (list_empty(&ghes_sci))
			register_acpi_hed_notifier(&ghes_notifier_sci);
		list_add_rcu(&ghes->list, &ghes_sci);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		pr_warning(GHES_PFX
"Generic hardware error source: %d notified via NMI is not supported!\n",
			   generic->header.source_id);
		break;
	default:
		pr_warning(FW_WARN GHES_PFX
	"Unknown notification type: %u for generic hardware error source: %d\n",
			   generic->notify.type, generic->header.source_id);
		break;
	}

	return 0;
err:
	if (ghes)
		ghes_fini(ghes);
	return rc;
}
365
/*
 * Tear down everything hest_ghes_parse() built: unregister the HED
 * notifier, wait for in-flight RCU readers (the SCI handler) to drain,
 * then free all sources.  Single-threaded (init failure path / module
 * exit), so plain list ops after synchronize_rcu() are safe.
 */
static void ghes_cleanup(void)
{
	struct ghes *ghes, *nghes;

	if (!list_empty(&ghes_sci))
		unregister_acpi_hed_notifier(&ghes_notifier_sci);

	synchronize_rcu();

	list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) {
		list_del(&ghes->list);
		ghes_fini(ghes);
		kfree(ghes);
	}
}
381
/*
 * Module init: bail out if ACPI or HEST support is unavailable, parse
 * HEST for generic error sources, and require at least one functional
 * source (currently: SCI-notified) to stay loaded.
 */
static int __init ghes_init(void)
{
	int rc;

	if (acpi_disabled)
		return -ENODEV;

	if (hest_disable) {
		pr_info(GHES_PFX "HEST is not enabled!\n");
		return -EINVAL;
	}

	rc = apei_hest_parse(hest_ghes_parse, NULL);
	if (rc) {
		pr_err(GHES_PFX
		"Error during parsing HEST generic hardware error sources.\n");
		goto err_cleanup;
	}

	if (list_empty(&ghes_sci)) {
		pr_info(GHES_PFX
			"No functional generic hardware error sources.\n");
		rc = -ENODEV;
		goto err_cleanup;
	}

	pr_info(GHES_PFX
		"Generic Hardware Error Source support is initialized.\n");

	return 0;
err_cleanup:
	/* frees any sources created before the failure */
	ghes_cleanup();
	return rc;
}
416
/* Module exit: release all error sources and the HED notifier. */
static void __exit ghes_exit(void)
{
	ghes_cleanup();
}
421
422module_init(ghes_init);
423module_exit(ghes_exit);
424
425MODULE_AUTHOR("Huang Ying");
426MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
427MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
new file mode 100644
index 000000000000..e7f40d362cb3
--- /dev/null
+++ b/drivers/acpi/apei/hest.c
@@ -0,0 +1,173 @@
1/*
 * APEI Hardware Error Source Table support
3 *
4 * HEST describes error sources in detail; communicates operational
5 * parameters (i.e. severity levels, masking bits, and threshold
6 * values) to Linux as necessary. It also allows the BIOS to report
7 * non-standard error sources to Linux (for example, chipset-specific
8 * error registers).
9 *
10 * For more information about HEST, please refer to ACPI Specification
11 * version 4.0, section 17.3.2.
12 *
13 * Copyright 2009 Intel Corp.
14 * Author: Huang Ying <ying.huang@intel.com>
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License version
18 * 2 as published by the Free Software Foundation;
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 */
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/acpi.h>
34#include <linux/kdebug.h>
35#include <linux/highmem.h>
36#include <linux/io.h>
37#include <acpi/apei.h>
38
39#include "apei-internal.h"
40
41#define HEST_PFX "HEST: "
42
43int hest_disable;
44EXPORT_SYMBOL_GPL(hest_disable);
45
46/* HEST table parsing */
47
48static struct acpi_table_hest *hest_tab;
49
/* No-op parse callback: used by hest_init() purely to validate that the
 * table walks cleanly. */
static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data)
{
	return 0;
}
54
/*
 * Fixed structure size per HEST error source type.  -1 marks the two
 * IA32 machine-check types whose length depends on a per-entry bank
 * count and is computed in hest_esrc_len(); unlisted types stay 0
 * (unknown).
 */
static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
	[ACPI_HEST_TYPE_IA32_CHECK] = -1,	/* need further calculation */
	[ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
	[ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi),
	[ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root),
	[ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer),
	[ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
	[ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
};
64
65static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
66{
67 u16 hest_type = hest_hdr->type;
68 int len;
69
70 if (hest_type >= ACPI_HEST_TYPE_RESERVED)
71 return 0;
72
73 len = hest_esrc_len_tab[hest_type];
74
75 if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) {
76 struct acpi_hest_ia_corrected *cmc;
77 cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
78 len = sizeof(*cmc) + cmc->num_hardware_banks *
79 sizeof(struct acpi_hest_ia_error_bank);
80 } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) {
81 struct acpi_hest_ia_machine_check *mc;
82 mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
83 len = sizeof(*mc) + mc->num_hardware_banks *
84 sizeof(struct acpi_hest_ia_error_bank);
85 }
86 BUG_ON(len == -1);
87
88 return len;
89};
90
/*
 * Walk every error source entry in the HEST table, calling @func on
 * each with @data.  Stops and returns on the first non-zero return
 * from @func, on an unknown source type, or when an entry's declared
 * length would overrun the table (firmware bug).
 */
int apei_hest_parse(apei_hest_func_t func, void *data)
{
	struct acpi_hest_header *hest_hdr;
	int i, rc, len;

	if (hest_disable)
		return -EINVAL;

	/* entries start immediately after the fixed table header */
	hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
	for (i = 0; i < hest_tab->error_source_count; i++) {
		len = hest_esrc_len(hest_hdr);
		if (!len) {
			pr_warning(FW_WARN HEST_PFX
				   "Unknown or unused hardware error source "
				   "type: %d for hardware error source: %d.\n",
				   hest_hdr->type, hest_hdr->source_id);
			return -EINVAL;
		}
		if ((void *)hest_hdr + len >
		    (void *)hest_tab + hest_tab->header.length) {
			pr_warning(FW_BUG HEST_PFX
		"Table contents overflow for hardware error source: %d.\n",
				hest_hdr->source_id);
			return -EINVAL;
		}

		rc = func(hest_hdr, data);
		if (rc)
			return rc;

		hest_hdr = (void *)hest_hdr + len;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(apei_hest_parse);
127
128static int __init setup_hest_disable(char *str)
129{
130 hest_disable = 1;
131 return 0;
132}
133
134__setup("hest_disable", setup_hest_disable);
135
136static int __init hest_init(void)
137{
138 acpi_status status;
139 int rc = -ENODEV;
140
141 if (acpi_disabled)
142 goto err;
143
144 if (hest_disable) {
145 pr_info(HEST_PFX "HEST tabling parsing is disabled.\n");
146 goto err;
147 }
148
149 status = acpi_get_table(ACPI_SIG_HEST, 0,
150 (struct acpi_table_header **)&hest_tab);
151 if (status == AE_NOT_FOUND) {
152 pr_info(HEST_PFX "Table is not found!\n");
153 goto err;
154 } else if (ACPI_FAILURE(status)) {
155 const char *msg = acpi_format_exception(status);
156 pr_err(HEST_PFX "Failed to get table, %s\n", msg);
157 rc = -EINVAL;
158 goto err;
159 }
160
161 rc = apei_hest_parse(hest_void_parse, NULL);
162 if (rc)
163 goto err;
164
165 pr_info(HEST_PFX "HEST table parsing is initialized.\n");
166
167 return 0;
168err:
169 hest_disable = 1;
170 return rc;
171}
172
173subsys_initcall(hest_init);
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
new file mode 100644
index 000000000000..814b19249616
--- /dev/null
+++ b/drivers/acpi/atomicio.c
@@ -0,0 +1,360 @@
1/*
2 * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
3 * accessing in atomic context.
4 *
5 * This is used for NMI handler to access IO memory area, because
6 * ioremap/iounmap can not be used in NMI handler. The IO memory area
7 * is pre-mapped in process context and accessed in NMI handler.
8 *
9 * Copyright (C) 2009-2010, Intel Corp.
10 * Author: Huang Ying <ying.huang@intel.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License version
14 * 2 as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/acpi.h>
30#include <linux/io.h>
31#include <linux/kref.h>
32#include <linux/rculist.h>
33#include <linux/interrupt.h>
34#include <acpi/atomicio.h>
35
36#define ACPI_PFX "ACPI: "
37
38static LIST_HEAD(acpi_iomaps);
39/*
40 * Used for mutual exclusion between writers of acpi_iomaps list, for
41 * synchronization between readers and writer, RCU is used.
42 */
43static DEFINE_SPINLOCK(acpi_iomaps_lock);
44
/* One pre-mapped physical IO memory area */
struct acpi_iomap {
	struct list_head list;	/* link in acpi_iomaps, RCU-protected */
	void __iomem *vaddr;	/* kernel virtual base of the mapping */
	unsigned long size;	/* mapped length in bytes (whole pages) */
	phys_addr_t paddr;	/* physical base (set page-aligned in acpi_pre_map) */
	struct kref ref;	/* count of acpi_pre_map() users */
};
52
53/* acpi_iomaps_lock or RCU read lock must be held before calling */
54static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
55 unsigned long size)
56{
57 struct acpi_iomap *map;
58
59 list_for_each_entry_rcu(map, &acpi_iomaps, list) {
60 if (map->paddr + map->size >= paddr + size &&
61 map->paddr <= paddr)
62 return map;
63 }
64 return NULL;
65}
66
67/*
68 * Atomic "ioremap" used by NMI handler, if the specified IO memory
69 * area is not pre-mapped, NULL will be returned.
70 *
71 * acpi_iomaps_lock or RCU read lock must be held before calling
72 */
73static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
74 unsigned long size)
75{
76 struct acpi_iomap *map;
77
78 map = __acpi_find_iomap(paddr, size);
79 if (map)
80 return map->vaddr + (paddr - map->paddr);
81 else
82 return NULL;
83}
84
85/* acpi_iomaps_lock must be held before calling */
86static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
87 unsigned long size)
88{
89 struct acpi_iomap *map;
90
91 map = __acpi_find_iomap(paddr, size);
92 if (map) {
93 kref_get(&map->ref);
94 return map->vaddr + (paddr - map->paddr);
95 } else
96 return NULL;
97}
98
/*
 * Used to pre-map the specified IO memory area. First try to find
 * whether the area is already pre-mapped, if it is, increase the
 * reference count (in __acpi_try_ioremap) and return; otherwise, do
 * the real ioremap, and add the mapping into acpi_iomaps list.
 *
 * Returns the virtual address for paddr, or NULL on ioremap/alloc
 * failure.  Must be called from process context (GFP_KERNEL, ioremap).
 */
static void __iomem *acpi_pre_map(phys_addr_t paddr,
				  unsigned long size)
{
	void __iomem *vaddr;
	struct acpi_iomap *map;
	unsigned long pg_sz, flags;
	phys_addr_t pg_off;

	/* Fast path: already mapped, just take another reference. */
	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
	if (vaddr)
		return vaddr;

	/* Round the request out to whole pages before mapping. */
	pg_off = paddr & PAGE_MASK;
	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
	vaddr = ioremap(pg_off, pg_sz);
	if (!vaddr)
		return NULL;
	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		goto err_unmap;
	INIT_LIST_HEAD(&map->list);
	map->paddr = pg_off;
	map->size = pg_sz;
	map->vaddr = vaddr;
	kref_init(&map->ref);

	/*
	 * Re-check under the lock: another thread may have created the
	 * same mapping while the lock was dropped.  If so, discard our
	 * duplicate and use the winner's mapping.
	 */
	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	if (vaddr) {
		spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
		iounmap(map->vaddr);
		kfree(map);
		return vaddr;
	}
	list_add_tail_rcu(&map->list, &acpi_iomaps);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

	return vaddr + (paddr - pg_off);
err_unmap:
	iounmap(vaddr);
	return NULL;
}
149
/*
 * kref release callback: unlink the mapping from acpi_iomaps.  The
 * actual iounmap/kfree is done by the caller (acpi_post_unmap) after
 * an RCU grace period.
 *
 * acpi_iomaps_lock must be held before calling
 */
static void __acpi_kref_del_iomap(struct kref *ref)
{
	struct acpi_iomap *map;

	map = container_of(ref, struct acpi_iomap, ref);
	list_del_rcu(&map->list);
}
158
/*
 * Used to post-unmap the specified IO memory area. The iounmap is
 * done only if the reference count goes zero.
 */
static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
{
	struct acpi_iomap *map;
	unsigned long flags;
	int del;

	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	map = __acpi_find_iomap(paddr, size);
	/* Unbalanced unmap is a caller bug, not a recoverable error. */
	BUG_ON(!map);
	/* kref_put() returns nonzero iff the last reference was dropped */
	del = kref_put(&map->ref, __acpi_kref_del_iomap);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

	if (!del)
		return;

	/*
	 * The entry is already off the list (__acpi_kref_del_iomap);
	 * wait for all RCU readers to leave their read-side critical
	 * sections before tearing down the mapping they may still use.
	 */
	synchronize_rcu();
	iounmap(map->vaddr);
	kfree(map);
}
182
/*
 * Validate a Generic Address Structure and copy out its physical
 * address.  Only 8/16/32/64-bit registers in system memory or system
 * IO space are accepted; anything else is reported as a firmware bug
 * (unless silent) and rejected with -EINVAL.
 *
 * In NMI handler, should set silent = 1 (printk is not NMI-safe).
 */
static int acpi_check_gar(struct acpi_generic_address *reg,
			  u64 *paddr, int silent)
{
	u32 width, space_id;

	width = reg->bit_width;
	space_id = reg->space_id;
	/* Handle possible alignment issues */
	memcpy(paddr, &reg->address, sizeof(*paddr));
	if (!*paddr) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
			"Invalid physical address in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
				   "Invalid bit width in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
			"Invalid address space type in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	return 0;
}
220
221/* Pre-map, working on GAR */
222int acpi_pre_map_gar(struct acpi_generic_address *reg)
223{
224 u64 paddr;
225 void __iomem *vaddr;
226 int rc;
227
228 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
229 return 0;
230
231 rc = acpi_check_gar(reg, &paddr, 0);
232 if (rc)
233 return rc;
234
235 vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
236 if (!vaddr)
237 return -EIO;
238
239 return 0;
240}
241EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
242
243/* Post-unmap, working on GAR */
244int acpi_post_unmap_gar(struct acpi_generic_address *reg)
245{
246 u64 paddr;
247 int rc;
248
249 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
250 return 0;
251
252 rc = acpi_check_gar(reg, &paddr, 0);
253 if (rc)
254 return rc;
255
256 acpi_post_unmap(paddr, reg->bit_width / 8);
257
258 return 0;
259}
260EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
261
262/*
263 * Can be used in atomic (including NMI) or process context. RCU read
264 * lock can only be released after the IO memory area accessing.
265 */
266static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
267{
268 void __iomem *addr;
269
270 rcu_read_lock();
271 addr = __acpi_ioremap_fast(paddr, width);
272 switch (width) {
273 case 8:
274 *val = readb(addr);
275 break;
276 case 16:
277 *val = readw(addr);
278 break;
279 case 32:
280 *val = readl(addr);
281 break;
282 case 64:
283 *val = readq(addr);
284 break;
285 default:
286 return -EINVAL;
287 }
288 rcu_read_unlock();
289
290 return 0;
291}
292
293static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
294{
295 void __iomem *addr;
296
297 rcu_read_lock();
298 addr = __acpi_ioremap_fast(paddr, width);
299 switch (width) {
300 case 8:
301 writeb(val, addr);
302 break;
303 case 16:
304 writew(val, addr);
305 break;
306 case 32:
307 writel(val, addr);
308 break;
309 case 64:
310 writeq(val, addr);
311 break;
312 default:
313 return -EINVAL;
314 }
315 rcu_read_unlock();
316
317 return 0;
318}
319
/* GAR accessing in atomic (including NMI) or process context */
int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	/* silent=1: may run in NMI context, no printk from here */
	rc = acpi_check_gar(reg, &paddr, 1);
	if (rc)
		return rc;

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return acpi_atomic_read_mem(paddr, val, reg->bit_width);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * NOTE(review): the port read fills at most the low 32
		 * bits of *val; a 64-bit bit_width GAR in IO space would
		 * be truncated -- confirm callers never pass one.
		 */
		return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
	default:
		/* acpi_check_gar() only passes memory/IO spaces */
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(acpi_atomic_read);
341
/*
 * Write a value through a Generic Address Structure in atomic
 * (including NMI) or process context.
 */
int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	/* silent=1: may run in NMI context, no printk from here */
	rc = acpi_check_gar(reg, &paddr, 1);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return acpi_atomic_write_mem(paddr, val, reg->bit_width);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return acpi_os_write_port(paddr, val, reg->bit_width);
	default:
		/* acpi_check_gar() only passes memory/IO spaces */
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(acpi_atomic_write);
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 743576bf1bd7..c1d23cd71652 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -69,6 +69,44 @@ static struct dmi_system_id __cpuinitdata power_nocheck_dmi_table[] = {
69}; 69};
70 70
71 71
#ifdef CONFIG_X86
/*
 * DMI callback: make ACPICA work on a RAM copy of the DSDT instead of
 * the firmware-provided one (the matched BIOSes corrupt the DSDT,
 * see the bugzilla reference in dsdt_dmi_table below).
 */
static int set_copy_dsdt(const struct dmi_system_id *id)
{
	printk(KERN_NOTICE "%s detected - "
		"force copy of DSDT to local memory\n", id->ident);
	acpi_gbl_copy_dsdt_locally = 1;
	return 0;
}
80
81static struct dmi_system_id dsdt_dmi_table[] __initdata = {
82 /*
83 * Insyde BIOS on some TOSHIBA machines corrupt the DSDT.
84 * https://bugzilla.kernel.org/show_bug.cgi?id=14679
85 */
86 {
87 .callback = set_copy_dsdt,
88 .ident = "TOSHIBA Satellite A505",
89 .matches = {
90 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
91 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite A505"),
92 },
93 },
94 {
95 .callback = set_copy_dsdt,
96 .ident = "TOSHIBA Satellite L505D",
97 .matches = {
98 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
99 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L505D"),
100 },
101 },
102 {}
103};
104#else
105static struct dmi_system_id dsdt_dmi_table[] __initdata = {
106 {}
107};
108#endif
109
72/* -------------------------------------------------------------------------- 110/* --------------------------------------------------------------------------
73 Device Management 111 Device Management
74 -------------------------------------------------------------------------- */ 112 -------------------------------------------------------------------------- */
@@ -363,11 +401,6 @@ static void acpi_print_osc_error(acpi_handle handle,
363 printk("\n"); 401 printk("\n");
364} 402}
365 403
366static u8 hex_val(unsigned char c)
367{
368 return isdigit(c) ? c - '0' : toupper(c) - 'A' + 10;
369}
370
371static acpi_status acpi_str_to_uuid(char *str, u8 *uuid) 404static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
372{ 405{
373 int i; 406 int i;
@@ -384,8 +417,8 @@ static acpi_status acpi_str_to_uuid(char *str, u8 *uuid)
384 return AE_BAD_PARAMETER; 417 return AE_BAD_PARAMETER;
385 } 418 }
386 for (i = 0; i < 16; i++) { 419 for (i = 0; i < 16; i++) {
387 uuid[i] = hex_val(str[opc_map_to_uuid[i]]) << 4; 420 uuid[i] = hex_to_bin(str[opc_map_to_uuid[i]]) << 4;
388 uuid[i] |= hex_val(str[opc_map_to_uuid[i] + 1]); 421 uuid[i] |= hex_to_bin(str[opc_map_to_uuid[i] + 1]);
389 } 422 }
390 return AE_OK; 423 return AE_OK;
391} 424}
@@ -813,6 +846,12 @@ void __init acpi_early_init(void)
813 846
814 acpi_gbl_permanent_mmap = 1; 847 acpi_gbl_permanent_mmap = 1;
815 848
849 /*
850 * If the machine falls into the DMI check table,
851 * DSDT will be copied to memory
852 */
853 dmi_check_system(dsdt_dmi_table);
854
816 status = acpi_reallocate_root_table(); 855 status = acpi_reallocate_root_table();
817 if (ACPI_FAILURE(status)) { 856 if (ACPI_FAILURE(status)) {
818 printk(KERN_ERR PREFIX 857 printk(KERN_ERR PREFIX
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 3f01f065b533..5f2027d782e8 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1037,10 +1037,9 @@ int __init acpi_ec_ecdt_probe(void)
1037 /* Don't trust ECDT, which comes from ASUSTek */ 1037 /* Don't trust ECDT, which comes from ASUSTek */
1038 if (!EC_FLAGS_VALIDATE_ECDT) 1038 if (!EC_FLAGS_VALIDATE_ECDT)
1039 goto install; 1039 goto install;
1040 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 1040 saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
1041 if (!saved_ec) 1041 if (!saved_ec)
1042 return -ENOMEM; 1042 return -ENOMEM;
1043 memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
1044 /* fall through */ 1043 /* fall through */
1045 } 1044 }
1046 1045
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
new file mode 100644
index 000000000000..d0c1967f7597
--- /dev/null
+++ b/drivers/acpi/hed.c
@@ -0,0 +1,112 @@
1/*
2 * ACPI Hardware Error Device (PNP0C33) Driver
3 *
4 * Copyright (C) 2010, Intel Corp.
5 * Author: Huang Ying <ying.huang@intel.com>
6 *
7 * ACPI Hardware Error Device is used to report some hardware errors
8 * notified via SCI, mainly the corrected errors.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version
12 * 2 as published by the Free Software Foundation;
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/acpi.h>
28#include <acpi/acpi_bus.h>
29#include <acpi/acpi_drivers.h>
30#include <acpi/hed.h>
31
32static struct acpi_device_id acpi_hed_ids[] = {
33 {"PNP0C33", 0},
34 {"", 0},
35};
36MODULE_DEVICE_TABLE(acpi, acpi_hed_ids);
37
38static acpi_handle hed_handle;
39
40static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list);
41
/*
 * Register a callback to be invoked whenever the Hardware Error
 * Device (PNP0C33) receives an error-notification SCI.
 */
int register_acpi_hed_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&acpi_hed_notify_list, nb);
}
EXPORT_SYMBOL_GPL(register_acpi_hed_notifier);
47
/* Remove a callback previously added with register_acpi_hed_notifier() */
void unregister_acpi_hed_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb);
}
EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier);
53
/*
 * SCI to report hardware error is forwarded to the listeners of HED,
 * it is used by HEST Generic Hardware Error Source with notify type
 * SCI.
 */
static void acpi_hed_notify(struct acpi_device *device, u32 event)
{
	/* No payload: listeners poll their error sources themselves. */
	blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
}
63
/* Bind to the (single) PNP0C33 device present in the ACPI namespace */
static int __devinit acpi_hed_add(struct acpi_device *device)
{
	/* Only one hardware error device */
	if (hed_handle)
		return -EINVAL;
	hed_handle = device->handle;
	return 0;
}
72
/* Unbind: clear the handle so a PNP0C33 device could bind again */
static int __devexit acpi_hed_remove(struct acpi_device *device, int type)
{
	hed_handle = NULL;
	return 0;
}
78
/* ACPI driver object matching the PNP0C33 Hardware Error Device */
static struct acpi_driver acpi_hed_driver = {
	.name = "hardware_error_device",
	.class = "hardware_error",
	.ids = acpi_hed_ids,
	.ops = {
		.add = acpi_hed_add,
		.remove = acpi_hed_remove,
		.notify = acpi_hed_notify,
	},
};
89
90static int __init acpi_hed_init(void)
91{
92 if (acpi_disabled)
93 return -ENODEV;
94
95 if (acpi_bus_register_driver(&acpi_hed_driver) < 0)
96 return -ENODEV;
97
98 return 0;
99}
100
/* Module exit: detach the HED driver from the ACPI bus */
static void __exit acpi_hed_exit(void)
{
	acpi_bus_unregister_driver(&acpi_hed_driver);
}
105
106module_init(acpi_hed_init);
107module_exit(acpi_hed_exit);
108
109ACPI_MODULE_NAME("hed");
110MODULE_AUTHOR("Huang Ying");
111MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
112MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
deleted file mode 100644
index 1c527a192872..000000000000
--- a/drivers/acpi/hest.c
+++ /dev/null
@@ -1,139 +0,0 @@
1#include <linux/acpi.h>
2#include <linux/pci.h>
3
4#define PREFIX "ACPI: "
5
6static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
7{
8 return sizeof(*p) +
9 (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
10}
11
12static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
13{
14 return sizeof(*p) +
15 (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
16}
17
18static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
19{
20 return sizeof(*p);
21}
22
23static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
24{
25 return sizeof(*p);
26}
27
28static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
29{
30 return (0 == pci_domain_nr(pci->bus) &&
31 p->bus == pci->bus->number &&
32 p->device == PCI_SLOT(pci->devfn) &&
33 p->function == PCI_FUNC(pci->devfn));
34}
35
36static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
37{
38 struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
39 unsigned long rc=0;
40 u8 pcie_type = 0;
41 u8 bridge = 0;
42 switch (type) {
43 case ACPI_HEST_TYPE_AER_ROOT_PORT:
44 rc = sizeof(struct acpi_hest_aer_root);
45 pcie_type = PCI_EXP_TYPE_ROOT_PORT;
46 break;
47 case ACPI_HEST_TYPE_AER_ENDPOINT:
48 rc = sizeof(struct acpi_hest_aer);
49 pcie_type = PCI_EXP_TYPE_ENDPOINT;
50 break;
51 case ACPI_HEST_TYPE_AER_BRIDGE:
52 rc = sizeof(struct acpi_hest_aer_bridge);
53 if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
54 bridge = 1;
55 break;
56 }
57
58 if (p->flags & ACPI_HEST_GLOBAL) {
59 if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
60 *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
61 }
62 else
63 if (hest_match_pci(p, pci))
64 *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
65 return rc;
66}
67
68static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
69{
70 struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
71 void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
72 struct acpi_hest_header *hdr = p;
73
74 int i;
75 int firmware_first = 0;
76 static unsigned char printed_unused = 0;
77 static unsigned char printed_reserved = 0;
78
79 for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
80 switch (hdr->type) {
81 case ACPI_HEST_TYPE_IA32_CHECK:
82 p += parse_acpi_hest_ia_machine_check(p);
83 break;
84 case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
85 p += parse_acpi_hest_ia_corrected(p);
86 break;
87 case ACPI_HEST_TYPE_IA32_NMI:
88 p += parse_acpi_hest_ia_nmi(p);
89 break;
90 /* These three should never appear */
91 case ACPI_HEST_TYPE_NOT_USED3:
92 case ACPI_HEST_TYPE_NOT_USED4:
93 case ACPI_HEST_TYPE_NOT_USED5:
94 if (!printed_unused) {
95 printk(KERN_DEBUG PREFIX
96 "HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
97 printed_unused = 1;
98 }
99 break;
100 case ACPI_HEST_TYPE_AER_ROOT_PORT:
101 case ACPI_HEST_TYPE_AER_ENDPOINT:
102 case ACPI_HEST_TYPE_AER_BRIDGE:
103 p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
104 break;
105 case ACPI_HEST_TYPE_GENERIC_ERROR:
106 p += parse_acpi_hest_generic(p);
107 break;
108 /* These should never appear either */
109 case ACPI_HEST_TYPE_RESERVED:
110 default:
111 if (!printed_reserved) {
112 printk(KERN_DEBUG PREFIX
113 "HEST Error Source list contains a reserved type (%d).\n", hdr->type);
114 printed_reserved = 1;
115 }
116 break;
117 }
118 }
119 return firmware_first;
120}
121
122int acpi_hest_firmware_first_pci(struct pci_dev *pci)
123{
124 acpi_status status = AE_NOT_FOUND;
125 struct acpi_table_header *hest = NULL;
126
127 if (acpi_disabled)
128 return 0;
129
130 status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
131
132 if (ACPI_SUCCESS(status)) {
133 if (acpi_hest_firmware_first(hest, pci)) {
134 return 1;
135 }
136 }
137 return 0;
138}
139EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7594f65800cf..78418ce4fc78 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1207,6 +1207,15 @@ int acpi_check_mem_region(resource_size_t start, resource_size_t n,
1207EXPORT_SYMBOL(acpi_check_mem_region); 1207EXPORT_SYMBOL(acpi_check_mem_region);
1208 1208
1209/* 1209/*
1210 * Let drivers know whether the resource checks are effective
1211 */
1212int acpi_resources_are_enforced(void)
1213{
1214 return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
1215}
1216EXPORT_SYMBOL(acpi_resources_are_enforced);
1217
1218/*
1210 * Acquire a spinlock. 1219 * Acquire a spinlock.
1211 * 1220 *
1212 * handle is a pointer to the spinlock_t. 1221 * handle is a pointer to the spinlock_t.
@@ -1406,7 +1415,7 @@ acpi_os_invalidate_address(
1406 switch (space_id) { 1415 switch (space_id) {
1407 case ACPI_ADR_SPACE_SYSTEM_IO: 1416 case ACPI_ADR_SPACE_SYSTEM_IO:
1408 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 1417 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1409 /* Only interference checks against SystemIO and SytemMemory 1418 /* Only interference checks against SystemIO and SystemMemory
1410 are needed */ 1419 are needed */
1411 res.start = address; 1420 res.start = address;
1412 res.end = address + length - 1; 1421 res.end = address + length - 1;
@@ -1458,7 +1467,7 @@ acpi_os_validate_address (
1458 switch (space_id) { 1467 switch (space_id) {
1459 case ACPI_ADR_SPACE_SYSTEM_IO: 1468 case ACPI_ADR_SPACE_SYSTEM_IO:
1460 case ACPI_ADR_SPACE_SYSTEM_MEMORY: 1469 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
1461 /* Only interference checks against SystemIO and SytemMemory 1470 /* Only interference checks against SystemIO and SystemMemory
1462 are needed */ 1471 are needed */
1463 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL); 1472 res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
1464 if (!res) 1473 if (!res)
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index b0a71ecee682..e4804fb05e23 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -401,11 +401,13 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
401 * driver reported one, then use it. Exit in any case. 401 * driver reported one, then use it. Exit in any case.
402 */ 402 */
403 if (gsi < 0) { 403 if (gsi < 0) {
404 u32 dev_gsi;
404 dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin)); 405 dev_warn(&dev->dev, "PCI INT %c: no GSI", pin_name(pin));
405 /* Interrupt Line values above 0xF are forbidden */ 406 /* Interrupt Line values above 0xF are forbidden */
406 if (dev->irq > 0 && (dev->irq <= 0xF)) { 407 if (dev->irq > 0 && (dev->irq <= 0xF) &&
407 printk(" - using IRQ %d\n", dev->irq); 408 (acpi_isa_irq_to_gsi(dev->irq, &dev_gsi) == 0)) {
408 acpi_register_gsi(&dev->dev, dev->irq, 409 printk(" - using ISA IRQ %d\n", dev->irq);
410 acpi_register_gsi(&dev->dev, dev_gsi,
409 ACPI_LEVEL_SENSITIVE, 411 ACPI_LEVEL_SENSITIVE,
410 ACPI_ACTIVE_LOW); 412 ACPI_ACTIVE_LOW);
411 return 0; 413 return 0;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index aefce33f2a09..4eac59393edc 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
120 struct acpi_pci_root *root; 120 struct acpi_pci_root *root;
121 121
122 list_for_each_entry(root, &acpi_pci_roots, node) 122 list_for_each_entry(root, &acpi_pci_roots, node)
123 if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus)) 123 if ((root->segment == (u16) seg) &&
124 (root->secondary.start == (u16) bus))
124 return root->device->handle; 125 return root->device->handle;
125 return NULL; 126 return NULL;
126} 127}
@@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
154static acpi_status 155static acpi_status
155get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) 156get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
156{ 157{
157 int *busnr = data; 158 struct resource *res = data;
158 struct acpi_resource_address64 address; 159 struct acpi_resource_address64 address;
159 160
160 if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 && 161 if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
@@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
164 165
165 acpi_resource_to_address64(resource, &address); 166 acpi_resource_to_address64(resource, &address);
166 if ((address.address_length > 0) && 167 if ((address.address_length > 0) &&
167 (address.resource_type == ACPI_BUS_NUMBER_RANGE)) 168 (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
168 *busnr = address.minimum; 169 res->start = address.minimum;
170 res->end = address.minimum + address.address_length - 1;
171 }
169 172
170 return AE_OK; 173 return AE_OK;
171} 174}
172 175
173static acpi_status try_get_root_bridge_busnr(acpi_handle handle, 176static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
174 unsigned long long *bus) 177 struct resource *res)
175{ 178{
176 acpi_status status; 179 acpi_status status;
177 int busnum;
178 180
179 busnum = -1; 181 res->start = -1;
180 status = 182 status =
181 acpi_walk_resources(handle, METHOD_NAME__CRS, 183 acpi_walk_resources(handle, METHOD_NAME__CRS,
182 get_root_bridge_busnr_callback, &busnum); 184 get_root_bridge_busnr_callback, res);
183 if (ACPI_FAILURE(status)) 185 if (ACPI_FAILURE(status))
184 return status; 186 return status;
185 /* Check if we really get a bus number from _CRS */ 187 if (res->start == -1)
186 if (busnum == -1)
187 return AE_ERROR; 188 return AE_ERROR;
188 *bus = busnum;
189 return AE_OK; 189 return AE_OK;
190} 190}
191 191
@@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
429 struct acpi_device *child; 429 struct acpi_device *child;
430 u32 flags, base_flags; 430 u32 flags, base_flags;
431 431
432 root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
433 if (!root)
434 return -ENOMEM;
435
432 segment = 0; 436 segment = 0;
433 status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL, 437 status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
434 &segment); 438 &segment);
435 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 439 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
436 printk(KERN_ERR PREFIX "can't evaluate _SEG\n"); 440 printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
437 return -ENODEV; 441 result = -ENODEV;
442 goto end;
438 } 443 }
439 444
440 /* Check _CRS first, then _BBN. If no _BBN, default to zero. */ 445 /* Check _CRS first, then _BBN. If no _BBN, default to zero. */
441 bus = 0; 446 root->secondary.flags = IORESOURCE_BUS;
442 status = try_get_root_bridge_busnr(device->handle, &bus); 447 status = try_get_root_bridge_busnr(device->handle, &root->secondary);
443 if (ACPI_FAILURE(status)) { 448 if (ACPI_FAILURE(status)) {
449 /*
450 * We need both the start and end of the downstream bus range
451 * to interpret _CBA (MMCONFIG base address), so it really is
452 * supposed to be in _CRS. If we don't find it there, all we
453 * can do is assume [_BBN-0xFF] or [0-0xFF].
454 */
455 root->secondary.end = 0xFF;
456 printk(KERN_WARNING FW_BUG PREFIX
457 "no secondary bus range in _CRS\n");
444 status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); 458 status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
445 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 459 if (ACPI_SUCCESS(status))
446 printk(KERN_ERR PREFIX 460 root->secondary.start = bus;
447 "no bus number in _CRS and can't evaluate _BBN\n"); 461 else if (status == AE_NOT_FOUND)
448 return -ENODEV; 462 root->secondary.start = 0;
463 else {
464 printk(KERN_ERR PREFIX "can't evaluate _BBN\n");
465 result = -ENODEV;
466 goto end;
449 } 467 }
450 } 468 }
451 469
452 root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
453 if (!root)
454 return -ENOMEM;
455
456 INIT_LIST_HEAD(&root->node); 470 INIT_LIST_HEAD(&root->node);
457 root->device = device; 471 root->device = device;
458 root->segment = segment & 0xFFFF; 472 root->segment = segment & 0xFFFF;
459 root->bus_nr = bus & 0xFF;
460 strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); 473 strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
461 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); 474 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
462 device->driver_data = root; 475 device->driver_data = root;
@@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
475 /* TBD: Locking */ 488 /* TBD: Locking */
476 list_add_tail(&root->node, &acpi_pci_roots); 489 list_add_tail(&root->node, &acpi_pci_roots);
477 490
478 printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n", 491 printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
479 acpi_device_name(device), acpi_device_bid(device), 492 acpi_device_name(device), acpi_device_bid(device),
480 root->segment, root->bus_nr); 493 root->segment, &root->secondary);
481 494
482 /* 495 /*
483 * Scan the Root Bridge 496 * Scan the Root Bridge
@@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
486 * PCI namespace does not get created until this call is made (and 499 * PCI namespace does not get created until this call is made (and
487 * thus the root bridge's pci_dev does not exist). 500 * thus the root bridge's pci_dev does not exist).
488 */ 501 */
489 root->bus = pci_acpi_scan_root(device, segment, bus); 502 root->bus = pci_acpi_scan_root(root);
490 if (!root->bus) { 503 if (!root->bus) {
491 printk(KERN_ERR PREFIX 504 printk(KERN_ERR PREFIX
492 "Bus %04x:%02x not present in PCI namespace\n", 505 "Bus %04x:%02x not present in PCI namespace\n",
493 root->segment, root->bus_nr); 506 root->segment, (unsigned int)root->secondary.start);
494 result = -ENODEV; 507 result = -ENODEV;
495 goto end; 508 goto end;
496 } 509 }
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index ddc76787b842..f74d3b31e5c9 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -172,7 +172,6 @@ static int acpi_power_get_list_state(struct acpi_handle_list *list, int *state)
172 return -EINVAL; 172 return -EINVAL;
173 173
174 /* The state of the list is 'on' IFF all resources are 'on'. */ 174 /* The state of the list is 'on' IFF all resources are 'on'. */
175 /* */
176 175
177 for (i = 0; i < list->count; i++) { 176 for (i = 0; i < list->count; i++) {
178 /* 177 /*
diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
index 5675d9747e87..b1034a9ada4e 100644
--- a/drivers/acpi/processor_driver.c
+++ b/drivers/acpi/processor_driver.c
@@ -616,7 +616,8 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
616 acpi_processor_get_limit_info(pr); 616 acpi_processor_get_limit_info(pr);
617 617
618 618
619 acpi_processor_power_init(pr, device); 619 if (cpuidle_get_driver() == &acpi_idle_driver)
620 acpi_processor_power_init(pr, device);
620 621
621 pr->cdev = thermal_cooling_device_register("Processor", device, 622 pr->cdev = thermal_cooling_device_register("Processor", device,
622 &processor_cooling_ops); 623 &processor_cooling_ops);
@@ -920,9 +921,14 @@ static int __init acpi_processor_init(void)
920 if (!acpi_processor_dir) 921 if (!acpi_processor_dir)
921 return -ENOMEM; 922 return -ENOMEM;
922#endif 923#endif
923 result = cpuidle_register_driver(&acpi_idle_driver); 924
924 if (result < 0) 925 if (!cpuidle_register_driver(&acpi_idle_driver)) {
925 goto out_proc; 926 printk(KERN_DEBUG "ACPI: %s registered with cpuidle\n",
927 acpi_idle_driver.name);
928 } else {
929 printk(KERN_DEBUG "ACPI: acpi_idle yielding to %s",
930 cpuidle_get_driver()->name);
931 }
926 932
927 result = acpi_bus_register_driver(&acpi_processor_driver); 933 result = acpi_bus_register_driver(&acpi_processor_driver);
928 if (result < 0) 934 if (result < 0)
@@ -941,7 +947,6 @@ static int __init acpi_processor_init(void)
941out_cpuidle: 947out_cpuidle:
942 cpuidle_unregister_driver(&acpi_idle_driver); 948 cpuidle_unregister_driver(&acpi_idle_driver);
943 949
944out_proc:
945#ifdef CONFIG_ACPI_PROCFS 950#ifdef CONFIG_ACPI_PROCFS
946 remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir); 951 remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
947#endif 952#endif
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 5939e7f7d8e9..2e8c27d48f2b 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -698,7 +698,7 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
698 "max_cstate: C%d\n" 698 "max_cstate: C%d\n"
699 "maximum allowed latency: %d usec\n", 699 "maximum allowed latency: %d usec\n",
700 pr->power.state ? pr->power.state - pr->power.states : 0, 700 pr->power.state ? pr->power.state - pr->power.states : 0,
701 max_cstate, pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)); 701 max_cstate, pm_qos_request(PM_QOS_CPU_DMA_LATENCY));
702 702
703 seq_puts(seq, "states:\n"); 703 seq_puts(seq, "states:\n");
704 704
@@ -727,19 +727,9 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
727 break; 727 break;
728 } 728 }
729 729
730 if (pr->power.states[i].promotion.state) 730 seq_puts(seq, "promotion[--] ");
731 seq_printf(seq, "promotion[C%zd] ",
732 (pr->power.states[i].promotion.state -
733 pr->power.states));
734 else
735 seq_puts(seq, "promotion[--] ");
736 731
737 if (pr->power.states[i].demotion.state) 732 seq_puts(seq, "demotion[--] ");
738 seq_printf(seq, "demotion[C%zd] ",
739 (pr->power.states[i].demotion.state -
740 pr->power.states));
741 else
742 seq_puts(seq, "demotion[--] ");
743 733
744 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", 734 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
745 pr->power.states[i].latency, 735 pr->power.states[i].latency,
@@ -869,6 +859,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
869 struct acpi_processor *pr; 859 struct acpi_processor *pr;
870 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 860 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
871 ktime_t kt1, kt2; 861 ktime_t kt1, kt2;
862 s64 idle_time_ns;
872 s64 idle_time; 863 s64 idle_time;
873 s64 sleep_ticks = 0; 864 s64 sleep_ticks = 0;
874 865
@@ -881,6 +872,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
881 return(acpi_idle_enter_c1(dev, state)); 872 return(acpi_idle_enter_c1(dev, state));
882 873
883 local_irq_disable(); 874 local_irq_disable();
875
884 if (cx->entry_method != ACPI_CSTATE_FFH) { 876 if (cx->entry_method != ACPI_CSTATE_FFH) {
885 current_thread_info()->status &= ~TS_POLLING; 877 current_thread_info()->status &= ~TS_POLLING;
886 /* 878 /*
@@ -888,12 +880,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
888 * NEED_RESCHED: 880 * NEED_RESCHED:
889 */ 881 */
890 smp_mb(); 882 smp_mb();
891 }
892 883
893 if (unlikely(need_resched())) { 884 if (unlikely(need_resched())) {
894 current_thread_info()->status |= TS_POLLING; 885 current_thread_info()->status |= TS_POLLING;
895 local_irq_enable(); 886 local_irq_enable();
896 return 0; 887 return 0;
888 }
897 } 889 }
898 890
899 /* 891 /*
@@ -910,15 +902,18 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
910 sched_clock_idle_sleep_event(); 902 sched_clock_idle_sleep_event();
911 acpi_idle_do_entry(cx); 903 acpi_idle_do_entry(cx);
912 kt2 = ktime_get_real(); 904 kt2 = ktime_get_real();
913 idle_time = ktime_to_us(ktime_sub(kt2, kt1)); 905 idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
906 idle_time = idle_time_ns;
907 do_div(idle_time, NSEC_PER_USEC);
914 908
915 sleep_ticks = us_to_pm_timer_ticks(idle_time); 909 sleep_ticks = us_to_pm_timer_ticks(idle_time);
916 910
917 /* Tell the scheduler how much we idled: */ 911 /* Tell the scheduler how much we idled: */
918 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 912 sched_clock_idle_wakeup_event(idle_time_ns);
919 913
920 local_irq_enable(); 914 local_irq_enable();
921 current_thread_info()->status |= TS_POLLING; 915 if (cx->entry_method != ACPI_CSTATE_FFH)
916 current_thread_info()->status |= TS_POLLING;
922 917
923 cx->usage++; 918 cx->usage++;
924 919
@@ -943,6 +938,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
943 struct acpi_processor *pr; 938 struct acpi_processor *pr;
944 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 939 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
945 ktime_t kt1, kt2; 940 ktime_t kt1, kt2;
941 s64 idle_time_ns;
946 s64 idle_time; 942 s64 idle_time;
947 s64 sleep_ticks = 0; 943 s64 sleep_ticks = 0;
948 944
@@ -968,6 +964,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
968 } 964 }
969 965
970 local_irq_disable(); 966 local_irq_disable();
967
971 if (cx->entry_method != ACPI_CSTATE_FFH) { 968 if (cx->entry_method != ACPI_CSTATE_FFH) {
972 current_thread_info()->status &= ~TS_POLLING; 969 current_thread_info()->status &= ~TS_POLLING;
973 /* 970 /*
@@ -975,12 +972,12 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
975 * NEED_RESCHED: 972 * NEED_RESCHED:
976 */ 973 */
977 smp_mb(); 974 smp_mb();
978 }
979 975
980 if (unlikely(need_resched())) { 976 if (unlikely(need_resched())) {
981 current_thread_info()->status |= TS_POLLING; 977 current_thread_info()->status |= TS_POLLING;
982 local_irq_enable(); 978 local_irq_enable();
983 return 0; 979 return 0;
980 }
984 } 981 }
985 982
986 acpi_unlazy_tlb(smp_processor_id()); 983 acpi_unlazy_tlb(smp_processor_id());
@@ -1025,14 +1022,17 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1025 spin_unlock(&c3_lock); 1022 spin_unlock(&c3_lock);
1026 } 1023 }
1027 kt2 = ktime_get_real(); 1024 kt2 = ktime_get_real();
1028 idle_time = ktime_to_us(ktime_sub(kt2, kt1)); 1025 idle_time_ns = ktime_to_us(ktime_sub(kt2, kt1));
1026 idle_time = idle_time_ns;
1027 do_div(idle_time, NSEC_PER_USEC);
1029 1028
1030 sleep_ticks = us_to_pm_timer_ticks(idle_time); 1029 sleep_ticks = us_to_pm_timer_ticks(idle_time);
1031 /* Tell the scheduler how much we idled: */ 1030 /* Tell the scheduler how much we idled: */
1032 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 1031 sched_clock_idle_wakeup_event(idle_time_ns);
1033 1032
1034 local_irq_enable(); 1033 local_irq_enable();
1035 current_thread_info()->status |= TS_POLLING; 1034 if (cx->entry_method != ACPI_CSTATE_FFH)
1035 current_thread_info()->status |= TS_POLLING;
1036 1036
1037 cx->usage++; 1037 cx->usage++;
1038 1038
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 0338f513a010..7f2e051ed4f1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -765,7 +765,7 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
765 } 765 }
766 766
767 status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number, 767 status = acpi_get_gpe_status(NULL, device->wakeup.gpe_number,
768 ACPI_NOT_ISR, &event_status); 768 &event_status);
769 if (status == AE_OK) 769 if (status == AE_OK)
770 device->wakeup.flags.run_wake = 770 device->wakeup.flags.run_wake =
771 !!(event_status & ACPI_EVENT_FLAG_HANDLE); 771 !!(event_status & ACPI_EVENT_FLAG_HANDLE);
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 504a55edac49..3fb4bdea7e06 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
80 80
81#ifdef CONFIG_ACPI_SLEEP 81#ifdef CONFIG_ACPI_SLEEP
82static u32 acpi_target_sleep_state = ACPI_STATE_S0; 82static u32 acpi_target_sleep_state = ACPI_STATE_S0;
83/*
84 * According to the ACPI specification the BIOS should make sure that ACPI is
85 * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
86 * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
87 * on such systems during resume. Unfortunately that doesn't help in
88 * particularly pathological cases in which SCI_EN has to be set directly on
89 * resume, although the specification states very clearly that this flag is
90 * owned by the hardware. The set_sci_en_on_resume variable will be set in such
91 * cases.
92 */
93static bool set_sci_en_on_resume;
94
95void __init acpi_set_sci_en_on_resume(void)
96{
97 set_sci_en_on_resume = true;
98}
99 83
100/* 84/*
101 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the 85 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
@@ -256,11 +240,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
256 break; 240 break;
257 } 241 }
258 242
259 /* If ACPI is not enabled by the BIOS, we need to enable it here. */ 243 /* This violates the spec but is required for bug compatibility. */
260 if (set_sci_en_on_resume) 244 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
261 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
262 else
263 acpi_enable();
264 245
265 /* Reprogram control registers and execute _BFS */ 246 /* Reprogram control registers and execute _BFS */
266 acpi_leave_sleep_state_prep(acpi_state); 247 acpi_leave_sleep_state_prep(acpi_state);
@@ -357,12 +338,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
357 return 0; 338 return 0;
358} 339}
359 340
360static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
361{
362 set_sci_en_on_resume = true;
363 return 0;
364}
365
366static struct dmi_system_id __initdata acpisleep_dmi_table[] = { 341static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
367 { 342 {
368 .callback = init_old_suspend_ordering, 343 .callback = init_old_suspend_ordering,
@@ -381,22 +356,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
381 }, 356 },
382 }, 357 },
383 { 358 {
384 .callback = init_set_sci_en_on_resume,
385 .ident = "Apple MacBook 1,1",
386 .matches = {
387 DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
388 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
389 },
390 },
391 {
392 .callback = init_set_sci_en_on_resume,
393 .ident = "Apple MacMini 1,1",
394 .matches = {
395 DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
396 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
397 },
398 },
399 {
400 .callback = init_old_suspend_ordering, 359 .callback = init_old_suspend_ordering,
401 .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", 360 .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
402 .matches = { 361 .matches = {
@@ -405,94 +364,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
405 }, 364 },
406 }, 365 },
407 { 366 {
408 .callback = init_set_sci_en_on_resume,
409 .ident = "Toshiba Satellite L300",
410 .matches = {
411 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
412 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
413 },
414 },
415 {
416 .callback = init_set_sci_en_on_resume,
417 .ident = "Hewlett-Packard HP G7000 Notebook PC",
418 .matches = {
419 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
420 DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
421 },
422 },
423 {
424 .callback = init_set_sci_en_on_resume,
425 .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
426 .matches = {
427 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
428 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
429 },
430 },
431 {
432 .callback = init_set_sci_en_on_resume,
433 .ident = "Hewlett-Packard Pavilion dv4",
434 .matches = {
435 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
436 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
437 },
438 },
439 {
440 .callback = init_set_sci_en_on_resume,
441 .ident = "Hewlett-Packard Pavilion dv7",
442 .matches = {
443 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
444 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
445 },
446 },
447 {
448 .callback = init_set_sci_en_on_resume,
449 .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
450 .matches = {
451 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
452 DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
453 },
454 },
455 {
456 .callback = init_set_sci_en_on_resume,
457 .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
458 .matches = {
459 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
460 DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
461 },
462 },
463 {
464 .callback = init_set_sci_en_on_resume,
465 .ident = "Lenovo ThinkPad T410",
466 .matches = {
467 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
468 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
469 },
470 },
471 {
472 .callback = init_set_sci_en_on_resume,
473 .ident = "Lenovo ThinkPad T510",
474 .matches = {
475 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
476 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
477 },
478 },
479 {
480 .callback = init_set_sci_en_on_resume,
481 .ident = "Lenovo ThinkPad W510",
482 .matches = {
483 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
484 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
485 },
486 },
487 {
488 .callback = init_set_sci_en_on_resume,
489 .ident = "Lenovo ThinkPad X201[s]",
490 .matches = {
491 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
492 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
493 },
494 },
495 {
496 .callback = init_old_suspend_ordering, 367 .callback = init_old_suspend_ordering,
497 .ident = "Panasonic CF51-2L", 368 .ident = "Panasonic CF51-2L",
498 .matches = { 369 .matches = {
@@ -501,30 +372,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
501 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), 372 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
502 }, 373 },
503 }, 374 },
504 {
505 .callback = init_set_sci_en_on_resume,
506 .ident = "Dell Studio 1558",
507 .matches = {
508 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
509 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
510 },
511 },
512 {
513 .callback = init_set_sci_en_on_resume,
514 .ident = "Dell Studio 1557",
515 .matches = {
516 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
517 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
518 },
519 },
520 {
521 .callback = init_set_sci_en_on_resume,
522 .ident = "Dell Studio 1555",
523 .matches = {
524 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
525 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
526 },
527 },
528 {}, 375 {},
529}; 376};
530#endif /* CONFIG_SUSPEND */ 377#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 8a8f3b3382a6..25b8bd149284 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -1,6 +1,6 @@
1 1
2extern u8 sleep_states[]; 2extern u8 sleep_states[];
3extern int acpi_suspend (u32 state); 3extern int acpi_suspend(u32 state);
4 4
5extern void acpi_enable_wakeup_device_prep(u8 sleep_state); 5extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
6extern void acpi_enable_wakeup_device(u8 sleep_state); 6extern void acpi_enable_wakeup_device(u8 sleep_state);
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c
index 4aaf24976138..c79e789ed03a 100644
--- a/drivers/acpi/system.c
+++ b/drivers/acpi/system.c
@@ -71,7 +71,7 @@ struct acpi_table_attr {
71 struct list_head node; 71 struct list_head node;
72}; 72};
73 73
74static ssize_t acpi_table_show(struct kobject *kobj, 74static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
75 struct bin_attribute *bin_attr, char *buf, 75 struct bin_attribute *bin_attr, char *buf,
76 loff_t offset, size_t count) 76 loff_t offset, size_t count)
77{ 77{
@@ -303,8 +303,7 @@ static int get_status(u32 index, acpi_event_status *status, acpi_handle *handle)
303 "Invalid GPE 0x%x\n", index)); 303 "Invalid GPE 0x%x\n", index));
304 goto end; 304 goto end;
305 } 305 }
306 result = acpi_get_gpe_status(*handle, index, 306 result = acpi_get_gpe_status(*handle, index, status);
307 ACPI_NOT_ISR, status);
308 } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS)) 307 } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
309 result = acpi_get_event_status(index - num_gpes, status); 308 result = acpi_get_event_status(index - num_gpes, status);
310 309
@@ -395,7 +394,7 @@ static ssize_t counter_set(struct kobject *kobj,
395 result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE); 394 result = acpi_set_gpe(handle, index, ACPI_GPE_ENABLE);
396 else if (!strcmp(buf, "clear\n") && 395 else if (!strcmp(buf, "clear\n") &&
397 (status & ACPI_EVENT_FLAG_SET)) 396 (status & ACPI_EVENT_FLAG_SET))
398 result = acpi_clear_gpe(handle, index, ACPI_NOT_ISR); 397 result = acpi_clear_gpe(handle, index);
399 else 398 else
400 all_counters[index].count = strtoul(buf, NULL, 0); 399 all_counters[index].count = strtoul(buf, NULL, 0);
401 } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) { 400 } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 8a0ed2800e63..f336bca7c450 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
213 unsigned long table_end; 213 unsigned long table_end;
214 acpi_size tbl_size; 214 acpi_size tbl_size;
215 215
216 if (acpi_disabled && !acpi_ht) 216 if (acpi_disabled)
217 return -ENODEV; 217 return -ENODEV;
218 218
219 if (!handler) 219 if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
280 struct acpi_table_header *table = NULL; 280 struct acpi_table_header *table = NULL;
281 acpi_size tbl_size; 281 acpi_size tbl_size;
282 282
283 if (acpi_disabled && !acpi_ht) 283 if (acpi_disabled)
284 return -ENODEV; 284 return -ENODEV;
285 285
286 if (!handler) 286 if (!handler)
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a0c93b321482..9865d46f49a8 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -45,6 +45,7 @@
45#include <acpi/acpi_bus.h> 45#include <acpi/acpi_bus.h>
46#include <acpi/acpi_drivers.h> 46#include <acpi/acpi_drivers.h>
47#include <linux/suspend.h> 47#include <linux/suspend.h>
48#include <acpi/video.h>
48 49
49#define PREFIX "ACPI: " 50#define PREFIX "ACPI: "
50 51
@@ -65,11 +66,6 @@
65 66
66#define MAX_NAME_LEN 20 67#define MAX_NAME_LEN 20
67 68
68#define ACPI_VIDEO_DISPLAY_CRT 1
69#define ACPI_VIDEO_DISPLAY_TV 2
70#define ACPI_VIDEO_DISPLAY_DVI 3
71#define ACPI_VIDEO_DISPLAY_LCD 4
72
73#define _COMPONENT ACPI_VIDEO_COMPONENT 69#define _COMPONENT ACPI_VIDEO_COMPONENT
74ACPI_MODULE_NAME("video"); 70ACPI_MODULE_NAME("video");
75 71
@@ -1007,11 +1003,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
1007 result = acpi_video_init_brightness(device); 1003 result = acpi_video_init_brightness(device);
1008 if (result) 1004 if (result)
1009 return; 1005 return;
1010 name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); 1006 name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
1011 if (!name) 1007 if (!name)
1012 return; 1008 return;
1009 count++;
1013 1010
1014 sprintf(name, "acpi_video%d", count++);
1015 memset(&props, 0, sizeof(struct backlight_properties)); 1011 memset(&props, 0, sizeof(struct backlight_properties));
1016 props.max_brightness = device->brightness->count - 3; 1012 props.max_brightness = device->brightness->count - 3;
1017 device->backlight = backlight_device_register(name, NULL, device, 1013 device->backlight = backlight_device_register(name, NULL, device,
@@ -1067,10 +1063,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
1067 if (device->cap._DCS && device->cap._DSS) { 1063 if (device->cap._DCS && device->cap._DSS) {
1068 static int count; 1064 static int count;
1069 char *name; 1065 char *name;
1070 name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); 1066 name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
1071 if (!name) 1067 if (!name)
1072 return; 1068 return;
1073 sprintf(name, "acpi_video%d", count++); 1069 count++;
1074 device->output_dev = video_output_register(name, 1070 device->output_dev = video_output_register(name,
1075 NULL, device, &acpi_output_properties); 1071 NULL, device, &acpi_output_properties);
1076 kfree(name); 1072 kfree(name);
@@ -1748,11 +1744,27 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id
1748} 1744}
1749 1745
1750static int 1746static int
1747acpi_video_get_device_type(struct acpi_video_bus *video,
1748 unsigned long device_id)
1749{
1750 struct acpi_video_enumerated_device *ids;
1751 int i;
1752
1753 for (i = 0; i < video->attached_count; i++) {
1754 ids = &video->attached_array[i];
1755 if ((ids->value.int_val & 0xffff) == device_id)
1756 return ids->value.int_val;
1757 }
1758
1759 return 0;
1760}
1761
1762static int
1751acpi_video_bus_get_one_device(struct acpi_device *device, 1763acpi_video_bus_get_one_device(struct acpi_device *device,
1752 struct acpi_video_bus *video) 1764 struct acpi_video_bus *video)
1753{ 1765{
1754 unsigned long long device_id; 1766 unsigned long long device_id;
1755 int status; 1767 int status, device_type;
1756 struct acpi_video_device *data; 1768 struct acpi_video_device *data;
1757 struct acpi_video_device_attrib* attribute; 1769 struct acpi_video_device_attrib* attribute;
1758 1770
@@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1797 } 1809 }
1798 if(attribute->bios_can_detect) 1810 if(attribute->bios_can_detect)
1799 data->flags.bios = 1; 1811 data->flags.bios = 1;
1800 } else 1812 } else {
1801 data->flags.unknown = 1; 1813 /* Check for legacy IDs */
1814 device_type = acpi_video_get_device_type(video,
1815 device_id);
1816 /* Ignore bits 16 and 18-20 */
1817 switch (device_type & 0xffe2ffff) {
1818 case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
1819 data->flags.crt = 1;
1820 break;
1821 case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
1822 data->flags.lcd = 1;
1823 break;
1824 case ACPI_VIDEO_DISPLAY_LEGACY_TV:
1825 data->flags.tvout = 1;
1826 break;
1827 default:
1828 data->flags.unknown = 1;
1829 }
1830 }
1802 1831
1803 acpi_video_device_bind(video, data); 1832 acpi_video_device_bind(video, data);
1804 acpi_video_device_find_cap(data); 1833 acpi_video_device_find_cap(data);
@@ -2032,6 +2061,71 @@ out:
2032 return result; 2061 return result;
2033} 2062}
2034 2063
2064int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
2065 void **edid)
2066{
2067 struct acpi_video_bus *video;
2068 struct acpi_video_device *video_device;
2069 union acpi_object *buffer = NULL;
2070 acpi_status status;
2071 int i, length;
2072
2073 if (!device || !acpi_driver_data(device))
2074 return -EINVAL;
2075
2076 video = acpi_driver_data(device);
2077
2078 for (i = 0; i < video->attached_count; i++) {
2079 video_device = video->attached_array[i].bind_info;
2080 length = 256;
2081
2082 if (!video_device)
2083 continue;
2084
2085 if (type) {
2086 switch (type) {
2087 case ACPI_VIDEO_DISPLAY_CRT:
2088 if (!video_device->flags.crt)
2089 continue;
2090 break;
2091 case ACPI_VIDEO_DISPLAY_TV:
2092 if (!video_device->flags.tvout)
2093 continue;
2094 break;
2095 case ACPI_VIDEO_DISPLAY_DVI:
2096 if (!video_device->flags.dvi)
2097 continue;
2098 break;
2099 case ACPI_VIDEO_DISPLAY_LCD:
2100 if (!video_device->flags.lcd)
2101 continue;
2102 break;
2103 }
2104 } else if (video_device->device_id != device_id) {
2105 continue;
2106 }
2107
2108 status = acpi_video_device_EDID(video_device, &buffer, length);
2109
2110 if (ACPI_FAILURE(status) || !buffer ||
2111 buffer->type != ACPI_TYPE_BUFFER) {
2112 length = 128;
2113 status = acpi_video_device_EDID(video_device, &buffer,
2114 length);
2115 if (ACPI_FAILURE(status) || !buffer ||
2116 buffer->type != ACPI_TYPE_BUFFER) {
2117 continue;
2118 }
2119 }
2120
2121 *edid = buffer->buffer.pointer;
2122 return length;
2123 }
2124
2125 return -ENODEV;
2126}
2127EXPORT_SYMBOL(acpi_video_get_edid);
2128
2035static int 2129static int
2036acpi_video_bus_get_devices(struct acpi_video_bus *video, 2130acpi_video_bus_get_devices(struct acpi_video_bus *video,
2037 struct acpi_device *device) 2131 struct acpi_device *device)
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index fc2f26b9b407..c5fef01b3c95 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
250 ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR; 250 ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
251 if (!strcmp("video", str)) 251 if (!strcmp("video", str))
252 acpi_video_support |= 252 acpi_video_support |=
253 ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; 253 ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
254 } 254 }
255 return 1; 255 return 1;
256} 256}