Diffstat (limited to 'drivers')
 drivers/acpi/Kconfig                 |   9
 drivers/acpi/Makefile                |   5
 drivers/acpi/acpi_pad.c              |  13
 drivers/acpi/acpica/evxfevnt.c       |  33
 drivers/acpi/acpica/hwacpi.c         |  20
 drivers/acpi/apei/Kconfig            |  30
 drivers/acpi/apei/Makefile           |   5
 drivers/acpi/apei/apei-base.c        | 593
 drivers/acpi/apei/apei-internal.h    | 114
 drivers/acpi/apei/cper.c             |  84
 drivers/acpi/apei/einj.c             | 548
 drivers/acpi/apei/erst.c             | 855
 drivers/acpi/apei/ghes.c             | 427
 drivers/acpi/apei/hest.c             | 173
 drivers/acpi/atomicio.c              | 360
 drivers/acpi/ec.c                    |   3
 drivers/acpi/hed.c                   | 112
 drivers/acpi/hest.c                  | 139
 drivers/acpi/pci_root.c              |  67
 drivers/acpi/processor_idle.c        |  30
 drivers/acpi/sleep.c                 | 157
 drivers/acpi/sleep.h                 |   2
 drivers/acpi/tables.c                |   4
 drivers/acpi/video.c                 | 118
 drivers/acpi/video_detect.c          |   2
 drivers/pci/pcie/aer/aerdrv.h        |  17
 drivers/pci/pcie/aer/aerdrv_acpi.c   |  77
 drivers/pci/pcie/aer/aerdrv_core.c   |   8
 drivers/pci/probe.c                  |   8
 29 files changed, 3611 insertions, 402 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 93d2c7971df6..746411518802 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -360,4 +360,13 @@ config ACPI_SBS
 	  To compile this driver as a module, choose M here:
 	  the modules will be called sbs and sbshc.
 
+config ACPI_HED
+	tristate "Hardware Error Device"
+	help
+	  This driver supports the Hardware Error Device (PNP0C33),
+	  which is used to report some hardware errors notified via
+	  SCI, mainly the corrected errors.
+
+source "drivers/acpi/apei/Kconfig"
+
 endif	# ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a8d8998dd5c5..6ee33169e1dc 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,7 +19,7 @@ obj-y += acpi.o \
 
 # All the builtin files are in the "acpi." module_param namespace.
 acpi-y				+= osl.o utils.o reboot.o
-acpi-y				+= hest.o
+acpi-y				+= atomicio.o
 
 # sleep related files
 acpi-y				+= wakeup.o
@@ -59,6 +59,7 @@ obj-$(CONFIG_ACPI_BATTERY) += battery.o
 obj-$(CONFIG_ACPI_SBS)		+= sbshc.o
 obj-$(CONFIG_ACPI_SBS)		+= sbs.o
 obj-$(CONFIG_ACPI_POWER_METER)	+= power_meter.o
+obj-$(CONFIG_ACPI_HED)		+= hed.o
 
 # processor has its own "processor." module_param namespace
 processor-y			:= processor_driver.o processor_throttling.o
@@ -66,3 +67,5 @@ processor-y += processor_idle.o processor_thermal.o
 processor-$(CONFIG_CPU_FREQ)	+= processor_perflib.o
 
 obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
+
+obj-$(CONFIG_ACPI_APEI)		+= apei/
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
index 62122134693b..f169e516a1af 100644
--- a/drivers/acpi/acpi_pad.c
+++ b/drivers/acpi/acpi_pad.c
@@ -43,6 +43,10 @@ static DEFINE_MUTEX(isolated_cpus_lock);
 #define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
 #define CPUID5_ECX_INTERRUPT_BREAK	(0x2)
 static unsigned long power_saving_mwait_eax;
+
+static unsigned char tsc_detected_unstable;
+static unsigned char tsc_marked_unstable;
+
 static void power_saving_mwait_init(void)
 {
 	unsigned int eax, ebx, ecx, edx;
@@ -87,8 +91,8 @@ static void power_saving_mwait_init(void)
 
 		/*FALL THROUGH*/
 	default:
-		/* TSC could halt in idle, so notify users */
-		mark_tsc_unstable("TSC halts in idle");
+		/* TSC could halt in idle */
+		tsc_detected_unstable = 1;
 	}
 #endif
 }
@@ -178,6 +182,11 @@ static int power_saving_thread(void *data)
 		expire_time = jiffies + HZ * (100 - idle_pct) / 100;
 
 		while (!need_resched()) {
+			if (tsc_detected_unstable && !tsc_marked_unstable) {
+				/* TSC could halt in idle, so notify users */
+				mark_tsc_unstable("TSC halts in idle");
+				tsc_marked_unstable = 1;
+			}
 			local_irq_disable();
 			cpu = smp_processor_id();
 			clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
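The hunk above defers the mark_tsc_unstable() call from the mwait-probe path to the power-saving thread, latching a pair of flags so the call happens at most once. A minimal sketch of that defer-and-latch pattern, with hypothetical names standing in for the acpi_pad specifics:

/* Hypothetical sketch of the defer-and-latch pattern used above. */
static unsigned char event_detected;	/* set where heavy work is unwanted */
static unsigned char event_handled;	/* latched once the work has run */

static void handle_event(void)
{
	/* stands in for the expensive call, e.g. mark_tsc_unstable() */
}

static void detect_path(void)
{
	event_detected = 1;		/* only record the condition here */
}

static void thread_loop_body(void)
{
	if (event_detected && !event_handled) {
		handle_event();		/* do the heavy work once ... */
		event_handled = 1;	/* ... and never again */
	}
}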
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index 7c7bbb4d402c..d5a5efc043bf 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -69,7 +69,7 @@ acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
 
 acpi_status acpi_enable(void)
 {
-	acpi_status status = AE_OK;
+	acpi_status status;
 
 	ACPI_FUNCTION_TRACE(acpi_enable);
 
@@ -84,21 +84,30 @@ acpi_status acpi_enable(void)
 	if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
 				  "System is already in ACPI mode\n"));
-	} else {
-		/* Transition to ACPI mode */
+		return_ACPI_STATUS(AE_OK);
+	}
 
-		status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
-		if (ACPI_FAILURE(status)) {
-			ACPI_ERROR((AE_INFO,
-				    "Could not transition to ACPI mode"));
-			return_ACPI_STATUS(status);
-		}
+	/* Transition to ACPI mode */
 
-		ACPI_DEBUG_PRINT((ACPI_DB_INIT,
-				  "Transition to ACPI mode successful\n"));
+	status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI);
+	if (ACPI_FAILURE(status)) {
+		ACPI_ERROR((AE_INFO,
+			    "Could not transition to ACPI mode"));
+		return_ACPI_STATUS(status);
 	}
 
-	return_ACPI_STATUS(status);
+	/* Sanity check that transition succeeded */
+
+	if (acpi_hw_get_mode() != ACPI_SYS_MODE_ACPI) {
+		ACPI_ERROR((AE_INFO,
+			    "Hardware did not enter ACPI mode"));
+		return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+	}
+
+	ACPI_DEBUG_PRINT((ACPI_DB_INIT,
+			  "Transition to ACPI mode successful\n"));
+
+	return_ACPI_STATUS(AE_OK);
 }
 
 ACPI_EXPORT_SYMBOL(acpi_enable)
diff --git a/drivers/acpi/acpica/hwacpi.c b/drivers/acpi/acpica/hwacpi.c
index 679a112a7d26..b44274a0b62c 100644
--- a/drivers/acpi/acpica/hwacpi.c
+++ b/drivers/acpi/acpica/hwacpi.c
@@ -63,7 +63,6 @@ acpi_status acpi_hw_set_mode(u32 mode)
 {
 
 	acpi_status status;
-	u32 retry;
 
 	ACPI_FUNCTION_TRACE(hw_set_mode);
 
@@ -125,24 +124,7 @@ acpi_status acpi_hw_set_mode(u32 mode)
 		return_ACPI_STATUS(status);
 	}
 
-	/*
-	 * Some hardware takes a LONG time to switch modes. Give them 3 sec to
-	 * do so, but allow faster systems to proceed more quickly.
-	 */
-	retry = 3000;
-	while (retry) {
-		if (acpi_hw_get_mode() == mode) {
-			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
-					  "Mode %X successfully enabled\n",
-					  mode));
-			return_ACPI_STATUS(AE_OK);
-		}
-		acpi_os_stall(1000);
-		retry--;
-	}
-
-	ACPI_ERROR((AE_INFO, "Hardware did not change modes"));
-	return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE);
+	return_ACPI_STATUS(AE_OK);
 }
 
 /*******************************************************************************
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
new file mode 100644
index 000000000000..f8c668f27b5a
--- /dev/null
+++ b/drivers/acpi/apei/Kconfig
@@ -0,0 +1,30 @@
1config ACPI_APEI
2 bool "ACPI Platform Error Interface (APEI)"
3 depends on X86
4 help
5	  APEI allows errors (for example from the chipset) to be
6	  reported to the operating system. This especially improves
7	  NMI handling. In addition it supports error serialization
8	  and error injection.
9
10config ACPI_APEI_GHES
11 tristate "APEI Generic Hardware Error Source"
12 depends on ACPI_APEI && X86
13 select ACPI_HED
14 help
15	  Generic Hardware Error Source provides a way to report
16	  platform hardware errors (such as those from the chipset).
17	  It works in so-called "Firmware First" mode, that is,
18	  hardware errors are reported to the firmware first and then
19	  to Linux by the firmware. This way, some non-standard
20	  hardware error registers or non-standard hardware links can
21	  be checked by the firmware to produce more valuable hardware
22	  error information for Linux.
23
24config ACPI_APEI_EINJ
25 tristate "APEI Error INJection (EINJ)"
26 depends on ACPI_APEI && DEBUG_FS
27 help
28	  EINJ provides a hardware error injection mechanism; it is
29	  mainly used for debugging and testing the other parts of
30	  APEI and some other RAS features.
diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
new file mode 100644
index 000000000000..b13b03a17789
--- /dev/null
+++ b/drivers/acpi/apei/Makefile
@@ -0,0 +1,5 @@
1obj-$(CONFIG_ACPI_APEI) += apei.o
2obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
3obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
4
5apei-y := apei-base.o hest.o cper.o erst.o
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
new file mode 100644
index 000000000000..db3946e9c66b
--- /dev/null
+++ b/drivers/acpi/apei/apei-base.c
@@ -0,0 +1,593 @@
1/*
2 * apei-base.c - ACPI Platform Error Interface (APEI) supporting
3 * infrastructure
4 *
5 * APEI allows errors (for example from the chipset) to be reported
6 * to the operating system. This improves NMI handling especially.
7 * In addition it supports error serialization and error injection.
8 *
9 * For more information about APEI, please refer to ACPI Specification
10 * version 4.0, chapter 17.
11 *
12 * This file has common functions used by more than one APEI table,
13 * including the framework of the interpreter for ERST and EINJ, and
14 * resource management for APEI registers.
15 *
16 * Copyright (C) 2009, Intel Corp.
17 * Author: Huang Ying <ying.huang@intel.com>
18 *
19 * This program is free software; you can redistribute it and/or
20 * modify it under the terms of the GNU General Public License version
21 * 2 as published by the Free Software Foundation.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 */
32
33#include <linux/kernel.h>
34#include <linux/module.h>
35#include <linux/init.h>
36#include <linux/acpi.h>
37#include <linux/io.h>
38#include <linux/kref.h>
39#include <linux/rculist.h>
40#include <linux/interrupt.h>
41#include <linux/debugfs.h>
42#include <acpi/atomicio.h>
43
44#include "apei-internal.h"
45
46#define APEI_PFX "APEI: "
47
48/*
49 * APEI ERST (Error Record Serialization Table) and EINJ (Error
50 * INJection) interpreter framework.
51 */
52
53#define APEI_EXEC_PRESERVE_REGISTER 0x1
54
55void apei_exec_ctx_init(struct apei_exec_context *ctx,
56 struct apei_exec_ins_type *ins_table,
57 u32 instructions,
58 struct acpi_whea_header *action_table,
59 u32 entries)
60{
61 ctx->ins_table = ins_table;
62 ctx->instructions = instructions;
63 ctx->action_table = action_table;
64 ctx->entries = entries;
65}
66EXPORT_SYMBOL_GPL(apei_exec_ctx_init);
67
68int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
69{
70 int rc;
71
72 rc = acpi_atomic_read(val, &entry->register_region);
73 if (rc)
74 return rc;
75 *val >>= entry->register_region.bit_offset;
76 *val &= entry->mask;
77
78 return 0;
79}
80
81int apei_exec_read_register(struct apei_exec_context *ctx,
82 struct acpi_whea_header *entry)
83{
84 int rc;
85 u64 val = 0;
86
87 rc = __apei_exec_read_register(entry, &val);
88 if (rc)
89 return rc;
90 ctx->value = val;
91
92 return 0;
93}
94EXPORT_SYMBOL_GPL(apei_exec_read_register);
95
96int apei_exec_read_register_value(struct apei_exec_context *ctx,
97 struct acpi_whea_header *entry)
98{
99 int rc;
100
101 rc = apei_exec_read_register(ctx, entry);
102 if (rc)
103 return rc;
104 ctx->value = (ctx->value == entry->value);
105
106 return 0;
107}
108EXPORT_SYMBOL_GPL(apei_exec_read_register_value);
109
110int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
111{
112 int rc;
113
114 val &= entry->mask;
115 val <<= entry->register_region.bit_offset;
116 if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
117 u64 valr = 0;
118 rc = acpi_atomic_read(&valr, &entry->register_region);
119 if (rc)
120 return rc;
121 valr &= ~(entry->mask << entry->register_region.bit_offset);
122 val |= valr;
123 }
124 rc = acpi_atomic_write(val, &entry->register_region);
125
126 return rc;
127}
128
129int apei_exec_write_register(struct apei_exec_context *ctx,
130 struct acpi_whea_header *entry)
131{
132 return __apei_exec_write_register(entry, ctx->value);
133}
134EXPORT_SYMBOL_GPL(apei_exec_write_register);
135
136int apei_exec_write_register_value(struct apei_exec_context *ctx,
137 struct acpi_whea_header *entry)
138{
139 int rc;
140
141 ctx->value = entry->value;
142 rc = apei_exec_write_register(ctx, entry);
143
144 return rc;
145}
146EXPORT_SYMBOL_GPL(apei_exec_write_register_value);
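The helpers above shift by register_region.bit_offset and apply the entry mask before touching the register, and APEI_EXEC_PRESERVE_REGISTER turns the write into a read-modify-write. A hypothetical worked example (the values are invented, not taken from any real action table):

/*
 * Hypothetical worked example: bit_offset == 8, mask == 0xff,
 * current register content 0xaabbccdd, value to write 0x12, and
 * APEI_EXEC_PRESERVE_REGISTER set:
 *
 *	val  = (0x12 & 0xff) << 8;		-> 0x00001200
 *	valr = 0xaabbccdd & ~(0xff << 8);	-> 0xaabb00dd
 *	acpi_atomic_write(val | valr, ...);	-> 0xaabb12dd
 *
 * Reading the field back reverses the steps:
 *
 *	(0xaabb12dd >> 8) & 0xff		-> 0x12
 */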
147
148int apei_exec_noop(struct apei_exec_context *ctx,
149 struct acpi_whea_header *entry)
150{
151 return 0;
152}
153EXPORT_SYMBOL_GPL(apei_exec_noop);
154
155/*
156 * Interpret the specified action. Go through the whole action table
157 * and execute all instructions that belong to the action.
158 */
159int apei_exec_run(struct apei_exec_context *ctx, u8 action)
160{
161 int rc;
162 u32 i, ip;
163 struct acpi_whea_header *entry;
164 apei_exec_ins_func_t run;
165
166 ctx->ip = 0;
167
168 /*
169 * "ip" is the instruction pointer of current instruction,
170 * "ctx->ip" specifies the next instruction to executed,
171 * instruction "run" function may change the "ctx->ip" to
172 * implement "goto" semantics.
173 */
174rewind:
175 ip = 0;
176 for (i = 0; i < ctx->entries; i++) {
177 entry = &ctx->action_table[i];
178 if (entry->action != action)
179 continue;
180 if (ip == ctx->ip) {
181 if (entry->instruction >= ctx->instructions ||
182 !ctx->ins_table[entry->instruction].run) {
183 pr_warning(FW_WARN APEI_PFX
184 "Invalid action table, unknown instruction type: %d\n",
185 entry->instruction);
186 return -EINVAL;
187 }
188 run = ctx->ins_table[entry->instruction].run;
189 rc = run(ctx, entry);
190 if (rc < 0)
191 return rc;
192 else if (rc != APEI_EXEC_SET_IP)
193 ctx->ip++;
194 }
195 ip++;
196 if (ctx->ip < ip)
197 goto rewind;
198 }
199
200 return 0;
201}
202EXPORT_SYMBOL_GPL(apei_exec_run);
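For orientation, a minimal sketch (not part of this patch, assuming apei-internal.h and the usual kernel headers) of how a table driver is expected to use this interpreter; the real users below, einj.c and erst.c, follow the same shape with their own instruction tables:

/* Hypothetical example driver: two instruction opcodes, one action. */
static struct apei_exec_ins_type example_ins_type[] = {
	[0] = {					/* opcode 0: read a register */
		.flags	= APEI_EXEC_INS_ACCESS_REGISTER,
		.run	= apei_exec_read_register,
	},
	[1] = {					/* opcode 1: no-op */
		.flags	= 0,
		.run	= apei_exec_noop,
	},
};

static int example_run_action(struct acpi_whea_header *action_table,
			      u32 entries, u8 action, u64 *output)
{
	struct apei_exec_context ctx;
	int rc;

	apei_exec_ctx_init(&ctx, example_ins_type,
			   ARRAY_SIZE(example_ins_type),
			   action_table, entries);
	rc = apei_exec_run(&ctx, action);	/* run every entry of "action" */
	if (rc)
		return rc;
	*output = apei_exec_ctx_get_output(&ctx);	/* ctx.value afterwards */

	return 0;
}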
203
204typedef int (*apei_exec_entry_func_t)(struct apei_exec_context *ctx,
205 struct acpi_whea_header *entry,
206 void *data);
207
208static int apei_exec_for_each_entry(struct apei_exec_context *ctx,
209 apei_exec_entry_func_t func,
210 void *data,
211 int *end)
212{
213 u8 ins;
214 int i, rc;
215 struct acpi_whea_header *entry;
216 struct apei_exec_ins_type *ins_table = ctx->ins_table;
217
218 for (i = 0; i < ctx->entries; i++) {
219 entry = ctx->action_table + i;
220 ins = entry->instruction;
221 if (end)
222 *end = i;
223 if (ins >= ctx->instructions || !ins_table[ins].run) {
224 pr_warning(FW_WARN APEI_PFX
225 "Invalid action table, unknown instruction type: %d\n",
226 ins);
227 return -EINVAL;
228 }
229 rc = func(ctx, entry, data);
230 if (rc)
231 return rc;
232 }
233
234 return 0;
235}
236
237static int pre_map_gar_callback(struct apei_exec_context *ctx,
238 struct acpi_whea_header *entry,
239 void *data)
240{
241 u8 ins = entry->instruction;
242
243 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
244 return acpi_pre_map_gar(&entry->register_region);
245
246 return 0;
247}
248
249/*
250 * Pre-map all GARs in the action table to make it possible to access
251 * them in the NMI handler.
252 */
253int apei_exec_pre_map_gars(struct apei_exec_context *ctx)
254{
255 int rc, end;
256
257 rc = apei_exec_for_each_entry(ctx, pre_map_gar_callback,
258 NULL, &end);
259 if (rc) {
260 struct apei_exec_context ctx_unmap;
261 memcpy(&ctx_unmap, ctx, sizeof(*ctx));
262 ctx_unmap.entries = end;
263 apei_exec_post_unmap_gars(&ctx_unmap);
264 }
265
266 return rc;
267}
268EXPORT_SYMBOL_GPL(apei_exec_pre_map_gars);
269
270static int post_unmap_gar_callback(struct apei_exec_context *ctx,
271 struct acpi_whea_header *entry,
272 void *data)
273{
274 u8 ins = entry->instruction;
275
276 if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
277 acpi_post_unmap_gar(&entry->register_region);
278
279 return 0;
280}
281
282/* Post-unmap all GARs in the action table. */
283int apei_exec_post_unmap_gars(struct apei_exec_context *ctx)
284{
285 return apei_exec_for_each_entry(ctx, post_unmap_gar_callback,
286 NULL, NULL);
287}
288EXPORT_SYMBOL_GPL(apei_exec_post_unmap_gars);
289
290/*
291 * Resource management for GARs in APEI
292 */
293struct apei_res {
294 struct list_head list;
295 unsigned long start;
296 unsigned long end;
297};
298
299/* Collect all resources requested, to avoid conflict */
300struct apei_resources apei_resources_all = {
301 .iomem = LIST_HEAD_INIT(apei_resources_all.iomem),
302 .ioport = LIST_HEAD_INIT(apei_resources_all.ioport),
303};
304
305static int apei_res_add(struct list_head *res_list,
306 unsigned long start, unsigned long size)
307{
308 struct apei_res *res, *resn, *res_ins = NULL;
309 unsigned long end = start + size;
310
311 if (end <= start)
312 return 0;
313repeat:
314 list_for_each_entry_safe(res, resn, res_list, list) {
315 if (res->start > end || res->end < start)
316 continue;
317 else if (end <= res->end && start >= res->start) {
318 kfree(res_ins);
319 return 0;
320 }
321 list_del(&res->list);
322 res->start = start = min(res->start, start);
323 res->end = end = max(res->end, end);
324 kfree(res_ins);
325 res_ins = res;
326 goto repeat;
327 }
328
329 if (res_ins)
330 list_add(&res_ins->list, res_list);
331 else {
332 res_ins = kmalloc(sizeof(*res), GFP_KERNEL);
333 if (!res_ins)
334 return -ENOMEM;
335 res_ins->start = start;
336 res_ins->end = end;
337 list_add(&res_ins->list, res_list);
338 }
339
340 return 0;
341}
342
343static int apei_res_sub(struct list_head *res_list1,
344 struct list_head *res_list2)
345{
346 struct apei_res *res1, *resn1, *res2, *res;
347 res1 = list_entry(res_list1->next, struct apei_res, list);
348 resn1 = list_entry(res1->list.next, struct apei_res, list);
349 while (&res1->list != res_list1) {
350 list_for_each_entry(res2, res_list2, list) {
351 if (res1->start >= res2->end ||
352 res1->end <= res2->start)
353 continue;
354 else if (res1->end <= res2->end &&
355 res1->start >= res2->start) {
356 list_del(&res1->list);
357 kfree(res1);
358 break;
359 } else if (res1->end > res2->end &&
360 res1->start < res2->start) {
361 res = kmalloc(sizeof(*res), GFP_KERNEL);
362 if (!res)
363 return -ENOMEM;
364 res->start = res2->end;
365 res->end = res1->end;
366 res1->end = res2->start;
367 list_add(&res->list, &res1->list);
368 resn1 = res;
369 } else {
370 if (res1->start < res2->start)
371 res1->end = res2->start;
372 else
373 res1->start = res2->end;
374 }
375 }
376 res1 = resn1;
377 resn1 = list_entry(resn1->list.next, struct apei_res, list);
378 }
379
380 return 0;
381}
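To illustrate the interval arithmetic implemented by apei_res_add() and apei_res_sub() above, a hypothetical worked example (the addresses are invented):

/*
 * Hypothetical worked example. Starting from an empty list:
 *
 *	apei_res_add(list, 0x1000, 0x10);	covers [0x1000, 0x1010)
 *	apei_res_add(list, 0x1008, 0x10);	merged to [0x1000, 0x1018)
 *
 * Subtracting a second list holding [0x1004, 0x1008) with
 * apei_res_sub() then leaves [0x1000, 0x1004) and [0x1008, 0x1018).
 */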
382
383static void apei_res_clean(struct list_head *res_list)
384{
385 struct apei_res *res, *resn;
386
387 list_for_each_entry_safe(res, resn, res_list, list) {
388 list_del(&res->list);
389 kfree(res);
390 }
391}
392
393void apei_resources_fini(struct apei_resources *resources)
394{
395 apei_res_clean(&resources->iomem);
396 apei_res_clean(&resources->ioport);
397}
398EXPORT_SYMBOL_GPL(apei_resources_fini);
399
400static int apei_resources_merge(struct apei_resources *resources1,
401 struct apei_resources *resources2)
402{
403 int rc;
404 struct apei_res *res;
405
406 list_for_each_entry(res, &resources2->iomem, list) {
407 rc = apei_res_add(&resources1->iomem, res->start,
408 res->end - res->start);
409 if (rc)
410 return rc;
411 }
412 list_for_each_entry(res, &resources2->ioport, list) {
413 rc = apei_res_add(&resources1->ioport, res->start,
414 res->end - res->start);
415 if (rc)
416 return rc;
417 }
418
419 return 0;
420}
421
422/*
423 * EINJ has two groups of GARs (EINJ table entry and trigger table
424 * entry), so common resources are subtracted from the trigger table
425 * resources before the second request.
426 */
427int apei_resources_sub(struct apei_resources *resources1,
428 struct apei_resources *resources2)
429{
430 int rc;
431
432 rc = apei_res_sub(&resources1->iomem, &resources2->iomem);
433 if (rc)
434 return rc;
435 return apei_res_sub(&resources1->ioport, &resources2->ioport);
436}
437EXPORT_SYMBOL_GPL(apei_resources_sub);
438
439/*
440 * The IO memory/port resource management mechanism is used to check
441 * whether the memory/port area used by GARs conflicts with normal
442 * memory or the IO memory/port of devices.
443 */
444int apei_resources_request(struct apei_resources *resources,
445 const char *desc)
446{
447 struct apei_res *res, *res_bak;
448 struct resource *r;
449
450 apei_resources_sub(resources, &apei_resources_all);
451
452 list_for_each_entry(res, &resources->iomem, list) {
453 r = request_mem_region(res->start, res->end - res->start,
454 desc);
455 if (!r) {
456 pr_err(APEI_PFX
457 "Can not request iomem region <%016llx-%016llx> for GARs.\n",
458 (unsigned long long)res->start,
459 (unsigned long long)res->end);
460 res_bak = res;
461 goto err_unmap_iomem;
462 }
463 }
464
465 list_for_each_entry(res, &resources->ioport, list) {
466 r = request_region(res->start, res->end - res->start, desc);
467 if (!r) {
468 pr_err(APEI_PFX
469 "Can not request ioport region <%016llx-%016llx> for GARs.\n",
470 (unsigned long long)res->start,
471 (unsigned long long)res->end);
472 res_bak = res;
473 goto err_unmap_ioport;
474 }
475 }
476
477 apei_resources_merge(&apei_resources_all, resources);
478
479 return 0;
480err_unmap_ioport:
481 list_for_each_entry(res, &resources->ioport, list) {
482 if (res == res_bak)
483 break;
484		release_region(res->start, res->end - res->start);
485 }
486 res_bak = NULL;
487err_unmap_iomem:
488 list_for_each_entry(res, &resources->iomem, list) {
489 if (res == res_bak)
490 break;
491		release_mem_region(res->start, res->end - res->start);
492 }
493 return -EINVAL;
494}
495EXPORT_SYMBOL_GPL(apei_resources_request);
496
497void apei_resources_release(struct apei_resources *resources)
498{
499 struct apei_res *res;
500
501 list_for_each_entry(res, &resources->iomem, list)
502 release_mem_region(res->start, res->end - res->start);
503 list_for_each_entry(res, &resources->ioport, list)
504 release_region(res->start, res->end - res->start);
505
506 apei_resources_sub(&apei_resources_all, resources);
507}
508EXPORT_SYMBOL_GPL(apei_resources_release);
509
510static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
511{
512 u32 width, space_id;
513
514 width = reg->bit_width;
515 space_id = reg->space_id;
516 /* Handle possible alignment issues */
517 memcpy(paddr, &reg->address, sizeof(*paddr));
518 if (!*paddr) {
519 pr_warning(FW_BUG APEI_PFX
520 "Invalid physical address in GAR [0x%llx/%u/%u]\n",
521 *paddr, width, space_id);
522 return -EINVAL;
523 }
524
525 if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
526 pr_warning(FW_BUG APEI_PFX
527 "Invalid bit width in GAR [0x%llx/%u/%u]\n",
528 *paddr, width, space_id);
529 return -EINVAL;
530 }
531
532 if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
533 space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
534 pr_warning(FW_BUG APEI_PFX
535 "Invalid address space type in GAR [0x%llx/%u/%u]\n",
536 *paddr, width, space_id);
537 return -EINVAL;
538 }
539
540 return 0;
541}
542
543static int collect_res_callback(struct apei_exec_context *ctx,
544 struct acpi_whea_header *entry,
545 void *data)
546{
547 struct apei_resources *resources = data;
548 struct acpi_generic_address *reg = &entry->register_region;
549 u8 ins = entry->instruction;
550 u64 paddr;
551 int rc;
552
553 if (!(ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER))
554 return 0;
555
556 rc = apei_check_gar(reg, &paddr);
557 if (rc)
558 return rc;
559
560 switch (reg->space_id) {
561 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
562 return apei_res_add(&resources->iomem, paddr,
563 reg->bit_width / 8);
564 case ACPI_ADR_SPACE_SYSTEM_IO:
565 return apei_res_add(&resources->ioport, paddr,
566 reg->bit_width / 8);
567 default:
568 return -EINVAL;
569 }
570}
571
572/*
573 * The same register may be used by multiple instructions in GARs, so
574 * resources are collected before they are requested.
575 */
576int apei_exec_collect_resources(struct apei_exec_context *ctx,
577 struct apei_resources *resources)
578{
579 return apei_exec_for_each_entry(ctx, collect_res_callback,
580 resources, NULL);
581}
582EXPORT_SYMBOL_GPL(apei_exec_collect_resources);
583
584struct dentry *apei_get_debugfs_dir(void)
585{
586 static struct dentry *dapei;
587
588 if (!dapei)
589 dapei = debugfs_create_dir("apei", NULL);
590
591 return dapei;
592}
593EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
new file mode 100644
index 000000000000..18df1e940276
--- /dev/null
+++ b/drivers/acpi/apei/apei-internal.h
@@ -0,0 +1,114 @@
1/*
2 * apei-internal.h - ACPI Platform Error Interface internal
3 * definitions.
4 */
5
6#ifndef APEI_INTERNAL_H
7#define APEI_INTERNAL_H
8
9#include <linux/cper.h>
10
11struct apei_exec_context;
12
13typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
14 struct acpi_whea_header *entry);
15
16#define APEI_EXEC_INS_ACCESS_REGISTER 0x0001
17
18struct apei_exec_ins_type {
19 u32 flags;
20 apei_exec_ins_func_t run;
21};
22
23struct apei_exec_context {
24 u32 ip;
25 u64 value;
26 u64 var1;
27 u64 var2;
28 u64 src_base;
29 u64 dst_base;
30 struct apei_exec_ins_type *ins_table;
31 u32 instructions;
32 struct acpi_whea_header *action_table;
33 u32 entries;
34};
35
36void apei_exec_ctx_init(struct apei_exec_context *ctx,
37 struct apei_exec_ins_type *ins_table,
38 u32 instructions,
39 struct acpi_whea_header *action_table,
40 u32 entries);
41
42static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx,
43 u64 input)
44{
45 ctx->value = input;
46}
47
48static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx)
49{
50 return ctx->value;
51}
52
53int apei_exec_run(struct apei_exec_context *ctx, u8 action);
54
55/* Common instruction implementation */
56
57/* IP has been set in instruction function */
58#define APEI_EXEC_SET_IP 1
59
60int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
61int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
62int apei_exec_read_register(struct apei_exec_context *ctx,
63 struct acpi_whea_header *entry);
64int apei_exec_read_register_value(struct apei_exec_context *ctx,
65 struct acpi_whea_header *entry);
66int apei_exec_write_register(struct apei_exec_context *ctx,
67 struct acpi_whea_header *entry);
68int apei_exec_write_register_value(struct apei_exec_context *ctx,
69 struct acpi_whea_header *entry);
70int apei_exec_noop(struct apei_exec_context *ctx,
71 struct acpi_whea_header *entry);
72int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
73int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
74
75struct apei_resources {
76 struct list_head iomem;
77 struct list_head ioport;
78};
79
80static inline void apei_resources_init(struct apei_resources *resources)
81{
82 INIT_LIST_HEAD(&resources->iomem);
83 INIT_LIST_HEAD(&resources->ioport);
84}
85
86void apei_resources_fini(struct apei_resources *resources);
87int apei_resources_sub(struct apei_resources *resources1,
88 struct apei_resources *resources2);
89int apei_resources_request(struct apei_resources *resources,
90 const char *desc);
91void apei_resources_release(struct apei_resources *resources);
92int apei_exec_collect_resources(struct apei_exec_context *ctx,
93 struct apei_resources *resources);
94
95struct dentry;
96struct dentry *apei_get_debugfs_dir(void);
97
98#define apei_estatus_for_each_section(estatus, section) \
99 for (section = (struct acpi_hest_generic_data *)(estatus + 1); \
100 (void *)section - (void *)estatus < estatus->data_length; \
101 section = (void *)(section+1) + section->error_data_length)
102
103static inline u32 apei_estatus_len(struct acpi_hest_generic_status *estatus)
104{
105 if (estatus->raw_data_length)
106 return estatus->raw_data_offset + \
107 estatus->raw_data_length;
108 else
109 return sizeof(*estatus) + estatus->data_length;
110}
111
112int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus);
113int apei_estatus_check(const struct acpi_hest_generic_status *estatus);
114#endif
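A minimal sketch (not part of this patch) of how a consumer such as a GHES handler can walk a generic error status block with the helpers declared above, assuming this header and the ACPI table types are available:

/* Hypothetical consumer of the estatus helpers above. */
static void example_walk_estatus(struct acpi_hest_generic_status *estatus)
{
	struct acpi_hest_generic_data *gdata;

	if (apei_estatus_check(estatus))
		return;			/* header or section lengths malformed */

	apei_estatus_for_each_section(estatus, gdata) {
		/* inspect gdata->error_severity, gdata->section_type, ... */
	}
}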
diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
new file mode 100644
index 000000000000..f4cf2fc4c8c1
--- /dev/null
+++ b/drivers/acpi/apei/cper.c
@@ -0,0 +1,84 @@
1/*
2 * UEFI Common Platform Error Record (CPER) support
3 *
4 * Copyright (C) 2010, Intel Corp.
5 * Author: Huang Ying <ying.huang@intel.com>
6 *
7 * CPER is the format used to describe platform hardware errors by
8 * various APEI tables, such as ERST, BERT and HEST.
9 *
10 * For more information about CPER, please refer to Appendix N of UEFI
11 * Specification version 2.3.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License version
15 * 2 as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/time.h>
30#include <linux/cper.h>
31#include <linux/acpi.h>
32
33/*
34 * CPER record IDs need to be unique even across reboots, because the
35 * record ID is used as an index into ERST storage, and CPER records
36 * from multiple boots may co-exist in ERST.
37 */
38u64 cper_next_record_id(void)
39{
40 static atomic64_t seq;
41
42 if (!atomic64_read(&seq))
43 atomic64_set(&seq, ((u64)get_seconds()) << 32);
44
45 return atomic64_inc_return(&seq);
46}
47EXPORT_SYMBOL_GPL(cper_next_record_id);
48
49int apei_estatus_check_header(const struct acpi_hest_generic_status *estatus)
50{
51 if (estatus->data_length &&
52 estatus->data_length < sizeof(struct acpi_hest_generic_data))
53 return -EINVAL;
54 if (estatus->raw_data_length &&
55 estatus->raw_data_offset < sizeof(*estatus) + estatus->data_length)
56 return -EINVAL;
57
58 return 0;
59}
60EXPORT_SYMBOL_GPL(apei_estatus_check_header);
61
62int apei_estatus_check(const struct acpi_hest_generic_status *estatus)
63{
64 struct acpi_hest_generic_data *gdata;
65 unsigned int data_len, gedata_len;
66 int rc;
67
68 rc = apei_estatus_check_header(estatus);
69 if (rc)
70 return rc;
71 data_len = estatus->data_length;
72 gdata = (struct acpi_hest_generic_data *)(estatus + 1);
73 while (data_len > sizeof(*gdata)) {
74 gedata_len = gdata->error_data_length;
75 if (gedata_len > data_len - sizeof(*gdata))
76 return -EINVAL;
77 data_len -= gedata_len + sizeof(*gdata);
78 }
79 if (data_len)
80 return -EINVAL;
81
82 return 0;
83}
84EXPORT_SYMBOL_GPL(apei_estatus_check);
diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c
new file mode 100644
index 000000000000..465c885938ee
--- /dev/null
+++ b/drivers/acpi/apei/einj.c
@@ -0,0 +1,548 @@
1/*
2 * APEI Error INJection support
3 *
4 * EINJ provides a hardware error injection mechanism, which is useful
5 * for debugging and testing other APEI and RAS features.
6 *
7 * For more information about EINJ, please refer to ACPI Specification
8 * version 4.0, section 17.5.
9 *
10 * Copyright 2009-2010 Intel Corp.
11 * Author: Huang Ying <ying.huang@intel.com>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License version
15 * 2 as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/io.h>
31#include <linux/debugfs.h>
32#include <linux/seq_file.h>
33#include <linux/nmi.h>
34#include <linux/delay.h>
35#include <acpi/acpi.h>
36
37#include "apei-internal.h"
38
39#define EINJ_PFX "EINJ: "
40
41#define SPIN_UNIT 100 /* 100ns */
42/* Firmware should respond within 1 millisecond */
43#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
44
45/*
46 * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
47 * EINJ table through an unpublished extension. Use with caution as
48 * most will ignore the parameter and make their own choice of address
49 * for error injection.
50 */
51struct einj_parameter {
52 u64 type;
53 u64 reserved1;
54 u64 reserved2;
55 u64 param1;
56 u64 param2;
57};
58
59#define EINJ_OP_BUSY 0x1
60#define EINJ_STATUS_SUCCESS 0x0
61#define EINJ_STATUS_FAIL 0x1
62#define EINJ_STATUS_INVAL 0x2
63
64#define EINJ_TAB_ENTRY(tab) \
65 ((struct acpi_whea_header *)((char *)(tab) + \
66 sizeof(struct acpi_table_einj)))
67
68static struct acpi_table_einj *einj_tab;
69
70static struct apei_resources einj_resources;
71
72static struct apei_exec_ins_type einj_ins_type[] = {
73 [ACPI_EINJ_READ_REGISTER] = {
74 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
75 .run = apei_exec_read_register,
76 },
77 [ACPI_EINJ_READ_REGISTER_VALUE] = {
78 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
79 .run = apei_exec_read_register_value,
80 },
81 [ACPI_EINJ_WRITE_REGISTER] = {
82 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
83 .run = apei_exec_write_register,
84 },
85 [ACPI_EINJ_WRITE_REGISTER_VALUE] = {
86 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
87 .run = apei_exec_write_register_value,
88 },
89 [ACPI_EINJ_NOOP] = {
90 .flags = 0,
91 .run = apei_exec_noop,
92 },
93};
94
95/*
96 * Prevent the EINJ interpreter from running concurrently, because
97 * the corresponding firmware implementation may not work properly
98 * when invoked simultaneously.
99 */
100static DEFINE_MUTEX(einj_mutex);
101
102static struct einj_parameter *einj_param;
103
104static void einj_exec_ctx_init(struct apei_exec_context *ctx)
105{
106 apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type),
107 EINJ_TAB_ENTRY(einj_tab), einj_tab->entries);
108}
109
110static int __einj_get_available_error_type(u32 *type)
111{
112 struct apei_exec_context ctx;
113 int rc;
114
115 einj_exec_ctx_init(&ctx);
116 rc = apei_exec_run(&ctx, ACPI_EINJ_GET_ERROR_TYPE);
117 if (rc)
118 return rc;
119 *type = apei_exec_ctx_get_output(&ctx);
120
121 return 0;
122}
123
124/* Get error injection capabilities of the platform */
125static int einj_get_available_error_type(u32 *type)
126{
127 int rc;
128
129 mutex_lock(&einj_mutex);
130 rc = __einj_get_available_error_type(type);
131 mutex_unlock(&einj_mutex);
132
133 return rc;
134}
135
136static int einj_timedout(u64 *t)
137{
138 if ((s64)*t < SPIN_UNIT) {
139 pr_warning(FW_WARN EINJ_PFX
140 "Firmware does not respond in time\n");
141 return 1;
142 }
143 *t -= SPIN_UNIT;
144 ndelay(SPIN_UNIT);
145 touch_nmi_watchdog();
146 return 0;
147}
148
149static u64 einj_get_parameter_address(void)
150{
151 int i;
152 u64 paddr = 0;
153 struct acpi_whea_header *entry;
154
155 entry = EINJ_TAB_ENTRY(einj_tab);
156 for (i = 0; i < einj_tab->entries; i++) {
157 if (entry->action == ACPI_EINJ_SET_ERROR_TYPE &&
158 entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
159 entry->register_region.space_id ==
160 ACPI_ADR_SPACE_SYSTEM_MEMORY)
161 memcpy(&paddr, &entry->register_region.address,
162 sizeof(paddr));
163 entry++;
164 }
165
166 return paddr;
167}
168
169/* Do a sanity check on the trigger table */
170static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
171{
172 if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
173 return -EINVAL;
174 if (trigger_tab->table_size > PAGE_SIZE ||
175 trigger_tab->table_size <= trigger_tab->header_size)
176 return -EINVAL;
177 if (trigger_tab->entry_count !=
178 (trigger_tab->table_size - trigger_tab->header_size) /
179 sizeof(struct acpi_einj_entry))
180 return -EINVAL;
181
182 return 0;
183}
184
185/* Execute instructions in trigger error action table */
186static int __einj_error_trigger(u64 trigger_paddr)
187{
188 struct acpi_einj_trigger *trigger_tab = NULL;
189 struct apei_exec_context trigger_ctx;
190 struct apei_resources trigger_resources;
191 struct acpi_whea_header *trigger_entry;
192 struct resource *r;
193 u32 table_size;
194 int rc = -EIO;
195
196 r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
197 "APEI EINJ Trigger Table");
198 if (!r) {
199 pr_err(EINJ_PFX
200 "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
201 (unsigned long long)trigger_paddr,
202 (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
203 goto out;
204 }
205 trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
206 if (!trigger_tab) {
207 pr_err(EINJ_PFX "Failed to map trigger table!\n");
208 goto out_rel_header;
209 }
210 rc = einj_check_trigger_header(trigger_tab);
211 if (rc) {
212 pr_warning(FW_BUG EINJ_PFX
213 "The trigger error action table is invalid\n");
214 goto out_rel_header;
215 }
216 rc = -EIO;
217 table_size = trigger_tab->table_size;
218 r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
219 table_size - sizeof(*trigger_tab),
220 "APEI EINJ Trigger Table");
221 if (!r) {
222 pr_err(EINJ_PFX
223"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
224 (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
225 (unsigned long long)trigger_paddr + table_size);
226 goto out_rel_header;
227 }
228 iounmap(trigger_tab);
229 trigger_tab = ioremap_cache(trigger_paddr, table_size);
230 if (!trigger_tab) {
231 pr_err(EINJ_PFX "Failed to map trigger table!\n");
232 goto out_rel_entry;
233 }
234 trigger_entry = (struct acpi_whea_header *)
235 ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
236 apei_resources_init(&trigger_resources);
237 apei_exec_ctx_init(&trigger_ctx, einj_ins_type,
238 ARRAY_SIZE(einj_ins_type),
239 trigger_entry, trigger_tab->entry_count);
240 rc = apei_exec_collect_resources(&trigger_ctx, &trigger_resources);
241 if (rc)
242 goto out_fini;
243 rc = apei_resources_sub(&trigger_resources, &einj_resources);
244 if (rc)
245 goto out_fini;
246 rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
247 if (rc)
248 goto out_fini;
249 rc = apei_exec_pre_map_gars(&trigger_ctx);
250 if (rc)
251 goto out_release;
252
253 rc = apei_exec_run(&trigger_ctx, ACPI_EINJ_TRIGGER_ERROR);
254
255 apei_exec_post_unmap_gars(&trigger_ctx);
256out_release:
257 apei_resources_release(&trigger_resources);
258out_fini:
259 apei_resources_fini(&trigger_resources);
260out_rel_entry:
261 release_mem_region(trigger_paddr + sizeof(*trigger_tab),
262 table_size - sizeof(*trigger_tab));
263out_rel_header:
264 release_mem_region(trigger_paddr, sizeof(*trigger_tab));
265out:
266 if (trigger_tab)
267 iounmap(trigger_tab);
268
269 return rc;
270}
271
272static int __einj_error_inject(u32 type, u64 param1, u64 param2)
273{
274 struct apei_exec_context ctx;
275 u64 val, trigger_paddr, timeout = FIRMWARE_TIMEOUT;
276 int rc;
277
278 einj_exec_ctx_init(&ctx);
279
280 rc = apei_exec_run(&ctx, ACPI_EINJ_BEGIN_OPERATION);
281 if (rc)
282 return rc;
283 apei_exec_ctx_set_input(&ctx, type);
284 rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
285 if (rc)
286 return rc;
287 if (einj_param) {
288 writeq(param1, &einj_param->param1);
289 writeq(param2, &einj_param->param2);
290 }
291 rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
292 if (rc)
293 return rc;
294 for (;;) {
295 rc = apei_exec_run(&ctx, ACPI_EINJ_CHECK_BUSY_STATUS);
296 if (rc)
297 return rc;
298 val = apei_exec_ctx_get_output(&ctx);
299 if (!(val & EINJ_OP_BUSY))
300 break;
301 if (einj_timedout(&timeout))
302 return -EIO;
303 }
304 rc = apei_exec_run(&ctx, ACPI_EINJ_GET_COMMAND_STATUS);
305 if (rc)
306 return rc;
307 val = apei_exec_ctx_get_output(&ctx);
308 if (val != EINJ_STATUS_SUCCESS)
309 return -EBUSY;
310
311 rc = apei_exec_run(&ctx, ACPI_EINJ_GET_TRIGGER_TABLE);
312 if (rc)
313 return rc;
314 trigger_paddr = apei_exec_ctx_get_output(&ctx);
315 rc = __einj_error_trigger(trigger_paddr);
316 if (rc)
317 return rc;
318 rc = apei_exec_run(&ctx, ACPI_EINJ_END_OPERATION);
319
320 return rc;
321}
322
323/* Inject the specified hardware error */
324static int einj_error_inject(u32 type, u64 param1, u64 param2)
325{
326 int rc;
327
328 mutex_lock(&einj_mutex);
329 rc = __einj_error_inject(type, param1, param2);
330 mutex_unlock(&einj_mutex);
331
332 return rc;
333}
334
335static u32 error_type;
336static u64 error_param1;
337static u64 error_param2;
338static struct dentry *einj_debug_dir;
339
340static int available_error_type_show(struct seq_file *m, void *v)
341{
342 int rc;
343 u32 available_error_type = 0;
344
345 rc = einj_get_available_error_type(&available_error_type);
346 if (rc)
347 return rc;
348 if (available_error_type & 0x0001)
349 seq_printf(m, "0x00000001\tProcessor Correctable\n");
350 if (available_error_type & 0x0002)
351 seq_printf(m, "0x00000002\tProcessor Uncorrectable non-fatal\n");
352 if (available_error_type & 0x0004)
353 seq_printf(m, "0x00000004\tProcessor Uncorrectable fatal\n");
354 if (available_error_type & 0x0008)
355 seq_printf(m, "0x00000008\tMemory Correctable\n");
356 if (available_error_type & 0x0010)
357 seq_printf(m, "0x00000010\tMemory Uncorrectable non-fatal\n");
358 if (available_error_type & 0x0020)
359 seq_printf(m, "0x00000020\tMemory Uncorrectable fatal\n");
360 if (available_error_type & 0x0040)
361 seq_printf(m, "0x00000040\tPCI Express Correctable\n");
362 if (available_error_type & 0x0080)
363 seq_printf(m, "0x00000080\tPCI Express Uncorrectable non-fatal\n");
364 if (available_error_type & 0x0100)
365 seq_printf(m, "0x00000100\tPCI Express Uncorrectable fatal\n");
366 if (available_error_type & 0x0200)
367 seq_printf(m, "0x00000200\tPlatform Correctable\n");
368 if (available_error_type & 0x0400)
369 seq_printf(m, "0x00000400\tPlatform Uncorrectable non-fatal\n");
370 if (available_error_type & 0x0800)
371 seq_printf(m, "0x00000800\tPlatform Uncorrectable fatal\n");
372
373 return 0;
374}
375
376static int available_error_type_open(struct inode *inode, struct file *file)
377{
378 return single_open(file, available_error_type_show, NULL);
379}
380
381static const struct file_operations available_error_type_fops = {
382 .open = available_error_type_open,
383 .read = seq_read,
384 .llseek = seq_lseek,
385 .release = single_release,
386};
387
388static int error_type_get(void *data, u64 *val)
389{
390 *val = error_type;
391
392 return 0;
393}
394
395static int error_type_set(void *data, u64 val)
396{
397 int rc;
398 u32 available_error_type = 0;
399
400 /* Only one error type can be specified */
401 if (val & (val - 1))
402 return -EINVAL;
403 rc = einj_get_available_error_type(&available_error_type);
404 if (rc)
405 return rc;
406 if (!(val & available_error_type))
407 return -EINVAL;
408 error_type = val;
409
410 return 0;
411}
412
413DEFINE_SIMPLE_ATTRIBUTE(error_type_fops, error_type_get,
414 error_type_set, "0x%llx\n");
415
416static int error_inject_set(void *data, u64 val)
417{
418 if (!error_type)
419 return -EINVAL;
420
421 return einj_error_inject(error_type, error_param1, error_param2);
422}
423
424DEFINE_SIMPLE_ATTRIBUTE(error_inject_fops, NULL,
425 error_inject_set, "%llu\n");
426
427static int einj_check_table(struct acpi_table_einj *einj_tab)
428{
429 if (einj_tab->header_length != sizeof(struct acpi_table_einj))
430 return -EINVAL;
431 if (einj_tab->header.length < sizeof(struct acpi_table_einj))
432 return -EINVAL;
433 if (einj_tab->entries !=
434 (einj_tab->header.length - sizeof(struct acpi_table_einj)) /
435 sizeof(struct acpi_einj_entry))
436 return -EINVAL;
437
438 return 0;
439}
440
441static int __init einj_init(void)
442{
443 int rc;
444 u64 param_paddr;
445 acpi_status status;
446 struct dentry *fentry;
447 struct apei_exec_context ctx;
448
449 if (acpi_disabled)
450 return -ENODEV;
451
452 status = acpi_get_table(ACPI_SIG_EINJ, 0,
453 (struct acpi_table_header **)&einj_tab);
454 if (status == AE_NOT_FOUND) {
455 pr_info(EINJ_PFX "Table is not found!\n");
456 return -ENODEV;
457 } else if (ACPI_FAILURE(status)) {
458 const char *msg = acpi_format_exception(status);
459 pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
460 return -EINVAL;
461 }
462
463 rc = einj_check_table(einj_tab);
464 if (rc) {
465 pr_warning(FW_BUG EINJ_PFX "EINJ table is invalid\n");
466 return -EINVAL;
467 }
468
469 rc = -ENOMEM;
470 einj_debug_dir = debugfs_create_dir("einj", apei_get_debugfs_dir());
471 if (!einj_debug_dir)
472 goto err_cleanup;
473 fentry = debugfs_create_file("available_error_type", S_IRUSR,
474 einj_debug_dir, NULL,
475 &available_error_type_fops);
476 if (!fentry)
477 goto err_cleanup;
478 fentry = debugfs_create_file("error_type", S_IRUSR | S_IWUSR,
479 einj_debug_dir, NULL, &error_type_fops);
480 if (!fentry)
481 goto err_cleanup;
482 fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
483 einj_debug_dir, &error_param1);
484 if (!fentry)
485 goto err_cleanup;
486 fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
487 einj_debug_dir, &error_param2);
488 if (!fentry)
489 goto err_cleanup;
490 fentry = debugfs_create_file("error_inject", S_IWUSR,
491 einj_debug_dir, NULL, &error_inject_fops);
492 if (!fentry)
493 goto err_cleanup;
494
495 apei_resources_init(&einj_resources);
496 einj_exec_ctx_init(&ctx);
497 rc = apei_exec_collect_resources(&ctx, &einj_resources);
498 if (rc)
499 goto err_fini;
500 rc = apei_resources_request(&einj_resources, "APEI EINJ");
501 if (rc)
502 goto err_fini;
503 rc = apei_exec_pre_map_gars(&ctx);
504 if (rc)
505 goto err_release;
506 param_paddr = einj_get_parameter_address();
507 if (param_paddr) {
508 einj_param = ioremap(param_paddr, sizeof(*einj_param));
509 rc = -ENOMEM;
510 if (!einj_param)
511 goto err_unmap;
512 }
513
514 pr_info(EINJ_PFX "Error INJection is initialized.\n");
515
516 return 0;
517
518err_unmap:
519 apei_exec_post_unmap_gars(&ctx);
520err_release:
521 apei_resources_release(&einj_resources);
522err_fini:
523 apei_resources_fini(&einj_resources);
524err_cleanup:
525 debugfs_remove_recursive(einj_debug_dir);
526
527 return rc;
528}
529
530static void __exit einj_exit(void)
531{
532 struct apei_exec_context ctx;
533
534 if (einj_param)
535 iounmap(einj_param);
536 einj_exec_ctx_init(&ctx);
537 apei_exec_post_unmap_gars(&ctx);
538 apei_resources_release(&einj_resources);
539 apei_resources_fini(&einj_resources);
540 debugfs_remove_recursive(einj_debug_dir);
541}
542
543module_init(einj_init);
544module_exit(einj_exit);
545
546MODULE_AUTHOR("Huang Ying");
547MODULE_DESCRIPTION("APEI Error INJection support");
548MODULE_LICENSE("GPL");
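As a usage illustration only (not part of this patch), the debugfs files created in einj_init() can be driven from user space roughly as below, assuming debugfs is mounted at /sys/kernel/debug; the error type value comes from the table printed by available_error_type:

/* Hypothetical user-space sketch driving the EINJ debugfs interface. */
#include <stdio.h>

int main(void)
{
	FILE *f;

	/* choose a type reported by available_error_type,
	 * e.g. 0x8 = Memory Correctable */
	f = fopen("/sys/kernel/debug/apei/einj/error_type", "w");
	if (!f)
		return 1;
	fprintf(f, "0x8\n");
	fclose(f);

	/* any write to error_inject triggers the injection */
	f = fopen("/sys/kernel/debug/apei/einj/error_inject", "w");
	if (!f)
		return 1;
	fprintf(f, "1\n");
	fclose(f);

	return 0;
}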
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
new file mode 100644
index 000000000000..2ebc39115507
--- /dev/null
+++ b/drivers/acpi/apei/erst.c
@@ -0,0 +1,855 @@
1/*
2 * APEI Error Record Serialization Table support
3 *
4 * ERST is a way provided by APEI to save and retrieve hardware error
5 * information to and from a persistent store.
6 *
7 * For more information about ERST, please refer to ACPI Specification
8 * version 4.0, section 17.4.
9 *
10 * Copyright 2010 Intel Corp.
11 * Author: Huang Ying <ying.huang@intel.com>
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License version
15 * 2 as published by the Free Software Foundation.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/delay.h>
31#include <linux/io.h>
32#include <linux/acpi.h>
33#include <linux/uaccess.h>
34#include <linux/cper.h>
35#include <linux/nmi.h>
36#include <acpi/apei.h>
37
38#include "apei-internal.h"
39
40#define ERST_PFX "ERST: "
41
42/* ERST command status */
43#define ERST_STATUS_SUCCESS 0x0
44#define ERST_STATUS_NOT_ENOUGH_SPACE 0x1
45#define ERST_STATUS_HARDWARE_NOT_AVAILABLE 0x2
46#define ERST_STATUS_FAILED 0x3
47#define ERST_STATUS_RECORD_STORE_EMPTY 0x4
48#define ERST_STATUS_RECORD_NOT_FOUND 0x5
49
50#define ERST_TAB_ENTRY(tab) \
51 ((struct acpi_whea_header *)((char *)(tab) + \
52 sizeof(struct acpi_table_erst)))
53
54#define SPIN_UNIT 100 /* 100ns */
55/* Firmware should respond within 1 millisecond */
56#define FIRMWARE_TIMEOUT (1 * NSEC_PER_MSEC)
57#define FIRMWARE_MAX_STALL 50 /* 50us */
58
59int erst_disable;
60EXPORT_SYMBOL_GPL(erst_disable);
61
62static struct acpi_table_erst *erst_tab;
63
64/* ERST Error Log Address Range attributes */
65#define ERST_RANGE_RESERVED 0x0001
66#define ERST_RANGE_NVRAM 0x0002
67#define ERST_RANGE_SLOW 0x0004
68
69/*
70 * ERST Error Log Address Range, used as buffer for reading/writing
71 * error records.
72 */
73static struct erst_erange {
74 u64 base;
75 u64 size;
76 void __iomem *vaddr;
77 u32 attr;
78} erst_erange;
79
80/*
81 * Prevent the ERST interpreter from running concurrently, because
82 * the corresponding firmware implementation may not work properly
83 * when invoked simultaneously.
84 *
85 * It is also used to provide exclusive access to the ERST Error Log
86 * Address Range.
87 */
88static DEFINE_SPINLOCK(erst_lock);
89
90static inline int erst_errno(int command_status)
91{
92 switch (command_status) {
93 case ERST_STATUS_SUCCESS:
94 return 0;
95 case ERST_STATUS_HARDWARE_NOT_AVAILABLE:
96 return -ENODEV;
97 case ERST_STATUS_NOT_ENOUGH_SPACE:
98 return -ENOSPC;
99 case ERST_STATUS_RECORD_STORE_EMPTY:
100 case ERST_STATUS_RECORD_NOT_FOUND:
101 return -ENOENT;
102 default:
103 return -EINVAL;
104 }
105}
106
107static int erst_timedout(u64 *t, u64 spin_unit)
108{
109 if ((s64)*t < spin_unit) {
110 pr_warning(FW_WARN ERST_PFX
111 "Firmware does not respond in time\n");
112 return 1;
113 }
114 *t -= spin_unit;
115 ndelay(spin_unit);
116 touch_nmi_watchdog();
117 return 0;
118}
119
120static int erst_exec_load_var1(struct apei_exec_context *ctx,
121 struct acpi_whea_header *entry)
122{
123 return __apei_exec_read_register(entry, &ctx->var1);
124}
125
126static int erst_exec_load_var2(struct apei_exec_context *ctx,
127 struct acpi_whea_header *entry)
128{
129 return __apei_exec_read_register(entry, &ctx->var2);
130}
131
132static int erst_exec_store_var1(struct apei_exec_context *ctx,
133 struct acpi_whea_header *entry)
134{
135 return __apei_exec_write_register(entry, ctx->var1);
136}
137
138static int erst_exec_add(struct apei_exec_context *ctx,
139 struct acpi_whea_header *entry)
140{
141 ctx->var1 += ctx->var2;
142 return 0;
143}
144
145static int erst_exec_subtract(struct apei_exec_context *ctx,
146 struct acpi_whea_header *entry)
147{
148 ctx->var1 -= ctx->var2;
149 return 0;
150}
151
152static int erst_exec_add_value(struct apei_exec_context *ctx,
153 struct acpi_whea_header *entry)
154{
155 int rc;
156 u64 val;
157
158 rc = __apei_exec_read_register(entry, &val);
159 if (rc)
160 return rc;
161 val += ctx->value;
162 rc = __apei_exec_write_register(entry, val);
163 return rc;
164}
165
166static int erst_exec_subtract_value(struct apei_exec_context *ctx,
167 struct acpi_whea_header *entry)
168{
169 int rc;
170 u64 val;
171
172 rc = __apei_exec_read_register(entry, &val);
173 if (rc)
174 return rc;
175 val -= ctx->value;
176 rc = __apei_exec_write_register(entry, val);
177 return rc;
178}
179
180static int erst_exec_stall(struct apei_exec_context *ctx,
181 struct acpi_whea_header *entry)
182{
183 u64 stall_time;
184
185 if (ctx->value > FIRMWARE_MAX_STALL) {
186 if (!in_nmi())
187 pr_warning(FW_WARN ERST_PFX
188 "Too long stall time for stall instruction: %llx.\n",
189 ctx->value);
190 stall_time = FIRMWARE_MAX_STALL;
191 } else
192 stall_time = ctx->value;
193 udelay(stall_time);
194 return 0;
195}
196
197static int erst_exec_stall_while_true(struct apei_exec_context *ctx,
198 struct acpi_whea_header *entry)
199{
200 int rc;
201 u64 val;
202 u64 timeout = FIRMWARE_TIMEOUT;
203 u64 stall_time;
204
205 if (ctx->var1 > FIRMWARE_MAX_STALL) {
206 if (!in_nmi())
207 pr_warning(FW_WARN ERST_PFX
208 "Too long stall time for stall while true instruction: %llx.\n",
209 ctx->var1);
210 stall_time = FIRMWARE_MAX_STALL;
211 } else
212 stall_time = ctx->var1;
213
214 for (;;) {
215 rc = __apei_exec_read_register(entry, &val);
216 if (rc)
217 return rc;
218 if (val != ctx->value)
219 break;
220 if (erst_timedout(&timeout, stall_time * NSEC_PER_USEC))
221 return -EIO;
222 }
223 return 0;
224}
225
226static int erst_exec_skip_next_instruction_if_true(
227 struct apei_exec_context *ctx,
228 struct acpi_whea_header *entry)
229{
230 int rc;
231 u64 val;
232
233 rc = __apei_exec_read_register(entry, &val);
234 if (rc)
235 return rc;
236 if (val == ctx->value) {
237 ctx->ip += 2;
238 return APEI_EXEC_SET_IP;
239 }
240
241 return 0;
242}
243
244static int erst_exec_goto(struct apei_exec_context *ctx,
245 struct acpi_whea_header *entry)
246{
247 ctx->ip = ctx->value;
248 return APEI_EXEC_SET_IP;
249}
250
251static int erst_exec_set_src_address_base(struct apei_exec_context *ctx,
252 struct acpi_whea_header *entry)
253{
254 return __apei_exec_read_register(entry, &ctx->src_base);
255}
256
257static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx,
258 struct acpi_whea_header *entry)
259{
260 return __apei_exec_read_register(entry, &ctx->dst_base);
261}
262
263static int erst_exec_move_data(struct apei_exec_context *ctx,
264 struct acpi_whea_header *entry)
265{
266 int rc;
267 u64 offset;
268
269 rc = __apei_exec_read_register(entry, &offset);
270 if (rc)
271 return rc;
272 memmove((void *)ctx->dst_base + offset,
273 (void *)ctx->src_base + offset,
274 ctx->var2);
275
276 return 0;
277}
278
279static struct apei_exec_ins_type erst_ins_type[] = {
280 [ACPI_ERST_READ_REGISTER] = {
281 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
282 .run = apei_exec_read_register,
283 },
284 [ACPI_ERST_READ_REGISTER_VALUE] = {
285 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
286 .run = apei_exec_read_register_value,
287 },
288 [ACPI_ERST_WRITE_REGISTER] = {
289 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
290 .run = apei_exec_write_register,
291 },
292 [ACPI_ERST_WRITE_REGISTER_VALUE] = {
293 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
294 .run = apei_exec_write_register_value,
295 },
296 [ACPI_ERST_NOOP] = {
297 .flags = 0,
298 .run = apei_exec_noop,
299 },
300 [ACPI_ERST_LOAD_VAR1] = {
301 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
302 .run = erst_exec_load_var1,
303 },
304 [ACPI_ERST_LOAD_VAR2] = {
305 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
306 .run = erst_exec_load_var2,
307 },
308 [ACPI_ERST_STORE_VAR1] = {
309 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
310 .run = erst_exec_store_var1,
311 },
312 [ACPI_ERST_ADD] = {
313 .flags = 0,
314 .run = erst_exec_add,
315 },
316 [ACPI_ERST_SUBTRACT] = {
317 .flags = 0,
318 .run = erst_exec_subtract,
319 },
320 [ACPI_ERST_ADD_VALUE] = {
321 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
322 .run = erst_exec_add_value,
323 },
324 [ACPI_ERST_SUBTRACT_VALUE] = {
325 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
326 .run = erst_exec_subtract_value,
327 },
328 [ACPI_ERST_STALL] = {
329 .flags = 0,
330 .run = erst_exec_stall,
331 },
332 [ACPI_ERST_STALL_WHILE_TRUE] = {
333 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
334 .run = erst_exec_stall_while_true,
335 },
336 [ACPI_ERST_SKIP_NEXT_IF_TRUE] = {
337 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
338 .run = erst_exec_skip_next_instruction_if_true,
339 },
340 [ACPI_ERST_GOTO] = {
341 .flags = 0,
342 .run = erst_exec_goto,
343 },
344 [ACPI_ERST_SET_SRC_ADDRESS_BASE] = {
345 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
346 .run = erst_exec_set_src_address_base,
347 },
348 [ACPI_ERST_SET_DST_ADDRESS_BASE] = {
349 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
350 .run = erst_exec_set_dst_address_base,
351 },
352 [ACPI_ERST_MOVE_DATA] = {
353 .flags = APEI_EXEC_INS_ACCESS_REGISTER,
354 .run = erst_exec_move_data,
355 },
356};
357
358static inline void erst_exec_ctx_init(struct apei_exec_context *ctx)
359{
360 apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type),
361 ERST_TAB_ENTRY(erst_tab), erst_tab->entries);
362}
363
364static int erst_get_erange(struct erst_erange *range)
365{
366 struct apei_exec_context ctx;
367 int rc;
368
369 erst_exec_ctx_init(&ctx);
370 rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE);
371 if (rc)
372 return rc;
373 range->base = apei_exec_ctx_get_output(&ctx);
374 rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH);
375 if (rc)
376 return rc;
377 range->size = apei_exec_ctx_get_output(&ctx);
378 rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES);
379 if (rc)
380 return rc;
381 range->attr = apei_exec_ctx_get_output(&ctx);
382
383 return 0;
384}
385
386static ssize_t __erst_get_record_count(void)
387{
388 struct apei_exec_context ctx;
389 int rc;
390
391 erst_exec_ctx_init(&ctx);
392 rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
393 if (rc)
394 return rc;
395 return apei_exec_ctx_get_output(&ctx);
396}
397
398ssize_t erst_get_record_count(void)
399{
400 ssize_t count;
401 unsigned long flags;
402
403 if (erst_disable)
404 return -ENODEV;
405
406 spin_lock_irqsave(&erst_lock, flags);
407 count = __erst_get_record_count();
408 spin_unlock_irqrestore(&erst_lock, flags);
409
410 return count;
411}
412EXPORT_SYMBOL_GPL(erst_get_record_count);
413
414static int __erst_get_next_record_id(u64 *record_id)
415{
416 struct apei_exec_context ctx;
417 int rc;
418
419 erst_exec_ctx_init(&ctx);
420 rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID);
421 if (rc)
422 return rc;
423 *record_id = apei_exec_ctx_get_output(&ctx);
424
425 return 0;
426}
427
428/*
429 * Get the record ID of an existing error record in persistent
430 * storage. If there is no error record in persistent storage, the
431 * returned record_id is APEI_ERST_INVALID_RECORD_ID.
432 */
433int erst_get_next_record_id(u64 *record_id)
434{
435 int rc;
436 unsigned long flags;
437
438 if (erst_disable)
439 return -ENODEV;
440
441 spin_lock_irqsave(&erst_lock, flags);
442 rc = __erst_get_next_record_id(record_id);
443 spin_unlock_irqrestore(&erst_lock, flags);
444
445 return rc;
446}
447EXPORT_SYMBOL_GPL(erst_get_next_record_id);
448
449static int __erst_write_to_storage(u64 offset)
450{
451 struct apei_exec_context ctx;
452 u64 timeout = FIRMWARE_TIMEOUT;
453 u64 val;
454 int rc;
455
456 erst_exec_ctx_init(&ctx);
457 rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_WRITE);
458 if (rc)
459 return rc;
460 apei_exec_ctx_set_input(&ctx, offset);
461 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
462 if (rc)
463 return rc;
464 rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
465 if (rc)
466 return rc;
467 for (;;) {
468 rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
469 if (rc)
470 return rc;
471 val = apei_exec_ctx_get_output(&ctx);
472 if (!val)
473 break;
474 if (erst_timedout(&timeout, SPIN_UNIT))
475 return -EIO;
476 }
477 rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
478 if (rc)
479 return rc;
480 val = apei_exec_ctx_get_output(&ctx);
481 rc = apei_exec_run(&ctx, ACPI_ERST_END);
482 if (rc)
483 return rc;
484
485 return erst_errno(val);
486}
487
488static int __erst_read_from_storage(u64 record_id, u64 offset)
489{
490 struct apei_exec_context ctx;
491 u64 timeout = FIRMWARE_TIMEOUT;
492 u64 val;
493 int rc;
494
495 erst_exec_ctx_init(&ctx);
496 rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_READ);
497 if (rc)
498 return rc;
499 apei_exec_ctx_set_input(&ctx, offset);
500 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET);
501 if (rc)
502 return rc;
503 apei_exec_ctx_set_input(&ctx, record_id);
504 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
505 if (rc)
506 return rc;
507 rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
508 if (rc)
509 return rc;
510 for (;;) {
511 rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
512 if (rc)
513 return rc;
514 val = apei_exec_ctx_get_output(&ctx);
515 if (!val)
516 break;
517 if (erst_timedout(&timeout, SPIN_UNIT))
518 return -EIO;
519	}
520 rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
521 if (rc)
522 return rc;
523 val = apei_exec_ctx_get_output(&ctx);
524 rc = apei_exec_run(&ctx, ACPI_ERST_END);
525 if (rc)
526 return rc;
527
528 return erst_errno(val);
529}
530
531static int __erst_clear_from_storage(u64 record_id)
532{
533 struct apei_exec_context ctx;
534 u64 timeout = FIRMWARE_TIMEOUT;
535 u64 val;
536 int rc;
537
538 erst_exec_ctx_init(&ctx);
539 rc = apei_exec_run(&ctx, ACPI_ERST_BEGIN_CLEAR);
540 if (rc)
541 return rc;
542 apei_exec_ctx_set_input(&ctx, record_id);
543 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID);
544 if (rc)
545 return rc;
546 rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION);
547 if (rc)
548 return rc;
549 for (;;) {
550 rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS);
551 if (rc)
552 return rc;
553 val = apei_exec_ctx_get_output(&ctx);
554 if (!val)
555 break;
556 if (erst_timedout(&timeout, SPIN_UNIT))
557 return -EIO;
558 }
559 rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS);
560 if (rc)
561 return rc;
562 val = apei_exec_ctx_get_output(&ctx);
563 rc = apei_exec_run(&ctx, ACPI_ERST_END);
564 if (rc)
565 return rc;
566
567 return erst_errno(val);
568}
569
570/* NVRAM ERST Error Log Address Range is not supported yet */
571static void pr_unimpl_nvram(void)
572{
573 if (printk_ratelimit())
574 pr_warning(ERST_PFX
575 "NVRAM ERST Log Address Range is not implemented yet\n");
576}
577
578static int __erst_write_to_nvram(const struct cper_record_header *record)
579{
580	/* do not print a message, because printk is not safe in NMI context */
581 return -ENOSYS;
582}
583
584static int __erst_read_to_erange_from_nvram(u64 record_id, u64 *offset)
585{
586 pr_unimpl_nvram();
587 return -ENOSYS;
588}
589
590static int __erst_clear_from_nvram(u64 record_id)
591{
592 pr_unimpl_nvram();
593 return -ENOSYS;
594}
595
596int erst_write(const struct cper_record_header *record)
597{
598 int rc;
599 unsigned long flags;
600 struct cper_record_header *rcd_erange;
601
602 if (erst_disable)
603 return -ENODEV;
604
605 if (memcmp(record->signature, CPER_SIG_RECORD, CPER_SIG_SIZE))
606 return -EINVAL;
607
608 if (erst_erange.attr & ERST_RANGE_NVRAM) {
609 if (!spin_trylock_irqsave(&erst_lock, flags))
610 return -EBUSY;
611 rc = __erst_write_to_nvram(record);
612 spin_unlock_irqrestore(&erst_lock, flags);
613 return rc;
614 }
615
616 if (record->record_length > erst_erange.size)
617 return -EINVAL;
618
619 if (!spin_trylock_irqsave(&erst_lock, flags))
620 return -EBUSY;
621 memcpy(erst_erange.vaddr, record, record->record_length);
622 rcd_erange = erst_erange.vaddr;
623 /* signature for serialization system */
624 memcpy(&rcd_erange->persistence_information, "ER", 2);
625
626 rc = __erst_write_to_storage(0);
627 spin_unlock_irqrestore(&erst_lock, flags);
628
629 return rc;
630}
631EXPORT_SYMBOL_GPL(erst_write);
632
633static int __erst_read_to_erange(u64 record_id, u64 *offset)
634{
635 int rc;
636
637 if (erst_erange.attr & ERST_RANGE_NVRAM)
638 return __erst_read_to_erange_from_nvram(
639 record_id, offset);
640
641 rc = __erst_read_from_storage(record_id, 0);
642 if (rc)
643 return rc;
644 *offset = 0;
645
646 return 0;
647}
648
649static ssize_t __erst_read(u64 record_id, struct cper_record_header *record,
650 size_t buflen)
651{
652 int rc;
653 u64 offset, len = 0;
654 struct cper_record_header *rcd_tmp;
655
656 rc = __erst_read_to_erange(record_id, &offset);
657 if (rc)
658 return rc;
659 rcd_tmp = erst_erange.vaddr + offset;
660 len = rcd_tmp->record_length;
661 if (len <= buflen)
662 memcpy(record, rcd_tmp, len);
663
664 return len;
665}
666
667/*
668 * If the return value > buflen, the buffer size is not big enough;
669 * if the return value < 0, something went wrong;
670 * otherwise everything is OK and the return value is the record length
671 */
672ssize_t erst_read(u64 record_id, struct cper_record_header *record,
673 size_t buflen)
674{
675 ssize_t len;
676 unsigned long flags;
677
678 if (erst_disable)
679 return -ENODEV;
680
681 spin_lock_irqsave(&erst_lock, flags);
682 len = __erst_read(record_id, record, buflen);
683 spin_unlock_irqrestore(&erst_lock, flags);
684 return len;
685}
686EXPORT_SYMBOL_GPL(erst_read);
687
688/*
689 * If the return value > buflen, the buffer size is not big enough;
690 * if the return value = 0, there are no more records to read;
691 * if the return value < 0, something went wrong;
692 * otherwise everything is OK and the return value is the record length
693 */
694ssize_t erst_read_next(struct cper_record_header *record, size_t buflen)
695{
696 int rc;
697 ssize_t len;
698 unsigned long flags;
699 u64 record_id;
700
701 if (erst_disable)
702 return -ENODEV;
703
704 spin_lock_irqsave(&erst_lock, flags);
705 rc = __erst_get_next_record_id(&record_id);
706 if (rc) {
707 spin_unlock_irqrestore(&erst_lock, flags);
708 return rc;
709 }
710 /* no more record */
711 if (record_id == APEI_ERST_INVALID_RECORD_ID) {
712 spin_unlock_irqrestore(&erst_lock, flags);
713 return 0;
714 }
715
716 len = __erst_read(record_id, record, buflen);
717 spin_unlock_irqrestore(&erst_lock, flags);
718
719 return len;
720}
721EXPORT_SYMBOL_GPL(erst_read_next);
722
723int erst_clear(u64 record_id)
724{
725 int rc;
726 unsigned long flags;
727
728 if (erst_disable)
729 return -ENODEV;
730
731 spin_lock_irqsave(&erst_lock, flags);
732 if (erst_erange.attr & ERST_RANGE_NVRAM)
733 rc = __erst_clear_from_nvram(record_id);
734 else
735 rc = __erst_clear_from_storage(record_id);
736 spin_unlock_irqrestore(&erst_lock, flags);
737
738 return rc;
739}
740EXPORT_SYMBOL_GPL(erst_clear);
741
742static int __init setup_erst_disable(char *str)
743{
744 erst_disable = 1;
745 return 0;
746}
747
748__setup("erst_disable", setup_erst_disable);
749
750static int erst_check_table(struct acpi_table_erst *erst_tab)
751{
752 if (erst_tab->header_length != sizeof(struct acpi_table_erst))
753 return -EINVAL;
754 if (erst_tab->header.length < sizeof(struct acpi_table_erst))
755 return -EINVAL;
756 if (erst_tab->entries !=
757 (erst_tab->header.length - sizeof(struct acpi_table_erst)) /
758 sizeof(struct acpi_erst_entry))
759 return -EINVAL;
760
761 return 0;
762}
763
764static int __init erst_init(void)
765{
766 int rc = 0;
767 acpi_status status;
768 struct apei_exec_context ctx;
769 struct apei_resources erst_resources;
770 struct resource *r;
771
772 if (acpi_disabled)
773 goto err;
774
775 if (erst_disable) {
776 pr_info(ERST_PFX
777 "Error Record Serialization Table (ERST) support is disabled.\n");
778 goto err;
779 }
780
781 status = acpi_get_table(ACPI_SIG_ERST, 0,
782 (struct acpi_table_header **)&erst_tab);
783 if (status == AE_NOT_FOUND) {
784 pr_err(ERST_PFX "Table is not found!\n");
785 goto err;
786 } else if (ACPI_FAILURE(status)) {
787 const char *msg = acpi_format_exception(status);
788 pr_err(ERST_PFX "Failed to get table, %s\n", msg);
789 rc = -EINVAL;
790 goto err;
791 }
792
793 rc = erst_check_table(erst_tab);
794 if (rc) {
795 pr_err(FW_BUG ERST_PFX "ERST table is invalid\n");
796 goto err;
797 }
798
799 apei_resources_init(&erst_resources);
800 erst_exec_ctx_init(&ctx);
801 rc = apei_exec_collect_resources(&ctx, &erst_resources);
802 if (rc)
803 goto err_fini;
804 rc = apei_resources_request(&erst_resources, "APEI ERST");
805 if (rc)
806 goto err_fini;
807 rc = apei_exec_pre_map_gars(&ctx);
808 if (rc)
809 goto err_release;
810 rc = erst_get_erange(&erst_erange);
811 if (rc) {
812 if (rc == -ENODEV)
813 pr_info(ERST_PFX
814 "The corresponding hardware device or firmware implementation "
815 "is not available.\n");
816 else
817 pr_err(ERST_PFX
818 "Failed to get Error Log Address Range.\n");
819 goto err_unmap_reg;
820 }
821
822 r = request_mem_region(erst_erange.base, erst_erange.size, "APEI ERST");
823 if (!r) {
824 pr_err(ERST_PFX
825 "Can not request iomem region <0x%16llx-0x%16llx> for ERST.\n",
826 (unsigned long long)erst_erange.base,
827 (unsigned long long)erst_erange.base + erst_erange.size);
828 rc = -EIO;
829 goto err_unmap_reg;
830 }
831 rc = -ENOMEM;
832 erst_erange.vaddr = ioremap_cache(erst_erange.base,
833 erst_erange.size);
834 if (!erst_erange.vaddr)
835 goto err_release_erange;
836
837 pr_info(ERST_PFX
838 "Error Record Serialization Table (ERST) support is initialized.\n");
839
840 return 0;
841
842err_release_erange:
843 release_mem_region(erst_erange.base, erst_erange.size);
844err_unmap_reg:
845 apei_exec_post_unmap_gars(&ctx);
846err_release:
847 apei_resources_release(&erst_resources);
848err_fini:
849 apei_resources_fini(&erst_resources);
850err:
851 erst_disable = 1;
852 return rc;
853}
854
855device_initcall(erst_init);
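
For illustration, a minimal, hypothetical consumer of the ERST interface exported above (erst_get_next_record_id(), erst_read(), erst_clear()) could look like the sketch below. The 4 KiB buffer size and the example function name are assumptions; the declarations are expected to live in <acpi/apei.h>.

/*
 * Illustration only, not part of this patch: walk persistent storage,
 * print each record's ID and length, then clear it.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cper.h>
#include <acpi/apei.h>

static void erst_drain_all_records(void)
{
	struct cper_record_header *rcd;
	u64 record_id;
	ssize_t len;

	rcd = kmalloc(4096, GFP_KERNEL);	/* assumed "big enough" buffer */
	if (!rcd)
		return;
	for (;;) {
		if (erst_get_next_record_id(&record_id))
			break;
		if (record_id == APEI_ERST_INVALID_RECORD_ID)
			break;		/* persistent storage is empty */
		len = erst_read(record_id, rcd, 4096);
		if (len > 0 && len <= 4096)
			pr_info("ERST record %llu, %zd bytes\n",
				(unsigned long long)record_id, len);
		if (erst_clear(record_id))
			break;		/* avoid spinning on the same ID */
	}
	kfree(rcd);
}
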
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
new file mode 100644
index 000000000000..fd0cc016a099
--- /dev/null
+++ b/drivers/acpi/apei/ghes.c
@@ -0,0 +1,427 @@
1/*
2 * APEI Generic Hardware Error Source support
3 *
4 * Generic Hardware Error Source provides a way to report platform
5 * hardware errors (such as those from the chipset). It works in
6 * so-called "Firmware First" mode: hardware errors are reported to
7 * firmware first, then forwarded to Linux by the firmware. This way,
8 * some non-standard hardware error registers or non-standard hardware
9 * links can be checked by firmware to produce more hardware error
10 * information for Linux.
11 *
12 * For more information about Generic Hardware Error Source, please
13 * refer to ACPI Specification version 4.0, section 17.3.2.6
14 *
15 * Now, only SCI notification type and memory errors are
16 * supported. More notification types and hardware error types will
17 * be added later.
18 *
19 * Copyright 2010 Intel Corp.
20 * Author: Huang Ying <ying.huang@intel.com>
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License version
24 * 2 as published by the Free Software Foundation;
25 *
26 * This program is distributed in the hope that it will be useful,
27 * but WITHOUT ANY WARRANTY; without even the implied warranty of
28 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
29 * GNU General Public License for more details.
30 *
31 * You should have received a copy of the GNU General Public License
32 * along with this program; if not, write to the Free Software
33 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
34 */
35
36#include <linux/kernel.h>
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/acpi.h>
40#include <linux/io.h>
41#include <linux/interrupt.h>
42#include <linux/cper.h>
43#include <linux/kdebug.h>
44#include <acpi/apei.h>
45#include <acpi/atomicio.h>
46#include <acpi/hed.h>
47#include <asm/mce.h>
48
49#include "apei-internal.h"
50
51#define GHES_PFX "GHES: "
52
53#define GHES_ESTATUS_MAX_SIZE 65536
54
55/*
56 * One struct ghes is created for each generic hardware error
57 * source.
58 *
59 * It provides the context for the APEI hardware error timer/IRQ/SCI/NMI
60 * handlers. The handler for one generic hardware error source is only
61 * triggered after the previous one has finished, so the handler can use
62 * its struct ghes without locking.
63 *
64 * estatus: memory buffer for error status block, allocated during
65 * HEST parsing.
66 */
67#define GHES_TO_CLEAR 0x0001
68
69struct ghes {
70 struct acpi_hest_generic *generic;
71 struct acpi_hest_generic_status *estatus;
72 struct list_head list;
73 u64 buffer_paddr;
74 unsigned long flags;
75};
76
77/*
78 * Error source lists, one list for each notification method. The
79 * list members are struct ghes.
80 *
81 * List members are only added during HEST parsing and deleted at
82 * module_exit, i.e. single-threaded, so no lock is needed for
83 * that.
84 *
85 * But mutual exclusion is needed between member addition/deletion
86 * and the timer/IRQ/SCI/NMI handlers, which may traverse the lists.
87 * RCU is used for that.
88 */
89static LIST_HEAD(ghes_sci);
90
91static struct ghes *ghes_new(struct acpi_hest_generic *generic)
92{
93 struct ghes *ghes;
94 unsigned int error_block_length;
95 int rc;
96
97 ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
98 if (!ghes)
99 return ERR_PTR(-ENOMEM);
100 ghes->generic = generic;
101 INIT_LIST_HEAD(&ghes->list);
102 rc = acpi_pre_map_gar(&generic->error_status_address);
103 if (rc)
104 goto err_free;
105 error_block_length = generic->error_block_length;
106 if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
107 pr_warning(FW_WARN GHES_PFX
108 "Error status block length is too long: %u for "
109 "generic hardware error source: %d.\n",
110 error_block_length, generic->header.source_id);
111 error_block_length = GHES_ESTATUS_MAX_SIZE;
112 }
113 ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
114 if (!ghes->estatus) {
115 rc = -ENOMEM;
116 goto err_unmap;
117 }
118
119 return ghes;
120
121err_unmap:
122 acpi_post_unmap_gar(&generic->error_status_address);
123err_free:
124 kfree(ghes);
125 return ERR_PTR(rc);
126}
127
128static void ghes_fini(struct ghes *ghes)
129{
130 kfree(ghes->estatus);
131 acpi_post_unmap_gar(&ghes->generic->error_status_address);
132}
133
134enum {
135 GHES_SER_NO = 0x0,
136 GHES_SER_CORRECTED = 0x1,
137 GHES_SER_RECOVERABLE = 0x2,
138 GHES_SER_PANIC = 0x3,
139};
140
141static inline int ghes_severity(int severity)
142{
143 switch (severity) {
144 case CPER_SER_INFORMATIONAL:
145 return GHES_SER_NO;
146 case CPER_SER_CORRECTED:
147 return GHES_SER_CORRECTED;
148 case CPER_SER_RECOVERABLE:
149 return GHES_SER_RECOVERABLE;
150 case CPER_SER_FATAL:
151 return GHES_SER_PANIC;
152 default:
153		/* Unknown, go panic */
154 return GHES_SER_PANIC;
155 }
156}
157
158/* The SCI handler runs in a work queue, so ioremap can be used here */
159static int ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
160 int from_phys)
161{
162 void *vaddr;
163
164 vaddr = ioremap_cache(paddr, len);
165 if (!vaddr)
166 return -ENOMEM;
167 if (from_phys)
168 memcpy(buffer, vaddr, len);
169 else
170 memcpy(vaddr, buffer, len);
171 iounmap(vaddr);
172
173 return 0;
174}
175
176static int ghes_read_estatus(struct ghes *ghes, int silent)
177{
178 struct acpi_hest_generic *g = ghes->generic;
179 u64 buf_paddr;
180 u32 len;
181 int rc;
182
183 rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
184 if (rc) {
185 if (!silent && printk_ratelimit())
186 pr_warning(FW_WARN GHES_PFX
187"Failed to read error status block address for hardware error source: %d.\n",
188 g->header.source_id);
189 return -EIO;
190 }
191 if (!buf_paddr)
192 return -ENOENT;
193
194 rc = ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
195 sizeof(*ghes->estatus), 1);
196 if (rc)
197 return rc;
198 if (!ghes->estatus->block_status)
199 return -ENOENT;
200
201 ghes->buffer_paddr = buf_paddr;
202 ghes->flags |= GHES_TO_CLEAR;
203
204 rc = -EIO;
205 len = apei_estatus_len(ghes->estatus);
206 if (len < sizeof(*ghes->estatus))
207 goto err_read_block;
208 if (len > ghes->generic->error_block_length)
209 goto err_read_block;
210 if (apei_estatus_check_header(ghes->estatus))
211 goto err_read_block;
212 rc = ghes_copy_tofrom_phys(ghes->estatus + 1,
213 buf_paddr + sizeof(*ghes->estatus),
214 len - sizeof(*ghes->estatus), 1);
215 if (rc)
216 return rc;
217 if (apei_estatus_check(ghes->estatus))
218 goto err_read_block;
219 rc = 0;
220
221err_read_block:
222 if (rc && !silent)
223 pr_warning(FW_WARN GHES_PFX
224 "Failed to read error status block!\n");
225 return rc;
226}
227
228static void ghes_clear_estatus(struct ghes *ghes)
229{
230 ghes->estatus->block_status = 0;
231 if (!(ghes->flags & GHES_TO_CLEAR))
232 return;
233 ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
234 sizeof(ghes->estatus->block_status), 0);
235 ghes->flags &= ~GHES_TO_CLEAR;
236}
237
238static void ghes_do_proc(struct ghes *ghes)
239{
240 int ser, processed = 0;
241 struct acpi_hest_generic_data *gdata;
242
243 ser = ghes_severity(ghes->estatus->error_severity);
244 apei_estatus_for_each_section(ghes->estatus, gdata) {
245#ifdef CONFIG_X86_MCE
246 if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
247 CPER_SEC_PLATFORM_MEM)) {
248 apei_mce_report_mem_error(
249 ser == GHES_SER_CORRECTED,
250 (struct cper_sec_mem_err *)(gdata+1));
251 processed = 1;
252 }
253#endif
254 }
255
256 if (!processed && printk_ratelimit())
257 pr_warning(GHES_PFX
258 "Unknown error record from generic hardware error source: %d\n",
259 ghes->generic->header.source_id);
260}
261
262static int ghes_proc(struct ghes *ghes)
263{
264 int rc;
265
266 rc = ghes_read_estatus(ghes, 0);
267 if (rc)
268 goto out;
269 ghes_do_proc(ghes);
270
271out:
272 ghes_clear_estatus(ghes);
273	return rc;
274}
275
276static int ghes_notify_sci(struct notifier_block *this,
277 unsigned long event, void *data)
278{
279 struct ghes *ghes;
280 int ret = NOTIFY_DONE;
281
282 rcu_read_lock();
283 list_for_each_entry_rcu(ghes, &ghes_sci, list) {
284 if (!ghes_proc(ghes))
285 ret = NOTIFY_OK;
286 }
287 rcu_read_unlock();
288
289 return ret;
290}
291
292static struct notifier_block ghes_notifier_sci = {
293 .notifier_call = ghes_notify_sci,
294};
295
296static int hest_ghes_parse(struct acpi_hest_header *hest_hdr, void *data)
297{
298 struct acpi_hest_generic *generic;
299 struct ghes *ghes = NULL;
300 int rc = 0;
301
302 if (hest_hdr->type != ACPI_HEST_TYPE_GENERIC_ERROR)
303 return 0;
304
305 generic = (struct acpi_hest_generic *)hest_hdr;
306 if (!generic->enabled)
307 return 0;
308
309 if (generic->error_block_length <
310 sizeof(struct acpi_hest_generic_status)) {
311 pr_warning(FW_BUG GHES_PFX
312"Invalid error block length: %u for generic hardware error source: %d\n",
313 generic->error_block_length,
314 generic->header.source_id);
315 goto err;
316 }
317 if (generic->records_to_preallocate == 0) {
318 pr_warning(FW_BUG GHES_PFX
319"Invalid records to preallocate: %u for generic hardware error source: %d\n",
320 generic->records_to_preallocate,
321 generic->header.source_id);
322 goto err;
323 }
324 ghes = ghes_new(generic);
325 if (IS_ERR(ghes)) {
326 rc = PTR_ERR(ghes);
327 ghes = NULL;
328 goto err;
329 }
330 switch (generic->notify.type) {
331 case ACPI_HEST_NOTIFY_POLLED:
332 pr_warning(GHES_PFX
333"Generic hardware error source: %d notified via POLL is not supported!\n",
334 generic->header.source_id);
335 break;
336 case ACPI_HEST_NOTIFY_EXTERNAL:
337 case ACPI_HEST_NOTIFY_LOCAL:
338 pr_warning(GHES_PFX
339"Generic hardware error source: %d notified via IRQ is not supported!\n",
340 generic->header.source_id);
341 break;
342 case ACPI_HEST_NOTIFY_SCI:
343 if (list_empty(&ghes_sci))
344 register_acpi_hed_notifier(&ghes_notifier_sci);
345 list_add_rcu(&ghes->list, &ghes_sci);
346 break;
347 case ACPI_HEST_NOTIFY_NMI:
348 pr_warning(GHES_PFX
349"Generic hardware error source: %d notified via NMI is not supported!\n",
350 generic->header.source_id);
351 break;
352 default:
353 pr_warning(FW_WARN GHES_PFX
354 "Unknown notification type: %u for generic hardware error source: %d\n",
355 generic->notify.type, generic->header.source_id);
356 break;
357 }
358
359 return 0;
360err:
361 if (ghes)
362 ghes_fini(ghes);
363 return rc;
364}
365
366static void ghes_cleanup(void)
367{
368 struct ghes *ghes, *nghes;
369
370 if (!list_empty(&ghes_sci))
371 unregister_acpi_hed_notifier(&ghes_notifier_sci);
372
373 synchronize_rcu();
374
375 list_for_each_entry_safe(ghes, nghes, &ghes_sci, list) {
376 list_del(&ghes->list);
377 ghes_fini(ghes);
378 kfree(ghes);
379 }
380}
381
382static int __init ghes_init(void)
383{
384 int rc;
385
386 if (acpi_disabled)
387 return -ENODEV;
388
389 if (hest_disable) {
390 pr_info(GHES_PFX "HEST is not enabled!\n");
391 return -EINVAL;
392 }
393
394 rc = apei_hest_parse(hest_ghes_parse, NULL);
395 if (rc) {
396 pr_err(GHES_PFX
397 "Error during parsing HEST generic hardware error sources.\n");
398 goto err_cleanup;
399 }
400
401 if (list_empty(&ghes_sci)) {
402 pr_info(GHES_PFX
403 "No functional generic hardware error sources.\n");
404 rc = -ENODEV;
405 goto err_cleanup;
406 }
407
408 pr_info(GHES_PFX
409 "Generic Hardware Error Source support is initialized.\n");
410
411 return 0;
412err_cleanup:
413 ghes_cleanup();
414 return rc;
415}
416
417static void __exit ghes_exit(void)
418{
419 ghes_cleanup();
420}
421
422module_init(ghes_init);
423module_exit(ghes_exit);
424
425MODULE_AUTHOR("Huang Ying");
426MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
427MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
new file mode 100644
index 000000000000..e7f40d362cb3
--- /dev/null
+++ b/drivers/acpi/apei/hest.c
@@ -0,0 +1,173 @@
1/*
2 * APEI Hardware Error Source Table support
3 *
4 * HEST describes error sources in detail and communicates operational
5 * parameters (e.g. severity levels, masking bits, and threshold
6 * values) to Linux as necessary. It also allows the BIOS to report
7 * non-standard error sources to Linux (for example, chipset-specific
8 * error registers).
9 *
10 * For more information about HEST, please refer to ACPI Specification
11 * version 4.0, section 17.3.2.
12 *
13 * Copyright 2009 Intel Corp.
14 * Author: Huang Ying <ying.huang@intel.com>
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License version
18 * 2 as published by the Free Software Foundation;
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 */
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32#include <linux/init.h>
33#include <linux/acpi.h>
34#include <linux/kdebug.h>
35#include <linux/highmem.h>
36#include <linux/io.h>
37#include <acpi/apei.h>
38
39#include "apei-internal.h"
40
41#define HEST_PFX "HEST: "
42
43int hest_disable;
44EXPORT_SYMBOL_GPL(hest_disable);
45
46/* HEST table parsing */
47
48static struct acpi_table_hest *hest_tab;
49
50static int hest_void_parse(struct acpi_hest_header *hest_hdr, void *data)
51{
52 return 0;
53}
54
55static int hest_esrc_len_tab[ACPI_HEST_TYPE_RESERVED] = {
56 [ACPI_HEST_TYPE_IA32_CHECK] = -1, /* need further calculation */
57 [ACPI_HEST_TYPE_IA32_CORRECTED_CHECK] = -1,
58 [ACPI_HEST_TYPE_IA32_NMI] = sizeof(struct acpi_hest_ia_nmi),
59 [ACPI_HEST_TYPE_AER_ROOT_PORT] = sizeof(struct acpi_hest_aer_root),
60 [ACPI_HEST_TYPE_AER_ENDPOINT] = sizeof(struct acpi_hest_aer),
61 [ACPI_HEST_TYPE_AER_BRIDGE] = sizeof(struct acpi_hest_aer_bridge),
62 [ACPI_HEST_TYPE_GENERIC_ERROR] = sizeof(struct acpi_hest_generic),
63};
64
65static int hest_esrc_len(struct acpi_hest_header *hest_hdr)
66{
67 u16 hest_type = hest_hdr->type;
68 int len;
69
70 if (hest_type >= ACPI_HEST_TYPE_RESERVED)
71 return 0;
72
73 len = hest_esrc_len_tab[hest_type];
74
75 if (hest_type == ACPI_HEST_TYPE_IA32_CORRECTED_CHECK) {
76 struct acpi_hest_ia_corrected *cmc;
77 cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
78 len = sizeof(*cmc) + cmc->num_hardware_banks *
79 sizeof(struct acpi_hest_ia_error_bank);
80 } else if (hest_type == ACPI_HEST_TYPE_IA32_CHECK) {
81 struct acpi_hest_ia_machine_check *mc;
82 mc = (struct acpi_hest_ia_machine_check *)hest_hdr;
83 len = sizeof(*mc) + mc->num_hardware_banks *
84 sizeof(struct acpi_hest_ia_error_bank);
85 }
86 BUG_ON(len == -1);
87
88 return len;
89}
90
91int apei_hest_parse(apei_hest_func_t func, void *data)
92{
93 struct acpi_hest_header *hest_hdr;
94 int i, rc, len;
95
96 if (hest_disable)
97 return -EINVAL;
98
99 hest_hdr = (struct acpi_hest_header *)(hest_tab + 1);
100 for (i = 0; i < hest_tab->error_source_count; i++) {
101 len = hest_esrc_len(hest_hdr);
102 if (!len) {
103 pr_warning(FW_WARN HEST_PFX
104 "Unknown or unused hardware error source "
105 "type: %d for hardware error source: %d.\n",
106 hest_hdr->type, hest_hdr->source_id);
107 return -EINVAL;
108 }
109 if ((void *)hest_hdr + len >
110 (void *)hest_tab + hest_tab->header.length) {
111 pr_warning(FW_BUG HEST_PFX
112 "Table contents overflow for hardware error source: %d.\n",
113 hest_hdr->source_id);
114 return -EINVAL;
115 }
116
117 rc = func(hest_hdr, data);
118 if (rc)
119 return rc;
120
121 hest_hdr = (void *)hest_hdr + len;
122 }
123
124 return 0;
125}
126EXPORT_SYMBOL_GPL(apei_hest_parse);
127
128static int __init setup_hest_disable(char *str)
129{
130 hest_disable = 1;
131 return 0;
132}
133
134__setup("hest_disable", setup_hest_disable);
135
136static int __init hest_init(void)
137{
138 acpi_status status;
139 int rc = -ENODEV;
140
141 if (acpi_disabled)
142 goto err;
143
144 if (hest_disable) {
145		pr_info(HEST_PFX "HEST table parsing is disabled.\n");
146 goto err;
147 }
148
149 status = acpi_get_table(ACPI_SIG_HEST, 0,
150 (struct acpi_table_header **)&hest_tab);
151 if (status == AE_NOT_FOUND) {
152 pr_info(HEST_PFX "Table is not found!\n");
153 goto err;
154 } else if (ACPI_FAILURE(status)) {
155 const char *msg = acpi_format_exception(status);
156 pr_err(HEST_PFX "Failed to get table, %s\n", msg);
157 rc = -EINVAL;
158 goto err;
159 }
160
161 rc = apei_hest_parse(hest_void_parse, NULL);
162 if (rc)
163 goto err;
164
165 pr_info(HEST_PFX "HEST table parsing is initialized.\n");
166
167 return 0;
168err:
169 hest_disable = 1;
170 return rc;
171}
172
173subsys_initcall(hest_init);
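
A minimal sketch of how another subsystem could use apei_hest_parse(), assuming only the callback signature visible above; count_generic_source() and count_hest_generic_sources() are hypothetical names.

/*
 * Illustration only, not part of this patch: count HEST generic
 * hardware error sources with apei_hest_parse().
 */
#include <linux/acpi.h>
#include <acpi/apei.h>

static int count_generic_source(struct acpi_hest_header *hest_hdr, void *data)
{
	int *count = data;

	if (hest_hdr->type == ACPI_HEST_TYPE_GENERIC_ERROR)
		(*count)++;
	return 0;	/* a non-zero return would stop the walk */
}

static int count_hest_generic_sources(void)
{
	int count = 0, rc;

	rc = apei_hest_parse(count_generic_source, &count);
	if (rc)
		return rc;
	return count;
}
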
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
new file mode 100644
index 000000000000..814b19249616
--- /dev/null
+++ b/drivers/acpi/atomicio.c
@@ -0,0 +1,360 @@
1/*
2 * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
3 * accessing in atomic context.
4 *
5 * This is used by the NMI handler to access IO memory areas, because
6 * ioremap/iounmap cannot be used in NMI context. The IO memory area
7 * is pre-mapped in process context and accessed in the NMI handler.
8 *
9 * Copyright (C) 2009-2010, Intel Corp.
10 * Author: Huang Ying <ying.huang@intel.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License version
14 * 2 as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/acpi.h>
30#include <linux/io.h>
31#include <linux/kref.h>
32#include <linux/rculist.h>
33#include <linux/interrupt.h>
34#include <acpi/atomicio.h>
35
36#define ACPI_PFX "ACPI: "
37
38static LIST_HEAD(acpi_iomaps);
39/*
40 * Used for mutual exclusion between writers of the acpi_iomaps list;
41 * for synchronization between readers and the writer, RCU is used.
42 */
43static DEFINE_SPINLOCK(acpi_iomaps_lock);
44
45struct acpi_iomap {
46 struct list_head list;
47 void __iomem *vaddr;
48 unsigned long size;
49 phys_addr_t paddr;
50 struct kref ref;
51};
52
53/* acpi_iomaps_lock or RCU read lock must be held before calling */
54static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
55 unsigned long size)
56{
57 struct acpi_iomap *map;
58
59 list_for_each_entry_rcu(map, &acpi_iomaps, list) {
60 if (map->paddr + map->size >= paddr + size &&
61 map->paddr <= paddr)
62 return map;
63 }
64 return NULL;
65}
66
67/*
68 * Atomic "ioremap" used by the NMI handler; if the specified IO memory
69 * area is not pre-mapped, NULL is returned.
70 *
71 * acpi_iomaps_lock or RCU read lock must be held before calling
72 */
73static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
74 unsigned long size)
75{
76 struct acpi_iomap *map;
77
78 map = __acpi_find_iomap(paddr, size);
79 if (map)
80 return map->vaddr + (paddr - map->paddr);
81 else
82 return NULL;
83}
84
85/* acpi_iomaps_lock must be held before calling */
86static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
87 unsigned long size)
88{
89 struct acpi_iomap *map;
90
91 map = __acpi_find_iomap(paddr, size);
92 if (map) {
93 kref_get(&map->ref);
94 return map->vaddr + (paddr - map->paddr);
95 } else
96 return NULL;
97}
98
99/*
100 * Used to pre-map the specified IO memory area. First check
101 * whether the area is already pre-mapped; if it is, increase the
102 * reference count (in __acpi_try_ioremap) and return. Otherwise, do
103 * the real ioremap and add the mapping to the acpi_iomaps list.
104 */
105static void __iomem *acpi_pre_map(phys_addr_t paddr,
106 unsigned long size)
107{
108 void __iomem *vaddr;
109 struct acpi_iomap *map;
110 unsigned long pg_sz, flags;
111 phys_addr_t pg_off;
112
113 spin_lock_irqsave(&acpi_iomaps_lock, flags);
114 vaddr = __acpi_try_ioremap(paddr, size);
115 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
116 if (vaddr)
117 return vaddr;
118
119 pg_off = paddr & PAGE_MASK;
120 pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
121 vaddr = ioremap(pg_off, pg_sz);
122 if (!vaddr)
123 return NULL;
124 map = kmalloc(sizeof(*map), GFP_KERNEL);
125 if (!map)
126 goto err_unmap;
127 INIT_LIST_HEAD(&map->list);
128 map->paddr = pg_off;
129 map->size = pg_sz;
130 map->vaddr = vaddr;
131 kref_init(&map->ref);
132
133 spin_lock_irqsave(&acpi_iomaps_lock, flags);
134 vaddr = __acpi_try_ioremap(paddr, size);
135 if (vaddr) {
136 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
137 iounmap(map->vaddr);
138 kfree(map);
139 return vaddr;
140 }
141 list_add_tail_rcu(&map->list, &acpi_iomaps);
142 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
143
144 return vaddr + (paddr - pg_off);
145err_unmap:
146 iounmap(vaddr);
147 return NULL;
148}
149
150/* acpi_iomaps_lock must be held before calling */
151static void __acpi_kref_del_iomap(struct kref *ref)
152{
153 struct acpi_iomap *map;
154
155 map = container_of(ref, struct acpi_iomap, ref);
156 list_del_rcu(&map->list);
157}
158
159/*
160 * Used to post-unmap the specified IO memory area. The iounmap is
161 * done only if the reference count goes to zero.
162 */
163static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
164{
165 struct acpi_iomap *map;
166 unsigned long flags;
167 int del;
168
169 spin_lock_irqsave(&acpi_iomaps_lock, flags);
170 map = __acpi_find_iomap(paddr, size);
171 BUG_ON(!map);
172 del = kref_put(&map->ref, __acpi_kref_del_iomap);
173 spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
174
175 if (!del)
176 return;
177
178 synchronize_rcu();
179 iounmap(map->vaddr);
180 kfree(map);
181}
182
183/* In NMI handler, should set silent = 1 */
184static int acpi_check_gar(struct acpi_generic_address *reg,
185 u64 *paddr, int silent)
186{
187 u32 width, space_id;
188
189 width = reg->bit_width;
190 space_id = reg->space_id;
191 /* Handle possible alignment issues */
192 memcpy(paddr, &reg->address, sizeof(*paddr));
193 if (!*paddr) {
194 if (!silent)
195 pr_warning(FW_BUG ACPI_PFX
196 "Invalid physical address in GAR [0x%llx/%u/%u]\n",
197 *paddr, width, space_id);
198 return -EINVAL;
199 }
200
201 if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
202 if (!silent)
203 pr_warning(FW_BUG ACPI_PFX
204 "Invalid bit width in GAR [0x%llx/%u/%u]\n",
205 *paddr, width, space_id);
206 return -EINVAL;
207 }
208
209 if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
210 space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
211 if (!silent)
212 pr_warning(FW_BUG ACPI_PFX
213 "Invalid address space type in GAR [0x%llx/%u/%u]\n",
214 *paddr, width, space_id);
215 return -EINVAL;
216 }
217
218 return 0;
219}
220
221/* Pre-map, working on GAR */
222int acpi_pre_map_gar(struct acpi_generic_address *reg)
223{
224 u64 paddr;
225 void __iomem *vaddr;
226 int rc;
227
228 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
229 return 0;
230
231 rc = acpi_check_gar(reg, &paddr, 0);
232 if (rc)
233 return rc;
234
235 vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
236 if (!vaddr)
237 return -EIO;
238
239 return 0;
240}
241EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
242
243/* Post-unmap, working on GAR */
244int acpi_post_unmap_gar(struct acpi_generic_address *reg)
245{
246 u64 paddr;
247 int rc;
248
249 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
250 return 0;
251
252 rc = acpi_check_gar(reg, &paddr, 0);
253 if (rc)
254 return rc;
255
256 acpi_post_unmap(paddr, reg->bit_width / 8);
257
258 return 0;
259}
260EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
261
262/*
263 * Can be used in atomic (including NMI) or process context. The RCU
264 * read lock can only be released after the IO memory area has been accessed.
265 */
266static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
267{
268 void __iomem *addr;
269
270 rcu_read_lock();
271 addr = __acpi_ioremap_fast(paddr, width);
272 switch (width) {
273 case 8:
274 *val = readb(addr);
275 break;
276 case 16:
277 *val = readw(addr);
278 break;
279 case 32:
280 *val = readl(addr);
281 break;
282 case 64:
283 *val = readq(addr);
284 break;
285 default:
286 return -EINVAL;
287 }
288 rcu_read_unlock();
289
290 return 0;
291}
292
293static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
294{
295 void __iomem *addr;
296
297 rcu_read_lock();
298 addr = __acpi_ioremap_fast(paddr, width);
299 switch (width) {
300 case 8:
301 writeb(val, addr);
302 break;
303 case 16:
304 writew(val, addr);
305 break;
306 case 32:
307 writel(val, addr);
308 break;
309 case 64:
310 writeq(val, addr);
311 break;
312 default:
313 return -EINVAL;
314 }
315 rcu_read_unlock();
316
317 return 0;
318}
319
320/* GAR accessing in atomic (including NMI) or process context */
321int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
322{
323 u64 paddr;
324 int rc;
325
326 rc = acpi_check_gar(reg, &paddr, 1);
327 if (rc)
328 return rc;
329
330 *val = 0;
331 switch (reg->space_id) {
332 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
333 return acpi_atomic_read_mem(paddr, val, reg->bit_width);
334 case ACPI_ADR_SPACE_SYSTEM_IO:
335 return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
336 default:
337 return -EINVAL;
338 }
339}
340EXPORT_SYMBOL_GPL(acpi_atomic_read);
341
342int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
343{
344 u64 paddr;
345 int rc;
346
347 rc = acpi_check_gar(reg, &paddr, 1);
348 if (rc)
349 return rc;
350
351 switch (reg->space_id) {
352 case ACPI_ADR_SPACE_SYSTEM_MEMORY:
353 return acpi_atomic_write_mem(paddr, val, reg->bit_width);
354 case ACPI_ADR_SPACE_SYSTEM_IO:
355 return acpi_os_write_port(paddr, val, reg->bit_width);
356 default:
357 return -EINVAL;
358 }
359}
360EXPORT_SYMBOL_GPL(acpi_atomic_write);
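
A rough sketch of the intended usage pattern for the API above: pre-map a GAR once in process context, read it from atomic (e.g. NMI) context, and drop the mapping at teardown. The example function names are hypothetical; the GAR would normally come from a firmware table such as a HEST generic error source.

/* Illustration only, not part of this patch. */
#include <linux/acpi.h>
#include <acpi/atomicio.h>

static int example_setup(struct acpi_generic_address *gar)
{
	/* process context: establish the mapping once */
	return acpi_pre_map_gar(gar);
}

static u64 example_read_in_nmi(struct acpi_generic_address *gar)
{
	u64 val = 0;

	/* atomic/NMI context: only the pre-mapped area is touched */
	if (acpi_atomic_read(&val, gar))
		return 0;	/* treat a failed read as "nothing" in this sketch */
	return val;
}

static void example_teardown(struct acpi_generic_address *gar)
{
	/* process context again: drop the reference taken in example_setup() */
	acpi_post_unmap_gar(gar);
}
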
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f2234db85da0..e61d4f8e62a5 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1027,10 +1027,9 @@ int __init acpi_ec_ecdt_probe(void)
1027 /* Don't trust ECDT, which comes from ASUSTek */ 1027 /* Don't trust ECDT, which comes from ASUSTek */
1028 if (!EC_FLAGS_VALIDATE_ECDT) 1028 if (!EC_FLAGS_VALIDATE_ECDT)
1029 goto install; 1029 goto install;
1030 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 1030 saved_ec = kmemdup(boot_ec, sizeof(struct acpi_ec), GFP_KERNEL);
1031 if (!saved_ec) 1031 if (!saved_ec)
1032 return -ENOMEM; 1032 return -ENOMEM;
1033 memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
1034 /* fall through */ 1033 /* fall through */
1035 } 1034 }
1036 1035
diff --git a/drivers/acpi/hed.c b/drivers/acpi/hed.c
new file mode 100644
index 000000000000..d0c1967f7597
--- /dev/null
+++ b/drivers/acpi/hed.c
@@ -0,0 +1,112 @@
1/*
2 * ACPI Hardware Error Device (PNP0C33) Driver
3 *
4 * Copyright (C) 2010, Intel Corp.
5 * Author: Huang Ying <ying.huang@intel.com>
6 *
7 * ACPI Hardware Error Device is used to report some hardware errors
8 * notified via SCI, mainly the corrected errors.
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License version
12 * 2 as published by the Free Software Foundation;
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/acpi.h>
28#include <acpi/acpi_bus.h>
29#include <acpi/acpi_drivers.h>
30#include <acpi/hed.h>
31
32static struct acpi_device_id acpi_hed_ids[] = {
33 {"PNP0C33", 0},
34 {"", 0},
35};
36MODULE_DEVICE_TABLE(acpi, acpi_hed_ids);
37
38static acpi_handle hed_handle;
39
40static BLOCKING_NOTIFIER_HEAD(acpi_hed_notify_list);
41
42int register_acpi_hed_notifier(struct notifier_block *nb)
43{
44 return blocking_notifier_chain_register(&acpi_hed_notify_list, nb);
45}
46EXPORT_SYMBOL_GPL(register_acpi_hed_notifier);
47
48void unregister_acpi_hed_notifier(struct notifier_block *nb)
49{
50 blocking_notifier_chain_unregister(&acpi_hed_notify_list, nb);
51}
52EXPORT_SYMBOL_GPL(unregister_acpi_hed_notifier);
53
54/*
55 * An SCI reporting a hardware error is forwarded to the listeners of
56 * HED; this is used by HEST Generic Hardware Error Sources with
57 * notify type SCI.
58 */
59static void acpi_hed_notify(struct acpi_device *device, u32 event)
60{
61 blocking_notifier_call_chain(&acpi_hed_notify_list, 0, NULL);
62}
63
64static int __devinit acpi_hed_add(struct acpi_device *device)
65{
66 /* Only one hardware error device */
67 if (hed_handle)
68 return -EINVAL;
69 hed_handle = device->handle;
70 return 0;
71}
72
73static int __devexit acpi_hed_remove(struct acpi_device *device, int type)
74{
75 hed_handle = NULL;
76 return 0;
77}
78
79static struct acpi_driver acpi_hed_driver = {
80 .name = "hardware_error_device",
81 .class = "hardware_error",
82 .ids = acpi_hed_ids,
83 .ops = {
84 .add = acpi_hed_add,
85 .remove = acpi_hed_remove,
86 .notify = acpi_hed_notify,
87 },
88};
89
90static int __init acpi_hed_init(void)
91{
92 if (acpi_disabled)
93 return -ENODEV;
94
95 if (acpi_bus_register_driver(&acpi_hed_driver) < 0)
96 return -ENODEV;
97
98 return 0;
99}
100
101static void __exit acpi_hed_exit(void)
102{
103 acpi_bus_unregister_driver(&acpi_hed_driver);
104}
105
106module_init(acpi_hed_init);
107module_exit(acpi_hed_exit);
108
109ACPI_MODULE_NAME("hed");
110MODULE_AUTHOR("Huang Ying");
111MODULE_DESCRIPTION("ACPI Hardware Error Device Driver");
112MODULE_LICENSE("GPL");
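
A minimal, hypothetical listener built on the notifier interface above, mirroring how the APEI GHES driver hooks into HED; example_hed_event() and example_hed_nb are placeholder names.

/* Illustration only, not part of this patch. */
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <acpi/hed.h>

static int example_hed_event(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	/* the HED chain above is called with event == 0 and data == NULL */
	pr_info("hardware error SCI received\n");
	return NOTIFY_OK;
}

static struct notifier_block example_hed_nb = {
	.notifier_call = example_hed_event,
};

/* at init: register_acpi_hed_notifier(&example_hed_nb); */
/* at exit: unregister_acpi_hed_notifier(&example_hed_nb); */
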
diff --git a/drivers/acpi/hest.c b/drivers/acpi/hest.c
deleted file mode 100644
index 1c527a192872..000000000000
--- a/drivers/acpi/hest.c
+++ /dev/null
@@ -1,139 +0,0 @@
1#include <linux/acpi.h>
2#include <linux/pci.h>
3
4#define PREFIX "ACPI: "
5
6static inline unsigned long parse_acpi_hest_ia_machine_check(struct acpi_hest_ia_machine_check *p)
7{
8 return sizeof(*p) +
9 (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
10}
11
12static inline unsigned long parse_acpi_hest_ia_corrected(struct acpi_hest_ia_corrected *p)
13{
14 return sizeof(*p) +
15 (sizeof(struct acpi_hest_ia_error_bank) * p->num_hardware_banks);
16}
17
18static inline unsigned long parse_acpi_hest_ia_nmi(struct acpi_hest_ia_nmi *p)
19{
20 return sizeof(*p);
21}
22
23static inline unsigned long parse_acpi_hest_generic(struct acpi_hest_generic *p)
24{
25 return sizeof(*p);
26}
27
28static inline unsigned int hest_match_pci(struct acpi_hest_aer_common *p, struct pci_dev *pci)
29{
30 return (0 == pci_domain_nr(pci->bus) &&
31 p->bus == pci->bus->number &&
32 p->device == PCI_SLOT(pci->devfn) &&
33 p->function == PCI_FUNC(pci->devfn));
34}
35
36static unsigned long parse_acpi_hest_aer(void *hdr, int type, struct pci_dev *pci, int *firmware_first)
37{
38 struct acpi_hest_aer_common *p = hdr + sizeof(struct acpi_hest_header);
39 unsigned long rc=0;
40 u8 pcie_type = 0;
41 u8 bridge = 0;
42 switch (type) {
43 case ACPI_HEST_TYPE_AER_ROOT_PORT:
44 rc = sizeof(struct acpi_hest_aer_root);
45 pcie_type = PCI_EXP_TYPE_ROOT_PORT;
46 break;
47 case ACPI_HEST_TYPE_AER_ENDPOINT:
48 rc = sizeof(struct acpi_hest_aer);
49 pcie_type = PCI_EXP_TYPE_ENDPOINT;
50 break;
51 case ACPI_HEST_TYPE_AER_BRIDGE:
52 rc = sizeof(struct acpi_hest_aer_bridge);
53 if ((pci->class >> 16) == PCI_BASE_CLASS_BRIDGE)
54 bridge = 1;
55 break;
56 }
57
58 if (p->flags & ACPI_HEST_GLOBAL) {
59 if ((pci->is_pcie && (pci->pcie_type == pcie_type)) || bridge)
60 *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
61 }
62 else
63 if (hest_match_pci(p, pci))
64 *firmware_first = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
65 return rc;
66}
67
68static int acpi_hest_firmware_first(struct acpi_table_header *stdheader, struct pci_dev *pci)
69{
70 struct acpi_table_hest *hest = (struct acpi_table_hest *)stdheader;
71 void *p = (void *)hest + sizeof(*hest); /* defined by the ACPI 4.0 spec */
72 struct acpi_hest_header *hdr = p;
73
74 int i;
75 int firmware_first = 0;
76 static unsigned char printed_unused = 0;
77 static unsigned char printed_reserved = 0;
78
79 for (i=0, hdr=p; p < (((void *)hest) + hest->header.length) && i < hest->error_source_count; i++) {
80 switch (hdr->type) {
81 case ACPI_HEST_TYPE_IA32_CHECK:
82 p += parse_acpi_hest_ia_machine_check(p);
83 break;
84 case ACPI_HEST_TYPE_IA32_CORRECTED_CHECK:
85 p += parse_acpi_hest_ia_corrected(p);
86 break;
87 case ACPI_HEST_TYPE_IA32_NMI:
88 p += parse_acpi_hest_ia_nmi(p);
89 break;
90 /* These three should never appear */
91 case ACPI_HEST_TYPE_NOT_USED3:
92 case ACPI_HEST_TYPE_NOT_USED4:
93 case ACPI_HEST_TYPE_NOT_USED5:
94 if (!printed_unused) {
95 printk(KERN_DEBUG PREFIX
96 "HEST Error Source list contains an obsolete type (%d).\n", hdr->type);
97 printed_unused = 1;
98 }
99 break;
100 case ACPI_HEST_TYPE_AER_ROOT_PORT:
101 case ACPI_HEST_TYPE_AER_ENDPOINT:
102 case ACPI_HEST_TYPE_AER_BRIDGE:
103 p += parse_acpi_hest_aer(p, hdr->type, pci, &firmware_first);
104 break;
105 case ACPI_HEST_TYPE_GENERIC_ERROR:
106 p += parse_acpi_hest_generic(p);
107 break;
108 /* These should never appear either */
109 case ACPI_HEST_TYPE_RESERVED:
110 default:
111 if (!printed_reserved) {
112 printk(KERN_DEBUG PREFIX
113 "HEST Error Source list contains a reserved type (%d).\n", hdr->type);
114 printed_reserved = 1;
115 }
116 break;
117 }
118 }
119 return firmware_first;
120}
121
122int acpi_hest_firmware_first_pci(struct pci_dev *pci)
123{
124 acpi_status status = AE_NOT_FOUND;
125 struct acpi_table_header *hest = NULL;
126
127 if (acpi_disabled)
128 return 0;
129
130 status = acpi_get_table(ACPI_SIG_HEST, 1, &hest);
131
132 if (ACPI_SUCCESS(status)) {
133 if (acpi_hest_firmware_first(hest, pci)) {
134 return 1;
135 }
136 }
137 return 0;
138}
139EXPORT_SYMBOL_GPL(acpi_hest_firmware_first_pci);
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index aefce33f2a09..4eac59393edc 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -120,7 +120,8 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
120 struct acpi_pci_root *root; 120 struct acpi_pci_root *root;
121 121
122 list_for_each_entry(root, &acpi_pci_roots, node) 122 list_for_each_entry(root, &acpi_pci_roots, node)
123 if ((root->segment == (u16) seg) && (root->bus_nr == (u16) bus)) 123 if ((root->segment == (u16) seg) &&
124 (root->secondary.start == (u16) bus))
124 return root->device->handle; 125 return root->device->handle;
125 return NULL; 126 return NULL;
126} 127}
@@ -154,7 +155,7 @@ EXPORT_SYMBOL_GPL(acpi_is_root_bridge);
154static acpi_status 155static acpi_status
155get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data) 156get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
156{ 157{
157 int *busnr = data; 158 struct resource *res = data;
158 struct acpi_resource_address64 address; 159 struct acpi_resource_address64 address;
159 160
160 if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 && 161 if (resource->type != ACPI_RESOURCE_TYPE_ADDRESS16 &&
@@ -164,28 +165,27 @@ get_root_bridge_busnr_callback(struct acpi_resource *resource, void *data)
164 165
165 acpi_resource_to_address64(resource, &address); 166 acpi_resource_to_address64(resource, &address);
166 if ((address.address_length > 0) && 167 if ((address.address_length > 0) &&
167 (address.resource_type == ACPI_BUS_NUMBER_RANGE)) 168 (address.resource_type == ACPI_BUS_NUMBER_RANGE)) {
168 *busnr = address.minimum; 169 res->start = address.minimum;
170 res->end = address.minimum + address.address_length - 1;
171 }
169 172
170 return AE_OK; 173 return AE_OK;
171} 174}
172 175
173static acpi_status try_get_root_bridge_busnr(acpi_handle handle, 176static acpi_status try_get_root_bridge_busnr(acpi_handle handle,
174 unsigned long long *bus) 177 struct resource *res)
175{ 178{
176 acpi_status status; 179 acpi_status status;
177 int busnum;
178 180
179 busnum = -1; 181 res->start = -1;
180 status = 182 status =
181 acpi_walk_resources(handle, METHOD_NAME__CRS, 183 acpi_walk_resources(handle, METHOD_NAME__CRS,
182 get_root_bridge_busnr_callback, &busnum); 184 get_root_bridge_busnr_callback, res);
183 if (ACPI_FAILURE(status)) 185 if (ACPI_FAILURE(status))
184 return status; 186 return status;
185 /* Check if we really get a bus number from _CRS */ 187 if (res->start == -1)
186 if (busnum == -1)
187 return AE_ERROR; 188 return AE_ERROR;
188 *bus = busnum;
189 return AE_OK; 189 return AE_OK;
190} 190}
191 191
@@ -429,34 +429,47 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
429 struct acpi_device *child; 429 struct acpi_device *child;
430 u32 flags, base_flags; 430 u32 flags, base_flags;
431 431
432 root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
433 if (!root)
434 return -ENOMEM;
435
432 segment = 0; 436 segment = 0;
433 status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL, 437 status = acpi_evaluate_integer(device->handle, METHOD_NAME__SEG, NULL,
434 &segment); 438 &segment);
435 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 439 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
436 printk(KERN_ERR PREFIX "can't evaluate _SEG\n"); 440 printk(KERN_ERR PREFIX "can't evaluate _SEG\n");
437 return -ENODEV; 441 result = -ENODEV;
442 goto end;
438 } 443 }
439 444
440 /* Check _CRS first, then _BBN. If no _BBN, default to zero. */ 445 /* Check _CRS first, then _BBN. If no _BBN, default to zero. */
441 bus = 0; 446 root->secondary.flags = IORESOURCE_BUS;
442 status = try_get_root_bridge_busnr(device->handle, &bus); 447 status = try_get_root_bridge_busnr(device->handle, &root->secondary);
443 if (ACPI_FAILURE(status)) { 448 if (ACPI_FAILURE(status)) {
449 /*
450 * We need both the start and end of the downstream bus range
451 * to interpret _CBA (MMCONFIG base address), so it really is
452 * supposed to be in _CRS. If we don't find it there, all we
453 * can do is assume [_BBN-0xFF] or [0-0xFF].
454 */
455 root->secondary.end = 0xFF;
456 printk(KERN_WARNING FW_BUG PREFIX
457 "no secondary bus range in _CRS\n");
444 status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus); 458 status = acpi_evaluate_integer(device->handle, METHOD_NAME__BBN, NULL, &bus);
445 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 459 if (ACPI_SUCCESS(status))
446 printk(KERN_ERR PREFIX 460 root->secondary.start = bus;
447 "no bus number in _CRS and can't evaluate _BBN\n"); 461 else if (status == AE_NOT_FOUND)
448 return -ENODEV; 462 root->secondary.start = 0;
463 else {
464 printk(KERN_ERR PREFIX "can't evaluate _BBN\n");
465 result = -ENODEV;
466 goto end;
449 } 467 }
450 } 468 }
451 469
452 root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
453 if (!root)
454 return -ENOMEM;
455
456 INIT_LIST_HEAD(&root->node); 470 INIT_LIST_HEAD(&root->node);
457 root->device = device; 471 root->device = device;
458 root->segment = segment & 0xFFFF; 472 root->segment = segment & 0xFFFF;
459 root->bus_nr = bus & 0xFF;
460 strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME); 473 strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
461 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); 474 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
462 device->driver_data = root; 475 device->driver_data = root;
@@ -475,9 +488,9 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
475 /* TBD: Locking */ 488 /* TBD: Locking */
476 list_add_tail(&root->node, &acpi_pci_roots); 489 list_add_tail(&root->node, &acpi_pci_roots);
477 490
478 printk(KERN_INFO PREFIX "%s [%s] (%04x:%02x)\n", 491 printk(KERN_INFO PREFIX "%s [%s] (domain %04x %pR)\n",
479 acpi_device_name(device), acpi_device_bid(device), 492 acpi_device_name(device), acpi_device_bid(device),
480 root->segment, root->bus_nr); 493 root->segment, &root->secondary);
481 494
482 /* 495 /*
483 * Scan the Root Bridge 496 * Scan the Root Bridge
@@ -486,11 +499,11 @@ static int __devinit acpi_pci_root_add(struct acpi_device *device)
486 * PCI namespace does not get created until this call is made (and 499 * PCI namespace does not get created until this call is made (and
487 * thus the root bridge's pci_dev does not exist). 500 * thus the root bridge's pci_dev does not exist).
488 */ 501 */
489 root->bus = pci_acpi_scan_root(device, segment, bus); 502 root->bus = pci_acpi_scan_root(root);
490 if (!root->bus) { 503 if (!root->bus) {
491 printk(KERN_ERR PREFIX 504 printk(KERN_ERR PREFIX
492 "Bus %04x:%02x not present in PCI namespace\n", 505 "Bus %04x:%02x not present in PCI namespace\n",
493 root->segment, root->bus_nr); 506 root->segment, (unsigned int)root->secondary.start);
494 result = -ENODEV; 507 result = -ENODEV;
495 goto end; 508 goto end;
496 } 509 }
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index c3817e1f32c7..13c6cb703f1d 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -727,19 +727,9 @@ static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
727 break; 727 break;
728 } 728 }
729 729
730 if (pr->power.states[i].promotion.state) 730 seq_puts(seq, "promotion[--] ");
731 seq_printf(seq, "promotion[C%zd] ", 731
732 (pr->power.states[i].promotion.state - 732 seq_puts(seq, "demotion[--] ");
733 pr->power.states));
734 else
735 seq_puts(seq, "promotion[--] ");
736
737 if (pr->power.states[i].demotion.state)
738 seq_printf(seq, "demotion[C%zd] ",
739 (pr->power.states[i].demotion.state -
740 pr->power.states));
741 else
742 seq_puts(seq, "demotion[--] ");
743 733
744 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n", 734 seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
745 pr->power.states[i].latency, 735 pr->power.states[i].latency,
@@ -869,6 +859,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
869 struct acpi_processor *pr; 859 struct acpi_processor *pr;
870 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 860 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
871 ktime_t kt1, kt2; 861 ktime_t kt1, kt2;
862 s64 idle_time_ns;
872 s64 idle_time; 863 s64 idle_time;
873 s64 sleep_ticks = 0; 864 s64 sleep_ticks = 0;
874 865
@@ -910,12 +901,14 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
910 sched_clock_idle_sleep_event(); 901 sched_clock_idle_sleep_event();
911 acpi_idle_do_entry(cx); 902 acpi_idle_do_entry(cx);
912 kt2 = ktime_get_real(); 903 kt2 = ktime_get_real();
913 idle_time = ktime_to_us(ktime_sub(kt2, kt1)); 904 idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
905 idle_time = idle_time_ns;
906 do_div(idle_time, NSEC_PER_USEC);
914 907
915 sleep_ticks = us_to_pm_timer_ticks(idle_time); 908 sleep_ticks = us_to_pm_timer_ticks(idle_time);
916 909
917 /* Tell the scheduler how much we idled: */ 910 /* Tell the scheduler how much we idled: */
918 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 911 sched_clock_idle_wakeup_event(idle_time_ns);
919 912
920 local_irq_enable(); 913 local_irq_enable();
921 current_thread_info()->status |= TS_POLLING; 914 current_thread_info()->status |= TS_POLLING;
@@ -943,6 +936,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
943 struct acpi_processor *pr; 936 struct acpi_processor *pr;
944 struct acpi_processor_cx *cx = cpuidle_get_statedata(state); 937 struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
945 ktime_t kt1, kt2; 938 ktime_t kt1, kt2;
939 s64 idle_time_ns;
946 s64 idle_time; 940 s64 idle_time;
947 s64 sleep_ticks = 0; 941 s64 sleep_ticks = 0;
948 942
@@ -1025,11 +1019,13 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
1025 spin_unlock(&c3_lock); 1019 spin_unlock(&c3_lock);
1026 } 1020 }
1027 kt2 = ktime_get_real(); 1021 kt2 = ktime_get_real();
1028 idle_time = ktime_to_us(ktime_sub(kt2, kt1)); 1022 idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
1023 idle_time = idle_time_ns;
1024 do_div(idle_time, NSEC_PER_USEC);
1029 1025
1030 sleep_ticks = us_to_pm_timer_ticks(idle_time); 1026 sleep_ticks = us_to_pm_timer_ticks(idle_time);
1031 /* Tell the scheduler how much we idled: */ 1027 /* Tell the scheduler how much we idled: */
1032 sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS); 1028 sched_clock_idle_wakeup_event(idle_time_ns);
1033 1029
1034 local_irq_enable(); 1030 local_irq_enable();
1035 current_thread_info()->status |= TS_POLLING; 1031 current_thread_info()->status |= TS_POLLING;
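
Both idle-entry hunks keep the measured interval in nanoseconds for sched_clock_idle_wakeup_event() and derive microseconds only for the PM-timer tick conversion, using do_div() so the 64-bit division also works on 32-bit builds. The arithmetic is shown below as a self-contained user-space C sketch with stand-in timestamps and no kernel APIs.

/* Stand-alone illustration of the ns -> us split used above. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

int main(void)
{
	/* Hypothetical entry/exit timestamps in nanoseconds. */
	uint64_t kt1_ns = 5000000000ULL;
	uint64_t kt2_ns = 5000250000ULL;

	uint64_t idle_time_ns = kt2_ns - kt1_ns;               /* reported to the scheduler */
	uint64_t idle_time_us = idle_time_ns / NSEC_PER_USEC;  /* the division do_div() performs */

	printf("idled for %llu ns (%llu us)\n",
	       (unsigned long long)idle_time_ns,
	       (unsigned long long)idle_time_us);
	return 0;
}
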
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index baa76bbf244a..4ab2275b4461 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -80,22 +80,6 @@ static int acpi_sleep_prepare(u32 acpi_state)
80 80
81#ifdef CONFIG_ACPI_SLEEP 81#ifdef CONFIG_ACPI_SLEEP
82static u32 acpi_target_sleep_state = ACPI_STATE_S0; 82static u32 acpi_target_sleep_state = ACPI_STATE_S0;
83/*
84 * According to the ACPI specification the BIOS should make sure that ACPI is
85 * enabled and SCI_EN bit is set on wake-up from S1 - S3 sleep states. Still,
86 * some BIOSes don't do that and therefore we use acpi_enable() to enable ACPI
87 * on such systems during resume. Unfortunately that doesn't help in
88 * particularly pathological cases in which SCI_EN has to be set directly on
89 * resume, although the specification states very clearly that this flag is
90 * owned by the hardware. The set_sci_en_on_resume variable will be set in such
91 * cases.
92 */
93static bool set_sci_en_on_resume;
94
95void __init acpi_set_sci_en_on_resume(void)
96{
97 set_sci_en_on_resume = true;
98}
99 83
100/* 84/*
101 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the 85 * ACPI 1.0 wants us to execute _PTS before suspending devices, so we allow the
@@ -253,11 +237,8 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
253 break; 237 break;
254 } 238 }
255 239
256 /* If ACPI is not enabled by the BIOS, we need to enable it here. */ 240 /* This violates the spec but is required for bug compatibility. */
257 if (set_sci_en_on_resume) 241 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
258 acpi_write_bit_register(ACPI_BITREG_SCI_ENABLE, 1);
259 else
260 acpi_enable();
261 242
262 /* Reprogram control registers and execute _BFS */ 243 /* Reprogram control registers and execute _BFS */
263 acpi_leave_sleep_state_prep(acpi_state); 244 acpi_leave_sleep_state_prep(acpi_state);
@@ -346,12 +327,6 @@ static int __init init_old_suspend_ordering(const struct dmi_system_id *d)
346 return 0; 327 return 0;
347} 328}
348 329
349static int __init init_set_sci_en_on_resume(const struct dmi_system_id *d)
350{
351 set_sci_en_on_resume = true;
352 return 0;
353}
354
355static struct dmi_system_id __initdata acpisleep_dmi_table[] = { 330static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
356 { 331 {
357 .callback = init_old_suspend_ordering, 332 .callback = init_old_suspend_ordering,
@@ -370,22 +345,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
370 }, 345 },
371 }, 346 },
372 { 347 {
373 .callback = init_set_sci_en_on_resume,
374 .ident = "Apple MacBook 1,1",
375 .matches = {
376 DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
377 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
378 },
379 },
380 {
381 .callback = init_set_sci_en_on_resume,
382 .ident = "Apple MacMini 1,1",
383 .matches = {
384 DMI_MATCH(DMI_SYS_VENDOR, "Apple Computer, Inc."),
385 DMI_MATCH(DMI_PRODUCT_NAME, "Macmini1,1"),
386 },
387 },
388 {
389 .callback = init_old_suspend_ordering, 348 .callback = init_old_suspend_ordering,
390 .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)", 349 .ident = "Asus Pundit P1-AH2 (M2N8L motherboard)",
391 .matches = { 350 .matches = {
@@ -394,94 +353,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
394 }, 353 },
395 }, 354 },
396 { 355 {
397 .callback = init_set_sci_en_on_resume,
398 .ident = "Toshiba Satellite L300",
399 .matches = {
400 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
401 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite L300"),
402 },
403 },
404 {
405 .callback = init_set_sci_en_on_resume,
406 .ident = "Hewlett-Packard HP G7000 Notebook PC",
407 .matches = {
408 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
409 DMI_MATCH(DMI_PRODUCT_NAME, "HP G7000 Notebook PC"),
410 },
411 },
412 {
413 .callback = init_set_sci_en_on_resume,
414 .ident = "Hewlett-Packard HP Pavilion dv3 Notebook PC",
415 .matches = {
416 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
417 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv3 Notebook PC"),
418 },
419 },
420 {
421 .callback = init_set_sci_en_on_resume,
422 .ident = "Hewlett-Packard Pavilion dv4",
423 .matches = {
424 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
425 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4"),
426 },
427 },
428 {
429 .callback = init_set_sci_en_on_resume,
430 .ident = "Hewlett-Packard Pavilion dv7",
431 .matches = {
432 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
433 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv7"),
434 },
435 },
436 {
437 .callback = init_set_sci_en_on_resume,
438 .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
439 .matches = {
440 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
441 DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
442 },
443 },
444 {
445 .callback = init_set_sci_en_on_resume,
446 .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
447 .matches = {
448 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
449 DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario CQ40 Notebook PC"),
450 },
451 },
452 {
453 .callback = init_set_sci_en_on_resume,
454 .ident = "Lenovo ThinkPad T410",
455 .matches = {
456 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
457 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T410"),
458 },
459 },
460 {
461 .callback = init_set_sci_en_on_resume,
462 .ident = "Lenovo ThinkPad T510",
463 .matches = {
464 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
465 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T510"),
466 },
467 },
468 {
469 .callback = init_set_sci_en_on_resume,
470 .ident = "Lenovo ThinkPad W510",
471 .matches = {
472 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
473 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W510"),
474 },
475 },
476 {
477 .callback = init_set_sci_en_on_resume,
478 .ident = "Lenovo ThinkPad X201[s]",
479 .matches = {
480 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
481 DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X201"),
482 },
483 },
484 {
485 .callback = init_old_suspend_ordering, 356 .callback = init_old_suspend_ordering,
486 .ident = "Panasonic CF51-2L", 357 .ident = "Panasonic CF51-2L",
487 .matches = { 358 .matches = {
@@ -490,30 +361,6 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
490 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"), 361 DMI_MATCH(DMI_BOARD_NAME, "CF51-2L"),
491 }, 362 },
492 }, 363 },
493 {
494 .callback = init_set_sci_en_on_resume,
495 .ident = "Dell Studio 1558",
496 .matches = {
497 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
498 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1558"),
499 },
500 },
501 {
502 .callback = init_set_sci_en_on_resume,
503 .ident = "Dell Studio 1557",
504 .matches = {
505 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
506 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1557"),
507 },
508 },
509 {
510 .callback = init_set_sci_en_on_resume,
511 .ident = "Dell Studio 1555",
512 .matches = {
513 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
514 DMI_MATCH(DMI_PRODUCT_NAME, "Studio 1555"),
515 },
516 },
517 {}, 364 {},
518}; 365};
519#endif /* CONFIG_SUSPEND */ 366#endif /* CONFIG_SUSPEND */
diff --git a/drivers/acpi/sleep.h b/drivers/acpi/sleep.h
index 8a8f3b3382a6..25b8bd149284 100644
--- a/drivers/acpi/sleep.h
+++ b/drivers/acpi/sleep.h
@@ -1,6 +1,6 @@
1 1
2extern u8 sleep_states[]; 2extern u8 sleep_states[];
3extern int acpi_suspend (u32 state); 3extern int acpi_suspend(u32 state);
4 4
5extern void acpi_enable_wakeup_device_prep(u8 sleep_state); 5extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
6extern void acpi_enable_wakeup_device(u8 sleep_state); 6extern void acpi_enable_wakeup_device(u8 sleep_state);
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c
index 8a0ed2800e63..f336bca7c450 100644
--- a/drivers/acpi/tables.c
+++ b/drivers/acpi/tables.c
@@ -213,7 +213,7 @@ acpi_table_parse_entries(char *id,
213 unsigned long table_end; 213 unsigned long table_end;
214 acpi_size tbl_size; 214 acpi_size tbl_size;
215 215
216 if (acpi_disabled && !acpi_ht) 216 if (acpi_disabled)
217 return -ENODEV; 217 return -ENODEV;
218 218
219 if (!handler) 219 if (!handler)
@@ -280,7 +280,7 @@ int __init acpi_table_parse(char *id, acpi_table_handler handler)
280 struct acpi_table_header *table = NULL; 280 struct acpi_table_header *table = NULL;
281 acpi_size tbl_size; 281 acpi_size tbl_size;
282 282
283 if (acpi_disabled && !acpi_ht) 283 if (acpi_disabled)
284 return -ENODEV; 284 return -ENODEV;
285 285
286 if (!handler) 286 if (!handler)
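
acpi_table_parse() and acpi_table_parse_entries() now bail out only when ACPI is disabled outright; the separate acpi_ht case is gone. For reference, a hedged sketch of the callback contract these helpers expect is below; the "HEST" signature and the handler name are illustrative choices, not taken from the patch.

/*
 * Illustrative only: shows the acpi_table_handler shape consumed by
 * acpi_table_parse().  my_hest_handler() is a hypothetical name.
 */
#include <linux/acpi.h>
#include <linux/kernel.h>

static int __init my_hest_handler(struct acpi_table_header *table)
{
	pr_info("found %4.4s, length %u\n", table->signature, table->length);
	return 0;
}

static int __init my_table_scan(void)
{
	/* Non-zero return means the table was not found or ACPI is disabled. */
	return acpi_table_parse("HEST", my_hest_handler);
}
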
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index a0c93b321482..9865d46f49a8 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -45,6 +45,7 @@
45#include <acpi/acpi_bus.h> 45#include <acpi/acpi_bus.h>
46#include <acpi/acpi_drivers.h> 46#include <acpi/acpi_drivers.h>
47#include <linux/suspend.h> 47#include <linux/suspend.h>
48#include <acpi/video.h>
48 49
49#define PREFIX "ACPI: " 50#define PREFIX "ACPI: "
50 51
@@ -65,11 +66,6 @@
65 66
66#define MAX_NAME_LEN 20 67#define MAX_NAME_LEN 20
67 68
68#define ACPI_VIDEO_DISPLAY_CRT 1
69#define ACPI_VIDEO_DISPLAY_TV 2
70#define ACPI_VIDEO_DISPLAY_DVI 3
71#define ACPI_VIDEO_DISPLAY_LCD 4
72
73#define _COMPONENT ACPI_VIDEO_COMPONENT 69#define _COMPONENT ACPI_VIDEO_COMPONENT
74ACPI_MODULE_NAME("video"); 70ACPI_MODULE_NAME("video");
75 71
@@ -1007,11 +1003,11 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
1007 result = acpi_video_init_brightness(device); 1003 result = acpi_video_init_brightness(device);
1008 if (result) 1004 if (result)
1009 return; 1005 return;
1010 name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); 1006 name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
1011 if (!name) 1007 if (!name)
1012 return; 1008 return;
1009 count++;
1013 1010
1014 sprintf(name, "acpi_video%d", count++);
1015 memset(&props, 0, sizeof(struct backlight_properties)); 1011 memset(&props, 0, sizeof(struct backlight_properties));
1016 props.max_brightness = device->brightness->count - 3; 1012 props.max_brightness = device->brightness->count - 3;
1017 device->backlight = backlight_device_register(name, NULL, device, 1013 device->backlight = backlight_device_register(name, NULL, device,
@@ -1067,10 +1063,10 @@ static void acpi_video_device_find_cap(struct acpi_video_device *device)
1067 if (device->cap._DCS && device->cap._DSS) { 1063 if (device->cap._DCS && device->cap._DSS) {
1068 static int count; 1064 static int count;
1069 char *name; 1065 char *name;
1070 name = kzalloc(MAX_NAME_LEN, GFP_KERNEL); 1066 name = kasprintf(GFP_KERNEL, "acpi_video%d", count);
1071 if (!name) 1067 if (!name)
1072 return; 1068 return;
1073 sprintf(name, "acpi_video%d", count++); 1069 count++;
1074 device->output_dev = video_output_register(name, 1070 device->output_dev = video_output_register(name,
1075 NULL, device, &acpi_output_properties); 1071 NULL, device, &acpi_output_properties);
1076 kfree(name); 1072 kfree(name);
@@ -1748,11 +1744,27 @@ acpi_video_get_device_attr(struct acpi_video_bus *video, unsigned long device_id
1748} 1744}
1749 1745
1750static int 1746static int
1747acpi_video_get_device_type(struct acpi_video_bus *video,
1748 unsigned long device_id)
1749{
1750 struct acpi_video_enumerated_device *ids;
1751 int i;
1752
1753 for (i = 0; i < video->attached_count; i++) {
1754 ids = &video->attached_array[i];
1755 if ((ids->value.int_val & 0xffff) == device_id)
1756 return ids->value.int_val;
1757 }
1758
1759 return 0;
1760}
1761
1762static int
1751acpi_video_bus_get_one_device(struct acpi_device *device, 1763acpi_video_bus_get_one_device(struct acpi_device *device,
1752 struct acpi_video_bus *video) 1764 struct acpi_video_bus *video)
1753{ 1765{
1754 unsigned long long device_id; 1766 unsigned long long device_id;
1755 int status; 1767 int status, device_type;
1756 struct acpi_video_device *data; 1768 struct acpi_video_device *data;
1757 struct acpi_video_device_attrib* attribute; 1769 struct acpi_video_device_attrib* attribute;
1758 1770
@@ -1797,8 +1809,25 @@ acpi_video_bus_get_one_device(struct acpi_device *device,
1797 } 1809 }
1798 if(attribute->bios_can_detect) 1810 if(attribute->bios_can_detect)
1799 data->flags.bios = 1; 1811 data->flags.bios = 1;
1800 } else 1812 } else {
1801 data->flags.unknown = 1; 1813 /* Check for legacy IDs */
1814 device_type = acpi_video_get_device_type(video,
1815 device_id);
1816 /* Ignore bits 16 and 18-20 */
1817 switch (device_type & 0xffe2ffff) {
1818 case ACPI_VIDEO_DISPLAY_LEGACY_MONITOR:
1819 data->flags.crt = 1;
1820 break;
1821 case ACPI_VIDEO_DISPLAY_LEGACY_PANEL:
1822 data->flags.lcd = 1;
1823 break;
1824 case ACPI_VIDEO_DISPLAY_LEGACY_TV:
1825 data->flags.tvout = 1;
1826 break;
1827 default:
1828 data->flags.unknown = 1;
1829 }
1830 }
1802 1831
1803 acpi_video_device_bind(video, data); 1832 acpi_video_device_bind(video, data);
1804 acpi_video_device_find_cap(data); 1833 acpi_video_device_find_cap(data);
@@ -2032,6 +2061,71 @@ out:
2032 return result; 2061 return result;
2033} 2062}
2034 2063
2064int acpi_video_get_edid(struct acpi_device *device, int type, int device_id,
2065 void **edid)
2066{
2067 struct acpi_video_bus *video;
2068 struct acpi_video_device *video_device;
2069 union acpi_object *buffer = NULL;
2070 acpi_status status;
2071 int i, length;
2072
2073 if (!device || !acpi_driver_data(device))
2074 return -EINVAL;
2075
2076 video = acpi_driver_data(device);
2077
2078 for (i = 0; i < video->attached_count; i++) {
2079 video_device = video->attached_array[i].bind_info;
2080 length = 256;
2081
2082 if (!video_device)
2083 continue;
2084
2085 if (type) {
2086 switch (type) {
2087 case ACPI_VIDEO_DISPLAY_CRT:
2088 if (!video_device->flags.crt)
2089 continue;
2090 break;
2091 case ACPI_VIDEO_DISPLAY_TV:
2092 if (!video_device->flags.tvout)
2093 continue;
2094 break;
2095 case ACPI_VIDEO_DISPLAY_DVI:
2096 if (!video_device->flags.dvi)
2097 continue;
2098 break;
2099 case ACPI_VIDEO_DISPLAY_LCD:
2100 if (!video_device->flags.lcd)
2101 continue;
2102 break;
2103 }
2104 } else if (video_device->device_id != device_id) {
2105 continue;
2106 }
2107
2108 status = acpi_video_device_EDID(video_device, &buffer, length);
2109
2110 if (ACPI_FAILURE(status) || !buffer ||
2111 buffer->type != ACPI_TYPE_BUFFER) {
2112 length = 128;
2113 status = acpi_video_device_EDID(video_device, &buffer,
2114 length);
2115 if (ACPI_FAILURE(status) || !buffer ||
2116 buffer->type != ACPI_TYPE_BUFFER) {
2117 continue;
2118 }
2119 }
2120
2121 *edid = buffer->buffer.pointer;
2122 return length;
2123 }
2124
2125 return -ENODEV;
2126}
2127EXPORT_SYMBOL(acpi_video_get_edid);
2128
2035static int 2129static int
2036acpi_video_bus_get_devices(struct acpi_video_bus *video, 2130acpi_video_bus_get_devices(struct acpi_video_bus *video,
2037 struct acpi_device *device) 2131 struct acpi_device *device)
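
acpi_video_get_edid() above is exported for graphics drivers: it walks the attached display devices, optionally filters by connector type, retries _DDC with a 128-byte request when the 256-byte request fails, and returns the EDID length with *edid pointing into the returned buffer. A hedged caller sketch follows; the function name and the bare acpi_device pointer are hypothetical, with no real driver context.

/*
 * Hypothetical caller of the exported helper above; error handling only.
 */
#include <linux/acpi.h>
#include <linux/kernel.h>
#include <acpi/video.h>

static int my_fetch_edid(struct acpi_device *adev)
{
	void *edid = NULL;
	int len;

	/* type == 0 means "match device_id"; a display type filters instead. */
	len = acpi_video_get_edid(adev, ACPI_VIDEO_DISPLAY_LCD, 0, &edid);
	if (len < 0)
		return len;	/* -EINVAL or -ENODEV from the helper */

	pr_info("got %d bytes of EDID\n", len);
	return 0;
}
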
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index fc2f26b9b407..c5fef01b3c95 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -250,7 +250,7 @@ static int __init acpi_backlight(char *str)
250 ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR; 250 ACPI_VIDEO_BACKLIGHT_FORCE_VENDOR;
251 if (!strcmp("video", str)) 251 if (!strcmp("video", str))
252 acpi_video_support |= 252 acpi_video_support |=
253 ACPI_VIDEO_OUTPUT_SWITCHING_FORCE_VIDEO; 253 ACPI_VIDEO_BACKLIGHT_FORCE_VIDEO;
254 } 254 }
255 return 1; 255 return 1;
256} 256}
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 7aaae2d2bd67..80c11d131499 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -130,4 +130,21 @@ static inline int aer_osc_setup(struct pcie_device *pciedev)
130} 130}
131#endif 131#endif
132 132
133#ifdef CONFIG_ACPI_APEI
134extern int pcie_aer_get_firmware_first(struct pci_dev *pci_dev);
135#else
136static inline int pcie_aer_get_firmware_first(struct pci_dev *pci_dev)
137{
138 if (pci_dev->__aer_firmware_first_valid)
139 return pci_dev->__aer_firmware_first;
140 return 0;
141}
142#endif
143
144static inline void pcie_aer_force_firmware_first(struct pci_dev *pci_dev,
145 int enable)
146{
147 pci_dev->__aer_firmware_first = !!enable;
148 pci_dev->__aer_firmware_first_valid = 1;
149}
133#endif /* _AERDRV_H_ */ 150#endif /* _AERDRV_H_ */
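
The header above gives AER code a single accessor regardless of configuration: with APEI enabled the answer comes from HEST (see aerdrv_acpi.c below), otherwise from the cached per-device flag. A minimal sketch of the intended call pattern is below; my_enable_aer() is a hypothetical name that mirrors the guard used in aerdrv_core.c.

/* Hypothetical caller; assumes "aerdrv.h" (above) is in scope. */
#include <linux/pci.h>

static int my_enable_aer(struct pci_dev *dev)
{
	/* Firmware owns AER error handling for this device: keep hands off. */
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	/* ... program the AER capability registers here ... */
	return 0;
}
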
diff --git a/drivers/pci/pcie/aer/aerdrv_acpi.c b/drivers/pci/pcie/aer/aerdrv_acpi.c
index 04814087658d..f278d7b0d95d 100644
--- a/drivers/pci/pcie/aer/aerdrv_acpi.c
+++ b/drivers/pci/pcie/aer/aerdrv_acpi.c
@@ -16,6 +16,7 @@
16#include <linux/acpi.h> 16#include <linux/acpi.h>
17#include <linux/pci-acpi.h> 17#include <linux/pci-acpi.h>
18#include <linux/delay.h> 18#include <linux/delay.h>
19#include <acpi/apei.h>
19#include "aerdrv.h" 20#include "aerdrv.h"
20 21
21/** 22/**
@@ -53,3 +54,79 @@ int aer_osc_setup(struct pcie_device *pciedev)
53 54
54 return 0; 55 return 0;
55} 56}
57
58#ifdef CONFIG_ACPI_APEI
59static inline int hest_match_pci(struct acpi_hest_aer_common *p,
60 struct pci_dev *pci)
61{
62 return (0 == pci_domain_nr(pci->bus) &&
63 p->bus == pci->bus->number &&
64 p->device == PCI_SLOT(pci->devfn) &&
65 p->function == PCI_FUNC(pci->devfn));
66}
67
68struct aer_hest_parse_info {
69 struct pci_dev *pci_dev;
70 int firmware_first;
71};
72
73static int aer_hest_parse(struct acpi_hest_header *hest_hdr, void *data)
74{
75 struct aer_hest_parse_info *info = data;
76 struct acpi_hest_aer_common *p;
77 u8 pcie_type = 0;
78 u8 bridge = 0;
79 int ff = 0;
80
81 switch (hest_hdr->type) {
82 case ACPI_HEST_TYPE_AER_ROOT_PORT:
83 pcie_type = PCI_EXP_TYPE_ROOT_PORT;
84 break;
85 case ACPI_HEST_TYPE_AER_ENDPOINT:
86 pcie_type = PCI_EXP_TYPE_ENDPOINT;
87 break;
88 case ACPI_HEST_TYPE_AER_BRIDGE:
89 if ((info->pci_dev->class >> 16) == PCI_BASE_CLASS_BRIDGE)
90 bridge = 1;
91 break;
92 default:
93 return 0;
94 }
95
96 p = (struct acpi_hest_aer_common *)(hest_hdr + 1);
97 if (p->flags & ACPI_HEST_GLOBAL) {
98 if ((info->pci_dev->is_pcie &&
99 info->pci_dev->pcie_type == pcie_type) || bridge)
100 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
101 } else
102 if (hest_match_pci(p, info->pci_dev))
103 ff = !!(p->flags & ACPI_HEST_FIRMWARE_FIRST);
104 info->firmware_first = ff;
105
106 return 0;
107}
108
109static void aer_set_firmware_first(struct pci_dev *pci_dev)
110{
111 int rc;
112 struct aer_hest_parse_info info = {
113 .pci_dev = pci_dev,
114 .firmware_first = 0,
115 };
116
117 rc = apei_hest_parse(aer_hest_parse, &info);
118
119 if (rc)
120 pci_dev->__aer_firmware_first = 0;
121 else
122 pci_dev->__aer_firmware_first = info.firmware_first;
123 pci_dev->__aer_firmware_first_valid = 1;
124}
125
126int pcie_aer_get_firmware_first(struct pci_dev *dev)
127{
128 if (!dev->__aer_firmware_first_valid)
129 aer_set_firmware_first(dev);
130 return dev->__aer_firmware_first;
131}
132#endif
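
apei_hest_parse() used above hands every HEST subtable header to the callback together with an opaque cookie; aer_hest_parse() always returns 0 so the walk covers the whole table. A hedged example of a different walker built on the same contract is sketched below; the function names are hypothetical and it merely counts the PCIe AER entry types seen in the code above.

/*
 * Illustrative walker using the same callback contract as aer_hest_parse().
 * count_aer_entries() and my_count_hest_aer() are hypothetical names.
 */
#include <linux/acpi.h>
#include <linux/errno.h>
#include <acpi/apei.h>

static int count_aer_entries(struct acpi_hest_header *hest_hdr, void *data)
{
	int *count = data;

	switch (hest_hdr->type) {
	case ACPI_HEST_TYPE_AER_ROOT_PORT:
	case ACPI_HEST_TYPE_AER_ENDPOINT:
	case ACPI_HEST_TYPE_AER_BRIDGE:
		(*count)++;
		break;
	}
	return 0;	/* keep walking */
}

static int my_count_hest_aer(void)
{
	int count = 0;

	if (apei_hest_parse(count_aer_entries, &count))
		return -ENODEV;	/* no HEST table or parse failure */
	return count;
}
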
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index df2d686fe3dd..8af4f619bba2 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -36,7 +36,7 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev)
36 u16 reg16 = 0; 36 u16 reg16 = 0;
37 int pos; 37 int pos;
38 38
39 if (dev->aer_firmware_first) 39 if (pcie_aer_get_firmware_first(dev))
40 return -EIO; 40 return -EIO;
41 41
42 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); 42 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
@@ -63,7 +63,7 @@ int pci_disable_pcie_error_reporting(struct pci_dev *dev)
63 u16 reg16 = 0; 63 u16 reg16 = 0;
64 int pos; 64 int pos;
65 65
66 if (dev->aer_firmware_first) 66 if (pcie_aer_get_firmware_first(dev))
67 return -EIO; 67 return -EIO;
68 68
69 pos = pci_pcie_cap(dev); 69 pos = pci_pcie_cap(dev);
@@ -771,7 +771,7 @@ void aer_isr(struct work_struct *work)
771 */ 771 */
772int aer_init(struct pcie_device *dev) 772int aer_init(struct pcie_device *dev)
773{ 773{
774 if (dev->port->aer_firmware_first) { 774 if (pcie_aer_get_firmware_first(dev->port)) {
775 dev_printk(KERN_DEBUG, &dev->device, 775 dev_printk(KERN_DEBUG, &dev->device,
776 "PCIe errors handled by platform firmware.\n"); 776 "PCIe errors handled by platform firmware.\n");
777 goto out; 777 goto out;
@@ -785,7 +785,7 @@ out:
785 if (forceload) { 785 if (forceload) {
786 dev_printk(KERN_DEBUG, &dev->device, 786 dev_printk(KERN_DEBUG, &dev->device,
787 "aerdrv forceload requested.\n"); 787 "aerdrv forceload requested.\n");
788 dev->port->aer_firmware_first = 0; 788 pcie_aer_force_firmware_first(dev->port, 0);
789 return 0; 789 return 0;
790 } 790 }
791 return -ENXIO; 791 return -ENXIO;
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index c82548afcd5c..f4adba2d1dd3 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -10,7 +10,6 @@
10#include <linux/module.h> 10#include <linux/module.h>
11#include <linux/cpumask.h> 11#include <linux/cpumask.h>
12#include <linux/pci-aspm.h> 12#include <linux/pci-aspm.h>
13#include <acpi/acpi_hest.h>
14#include "pci.h" 13#include "pci.h"
15 14
16#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */ 15#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
@@ -904,12 +903,6 @@ void set_pcie_hotplug_bridge(struct pci_dev *pdev)
904 pdev->is_hotplug_bridge = 1; 903 pdev->is_hotplug_bridge = 1;
905} 904}
906 905
907static void set_pci_aer_firmware_first(struct pci_dev *pdev)
908{
909 if (acpi_hest_firmware_first_pci(pdev))
910 pdev->aer_firmware_first = 1;
911}
912
913#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) 906#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
914 907
915/** 908/**
@@ -939,7 +932,6 @@ int pci_setup_device(struct pci_dev *dev)
939 dev->multifunction = !!(hdr_type & 0x80); 932 dev->multifunction = !!(hdr_type & 0x80);
940 dev->error_state = pci_channel_io_normal; 933 dev->error_state = pci_channel_io_normal;
941 set_pcie_port_type(dev); 934 set_pcie_port_type(dev);
942 set_pci_aer_firmware_first(dev);
943 935
944 list_for_each_entry(slot, &dev->bus->slots, list) 936 list_for_each_entry(slot, &dev->bus->slots, list)
945 if (PCI_SLOT(dev->devfn) == slot->number) 937 if (PCI_SLOT(dev->devfn) == slot->number)