author    Huang Ying <ying.huang@intel.com>  2010-05-18 02:35:11 -0400
committer Len Brown <len.brown@intel.com>    2010-05-19 11:40:03 -0400
commit    15651291a2f8c11e7e6a42d8bfde7a213ff13262 (patch)
tree      0bbb96e22231e2b4da4ce9b264dfce8c54a52efc /drivers/acpi
parent    e40152ee1e1c7a63f4777791863215e3faa37a86 (diff)
ACPI, IO memory pre-mapping and atomic accessing
Some ACPI IO accesses need to be done in atomic context. For example,
APEI ERST operations may be used for permanent storage in the hardware
error handler; that is, they may be called in atomic contexts such as
IRQ or NMI. ERST/EINJ implement their operations via IO memory/port
accesses, but the IO memory access method provided by ACPI
(acpi_read/acpi_write) maps the IO memory while it is being accessed,
so it cannot be used in atomic context.

To solve the issue, the IO memory is pre-mapped while EINJ/ERST
initialize. A linked list records which memory areas have been mapped;
when memory is accessed in the hardware error handler, the list is
searched for the virtual address mapped to the given physical address.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
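A minimal usage sketch of the new interface (a hypothetical caller, not
part of this patch; my_apei_init(), my_nmi_handler() and
do_something_with() are illustrative names):

	#include <linux/init.h>
	#include <acpi/atomicio.h>

	/* Filled in from the firmware tables during probe. */
	static struct acpi_generic_address err_status_reg;

	static int __init my_apei_init(void)
	{
		/* Process context: the real ioremap happens here. */
		return acpi_pre_map_gar(&err_status_reg);
	}

	static void my_nmi_handler(void)
	{
		u64 status;

		/* NMI context: only touches the pre-mapped area. */
		if (!acpi_atomic_read(&status, &err_status_reg))
			do_something_with(status);
	}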
Diffstat (limited to 'drivers/acpi')
-rw-r--r--  drivers/acpi/Makefile      1
-rw-r--r--  drivers/acpi/atomicio.c  360
2 files changed, 361 insertions(+), 0 deletions(-)
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index a8d8998dd5c5..93251fb599fa 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -19,6 +19,7 @@ obj-y += acpi.o \
 
 # All the builtin files are in the "acpi." module_param namespace.
 acpi-y += osl.o utils.o reboot.o
+acpi-y += atomicio.o
 acpi-y += hest.o
 
 # sleep related files
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
new file mode 100644
index 000000000000..814b19249616
--- /dev/null
+++ b/drivers/acpi/atomicio.c
@@ -0,0 +1,360 @@
/*
 * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
 * accessing in atomic context.
 *
 * This is used by NMI handlers to access IO memory areas, because
 * ioremap/iounmap cannot be used in an NMI handler.  The IO memory
 * area is pre-mapped in process context and then accessed in the NMI
 * handler.
 *
 * Copyright (C) 2009-2010, Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <acpi/atomicio.h>

#define ACPI_PFX "ACPI: "

static LIST_HEAD(acpi_iomaps);
/*
 * Used for mutual exclusion between writers of the acpi_iomaps list;
 * RCU is used for synchronization between readers and the writer.
 */
static DEFINE_SPINLOCK(acpi_iomaps_lock);

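/*
 * One entry per pre-mapped IO memory area.  Entries are refcounted,
 * so callers whose areas fall inside an already-mapped region share
 * the same mapping.
 */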
struct acpi_iomap {
	struct list_head list;
	void __iomem *vaddr;
	unsigned long size;
	phys_addr_t paddr;
	struct kref ref;
};

/* acpi_iomaps_lock or RCU read lock must be held before calling */
static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
					    unsigned long size)
{
	struct acpi_iomap *map;

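	/* An entry matches only if it covers [paddr, paddr + size) entirely. */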
	list_for_each_entry_rcu(map, &acpi_iomaps, list) {
		if (map->paddr + map->size >= paddr + size &&
		    map->paddr <= paddr)
			return map;
	}
	return NULL;
}

/*
 * Atomic "ioremap" used by the NMI handler; if the specified IO
 * memory area is not pre-mapped, NULL is returned.
 *
 * acpi_iomaps_lock or RCU read lock must be held before calling.
 */
static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
					 unsigned long size)
{
	struct acpi_iomap *map;

	map = __acpi_find_iomap(paddr, size);
	if (map)
		return map->vaddr + (paddr - map->paddr);
	else
		return NULL;
}

/*
 * Takes a new reference on an existing mapping if one covers the area.
 * acpi_iomaps_lock must be held before calling.
 */
static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
					unsigned long size)
{
	struct acpi_iomap *map;

	map = __acpi_find_iomap(paddr, size);
	if (map) {
		kref_get(&map->ref);
		return map->vaddr + (paddr - map->paddr);
	} else
		return NULL;
}

/*
 * Used to pre-map the specified IO memory area.  First check whether
 * the area is already pre-mapped; if it is, increase the reference
 * count (in __acpi_try_ioremap) and return the existing mapping.
 * Otherwise do the real ioremap and add the new mapping to the
 * acpi_iomaps list.
 */
static void __iomem *acpi_pre_map(phys_addr_t paddr,
				  unsigned long size)
{
	void __iomem *vaddr;
	struct acpi_iomap *map;
	unsigned long pg_sz, flags;
	phys_addr_t pg_off;

	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
	if (vaddr)
		return vaddr;

	pg_off = paddr & PAGE_MASK;
	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
	vaddr = ioremap(pg_off, pg_sz);
	if (!vaddr)
		return NULL;
	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		goto err_unmap;
	INIT_LIST_HEAD(&map->list);
	map->paddr = pg_off;
	map->size = pg_sz;
	map->vaddr = vaddr;
	kref_init(&map->ref);

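	/*
	 * The lock was dropped around ioremap/kmalloc, so another
	 * caller may have mapped the same area meanwhile; re-check
	 * and reuse that mapping instead of inserting a duplicate.
	 */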
	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	vaddr = __acpi_try_ioremap(paddr, size);
	if (vaddr) {
		spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
		iounmap(map->vaddr);
		kfree(map);
		return vaddr;
	}
	list_add_tail_rcu(&map->list, &acpi_iomaps);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

	return map->vaddr + (paddr - pg_off);
err_unmap:
	iounmap(vaddr);
	return NULL;
}

/* acpi_iomaps_lock must be held before calling */
static void __acpi_kref_del_iomap(struct kref *ref)
{
	struct acpi_iomap *map;

	map = container_of(ref, struct acpi_iomap, ref);
	list_del_rcu(&map->list);
}

/*
 * Used to post-unmap the specified IO memory area.  The real iounmap
 * is done only when the reference count drops to zero.
 */
static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
{
	struct acpi_iomap *map;
	unsigned long flags;
	int del;

	spin_lock_irqsave(&acpi_iomaps_lock, flags);
	map = __acpi_find_iomap(paddr, size);
	BUG_ON(!map);
	del = kref_put(&map->ref, __acpi_kref_del_iomap);
	spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

	if (!del)
		return;

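	/*
	 * The entry is off the list; wait for all RCU readers that
	 * might still be using the mapping before tearing it down.
	 */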
	synchronize_rcu();
	iounmap(map->vaddr);
	kfree(map);
}

/*
 * Sanity-check a Generic Address Structure (GAR) and extract its
 * physical address.  Callers in NMI context should set silent = 1.
 */
static int acpi_check_gar(struct acpi_generic_address *reg,
			  u64 *paddr, int silent)
{
	u32 width, space_id;

	width = reg->bit_width;
	space_id = reg->space_id;
	/* Handle possible alignment issues */
	memcpy(paddr, &reg->address, sizeof(*paddr));
	if (!*paddr) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
				   "Invalid physical address in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
				   "Invalid bit width in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
	    space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
		if (!silent)
			pr_warning(FW_BUG ACPI_PFX
				   "Invalid address space type in GAR [0x%llx/%u/%u]\n",
				   *paddr, width, space_id);
		return -EINVAL;
	}

	return 0;
}

/* Pre-map, working on GAR */
int acpi_pre_map_gar(struct acpi_generic_address *reg)
{
	u64 paddr;
	void __iomem *vaddr;
	int rc;

	if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	rc = acpi_check_gar(reg, &paddr, 0);
	if (rc)
		return rc;

	vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
	if (!vaddr)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(acpi_pre_map_gar);

/* Post-unmap, working on GAR */
int acpi_post_unmap_gar(struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	rc = acpi_check_gar(reg, &paddr, 0);
	if (rc)
		return rc;

	acpi_post_unmap(paddr, reg->bit_width / 8);

	return 0;
}
EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);

/*
 * Can be used in atomic (including NMI) or process context.  The RCU
 * read lock may only be released after the IO memory area access is
 * complete.
 */
static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
{
	void __iomem *addr;

	rcu_read_lock();
	addr = __acpi_ioremap_fast(paddr, width / 8);
	if (!addr) {
		/* The area was not pre-mapped; we must not ioremap here. */
		rcu_read_unlock();
		return -EIO;
	}
	switch (width) {
	case 8:
		*val = readb(addr);
		break;
	case 16:
		*val = readw(addr);
		break;
	case 32:
		*val = readl(addr);
		break;
	case 64:
		*val = readq(addr);
		break;
	default:
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();

	return 0;
}

static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
{
	void __iomem *addr;

	rcu_read_lock();
	addr = __acpi_ioremap_fast(paddr, width / 8);
	if (!addr) {
		rcu_read_unlock();
		return -EIO;
	}
	switch (width) {
	case 8:
		writeb(val, addr);
		break;
	case 16:
		writew(val, addr);
		break;
	case 32:
		writel(val, addr);
		break;
	case 64:
		writeq(val, addr);
		break;
	default:
		rcu_read_unlock();
		return -EINVAL;
	}
	rcu_read_unlock();

	return 0;
}

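/*
 * Note: ACPI_ADR_SPACE_SYSTEM_IO accesses below go through port IO
 * (acpi_os_read_port/acpi_os_write_port), which needs no mapping and
 * is therefore already usable in atomic context.
 */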
/* Read from a GAR in atomic (including NMI) or process context */
int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	rc = acpi_check_gar(reg, &paddr, 1);
	if (rc)
		return rc;

	*val = 0;
	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return acpi_atomic_read_mem(paddr, val, reg->bit_width);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(acpi_atomic_read);

int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
{
	u64 paddr;
	int rc;

	rc = acpi_check_gar(reg, &paddr, 1);
	if (rc)
		return rc;

	switch (reg->space_id) {
	case ACPI_ADR_SPACE_SYSTEM_MEMORY:
		return acpi_atomic_write_mem(paddr, val, reg->bit_width);
	case ACPI_ADR_SPACE_SYSTEM_IO:
		return acpi_os_write_port(paddr, val, reg->bit_width);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(acpi_atomic_write);