Diffstat (limited to 'drivers/acpi/atomicio.c')
-rw-r--r--  drivers/acpi/atomicio.c | 422 ----------------------------------
 1 file changed, 0 insertions(+), 422 deletions(-)
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
deleted file mode 100644
index d4a5b3d3657b..000000000000
--- a/drivers/acpi/atomicio.c
+++ /dev/null
@@ -1,422 +0,0 @@
/*
 * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
 * accessing in atomic context.
 *
 * This is used by the NMI handler to access IO memory areas, because
 * ioremap/iounmap cannot be called in NMI context. The IO memory area
 * is pre-mapped in process context and then accessed in the NMI
 * handler.
 *
 * Copyright (C) 2009-2010, Intel Corp.
 * Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/kref.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <acpi/atomicio.h>

#define ACPI_PFX "ACPI: "

static LIST_HEAD(acpi_iomaps);
/*
 * Protects writers of the acpi_iomaps list against each other;
 * synchronization between readers and the writer is done with RCU.
 */
static DEFINE_SPINLOCK(acpi_iomaps_lock);

struct acpi_iomap {
        struct list_head list;
        void __iomem *vaddr;
        unsigned long size;
        phys_addr_t paddr;
        struct kref ref;
};

/* acpi_iomaps_lock or RCU read lock must be held before calling */
static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
                                            unsigned long size)
{
        struct acpi_iomap *map;

        list_for_each_entry_rcu(map, &acpi_iomaps, list) {
                if (map->paddr + map->size >= paddr + size &&
                    map->paddr <= paddr)
                        return map;
        }
        return NULL;
}
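
/*
 * For illustration (the addresses below are made up): the containment
 * check above accepts only lookups that lie entirely inside an
 * existing mapping. With a pre-mapped area at paddr 0x1000 of size
 * 0x1000, covering [0x1000, 0x2000):
 *
 *      __acpi_find_iomap(0x1100, 8);   // hit:  [0x1100, 0x1108) fits
 *      __acpi_find_iomap(0x1ffc, 8);   // miss: crosses the 0x2000 end
 */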

/*
 * Atomic "ioremap" used by the NMI handler; if the specified IO
 * memory area is not pre-mapped, NULL is returned.
 *
 * Note that @size is a bit width here (a GAR access width), hence the
 * division by 8 below; see acpi_atomic_read_mem()/acpi_atomic_write_mem().
 *
 * acpi_iomaps_lock or RCU read lock must be held before calling
 */
static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
                                         unsigned long size)
{
        struct acpi_iomap *map;

        map = __acpi_find_iomap(paddr, size/8);
        if (map)
                return map->vaddr + (paddr - map->paddr);
        else
                return NULL;
}
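
/*
 * A minimal sketch of the intended NMI-side pattern (the helper name
 * nmi_peek_byte() is hypothetical, not part of this file): take the
 * RCU read lock, translate, access, then unlock.
 */
static u8 nmi_peek_byte(phys_addr_t paddr)
{
        void __iomem *addr;
        u8 v = 0;

        rcu_read_lock();
        addr = __acpi_ioremap_fast(paddr, 8);   /* 8-bit access */
        if (addr)
                v = readb(addr);
        rcu_read_unlock();
        return v;
}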

/* acpi_iomaps_lock must be held before calling */
static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
                                        unsigned long size)
{
        struct acpi_iomap *map;

        map = __acpi_find_iomap(paddr, size);
        if (map) {
                kref_get(&map->ref);
                return map->vaddr + (paddr - map->paddr);
        } else
                return NULL;
}

#ifndef CONFIG_IA64
#define should_use_kmap(pfn)   page_is_ram(pfn)
#else
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#endif
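
/*
 * Rationale for the distinction above: ioremap() must not be used on
 * pages managed as ordinary RAM, because that can create a second
 * mapping with conflicting cache attributes, so such pages go through
 * kmap() instead. On IA64, ioremap() already picks suitable cache
 * attributes for RAM (see the comment above), so kmap() is never
 * needed there.
 */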

static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        if (should_use_kmap(pfn)) {
                if (pg_sz > PAGE_SIZE)
                        return NULL;
                return (void __iomem __force *)kmap(pfn_to_page(pfn));
        } else
                return ioremap(pg_off, pg_sz);
}

static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        /* Mirror the should_use_kmap() decision made in acpi_map() */
        if (should_use_kmap(pfn))
                kunmap(pfn_to_page(pfn));
        else
                iounmap(vaddr);
}
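
/*
 * Illustrative pairing (the physical address is made up): acpi_map()
 * and acpi_unmap() always operate on page-aligned ranges, and the
 * kmap() path is limited to a single page.
 *
 *      void __iomem *va = acpi_map(0x7f000000, PAGE_SIZE);
 *      if (va) {
 *              ...
 *              acpi_unmap(0x7f000000, va);
 *      }
 */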

/*
 * Used to pre-map the specified IO memory area. First check whether
 * the area is already pre-mapped; if it is, increase its reference
 * count (in __acpi_try_ioremap) and return. Otherwise do the real
 * ioremap and add the mapping to the acpi_iomaps list.
 */
static void __iomem *acpi_pre_map(phys_addr_t paddr,
                                  unsigned long size)
{
        void __iomem *vaddr;
        struct acpi_iomap *map;
        unsigned long pg_sz, flags;
        phys_addr_t pg_off;

        spin_lock_irqsave(&acpi_iomaps_lock, flags);
        vaddr = __acpi_try_ioremap(paddr, size);
        spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
        if (vaddr)
                return vaddr;

        pg_off = paddr & PAGE_MASK;
        pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
        vaddr = acpi_map(pg_off, pg_sz);
        if (!vaddr)
                return NULL;
        map = kmalloc(sizeof(*map), GFP_KERNEL);
        if (!map)
                goto err_unmap;
        INIT_LIST_HEAD(&map->list);
        map->paddr = pg_off;
        map->size = pg_sz;
        map->vaddr = vaddr;
        kref_init(&map->ref);

        spin_lock_irqsave(&acpi_iomaps_lock, flags);
        vaddr = __acpi_try_ioremap(paddr, size);
        if (vaddr) {
                /* Somebody else mapped it while we slept; use theirs */
                spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
                acpi_unmap(pg_off, map->vaddr);
                kfree(map);
                return vaddr;
        }
        list_add_tail_rcu(&map->list, &acpi_iomaps);
        spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

        return map->vaddr + (paddr - map->paddr);
err_unmap:
        acpi_unmap(pg_off, vaddr);
        return NULL;
}
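
/*
 * Sketch of the locking pattern above: "check, drop the lock,
 * allocate, re-check". The sleeping calls (acpi_map() and kmalloc())
 * happen outside the spinlock; if another thread mapped the same area
 * in the meantime, the second __acpi_try_ioremap() finds it and the
 * freshly created mapping is discarded.
 *
 * The page rounding, assuming a hypothetical PAGE_SIZE of 0x1000:
 *
 *      paddr  = 0x12345678, size = 0x10
 *      pg_off = 0x12345678 & PAGE_MASK = 0x12345000
 *      pg_sz  = ((0x12345688 + 0xfff) & PAGE_MASK) - 0x12345000 = 0x1000
 */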

/* acpi_iomaps_lock must be held before calling */
static void __acpi_kref_del_iomap(struct kref *ref)
{
        struct acpi_iomap *map;

        map = container_of(ref, struct acpi_iomap, ref);
        list_del_rcu(&map->list);
}

/*
 * Used to post-unmap the specified IO memory area. The real unmap is
 * done only when the reference count drops to zero.
 */
static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
{
        struct acpi_iomap *map;
        unsigned long flags;
        int del;

        spin_lock_irqsave(&acpi_iomaps_lock, flags);
        map = __acpi_find_iomap(paddr, size);
        BUG_ON(!map);
        del = kref_put(&map->ref, __acpi_kref_del_iomap);
        spin_unlock_irqrestore(&acpi_iomaps_lock, flags);

        if (!del)
                return;

        synchronize_rcu();
        acpi_unmap(map->paddr, map->vaddr);
        kfree(map);
}
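
/*
 * Note on the teardown ordering above: kref_put() unlinks the entry
 * from the list (still under the spinlock) when the count drops to
 * zero, and synchronize_rcu() then waits out every reader that might
 * still hold a pointer obtained from __acpi_ioremap_fast() before the
 * mapping is unmapped and freed.
 */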

/* Callers in NMI context should set silent = 1 */
static int acpi_check_gar(struct acpi_generic_address *reg,
                          u64 *paddr, int silent)
{
        u32 width, space_id;

        width = reg->bit_width;
        space_id = reg->space_id;
        /* Handle possible alignment issues */
        memcpy(paddr, &reg->address, sizeof(*paddr));
        if (!*paddr) {
                if (!silent)
                        pr_warning(FW_BUG ACPI_PFX
                        "Invalid physical address in GAR [0x%llx/%u/%u]\n",
                                   *paddr, width, space_id);
                return -EINVAL;
        }

        if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
                if (!silent)
                        pr_warning(FW_BUG ACPI_PFX
                                   "Invalid bit width in GAR [0x%llx/%u/%u]\n",
                                   *paddr, width, space_id);
                return -EINVAL;
        }

        if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
            space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
                if (!silent)
                        pr_warning(FW_BUG ACPI_PFX
                        "Invalid address space type in GAR [0x%llx/%u/%u]\n",
                                   *paddr, width, space_id);
                return -EINVAL;
        }

        return 0;
}
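
/*
 * For illustration, a GAR that passes the checks above (the field
 * values are hypothetical, in the style of what APEI tables carry):
 *
 *      struct acpi_generic_address gar = {
 *              .space_id  = ACPI_ADR_SPACE_SYSTEM_MEMORY,
 *              .bit_width = 32,
 *              .address   = 0xfed40000,
 *      };
 *      u64 paddr;
 *      int rc = acpi_check_gar(&gar, &paddr, 0);       // rc == 0
 */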

/* Pre-map, working on GAR */
int acpi_pre_map_gar(struct acpi_generic_address *reg)
{
        u64 paddr;
        void __iomem *vaddr;
        int rc;

        if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return 0;

        rc = acpi_check_gar(reg, &paddr, 0);
        if (rc)
                return rc;

        vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
        if (!vaddr)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL_GPL(acpi_pre_map_gar);

/* Post-unmap, working on GAR */
int acpi_post_unmap_gar(struct acpi_generic_address *reg)
{
        u64 paddr;
        int rc;

        if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return 0;

        rc = acpi_check_gar(reg, &paddr, 0);
        if (rc)
                return rc;

        acpi_post_unmap(paddr, reg->bit_width / 8);

        return 0;
}
EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
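
/*
 * A minimal usage sketch (hypothetical driver code; "ers" stands in
 * for some error-source descriptor carrying a GAR): pre-map at init
 * time in process context, access from NMI context via
 * acpi_atomic_read()/acpi_atomic_write(), unmap on teardown.
 *
 *      rc = acpi_pre_map_gar(&ers->status_register);
 *      if (rc)
 *              return rc;
 *      ...
 *      acpi_post_unmap_gar(&ers->status_register);
 */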

#ifdef readq
static inline u64 read64(const volatile void __iomem *addr)
{
        return readq(addr);
}
#else
static inline u64 read64(const volatile void __iomem *addr)
{
        u64 l, h;
        l = readl(addr);
        h = readl(addr+4);
        return l | (h << 32);
}
#endif
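
/*
 * Note: on platforms without readq(), the fallback above issues two
 * 32-bit reads (low word first), so a 64-bit read is not atomic with
 * respect to a concurrent device-side update of the register.
 */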

/*
 * Can be used in atomic (including NMI) or process context. The RCU
 * read lock may be released only after the IO memory access is done.
 */
static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
{
        void __iomem *addr;

        rcu_read_lock();
        addr = __acpi_ioremap_fast(paddr, width);
        if (!addr) {
                /* The area was never pre-mapped */
                rcu_read_unlock();
                return -EIO;
        }
        switch (width) {
        case 8:
                *val = readb(addr);
                break;
        case 16:
                *val = readw(addr);
                break;
        case 32:
                *val = readl(addr);
                break;
        case 64:
                *val = read64(addr);
                break;
        default:
                /* Don't leak the RCU read lock on the error path */
                rcu_read_unlock();
                return -EINVAL;
        }
        rcu_read_unlock();

        return 0;
}

#ifdef writeq
static inline void write64(u64 val, volatile void __iomem *addr)
{
        writeq(val, addr);
}
#else
static inline void write64(u64 val, volatile void __iomem *addr)
{
        writel(val, addr);
        writel(val>>32, addr+4);
}
#endif

static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
{
        void __iomem *addr;

        rcu_read_lock();
        addr = __acpi_ioremap_fast(paddr, width);
        if (!addr) {
                /* The area was never pre-mapped */
                rcu_read_unlock();
                return -EIO;
        }
        switch (width) {
        case 8:
                writeb(val, addr);
                break;
        case 16:
                writew(val, addr);
                break;
        case 32:
                writel(val, addr);
                break;
        case 64:
                write64(val, addr);
                break;
        default:
                /* Don't leak the RCU read lock on the error path */
                rcu_read_unlock();
                return -EINVAL;
        }
        rcu_read_unlock();

        return 0;
}

/* GAR accessors usable in atomic (including NMI) or process context */
int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
{
        u64 paddr;
        int rc;

        rc = acpi_check_gar(reg, &paddr, 1);
        if (rc)
                return rc;

        *val = 0;
        switch (reg->space_id) {
        case ACPI_ADR_SPACE_SYSTEM_MEMORY:
                return acpi_atomic_read_mem(paddr, val, reg->bit_width);
        case ACPI_ADR_SPACE_SYSTEM_IO:
                /* The (u32 *) cast assumes a little-endian *val layout */
                return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
        default:
                return -EINVAL;
        }
}
EXPORT_SYMBOL_GPL(acpi_atomic_read);

int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
{
        u64 paddr;
        int rc;

        rc = acpi_check_gar(reg, &paddr, 1);
        if (rc)
                return rc;

        switch (reg->space_id) {
        case ACPI_ADR_SPACE_SYSTEM_MEMORY:
                return acpi_atomic_write_mem(paddr, val, reg->bit_width);
        case ACPI_ADR_SPACE_SYSTEM_IO:
                return acpi_os_write_port(paddr, val, reg->bit_width);
        default:
                return -EINVAL;
        }
}
EXPORT_SYMBOL_GPL(acpi_atomic_write);
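
/*
 * End-to-end usage sketch (hypothetical caller, modeled on APEI-style
 * use of this API; the example_* names and status_gar are illustrative):
 * validate and pre-map in process context, read from NMI context,
 * unmap on exit.
 */
static struct acpi_generic_address *status_gar; /* from a firmware table */

static int example_init(void)           /* process context, may sleep */
{
        return acpi_pre_map_gar(status_gar);
}

static u64 example_nmi_handler(void)    /* NMI context, no ioremap() */
{
        u64 status = 0;

        acpi_atomic_read(&status, status_gar);
        return status;
}

static void example_exit(void)          /* process context */
{
        acpi_post_unmap_gar(status_gar);
}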