Diffstat (limited to 'drivers/acpi/atomicio.c')
-rw-r--r--  drivers/acpi/atomicio.c  77
1 file changed, 67 insertions(+), 10 deletions(-)
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index cfc0cc10af39..d4a5b3d3657b 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -32,6 +32,8 @@
 #include <linux/rculist.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
 #include <acpi/atomicio.h>
 
 #define ACPI_PFX "ACPI: "
@@ -97,6 +99,37 @@ static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
 	return NULL;
 }
 
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn)	page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn)	0
+#endif
+
+static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
+{
+	unsigned long pfn;
+
+	pfn = pg_off >> PAGE_SHIFT;
+	if (should_use_kmap(pfn)) {
+		if (pg_sz > PAGE_SIZE)
+			return NULL;
+		return (void __iomem __force *)kmap(pfn_to_page(pfn));
+	} else
+		return ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
+{
+	unsigned long pfn;
+
+	pfn = pg_off >> PAGE_SHIFT;
+	if (page_is_ram(pfn))
+		kunmap(pfn_to_page(pfn));
+	else
+		iounmap(vaddr);
+}
+
 /*
  * Used to pre-map the specified IO memory area. First try to find
  * whether the area is already pre-mapped, if it is, increase the
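Note: the new acpi_map()/acpi_unmap() pair picks kmap() when the target page is ordinary RAM (page_is_ram()) and falls back to ioremap() otherwise; remapping RAM through ioremap() can create a second mapping whose cache attributes conflict with the kernel's linear mapping. On ia64, ioremap() already resolves cache attributes itself, so should_use_kmap() is hard-wired to 0 there. Because kmap() maps exactly one page, the kmap path refuses requests larger than PAGE_SIZE. A minimal caller sketch (hypothetical helper, not part of this patch):

	/* Hypothetical: read a 32-bit value at paddr through acpi_map(). */
	static int peek_u32(phys_addr_t paddr, u32 *out)
	{
		phys_addr_t pg_off = paddr & PAGE_MASK;
		void __iomem *base = acpi_map(pg_off, PAGE_SIZE);

		if (!base)
			return -ENOMEM;
		*out = readl(base + (paddr - pg_off));
		acpi_unmap(pg_off, base);
		return 0;
	}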
@@ -119,7 +152,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
 
 	pg_off = paddr & PAGE_MASK;
 	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
-	vaddr = ioremap(pg_off, pg_sz);
+	vaddr = acpi_map(pg_off, pg_sz);
 	if (!vaddr)
 		return NULL;
 	map = kmalloc(sizeof(*map), GFP_KERNEL);
@@ -135,7 +168,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
 	vaddr = __acpi_try_ioremap(paddr, size);
 	if (vaddr) {
 		spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-		iounmap(map->vaddr);
+		acpi_unmap(pg_off, map->vaddr);
 		kfree(map);
 		return vaddr;
 	}
@@ -144,7 +177,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
 
 	return map->vaddr + (paddr - map->paddr);
 err_unmap:
-	iounmap(vaddr);
+	acpi_unmap(pg_off, vaddr);
 	return NULL;
 }
 
@@ -177,7 +210,7 @@ static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
 		return;
 
 	synchronize_rcu();
-	iounmap(map->vaddr);
+	acpi_unmap(map->paddr, map->vaddr);
 	kfree(map);
 }
 
@@ -260,6 +293,21 @@ int acpi_post_unmap_gar(struct acpi_generic_address *reg)
 }
 EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
 
+#ifdef readq
+static inline u64 read64(const volatile void __iomem *addr)
+{
+	return readq(addr);
+}
+#else
+static inline u64 read64(const volatile void __iomem *addr)
+{
+	u64 l, h;
+	l = readl(addr);
+	h = readl(addr+4);
+	return l | (h << 32);
+}
+#endif
+
 /*
  * Can be used in atomic (including NMI) or process context. RCU read
  * lock can only be released after the IO memory area accessing.
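Note: read64() compiles down to a plain readq() where the architecture provides one; otherwise (typically 32-bit kernels) it issues two readl() calls, low word first, and recombines the halves. Declaring l and h as u64 keeps the h << 32 shift from overflowing a 32-bit type, but the fallback is not a single atomic 64-bit access: a concurrent writer can be observed between the two reads. A user-space illustration of the same recombination (plain memory rather than MMIO, little-endian host assumed):

	#include <stdint.h>
	#include <assert.h>

	/* Same split-read recombination as the readq-less read64() fallback. */
	static uint64_t read64_split(const volatile uint32_t *p)
	{
		uint64_t l = p[0];	/* low 32 bits, read first */
		uint64_t h = p[1];	/* high 32 bits */
		return l | (h << 32);
	}

	int main(void)
	{
		uint64_t v = 0x1122334455667788ULL;
		assert(read64_split((const volatile uint32_t *)&v) == v);
		return 0;
	}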
@@ -280,11 +328,9 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
 	case 32:
 		*val = readl(addr);
 		break;
-#ifdef readq
 	case 64:
-		*val = readq(addr);
+		*val = read64(addr);
 		break;
-#endif
 	default:
 		return -EINVAL;
 	}
@@ -293,6 +339,19 @@ static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
 	return 0;
 }
 
+#ifdef writeq
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+	writeq(val, addr);
+}
+#else
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val>>32, addr+4);
+}
+#endif
+
 static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
 {
 	void __iomem *addr;
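Note: write64() mirrors read64(): without writeq it stores the low 32 bits first, then the high 32 bits, so a device polling the register can transiently observe a torn value; only the writeq path gives a truly atomic 64-bit store. Sketch of the same split store on plain memory (user-space, little-endian host assumed):

	#include <stdint.h>

	/* Same split-write ordering as the writeq-less write64() fallback. */
	static void write64_split(volatile uint32_t *p, uint64_t val)
	{
		p[0] = (uint32_t)val;		/* low 32 bits first */
		p[1] = (uint32_t)(val >> 32);	/* then high 32 bits */
	}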
@@ -309,11 +368,9 @@ static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
 	case 32:
 		writel(val, addr);
 		break;
-#ifdef writeq
 	case 64:
-		writeq(val, addr);
+		write64(val, addr);
 		break;
-#endif
 	default:
 		return -EINVAL;
 	}