Diffstat (limited to 'drivers/acpi')
-rw-r--r--	drivers/acpi/atomicio.c	41
1 file changed, 37 insertions(+), 4 deletions(-)
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
index 1016f186c17c..d4a5b3d3657b 100644
--- a/drivers/acpi/atomicio.c
+++ b/drivers/acpi/atomicio.c
@@ -32,6 +32,8 @@
 #include <linux/rculist.h>
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/highmem.h>
 #include <acpi/atomicio.h>
 
 #define ACPI_PFX "ACPI: "
@@ -97,6 +99,37 @@ static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
 	return NULL;
 }
 
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn)	page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn)	0
+#endif
+
+static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz)
+{
+	unsigned long pfn;
+
+	pfn = pg_off >> PAGE_SHIFT;
+	if (should_use_kmap(pfn)) {
+		if (pg_sz > PAGE_SIZE)
+			return NULL;
+		return (void __iomem __force *)kmap(pfn_to_page(pfn));
+	} else
+		return ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr)
+{
+	unsigned long pfn;
+
+	pfn = pg_off >> PAGE_SHIFT;
+	if (page_is_ram(pfn))
+		kunmap(pfn_to_page(pfn));
+	else
+		iounmap(vaddr);
+}
+
 /*
  * Used to pre-map the specified IO memory area. First try to find
  * whether the area is already pre-mapped, if it is, increase the
@@ -119,7 +152,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
 
 	pg_off = paddr & PAGE_MASK;
 	pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
-	vaddr = ioremap(pg_off, pg_sz);
+	vaddr = acpi_map(pg_off, pg_sz);
 	if (!vaddr)
 		return NULL;
 	map = kmalloc(sizeof(*map), GFP_KERNEL);
@@ -135,7 +168,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
 	vaddr = __acpi_try_ioremap(paddr, size);
 	if (vaddr) {
 		spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-		iounmap(map->vaddr);
+		acpi_unmap(pg_off, map->vaddr);
 		kfree(map);
 		return vaddr;
 	}
@@ -144,7 +177,7 @@ static void __iomem *acpi_pre_map(phys_addr_t paddr,
 
 	return map->vaddr + (paddr - map->paddr);
 err_unmap:
-	iounmap(vaddr);
+	acpi_unmap(pg_off, vaddr);
 	return NULL;
 }
 
@@ -177,7 +210,7 @@ static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
 		return;
 
 	synchronize_rcu();
-	iounmap(map->vaddr);
+	acpi_unmap(map->paddr, map->vaddr);
 	kfree(map);
 }
 