 arch/i386/mm/ioremap.c   | 37 +++++++++++++++++++++++++++++--------
 arch/x86_64/mm/ioremap.c | 37 +++++++++++++++++++++++++++++++------
 2 files changed, 60 insertions(+), 14 deletions(-)
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index 5d09de8d1c6b..8498b5ac3955 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -223,9 +223,15 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
+/**
+ * iounmap - Free a IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
 void iounmap(volatile void __iomem *addr)
 {
-	struct vm_struct *p;
+	struct vm_struct *p, *o;
 
 	if ((void __force *)addr <= high_memory)
 		return;
@@ -239,22 +245,37 @@ void iounmap(volatile void __iomem *addr)
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	write_lock(&vmlist_lock);
-	p = __remove_vm_area((void *)(PAGE_MASK & (unsigned long __force)addr));
-	if (!p) {
-		printk(KERN_WARNING "iounmap: bad address %p\n", addr);
+	addr = (volatile void *)(PAGE_MASK & (unsigned long __force)addr);
+
+	/* Use the vm area unlocked, assuming the caller
+	   ensures there isn't another iounmap for the same address
+	   in parallel. Reuse of the virtual address is prevented by
+	   leaving it in the global lists until we're done with it.
+	   cpa takes care of the direct mappings. */
+	read_lock(&vmlist_lock);
+	for (p = vmlist; p; p = p->next) {
+		if (p->addr == addr)
+			break;
+	}
+	read_unlock(&vmlist_lock);
+
+	if (!p) {
+		printk("iounmap: bad address %p\n", addr);
 		dump_stack();
-		goto out_unlock;
+		return;
 	}
 
+	/* Reset the direct mapping. Can block */
 	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
 		change_page_attr(virt_to_page(__va(p->phys_addr)),
 				 p->size >> PAGE_SHIFT,
 				 PAGE_KERNEL);
 		global_flush_tlb();
 	}
-out_unlock:
-	write_unlock(&vmlist_lock);
+
+	/* Finally remove it */
+	o = remove_vm_area((void *)addr);
+	BUG_ON(p != o || o == NULL);
 	kfree(p);
 }
 EXPORT_SYMBOL(iounmap);
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index ecf7acb5db9b..0d260e4492f7 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -247,9 +247,15 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 	return __ioremap(phys_addr, size, _PAGE_PCD);
 }
 
+/**
+ * iounmap - Free a IO remapping
+ * @addr: virtual address from ioremap_*
+ *
+ * Caller must ensure there is only one unmapping for the same pointer.
+ */
 void iounmap(volatile void __iomem *addr)
 {
-	struct vm_struct *p;
+	struct vm_struct *p, *o;
 
 	if (addr <= high_memory)
 		return;
@@ -257,12 +263,31 @@ void iounmap(volatile void __iomem *addr)
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	write_lock(&vmlist_lock);
-	p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
-	if (!p)
+	addr = (volatile void *)(PAGE_MASK & (unsigned long __force)addr);
+	/* Use the vm area unlocked, assuming the caller
+	   ensures there isn't another iounmap for the same address
+	   in parallel. Reuse of the virtual address is prevented by
+	   leaving it in the global lists until we're done with it.
+	   cpa takes care of the direct mappings. */
+	read_lock(&vmlist_lock);
+	for (p = vmlist; p; p = p->next) {
+		if (p->addr == addr)
+			break;
+	}
+	read_unlock(&vmlist_lock);
+
+	if (!p) {
 		printk("iounmap: bad address %p\n", addr);
-	else if (p->flags >> 20)
+		dump_stack();
+		return;
+	}
+
+	/* Reset the direct mapping. Can block */
+	if (p->flags >> 20)
 		ioremap_change_attr(p->phys_addr, p->size, 0);
-	write_unlock(&vmlist_lock);
+
+	/* Finally remove it */
+	o = remove_vm_area((void *)addr);
+	BUG_ON(p != o || o == NULL);
 	kfree(p);
 }
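
Both hunks make the same structural change: the old iounmap() took vmlist_lock as a writer and held it across change_page_attr()/ioremap_change_attr(), which may block; the new code only reads the list under the lock to find the vm_struct, drops the lock before the blocking attribute reset, and removes the entry last via remove_vm_area(), with BUG_ON(p != o || o == NULL) catching a racing unmap of the same pointer. The user-space sketch below illustrates that lock ordering under stated assumptions: struct area, area_list, list_lock, reset_attrs() and unmap_area() are hypothetical stand-ins for vm_struct, vmlist, vmlist_lock, change_page_attr() and iounmap(), and a POSIX rwlock replaces the kernel rwlock. It is an illustration of the pattern, not kernel code.

#include <assert.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct vm_struct on vmlist. */
struct area {
	void *addr;
	struct area *next;
};

static struct area *area_list;	/* plays the role of vmlist */
static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for change_page_attr(): may block, so call it unlocked. */
static void reset_attrs(struct area *p)
{
	(void)p;
}

/* Unlink addr under the write lock; analogous to remove_vm_area(). */
static struct area *remove_area(void *addr)
{
	struct area **pp, *p;

	pthread_rwlock_wrlock(&list_lock);
	for (pp = &area_list; (p = *pp) != NULL; pp = &p->next) {
		if (p->addr == addr) {
			*pp = p->next;
			break;
		}
	}
	pthread_rwlock_unlock(&list_lock);
	return p;
}

static void unmap_area(void *addr)
{
	struct area *p, *o;

	/* Lookup only, under the read lock.  The entry stays on the
	   list, so the address cannot be reused while we work on it. */
	pthread_rwlock_rdlock(&list_lock);
	for (p = area_list; p; p = p->next)
		if (p->addr == addr)
			break;
	pthread_rwlock_unlock(&list_lock);

	if (!p) {
		fprintf(stderr, "unmap_area: bad address %p\n", addr);
		return;
	}

	reset_attrs(p);		/* blocking work, no lock held */

	o = remove_area(addr);	/* finally take it off the list */
	assert(o == p);		/* mirrors BUG_ON(p != o || o == NULL) */
	free(p);
}

int main(void)
{
	struct area *a = malloc(sizeof(*a));

	a->addr = (void *)0x1000;
	a->next = area_list;
	area_list = a;

	unmap_area((void *)0x1000);	/* normal path */
	unmap_area((void *)0x2000);	/* bad-address path */
	return 0;
}

The invariant the sketch preserves is the one the in-diff comment states: the entry stays on the global list until the blocking work is done, so the virtual range cannot be handed out again in the meantime, and the final o == p cross-check enforces the "only one unmapping for the same pointer" rule from the new kerneldoc comment.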