 arch/x86_64/mm/ioremap.c |   29
 include/linux/vmalloc.h  |    1
 mm/vmalloc.c             |   33
 3 files changed, 29 insertions(+), 34 deletions(-)
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
index c6fb0cb69992..58aac23760ef 100644
--- a/arch/x86_64/mm/ioremap.c
+++ b/arch/x86_64/mm/ioremap.c
@@ -133,7 +133,7 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 					unsigned long flags)
 {
 	int err = 0;
-	if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
+	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
 		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 		unsigned long vaddr = (unsigned long) __va(phys_addr);
 
@@ -214,7 +214,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
 		return NULL;
 	}
-	if (ioremap_change_attr(phys_addr, size, flags) < 0) {
+	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
 		area->flags &= 0xffffff;
 		vunmap(addr);
 		return NULL;
@@ -251,7 +251,7 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 
 void iounmap(volatile void __iomem *addr)
 {
-	struct vm_struct *p, **pprev;
+	struct vm_struct *p;
 
 	if (addr <= high_memory)
 		return;
@@ -260,24 +260,11 @@ void iounmap(volatile void __iomem *addr)
 		return;
 
 	write_lock(&vmlist_lock);
-	for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev)
-		if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr))
-			break;
-	if (!p) {
-		printk("__iounmap: bad address %p\n", addr);
-		goto out_unlock;
-	}
-	*pprev = p->next;
-	unmap_vm_area(p);
-	if ((p->flags >> 20) &&
-	    p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
-		/* p->size includes the guard page, but cpa doesn't like that */
-		change_page_attr_addr((unsigned long)__va(p->phys_addr),
-				      p->size >> PAGE_SHIFT,
-				      PAGE_KERNEL);
-		global_flush_tlb();
-	}
-out_unlock:
+	p = __remove_vm_area((void *)((unsigned long)addr & PAGE_MASK));
+	if (!p)
+		printk("iounmap: bad address %p\n", addr);
+	else if (p->flags >> 20)
+		ioremap_change_attr(p->phys_addr, p->size, 0);
 	write_unlock(&vmlist_lock);
 	kfree(p);
 }
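
Taken together, the ioremap.c hunks move the flags test rather than delete it: ioremap_change_attr() now does its work whenever the range lies inside the kernel direct mapping, __ioremap() only calls it when flags is non-zero, and the reworked iounmap() calls it with flags == 0 to restore the default attributes. (The p->flags >> 20 test recovers the pgprot bits that __ioremap() stashes in the upper bits of area->flags; see the "area->flags &= 0xffffff" line above.) The helper's body beyond the hunk context is not part of this diff; the sketch below reconstructs it from the change_page_attr_addr()/global_flush_tlb() sequence deleted from iounmap(), so treat the exact call as an assumption rather than a quote from the tree:

	static int
	ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			    unsigned long flags)
	{
		int err = 0;
		if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
			unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
			unsigned long vaddr = (unsigned long) __va(phys_addr);

			/* flags == 0 restores the default kernel mapping,
			 * which is what the new iounmap() relies on */
			err = change_page_attr_addr(vaddr, npages,
						    __pgprot(__PAGE_KERNEL | flags));
			if (!err)
				global_flush_tlb();
		}
		return err;
	}
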
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 3a358c895188..6409d9cf5965 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -41,6 +41,7 @@ extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
 extern struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
 					unsigned long start, unsigned long end);
 extern struct vm_struct *remove_vm_area(void *addr);
+extern struct vm_struct *__remove_vm_area(void *addr);
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
 extern void unmap_vm_area(struct vm_struct *area);
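
The new declaration follows the same convention as the __get_vm_area()/get_vm_area() pair a few lines up: the double-underscore variant does no locking of its own and, per the comment added in mm/vmalloc.c below, must be called with vmlist_lock write-held. A minimal hypothetical caller, purely to illustrate the contract (the helper and its use are invented):

	static void drop_mapping(void *vaddr)
	{
		struct vm_struct *area;

		write_lock(&vmlist_lock);
		area = __remove_vm_area(vaddr);	/* lock already held */
		if (area)
			/* inspect the area while still locked; only its
			 * size and flags stay safe to read afterwards */
			printk(KERN_DEBUG "dropped %p size %lu\n",
			       area->addr, area->size);
		write_unlock(&vmlist_lock);
		kfree(area);			/* kfree(NULL) is safe */
	}
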
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2bd83e5c2bbf..8ff16a1eee6a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -248,31 +248,20 @@ struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
 	return __get_vm_area(size, flags, VMALLOC_START, VMALLOC_END);
 }
 
-/**
- *	remove_vm_area  -  find and remove a contingous kernel virtual area
- *
- *	@addr:		base address
- *
- *	Search for the kernel VM area starting at @addr, and remove it.
- *	This function returns the found VM area, but using it is NOT safe
- *	on SMP machines.
- */
-struct vm_struct *remove_vm_area(void *addr)
+/* Caller must hold vmlist_lock */
+struct vm_struct *__remove_vm_area(void *addr)
 {
 	struct vm_struct **p, *tmp;
 
-	write_lock(&vmlist_lock);
 	for (p = &vmlist ; (tmp = *p) != NULL ;p = &tmp->next) {
 		 if (tmp->addr == addr)
 			 goto found;
 	}
-	write_unlock(&vmlist_lock);
 	return NULL;
 
 found:
 	unmap_vm_area(tmp);
 	*p = tmp->next;
-	write_unlock(&vmlist_lock);
 
 	/*
 	 * Remove the guard page.
@@ -281,6 +270,24 @@ found:
 	return tmp;
 }
 
+/**
+ *	remove_vm_area  -  find and remove a contingous kernel virtual area
+ *
+ *	@addr:		base address
+ *
+ *	Search for the kernel VM area starting at @addr, and remove it.
+ *	This function returns the found VM area, but using it is NOT safe
+ *	on SMP machines, except for its size or flags.
+ */
+struct vm_struct *remove_vm_area(void *addr)
+{
+	struct vm_struct *v;
+	write_lock(&vmlist_lock);
+	v = __remove_vm_area(addr);
+	write_unlock(&vmlist_lock);
+	return v;
+}
+
 void __vunmap(void *addr, int deallocate_pages)
 {
 	struct vm_struct *area;
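
The mm/vmalloc.c change is a pure factoring: remove_vm_area() keeps its old external behaviour, so existing callers such as __vunmap() in the trailing context need no change, while the new __remove_vm_area() lets the x86_64 iounmap() above reuse the list walk instead of open-coding it, still doing the unlink and the attribute restore under a single vmlist_lock hold. It also addresses the guard-page accounting the deleted comment complained about: __remove_vm_area() trims the guard page from tmp->size before iounmap() passes p->size to ioremap_change_attr(). From a driver's point of view the API is unchanged; a hypothetical usage sketch (the address, length and function names are invented for illustration):

	#include <linux/errno.h>
	#include <asm/io.h>

	#define DEV_PHYS	0xfebf0000UL	/* invented MMIO address */
	#define DEV_LEN		0x1000UL	/* invented length */

	static void __iomem *regs;

	static int dev_init(void)
	{
		regs = ioremap_nocache(DEV_PHYS, DEV_LEN); /* uncached mapping */
		if (!regs)
			return -ENOMEM;
		writel(1, regs);	/* uncached MMIO write */
		return 0;
	}

	static void dev_exit(void)
	{
		/* With this patch, iounmap() also resets any non-default
		 * attributes left on the kernel direct mapping (for
		 * ranges below end_pfn_map) instead of leaking them. */
		iounmap(regs);
	}
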