about summary refs log tree commit diff stats
path: root/mm/vmalloc.c
diff options
context:
space:
mode:
authorWANG Chao <chaowang@redhat.com>2014-08-06 19:06:58 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-06 21:01:19 -0400
commitf6f8ed47353597dcb895eb4a15a28af657392e72 (patch)
treee74c51da4b21049b82d9b3fc537d28f713989801 /mm/vmalloc.c
parent21bda264f4243f61dfcc485174055f12ad0530b4 (diff)
mm/vmalloc.c: clean up map_vm_area third argument
Currently map_vm_area() takes (struct page *** pages) as third argument, and after mapping, it moves (*pages) to point to (*pages + nr_mapped_pages). It looks like this kind of increment is useless to its caller these days. The callers don't care about the increments and actually they're trying to avoid this by passing another copy to map_vm_area(). The caller can always guarantee all the pages can be mapped into vm_area as specified in first argument and the caller only cares about whether map_vm_area() fails or not. This patch cleans up the pointer movement in map_vm_area() and updates its callers accordingly. Signed-off-by: WANG Chao <chaowang@redhat.com> Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com> Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> Cc: Minchan Kim <minchan@kernel.org> Cc: Nitin Gupta <ngupta@vflare.org> Cc: Rusty Russell <rusty@rustcorp.com.au> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--mm/vmalloc.c14
1 files changed, 5 insertions, 9 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 9ec4173f48a8..2b0aa5486092 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1270,19 +1270,15 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
1270} 1270}
1271EXPORT_SYMBOL_GPL(unmap_kernel_range); 1271EXPORT_SYMBOL_GPL(unmap_kernel_range);
1272 1272
1273int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages) 1273int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
1274{ 1274{
1275 unsigned long addr = (unsigned long)area->addr; 1275 unsigned long addr = (unsigned long)area->addr;
1276 unsigned long end = addr + get_vm_area_size(area); 1276 unsigned long end = addr + get_vm_area_size(area);
1277 int err; 1277 int err;
1278 1278
1279 err = vmap_page_range(addr, end, prot, *pages); 1279 err = vmap_page_range(addr, end, prot, pages);
1280 if (err > 0) {
1281 *pages += err;
1282 err = 0;
1283 }
1284 1280
1285 return err; 1281 return err > 0 ? 0 : err;
1286} 1282}
1287EXPORT_SYMBOL_GPL(map_vm_area); 1283EXPORT_SYMBOL_GPL(map_vm_area);
1288 1284
@@ -1548,7 +1544,7 @@ void *vmap(struct page **pages, unsigned int count,
1548 if (!area) 1544 if (!area)
1549 return NULL; 1545 return NULL;
1550 1546
1551 if (map_vm_area(area, prot, &pages)) { 1547 if (map_vm_area(area, prot, pages)) {
1552 vunmap(area->addr); 1548 vunmap(area->addr);
1553 return NULL; 1549 return NULL;
1554 } 1550 }
@@ -1606,7 +1602,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1606 cond_resched(); 1602 cond_resched();
1607 } 1603 }
1608 1604
1609 if (map_vm_area(area, prot, &pages)) 1605 if (map_vm_area(area, prot, pages))
1610 goto fail; 1606 goto fail;
1611 return area->addr; 1607 return area->addr;
1612 1608