Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	30
1 file changed, 14 insertions(+), 16 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index f64632b67196..2b0aa5486092 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1270,19 +1270,15 @@ void unmap_kernel_range(unsigned long addr, unsigned long size)
 }
 EXPORT_SYMBOL_GPL(unmap_kernel_range);
 
-int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
+int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
 {
 	unsigned long addr = (unsigned long)area->addr;
 	unsigned long end = addr + get_vm_area_size(area);
 	int err;
 
-	err = vmap_page_range(addr, end, prot, *pages);
-	if (err > 0) {
-		*pages += err;
-		err = 0;
-	}
+	err = vmap_page_range(addr, end, prot, pages);
 
-	return err;
+	return err > 0 ? 0 : err;
 }
 EXPORT_SYMBOL_GPL(map_vm_area);
 
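The hunk above drops a level of indirection from map_vm_area(): the function no longer advances the caller's page pointer on partial success, so callers pass the page array directly; the later hunks update the callers in this file. A minimal caller sketch under the new prototype follows; the helper name and mapping size are illustrative, not part of this diff, and mirror the vmap() caller changed below.

/* Hypothetical caller sketch, not part of the patch. */
static void *map_eight_pages(struct page **pages, pgprot_t prot)
{
	struct vm_struct *area;

	/* 'pages' must point to at least 8 pages here. */
	area = get_vm_area(8 * PAGE_SIZE, VM_MAP);
	if (!area)
		return NULL;

	/* New contract: the array is passed as-is and left untouched. */
	if (map_vm_area(area, prot, pages)) {	/* previously: &pages */
		vunmap(area->addr);
		return NULL;
	}
	return area->addr;
}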
@@ -1548,7 +1544,7 @@ void *vmap(struct page **pages, unsigned int count,
 	if (!area)
 		return NULL;
 
-	if (map_vm_area(area, prot, &pages)) {
+	if (map_vm_area(area, prot, pages)) {
 		vunmap(area->addr);
 		return NULL;
 	}
@@ -1566,7 +1562,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	const int order = 0;
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
-	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
 
 	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
 	array_size = (nr_pages * sizeof(struct page *));
@@ -1589,12 +1586,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	for (i = 0; i < area->nr_pages; i++) {
 		struct page *page;
-		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
 
 		if (node == NUMA_NO_NODE)
-			page = alloc_page(tmp_mask);
+			page = alloc_page(alloc_mask);
 		else
-			page = alloc_pages_node(node, tmp_mask, order);
 
+			page = alloc_pages_node(node, alloc_mask, order);
 
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vunmap() */
@@ -1602,9 +1598,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 			goto fail;
 		}
 		area->pages[i] = page;
+		if (gfp_mask & __GFP_WAIT)
+			cond_resched();
 	}
 
-	if (map_vm_area(area, prot, &pages))
+	if (map_vm_area(area, prot, pages))
 		goto fail;
 	return area->addr;
 
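Taken together, the __vmalloc_area_node() hunks hoist the __GFP_NOWARN bit out of the per-page loop into a const alloc_mask and add a cond_resched() after each successful allocation when the caller's gfp mask allows sleeping (__GFP_WAIT), so large vmalloc requests yield the CPU periodically. Read as a whole, the loop becomes the sketch below; it is reconstructed from the hunks above, assumes the locals declared earlier in the function, and restores the one unwind line (area->nr_pages = i;) that falls in the gap between the two hunks.

	for (i = 0; i < area->nr_pages; i++) {
		struct page *page;

		if (node == NUMA_NO_NODE)
			page = alloc_page(alloc_mask);
		else
			page = alloc_pages_node(node, alloc_mask, order);

		if (unlikely(!page)) {
			/* Successfully allocated i pages, free them in __vunmap() */
			area->nr_pages = i;
			goto fail;
		}
		area->pages[i] = page;

		/* Yield between page allocations when sleeping is allowed. */
		if (gfp_mask & __GFP_WAIT)
			cond_resched();
	}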
@@ -2690,14 +2688,14 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
 
 	prev_end = VMALLOC_START;
 
-	spin_lock(&vmap_area_lock);
+	rcu_read_lock();
 
 	if (list_empty(&vmap_area_list)) {
 		vmi->largest_chunk = VMALLOC_TOTAL;
 		goto out;
 	}
 
-	list_for_each_entry(va, &vmap_area_list, list) {
+	list_for_each_entry_rcu(va, &vmap_area_list, list) {
 		unsigned long addr = va->va_start;
 
 		/*
@@ -2724,7 +2722,7 @@ void get_vmalloc_info(struct vmalloc_info *vmi)
 	vmi->largest_chunk = VMALLOC_END - prev_end;
 
 out:
-	spin_unlock(&vmap_area_lock);
+	rcu_read_unlock();
 }
 #endif
 
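The final two hunks convert get_vmalloc_info() from taking vmap_area_lock to a lockless RCU walk: the list is traversed between rcu_read_lock() and rcu_read_unlock() with list_for_each_entry_rcu(), so this statistics reader no longer contends with the vmalloc/vfree paths on the spinlock, at the cost of a snapshot that may be slightly stale. The generic read-side pattern is sketched below on a hypothetical RCU-protected list; apart from the RCU and list primitives, all names are illustrative.

/* Requires <linux/rcupdate.h> and <linux/rculist.h>. */
struct my_node {
	unsigned long value;
	struct list_head list;		/* linked on an RCU-protected list */
};

static unsigned long my_list_sum(struct list_head *head)
{
	struct my_node *n;
	unsigned long sum = 0;

	rcu_read_lock();		/* readers never block updaters */
	list_for_each_entry_rcu(n, head, list)
		sum += n->value;	/* entries may appear or vanish concurrently */
	rcu_read_unlock();

	return sum;			/* an approximate snapshot, as in get_vmalloc_info() */
}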