Diffstat (limited to 'mm/vmalloc.c')
 mm/vmalloc.c | 15 ++++++++++-----
 1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index fdf4b1e88e53..1d34d75366a7 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -375,7 +375,7 @@ nocache:
 	/* find starting point for our search */
 	if (free_vmap_cache) {
 		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
-		addr = ALIGN(first->va_end + PAGE_SIZE, align);
+		addr = ALIGN(first->va_end, align);
 		if (addr < vstart)
 			goto nocache;
 		if (addr + size - 1 < addr)
@@ -406,10 +406,10 @@ nocache:
 	}
 
 	/* from the starting point, walk areas until a suitable hole is found */
-	while (addr + size >= first->va_start && addr + size <= vend) {
+	while (addr + size > first->va_start && addr + size <= vend) {
 		if (addr + cached_hole_size < first->va_start)
 			cached_hole_size = first->va_start - addr;
-		addr = ALIGN(first->va_end + PAGE_SIZE, align);
+		addr = ALIGN(first->va_end, align);
 		if (addr + size - 1 < addr)
 			goto overflow;
 
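The two hunks above change the vmap area search: ALIGN() rounds an address up to the next multiple of a power-of-two alignment, so dropping the "+ PAGE_SIZE" lets a new area start directly at the previous area's va_end, and tightening the loop test from ">=" to ">" means a candidate whose end exactly touches the next area's start now counts as fitting rather than overlapping. The following stand-alone user-space sketch (a hypothetical illustration with an assumed 4 KiB page size, not kernel code) demonstrates the arithmetic:

#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two),
 * mirroring the kernel's ALIGN() macro. */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))
#define PAGE_SIZE	4096UL

int main(void)
{
	unsigned long va_end = 0x100000UL;	/* end of the previous vmap_area */
	unsigned long align  = PAGE_SIZE;

	/* Old search: always skipped a one-page guard gap after each area. */
	printf("old candidate: %#lx\n", ALIGN(va_end + PAGE_SIZE, align));

	/* New search: the candidate may begin exactly at va_end.  With the
	 * loop condition now 'addr + size > first->va_start', a hole whose
	 * end touches the next area's start is accepted instead of skipped. */
	printf("new candidate: %#lx\n", ALIGN(va_end, align));
	return 0;
}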
@@ -1534,6 +1534,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
 static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 				 pgprot_t prot, int node, void *caller)
 {
+	const int order = 0;
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
 	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
@@ -1560,11 +1561,12 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	for (i = 0; i < area->nr_pages; i++) {
 		struct page *page;
+		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
 
 		if (node < 0)
-			page = alloc_page(gfp_mask);
+			page = alloc_page(tmp_mask);
 		else
-			page = alloc_pages_node(node, gfp_mask, 0);
+			page = alloc_pages_node(node, tmp_mask, order);
 
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vunmap() */
@@ -1579,6 +1581,9 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	return area->addr;
 
 fail:
+	warn_alloc_failed(gfp_mask, order, "vmalloc: allocation failure, "
+			  "allocated %ld of %ld bytes\n",
+			  (area->nr_pages*PAGE_SIZE), area->size);
 	vfree(area->addr);
 	return NULL;
 }
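The last three hunks change the failure reporting: each per-page allocation now passes __GFP_NOWARN so the page allocator stays quiet on individual failures, and a single aggregate warn_alloc_failed() message is printed on the fail: path instead. A minimal user-space sketch of that pattern (malloc() standing in for alloc_page() and fprintf() for warn_alloc_failed(); a hypothetical analogue, not the kernel implementation):

#include <stdio.h>
#include <stdlib.h>

#define NR_PAGES	8
#define PAGE_SIZE	4096L

/* Stand-in for alloc_page(); fails partway through so that the
 * error path below is exercised. */
static void *try_alloc_page(int i)
{
	return i < 5 ? malloc(PAGE_SIZE) : NULL;
}

int main(void)
{
	void *pages[NR_PAGES] = { 0 };
	long i;

	for (i = 0; i < NR_PAGES; i++) {
		/* Kernel analogue: alloc_page(gfp_mask | __GFP_NOWARN),
		 * i.e. individual failures are silent... */
		pages[i] = try_alloc_page(i);
		if (!pages[i])
			goto fail;
	}
	return 0;

fail:
	/* ...and one summary line is emitted instead, mirroring the
	 * warn_alloc_failed() call added at the fail: label. */
	fprintf(stderr, "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
		i * PAGE_SIZE, NR_PAGES * PAGE_SIZE);
	while (i--)
		free(pages[i]);
	return 1;
}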