author	David S. Miller <davem@davemloft.net>	2010-01-23 01:45:46 -0500
committer	David S. Miller <davem@davemloft.net>	2010-01-23 01:45:46 -0500
commit	6be325719b3e54624397e413efd4b33a997e55a3 (patch)
tree	57f321a56794cab2222e179b16731e0d76a4a68a /mm/vmalloc.c
parent	26d92f9276a56d55511a427fb70bd70886af647a (diff)
parent	92dcffb916d309aa01778bf8963a6932e4014d07 (diff)
Merge branch 'master' of /home/davem/src/GIT/linux-2.6/
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--	mm/vmalloc.c	15
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 0f551a4a44cd..d55d905463eb 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -555,10 +555,8 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 	}
 	rcu_read_unlock();
 
-	if (nr) {
-		BUG_ON(nr > atomic_read(&vmap_lazy_nr));
+	if (nr)
 		atomic_sub(nr, &vmap_lazy_nr);
-	}
 
 	if (nr || force_flush)
 		flush_tlb_kernel_range(*start, *end);
@@ -761,7 +759,7 @@ static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 	spin_lock(&vbq->lock);
 	list_add(&vb->free_list, &vbq->free);
 	spin_unlock(&vbq->lock);
-	put_cpu_var(vmap_cpu_blocks);
+	put_cpu_var(vmap_block_queue);
 
 	return vb;
 }
@@ -826,7 +824,7 @@ again:
 		}
 		spin_unlock(&vb->lock);
 	}
-	put_cpu_var(vmap_cpu_blocks);
+	put_cpu_var(vmap_block_queue);
 	rcu_read_unlock();
 
 	if (!addr) {
@@ -1411,6 +1409,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 {
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
+	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
 
 	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
 	array_size = (nr_pages * sizeof(struct page *));
@@ -1418,13 +1417,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	area->nr_pages = nr_pages;
 	/* Please note that the recursion is strictly bounded. */
 	if (array_size > PAGE_SIZE) {
-		pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
+		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
 				PAGE_KERNEL, node, caller);
 		area->flags |= VM_VPAGES;
 	} else {
-		pages = kmalloc_node(array_size,
-				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
-				node);
+		pages = kmalloc_node(array_size, nested_gfp, node);
 	}
 	area->pages = pages;
 	area->caller = caller;
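
Note: the two __vmalloc_area_node() hunks above fold the GFP mask for the page-pointer array into a single nested_gfp value. A minimal sketch of the resulting allocation path, reconstructed from the post-patch hunks only (the comments are interpretation, not part of the patch):

	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	/* nr_pages and array_size are computed from area->size as shown
	 * in the hunk above. */
	if (array_size > PAGE_SIZE) {
		/* The pointer array itself spans more than a page, so it is
		 * allocated with vmalloc; __GFP_HIGHMEM is usable here since
		 * the array is only reached through its kernel mapping. */
		pages = __vmalloc_node(array_size, 1, nested_gfp | __GFP_HIGHMEM,
				       PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		/* Small array: plain node-local kmalloc with the same mask. */
		pages = kmalloc_node(array_size, nested_gfp, node);
	}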