path: root/mm
author		David Rientjes <rientjes@google.com>	2014-08-06 19:06:28 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 21:01:18 -0400
commit		930f036b4ff6501b91e09bba4bf94423203dabd9 (patch)
tree		f689223089016c430954b96e7c73f1c32502f68d /mm
parent		660654f90e7f8f6d8163276d47fc1573a39c7007 (diff)
mm, vmalloc: constify allocation mask
tmp_mask in the __vmalloc_area_node() iteration never changes, so it can be moved to function scope and marked const. This causes the movl and orl to be done only once per call rather than area->nr_pages times. nested_gfp can also be marked const.

Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
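For readers unfamiliar with the pattern, here is a minimal standalone sketch of the same loop-invariant hoisting in plain userspace C. The names FLAG_NOWARN and passed_flags are hypothetical stand-ins for illustration, not kernel identifiers:

#include <stdio.h>

#define FLAG_NOWARN 0x1u	/* hypothetical stand-in for __GFP_NOWARN */

int main(void)
{
	unsigned int passed_flags = 0x10u;	/* stand-in for gfp_mask */
	/* The OR is loop-invariant, so compute it once at function scope
	 * and mark it const, rather than re-deriving it every iteration. */
	const unsigned int alloc_flags = passed_flags | FLAG_NOWARN;
	int i;

	for (i = 0; i < 4; i++)
		printf("iteration %d allocates with flags %#x\n", i, alloc_flags);
	return 0;
}

Beyond saving the per-iteration movl and orl, the const qualifier also documents to readers and the compiler that the mask cannot change inside the loop.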
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmalloc.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index a3cad905f560..9ec4173f48a8 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1566,7 +1566,8 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 	const int order = 0;
 	struct page **pages;
 	unsigned int nr_pages, array_size, i;
-	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
+	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
 
 	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
 	array_size = (nr_pages * sizeof(struct page *));
@@ -1589,12 +1590,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
 
 	for (i = 0; i < area->nr_pages; i++) {
 		struct page *page;
-		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
 
 		if (node == NUMA_NO_NODE)
-			page = alloc_page(tmp_mask);
+			page = alloc_page(alloc_mask);
 		else
-			page = alloc_pages_node(node, tmp_mask, order);
+			page = alloc_pages_node(node, alloc_mask, order);
 
 		if (unlikely(!page)) {
 			/* Successfully allocated i pages, free them in __vunmap() */