aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorAndrey Ryabinin <a.ryabinin@samsung.com>2015-02-13 17:40:07 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-14 00:21:42 -0500
commitcb9e3c292d0115499c660028ad35ac5501d722b5 (patch)
tree5345bc82199c3cd843ea36d34aadaa0894bdd340 /mm
parent71394fe50146202f2c8d92cf50f5ebc761acf254 (diff)
mm: vmalloc: pass additional vm_flags to __vmalloc_node_range()
For instrumenting global variables KASan will need to shadow the memory backing modules. So on module loading we will need to allocate memory for shadow and map it at address in shadow that corresponds to the address allocated in module_alloc(). __vmalloc_node_range() could be used for this purpose, except it puts a guard hole after allocated area. Guard hole in shadow memory would be a problem because at some future point we might need to have a shadow memory at address occupied by guard hole. So we could fail to allocate shadow for module_alloc(). Now we have VM_NO_GUARD flag disabling guard page, so we need to pass it into __vmalloc_node_range(). Add new parameter 'vm_flags' to __vmalloc_node_range() function. Signed-off-by: Andrey Ryabinin <a.ryabinin@samsung.com> Cc: Dmitry Vyukov <dvyukov@google.com> Cc: Konstantin Serebryany <kcc@google.com> Cc: Dmitry Chernenkov <dmitryc@google.com> Signed-off-by: Andrey Konovalov <adech.fo@gmail.com> Cc: Yuri Gribov <tetra2005@gmail.com> Cc: Konstantin Khlebnikov <koct9i@gmail.com> Cc: Sasha Levin <sasha.levin@oracle.com> Cc: Christoph Lameter <cl@linux.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Dave Hansen <dave.hansen@intel.com> Cc: Andi Kleen <andi@firstfloor.org> Cc: Ingo Molnar <mingo@elte.hu> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: "H. Peter Anvin" <hpa@zytor.com> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/vmalloc.c10
1 file changed, 6 insertions, 4 deletions
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 2e74e99d4cfe..35b25e1340ca 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1619,6 +1619,7 @@ fail:
1619 * @end: vm area range end 1619 * @end: vm area range end
1620 * @gfp_mask: flags for the page level allocator 1620 * @gfp_mask: flags for the page level allocator
1621 * @prot: protection mask for the allocated pages 1621 * @prot: protection mask for the allocated pages
1622 * @vm_flags: additional vm area flags (e.g. %VM_NO_GUARD)
1622 * @node: node to use for allocation or NUMA_NO_NODE 1623 * @node: node to use for allocation or NUMA_NO_NODE
1623 * @caller: caller's return address 1624 * @caller: caller's return address
1624 * 1625 *
@@ -1628,7 +1629,8 @@ fail:
1628 */ 1629 */
1629void *__vmalloc_node_range(unsigned long size, unsigned long align, 1630void *__vmalloc_node_range(unsigned long size, unsigned long align,
1630 unsigned long start, unsigned long end, gfp_t gfp_mask, 1631 unsigned long start, unsigned long end, gfp_t gfp_mask,
1631 pgprot_t prot, int node, const void *caller) 1632 pgprot_t prot, unsigned long vm_flags, int node,
1633 const void *caller)
1632{ 1634{
1633 struct vm_struct *area; 1635 struct vm_struct *area;
1634 void *addr; 1636 void *addr;
@@ -1638,8 +1640,8 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
1638 if (!size || (size >> PAGE_SHIFT) > totalram_pages) 1640 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
1639 goto fail; 1641 goto fail;
1640 1642
1641 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED, 1643 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
1642 start, end, node, gfp_mask, caller); 1644 vm_flags, start, end, node, gfp_mask, caller);
1643 if (!area) 1645 if (!area)
1644 goto fail; 1646 goto fail;
1645 1647
@@ -1688,7 +1690,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
1688 int node, const void *caller) 1690 int node, const void *caller)
1689{ 1691{
1690 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END, 1692 return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
1691 gfp_mask, prot, node, caller); 1693 gfp_mask, prot, 0, node, caller);
1692} 1694}
1693 1695
1694void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) 1696void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)