author    HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>    2013-07-03 18:02:18 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-07-03 19:07:30 -0400
commit    e69e9d4aee712a22665f008ae0550bb3d7c7f7c1 (patch)
tree      bb63cd5b881678c902f6b5e197a107022825be30
parent    cef2ac3f6c8ab532e49cf69d05f540931ad8ee64 (diff)
vmalloc: introduce remap_vmalloc_range_partial
We want to allocate the ELF note segment buffer on the 2nd kernel in
vmalloc space and remap it to user-space, in order to reduce the risk
that memory allocation fails on systems with a huge number of CPUs, and
hence with a huge ELF note segment that exceeds the 11-order block size.

Although remap_vmalloc_range already exists for the purpose of remapping
vmalloc memory to user-space, it requires the user-space range to be
specified via a vma that it maps in full.  Mmap on /proc/vmcore needs to
remap a range across multiple objects, so an interface that requires the
vma to cover the full range is problematic.

This patch introduces remap_vmalloc_range_partial, which receives the
user-space range as a pair of base address and size, and can therefore
be used for the mmap-on-/proc/vmcore case.  remap_vmalloc_range is
rewritten on top of remap_vmalloc_range_partial.

[akpm@linux-foundation.org: use PAGE_ALIGNED()]
Signed-off-by: HATAYAMA Daisuke <d.hatayama@jp.fujitsu.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Atsushi Kumagai <kumagai-atsushi@mxc.nes.nec.co.jp>
Cc: Lisa Mitchell <lisa.mitchell@hp.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
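As a concrete illustration of the new interface, here is a minimal sketch
of the kind of mmap handler the commit message has in mind: mapping two
separately vmalloc'ed objects back-to-back into a single vma.  Everything
below except remap_vmalloc_range_partial() itself (the handler name, the
buffers, their sizes) is hypothetical and not part of this patch; the
buffers are assumed to have been allocated with vmalloc_user(), which
sets the VM_USERMAP flag that the function checks.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical buffers, assumed allocated with vmalloc_user(). */
static void *notes_buf;		/* e.g. the ELF note segment */
static size_t notes_sz;
static void *data_buf;		/* a second, separately allocated object */
static size_t data_sz;

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	int ret;

	/* A real handler would first check that the vma is large
	 * enough for PAGE_ALIGN(notes_sz) + PAGE_ALIGN(data_sz). */

	/* Map the first object at the start of the vma. */
	ret = remap_vmalloc_range_partial(vma, uaddr, notes_buf, notes_sz);
	if (ret)
		return ret;
	uaddr += PAGE_ALIGN(notes_sz);

	/* Map the second object immediately after it, in the same vma.
	 * remap_vmalloc_range() cannot express this: it always starts
	 * at vma->vm_start and must cover the whole vma. */
	return remap_vmalloc_range_partial(vma, uaddr, data_buf, data_sz);
}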
-rw-r--r--  include/linux/vmalloc.h |  4
-rw-r--r--  mm/vmalloc.c            | 67
2 files changed, 49 insertions(+), 22 deletions(-)
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 7d5773a99f20..dd0a2c810529 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -82,6 +82,10 @@ extern void *vmap(struct page **pages, unsigned int count,
 			unsigned long flags, pgprot_t prot);
 extern void vunmap(const void *addr);
 
+extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
+				       unsigned long uaddr, void *kaddr,
+				       unsigned long size);
+
 extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 							unsigned long pgoff);
 void vmalloc_sync_all(void);
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 3875fa2f0f60..b7259906a806 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1476,10 +1476,9 @@ static void __vunmap(const void *addr, int deallocate_pages)
 	if (!addr)
 		return;
 
-	if ((PAGE_SIZE-1) & (unsigned long)addr) {
-		WARN(1, KERN_ERR "Trying to vfree() bad address (%p)\n", addr);
+	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
+			addr))
 		return;
-	}
 
 	area = remove_vm_area(addr);
 	if (unlikely(!area)) {
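The hunk above folds the open-coded alignment test into WARN(): in the
kernel, WARN(cond, ...) evaluates to cond, so one expression both emits
the diagnostic and gates the early return, and PAGE_ALIGNED() replaces
the hand-rolled mask.  A rough, self-contained userspace stand-in (the
macros below are simplified analogues, not the kernel definitions) shows
the control flow:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGNED(p)	(((unsigned long)(p) & (PAGE_SIZE - 1)) == 0)
/* Like the kernel's WARN(): print if cond holds, and yield cond. */
#define WARN(cond, fmt, ...) \
	((cond) ? (fprintf(stderr, fmt, ##__VA_ARGS__), 1) : 0)

static void vfree_like(const void *addr)
{
	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
		 addr))
		return;
	/* ... the aligned path would continue here ... */
}

int main(void)
{
	vfree_like((void *)0x1001);	/* misaligned: warns, then bails */
	vfree_like((void *)0x1000);	/* aligned: falls through silently */
	return 0;
}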
@@ -2148,42 +2147,43 @@ finished:
 }
 
 /**
- * remap_vmalloc_range - map vmalloc pages to userspace
- * @vma: vma to cover (map full range of vma)
- * @addr: vmalloc memory
- * @pgoff: number of pages into addr before first page to map
+ * remap_vmalloc_range_partial - map vmalloc pages to userspace
+ * @vma: vma to cover
+ * @uaddr: target user address to start at
+ * @kaddr: virtual address of vmalloc kernel memory
+ * @size: size of map area
  *
  * Returns: 0 for success, -Exxx on failure
  *
- * This function checks that addr is a valid vmalloc'ed area, and
- * that it is big enough to cover the vma. Will return failure if
- * that criteria isn't met.
+ * This function checks that @kaddr is a valid vmalloc'ed area,
+ * and that it is big enough to cover the range starting at
+ * @uaddr in @vma. Will return failure if that criteria isn't
+ * met.
  *
  * Similar to remap_pfn_range() (see mm/memory.c)
  */
-int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
-			unsigned long pgoff)
+int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
+				void *kaddr, unsigned long size)
 {
 	struct vm_struct *area;
-	unsigned long uaddr = vma->vm_start;
-	unsigned long usize = vma->vm_end - vma->vm_start;
 
-	if ((PAGE_SIZE-1) & (unsigned long)addr)
+	size = PAGE_ALIGN(size);
+
+	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
 		return -EINVAL;
 
-	area = find_vm_area(addr);
+	area = find_vm_area(kaddr);
 	if (!area)
 		return -EINVAL;
 
 	if (!(area->flags & VM_USERMAP))
 		return -EINVAL;
 
-	if (usize + (pgoff << PAGE_SHIFT) > area->size - PAGE_SIZE)
+	if (kaddr + size > area->addr + area->size)
 		return -EINVAL;
 
-	addr += pgoff << PAGE_SHIFT;
 	do {
-		struct page *page = vmalloc_to_page(addr);
+		struct page *page = vmalloc_to_page(kaddr);
 		int ret;
 
 		ret = vm_insert_page(vma, uaddr, page);
@@ -2191,14 +2191,37 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
 			return ret;
 
 		uaddr += PAGE_SIZE;
-		addr += PAGE_SIZE;
-		usize -= PAGE_SIZE;
-	} while (usize > 0);
+		kaddr += PAGE_SIZE;
+		size -= PAGE_SIZE;
+	} while (size > 0);
 
 	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
 
 	return 0;
 }
+EXPORT_SYMBOL(remap_vmalloc_range_partial);
+
+/**
+ * remap_vmalloc_range - map vmalloc pages to userspace
+ * @vma: vma to cover (map full range of vma)
+ * @addr: vmalloc memory
+ * @pgoff: number of pages into addr before first page to map
+ *
+ * Returns: 0 for success, -Exxx on failure
+ *
+ * This function checks that addr is a valid vmalloc'ed area, and
+ * that it is big enough to cover the vma. Will return failure if
+ * that criteria isn't met.
+ *
+ * Similar to remap_pfn_range() (see mm/memory.c)
+ */
+int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
+			unsigned long pgoff)
+{
+	return remap_vmalloc_range_partial(vma, vma->vm_start,
+					   addr + (pgoff << PAGE_SHIFT),
+					   vma->vm_end - vma->vm_start);
+}
 EXPORT_SYMBOL(remap_vmalloc_range);
 
 /*