path: root/mm/util.c
author	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
committer	Jonathan Herman <hermanjl@cs.unc.edu>	2013-01-17 16:15:55 -0500
commit	8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree	a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /mm/util.c
parent	406089d01562f1e2bf9f089fd7637009ebaad589 (diff)
Patched in Tegra support.
Diffstat (limited to 'mm/util.c')
-rw-r--r--	mm/util.c	110
1 file changed, 16 insertions(+), 94 deletions(-)
diff --git a/mm/util.c b/mm/util.c
index c55e26b17d9..88ea1bd661c 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -1,10 +1,9 @@
 #include <linux/mm.h>
 #include <linux/slab.h>
 #include <linux/string.h>
-#include <linux/export.h>
+#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/sched.h>
-#include <linux/security.h>
 #include <asm/uaccess.h>
 
 #include "internal.h"
@@ -105,25 +104,6 @@ void *memdup_user(const void __user *src, size_t len)
 }
 EXPORT_SYMBOL(memdup_user);
 
-static __always_inline void *__do_krealloc(const void *p, size_t new_size,
-					   gfp_t flags)
-{
-	void *ret;
-	size_t ks = 0;
-
-	if (p)
-		ks = ksize(p);
-
-	if (ks >= new_size)
-		return (void *)p;
-
-	ret = kmalloc_track_caller(new_size, flags);
-	if (ret && p)
-		memcpy(ret, p, ks);
-
-	return ret;
-}
-
 /**
  * __krealloc - like krealloc() but don't free @p.
  * @p: object to reallocate memory for.
@@ -136,11 +116,23 @@ static __always_inline void *__do_krealloc(const void *p, size_t new_size,
  */
 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
 {
+	void *ret;
+	size_t ks = 0;
+
 	if (unlikely(!new_size))
 		return ZERO_SIZE_PTR;
 
-	return __do_krealloc(p, new_size, flags);
+	if (p)
+		ks = ksize(p);
+
+	if (ks >= new_size)
+		return (void *)p;
+
+	ret = kmalloc_track_caller(new_size, flags);
+	if (ret && p)
+		memcpy(ret, p, ks);
 
+	return ret;
 }
 EXPORT_SYMBOL(__krealloc);
 
@@ -152,7 +144,7 @@ EXPORT_SYMBOL(__krealloc);
  *
  * The contents of the object pointed to are preserved up to the
  * lesser of the new and old sizes. If @p is %NULL, krealloc()
- * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
+ * behaves exactly like kmalloc(). If @size is 0 and @p is not a
  * %NULL pointer, the object pointed to is freed.
  */
 void *krealloc(const void *p, size_t new_size, gfp_t flags)
@@ -164,7 +156,7 @@ void *krealloc(const void *p, size_t new_size, gfp_t flags)
 		return ZERO_SIZE_PTR;
 	}
 
-	ret = __do_krealloc(p, new_size, flags);
+	ret = __krealloc(p, new_size, flags);
 	if (ret && p != ret)
 		kfree(p);
 
@@ -247,47 +239,6 @@ void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
 		next->vm_prev = vma;
 }
 
-/* Check if the vma is being used as a stack by this task */
-static int vm_is_stack_for_task(struct task_struct *t,
-				struct vm_area_struct *vma)
-{
-	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
-}
-
-/*
- * Check if the vma is being used as a stack.
- * If is_group is non-zero, check in the entire thread group or else
- * just check in the current task. Returns the pid of the task that
- * the vma is stack for.
- */
-pid_t vm_is_stack(struct task_struct *task,
-		  struct vm_area_struct *vma, int in_group)
-{
-	pid_t ret = 0;
-
-	if (vm_is_stack_for_task(task, vma))
-		return task->pid;
-
-	if (in_group) {
-		struct task_struct *t;
-		rcu_read_lock();
-		if (!pid_alive(task))
-			goto done;
-
-		t = task;
-		do {
-			if (vm_is_stack_for_task(t, vma)) {
-				ret = t->pid;
-				goto done;
-			}
-		} while_each_thread(task, t);
-done:
-		rcu_read_unlock();
-	}
-
-	return ret;
-}
-
 #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
@@ -349,35 +300,6 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
 }
 EXPORT_SYMBOL_GPL(get_user_pages_fast);
 
-unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long pgoff)
-{
-	unsigned long ret;
-	struct mm_struct *mm = current->mm;
-
-	ret = security_mmap_file(file, prot, flag);
-	if (!ret) {
-		down_write(&mm->mmap_sem);
-		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
-		up_write(&mm->mmap_sem);
-	}
-	return ret;
-}
-
-unsigned long vm_mmap(struct file *file, unsigned long addr,
-	unsigned long len, unsigned long prot,
-	unsigned long flag, unsigned long offset)
-{
-	if (unlikely(offset + PAGE_ALIGN(len) < offset))
-		return -EINVAL;
-	if (unlikely(offset & ~PAGE_MASK))
-		return -EINVAL;
-
-	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
-}
-EXPORT_SYMBOL(vm_mmap);
-
 /* Tracepoints definitions. */
 EXPORT_TRACEPOINT_SYMBOL(kmalloc);
 EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
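
The kerneldoc kept in the hunks above spells out the krealloc() contract this revert preserves: contents are retained up to the lesser of the old and new sizes, a %NULL @p makes krealloc() behave like kmalloc(), a zero size frees a non-NULL @p, and __krealloc() differs only in never freeing @p (which is why the krealloc() body kfree()s the old pointer only when ret != p). A minimal caller-side sketch of those semantics, assuming a kernel build context; grow_buf() and its comments are illustrative and not part of this patch:

#include <linux/slab.h>

/* Illustrative helper (not in mm/util.c): grow a kmalloc'd buffer,
 * relying on the documented krealloc() behaviour. */
static void *grow_buf(void *buf, size_t new_len)
{
	void *nbuf;

	/* If buf is NULL, krealloc() behaves exactly like kmalloc(). */
	nbuf = krealloc(buf, new_len, GFP_KERNEL);
	if (!nbuf)
		return NULL;	/* on failure the old buffer is left untouched */

	/* Contents up to min(old size, new_len) have been preserved. If the
	 * existing allocation was already big enough (ksize(buf) >= new_len),
	 * krealloc() may simply return buf, so callers must not assume the
	 * pointer changed. */
	return nbuf;
}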