author    Linus Torvalds <torvalds@linux-foundation.org>  2011-03-23 23:51:42 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-03-23 23:51:42 -0400
commit    b81a618dcd3ea99de292dbe624f41ca68f464376 (patch)
tree      c5fbe44f944da9d7dc0c224116be77094d379c8a
parent    2f284c846331fa44be1300a3c2c3e85800268a00 (diff)
parent    a9712bc12c40c172e393f85a9b2ba8db4bf59509 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs-2.6:
  deal with races in /proc/*/{syscall,stack,personality}
  proc: enable writing to /proc/pid/mem
  proc: make check_mem_permission() return an mm_struct on success
  proc: hold cred_guard_mutex in check_mem_permission()
  proc: disable mem_write after exec
  mm: implement access_remote_vm
  mm: factor out main logic of access_process_vm
  mm: use mm_struct to resolve gate vma's in __get_user_pages
  mm: arch: rename in_gate_area_no_task to in_gate_area_no_mm
  mm: arch: make in_gate_area take an mm_struct instead of a task_struct
  mm: arch: make get_gate_vma take an mm_struct instead of a task_struct
  x86: mark associated mm when running a task in 32 bit compatibility mode
  x86: add context tag to mark mm when running a task in 32-bit compatibility mode
  auxv: require the target to be tracable (or yourself)
  close race in /proc/*/environ
  report errors in /proc/*/*map* sanely
  pagemap: close races with suid execve
  make sessionid permissions in /proc/*/task/* match those in /proc/*
  fix leaks in path_lookupat()

Fix up trivial conflicts in fs/proc/base.c
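For orientation only, a minimal illustrative sketch of the calling convention this series converges on: permission checks hand back a referenced mm_struct (or an ERR_PTR), and remote memory is then reached through access_remote_vm() on that mm instead of access_process_vm() on a task. The helper below is hypothetical (check_mem_permission() is static to fs/proc/base.c), not code from this merge.

/* Hypothetical reader mirroring the reworked /proc/pid/mem flow. */
static int read_target_mem(struct task_struct *task, unsigned long addr,
			   void *buf, int len)
{
	struct mm_struct *mm;
	int copied;

	mm = check_mem_permission(task);	/* takes cred_guard_mutex; mm or ERR_PTR */
	if (IS_ERR(mm))
		return PTR_ERR(mm);

	copied = access_remote_vm(mm, addr, buf, len, 0);	/* write == 0: read */
	mmput(mm);				/* drop the reference we were handed */
	return copied;
}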
-rw-r--r--  arch/powerpc/kernel/vdso.c            6
-rw-r--r--  arch/s390/kernel/vdso.c               6
-rw-r--r--  arch/sh/kernel/vsyscall/vsyscall.c    6
-rw-r--r--  arch/x86/ia32/ia32_aout.c             1
-rw-r--r--  arch/x86/include/asm/mmu.h            6
-rw-r--r--  arch/x86/kernel/process_64.c          8
-rw-r--r--  arch/x86/mm/init_64.c                16
-rw-r--r--  arch/x86/vdso/vdso32-setup.c         15
-rw-r--r--  fs/binfmt_elf.c                       2
-rw-r--r--  fs/namei.c                            7
-rw-r--r--  fs/proc/base.c                      181
-rw-r--r--  fs/proc/task_mmu.c                   23
-rw-r--r--  fs/proc/task_nommu.c                  6
-rw-r--r--  include/linux/mm.h                   12
-rw-r--r--  kernel/kallsyms.c                     4
-rw-r--r--  mm/memory.c                          73
-rw-r--r--  mm/mlock.c                            4
-rw-r--r--  mm/nommu.c                            2
18 files changed, 244 insertions, 134 deletions
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index fd8728729abc..142ab1008c3b 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -820,17 +820,17 @@ static int __init vdso_init(void)
 }
 arch_initcall(vdso_init);
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return 0;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long addr)
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
 	return 0;
 }
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 	return NULL;
 }
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index f438d74dedbd..d73630b4fe1d 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -337,17 +337,17 @@ static int __init vdso_init(void)
 }
 arch_initcall(vdso_init);
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return 0;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long addr)
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
 	return 0;
 }
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 	return NULL;
 }
diff --git a/arch/sh/kernel/vsyscall/vsyscall.c b/arch/sh/kernel/vsyscall/vsyscall.c
index 242117cbad67..1d6d51a1ce79 100644
--- a/arch/sh/kernel/vsyscall/vsyscall.c
+++ b/arch/sh/kernel/vsyscall/vsyscall.c
@@ -94,17 +94,17 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 	return NULL;
 }
 
-struct vm_area_struct *get_gate_vma(struct task_struct *task)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 	return NULL;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long address)
+int in_gate_area(struct mm_struct *mm, unsigned long address)
 {
 	return 0;
 }
 
-int in_gate_area_no_task(unsigned long address)
+int in_gate_area_no_mm(unsigned long address)
 {
 	return 0;
 }
diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
index 2d93bdbc9ac0..fd843877e841 100644
--- a/arch/x86/ia32/ia32_aout.c
+++ b/arch/x86/ia32/ia32_aout.c
@@ -298,6 +298,7 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
 	/* OK, This is the point of no return */
 	set_personality(PER_LINUX);
 	set_thread_flag(TIF_IA32);
+	current->mm->context.ia32_compat = 1;
 
 	setup_new_exec(bprm);
 
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 80a1dee5bea5..aeff3e89b222 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -13,6 +13,12 @@ typedef struct {
 	int size;
 	struct mutex lock;
 	void *vdso;
+
+#ifdef CONFIG_X86_64
+	/* True if mm supports a task running in 32 bit compatibility mode. */
+	unsigned short ia32_compat;
+#endif
+
 } mm_context_t;
 
 #ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index bd387e8f73b4..6c9dd922ac0d 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -501,6 +501,10 @@ void set_personality_64bit(void)
 	/* Make sure to be in 64bit mode */
 	clear_thread_flag(TIF_IA32);
 
+	/* Ensure the corresponding mm is not marked. */
+	if (current->mm)
+		current->mm->context.ia32_compat = 0;
+
 	/* TBD: overwrites user setup. Should have two bits.
 	   But 64bit processes have always behaved this way,
 	   so it's not too bad. The main problem is just that
@@ -516,6 +520,10 @@ void set_personality_ia32(void)
 	set_thread_flag(TIF_IA32);
 	current->personality |= force_personality32;
 
+	/* Mark the associated mm as containing 32-bit tasks. */
+	if (current->mm)
+		current->mm->context.ia32_compat = 1;
+
 	/* Prepare the first "return" to user space */
 	current_thread_info()->status |= TS_COMPAT;
 }
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2362b646178e..794233587287 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -862,18 +862,18 @@ static struct vm_area_struct gate_vma = {
 	.vm_flags	= VM_READ | VM_EXEC
 };
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 #ifdef CONFIG_IA32_EMULATION
-	if (test_tsk_thread_flag(tsk, TIF_IA32))
+	if (!mm || mm->context.ia32_compat)
 		return NULL;
 #endif
 	return &gate_vma;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long addr)
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
-	struct vm_area_struct *vma = get_gate_vma(task);
+	struct vm_area_struct *vma = get_gate_vma(mm);
 
 	if (!vma)
 		return 0;
@@ -882,11 +882,11 @@ int in_gate_area(struct task_struct *task, unsigned long addr)
 }
 
 /*
- * Use this when you have no reliable task/vma, typically from interrupt
- * context. It is less reliable than using the task's vma and may give
- * false positives:
+ * Use this when you have no reliable mm, typically from interrupt
+ * context. It is less reliable than using a task's mm and may give
+ * false positives.
  */
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
 }
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
index 36df991985b2..468d591dde31 100644
--- a/arch/x86/vdso/vdso32-setup.c
+++ b/arch/x86/vdso/vdso32-setup.c
@@ -417,24 +417,25 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 	return NULL;
 }
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
-	struct mm_struct *mm = tsk->mm;
-
-	/* Check to see if this task was created in compat vdso mode */
+	/*
+	 * Check to see if the corresponding task was created in compat vdso
+	 * mode.
+	 */
 	if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
 		return &gate_vma;
 	return NULL;
 }
 
-int in_gate_area(struct task_struct *task, unsigned long addr)
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
 {
-	const struct vm_area_struct *vma = get_gate_vma(task);
+	const struct vm_area_struct *vma = get_gate_vma(mm);
 
 	return vma && addr >= vma->vm_start && addr < vma->vm_end;
 }
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return 0;
 }
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index b2fae009a4b7..f34078d702d3 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1906,7 +1906,7 @@ static int elf_core_dump(struct coredump_params *cprm)
 	segs = current->mm->map_count;
 	segs += elf_core_extra_phdrs();
 
-	gate_vma = get_gate_vma(current);
+	gate_vma = get_gate_vma(current->mm);
 	if (gate_vma != NULL)
 		segs++;
 
diff --git a/fs/namei.c b/fs/namei.c
index fc858b1124c2..d0066e17d45d 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1650,13 +1650,16 @@ static int path_lookupat(int dfd, const char *name,
 		err = -ECHILD;
 	}
 
-	if (!err)
+	if (!err) {
 		err = handle_reval_path(nd);
+		if (err)
+			path_put(&nd->path);
+	}
 
 	if (!err && nd->flags & LOOKUP_DIRECTORY) {
 		if (!nd->inode->i_op->lookup) {
 			path_put(&nd->path);
-			return -ENOTDIR;
+			err = -ENOTDIR;
 		}
 	}
 
diff --git a/fs/proc/base.c b/fs/proc/base.c
index daba13653256..5a670c11aeac 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -191,17 +191,20 @@ static int proc_root_link(struct inode *inode, struct path *path)
 	return result;
 }
 
-/*
- * Return zero if current may access user memory in @task, -error if not.
- */
-static int check_mem_permission(struct task_struct *task)
+static struct mm_struct *__check_mem_permission(struct task_struct *task)
 {
+	struct mm_struct *mm;
+
+	mm = get_task_mm(task);
+	if (!mm)
+		return ERR_PTR(-EINVAL);
+
 	/*
 	 * A task can always look at itself, in case it chooses
 	 * to use system calls instead of load instructions.
 	 */
 	if (task == current)
-		return 0;
+		return mm;
 
 	/*
 	 * If current is actively ptrace'ing, and would also be
@@ -213,27 +216,53 @@ static int check_mem_permission(struct task_struct *task)
 		match = (tracehook_tracer_task(task) == current);
 		rcu_read_unlock();
 		if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
-			return 0;
+			return mm;
 	}
 
 	/*
 	 * Noone else is allowed.
 	 */
-	return -EPERM;
+	mmput(mm);
+	return ERR_PTR(-EPERM);
+}
+
+/*
+ * If current may access user memory in @task return a reference to the
+ * corresponding mm, otherwise ERR_PTR.
+ */
+static struct mm_struct *check_mem_permission(struct task_struct *task)
+{
+	struct mm_struct *mm;
+	int err;
+
+	/*
+	 * Avoid racing if task exec's as we might get a new mm but validate
+	 * against old credentials.
+	 */
+	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (err)
+		return ERR_PTR(err);
+
+	mm = __check_mem_permission(task);
+	mutex_unlock(&task->signal->cred_guard_mutex);
+
+	return mm;
 }
 
 struct mm_struct *mm_for_maps(struct task_struct *task)
 {
 	struct mm_struct *mm;
+	int err;
 
-	if (mutex_lock_killable(&task->signal->cred_guard_mutex))
-		return NULL;
+	err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (err)
+		return ERR_PTR(err);
 
 	mm = get_task_mm(task);
 	if (mm && mm != current->mm &&
 			!ptrace_may_access(task, PTRACE_MODE_READ)) {
 		mmput(mm);
-		mm = NULL;
+		mm = ERR_PTR(-EACCES);
 	}
 	mutex_unlock(&task->signal->cred_guard_mutex);
 
@@ -279,9 +308,9 @@ out:
 
 static int proc_pid_auxv(struct task_struct *task, char *buffer)
 {
-	int res = 0;
-	struct mm_struct *mm = get_task_mm(task);
-	if (mm) {
+	struct mm_struct *mm = mm_for_maps(task);
+	int res = PTR_ERR(mm);
+	if (mm && !IS_ERR(mm)) {
 		unsigned int nwords = 0;
 		do {
 			nwords += 2;
@@ -318,6 +347,23 @@ static int proc_pid_wchan(struct task_struct *task, char *buffer)
 }
 #endif /* CONFIG_KALLSYMS */
 
+static int lock_trace(struct task_struct *task)
+{
+	int err = mutex_lock_killable(&task->signal->cred_guard_mutex);
+	if (err)
+		return err;
+	if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
+		mutex_unlock(&task->signal->cred_guard_mutex);
+		return -EPERM;
+	}
+	return 0;
+}
+
+static void unlock_trace(struct task_struct *task)
+{
+	mutex_unlock(&task->signal->cred_guard_mutex);
+}
+
 #ifdef CONFIG_STACKTRACE
 
 #define MAX_STACK_TRACE_DEPTH	64
@@ -327,6 +373,7 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
 {
 	struct stack_trace trace;
 	unsigned long *entries;
+	int err;
 	int i;
 
 	entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
@@ -337,15 +384,20 @@ static int proc_pid_stack(struct seq_file *m, struct pid_namespace *ns,
 	trace.max_entries	= MAX_STACK_TRACE_DEPTH;
 	trace.entries		= entries;
 	trace.skip		= 0;
-	save_stack_trace_tsk(task, &trace);
 
-	for (i = 0; i < trace.nr_entries; i++) {
-		seq_printf(m, "[<%pK>] %pS\n",
-			   (void *)entries[i], (void *)entries[i]);
+	err = lock_trace(task);
+	if (!err) {
+		save_stack_trace_tsk(task, &trace);
+
+		for (i = 0; i < trace.nr_entries; i++) {
+			seq_printf(m, "[<%pK>] %pS\n",
+				   (void *)entries[i], (void *)entries[i]);
+		}
+		unlock_trace(task);
 	}
 	kfree(entries);
 
-	return 0;
+	return err;
 }
 #endif
 
@@ -508,18 +560,22 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
 {
 	long nr;
 	unsigned long args[6], sp, pc;
+	int res = lock_trace(task);
+	if (res)
+		return res;
 
 	if (task_current_syscall(task, &nr, args, 6, &sp, &pc))
-		return sprintf(buffer, "running\n");
-
-	if (nr < 0)
-		return sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
-
-	return sprintf(buffer,
+		res = sprintf(buffer, "running\n");
+	else if (nr < 0)
+		res = sprintf(buffer, "%ld 0x%lx 0x%lx\n", nr, sp, pc);
+	else
+		res = sprintf(buffer,
 		       "%ld 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx 0x%lx\n",
 		       nr,
 		       args[0], args[1], args[2], args[3], args[4], args[5],
 		       sp, pc);
+	unlock_trace(task);
+	return res;
 }
 #endif /* CONFIG_HAVE_ARCH_TRACEHOOK */
 
@@ -775,19 +831,14 @@ static ssize_t mem_read(struct file * file, char __user * buf,
 	if (!task)
 		goto out_no_task;
 
-	ret = check_mem_permission(task);
-	if (ret)
-		goto out;
-
 	ret = -ENOMEM;
 	page = (char *)__get_free_page(GFP_TEMPORARY);
 	if (!page)
 		goto out;
 
-	ret = 0;
-
-	mm = get_task_mm(task);
-	if (!mm)
+	mm = check_mem_permission(task);
+	ret = PTR_ERR(mm);
+	if (IS_ERR(mm))
 		goto out_free;
 
 	ret = -EIO;
@@ -801,8 +852,8 @@ static ssize_t mem_read(struct file * file, char __user * buf,
 		int this_len, retval;
 
 		this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
-		retval = access_process_vm(task, src, page, this_len, 0);
-		if (!retval || check_mem_permission(task)) {
+		retval = access_remote_vm(mm, src, page, this_len, 0);
+		if (!retval) {
 			if (!ret)
 				ret = -EIO;
 			break;
@@ -830,10 +881,6 @@ out_no_task:
 	return ret;
 }
 
-#define mem_write NULL
-
-#ifndef mem_write
-/* This is a security hazard */
 static ssize_t mem_write(struct file * file, const char __user *buf,
 			 size_t count, loff_t *ppos)
 {
@@ -841,19 +888,25 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
 	char *page;
 	struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
 	unsigned long dst = *ppos;
+	struct mm_struct *mm;
 
 	copied = -ESRCH;
 	if (!task)
 		goto out_no_task;
 
-	copied = check_mem_permission(task);
-	if (copied)
-		goto out;
+	mm = check_mem_permission(task);
+	copied = PTR_ERR(mm);
+	if (IS_ERR(mm))
+		goto out_task;
+
+	copied = -EIO;
+	if (file->private_data != (void *)((long)current->self_exec_id))
+		goto out_mm;
 
 	copied = -ENOMEM;
 	page = (char *)__get_free_page(GFP_TEMPORARY);
 	if (!page)
-		goto out;
+		goto out_mm;
 
 	copied = 0;
 	while (count > 0) {
@@ -864,7 +917,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
 			copied = -EFAULT;
 			break;
 		}
-		retval = access_process_vm(task, dst, page, this_len, 1);
+		retval = access_remote_vm(mm, dst, page, this_len, 1);
 		if (!retval) {
 			if (!copied)
 				copied = -EIO;
@@ -877,12 +930,13 @@ static ssize_t mem_write(struct file * file, const char __user *buf,
 	}
 	*ppos = dst;
 	free_page((unsigned long) page);
-out:
+out_mm:
+	mmput(mm);
+out_task:
 	put_task_struct(task);
 out_no_task:
 	return copied;
 }
-#endif
 
 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
 {
@@ -919,21 +973,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
 	if (!task)
 		goto out_no_task;
 
-	ret = -EPERM;
-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
-		goto out;
-
 	ret = -ENOMEM;
 	page = (char *)__get_free_page(GFP_TEMPORARY);
 	if (!page)
 		goto out;
 
-	ret = 0;
 
-	mm = get_task_mm(task);
-	if (!mm)
+	mm = mm_for_maps(task);
+	ret = PTR_ERR(mm);
+	if (!mm || IS_ERR(mm))
 		goto out_free;
 
+	ret = 0;
 	while (count > 0) {
 		int this_len, retval, max_len;
 
@@ -2751,8 +2802,12 @@ static int proc_tgid_io_accounting(struct task_struct *task, char *buffer)
 static int proc_pid_personality(struct seq_file *m, struct pid_namespace *ns,
 				struct pid *pid, struct task_struct *task)
 {
-	seq_printf(m, "%08x\n", task->personality);
-	return 0;
+	int err = lock_trace(task);
+	if (!err) {
+		seq_printf(m, "%08x\n", task->personality);
+		unlock_trace(task);
+	}
+	return err;
 }
 
 /*
@@ -2771,7 +2826,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 	REG("environ", S_IRUSR, proc_environ_operations),
 	INF("auxv", S_IRUSR, proc_pid_auxv),
 	ONE("status", S_IRUGO, proc_pid_status),
-	ONE("personality", S_IRUSR, proc_pid_personality),
+	ONE("personality", S_IRUGO, proc_pid_personality),
 	INF("limits", S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
 	REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
@@ -2781,7 +2836,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 #endif
 	REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
-	INF("syscall", S_IRUSR, proc_pid_syscall),
+	INF("syscall", S_IRUGO, proc_pid_syscall),
 #endif
 	INF("cmdline", S_IRUGO, proc_pid_cmdline),
 	ONE("stat", S_IRUGO, proc_tgid_stat),
@@ -2800,7 +2855,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 #ifdef CONFIG_PROC_PAGE_MONITOR
 	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
 	REG("smaps", S_IRUGO, proc_smaps_operations),
-	REG("pagemap", S_IRUSR, proc_pagemap_operations),
+	REG("pagemap", S_IRUGO, proc_pagemap_operations),
 #endif
 #ifdef CONFIG_SECURITY
 	DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
@@ -2809,7 +2864,7 @@ static const struct pid_entry tgid_base_stuff[] = {
 	INF("wchan", S_IRUGO, proc_pid_wchan),
 #endif
 #ifdef CONFIG_STACKTRACE
-	ONE("stack", S_IRUSR, proc_pid_stack),
+	ONE("stack", S_IRUGO, proc_pid_stack),
 #endif
 #ifdef CONFIG_SCHEDSTATS
 	INF("schedstat", S_IRUGO, proc_pid_schedstat),
@@ -3111,14 +3166,14 @@ static const struct pid_entry tid_base_stuff[] = {
 	REG("environ", S_IRUSR, proc_environ_operations),
 	INF("auxv", S_IRUSR, proc_pid_auxv),
 	ONE("status", S_IRUGO, proc_pid_status),
-	ONE("personality", S_IRUSR, proc_pid_personality),
+	ONE("personality", S_IRUGO, proc_pid_personality),
 	INF("limits", S_IRUGO, proc_pid_limits),
 #ifdef CONFIG_SCHED_DEBUG
 	REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
 #endif
 	REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
 #ifdef CONFIG_HAVE_ARCH_TRACEHOOK
-	INF("syscall", S_IRUSR, proc_pid_syscall),
+	INF("syscall", S_IRUGO, proc_pid_syscall),
 #endif
 	INF("cmdline", S_IRUGO, proc_pid_cmdline),
 	ONE("stat", S_IRUGO, proc_tid_stat),
@@ -3136,7 +3191,7 @@ static const struct pid_entry tid_base_stuff[] = {
 #ifdef CONFIG_PROC_PAGE_MONITOR
 	REG("clear_refs", S_IWUSR, proc_clear_refs_operations),
 	REG("smaps", S_IRUGO, proc_smaps_operations),
-	REG("pagemap", S_IRUSR, proc_pagemap_operations),
+	REG("pagemap", S_IRUGO, proc_pagemap_operations),
 #endif
 #ifdef CONFIG_SECURITY
 	DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
@@ -3145,7 +3200,7 @@ static const struct pid_entry tid_base_stuff[] = {
 	INF("wchan", S_IRUGO, proc_pid_wchan),
 #endif
 #ifdef CONFIG_STACKTRACE
-	ONE("stack", S_IRUSR, proc_pid_stack),
+	ONE("stack", S_IRUGO, proc_pid_stack),
 #endif
 #ifdef CONFIG_SCHEDSTATS
 	INF("schedstat", S_IRUGO, proc_pid_schedstat),
@@ -3164,7 +3219,7 @@ static const struct pid_entry tid_base_stuff[] = {
 	REG("oom_score_adj", S_IRUGO|S_IWUSR, proc_oom_score_adj_operations),
 #ifdef CONFIG_AUDITSYSCALL
 	REG("loginuid", S_IWUSR|S_IRUGO, proc_loginuid_operations),
-	REG("sessionid", S_IRUSR, proc_sessionid_operations),
+	REG("sessionid", S_IRUGO, proc_sessionid_operations),
 #endif
 #ifdef CONFIG_FAULT_INJECTION
 	REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 636f1a1fdf87..7c708a418acc 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -121,14 +121,14 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 
 	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
 	if (!priv->task)
-		return NULL;
+		return ERR_PTR(-ESRCH);
 
 	mm = mm_for_maps(priv->task);
-	if (!mm)
-		return NULL;
+	if (!mm || IS_ERR(mm))
+		return mm;
 	down_read(&mm->mmap_sem);
 
-	tail_vma = get_gate_vma(priv->task);
+	tail_vma = get_gate_vma(priv->task->mm);
 	priv->tail_vma = tail_vma;
 
 	/* Start with last addr hint */
@@ -279,7 +279,8 @@ static int show_map(struct seq_file *m, void *v)
 	show_map_vma(m, vma);
 
 	if (m->count < m->size)  /* vma is copied successfully */
-		m->version = (vma != get_gate_vma(task))? vma->vm_start: 0;
+		m->version = (vma != get_gate_vma(task->mm))
+			? vma->vm_start : 0;
 	return 0;
 }
 
@@ -468,7 +469,8 @@ static int show_smap(struct seq_file *m, void *v)
 		   (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
 
 	if (m->count < m->size)  /* vma is copied successfully */
-		m->version = (vma != get_gate_vma(task)) ? vma->vm_start : 0;
+		m->version = (vma != get_gate_vma(task->mm))
+			? vma->vm_start : 0;
 	return 0;
 }
 
@@ -764,8 +766,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!task)
 		goto out;
 
-	ret = -EACCES;
-	if (!ptrace_may_access(task, PTRACE_MODE_READ))
+	mm = mm_for_maps(task);
+	ret = PTR_ERR(mm);
+	if (!mm || IS_ERR(mm))
 		goto out_task;
 
 	ret = -EINVAL;
@@ -778,10 +781,6 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!count)
 		goto out_task;
 
-	mm = get_task_mm(task);
-	if (!mm)
-		goto out_task;
-
 	pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
 	pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
 	ret = -ENOMEM;
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index b535d3e5d5f1..980de547c070 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -199,13 +199,13 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 	/* pin the task and mm whilst we play with them */
 	priv->task = get_pid_task(priv->pid, PIDTYPE_PID);
 	if (!priv->task)
-		return NULL;
+		return ERR_PTR(-ESRCH);
 
 	mm = mm_for_maps(priv->task);
-	if (!mm) {
+	if (!mm || IS_ERR(mm)) {
 		put_task_struct(priv->task);
 		priv->task = NULL;
-		return NULL;
+		return mm;
 	}
 	down_read(&mm->mmap_sem);
 
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 294104e0891d..f9535b2c9558 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -982,6 +982,8 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 
 extern int make_pages_present(unsigned long addr, unsigned long end);
 extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+		void *buf, int len, int write);
 
 int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		     unsigned long start, int len, unsigned int foll_flags,
@@ -1592,13 +1594,13 @@ static inline bool kernel_page_present(struct page *page) { return true; }
 #endif /* CONFIG_HIBERNATION */
 #endif
 
-extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
+extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
 #ifdef __HAVE_ARCH_GATE_AREA
-int in_gate_area_no_task(unsigned long addr);
-int in_gate_area(struct task_struct *task, unsigned long addr);
+int in_gate_area_no_mm(unsigned long addr);
+int in_gate_area(struct mm_struct *mm, unsigned long addr);
 #else
-int in_gate_area_no_task(unsigned long addr);
-#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
+int in_gate_area_no_mm(unsigned long addr);
+#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
 #endif /* __HAVE_ARCH_GATE_AREA */
 
 int drop_caches_sysctl_handler(struct ctl_table *, int,
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 75dcca37d61a..a56aa58b9cb0 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -64,14 +64,14 @@ static inline int is_kernel_text(unsigned long addr)
 	if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
 	    arch_is_kernel_text(addr))
 		return 1;
-	return in_gate_area_no_task(addr);
+	return in_gate_area_no_mm(addr);
 }
 
 static inline int is_kernel(unsigned long addr)
 {
 	if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
 		return 1;
-	return in_gate_area_no_task(addr);
+	return in_gate_area_no_mm(addr);
 }
 
 static int is_ksym_addr(unsigned long addr)
diff --git a/mm/memory.c b/mm/memory.c
index 20d5f7499ce2..51a5c23704af 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1486,9 +1486,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		struct vm_area_struct *vma;
 
 		vma = find_extend_vma(mm, start);
-		if (!vma && in_gate_area(tsk, start)) {
+		if (!vma && in_gate_area(mm, start)) {
 			unsigned long pg = start & PAGE_MASK;
-			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
+			struct vm_area_struct *gate_vma = get_gate_vma(mm);
 			pgd_t *pgd;
 			pud_t *pud;
 			pmd_t *pmd;
@@ -1591,10 +1591,13 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 					return i ? i : -EFAULT;
 				BUG();
 			}
-			if (ret & VM_FAULT_MAJOR)
-				tsk->maj_flt++;
-			else
-				tsk->min_flt++;
+
+			if (tsk) {
+				if (ret & VM_FAULT_MAJOR)
+					tsk->maj_flt++;
+				else
+					tsk->min_flt++;
+			}
 
 			if (ret & VM_FAULT_RETRY) {
 				if (nonblocking)
@@ -1641,7 +1644,8 @@ EXPORT_SYMBOL(__get_user_pages);
 
 /**
  * get_user_pages() - pin user pages in memory
- * @tsk:	task_struct of target task
+ * @tsk:	the task_struct to use for page fault accounting, or
+ *		NULL if faults are not to be recorded.
  * @mm:		mm_struct of target mm
  * @start:	starting user address
  * @nr_pages:	number of pages from start to pin
@@ -3499,7 +3503,7 @@ static int __init gate_vma_init(void)
 __initcall(gate_vma_init);
 #endif
 
-struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 {
 #ifdef AT_SYSINFO_EHDR
 	return &gate_vma;
@@ -3508,7 +3512,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
 #endif
 }
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 #ifdef AT_SYSINFO_EHDR
 	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
@@ -3649,20 +3653,15 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 #endif
 
 /*
- * Access another process' address space.
- * Source/target buffer must be kernel space,
- * Do not walk the page table directly, use get_user_pages
+ * Access another process' address space as given in mm. If non-NULL, use the
+ * given task for page fault accounting.
  */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long addr, void *buf, int len, int write)
 {
-	struct mm_struct *mm;
 	struct vm_area_struct *vma;
 	void *old_buf = buf;
 
-	mm = get_task_mm(tsk);
-	if (!mm)
-		return 0;
-
 	down_read(&mm->mmap_sem);
 	/* ignore errors, just check how much was successfully transferred */
 	while (len) {
@@ -3711,11 +3710,47 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
 		addr += bytes;
 	}
 	up_read(&mm->mmap_sem);
-	mmput(mm);
 
 	return buf - old_buf;
 }
 
+/**
+ * @access_remote_vm - access another process' address space
+ * @mm:		the mm_struct of the target address space
+ * @addr:	start address to access
+ * @buf:	source or destination buffer
+ * @len:	number of bytes to transfer
+ * @write:	whether the access is a write
+ *
+ * The caller must hold a reference on @mm.
+ */
+int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+		void *buf, int len, int write)
+{
+	return __access_remote_vm(NULL, mm, addr, buf, len, write);
+}
+
+/*
+ * Access another process' address space.
+ * Source/target buffer must be kernel space,
+ * Do not walk the page table directly, use get_user_pages
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr,
+		void *buf, int len, int write)
+{
+	struct mm_struct *mm;
+	int ret;
+
+	mm = get_task_mm(tsk);
+	if (!mm)
+		return 0;
+
+	ret = __access_remote_vm(tsk, mm, addr, buf, len, write);
+	mmput(mm);
+
+	return ret;
+}
+
 /*
  * Print the name of a VMA.
  */
diff --git a/mm/mlock.c b/mm/mlock.c
index c3924c7f00be..2689a08c79af 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -237,7 +237,7 @@ long mlock_vma_pages_range(struct vm_area_struct *vma,
 
 	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
 			is_vm_hugetlb_page(vma) ||
-			vma == get_gate_vma(current))) {
+			vma == get_gate_vma(current->mm))) {
 
 		__mlock_vma_pages_range(vma, start, end, NULL);
 
@@ -332,7 +332,7 @@ static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
 	int lock = newflags & VM_LOCKED;
 
 	if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
-	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current))
+	    is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
 		goto out;	/* don't set VM_LOCKED,  don't count */
 
 	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
diff --git a/mm/nommu.c b/mm/nommu.c
index f59e1424d3db..e629143f9440 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -1963,7 +1963,7 @@ error:
 	return -ENOMEM;
 }
 
-int in_gate_area_no_task(unsigned long addr)
+int in_gate_area_no_mm(unsigned long addr)
 {
 	return 0;
 }