author		Ingo Molnar <mingo@elte.hu>	2009-04-06 03:02:57 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-04-06 03:02:57 -0400
commit		f541ae326fa120fa5c57433e4d9a133df212ce41
tree		bdbd94ec72cfc601118051cb35e8617d55510177 /kernel/fork.c
parent		e255357764f92afcafafbd4879b222b8c752065a
parent		0221c81b1b8eb0cbb6b30a0ced52ead32d2b4e4c
Merge branch 'linus' into perfcounters/core-v2
Merge reason: we have gathered quite a few conflicts, need to merge upstream
Conflicts:
	arch/powerpc/kernel/Makefile
	arch/x86/ia32/ia32entry.S
	arch/x86/include/asm/hardirq.h
	arch/x86/include/asm/unistd_32.h
	arch/x86/include/asm/unistd_64.h
	arch/x86/kernel/cpu/common.c
	arch/x86/kernel/irq.c
	arch/x86/kernel/syscall_table_32.S
	arch/x86/mm/iomap_32.c
	include/linux/sched.h
	kernel/Makefile
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c | 83
1 file changed, 38 insertions(+), 45 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 4640a3e0085e..381d7f9b70fb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -60,6 +60,7 @@
 #include <linux/tty.h>
 #include <linux/proc_fs.h>
 #include <linux/blkdev.h>
+#include <linux/fs_struct.h>
 #include <trace/sched.h>
 #include <linux/magic.h>
 
@@ -284,7 +285,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	mm->free_area_cache = oldmm->mmap_base;
 	mm->cached_hole_size = ~0UL;
 	mm->map_count = 0;
-	cpus_clear(mm->cpu_vm_mask);
+	cpumask_clear(mm_cpumask(mm));
 	mm->mm_rb = RB_ROOT;
 	rb_link = &mm->mm_rb.rb_node;
 	rb_parent = NULL;
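The change on old line 287 is part of the new cpumask API that this merge pulls in: cpus_clear() operated on the cpumask value embedded directly in the mm, while the replacement goes through the mm_cpumask() accessor so the mask's storage can later become variable-sized. A minimal before/after sketch (illustrative, not taken from the tree):

	/* old API: clears a cpumask value held directly in mm */
	cpus_clear(mm->cpu_vm_mask);

	/* new API: takes a struct cpumask *; mm_cpumask() hides how
	 * the mask is embedded in struct mm_struct */
	cpumask_clear(mm_cpumask(mm));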
@@ -681,38 +682,21 @@ fail_nomem:
 	return retval;
 }
 
-static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
-{
-	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
-	/* We don't need to lock fs - think why ;-) */
-	if (fs) {
-		atomic_set(&fs->count, 1);
-		rwlock_init(&fs->lock);
-		fs->umask = old->umask;
-		read_lock(&old->lock);
-		fs->root = old->root;
-		path_get(&old->root);
-		fs->pwd = old->pwd;
-		path_get(&old->pwd);
-		read_unlock(&old->lock);
-	}
-	return fs;
-}
-
-struct fs_struct *copy_fs_struct(struct fs_struct *old)
-{
-	return __copy_fs_struct(old);
-}
-
-EXPORT_SYMBOL_GPL(copy_fs_struct);
-
 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
 {
+	struct fs_struct *fs = current->fs;
 	if (clone_flags & CLONE_FS) {
-		atomic_inc(&current->fs->count);
+		/* tsk->fs is already what we want */
+		write_lock(&fs->lock);
+		if (fs->in_exec) {
+			write_unlock(&fs->lock);
+			return -EAGAIN;
+		}
+		fs->users++;
+		write_unlock(&fs->lock);
 		return 0;
 	}
-	tsk->fs = __copy_fs_struct(current->fs);
+	tsk->fs = copy_fs_struct(fs);
 	if (!tsk->fs)
 		return -ENOMEM;
 	return 0;
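copy_fs() now reflects the fs_struct rework that arrives with this merge: the old atomic_t count becomes a plain users counter protected by fs->lock, and the new in_exec flag makes a CLONE_FS fork fail with -EAGAIN while another thread is mid-exec rather than racing with it. For reference, the layout this code assumes is roughly the 2.6.30-era one:

	struct fs_struct {
		int users;	/* plain count; fs->lock protects it */
		rwlock_t lock;	/* replaces the old atomic_t count */
		int umask;
		int in_exec;	/* set while an exec is in flight */
		struct path root, pwd;
	};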
@@ -841,6 +825,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	atomic_set(&sig->live, 1);
 	init_waitqueue_head(&sig->wait_chldexit);
 	sig->flags = 0;
+	if (clone_flags & CLONE_NEWPID)
+		sig->flags |= SIGNAL_UNKILLABLE;
 	sig->group_exit_code = 0;
 	sig->group_exit_task = NULL;
 	sig->group_stop_count = 0;
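The two added lines are the pid-namespace side of this merge: a child forked with CLONE_NEWPID becomes init of the new namespace, and SIGNAL_UNKILLABLE tells the signal code not to let ordinary fatal signals from inside that namespace kill it. From userspace the flag is used with clone(2); a minimal sketch (needs root, error handling omitted):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <signal.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	static char stack[64 * 1024];

	static int child(void *arg)
	{
		/* inside the new namespace this task is pid 1, its init */
		printf("child pid inside namespace: %d\n", getpid());
		return 0;
	}

	int main(void)
	{
		/* CLONE_NEWPID needs CAP_SYS_ADMIN; stack grows down,
		 * so pass the top of the buffer */
		pid_t pid = clone(child, stack + sizeof(stack),
				  CLONE_NEWPID | SIGCHLD, NULL);
		waitpid(pid, NULL, 0);
		return 0;
	}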
@@ -1126,7 +1112,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_cleanup_mm;
 	if ((retval = copy_io(clone_flags, p)))
 		goto bad_fork_cleanup_namespaces;
-	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
+	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
 	if (retval)
 		goto bad_fork_cleanup_io;
 
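copy_thread() drops its unused first argument here (a task number that every caller passed as 0), so each architecture's implementation shrinks to the five-parameter form. The post-merge declaration is, if memory serves, roughly:

	/* declared in <linux/sched.h>, implemented per-arch */
	extern int copy_thread(unsigned long clone_flags, unsigned long sp,
			       unsigned long unused, struct task_struct *p,
			       struct pt_regs *regs);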
@@ -1185,10 +1171,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
 	clear_all_latency_tracing(p);
 
-	/* Our parent execution domain becomes current domain
-	   These must match for thread signalling to apply */
-	p->parent_exec_id = p->self_exec_id;
-
 	/* ok, now we should be set up.. */
 	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
 	p->pdeath_signal = 0;
@@ -1226,10 +1208,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	set_task_cpu(p, smp_processor_id());
 
 	/* CLONE_PARENT re-uses the old parent */
-	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
+	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
 		p->real_parent = current->real_parent;
-	else
+		p->parent_exec_id = current->parent_exec_id;
+	} else {
 		p->real_parent = current;
+		p->parent_exec_id = current->self_exec_id;
+	}
 
 	spin_lock(&current->sighand->siglock);
 
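This hunk is the fix behind removing the early p->parent_exec_id = p->self_exec_id assignment above: the exec id must be copied from whichever task will actually be signalled at exit. For a CLONE_PARENT or CLONE_THREAD child that is the grandparent's generation (current->parent_exec_id); otherwise it is the forking task's own. The consumer is the exit path, roughly (paraphrased from kernel/exit.c of this era, not verbatim):

	/* exit_notify(): if the parent exec'd after we recorded its
	 * generation, downgrade our exit signal to plain SIGCHLD */
	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
	    tsk->parent_exec_id != tsk->real_parent->self_exec_id)
		tsk->exit_signal = SIGCHLD;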
@@ -1265,8 +1250,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 			p->signal->leader_pid = pid;
 			tty_kref_put(p->signal->tty);
 			p->signal->tty = tty_kref_get(current->signal->tty);
-			set_task_pgrp(p, task_pgrp_nr(current));
-			set_task_session(p, task_session_nr(current));
 			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
 			attach_pid(p, PIDTYPE_SID, task_session(current));
 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
@@ -1490,6 +1473,7 @@ void __init proc_caches_init(void)
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
 	mmap_init();
 }
 
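vm_area_cachep initialization moves here, next to the other fork-related caches. KMEM_CACHE() is shorthand that derives the slab's name, size, and alignment from the struct itself; the added line expands to roughly:

	vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct),
			__alignof__(struct vm_area_struct),
			SLAB_PANIC, NULL);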
@@ -1545,12 +1529,16 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
 {
 	struct fs_struct *fs = current->fs;
 
-	if ((unshare_flags & CLONE_FS) &&
-	    (fs && atomic_read(&fs->count) > 1)) {
-		*new_fsp = __copy_fs_struct(current->fs);
-		if (!*new_fsp)
-			return -ENOMEM;
-	}
+	if (!(unshare_flags & CLONE_FS) || !fs)
+		return 0;
+
+	/* don't need lock here; in the worst case we'll do useless copy */
+	if (fs->users == 1)
+		return 0;
+
+	*new_fsp = copy_fs_struct(fs);
+	if (!*new_fsp)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -1666,8 +1654,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 
 	if (new_fs) {
 		fs = current->fs;
+		write_lock(&fs->lock);
 		current->fs = new_fs;
-		new_fs = fs;
+		if (--fs->users)
+			new_fs = NULL;
+		else
+			new_fs = fs;
+		write_unlock(&fs->lock);
 	}
 
 	if (new_mm) {
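With the count now guarded by fs->lock, sys_unshare() swaps in the new fs_struct and decides the old one's fate in a single locked section; new_fs is reused as a flag for the cleanup code further down. Annotated (a sketch of the same logic as the hunk above):

	fs = current->fs;
	write_lock(&fs->lock);
	current->fs = new_fs;		/* publish the private copy */
	if (--fs->users)
		new_fs = NULL;		/* old one still shared: keep it */
	else
		new_fs = fs;		/* we dropped the last reference... */
	write_unlock(&fs->lock);
	/* ...so the cleanup path below will free_fs_struct() it */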
@@ -1706,7 +1699,7 @@ bad_unshare_cleanup_sigh:
 
 bad_unshare_cleanup_fs:
 	if (new_fs)
-		put_fs_struct(new_fs);
+		free_fs_struct(new_fs);
 
 bad_unshare_cleanup_thread:
 bad_unshare_out: