Diffstat (limited to 'kernel/fork.c')
-rw-r--r-- | kernel/fork.c | 49
1 files changed, 44 insertions, 5 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 8dd8ff281009..05e0b6f4365b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -51,6 +51,7 @@
 #include <linux/random.h>
 #include <linux/tty.h>
 #include <linux/proc_fs.h>
+#include <linux/blkdev.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -392,6 +393,7 @@ void fastcall __mmdrop(struct mm_struct *mm)
 	destroy_context(mm);
 	free_mm(mm);
 }
+EXPORT_SYMBOL_GPL(__mmdrop);
 
 /*
  * Decrement the use count and release all resources for an mm.
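The export above matters because mmdrop() in <linux/sched.h> decrements mm_count and falls through to __mmdrop() on the final reference, so modular GPL code that pins an mm_struct needs the symbol. A hedged sketch (not part of the patch) of how a module might take and release such a reference; the helper names pin_task_mm()/unpin_task_mm() are made up for illustration:

#include <linux/sched.h>
#include <linux/mm_types.h>

static struct mm_struct *pin_task_mm(struct task_struct *task)
{
	struct mm_struct *mm;

	task_lock(task);
	mm = task->mm;
	if (mm)
		atomic_inc(&mm->mm_count);	/* keep the mm_struct itself alive */
	task_unlock(task);
	return mm;
}

static void unpin_task_mm(struct mm_struct *mm)
{
	if (mm)
		mmdrop(mm);	/* may reach __mmdrop() on the last reference */
}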
@@ -791,6 +793,31 @@ out:
 	return error;
 }
 
+static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
+{
+#ifdef CONFIG_BLOCK
+	struct io_context *ioc = current->io_context;
+
+	if (!ioc)
+		return 0;
+	/*
+	 * Share io context with parent, if CLONE_IO is set
+	 */
+	if (clone_flags & CLONE_IO) {
+		tsk->io_context = ioc_task_link(ioc);
+		if (unlikely(!tsk->io_context))
+			return -ENOMEM;
+	} else if (ioprio_valid(ioc->ioprio)) {
+		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
+		if (unlikely(!tsk->io_context))
+			return -ENOMEM;
+
+		tsk->io_context->ioprio = ioc->ioprio;
+	}
+#endif
+	return 0;
+}
+
 /*
  * Helper to unshare the files of the current task.
  * We don't want to expose copy_files internals to
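On the userspace side, the behaviour copy_io() implements is selected with the CLONE_IO clone flag: with it, parent and child share one io_context; without it, a child whose parent has a valid ioprio gets a fresh io_context that only inherits the priority. A hedged userspace sketch (not part of the patch); CLONE_IO may be missing from older <sched.h> headers, so it is defined here with the kernel's value:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

#ifndef CLONE_IO
#define CLONE_IO 0x80000000	/* clone I/O context, kernel definition */
#endif

static int child_fn(void *arg)
{
	/* I/O issued here is charged to the io_context shared with the parent. */
	puts("child: sharing parent's io_context");
	return 0;
}

int main(void)
{
	const size_t stack_size = 64 * 1024;
	char *stack = malloc(stack_size);
	pid_t pid;

	if (!stack)
		return 1;
	/* The stack grows down on most architectures: pass the top of the buffer. */
	pid = clone(child_fn, stack + stack_size, CLONE_IO | SIGCHLD, NULL);
	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);
	free(stack);
	return 0;
}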
@@ -1045,6 +1072,10 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	copy_flags(clone_flags, p);
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
+#ifdef CONFIG_PREEMPT_RCU
+	p->rcu_read_lock_nesting = 0;
+	p->rcu_flipctr_idx = 0;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
 	p->vfork_done = NULL;
 	spin_lock_init(&p->alloc_lock);
 
@@ -1059,6 +1090,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->prev_utime = cputime_zero;
 	p->prev_stime = cputime_zero;
 
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+	p->last_switch_count = 0;
+	p->last_switch_timestamp = 0;
+#endif
+
 #ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;		/* I/O counter: bytes read */
 	p->wchar = 0;		/* I/O counter: bytes written */
@@ -1147,15 +1183,17 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_cleanup_mm;
 	if ((retval = copy_namespaces(clone_flags, p)))
 		goto bad_fork_cleanup_keys;
+	if ((retval = copy_io(clone_flags, p)))
+		goto bad_fork_cleanup_namespaces;
 	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
 	if (retval)
-		goto bad_fork_cleanup_namespaces;
+		goto bad_fork_cleanup_io;
 
 	if (pid != &init_struct_pid) {
 		retval = -ENOMEM;
 		pid = alloc_pid(task_active_pid_ns(p));
 		if (!pid)
-			goto bad_fork_cleanup_namespaces;
+			goto bad_fork_cleanup_io;
 
 		if (clone_flags & CLONE_NEWPID) {
 			retval = pid_ns_prepare_proc(task_active_pid_ns(p));
@@ -1196,6 +1234,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef TIF_SYSCALL_EMU
 	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
 #endif
+	clear_all_latency_tracing(p);
 
 	/* Our parent execution domain becomes current domain
 	   These must match for thread signalling to apply */
@@ -1224,9 +1263,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	/* Need tasklist lock for parent etc handling! */
 	write_lock_irq(&tasklist_lock);
 
-	/* for sys_ioprio_set(IOPRIO_WHO_PGRP) */
-	p->ioprio = current->ioprio;
-
 	/*
 	 * The task hasn't been attached yet, so its cpus_allowed mask will
 	 * not be changed, nor will its assigned CPU.
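The explicit per-task ioprio copy removed above is no longer needed: copy_io() earlier in this patch propagates a valid priority through the child's io_context instead, and that is where sys_ioprio_get()/sys_ioprio_set() now look. A hedged sketch (not part of the patch) of that syscall interface on architectures that define the syscall numbers; the IOPRIO_* constants mirror the kernel's definitions since glibc ships no wrapper:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_WHO_PROCESS	1
#define IOPRIO_CLASS_BE		2	/* best-effort scheduling class */

int main(void)
{
	/* Set best-effort priority level 4 for the calling process... */
	if (syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
		    IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 4)) < 0)
		perror("ioprio_set");

	/* ...and read it back; the value now lives in the task's io_context. */
	printf("ioprio: %ld\n",
	       syscall(SYS_ioprio_get, IOPRIO_WHO_PROCESS, 0));
	return 0;
}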
@@ -1237,6 +1273,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * parent's CPU). This avoids alot of nasty races.
 	 */
 	p->cpus_allowed = current->cpus_allowed;
+	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
 	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
 			!cpu_online(task_cpu(p))))
 		set_task_cpu(p, smp_processor_id());
@@ -1317,6 +1354,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);
+bad_fork_cleanup_io:
+	put_io_context(p->io_context);
 bad_fork_cleanup_namespaces:
 	exit_task_namespaces(p);
 bad_fork_cleanup_keys: