diff options
author    Daniel Rebelo de Oliveira <psykon@gmail.com>  2011-07-26 19:08:39 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-07-26 19:49:44 -0400
commit    fb0a685cb95a0267a96153af2f72486f27be5847 (patch)
tree      99ffae5ab7733ce8a1819fec51ca137a3af48d37 /kernel
parent    293eb1e7772b25a93647c798c7b89bf26c2da2e0 (diff)
kernel/fork.c: fix a few coding style issues
Signed-off-by: Daniel Rebelo de Oliveira <psykon@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/fork.c | 83 |
1 file changed, 48 insertions, 35 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index e33177edb3bf..e7ceaca89609 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -80,7 +80,7 @@ | |||
80 | * Protected counters by write_lock_irq(&tasklist_lock) | 80 | * Protected counters by write_lock_irq(&tasklist_lock) |
81 | */ | 81 | */ |
82 | unsigned long total_forks; /* Handle normal Linux uptimes. */ | 82 | unsigned long total_forks; /* Handle normal Linux uptimes. */ |
83 | int nr_threads; /* The idle threads do not count.. */ | 83 | int nr_threads; /* The idle threads do not count.. */ |
84 | 84 | ||
85 | int max_threads; /* tunable limit on nr_threads */ | 85 | int max_threads; /* tunable limit on nr_threads */ |
86 | 86 | ||
@@ -232,7 +232,7 @@ void __init fork_init(unsigned long mempages) | |||
232 | /* | 232 | /* |
233 | * we need to allow at least 20 threads to boot a system | 233 | * we need to allow at least 20 threads to boot a system |
234 | */ | 234 | */ |
235 | if(max_threads < 20) | 235 | if (max_threads < 20) |
236 | max_threads = 20; | 236 | max_threads = 20; |
237 | 237 | ||
238 | init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; | 238 | init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2; |
@@ -268,7 +268,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | |||
268 | return NULL; | 268 | return NULL; |
269 | } | 269 | } |
270 | 270 | ||
271 | err = arch_dup_task_struct(tsk, orig); | 271 | err = arch_dup_task_struct(tsk, orig); |
272 | if (err) | 272 | if (err) |
273 | goto out; | 273 | goto out; |
274 | 274 | ||
@@ -288,8 +288,11 @@ static struct task_struct *dup_task_struct(struct task_struct *orig) | |||
288 | tsk->stack_canary = get_random_int(); | 288 | tsk->stack_canary = get_random_int(); |
289 | #endif | 289 | #endif |
290 | 290 | ||
291 | /* One for us, one for whoever does the "release_task()" (usually parent) */ | 291 | /* |
292 | atomic_set(&tsk->usage,2); | 292 | * One for us, one for whoever does the "release_task()" (usually |
293 | * parent) | ||
294 | */ | ||
295 | atomic_set(&tsk->usage, 2); | ||
293 | #ifdef CONFIG_BLK_DEV_IO_TRACE | 296 | #ifdef CONFIG_BLK_DEV_IO_TRACE |
294 | tsk->btrace_seq = 0; | 297 | tsk->btrace_seq = 0; |
295 | #endif | 298 | #endif |
@@ -437,7 +440,7 @@ fail_nomem: | |||
437 | goto out; | 440 | goto out; |
438 | } | 441 | } |
439 | 442 | ||
440 | static inline int mm_alloc_pgd(struct mm_struct * mm) | 443 | static inline int mm_alloc_pgd(struct mm_struct *mm) |
441 | { | 444 | { |
442 | mm->pgd = pgd_alloc(mm); | 445 | mm->pgd = pgd_alloc(mm); |
443 | if (unlikely(!mm->pgd)) | 446 | if (unlikely(!mm->pgd)) |
@@ -445,7 +448,7 @@ static inline int mm_alloc_pgd(struct mm_struct * mm) | |||
445 | return 0; | 448 | return 0; |
446 | } | 449 | } |
447 | 450 | ||
448 | static inline void mm_free_pgd(struct mm_struct * mm) | 451 | static inline void mm_free_pgd(struct mm_struct *mm) |
449 | { | 452 | { |
450 | pgd_free(mm, mm->pgd); | 453 | pgd_free(mm, mm->pgd); |
451 | } | 454 | } |
@@ -482,7 +485,7 @@ static void mm_init_aio(struct mm_struct *mm) | |||
482 | #endif | 485 | #endif |
483 | } | 486 | } |
484 | 487 | ||
485 | static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | 488 | static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p) |
486 | { | 489 | { |
487 | atomic_set(&mm->mm_users, 1); | 490 | atomic_set(&mm->mm_users, 1); |
488 | atomic_set(&mm->mm_count, 1); | 491 | atomic_set(&mm->mm_count, 1); |
@@ -513,9 +516,9 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | |||
513 | /* | 516 | /* |
514 | * Allocate and initialize an mm_struct. | 517 | * Allocate and initialize an mm_struct. |
515 | */ | 518 | */ |
516 | struct mm_struct * mm_alloc(void) | 519 | struct mm_struct *mm_alloc(void) |
517 | { | 520 | { |
518 | struct mm_struct * mm; | 521 | struct mm_struct *mm; |
519 | 522 | ||
520 | mm = allocate_mm(); | 523 | mm = allocate_mm(); |
521 | if (!mm) | 524 | if (!mm) |
@@ -583,7 +586,7 @@ void added_exe_file_vma(struct mm_struct *mm) | |||
583 | void removed_exe_file_vma(struct mm_struct *mm) | 586 | void removed_exe_file_vma(struct mm_struct *mm) |
584 | { | 587 | { |
585 | mm->num_exe_file_vmas--; | 588 | mm->num_exe_file_vmas--; |
586 | if ((mm->num_exe_file_vmas == 0) && mm->exe_file){ | 589 | if ((mm->num_exe_file_vmas == 0) && mm->exe_file) { |
587 | fput(mm->exe_file); | 590 | fput(mm->exe_file); |
588 | mm->exe_file = NULL; | 591 | mm->exe_file = NULL; |
589 | } | 592 | } |
@@ -775,9 +778,9 @@ fail_nocontext: | |||
775 | return NULL; | 778 | return NULL; |
776 | } | 779 | } |
777 | 780 | ||
778 | static int copy_mm(unsigned long clone_flags, struct task_struct * tsk) | 781 | static int copy_mm(unsigned long clone_flags, struct task_struct *tsk) |
779 | { | 782 | { |
780 | struct mm_struct * mm, *oldmm; | 783 | struct mm_struct *mm, *oldmm; |
781 | int retval; | 784 | int retval; |
782 | 785 | ||
783 | tsk->min_flt = tsk->maj_flt = 0; | 786 | tsk->min_flt = tsk->maj_flt = 0; |
@@ -844,7 +847,7 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk) | |||
844 | return 0; | 847 | return 0; |
845 | } | 848 | } |
846 | 849 | ||
847 | static int copy_files(unsigned long clone_flags, struct task_struct * tsk) | 850 | static int copy_files(unsigned long clone_flags, struct task_struct *tsk) |
848 | { | 851 | { |
849 | struct files_struct *oldf, *newf; | 852 | struct files_struct *oldf, *newf; |
850 | int error = 0; | 853 | int error = 0; |
@@ -1166,11 +1169,11 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1166 | cgroup_fork(p); | 1169 | cgroup_fork(p); |
1167 | #ifdef CONFIG_NUMA | 1170 | #ifdef CONFIG_NUMA |
1168 | p->mempolicy = mpol_dup(p->mempolicy); | 1171 | p->mempolicy = mpol_dup(p->mempolicy); |
1169 | if (IS_ERR(p->mempolicy)) { | 1172 | if (IS_ERR(p->mempolicy)) { |
1170 | retval = PTR_ERR(p->mempolicy); | 1173 | retval = PTR_ERR(p->mempolicy); |
1171 | p->mempolicy = NULL; | 1174 | p->mempolicy = NULL; |
1172 | goto bad_fork_cleanup_cgroup; | 1175 | goto bad_fork_cleanup_cgroup; |
1173 | } | 1176 | } |
1174 | mpol_fix_fork_child_flag(p); | 1177 | mpol_fix_fork_child_flag(p); |
1175 | #endif | 1178 | #endif |
1176 | #ifdef CONFIG_CPUSETS | 1179 | #ifdef CONFIG_CPUSETS |
@@ -1216,25 +1219,33 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1216 | retval = perf_event_init_task(p); | 1219 | retval = perf_event_init_task(p); |
1217 | if (retval) | 1220 | if (retval) |
1218 | goto bad_fork_cleanup_policy; | 1221 | goto bad_fork_cleanup_policy; |
1219 | 1222 | retval = audit_alloc(p); | |
1220 | if ((retval = audit_alloc(p))) | 1223 | if (retval) |
1221 | goto bad_fork_cleanup_policy; | 1224 | goto bad_fork_cleanup_policy; |
1222 | /* copy all the process information */ | 1225 | /* copy all the process information */ |
1223 | if ((retval = copy_semundo(clone_flags, p))) | 1226 | retval = copy_semundo(clone_flags, p); |
1227 | if (retval) | ||
1224 | goto bad_fork_cleanup_audit; | 1228 | goto bad_fork_cleanup_audit; |
1225 | if ((retval = copy_files(clone_flags, p))) | 1229 | retval = copy_files(clone_flags, p); |
1230 | if (retval) | ||
1226 | goto bad_fork_cleanup_semundo; | 1231 | goto bad_fork_cleanup_semundo; |
1227 | if ((retval = copy_fs(clone_flags, p))) | 1232 | retval = copy_fs(clone_flags, p); |
1233 | if (retval) | ||
1228 | goto bad_fork_cleanup_files; | 1234 | goto bad_fork_cleanup_files; |
1229 | if ((retval = copy_sighand(clone_flags, p))) | 1235 | retval = copy_sighand(clone_flags, p); |
1236 | if (retval) | ||
1230 | goto bad_fork_cleanup_fs; | 1237 | goto bad_fork_cleanup_fs; |
1231 | if ((retval = copy_signal(clone_flags, p))) | 1238 | retval = copy_signal(clone_flags, p); |
1239 | if (retval) | ||
1232 | goto bad_fork_cleanup_sighand; | 1240 | goto bad_fork_cleanup_sighand; |
1233 | if ((retval = copy_mm(clone_flags, p))) | 1241 | retval = copy_mm(clone_flags, p); |
1242 | if (retval) | ||
1234 | goto bad_fork_cleanup_signal; | 1243 | goto bad_fork_cleanup_signal; |
1235 | if ((retval = copy_namespaces(clone_flags, p))) | 1244 | retval = copy_namespaces(clone_flags, p); |
1245 | if (retval) | ||
1236 | goto bad_fork_cleanup_mm; | 1246 | goto bad_fork_cleanup_mm; |
1237 | if ((retval = copy_io(clone_flags, p))) | 1247 | retval = copy_io(clone_flags, p); |
1248 | if (retval) | ||
1238 | goto bad_fork_cleanup_namespaces; | 1249 | goto bad_fork_cleanup_namespaces; |
1239 | retval = copy_thread(clone_flags, stack_start, stack_size, p, regs); | 1250 | retval = copy_thread(clone_flags, stack_start, stack_size, p, regs); |
1240 | if (retval) | 1251 | if (retval) |
@@ -1256,7 +1267,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1256 | /* | 1267 | /* |
1257 | * Clear TID on mm_release()? | 1268 | * Clear TID on mm_release()? |
1258 | */ | 1269 | */ |
1259 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr: NULL; | 1270 | p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL; |
1260 | #ifdef CONFIG_BLOCK | 1271 | #ifdef CONFIG_BLOCK |
1261 | p->plug = NULL; | 1272 | p->plug = NULL; |
1262 | #endif | 1273 | #endif |
@@ -1324,7 +1335,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1324 | * it's process group. | 1335 | * it's process group. |
1325 | * A fatal signal pending means that current will exit, so the new | 1336 | * A fatal signal pending means that current will exit, so the new |
1326 | * thread can't slip out of an OOM kill (or normal SIGKILL). | 1337 | * thread can't slip out of an OOM kill (or normal SIGKILL). |
1327 | */ | 1338 | */ |
1328 | recalc_sigpending(); | 1339 | recalc_sigpending(); |
1329 | if (signal_pending(current)) { | 1340 | if (signal_pending(current)) { |
1330 | spin_unlock(¤t->sighand->siglock); | 1341 | spin_unlock(¤t->sighand->siglock); |
@@ -1685,12 +1696,14 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) | |||
1685 | */ | 1696 | */ |
1686 | if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) | 1697 | if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) |
1687 | do_sysvsem = 1; | 1698 | do_sysvsem = 1; |
1688 | if ((err = unshare_fs(unshare_flags, &new_fs))) | 1699 | err = unshare_fs(unshare_flags, &new_fs); |
1700 | if (err) | ||
1689 | goto bad_unshare_out; | 1701 | goto bad_unshare_out; |
1690 | if ((err = unshare_fd(unshare_flags, &new_fd))) | 1702 | err = unshare_fd(unshare_flags, &new_fd); |
1703 | if (err) | ||
1691 | goto bad_unshare_cleanup_fs; | 1704 | goto bad_unshare_cleanup_fs; |
1692 | if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, | 1705 | err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs); |
1693 | new_fs))) | 1706 | if (err) |
1694 | goto bad_unshare_cleanup_fd; | 1707 | goto bad_unshare_cleanup_fd; |
1695 | 1708 | ||
1696 | if (new_fs || new_fd || do_sysvsem || new_nsproxy) { | 1709 | if (new_fs || new_fd || do_sysvsem || new_nsproxy) { |