author	Daniel Walker <dwalker@mvista.com>	2007-10-18 06:06:07 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-18 17:37:25 -0400
commit	23ff4440243fe3fa42515d18aa213be14bb706ee
tree	fab3db714dd99b34f7d6d1f87732a5f917022a2f /kernel/fork.c
parent	902749cdbd3da8199e57d082f36a6de60591aeb6
whitespace fixes: fork
Signed-off-by: Daniel Walker <dwalker@mvista.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--	kernel/fork.c	20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 125246fc75a5..1232aac6a1cd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -268,7 +268,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
			get_file(file);
			if (tmp->vm_flags & VM_DENYWRITE)
				atomic_dec(&inode->i_writecount);

			/* insert tmp into the share list, just after mpnt */
			spin_lock(&file->f_mapping->i_mmap_lock);
			tmp->vm_truncate_count = mpnt->vm_truncate_count;
@@ -331,7 +331,7 @@ static inline void mm_free_pgd(struct mm_struct * mm)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */

__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
@@ -738,8 +738,8 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
	/* compute the remainder to be cleared */
	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

	/* This is long word aligned thus could use a optimized version */
	memset(new_fds, 0, size);

	if (new_fdt->max_fds > open_files) {
		int left = (new_fdt->max_fds-open_files)/8;
@@ -1069,12 +1069,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	task_io_accounting_init(p);
	acct_clear_integrals(p);

	p->it_virt_expires = cputime_zero;
	p->it_prof_expires = cputime_zero;
	p->it_sched_expires = 0;
	INIT_LIST_HEAD(&p->cpu_timers[0]);
	INIT_LIST_HEAD(&p->cpu_timers[1]);
	INIT_LIST_HEAD(&p->cpu_timers[2]);

	p->lock_depth = -1;		/* -1 = no lock */
	do_posix_clock_monotonic_gettime(&p->start_time);
@@ -1239,7 +1239,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);