Diffstat (limited to 'kernel')

 kernel/debug/kdb/kdb_private.h |  7 +++++++
 kernel/debug/kdb/kdb_support.c |  4 ++--
 kernel/exit.c                  |  5 ++---
 kernel/fork.c                  | 17 ++++++++++-------
 kernel/kfifo.c                 |  9 +++++++++
 kernel/kmod.c                  |  4 +++-
 kernel/sched.c                 | 10 +++++++++-
 kernel/workqueue.c             |  9 +++++++++
 8 files changed, 51 insertions(+), 14 deletions(-)
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index c438f545a321..be775f7e81e0 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -255,7 +255,14 @@ extern void kdb_ps1(const struct task_struct *p);
 extern void kdb_print_nameval(const char *name, unsigned long val);
 extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
 extern void kdb_meminfo_proc_show(void);
+#ifdef CONFIG_KALLSYMS
 extern const char *kdb_walk_kallsyms(loff_t *pos);
+#else /* ! CONFIG_KALLSYMS */
+static inline const char *kdb_walk_kallsyms(loff_t *pos)
+{
+	return NULL;
+}
+#endif /* ! CONFIG_KALLSYMS */
 extern char *kdb_getstr(char *, size_t, char *);
 
 /* Defines for kdb_symbol_print */
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 45344d5c53dd..6b2485dcb050 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -82,8 +82,8 @@ static char *kdb_name_table[100];	/* arbitrary size */
 int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
 {
 	int ret = 0;
-	unsigned long symbolsize;
-	unsigned long offset;
+	unsigned long symbolsize = 0;
+	unsigned long offset = 0;
 #define knt1_size 128		/* must be >= kallsyms table size */
 	char *knt1 = NULL;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 671ed56e0a49..03120229db28 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1386,8 +1386,7 @@ static int wait_task_stopped(struct wait_opts *wo,
 	if (!unlikely(wo->wo_flags & WNOWAIT))
 		*p_code = 0;
 
-	/* don't need the RCU readlock here as we're holding a spinlock */
-	uid = __task_cred(p)->uid;
+	uid = task_uid(p);
 unlock_sig:
 	spin_unlock_irq(&p->sighand->siglock);
 	if (!exit_code)
@@ -1460,7 +1459,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 	}
 	if (!unlikely(wo->wo_flags & WNOWAIT))
 		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
-	uid = __task_cred(p)->uid;
+	uid = task_uid(p);
 	spin_unlock_irq(&p->sighand->siglock);
 
 	pid = task_pid_vnr(p);
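Context for the hunks above: task_uid() is the <linux/cred.h> accessor that wraps the credential dereference in its own RCU read-side critical section, so the call site no longer has to reason about which lock makes __task_cred() safe. A rough sketch of the shape of that helper (illustrative, not the exact macro text from cred.h):

/* sketch of the task_uid()-style RCU-protected credential read */
static inline uid_t sketch_task_uid(const struct task_struct *task)
{
	uid_t uid;

	rcu_read_lock();
	uid = __task_cred(task)->uid;	/* cred pointer is RCU-protected */
	rcu_read_unlock();
	return uid;
}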
diff --git a/kernel/fork.c b/kernel/fork.c
index 98b450876f93..b7e9d60a675d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -300,7 +300,7 @@ out:
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-	struct vm_area_struct *mpnt, *tmp, **pprev;
+	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
 	unsigned long charge;
@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	if (retval)
 		goto out;
 
+	prev = NULL;
 	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
 		struct file *file;
 
@@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			goto fail_nomem_anon_vma_fork;
 		tmp->vm_flags &= ~VM_LOCKED;
 		tmp->vm_mm = mm;
-		tmp->vm_next = NULL;
+		tmp->vm_next = tmp->vm_prev = NULL;
 		file = tmp->vm_file;
 		if (file) {
 			struct inode *inode = file->f_path.dentry->d_inode;
@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		 */
 		*pprev = tmp;
 		pprev = &tmp->vm_next;
+		tmp->vm_prev = prev;
+		prev = tmp;
 
 		__vma_link_rb(mm, tmp, rb_link, rb_parent);
 		rb_link = &tmp->vm_rb.rb_right;
@@ -752,13 +755,13 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
 	struct fs_struct *fs = current->fs;
 	if (clone_flags & CLONE_FS) {
 		/* tsk->fs is already what we want */
-		write_lock(&fs->lock);
+		spin_lock(&fs->lock);
 		if (fs->in_exec) {
-			write_unlock(&fs->lock);
+			spin_unlock(&fs->lock);
 			return -EAGAIN;
 		}
 		fs->users++;
-		write_unlock(&fs->lock);
+		spin_unlock(&fs->lock);
 		return 0;
 	}
 	tsk->fs = copy_fs_struct(fs);
@@ -1676,13 +1679,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 
 	if (new_fs) {
 		fs = current->fs;
-		write_lock(&fs->lock);
+		spin_lock(&fs->lock);
 		current->fs = new_fs;
 		if (--fs->users)
 			new_fs = NULL;
 		else
 			new_fs = fs;
-		write_unlock(&fs->lock);
+		spin_unlock(&fs->lock);
 	}
 
 	if (new_mm) {
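The dup_mmap() hunks keep the new vm_prev back-pointers consistent while the child's VMA list is rebuilt, so the per-mm mapping list becomes doubly linked. An illustrative (not from this patch) walker showing what the back-links allow:

/*
 * Illustrative only: with vm_prev maintained alongside vm_next, the VMA
 * list can be walked in either direction without restarting from mm->mmap.
 */
static struct vm_area_struct *highest_vma(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	if (!vma)
		return NULL;
	while (vma->vm_next)		/* forward to the last mapping */
		vma = vma->vm_next;
	return vma;			/* vma->vm_prev now walks back toward mm->mmap */
}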
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 4502604ecadf..6b5580c57644 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -503,6 +503,15 @@ unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
 }
 EXPORT_SYMBOL(__kfifo_out_r);
 
+void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
+{
+	unsigned int n;
+
+	n = __kfifo_peek_n(fifo, recsize);
+	fifo->out += n + recsize;
+}
+EXPORT_SYMBOL(__kfifo_skip_r);
+
 int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
 	unsigned long len, unsigned int *copied, size_t recsize)
 {
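The new __kfifo_skip_r() reads the stored record length via __kfifo_peek_n() and advances the out index past the length header plus payload, discarding the oldest record without copying it out. A minimal usage sketch assuming the usual record-fifo declarations from <linux/kfifo.h>; the fifo name and sizes are illustrative, not from this patch:

#include <linux/kfifo.h>

/* record fifo with a 1-byte length header and a 128-byte buffer (illustrative) */
static STRUCT_KFIFO_REC_1(128) rec_fifo;

static void skip_example(void)
{
	unsigned char a[] = "hello", b[] = "world";

	INIT_KFIFO(rec_fifo);
	kfifo_in(&rec_fifo, a, sizeof(a) - 1);
	kfifo_in(&rec_fifo, b, sizeof(b) - 1);

	/*
	 * Drop the oldest record ("hello") unread; for record fifos this
	 * is the path that ends up in __kfifo_skip_r().
	 */
	kfifo_skip(&rec_fifo);
}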
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 6e9b19667a8d..9cd0591c96a2 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -153,7 +153,9 @@ static int ____call_usermodehelper(void *data)
 		goto fail;
 	}
 
-	retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);
+	retval = kernel_execve(sub_info->path,
+			       (const char *const *)sub_info->argv,
+			       (const char *const *)sub_info->envp);
 
 	/* Exec failed? */
 fail:
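The casts accommodate kernel_execve() taking const-qualified argv/envp arrays: in C, char ** does not implicitly convert to const char *const * (unlike C++), so the call site needs explicit casts. A small illustration of the language rule (hypothetical function, not kernel code):

/* illustration: gcc warns on the first initializer, accepts the second */
static void const_array_conversion(char **argv)
{
	const char *const *a = argv;				/* incompatible pointer type warning */
	const char *const *b = (const char *const *)argv;	/* explicit cast, as in the hunk above */

	(void)a;
	(void)b;
}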
diff --git a/kernel/sched.c b/kernel/sched.c
index 41541d79e3c8..09b574e7f4df 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3865,8 +3865,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
 		/*
 		 * Owner changed, break to re-assess state.
 		 */
-		if (lock->owner != owner)
+		if (lock->owner != owner) {
+			/*
+			 * If the lock has switched to a different owner,
+			 * we likely have heavy contention. Return 0 to quit
+			 * optimistic spinning and not contend further:
+			 */
+			if (lock->owner)
+				return 0;
 			break;
+		}
 
 		/*
 		 * Is that owner really running on that cpu?
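The hunk distinguishes two reasons the owner field can change while spinning: it was cleared because the mutex was released (stop spinning and try to take the lock) or it now points at a different task (heavy contention, give up and block). A standalone sketch of that decision, with illustrative names that are not the kernel's:

/* illustrative decision table for the owner-changed case above */
enum spin_verdict { KEEP_SPINNING, STOP_AND_ACQUIRE, STOP_AND_SLEEP };

static enum spin_verdict owner_changed_verdict(struct thread_info *seen,
					       struct thread_info *now)
{
	if (now == seen)
		return KEEP_SPINNING;		/* owner unchanged, keep polling */
	if (!now)
		return STOP_AND_ACQUIRE;	/* lock was released while we spun */
	return STOP_AND_SLEEP;			/* new owner: contended, stop burning CPU */
}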
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2994a0e3a61c..8bd600c020e5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -35,6 +35,9 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
+
 #include "workqueue_sched.h"
 
 enum {
@@ -1790,7 +1793,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 	work_clear_pending(work);
 	lock_map_acquire(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
+	trace_workqueue_execute_start(work);
 	f(work);
+	/*
+	 * While we must be careful to not use "work" after this, the trace
+	 * point will only record its address.
+	 */
+	trace_workqueue_execute_end(work);
 	lock_map_release(&lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
 
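Defining CREATE_TRACE_POINTS before including the event header, in exactly one compilation unit, is what instantiates the workqueue_execute_start/end tracepoints used around f(work); the event definitions themselves live in trace/events/workqueue.h. Schematically, an event that records only the work pointer is declared along these lines (a sketch of the TRACE_EVENT pattern, not the exact definition in that header):

TRACE_EVENT(workqueue_execute_end,

	TP_PROTO(struct work_struct *work),

	TP_ARGS(work),

	TP_STRUCT__entry(
		__field(void *, work)
	),

	TP_fast_assign(
		__entry->work = work;
	),

	TP_printk("work struct %p", __entry->work)
);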
