Diffstat (limited to 'kernel')

-rw-r--r--  kernel/cgroup.c                     |   8
-rw-r--r--  kernel/cpu.c                        |  10
-rw-r--r--  kernel/cred.c                       |   2
-rw-r--r--  kernel/fork.c                       |  15
-rw-r--r--  kernel/futex.c                      |  57
-rw-r--r--  kernel/hw_breakpoint.c              |  56
-rw-r--r--  kernel/kexec.c                      |   4
-rw-r--r--  kernel/kfifo.c                      | 108
-rw-r--r--  kernel/kgdb.c                       |   9
-rw-r--r--  kernel/kmod.c                       |  12
-rw-r--r--  kernel/lockdep.c                    |   2
-rw-r--r--  kernel/panic.c                      |   3
-rw-r--r--  kernel/perf_event.c                 |   9
-rw-r--r--  kernel/printk.c                     |   1
-rw-r--r--  kernel/sched.c                      |  44
-rw-r--r--  kernel/sched_fair.c                 |   2
-rw-r--r--  kernel/signal.c                     |   3
-rw-r--r--  kernel/smp.c                        |   2
-rw-r--r--  kernel/softlockup.c                 |  15
-rw-r--r--  kernel/time/clockevents.c           |   3
-rw-r--r--  kernel/time/clocksource.c           |  18
-rw-r--r--  kernel/time/timekeeping.c           |   2
-rw-r--r--  kernel/timer.c                      |   3
-rw-r--r--  kernel/trace/Kconfig                |   4
-rw-r--r--  kernel/trace/ftrace.c               |   6
-rw-r--r--  kernel/trace/ring_buffer.c          |  28
-rw-r--r--  kernel/trace/trace.c                |   5
-rw-r--r--  kernel/trace/trace_events_filter.c  |  29

28 files changed, 324 insertions(+), 136 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0249f4be9b5c..aa3bee566446 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2468,7 +2468,6 @@ static struct cgroup_pidlist *cgroup_pidlist_find(struct cgroup *cgrp,
 			/* make sure l doesn't vanish out from under us */
 			down_write(&l->mutex);
 			mutex_unlock(&cgrp->pidlist_mutex);
-			l->use_count++;
 			return l;
 		}
 	}
@@ -2937,14 +2936,17 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
 
 	for_each_subsys(root, ss) {
 		struct cgroup_subsys_state *css = ss->create(ss, cgrp);
+
 		if (IS_ERR(css)) {
 			err = PTR_ERR(css);
 			goto err_destroy;
 		}
 		init_cgroup_css(css, ss, cgrp);
-		if (ss->use_id)
-			if (alloc_css_id(ss, parent, cgrp))
+		if (ss->use_id) {
+			err = alloc_css_id(ss, parent, cgrp);
+			if (err)
 				goto err_destroy;
+		}
 		/* At error, ->destroy() callback has to free assigned ID. */
 	}
 
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 1c8ddd6ee940..677f25376a38 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -151,13 +151,13 @@ static inline void check_for_tasks(int cpu)
 
 	write_lock_irq(&tasklist_lock);
 	for_each_process(p) {
-		if (task_cpu(p) == cpu &&
+		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
 		    (!cputime_eq(p->utime, cputime_zero) ||
 		     !cputime_eq(p->stime, cputime_zero)))
-			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
-				(state = %ld, flags = %x) \n",
+			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
+				"(state = %ld, flags = %x)\n",
 			       p->comm, task_pid_nr(p), cpu,
 			       p->state, p->flags);
 	}
 	write_unlock_irq(&tasklist_lock);
 }
diff --git a/kernel/cred.c b/kernel/cred.c
index dd76cfe5f5b0..1ed8ca18790c 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -224,7 +224,7 @@ struct cred *cred_alloc_blank(void)
 #ifdef CONFIG_KEYS
 	new->tgcred = kzalloc(sizeof(*new->tgcred), GFP_KERNEL);
 	if (!new->tgcred) {
-		kfree(new);
+		kmem_cache_free(cred_jar, new);
 		return NULL;
 	}
 	atomic_set(&new->tgcred->usage, 1);
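The cred.c fix above matters because struct cred objects are allocated from a dedicated slab cache (cred_jar), and an object taken from a kmem_cache must be returned with kmem_cache_free() rather than kfree(). A minimal sketch of the pairing rule, with placeholder names (foo and foo_jar are illustrative, not from this patch):

static struct kmem_cache *foo_jar;	/* assumed created with kmem_cache_create() */

struct foo {
	int x;
};

static struct foo *foo_alloc(void)
{
	/* object comes from the dedicated cache ... */
	return kmem_cache_zalloc(foo_jar, GFP_KERNEL);
}

static void foo_free(struct foo *f)
{
	/* ... so it must go back to the same cache, not kfree() */
	kmem_cache_free(foo_jar, f);
}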
diff --git a/kernel/fork.c b/kernel/fork.c
index 5b2959b3ffc2..f88bd984df35 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1241,21 +1241,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	/* Need tasklist lock for parent etc handling! */
 	write_lock_irq(&tasklist_lock);
 
-	/*
-	 * The task hasn't been attached yet, so its cpus_allowed mask will
-	 * not be changed, nor will its assigned CPU.
-	 *
-	 * The cpus_allowed mask of the parent may have changed after it was
-	 * copied first time - so re-copy it here, then check the child's CPU
-	 * to ensure it is on a valid CPU (and if not, just force it back to
-	 * parent's CPU). This avoids alot of nasty races.
-	 */
-	p->cpus_allowed = current->cpus_allowed;
-	p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
-	if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
-			!cpu_online(task_cpu(p))))
-		set_task_cpu(p, smp_processor_id());
-
 	/* CLONE_PARENT re-uses the old parent */
 	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
 		p->real_parent = current->real_parent;
diff --git a/kernel/futex.c b/kernel/futex.c
index 8e3c3ffe1b9a..e7a35f1039e7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -203,8 +203,6 @@ static void drop_futex_key_refs(union futex_key *key)
  * @uaddr: virtual address of the futex
  * @fshared: 0 for a PROCESS_PRIVATE futex, 1 for PROCESS_SHARED
  * @key: address where result is stored.
- * @rw: mapping needs to be read/write (values: VERIFY_READ,
- *      VERIFY_WRITE)
  *
  * Returns a negative error code or 0
  * The key words are stored in *key on success.
@@ -216,7 +214,7 @@ static void drop_futex_key_refs(union futex_key *key)
  * lock_page() might sleep, the caller should not hold a spinlock.
  */
 static int
-get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
+get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key)
 {
 	unsigned long address = (unsigned long)uaddr;
 	struct mm_struct *mm = current->mm;
@@ -239,7 +237,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 	 * but access_ok() should be faster than find_vma()
 	 */
 	if (!fshared) {
-		if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
+		if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
 			return -EFAULT;
 		key->private.mm = mm;
 		key->private.address = address;
@@ -248,7 +246,7 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
 	}
 
 again:
-	err = get_user_pages_fast(address, 1, rw == VERIFY_WRITE, &page);
+	err = get_user_pages_fast(address, 1, 1, &page);
 	if (err < 0)
 		return err;
 
@@ -532,8 +530,25 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
 		return -EINVAL;
 
 	WARN_ON(!atomic_read(&pi_state->refcount));
-	WARN_ON(pid && pi_state->owner &&
-		pi_state->owner->pid != pid);
+
+	/*
+	 * When pi_state->owner is NULL then the owner died
+	 * and another waiter is on the fly. pi_state->owner
+	 * is fixed up by the task which acquires
+	 * pi_state->rt_mutex.
+	 *
+	 * We do not check for pid == 0 which can happen when
+	 * the owner died and robust_list_exit() cleared the
+	 * TID.
+	 */
+	if (pid && pi_state->owner) {
+		/*
+		 * Bail out if user space manipulated the
+		 * futex value.
+		 */
+		if (pid != task_pid_vnr(pi_state->owner))
+			return -EINVAL;
+	}
 
 	atomic_inc(&pi_state->refcount);
 	*ps = pi_state;
@@ -760,6 +775,13 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
 	if (!pi_state)
 		return -EINVAL;
 
+	/*
+	 * If current does not own the pi_state then the futex is
+	 * inconsistent and user space fiddled with the futex value.
+	 */
+	if (pi_state->owner != current)
+		return -EINVAL;
+
 	raw_spin_lock(&pi_state->pi_mutex.wait_lock);
 	new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
 
@@ -867,7 +889,7 @@ static int futex_wake(u32 __user *uaddr, int fshared, int nr_wake, u32 bitset)
 	if (!bitset)
 		return -EINVAL;
 
-	ret = get_futex_key(uaddr, fshared, &key, VERIFY_READ);
+	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -913,10 +935,10 @@ futex_wake_op(u32 __user *uaddr1, int fshared, u32 __user *uaddr2,
 	int ret, op_ret;
 
 retry:
-	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
 		goto out;
-	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+	ret = get_futex_key(uaddr2, fshared, &key2);
 	if (unlikely(ret != 0))
 		goto out_put_key1;
 
@@ -1175,11 +1197,10 @@ retry:
 		pi_state = NULL;
 	}
 
-	ret = get_futex_key(uaddr1, fshared, &key1, VERIFY_READ);
+	ret = get_futex_key(uaddr1, fshared, &key1);
 	if (unlikely(ret != 0))
 		goto out;
-	ret = get_futex_key(uaddr2, fshared, &key2,
-			    requeue_pi ? VERIFY_WRITE : VERIFY_READ);
+	ret = get_futex_key(uaddr2, fshared, &key2);
 	if (unlikely(ret != 0))
 		goto out_put_key1;
 
@@ -1738,7 +1759,7 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, int fshared,
 	 */
 retry:
 	q->key = FUTEX_KEY_INIT;
-	ret = get_futex_key(uaddr, fshared, &q->key, VERIFY_READ);
+	ret = get_futex_key(uaddr, fshared, &q->key);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1904,7 +1925,7 @@ static int futex_lock_pi(u32 __user *uaddr, int fshared,
 	q.requeue_pi_key = NULL;
 retry:
 	q.key = FUTEX_KEY_INIT;
-	ret = get_futex_key(uaddr, fshared, &q.key, VERIFY_WRITE);
+	ret = get_futex_key(uaddr, fshared, &q.key);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -1974,7 +1995,7 @@ retry_private:
 	/* Unqueue and drop the lock */
 	unqueue_me_pi(&q);
 
-	goto out;
+	goto out_put_key;
 
 out_unlock_put_key:
 	queue_unlock(&q, hb);
@@ -2023,7 +2044,7 @@ retry:
 	if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current))
 		return -EPERM;
 
-	ret = get_futex_key(uaddr, fshared, &key, VERIFY_WRITE);
+	ret = get_futex_key(uaddr, fshared, &key);
 	if (unlikely(ret != 0))
 		goto out;
 
@@ -2215,7 +2236,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
 	rt_waiter.task = NULL;
 
 	key2 = FUTEX_KEY_INIT;
-	ret = get_futex_key(uaddr2, fshared, &key2, VERIFY_WRITE);
+	ret = get_futex_key(uaddr2, fshared, &key2);
 	if (unlikely(ret != 0))
 		goto out;
 
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 50dbd5999588..8a5c7d55ac9f 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -243,38 +243,70 @@ static void toggle_bp_slot(struct perf_event *bp, bool enable)
  * ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
  *   + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
  */
-int reserve_bp_slot(struct perf_event *bp)
+static int __reserve_bp_slot(struct perf_event *bp)
 {
 	struct bp_busy_slots slots = {0};
-	int ret = 0;
-
-	mutex_lock(&nr_bp_mutex);
 
 	fetch_bp_busy_slots(&slots, bp);
 
 	/* Flexible counters need to keep at least one slot */
-	if (slots.pinned + (!!slots.flexible) == HBP_NUM) {
-		ret = -ENOSPC;
-		goto end;
-	}
+	if (slots.pinned + (!!slots.flexible) == HBP_NUM)
+		return -ENOSPC;
 
 	toggle_bp_slot(bp, true);
 
-end:
+	return 0;
+}
+
+int reserve_bp_slot(struct perf_event *bp)
+{
+	int ret;
+
+	mutex_lock(&nr_bp_mutex);
+
+	ret = __reserve_bp_slot(bp);
+
 	mutex_unlock(&nr_bp_mutex);
 
 	return ret;
 }
 
+static void __release_bp_slot(struct perf_event *bp)
+{
+	toggle_bp_slot(bp, false);
+}
+
 void release_bp_slot(struct perf_event *bp)
 {
 	mutex_lock(&nr_bp_mutex);
 
-	toggle_bp_slot(bp, false);
+	__release_bp_slot(bp);
 
 	mutex_unlock(&nr_bp_mutex);
 }
 
+/*
+ * Allow the kernel debugger to reserve breakpoint slots without
+ * taking a lock using the dbg_* variant of for the reserve and
+ * release breakpoint slots.
+ */
+int dbg_reserve_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	return __reserve_bp_slot(bp);
+}
+
+int dbg_release_bp_slot(struct perf_event *bp)
+{
+	if (mutex_is_locked(&nr_bp_mutex))
+		return -1;
+
+	__release_bp_slot(bp);
+
+	return 0;
+}
 
 int register_perf_hw_breakpoint(struct perf_event *bp)
 {
@@ -296,6 +328,10 @@ int register_perf_hw_breakpoint(struct perf_event *bp)
 	if (!bp->attr.disabled || !bp->overflow_handler)
 		ret = arch_validate_hwbkpt_settings(bp, bp->ctx->task);
 
+	/* if arch_validate_hwbkpt_settings() fails then release bp slot */
+	if (ret)
+		release_bp_slot(bp);
+
 	return ret;
 }
 
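The dbg_reserve_bp_slot()/dbg_release_bp_slot() pair above exists because the kernel debugger runs with the rest of the system stopped and cannot sleep on nr_bp_mutex; when the mutex is already held it must fail fast instead of deadlocking. A hedged sketch of a debugger-side caller (the function below is hypothetical, not part of this patch):

/* Hypothetical kgdb-side helper: it may not sleep, so a held
 * nr_bp_mutex is treated as a hard failure rather than waited on. */
static int debugger_install_bp(struct perf_event *bp)
{
	if (dbg_reserve_bp_slot(bp))
		return -EBUSY;	/* slot accounting is locked, give up */

	/* arch-specific breakpoint installation would follow here */
	return 0;
}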
diff --git a/kernel/kexec.c b/kernel/kexec.c
index a9a93d9ee7a7..ef077fb73155 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -32,6 +32,7 @@
 #include <linux/console.h>
 #include <linux/vmalloc.h>
 #include <linux/swap.h>
+#include <linux/kmsg_dump.h>
 
 #include <asm/page.h>
 #include <asm/uaccess.h>
@@ -1074,6 +1075,9 @@ void crash_kexec(struct pt_regs *regs)
 	if (mutex_trylock(&kexec_mutex)) {
 		if (kexec_crash_image) {
 			struct pt_regs fixed_regs;
+
+			kmsg_dump(KMSG_DUMP_KEXEC);
+
 			crash_setup_regs(&fixed_regs, regs);
 			crash_save_vmcoreinfo();
 			machine_crash_shutdown(&fixed_regs);
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index e92d519f93b1..498cabba225e 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -28,7 +28,7 @@
 #include <linux/log2.h>
 #include <linux/uaccess.h>
 
-static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
+static void _kfifo_init(struct kfifo *fifo, void *buffer,
 		unsigned int size)
 {
 	fifo->buffer = buffer;
@@ -41,10 +41,10 @@ static void _kfifo_init(struct kfifo *fifo, unsigned char *buffer,
  * kfifo_init - initialize a FIFO using a preallocated buffer
  * @fifo: the fifo to assign the buffer
  * @buffer: the preallocated buffer to be used.
- * @size: the size of the internal buffer, this have to be a power of 2.
+ * @size: the size of the internal buffer, this has to be a power of 2.
  *
 */
-void kfifo_init(struct kfifo *fifo, unsigned char *buffer, unsigned int size)
+void kfifo_init(struct kfifo *fifo, void *buffer, unsigned int size)
 {
 	/* size must be a power of 2 */
 	BUG_ON(!is_power_of_2(size));
@@ -159,8 +159,9 @@ static inline void __kfifo_out_data(struct kfifo *fifo,
 	memcpy(to + l, fifo->buffer, len - l);
 }
 
-static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
-	const void __user *from, unsigned int len, unsigned int off)
+static inline int __kfifo_from_user_data(struct kfifo *fifo,
+	const void __user *from, unsigned int len, unsigned int off,
+	unsigned *lenout)
 {
 	unsigned int l;
 	int ret;
@@ -177,16 +178,20 @@ static inline unsigned int __kfifo_from_user_data(struct kfifo *fifo,
 	/* first put the data starting from fifo->in to buffer end */
 	l = min(len, fifo->size - off);
 	ret = copy_from_user(fifo->buffer + off, from, l);
-
-	if (unlikely(ret))
-		return ret + len - l;
+	if (unlikely(ret)) {
+		*lenout = ret;
+		return -EFAULT;
+	}
+	*lenout = l;
 
 	/* then put the rest (if any) at the beginning of the buffer */
-	return copy_from_user(fifo->buffer, from + l, len - l);
+	ret = copy_from_user(fifo->buffer, from + l, len - l);
+	*lenout += ret ? ret : len - l;
+	return ret ? -EFAULT : 0;
 }
 
-static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
-	void __user *to, unsigned int len, unsigned int off)
+static inline int __kfifo_to_user_data(struct kfifo *fifo,
+	void __user *to, unsigned int len, unsigned int off, unsigned *lenout)
 {
 	unsigned int l;
 	int ret;
@@ -203,12 +208,21 @@ static inline unsigned int __kfifo_to_user_data(struct kfifo *fifo,
 	/* first get the data from fifo->out until the end of the buffer */
 	l = min(len, fifo->size - off);
 	ret = copy_to_user(to, fifo->buffer + off, l);
-
-	if (unlikely(ret))
-		return ret + len - l;
+	*lenout = l;
+	if (unlikely(ret)) {
+		*lenout -= ret;
+		return -EFAULT;
+	}
 
 	/* then get the rest (if any) from the beginning of the buffer */
-	return copy_to_user(to + l, fifo->buffer, len - l);
+	len -= l;
+	ret = copy_to_user(to + l, fifo->buffer, len);
+	if (unlikely(ret)) {
+		*lenout += len - ret;
+		return -EFAULT;
+	}
+	*lenout += len;
+	return 0;
 }
 
 unsigned int __kfifo_in_n(struct kfifo *fifo,
@@ -235,7 +249,7 @@ EXPORT_SYMBOL(__kfifo_in_n);
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these functions.
 */
-unsigned int kfifo_in(struct kfifo *fifo, const unsigned char *from,
+unsigned int kfifo_in(struct kfifo *fifo, const void *from,
 		unsigned int len)
 {
 	len = min(kfifo_avail(fifo), len);
@@ -277,7 +291,7 @@ EXPORT_SYMBOL(__kfifo_out_n);
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these functions.
 */
-unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len)
+unsigned int kfifo_out(struct kfifo *fifo, void *to, unsigned int len)
 {
 	len = min(kfifo_len(fifo), len);
 
@@ -288,6 +302,27 @@ unsigned int kfifo_out(struct kfifo *fifo, unsigned char *to, unsigned int len)
 }
 EXPORT_SYMBOL(kfifo_out);
 
+/**
+ * kfifo_out_peek - copy some data from the FIFO, but do not remove it
+ * @fifo: the fifo to be used.
+ * @to: where the data must be copied.
+ * @len: the size of the destination buffer.
+ * @offset: offset into the fifo
+ *
+ * This function copies at most @len bytes at @offset from the FIFO
+ * into the @to buffer and returns the number of copied bytes.
+ * The data is not removed from the FIFO.
+ */
+unsigned int kfifo_out_peek(struct kfifo *fifo, void *to, unsigned int len,
+			    unsigned offset)
+{
+	len = min(kfifo_len(fifo), len + offset);
+
+	__kfifo_out_data(fifo, to, len, offset);
+	return len;
+}
+EXPORT_SYMBOL(kfifo_out_peek);
+
 unsigned int __kfifo_out_generic(struct kfifo *fifo,
 	void *to, unsigned int len, unsigned int recsize,
 	unsigned int *total)
@@ -299,10 +334,13 @@ EXPORT_SYMBOL(__kfifo_out_generic);
 unsigned int __kfifo_from_user_n(struct kfifo *fifo,
 	const void __user *from, unsigned int len, unsigned int recsize)
 {
+	unsigned total;
+
 	if (kfifo_avail(fifo) < len + recsize)
 		return len + 1;
 
-	return __kfifo_from_user_data(fifo, from, len, recsize);
+	__kfifo_from_user_data(fifo, from, len, recsize, &total);
+	return total;
 }
 EXPORT_SYMBOL(__kfifo_from_user_n);
 
@@ -311,20 +349,24 @@ EXPORT_SYMBOL(__kfifo_from_user_n);
  * @fifo: the fifo to be used.
  * @from: pointer to the data to be added.
  * @len: the length of the data to be added.
+ * @total: the actual returned data length.
  *
  * This function copies at most @len bytes from the @from into the
- * FIFO depending and returns the number of copied bytes.
+ * FIFO depending and returns -EFAULT/0.
  *
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these functions.
 */
-unsigned int kfifo_from_user(struct kfifo *fifo,
-	const void __user *from, unsigned int len)
+int kfifo_from_user(struct kfifo *fifo,
+	const void __user *from, unsigned int len, unsigned *total)
 {
+	int ret;
 	len = min(kfifo_avail(fifo), len);
-	len -= __kfifo_from_user_data(fifo, from, len, 0);
+	ret = __kfifo_from_user_data(fifo, from, len, 0, total);
+	if (ret)
+		return ret;
 	__kfifo_add_in(fifo, len);
-	return len;
+	return 0;
 }
 EXPORT_SYMBOL(kfifo_from_user);
 
@@ -339,17 +381,17 @@ unsigned int __kfifo_to_user_n(struct kfifo *fifo,
 	void __user *to, unsigned int len, unsigned int reclen,
 	unsigned int recsize)
 {
-	unsigned int ret;
+	unsigned int ret, total;
 
 	if (kfifo_len(fifo) < reclen + recsize)
 		return len;
 
-	ret = __kfifo_to_user_data(fifo, to, reclen, recsize);
+	ret = __kfifo_to_user_data(fifo, to, reclen, recsize, &total);
 
 	if (likely(ret == 0))
 		__kfifo_add_out(fifo, reclen + recsize);
 
-	return ret;
+	return total;
 }
 EXPORT_SYMBOL(__kfifo_to_user_n);
 
@@ -358,20 +400,22 @@ EXPORT_SYMBOL(__kfifo_to_user_n);
  * @fifo: the fifo to be used.
  * @to: where the data must be copied.
  * @len: the size of the destination buffer.
+ * @lenout: pointer to output variable with copied data
  *
  * This function copies at most @len bytes from the FIFO into the
- * @to buffer and returns the number of copied bytes.
+ * @to buffer and 0 or -EFAULT.
 *
 * Note that with only one concurrent reader and one concurrent
 * writer, you don't need extra locking to use these functions.
 */
-unsigned int kfifo_to_user(struct kfifo *fifo,
-	void __user *to, unsigned int len)
+int kfifo_to_user(struct kfifo *fifo,
+	void __user *to, unsigned int len, unsigned *lenout)
 {
+	int ret;
 	len = min(kfifo_len(fifo), len);
-	len -= __kfifo_to_user_data(fifo, to, len, 0);
-	__kfifo_add_out(fifo, len);
-	return len;
+	ret = __kfifo_to_user_data(fifo, to, len, 0, lenout);
+	__kfifo_add_out(fifo, *lenout);
+	return ret;
 }
 EXPORT_SYMBOL(kfifo_to_user);
 
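With this patch, kfifo_from_user() and kfifo_to_user() change calling convention: they return 0 or -EFAULT and report the number of bytes actually copied through an output parameter, instead of encoding both in one return value. A minimal sketch of a driver write() path using the new API (my_fifo and my_write are placeholders; the fifo is assumed initialized elsewhere):

static struct kfifo my_fifo;	/* assumed set up with kfifo_alloc()/kfifo_init() */

static ssize_t my_write(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned int copied;
	int ret;

	ret = kfifo_from_user(&my_fifo, buf, count, &copied);
	if (ret)
		return ret;	/* -EFAULT from a faulting user-space copy */

	return copied;		/* bytes actually queued into the fifo */
}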
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 2eb517e23514..761fdd2b3034 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -583,6 +583,9 @@ static void kgdb_wait(struct pt_regs *regs)
 	smp_wmb();
 	atomic_set(&cpu_in_kgdb[cpu], 1);
 
+	/* Disable any cpu specific hw breakpoints */
+	kgdb_disable_hw_debug(regs);
+
 	/* Wait till primary CPU is done with debugging */
 	while (atomic_read(&passive_cpu_wait[cpu]))
 		cpu_relax();
@@ -596,7 +599,7 @@ static void kgdb_wait(struct pt_regs *regs)
 
 	/* Signal the primary CPU that we are done: */
 	atomic_set(&cpu_in_kgdb[cpu], 0);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 }
@@ -1450,7 +1453,7 @@ acquirelock:
 	    (kgdb_info[cpu].task &&
 	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
 		atomic_set(&kgdb_active, -1);
-		touch_softlockup_watchdog();
+		touch_softlockup_watchdog_sync();
 		clocksource_touch_watchdog();
 		local_irq_restore(flags);
 
@@ -1550,7 +1553,7 @@ kgdb_restore:
 	}
 	/* Free kgdb_active */
 	atomic_set(&kgdb_active, -1);
-	touch_softlockup_watchdog();
+	touch_softlockup_watchdog_sync();
 	clocksource_touch_watchdog();
 	local_irq_restore(flags);
 
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 25b103190364..bf0e231d9702 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -520,13 +520,15 @@ int call_usermodehelper_pipe(char *path, char **argv, char **envp,
 		return -ENOMEM;
 
 	ret = call_usermodehelper_stdinpipe(sub_info, filp);
-	if (ret < 0)
-		goto out;
+	if (ret < 0) {
+		call_usermodehelper_freeinfo(sub_info);
+		return ret;
+	}
 
-	return call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+	ret = call_usermodehelper_exec(sub_info, UMH_WAIT_EXEC);
+	if (ret < 0)	/* Failed to execute helper, close pipe */
+		filp_close(*filp, NULL);
 
-out:
-	call_usermodehelper_freeinfo(sub_info);
 	return ret;
 }
 EXPORT_SYMBOL(call_usermodehelper_pipe);
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5feaddcdbe49..c62ec14609b9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2147,7 +2147,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
-					this, 1, irqclass);
+					this, 0, irqclass);
 }
 
 void print_irqtrace_events(struct task_struct *curr)
diff --git a/kernel/panic.c b/kernel/panic.c
index 5827f7b97254..c787333282b8 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -75,7 +75,6 @@ NORET_TYPE void panic(const char * fmt, ...)
 	dump_stack();
 #endif
 
-	kmsg_dump(KMSG_DUMP_PANIC);
 	/*
 	 * If we have crashed and we have a crash kernel loaded let it handle
 	 * everything else.
@@ -83,6 +82,8 @@ NORET_TYPE void panic(const char * fmt, ...)
 	 */
 	crash_kexec(NULL);
 
+	kmsg_dump(KMSG_DUMP_PANIC);
+
 	/*
 	 * Note smp_send_stop is the usual smp shutdown function, which
 	 * unfortunately means it may not be hardened to work in a panic
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 603c0d8b5df1..d27746bd3a06 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3268,6 +3268,9 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
 		return 0;
 
@@ -3377,6 +3380,9 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
 		return 0;
 
@@ -3494,6 +3500,9 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
 				 struct perf_mmap_event *mmap_event)
 {
+	if (event->state != PERF_EVENT_STATE_ACTIVE)
+		return 0;
+
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
 		return 0;
 
diff --git a/kernel/printk.c b/kernel/printk.c
index 17463ca2e229..1751c456b71f 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1467,6 +1467,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_unregister);
 static const char const *kmsg_reasons[] = {
 	[KMSG_DUMP_OOPS] = "oops",
 	[KMSG_DUMP_PANIC] = "panic",
+	[KMSG_DUMP_KEXEC] = "kexec",
 };
 
 static const char *kmsg_to_str(enum kmsg_dump_reason reason)
diff --git a/kernel/sched.c b/kernel/sched.c
index c535cc4f6428..3a8fb30a91b1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2320,14 +2320,12 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 }
 
 /*
- * Called from:
+ * Gets called from 3 sites (exec, fork, wakeup), since it is called without
+ * holding rq->lock we need to ensure ->cpus_allowed is stable, this is done
+ * by:
  *
- *  - fork, @p is stable because it isn't on the tasklist yet
- *
- *  - exec, @p is unstable, retry loop
- *
- *  - wake-up, we serialize ->cpus_allowed against TASK_WAKING so
- *    we should be good.
+ *  exec:           is unstable, retry loop
+ *  fork & wake-up: serialize ->cpus_allowed against TASK_WAKING
  */
 static inline
 int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags)
@@ -2620,9 +2618,6 @@ void sched_fork(struct task_struct *p, int clone_flags)
 	if (p->sched_class->task_fork)
 		p->sched_class->task_fork(p);
 
-#ifdef CONFIG_SMP
-	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
-#endif
 	set_task_cpu(p, cpu);
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
@@ -2652,6 +2647,21 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 {
 	unsigned long flags;
 	struct rq *rq;
+	int cpu = get_cpu();
+
+#ifdef CONFIG_SMP
+	/*
+	 * Fork balancing, do it here and not earlier because:
+	 *  - cpus_allowed can change in the fork path
+	 *  - any previously selected cpu might disappear through hotplug
+	 *
+	 * We still have TASK_WAKING but PF_STARTING is gone now, meaning
+	 * ->cpus_allowed is stable, we have preemption disabled, meaning
+	 * cpu_online_mask is stable.
+	 */
+	cpu = select_task_rq(p, SD_BALANCE_FORK, 0);
+	set_task_cpu(p, cpu);
+#endif
 
 	rq = task_rq_lock(p, &flags);
 	BUG_ON(p->state != TASK_WAKING);
@@ -2665,6 +2675,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
 		p->sched_class->task_woken(rq, p);
 #endif
 	task_rq_unlock(rq, &flags);
+	put_cpu();
 }
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
@@ -5530,8 +5541,11 @@ need_resched_nonpreemptible:
 
 	post_schedule(rq);
 
-	if (unlikely(reacquire_kernel_lock(current) < 0))
+	if (unlikely(reacquire_kernel_lock(current) < 0)) {
+		prev = rq->curr;
+		switch_count = &prev->nivcsw;
 		goto need_resched_nonpreemptible;
+	}
 
 	preempt_enable_no_resched();
 	if (need_resched())
@@ -7136,14 +7150,18 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	 * the ->cpus_allowed mask from under waking tasks, which would be
 	 * possible when we change rq->lock in ttwu(), so synchronize against
 	 * TASK_WAKING to avoid that.
+	 *
+	 * Make an exception for freshly cloned tasks, since cpuset namespaces
+	 * might move the task about, we have to validate the target in
+	 * wake_up_new_task() anyway since the cpu might have gone away.
 	 */
 again:
-	while (p->state == TASK_WAKING)
+	while (p->state == TASK_WAKING && !(p->flags & PF_STARTING))
 		cpu_relax();
 
 	rq = task_rq_lock(p, &flags);
 
-	if (p->state == TASK_WAKING) {
+	if (p->state == TASK_WAKING && !(p->flags & PF_STARTING)) {
 		task_rq_unlock(rq, &flags);
 		goto again;
 	}
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 42ac3c9f66f6..8fe7ee81c552 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1508,7 +1508,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flag
 		 * If there's an idle sibling in this domain, make that
 		 * the wake_affine target instead of the current cpu.
 		 */
-		if (tmp->flags & SD_PREFER_SIBLING)
+		if (tmp->flags & SD_SHARE_PKG_RESOURCES)
 			target = select_idle_sibling(p, tmp, target);
 
 		if (target >= 0) {
diff --git a/kernel/signal.c b/kernel/signal.c
index d09692b40376..934ae5e687b9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -979,7 +979,8 @@ static void print_fatal_signal(struct pt_regs *regs, int signr)
 		for (i = 0; i < 16; i++) {
 			unsigned char insn;
 
-			__get_user(insn, (unsigned char *)(regs->ip + i));
+			if (get_user(insn, (unsigned char *)(regs->ip + i)))
+				break;
 			printk("%02x ", insn);
 		}
 	}
diff --git a/kernel/smp.c b/kernel/smp.c
index de735a6637d0..f10408422444 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -347,7 +347,7 @@ int smp_call_function_any(const struct cpumask *mask,
 		goto call;
 
 	/* Try for same node. */
-	nodemask = cpumask_of_node(cpu);
+	nodemask = cpumask_of_node(cpu_to_node(cpu));
 	for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
 	     cpu = cpumask_next_and(cpu, nodemask, mask)) {
 		if (cpu_online(cpu))
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index d22579087e27..0d4c7898ab80 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -25,6 +25,7 @@ static DEFINE_SPINLOCK(print_lock);
 static DEFINE_PER_CPU(unsigned long, softlockup_touch_ts); /* touch timestamp */
 static DEFINE_PER_CPU(unsigned long, softlockup_print_ts); /* print timestamp */
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
+static DEFINE_PER_CPU(bool, softlock_touch_sync);
 
 static int __read_mostly did_panic;
 int __read_mostly softlockup_thresh = 60;
@@ -79,6 +80,12 @@ void touch_softlockup_watchdog(void)
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
+void touch_softlockup_watchdog_sync(void)
+{
+	__raw_get_cpu_var(softlock_touch_sync) = true;
+	__raw_get_cpu_var(softlockup_touch_ts) = 0;
+}
+
 void touch_all_softlockup_watchdogs(void)
 {
 	int cpu;
@@ -118,6 +125,14 @@ void softlockup_tick(void)
 	}
 
 	if (touch_ts == 0) {
+		if (unlikely(per_cpu(softlock_touch_sync, this_cpu))) {
+			/*
+			 * If the time stamp was touched atomically
+			 * make sure the scheduler tick is up to date.
+			 */
+			per_cpu(softlock_touch_sync, this_cpu) = false;
+			sched_clock_tick();
+		}
 		__touch_softlockup_watchdog();
 		return;
 	}
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 6f740d9f0948..d7395fdfb9f3 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -259,7 +259,8 @@ void clockevents_notify(unsigned long reason, void *arg)
 		cpu = *((int *)arg);
 		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
 			if (cpumask_test_cpu(cpu, dev->cpumask) &&
-			    cpumask_weight(dev->cpumask) == 1) {
+			    cpumask_weight(dev->cpumask) == 1 &&
+			    !tick_is_broadcast_device(dev)) {
 				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
 				list_del(&dev->list);
 			}
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index e85c23404d34..13700833c181 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -343,7 +343,19 @@ static void clocksource_resume_watchdog(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&watchdog_lock, flags);
+	/*
+	 * We use trylock here to avoid a potential dead lock when
+	 * kgdb calls this code after the kernel has been stopped with
+	 * watchdog_lock held. When watchdog_lock is held we just
+	 * return and accept, that the watchdog might trigger and mark
+	 * the monitored clock source (usually TSC) unstable.
+	 *
+	 * This does not affect the other caller clocksource_resume()
+	 * because at this point the kernel is UP, interrupts are
+	 * disabled and nothing can hold watchdog_lock.
+	 */
+	if (!spin_trylock_irqsave(&watchdog_lock, flags))
+		return;
 	clocksource_reset_watchdog();
 	spin_unlock_irqrestore(&watchdog_lock, flags);
 }
@@ -458,8 +470,8 @@ void clocksource_resume(void)
  * clocksource_touch_watchdog - Update watchdog
  *
  * Update the watchdog after exception contexts such as kgdb so as not
- * to incorrectly trip the watchdog.
- *
+ * to incorrectly trip the watchdog. This might fail when the kernel
+ * was stopped in code which holds watchdog_lock.
 */
 void clocksource_touch_watchdog(void)
 {
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 7faaa32fbf4f..e2ab064c6d41 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -880,6 +880,7 @@ void getboottime(struct timespec *ts)
 
 	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
 }
+EXPORT_SYMBOL_GPL(getboottime);
 
 /**
  * monotonic_to_bootbased - Convert the monotonic time to boot based.
@@ -889,6 +890,7 @@ void monotonic_to_bootbased(struct timespec *ts)
 {
 	*ts = timespec_add_safe(*ts, total_sleep_time);
 }
+EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
diff --git a/kernel/timer.c b/kernel/timer.c
index 15533b792397..c61a7949387f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1198,6 +1198,7 @@ void update_process_times(int user_tick)
 	run_local_timers();
 	rcu_check_callbacks(cpu, user_tick);
 	printk_tick();
+	perf_event_do_pending();
 	scheduler_tick();
 	run_posix_cpu_timers(p);
 }
@@ -1209,8 +1210,6 @@ static void run_timer_softirq(struct softirq_action *h)
 {
 	struct tvec_base *base = __get_cpu_var(tvec_bases);
 
-	perf_event_do_pending();
-
 	hrtimer_run_pending();
 
 	if (time_after_eq(jiffies, base->timer_jiffies))
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 6c22d8a2f289..60e2ce0181ee 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -27,9 +27,7 @@ config HAVE_FUNCTION_GRAPH_TRACER
 config HAVE_FUNCTION_GRAPH_FP_TEST
 	bool
 	help
-	 An arch may pass in a unique value (frame pointer) to both the
-	 entering and exiting of a function. On exit, the value is compared
-	 and if it does not match, then it will panic the kernel.
+	 See Documentation/trace/ftrace-design.txt
 
 config HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	bool
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 7968762c8167..1e6640f80454 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
| @@ -1690,7 +1690,7 @@ ftrace_regex_lseek(struct file *file, loff_t offset, int origin) | |||
| 1690 | static int ftrace_match(char *str, char *regex, int len, int type) | 1690 | static int ftrace_match(char *str, char *regex, int len, int type) |
| 1691 | { | 1691 | { |
| 1692 | int matched = 0; | 1692 | int matched = 0; |
| 1693 | char *ptr; | 1693 | int slen; |
| 1694 | 1694 | ||
| 1695 | switch (type) { | 1695 | switch (type) { |
| 1696 | case MATCH_FULL: | 1696 | case MATCH_FULL: |
| @@ -1706,8 +1706,8 @@ static int ftrace_match(char *str, char *regex, int len, int type) | |||
| 1706 | matched = 1; | 1706 | matched = 1; |
| 1707 | break; | 1707 | break; |
| 1708 | case MATCH_END_ONLY: | 1708 | case MATCH_END_ONLY: |
| 1709 | ptr = strstr(str, regex); | 1709 | slen = strlen(str); |
| 1710 | if (ptr && (ptr[len] == 0)) | 1710 | if (slen >= len && memcmp(str + slen - len, regex, len) == 0) |
| 1711 | matched = 1; | 1711 | matched = 1; |
| 1712 | break; | 1712 | break; |
| 1713 | } | 1713 | } |
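The MATCH_END_ONLY rewrite fixes a real correctness bug: strstr() returns the first occurrence of the pattern, so a string whose suffix matches could still be rejected if the pattern also appears earlier. A standalone sketch of the corrected logic:

    #include <string.h>

    /* Return 1 iff the last len bytes of str equal pat, as the fixed
     * MATCH_END_ONLY case now does. */
    static int match_end(const char *str, const char *pat, int len)
    {
            int slen = strlen(str);

            return slen >= len && memcmp(str + slen - len, pat, len) == 0;
    }

    /* match_end("lock_sock", "ock", 3) == 1, whereas the old
     * strstr()-based code stopped at the first "ock" (offset 1),
     * saw '_' after it, and wrongly returned 0. */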
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 2326b04c95c4..8c1b2d290718 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -464,6 +464,8 @@ struct ring_buffer_iter { | |||
| 464 | struct ring_buffer_per_cpu *cpu_buffer; | 464 | struct ring_buffer_per_cpu *cpu_buffer; |
| 465 | unsigned long head; | 465 | unsigned long head; |
| 466 | struct buffer_page *head_page; | 466 | struct buffer_page *head_page; |
| 467 | struct buffer_page *cache_reader_page; | ||
| 468 | unsigned long cache_read; | ||
| 467 | u64 read_stamp; | 469 | u64 read_stamp; |
| 468 | }; | 470 | }; |
| 469 | 471 | ||
| @@ -2716,6 +2718,8 @@ static void rb_iter_reset(struct ring_buffer_iter *iter) | |||
| 2716 | iter->read_stamp = cpu_buffer->read_stamp; | 2718 | iter->read_stamp = cpu_buffer->read_stamp; |
| 2717 | else | 2719 | else |
| 2718 | iter->read_stamp = iter->head_page->page->time_stamp; | 2720 | iter->read_stamp = iter->head_page->page->time_stamp; |
| 2721 | iter->cache_reader_page = cpu_buffer->reader_page; | ||
| 2722 | iter->cache_read = cpu_buffer->read; | ||
| 2719 | } | 2723 | } |
| 2720 | 2724 | ||
| 2721 | /** | 2725 | /** |
| @@ -2869,7 +2873,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 2869 | * Splice the empty reader page into the list around the head. | 2873 | * Splice the empty reader page into the list around the head. |
| 2870 | */ | 2874 | */ |
| 2871 | reader = rb_set_head_page(cpu_buffer); | 2875 | reader = rb_set_head_page(cpu_buffer); |
| 2872 | cpu_buffer->reader_page->list.next = reader->list.next; | 2876 | cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); |
| 2873 | cpu_buffer->reader_page->list.prev = reader->list.prev; | 2877 | cpu_buffer->reader_page->list.prev = reader->list.prev; |
| 2874 | 2878 | ||
| 2875 | /* | 2879 | /* |
| @@ -2906,7 +2910,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
| 2906 | * | 2910 | * |
| 2907 | * Now make the new head point back to the reader page. | 2911 | * Now make the new head point back to the reader page. |
| 2908 | */ | 2912 | */ |
| 2909 | reader->list.next->prev = &cpu_buffer->reader_page->list; | 2913 | rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; |
| 2910 | rb_inc_page(cpu_buffer, &cpu_buffer->head_page); | 2914 | rb_inc_page(cpu_buffer, &cpu_buffer->head_page); |
| 2911 | 2915 | ||
| 2912 | /* Finally update the reader page to the new head */ | 2916 | /* Finally update the reader page to the new head */ |
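Both rb_get_reader_page() hunks wrap stored next pointers in rb_list_head() before dereferencing them: the ring buffer keeps state flags in the low bits of those pointers, and using one unmasked corrupts the list. An illustrative sketch of the masking idea (FLAG_MASK and the node type are stand-ins, not the kernel's actual definitions):

    #include <stdint.h>

    #define FLAG_MASK 3UL   /* low bits carry HEAD/UPDATE state */

    struct node {
            struct node *next;
            struct node *prev;
    };

    /* Strip the flag bits before the pointer may be dereferenced;
     * this mirrors what rb_list_head() does for struct list_head. */
    static struct node *unflag(struct node *p)
    {
            return (struct node *)((uintptr_t)p & ~FLAG_MASK);
    }

    /* Wrong: reader->next may have flag bits set.
     *      reader->next->prev = new_page;
     * Right:
     *      unflag(reader->next)->prev = new_page;
     */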
| @@ -3060,13 +3064,22 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
| 3060 | struct ring_buffer_event *event; | 3064 | struct ring_buffer_event *event; |
| 3061 | int nr_loops = 0; | 3065 | int nr_loops = 0; |
| 3062 | 3066 | ||
| 3063 | if (ring_buffer_iter_empty(iter)) | ||
| 3064 | return NULL; | ||
| 3065 | |||
| 3066 | cpu_buffer = iter->cpu_buffer; | 3067 | cpu_buffer = iter->cpu_buffer; |
| 3067 | buffer = cpu_buffer->buffer; | 3068 | buffer = cpu_buffer->buffer; |
| 3068 | 3069 | ||
| 3070 | /* | ||
| 3071 | * Check if someone performed a consuming read to | ||
| 3072 | * the buffer. A consuming read invalidates the iterator | ||
| 3073 | * and we need to reset the iterator in this case. | ||
| 3074 | */ | ||
| 3075 | if (unlikely(iter->cache_read != cpu_buffer->read || | ||
| 3076 | iter->cache_reader_page != cpu_buffer->reader_page)) | ||
| 3077 | rb_iter_reset(iter); | ||
| 3078 | |||
| 3069 | again: | 3079 | again: |
| 3080 | if (ring_buffer_iter_empty(iter)) | ||
| 3081 | return NULL; | ||
| 3082 | |||
| 3070 | /* | 3083 | /* |
| 3071 | * We repeat when a timestamp is encountered. | 3084 | * We repeat when a timestamp is encountered. |
| 3072 | * We can get multiple timestamps by nested interrupts or also | 3085 | * We can get multiple timestamps by nested interrupts or also |
| @@ -3081,6 +3094,11 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) | |||
| 3081 | if (rb_per_cpu_empty(cpu_buffer)) | 3094 | if (rb_per_cpu_empty(cpu_buffer)) |
| 3082 | return NULL; | 3095 | return NULL; |
| 3083 | 3096 | ||
| 3097 | if (iter->head >= local_read(&iter->head_page->page->commit)) { | ||
| 3098 | rb_inc_iter(iter); | ||
| 3099 | goto again; | ||
| 3100 | } | ||
| 3101 | |||
| 3084 | event = rb_iter_head_event(iter); | 3102 | event = rb_iter_head_event(iter); |
| 3085 | 3103 | ||
| 3086 | switch (event->type_len) { | 3104 | switch (event->type_len) { |
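The new cache_reader_page/cache_read fields let a non-consuming iterator detect that a consuming read has moved the buffer out from under it: rb_iter_reset() snapshots the buffer's read count and reader page, and rb_iter_peek() resets the iterator whenever either snapshot goes stale. A minimal sketch of this snapshot-and-validate scheme (types and names are illustrative):

    struct buf {
            unsigned long read;     /* bumped by every consuming read */
            void *reader_page;      /* swapped out by consuming reads */
    };

    struct iter {
            struct buf *b;
            unsigned long cache_read;
            void *cache_reader_page;
    };

    static void iter_reset(struct iter *it)
    {
            /* ...re-derive the head position, then snapshot... */
            it->cache_read = it->b->read;
            it->cache_reader_page = it->b->reader_page;
    }

    /* Called before every peek: if either snapshot is stale, a
     * consuming read ran in between and the position is invalid. */
    static void iter_validate(struct iter *it)
    {
            if (it->cache_read != it->b->read ||
                it->cache_reader_page != it->b->reader_page)
                    iter_reset(it);
    }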
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 0df1b0f2cb9e..eac6875cb990 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -951,6 +951,11 @@ void trace_find_cmdline(int pid, char comm[]) | |||
| 951 | return; | 951 | return; |
| 952 | } | 952 | } |
| 953 | 953 | ||
| 954 | if (WARN_ON_ONCE(pid < 0)) { | ||
| 955 | strcpy(comm, "<XXX>"); | ||
| 956 | return; | ||
| 957 | } | ||
| 958 | |||
| 954 | if (pid > PID_MAX_DEFAULT) { | 959 | if (pid > PID_MAX_DEFAULT) { |
| 955 | strcpy(comm, "<...>"); | 960 | strcpy(comm, "<...>"); |
| 956 | return; | 961 | return; |
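trace_find_cmdline() gains a guard for negative PIDs, which would otherwise index the saved-cmdlines map out of bounds; WARN_ON_ONCE flags the caller without flooding the log. A simplified sketch of the resulting check ordering (the map lookup itself is elided):

    #include <string.h>

    #define PID_MAX_DEFAULT 0x8000  /* kernel's default pid ceiling */

    static void find_cmdline(int pid, char comm[])
    {
            if (pid == 0) {
                    strcpy(comm, "<idle>");
                    return;
            }
            if (pid < 0) {                  /* the new guard */
                    strcpy(comm, "<XXX>");
                    return;
            }
            if (pid > PID_MAX_DEFAULT) {
                    strcpy(comm, "<...>");
                    return;
            }
            /* ...safe to index the saved-cmdlines map here... */
    }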
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 50504cb228de..e42af9aad69f 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
| @@ -211,8 +211,9 @@ static int filter_pred_pchar(struct filter_pred *pred, void *event, | |||
| 211 | { | 211 | { |
| 212 | char **addr = (char **)(event + pred->offset); | 212 | char **addr = (char **)(event + pred->offset); |
| 213 | int cmp, match; | 213 | int cmp, match; |
| 214 | int len = strlen(*addr) + 1; /* including tailing '\0' */ | ||
| 214 | 215 | ||
| 215 | cmp = pred->regex.match(*addr, &pred->regex, pred->regex.field_len); | 216 | cmp = pred->regex.match(*addr, &pred->regex, len); |
| 216 | 217 | ||
| 217 | match = cmp ^ pred->not; | 218 | match = cmp ^ pred->not; |
| 218 | 219 | ||
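filter_pred_pchar() now computes the string length itself and passes strlen() + 1 to the match callback, so the trailing '\0' participates in the comparison. That makes a strncmp()-based full match behave like strcmp() in a single pass, as this standalone sketch shows:

    #include <string.h>

    /* len is strlen(str) + 1, so the '\0' is compared too and a
     * prefix such as "foo" vs. "foobar" cannot pass as a full match. */
    static int match_full(const char *str, const char *pattern, int len)
    {
            return strncmp(str, pattern, len) == 0;
    }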
| @@ -251,7 +252,18 @@ static int filter_pred_none(struct filter_pred *pred, void *event, | |||
| 251 | return 0; | 252 | return 0; |
| 252 | } | 253 | } |
| 253 | 254 | ||
| 254 | /* Basic regex callbacks */ | 255 | /* |
| 256 | * regex_match_foo - Basic regex callbacks | ||
| 257 | * | ||
| 258 | * @str: the string to be searched | ||
| 259 | * @r: the regex structure containing the pattern string | ||
| 260 | * @len: the length of the string to be searched (including '\0') | ||
| 261 | * | ||
| 262 | * Note: | ||
| 263 | * - @str might not be NULL-terminated if it's of type DYN_STRING | ||
| 264 | * or STATIC_STRING | ||
| 265 | */ | ||
| 266 | |||
| 255 | static int regex_match_full(char *str, struct regex *r, int len) | 267 | static int regex_match_full(char *str, struct regex *r, int len) |
| 256 | { | 268 | { |
| 257 | if (strncmp(str, r->pattern, len) == 0) | 269 | if (strncmp(str, r->pattern, len) == 0) |
| @@ -261,23 +273,24 @@ static int regex_match_full(char *str, struct regex *r, int len) | |||
| 261 | 273 | ||
| 262 | static int regex_match_front(char *str, struct regex *r, int len) | 274 | static int regex_match_front(char *str, struct regex *r, int len) |
| 263 | { | 275 | { |
| 264 | if (strncmp(str, r->pattern, len) == 0) | 276 | if (strncmp(str, r->pattern, r->len) == 0) |
| 265 | return 1; | 277 | return 1; |
| 266 | return 0; | 278 | return 0; |
| 267 | } | 279 | } |
| 268 | 280 | ||
| 269 | static int regex_match_middle(char *str, struct regex *r, int len) | 281 | static int regex_match_middle(char *str, struct regex *r, int len) |
| 270 | { | 282 | { |
| 271 | if (strstr(str, r->pattern)) | 283 | if (strnstr(str, r->pattern, len)) |
| 272 | return 1; | 284 | return 1; |
| 273 | return 0; | 285 | return 0; |
| 274 | } | 286 | } |
| 275 | 287 | ||
| 276 | static int regex_match_end(char *str, struct regex *r, int len) | 288 | static int regex_match_end(char *str, struct regex *r, int len) |
| 277 | { | 289 | { |
| 278 | char *ptr = strstr(str, r->pattern); | 290 | int strlen = len - 1; |
| 279 | 291 | ||
| 280 | if (ptr && (ptr[r->len] == 0)) | 292 | if (strlen >= r->len && |
| 293 | memcmp(str + strlen - r->len, r->pattern, r->len) == 0) | ||
| 281 | return 1; | 294 | return 1; |
| 282 | return 0; | 295 | return 0; |
| 283 | } | 296 | } |
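regex_match_middle() now uses strnstr() so the search honors the explicit length — which matters because DYN_STRING and STATIC_STRING fields need not be NUL-terminated — and regex_match_end() compares the tail with memcmp() for the same reason. strnstr() is a kernel/BSD helper rather than ISO C; a portable sketch of the bounded search, for reference (strnstr_compat is a made-up name):

    #include <string.h>

    static char *strnstr_compat(const char *s, const char *find, size_t slen)
    {
            size_t flen = strlen(find);
            size_t i;

            if (!flen)
                    return (char *)s;
            /* every candidate match must fit inside the first slen bytes */
            for (i = 0; i + flen <= slen; i++)
                    if (memcmp(s + i, find, flen) == 0)
                            return (char *)(s + i);
            return NULL;
    }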
| @@ -781,10 +794,8 @@ static int filter_add_pred(struct filter_parse_state *ps, | |||
| 781 | pred->regex.field_len = field->size; | 794 | pred->regex.field_len = field->size; |
| 782 | } else if (field->filter_type == FILTER_DYN_STRING) | 795 | } else if (field->filter_type == FILTER_DYN_STRING) |
| 783 | fn = filter_pred_strloc; | 796 | fn = filter_pred_strloc; |
| 784 | else { | 797 | else |
| 785 | fn = filter_pred_pchar; | 798 | fn = filter_pred_pchar; |
| 786 | pred->regex.field_len = strlen(pred->regex.pattern); | ||
| 787 | } | ||
| 788 | } else { | 799 | } else { |
| 789 | if (field->is_signed) | 800 | if (field->is_signed) |
| 790 | ret = strict_strtoll(pred->regex.pattern, 0, &val); | 801 | ret = strict_strtoll(pred->regex.pattern, 0, &val); |
