Diffstat (limited to 'kernel')

 kernel/hw_breakpoint.c      |  2 +-
 kernel/kfifo.c              |  3 ++-
 kernel/perf_event.c         | 13 ++++++-------
 kernel/softirq.c            | 15 +++++----------
 kernel/sys.c                |  2 ++
 kernel/trace/trace_kprobe.c |  2 +-
 kernel/trace/trace_stack.c  | 24 ++++++++++++++++++++++++
 7 files changed, 41 insertions(+), 20 deletions(-)
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index 8a5c7d55ac9f..967e66143e11 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -360,8 +360,8 @@ EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
 {
 	u64 old_addr = bp->attr.bp_addr;
+	u64 old_len = bp->attr.bp_len;
 	int old_type = bp->attr.bp_type;
-	int old_len = bp->attr.bp_len;
 	int err = 0;
 
 	perf_event_disable(bp);
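
The hw_breakpoint hunk is a type fix: in the perf ABI, bp_addr and bp_len are both __u64 (bp_type is a __u32), so snapshotting bp_len into an int could truncate a 64-bit length before the failure path restores it. A sketch of the save/restore pattern this function uses, reconstructed from memory rather than quoted from this tree (validate_hw_breakpoint() is a stand-in name):

	u64 old_addr = bp->attr.bp_addr;
	u64 old_len  = bp->attr.bp_len;		/* was int: could truncate a 64-bit length */
	int old_type = bp->attr.bp_type;	/* bp_type is 32-bit, int is fine here */
	int err = 0;

	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len  = attr->bp_len;
	err = validate_hw_breakpoint(bp);	/* hypothetical name for the re-validation step */
	if (err) {
		/* roll the attr back to the saved values on failure */
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len  = old_len;
	}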
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 498cabba225e..35edbe22e9a9 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -80,7 +80,7 @@ int kfifo_alloc(struct kfifo *fifo, unsigned int size, gfp_t gfp_mask)
 
 	buffer = kmalloc(size, gfp_mask);
 	if (!buffer) {
-		_kfifo_init(fifo, 0, 0);
+		_kfifo_init(fifo, NULL, 0);
 		return -ENOMEM;
 	}
 
@@ -97,6 +97,7 @@ EXPORT_SYMBOL(kfifo_alloc);
 void kfifo_free(struct kfifo *fifo)
 {
 	kfree(fifo->buffer);
+	_kfifo_init(fifo, NULL, 0);
 }
 EXPORT_SYMBOL(kfifo_free);
 
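
Both kfifo hunks touch the same helper: _kfifo_init() takes a buffer pointer, so NULL rather than 0 is the correct spelling for "no buffer", and calling it again from kfifo_free() leaves the fifo in a well-defined empty state instead of dangling at freed memory. A minimal sketch of what the helper plausibly does (in the real code the index reset may go through kfifo_reset()):

	static void _kfifo_init(struct kfifo *fifo, void *buffer, unsigned int size)
	{
		fifo->buffer = buffer;
		fifo->size   = size;
		fifo->in = fifo->out = 0;	/* mark the fifo empty */
	}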
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index d27746bd3a06..2ae7409bf38f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3259,8 +3259,6 @@ static void perf_event_task_output(struct perf_event *event,
 	task_event->event_id.tid = perf_event_tid(event, task);
 	task_event->event_id.ptid = perf_event_tid(event, current);
 
-	task_event->event_id.time = perf_clock();
-
 	perf_output_put(&handle, task_event->event_id);
 
 	perf_output_end(&handle);
@@ -3268,7 +3266,7 @@ static void perf_event_task_output(struct perf_event *event,
 
 static int perf_event_task_match(struct perf_event *event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3300,7 +3298,7 @@ static void perf_event_task_event(struct perf_task_event *task_event)
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_event_task_ctx(&cpuctx->ctx, task_event);
 	if (!ctx)
-		ctx = rcu_dereference(task_event->task->perf_event_ctxp);
+		ctx = rcu_dereference(current->perf_event_ctxp);
 	if (ctx)
 		perf_event_task_ctx(ctx, task_event);
 	put_cpu_var(perf_cpu_context);
@@ -3331,6 +3329,7 @@ static void perf_event_task(struct task_struct *task,
 			/* .ppid */
 			/* .tid */
 			/* .ptid */
+			.time = perf_clock(),
 		},
 	};
 
@@ -3380,7 +3379,7 @@ static void perf_event_comm_output(struct perf_event *event,
 
 static int perf_event_comm_match(struct perf_event *event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -3500,7 +3499,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 static int perf_event_mmap_match(struct perf_event *event,
 				 struct perf_mmap_event *mmap_event)
 {
-	if (event->state != PERF_EVENT_STATE_ACTIVE)
+	if (event->state < PERF_EVENT_STATE_INACTIVE)
 		return 0;
 
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -4580,7 +4579,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (attr->type >= PERF_TYPE_MAX)
 		return -EINVAL;
 
-	if (attr->__reserved_1 || attr->__reserved_2)
+	if (attr->__reserved_1)
 		return -EINVAL;
 
 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
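
The three match-function hunks relax the same test: task, comm and mmap side-band records should go to every enabled event, not only to events currently scheduled on the PMU. The comparison works because the state enum is ordered, so "state < PERF_EVENT_STATE_INACTIVE" rejects only off/error events. The values below are reconstructed from memory of perf_event.h, not quoted from this tree:

	enum perf_event_active_state {
		PERF_EVENT_STATE_ERROR		= -2,	/* rejected by the new test */
		PERF_EVENT_STATE_OFF		= -1,	/* rejected: disabled */
		PERF_EVENT_STATE_INACTIVE	=  0,	/* now matched: enabled, not scheduled */
		PERF_EVENT_STATE_ACTIVE		=  1,	/* matched before and after this change */
	};

The first and fourth hunks pair up: the event timestamp moves from perf_event_task_output(), which runs once per observing event, into the record built in perf_event_task(), so every observer of a given fork/exit sees one consistent .time.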
diff --git a/kernel/softirq.c b/kernel/softirq.c
index a09502e2ef75..7c1a67ef0274 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -500,22 +500,17 @@ EXPORT_SYMBOL(tasklet_kill);
  */
 
 /*
- * The trampoline is called when the hrtimer expires. If this is
- * called from the hrtimer interrupt then we schedule the tasklet as
- * the timer callback function expects to run in softirq context. If
- * it's called in softirq context anyway (i.e. high resolution timers
- * disabled) then the hrtimer callback is called right away.
+ * The trampoline is called when the hrtimer expires. It schedules a tasklet
+ * to run __tasklet_hrtimer_trampoline() which in turn will call the intended
+ * hrtimer callback, but from softirq context.
  */
 static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
 {
 	struct tasklet_hrtimer *ttimer =
 		container_of(timer, struct tasklet_hrtimer, timer);
 
-	if (hrtimer_is_hres_active(timer)) {
-		tasklet_hi_schedule(&ttimer->tasklet);
-		return HRTIMER_NORESTART;
-	}
-	return ttimer->function(timer);
+	tasklet_hi_schedule(&ttimer->tasklet);
+	return HRTIMER_NORESTART;
 }
 
 /*
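
With the hres_active check gone, the hrtimer half now unconditionally defers to the tasklet, which runs the user's callback in softirq context and re-arms the timer if asked. For context, the tasklet-side trampoline the new comment refers to looks roughly like this (close to the actual code, but reproduced from memory):

	static void __tasklet_hrtimer_trampoline(unsigned long data)
	{
		struct tasklet_hrtimer *ttimer = (void *)data;
		enum hrtimer_restart restart;

		/* the user's hrtimer function, now always in softirq context */
		restart = ttimer->function(&ttimer->timer);
		if (restart != HRTIMER_NORESTART)
			hrtimer_restart(&ttimer->timer);
	}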
diff --git a/kernel/sys.c b/kernel/sys.c
index 26a6b73a6b85..18bde979f346 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -222,6 +222,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
 	if (which > PRIO_USER || which < PRIO_PROCESS)
 		return -EINVAL;
 
+	rcu_read_lock();
 	read_lock(&tasklist_lock);
 	switch (which) {
 	case PRIO_PROCESS:
@@ -267,6 +268,7 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
 	}
 out_unlock:
 	read_unlock(&tasklist_lock);
+	rcu_read_unlock();
 
 	return retval;
 }
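
getpriority() resolves pids to tasks, and those lookups (find_task_by_vpid() and friends) are only valid under rcu_read_lock(); holding tasklist_lock by itself no longer implies RCU protection. A simplified sketch of the pattern the new lock pair covers, condensed from the PRIO_PROCESS case (the real code tracks the maximum niceval across matching tasks):

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		p = who ? find_task_by_vpid(who) : current;	/* RCU-protected pid lookup */
		if (p)
			retval = 20 - task_nice(p);
		break;
	/* PRIO_PGRP and PRIO_USER iterate over tasks similarly */
	}
	read_unlock(&tasklist_lock);
	rcu_read_unlock();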
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 6ea90c0e2c96..50b1b8239806 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -689,7 +689,7 @@ static int create_trace_probe(int argc, char **argv)
 			return -EINVAL;
 		}
 		/* an address specified */
-		ret = strict_strtoul(&argv[0][2], 0, (unsigned long *)&addr);
+		ret = strict_strtoul(&argv[1][0], 0, (unsigned long *)&addr);
 		if (ret) {
 			pr_info("Failed to parse address.\n");
 			return ret;
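
The kprobe-events fix is about which token holds the address: argv[0] is the command/event part and argv[1] is the probe point, so the address must be parsed out of argv[1] (&argv[1][0] is simply argv[1], spelled to mirror the old expression). With a made-up event name and address, a command written to /sys/kernel/debug/tracing/kprobe_events such as 'p:myprobe 0x10' splits as:

	/* argv[0] = "p:myprobe" : command plus event name     */
	/* argv[1] = "0x10"      : the address token parsed here */
	ret = strict_strtoul(argv[1], 0, (unsigned long *)&addr);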
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 678a5120ee30..f4bc9b27de5f 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -157,6 +157,7 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 	unsigned long val, flags;
 	char buf[64];
 	int ret;
+	int cpu;
 
 	if (count >= sizeof(buf))
 		return -EINVAL;
@@ -171,9 +172,20 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
 		return ret;
 
 	local_irq_save(flags);
+
+	/*
+	 * In case we trace inside arch_spin_lock() or after (NMI),
+	 * we will cause circular lock, so we also need to increase
+	 * the percpu trace_active here.
+	 */
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
 	arch_spin_lock(&max_stack_lock);
 	*ptr = val;
 	arch_spin_unlock(&max_stack_lock);
+
+	per_cpu(trace_active, cpu)--;
 	local_irq_restore(flags);
 
 	return count;
@@ -206,7 +218,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
+	int cpu;
+
 	local_irq_disable();
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)++;
+
 	arch_spin_lock(&max_stack_lock);
 
 	if (*pos == 0)
@@ -217,7 +235,13 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 
 static void t_stop(struct seq_file *m, void *p)
 {
+	int cpu;
+
 	arch_spin_unlock(&max_stack_lock);
+
+	cpu = smp_processor_id();
+	per_cpu(trace_active, cpu)--;
+
 	local_irq_enable();
 }
 
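
trace_active is the stack tracer's per-CPU recursion guard: the ftrace callback bails out whenever the counter is already non-zero. max_stack_lock is a raw arch spinlock, which is neither reentrant nor lockdep-tracked, so if the callback fired (via tracing or an NMI) while this CPU already held the lock, it would spin on itself forever; bumping trace_active around the critical sections makes the callback treat them as "already tracing". The guard in the callback looks roughly like this (abbreviated, from memory of this file):

	static void stack_trace_call(unsigned long ip, unsigned long parent_ip)
	{
		int cpu;

		preempt_disable_notrace();

		cpu = raw_smp_processor_id();
		/* non-zero: this CPU is already in the tracer or holds the lock */
		if (per_cpu(trace_active, cpu)++ != 0)
			goto out;

		check_stack();	/* takes max_stack_lock */
	out:
		per_cpu(trace_active, cpu)--;
		preempt_enable_notrace();
	}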
