diff options
Diffstat (limited to 'kernel/perf_event.c')
| -rw-r--r-- | kernel/perf_event.c | 23 |
1 files changed, 12 insertions, 11 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 05ebe841270b..84522c796987 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
| @@ -2228,14 +2228,11 @@ find_get_context(struct pmu *pmu, struct task_struct *task, int cpu) | |||
| 2228 | unsigned long flags; | 2228 | unsigned long flags; |
| 2229 | int ctxn, err; | 2229 | int ctxn, err; |
| 2230 | 2230 | ||
| 2231 | if (!task && cpu != -1) { | 2231 | if (!task) { |
| 2232 | /* Must be root to operate on a CPU event: */ | 2232 | /* Must be root to operate on a CPU event: */ |
| 2233 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) | 2233 | if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN)) |
| 2234 | return ERR_PTR(-EACCES); | 2234 | return ERR_PTR(-EACCES); |
| 2235 | 2235 | ||
| 2236 | if (cpu < 0 || cpu >= nr_cpumask_bits) | ||
| 2237 | return ERR_PTR(-EINVAL); | ||
| 2238 | |||
| 2239 | /* | 2236 | /* |
| 2240 | * We could be clever and allow to attach a event to an | 2237 | * We could be clever and allow to attach a event to an |
| 2241 | * offline CPU and activate it when the CPU comes up, but | 2238 | * offline CPU and activate it when the CPU comes up, but |
| @@ -5541,6 +5538,11 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, | |||
| 5541 | struct hw_perf_event *hwc; | 5538 | struct hw_perf_event *hwc; |
| 5542 | long err; | 5539 | long err; |
| 5543 | 5540 | ||
| 5541 | if ((unsigned)cpu >= nr_cpu_ids) { | ||
| 5542 | if (!task || cpu != -1) | ||
| 5543 | return ERR_PTR(-EINVAL); | ||
| 5544 | } | ||
| 5545 | |||
| 5544 | event = kzalloc(sizeof(*event), GFP_KERNEL); | 5546 | event = kzalloc(sizeof(*event), GFP_KERNEL); |
| 5545 | if (!event) | 5547 | if (!event) |
| 5546 | return ERR_PTR(-ENOMEM); | 5548 | return ERR_PTR(-ENOMEM); |
| @@ -5589,7 +5591,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, | |||
| 5589 | 5591 | ||
| 5590 | if (!overflow_handler && parent_event) | 5592 | if (!overflow_handler && parent_event) |
| 5591 | overflow_handler = parent_event->overflow_handler; | 5593 | overflow_handler = parent_event->overflow_handler; |
| 5592 | 5594 | ||
| 5593 | event->overflow_handler = overflow_handler; | 5595 | event->overflow_handler = overflow_handler; |
| 5594 | 5596 | ||
| 5595 | if (attr->disabled) | 5597 | if (attr->disabled) |
| @@ -6494,7 +6496,6 @@ int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 6494 | 6496 | ||
| 6495 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); | 6497 | raw_spin_lock_irqsave(&parent_ctx->lock, flags); |
| 6496 | parent_ctx->rotate_disable = 0; | 6498 | parent_ctx->rotate_disable = 0; |
| 6497 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); | ||
| 6498 | 6499 | ||
| 6499 | child_ctx = child->perf_event_ctxp[ctxn]; | 6500 | child_ctx = child->perf_event_ctxp[ctxn]; |
| 6500 | 6501 | ||
| @@ -6502,12 +6503,11 @@ int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 6502 | /* | 6503 | /* |
| 6503 | * Mark the child context as a clone of the parent | 6504 | * Mark the child context as a clone of the parent |
| 6504 | * context, or of whatever the parent is a clone of. | 6505 | * context, or of whatever the parent is a clone of. |
| 6505 | * Note that if the parent is a clone, it could get | 6506 | * |
| 6506 | * uncloned at any point, but that doesn't matter | 6507 | * Note that if the parent is a clone, the holding of |
| 6507 | * because the list of events and the generation | 6508 | * parent_ctx->lock avoids it from being uncloned. |
| 6508 | * count can't have changed since we took the mutex. | ||
| 6509 | */ | 6509 | */ |
| 6510 | cloned_ctx = rcu_dereference(parent_ctx->parent_ctx); | 6510 | cloned_ctx = parent_ctx->parent_ctx; |
| 6511 | if (cloned_ctx) { | 6511 | if (cloned_ctx) { |
| 6512 | child_ctx->parent_ctx = cloned_ctx; | 6512 | child_ctx->parent_ctx = cloned_ctx; |
| 6513 | child_ctx->parent_gen = parent_ctx->parent_gen; | 6513 | child_ctx->parent_gen = parent_ctx->parent_gen; |
| @@ -6518,6 +6518,7 @@ int perf_event_init_context(struct task_struct *child, int ctxn) | |||
| 6518 | get_ctx(child_ctx->parent_ctx); | 6518 | get_ctx(child_ctx->parent_ctx); |
| 6519 | } | 6519 | } |
| 6520 | 6520 | ||
| 6521 | raw_spin_unlock_irqrestore(&parent_ctx->lock, flags); | ||
| 6521 | mutex_unlock(&parent_ctx->mutex); | 6522 | mutex_unlock(&parent_ctx->mutex); |
| 6522 | 6523 | ||
| 6523 | perf_unpin_context(parent_ctx); | 6524 | perf_unpin_context(parent_ctx); |
