 arch/x86/kernel/cpu/perf_event_intel.c |  6 +++++-
 arch/x86/kernel/cpu/perf_event_p6.c    |  2 +-
 arch/x86/tools/insn_sanity.c           | 10 ++++++++--
 kernel/events/core.c                   | 20 ++++++++++++++++++--
 4 files changed, 32 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 93b9e1181f83..4914e94ad6e8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2019,7 +2019,10 @@ __init int intel_pmu_init(void)
 		break;
 
 	case 28: /* Atom */
-	case 54: /* Cedariew */
+	case 38: /* Lincroft */
+	case 39: /* Penwell */
+	case 53: /* Cloverview */
+	case 54: /* Cedarview */
 		memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 
@@ -2084,6 +2087,7 @@ __init int intel_pmu_init(void)
 		pr_cont("SandyBridge events, ");
 		break;
 	case 58: /* IvyBridge */
+	case 62: /* IvyBridge EP */
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
 		       sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
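
The case labels added above are decimal display-model numbers for family 6 CPUs. For readers who want to check which branch a given machine would take, here is a small userspace sketch (my illustration, not part of the patch) that derives family and model from CPUID leaf 1 the way the architecture defines them:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	family = (eax >> 8) & 0xf;
	model  = (eax >> 4) & 0xf;
	if (family == 0xf)
		family += (eax >> 20) & 0xff;
	if (family == 0x6 || family == 0xf)
		model += ((eax >> 16) & 0xf) << 4;

	/* e.g. family 6, model 54 would now hit the Cedarview case above. */
	printf("family %u, model %u\n", family, model);
	return 0;
}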
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index f2af39f5dc3d..4820c232a0b9 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -19,7 +19,7 @@ static const u64 p6_perfmon_event_map[] =
 
 };
 
-static __initconst u64 p6_hw_cache_event_ids
+static u64 p6_hw_cache_event_ids
 				[PERF_COUNT_HW_CACHE_MAX]
 				[PERF_COUNT_HW_CACHE_OP_MAX]
 				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
diff --git a/arch/x86/tools/insn_sanity.c b/arch/x86/tools/insn_sanity.c
index cc2f8c131286..872eb60e7806 100644
--- a/arch/x86/tools/insn_sanity.c
+++ b/arch/x86/tools/insn_sanity.c
@@ -55,7 +55,7 @@ static FILE *input_file; /* Input file name */
 static void usage(const char *err)
 {
 	if (err)
-		fprintf(stderr, "Error: %s\n\n", err);
+		fprintf(stderr, "%s: Error: %s\n\n", prog, err);
 	fprintf(stderr, "Usage: %s [-y|-n|-v] [-s seed[,no]] [-m max] [-i input]\n", prog);
 	fprintf(stderr, "\t-y 64bit mode\n");
 	fprintf(stderr, "\t-n 32bit mode\n");
@@ -269,7 +269,13 @@ int main(int argc, char **argv)
 		insns++;
 	}
 
-	fprintf(stdout, "%s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n", (errors) ? "Failure" : "Success", insns, (input_file) ? "given" : "random", errors, seed);
+	fprintf(stdout, "%s: %s: decoded and checked %d %s instructions with %d errors (seed:0x%x)\n",
+		prog,
+		(errors) ? "Failure" : "Success",
+		insns,
+		(input_file) ? "given" : "random",
+		errors,
+		seed);
 
 	return errors ? 1 : 0;
 }
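
The two insn_sanity.c hunks make the same cosmetic change: the error path and the final result line are prefixed with the program name, so output from several test binaries can be told apart. A minimal standalone illustration of the pattern (hypothetical program, not the tool itself):

#include <stdio.h>

static const char *prog;

static void report(int insns, int errors)
{
	/* Prefix each message with the program name, as the patch does. */
	fprintf(stdout, "%s: %s: checked %d instructions with %d errors\n",
		prog, errors ? "Failure" : "Success", insns, errors);
}

int main(int argc, char **argv)
{
	(void)argc;
	prog = argv[0];
	report(1000, 0);
	return 0;
}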
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 301079d06f24..7b6646a8c067 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -908,6 +908,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 }
 
 /*
+ * Initialize event state based on the perf_event_attr::disabled.
+ */
+static inline void perf_event__state_init(struct perf_event *event)
+{
+	event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
+					      PERF_EVENT_STATE_INACTIVE;
+}
+
+/*
  * Called at perf_event creation and when events are attached/detached from a
  * group.
  */
@@ -6179,8 +6188,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 	event->overflow_handler	= overflow_handler;
 	event->overflow_handler_context = context;
 
-	if (attr->disabled)
-		event->state = PERF_EVENT_STATE_OFF;
+	perf_event__state_init(event);
 
 	pmu = NULL;
 
@@ -6609,9 +6617,17 @@ SYSCALL_DEFINE5(perf_event_open,
 
 		mutex_lock(&gctx->mutex);
 		perf_remove_from_context(group_leader);
+
+		/*
+		 * Removing from the context ends up with disabled
+		 * event. What we want here is event in the initial
+		 * startup state, ready to be add into new context.
+		 */
+		perf_event__state_init(group_leader);
 		list_for_each_entry(sibling, &group_leader->sibling_list,
 				    group_entry) {
 			perf_remove_from_context(sibling);
+			perf_event__state_init(sibling);
 			put_ctx(gctx);
 		}
 		mutex_unlock(&gctx->mutex);
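
For context on the perf_event_open() hunk: the block above runs when a hardware event is attached to a group whose leader is a software event, so the whole group has to migrate into the hardware context. As the new comment notes, perf_remove_from_context() leaves the moved events disabled, and the added perf_event__state_init() calls return the leader and its siblings to the startup state implied by their attr.disabled before they are installed in the new context. A userspace sketch (my illustration, not part of the patch; the event choices are arbitrary) that sets up exactly such a mixed software/hardware group:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* No glibc wrapper exists for perf_event_open, so call it directly. */
static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			   int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static int open_counter(unsigned int type, unsigned long long config,
			int disabled, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size     = sizeof(attr);
	attr.type     = type;
	attr.config   = config;
	attr.disabled = disabled;
	return perf_event_open(&attr, 0, -1, group_fd, 0);
}

int main(void)
{
	uint64_t val;
	int leader, sibling;

	/* Software event as group leader, created disabled. */
	leader = open_counter(PERF_TYPE_SOFTWARE, PERF_COUNT_SW_TASK_CLOCK, 1, -1);
	if (leader < 0) {
		perror("perf_event_open leader");
		return 1;
	}

	/* Hardware sibling: attaching it forces the group into the hw
	 * context, i.e. the move path patched above. */
	sibling = open_counter(PERF_TYPE_HARDWARE, PERF_COUNT_HW_INSTRUCTIONS,
			       0, leader);
	if (sibling < 0) {
		perror("perf_event_open sibling");
		return 1;
	}

	ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
	for (volatile unsigned long i = 0; i < 1000000; i++)
		;	/* some work to count */
	ioctl(leader, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);

	if (read(leader, &val, sizeof(val)) == sizeof(val))
		printf("task-clock:   %llu ns\n", (unsigned long long)val);
	if (read(sibling, &val, sizeof(val)) == sizeof(val))
		printf("instructions: %llu\n", (unsigned long long)val);
	return 0;
}

Measuring the calling process normally needs no special privileges, though a restrictive kernel.perf_event_paranoid setting can still make the open calls fail.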