author     Peter Zijlstra <peterz@infradead.org>    2015-09-09 13:06:33 -0400
committer  Ingo Molnar <mingo@kernel.org>           2015-09-18 03:20:25 -0400
commit     a723968c0ed36db676478c3d26078f13484fe01c
tree       291202901e28ed77ac9226d6fb2d6b0178a8497c
parent     f55fc2a57cc9ca3b1bb4fb8eb25b6e1989e5b993

perf: Fix u16 overflows
Vince reported that it's possible to overflow the various size fields
and get weird stuff if you stick too many events in a group.

Put a lid on this by requiring that the fixed record size not exceed
16k. That still allows a fair number of events (a silly number,
really) and leaves plenty of room for callchains and DWARF stack dumps,
while also avoiding overflow of the u16 variables.
Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 kernel/events/core.c | 50 ++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 40 insertions(+), 10 deletions(-)
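
To make the failure mode concrete, here is a small stand-alone model of the
read_size arithmetic for read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID
(a sketch only; model_read_size is a made-up helper, not the kernel code):
each event contributes 16 bytes (value + id) on top of an 8-byte count, so a
group of 4096 events needs 65544 bytes, which silently wraps when truncated
to a u16.

#include <stdint.h>
#include <stdio.h>

/*
 * Simplified model of __perf_event_read_size() for read_format =
 * PERF_FORMAT_GROUP | PERF_FORMAT_ID: an 8-byte 'nr' count plus
 * (value + id) per event.  A sketch, not the kernel code itself.
 */
static uint64_t model_read_size(int nr_siblings)
{
	uint64_t entry = 2 * sizeof(uint64_t);	/* value + id */
	uint64_t nr = 1 + nr_siblings;		/* leader + siblings */

	return sizeof(uint64_t) + nr * entry;	/* 'nr' field + entries */
}

int main(void)
{
	uint64_t size = model_read_size(4095);	/* 4096 events total */

	/* 8 + 4096 * 16 = 65544; stored in a u16 this wraps to 8. */
	printf("true size %llu, truncated to u16: %u\n",
	       (unsigned long long)size, (unsigned)(uint16_t)size);
	return 0;
}

With the patch below, perf_event_validate_size() rejects the group long
before that point: read_size + header_size + id_header_size +
sizeof(struct perf_event_header) must stay under 16k, which for this
read_format works out to roughly a thousand events.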
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 39679f749500..dbb5329b6a3a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1243,11 +1243,7 @@ static inline void perf_event__state_init(struct perf_event *event)
 			      PERF_EVENT_STATE_INACTIVE;
 }
 
-/*
- * Called at perf_event creation and when events are attached/detached from a
- * group.
- */
-static void perf_event__read_size(struct perf_event *event)
+static void __perf_event_read_size(struct perf_event *event, int nr_siblings)
 {
 	int entry = sizeof(u64); /* value */
 	int size = 0;
@@ -1263,7 +1259,7 @@ static void perf_event__read_size(struct perf_event *event)
 		entry += sizeof(u64);
 
 	if (event->attr.read_format & PERF_FORMAT_GROUP) {
-		nr += event->group_leader->nr_siblings;
+		nr += nr_siblings;
 		size += sizeof(u64);
 	}
 
@@ -1271,14 +1267,11 @@ static void perf_event__read_size(struct perf_event *event)
 	event->read_size = size;
 }
 
-static void perf_event__header_size(struct perf_event *event)
+static void __perf_event_header_size(struct perf_event *event, u64 sample_type)
 {
 	struct perf_sample_data *data;
-	u64 sample_type = event->attr.sample_type;
 	u16 size = 0;
 
-	perf_event__read_size(event);
-
 	if (sample_type & PERF_SAMPLE_IP)
 		size += sizeof(data->ip);
 
@@ -1303,6 +1296,17 @@ static void perf_event__header_size(struct perf_event *event)
 	event->header_size = size;
 }
 
+/*
+ * Called at perf_event creation and when events are attached/detached from a
+ * group.
+ */
+static void perf_event__header_size(struct perf_event *event)
+{
+	__perf_event_read_size(event,
+			       event->group_leader->nr_siblings);
+	__perf_event_header_size(event, event->attr.sample_type);
+}
+
 static void perf_event__id_header_size(struct perf_event *event)
 {
 	struct perf_sample_data *data;
@@ -1330,6 +1334,27 @@ static void perf_event__id_header_size(struct perf_event *event)
 	event->id_header_size = size;
 }
 
+static bool perf_event_validate_size(struct perf_event *event)
+{
+	/*
+	 * The values computed here will be over-written when we actually
+	 * attach the event.
+	 */
+	__perf_event_read_size(event, event->group_leader->nr_siblings + 1);
+	__perf_event_header_size(event, event->attr.sample_type & ~PERF_SAMPLE_READ);
+	perf_event__id_header_size(event);
+
+	/*
+	 * Sum the lot; should not exceed the 64k limit we have on records.
+	 * Conservative limit to allow for callchains and other variable fields.
+	 */
+	if (event->read_size + event->header_size +
+	    event->id_header_size + sizeof(struct perf_event_header) >= 16*1024)
+		return false;
+
+	return true;
+}
+
 static void perf_group_attach(struct perf_event *event)
 {
 	struct perf_event *group_leader = event->group_leader, *pos;
@@ -8302,6 +8327,11 @@ SYSCALL_DEFINE5(perf_event_open,
 		mutex_lock(&ctx->mutex);
 	}
 
+	if (!perf_event_validate_size(event)) {
+		err = -E2BIG;
+		goto err_locked;
+	}
+
 	/*
 	 * Must be under the same ctx::mutex as perf_install_in_context(),
 	 * because we need to serialize with concurrent event creation.
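
For completeness, a user-space sketch (not part of the commit) of how the
new check surfaces: keep attaching events to one group and, on a kernel
with this patch, perf_event_open() should start failing with E2BIG once the
would-be record size crosses the 16k cap. PERF_COUNT_SW_DUMMY is used only
as a cheap placeholder event; note that RLIMIT_NOFILE may need raising past
~1100, otherwise EMFILE can fire first.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	int leader = -1, n;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_DUMMY;
	/* 16 bytes per event in a group read: value + id */
	attr.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID;

	for (n = 0; ; n++) {
		/* perf_event_open() has no glibc wrapper; use syscall(2). */
		int fd = syscall(__NR_perf_event_open, &attr, 0, -1, leader, 0);

		if (fd < 0) {
			/* expect E2BIG at roughly a thousand events */
			printf("event %d: %s\n", n, strerror(errno));
			return 0;
		}
		if (leader == -1)
			leader = fd;
	}
}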