about summary refs log tree commit diff stats
path: root/tools
diff options
context:
space:
mode:
authorAdrian Hunter <adrian.hunter@intel.com>2013-08-27 04:23:04 -0400
committerArnaldo Carvalho de Melo <acme@redhat.com>2013-08-29 14:10:02 -0400
commit03b6ea9b91e0914caa847a1ade759af549555298 (patch)
treec06dc11a4e6d2f95c51dd9f1160d392cbef45a82 /tools
parent314add6b1f045b59ca39683bd0cbc5310cd203f2 (diff)
perf evsel: Tidy up sample parsing overflow checking
The size of data retrieved from a sample event must be validated to ensure it does not go past the end of the event. That was being done sporadically and without considering integer overflows.

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Acked-by: Jiri Olsa <jolsa@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Namhyung Kim <namhyung@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/r/1377591794-30553-3-git-send-email-adrian.hunter@intel.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools')
-rw-r--r--tools/perf/util/evsel.c112
1 file changed, 71 insertions(+), 41 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 47cbe1e58b73..9a5fb23ff8e2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1131,24 +1131,30 @@ static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
1131 return 0; 1131 return 0;
1132} 1132}
1133 1133
1134static bool sample_overlap(const union perf_event *event, 1134static inline bool overflow(const void *endp, u16 max_size, const void *offset,
1135 const void *offset, u64 size) 1135 u64 size)
1136{ 1136{
1137 const void *base = event; 1137 return size > max_size || offset + size > endp;
1138}
1138 1139
1139 if (offset + size > base + event->header.size) 1140#define OVERFLOW_CHECK(offset, size, max_size) \
1140 return true; 1141 do { \
1142 if (overflow(endp, (max_size), (offset), (size))) \
1143 return -EFAULT; \
1144 } while (0)
1141 1145
1142 return false; 1146#define OVERFLOW_CHECK_u64(offset) \
1143} 1147 OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
1144 1148
1145int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event, 1149int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
1146 struct perf_sample *data) 1150 struct perf_sample *data)
1147{ 1151{
1148 u64 type = evsel->attr.sample_type; 1152 u64 type = evsel->attr.sample_type;
1149 u64 regs_user = evsel->attr.sample_regs_user;
1150 bool swapped = evsel->needs_swap; 1153 bool swapped = evsel->needs_swap;
1151 const u64 *array; 1154 const u64 *array;
1155 u16 max_size = event->header.size;
1156 const void *endp = (void *)event + max_size;
1157 u64 sz;
1152 1158
1153 /* 1159 /*
1154 * used for cross-endian analysis. See git commit 65014ab3 1160 * used for cross-endian analysis. See git commit 65014ab3
@@ -1170,6 +1176,11 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
1170 1176
1171 array = event->sample.array; 1177 array = event->sample.array;
1172 1178
1179 /*
1180 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
1181 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
1182 * check the format does not go past the end of the event.
1183 */
1173 if (evsel->sample_size + sizeof(event->header) > event->header.size) 1184 if (evsel->sample_size + sizeof(event->header) > event->header.size)
1174 return -EFAULT; 1185 return -EFAULT;
1175 1186
@@ -1235,6 +1246,7 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
1235 if (type & PERF_SAMPLE_READ) { 1246 if (type & PERF_SAMPLE_READ) {
1236 u64 read_format = evsel->attr.read_format; 1247 u64 read_format = evsel->attr.read_format;
1237 1248
1249 OVERFLOW_CHECK_u64(array);
1238 if (read_format & PERF_FORMAT_GROUP) 1250 if (read_format & PERF_FORMAT_GROUP)
1239 data->read.group.nr = *array; 1251 data->read.group.nr = *array;
1240 else 1252 else
@@ -1243,41 +1255,51 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
1243 array++; 1255 array++;
1244 1256
1245 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) { 1257 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
1258 OVERFLOW_CHECK_u64(array);
1246 data->read.time_enabled = *array; 1259 data->read.time_enabled = *array;
1247 array++; 1260 array++;
1248 } 1261 }
1249 1262
1250 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) { 1263 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
1264 OVERFLOW_CHECK_u64(array);
1251 data->read.time_running = *array; 1265 data->read.time_running = *array;
1252 array++; 1266 array++;
1253 } 1267 }
1254 1268
1255 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */ 1269 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
1256 if (read_format & PERF_FORMAT_GROUP) { 1270 if (read_format & PERF_FORMAT_GROUP) {
1257 data->read.group.values = (struct sample_read_value *) array; 1271 const u64 max_group_nr = UINT64_MAX /
1258 array = (void *) array + data->read.group.nr * 1272 sizeof(struct sample_read_value);
1259 sizeof(struct sample_read_value); 1273
1274 if (data->read.group.nr > max_group_nr)
1275 return -EFAULT;
1276 sz = data->read.group.nr *
1277 sizeof(struct sample_read_value);
1278 OVERFLOW_CHECK(array, sz, max_size);
1279 data->read.group.values =
1280 (struct sample_read_value *)array;
1281 array = (void *)array + sz;
1260 } else { 1282 } else {
1283 OVERFLOW_CHECK_u64(array);
1261 data->read.one.id = *array; 1284 data->read.one.id = *array;
1262 array++; 1285 array++;
1263 } 1286 }
1264 } 1287 }
1265 1288
1266 if (type & PERF_SAMPLE_CALLCHAIN) { 1289 if (type & PERF_SAMPLE_CALLCHAIN) {
1267 if (sample_overlap(event, array, sizeof(data->callchain->nr))) 1290 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
1268 return -EFAULT;
1269
1270 data->callchain = (struct ip_callchain *)array;
1271 1291
1272 if (sample_overlap(event, array, data->callchain->nr)) 1292 OVERFLOW_CHECK_u64(array);
1293 data->callchain = (struct ip_callchain *)array++;
1294 if (data->callchain->nr > max_callchain_nr)
1273 return -EFAULT; 1295 return -EFAULT;
1274 1296 sz = data->callchain->nr * sizeof(u64);
1275 array += 1 + data->callchain->nr; 1297 OVERFLOW_CHECK(array, sz, max_size);
1298 array = (void *)array + sz;
1276 } 1299 }
1277 1300
1278 if (type & PERF_SAMPLE_RAW) { 1301 if (type & PERF_SAMPLE_RAW) {
1279 const u64 *pdata; 1302 OVERFLOW_CHECK_u64(array);
1280
1281 u.val64 = *array; 1303 u.val64 = *array;
1282 if (WARN_ONCE(swapped, 1304 if (WARN_ONCE(swapped,
1283 "Endianness of raw data not corrected!\n")) { 1305 "Endianness of raw data not corrected!\n")) {
@@ -1286,65 +1308,73 @@ int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
1286 u.val32[0] = bswap_32(u.val32[0]); 1308 u.val32[0] = bswap_32(u.val32[0]);
1287 u.val32[1] = bswap_32(u.val32[1]); 1309 u.val32[1] = bswap_32(u.val32[1]);
1288 } 1310 }
1289
1290 if (sample_overlap(event, array, sizeof(u32)))
1291 return -EFAULT;
1292
1293 data->raw_size = u.val32[0]; 1311 data->raw_size = u.val32[0];
1294 pdata = (void *) array + sizeof(u32); 1312 array = (void *)array + sizeof(u32);
1295
1296 if (sample_overlap(event, pdata, data->raw_size))
1297 return -EFAULT;
1298 1313
1299 data->raw_data = (void *) pdata; 1314 OVERFLOW_CHECK(array, data->raw_size, max_size);
1300 1315 data->raw_data = (void *)array;
1301 array = (void *)array + data->raw_size + sizeof(u32); 1316 array = (void *)array + data->raw_size;
1302 } 1317 }
1303 1318
1304 if (type & PERF_SAMPLE_BRANCH_STACK) { 1319 if (type & PERF_SAMPLE_BRANCH_STACK) {
1305 u64 sz; 1320 const u64 max_branch_nr = UINT64_MAX /
1321 sizeof(struct branch_entry);
1306 1322
1307 data->branch_stack = (struct branch_stack *)array; 1323 OVERFLOW_CHECK_u64(array);
1308 array++; /* nr */ 1324 data->branch_stack = (struct branch_stack *)array++;
1309 1325
1326 if (data->branch_stack->nr > max_branch_nr)
1327 return -EFAULT;
1310 sz = data->branch_stack->nr * sizeof(struct branch_entry); 1328 sz = data->branch_stack->nr * sizeof(struct branch_entry);
1311 sz /= sizeof(u64); 1329 OVERFLOW_CHECK(array, sz, max_size);
1312 array += sz; 1330 array = (void *)array + sz;
1313 } 1331 }
1314 1332
1315 if (type & PERF_SAMPLE_REGS_USER) { 1333 if (type & PERF_SAMPLE_REGS_USER) {
1334 u64 avail;
1335
1316 /* First u64 tells us if we have any regs in sample. */ 1336 /* First u64 tells us if we have any regs in sample. */
1317 u64 avail = *array++; 1337 OVERFLOW_CHECK_u64(array);
1338 avail = *array++;
1318 1339
1319 if (avail) { 1340 if (avail) {
1341 u64 regs_user = evsel->attr.sample_regs_user;
1342
1343 sz = hweight_long(regs_user) * sizeof(u64);
1344 OVERFLOW_CHECK(array, sz, max_size);
1320 data->user_regs.regs = (u64 *)array; 1345 data->user_regs.regs = (u64 *)array;
1321 array += hweight_long(regs_user); 1346 array = (void *)array + sz;
1322 } 1347 }
1323 } 1348 }
1324 1349
1325 if (type & PERF_SAMPLE_STACK_USER) { 1350 if (type & PERF_SAMPLE_STACK_USER) {
1326 u64 size = *array++; 1351 OVERFLOW_CHECK_u64(array);
1352 sz = *array++;
1327 1353
1328 data->user_stack.offset = ((char *)(array - 1) 1354 data->user_stack.offset = ((char *)(array - 1)
1329 - (char *) event); 1355 - (char *) event);
1330 1356
1331 if (!size) { 1357 if (!sz) {
1332 data->user_stack.size = 0; 1358 data->user_stack.size = 0;
1333 } else { 1359 } else {
1360 OVERFLOW_CHECK(array, sz, max_size);
1334 data->user_stack.data = (char *)array; 1361 data->user_stack.data = (char *)array;
1335 array += size / sizeof(*array); 1362 array = (void *)array + sz;
1363 OVERFLOW_CHECK_u64(array);
1336 data->user_stack.size = *array++; 1364 data->user_stack.size = *array++;
1337 } 1365 }
1338 } 1366 }
1339 1367
1340 data->weight = 0; 1368 data->weight = 0;
1341 if (type & PERF_SAMPLE_WEIGHT) { 1369 if (type & PERF_SAMPLE_WEIGHT) {
1370 OVERFLOW_CHECK_u64(array);
1342 data->weight = *array; 1371 data->weight = *array;
1343 array++; 1372 array++;
1344 } 1373 }
1345 1374
1346 data->data_src = PERF_MEM_DATA_SRC_NONE; 1375 data->data_src = PERF_MEM_DATA_SRC_NONE;
1347 if (type & PERF_SAMPLE_DATA_SRC) { 1376 if (type & PERF_SAMPLE_DATA_SRC) {
1377 OVERFLOW_CHECK_u64(array);
1348 data->data_src = *array; 1378 data->data_src = *array;
1349 array++; 1379 array++;
1350 } 1380 }