Diffstat (limited to 'kernel/trace')
27 files changed, 1727 insertions(+), 1565 deletions(-)
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 67d6369ddf83..979ccde26720 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -55,7 +55,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
 obj-$(CONFIG_TRACEPOINTS) += power-traces.o
-ifeq ($(CONFIG_PM_RUNTIME),y)
+ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_TRACEPOINTS) += rpm-traces.o
 endif
 ifeq ($(CONFIG_TRACING),y)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index c1bd4ada2a04..483cecfa5c17 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -1142,9 +1142,9 @@ static void get_pdu_remap(const struct trace_entry *ent,
 	r->sector_from = be64_to_cpu(sector_from);
 }
 
-typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act);
+typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act);
 
-static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
+static void blk_log_action_classic(struct trace_iterator *iter, const char *act)
 {
 	char rwbs[RWBS_LEN];
 	unsigned long long ts = iter->ts;
@@ -1154,33 +1154,33 @@ static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
 
 	fill_rwbs(rwbs, t);
 
-	return trace_seq_printf(&iter->seq,
+	trace_seq_printf(&iter->seq,
 			"%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
 			MAJOR(t->device), MINOR(t->device), iter->cpu,
 			secs, nsec_rem, iter->ent->pid, act, rwbs);
 }
 
-static int blk_log_action(struct trace_iterator *iter, const char *act)
+static void blk_log_action(struct trace_iterator *iter, const char *act)
 {
 	char rwbs[RWBS_LEN];
 	const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
 
 	fill_rwbs(rwbs, t);
-	return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
+	trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
 			MAJOR(t->device), MINOR(t->device), act, rwbs);
 }
 
-static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
+static void blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
 {
 	const unsigned char *pdu_buf;
 	int pdu_len;
-	int i, end, ret;
+	int i, end;
 
 	pdu_buf = pdu_start(ent);
 	pdu_len = te_blk_io_trace(ent)->pdu_len;
 
 	if (!pdu_len)
-		return 1;
+		return;
 
 	/* find the last zero that needs to be printed */
 	for (end = pdu_len - 1; end >= 0; end--)
@@ -1188,119 +1188,107 @@ static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
 			break;
 	end++;
 
-	if (!trace_seq_putc(s, '('))
-		return 0;
+	trace_seq_putc(s, '(');
 
 	for (i = 0; i < pdu_len; i++) {
 
-		ret = trace_seq_printf(s, "%s%02x",
+		trace_seq_printf(s, "%s%02x",
 				i == 0 ? "" : " ", pdu_buf[i]);
-		if (!ret)
-			return ret;
 
 		/*
 		 * stop when the rest is just zeroes and indicate so
 		 * with a ".." appended
 		 */
-		if (i == end && end != pdu_len - 1)
-			return trace_seq_puts(s, " ..) ");
+		if (i == end && end != pdu_len - 1) {
+			trace_seq_puts(s, " ..) ");
+			return;
+		}
 	}
 
-	return trace_seq_puts(s, ") ");
+	trace_seq_puts(s, ") ");
 }
 
-static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
+static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
 {
 	char cmd[TASK_COMM_LEN];
 
 	trace_find_cmdline(ent->pid, cmd);
 
 	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
-		int ret;
-
-		ret = trace_seq_printf(s, "%u ", t_bytes(ent));
-		if (!ret)
-			return 0;
-		ret = blk_log_dump_pdu(s, ent);
-		if (!ret)
-			return 0;
-		return trace_seq_printf(s, "[%s]\n", cmd);
+		trace_seq_printf(s, "%u ", t_bytes(ent));
+		blk_log_dump_pdu(s, ent);
+		trace_seq_printf(s, "[%s]\n", cmd);
 	} else {
 		if (t_sec(ent))
-			return trace_seq_printf(s, "%llu + %u [%s]\n",
+			trace_seq_printf(s, "%llu + %u [%s]\n",
 					t_sector(ent), t_sec(ent), cmd);
-		return trace_seq_printf(s, "[%s]\n", cmd);
+		else
+			trace_seq_printf(s, "[%s]\n", cmd);
 	}
 }
 
-static int blk_log_with_error(struct trace_seq *s,
+static void blk_log_with_error(struct trace_seq *s,
 			      const struct trace_entry *ent)
 {
 	if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
-		int ret;
-
-		ret = blk_log_dump_pdu(s, ent);
-		if (ret)
-			return trace_seq_printf(s, "[%d]\n", t_error(ent));
-		return 0;
+		blk_log_dump_pdu(s, ent);
+		trace_seq_printf(s, "[%d]\n", t_error(ent));
 	} else {
 		if (t_sec(ent))
-			return trace_seq_printf(s, "%llu + %u [%d]\n",
+			trace_seq_printf(s, "%llu + %u [%d]\n",
 					t_sector(ent),
 					t_sec(ent), t_error(ent));
-		return trace_seq_printf(s, "%llu [%d]\n",
-					t_sector(ent), t_error(ent));
+		else
+			trace_seq_printf(s, "%llu [%d]\n",
+					t_sector(ent), t_error(ent));
 	}
 }
 
-static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
+static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
 {
 	struct blk_io_trace_remap r = { .device_from = 0, };
 
 	get_pdu_remap(ent, &r);
-	return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
+	trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
 			t_sector(ent), t_sec(ent),
 			MAJOR(r.device_from), MINOR(r.device_from),
 			(unsigned long long)r.sector_from);
 }
 
-static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
+static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
 {
 	char cmd[TASK_COMM_LEN];
 
 	trace_find_cmdline(ent->pid, cmd);
 
-	return trace_seq_printf(s, "[%s]\n", cmd);
+	trace_seq_printf(s, "[%s]\n", cmd);
 }
 
-static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
+static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
 {
 	char cmd[TASK_COMM_LEN];
 
 	trace_find_cmdline(ent->pid, cmd);
 
-	return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
+	trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
 }
 
-static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
+static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
 {
 	char cmd[TASK_COMM_LEN];
 
 	trace_find_cmdline(ent->pid, cmd);
 
-	return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
+	trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
 			get_pdu_int(ent), cmd);
 }
 
-static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
+static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
 {
-	int ret;
 	const struct blk_io_trace *t = te_blk_io_trace(ent);
 
-	ret = trace_seq_putmem(s, t + 1, t->pdu_len);
-	if (ret)
-		return trace_seq_putc(s, '\n');
-	return ret;
+	trace_seq_putmem(s, t + 1, t->pdu_len);
+	trace_seq_putc(s, '\n');
 }
 
 /*
@@ -1339,7 +1327,7 @@ static void blk_tracer_reset(struct trace_array *tr)
 
 static const struct {
 	const char *act[2];
-	int	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
+	void	   (*print)(struct trace_seq *s, const struct trace_entry *ent);
 } what2act[] = {
 	[__BLK_TA_QUEUE]	= {{ "Q", "queue" },	  blk_log_generic },
 	[__BLK_TA_BACKMERGE]	= {{ "M", "backmerge" },  blk_log_generic },
@@ -1364,7 +1352,6 @@ static enum print_line_t print_one_line(struct trace_iterator *iter,
 	struct trace_seq *s = &iter->seq;
 	const struct blk_io_trace *t;
 	u16 what;
-	int ret;
 	bool long_act;
 	blk_log_action_t *log_action;
 
@@ -1374,21 +1361,18 @@ static enum print_line_t print_one_line(struct trace_iterator *iter,
 	log_action = classic ? &blk_log_action_classic : &blk_log_action;
 
 	if (t->action == BLK_TN_MESSAGE) {
-		ret = log_action(iter, long_act ? "message" : "m");
-		if (ret)
-			ret = blk_log_msg(s, iter->ent);
-		goto out;
+		log_action(iter, long_act ? "message" : "m");
+		blk_log_msg(s, iter->ent);
 	}
 
 	if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
-		ret = trace_seq_printf(s, "Unknown action %x\n", what);
+		trace_seq_printf(s, "Unknown action %x\n", what);
 	else {
-		ret = log_action(iter, what2act[what].act[long_act]);
-		if (ret)
-			ret = what2act[what].print(s, iter->ent);
+		log_action(iter, what2act[what].act[long_act]);
+		what2act[what].print(s, iter->ent);
 	}
-out:
-	return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+
+	return trace_handle_return(s);
 }
 
 static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
@@ -1397,7 +1381,7 @@ static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
 	return print_one_line(iter, false);
 }
 
-static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
+static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
 {
 	struct trace_seq *s = &iter->seq;
 	struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
@@ -1407,18 +1391,18 @@ static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
 		.time     = iter->ts,
 	};
 
-	if (!trace_seq_putmem(s, &old, offset))
-		return 0;
-	return trace_seq_putmem(s, &t->sector,
-				sizeof(old) - offset + t->pdu_len);
+	trace_seq_putmem(s, &old, offset);
+	trace_seq_putmem(s, &t->sector,
+			 sizeof(old) - offset + t->pdu_len);
 }
 
 static enum print_line_t
 blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
 			     struct trace_event *event)
 {
-	return blk_trace_synthesize_old_trace(iter) ?
-		TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
+	blk_trace_synthesize_old_trace(iter);
+
+	return trace_handle_return(&iter->seq);
 }
 
 static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
@@ -1493,9 +1477,6 @@ static int blk_trace_remove_queue(struct request_queue *q)
 	if (atomic_dec_and_test(&blk_probes_ref))
 		blk_unregister_tracepoints();
 
-	spin_lock_irq(&running_trace_lock);
-	list_del(&bt->running_list);
-	spin_unlock_irq(&running_trace_lock);
 	blk_trace_free(bt);
 	return 0;
 }
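All of the blktrace.c hunks above apply one mechanical conversion: trace_seq_printf() and friends stop returning a did-it-fit code, the per-call error ladders disappear, and a single trace_handle_return() check at the end of the line reports whether everything fit. A minimal sketch of that pattern with simplified types follows (the real trace_seq and trace_handle_return() live in include/linux/trace_seq.h and differ in detail):

#include <stdbool.h>
#include <string.h>

/* Toy stand-in for struct trace_seq: overflow is latched in the
 * buffer itself instead of being reported by every writer. */
struct toy_seq {
	char		buffer[4096];
	unsigned int	len;
	bool		full;		/* set once a write did not fit */
};

enum print_line_t {
	TRACE_TYPE_PARTIAL_LINE,
	TRACE_TYPE_HANDLED,
};

static void toy_seq_puts(struct toy_seq *s, const char *str)
{
	size_t n = strlen(str);

	if (s->full || s->len + n > sizeof(s->buffer)) {
		s->full = true;		/* later writes become no-ops */
		return;
	}
	memcpy(s->buffer + s->len, str, n);
	s->len += n;
}

/* One check at the end replaces a status test after every printer,
 * which is exactly what print_one_line() does above. */
static enum print_line_t toy_handle_return(struct toy_seq *s)
{
	return s->full ? TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}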
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5916a8e59e87..929a733d302e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -113,6 +113,9 @@ ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;
 
+static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
+				   struct ftrace_ops *op, struct pt_regs *regs);
+
 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
 				 struct ftrace_ops *op, struct pt_regs *regs);
@@ -251,18 +254,24 @@ static void update_ftrace_function(void)
 	ftrace_func_t func;
 
 	/*
+	 * Prepare the ftrace_ops that the arch callback will use.
+	 * If there's only one ftrace_ops registered, the ftrace_ops_list
+	 * will point to the ops we want.
+	 */
+	set_function_trace_op = ftrace_ops_list;
+
+	/* If there's no ftrace_ops registered, just call the stub function */
+	if (ftrace_ops_list == &ftrace_list_end) {
+		func = ftrace_stub;
+
+	/*
 	 * If we are at the end of the list and this ops is
 	 * recursion safe and not dynamic and the arch supports passing ops,
 	 * then have the mcount trampoline call the function directly.
 	 */
-	if (ftrace_ops_list == &ftrace_list_end ||
-	    (ftrace_ops_list->next == &ftrace_list_end &&
-	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
-	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
-	     !FTRACE_FORCE_LIST_FUNC)) {
-		/* Set the ftrace_ops that the arch callback uses */
-		set_function_trace_op = ftrace_ops_list;
-		func = ftrace_ops_list->func;
+	} else if (ftrace_ops_list->next == &ftrace_list_end) {
+		func = ftrace_ops_get_func(ftrace_ops_list);
+
 	} else {
 		/* Just use the default ftrace_ops */
 		set_function_trace_op = &ftrace_list_end;
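The new middle branch calls ftrace_ops_get_func(), which is added elsewhere in this series and also explains the ftrace_ops_recurs_func() forward declaration in the first hunk. A sketch of its likely shape, under the assumption that its only job here is to wrap recursion-unsafe callbacks (not a verbatim quote of the helper):

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
{
	/*
	 * If the ops does not handle its own recursion, hand back a
	 * wrapper that sets up recursion protection and then calls
	 * ops->func; otherwise the trampoline may call it directly.
	 */
	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
		return ftrace_ops_recurs_func;

	return ops->func;
}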
@@ -378,6 +387,8 @@ static int remove_ftrace_list_ops(struct ftrace_ops **list,
 	return ret;
 }
 
+static void ftrace_update_trampoline(struct ftrace_ops *ops);
+
 static int __register_ftrace_function(struct ftrace_ops *ops)
 {
 	if (ops->flags & FTRACE_OPS_FL_DELETED)
@@ -407,9 +418,13 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 		if (control_ops_alloc(ops))
 			return -ENOMEM;
 		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
+		/* The control_ops needs the trampoline update */
+		ops = &control_ops;
 	} else
 		add_ftrace_ops(&ftrace_ops_list, ops);
 
+	ftrace_update_trampoline(ops);
+
 	if (ftrace_enabled)
 		update_ftrace_function();
 
@@ -556,13 +571,13 @@ static int function_stat_cmp(void *p1, void *p2)
 static int function_stat_headers(struct seq_file *m)
 {
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	seq_printf(m, "  Function                               "
+	seq_puts(m, "  Function                               "
 		 "Hit    Time            Avg             s^2\n"
 		    "  --------                               "
 		 "---    ----            ---             ---\n");
 #else
-	seq_printf(m, "  Function                               Hit\n"
+	seq_puts(m, "  Function                               Hit\n"
 		    "  --------                               ---\n");
 #endif
 	return 0;
 }
@@ -589,7 +604,7 @@ static int function_stat_show(struct seq_file *m, void *v)
 	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-	seq_printf(m, "    ");
+	seq_puts(m, "    ");
 	avg = rec->time;
 	do_div(avg, rec->counter);
 
@@ -1048,6 +1063,12 @@ static struct pid * const ftrace_swapper_pid = &init_struct_pid;
 
 static struct ftrace_ops *removed_ops;
 
+/*
+ * Set when doing a global update, like enabling all recs or disabling them.
+ * It is not set when just updating a single ftrace_ops.
+ */
+static bool update_all_ops;
+
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
@@ -1096,6 +1117,43 @@ static struct ftrace_ops global_ops = {
 				  FTRACE_OPS_FL_INITIALIZED,
 };
 
+/*
+ * This is used by __kernel_text_address() to return true if the
+ * address is on a dynamically allocated trampoline that would
+ * not return true for either core_kernel_text() or
+ * is_module_text_address().
+ */
+bool is_ftrace_trampoline(unsigned long addr)
+{
+	struct ftrace_ops *op;
+	bool ret = false;
+
+	/*
+	 * Some of the ops may be dynamically allocated,
+	 * they are freed after a synchronize_sched().
+	 */
+	preempt_disable_notrace();
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+		/*
+		 * This is to check for dynamically allocated trampolines.
+		 * Trampolines that are in kernel text will have
+		 * core_kernel_text() return true.
+		 */
+		if (op->trampoline && op->trampoline_size)
+			if (addr >= op->trampoline &&
+			    addr < op->trampoline + op->trampoline_size) {
+				ret = true;
+				goto out;
+			}
+	} while_for_each_ftrace_op(op);
+
+out:
+	preempt_enable_notrace();
+
+	return ret;
+}
+
 struct ftrace_page {
 	struct ftrace_page	*next;
 	struct dyn_ftrace	*records;
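The consumer of the new helper is __kernel_text_address(); roughly (a paraphrase of the companion change in kernel/extable.c, not quoted from it), the check slots in after the core-kernel and module text tests so that an unwinder walking through a dynamically allocated trampoline does not treat the frame as garbage:

int __kernel_text_address(unsigned long addr)
{
	if (core_kernel_text(addr))
		return 1;
	if (is_module_text_address(addr))
		return 1;
	/*
	 * An ftrace trampoline allocated at runtime is neither core
	 * kernel text nor module text, so it needs its own check.
	 */
	if (is_ftrace_trampoline(addr))
		return 1;
	/* ... init-text and other checks elided ... */
	return 0;
}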
@@ -1300,6 +1358,9 @@ ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
 static void
 ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
 
+static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
+				       struct ftrace_hash *new_hash);
+
 static int
 ftrace_hash_move(struct ftrace_ops *ops, int enable,
 		 struct ftrace_hash **dst, struct ftrace_hash *src)
@@ -1307,12 +1368,16 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 	struct ftrace_func_entry *entry;
 	struct hlist_node *tn;
 	struct hlist_head *hhd;
-	struct ftrace_hash *old_hash;
 	struct ftrace_hash *new_hash;
 	int size = src->count;
 	int bits = 0;
+	int ret;
 	int i;
 
+	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
+	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
+		return -EINVAL;
+
 	/*
 	 * If the new source is empty, just free dst and assign it
 	 * the empty_hash.
@@ -1346,21 +1411,44 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 	}
 
 update:
+	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
+	if (enable) {
+		/* IPMODIFY should be updated only when filter_hash updating */
+		ret = ftrace_hash_ipmodify_update(ops, new_hash);
+		if (ret < 0) {
+			free_ftrace_hash(new_hash);
+			return ret;
+		}
+	}
+
 	/*
 	 * Remove the current set, update the hash and add
 	 * them back.
 	 */
 	ftrace_hash_rec_disable_modify(ops, enable);
 
-	old_hash = *dst;
 	rcu_assign_pointer(*dst, new_hash);
-	free_ftrace_hash_rcu(old_hash);
 
 	ftrace_hash_rec_enable_modify(ops, enable);
 
 	return 0;
 }
 
+static bool hash_contains_ip(unsigned long ip,
+			     struct ftrace_ops_hash *hash)
+{
+	/*
+	 * The function record is a match if it exists in the filter
+	 * hash and not in the notrace hash. Note, an empty hash is
+	 * considered a match for the filter hash, but an empty
+	 * notrace hash is considered not in the notrace hash.
+	 */
+	return (ftrace_hash_empty(hash->filter_hash) ||
+		ftrace_lookup_ip(hash->filter_hash, ip)) &&
+		(ftrace_hash_empty(hash->notrace_hash) ||
+		!ftrace_lookup_ip(hash->notrace_hash, ip));
+}
+
 /*
  * Test the hashes for this ops to see if we want to call
  * the ops->func or not.
@@ -1376,8 +1464,7 @@ update:
 static int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
-	struct ftrace_hash *filter_hash;
-	struct ftrace_hash *notrace_hash;
+	struct ftrace_ops_hash hash;
 	int ret;
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
@@ -1390,13 +1477,10 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 		return 0;
 #endif
 
-	filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
-	notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
+	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
+	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
 
-	if ((ftrace_hash_empty(filter_hash) ||
-	     ftrace_lookup_ip(filter_hash, ip)) &&
-	    (ftrace_hash_empty(notrace_hash) ||
-	     !ftrace_lookup_ip(notrace_hash, ip)))
+	if (hash_contains_ip(ip, &hash))
 		ret = 1;
 	else
 		ret = 0;
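The filter/notrace rule that hash_contains_ip() centralizes is easy to get backwards, so here is a self-contained model of just the matching semantics, with plain arrays standing in for ftrace hashes (illustration only, not kernel code):

#include <stdbool.h>
#include <stddef.h>

struct toy_hash {
	const unsigned long *ips;
	size_t count;			/* count == 0 models an empty hash */
};

static bool toy_hash_empty(const struct toy_hash *h)
{
	return h->count == 0;
}

static bool toy_lookup_ip(const struct toy_hash *h, unsigned long ip)
{
	for (size_t i = 0; i < h->count; i++)
		if (h->ips[i] == ip)
			return true;
	return false;
}

/*
 * Same shape as hash_contains_ip(): an empty filter hash matches
 * every address, while an empty notrace hash excludes none.
 */
static bool toy_contains_ip(unsigned long ip,
			    const struct toy_hash *filter,
			    const struct toy_hash *notrace)
{
	return (toy_hash_empty(filter) || toy_lookup_ip(filter, ip)) &&
	       (toy_hash_empty(notrace) || !toy_lookup_ip(notrace, ip));
}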
@@ -1508,46 +1592,6 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
 	return keep_regs;
 }
 
-static void ftrace_remove_tramp(struct ftrace_ops *ops,
-				struct dyn_ftrace *rec)
-{
-	/* If TRAMP is not set, no ops should have a trampoline for this */
-	if (!(rec->flags & FTRACE_FL_TRAMP))
-		return;
-
-	rec->flags &= ~FTRACE_FL_TRAMP;
-
-	if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
-	     !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
-	    ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
-		return;
-	/*
-	 * The tramp_hash entry will be removed at time
-	 * of update.
-	 */
-	ops->nr_trampolines--;
-}
-
-static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
-{
-	struct ftrace_ops *op;
-
-	/* If TRAMP is not set, no ops should have a trampoline for this */
-	if (!(rec->flags & FTRACE_FL_TRAMP))
-		return;
-
-	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		/*
-		 * This function is called to clear other tramps
-		 * not the one that is being updated.
-		 */
-		if (op == ops)
-			continue;
-		if (op->nr_trampolines)
-			ftrace_remove_tramp(op, rec);
-	} while_for_each_ftrace_op(op);
-}
-
 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 				     int filter_hash,
 				     bool inc)
@@ -1636,18 +1680,16 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 			 * function, and the ops has a trampoline registered
 			 * for it, then we can call it directly.
 			 */
-			if (ftrace_rec_count(rec) == 1 && ops->trampoline) {
+			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
 				rec->flags |= FTRACE_FL_TRAMP;
-				ops->nr_trampolines++;
-			} else {
+			else
 				/*
 				 * If we are adding another function callback
 				 * to this function, and the previous had a
 				 * custom trampoline in use, then we need to go
 				 * back to the default trampoline.
 				 */
-				ftrace_clear_tramps(rec, ops);
-			}
+				rec->flags &= ~FTRACE_FL_TRAMP;
 
 		/*
 		 * If any ops wants regs saved for this function
@@ -1660,9 +1702,6 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 			return;
 		rec->flags--;
 
-		if (ops->trampoline && !ftrace_rec_count(rec))
-			ftrace_remove_tramp(ops, rec);
-
 		/*
 		 * If the rec had REGS enabled and the ops that is
 		 * being removed had REGS set, then see if there is
@@ -1677,6 +1716,17 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
 		}
 
 		/*
+		 * If the rec had TRAMP enabled, then it needs to
+		 * be cleared. As TRAMP can only be enabled when
+		 * there is only a single ops attached to it.
+		 * In other words, always disable it on decrementing.
+		 * In the future, we may set it if rec count is
+		 * decremented to one, and the ops that is left
+		 * has a trampoline.
+		 */
+		rec->flags &= ~FTRACE_FL_TRAMP;
+
+		/*
 		 * flags will be cleared in ftrace_check_record()
 		 * if rec count is zero.
 		 */
@@ -1735,6 +1785,114 @@ static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
 	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
 }
 
+/*
+ * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
+ * or no update is needed, -EBUSY if it detects a conflict of the flag
+ * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
+ * Note that old_hash and new_hash have the following meanings:
+ *  - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
+ *  - If the hash is EMPTY_HASH, it hits nothing
+ *  - Anything else hits the recs which match the hash entries.
+ */
+static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
+					 struct ftrace_hash *old_hash,
+					 struct ftrace_hash *new_hash)
+{
+	struct ftrace_page *pg;
+	struct dyn_ftrace *rec, *end = NULL;
+	int in_old, in_new;
+
+	/* Only update if the ops has been registered */
+	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
+		return 0;
+
+	if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
+		return 0;
+
+	/*
+	 * Since the IPMODIFY is a very address sensitive action, we do not
+	 * allow ftrace_ops to set all functions to new hash.
+	 */
+	if (!new_hash || !old_hash)
+		return -EINVAL;
+
+	/* Update rec->flags */
+	do_for_each_ftrace_rec(pg, rec) {
+		/* We need to update only differences of filter_hash */
+		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
+		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
+		if (in_old == in_new)
+			continue;
+
+		if (in_new) {
+			/* New entries must ensure no others are using it */
+			if (rec->flags & FTRACE_FL_IPMODIFY)
+				goto rollback;
+			rec->flags |= FTRACE_FL_IPMODIFY;
+		} else /* Removed entry */
+			rec->flags &= ~FTRACE_FL_IPMODIFY;
+	} while_for_each_ftrace_rec();
+
+	return 0;
+
+rollback:
+	end = rec;
+
+	/* Roll back what we did above */
+	do_for_each_ftrace_rec(pg, rec) {
+		if (rec == end)
+			goto err_out;
+
+		in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
+		in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
+		if (in_old == in_new)
+			continue;
+
+		if (in_new)
+			rec->flags &= ~FTRACE_FL_IPMODIFY;
+		else
+			rec->flags |= FTRACE_FL_IPMODIFY;
+	} while_for_each_ftrace_rec();
+
+err_out:
+	return -EBUSY;
+}
+
+static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
+{
+	struct ftrace_hash *hash = ops->func_hash->filter_hash;
+
+	if (ftrace_hash_empty(hash))
+		hash = NULL;
+
+	return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
+}
+
+/* Disabling always succeeds */
+static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
+{
+	struct ftrace_hash *hash = ops->func_hash->filter_hash;
+
+	if (ftrace_hash_empty(hash))
+		hash = NULL;
+
+	__ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
+}
+
+static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
+				       struct ftrace_hash *new_hash)
+{
+	struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
+
+	if (ftrace_hash_empty(old_hash))
+		old_hash = NULL;
+
+	if (ftrace_hash_empty(new_hash))
+		new_hash = NULL;
+
+	return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
+}
+
 static void print_ip_ins(const char *fmt, unsigned char *p)
 {
 	int i;
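To make the new flag concrete, here is a sketch of what an IPMODIFY user could look like. register_ftrace_function(), ftrace_set_filter_ip() and the two flags are real; my_redirect, my_replacement_func and target_ip are hypothetical placeholders, and the regs->ip store follows the x86 convention:

extern void my_replacement_func(void);	/* hypothetical replacement */
static unsigned long target_ip;		/* hypothetical: address to patch */

static void notrace my_redirect(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Resume in the replacement rather than the patched function
	 * (on x86 the saved instruction pointer is regs->ip). */
	regs->ip = (unsigned long)my_replacement_func;
}

static struct ftrace_ops my_ops = {
	.func	= my_redirect,
	/* SAVE_REGS is needed to get a pt_regs worth rewriting */
	.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
};

static int __init my_init(void)
{
	int ret;

	/*
	 * An IPMODIFY ops must name its targets: a match-all filter
	 * is rejected with -EINVAL by the code above, and a second
	 * IPMODIFY claim on the same record fails with -EBUSY.
	 */
	ret = ftrace_set_filter_ip(&my_ops, target_ip, 0, 0);
	if (ret)
		return ret;

	return register_ftrace_function(&my_ops);
}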
@@ -1745,10 +1903,13 @@ static void print_ip_ins(const char *fmt, unsigned char *p)
 		printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
 }
 
+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
+
 /**
  * ftrace_bug - report and shutdown function tracer
  * @failed: The failed type (EFAULT, EINVAL, EPERM)
- * @ip: The address that failed
+ * @rec: The record that failed
  *
  * The arch code that enables or disables the function tracing
  * can call ftrace_bug() when it has detected a problem in
@@ -1757,8 +1918,10 @@ static void print_ip_ins(const char *fmt, unsigned char *p)
  * EINVAL - if what is read at @ip is not what was expected
  * EPERM - if the problem happens on writing to the @ip address
  */
-void ftrace_bug(int failed, unsigned long ip)
+void ftrace_bug(int failed, struct dyn_ftrace *rec)
 {
+	unsigned long ip = rec ? rec->ip : 0;
+
 	switch (failed) {
 	case -EFAULT:
 		FTRACE_WARN_ON_ONCE(1);
@@ -1770,7 +1933,7 @@ void ftrace_bug(int failed, unsigned long ip)
 		pr_info("ftrace failed to modify ");
 		print_ip_sym(ip);
 		print_ip_ins(" actual: ", (unsigned char *)ip);
-		printk(KERN_CONT "\n");
+		pr_cont("\n");
 		break;
 	case -EPERM:
 		FTRACE_WARN_ON_ONCE(1);
@@ -1782,6 +1945,24 @@ void ftrace_bug(int failed, unsigned long ip)
 		pr_info("ftrace faulted on unknown error ");
 		print_ip_sym(ip);
 	}
+	if (rec) {
+		struct ftrace_ops *ops = NULL;
+
+		pr_info("ftrace record flags: %lx\n", rec->flags);
+		pr_cont(" (%ld)%s", ftrace_rec_count(rec),
+			rec->flags & FTRACE_FL_REGS ? " R" : "  ");
+		if (rec->flags & FTRACE_FL_TRAMP_EN) {
+			ops = ftrace_find_tramp_ops_any(rec);
+			if (ops)
+				pr_cont("\ttramp: %pS",
+					(void *)ops->trampoline);
+			else
+				pr_cont("\ttramp: ERROR!");
+
+		}
+		ip = ftrace_get_addr_curr(rec);
+		pr_cont(" expected tramp: %lx\n", ip);
+	}
 }
 
 static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
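The diagnostic ends with ftrace_get_addr_curr(), which is defined elsewhere in this file. Its rough shape, reconstructed from the surrounding logic rather than quoted, is to report which target the call site is currently expected to have:

/* Sketch: what address should this call site currently point at? */
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;

	/* A record with TRAMP_EN points at its owning ops' trampoline. */
	if (rec->flags & FTRACE_FL_TRAMP_EN) {
		ops = ftrace_find_tramp_ops_curr(rec);
		if (ops)
			return ops->trampoline;
	}

	/* Otherwise it calls one of the two shared entry points. */
	if (rec->flags & FTRACE_FL_REGS_EN)
		return (unsigned long)FTRACE_REGS_ADDR;
	else
		return (unsigned long)FTRACE_ADDR;
}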
@@ -1895,21 +2076,86 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
 }
 
 static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *op;
+	unsigned long ip = rec->ip;
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+		if (!op->trampoline)
+			continue;
+
+		if (hash_contains_ip(ip, op->func_hash))
+			return op;
+	} while_for_each_ftrace_op(op);
+
+	return NULL;
+}
+
+static struct ftrace_ops *
 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
 {
 	struct ftrace_ops *op;
+	unsigned long ip = rec->ip;
 
-	/* Removed ops need to be tested first */
-	if (removed_ops && removed_ops->tramp_hash) {
-		if (ftrace_lookup_ip(removed_ops->tramp_hash, rec->ip))
+	/*
+	 * Need to check removed ops first.
+	 * If they are being removed, and this rec has a tramp,
+	 * and this rec is in the ops list, then it would be the
+	 * one with the tramp.
+	 */
+	if (removed_ops) {
+		if (hash_contains_ip(ip, &removed_ops->old_hash))
 			return removed_ops;
 	}
 
+	/*
+	 * Need to find the current trampoline for a rec.
+	 * Now, a trampoline is only attached to a rec if there
+	 * was a single 'ops' attached to it. But this can be called
+	 * when we are adding another op to the rec or removing the
+	 * current one. Thus, if the op is being added, we can
+	 * ignore it because it hasn't attached itself to the rec
+	 * yet.
+	 *
+	 * If an ops is being modified (hooking to different functions)
+	 * then we don't care about the new functions that are being
+	 * added, just the old ones (that are probably being removed).
+	 *
+	 * If we are adding an ops to a function that already is using
+	 * a trampoline, it needs to be removed (trampolines are only
+	 * for single ops connected), then an ops that is not being
+	 * modified also needs to be checked.
+	 */
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (!op->tramp_hash)
+
+		if (!op->trampoline)
 			continue;
 
-		if (ftrace_lookup_ip(op->tramp_hash, rec->ip))
+		/*
+		 * If the ops is being added, it hasn't gotten to
+		 * the point to be removed from this tree yet.
+		 */
+		if (op->flags & FTRACE_OPS_FL_ADDING)
+			continue;
+
+
+		/*
+		 * If the ops is being modified and is in the old
+		 * hash, then it is probably being removed from this
+		 * function.
+		 */
+		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
+		    hash_contains_ip(ip, &op->old_hash))
+			return op;
+		/*
+		 * If the ops is not being added or modified, and it's
+		 * in its normal filter hash, then this must be the one
+		 * we want!
+		 */
+		if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
+		    hash_contains_ip(ip, op->func_hash))
 			return op;
 
 	} while_for_each_ftrace_op(op);
@@ -1921,10 +2167,11 @@ static struct ftrace_ops *
 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
 {
 	struct ftrace_ops *op;
+	unsigned long ip = rec->ip;
 
 	do_for_each_ftrace_op(op, ftrace_ops_list) {
 		/* pass rec in as regs to have non-NULL val */
-		if (ftrace_ops_test(op, rec->ip, rec))
+		if (hash_contains_ip(ip, op->func_hash))
 			return op;
 	} while_for_each_ftrace_op(op);
 
@@ -2038,7 +2285,7 @@ void __weak ftrace_replace_code(int enable)
 	do_for_each_ftrace_rec(pg, rec) {
 		failed = __ftrace_replace_code(rec, enable);
 		if (failed) {
-			ftrace_bug(failed, rec->ip);
+			ftrace_bug(failed, rec);
 			/* Stop processing */
 			return;
 		}
@@ -2120,17 +2367,14 @@ struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
 static int
 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
 {
-	unsigned long ip;
 	int ret;
 
-	ip = rec->ip;
-
 	if (unlikely(ftrace_disabled))
 		return 0;
 
 	ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
 	if (ret) {
-		ftrace_bug(ret, ip);
+		ftrace_bug(ret, rec);
 		return 0;
 	}
 	return 1;
@@ -2231,92 +2475,6 @@ void __weak arch_ftrace_update_code(int command)
 	ftrace_run_stop_machine(command);
 }
 
-static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
-{
-	struct ftrace_page *pg;
-	struct dyn_ftrace *rec;
-	int size, bits;
-	int ret;
-
-	size = ops->nr_trampolines;
-	bits = 0;
-	/*
-	 * Make the hash size about 1/2 the # found
-	 */
-	for (size /= 2; size; size >>= 1)
-		bits++;
-
-	ops->tramp_hash = alloc_ftrace_hash(bits);
-	/*
-	 * TODO: a failed allocation is going to screw up
-	 * the accounting of what needs to be modified
-	 * and not. For now, we kill ftrace if we fail
-	 * to allocate here. But there are ways around this,
-	 * but that will take a little more work.
-	 */
-	if (!ops->tramp_hash)
-		return -ENOMEM;
-
-	do_for_each_ftrace_rec(pg, rec) {
-		if (ftrace_rec_count(rec) == 1 &&
-		    ftrace_ops_test(ops, rec->ip, rec)) {
-
-			/*
-			 * If another ops adds to a rec, the rec will
-			 * lose its trampoline and never get it back
-			 * until all ops are off of it.
-			 */
-			if (!(rec->flags & FTRACE_FL_TRAMP))
-				continue;
-
-			/* This record had better have a trampoline */
-			if (FTRACE_WARN_ON(!(rec->flags & FTRACE_FL_TRAMP_EN)))
-				return -1;
-
-			ret = add_hash_entry(ops->tramp_hash, rec->ip);
-			if (ret < 0)
-				return ret;
-		}
-	} while_for_each_ftrace_rec();
-
-	/* The number of recs in the hash must match nr_trampolines */
-	if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
-		pr_warn("count=%ld trampolines=%d\n",
-			ops->tramp_hash->count,
-			ops->nr_trampolines);
-
-	return 0;
-}
-
-static int ftrace_save_tramp_hashes(void)
-{
-	struct ftrace_ops *op;
-	int ret;
-
-	/*
-	 * Now that any trampoline is being used, we need to save the
-	 * hashes for the ops that have them. This allows the mapping
-	 * back from the record to the ops that has the trampoline to
-	 * know what code is being replaced. Modifying code must always
-	 * verify what it is changing.
-	 */
-	do_for_each_ftrace_op(op, ftrace_ops_list) {
-
-		/* The tramp_hash is recreated each time. */
-		free_ftrace_hash(op->tramp_hash);
-		op->tramp_hash = NULL;
-
-		if (op->nr_trampolines) {
-			ret = ftrace_save_ops_tramp_hash(op);
-			if (ret)
-				return ret;
-		}
-
-	} while_for_each_ftrace_op(op);
-
-	return 0;
-}
-
 static void ftrace_run_update_code(int command)
 {
 	int ret;
@@ -2336,14 +2494,25 @@ static void ftrace_run_update_code(int command)
 
 	ret = ftrace_arch_code_modify_post_process();
 	FTRACE_WARN_ON(ret);
+}
 
-	ret = ftrace_save_tramp_hashes();
-	FTRACE_WARN_ON(ret);
+static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
+				   struct ftrace_hash *old_hash)
+{
+	ops->flags |= FTRACE_OPS_FL_MODIFYING;
+	ops->old_hash.filter_hash = old_hash;
+	ftrace_run_update_code(command);
+	ops->old_hash.filter_hash = NULL;
+	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
 }
 
 static ftrace_func_t saved_ftrace_func;
 static int ftrace_start_up;
 
+void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
+{
+}
+
 static void control_ops_free(struct ftrace_ops *ops)
 {
 	free_percpu(ops->disabled);
@@ -2362,6 +2531,13 @@ static void ftrace_startup_enable(int command)
 	ftrace_run_update_code(command);
 }
 
+static void ftrace_startup_all(int command)
+{
+	update_all_ops = true;
+	ftrace_startup_enable(command);
+	update_all_ops = false;
+}
+
 static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
 	int ret;
@@ -2376,12 +2552,31 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
 	ftrace_start_up++;
 	command |= FTRACE_UPDATE_CALLS;
 
-	ops->flags |= FTRACE_OPS_FL_ENABLED;
+	/*
+	 * Note that ftrace probes use this to start up
+	 * and modify functions they will probe. But we still
+	 * set the ADDING flag for modification, as probes
+	 * do not have trampolines. If they add them in the
+	 * future, then the probes will need to distinguish
+	 * between adding and updating probes.
+	 */
+	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
+
+	ret = ftrace_hash_ipmodify_enable(ops);
+	if (ret < 0) {
+		/* Rollback registration process */
+		__unregister_ftrace_function(ops);
+		ftrace_start_up--;
+		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
+		return ret;
+	}
 
 	ftrace_hash_rec_enable(ops, 1);
 
 	ftrace_startup_enable(command);
 
+	ops->flags &= ~FTRACE_OPS_FL_ADDING;
+
 	return 0;
 }
 
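One consequence of the rollback above is worth making explicit: a failed IPMODIFY check unwinds the whole registration, so a caller only has to test the return value. Continuing the hypothetical my_ops example from earlier:

	int err = register_ftrace_function(&my_ops);

	if (err) {
		/*
		 * -EINVAL: IPMODIFY ops registered with a match-all filter.
		 * -EBUSY:  another IPMODIFY ops already owns a target.
		 * Either way ftrace_startup() unwound everything; there is
		 * no partial registration to clean up.
		 */
		pr_err("ftrace registration failed: %d\n", err);
		return err;
	}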
| @@ -2404,6 +2599,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
| 2404 | */ | 2599 | */ |
| 2405 | WARN_ON_ONCE(ftrace_start_up < 0); | 2600 | WARN_ON_ONCE(ftrace_start_up < 0); |
| 2406 | 2601 | ||
| 2602 | /* Disabling ipmodify never fails */ | ||
| 2603 | ftrace_hash_ipmodify_disable(ops); | ||
| 2407 | ftrace_hash_rec_disable(ops, 1); | 2604 | ftrace_hash_rec_disable(ops, 1); |
| 2408 | 2605 | ||
| 2409 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; | 2606 | ops->flags &= ~FTRACE_OPS_FL_ENABLED; |
| @@ -2431,11 +2628,35 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
| 2431 | * If the ops uses a trampoline, then it needs to be | 2628 | * If the ops uses a trampoline, then it needs to be |
| 2432 | * tested first on update. | 2629 | * tested first on update. |
| 2433 | */ | 2630 | */ |
| 2631 | ops->flags |= FTRACE_OPS_FL_REMOVING; | ||
| 2434 | removed_ops = ops; | 2632 | removed_ops = ops; |
| 2435 | 2633 | ||
| 2634 | /* The trampoline logic checks the old hashes */ | ||
| 2635 | ops->old_hash.filter_hash = ops->func_hash->filter_hash; | ||
| 2636 | ops->old_hash.notrace_hash = ops->func_hash->notrace_hash; | ||
| 2637 | |||
| 2436 | ftrace_run_update_code(command); | 2638 | ftrace_run_update_code(command); |
| 2437 | 2639 | ||
| 2640 | /* | ||
| 2641 | * If there are no more ops registered with ftrace, run a | ||
| 2642 | * sanity check to make sure all rec flags are cleared. | ||
| 2643 | */ | ||
| 2644 | if (ftrace_ops_list == &ftrace_list_end) { | ||
| 2645 | struct ftrace_page *pg; | ||
| 2646 | struct dyn_ftrace *rec; | ||
| 2647 | |||
| 2648 | do_for_each_ftrace_rec(pg, rec) { | ||
| 2649 | if (FTRACE_WARN_ON_ONCE(rec->flags)) | ||
| 2650 | pr_warn(" %pS flags:%lx\n", | ||
| 2651 | (void *)rec->ip, rec->flags); | ||
| 2652 | } while_for_each_ftrace_rec(); | ||
| 2653 | } | ||
| 2654 | |||
| 2655 | ops->old_hash.filter_hash = NULL; | ||
| 2656 | ops->old_hash.notrace_hash = NULL; | ||
| 2657 | |||
| 2438 | removed_ops = NULL; | 2658 | removed_ops = NULL; |
| 2659 | ops->flags &= ~FTRACE_OPS_FL_REMOVING; | ||
| 2439 | 2660 | ||
| 2440 | /* | 2661 | /* |
| 2441 | * Dynamic ops may be freed, we must make sure that all | 2662 | * Dynamic ops may be freed, we must make sure that all |
| @@ -2454,6 +2675,8 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command) | |||
| 2454 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) { | 2675 | if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) { |
| 2455 | schedule_on_each_cpu(ftrace_sync); | 2676 | schedule_on_each_cpu(ftrace_sync); |
| 2456 | 2677 | ||
| 2678 | arch_ftrace_trampoline_free(ops); | ||
| 2679 | |||
| 2457 | if (ops->flags & FTRACE_OPS_FL_CONTROL) | 2680 | if (ops->flags & FTRACE_OPS_FL_CONTROL) |
| 2458 | control_ops_free(ops); | 2681 | control_ops_free(ops); |
| 2459 | } | 2682 | } |
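arch_ftrace_trampoline_free() is a weak stub, so what follows is only a sketch of the assumed arch-side contract (allocate in arch_ftrace_update_trampoline(), release here); arch_free_exec_page() is a hypothetical helper.

    /* Hypothetical arch override: release a dynamically allocated trampoline. */
    void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
    {
            /* Only free trampolines this arch allocated itself. */
            if (!ops->trampoline || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
                    return;

            arch_free_exec_page((void *)ops->trampoline);   /* hypothetical */
            ops->trampoline = 0;
            ops->flags &= ~FTRACE_OPS_FL_ALLOC_TRAMP;
    }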
| @@ -2606,7 +2829,7 @@ static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs) | |||
| 2606 | if (ftrace_start_up && cnt) { | 2829 | if (ftrace_start_up && cnt) { |
| 2607 | int failed = __ftrace_replace_code(p, 1); | 2830 | int failed = __ftrace_replace_code(p, 1); |
| 2608 | if (failed) | 2831 | if (failed) |
| 2609 | ftrace_bug(failed, p->ip); | 2832 | ftrace_bug(failed, p); |
| 2610 | } | 2833 | } |
| 2611 | } | 2834 | } |
| 2612 | } | 2835 | } |
| @@ -2931,6 +3154,22 @@ static void t_stop(struct seq_file *m, void *p) | |||
| 2931 | mutex_unlock(&ftrace_lock); | 3154 | mutex_unlock(&ftrace_lock); |
| 2932 | } | 3155 | } |
| 2933 | 3156 | ||
| 3157 | void * __weak | ||
| 3158 | arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec) | ||
| 3159 | { | ||
| 3160 | return NULL; | ||
| 3161 | } | ||
| 3162 | |||
| 3163 | static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops, | ||
| 3164 | struct dyn_ftrace *rec) | ||
| 3165 | { | ||
| 3166 | void *ptr; | ||
| 3167 | |||
| 3168 | ptr = arch_ftrace_trampoline_func(ops, rec); | ||
| 3169 | if (ptr) | ||
| 3170 | seq_printf(m, " ->%pS", ptr); | ||
| 3171 | } | ||
| 3172 | |||
| 2934 | static int t_show(struct seq_file *m, void *v) | 3173 | static int t_show(struct seq_file *m, void *v) |
| 2935 | { | 3174 | { |
| 2936 | struct ftrace_iterator *iter = m->private; | 3175 | struct ftrace_iterator *iter = m->private; |
| @@ -2941,9 +3180,9 @@ static int t_show(struct seq_file *m, void *v) | |||
| 2941 | 3180 | ||
| 2942 | if (iter->flags & FTRACE_ITER_PRINTALL) { | 3181 | if (iter->flags & FTRACE_ITER_PRINTALL) { |
| 2943 | if (iter->flags & FTRACE_ITER_NOTRACE) | 3182 | if (iter->flags & FTRACE_ITER_NOTRACE) |
| 2944 | seq_printf(m, "#### no functions disabled ####\n"); | 3183 | seq_puts(m, "#### no functions disabled ####\n"); |
| 2945 | else | 3184 | else |
| 2946 | seq_printf(m, "#### all functions enabled ####\n"); | 3185 | seq_puts(m, "#### all functions enabled ####\n"); |
| 2947 | return 0; | 3186 | return 0; |
| 2948 | } | 3187 | } |
| 2949 | 3188 | ||
| @@ -2954,22 +3193,25 @@ static int t_show(struct seq_file *m, void *v) | |||
| 2954 | 3193 | ||
| 2955 | seq_printf(m, "%ps", (void *)rec->ip); | 3194 | seq_printf(m, "%ps", (void *)rec->ip); |
| 2956 | if (iter->flags & FTRACE_ITER_ENABLED) { | 3195 | if (iter->flags & FTRACE_ITER_ENABLED) { |
| 2957 | seq_printf(m, " (%ld)%s", | 3196 | struct ftrace_ops *ops = NULL; |
| 3197 | |||
| 3198 | seq_printf(m, " (%ld)%s%s", | ||
| 2958 | ftrace_rec_count(rec), | 3199 | ftrace_rec_count(rec), |
| 2959 | rec->flags & FTRACE_FL_REGS ? " R" : " "); | 3200 | rec->flags & FTRACE_FL_REGS ? " R" : " ", |
| 3201 | rec->flags & FTRACE_FL_IPMODIFY ? " I" : " "); | ||
| 2960 | if (rec->flags & FTRACE_FL_TRAMP_EN) { | 3202 | if (rec->flags & FTRACE_FL_TRAMP_EN) { |
| 2961 | struct ftrace_ops *ops; | 3203 | ops = ftrace_find_tramp_ops_any(rec); |
| 2962 | 3204 | if (ops) | |
| 2963 | ops = ftrace_find_tramp_ops_curr(rec); | ||
| 2964 | if (ops && ops->trampoline) | ||
| 2965 | seq_printf(m, "\ttramp: %pS", | 3205 | seq_printf(m, "\ttramp: %pS", |
| 2966 | (void *)ops->trampoline); | 3206 | (void *)ops->trampoline); |
| 2967 | else | 3207 | else |
| 2968 | seq_printf(m, "\ttramp: ERROR!"); | 3208 | seq_puts(m, "\ttramp: ERROR!"); |
| 3209 | |||
| 2969 | } | 3210 | } |
| 3211 | add_trampoline_func(m, ops, rec); | ||
| 2970 | } | 3212 | } |
| 2971 | 3213 | ||
| 2972 | seq_printf(m, "\n"); | 3214 | seq_putc(m, '\n'); |
| 2973 | 3215 | ||
| 2974 | return 0; | 3216 | return 0; |
| 2975 | } | 3217 | } |
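The weak arch_ftrace_trampoline_func() hook lets an architecture report which function a custom trampoline ends up calling, which t_show() appends as " ->func" in enabled_functions. A hedged sketch of an override; the decoding helper is hypothetical:

    void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
    {
            if (!ops || !ops->trampoline)
                    return NULL;

            /* Hypothetical: decode the call instruction the trampoline makes. */
            return arch_decode_call_target((void *)ops->trampoline);
    }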
| @@ -3003,9 +3245,6 @@ ftrace_enabled_open(struct inode *inode, struct file *file) | |||
| 3003 | { | 3245 | { |
| 3004 | struct ftrace_iterator *iter; | 3246 | struct ftrace_iterator *iter; |
| 3005 | 3247 | ||
| 3006 | if (unlikely(ftrace_disabled)) | ||
| 3007 | return -ENODEV; | ||
| 3008 | |||
| 3009 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); | 3248 | iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter)); |
| 3010 | if (iter) { | 3249 | if (iter) { |
| 3011 | iter->pg = ftrace_pages_start; | 3250 | iter->pg = ftrace_pages_start; |
| @@ -3340,7 +3579,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly = | |||
| 3340 | 3579 | ||
| 3341 | static int ftrace_probe_registered; | 3580 | static int ftrace_probe_registered; |
| 3342 | 3581 | ||
| 3343 | static void __enable_ftrace_function_probe(void) | 3582 | static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash) |
| 3344 | { | 3583 | { |
| 3345 | int ret; | 3584 | int ret; |
| 3346 | int i; | 3585 | int i; |
| @@ -3348,7 +3587,8 @@ static void __enable_ftrace_function_probe(void) | |||
| 3348 | if (ftrace_probe_registered) { | 3587 | if (ftrace_probe_registered) { |
| 3349 | /* still need to update the function call sites */ | 3588 | /* still need to update the function call sites */ |
| 3350 | if (ftrace_enabled) | 3589 | if (ftrace_enabled) |
| 3351 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | 3590 | ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS, |
| 3591 | old_hash); | ||
| 3352 | return; | 3592 | return; |
| 3353 | } | 3593 | } |
| 3354 | 3594 | ||
| @@ -3399,6 +3639,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3399 | { | 3639 | { |
| 3400 | struct ftrace_func_probe *entry; | 3640 | struct ftrace_func_probe *entry; |
| 3401 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; | 3641 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; |
| 3642 | struct ftrace_hash *old_hash = *orig_hash; | ||
| 3402 | struct ftrace_hash *hash; | 3643 | struct ftrace_hash *hash; |
| 3403 | struct ftrace_page *pg; | 3644 | struct ftrace_page *pg; |
| 3404 | struct dyn_ftrace *rec; | 3645 | struct dyn_ftrace *rec; |
| @@ -3417,7 +3658,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3417 | 3658 | ||
| 3418 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); | 3659 | mutex_lock(&trace_probe_ops.func_hash->regex_lock); |
| 3419 | 3660 | ||
| 3420 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash); | 3661 | hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); |
| 3421 | if (!hash) { | 3662 | if (!hash) { |
| 3422 | count = -ENOMEM; | 3663 | count = -ENOMEM; |
| 3423 | goto out; | 3664 | goto out; |
| @@ -3476,10 +3717,13 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3476 | } while_for_each_ftrace_rec(); | 3717 | } while_for_each_ftrace_rec(); |
| 3477 | 3718 | ||
| 3478 | ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); | 3719 | ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); |
| 3479 | if (ret < 0) | ||
| 3480 | count = ret; | ||
| 3481 | 3720 | ||
| 3482 | __enable_ftrace_function_probe(); | 3721 | __enable_ftrace_function_probe(old_hash); |
| 3722 | |||
| 3723 | if (!ret) | ||
| 3724 | free_ftrace_hash_rcu(old_hash); | ||
| 3725 | else | ||
| 3726 | count = ret; | ||
| 3483 | 3727 | ||
| 3484 | out_unlock: | 3728 | out_unlock: |
| 3485 | mutex_unlock(&ftrace_lock); | 3729 | mutex_unlock(&ftrace_lock); |
| @@ -3503,6 +3747,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3503 | struct ftrace_func_probe *entry; | 3747 | struct ftrace_func_probe *entry; |
| 3504 | struct ftrace_func_probe *p; | 3748 | struct ftrace_func_probe *p; |
| 3505 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; | 3749 | struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; |
| 3750 | struct ftrace_hash *old_hash = *orig_hash; | ||
| 3506 | struct list_head free_list; | 3751 | struct list_head free_list; |
| 3507 | struct ftrace_hash *hash; | 3752 | struct ftrace_hash *hash; |
| 3508 | struct hlist_node *tmp; | 3753 | struct hlist_node *tmp; |
| @@ -3510,6 +3755,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3510 | int type = MATCH_FULL; | 3755 | int type = MATCH_FULL; |
| 3511 | int i, len = 0; | 3756 | int i, len = 0; |
| 3512 | char *search; | 3757 | char *search; |
| 3758 | int ret; | ||
| 3513 | 3759 | ||
| 3514 | if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) | 3760 | if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) |
| 3515 | glob = NULL; | 3761 | glob = NULL; |
| @@ -3568,8 +3814,11 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3568 | * Remove after the disable is called. Otherwise, if the last | 3814 | * Remove after the disable is called. Otherwise, if the last |
| 3569 | * probe is removed, a null hash means *all enabled*. | 3815 | * probe is removed, a null hash means *all enabled*. |
| 3570 | */ | 3816 | */ |
| 3571 | ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); | 3817 | ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); |
| 3572 | synchronize_sched(); | 3818 | synchronize_sched(); |
| 3819 | if (!ret) | ||
| 3820 | free_ftrace_hash_rcu(old_hash); | ||
| 3821 | |||
| 3573 | list_for_each_entry_safe(entry, p, &free_list, free_list) { | 3822 | list_for_each_entry_safe(entry, p, &free_list, free_list) { |
| 3574 | list_del(&entry->free_list); | 3823 | list_del(&entry->free_list); |
| 3575 | ftrace_free_entry(entry); | 3824 | ftrace_free_entry(entry); |
| @@ -3756,10 +4005,11 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) | |||
| 3756 | return add_hash_entry(hash, ip); | 4005 | return add_hash_entry(hash, ip); |
| 3757 | } | 4006 | } |
| 3758 | 4007 | ||
| 3759 | static void ftrace_ops_update_code(struct ftrace_ops *ops) | 4008 | static void ftrace_ops_update_code(struct ftrace_ops *ops, |
| 4009 | struct ftrace_hash *old_hash) | ||
| 3760 | { | 4010 | { |
| 3761 | if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) | 4011 | if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) |
| 3762 | ftrace_run_update_code(FTRACE_UPDATE_CALLS); | 4012 | ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); |
| 3763 | } | 4013 | } |
| 3764 | 4014 | ||
| 3765 | static int | 4015 | static int |
| @@ -3767,6 +4017,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
| 3767 | unsigned long ip, int remove, int reset, int enable) | 4017 | unsigned long ip, int remove, int reset, int enable) |
| 3768 | { | 4018 | { |
| 3769 | struct ftrace_hash **orig_hash; | 4019 | struct ftrace_hash **orig_hash; |
| 4020 | struct ftrace_hash *old_hash; | ||
| 3770 | struct ftrace_hash *hash; | 4021 | struct ftrace_hash *hash; |
| 3771 | int ret; | 4022 | int ret; |
| 3772 | 4023 | ||
| @@ -3801,10 +4052,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, | |||
| 3801 | } | 4052 | } |
| 3802 | 4053 | ||
| 3803 | mutex_lock(&ftrace_lock); | 4054 | mutex_lock(&ftrace_lock); |
| 4055 | old_hash = *orig_hash; | ||
| 3804 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); | 4056 | ret = ftrace_hash_move(ops, enable, orig_hash, hash); |
| 3805 | if (!ret) | 4057 | if (!ret) { |
| 3806 | ftrace_ops_update_code(ops); | 4058 | ftrace_ops_update_code(ops, old_hash); |
| 3807 | 4059 | free_ftrace_hash_rcu(old_hash); | |
| 4060 | } | ||
| 3808 | mutex_unlock(&ftrace_lock); | 4061 | mutex_unlock(&ftrace_lock); |
| 3809 | 4062 | ||
| 3810 | out_regex_unlock: | 4063 | out_regex_unlock: |
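The old_hash dance that now recurs throughout the series is the standard RCU publish-then-retire sequence: function-entry callers walk ops->func_hash with only preemption disabled, so the displaced hash may be freed only after a grace period. Schematically (all under ftrace_lock):

    old_hash = *orig_hash;                               /* remember current hash */
    ret = ftrace_hash_move(ops, enable, orig_hash, hash); /* publish the new one  */
    if (!ret)
            free_ftrace_hash_rcu(old_hash);              /* retire after a grace period */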
| @@ -3944,6 +4197,9 @@ static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata; | |||
| 3944 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; | 4197 | static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata; |
| 3945 | static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); | 4198 | static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer); |
| 3946 | 4199 | ||
| 4200 | static unsigned long save_global_trampoline; | ||
| 4201 | static unsigned long save_global_flags; | ||
| 4202 | |||
| 3947 | static int __init set_graph_function(char *str) | 4203 | static int __init set_graph_function(char *str) |
| 3948 | { | 4204 | { |
| 3949 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); | 4205 | strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE); |
| @@ -4013,6 +4269,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
| 4013 | struct seq_file *m = (struct seq_file *)file->private_data; | 4269 | struct seq_file *m = (struct seq_file *)file->private_data; |
| 4014 | struct ftrace_iterator *iter; | 4270 | struct ftrace_iterator *iter; |
| 4015 | struct ftrace_hash **orig_hash; | 4271 | struct ftrace_hash **orig_hash; |
| 4272 | struct ftrace_hash *old_hash; | ||
| 4016 | struct trace_parser *parser; | 4273 | struct trace_parser *parser; |
| 4017 | int filter_hash; | 4274 | int filter_hash; |
| 4018 | int ret; | 4275 | int ret; |
| @@ -4042,11 +4299,13 @@ int ftrace_regex_release(struct inode *inode, struct file *file) | |||
| 4042 | orig_hash = &iter->ops->func_hash->notrace_hash; | 4299 | orig_hash = &iter->ops->func_hash->notrace_hash; |
| 4043 | 4300 | ||
| 4044 | mutex_lock(&ftrace_lock); | 4301 | mutex_lock(&ftrace_lock); |
| 4302 | old_hash = *orig_hash; | ||
| 4045 | ret = ftrace_hash_move(iter->ops, filter_hash, | 4303 | ret = ftrace_hash_move(iter->ops, filter_hash, |
| 4046 | orig_hash, iter->hash); | 4304 | orig_hash, iter->hash); |
| 4047 | if (!ret) | 4305 | if (!ret) { |
| 4048 | ftrace_ops_update_code(iter->ops); | 4306 | ftrace_ops_update_code(iter->ops, old_hash); |
| 4049 | 4307 | free_ftrace_hash_rcu(old_hash); | |
| 4308 | } | ||
| 4050 | mutex_unlock(&ftrace_lock); | 4309 | mutex_unlock(&ftrace_lock); |
| 4051 | } | 4310 | } |
| 4052 | 4311 | ||
| @@ -4149,9 +4408,9 @@ static int g_show(struct seq_file *m, void *v) | |||
| 4149 | struct ftrace_graph_data *fgd = m->private; | 4408 | struct ftrace_graph_data *fgd = m->private; |
| 4150 | 4409 | ||
| 4151 | if (fgd->table == ftrace_graph_funcs) | 4410 | if (fgd->table == ftrace_graph_funcs) |
| 4152 | seq_printf(m, "#### all functions enabled ####\n"); | 4411 | seq_puts(m, "#### all functions enabled ####\n"); |
| 4153 | else | 4412 | else |
| 4154 | seq_printf(m, "#### no functions disabled ####\n"); | 4413 | seq_puts(m, "#### no functions disabled ####\n"); |
| 4155 | return 0; | 4414 | return 0; |
| 4156 | } | 4415 | } |
| 4157 | 4416 | ||
| @@ -4662,6 +4921,32 @@ void __init ftrace_init(void) | |||
| 4662 | ftrace_disabled = 1; | 4921 | ftrace_disabled = 1; |
| 4663 | } | 4922 | } |
| 4664 | 4923 | ||
| 4924 | /* Do nothing if arch does not support this */ | ||
| 4925 | void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops) | ||
| 4926 | { | ||
| 4927 | } | ||
| 4928 | |||
| 4929 | static void ftrace_update_trampoline(struct ftrace_ops *ops) | ||
| 4930 | { | ||
| 4931 | |||
| 4932 | /* | ||
| 4933 | * Currently there's no safe way to free a trampoline when the kernel | ||
| 4934 | * is configured with PREEMPT. That is because a task could be preempted | ||
| 4935 | * while it is on the trampoline; it may stay preempted for a long time | ||
| 4936 | * depending on the system load, and currently there's no way to know | ||
| 4937 | * when it will be off the trampoline. If the trampoline is freed | ||
| 4938 | * too early, when the task runs again, it will be executing on freed | ||
| 4939 | * memory and crash. | ||
| 4940 | */ | ||
| 4941 | #ifdef CONFIG_PREEMPT | ||
| 4942 | /* Currently, only non-dynamic ops can have a trampoline */ | ||
| 4943 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC) | ||
| 4944 | return; | ||
| 4945 | #endif | ||
| 4946 | |||
| 4947 | arch_ftrace_update_trampoline(ops); | ||
| 4948 | } | ||
| 4949 | |||
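arch_ftrace_update_trampoline() is likewise a weak stub; an arch supporting per-ops trampolines would roughly allocate executable memory, copy its trampoline template, and patch the handler call to ftrace_ops_get_func(ops). All helpers below except ftrace_ops_get_func() are hypothetical:

    void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
    {
            void *tramp;

            if (!ops->trampoline) {
                    tramp = arch_alloc_exec_page();         /* hypothetical */
                    if (!tramp)
                            return;
                    memcpy(tramp, arch_tramp_template, arch_tramp_size);
                    ops->trampoline = (unsigned long)tramp;
                    ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
            }

            /* Retarget the trampoline's call site at the current handler. */
            arch_patch_call((void *)ops->trampoline, ftrace_ops_get_func(ops));
    }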
| 4665 | #else | 4950 | #else |
| 4666 | 4951 | ||
| 4667 | static struct ftrace_ops global_ops = { | 4952 | static struct ftrace_ops global_ops = { |
| @@ -4678,6 +4963,7 @@ core_initcall(ftrace_nodyn_init); | |||
| 4678 | 4963 | ||
| 4679 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } | 4964 | static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; } |
| 4680 | static inline void ftrace_startup_enable(int command) { } | 4965 | static inline void ftrace_startup_enable(int command) { } |
| 4966 | static inline void ftrace_startup_all(int command) { } | ||
| 4681 | /* Keep as macros so we do not need to define the commands */ | 4967 | /* Keep as macros so we do not need to define the commands */ |
| 4682 | # define ftrace_startup(ops, command) \ | 4968 | # define ftrace_startup(ops, command) \ |
| 4683 | ({ \ | 4969 | ({ \ |
| @@ -4703,6 +4989,10 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs) | |||
| 4703 | return 1; | 4989 | return 1; |
| 4704 | } | 4990 | } |
| 4705 | 4991 | ||
| 4992 | static void ftrace_update_trampoline(struct ftrace_ops *ops) | ||
| 4993 | { | ||
| 4994 | } | ||
| 4995 | |||
| 4706 | #endif /* CONFIG_DYNAMIC_FTRACE */ | 4996 | #endif /* CONFIG_DYNAMIC_FTRACE */ |
| 4707 | 4997 | ||
| 4708 | __init void ftrace_init_global_array_ops(struct trace_array *tr) | 4998 | __init void ftrace_init_global_array_ops(struct trace_array *tr) |
| @@ -4827,6 +5117,56 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip) | |||
| 4827 | } | 5117 | } |
| 4828 | #endif | 5118 | #endif |
| 4829 | 5119 | ||
| 5120 | /* | ||
| 5121 | * If there's only one function registered but it does not support | ||
| 5122 | * recursion, this function will be called by the mcount trampoline. | ||
| 5123 | * This function will handle recursion protection. | ||
| 5124 | */ | ||
| 5125 | static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip, | ||
| 5126 | struct ftrace_ops *op, struct pt_regs *regs) | ||
| 5127 | { | ||
| 5128 | int bit; | ||
| 5129 | |||
| 5130 | bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); | ||
| 5131 | if (bit < 0) | ||
| 5132 | return; | ||
| 5133 | |||
| 5134 | op->func(ip, parent_ip, op, regs); | ||
| 5135 | |||
| 5136 | trace_clear_recursion(bit); | ||
| 5137 | } | ||
| 5138 | |||
| 5139 | /** | ||
| 5140 | * ftrace_ops_get_func - get the function a trampoline should call | ||
| 5141 | * @ops: the ops to get the function for | ||
| 5142 | * | ||
| 5143 | * Normally the mcount trampoline will call the ops->func, but there | ||
| 5144 | * are times that it should not. For example, if the ops does not | ||
| 5145 | * have its own recursion protection, then it should call the | ||
| 5146 | * ftrace_ops_recurs_func() instead. | ||
| 5147 | * | ||
| 5148 | * Returns the function that the trampoline should call for @ops. | ||
| 5149 | */ | ||
| 5150 | ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops) | ||
| 5151 | { | ||
| 5152 | /* | ||
| 5153 | * If this is a dynamic ops or we force list func, | ||
| 5154 | * then it needs to call the list anyway. | ||
| 5155 | */ | ||
| 5156 | if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC) | ||
| 5157 | return ftrace_ops_list_func; | ||
| 5158 | |||
| 5159 | /* | ||
| 5160 | * If the func handles its own recursion, call it directly. | ||
| 5161 | * Otherwise call the recursion protected function that | ||
| 5162 | * will call the ftrace ops function. | ||
| 5163 | */ | ||
| 5164 | if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE)) | ||
| 5165 | return ftrace_ops_recurs_func; | ||
| 5166 | |||
| 5167 | return ops->func; | ||
| 5168 | } | ||
| 5169 | |||
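In practice this means an ops that leaves FTRACE_OPS_FL_RECURSION_SAFE clear gets wrapped transparently; a minimal sketch of such a user (the counter is illustrative):

    #include <linux/ftrace.h>
    #include <linux/atomic.h>

    static atomic_t my_hits;

    static void my_unsafe_func(unsigned long ip, unsigned long parent_ip,
                               struct ftrace_ops *op, struct pt_regs *regs)
    {
            atomic_inc(&my_hits);   /* anything here may itself be traced */
    }

    static struct ftrace_ops my_ops = {
            .func = my_unsafe_func,
            /* RECURSION_SAFE deliberately not set, so ftrace_ops_get_func()
             * resolves to ftrace_ops_recurs_func() for this ops. */
    };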
| 4830 | static void clear_ftrace_swapper(void) | 5170 | static void clear_ftrace_swapper(void) |
| 4831 | { | 5171 | { |
| 4832 | struct task_struct *p; | 5172 | struct task_struct *p; |
| @@ -4927,7 +5267,8 @@ static int ftrace_pid_add(int p) | |||
| 4927 | set_ftrace_pid_task(pid); | 5267 | set_ftrace_pid_task(pid); |
| 4928 | 5268 | ||
| 4929 | ftrace_update_pid_func(); | 5269 | ftrace_update_pid_func(); |
| 4930 | ftrace_startup_enable(0); | 5270 | |
| 5271 | ftrace_startup_all(0); | ||
| 4931 | 5272 | ||
| 4932 | mutex_unlock(&ftrace_lock); | 5273 | mutex_unlock(&ftrace_lock); |
| 4933 | return 0; | 5274 | return 0; |
| @@ -4956,7 +5297,7 @@ static void ftrace_pid_reset(void) | |||
| 4956 | } | 5297 | } |
| 4957 | 5298 | ||
| 4958 | ftrace_update_pid_func(); | 5299 | ftrace_update_pid_func(); |
| 4959 | ftrace_startup_enable(0); | 5300 | ftrace_startup_all(0); |
| 4960 | 5301 | ||
| 4961 | mutex_unlock(&ftrace_lock); | 5302 | mutex_unlock(&ftrace_lock); |
| 4962 | } | 5303 | } |
| @@ -4989,12 +5330,12 @@ static int fpid_show(struct seq_file *m, void *v) | |||
| 4989 | const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); | 5330 | const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list); |
| 4990 | 5331 | ||
| 4991 | if (v == (void *)1) { | 5332 | if (v == (void *)1) { |
| 4992 | seq_printf(m, "no pid\n"); | 5333 | seq_puts(m, "no pid\n"); |
| 4993 | return 0; | 5334 | return 0; |
| 4994 | } | 5335 | } |
| 4995 | 5336 | ||
| 4996 | if (fpid->pid == ftrace_swapper_pid) | 5337 | if (fpid->pid == ftrace_swapper_pid) |
| 4997 | seq_printf(m, "swapper tasks\n"); | 5338 | seq_puts(m, "swapper tasks\n"); |
| 4998 | else | 5339 | else |
| 4999 | seq_printf(m, "%u\n", pid_vnr(fpid->pid)); | 5340 | seq_printf(m, "%u\n", pid_vnr(fpid->pid)); |
| 5000 | 5341 | ||
| @@ -5207,6 +5548,7 @@ static struct ftrace_ops graph_ops = { | |||
| 5207 | FTRACE_OPS_FL_STUB, | 5548 | FTRACE_OPS_FL_STUB, |
| 5208 | #ifdef FTRACE_GRAPH_TRAMP_ADDR | 5549 | #ifdef FTRACE_GRAPH_TRAMP_ADDR |
| 5209 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, | 5550 | .trampoline = FTRACE_GRAPH_TRAMP_ADDR, |
| 5551 | /* trampoline_size is only needed for dynamically allocated tramps */ | ||
| 5210 | #endif | 5552 | #endif |
| 5211 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) | 5553 | ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash) |
| 5212 | }; | 5554 | }; |
| @@ -5436,7 +5778,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
| 5436 | update_function_graph_func(); | 5778 | update_function_graph_func(); |
| 5437 | 5779 | ||
| 5438 | ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); | 5780 | ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET); |
| 5439 | |||
| 5440 | out: | 5781 | out: |
| 5441 | mutex_unlock(&ftrace_lock); | 5782 | mutex_unlock(&ftrace_lock); |
| 5442 | return ret; | 5783 | return ret; |
| @@ -5457,6 +5798,17 @@ void unregister_ftrace_graph(void) | |||
| 5457 | unregister_pm_notifier(&ftrace_suspend_notifier); | 5798 | unregister_pm_notifier(&ftrace_suspend_notifier); |
| 5458 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); | 5799 | unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL); |
| 5459 | 5800 | ||
| 5801 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
| 5802 | /* | ||
| 5803 | * Function graph does not allocate the trampoline, but | ||
| 5804 | * other users of global_ops do. We need to reset the ALLOC_TRAMP flag | ||
| 5805 | * if one was used. | ||
| 5806 | */ | ||
| 5807 | global_ops.trampoline = save_global_trampoline; | ||
| 5808 | if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP) | ||
| 5809 | global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP; | ||
| 5810 | #endif | ||
| 5811 | |||
| 5460 | out: | 5812 | out: |
| 5461 | mutex_unlock(&ftrace_lock); | 5813 | mutex_unlock(&ftrace_lock); |
| 5462 | } | 5814 | } |
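The restore above implies a matching save when graph tracing starts; presumably register_ftrace_graph() stashes the global_ops state before ftrace_startup() can hand it a dynamically allocated trampoline, along these lines (a guess at the counterpart, which is not shown in this hunk):

    #ifdef CONFIG_DYNAMIC_FTRACE
            /* Save what unregister_ftrace_graph() will restore. */
            save_global_trampoline = global_ops.trampoline;
            save_global_flags = global_ops.flags;
    #endif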
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index 2d75c94ae87d..7a4104cb95cb 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
| @@ -34,21 +34,19 @@ static void update_pages_handler(struct work_struct *work); | |||
| 34 | */ | 34 | */ |
| 35 | int ring_buffer_print_entry_header(struct trace_seq *s) | 35 | int ring_buffer_print_entry_header(struct trace_seq *s) |
| 36 | { | 36 | { |
| 37 | int ret; | 37 | trace_seq_puts(s, "# compressed entry header\n"); |
| 38 | 38 | trace_seq_puts(s, "\ttype_len : 5 bits\n"); | |
| 39 | ret = trace_seq_puts(s, "# compressed entry header\n"); | 39 | trace_seq_puts(s, "\ttime_delta : 27 bits\n"); |
| 40 | ret = trace_seq_puts(s, "\ttype_len : 5 bits\n"); | 40 | trace_seq_puts(s, "\tarray : 32 bits\n"); |
| 41 | ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n"); | 41 | trace_seq_putc(s, '\n'); |
| 42 | ret = trace_seq_puts(s, "\tarray : 32 bits\n"); | 42 | trace_seq_printf(s, "\tpadding : type == %d\n", |
| 43 | ret = trace_seq_putc(s, '\n'); | 43 | RINGBUF_TYPE_PADDING); |
| 44 | ret = trace_seq_printf(s, "\tpadding : type == %d\n", | 44 | trace_seq_printf(s, "\ttime_extend : type == %d\n", |
| 45 | RINGBUF_TYPE_PADDING); | 45 | RINGBUF_TYPE_TIME_EXTEND); |
| 46 | ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", | 46 | trace_seq_printf(s, "\tdata max type_len == %d\n", |
| 47 | RINGBUF_TYPE_TIME_EXTEND); | 47 | RINGBUF_TYPE_DATA_TYPE_LEN_MAX); |
| 48 | ret = trace_seq_printf(s, "\tdata max type_len == %d\n", | ||
| 49 | RINGBUF_TYPE_DATA_TYPE_LEN_MAX); | ||
| 50 | 48 | ||
| 51 | return ret; | 49 | return !trace_seq_has_overflowed(s); |
| 52 | } | 50 | } |
| 53 | 51 | ||
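This is the new trace_seq convention applied throughout the series: the writers return void, and a caller emits everything unconditionally, then tests trace_seq_has_overflowed() once at the end. The shape of a converted function:

    int print_something(struct trace_seq *s)
    {
            trace_seq_puts(s, "header\n");          /* no per-call checks */
            trace_seq_printf(s, "value: %d\n", 42);

            /* One overflow test replaces all the old return-value checks. */
            return !trace_seq_has_overflowed(s);
    }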
| 54 | /* | 52 | /* |
| @@ -419,32 +417,31 @@ static inline int test_time_stamp(u64 delta) | |||
| 419 | int ring_buffer_print_page_header(struct trace_seq *s) | 417 | int ring_buffer_print_page_header(struct trace_seq *s) |
| 420 | { | 418 | { |
| 421 | struct buffer_data_page field; | 419 | struct buffer_data_page field; |
| 422 | int ret; | ||
| 423 | 420 | ||
| 424 | ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t" | 421 | trace_seq_printf(s, "\tfield: u64 timestamp;\t" |
| 425 | "offset:0;\tsize:%u;\tsigned:%u;\n", | 422 | "offset:0;\tsize:%u;\tsigned:%u;\n", |
| 426 | (unsigned int)sizeof(field.time_stamp), | 423 | (unsigned int)sizeof(field.time_stamp), |
| 427 | (unsigned int)is_signed_type(u64)); | 424 | (unsigned int)is_signed_type(u64)); |
| 428 | |||
| 429 | ret = trace_seq_printf(s, "\tfield: local_t commit;\t" | ||
| 430 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
| 431 | (unsigned int)offsetof(typeof(field), commit), | ||
| 432 | (unsigned int)sizeof(field.commit), | ||
| 433 | (unsigned int)is_signed_type(long)); | ||
| 434 | |||
| 435 | ret = trace_seq_printf(s, "\tfield: int overwrite;\t" | ||
| 436 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
| 437 | (unsigned int)offsetof(typeof(field), commit), | ||
| 438 | 1, | ||
| 439 | (unsigned int)is_signed_type(long)); | ||
| 440 | |||
| 441 | ret = trace_seq_printf(s, "\tfield: char data;\t" | ||
| 442 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
| 443 | (unsigned int)offsetof(typeof(field), data), | ||
| 444 | (unsigned int)BUF_PAGE_SIZE, | ||
| 445 | (unsigned int)is_signed_type(char)); | ||
| 446 | 425 | ||
| 447 | return ret; | 426 | trace_seq_printf(s, "\tfield: local_t commit;\t" |
| 427 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
| 428 | (unsigned int)offsetof(typeof(field), commit), | ||
| 429 | (unsigned int)sizeof(field.commit), | ||
| 430 | (unsigned int)is_signed_type(long)); | ||
| 431 | |||
| 432 | trace_seq_printf(s, "\tfield: int overwrite;\t" | ||
| 433 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
| 434 | (unsigned int)offsetof(typeof(field), commit), | ||
| 435 | 1, | ||
| 436 | (unsigned int)is_signed_type(long)); | ||
| 437 | |||
| 438 | trace_seq_printf(s, "\tfield: char data;\t" | ||
| 439 | "offset:%u;\tsize:%u;\tsigned:%u;\n", | ||
| 440 | (unsigned int)offsetof(typeof(field), data), | ||
| 441 | (unsigned int)BUF_PAGE_SIZE, | ||
| 442 | (unsigned int)is_signed_type(char)); | ||
| 443 | |||
| 444 | return !trace_seq_has_overflowed(s); | ||
| 448 | } | 445 | } |
| 449 | 446 | ||
| 450 | struct rb_irq_work { | 447 | struct rb_irq_work { |
| @@ -538,16 +535,18 @@ static void rb_wake_up_waiters(struct irq_work *work) | |||
| 538 | * ring_buffer_wait - wait for input to the ring buffer | 535 | * ring_buffer_wait - wait for input to the ring buffer |
| 539 | * @buffer: buffer to wait on | 536 | * @buffer: buffer to wait on |
| 540 | * @cpu: the cpu buffer to wait on | 537 | * @cpu: the cpu buffer to wait on |
| 538 | * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS | ||
| 541 | * | 539 | * |
| 542 | * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon | 540 | * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon |
| 543 | * as data is added to any of the @buffer's cpu buffers. Otherwise | 541 | * as data is added to any of the @buffer's cpu buffers. Otherwise |
| 544 | * it will wait for data to be added to a specific cpu buffer. | 542 | * it will wait for data to be added to a specific cpu buffer. |
| 545 | */ | 543 | */ |
| 546 | int ring_buffer_wait(struct ring_buffer *buffer, int cpu) | 544 | int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full) |
| 547 | { | 545 | { |
| 548 | struct ring_buffer_per_cpu *cpu_buffer; | 546 | struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer); |
| 549 | DEFINE_WAIT(wait); | 547 | DEFINE_WAIT(wait); |
| 550 | struct rb_irq_work *work; | 548 | struct rb_irq_work *work; |
| 549 | int ret = 0; | ||
| 551 | 550 | ||
| 552 | /* | 551 | /* |
| 553 | * Depending on what the caller is waiting for, either any | 552 | * Depending on what the caller is waiting for, either any |
| @@ -564,36 +563,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu) | |||
| 564 | } | 563 | } |
| 565 | 564 | ||
| 566 | 565 | ||
| 567 | prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); | 566 | while (true) { |
| 567 | prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); | ||
| 568 | 568 | ||
| 569 | /* | 569 | /* |
| 570 | * The events can happen in critical sections where | 570 | * The events can happen in critical sections where |
| 571 | * checking a work queue can cause deadlocks. | 571 | * checking a work queue can cause deadlocks. |
| 572 | * After adding a task to the queue, this flag is set | 572 | * After adding a task to the queue, this flag is set |
| 573 | * only to notify events to try to wake up the queue | 573 | * only to notify events to try to wake up the queue |
| 574 | * using irq_work. | 574 | * using irq_work. |
| 575 | * | 575 | * |
| 576 | * We don't clear it even if the buffer is no longer | 576 | * We don't clear it even if the buffer is no longer |
| 577 | * empty. The flag only causes the next event to run | 577 | * empty. The flag only causes the next event to run |
| 578 | * irq_work to do the work queue wake up. The worst | 578 | * irq_work to do the work queue wake up. The worst |
| 579 | * that can happen if we race with !trace_empty() is that | 579 | * that can happen if we race with !trace_empty() is that |
| 580 | * an event will cause an irq_work to try to wake up | 580 | * an event will cause an irq_work to try to wake up |
| 581 | * an empty queue. | 581 | * an empty queue. |
| 582 | * | 582 | * |
| 583 | * There's no reason to protect this flag either, as | 583 | * There's no reason to protect this flag either, as |
| 584 | * the work queue and irq_work logic will do the necessary | 584 | * the work queue and irq_work logic will do the necessary |
| 585 | * synchronization for the wake ups. The only thing | 585 | * synchronization for the wake ups. The only thing |
| 586 | * that is necessary is that the wake up happens after | 586 | * that is necessary is that the wake up happens after |
| 587 | * a task has been queued. Spurious wake ups are OK. | 587 | * a task has been queued. Spurious wake ups are OK. |
| 588 | */ | 588 | */ |
| 589 | work->waiters_pending = true; | 589 | work->waiters_pending = true; |
| 590 | |||
| 591 | if (signal_pending(current)) { | ||
| 592 | ret = -EINTR; | ||
| 593 | break; | ||
| 594 | } | ||
| 595 | |||
| 596 | if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) | ||
| 597 | break; | ||
| 598 | |||
| 599 | if (cpu != RING_BUFFER_ALL_CPUS && | ||
| 600 | !ring_buffer_empty_cpu(buffer, cpu)) { | ||
| 601 | unsigned long flags; | ||
| 602 | bool pagebusy; | ||
| 603 | |||
| 604 | if (!full) | ||
| 605 | break; | ||
| 606 | |||
| 607 | raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | ||
| 608 | pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; | ||
| 609 | raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | ||
| 610 | |||
| 611 | if (!pagebusy) | ||
| 612 | break; | ||
| 613 | } | ||
| 590 | 614 | ||
| 591 | if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) || | ||
| 592 | (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu))) | ||
| 593 | schedule(); | 615 | schedule(); |
| 616 | } | ||
| 594 | 617 | ||
| 595 | finish_wait(&work->waiters, &wait); | 618 | finish_wait(&work->waiters, &wait); |
| 596 | return 0; | 619 | |
| 620 | return ret; | ||
| 597 | } | 621 | } |
| 598 | 622 | ||
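Callers now drive ring_buffer_wait() in the obvious loop: pass full=true to sleep until the reader page fills (for splice-style readers), propagate -EINTR, and re-check for data after each return. A sketch of a consumer, with declarations and locking elided:

    for (;;) {
            ret = ring_buffer_wait(buffer, cpu, true); /* wait for a full page */
            if (ret == -EINTR)
                    return ret;             /* signal: let the read() bail out */

            if (!ring_buffer_empty_cpu(buffer, cpu))
                    break;                  /* data arrived; go consume it */
    }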
| 599 | /** | 623 | /** |
diff --git a/kernel/trace/ring_buffer_benchmark.c b/kernel/trace/ring_buffer_benchmark.c index 0434ff1b808e..3f9e328c30b5 100644 --- a/kernel/trace/ring_buffer_benchmark.c +++ b/kernel/trace/ring_buffer_benchmark.c | |||
| @@ -205,7 +205,6 @@ static void ring_buffer_consumer(void) | |||
| 205 | break; | 205 | break; |
| 206 | 206 | ||
| 207 | schedule(); | 207 | schedule(); |
| 208 | __set_current_state(TASK_RUNNING); | ||
| 209 | } | 208 | } |
| 210 | reader_finish = 0; | 209 | reader_finish = 0; |
| 211 | complete(&read_done); | 210 | complete(&read_done); |
| @@ -379,7 +378,6 @@ static int ring_buffer_consumer_thread(void *arg) | |||
| 379 | break; | 378 | break; |
| 380 | 379 | ||
| 381 | schedule(); | 380 | schedule(); |
| 382 | __set_current_state(TASK_RUNNING); | ||
| 383 | } | 381 | } |
| 384 | __set_current_state(TASK_RUNNING); | 382 | __set_current_state(TASK_RUNNING); |
| 385 | 383 | ||
| @@ -407,7 +405,6 @@ static int ring_buffer_producer_thread(void *arg) | |||
| 407 | trace_printk("Sleeping for 10 secs\n"); | 405 | trace_printk("Sleeping for 10 secs\n"); |
| 408 | set_current_state(TASK_INTERRUPTIBLE); | 406 | set_current_state(TASK_INTERRUPTIBLE); |
| 409 | schedule_timeout(HZ * SLEEP_TIME); | 407 | schedule_timeout(HZ * SLEEP_TIME); |
| 410 | __set_current_state(TASK_RUNNING); | ||
| 411 | } | 408 | } |
| 412 | 409 | ||
| 413 | if (kill_test) | 410 | if (kill_test) |
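The deleted __set_current_state(TASK_RUNNING) calls were redundant: schedule() only returns once the task has been selected to run again, at which point its state is already TASK_RUNNING. The canonical sleep loop therefore needs the state reset only on the path that breaks out before sleeping:

    for (;;) {
            set_current_state(TASK_INTERRUPTIBLE);
            if (condition)
                    break;                  /* still marked INTERRUPTIBLE here */
            schedule();                     /* returns with us TASK_RUNNING */
    }
    __set_current_state(TASK_RUNNING);      /* undo the final state set */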
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8a528392b1f4..2e767972e99c 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
| @@ -63,6 +63,10 @@ static bool __read_mostly tracing_selftest_running; | |||
| 63 | */ | 63 | */ |
| 64 | bool __read_mostly tracing_selftest_disabled; | 64 | bool __read_mostly tracing_selftest_disabled; |
| 65 | 65 | ||
| 66 | /* Pipe tracepoints to printk */ | ||
| 67 | struct trace_iterator *tracepoint_print_iter; | ||
| 68 | int tracepoint_printk; | ||
| 69 | |||
| 66 | /* For tracers that don't implement custom flags */ | 70 | /* For tracers that don't implement custom flags */ |
| 67 | static struct tracer_opt dummy_tracer_opt[] = { | 71 | static struct tracer_opt dummy_tracer_opt[] = { |
| 68 | { } | 72 | { } |
| @@ -155,10 +159,11 @@ __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); | |||
| 155 | 159 | ||
| 156 | static int __init stop_trace_on_warning(char *str) | 160 | static int __init stop_trace_on_warning(char *str) |
| 157 | { | 161 | { |
| 158 | __disable_trace_on_warning = 1; | 162 | if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) |
| 163 | __disable_trace_on_warning = 1; | ||
| 159 | return 1; | 164 | return 1; |
| 160 | } | 165 | } |
| 161 | __setup("traceoff_on_warning=", stop_trace_on_warning); | 166 | __setup("traceoff_on_warning", stop_trace_on_warning); |
| 162 | 167 | ||
| 163 | static int __init boot_alloc_snapshot(char *str) | 168 | static int __init boot_alloc_snapshot(char *str) |
| 164 | { | 169 | { |
| @@ -192,6 +197,13 @@ static int __init set_trace_boot_clock(char *str) | |||
| 192 | } | 197 | } |
| 193 | __setup("trace_clock=", set_trace_boot_clock); | 198 | __setup("trace_clock=", set_trace_boot_clock); |
| 194 | 199 | ||
| 200 | static int __init set_tracepoint_printk(char *str) | ||
| 201 | { | ||
| 202 | if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0)) | ||
| 203 | tracepoint_printk = 1; | ||
| 204 | return 1; | ||
| 205 | } | ||
| 206 | __setup("tp_printk", set_tracepoint_printk); | ||
| 195 | 207 | ||
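Both boot parameters now accept an optional value, parsed the same way: bare use or any value other than "=0"/"=off" enables, while those two disable. Illustrative kernel command lines:

    tp_printk
    tp_printk=off
    traceoff_on_warning
    traceoff_on_warning=0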
| 196 | unsigned long long ns2usecs(cycle_t nsec) | 208 | unsigned long long ns2usecs(cycle_t nsec) |
| 197 | { | 209 | { |
| @@ -938,19 +950,20 @@ out: | |||
| 938 | return ret; | 950 | return ret; |
| 939 | } | 951 | } |
| 940 | 952 | ||
| 953 | /* TODO add a seq_buf_to_buffer() */ | ||
| 941 | static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) | 954 | static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) |
| 942 | { | 955 | { |
| 943 | int len; | 956 | int len; |
| 944 | 957 | ||
| 945 | if (s->len <= s->readpos) | 958 | if (trace_seq_used(s) <= s->seq.readpos) |
| 946 | return -EBUSY; | 959 | return -EBUSY; |
| 947 | 960 | ||
| 948 | len = s->len - s->readpos; | 961 | len = trace_seq_used(s) - s->seq.readpos; |
| 949 | if (cnt > len) | 962 | if (cnt > len) |
| 950 | cnt = len; | 963 | cnt = len; |
| 951 | memcpy(buf, s->buffer + s->readpos, cnt); | 964 | memcpy(buf, s->buffer + s->seq.readpos, cnt); |
| 952 | 965 | ||
| 953 | s->readpos += cnt; | 966 | s->seq.readpos += cnt; |
| 954 | return cnt; | 967 | return cnt; |
| 955 | } | 968 | } |
| 956 | 969 | ||
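The s->len and s->readpos accesses become s->seq.len and s->seq.readpos because trace_seq is now a thin wrapper around the new seq_buf; roughly (field set per this series, details may vary):

    struct seq_buf {
            char            *buffer;
            size_t          size;
            size_t          len;            /* bytes written   */
            loff_t          readpos;        /* consumer offset */
    };

    struct trace_seq {
            unsigned char   buffer[PAGE_SIZE];
            struct seq_buf  seq;            /* tracks buffer[] */
            int             full;           /* overflow flag   */
    };

trace_seq_used() is preferred over reading seq.len directly because it clamps the count to the buffer size when an overflow left len past the end.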
| @@ -1076,13 +1089,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 1076 | } | 1089 | } |
| 1077 | #endif /* CONFIG_TRACER_MAX_TRACE */ | 1090 | #endif /* CONFIG_TRACER_MAX_TRACE */ |
| 1078 | 1091 | ||
| 1079 | static int wait_on_pipe(struct trace_iterator *iter) | 1092 | static int wait_on_pipe(struct trace_iterator *iter, bool full) |
| 1080 | { | 1093 | { |
| 1081 | /* Iterators are static, they should be filled or empty */ | 1094 | /* Iterators are static, they should be filled or empty */ |
| 1082 | if (trace_buffer_iter(iter, iter->cpu_file)) | 1095 | if (trace_buffer_iter(iter, iter->cpu_file)) |
| 1083 | return 0; | 1096 | return 0; |
| 1084 | 1097 | ||
| 1085 | return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); | 1098 | return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file, |
| 1099 | full); | ||
| 1086 | } | 1100 | } |
| 1087 | 1101 | ||
| 1088 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 1102 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
| @@ -2028,7 +2042,7 @@ void trace_printk_init_buffers(void) | |||
| 2028 | pr_warning("** trace_printk() being used. Allocating extra memory. **\n"); | 2042 | pr_warning("** trace_printk() being used. Allocating extra memory. **\n"); |
| 2029 | pr_warning("** **\n"); | 2043 | pr_warning("** **\n"); |
| 2030 | pr_warning("** This means that this is a DEBUG kernel and it is **\n"); | 2044 | pr_warning("** This means that this is a DEBUG kernel and it is **\n"); |
| 2031 | pr_warning("** unsafe for produciton use. **\n"); | 2045 | pr_warning("** unsafe for production use. **\n"); |
| 2032 | pr_warning("** **\n"); | 2046 | pr_warning("** **\n"); |
| 2033 | pr_warning("** If you see this message and you are not debugging **\n"); | 2047 | pr_warning("** If you see this message and you are not debugging **\n"); |
| 2034 | pr_warning("** the kernel, report this immediately to your vendor! **\n"); | 2048 | pr_warning("** the kernel, report this immediately to your vendor! **\n"); |
| @@ -2157,9 +2171,7 @@ __trace_array_vprintk(struct ring_buffer *buffer, | |||
| 2157 | goto out; | 2171 | goto out; |
| 2158 | } | 2172 | } |
| 2159 | 2173 | ||
| 2160 | len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); | 2174 | len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args); |
| 2161 | if (len > TRACE_BUF_SIZE) | ||
| 2162 | goto out; | ||
| 2163 | 2175 | ||
| 2164 | local_save_flags(flags); | 2176 | local_save_flags(flags); |
| 2165 | size = sizeof(*entry) + len + 1; | 2177 | size = sizeof(*entry) + len + 1; |
| @@ -2170,8 +2182,7 @@ __trace_array_vprintk(struct ring_buffer *buffer, | |||
| 2170 | entry = ring_buffer_event_data(event); | 2182 | entry = ring_buffer_event_data(event); |
| 2171 | entry->ip = ip; | 2183 | entry->ip = ip; |
| 2172 | 2184 | ||
| 2173 | memcpy(&entry->buf, tbuffer, len); | 2185 | memcpy(&entry->buf, tbuffer, len + 1); |
| 2174 | entry->buf[len] = '\0'; | ||
| 2175 | if (!call_filter_check_discard(call, entry, buffer, event)) { | 2186 | if (!call_filter_check_discard(call, entry, buffer, event)) { |
| 2176 | __buffer_unlock_commit(buffer, event); | 2187 | __buffer_unlock_commit(buffer, event); |
| 2177 | ftrace_trace_stack(buffer, flags, 6, pc); | 2188 | ftrace_trace_stack(buffer, flags, 6, pc); |
| @@ -2508,14 +2519,14 @@ get_total_entries(struct trace_buffer *buf, | |||
| 2508 | 2519 | ||
| 2509 | static void print_lat_help_header(struct seq_file *m) | 2520 | static void print_lat_help_header(struct seq_file *m) |
| 2510 | { | 2521 | { |
| 2511 | seq_puts(m, "# _------=> CPU# \n"); | 2522 | seq_puts(m, "# _------=> CPU# \n" |
| 2512 | seq_puts(m, "# / _-----=> irqs-off \n"); | 2523 | "# / _-----=> irqs-off \n" |
| 2513 | seq_puts(m, "# | / _----=> need-resched \n"); | 2524 | "# | / _----=> need-resched \n" |
| 2514 | seq_puts(m, "# || / _---=> hardirq/softirq \n"); | 2525 | "# || / _---=> hardirq/softirq \n" |
| 2515 | seq_puts(m, "# ||| / _--=> preempt-depth \n"); | 2526 | "# ||| / _--=> preempt-depth \n" |
| 2516 | seq_puts(m, "# |||| / delay \n"); | 2527 | "# |||| / delay \n" |
| 2517 | seq_puts(m, "# cmd pid ||||| time | caller \n"); | 2528 | "# cmd pid ||||| time | caller \n" |
| 2518 | seq_puts(m, "# \\ / ||||| \\ | / \n"); | 2529 | "# \\ / ||||| \\ | / \n"); |
| 2519 | } | 2530 | } |
| 2520 | 2531 | ||
| 2521 | static void print_event_info(struct trace_buffer *buf, struct seq_file *m) | 2532 | static void print_event_info(struct trace_buffer *buf, struct seq_file *m) |
| @@ -2532,20 +2543,20 @@ static void print_event_info(struct trace_buffer *buf, struct seq_file *m) | |||
| 2532 | static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) | 2543 | static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m) |
| 2533 | { | 2544 | { |
| 2534 | print_event_info(buf, m); | 2545 | print_event_info(buf, m); |
| 2535 | seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); | 2546 | seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n" |
| 2536 | seq_puts(m, "# | | | | |\n"); | 2547 | "# | | | | |\n"); |
| 2537 | } | 2548 | } |
| 2538 | 2549 | ||
| 2539 | static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) | 2550 | static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m) |
| 2540 | { | 2551 | { |
| 2541 | print_event_info(buf, m); | 2552 | print_event_info(buf, m); |
| 2542 | seq_puts(m, "# _-----=> irqs-off\n"); | 2553 | seq_puts(m, "# _-----=> irqs-off\n" |
| 2543 | seq_puts(m, "# / _----=> need-resched\n"); | 2554 | "# / _----=> need-resched\n" |
| 2544 | seq_puts(m, "# | / _---=> hardirq/softirq\n"); | 2555 | "# | / _---=> hardirq/softirq\n" |
| 2545 | seq_puts(m, "# || / _--=> preempt-depth\n"); | 2556 | "# || / _--=> preempt-depth\n" |
| 2546 | seq_puts(m, "# ||| / delay\n"); | 2557 | "# ||| / delay\n" |
| 2547 | seq_puts(m, "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n"); | 2558 | "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n" |
| 2548 | seq_puts(m, "# | | | |||| | |\n"); | 2559 | "# | | | |||| | |\n"); |
| 2549 | } | 2560 | } |
| 2550 | 2561 | ||
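Folding the eight seq_puts() calls into one leans on C's adjacent string literal concatenation: the compiler merges the fragments into a single constant, so the whole header becomes one call. In miniature:

    seq_puts(m, "# line one\n"
                "# line two\n");    /* one literal, one call */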
| 2551 | void | 2562 | void |
| @@ -2648,24 +2659,21 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | |||
| 2648 | event = ftrace_find_event(entry->type); | 2659 | event = ftrace_find_event(entry->type); |
| 2649 | 2660 | ||
| 2650 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 2661 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
| 2651 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { | 2662 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) |
| 2652 | if (!trace_print_lat_context(iter)) | 2663 | trace_print_lat_context(iter); |
| 2653 | goto partial; | 2664 | else |
| 2654 | } else { | 2665 | trace_print_context(iter); |
| 2655 | if (!trace_print_context(iter)) | ||
| 2656 | goto partial; | ||
| 2657 | } | ||
| 2658 | } | 2666 | } |
| 2659 | 2667 | ||
| 2668 | if (trace_seq_has_overflowed(s)) | ||
| 2669 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2670 | |||
| 2660 | if (event) | 2671 | if (event) |
| 2661 | return event->funcs->trace(iter, sym_flags, event); | 2672 | return event->funcs->trace(iter, sym_flags, event); |
| 2662 | 2673 | ||
| 2663 | if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) | 2674 | trace_seq_printf(s, "Unknown type %d\n", entry->type); |
| 2664 | goto partial; | ||
| 2665 | 2675 | ||
| 2666 | return TRACE_TYPE_HANDLED; | 2676 | return trace_handle_return(s); |
| 2667 | partial: | ||
| 2668 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2669 | } | 2677 | } |
| 2670 | 2678 | ||
| 2671 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | 2679 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) |
| @@ -2676,22 +2684,20 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | |||
| 2676 | 2684 | ||
| 2677 | entry = iter->ent; | 2685 | entry = iter->ent; |
| 2678 | 2686 | ||
| 2679 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 2687 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) |
| 2680 | if (!trace_seq_printf(s, "%d %d %llu ", | 2688 | trace_seq_printf(s, "%d %d %llu ", |
| 2681 | entry->pid, iter->cpu, iter->ts)) | 2689 | entry->pid, iter->cpu, iter->ts); |
| 2682 | goto partial; | 2690 | |
| 2683 | } | 2691 | if (trace_seq_has_overflowed(s)) |
| 2692 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2684 | 2693 | ||
| 2685 | event = ftrace_find_event(entry->type); | 2694 | event = ftrace_find_event(entry->type); |
| 2686 | if (event) | 2695 | if (event) |
| 2687 | return event->funcs->raw(iter, 0, event); | 2696 | return event->funcs->raw(iter, 0, event); |
| 2688 | 2697 | ||
| 2689 | if (!trace_seq_printf(s, "%d ?\n", entry->type)) | 2698 | trace_seq_printf(s, "%d ?\n", entry->type); |
| 2690 | goto partial; | ||
| 2691 | 2699 | ||
| 2692 | return TRACE_TYPE_HANDLED; | 2700 | return trace_handle_return(s); |
| 2693 | partial: | ||
| 2694 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2695 | } | 2701 | } |
| 2696 | 2702 | ||
| 2697 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | 2703 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) |
| @@ -2704,9 +2710,11 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | |||
| 2704 | entry = iter->ent; | 2710 | entry = iter->ent; |
| 2705 | 2711 | ||
| 2706 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 2712 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
| 2707 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); | 2713 | SEQ_PUT_HEX_FIELD(s, entry->pid); |
| 2708 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | 2714 | SEQ_PUT_HEX_FIELD(s, iter->cpu); |
| 2709 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); | 2715 | SEQ_PUT_HEX_FIELD(s, iter->ts); |
| 2716 | if (trace_seq_has_overflowed(s)) | ||
| 2717 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2710 | } | 2718 | } |
| 2711 | 2719 | ||
| 2712 | event = ftrace_find_event(entry->type); | 2720 | event = ftrace_find_event(entry->type); |
| @@ -2716,9 +2724,9 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | |||
| 2716 | return ret; | 2724 | return ret; |
| 2717 | } | 2725 | } |
| 2718 | 2726 | ||
| 2719 | SEQ_PUT_FIELD_RET(s, newline); | 2727 | SEQ_PUT_FIELD(s, newline); |
| 2720 | 2728 | ||
| 2721 | return TRACE_TYPE_HANDLED; | 2729 | return trace_handle_return(s); |
| 2722 | } | 2730 | } |
| 2723 | 2731 | ||
| 2724 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | 2732 | static enum print_line_t print_bin_fmt(struct trace_iterator *iter) |
| @@ -2730,9 +2738,11 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | |||
| 2730 | entry = iter->ent; | 2738 | entry = iter->ent; |
| 2731 | 2739 | ||
| 2732 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 2740 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
| 2733 | SEQ_PUT_FIELD_RET(s, entry->pid); | 2741 | SEQ_PUT_FIELD(s, entry->pid); |
| 2734 | SEQ_PUT_FIELD_RET(s, iter->cpu); | 2742 | SEQ_PUT_FIELD(s, iter->cpu); |
| 2735 | SEQ_PUT_FIELD_RET(s, iter->ts); | 2743 | SEQ_PUT_FIELD(s, iter->ts); |
| 2744 | if (trace_seq_has_overflowed(s)) | ||
| 2745 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2736 | } | 2746 | } |
| 2737 | 2747 | ||
| 2738 | event = ftrace_find_event(entry->type); | 2748 | event = ftrace_find_event(entry->type); |
| @@ -2778,10 +2788,12 @@ enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
| 2778 | { | 2788 | { |
| 2779 | enum print_line_t ret; | 2789 | enum print_line_t ret; |
| 2780 | 2790 | ||
| 2781 | if (iter->lost_events && | 2791 | if (iter->lost_events) { |
| 2782 | !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", | 2792 | trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n", |
| 2783 | iter->cpu, iter->lost_events)) | 2793 | iter->cpu, iter->lost_events); |
| 2784 | return TRACE_TYPE_PARTIAL_LINE; | 2794 | if (trace_seq_has_overflowed(&iter->seq)) |
| 2795 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 2796 | } | ||
| 2785 | 2797 | ||
| 2786 | if (iter->trace && iter->trace->print_line) { | 2798 | if (iter->trace && iter->trace->print_line) { |
| 2787 | ret = iter->trace->print_line(iter); | 2799 | ret = iter->trace->print_line(iter); |
| @@ -2859,44 +2871,44 @@ static void test_ftrace_alive(struct seq_file *m) | |||
| 2859 | { | 2871 | { |
| 2860 | if (!ftrace_is_dead()) | 2872 | if (!ftrace_is_dead()) |
| 2861 | return; | 2873 | return; |
| 2862 | seq_printf(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"); | 2874 | seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n" |
| 2863 | seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n"); | 2875 | "# MAY BE MISSING FUNCTION EVENTS\n"); |
| 2864 | } | 2876 | } |
| 2865 | 2877 | ||
| 2866 | #ifdef CONFIG_TRACER_MAX_TRACE | 2878 | #ifdef CONFIG_TRACER_MAX_TRACE |
| 2867 | static void show_snapshot_main_help(struct seq_file *m) | 2879 | static void show_snapshot_main_help(struct seq_file *m) |
| 2868 | { | 2880 | { |
| 2869 | seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"); | 2881 | seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n" |
| 2870 | seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); | 2882 | "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" |
| 2871 | seq_printf(m, "# Takes a snapshot of the main buffer.\n"); | 2883 | "# Takes a snapshot of the main buffer.\n" |
| 2872 | seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"); | 2884 | "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n" |
| 2873 | seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); | 2885 | "# (Doesn't have to be '2' works with any number that\n" |
| 2874 | seq_printf(m, "# is not a '0' or '1')\n"); | 2886 | "# is not a '0' or '1')\n"); |
| 2875 | } | 2887 | } |
| 2876 | 2888 | ||
| 2877 | static void show_snapshot_percpu_help(struct seq_file *m) | 2889 | static void show_snapshot_percpu_help(struct seq_file *m) |
| 2878 | { | 2890 | { |
| 2879 | seq_printf(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); | 2891 | seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n"); |
| 2880 | #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP | 2892 | #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP |
| 2881 | seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); | 2893 | seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n" |
| 2882 | seq_printf(m, "# Takes a snapshot of the main buffer for this cpu.\n"); | 2894 | "# Takes a snapshot of the main buffer for this cpu.\n"); |
| 2883 | #else | 2895 | #else |
| 2884 | seq_printf(m, "# echo 1 > snapshot : Not supported with this kernel.\n"); | 2896 | seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n" |
| 2885 | seq_printf(m, "# Must use main snapshot file to allocate.\n"); | 2897 | "# Must use main snapshot file to allocate.\n"); |
| 2886 | #endif | 2898 | #endif |
| 2887 | seq_printf(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"); | 2899 | seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n" |
| 2888 | seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); | 2900 | "# (Doesn't have to be '2' works with any number that\n" |
| 2889 | seq_printf(m, "# is not a '0' or '1')\n"); | 2901 | "# is not a '0' or '1')\n"); |
| 2890 | } | 2902 | } |
| 2891 | 2903 | ||
| 2892 | static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) | 2904 | static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) |
| 2893 | { | 2905 | { |
| 2894 | if (iter->tr->allocated_snapshot) | 2906 | if (iter->tr->allocated_snapshot) |
| 2895 | seq_printf(m, "#\n# * Snapshot is allocated *\n#\n"); | 2907 | seq_puts(m, "#\n# * Snapshot is allocated *\n#\n"); |
| 2896 | else | 2908 | else |
| 2897 | seq_printf(m, "#\n# * Snapshot is freed *\n#\n"); | 2909 | seq_puts(m, "#\n# * Snapshot is freed *\n#\n"); |
| 2898 | 2910 | ||
| 2899 | seq_printf(m, "# Snapshot commands:\n"); | 2911 | seq_puts(m, "# Snapshot commands:\n"); |
| 2900 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) | 2912 | if (iter->cpu_file == RING_BUFFER_ALL_CPUS) |
| 2901 | show_snapshot_main_help(m); | 2913 | show_snapshot_main_help(m); |
| 2902 | else | 2914 | else |
| @@ -3250,7 +3262,7 @@ static int t_show(struct seq_file *m, void *v) | |||
| 3250 | if (!t) | 3262 | if (!t) |
| 3251 | return 0; | 3263 | return 0; |
| 3252 | 3264 | ||
| 3253 | seq_printf(m, "%s", t->name); | 3265 | seq_puts(m, t->name); |
| 3254 | if (t->next) | 3266 | if (t->next) |
| 3255 | seq_putc(m, ' '); | 3267 | seq_putc(m, ' '); |
| 3256 | else | 3268 | else |
| @@ -4313,6 +4325,8 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp) | |||
| 4313 | goto out; | 4325 | goto out; |
| 4314 | } | 4326 | } |
| 4315 | 4327 | ||
| 4328 | trace_seq_init(&iter->seq); | ||
| 4329 | |||
| 4316 | /* | 4330 | /* |
| 4317 | * We make a copy of the current tracer to avoid concurrent | 4331 | * We make a copy of the current tracer to avoid concurrent |
| 4318 | * changes on it while we are reading. | 4332 | * changes on it while we are reading. |
| @@ -4434,15 +4448,12 @@ static int tracing_wait_pipe(struct file *filp) | |||
| 4434 | 4448 | ||
| 4435 | mutex_unlock(&iter->mutex); | 4449 | mutex_unlock(&iter->mutex); |
| 4436 | 4450 | ||
| 4437 | ret = wait_on_pipe(iter); | 4451 | ret = wait_on_pipe(iter, false); |
| 4438 | 4452 | ||
| 4439 | mutex_lock(&iter->mutex); | 4453 | mutex_lock(&iter->mutex); |
| 4440 | 4454 | ||
| 4441 | if (ret) | 4455 | if (ret) |
| 4442 | return ret; | 4456 | return ret; |
| 4443 | |||
| 4444 | if (signal_pending(current)) | ||
| 4445 | return -EINTR; | ||
| 4446 | } | 4457 | } |
| 4447 | 4458 | ||
| 4448 | return 1; | 4459 | return 1; |
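
The dropped signal_pending() checks in this hunk (and the matching ones further down) are not lost: interruption is folded into the wait itself. A sketch of the reworked helper, on the assumption that ring_buffer_wait() grew a "full" argument and now returns -EINTR itself when a signal is pending:

    static int wait_on_pipe(struct trace_iterator *iter, bool full)
    {
            /* Iterators are static, they should be filled or empty */
            if (trace_buffer_iter(iter, iter->cpu_file))
                    return 0;

            /* assumed to report -EINTR on its own now */
            return ring_buffer_wait(iter->trace_buffer->buffer,
                                    iter->cpu_file, full);
    }
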
| @@ -4509,18 +4520,18 @@ waitagain: | |||
| 4509 | trace_access_lock(iter->cpu_file); | 4520 | trace_access_lock(iter->cpu_file); |
| 4510 | while (trace_find_next_entry_inc(iter) != NULL) { | 4521 | while (trace_find_next_entry_inc(iter) != NULL) { |
| 4511 | enum print_line_t ret; | 4522 | enum print_line_t ret; |
| 4512 | int len = iter->seq.len; | 4523 | int save_len = iter->seq.seq.len; |
| 4513 | 4524 | ||
| 4514 | ret = print_trace_line(iter); | 4525 | ret = print_trace_line(iter); |
| 4515 | if (ret == TRACE_TYPE_PARTIAL_LINE) { | 4526 | if (ret == TRACE_TYPE_PARTIAL_LINE) { |
| 4516 | /* don't print partial lines */ | 4527 | /* don't print partial lines */ |
| 4517 | iter->seq.len = len; | 4528 | iter->seq.seq.len = save_len; |
| 4518 | break; | 4529 | break; |
| 4519 | } | 4530 | } |
| 4520 | if (ret != TRACE_TYPE_NO_CONSUME) | 4531 | if (ret != TRACE_TYPE_NO_CONSUME) |
| 4521 | trace_consume(iter); | 4532 | trace_consume(iter); |
| 4522 | 4533 | ||
| 4523 | if (iter->seq.len >= cnt) | 4534 | if (trace_seq_used(&iter->seq) >= cnt) |
| 4524 | break; | 4535 | break; |
| 4525 | 4536 | ||
| 4526 | /* | 4537 | /* |
| @@ -4536,7 +4547,7 @@ waitagain: | |||
| 4536 | 4547 | ||
| 4537 | /* Now copy what we have to the user */ | 4548 | /* Now copy what we have to the user */ |
| 4538 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 4549 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); |
| 4539 | if (iter->seq.readpos >= iter->seq.len) | 4550 | if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq)) |
| 4540 | trace_seq_init(&iter->seq); | 4551 | trace_seq_init(&iter->seq); |
| 4541 | 4552 | ||
| 4542 | /* | 4553 | /* |
| @@ -4570,20 +4581,33 @@ static size_t | |||
| 4570 | tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) | 4581 | tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) |
| 4571 | { | 4582 | { |
| 4572 | size_t count; | 4583 | size_t count; |
| 4584 | int save_len; | ||
| 4573 | int ret; | 4585 | int ret; |
| 4574 | 4586 | ||
| 4575 | /* Seq buffer is page-sized, exactly what we need. */ | 4587 | /* Seq buffer is page-sized, exactly what we need. */ |
| 4576 | for (;;) { | 4588 | for (;;) { |
| 4577 | count = iter->seq.len; | 4589 | save_len = iter->seq.seq.len; |
| 4578 | ret = print_trace_line(iter); | 4590 | ret = print_trace_line(iter); |
| 4579 | count = iter->seq.len - count; | 4591 | |
| 4580 | if (rem < count) { | 4592 | if (trace_seq_has_overflowed(&iter->seq)) { |
| 4581 | rem = 0; | 4593 | iter->seq.seq.len = save_len; |
| 4582 | iter->seq.len -= count; | ||
| 4583 | break; | 4594 | break; |
| 4584 | } | 4595 | } |
| 4596 | |||
| 4597 | /* | ||
| 4598 | * This should not be hit, because it should only | ||
| 4599 | * be set if the iter->seq overflowed. But check it | ||
| 4600 | * anyway to be safe. | ||
| 4601 | */ | ||
| 4585 | if (ret == TRACE_TYPE_PARTIAL_LINE) { | 4602 | if (ret == TRACE_TYPE_PARTIAL_LINE) { |
| 4586 | iter->seq.len -= count; | 4603 | iter->seq.seq.len = save_len; |
| 4604 | break; | ||
| 4605 | } | ||
| 4606 | |||
| 4607 | count = trace_seq_used(&iter->seq) - save_len; | ||
| 4608 | if (rem < count) { | ||
| 4609 | rem = 0; | ||
| 4610 | iter->seq.seq.len = save_len; | ||
| 4587 | break; | 4611 | break; |
| 4588 | } | 4612 | } |
| 4589 | 4613 | ||
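
The save_len/overflow dance above relies on two small accessors introduced alongside the seq_buf rework. Their presumed behavior, sketched under the assumption that trace_seq now embeds a seq_buf with len and size fields:

    static inline int trace_seq_used(struct trace_seq *s)
    {
            /* Never report more than the buffer can actually hold */
            return min(s->seq.len, s->seq.size);
    }

    static inline bool trace_seq_has_overflowed(struct trace_seq *s)
    {
            /* A failed write pushes len past size (or sets s->full) */
            return s->full || s->seq.len > s->seq.size;
    }

Saving seq.len before print_trace_line() and restoring it on overflow is what keeps a partially formatted line from leaking into the output page.
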
| @@ -4664,13 +4688,13 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, | |||
| 4664 | /* Copy the data into the page, so we can start over. */ | 4688 | /* Copy the data into the page, so we can start over. */ |
| 4665 | ret = trace_seq_to_buffer(&iter->seq, | 4689 | ret = trace_seq_to_buffer(&iter->seq, |
| 4666 | page_address(spd.pages[i]), | 4690 | page_address(spd.pages[i]), |
| 4667 | iter->seq.len); | 4691 | trace_seq_used(&iter->seq)); |
| 4668 | if (ret < 0) { | 4692 | if (ret < 0) { |
| 4669 | __free_page(spd.pages[i]); | 4693 | __free_page(spd.pages[i]); |
| 4670 | break; | 4694 | break; |
| 4671 | } | 4695 | } |
| 4672 | spd.partial[i].offset = 0; | 4696 | spd.partial[i].offset = 0; |
| 4673 | spd.partial[i].len = iter->seq.len; | 4697 | spd.partial[i].len = trace_seq_used(&iter->seq); |
| 4674 | 4698 | ||
| 4675 | trace_seq_init(&iter->seq); | 4699 | trace_seq_init(&iter->seq); |
| 4676 | } | 4700 | } |
| @@ -5372,16 +5396,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf, | |||
| 5372 | goto out_unlock; | 5396 | goto out_unlock; |
| 5373 | } | 5397 | } |
| 5374 | mutex_unlock(&trace_types_lock); | 5398 | mutex_unlock(&trace_types_lock); |
| 5375 | ret = wait_on_pipe(iter); | 5399 | ret = wait_on_pipe(iter, false); |
| 5376 | mutex_lock(&trace_types_lock); | 5400 | mutex_lock(&trace_types_lock); |
| 5377 | if (ret) { | 5401 | if (ret) { |
| 5378 | size = ret; | 5402 | size = ret; |
| 5379 | goto out_unlock; | 5403 | goto out_unlock; |
| 5380 | } | 5404 | } |
| 5381 | if (signal_pending(current)) { | ||
| 5382 | size = -EINTR; | ||
| 5383 | goto out_unlock; | ||
| 5384 | } | ||
| 5385 | goto again; | 5405 | goto again; |
| 5386 | } | 5406 | } |
| 5387 | size = 0; | 5407 | size = 0; |
| @@ -5500,7 +5520,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
| 5500 | }; | 5520 | }; |
| 5501 | struct buffer_ref *ref; | 5521 | struct buffer_ref *ref; |
| 5502 | int entries, size, i; | 5522 | int entries, size, i; |
| 5503 | ssize_t ret; | 5523 | ssize_t ret = 0; |
| 5504 | 5524 | ||
| 5505 | mutex_lock(&trace_types_lock); | 5525 | mutex_lock(&trace_types_lock); |
| 5506 | 5526 | ||
| @@ -5538,13 +5558,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
| 5538 | int r; | 5558 | int r; |
| 5539 | 5559 | ||
| 5540 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); | 5560 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); |
| 5541 | if (!ref) | 5561 | if (!ref) { |
| 5562 | ret = -ENOMEM; | ||
| 5542 | break; | 5563 | break; |
| 5564 | } | ||
| 5543 | 5565 | ||
| 5544 | ref->ref = 1; | 5566 | ref->ref = 1; |
| 5545 | ref->buffer = iter->trace_buffer->buffer; | 5567 | ref->buffer = iter->trace_buffer->buffer; |
| 5546 | ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); | 5568 | ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); |
| 5547 | if (!ref->page) { | 5569 | if (!ref->page) { |
| 5570 | ret = -ENOMEM; | ||
| 5548 | kfree(ref); | 5571 | kfree(ref); |
| 5549 | break; | 5572 | break; |
| 5550 | } | 5573 | } |
| @@ -5582,19 +5605,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, | |||
| 5582 | 5605 | ||
| 5583 | /* did we read anything? */ | 5606 | /* did we read anything? */ |
| 5584 | if (!spd.nr_pages) { | 5607 | if (!spd.nr_pages) { |
| 5608 | if (ret) | ||
| 5609 | goto out; | ||
| 5610 | |||
| 5585 | if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) { | 5611 | if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) { |
| 5586 | ret = -EAGAIN; | 5612 | ret = -EAGAIN; |
| 5587 | goto out; | 5613 | goto out; |
| 5588 | } | 5614 | } |
| 5589 | mutex_unlock(&trace_types_lock); | 5615 | mutex_unlock(&trace_types_lock); |
| 5590 | ret = wait_on_pipe(iter); | 5616 | ret = wait_on_pipe(iter, true); |
| 5591 | mutex_lock(&trace_types_lock); | 5617 | mutex_lock(&trace_types_lock); |
| 5592 | if (ret) | 5618 | if (ret) |
| 5593 | goto out; | 5619 | goto out; |
| 5594 | if (signal_pending(current)) { | 5620 | |
| 5595 | ret = -EINTR; | ||
| 5596 | goto out; | ||
| 5597 | } | ||
| 5598 | goto again; | 5621 | goto again; |
| 5599 | } | 5622 | } |
| 5600 | 5623 | ||
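
The net effect of the new ret handling in tracing_buffers_splice_read(): pages captured before an allocation failure are still delivered, and the error only surfaces when nothing was produced at all. The decision rule, condensed into a hypothetical helper (name and shape illustrative, not part of the patch):

    static ssize_t splice_result(ssize_t err, unsigned int nr_pages)
    {
            /* Deliver whatever was captured before the failure */
            if (nr_pages)
                    return nr_pages;

            /* Nothing captured: report the recorded error (e.g. -ENOMEM)
             * instead of silently blocking for more data */
            return err;
    }
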
| @@ -5671,7 +5694,8 @@ tracing_stats_read(struct file *filp, char __user *ubuf, | |||
| 5671 | cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); | 5694 | cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu); |
| 5672 | trace_seq_printf(s, "read events: %ld\n", cnt); | 5695 | trace_seq_printf(s, "read events: %ld\n", cnt); |
| 5673 | 5696 | ||
| 5674 | count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len); | 5697 | count = simple_read_from_buffer(ubuf, count, ppos, |
| 5698 | s->buffer, trace_seq_used(s)); | ||
| 5675 | 5699 | ||
| 5676 | kfree(s); | 5700 | kfree(s); |
| 5677 | 5701 | ||
| @@ -5752,10 +5776,10 @@ ftrace_snapshot_print(struct seq_file *m, unsigned long ip, | |||
| 5752 | 5776 | ||
| 5753 | seq_printf(m, "%ps:", (void *)ip); | 5777 | seq_printf(m, "%ps:", (void *)ip); |
| 5754 | 5778 | ||
| 5755 | seq_printf(m, "snapshot"); | 5779 | seq_puts(m, "snapshot"); |
| 5756 | 5780 | ||
| 5757 | if (count == -1) | 5781 | if (count == -1) |
| 5758 | seq_printf(m, ":unlimited\n"); | 5782 | seq_puts(m, ":unlimited\n"); |
| 5759 | else | 5783 | else |
| 5760 | seq_printf(m, ":count=%ld\n", count); | 5784 | seq_printf(m, ":count=%ld\n", count); |
| 5761 | 5785 | ||
| @@ -6420,7 +6444,7 @@ static int instance_mkdir (struct inode *inode, struct dentry *dentry, umode_t m | |||
| 6420 | int ret; | 6444 | int ret; |
| 6421 | 6445 | ||
| 6422 | /* Paranoid: Make sure the parent is the "instances" directory */ | 6446 | /* Paranoid: Make sure the parent is the "instances" directory */ |
| 6423 | parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); | 6447 | parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); |
| 6424 | if (WARN_ON_ONCE(parent != trace_instance_dir)) | 6448 | if (WARN_ON_ONCE(parent != trace_instance_dir)) |
| 6425 | return -ENOENT; | 6449 | return -ENOENT; |
| 6426 | 6450 | ||
| @@ -6447,7 +6471,7 @@ static int instance_rmdir(struct inode *inode, struct dentry *dentry) | |||
| 6447 | int ret; | 6471 | int ret; |
| 6448 | 6472 | ||
| 6449 | /* Paranoid: Make sure the parent is the "instances" directory */ | 6473 | /* Paranoid: Make sure the parent is the "instances" directory */ |
| 6450 | parent = hlist_entry(inode->i_dentry.first, struct dentry, d_alias); | 6474 | parent = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); |
| 6451 | if (WARN_ON_ONCE(parent != trace_instance_dir)) | 6475 | if (WARN_ON_ONCE(parent != trace_instance_dir)) |
| 6452 | return -ENOENT; | 6476 | return -ENOENT; |
| 6453 | 6477 | ||
| @@ -6634,11 +6658,19 @@ void | |||
| 6634 | trace_printk_seq(struct trace_seq *s) | 6658 | trace_printk_seq(struct trace_seq *s) |
| 6635 | { | 6659 | { |
| 6636 | /* Probably should print a warning here. */ | 6660 | /* Probably should print a warning here. */ |
| 6637 | if (s->len >= TRACE_MAX_PRINT) | 6661 | if (s->seq.len >= TRACE_MAX_PRINT) |
| 6638 | s->len = TRACE_MAX_PRINT; | 6662 | s->seq.len = TRACE_MAX_PRINT; |
| 6663 | |||
| 6664 | /* | ||
| 6665 | * More paranoid code. Although the buffer size is set to | ||
| 6666 | * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just | ||
| 6667 | * an extra layer of protection. | ||
| 6668 | */ | ||
| 6669 | if (WARN_ON_ONCE(s->seq.len >= s->seq.size)) | ||
| 6670 | s->seq.len = s->seq.size - 1; | ||
| 6639 | 6671 | ||
| 6640 | /* should be zero ended, but we are paranoid. */ | 6672 | /* should be zero ended, but we are paranoid. */ |
| 6641 | s->buffer[s->len] = 0; | 6673 | s->buffer[s->seq.len] = 0; |
| 6642 | 6674 | ||
| 6643 | printk(KERN_TRACE "%s", s->buffer); | 6675 | printk(KERN_TRACE "%s", s->buffer); |
| 6644 | 6676 | ||
| @@ -6877,6 +6909,19 @@ out: | |||
| 6877 | return ret; | 6909 | return ret; |
| 6878 | } | 6910 | } |
| 6879 | 6911 | ||
| 6912 | void __init trace_init(void) | ||
| 6913 | { | ||
| 6914 | if (tracepoint_printk) { | ||
| 6915 | tracepoint_print_iter = | ||
| 6916 | kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL); | ||
| 6917 | if (WARN_ON(!tracepoint_print_iter)) | ||
| 6918 | tracepoint_printk = 0; | ||
| 6919 | } | ||
| 6920 | tracer_alloc_buffers(); | ||
| 6921 | init_ftrace_syscalls(); | ||
| 6922 | trace_event_init(); | ||
| 6923 | } | ||
| 6924 | |||
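
trace_init() reads tracepoint_printk before any initcall has run, so the flag must come from the kernel command line; presumably it is wired up with an early __setup() handler along these lines (the option name and the exact parsing are assumptions):

    static int __init set_tracepoint_printk(char *str)
    {
            /* Anything other than an explicit "=0"/"=off" enables it */
            if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
                    tracepoint_printk = 1;
            return 1;
    }
    __setup("tp_printk", set_tracepoint_printk);
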
| 6880 | __init static int clear_boot_tracer(void) | 6925 | __init static int clear_boot_tracer(void) |
| 6881 | { | 6926 | { |
| 6882 | /* | 6927 | /* |
| @@ -6896,6 +6941,5 @@ __init static int clear_boot_tracer(void) | |||
| 6896 | return 0; | 6941 | return 0; |
| 6897 | } | 6942 | } |
| 6898 | 6943 | ||
| 6899 | early_initcall(tracer_alloc_buffers); | ||
| 6900 | fs_initcall(tracer_init_debugfs); | 6944 | fs_initcall(tracer_init_debugfs); |
| 6901 | late_initcall(clear_boot_tracer); | 6945 | late_initcall(clear_boot_tracer); |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 385391fb1d3b..8de48bac1ce2 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/trace_seq.h> | 14 | #include <linux/trace_seq.h> |
| 15 | #include <linux/ftrace_event.h> | 15 | #include <linux/ftrace_event.h> |
| 16 | #include <linux/compiler.h> | 16 | #include <linux/compiler.h> |
| 17 | #include <linux/trace_seq.h> | ||
| 17 | 18 | ||
| 18 | #ifdef CONFIG_FTRACE_SYSCALLS | 19 | #ifdef CONFIG_FTRACE_SYSCALLS |
| 19 | #include <asm/unistd.h> /* For NR_SYSCALLS */ | 20 | #include <asm/unistd.h> /* For NR_SYSCALLS */ |
| @@ -569,15 +570,6 @@ void trace_init_global_iter(struct trace_iterator *iter); | |||
| 569 | 570 | ||
| 570 | void tracing_iter_reset(struct trace_iterator *iter, int cpu); | 571 | void tracing_iter_reset(struct trace_iterator *iter, int cpu); |
| 571 | 572 | ||
| 572 | void tracing_sched_switch_trace(struct trace_array *tr, | ||
| 573 | struct task_struct *prev, | ||
| 574 | struct task_struct *next, | ||
| 575 | unsigned long flags, int pc); | ||
| 576 | |||
| 577 | void tracing_sched_wakeup_trace(struct trace_array *tr, | ||
| 578 | struct task_struct *wakee, | ||
| 579 | struct task_struct *cur, | ||
| 580 | unsigned long flags, int pc); | ||
| 581 | void trace_function(struct trace_array *tr, | 573 | void trace_function(struct trace_array *tr, |
| 582 | unsigned long ip, | 574 | unsigned long ip, |
| 583 | unsigned long parent_ip, | 575 | unsigned long parent_ip, |
| @@ -597,9 +589,6 @@ void set_graph_array(struct trace_array *tr); | |||
| 597 | 589 | ||
| 598 | void tracing_start_cmdline_record(void); | 590 | void tracing_start_cmdline_record(void); |
| 599 | void tracing_stop_cmdline_record(void); | 591 | void tracing_stop_cmdline_record(void); |
| 600 | void tracing_sched_switch_assign_trace(struct trace_array *tr); | ||
| 601 | void tracing_stop_sched_switch_record(void); | ||
| 602 | void tracing_start_sched_switch_record(void); | ||
| 603 | int register_tracer(struct tracer *type); | 592 | int register_tracer(struct tracer *type); |
| 604 | int is_tracing_stopped(void); | 593 | int is_tracing_stopped(void); |
| 605 | 594 | ||
| @@ -719,6 +708,8 @@ enum print_line_t print_trace_line(struct trace_iterator *iter); | |||
| 719 | 708 | ||
| 720 | extern unsigned long trace_flags; | 709 | extern unsigned long trace_flags; |
| 721 | 710 | ||
| 711 | extern char trace_find_mark(unsigned long long duration); | ||
| 712 | |||
| 722 | /* Standard output formatting function used for function return traces */ | 713 | /* Standard output formatting function used for function return traces */ |
| 723 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 714 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
| 724 | 715 | ||
| @@ -737,7 +728,7 @@ extern unsigned long trace_flags; | |||
| 737 | extern enum print_line_t | 728 | extern enum print_line_t |
| 738 | print_graph_function_flags(struct trace_iterator *iter, u32 flags); | 729 | print_graph_function_flags(struct trace_iterator *iter, u32 flags); |
| 739 | extern void print_graph_headers_flags(struct seq_file *s, u32 flags); | 730 | extern void print_graph_headers_flags(struct seq_file *s, u32 flags); |
| 740 | extern enum print_line_t | 731 | extern void |
| 741 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); | 732 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); |
| 742 | extern void graph_trace_open(struct trace_iterator *iter); | 733 | extern void graph_trace_open(struct trace_iterator *iter); |
| 743 | extern void graph_trace_close(struct trace_iterator *iter); | 734 | extern void graph_trace_close(struct trace_iterator *iter); |
| @@ -1310,4 +1301,18 @@ int perf_ftrace_event_register(struct ftrace_event_call *call, | |||
| 1310 | #define perf_ftrace_event_register NULL | 1301 | #define perf_ftrace_event_register NULL |
| 1311 | #endif | 1302 | #endif |
| 1312 | 1303 | ||
| 1304 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
| 1305 | void init_ftrace_syscalls(void); | ||
| 1306 | #else | ||
| 1307 | static inline void init_ftrace_syscalls(void) { } | ||
| 1308 | #endif | ||
| 1309 | |||
| 1310 | #ifdef CONFIG_EVENT_TRACING | ||
| 1311 | void trace_event_init(void); | ||
| 1312 | #else | ||
| 1313 | static inline void __init trace_event_init(void) { } | ||
| 1314 | #endif | ||
| 1315 | |||
| 1316 | extern struct trace_iterator *tracepoint_print_iter; | ||
| 1317 | |||
| 1313 | #endif /* _LINUX_KERNEL_TRACE_H */ | 1318 | #endif /* _LINUX_KERNEL_TRACE_H */ |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 697fb9bac8f0..7d6e2afde669 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
| @@ -151,22 +151,21 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter, | |||
| 151 | 151 | ||
| 152 | trace_assign_type(field, iter->ent); | 152 | trace_assign_type(field, iter->ent); |
| 153 | 153 | ||
| 154 | if (trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n", | 154 | trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n", |
| 155 | field->correct ? " ok " : " MISS ", | 155 | field->correct ? " ok " : " MISS ", |
| 156 | field->func, | 156 | field->func, |
| 157 | field->file, | 157 | field->file, |
| 158 | field->line)) | 158 | field->line); |
| 159 | return TRACE_TYPE_PARTIAL_LINE; | 159 | |
| 160 | 160 | return trace_handle_return(&iter->seq); | |
| 161 | return TRACE_TYPE_HANDLED; | ||
| 162 | } | 161 | } |
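
trace_handle_return(), used here for the first time in this file, collapses the old per-call error checks into a single query of the seq state; presumably a one-liner of this shape:

    static inline enum print_line_t trace_handle_return(struct trace_seq *s)
    {
            /* Map "the seq buffer overflowed" onto the legacy enum */
            return trace_seq_has_overflowed(s) ?
                    TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
    }
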
| 163 | 162 | ||
| 164 | static void branch_print_header(struct seq_file *s) | 163 | static void branch_print_header(struct seq_file *s) |
| 165 | { | 164 | { |
| 166 | seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT" | 165 | seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT" |
| 167 | " FUNC:FILE:LINE\n"); | 166 | " FUNC:FILE:LINE\n" |
| 168 | seq_puts(s, "# | | | | | " | 167 | "# | | | | | " |
| 169 | " |\n"); | 168 | " |\n"); |
| 170 | } | 169 | } |
| 171 | 170 | ||
| 172 | static struct trace_event_functions trace_branch_funcs = { | 171 | static struct trace_event_functions trace_branch_funcs = { |
| @@ -233,12 +232,12 @@ extern unsigned long __stop_annotated_branch_profile[]; | |||
| 233 | 232 | ||
| 234 | static int annotated_branch_stat_headers(struct seq_file *m) | 233 | static int annotated_branch_stat_headers(struct seq_file *m) |
| 235 | { | 234 | { |
| 236 | seq_printf(m, " correct incorrect %% "); | 235 | seq_puts(m, " correct incorrect % " |
| 237 | seq_printf(m, " Function " | 236 | " Function " |
| 238 | " File Line\n" | 237 | " File Line\n" |
| 239 | " ------- --------- - " | 238 | " ------- --------- - " |
| 240 | " -------- " | 239 | " -------- " |
| 241 | " ---- ----\n"); | 240 | " ---- ----\n"); |
| 242 | return 0; | 241 | return 0; |
| 243 | } | 242 | } |
| 244 | 243 | ||
| @@ -274,7 +273,7 @@ static int branch_stat_show(struct seq_file *m, void *v) | |||
| 274 | 273 | ||
| 275 | seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); | 274 | seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); |
| 276 | if (percent < 0) | 275 | if (percent < 0) |
| 277 | seq_printf(m, " X "); | 276 | seq_puts(m, " X "); |
| 278 | else | 277 | else |
| 279 | seq_printf(m, "%3ld ", percent); | 278 | seq_printf(m, "%3ld ", percent); |
| 280 | seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line); | 279 | seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line); |
| @@ -362,12 +361,12 @@ extern unsigned long __stop_branch_profile[]; | |||
| 362 | 361 | ||
| 363 | static int all_branch_stat_headers(struct seq_file *m) | 362 | static int all_branch_stat_headers(struct seq_file *m) |
| 364 | { | 363 | { |
| 365 | seq_printf(m, " miss hit %% "); | 364 | seq_puts(m, " miss hit % " |
| 366 | seq_printf(m, " Function " | 365 | " Function " |
| 367 | " File Line\n" | 366 | " File Line\n" |
| 368 | " ------- --------- - " | 367 | " ------- --------- - " |
| 369 | " -------- " | 368 | " -------- " |
| 370 | " ---- ----\n"); | 369 | " ---- ----\n"); |
| 371 | return 0; | 370 | return 0; |
| 372 | } | 371 | } |
| 373 | 372 | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index ef06ce7e9cf8..366a78a3e61e 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
| @@ -212,8 +212,40 @@ void *ftrace_event_buffer_reserve(struct ftrace_event_buffer *fbuffer, | |||
| 212 | } | 212 | } |
| 213 | EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); | 213 | EXPORT_SYMBOL_GPL(ftrace_event_buffer_reserve); |
| 214 | 214 | ||
| 215 | static DEFINE_SPINLOCK(tracepoint_iter_lock); | ||
| 216 | |||
| 217 | static void output_printk(struct ftrace_event_buffer *fbuffer) | ||
| 218 | { | ||
| 219 | struct ftrace_event_call *event_call; | ||
| 220 | struct trace_event *event; | ||
| 221 | unsigned long flags; | ||
| 222 | struct trace_iterator *iter = tracepoint_print_iter; | ||
| 223 | |||
| 224 | if (!iter) | ||
| 225 | return; | ||
| 226 | |||
| 227 | event_call = fbuffer->ftrace_file->event_call; | ||
| 228 | if (!event_call || !event_call->event.funcs || | ||
| 229 | !event_call->event.funcs->trace) | ||
| 230 | return; | ||
| 231 | |||
| 232 | event = &fbuffer->ftrace_file->event_call->event; | ||
| 233 | |||
| 234 | spin_lock_irqsave(&tracepoint_iter_lock, flags); | ||
| 235 | trace_seq_init(&iter->seq); | ||
| 236 | iter->ent = fbuffer->entry; | ||
| 237 | event_call->event.funcs->trace(iter, 0, event); | ||
| 238 | trace_seq_putc(&iter->seq, 0); | ||
| 239 | printk("%s", iter->seq.buffer); | ||
| 240 | |||
| 241 | spin_unlock_irqrestore(&tracepoint_iter_lock, flags); | ||
| 242 | } | ||
| 243 | |||
| 215 | void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) | 244 | void ftrace_event_buffer_commit(struct ftrace_event_buffer *fbuffer) |
| 216 | { | 245 | { |
| 246 | if (tracepoint_printk) | ||
| 247 | output_printk(fbuffer); | ||
| 248 | |||
| 217 | event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, | 249 | event_trigger_unlock_commit(fbuffer->ftrace_file, fbuffer->buffer, |
| 218 | fbuffer->event, fbuffer->entry, | 250 | fbuffer->event, fbuffer->entry, |
| 219 | fbuffer->flags, fbuffer->pc); | 251 | fbuffer->flags, fbuffer->pc); |
| @@ -461,7 +493,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file) | |||
| 461 | 493 | ||
| 462 | if (dir) { | 494 | if (dir) { |
| 463 | spin_lock(&dir->d_lock); /* probably unneeded */ | 495 | spin_lock(&dir->d_lock); /* probably unneeded */ |
| 464 | list_for_each_entry(child, &dir->d_subdirs, d_u.d_child) { | 496 | list_for_each_entry(child, &dir->d_subdirs, d_child) { |
| 465 | if (child->d_inode) /* probably unneeded */ | 497 | if (child->d_inode) /* probably unneeded */ |
| 466 | child->d_inode->i_private = NULL; | 498 | child->d_inode->i_private = NULL; |
| 467 | } | 499 | } |
| @@ -918,7 +950,7 @@ static int f_show(struct seq_file *m, void *v) | |||
| 918 | case FORMAT_HEADER: | 950 | case FORMAT_HEADER: |
| 919 | seq_printf(m, "name: %s\n", ftrace_event_name(call)); | 951 | seq_printf(m, "name: %s\n", ftrace_event_name(call)); |
| 920 | seq_printf(m, "ID: %d\n", call->event.type); | 952 | seq_printf(m, "ID: %d\n", call->event.type); |
| 921 | seq_printf(m, "format:\n"); | 953 | seq_puts(m, "format:\n"); |
| 922 | return 0; | 954 | return 0; |
| 923 | 955 | ||
| 924 | case FORMAT_FIELD_SEPERATOR: | 956 | case FORMAT_FIELD_SEPERATOR: |
| @@ -1044,7 +1076,8 @@ event_filter_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
| 1044 | mutex_unlock(&event_mutex); | 1076 | mutex_unlock(&event_mutex); |
| 1045 | 1077 | ||
| 1046 | if (file) | 1078 | if (file) |
| 1047 | r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); | 1079 | r = simple_read_from_buffer(ubuf, cnt, ppos, |
| 1080 | s->buffer, trace_seq_used(s)); | ||
| 1048 | 1081 | ||
| 1049 | kfree(s); | 1082 | kfree(s); |
| 1050 | 1083 | ||
| @@ -1210,7 +1243,8 @@ subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt, | |||
| 1210 | trace_seq_init(s); | 1243 | trace_seq_init(s); |
| 1211 | 1244 | ||
| 1212 | print_subsystem_event_filter(system, s); | 1245 | print_subsystem_event_filter(system, s); |
| 1213 | r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); | 1246 | r = simple_read_from_buffer(ubuf, cnt, ppos, |
| 1247 | s->buffer, trace_seq_used(s)); | ||
| 1214 | 1248 | ||
| 1215 | kfree(s); | 1249 | kfree(s); |
| 1216 | 1250 | ||
| @@ -1265,7 +1299,8 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos) | |||
| 1265 | trace_seq_init(s); | 1299 | trace_seq_init(s); |
| 1266 | 1300 | ||
| 1267 | func(s); | 1301 | func(s); |
| 1268 | r = simple_read_from_buffer(ubuf, cnt, ppos, s->buffer, s->len); | 1302 | r = simple_read_from_buffer(ubuf, cnt, ppos, |
| 1303 | s->buffer, trace_seq_used(s)); | ||
| 1269 | 1304 | ||
| 1270 | kfree(s); | 1305 | kfree(s); |
| 1271 | 1306 | ||
| @@ -1988,7 +2023,7 @@ event_enable_print(struct seq_file *m, unsigned long ip, | |||
| 1988 | ftrace_event_name(data->file->event_call)); | 2023 | ftrace_event_name(data->file->event_call)); |
| 1989 | 2024 | ||
| 1990 | if (data->count == -1) | 2025 | if (data->count == -1) |
| 1991 | seq_printf(m, ":unlimited\n"); | 2026 | seq_puts(m, ":unlimited\n"); |
| 1992 | else | 2027 | else |
| 1993 | seq_printf(m, ":count=%ld\n", data->count); | 2028 | seq_printf(m, ":count=%ld\n", data->count); |
| 1994 | 2029 | ||
| @@ -2477,8 +2512,14 @@ static __init int event_trace_init(void) | |||
| 2477 | #endif | 2512 | #endif |
| 2478 | return 0; | 2513 | return 0; |
| 2479 | } | 2514 | } |
| 2480 | early_initcall(event_trace_memsetup); | 2515 | |
| 2481 | core_initcall(event_trace_enable); | 2516 | void __init trace_event_init(void) |
| 2517 | { | ||
| 2518 | event_trace_memsetup(); | ||
| 2519 | init_ftrace_syscalls(); | ||
| 2520 | event_trace_enable(); | ||
| 2521 | } | ||
| 2522 | |||
| 2482 | fs_initcall(event_trace_init); | 2523 | fs_initcall(event_trace_init); |
| 2483 | 2524 | ||
| 2484 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 2525 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
| @@ -2513,8 +2554,11 @@ static __init int event_test_thread(void *unused) | |||
| 2513 | kfree(test_malloc); | 2554 | kfree(test_malloc); |
| 2514 | 2555 | ||
| 2515 | set_current_state(TASK_INTERRUPTIBLE); | 2556 | set_current_state(TASK_INTERRUPTIBLE); |
| 2516 | while (!kthread_should_stop()) | 2557 | while (!kthread_should_stop()) { |
| 2517 | schedule(); | 2558 | schedule(); |
| 2559 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 2560 | } | ||
| 2561 | __set_current_state(TASK_RUNNING); | ||
| 2518 | 2562 | ||
| 2519 | return 0; | 2563 | return 0; |
| 2520 | } | 2564 | } |
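
The loop restructuring above fixes a classic kthread pattern: the task state has to be re-armed on every iteration, otherwise schedule() is eventually called with the task in TASK_RUNNING and the thread burns CPU instead of sleeping. The corrected idiom in isolation:

    static int example_wait_thread(void *unused)
    {
            set_current_state(TASK_INTERRUPTIBLE);
            while (!kthread_should_stop()) {
                    schedule();
                    /* Re-arm before re-checking the stop condition;
                     * schedule() returns with the task in TASK_RUNNING */
                    set_current_state(TASK_INTERRUPTIBLE);
            }
            __set_current_state(TASK_RUNNING);
            return 0;
    }
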
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c index 7a8c1528e141..ced69da0ff55 100644 --- a/kernel/trace/trace_events_filter.c +++ b/kernel/trace/trace_events_filter.c | |||
| @@ -45,6 +45,7 @@ enum filter_op_ids | |||
| 45 | OP_GT, | 45 | OP_GT, |
| 46 | OP_GE, | 46 | OP_GE, |
| 47 | OP_BAND, | 47 | OP_BAND, |
| 48 | OP_NOT, | ||
| 48 | OP_NONE, | 49 | OP_NONE, |
| 49 | OP_OPEN_PAREN, | 50 | OP_OPEN_PAREN, |
| 50 | }; | 51 | }; |
| @@ -67,6 +68,7 @@ static struct filter_op filter_ops[] = { | |||
| 67 | { OP_GT, ">", 5 }, | 68 | { OP_GT, ">", 5 }, |
| 68 | { OP_GE, ">=", 5 }, | 69 | { OP_GE, ">=", 5 }, |
| 69 | { OP_BAND, "&", 6 }, | 70 | { OP_BAND, "&", 6 }, |
| 71 | { OP_NOT, "!", 6 }, | ||
| 70 | { OP_NONE, "OP_NONE", 0 }, | 72 | { OP_NONE, "OP_NONE", 0 }, |
| 71 | { OP_OPEN_PAREN, "(", 0 }, | 73 | { OP_OPEN_PAREN, "(", 0 }, |
| 72 | }; | 74 | }; |
| @@ -85,6 +87,7 @@ enum { | |||
| 85 | FILT_ERR_MISSING_FIELD, | 87 | FILT_ERR_MISSING_FIELD, |
| 86 | FILT_ERR_INVALID_FILTER, | 88 | FILT_ERR_INVALID_FILTER, |
| 87 | FILT_ERR_IP_FIELD_ONLY, | 89 | FILT_ERR_IP_FIELD_ONLY, |
| 90 | FILT_ERR_ILLEGAL_NOT_OP, | ||
| 88 | }; | 91 | }; |
| 89 | 92 | ||
| 90 | static char *err_text[] = { | 93 | static char *err_text[] = { |
| @@ -101,6 +104,7 @@ static char *err_text[] = { | |||
| 101 | "Missing field name and/or value", | 104 | "Missing field name and/or value", |
| 102 | "Meaningless filter expression", | 105 | "Meaningless filter expression", |
| 103 | "Only 'ip' field is supported for function trace", | 106 | "Only 'ip' field is supported for function trace", |
| 107 | "Illegal use of '!'", | ||
| 104 | }; | 108 | }; |
| 105 | 109 | ||
| 106 | struct opstack_op { | 110 | struct opstack_op { |
| @@ -139,6 +143,7 @@ struct pred_stack { | |||
| 139 | int index; | 143 | int index; |
| 140 | }; | 144 | }; |
| 141 | 145 | ||
| 146 | /* Report a match when the boolean of the raw match equals the inverted 'not' flag */ | ||
| 142 | #define DEFINE_COMPARISON_PRED(type) \ | 147 | #define DEFINE_COMPARISON_PRED(type) \ |
| 143 | static int filter_pred_##type(struct filter_pred *pred, void *event) \ | 148 | static int filter_pred_##type(struct filter_pred *pred, void *event) \ |
| 144 | { \ | 149 | { \ |
| @@ -166,7 +171,7 @@ static int filter_pred_##type(struct filter_pred *pred, void *event) \ | |||
| 166 | break; \ | 171 | break; \ |
| 167 | } \ | 172 | } \ |
| 168 | \ | 173 | \ |
| 169 | return match; \ | 174 | return !!match == !pred->not; \ |
| 170 | } | 175 | } |
| 171 | 176 | ||
| 172 | #define DEFINE_EQUALITY_PRED(size) \ | 177 | #define DEFINE_EQUALITY_PRED(size) \ |
| @@ -484,9 +489,10 @@ static int process_ops(struct filter_pred *preds, | |||
| 484 | if (!WARN_ON_ONCE(!pred->fn)) | 489 | if (!WARN_ON_ONCE(!pred->fn)) |
| 485 | match = pred->fn(pred, rec); | 490 | match = pred->fn(pred, rec); |
| 486 | if (!!match == type) | 491 | if (!!match == type) |
| 487 | return match; | 492 | break; |
| 488 | } | 493 | } |
| 489 | return match; | 494 | /* Report a match when the boolean of the raw match equals the inverted 'not' flag */ |
| 495 | return !!match == !op->not; | ||
| 490 | } | 496 | } |
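
Both hunks above apply the same negation rule: normalize the raw result to a boolean and compare it with the inverted not flag, which is what lets a filter such as !(field == value) simply flip the sense of the inner expression. The rule in isolation:

    /* Returns 1 when the (possibly negated) predicate is satisfied */
    static int apply_not(int match, int not)
    {
            /* !!match normalizes any non-zero value; !not inverts once */
            return !!match == !not;
    }
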
| 491 | 497 | ||
| 492 | struct filter_match_preds_data { | 498 | struct filter_match_preds_data { |
| @@ -735,10 +741,10 @@ static int filter_set_pred(struct event_filter *filter, | |||
| 735 | * then this op can be folded. | 741 | * then this op can be folded. |
| 736 | */ | 742 | */ |
| 737 | if (left->index & FILTER_PRED_FOLD && | 743 | if (left->index & FILTER_PRED_FOLD && |
| 738 | (left->op == dest->op || | 744 | ((left->op == dest->op && !left->not) || |
| 739 | left->left == FILTER_PRED_INVALID) && | 745 | left->left == FILTER_PRED_INVALID) && |
| 740 | right->index & FILTER_PRED_FOLD && | 746 | right->index & FILTER_PRED_FOLD && |
| 741 | (right->op == dest->op || | 747 | ((right->op == dest->op && !right->not) || |
| 742 | right->left == FILTER_PRED_INVALID)) | 748 | right->left == FILTER_PRED_INVALID)) |
| 743 | dest->index |= FILTER_PRED_FOLD; | 749 | dest->index |= FILTER_PRED_FOLD; |
| 744 | 750 | ||
| @@ -1028,7 +1034,7 @@ static int init_pred(struct filter_parse_state *ps, | |||
| 1028 | } | 1034 | } |
| 1029 | 1035 | ||
| 1030 | if (pred->op == OP_NE) | 1036 | if (pred->op == OP_NE) |
| 1031 | pred->not = 1; | 1037 | pred->not ^= 1; |
| 1032 | 1038 | ||
| 1033 | pred->fn = fn; | 1039 | pred->fn = fn; |
| 1034 | return 0; | 1040 | return 0; |
| @@ -1590,6 +1596,17 @@ static int replace_preds(struct ftrace_event_call *call, | |||
| 1590 | continue; | 1596 | continue; |
| 1591 | } | 1597 | } |
| 1592 | 1598 | ||
| 1599 | if (elt->op == OP_NOT) { | ||
| 1600 | if (!n_preds || operand1 || operand2) { | ||
| 1601 | parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0); | ||
| 1602 | err = -EINVAL; | ||
| 1603 | goto fail; | ||
| 1604 | } | ||
| 1605 | if (!dry_run) | ||
| 1606 | filter->preds[n_preds - 1].not ^= 1; | ||
| 1607 | continue; | ||
| 1608 | } | ||
| 1609 | |||
| 1593 | if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) { | 1610 | if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) { |
| 1594 | parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); | 1611 | parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); |
| 1595 | err = -ENOSPC; | 1612 | err = -ENOSPC; |
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c index 4747b476a030..8712df9decb4 100644 --- a/kernel/trace/trace_events_trigger.c +++ b/kernel/trace/trace_events_trigger.c | |||
| @@ -373,7 +373,7 @@ event_trigger_print(const char *name, struct seq_file *m, | |||
| 373 | { | 373 | { |
| 374 | long count = (long)data; | 374 | long count = (long)data; |
| 375 | 375 | ||
| 376 | seq_printf(m, "%s", name); | 376 | seq_puts(m, name); |
| 377 | 377 | ||
| 378 | if (count == -1) | 378 | if (count == -1) |
| 379 | seq_puts(m, ":unlimited"); | 379 | seq_puts(m, ":unlimited"); |
| @@ -383,7 +383,7 @@ event_trigger_print(const char *name, struct seq_file *m, | |||
| 383 | if (filter_str) | 383 | if (filter_str) |
| 384 | seq_printf(m, " if %s\n", filter_str); | 384 | seq_printf(m, " if %s\n", filter_str); |
| 385 | else | 385 | else |
| 386 | seq_puts(m, "\n"); | 386 | seq_putc(m, '\n'); |
| 387 | 387 | ||
| 388 | return 0; | 388 | return 0; |
| 389 | } | 389 | } |
| @@ -1105,7 +1105,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, | |||
| 1105 | if (data->filter_str) | 1105 | if (data->filter_str) |
| 1106 | seq_printf(m, " if %s\n", data->filter_str); | 1106 | seq_printf(m, " if %s\n", data->filter_str); |
| 1107 | else | 1107 | else |
| 1108 | seq_puts(m, "\n"); | 1108 | seq_putc(m, '\n'); |
| 1109 | 1109 | ||
| 1110 | return 0; | 1110 | return 0; |
| 1111 | } | 1111 | } |
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 57f0ec962d2c..fcd41a166405 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
| @@ -261,37 +261,74 @@ static struct tracer function_trace __tracer_data = | |||
| 261 | }; | 261 | }; |
| 262 | 262 | ||
| 263 | #ifdef CONFIG_DYNAMIC_FTRACE | 263 | #ifdef CONFIG_DYNAMIC_FTRACE |
| 264 | static int update_count(void **data) | 264 | static void update_traceon_count(void **data, bool on) |
| 265 | { | 265 | { |
| 266 | unsigned long *count = (long *)data; | 266 | long *count = (long *)data; |
| 267 | long old_count = *count; | ||
| 267 | 268 | ||
| 268 | if (!*count) | 269 | /* |
| 269 | return 0; | 270 | * Tracing gets disabled (or enabled) once per count. |
| 271 | * This function can be called at the same time on multiple CPUs. | ||
| 272 | * It is fine if both disable (or enable) tracing, as disabling | ||
| 273 | * (or enabling) a second time is a no-op, since the tracer is | ||
| 274 | * already in the requested state. | ||
| 275 | * What needs to be synchronized in this case is that the count | ||
| 276 | * only gets decremented once, even if the tracer is disabled | ||
| 277 | * (or enabled) twice, as the second one is really a nop. | ||
| 278 | * | ||
| 279 | * The memory barriers guarantee that we only decrement the | ||
| 280 | * counter once. First the count is read to a local variable | ||
| 281 | * and a read barrier is used to make sure that it is loaded | ||
| 282 | * before checking if the tracer is in the state we want. | ||
| 283 | * If the tracer is not in the state we want, then the count | ||
| 284 | * is guaranteed to be the old count. | ||
| 285 | * | ||
| 286 | * Next the tracer is set to the state we want (disabled or enabled) | ||
| 287 | * then a write memory barrier is used to make sure that | ||
| 288 | * the new state is visible before changing the counter to | ||
| 289 | * one less than the old counter. This guarantees that another CPU | ||
| 290 | * executing this code will see the new state before seeing | ||
| 291 | * the new counter value, and would not do anything if the new | ||
| 292 | * counter is seen. | ||
| 293 | * | ||
| 294 | * Note, there is no synchronization between this and a user | ||
| 295 | * setting the tracing_on file. But we currently don't care | ||
| 296 | * about that. | ||
| 297 | */ | ||
| 298 | if (!old_count) | ||
| 299 | return; | ||
| 270 | 300 | ||
| 271 | if (*count != -1) | 301 | /* Make sure we see count before checking tracing state */ |
| 272 | (*count)--; | 302 | smp_rmb(); |
| 273 | 303 | ||
| 274 | return 1; | 304 | if (on == !!tracing_is_on()) |
| 305 | return; | ||
| 306 | |||
| 307 | if (on) | ||
| 308 | tracing_on(); | ||
| 309 | else | ||
| 310 | tracing_off(); | ||
| 311 | |||
| 312 | /* unlimited? */ | ||
| 313 | if (old_count == -1) | ||
| 314 | return; | ||
| 315 | |||
| 316 | /* Make sure tracing state is visible before updating count */ | ||
| 317 | smp_wmb(); | ||
| 318 | |||
| 319 | *count = old_count - 1; | ||
| 275 | } | 320 | } |
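
Reduced to its skeleton, the ordering the comment above describes pairs one CPU's smp_wmb() with another CPU's smp_rmb(), so nobody can observe the decremented count while still seeing the stale tracer state. A sketch with illustrative names (the -1 "unlimited" case is elided):

    static long count = 5;
    static int state;

    static void probe(int want)
    {
            long old = count;

            if (!old)
                    return;
            smp_rmb();              /* load count before inspecting state */
            if (state == want)
                    return;
            state = want;
            smp_wmb();              /* publish state before the new count */
            count = old - 1;        /* pairs with the smp_rmb() above */
    }
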
| 276 | 321 | ||
| 277 | static void | 322 | static void |
| 278 | ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data) | 323 | ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data) |
| 279 | { | 324 | { |
| 280 | if (tracing_is_on()) | 325 | update_traceon_count(data, 1); |
| 281 | return; | ||
| 282 | |||
| 283 | if (update_count(data)) | ||
| 284 | tracing_on(); | ||
| 285 | } | 326 | } |
| 286 | 327 | ||
| 287 | static void | 328 | static void |
| 288 | ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data) | 329 | ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data) |
| 289 | { | 330 | { |
| 290 | if (!tracing_is_on()) | 331 | update_traceon_count(data, 0); |
| 291 | return; | ||
| 292 | |||
| 293 | if (update_count(data)) | ||
| 294 | tracing_off(); | ||
| 295 | } | 332 | } |
| 296 | 333 | ||
| 297 | static void | 334 | static void |
| @@ -330,11 +367,49 @@ ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data) | |||
| 330 | static void | 367 | static void |
| 331 | ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data) | 368 | ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data) |
| 332 | { | 369 | { |
| 333 | if (!tracing_is_on()) | 370 | long *count = (long *)data; |
| 334 | return; | 371 | long old_count; |
| 372 | long new_count; | ||
| 335 | 373 | ||
| 336 | if (update_count(data)) | 374 | /* |
| 337 | trace_dump_stack(STACK_SKIP); | 375 | * Stack traces should only be taken the number of times the |
| 376 | * user specified in the counter. | ||
| 377 | */ | ||
| 378 | do { | ||
| 379 | |||
| 380 | if (!tracing_is_on()) | ||
| 381 | return; | ||
| 382 | |||
| 383 | old_count = *count; | ||
| 384 | |||
| 385 | if (!old_count) | ||
| 386 | return; | ||
| 387 | |||
| 388 | /* unlimited? */ | ||
| 389 | if (old_count == -1) { | ||
| 390 | trace_dump_stack(STACK_SKIP); | ||
| 391 | return; | ||
| 392 | } | ||
| 393 | |||
| 394 | new_count = old_count - 1; | ||
| 395 | new_count = cmpxchg(count, old_count, new_count); | ||
| 396 | if (new_count == old_count) | ||
| 397 | trace_dump_stack(STACK_SKIP); | ||
| 398 | |||
| 399 | } while (new_count != old_count); | ||
| 400 | } | ||
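
One subtlety above: cmpxchg() returns the value it found, so reusing new_count for that return value makes new_count == old_count the success test. The same guarantee, restated as a stand-alone sketch that consumes the budget exactly once per emitted stack trace:

    static bool consume_count(long *count)
    {
            long old;

            do {
                    old = *count;
                    if (!old)
                            return false;   /* budget exhausted */
                    if (old == -1)
                            return true;    /* unlimited, nothing to count */
            } while (cmpxchg(count, old, old - 1) != old);

            return true;    /* we own this decrement: emit one trace */
    }
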
| 401 | |||
| 402 | static int update_count(void **data) | ||
| 403 | { | ||
| 404 | unsigned long *count = (long *)data; | ||
| 405 | |||
| 406 | if (!*count) | ||
| 407 | return 0; | ||
| 408 | |||
| 409 | if (*count != -1) | ||
| 410 | (*count)--; | ||
| 411 | |||
| 412 | return 1; | ||
| 338 | } | 413 | } |
| 339 | 414 | ||
| 340 | static void | 415 | static void |
| @@ -361,7 +436,7 @@ ftrace_probe_print(const char *name, struct seq_file *m, | |||
| 361 | seq_printf(m, "%ps:%s", (void *)ip, name); | 436 | seq_printf(m, "%ps:%s", (void *)ip, name); |
| 362 | 437 | ||
| 363 | if (count == -1) | 438 | if (count == -1) |
| 364 | seq_printf(m, ":unlimited\n"); | 439 | seq_puts(m, ":unlimited\n"); |
| 365 | else | 440 | else |
| 366 | seq_printf(m, ":count=%ld\n", count); | 441 | seq_printf(m, ":count=%ld\n", count); |
| 367 | 442 | ||
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index f0a0c982cde3..ba476009e5de 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
| @@ -107,7 +107,7 @@ enum { | |||
| 107 | FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT, | 107 | FLAGS_FILL_END = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT, |
| 108 | }; | 108 | }; |
| 109 | 109 | ||
| 110 | static enum print_line_t | 110 | static void |
| 111 | print_graph_duration(unsigned long long duration, struct trace_seq *s, | 111 | print_graph_duration(unsigned long long duration, struct trace_seq *s, |
| 112 | u32 flags); | 112 | u32 flags); |
| 113 | 113 | ||
| @@ -483,33 +483,24 @@ static int graph_trace_update_thresh(struct trace_array *tr) | |||
| 483 | 483 | ||
| 484 | static int max_bytes_for_cpu; | 484 | static int max_bytes_for_cpu; |
| 485 | 485 | ||
| 486 | static enum print_line_t | 486 | static void print_graph_cpu(struct trace_seq *s, int cpu) |
| 487 | print_graph_cpu(struct trace_seq *s, int cpu) | ||
| 488 | { | 487 | { |
| 489 | int ret; | ||
| 490 | |||
| 491 | /* | 488 | /* |
| 492 | * Start with a space character - to make it stand out | 489 | * Start with a space character - to make it stand out |
| 493 | * to the right a bit when trace output is pasted into | 490 | * to the right a bit when trace output is pasted into |
| 494 | * email: | 491 | * email: |
| 495 | */ | 492 | */ |
| 496 | ret = trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); | 493 | trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu); |
| 497 | if (!ret) | ||
| 498 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 499 | |||
| 500 | return TRACE_TYPE_HANDLED; | ||
| 501 | } | 494 | } |
| 502 | 495 | ||
| 503 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 | 496 | #define TRACE_GRAPH_PROCINFO_LENGTH 14 |
| 504 | 497 | ||
| 505 | static enum print_line_t | 498 | static void print_graph_proc(struct trace_seq *s, pid_t pid) |
| 506 | print_graph_proc(struct trace_seq *s, pid_t pid) | ||
| 507 | { | 499 | { |
| 508 | char comm[TASK_COMM_LEN]; | 500 | char comm[TASK_COMM_LEN]; |
| 509 | /* sign + log10(MAX_INT) + '\0' */ | 501 | /* sign + log10(MAX_INT) + '\0' */ |
| 510 | char pid_str[11]; | 502 | char pid_str[11]; |
| 511 | int spaces = 0; | 503 | int spaces = 0; |
| 512 | int ret; | ||
| 513 | int len; | 504 | int len; |
| 514 | int i; | 505 | int i; |
| 515 | 506 | ||
| @@ -524,56 +515,43 @@ print_graph_proc(struct trace_seq *s, pid_t pid) | |||
| 524 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; | 515 | spaces = TRACE_GRAPH_PROCINFO_LENGTH - len; |
| 525 | 516 | ||
| 526 | /* First spaces to align center */ | 517 | /* First spaces to align center */ |
| 527 | for (i = 0; i < spaces / 2; i++) { | 518 | for (i = 0; i < spaces / 2; i++) |
| 528 | ret = trace_seq_putc(s, ' '); | 519 | trace_seq_putc(s, ' '); |
| 529 | if (!ret) | ||
| 530 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 531 | } | ||
| 532 | 520 | ||
| 533 | ret = trace_seq_printf(s, "%s-%s", comm, pid_str); | 521 | trace_seq_printf(s, "%s-%s", comm, pid_str); |
| 534 | if (!ret) | ||
| 535 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 536 | 522 | ||
| 537 | /* Last spaces to align center */ | 523 | /* Last spaces to align center */ |
| 538 | for (i = 0; i < spaces - (spaces / 2); i++) { | 524 | for (i = 0; i < spaces - (spaces / 2); i++) |
| 539 | ret = trace_seq_putc(s, ' '); | 525 | trace_seq_putc(s, ' '); |
| 540 | if (!ret) | ||
| 541 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 542 | } | ||
| 543 | return TRACE_TYPE_HANDLED; | ||
| 544 | } | 526 | } |
| 545 | 527 | ||
| 546 | 528 | ||
| 547 | static enum print_line_t | 529 | static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) |
| 548 | print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | ||
| 549 | { | 530 | { |
| 550 | if (!trace_seq_putc(s, ' ')) | 531 | trace_seq_putc(s, ' '); |
| 551 | return 0; | 532 | trace_print_lat_fmt(s, entry); |
| 552 | |||
| 553 | return trace_print_lat_fmt(s, entry); | ||
| 554 | } | 533 | } |
| 555 | 534 | ||
| 556 | /* If the pid changed since the last trace, output this event */ | 535 | /* If the pid changed since the last trace, output this event */ |
| 557 | static enum print_line_t | 536 | static void |
| 558 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | 537 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) |
| 559 | { | 538 | { |
| 560 | pid_t prev_pid; | 539 | pid_t prev_pid; |
| 561 | pid_t *last_pid; | 540 | pid_t *last_pid; |
| 562 | int ret; | ||
| 563 | 541 | ||
| 564 | if (!data) | 542 | if (!data) |
| 565 | return TRACE_TYPE_HANDLED; | 543 | return; |
| 566 | 544 | ||
| 567 | last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); | 545 | last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid); |
| 568 | 546 | ||
| 569 | if (*last_pid == pid) | 547 | if (*last_pid == pid) |
| 570 | return TRACE_TYPE_HANDLED; | 548 | return; |
| 571 | 549 | ||
| 572 | prev_pid = *last_pid; | 550 | prev_pid = *last_pid; |
| 573 | *last_pid = pid; | 551 | *last_pid = pid; |
| 574 | 552 | ||
| 575 | if (prev_pid == -1) | 553 | if (prev_pid == -1) |
| 576 | return TRACE_TYPE_HANDLED; | 554 | return; |
| 577 | /* | 555 | /* |
| 578 | * Context-switch trace line: | 556 | * Context-switch trace line: |
| 579 | 557 | ||
| @@ -582,33 +560,12 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data) | |||
| 582 | ------------------------------------------ | 560 | ------------------------------------------ |
| 583 | 561 | ||
| 584 | */ | 562 | */ |
| 585 | ret = trace_seq_puts(s, | 563 | trace_seq_puts(s, " ------------------------------------------\n"); |
| 586 | " ------------------------------------------\n"); | 564 | print_graph_cpu(s, cpu); |
| 587 | if (!ret) | 565 | print_graph_proc(s, prev_pid); |
| 588 | return TRACE_TYPE_PARTIAL_LINE; | 566 | trace_seq_puts(s, " => "); |
| 589 | 567 | print_graph_proc(s, pid); | |
| 590 | ret = print_graph_cpu(s, cpu); | 568 | trace_seq_puts(s, "\n ------------------------------------------\n\n"); |
| 591 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 592 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 593 | |||
| 594 | ret = print_graph_proc(s, prev_pid); | ||
| 595 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 596 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 597 | |||
| 598 | ret = trace_seq_puts(s, " => "); | ||
| 599 | if (!ret) | ||
| 600 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 601 | |||
| 602 | ret = print_graph_proc(s, pid); | ||
| 603 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 604 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 605 | |||
| 606 | ret = trace_seq_puts(s, | ||
| 607 | "\n ------------------------------------------\n\n"); | ||
| 608 | if (!ret) | ||
| 609 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 610 | |||
| 611 | return TRACE_TYPE_HANDLED; | ||
| 612 | } | 569 | } |
| 613 | 570 | ||
| 614 | static struct ftrace_graph_ret_entry * | 571 | static struct ftrace_graph_ret_entry * |
| @@ -682,175 +639,122 @@ get_return_for_leaf(struct trace_iterator *iter, | |||
| 682 | return next; | 639 | return next; |
| 683 | } | 640 | } |
| 684 | 641 | ||
| 685 | static int print_graph_abs_time(u64 t, struct trace_seq *s) | 642 | static void print_graph_abs_time(u64 t, struct trace_seq *s) |
| 686 | { | 643 | { |
| 687 | unsigned long usecs_rem; | 644 | unsigned long usecs_rem; |
| 688 | 645 | ||
| 689 | usecs_rem = do_div(t, NSEC_PER_SEC); | 646 | usecs_rem = do_div(t, NSEC_PER_SEC); |
| 690 | usecs_rem /= 1000; | 647 | usecs_rem /= 1000; |
| 691 | 648 | ||
| 692 | return trace_seq_printf(s, "%5lu.%06lu | ", | 649 | trace_seq_printf(s, "%5lu.%06lu | ", |
| 693 | (unsigned long)t, usecs_rem); | 650 | (unsigned long)t, usecs_rem); |
| 694 | } | 651 | } |
| 695 | 652 | ||
| 696 | static enum print_line_t | 653 | static void |
| 697 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, | 654 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, |
| 698 | enum trace_type type, int cpu, pid_t pid, u32 flags) | 655 | enum trace_type type, int cpu, pid_t pid, u32 flags) |
| 699 | { | 656 | { |
| 700 | int ret; | ||
| 701 | struct trace_seq *s = &iter->seq; | 657 | struct trace_seq *s = &iter->seq; |
| 658 | struct trace_entry *ent = iter->ent; | ||
| 702 | 659 | ||
| 703 | if (addr < (unsigned long)__irqentry_text_start || | 660 | if (addr < (unsigned long)__irqentry_text_start || |
| 704 | addr >= (unsigned long)__irqentry_text_end) | 661 | addr >= (unsigned long)__irqentry_text_end) |
| 705 | return TRACE_TYPE_UNHANDLED; | 662 | return; |
| 706 | 663 | ||
| 707 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { | 664 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
| 708 | /* Absolute time */ | 665 | /* Absolute time */ |
| 709 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { | 666 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
| 710 | ret = print_graph_abs_time(iter->ts, s); | 667 | print_graph_abs_time(iter->ts, s); |
| 711 | if (!ret) | ||
| 712 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 713 | } | ||
| 714 | 668 | ||
| 715 | /* Cpu */ | 669 | /* Cpu */ |
| 716 | if (flags & TRACE_GRAPH_PRINT_CPU) { | 670 | if (flags & TRACE_GRAPH_PRINT_CPU) |
| 717 | ret = print_graph_cpu(s, cpu); | 671 | print_graph_cpu(s, cpu); |
| 718 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 719 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 720 | } | ||
| 721 | 672 | ||
| 722 | /* Proc */ | 673 | /* Proc */ |
| 723 | if (flags & TRACE_GRAPH_PRINT_PROC) { | 674 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
| 724 | ret = print_graph_proc(s, pid); | 675 | print_graph_proc(s, pid); |
| 725 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 676 | trace_seq_puts(s, " | "); |
| 726 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 727 | ret = trace_seq_puts(s, " | "); | ||
| 728 | if (!ret) | ||
| 729 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 730 | } | 677 | } |
| 678 | |||
| 679 | /* Latency format */ | ||
| 680 | if (trace_flags & TRACE_ITER_LATENCY_FMT) | ||
| 681 | print_graph_lat_fmt(s, ent); | ||
| 731 | } | 682 | } |
| 732 | 683 | ||
| 733 | /* No overhead */ | 684 | /* No overhead */ |
| 734 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_START); | 685 | print_graph_duration(0, s, flags | FLAGS_FILL_START); |
| 735 | if (ret != TRACE_TYPE_HANDLED) | ||
| 736 | return ret; | ||
| 737 | 686 | ||
| 738 | if (type == TRACE_GRAPH_ENT) | 687 | if (type == TRACE_GRAPH_ENT) |
| 739 | ret = trace_seq_puts(s, "==========>"); | 688 | trace_seq_puts(s, "==========>"); |
| 740 | else | 689 | else |
| 741 | ret = trace_seq_puts(s, "<=========="); | 690 | trace_seq_puts(s, "<=========="); |
| 742 | |||
| 743 | if (!ret) | ||
| 744 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 745 | |||
| 746 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_END); | ||
| 747 | if (ret != TRACE_TYPE_HANDLED) | ||
| 748 | return ret; | ||
| 749 | |||
| 750 | ret = trace_seq_putc(s, '\n'); | ||
| 751 | 691 | ||
| 752 | if (!ret) | 692 | print_graph_duration(0, s, flags | FLAGS_FILL_END); |
| 753 | return TRACE_TYPE_PARTIAL_LINE; | 693 | trace_seq_putc(s, '\n'); |
| 754 | return TRACE_TYPE_HANDLED; | ||
| 755 | } | 694 | } |
| 756 | 695 | ||
| 757 | enum print_line_t | 696 | void |
| 758 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) | 697 | trace_print_graph_duration(unsigned long long duration, struct trace_seq *s) |
| 759 | { | 698 | { |
| 760 | unsigned long nsecs_rem = do_div(duration, 1000); | 699 | unsigned long nsecs_rem = do_div(duration, 1000); |
| 761 | /* log10(ULONG_MAX) + '\0' */ | 700 | /* log10(ULONG_MAX) + '\0' */ |
| 762 | char msecs_str[21]; | 701 | char usecs_str[21]; |
| 763 | char nsecs_str[5]; | 702 | char nsecs_str[5]; |
| 764 | int ret, len; | 703 | int len; |
| 765 | int i; | 704 | int i; |
| 766 | 705 | ||
| 767 | sprintf(msecs_str, "%lu", (unsigned long) duration); | 706 | sprintf(usecs_str, "%lu", (unsigned long) duration); |
| 768 | 707 | ||
| 769 | /* Print usecs */ | 708 | /* Print usecs */ |
| 770 | ret = trace_seq_printf(s, "%s", msecs_str); | 709 | trace_seq_printf(s, "%s", usecs_str); |
| 771 | if (!ret) | ||
| 772 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 773 | 710 | ||
| 774 | len = strlen(msecs_str); | 711 | len = strlen(usecs_str); |
| 775 | 712 | ||
| 776 | /* Print nsecs (we don't want to exceed 7 digits) */ | 713 | /* Print nsecs (we don't want to exceed 7 digits) */ |
| 777 | if (len < 7) { | 714 | if (len < 7) { |
| 778 | size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); | 715 | size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len); |
| 779 | 716 | ||
| 780 | snprintf(nsecs_str, slen, "%03lu", nsecs_rem); | 717 | snprintf(nsecs_str, slen, "%03lu", nsecs_rem); |
| 781 | ret = trace_seq_printf(s, ".%s", nsecs_str); | 718 | trace_seq_printf(s, ".%s", nsecs_str); |
| 782 | if (!ret) | ||
| 783 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 784 | len += strlen(nsecs_str); | 719 | len += strlen(nsecs_str); |
| 785 | } | 720 | } |
| 786 | 721 | ||
| 787 | ret = trace_seq_puts(s, " us "); | 722 | trace_seq_puts(s, " us "); |
| 788 | if (!ret) | ||
| 789 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 790 | 723 | ||
| 791 | /* Print remaining spaces to fit the row's width */ | 724 | /* Print remaining spaces to fit the row's width */ |
| 792 | for (i = len; i < 7; i++) { | 725 | for (i = len; i < 7; i++) |
| 793 | ret = trace_seq_putc(s, ' '); | 726 | trace_seq_putc(s, ' '); |
| 794 | if (!ret) | ||
| 795 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 796 | } | ||
| 797 | return TRACE_TYPE_HANDLED; | ||
| 798 | } | 727 | } |
| 799 | 728 | ||
| 800 | static enum print_line_t | 729 | static void |
| 801 | print_graph_duration(unsigned long long duration, struct trace_seq *s, | 730 | print_graph_duration(unsigned long long duration, struct trace_seq *s, |
| 802 | u32 flags) | 731 | u32 flags) |
| 803 | { | 732 | { |
| 804 | int ret = -1; | ||
| 805 | |||
| 806 | if (!(flags & TRACE_GRAPH_PRINT_DURATION) || | 733 | if (!(flags & TRACE_GRAPH_PRINT_DURATION) || |
| 807 | !(trace_flags & TRACE_ITER_CONTEXT_INFO)) | 734 | !(trace_flags & TRACE_ITER_CONTEXT_INFO)) |
| 808 | return TRACE_TYPE_HANDLED; | 735 | return; |
| 809 | 736 | ||
| 810 | /* No real data, just filling the column with spaces */ | 737 | /* No real data, just filling the column with spaces */ |
| 811 | switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) { | 738 | switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) { |
| 812 | case FLAGS_FILL_FULL: | 739 | case FLAGS_FILL_FULL: |
| 813 | ret = trace_seq_puts(s, " | "); | 740 | trace_seq_puts(s, " | "); |
| 814 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 741 | return; |
| 815 | case FLAGS_FILL_START: | 742 | case FLAGS_FILL_START: |
| 816 | ret = trace_seq_puts(s, " "); | 743 | trace_seq_puts(s, " "); |
| 817 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 744 | return; |
| 818 | case FLAGS_FILL_END: | 745 | case FLAGS_FILL_END: |
| 819 | ret = trace_seq_puts(s, " |"); | 746 | trace_seq_puts(s, " |"); |
| 820 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | 747 | return; |
| 821 | } | 748 | } |
| 822 | 749 | ||
| 823 | /* Signal an overhead of time execution to the output */ | 750 | /* Signal an overhead of time execution to the output */ |
| 824 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) { | 751 | if (flags & TRACE_GRAPH_PRINT_OVERHEAD) |
| 825 | /* Duration exceeded 100 usecs */ | 752 | trace_seq_printf(s, "%c ", trace_find_mark(duration)); |
| 826 | if (duration > 100000ULL) | 753 | else |
| 827 | ret = trace_seq_puts(s, "! "); | 754 | trace_seq_puts(s, " "); |
| 828 | /* Duration exceeded 10 usecs */ | ||
| 829 | else if (duration > 10000ULL) | ||
| 830 | ret = trace_seq_puts(s, "+ "); | ||
| 831 | } | ||
| 832 | |||
| 833 | /* | ||
| 834 | * The -1 means we either did not exceed the duration thresholds | ||
| 835 | * or we don't want to print out the overhead. Either way we need | ||
| 836 | * to fill out the space. | ||
| 837 | */ | ||
| 838 | if (ret == -1) | ||
| 839 | ret = trace_seq_puts(s, " "); | ||
| 840 | |||
| 841 | /* Catch here any failure that happened above */ | ||
| 842 | if (!ret) | ||
| 843 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 844 | |||
| 845 | ret = trace_print_graph_duration(duration, s); | ||
| 846 | if (ret != TRACE_TYPE_HANDLED) | ||
| 847 | return ret; | ||
| 848 | |||
| 849 | ret = trace_seq_puts(s, "| "); | ||
| 850 | if (!ret) | ||
| 851 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 852 | 755 | ||
| 853 | return TRACE_TYPE_HANDLED; | 756 | trace_print_graph_duration(duration, s); |
| 757 | trace_seq_puts(s, "| "); | ||
| 854 | } | 758 | } |
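
trace_find_mark() centralizes the overhead annotation that used to be open-coded here; the duration it receives is in nanoseconds. A minimal version preserving just the two historical cut-offs might look like this (the shared helper may well define additional marks):

    char trace_find_mark(unsigned long long duration)
    {
            /* duration is in nanoseconds */
            if (duration > 100000ULL)       /* above 100 usecs */
                    return '!';
            if (duration > 10000ULL)        /* above 10 usecs */
                    return '+';
            return ' ';
    }
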
| 855 | 759 | ||
| 856 | /* Case of a leaf function on its call entry */ | 760 | /* Case of a leaf function on its call entry */ |
| @@ -864,7 +768,6 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
| 864 | struct ftrace_graph_ret *graph_ret; | 768 | struct ftrace_graph_ret *graph_ret; |
| 865 | struct ftrace_graph_ent *call; | 769 | struct ftrace_graph_ent *call; |
| 866 | unsigned long long duration; | 770 | unsigned long long duration; |
| 867 | int ret; | ||
| 868 | int i; | 771 | int i; |
| 869 | 772 | ||
| 870 | graph_ret = &ret_entry->ret; | 773 | graph_ret = &ret_entry->ret; |
| @@ -890,22 +793,15 @@ print_graph_entry_leaf(struct trace_iterator *iter, | |||
| 890 | } | 793 | } |
| 891 | 794 | ||
| 892 | /* Overhead and duration */ | 795 | /* Overhead and duration */ |
| 893 | ret = print_graph_duration(duration, s, flags); | 796 | print_graph_duration(duration, s, flags); |
| 894 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 895 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 896 | 797 | ||
| 897 | /* Function */ | 798 | /* Function */ |
| 898 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 799 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) |
| 899 | ret = trace_seq_putc(s, ' '); | 800 | trace_seq_putc(s, ' '); |
| 900 | if (!ret) | ||
| 901 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 902 | } | ||
| 903 | 801 | ||
| 904 | ret = trace_seq_printf(s, "%ps();\n", (void *)call->func); | 802 | trace_seq_printf(s, "%ps();\n", (void *)call->func); |
| 905 | if (!ret) | ||
| 906 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 907 | 803 | ||
| 908 | return TRACE_TYPE_HANDLED; | 804 | return trace_handle_return(s); |
| 909 | } | 805 | } |
| 910 | 806 | ||
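print_graph_entry_leaf() is the first converted callback here to end with trace_handle_return(). The helper itself is outside this diff; its presumed definition (kernel/trace/trace.h, added earlier in this series) reduces every per-call check to one overflow test:

        /* Presumed definition, not part of this hunk. */
        static inline enum print_line_t trace_handle_return(struct trace_seq *s)
        {
                return trace_seq_has_overflowed(s) ?
                        TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
        }

The conversion relies on trace_seq_*() writes being safe no-ops once the buffer is full, so a single check at the end is as good as checking every call.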
| 911 | static enum print_line_t | 807 | static enum print_line_t |
| @@ -915,7 +811,6 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
| 915 | { | 811 | { |
| 916 | struct ftrace_graph_ent *call = &entry->graph_ent; | 812 | struct ftrace_graph_ent *call = &entry->graph_ent; |
| 917 | struct fgraph_data *data = iter->private; | 813 | struct fgraph_data *data = iter->private; |
| 918 | int ret; | ||
| 919 | int i; | 814 | int i; |
| 920 | 815 | ||
| 921 | if (data) { | 816 | if (data) { |
| @@ -931,19 +826,15 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
| 931 | } | 826 | } |
| 932 | 827 | ||
| 933 | /* No time */ | 828 | /* No time */ |
| 934 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL); | 829 | print_graph_duration(0, s, flags | FLAGS_FILL_FULL); |
| 935 | if (ret != TRACE_TYPE_HANDLED) | ||
| 936 | return ret; | ||
| 937 | 830 | ||
| 938 | /* Function */ | 831 | /* Function */ |
| 939 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 832 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) |
| 940 | ret = trace_seq_putc(s, ' '); | 833 | trace_seq_putc(s, ' '); |
| 941 | if (!ret) | 834 | |
| 942 | return TRACE_TYPE_PARTIAL_LINE; | 835 | trace_seq_printf(s, "%ps() {\n", (void *)call->func); |
| 943 | } | ||
| 944 | 836 | ||
| 945 | ret = trace_seq_printf(s, "%ps() {\n", (void *)call->func); | 837 | if (trace_seq_has_overflowed(s)) |
| 946 | if (!ret) | ||
| 947 | return TRACE_TYPE_PARTIAL_LINE; | 838 | return TRACE_TYPE_PARTIAL_LINE; |
| 948 | 839 | ||
| 949 | /* | 840 | /* |
| @@ -953,62 +844,43 @@ print_graph_entry_nested(struct trace_iterator *iter, | |||
| 953 | return TRACE_TYPE_NO_CONSUME; | 844 | return TRACE_TYPE_NO_CONSUME; |
| 954 | } | 845 | } |
| 955 | 846 | ||
| 956 | static enum print_line_t | 847 | static void |
| 957 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, | 848 | print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, |
| 958 | int type, unsigned long addr, u32 flags) | 849 | int type, unsigned long addr, u32 flags) |
| 959 | { | 850 | { |
| 960 | struct fgraph_data *data = iter->private; | 851 | struct fgraph_data *data = iter->private; |
| 961 | struct trace_entry *ent = iter->ent; | 852 | struct trace_entry *ent = iter->ent; |
| 962 | int cpu = iter->cpu; | 853 | int cpu = iter->cpu; |
| 963 | int ret; | ||
| 964 | 854 | ||
| 965 | /* Pid */ | 855 | /* Pid */ |
| 966 | if (verif_pid(s, ent->pid, cpu, data) == TRACE_TYPE_PARTIAL_LINE) | 856 | verif_pid(s, ent->pid, cpu, data); |
| 967 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 968 | 857 | ||
| 969 | if (type) { | 858 | if (type) |
| 970 | /* Interrupt */ | 859 | /* Interrupt */ |
| 971 | ret = print_graph_irq(iter, addr, type, cpu, ent->pid, flags); | 860 | print_graph_irq(iter, addr, type, cpu, ent->pid, flags); |
| 972 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 973 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 974 | } | ||
| 975 | 861 | ||
| 976 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) | 862 | if (!(trace_flags & TRACE_ITER_CONTEXT_INFO)) |
| 977 | return 0; | 863 | return; |
| 978 | 864 | ||
| 979 | /* Absolute time */ | 865 | /* Absolute time */ |
| 980 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) { | 866 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
| 981 | ret = print_graph_abs_time(iter->ts, s); | 867 | print_graph_abs_time(iter->ts, s); |
| 982 | if (!ret) | ||
| 983 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 984 | } | ||
| 985 | 868 | ||
| 986 | /* Cpu */ | 869 | /* Cpu */ |
| 987 | if (flags & TRACE_GRAPH_PRINT_CPU) { | 870 | if (flags & TRACE_GRAPH_PRINT_CPU) |
| 988 | ret = print_graph_cpu(s, cpu); | 871 | print_graph_cpu(s, cpu); |
| 989 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 990 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 991 | } | ||
| 992 | 872 | ||
| 993 | /* Proc */ | 873 | /* Proc */ |
| 994 | if (flags & TRACE_GRAPH_PRINT_PROC) { | 874 | if (flags & TRACE_GRAPH_PRINT_PROC) { |
| 995 | ret = print_graph_proc(s, ent->pid); | 875 | print_graph_proc(s, ent->pid); |
| 996 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 876 | trace_seq_puts(s, " | "); |
| 997 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 998 | |||
| 999 | ret = trace_seq_puts(s, " | "); | ||
| 1000 | if (!ret) | ||
| 1001 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1002 | } | 877 | } |
| 1003 | 878 | ||
| 1004 | /* Latency format */ | 879 | /* Latency format */ |
| 1005 | if (trace_flags & TRACE_ITER_LATENCY_FMT) { | 880 | if (trace_flags & TRACE_ITER_LATENCY_FMT) |
| 1006 | ret = print_graph_lat_fmt(s, ent); | 881 | print_graph_lat_fmt(s, ent); |
| 1007 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 1008 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1009 | } | ||
| 1010 | 882 | ||
| 1011 | return 0; | 883 | return; |
| 1012 | } | 884 | } |
| 1013 | 885 | ||
| 1014 | /* | 886 | /* |
| @@ -1126,8 +998,7 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
| 1126 | if (check_irq_entry(iter, flags, call->func, call->depth)) | 998 | if (check_irq_entry(iter, flags, call->func, call->depth)) |
| 1127 | return TRACE_TYPE_HANDLED; | 999 | return TRACE_TYPE_HANDLED; |
| 1128 | 1000 | ||
| 1129 | if (print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags)) | 1001 | print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags); |
| 1130 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1131 | 1002 | ||
| 1132 | leaf_ret = get_return_for_leaf(iter, field); | 1003 | leaf_ret = get_return_for_leaf(iter, field); |
| 1133 | if (leaf_ret) | 1004 | if (leaf_ret) |
| @@ -1160,7 +1031,6 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
| 1160 | pid_t pid = ent->pid; | 1031 | pid_t pid = ent->pid; |
| 1161 | int cpu = iter->cpu; | 1032 | int cpu = iter->cpu; |
| 1162 | int func_match = 1; | 1033 | int func_match = 1; |
| 1163 | int ret; | ||
| 1164 | int i; | 1034 | int i; |
| 1165 | 1035 | ||
| 1166 | if (check_irq_return(iter, flags, trace->depth)) | 1036 | if (check_irq_return(iter, flags, trace->depth)) |
| @@ -1186,20 +1056,14 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
| 1186 | } | 1056 | } |
| 1187 | } | 1057 | } |
| 1188 | 1058 | ||
| 1189 | if (print_graph_prologue(iter, s, 0, 0, flags)) | 1059 | print_graph_prologue(iter, s, 0, 0, flags); |
| 1190 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1191 | 1060 | ||
| 1192 | /* Overhead and duration */ | 1061 | /* Overhead and duration */ |
| 1193 | ret = print_graph_duration(duration, s, flags); | 1062 | print_graph_duration(duration, s, flags); |
| 1194 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 1195 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1196 | 1063 | ||
| 1197 | /* Closing brace */ | 1064 | /* Closing brace */ |
| 1198 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 1065 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) |
| 1199 | ret = trace_seq_putc(s, ' '); | 1066 | trace_seq_putc(s, ' '); |
| 1200 | if (!ret) | ||
| 1201 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1202 | } | ||
| 1203 | 1067 | ||
| 1204 | /* | 1068 | /* |
| 1205 | * If the return function does not have a matching entry, | 1069 | * If the return function does not have a matching entry, |
| @@ -1208,30 +1072,20 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
| 1208 | * belongs to, write out the function name. Always do | 1072 | * belongs to, write out the function name. Always do |
| 1209 | * that if the funcgraph-tail option is enabled. | 1073 | * that if the funcgraph-tail option is enabled. |
| 1210 | */ | 1074 | */ |
| 1211 | if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) { | 1075 | if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL)) |
| 1212 | ret = trace_seq_puts(s, "}\n"); | 1076 | trace_seq_puts(s, "}\n"); |
| 1213 | if (!ret) | 1077 | else |
| 1214 | return TRACE_TYPE_PARTIAL_LINE; | 1078 | trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); |
| 1215 | } else { | ||
| 1216 | ret = trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func); | ||
| 1217 | if (!ret) | ||
| 1218 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1219 | } | ||
| 1220 | 1079 | ||
| 1221 | /* Overrun */ | 1080 | /* Overrun */ |
| 1222 | if (flags & TRACE_GRAPH_PRINT_OVERRUN) { | 1081 | if (flags & TRACE_GRAPH_PRINT_OVERRUN) |
| 1223 | ret = trace_seq_printf(s, " (Overruns: %lu)\n", | 1082 | trace_seq_printf(s, " (Overruns: %lu)\n", |
| 1224 | trace->overrun); | 1083 | trace->overrun); |
| 1225 | if (!ret) | ||
| 1226 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1227 | } | ||
| 1228 | 1084 | ||
| 1229 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, | 1085 | print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, |
| 1230 | cpu, pid, flags); | 1086 | cpu, pid, flags); |
| 1231 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
| 1232 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1233 | 1087 | ||
| 1234 | return TRACE_TYPE_HANDLED; | 1088 | return trace_handle_return(s); |
| 1235 | } | 1089 | } |
| 1236 | 1090 | ||
| 1237 | static enum print_line_t | 1091 | static enum print_line_t |
| @@ -1248,26 +1102,18 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
| 1248 | if (data) | 1102 | if (data) |
| 1249 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; | 1103 | depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth; |
| 1250 | 1104 | ||
| 1251 | if (print_graph_prologue(iter, s, 0, 0, flags)) | 1105 | print_graph_prologue(iter, s, 0, 0, flags); |
| 1252 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1253 | 1106 | ||
| 1254 | /* No time */ | 1107 | /* No time */ |
| 1255 | ret = print_graph_duration(0, s, flags | FLAGS_FILL_FULL); | 1108 | print_graph_duration(0, s, flags | FLAGS_FILL_FULL); |
| 1256 | if (ret != TRACE_TYPE_HANDLED) | ||
| 1257 | return ret; | ||
| 1258 | 1109 | ||
| 1259 | /* Indentation */ | 1110 | /* Indentation */ |
| 1260 | if (depth > 0) | 1111 | if (depth > 0) |
| 1261 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) { | 1112 | for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++) |
| 1262 | ret = trace_seq_putc(s, ' '); | 1113 | trace_seq_putc(s, ' '); |
| 1263 | if (!ret) | ||
| 1264 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1265 | } | ||
| 1266 | 1114 | ||
| 1267 | /* The comment */ | 1115 | /* The comment */ |
| 1268 | ret = trace_seq_puts(s, "/* "); | 1116 | trace_seq_puts(s, "/* "); |
| 1269 | if (!ret) | ||
| 1270 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1271 | 1117 | ||
| 1272 | switch (iter->ent->type) { | 1118 | switch (iter->ent->type) { |
| 1273 | case TRACE_BPRINT: | 1119 | case TRACE_BPRINT: |
| @@ -1290,17 +1136,18 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, | |||
| 1290 | return ret; | 1136 | return ret; |
| 1291 | } | 1137 | } |
| 1292 | 1138 | ||
| 1139 | if (trace_seq_has_overflowed(s)) | ||
| 1140 | goto out; | ||
| 1141 | |||
| 1293 | /* Strip ending newline */ | 1142 | /* Strip ending newline */ |
| 1294 | if (s->buffer[s->len - 1] == '\n') { | 1143 | if (s->buffer[s->seq.len - 1] == '\n') { |
| 1295 | s->buffer[s->len - 1] = '\0'; | 1144 | s->buffer[s->seq.len - 1] = '\0'; |
| 1296 | s->len--; | 1145 | s->seq.len--; |
| 1297 | } | 1146 | } |
| 1298 | 1147 | ||
| 1299 | ret = trace_seq_puts(s, " */\n"); | 1148 | trace_seq_puts(s, " */\n"); |
| 1300 | if (!ret) | 1149 | out: |
| 1301 | return TRACE_TYPE_PARTIAL_LINE; | 1150 | return trace_handle_return(s); |
| 1302 | |||
| 1303 | return TRACE_TYPE_HANDLED; | ||
| 1304 | } | 1151 | } |
| 1305 | 1152 | ||
| 1306 | 1153 | ||
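The switch from s->len to s->seq.len in the newline-stripping code implies that trace_seq now embeds a struct seq_buf which owns the length bookkeeping. A presumed layout (include/linux/trace_seq.h), shown only for orientation:

        /* Presumed post-conversion layout; the diff only implies it. */
        struct trace_seq {
                unsigned char   buffer[PAGE_SIZE];
                struct seq_buf  seq;    /* tracks len and read position */
                int             full;   /* set once the buffer overflows */
        };

Note the strip is now guarded by trace_seq_has_overflowed(), so seq.len is only rewound when the buffered line is known to be intact.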
| @@ -1407,32 +1254,32 @@ static void __print_graph_headers_flags(struct seq_file *s, u32 flags) | |||
| 1407 | print_lat_header(s, flags); | 1254 | print_lat_header(s, flags); |
| 1408 | 1255 | ||
| 1409 | /* 1st line */ | 1256 | /* 1st line */ |
| 1410 | seq_printf(s, "#"); | 1257 | seq_putc(s, '#'); |
| 1411 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) | 1258 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
| 1412 | seq_printf(s, " TIME "); | 1259 | seq_puts(s, " TIME "); |
| 1413 | if (flags & TRACE_GRAPH_PRINT_CPU) | 1260 | if (flags & TRACE_GRAPH_PRINT_CPU) |
| 1414 | seq_printf(s, " CPU"); | 1261 | seq_puts(s, " CPU"); |
| 1415 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1262 | if (flags & TRACE_GRAPH_PRINT_PROC) |
| 1416 | seq_printf(s, " TASK/PID "); | 1263 | seq_puts(s, " TASK/PID "); |
| 1417 | if (lat) | 1264 | if (lat) |
| 1418 | seq_printf(s, "||||"); | 1265 | seq_puts(s, "||||"); |
| 1419 | if (flags & TRACE_GRAPH_PRINT_DURATION) | 1266 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
| 1420 | seq_printf(s, " DURATION "); | 1267 | seq_puts(s, " DURATION "); |
| 1421 | seq_printf(s, " FUNCTION CALLS\n"); | 1268 | seq_puts(s, " FUNCTION CALLS\n"); |
| 1422 | 1269 | ||
| 1423 | /* 2nd line */ | 1270 | /* 2nd line */ |
| 1424 | seq_printf(s, "#"); | 1271 | seq_putc(s, '#'); |
| 1425 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) | 1272 | if (flags & TRACE_GRAPH_PRINT_ABS_TIME) |
| 1426 | seq_printf(s, " | "); | 1273 | seq_puts(s, " | "); |
| 1427 | if (flags & TRACE_GRAPH_PRINT_CPU) | 1274 | if (flags & TRACE_GRAPH_PRINT_CPU) |
| 1428 | seq_printf(s, " | "); | 1275 | seq_puts(s, " | "); |
| 1429 | if (flags & TRACE_GRAPH_PRINT_PROC) | 1276 | if (flags & TRACE_GRAPH_PRINT_PROC) |
| 1430 | seq_printf(s, " | | "); | 1277 | seq_puts(s, " | | "); |
| 1431 | if (lat) | 1278 | if (lat) |
| 1432 | seq_printf(s, "||||"); | 1279 | seq_puts(s, "||||"); |
| 1433 | if (flags & TRACE_GRAPH_PRINT_DURATION) | 1280 | if (flags & TRACE_GRAPH_PRINT_DURATION) |
| 1434 | seq_printf(s, " | | "); | 1281 | seq_puts(s, " | | "); |
| 1435 | seq_printf(s, " | | | |\n"); | 1282 | seq_puts(s, " | | | |\n"); |
| 1436 | } | 1283 | } |
| 1437 | 1284 | ||
| 1438 | static void print_graph_headers(struct seq_file *s) | 1285 | static void print_graph_headers(struct seq_file *s) |
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index bd90e1b06088..3ccf5c2c1320 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c | |||
| @@ -20,10 +20,12 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) | |||
| 20 | { | 20 | { |
| 21 | /* use static because iter can be a bit big for the stack */ | 21 | /* use static because iter can be a bit big for the stack */ |
| 22 | static struct trace_iterator iter; | 22 | static struct trace_iterator iter; |
| 23 | static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS]; | ||
| 23 | unsigned int old_userobj; | 24 | unsigned int old_userobj; |
| 24 | int cnt = 0, cpu; | 25 | int cnt = 0, cpu; |
| 25 | 26 | ||
| 26 | trace_init_global_iter(&iter); | 27 | trace_init_global_iter(&iter); |
| 28 | iter.buffer_iter = buffer_iter; | ||
| 27 | 29 | ||
| 28 | for_each_tracing_cpu(cpu) { | 30 | for_each_tracing_cpu(cpu) { |
| 29 | atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); | 31 | atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); |
| @@ -57,19 +59,19 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) | |||
| 57 | ring_buffer_read_start(iter.buffer_iter[cpu_file]); | 59 | ring_buffer_read_start(iter.buffer_iter[cpu_file]); |
| 58 | tracing_iter_reset(&iter, cpu_file); | 60 | tracing_iter_reset(&iter, cpu_file); |
| 59 | } | 61 | } |
| 60 | if (!trace_empty(&iter)) | 62 | |
| 61 | trace_find_next_entry_inc(&iter); | 63 | while (trace_find_next_entry_inc(&iter)) { |
| 62 | while (!trace_empty(&iter)) { | ||
| 63 | if (!cnt) | 64 | if (!cnt) |
| 64 | kdb_printf("---------------------------------\n"); | 65 | kdb_printf("---------------------------------\n"); |
| 65 | cnt++; | 66 | cnt++; |
| 66 | 67 | ||
| 67 | if (trace_find_next_entry_inc(&iter) != NULL && !skip_lines) | 68 | if (!skip_lines) { |
| 68 | print_trace_line(&iter); | 69 | print_trace_line(&iter); |
| 69 | if (!skip_lines) | ||
| 70 | trace_printk_seq(&iter.seq); | 70 | trace_printk_seq(&iter.seq); |
| 71 | else | 71 | } else { |
| 72 | skip_lines--; | 72 | skip_lines--; |
| 73 | } | ||
| 74 | |||
| 73 | if (KDB_FLAG(CMD_INTERRUPT)) | 75 | if (KDB_FLAG(CMD_INTERRUPT)) |
| 74 | goto out; | 76 | goto out; |
| 75 | } | 77 | } |
| @@ -86,9 +88,12 @@ out: | |||
| 86 | atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); | 88 | atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); |
| 87 | } | 89 | } |
| 88 | 90 | ||
| 89 | for_each_tracing_cpu(cpu) | 91 | for_each_tracing_cpu(cpu) { |
| 90 | if (iter.buffer_iter[cpu]) | 92 | if (iter.buffer_iter[cpu]) { |
| 91 | ring_buffer_read_finish(iter.buffer_iter[cpu]); | 93 | ring_buffer_read_finish(iter.buffer_iter[cpu]); |
| 94 | iter.buffer_iter[cpu] = NULL; | ||
| 95 | } | ||
| 96 | } | ||
| 92 | } | 97 | } |
| 93 | 98 | ||
| 94 | /* | 99 | /* |
| @@ -127,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv) | |||
| 127 | 132 | ||
| 128 | static __init int kdb_ftrace_register(void) | 133 | static __init int kdb_ftrace_register(void) |
| 129 | { | 134 | { |
| 130 | kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", | 135 | kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", |
| 131 | "Dump ftrace log", 0, KDB_REPEAT_NONE); | 136 | "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE); |
| 132 | return 0; | 137 | return 0; |
| 133 | } | 138 | } |
| 134 | 139 | ||
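Two independent fixes land in trace_kdb.c. First, the iterator gets its own per-CPU buffer_iter storage: static, since a CONFIG_NR_CPUS-sized array is too large for the kdb stack, and NULLed after ring_buffer_read_finish() so a repeated ftdump starts from a clean state. Second, the dump loop lets trace_find_next_entry_inc() drive iteration directly, which assumes it returns NULL once the buffer is exhausted. Condensed:

        /* Condensed sketch of the new setup/loop/teardown pattern. */
        static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];

        trace_init_global_iter(&iter);
        iter.buffer_iter = buffer_iter; /* iterator needs its own storage */

        while (trace_find_next_entry_inc(&iter)) {
                /* print one line, or consume it while skip_lines > 0 */
        }

        for_each_tracing_cpu(cpu) {
                if (iter.buffer_iter[cpu]) {
                        ring_buffer_read_finish(iter.buffer_iter[cpu]);
                        iter.buffer_iter[cpu] = NULL;   /* no stale pointers */
                }
        }

The kdb_register_flags() hunk is a separate API migration: the repeat argument is gone and ftdump is marked KDB_ENABLE_ALWAYS_SAFE.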
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 282f6e4e5539..5edb518be345 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c | |||
| @@ -826,7 +826,7 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 826 | struct trace_kprobe *tk = v; | 826 | struct trace_kprobe *tk = v; |
| 827 | int i; | 827 | int i; |
| 828 | 828 | ||
| 829 | seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); | 829 | seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p'); |
| 830 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, | 830 | seq_printf(m, ":%s/%s", tk->tp.call.class->system, |
| 831 | ftrace_event_name(&tk->tp.call)); | 831 | ftrace_event_name(&tk->tp.call)); |
| 832 | 832 | ||
| @@ -840,7 +840,7 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 840 | 840 | ||
| 841 | for (i = 0; i < tk->tp.nr_args; i++) | 841 | for (i = 0; i < tk->tp.nr_args; i++) |
| 842 | seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm); | 842 | seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm); |
| 843 | seq_printf(m, "\n"); | 843 | seq_putc(m, '\n'); |
| 844 | 844 | ||
| 845 | return 0; | 845 | return 0; |
| 846 | } | 846 | } |
| @@ -1024,27 +1024,22 @@ print_kprobe_event(struct trace_iterator *iter, int flags, | |||
| 1024 | field = (struct kprobe_trace_entry_head *)iter->ent; | 1024 | field = (struct kprobe_trace_entry_head *)iter->ent; |
| 1025 | tp = container_of(event, struct trace_probe, call.event); | 1025 | tp = container_of(event, struct trace_probe, call.event); |
| 1026 | 1026 | ||
| 1027 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) | 1027 | trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)); |
| 1028 | goto partial; | ||
| 1029 | 1028 | ||
| 1030 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) | 1029 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) |
| 1031 | goto partial; | 1030 | goto out; |
| 1032 | 1031 | ||
| 1033 | if (!trace_seq_puts(s, ")")) | 1032 | trace_seq_putc(s, ')'); |
| 1034 | goto partial; | ||
| 1035 | 1033 | ||
| 1036 | data = (u8 *)&field[1]; | 1034 | data = (u8 *)&field[1]; |
| 1037 | for (i = 0; i < tp->nr_args; i++) | 1035 | for (i = 0; i < tp->nr_args; i++) |
| 1038 | if (!tp->args[i].type->print(s, tp->args[i].name, | 1036 | if (!tp->args[i].type->print(s, tp->args[i].name, |
| 1039 | data + tp->args[i].offset, field)) | 1037 | data + tp->args[i].offset, field)) |
| 1040 | goto partial; | 1038 | goto out; |
| 1041 | |||
| 1042 | if (!trace_seq_puts(s, "\n")) | ||
| 1043 | goto partial; | ||
| 1044 | 1039 | ||
| 1045 | return TRACE_TYPE_HANDLED; | 1040 | trace_seq_putc(s, '\n'); |
| 1046 | partial: | 1041 | out: |
| 1047 | return TRACE_TYPE_PARTIAL_LINE; | 1042 | return trace_handle_return(s); |
| 1048 | } | 1043 | } |
| 1049 | 1044 | ||
| 1050 | static enum print_line_t | 1045 | static enum print_line_t |
| @@ -1060,33 +1055,28 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, | |||
| 1060 | field = (struct kretprobe_trace_entry_head *)iter->ent; | 1055 | field = (struct kretprobe_trace_entry_head *)iter->ent; |
| 1061 | tp = container_of(event, struct trace_probe, call.event); | 1056 | tp = container_of(event, struct trace_probe, call.event); |
| 1062 | 1057 | ||
| 1063 | if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) | 1058 | trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call)); |
| 1064 | goto partial; | ||
| 1065 | 1059 | ||
| 1066 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) | 1060 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) |
| 1067 | goto partial; | 1061 | goto out; |
| 1068 | 1062 | ||
| 1069 | if (!trace_seq_puts(s, " <- ")) | 1063 | trace_seq_puts(s, " <- "); |
| 1070 | goto partial; | ||
| 1071 | 1064 | ||
| 1072 | if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) | 1065 | if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) |
| 1073 | goto partial; | 1066 | goto out; |
| 1074 | 1067 | ||
| 1075 | if (!trace_seq_puts(s, ")")) | 1068 | trace_seq_putc(s, ')'); |
| 1076 | goto partial; | ||
| 1077 | 1069 | ||
| 1078 | data = (u8 *)&field[1]; | 1070 | data = (u8 *)&field[1]; |
| 1079 | for (i = 0; i < tp->nr_args; i++) | 1071 | for (i = 0; i < tp->nr_args; i++) |
| 1080 | if (!tp->args[i].type->print(s, tp->args[i].name, | 1072 | if (!tp->args[i].type->print(s, tp->args[i].name, |
| 1081 | data + tp->args[i].offset, field)) | 1073 | data + tp->args[i].offset, field)) |
| 1082 | goto partial; | 1074 | goto out; |
| 1083 | 1075 | ||
| 1084 | if (!trace_seq_puts(s, "\n")) | 1076 | trace_seq_putc(s, '\n'); |
| 1085 | goto partial; | ||
| 1086 | 1077 | ||
| 1087 | return TRACE_TYPE_HANDLED; | 1078 | out: |
| 1088 | partial: | 1079 | return trace_handle_return(s); |
| 1089 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1090 | } | 1080 | } |
| 1091 | 1081 | ||
| 1092 | 1082 | ||
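A subtlety in both kprobe converters above: the old partial: label forced TRACE_TYPE_PARTIAL_LINE, while the new out: label just stops emitting early. Since seq_print_ip_sym() (converted below in trace_output.c) now fails only when the sequence has overflowed, trace_handle_return() at out: reports the same condition without per-call bookkeeping. The shape both functions share:

        trace_seq_printf(s, "%s: (", name);     /* unchecked writes */
        if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
                goto out;                       /* helper still reports status */
        /* ... more writes and the per-arg print callbacks ... */
        trace_seq_putc(s, '\n');
out:
        return trace_handle_return(s);          /* overflow decides the result */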
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 0abd9b863474..7a9ba62e9fef 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
| @@ -59,17 +59,15 @@ static void mmio_trace_start(struct trace_array *tr) | |||
| 59 | mmio_reset_data(tr); | 59 | mmio_reset_data(tr); |
| 60 | } | 60 | } |
| 61 | 61 | ||
| 62 | static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) | 62 | static void mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) |
| 63 | { | 63 | { |
| 64 | int ret = 0; | ||
| 65 | int i; | 64 | int i; |
| 66 | resource_size_t start, end; | 65 | resource_size_t start, end; |
| 67 | const struct pci_driver *drv = pci_dev_driver(dev); | 66 | const struct pci_driver *drv = pci_dev_driver(dev); |
| 68 | 67 | ||
| 69 | /* XXX: incomplete checks for trace_seq_printf() return value */ | 68 | trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x", |
| 70 | ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x", | 69 | dev->bus->number, dev->devfn, |
| 71 | dev->bus->number, dev->devfn, | 70 | dev->vendor, dev->device, dev->irq); |
| 72 | dev->vendor, dev->device, dev->irq); | ||
| 73 | /* | 71 | /* |
| 74 | * XXX: is pci_resource_to_user() appropriate, since we are | 72 | * XXX: is pci_resource_to_user() appropriate, since we are |
| 75 | * supposed to interpret the __ioremap() phys_addr argument based on | 73 | * supposed to interpret the __ioremap() phys_addr argument based on |
| @@ -77,21 +75,20 @@ static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) | |||
| 77 | */ | 75 | */ |
| 78 | for (i = 0; i < 7; i++) { | 76 | for (i = 0; i < 7; i++) { |
| 79 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); | 77 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); |
| 80 | ret += trace_seq_printf(s, " %llx", | 78 | trace_seq_printf(s, " %llx", |
| 81 | (unsigned long long)(start | | 79 | (unsigned long long)(start | |
| 82 | (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); | 80 | (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); |
| 83 | } | 81 | } |
| 84 | for (i = 0; i < 7; i++) { | 82 | for (i = 0; i < 7; i++) { |
| 85 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); | 83 | pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); |
| 86 | ret += trace_seq_printf(s, " %llx", | 84 | trace_seq_printf(s, " %llx", |
| 87 | dev->resource[i].start < dev->resource[i].end ? | 85 | dev->resource[i].start < dev->resource[i].end ? |
| 88 | (unsigned long long)(end - start) + 1 : 0); | 86 | (unsigned long long)(end - start) + 1 : 0); |
| 89 | } | 87 | } |
| 90 | if (drv) | 88 | if (drv) |
| 91 | ret += trace_seq_printf(s, " %s\n", drv->name); | 89 | trace_seq_printf(s, " %s\n", drv->name); |
| 92 | else | 90 | else |
| 93 | ret += trace_seq_puts(s, " \n"); | 91 | trace_seq_puts(s, " \n"); |
| 94 | return ret; | ||
| 95 | } | 92 | } |
| 96 | 93 | ||
| 97 | static void destroy_header_iter(struct header_iter *hiter) | 94 | static void destroy_header_iter(struct header_iter *hiter) |
| @@ -179,28 +176,27 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) | |||
| 179 | unsigned long long t = ns2usecs(iter->ts); | 176 | unsigned long long t = ns2usecs(iter->ts); |
| 180 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); | 177 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); |
| 181 | unsigned secs = (unsigned long)t; | 178 | unsigned secs = (unsigned long)t; |
| 182 | int ret = 1; | ||
| 183 | 179 | ||
| 184 | trace_assign_type(field, entry); | 180 | trace_assign_type(field, entry); |
| 185 | rw = &field->rw; | 181 | rw = &field->rw; |
| 186 | 182 | ||
| 187 | switch (rw->opcode) { | 183 | switch (rw->opcode) { |
| 188 | case MMIO_READ: | 184 | case MMIO_READ: |
| 189 | ret = trace_seq_printf(s, | 185 | trace_seq_printf(s, |
| 190 | "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", | 186 | "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", |
| 191 | rw->width, secs, usec_rem, rw->map_id, | 187 | rw->width, secs, usec_rem, rw->map_id, |
| 192 | (unsigned long long)rw->phys, | 188 | (unsigned long long)rw->phys, |
| 193 | rw->value, rw->pc, 0); | 189 | rw->value, rw->pc, 0); |
| 194 | break; | 190 | break; |
| 195 | case MMIO_WRITE: | 191 | case MMIO_WRITE: |
| 196 | ret = trace_seq_printf(s, | 192 | trace_seq_printf(s, |
| 197 | "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", | 193 | "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", |
| 198 | rw->width, secs, usec_rem, rw->map_id, | 194 | rw->width, secs, usec_rem, rw->map_id, |
| 199 | (unsigned long long)rw->phys, | 195 | (unsigned long long)rw->phys, |
| 200 | rw->value, rw->pc, 0); | 196 | rw->value, rw->pc, 0); |
| 201 | break; | 197 | break; |
| 202 | case MMIO_UNKNOWN_OP: | 198 | case MMIO_UNKNOWN_OP: |
| 203 | ret = trace_seq_printf(s, | 199 | trace_seq_printf(s, |
| 204 | "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx," | 200 | "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx," |
| 205 | "%02lx 0x%lx %d\n", | 201 | "%02lx 0x%lx %d\n", |
| 206 | secs, usec_rem, rw->map_id, | 202 | secs, usec_rem, rw->map_id, |
| @@ -209,12 +205,11 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) | |||
| 209 | (rw->value >> 0) & 0xff, rw->pc, 0); | 205 | (rw->value >> 0) & 0xff, rw->pc, 0); |
| 210 | break; | 206 | break; |
| 211 | default: | 207 | default: |
| 212 | ret = trace_seq_puts(s, "rw what?\n"); | 208 | trace_seq_puts(s, "rw what?\n"); |
| 213 | break; | 209 | break; |
| 214 | } | 210 | } |
| 215 | if (ret) | 211 | |
| 216 | return TRACE_TYPE_HANDLED; | 212 | return trace_handle_return(s); |
| 217 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 218 | } | 213 | } |
| 219 | 214 | ||
| 220 | static enum print_line_t mmio_print_map(struct trace_iterator *iter) | 215 | static enum print_line_t mmio_print_map(struct trace_iterator *iter) |
| @@ -226,31 +221,29 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter) | |||
| 226 | unsigned long long t = ns2usecs(iter->ts); | 221 | unsigned long long t = ns2usecs(iter->ts); |
| 227 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); | 222 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); |
| 228 | unsigned secs = (unsigned long)t; | 223 | unsigned secs = (unsigned long)t; |
| 229 | int ret; | ||
| 230 | 224 | ||
| 231 | trace_assign_type(field, entry); | 225 | trace_assign_type(field, entry); |
| 232 | m = &field->map; | 226 | m = &field->map; |
| 233 | 227 | ||
| 234 | switch (m->opcode) { | 228 | switch (m->opcode) { |
| 235 | case MMIO_PROBE: | 229 | case MMIO_PROBE: |
| 236 | ret = trace_seq_printf(s, | 230 | trace_seq_printf(s, |
| 237 | "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", | 231 | "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", |
| 238 | secs, usec_rem, m->map_id, | 232 | secs, usec_rem, m->map_id, |
| 239 | (unsigned long long)m->phys, m->virt, m->len, | 233 | (unsigned long long)m->phys, m->virt, m->len, |
| 240 | 0UL, 0); | 234 | 0UL, 0); |
| 241 | break; | 235 | break; |
| 242 | case MMIO_UNPROBE: | 236 | case MMIO_UNPROBE: |
| 243 | ret = trace_seq_printf(s, | 237 | trace_seq_printf(s, |
| 244 | "UNMAP %u.%06lu %d 0x%lx %d\n", | 238 | "UNMAP %u.%06lu %d 0x%lx %d\n", |
| 245 | secs, usec_rem, m->map_id, 0UL, 0); | 239 | secs, usec_rem, m->map_id, 0UL, 0); |
| 246 | break; | 240 | break; |
| 247 | default: | 241 | default: |
| 248 | ret = trace_seq_puts(s, "map what?\n"); | 242 | trace_seq_puts(s, "map what?\n"); |
| 249 | break; | 243 | break; |
| 250 | } | 244 | } |
| 251 | if (ret) | 245 | |
| 252 | return TRACE_TYPE_HANDLED; | 246 | return trace_handle_return(s); |
| 253 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 254 | } | 247 | } |
| 255 | 248 | ||
| 256 | static enum print_line_t mmio_print_mark(struct trace_iterator *iter) | 249 | static enum print_line_t mmio_print_mark(struct trace_iterator *iter) |
| @@ -262,14 +255,11 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter) | |||
| 262 | unsigned long long t = ns2usecs(iter->ts); | 255 | unsigned long long t = ns2usecs(iter->ts); |
| 263 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); | 256 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); |
| 264 | unsigned secs = (unsigned long)t; | 257 | unsigned secs = (unsigned long)t; |
| 265 | int ret; | ||
| 266 | 258 | ||
| 267 | /* The trailing newline must be in the message. */ | 259 | /* The trailing newline must be in the message. */ |
| 268 | ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg); | 260 | trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg); |
| 269 | if (!ret) | ||
| 270 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 271 | 261 | ||
| 272 | return TRACE_TYPE_HANDLED; | 262 | return trace_handle_return(s); |
| 273 | } | 263 | } |
| 274 | 264 | ||
| 275 | static enum print_line_t mmio_print_line(struct trace_iterator *iter) | 265 | static enum print_line_t mmio_print_line(struct trace_iterator *iter) |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index c6977d5a9b12..b77b9a697619 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c | |||
| @@ -25,15 +25,12 @@ enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter) | |||
| 25 | struct trace_seq *s = &iter->seq; | 25 | struct trace_seq *s = &iter->seq; |
| 26 | struct trace_entry *entry = iter->ent; | 26 | struct trace_entry *entry = iter->ent; |
| 27 | struct bputs_entry *field; | 27 | struct bputs_entry *field; |
| 28 | int ret; | ||
| 29 | 28 | ||
| 30 | trace_assign_type(field, entry); | 29 | trace_assign_type(field, entry); |
| 31 | 30 | ||
| 32 | ret = trace_seq_puts(s, field->str); | 31 | trace_seq_puts(s, field->str); |
| 33 | if (!ret) | ||
| 34 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 35 | 32 | ||
| 36 | return TRACE_TYPE_HANDLED; | 33 | return trace_handle_return(s); |
| 37 | } | 34 | } |
| 38 | 35 | ||
| 39 | enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) | 36 | enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) |
| @@ -41,15 +38,12 @@ enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) | |||
| 41 | struct trace_seq *s = &iter->seq; | 38 | struct trace_seq *s = &iter->seq; |
| 42 | struct trace_entry *entry = iter->ent; | 39 | struct trace_entry *entry = iter->ent; |
| 43 | struct bprint_entry *field; | 40 | struct bprint_entry *field; |
| 44 | int ret; | ||
| 45 | 41 | ||
| 46 | trace_assign_type(field, entry); | 42 | trace_assign_type(field, entry); |
| 47 | 43 | ||
| 48 | ret = trace_seq_bprintf(s, field->fmt, field->buf); | 44 | trace_seq_bprintf(s, field->fmt, field->buf); |
| 49 | if (!ret) | ||
| 50 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 51 | 45 | ||
| 52 | return TRACE_TYPE_HANDLED; | 46 | return trace_handle_return(s); |
| 53 | } | 47 | } |
| 54 | 48 | ||
| 55 | enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) | 49 | enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) |
| @@ -57,15 +51,12 @@ enum print_line_t trace_print_printk_msg_only(struct trace_iterator *iter) | |||
| 57 | struct trace_seq *s = &iter->seq; | 51 | struct trace_seq *s = &iter->seq; |
| 58 | struct trace_entry *entry = iter->ent; | 52 | struct trace_entry *entry = iter->ent; |
| 59 | struct print_entry *field; | 53 | struct print_entry *field; |
| 60 | int ret; | ||
| 61 | 54 | ||
| 62 | trace_assign_type(field, entry); | 55 | trace_assign_type(field, entry); |
| 63 | 56 | ||
| 64 | ret = trace_seq_puts(s, field->buf); | 57 | trace_seq_puts(s, field->buf); |
| 65 | if (!ret) | ||
| 66 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 67 | 58 | ||
| 68 | return TRACE_TYPE_HANDLED; | 59 | return trace_handle_return(s); |
| 69 | } | 60 | } |
| 70 | 61 | ||
| 71 | const char * | 62 | const char * |
| @@ -124,7 +115,7 @@ ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val, | |||
| 124 | 115 | ||
| 125 | if (ret == (const char *)(trace_seq_buffer_ptr(p))) | 116 | if (ret == (const char *)(trace_seq_buffer_ptr(p))) |
| 126 | trace_seq_printf(p, "0x%lx", val); | 117 | trace_seq_printf(p, "0x%lx", val); |
| 127 | 118 | ||
| 128 | trace_seq_putc(p, 0); | 119 | trace_seq_putc(p, 0); |
| 129 | 120 | ||
| 130 | return ret; | 121 | return ret; |
| @@ -193,7 +184,6 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, | |||
| 193 | struct trace_seq *s = &iter->seq; | 184 | struct trace_seq *s = &iter->seq; |
| 194 | struct trace_seq *p = &iter->tmp_seq; | 185 | struct trace_seq *p = &iter->tmp_seq; |
| 195 | struct trace_entry *entry; | 186 | struct trace_entry *entry; |
| 196 | int ret; | ||
| 197 | 187 | ||
| 198 | event = container_of(trace_event, struct ftrace_event_call, event); | 188 | event = container_of(trace_event, struct ftrace_event_call, event); |
| 199 | entry = iter->ent; | 189 | entry = iter->ent; |
| @@ -204,11 +194,9 @@ int ftrace_raw_output_prep(struct trace_iterator *iter, | |||
| 204 | } | 194 | } |
| 205 | 195 | ||
| 206 | trace_seq_init(p); | 196 | trace_seq_init(p); |
| 207 | ret = trace_seq_printf(s, "%s: ", ftrace_event_name(event)); | 197 | trace_seq_printf(s, "%s: ", ftrace_event_name(event)); |
| 208 | if (!ret) | ||
| 209 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 210 | 198 | ||
| 211 | return 0; | 199 | return trace_handle_return(s); |
| 212 | } | 200 | } |
| 213 | EXPORT_SYMBOL(ftrace_raw_output_prep); | 201 | EXPORT_SYMBOL(ftrace_raw_output_prep); |
| 214 | 202 | ||
| @@ -216,18 +204,11 @@ static int ftrace_output_raw(struct trace_iterator *iter, char *name, | |||
| 216 | char *fmt, va_list ap) | 204 | char *fmt, va_list ap) |
| 217 | { | 205 | { |
| 218 | struct trace_seq *s = &iter->seq; | 206 | struct trace_seq *s = &iter->seq; |
| 219 | int ret; | ||
| 220 | |||
| 221 | ret = trace_seq_printf(s, "%s: ", name); | ||
| 222 | if (!ret) | ||
| 223 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 224 | |||
| 225 | ret = trace_seq_vprintf(s, fmt, ap); | ||
| 226 | 207 | ||
| 227 | if (!ret) | 208 | trace_seq_printf(s, "%s: ", name); |
| 228 | return TRACE_TYPE_PARTIAL_LINE; | 209 | trace_seq_vprintf(s, fmt, ap); |
| 229 | 210 | ||
| 230 | return TRACE_TYPE_HANDLED; | 211 | return trace_handle_return(s); |
| 231 | } | 212 | } |
| 232 | 213 | ||
| 233 | int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) | 214 | int ftrace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...) |
| @@ -260,7 +241,7 @@ static inline const char *kretprobed(const char *name) | |||
| 260 | } | 241 | } |
| 261 | #endif /* CONFIG_KRETPROBES */ | 242 | #endif /* CONFIG_KRETPROBES */ |
| 262 | 243 | ||
| 263 | static int | 244 | static void |
| 264 | seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | 245 | seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) |
| 265 | { | 246 | { |
| 266 | #ifdef CONFIG_KALLSYMS | 247 | #ifdef CONFIG_KALLSYMS |
| @@ -271,12 +252,11 @@ seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | |||
| 271 | 252 | ||
| 272 | name = kretprobed(str); | 253 | name = kretprobed(str); |
| 273 | 254 | ||
| 274 | return trace_seq_printf(s, fmt, name); | 255 | trace_seq_printf(s, fmt, name); |
| 275 | #endif | 256 | #endif |
| 276 | return 1; | ||
| 277 | } | 257 | } |
| 278 | 258 | ||
| 279 | static int | 259 | static void |
| 280 | seq_print_sym_offset(struct trace_seq *s, const char *fmt, | 260 | seq_print_sym_offset(struct trace_seq *s, const char *fmt, |
| 281 | unsigned long address) | 261 | unsigned long address) |
| 282 | { | 262 | { |
| @@ -287,9 +267,8 @@ seq_print_sym_offset(struct trace_seq *s, const char *fmt, | |||
| 287 | sprint_symbol(str, address); | 267 | sprint_symbol(str, address); |
| 288 | name = kretprobed(str); | 268 | name = kretprobed(str); |
| 289 | 269 | ||
| 290 | return trace_seq_printf(s, fmt, name); | 270 | trace_seq_printf(s, fmt, name); |
| 291 | #endif | 271 | #endif |
| 292 | return 1; | ||
| 293 | } | 272 | } |
| 294 | 273 | ||
| 295 | #ifndef CONFIG_64BIT | 274 | #ifndef CONFIG_64BIT |
| @@ -320,14 +299,14 @@ int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | |||
| 320 | if (file) { | 299 | if (file) { |
| 321 | ret = trace_seq_path(s, &file->f_path); | 300 | ret = trace_seq_path(s, &file->f_path); |
| 322 | if (ret) | 301 | if (ret) |
| 323 | ret = trace_seq_printf(s, "[+0x%lx]", | 302 | trace_seq_printf(s, "[+0x%lx]", |
| 324 | ip - vmstart); | 303 | ip - vmstart); |
| 325 | } | 304 | } |
| 326 | up_read(&mm->mmap_sem); | 305 | up_read(&mm->mmap_sem); |
| 327 | } | 306 | } |
| 328 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) | 307 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) |
| 329 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | 308 | trace_seq_printf(s, " <" IP_FMT ">", ip); |
| 330 | return ret; | 309 | return !trace_seq_has_overflowed(s); |
| 331 | } | 310 | } |
| 332 | 311 | ||
| 333 | int | 312 | int |
| @@ -335,7 +314,6 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | |||
| 335 | unsigned long sym_flags) | 314 | unsigned long sym_flags) |
| 336 | { | 315 | { |
| 337 | struct mm_struct *mm = NULL; | 316 | struct mm_struct *mm = NULL; |
| 338 | int ret = 1; | ||
| 339 | unsigned int i; | 317 | unsigned int i; |
| 340 | 318 | ||
| 341 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { | 319 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { |
| @@ -354,48 +332,45 @@ seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | |||
| 354 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | 332 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { |
| 355 | unsigned long ip = entry->caller[i]; | 333 | unsigned long ip = entry->caller[i]; |
| 356 | 334 | ||
| 357 | if (ip == ULONG_MAX || !ret) | 335 | if (ip == ULONG_MAX || trace_seq_has_overflowed(s)) |
| 358 | break; | 336 | break; |
| 359 | if (ret) | 337 | |
| 360 | ret = trace_seq_puts(s, " => "); | 338 | trace_seq_puts(s, " => "); |
| 339 | |||
| 361 | if (!ip) { | 340 | if (!ip) { |
| 362 | if (ret) | 341 | trace_seq_puts(s, "??"); |
| 363 | ret = trace_seq_puts(s, "??"); | 342 | trace_seq_putc(s, '\n'); |
| 364 | if (ret) | ||
| 365 | ret = trace_seq_putc(s, '\n'); | ||
| 366 | continue; | 343 | continue; |
| 367 | } | 344 | } |
| 368 | if (!ret) | 345 | |
| 369 | break; | 346 | seq_print_user_ip(s, mm, ip, sym_flags); |
| 370 | if (ret) | 347 | trace_seq_putc(s, '\n'); |
| 371 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | ||
| 372 | ret = trace_seq_putc(s, '\n'); | ||
| 373 | } | 348 | } |
| 374 | 349 | ||
| 375 | if (mm) | 350 | if (mm) |
| 376 | mmput(mm); | 351 | mmput(mm); |
| 377 | return ret; | 352 | |
| 353 | return !trace_seq_has_overflowed(s); | ||
| 378 | } | 354 | } |
| 379 | 355 | ||
| 380 | int | 356 | int |
| 381 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | 357 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) |
| 382 | { | 358 | { |
| 383 | int ret; | 359 | if (!ip) { |
| 384 | 360 | trace_seq_putc(s, '0'); | |
| 385 | if (!ip) | 361 | goto out; |
| 386 | return trace_seq_putc(s, '0'); | 362 | } |
| 387 | 363 | ||
| 388 | if (sym_flags & TRACE_ITER_SYM_OFFSET) | 364 | if (sym_flags & TRACE_ITER_SYM_OFFSET) |
| 389 | ret = seq_print_sym_offset(s, "%s", ip); | 365 | seq_print_sym_offset(s, "%s", ip); |
| 390 | else | 366 | else |
| 391 | ret = seq_print_sym_short(s, "%s", ip); | 367 | seq_print_sym_short(s, "%s", ip); |
| 392 | |||
| 393 | if (!ret) | ||
| 394 | return 0; | ||
| 395 | 368 | ||
| 396 | if (sym_flags & TRACE_ITER_SYM_ADDR) | 369 | if (sym_flags & TRACE_ITER_SYM_ADDR) |
| 397 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | 370 | trace_seq_printf(s, " <" IP_FMT ">", ip); |
| 398 | return ret; | 371 | |
| 372 | out: | ||
| 373 | return !trace_seq_has_overflowed(s); | ||
| 399 | } | 374 | } |
| 400 | 375 | ||
| 401 | /** | 376 | /** |
| @@ -413,7 +388,6 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | |||
| 413 | char irqs_off; | 388 | char irqs_off; |
| 414 | int hardirq; | 389 | int hardirq; |
| 415 | int softirq; | 390 | int softirq; |
| 416 | int ret; | ||
| 417 | 391 | ||
| 418 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; | 392 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; |
| 419 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; | 393 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; |
| @@ -445,16 +419,15 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) | |||
| 445 | softirq ? 's' : | 419 | softirq ? 's' : |
| 446 | '.'; | 420 | '.'; |
| 447 | 421 | ||
| 448 | if (!trace_seq_printf(s, "%c%c%c", | 422 | trace_seq_printf(s, "%c%c%c", |
| 449 | irqs_off, need_resched, hardsoft_irq)) | 423 | irqs_off, need_resched, hardsoft_irq); |
| 450 | return 0; | ||
| 451 | 424 | ||
| 452 | if (entry->preempt_count) | 425 | if (entry->preempt_count) |
| 453 | ret = trace_seq_printf(s, "%x", entry->preempt_count); | 426 | trace_seq_printf(s, "%x", entry->preempt_count); |
| 454 | else | 427 | else |
| 455 | ret = trace_seq_putc(s, '.'); | 428 | trace_seq_putc(s, '.'); |
| 456 | 429 | ||
| 457 | return ret; | 430 | return !trace_seq_has_overflowed(s); |
| 458 | } | 431 | } |
| 459 | 432 | ||
| 460 | static int | 433 | static int |
| @@ -464,14 +437,38 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | |||
| 464 | 437 | ||
| 465 | trace_find_cmdline(entry->pid, comm); | 438 | trace_find_cmdline(entry->pid, comm); |
| 466 | 439 | ||
| 467 | if (!trace_seq_printf(s, "%8.8s-%-5d %3d", | 440 | trace_seq_printf(s, "%8.8s-%-5d %3d", |
| 468 | comm, entry->pid, cpu)) | 441 | comm, entry->pid, cpu); |
| 469 | return 0; | ||
| 470 | 442 | ||
| 471 | return trace_print_lat_fmt(s, entry); | 443 | return trace_print_lat_fmt(s, entry); |
| 472 | } | 444 | } |
| 473 | 445 | ||
| 474 | static unsigned long preempt_mark_thresh_us = 100; | 446 | #undef MARK |
| 447 | #define MARK(v, s) {.val = v, .sym = s} | ||
| 448 | /* trace overhead mark */ | ||
| 449 | static const struct trace_mark { | ||
| 450 | unsigned long long val; /* unit: nsec */ | ||
| 451 | char sym; | ||
| 452 | } mark[] = { | ||
| 453 | MARK(1000000000ULL , '$'), /* 1 sec */ | ||
| 454 | MARK(1000000ULL , '#'), /* 1000 usecs */ | ||
| 455 | MARK(100000ULL , '!'), /* 100 usecs */ | ||
| 456 | MARK(10000ULL , '+'), /* 10 usecs */ | ||
| 457 | }; | ||
| 458 | #undef MARK | ||
| 459 | |||
| 460 | char trace_find_mark(unsigned long long d) | ||
| 461 | { | ||
| 462 | int i; | ||
| 463 | int size = ARRAY_SIZE(mark); | ||
| 464 | |||
| 465 | for (i = 0; i < size; i++) { | ||
| 466 | if (d >= mark[i].val) | ||
| 467 | break; | ||
| 468 | } | ||
| 469 | |||
| 470 | return (i == size) ? ' ' : mark[i].sym; | ||
| 471 | } | ||
| 475 | 472 | ||
| 476 | static int | 473 | static int |
| 477 | lat_print_timestamp(struct trace_iterator *iter, u64 next_ts) | 474 | lat_print_timestamp(struct trace_iterator *iter, u64 next_ts) |
| @@ -493,24 +490,28 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts) | |||
| 493 | unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC); | 490 | unsigned long rel_usec = do_div(rel_ts, USEC_PER_MSEC); |
| 494 | unsigned long rel_msec = (unsigned long)rel_ts; | 491 | unsigned long rel_msec = (unsigned long)rel_ts; |
| 495 | 492 | ||
| 496 | return trace_seq_printf( | 493 | trace_seq_printf( |
| 497 | s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ", | 494 | s, "[%08llx] %ld.%03ldms (+%ld.%03ldms): ", |
| 498 | ns2usecs(iter->ts), | 495 | ns2usecs(iter->ts), |
| 499 | abs_msec, abs_usec, | 496 | abs_msec, abs_usec, |
| 500 | rel_msec, rel_usec); | 497 | rel_msec, rel_usec); |
| 498 | |||
| 501 | } else if (verbose && !in_ns) { | 499 | } else if (verbose && !in_ns) { |
| 502 | return trace_seq_printf( | 500 | trace_seq_printf( |
| 503 | s, "[%016llx] %lld (+%lld): ", | 501 | s, "[%016llx] %lld (+%lld): ", |
| 504 | iter->ts, abs_ts, rel_ts); | 502 | iter->ts, abs_ts, rel_ts); |
| 503 | |||
| 505 | } else if (!verbose && in_ns) { | 504 | } else if (!verbose && in_ns) { |
| 506 | return trace_seq_printf( | 505 | trace_seq_printf( |
| 507 | s, " %4lldus%c: ", | 506 | s, " %4lldus%c: ", |
| 508 | abs_ts, | 507 | abs_ts, |
| 509 | rel_ts > preempt_mark_thresh_us ? '!' : | 508 | trace_find_mark(rel_ts * NSEC_PER_USEC)); |
| 510 | rel_ts > 1 ? '+' : ' '); | 509 | |
| 511 | } else { /* !verbose && !in_ns */ | 510 | } else { /* !verbose && !in_ns */ |
| 512 | return trace_seq_printf(s, " %4lld: ", abs_ts); | 511 | trace_seq_printf(s, " %4lld: ", abs_ts); |
| 513 | } | 512 | } |
| 513 | |||
| 514 | return !trace_seq_has_overflowed(s); | ||
| 514 | } | 515 | } |
| 515 | 516 | ||
| 516 | int trace_print_context(struct trace_iterator *iter) | 517 | int trace_print_context(struct trace_iterator *iter) |
| @@ -520,34 +521,29 @@ int trace_print_context(struct trace_iterator *iter) | |||
| 520 | unsigned long long t; | 521 | unsigned long long t; |
| 521 | unsigned long secs, usec_rem; | 522 | unsigned long secs, usec_rem; |
| 522 | char comm[TASK_COMM_LEN]; | 523 | char comm[TASK_COMM_LEN]; |
| 523 | int ret; | ||
| 524 | 524 | ||
| 525 | trace_find_cmdline(entry->pid, comm); | 525 | trace_find_cmdline(entry->pid, comm); |
| 526 | 526 | ||
| 527 | ret = trace_seq_printf(s, "%16s-%-5d [%03d] ", | 527 | trace_seq_printf(s, "%16s-%-5d [%03d] ", |
| 528 | comm, entry->pid, iter->cpu); | 528 | comm, entry->pid, iter->cpu); |
| 529 | if (!ret) | ||
| 530 | return 0; | ||
| 531 | 529 | ||
| 532 | if (trace_flags & TRACE_ITER_IRQ_INFO) { | 530 | if (trace_flags & TRACE_ITER_IRQ_INFO) |
| 533 | ret = trace_print_lat_fmt(s, entry); | 531 | trace_print_lat_fmt(s, entry); |
| 534 | if (!ret) | ||
| 535 | return 0; | ||
| 536 | } | ||
| 537 | 532 | ||
| 538 | if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) { | 533 | if (iter->iter_flags & TRACE_FILE_TIME_IN_NS) { |
| 539 | t = ns2usecs(iter->ts); | 534 | t = ns2usecs(iter->ts); |
| 540 | usec_rem = do_div(t, USEC_PER_SEC); | 535 | usec_rem = do_div(t, USEC_PER_SEC); |
| 541 | secs = (unsigned long)t; | 536 | secs = (unsigned long)t; |
| 542 | return trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem); | 537 | trace_seq_printf(s, " %5lu.%06lu: ", secs, usec_rem); |
| 543 | } else | 538 | } else |
| 544 | return trace_seq_printf(s, " %12llu: ", iter->ts); | 539 | trace_seq_printf(s, " %12llu: ", iter->ts); |
| 540 | |||
| 541 | return !trace_seq_has_overflowed(s); | ||
| 545 | } | 542 | } |
| 546 | 543 | ||
| 547 | int trace_print_lat_context(struct trace_iterator *iter) | 544 | int trace_print_lat_context(struct trace_iterator *iter) |
| 548 | { | 545 | { |
| 549 | u64 next_ts; | 546 | u64 next_ts; |
| 550 | int ret; | ||
| 551 | /* trace_find_next_entry will reset ent_size */ | 547 | /* trace_find_next_entry will reset ent_size */ |
| 552 | int ent_size = iter->ent_size; | 548 | int ent_size = iter->ent_size; |
| 553 | struct trace_seq *s = &iter->seq; | 549 | struct trace_seq *s = &iter->seq; |
| @@ -567,18 +563,17 @@ int trace_print_lat_context(struct trace_iterator *iter) | |||
| 567 | 563 | ||
| 568 | trace_find_cmdline(entry->pid, comm); | 564 | trace_find_cmdline(entry->pid, comm); |
| 569 | 565 | ||
| 570 | ret = trace_seq_printf( | 566 | trace_seq_printf( |
| 571 | s, "%16s %5d %3d %d %08x %08lx ", | 567 | s, "%16s %5d %3d %d %08x %08lx ", |
| 572 | comm, entry->pid, iter->cpu, entry->flags, | 568 | comm, entry->pid, iter->cpu, entry->flags, |
| 573 | entry->preempt_count, iter->idx); | 569 | entry->preempt_count, iter->idx); |
| 574 | } else { | 570 | } else { |
| 575 | ret = lat_print_generic(s, entry, iter->cpu); | 571 | lat_print_generic(s, entry, iter->cpu); |
| 576 | } | 572 | } |
| 577 | 573 | ||
| 578 | if (ret) | 574 | lat_print_timestamp(iter, next_ts); |
| 579 | ret = lat_print_timestamp(iter, next_ts); | ||
| 580 | 575 | ||
| 581 | return ret; | 576 | return !trace_seq_has_overflowed(s); |
| 582 | } | 577 | } |
| 583 | 578 | ||
| 584 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; | 579 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; |
| @@ -692,7 +687,7 @@ int register_ftrace_event(struct trace_event *event) | |||
| 692 | goto out; | 687 | goto out; |
| 693 | 688 | ||
| 694 | } else { | 689 | } else { |
| 695 | 690 | ||
| 696 | event->type = next_event_type++; | 691 | event->type = next_event_type++; |
| 697 | list = &ftrace_event_list; | 692 | list = &ftrace_event_list; |
| 698 | } | 693 | } |
| @@ -764,10 +759,9 @@ EXPORT_SYMBOL_GPL(unregister_ftrace_event); | |||
| 764 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, | 759 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags, |
| 765 | struct trace_event *event) | 760 | struct trace_event *event) |
| 766 | { | 761 | { |
| 767 | if (!trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type)) | 762 | trace_seq_printf(&iter->seq, "type: %d\n", iter->ent->type); |
| 768 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 769 | 763 | ||
| 770 | return TRACE_TYPE_HANDLED; | 764 | return trace_handle_return(&iter->seq); |
| 771 | } | 765 | } |
| 772 | 766 | ||
| 773 | /* TRACE_FN */ | 767 | /* TRACE_FN */ |
| @@ -779,24 +773,16 @@ static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags, | |||
| 779 | 773 | ||
| 780 | trace_assign_type(field, iter->ent); | 774 | trace_assign_type(field, iter->ent); |
| 781 | 775 | ||
| 782 | if (!seq_print_ip_sym(s, field->ip, flags)) | 776 | seq_print_ip_sym(s, field->ip, flags); |
| 783 | goto partial; | ||
| 784 | 777 | ||
| 785 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { | 778 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { |
| 786 | if (!trace_seq_puts(s, " <-")) | 779 | trace_seq_puts(s, " <-"); |
| 787 | goto partial; | 780 | seq_print_ip_sym(s, field->parent_ip, flags); |
| 788 | if (!seq_print_ip_sym(s, | ||
| 789 | field->parent_ip, | ||
| 790 | flags)) | ||
| 791 | goto partial; | ||
| 792 | } | 781 | } |
| 793 | if (!trace_seq_putc(s, '\n')) | ||
| 794 | goto partial; | ||
| 795 | 782 | ||
| 796 | return TRACE_TYPE_HANDLED; | 783 | trace_seq_putc(s, '\n'); |
| 797 | 784 | ||
| 798 | partial: | 785 | return trace_handle_return(s); |
| 799 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 800 | } | 786 | } |
| 801 | 787 | ||
| 802 | static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags, | 788 | static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags, |
| @@ -806,12 +792,11 @@ static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags, | |||
| 806 | 792 | ||
| 807 | trace_assign_type(field, iter->ent); | 793 | trace_assign_type(field, iter->ent); |
| 808 | 794 | ||
| 809 | if (!trace_seq_printf(&iter->seq, "%lx %lx\n", | 795 | trace_seq_printf(&iter->seq, "%lx %lx\n", |
| 810 | field->ip, | 796 | field->ip, |
| 811 | field->parent_ip)) | 797 | field->parent_ip); |
| 812 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 813 | 798 | ||
| 814 | return TRACE_TYPE_HANDLED; | 799 | return trace_handle_return(&iter->seq); |
| 815 | } | 800 | } |
| 816 | 801 | ||
| 817 | static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags, | 802 | static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags, |
| @@ -822,10 +807,10 @@ static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags, | |||
| 822 | 807 | ||
| 823 | trace_assign_type(field, iter->ent); | 808 | trace_assign_type(field, iter->ent); |
| 824 | 809 | ||
| 825 | SEQ_PUT_HEX_FIELD_RET(s, field->ip); | 810 | SEQ_PUT_HEX_FIELD(s, field->ip); |
| 826 | SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); | 811 | SEQ_PUT_HEX_FIELD(s, field->parent_ip); |
| 827 | 812 | ||
| 828 | return TRACE_TYPE_HANDLED; | 813 | return trace_handle_return(s); |
| 829 | } | 814 | } |
| 830 | 815 | ||
| 831 | static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags, | 816 | static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags, |
| @@ -836,10 +821,10 @@ static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags, | |||
| 836 | 821 | ||
| 837 | trace_assign_type(field, iter->ent); | 822 | trace_assign_type(field, iter->ent); |
| 838 | 823 | ||
| 839 | SEQ_PUT_FIELD_RET(s, field->ip); | 824 | SEQ_PUT_FIELD(s, field->ip); |
| 840 | SEQ_PUT_FIELD_RET(s, field->parent_ip); | 825 | SEQ_PUT_FIELD(s, field->parent_ip); |
| 841 | 826 | ||
| 842 | return TRACE_TYPE_HANDLED; | 827 | return trace_handle_return(s); |
| 843 | } | 828 | } |
| 844 | 829 | ||
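The _RET suffix disappears from the field macros used by these hex and binary emitters: the old variants embedded a return TRACE_TYPE_PARTIAL_LINE on failure, the new ones only emit bytes and leave the overflow check to trace_handle_return(). Presumed post-patch definitions in the tracing headers (this diff shows only the call sites):

        #define SEQ_PUT_FIELD(s, x)                                     \
                trace_seq_putmem(s, &(x), sizeof(x))

        #define SEQ_PUT_HEX_FIELD(s, x)                                 \
                trace_seq_putmem_hex(s, &(x), sizeof(x))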
| 845 | static struct trace_event_functions trace_fn_funcs = { | 830 | static struct trace_event_functions trace_fn_funcs = { |
| @@ -868,18 +853,17 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter, | |||
| 868 | T = task_state_char(field->next_state); | 853 | T = task_state_char(field->next_state); |
| 869 | S = task_state_char(field->prev_state); | 854 | S = task_state_char(field->prev_state); |
| 870 | trace_find_cmdline(field->next_pid, comm); | 855 | trace_find_cmdline(field->next_pid, comm); |
| 871 | if (!trace_seq_printf(&iter->seq, | 856 | trace_seq_printf(&iter->seq, |
| 872 | " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", | 857 | " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", |
| 873 | field->prev_pid, | 858 | field->prev_pid, |
| 874 | field->prev_prio, | 859 | field->prev_prio, |
| 875 | S, delim, | 860 | S, delim, |
| 876 | field->next_cpu, | 861 | field->next_cpu, |
| 877 | field->next_pid, | 862 | field->next_pid, |
| 878 | field->next_prio, | 863 | field->next_prio, |
| 879 | T, comm)) | 864 | T, comm); |
| 880 | return TRACE_TYPE_PARTIAL_LINE; | 865 | |
| 881 | 866 | return trace_handle_return(&iter->seq); | |
| 882 | return TRACE_TYPE_HANDLED; | ||
| 883 | } | 867 | } |
| 884 | 868 | ||
| 885 | static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags, | 869 | static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags, |
| @@ -904,17 +888,16 @@ static int trace_ctxwake_raw(struct trace_iterator *iter, char S) | |||
| 904 | if (!S) | 888 | if (!S) |
| 905 | S = task_state_char(field->prev_state); | 889 | S = task_state_char(field->prev_state); |
| 906 | T = task_state_char(field->next_state); | 890 | T = task_state_char(field->next_state); |
| 907 | if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", | 891 | trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", |
| 908 | field->prev_pid, | 892 | field->prev_pid, |
| 909 | field->prev_prio, | 893 | field->prev_prio, |
| 910 | S, | 894 | S, |
| 911 | field->next_cpu, | 895 | field->next_cpu, |
| 912 | field->next_pid, | 896 | field->next_pid, |
| 913 | field->next_prio, | 897 | field->next_prio, |
| 914 | T)) | 898 | T); |
| 915 | return TRACE_TYPE_PARTIAL_LINE; | 899 | |
| 916 | 900 | return trace_handle_return(&iter->seq); | |
| 917 | return TRACE_TYPE_HANDLED; | ||
| 918 | } | 901 | } |
| 919 | 902 | ||
| 920 | static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags, | 903 | static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags, |
| @@ -942,15 +925,15 @@ static int trace_ctxwake_hex(struct trace_iterator *iter, char S) | |||
| 942 | S = task_state_char(field->prev_state); | 925 | S = task_state_char(field->prev_state); |
| 943 | T = task_state_char(field->next_state); | 926 | T = task_state_char(field->next_state); |
| 944 | 927 | ||
| 945 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); | 928 | SEQ_PUT_HEX_FIELD(s, field->prev_pid); |
| 946 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); | 929 | SEQ_PUT_HEX_FIELD(s, field->prev_prio); |
| 947 | SEQ_PUT_HEX_FIELD_RET(s, S); | 930 | SEQ_PUT_HEX_FIELD(s, S); |
| 948 | SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); | 931 | SEQ_PUT_HEX_FIELD(s, field->next_cpu); |
| 949 | SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); | 932 | SEQ_PUT_HEX_FIELD(s, field->next_pid); |
| 950 | SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); | 933 | SEQ_PUT_HEX_FIELD(s, field->next_prio); |
| 951 | SEQ_PUT_HEX_FIELD_RET(s, T); | 934 | SEQ_PUT_HEX_FIELD(s, T); |
| 952 | 935 | ||
| 953 | return TRACE_TYPE_HANDLED; | 936 | return trace_handle_return(s); |
| 954 | } | 937 | } |
| 955 | 938 | ||
| 956 | static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags, | 939 | static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags, |
| @@ -973,14 +956,15 @@ static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter, | |||
| 973 | 956 | ||
| 974 | trace_assign_type(field, iter->ent); | 957 | trace_assign_type(field, iter->ent); |
| 975 | 958 | ||
| 976 | SEQ_PUT_FIELD_RET(s, field->prev_pid); | 959 | SEQ_PUT_FIELD(s, field->prev_pid); |
| 977 | SEQ_PUT_FIELD_RET(s, field->prev_prio); | 960 | SEQ_PUT_FIELD(s, field->prev_prio); |
| 978 | SEQ_PUT_FIELD_RET(s, field->prev_state); | 961 | SEQ_PUT_FIELD(s, field->prev_state); |
| 979 | SEQ_PUT_FIELD_RET(s, field->next_pid); | 962 | SEQ_PUT_FIELD(s, field->next_cpu); |
| 980 | SEQ_PUT_FIELD_RET(s, field->next_prio); | 963 | SEQ_PUT_FIELD(s, field->next_pid); |
| 981 | SEQ_PUT_FIELD_RET(s, field->next_state); | 964 | SEQ_PUT_FIELD(s, field->next_prio); |
| 965 | SEQ_PUT_FIELD(s, field->next_state); | ||
| 982 | 966 | ||
| 983 | return TRACE_TYPE_HANDLED; | 967 | return trace_handle_return(s); |
| 984 | } | 968 | } |
| 985 | 969 | ||
| 986 | static struct trace_event_functions trace_ctx_funcs = { | 970 | static struct trace_event_functions trace_ctx_funcs = { |
| @@ -1020,23 +1004,19 @@ static enum print_line_t trace_stack_print(struct trace_iterator *iter, | |||
| 1020 | trace_assign_type(field, iter->ent); | 1004 | trace_assign_type(field, iter->ent); |
| 1021 | end = (unsigned long *)((long)iter->ent + iter->ent_size); | 1005 | end = (unsigned long *)((long)iter->ent + iter->ent_size); |
| 1022 | 1006 | ||
| 1023 | if (!trace_seq_puts(s, "<stack trace>\n")) | 1007 | trace_seq_puts(s, "<stack trace>\n"); |
| 1024 | goto partial; | ||
| 1025 | 1008 | ||
| 1026 | for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) { | 1009 | for (p = field->caller; p && *p != ULONG_MAX && p < end; p++) { |
| 1027 | if (!trace_seq_puts(s, " => ")) | ||
| 1028 | goto partial; | ||
| 1029 | 1010 | ||
| 1030 | if (!seq_print_ip_sym(s, *p, flags)) | 1011 | if (trace_seq_has_overflowed(s)) |
| 1031 | goto partial; | 1012 | break; |
| 1032 | if (!trace_seq_putc(s, '\n')) | ||
| 1033 | goto partial; | ||
| 1034 | } | ||
| 1035 | 1013 | ||
| 1036 | return TRACE_TYPE_HANDLED; | 1014 | trace_seq_puts(s, " => "); |
| 1015 | seq_print_ip_sym(s, *p, flags); | ||
| 1016 | trace_seq_putc(s, '\n'); | ||
| 1017 | } | ||
| 1037 | 1018 | ||
| 1038 | partial: | 1019 | return trace_handle_return(s); |
| 1039 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1040 | } | 1020 | } |
| 1041 | 1021 | ||
| 1042 | static struct trace_event_functions trace_stack_funcs = { | 1022 | static struct trace_event_functions trace_stack_funcs = { |
| @@ -1057,16 +1037,10 @@ static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, | |||
| 1057 | 1037 | ||
| 1058 | trace_assign_type(field, iter->ent); | 1038 | trace_assign_type(field, iter->ent); |
| 1059 | 1039 | ||
| 1060 | if (!trace_seq_puts(s, "<user stack trace>\n")) | 1040 | trace_seq_puts(s, "<user stack trace>\n"); |
| 1061 | goto partial; | 1041 | seq_print_userip_objs(field, s, flags); |
| 1062 | |||
| 1063 | if (!seq_print_userip_objs(field, s, flags)) | ||
| 1064 | goto partial; | ||
| 1065 | |||
| 1066 | return TRACE_TYPE_HANDLED; | ||
| 1067 | 1042 | ||
| 1068 | partial: | 1043 | return trace_handle_return(s); |
| 1069 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1070 | } | 1044 | } |
| 1071 | 1045 | ||
| 1072 | static struct trace_event_functions trace_user_stack_funcs = { | 1046 | static struct trace_event_functions trace_user_stack_funcs = { |
| @@ -1089,19 +1063,11 @@ trace_bputs_print(struct trace_iterator *iter, int flags, | |||
| 1089 | 1063 | ||
| 1090 | trace_assign_type(field, entry); | 1064 | trace_assign_type(field, entry); |
| 1091 | 1065 | ||
| 1092 | if (!seq_print_ip_sym(s, field->ip, flags)) | 1066 | seq_print_ip_sym(s, field->ip, flags); |
| 1093 | goto partial; | 1067 | trace_seq_puts(s, ": "); |
| 1068 | trace_seq_puts(s, field->str); | ||
| 1094 | 1069 | ||
| 1095 | if (!trace_seq_puts(s, ": ")) | 1070 | return trace_handle_return(s); |
| 1096 | goto partial; | ||
| 1097 | |||
| 1098 | if (!trace_seq_puts(s, field->str)) | ||
| 1099 | goto partial; | ||
| 1100 | |||
| 1101 | return TRACE_TYPE_HANDLED; | ||
| 1102 | |||
| 1103 | partial: | ||
| 1104 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1105 | } | 1071 | } |
| 1106 | 1072 | ||
| 1107 | 1073 | ||
| @@ -1114,16 +1080,10 @@ trace_bputs_raw(struct trace_iterator *iter, int flags, | |||
| 1114 | 1080 | ||
| 1115 | trace_assign_type(field, iter->ent); | 1081 | trace_assign_type(field, iter->ent); |
| 1116 | 1082 | ||
| 1117 | if (!trace_seq_printf(s, ": %lx : ", field->ip)) | 1083 | trace_seq_printf(s, ": %lx : ", field->ip); |
| 1118 | goto partial; | 1084 | trace_seq_puts(s, field->str); |
| 1119 | |||
| 1120 | if (!trace_seq_puts(s, field->str)) | ||
| 1121 | goto partial; | ||
| 1122 | 1085 | ||
| 1123 | return TRACE_TYPE_HANDLED; | 1086 | return trace_handle_return(s); |
| 1124 | |||
| 1125 | partial: | ||
| 1126 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1127 | } | 1087 | } |
| 1128 | 1088 | ||
| 1129 | static struct trace_event_functions trace_bputs_funcs = { | 1089 | static struct trace_event_functions trace_bputs_funcs = { |
| @@ -1147,19 +1107,11 @@ trace_bprint_print(struct trace_iterator *iter, int flags, | |||
| 1147 | 1107 | ||
| 1148 | trace_assign_type(field, entry); | 1108 | trace_assign_type(field, entry); |
| 1149 | 1109 | ||
| 1150 | if (!seq_print_ip_sym(s, field->ip, flags)) | 1110 | seq_print_ip_sym(s, field->ip, flags); |
| 1151 | goto partial; | 1111 | trace_seq_puts(s, ": "); |
| 1152 | 1112 | trace_seq_bprintf(s, field->fmt, field->buf); | |
| 1153 | if (!trace_seq_puts(s, ": ")) | ||
| 1154 | goto partial; | ||
| 1155 | |||
| 1156 | if (!trace_seq_bprintf(s, field->fmt, field->buf)) | ||
| 1157 | goto partial; | ||
| 1158 | 1113 | ||
| 1159 | return TRACE_TYPE_HANDLED; | 1114 | return trace_handle_return(s); |
| 1160 | |||
| 1161 | partial: | ||
| 1162 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1163 | } | 1115 | } |
| 1164 | 1116 | ||
| 1165 | 1117 | ||
| @@ -1172,16 +1124,10 @@ trace_bprint_raw(struct trace_iterator *iter, int flags, | |||
| 1172 | 1124 | ||
| 1173 | trace_assign_type(field, iter->ent); | 1125 | trace_assign_type(field, iter->ent); |
| 1174 | 1126 | ||
| 1175 | if (!trace_seq_printf(s, ": %lx : ", field->ip)) | 1127 | trace_seq_printf(s, ": %lx : ", field->ip); |
| 1176 | goto partial; | 1128 | trace_seq_bprintf(s, field->fmt, field->buf); |
| 1177 | |||
| 1178 | if (!trace_seq_bprintf(s, field->fmt, field->buf)) | ||
| 1179 | goto partial; | ||
| 1180 | 1129 | ||
| 1181 | return TRACE_TYPE_HANDLED; | 1130 | return trace_handle_return(s); |
| 1182 | |||
| 1183 | partial: | ||
| 1184 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1185 | } | 1131 | } |
| 1186 | 1132 | ||
| 1187 | static struct trace_event_functions trace_bprint_funcs = { | 1133 | static struct trace_event_functions trace_bprint_funcs = { |
| @@ -1203,16 +1149,10 @@ static enum print_line_t trace_print_print(struct trace_iterator *iter, | |||
| 1203 | 1149 | ||
| 1204 | trace_assign_type(field, iter->ent); | 1150 | trace_assign_type(field, iter->ent); |
| 1205 | 1151 | ||
| 1206 | if (!seq_print_ip_sym(s, field->ip, flags)) | 1152 | seq_print_ip_sym(s, field->ip, flags); |
| 1207 | goto partial; | 1153 | trace_seq_printf(s, ": %s", field->buf); |
| 1208 | |||
| 1209 | if (!trace_seq_printf(s, ": %s", field->buf)) | ||
| 1210 | goto partial; | ||
| 1211 | 1154 | ||
| 1212 | return TRACE_TYPE_HANDLED; | 1155 | return trace_handle_return(s); |
| 1213 | |||
| 1214 | partial: | ||
| 1215 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1216 | } | 1156 | } |
| 1217 | 1157 | ||
| 1218 | static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags, | 1158 | static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags, |
| @@ -1222,13 +1162,9 @@ static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags, | |||
| 1222 | 1162 | ||
| 1223 | trace_assign_type(field, iter->ent); | 1163 | trace_assign_type(field, iter->ent); |
| 1224 | 1164 | ||
| 1225 | if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf)) | 1165 | trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf); |
| 1226 | goto partial; | ||
| 1227 | |||
| 1228 | return TRACE_TYPE_HANDLED; | ||
| 1229 | 1166 | ||
| 1230 | partial: | 1167 | return trace_handle_return(&iter->seq); |
| 1231 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 1232 | } | 1168 | } |
| 1233 | 1169 | ||
| 1234 | static struct trace_event_functions trace_print_funcs = { | 1170 | static struct trace_event_functions trace_print_funcs = { |
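Every trace_output.c hunk above follows the same conversion: the old handlers checked each trace_seq_*() return and bailed to a partial: label, while the new ones write unconditionally and let a single trace_handle_return() translate the sequence's overflow state into the print_line_t result. The helper itself lives in trace.h rather than in this diff; it amounts to roughly:

static inline enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}

The one place that still checks state mid-stream is trace_stack_print(): it tests trace_seq_has_overflowed() at the top of each loop iteration so the stack walk stops as soon as the buffer fills, rather than attempting writes that can no longer succeed.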
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h index 80b25b585a70..8ef2c40efb3c 100644 --- a/kernel/trace/trace_output.h +++ b/kernel/trace/trace_output.h | |||
| @@ -35,17 +35,11 @@ trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry); | |||
| 35 | extern int __unregister_ftrace_event(struct trace_event *event); | 35 | extern int __unregister_ftrace_event(struct trace_event *event); |
| 36 | extern struct rw_semaphore trace_event_sem; | 36 | extern struct rw_semaphore trace_event_sem; |
| 37 | 37 | ||
| 38 | #define SEQ_PUT_FIELD_RET(s, x) \ | 38 | #define SEQ_PUT_FIELD(s, x) \ |
| 39 | do { \ | 39 | trace_seq_putmem(s, &(x), sizeof(x)) |
| 40 | if (!trace_seq_putmem(s, &(x), sizeof(x))) \ | 40 | |
| 41 | return TRACE_TYPE_PARTIAL_LINE; \ | 41 | #define SEQ_PUT_HEX_FIELD(s, x) \ |
| 42 | } while (0) | 42 | trace_seq_putmem_hex(s, &(x), sizeof(x)) |
| 43 | |||
| 44 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ | ||
| 45 | do { \ | ||
| 46 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ | ||
| 47 | return TRACE_TYPE_PARTIAL_LINE; \ | ||
| 48 | } while (0) | ||
| 49 | 43 | ||
| 50 | #endif | 44 | #endif |
| 51 | 45 | ||
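The point of this trace_output.h hunk is dropping hidden control flow: SEQ_PUT_FIELD_RET() could return TRACE_TYPE_PARTIAL_LINE out of its caller from inside a do/while, which tied the macro to functions with exactly that return type. The replacements are plain expressions, so a caller such as trace_fn_hex() (shown in an earlier hunk) reads straight through:

	/* old: each macro may return from the enclosing function */
	SEQ_PUT_HEX_FIELD_RET(s, field->ip);
	SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
	return TRACE_TYPE_HANDLED;

	/* new: write unconditionally, resolve overflow once at the end */
	SEQ_PUT_HEX_FIELD(s, field->ip);
	SEQ_PUT_HEX_FIELD(s, field->parent_ip);
	return trace_handle_return(s);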
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c index 2900817ba65c..c4e70b6bd7fa 100644 --- a/kernel/trace/trace_printk.c +++ b/kernel/trace/trace_printk.c | |||
| @@ -305,7 +305,7 @@ static int t_show(struct seq_file *m, void *v) | |||
| 305 | seq_puts(m, "\\t"); | 305 | seq_puts(m, "\\t"); |
| 306 | break; | 306 | break; |
| 307 | case '\\': | 307 | case '\\': |
| 308 | seq_puts(m, "\\"); | 308 | seq_putc(m, '\\'); |
| 309 | break; | 309 | break; |
| 310 | case '"': | 310 | case '"': |
| 311 | seq_puts(m, "\\\""); | 311 | seq_puts(m, "\\\""); |
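The trace_printk.c change is a micro-cleanup: pushing a one-character string through seq_puts() costs a strlen(), while seq_putc() says what it means. Output is unchanged, since "\\" is a single backslash either way. For orientation, the surrounding switch escapes a saved printk format for display; reconstructed from the context lines above, with the default case assumed:

	switch (c) {
	case '\t':
		seq_puts(m, "\\t");	/* multi-char escape: puts */
		break;
	case '\\':
		seq_putc(m, '\\');	/* single char: putc */
		break;
	case '"':
		seq_puts(m, "\\\"");
		break;
	default:
		seq_putc(m, c);		/* assumed: ordinary characters */
	}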
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index d4b9fc22cd27..b983b2fd2ca1 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c | |||
| @@ -40,7 +40,8 @@ const char *reserved_field_names[] = { | |||
| 40 | int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \ | 40 | int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \ |
| 41 | void *data, void *ent) \ | 41 | void *data, void *ent) \ |
| 42 | { \ | 42 | { \ |
| 43 | return trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ | 43 | trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ |
| 44 | return !trace_seq_has_overflowed(s); \ | ||
| 44 | } \ | 45 | } \ |
| 45 | const char PRINT_TYPE_FMT_NAME(type)[] = fmt; \ | 46 | const char PRINT_TYPE_FMT_NAME(type)[] = fmt; \ |
| 46 | NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(type)); | 47 | NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(type)); |
| @@ -61,10 +62,11 @@ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, const char *name, | |||
| 61 | int len = *(u32 *)data >> 16; | 62 | int len = *(u32 *)data >> 16; |
| 62 | 63 | ||
| 63 | if (!len) | 64 | if (!len) |
| 64 | return trace_seq_printf(s, " %s=(fault)", name); | 65 | trace_seq_printf(s, " %s=(fault)", name); |
| 65 | else | 66 | else |
| 66 | return trace_seq_printf(s, " %s=\"%s\"", name, | 67 | trace_seq_printf(s, " %s=\"%s\"", name, |
| 67 | (const char *)get_loc_data(data, ent)); | 68 | (const char *)get_loc_data(data, ent)); |
| 69 | return !trace_seq_has_overflowed(s); | ||
| 68 | } | 70 | } |
| 69 | NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string)); | 71 | NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string)); |
| 70 | 72 | ||
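trace_probe.c cannot go fully void: the kprobe/uprobe event printers still test each type printer's result, so the macro body converts the new write-then-check style back into a success boolean via !trace_seq_has_overflowed(). As a concrete expansion of the type-printer macro above (the u64/"%Lu" pairing here is illustrative, not taken from this diff):

int PRINT_TYPE_FUNC_NAME(u64)(struct trace_seq *s, const char *name,
			      void *data, void *ent)
{
	trace_seq_printf(s, " %s=%Lu", name, *(u64 *)data);
	return !trace_seq_has_overflowed(s);	/* 1 == fully written */
}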
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index 3f34dc9b40f3..2e293beb186e 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
| @@ -14,122 +14,26 @@ | |||
| 14 | 14 | ||
| 15 | #include "trace.h" | 15 | #include "trace.h" |
| 16 | 16 | ||
| 17 | static struct trace_array *ctx_trace; | ||
| 18 | static int __read_mostly tracer_enabled; | ||
| 19 | static int sched_ref; | 17 | static int sched_ref; |
| 20 | static DEFINE_MUTEX(sched_register_mutex); | 18 | static DEFINE_MUTEX(sched_register_mutex); |
| 21 | static int sched_stopped; | ||
| 22 | |||
| 23 | |||
| 24 | void | ||
| 25 | tracing_sched_switch_trace(struct trace_array *tr, | ||
| 26 | struct task_struct *prev, | ||
| 27 | struct task_struct *next, | ||
| 28 | unsigned long flags, int pc) | ||
| 29 | { | ||
| 30 | struct ftrace_event_call *call = &event_context_switch; | ||
| 31 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | ||
| 32 | struct ring_buffer_event *event; | ||
| 33 | struct ctx_switch_entry *entry; | ||
| 34 | |||
| 35 | event = trace_buffer_lock_reserve(buffer, TRACE_CTX, | ||
| 36 | sizeof(*entry), flags, pc); | ||
| 37 | if (!event) | ||
| 38 | return; | ||
| 39 | entry = ring_buffer_event_data(event); | ||
| 40 | entry->prev_pid = prev->pid; | ||
| 41 | entry->prev_prio = prev->prio; | ||
| 42 | entry->prev_state = prev->state; | ||
| 43 | entry->next_pid = next->pid; | ||
| 44 | entry->next_prio = next->prio; | ||
| 45 | entry->next_state = next->state; | ||
| 46 | entry->next_cpu = task_cpu(next); | ||
| 47 | |||
| 48 | if (!call_filter_check_discard(call, entry, buffer, event)) | ||
| 49 | trace_buffer_unlock_commit(buffer, event, flags, pc); | ||
| 50 | } | ||
| 51 | 19 | ||
| 52 | static void | 20 | static void |
| 53 | probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) | 21 | probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) |
| 54 | { | 22 | { |
| 55 | struct trace_array_cpu *data; | ||
| 56 | unsigned long flags; | ||
| 57 | int cpu; | ||
| 58 | int pc; | ||
| 59 | |||
| 60 | if (unlikely(!sched_ref)) | 23 | if (unlikely(!sched_ref)) |
| 61 | return; | 24 | return; |
| 62 | 25 | ||
| 63 | tracing_record_cmdline(prev); | 26 | tracing_record_cmdline(prev); |
| 64 | tracing_record_cmdline(next); | 27 | tracing_record_cmdline(next); |
| 65 | |||
| 66 | if (!tracer_enabled || sched_stopped) | ||
| 67 | return; | ||
| 68 | |||
| 69 | pc = preempt_count(); | ||
| 70 | local_irq_save(flags); | ||
| 71 | cpu = raw_smp_processor_id(); | ||
| 72 | data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu); | ||
| 73 | |||
| 74 | if (likely(!atomic_read(&data->disabled))) | ||
| 75 | tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc); | ||
| 76 | |||
| 77 | local_irq_restore(flags); | ||
| 78 | } | ||
| 79 | |||
| 80 | void | ||
| 81 | tracing_sched_wakeup_trace(struct trace_array *tr, | ||
| 82 | struct task_struct *wakee, | ||
| 83 | struct task_struct *curr, | ||
| 84 | unsigned long flags, int pc) | ||
| 85 | { | ||
| 86 | struct ftrace_event_call *call = &event_wakeup; | ||
| 87 | struct ring_buffer_event *event; | ||
| 88 | struct ctx_switch_entry *entry; | ||
| 89 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | ||
| 90 | |||
| 91 | event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, | ||
| 92 | sizeof(*entry), flags, pc); | ||
| 93 | if (!event) | ||
| 94 | return; | ||
| 95 | entry = ring_buffer_event_data(event); | ||
| 96 | entry->prev_pid = curr->pid; | ||
| 97 | entry->prev_prio = curr->prio; | ||
| 98 | entry->prev_state = curr->state; | ||
| 99 | entry->next_pid = wakee->pid; | ||
| 100 | entry->next_prio = wakee->prio; | ||
| 101 | entry->next_state = wakee->state; | ||
| 102 | entry->next_cpu = task_cpu(wakee); | ||
| 103 | |||
| 104 | if (!call_filter_check_discard(call, entry, buffer, event)) | ||
| 105 | trace_buffer_unlock_commit(buffer, event, flags, pc); | ||
| 106 | } | 28 | } |
| 107 | 29 | ||
| 108 | static void | 30 | static void |
| 109 | probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success) | 31 | probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success) |
| 110 | { | 32 | { |
| 111 | struct trace_array_cpu *data; | ||
| 112 | unsigned long flags; | ||
| 113 | int cpu, pc; | ||
| 114 | |||
| 115 | if (unlikely(!sched_ref)) | 33 | if (unlikely(!sched_ref)) |
| 116 | return; | 34 | return; |
| 117 | 35 | ||
| 118 | tracing_record_cmdline(current); | 36 | tracing_record_cmdline(current); |
| 119 | |||
| 120 | if (!tracer_enabled || sched_stopped) | ||
| 121 | return; | ||
| 122 | |||
| 123 | pc = preempt_count(); | ||
| 124 | local_irq_save(flags); | ||
| 125 | cpu = raw_smp_processor_id(); | ||
| 126 | data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu); | ||
| 127 | |||
| 128 | if (likely(!atomic_read(&data->disabled))) | ||
| 129 | tracing_sched_wakeup_trace(ctx_trace, wakee, current, | ||
| 130 | flags, pc); | ||
| 131 | |||
| 132 | local_irq_restore(flags); | ||
| 133 | } | 37 | } |
| 134 | 38 | ||
| 135 | static int tracing_sched_register(void) | 39 | static int tracing_sched_register(void) |
| @@ -197,51 +101,3 @@ void tracing_stop_cmdline_record(void) | |||
| 197 | { | 101 | { |
| 198 | tracing_stop_sched_switch(); | 102 | tracing_stop_sched_switch(); |
| 199 | } | 103 | } |
| 200 | |||
| 201 | /** | ||
| 202 | * tracing_start_sched_switch_record - start tracing context switches | ||
| 203 | * | ||
| 204 | * Turns on context switch tracing for a tracer. | ||
| 205 | */ | ||
| 206 | void tracing_start_sched_switch_record(void) | ||
| 207 | { | ||
| 208 | if (unlikely(!ctx_trace)) { | ||
| 209 | WARN_ON(1); | ||
| 210 | return; | ||
| 211 | } | ||
| 212 | |||
| 213 | tracing_start_sched_switch(); | ||
| 214 | |||
| 215 | mutex_lock(&sched_register_mutex); | ||
| 216 | tracer_enabled++; | ||
| 217 | mutex_unlock(&sched_register_mutex); | ||
| 218 | } | ||
| 219 | |||
| 220 | /** | ||
| 221 | * tracing_stop_sched_switch_record - start tracing context switches | ||
| 222 | * | ||
| 223 | * Turns off context switch tracing for a tracer. | ||
| 224 | */ | ||
| 225 | void tracing_stop_sched_switch_record(void) | ||
| 226 | { | ||
| 227 | mutex_lock(&sched_register_mutex); | ||
| 228 | tracer_enabled--; | ||
| 229 | WARN_ON(tracer_enabled < 0); | ||
| 230 | mutex_unlock(&sched_register_mutex); | ||
| 231 | |||
| 232 | tracing_stop_sched_switch(); | ||
| 233 | } | ||
| 234 | |||
| 235 | /** | ||
| 236 | * tracing_sched_switch_assign_trace - assign a trace array for ctx switch | ||
| 237 | * @tr: trace array pointer to assign | ||
| 238 | * | ||
| 239 | * Some tracers might want to record the context switches in their | ||
| 240 | * trace. This function lets those tracers assign the trace array | ||
| 241 | * to use. | ||
| 242 | */ | ||
| 243 | void tracing_sched_switch_assign_trace(struct trace_array *tr) | ||
| 244 | { | ||
| 245 | ctx_trace = tr; | ||
| 246 | } | ||
| 247 | |||
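Net effect for trace_sched_switch.c: the file no longer owns a trace_array, a tracer_enabled gate, or the sched_stopped flag; its probes shrink to pure cmdline bookkeeping, and the two event-writing helpers move wholesale into trace_sched_wakeup.c, their only remaining user (see the next diff). The tracing_sched_register() referenced above sits outside the hunk; presumably it just attaches these probes to the scheduler tracepoints, along these lines (sketch only; the real function also handles sched_wakeup_new and unwinds on failure):

static int tracing_sched_register(void)
{
	int ret;

	ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
	if (ret)
		return ret;
	return register_trace_sched_switch(probe_sched_switch, NULL);
}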
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 19bd8928ce94..8fb84b362816 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
| @@ -365,6 +365,62 @@ probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu) | |||
| 365 | wakeup_current_cpu = cpu; | 365 | wakeup_current_cpu = cpu; |
| 366 | } | 366 | } |
| 367 | 367 | ||
| 368 | static void | ||
| 369 | tracing_sched_switch_trace(struct trace_array *tr, | ||
| 370 | struct task_struct *prev, | ||
| 371 | struct task_struct *next, | ||
| 372 | unsigned long flags, int pc) | ||
| 373 | { | ||
| 374 | struct ftrace_event_call *call = &event_context_switch; | ||
| 375 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | ||
| 376 | struct ring_buffer_event *event; | ||
| 377 | struct ctx_switch_entry *entry; | ||
| 378 | |||
| 379 | event = trace_buffer_lock_reserve(buffer, TRACE_CTX, | ||
| 380 | sizeof(*entry), flags, pc); | ||
| 381 | if (!event) | ||
| 382 | return; | ||
| 383 | entry = ring_buffer_event_data(event); | ||
| 384 | entry->prev_pid = prev->pid; | ||
| 385 | entry->prev_prio = prev->prio; | ||
| 386 | entry->prev_state = prev->state; | ||
| 387 | entry->next_pid = next->pid; | ||
| 388 | entry->next_prio = next->prio; | ||
| 389 | entry->next_state = next->state; | ||
| 390 | entry->next_cpu = task_cpu(next); | ||
| 391 | |||
| 392 | if (!call_filter_check_discard(call, entry, buffer, event)) | ||
| 393 | trace_buffer_unlock_commit(buffer, event, flags, pc); | ||
| 394 | } | ||
| 395 | |||
| 396 | static void | ||
| 397 | tracing_sched_wakeup_trace(struct trace_array *tr, | ||
| 398 | struct task_struct *wakee, | ||
| 399 | struct task_struct *curr, | ||
| 400 | unsigned long flags, int pc) | ||
| 401 | { | ||
| 402 | struct ftrace_event_call *call = &event_wakeup; | ||
| 403 | struct ring_buffer_event *event; | ||
| 404 | struct ctx_switch_entry *entry; | ||
| 405 | struct ring_buffer *buffer = tr->trace_buffer.buffer; | ||
| 406 | |||
| 407 | event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, | ||
| 408 | sizeof(*entry), flags, pc); | ||
| 409 | if (!event) | ||
| 410 | return; | ||
| 411 | entry = ring_buffer_event_data(event); | ||
| 412 | entry->prev_pid = curr->pid; | ||
| 413 | entry->prev_prio = curr->prio; | ||
| 414 | entry->prev_state = curr->state; | ||
| 415 | entry->next_pid = wakee->pid; | ||
| 416 | entry->next_prio = wakee->prio; | ||
| 417 | entry->next_state = wakee->state; | ||
| 418 | entry->next_cpu = task_cpu(wakee); | ||
| 419 | |||
| 420 | if (!call_filter_check_discard(call, entry, buffer, event)) | ||
| 421 | trace_buffer_unlock_commit(buffer, event, flags, pc); | ||
| 422 | } | ||
| 423 | |||
| 368 | static void notrace | 424 | static void notrace |
| 369 | probe_wakeup_sched_switch(void *ignore, | 425 | probe_wakeup_sched_switch(void *ignore, |
| 370 | struct task_struct *prev, struct task_struct *next) | 426 | struct task_struct *prev, struct task_struct *next) |
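Both helpers added here keep the standard ring-buffer idiom: reserve an event, fill it, run it past the filter, commit. Reduced to its skeleton, with the names used in the hunk:

	event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;		/* buffer full or disabled: drop silently */
	entry = ring_buffer_event_data(event);
	/* ... copy pid/prio/state/cpu from prev and next ... */
	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, flags, pc);

Making them static here (they were globally visible in trace_sched_switch.c) keeps the whole call graph local to the wakeup tracer.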
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 5ef60499dc8e..b0f86ea77881 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
| @@ -382,6 +382,8 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
| 382 | 382 | ||
| 383 | /* check the trace buffer */ | 383 | /* check the trace buffer */ |
| 384 | ret = trace_test_buffer(&tr->trace_buffer, &count); | 384 | ret = trace_test_buffer(&tr->trace_buffer, &count); |
| 385 | |||
| 386 | ftrace_enabled = 1; | ||
| 385 | tracing_start(); | 387 | tracing_start(); |
| 386 | 388 | ||
| 387 | /* we should only have one item */ | 389 | /* we should only have one item */ |
| @@ -679,6 +681,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
| 679 | 681 | ||
| 680 | /* check the trace buffer */ | 682 | /* check the trace buffer */ |
| 681 | ret = trace_test_buffer(&tr->trace_buffer, &count); | 683 | ret = trace_test_buffer(&tr->trace_buffer, &count); |
| 684 | |||
| 685 | ftrace_enabled = 1; | ||
| 682 | trace->reset(tr); | 686 | trace->reset(tr); |
| 683 | tracing_start(); | 687 | tracing_start(); |
| 684 | 688 | ||
| @@ -1025,6 +1029,12 @@ trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr) | |||
| 1025 | #endif | 1029 | #endif |
| 1026 | 1030 | ||
| 1027 | #ifdef CONFIG_SCHED_TRACER | 1031 | #ifdef CONFIG_SCHED_TRACER |
| 1032 | |||
| 1033 | struct wakeup_test_data { | ||
| 1034 | struct completion is_ready; | ||
| 1035 | int go; | ||
| 1036 | }; | ||
| 1037 | |||
| 1028 | static int trace_wakeup_test_thread(void *data) | 1038 | static int trace_wakeup_test_thread(void *data) |
| 1029 | { | 1039 | { |
| 1030 | /* Make this a -deadline thread */ | 1040 | /* Make this a -deadline thread */ |
| @@ -1034,51 +1044,56 @@ static int trace_wakeup_test_thread(void *data) | |||
| 1034 | .sched_deadline = 10000000ULL, | 1044 | .sched_deadline = 10000000ULL, |
| 1035 | .sched_period = 10000000ULL | 1045 | .sched_period = 10000000ULL |
| 1036 | }; | 1046 | }; |
| 1037 | struct completion *x = data; | 1047 | struct wakeup_test_data *x = data; |
| 1038 | 1048 | ||
| 1039 | sched_setattr(current, &attr); | 1049 | sched_setattr(current, &attr); |
| 1040 | 1050 | ||
| 1041 | /* Make it know we have a new prio */ | 1051 | /* Make it know we have a new prio */ |
| 1042 | complete(x); | 1052 | complete(&x->is_ready); |
| 1043 | 1053 | ||
| 1044 | /* now go to sleep and let the test wake us up */ | 1054 | /* now go to sleep and let the test wake us up */ |
| 1045 | set_current_state(TASK_INTERRUPTIBLE); | 1055 | set_current_state(TASK_INTERRUPTIBLE); |
| 1046 | schedule(); | 1056 | while (!x->go) { |
| 1057 | schedule(); | ||
| 1058 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1059 | } | ||
| 1047 | 1060 | ||
| 1048 | complete(x); | 1061 | complete(&x->is_ready); |
| 1062 | |||
| 1063 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 1049 | 1064 | ||
| 1050 | /* we are awake, now wait to disappear */ | 1065 | /* we are awake, now wait to disappear */ |
| 1051 | while (!kthread_should_stop()) { | 1066 | while (!kthread_should_stop()) { |
| 1052 | /* | 1067 | schedule(); |
| 1053 | * This will likely be the system top priority | 1068 | set_current_state(TASK_INTERRUPTIBLE); |
| 1054 | * task, do short sleeps to let others run. | ||
| 1055 | */ | ||
| 1056 | msleep(100); | ||
| 1057 | } | 1069 | } |
| 1058 | 1070 | ||
| 1071 | __set_current_state(TASK_RUNNING); | ||
| 1072 | |||
| 1059 | return 0; | 1073 | return 0; |
| 1060 | } | 1074 | } |
| 1061 | |||
| 1062 | int | 1075 | int |
| 1063 | trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | 1076 | trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) |
| 1064 | { | 1077 | { |
| 1065 | unsigned long save_max = tr->max_latency; | 1078 | unsigned long save_max = tr->max_latency; |
| 1066 | struct task_struct *p; | 1079 | struct task_struct *p; |
| 1067 | struct completion is_ready; | 1080 | struct wakeup_test_data data; |
| 1068 | unsigned long count; | 1081 | unsigned long count; |
| 1069 | int ret; | 1082 | int ret; |
| 1070 | 1083 | ||
| 1071 | init_completion(&is_ready); | 1084 | memset(&data, 0, sizeof(data)); |
| 1085 | |||
| 1086 | init_completion(&data.is_ready); | ||
| 1072 | 1087 | ||
| 1073 | /* create a -deadline thread */ | 1088 | /* create a -deadline thread */ |
| 1074 | p = kthread_run(trace_wakeup_test_thread, &is_ready, "ftrace-test"); | 1089 | p = kthread_run(trace_wakeup_test_thread, &data, "ftrace-test"); |
| 1075 | if (IS_ERR(p)) { | 1090 | if (IS_ERR(p)) { |
| 1076 | printk(KERN_CONT "Failed to create ftrace wakeup test thread "); | 1091 | printk(KERN_CONT "Failed to create ftrace wakeup test thread "); |
| 1077 | return -1; | 1092 | return -1; |
| 1078 | } | 1093 | } |
| 1079 | 1094 | ||
| 1080 | /* make sure the thread is running at -deadline policy */ | 1095 | /* make sure the thread is running at -deadline policy */ |
| 1081 | wait_for_completion(&is_ready); | 1096 | wait_for_completion(&data.is_ready); |
| 1082 | 1097 | ||
| 1083 | /* start the tracing */ | 1098 | /* start the tracing */ |
| 1084 | ret = tracer_init(trace, tr); | 1099 | ret = tracer_init(trace, tr); |
| @@ -1099,18 +1114,20 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
| 1099 | msleep(100); | 1114 | msleep(100); |
| 1100 | } | 1115 | } |
| 1101 | 1116 | ||
| 1102 | init_completion(&is_ready); | 1117 | init_completion(&data.is_ready); |
| 1118 | |||
| 1119 | data.go = 1; | ||
| 1120 | /* memory barrier is in the wake_up_process() */ | ||
| 1103 | 1121 | ||
| 1104 | wake_up_process(p); | 1122 | wake_up_process(p); |
| 1105 | 1123 | ||
| 1106 | /* Wait for the task to wake up */ | 1124 | /* Wait for the task to wake up */ |
| 1107 | wait_for_completion(&is_ready); | 1125 | wait_for_completion(&data.is_ready); |
| 1108 | 1126 | ||
| 1109 | /* stop the tracing. */ | 1127 | /* stop the tracing. */ |
| 1110 | tracing_stop(); | 1128 | tracing_stop(); |
| 1111 | /* check both trace buffers */ | 1129 | /* check both trace buffers */ |
| 1112 | ret = trace_test_buffer(&tr->trace_buffer, NULL); | 1130 | ret = trace_test_buffer(&tr->trace_buffer, NULL); |
| 1113 | printk("ret = %d\n", ret); | ||
| 1114 | if (!ret) | 1131 | if (!ret) |
| 1115 | ret = trace_test_buffer(&tr->max_buffer, &count); | 1132 | ret = trace_test_buffer(&tr->max_buffer, &count); |
| 1116 | 1133 | ||
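The selftest rework closes a classic lost-wakeup race: with a bare schedule(), the tracer's wake_up_process() could land before the kthread had actually gone to sleep, and a completion alone gave the thread no condition to re-check. The new wakeup_test_data pairs the completion with a go flag tested with the task state already set, the canonical form being:

	set_current_state(TASK_INTERRUPTIBLE);	/* state set BEFORE the test */
	while (!x->go) {
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);

Since wake_up_process() implies a memory barrier (as the added comment notes), the write to data.go is ordered before the wakeup, and a wakeup arriving between the flag test and schedule() simply makes schedule() return immediately. The two added ftrace_enabled = 1 lines pair with an ftrace_enabled = 0 earlier in each selftest (outside these hunks), re-arming function tracing before tracing_start(); the stray "ret = %d" printk also goes away.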
diff --git a/kernel/trace/trace_seq.c b/kernel/trace/trace_seq.c index 1f24ed99dca2..f8b45d8792f9 100644 --- a/kernel/trace/trace_seq.c +++ b/kernel/trace/trace_seq.c | |||
| @@ -27,10 +27,19 @@ | |||
| 27 | #include <linux/trace_seq.h> | 27 | #include <linux/trace_seq.h> |
| 28 | 28 | ||
| 29 | /* How much buffer is left on the trace_seq? */ | 29 | /* How much buffer is left on the trace_seq? */ |
| 30 | #define TRACE_SEQ_BUF_LEFT(s) ((PAGE_SIZE - 1) - (s)->len) | 30 | #define TRACE_SEQ_BUF_LEFT(s) seq_buf_buffer_left(&(s)->seq) |
| 31 | 31 | ||
| 32 | /* How much buffer is written? */ | 32 | /* How much buffer is written? */ |
| 33 | #define TRACE_SEQ_BUF_USED(s) min((s)->len, (unsigned int)(PAGE_SIZE - 1)) | 33 | #define TRACE_SEQ_BUF_USED(s) seq_buf_used(&(s)->seq) |
| 34 | |||
| 35 | /* | ||
| 36 | * trace_seq should work with being initialized with 0s. | ||
| 37 | */ | ||
| 38 | static inline void __trace_seq_init(struct trace_seq *s) | ||
| 39 | { | ||
| 40 | if (unlikely(!s->seq.size)) | ||
| 41 | trace_seq_init(s); | ||
| 42 | } | ||
| 34 | 43 | ||
| 35 | /** | 44 | /** |
| 36 | * trace_print_seq - move the contents of trace_seq into a seq_file | 45 | * trace_print_seq - move the contents of trace_seq into a seq_file |
| @@ -43,10 +52,11 @@ | |||
| 43 | */ | 52 | */ |
| 44 | int trace_print_seq(struct seq_file *m, struct trace_seq *s) | 53 | int trace_print_seq(struct seq_file *m, struct trace_seq *s) |
| 45 | { | 54 | { |
| 46 | unsigned int len = TRACE_SEQ_BUF_USED(s); | ||
| 47 | int ret; | 55 | int ret; |
| 48 | 56 | ||
| 49 | ret = seq_write(m, s->buffer, len); | 57 | __trace_seq_init(s); |
| 58 | |||
| 59 | ret = seq_buf_print_seq(m, &s->seq); | ||
| 50 | 60 | ||
| 51 | /* | 61 | /* |
| 52 | * Only reset this buffer if we successfully wrote to the | 62 | * Only reset this buffer if we successfully wrote to the |
| @@ -69,34 +79,26 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s) | |||
| 69 | * trace_seq_printf() is used to store strings into a special | 79 | * trace_seq_printf() is used to store strings into a special |
| 70 | * buffer (@s). Then the output may be either used by | 80 | * buffer (@s). Then the output may be either used by |
| 71 | * the sequencer or pulled into another buffer. | 81 | * the sequencer or pulled into another buffer. |
| 72 | * | ||
| 73 | * Returns 1 if we successfully written all the contents to | ||
| 74 | * the buffer. | ||
| 75 | * Returns 0 if we the length to write is bigger than the | ||
| 76 | * reserved buffer space. In this case, nothing gets written. | ||
| 77 | */ | 82 | */ |
| 78 | int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | 83 | void trace_seq_printf(struct trace_seq *s, const char *fmt, ...) |
| 79 | { | 84 | { |
| 80 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | 85 | unsigned int save_len = s->seq.len; |
| 81 | va_list ap; | 86 | va_list ap; |
| 82 | int ret; | ||
| 83 | 87 | ||
| 84 | if (s->full || !len) | 88 | if (s->full) |
| 85 | return 0; | 89 | return; |
| 90 | |||
| 91 | __trace_seq_init(s); | ||
| 86 | 92 | ||
| 87 | va_start(ap, fmt); | 93 | va_start(ap, fmt); |
| 88 | ret = vsnprintf(s->buffer + s->len, len, fmt, ap); | 94 | seq_buf_vprintf(&s->seq, fmt, ap); |
| 89 | va_end(ap); | 95 | va_end(ap); |
| 90 | 96 | ||
| 91 | /* If we can't write it all, don't bother writing anything */ | 97 | /* If we can't write it all, don't bother writing anything */ |
| 92 | if (ret >= len) { | 98 | if (unlikely(seq_buf_has_overflowed(&s->seq))) { |
| 99 | s->seq.len = save_len; | ||
| 93 | s->full = 1; | 100 | s->full = 1; |
| 94 | return 0; | ||
| 95 | } | 101 | } |
| 96 | |||
| 97 | s->len += ret; | ||
| 98 | |||
| 99 | return 1; | ||
| 100 | } | 102 | } |
| 101 | EXPORT_SYMBOL_GPL(trace_seq_printf); | 103 | EXPORT_SYMBOL_GPL(trace_seq_printf); |
| 102 | 104 | ||
| @@ -107,25 +109,23 @@ EXPORT_SYMBOL_GPL(trace_seq_printf); | |||
| 107 | * @nmaskbits: The number of bits that are valid in @maskp | 109 | * @nmaskbits: The number of bits that are valid in @maskp |
| 108 | * | 110 | * |
| 109 | * Writes an ASCII representation of a bitmask string into @s. | 111 | * Writes an ASCII representation of a bitmask string into @s. |
| 110 | * | ||
| 111 | * Returns 1 if we successfully written all the contents to | ||
| 112 | * the buffer. | ||
| 113 | * Returns 0 if we the length to write is bigger than the | ||
| 114 | * reserved buffer space. In this case, nothing gets written. | ||
| 115 | */ | 112 | */ |
| 116 | int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, | 113 | void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, |
| 117 | int nmaskbits) | 114 | int nmaskbits) |
| 118 | { | 115 | { |
| 119 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | 116 | unsigned int save_len = s->seq.len; |
| 120 | int ret; | ||
| 121 | 117 | ||
| 122 | if (s->full || !len) | 118 | if (s->full) |
| 123 | return 0; | 119 | return; |
| 124 | 120 | ||
| 125 | ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits); | 121 | __trace_seq_init(s); |
| 126 | s->len += ret; | ||
| 127 | 122 | ||
| 128 | return 1; | 123 | seq_buf_bitmask(&s->seq, maskp, nmaskbits); |
| 124 | |||
| 125 | if (unlikely(seq_buf_has_overflowed(&s->seq))) { | ||
| 126 | s->seq.len = save_len; | ||
| 127 | s->full = 1; | ||
| 128 | } | ||
| 129 | } | 129 | } |
| 130 | EXPORT_SYMBOL_GPL(trace_seq_bitmask); | 130 | EXPORT_SYMBOL_GPL(trace_seq_bitmask); |
| 131 | 131 | ||
| @@ -139,28 +139,23 @@ EXPORT_SYMBOL_GPL(trace_seq_bitmask); | |||
| 139 | * trace_seq_printf is used to store strings into a special | 139 | * trace_seq_printf is used to store strings into a special |
| 140 | * buffer (@s). Then the output may be either used by | 140 | * buffer (@s). Then the output may be either used by |
| 141 | * the sequencer or pulled into another buffer. | 141 | * the sequencer or pulled into another buffer. |
| 142 | * | ||
| 143 | * Returns how much it wrote to the buffer. | ||
| 144 | */ | 142 | */ |
| 145 | int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) | 143 | void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) |
| 146 | { | 144 | { |
| 147 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | 145 | unsigned int save_len = s->seq.len; |
| 148 | int ret; | ||
| 149 | 146 | ||
| 150 | if (s->full || !len) | 147 | if (s->full) |
| 151 | return 0; | 148 | return; |
| 152 | 149 | ||
| 153 | ret = vsnprintf(s->buffer + s->len, len, fmt, args); | 150 | __trace_seq_init(s); |
| 151 | |||
| 152 | seq_buf_vprintf(&s->seq, fmt, args); | ||
| 154 | 153 | ||
| 155 | /* If we can't write it all, don't bother writing anything */ | 154 | /* If we can't write it all, don't bother writing anything */ |
| 156 | if (ret >= len) { | 155 | if (unlikely(seq_buf_has_overflowed(&s->seq))) { |
| 156 | s->seq.len = save_len; | ||
| 157 | s->full = 1; | 157 | s->full = 1; |
| 158 | return 0; | ||
| 159 | } | 158 | } |
| 160 | |||
| 161 | s->len += ret; | ||
| 162 | |||
| 163 | return len; | ||
| 164 | } | 159 | } |
| 165 | EXPORT_SYMBOL_GPL(trace_seq_vprintf); | 160 | EXPORT_SYMBOL_GPL(trace_seq_vprintf); |
| 166 | 161 | ||
| @@ -178,28 +173,24 @@ EXPORT_SYMBOL_GPL(trace_seq_vprintf); | |||
| 178 | * | 173 | * |
| 179 | * This function will take the format and the binary array and finish | 174 | * This function will take the format and the binary array and finish |
| 180 | * the conversion into the ASCII string within the buffer. | 175 | * the conversion into the ASCII string within the buffer. |
| 181 | * | ||
| 182 | * Returns how much it wrote to the buffer. | ||
| 183 | */ | 176 | */ |
| 184 | int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) | 177 | void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) |
| 185 | { | 178 | { |
| 186 | unsigned int len = TRACE_SEQ_BUF_LEFT(s); | 179 | unsigned int save_len = s->seq.len; |
| 187 | int ret; | ||
| 188 | 180 | ||
| 189 | if (s->full || !len) | 181 | if (s->full) |
| 190 | return 0; | 182 | return; |
| 183 | |||
| 184 | __trace_seq_init(s); | ||
| 191 | 185 | ||
| 192 | ret = bstr_printf(s->buffer + s->len, len, fmt, binary); | 186 | seq_buf_bprintf(&s->seq, fmt, binary); |
| 193 | 187 | ||
| 194 | /* If we can't write it all, don't bother writing anything */ | 188 | /* If we can't write it all, don't bother writing anything */ |
| 195 | if (ret >= len) { | 189 | if (unlikely(seq_buf_has_overflowed(&s->seq))) { |
| 190 | s->seq.len = save_len; | ||
| 196 | s->full = 1; | 191 | s->full = 1; |
| 197 | return 0; | 192 | return; |
| 198 | } | 193 | } |
| 199 | |||
| 200 | s->len += ret; | ||
| 201 | |||
| 202 | return len; | ||
| 203 | } | 194 | } |
| 204 | EXPORT_SYMBOL_GPL(trace_seq_bprintf); | 195 | EXPORT_SYMBOL_GPL(trace_seq_bprintf); |
| 205 | 196 | ||
| @@ -212,25 +203,22 @@ EXPORT_SYMBOL_GPL(trace_seq_bprintf); | |||
| 212 | * copy to user routines. This function records a simple string | 203 | * copy to user routines. This function records a simple string |
| 213 | * into a special buffer (@s) for later retrieval by a sequencer | 204 | * into a special buffer (@s) for later retrieval by a sequencer |
| 214 | * or other mechanism. | 205 | * or other mechanism. |
| 215 | * | ||
| 216 | * Returns how much it wrote to the buffer. | ||
| 217 | */ | 206 | */ |
| 218 | int trace_seq_puts(struct trace_seq *s, const char *str) | 207 | void trace_seq_puts(struct trace_seq *s, const char *str) |
| 219 | { | 208 | { |
| 220 | unsigned int len = strlen(str); | 209 | unsigned int len = strlen(str); |
| 221 | 210 | ||
| 222 | if (s->full) | 211 | if (s->full) |
| 223 | return 0; | 212 | return; |
| 213 | |||
| 214 | __trace_seq_init(s); | ||
| 224 | 215 | ||
| 225 | if (len > TRACE_SEQ_BUF_LEFT(s)) { | 216 | if (len > TRACE_SEQ_BUF_LEFT(s)) { |
| 226 | s->full = 1; | 217 | s->full = 1; |
| 227 | return 0; | 218 | return; |
| 228 | } | 219 | } |
| 229 | 220 | ||
| 230 | memcpy(s->buffer + s->len, str, len); | 221 | seq_buf_putmem(&s->seq, str, len); |
| 231 | s->len += len; | ||
| 232 | |||
| 233 | return len; | ||
| 234 | } | 222 | } |
| 235 | EXPORT_SYMBOL_GPL(trace_seq_puts); | 223 | EXPORT_SYMBOL_GPL(trace_seq_puts); |
| 236 | 224 | ||
| @@ -243,22 +231,20 @@ EXPORT_SYMBOL_GPL(trace_seq_puts); | |||
| 243 | * copy to user routines. This function records a simple character | 231 | * copy to user routines. This function records a simple character |
| 244 | * into a special buffer (@s) for later retrieval by a sequencer | 232 | * into a special buffer (@s) for later retrieval by a sequencer |
| 245 | * or other mechanism. | 233 | * or other mechanism. |
| 246 | * | ||
| 247 | * Returns how much it wrote to the buffer. | ||
| 248 | */ | 234 | */ |
| 249 | int trace_seq_putc(struct trace_seq *s, unsigned char c) | 235 | void trace_seq_putc(struct trace_seq *s, unsigned char c) |
| 250 | { | 236 | { |
| 251 | if (s->full) | 237 | if (s->full) |
| 252 | return 0; | 238 | return; |
| 239 | |||
| 240 | __trace_seq_init(s); | ||
| 253 | 241 | ||
| 254 | if (TRACE_SEQ_BUF_LEFT(s) < 1) { | 242 | if (TRACE_SEQ_BUF_LEFT(s) < 1) { |
| 255 | s->full = 1; | 243 | s->full = 1; |
| 256 | return 0; | 244 | return; |
| 257 | } | 245 | } |
| 258 | 246 | ||
| 259 | s->buffer[s->len++] = c; | 247 | seq_buf_putc(&s->seq, c); |
| 260 | |||
| 261 | return 1; | ||
| 262 | } | 248 | } |
| 263 | EXPORT_SYMBOL_GPL(trace_seq_putc); | 249 | EXPORT_SYMBOL_GPL(trace_seq_putc); |
| 264 | 250 | ||
| @@ -271,29 +257,23 @@ EXPORT_SYMBOL_GPL(trace_seq_putc); | |||
| 271 | * There may be cases where raw memory needs to be written into the | 257 | * There may be cases where raw memory needs to be written into the |
| 272 | * buffer and a strcpy() would not work. Using this function allows | 258 | * buffer and a strcpy() would not work. Using this function allows |
| 273 | * for such cases. | 259 | * for such cases. |
| 274 | * | ||
| 275 | * Returns how much it wrote to the buffer. | ||
| 276 | */ | 260 | */ |
| 277 | int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) | 261 | void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) |
| 278 | { | 262 | { |
| 279 | if (s->full) | 263 | if (s->full) |
| 280 | return 0; | 264 | return; |
| 265 | |||
| 266 | __trace_seq_init(s); | ||
| 281 | 267 | ||
| 282 | if (len > TRACE_SEQ_BUF_LEFT(s)) { | 268 | if (len > TRACE_SEQ_BUF_LEFT(s)) { |
| 283 | s->full = 1; | 269 | s->full = 1; |
| 284 | return 0; | 270 | return; |
| 285 | } | 271 | } |
| 286 | 272 | ||
| 287 | memcpy(s->buffer + s->len, mem, len); | 273 | seq_buf_putmem(&s->seq, mem, len); |
| 288 | s->len += len; | ||
| 289 | |||
| 290 | return len; | ||
| 291 | } | 274 | } |
| 292 | EXPORT_SYMBOL_GPL(trace_seq_putmem); | 275 | EXPORT_SYMBOL_GPL(trace_seq_putmem); |
| 293 | 276 | ||
| 294 | #define MAX_MEMHEX_BYTES 8U | ||
| 295 | #define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) | ||
| 296 | |||
| 297 | /** | 277 | /** |
| 298 | * trace_seq_putmem_hex - write raw memory into the buffer in ASCII hex | 278 | * trace_seq_putmem_hex - write raw memory into the buffer in ASCII hex |
| 299 | * @s: trace sequence descriptor | 279 | * @s: trace sequence descriptor |
| @@ -303,41 +283,31 @@ EXPORT_SYMBOL_GPL(trace_seq_putmem); | |||
| 303 | * This is similar to trace_seq_putmem() except instead of just copying the | 283 | * This is similar to trace_seq_putmem() except instead of just copying the |
| 304 | * raw memory into the buffer it writes its ASCII representation of it | 284 | * raw memory into the buffer it writes its ASCII representation of it |
| 305 | * in hex characters. | 285 | * in hex characters. |
| 306 | * | ||
| 307 | * Returns how much it wrote to the buffer. | ||
| 308 | */ | 286 | */ |
| 309 | int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, | 287 | void trace_seq_putmem_hex(struct trace_seq *s, const void *mem, |
| 310 | unsigned int len) | 288 | unsigned int len) |
| 311 | { | 289 | { |
| 312 | unsigned char hex[HEX_CHARS]; | 290 | unsigned int save_len = s->seq.len; |
| 313 | const unsigned char *data = mem; | ||
| 314 | unsigned int start_len; | ||
| 315 | int i, j; | ||
| 316 | int cnt = 0; | ||
| 317 | 291 | ||
| 318 | if (s->full) | 292 | if (s->full) |
| 319 | return 0; | 293 | return; |
| 320 | 294 | ||
| 321 | while (len) { | 295 | __trace_seq_init(s); |
| 322 | start_len = min(len, HEX_CHARS - 1); | 296 | |
| 323 | #ifdef __BIG_ENDIAN | 297 | /* Each byte is represented by two chars */ |
| 324 | for (i = 0, j = 0; i < start_len; i++) { | 298 | if (len * 2 > TRACE_SEQ_BUF_LEFT(s)) { |
| 325 | #else | 299 | s->full = 1; |
| 326 | for (i = start_len-1, j = 0; i >= 0; i--) { | 300 | return; |
| 327 | #endif | 301 | } |
| 328 | hex[j++] = hex_asc_hi(data[i]); | 302 | |
| 329 | hex[j++] = hex_asc_lo(data[i]); | 303 | /* The added spaces can still cause an overflow */ |
| 330 | } | 304 | seq_buf_putmem_hex(&s->seq, mem, len); |
| 331 | if (WARN_ON_ONCE(j == 0 || j/2 > len)) | 305 | |
| 332 | break; | 306 | if (unlikely(seq_buf_has_overflowed(&s->seq))) { |
| 333 | 307 | s->seq.len = save_len; | |
| 334 | /* j increments twice per loop */ | 308 | s->full = 1; |
| 335 | len -= j / 2; | 309 | return; |
| 336 | hex[j++] = ' '; | ||
| 337 | |||
| 338 | cnt += trace_seq_putmem(s, hex, j); | ||
| 339 | } | 310 | } |
| 340 | return cnt; | ||
| 341 | } | 311 | } |
| 342 | EXPORT_SYMBOL_GPL(trace_seq_putmem_hex); | 312 | EXPORT_SYMBOL_GPL(trace_seq_putmem_hex); |
| 343 | 313 | ||
| @@ -355,30 +325,27 @@ EXPORT_SYMBOL_GPL(trace_seq_putmem_hex); | |||
| 355 | */ | 325 | */ |
| 356 | int trace_seq_path(struct trace_seq *s, const struct path *path) | 326 | int trace_seq_path(struct trace_seq *s, const struct path *path) |
| 357 | { | 327 | { |
| 358 | unsigned char *p; | 328 | unsigned int save_len = s->seq.len; |
| 359 | 329 | ||
| 360 | if (s->full) | 330 | if (s->full) |
| 361 | return 0; | 331 | return 0; |
| 362 | 332 | ||
| 333 | __trace_seq_init(s); | ||
| 334 | |||
| 363 | if (TRACE_SEQ_BUF_LEFT(s) < 1) { | 335 | if (TRACE_SEQ_BUF_LEFT(s) < 1) { |
| 364 | s->full = 1; | 336 | s->full = 1; |
| 365 | return 0; | 337 | return 0; |
| 366 | } | 338 | } |
| 367 | 339 | ||
| 368 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); | 340 | seq_buf_path(&s->seq, path, "\n"); |
| 369 | if (!IS_ERR(p)) { | 341 | |
| 370 | p = mangle_path(s->buffer + s->len, p, "\n"); | 342 | if (unlikely(seq_buf_has_overflowed(&s->seq))) { |
| 371 | if (p) { | 343 | s->seq.len = save_len; |
| 372 | s->len = p - s->buffer; | 344 | s->full = 1; |
| 373 | return 1; | 345 | return 0; |
| 374 | } | ||
| 375 | } else { | ||
| 376 | s->buffer[s->len++] = '?'; | ||
| 377 | return 1; | ||
| 378 | } | 346 | } |
| 379 | 347 | ||
| 380 | s->full = 1; | 348 | return 1; |
| 381 | return 0; | ||
| 382 | } | 349 | } |
| 383 | EXPORT_SYMBOL_GPL(trace_seq_path); | 350 | EXPORT_SYMBOL_GPL(trace_seq_path); |
| 384 | 351 | ||
| @@ -404,25 +371,7 @@ EXPORT_SYMBOL_GPL(trace_seq_path); | |||
| 404 | */ | 371 | */ |
| 405 | int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt) | 372 | int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, int cnt) |
| 406 | { | 373 | { |
| 407 | int len; | 374 | __trace_seq_init(s); |
| 408 | int ret; | 375 | return seq_buf_to_user(&s->seq, ubuf, cnt); |
| 409 | |||
| 410 | if (!cnt) | ||
| 411 | return 0; | ||
| 412 | |||
| 413 | if (s->len <= s->readpos) | ||
| 414 | return -EBUSY; | ||
| 415 | |||
| 416 | len = s->len - s->readpos; | ||
| 417 | if (cnt > len) | ||
| 418 | cnt = len; | ||
| 419 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); | ||
| 420 | if (ret == cnt) | ||
| 421 | return -EFAULT; | ||
| 422 | |||
| 423 | cnt -= ret; | ||
| 424 | |||
| 425 | s->readpos += cnt; | ||
| 426 | return cnt; | ||
| 427 | } | 376 | } |
| 428 | EXPORT_SYMBOL_GPL(trace_seq_to_user); | 377 | EXPORT_SYMBOL_GPL(trace_seq_to_user); |
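Two conventions run through the whole trace_seq.c rewrite. First, __trace_seq_init() makes a zero-filled trace_seq lazily self-initializing, so users no longer have to call trace_seq_init() before the first write. Second, every write becomes all-or-nothing on top of seq_buf: save the length, attempt the write, roll back on overflow, so a reader never sees a truncated fragment:

	unsigned int save_len = s->seq.len;

	seq_buf_vprintf(&s->seq, fmt, args);	/* may overflow the page */
	if (unlikely(seq_buf_has_overflowed(&s->seq))) {
		s->seq.len = save_len;	/* discard the partial write */
		s->full = 1;		/* latch: reject further writes */
	}

The s->full latch is what lets the fast path (if (s->full) return;) skip work once a line has overflowed. As a bonus, delegating to seq_buf fixes a latent bug visible in the removed lines: the old trace_seq_bitmask() printed at s->buffer rather than s->buffer + s->len, clobbering whatever was already in the sequence.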
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index 8a4e5cb66a4c..16eddb308c33 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c | |||
| @@ -13,7 +13,6 @@ | |||
| 13 | #include <linux/sysctl.h> | 13 | #include <linux/sysctl.h> |
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
| 16 | #include <linux/magic.h> | ||
| 17 | 16 | ||
| 18 | #include <asm/setup.h> | 17 | #include <asm/setup.h> |
| 19 | 18 | ||
| @@ -171,8 +170,7 @@ check_stack(unsigned long ip, unsigned long *stack) | |||
| 171 | i++; | 170 | i++; |
| 172 | } | 171 | } |
| 173 | 172 | ||
| 174 | if ((current != &init_task && | 173 | if (task_stack_end_corrupted(current)) { |
| 175 | *(end_of_stack(current)) != STACK_END_MAGIC)) { | ||
| 176 | print_max_stack(); | 174 | print_max_stack(); |
| 177 | BUG(); | 175 | BUG(); |
| 178 | } | 176 | } |
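The stack tracer's end-of-stack check collapses into task_stack_end_corrupted(). The old current != &init_task clause existed because the init task's stack end was never stamped with the canary; with the companion change that has early boot write the magic into every task stack, init included (set_task_stack_end_magic()), the helper can be used unconditionally. For reference, the helper (defined in sched.h, not in this diff) is essentially:

#define task_stack_end_corrupted(task) \
	(*(end_of_stack(task)) != STACK_END_MAGIC)

That also explains the dropped linux/magic.h include: this file no longer names STACK_END_MAGIC directly.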
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 759d5e004517..c6ee36fcbf90 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
| @@ -114,7 +114,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags, | |||
| 114 | struct trace_entry *ent = iter->ent; | 114 | struct trace_entry *ent = iter->ent; |
| 115 | struct syscall_trace_enter *trace; | 115 | struct syscall_trace_enter *trace; |
| 116 | struct syscall_metadata *entry; | 116 | struct syscall_metadata *entry; |
| 117 | int i, ret, syscall; | 117 | int i, syscall; |
| 118 | 118 | ||
| 119 | trace = (typeof(trace))ent; | 119 | trace = (typeof(trace))ent; |
| 120 | syscall = trace->nr; | 120 | syscall = trace->nr; |
| @@ -128,35 +128,28 @@ print_syscall_enter(struct trace_iterator *iter, int flags, | |||
| 128 | goto end; | 128 | goto end; |
| 129 | } | 129 | } |
| 130 | 130 | ||
| 131 | ret = trace_seq_printf(s, "%s(", entry->name); | 131 | trace_seq_printf(s, "%s(", entry->name); |
| 132 | if (!ret) | ||
| 133 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 134 | 132 | ||
| 135 | for (i = 0; i < entry->nb_args; i++) { | 133 | for (i = 0; i < entry->nb_args; i++) { |
| 134 | |||
| 135 | if (trace_seq_has_overflowed(s)) | ||
| 136 | goto end; | ||
| 137 | |||
| 136 | /* parameter types */ | 138 | /* parameter types */ |
| 137 | if (trace_flags & TRACE_ITER_VERBOSE) { | 139 | if (trace_flags & TRACE_ITER_VERBOSE) |
| 138 | ret = trace_seq_printf(s, "%s ", entry->types[i]); | 140 | trace_seq_printf(s, "%s ", entry->types[i]); |
| 139 | if (!ret) | 141 | |
| 140 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 141 | } | ||
| 142 | /* parameter values */ | 142 | /* parameter values */ |
| 143 | ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i], | 143 | trace_seq_printf(s, "%s: %lx%s", entry->args[i], |
| 144 | trace->args[i], | 144 | trace->args[i], |
| 145 | i == entry->nb_args - 1 ? "" : ", "); | 145 | i == entry->nb_args - 1 ? "" : ", "); |
| 146 | if (!ret) | ||
| 147 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 148 | } | 146 | } |
| 149 | 147 | ||
| 150 | ret = trace_seq_putc(s, ')'); | 148 | trace_seq_putc(s, ')'); |
| 151 | if (!ret) | ||
| 152 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 153 | |||
| 154 | end: | 149 | end: |
| 155 | ret = trace_seq_putc(s, '\n'); | 150 | trace_seq_putc(s, '\n'); |
| 156 | if (!ret) | ||
| 157 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 158 | 151 | ||
| 159 | return TRACE_TYPE_HANDLED; | 152 | return trace_handle_return(s); |
| 160 | } | 153 | } |
| 161 | 154 | ||
| 162 | static enum print_line_t | 155 | static enum print_line_t |
| @@ -168,7 +161,6 @@ print_syscall_exit(struct trace_iterator *iter, int flags, | |||
| 168 | struct syscall_trace_exit *trace; | 161 | struct syscall_trace_exit *trace; |
| 169 | int syscall; | 162 | int syscall; |
| 170 | struct syscall_metadata *entry; | 163 | struct syscall_metadata *entry; |
| 171 | int ret; | ||
| 172 | 164 | ||
| 173 | trace = (typeof(trace))ent; | 165 | trace = (typeof(trace))ent; |
| 174 | syscall = trace->nr; | 166 | syscall = trace->nr; |
| @@ -176,7 +168,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags, | |||
| 176 | 168 | ||
| 177 | if (!entry) { | 169 | if (!entry) { |
| 178 | trace_seq_putc(s, '\n'); | 170 | trace_seq_putc(s, '\n'); |
| 179 | return TRACE_TYPE_HANDLED; | 171 | goto out; |
| 180 | } | 172 | } |
| 181 | 173 | ||
| 182 | if (entry->exit_event->event.type != ent->type) { | 174 | if (entry->exit_event->event.type != ent->type) { |
| @@ -184,12 +176,11 @@ print_syscall_exit(struct trace_iterator *iter, int flags, | |||
| 184 | return TRACE_TYPE_UNHANDLED; | 176 | return TRACE_TYPE_UNHANDLED; |
| 185 | } | 177 | } |
| 186 | 178 | ||
| 187 | ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name, | 179 | trace_seq_printf(s, "%s -> 0x%lx\n", entry->name, |
| 188 | trace->ret); | 180 | trace->ret); |
| 189 | if (!ret) | ||
| 190 | return TRACE_TYPE_PARTIAL_LINE; | ||
| 191 | 181 | ||
| 192 | return TRACE_TYPE_HANDLED; | 182 | out: |
| 183 | return trace_handle_return(s); | ||
| 193 | } | 184 | } |
| 194 | 185 | ||
| 195 | extern char *__bad_type_size(void); | 186 | extern char *__bad_type_size(void); |
| @@ -313,7 +304,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) | |||
| 313 | int size; | 304 | int size; |
| 314 | 305 | ||
| 315 | syscall_nr = trace_get_syscall_nr(current, regs); | 306 | syscall_nr = trace_get_syscall_nr(current, regs); |
| 316 | if (syscall_nr < 0) | 307 | if (syscall_nr < 0 || syscall_nr >= NR_syscalls) |
| 317 | return; | 308 | return; |
| 318 | 309 | ||
| 319 | /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */ | 310 | /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */ |
| @@ -360,7 +351,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) | |||
| 360 | int syscall_nr; | 351 | int syscall_nr; |
| 361 | 352 | ||
| 362 | syscall_nr = trace_get_syscall_nr(current, regs); | 353 | syscall_nr = trace_get_syscall_nr(current, regs); |
| 363 | if (syscall_nr < 0) | 354 | if (syscall_nr < 0 || syscall_nr >= NR_syscalls) |
| 364 | return; | 355 | return; |
| 365 | 356 | ||
| 366 | /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */ | 357 | /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */ |
| @@ -425,7 +416,7 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file, | |||
| 425 | return; | 416 | return; |
| 426 | mutex_lock(&syscall_trace_lock); | 417 | mutex_lock(&syscall_trace_lock); |
| 427 | tr->sys_refcount_enter--; | 418 | tr->sys_refcount_enter--; |
| 428 | rcu_assign_pointer(tr->enter_syscall_files[num], NULL); | 419 | RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL); |
| 429 | if (!tr->sys_refcount_enter) | 420 | if (!tr->sys_refcount_enter) |
| 430 | unregister_trace_sys_enter(ftrace_syscall_enter, tr); | 421 | unregister_trace_sys_enter(ftrace_syscall_enter, tr); |
| 431 | mutex_unlock(&syscall_trace_lock); | 422 | mutex_unlock(&syscall_trace_lock); |
| @@ -463,7 +454,7 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file, | |||
| 463 | return; | 454 | return; |
| 464 | mutex_lock(&syscall_trace_lock); | 455 | mutex_lock(&syscall_trace_lock); |
| 465 | tr->sys_refcount_exit--; | 456 | tr->sys_refcount_exit--; |
| 466 | rcu_assign_pointer(tr->exit_syscall_files[num], NULL); | 457 | RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL); |
| 467 | if (!tr->sys_refcount_exit) | 458 | if (!tr->sys_refcount_exit) |
| 468 | unregister_trace_sys_exit(ftrace_syscall_exit, tr); | 459 | unregister_trace_sys_exit(ftrace_syscall_exit, tr); |
| 469 | mutex_unlock(&syscall_trace_lock); | 460 | mutex_unlock(&syscall_trace_lock); |
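Switching the NULL stores from rcu_assign_pointer() to RCU_INIT_POINTER() drops an unnecessary memory barrier: the release ordering in rcu_assign_pointer() exists so a reader that sees the new pointer also sees the pointed-to object's initialization, and when the stored value is NULL there is no object to order against. A userspace analogue using C11 atomics in place of the kernel macros (names here are illustrative):

#include <stdatomic.h>
#include <stddef.h>

struct event_file { int id; };
static _Atomic(struct event_file *) slot;

static void publish(struct event_file *f)
{
    /* rcu_assign_pointer()-like: release store, so a reader that
     * observes the pointer also observes a fully initialized *f. */
    atomic_store_explicit(&slot, f, memory_order_release);
}

static void unpublish(void)
{
    /* RCU_INIT_POINTER()-like: storing NULL publishes no data,
     * so no ordering (and no barrier) is required. */
    atomic_store_explicit(&slot, NULL, memory_order_relaxed);
}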
| @@ -523,7 +514,7 @@ unsigned long __init __weak arch_syscall_addr(int nr) | |||
| 523 | return (unsigned long)sys_call_table[nr]; | 514 | return (unsigned long)sys_call_table[nr]; |
| 524 | } | 515 | } |
| 525 | 516 | ||
| 526 | static int __init init_ftrace_syscalls(void) | 517 | void __init init_ftrace_syscalls(void) |
| 527 | { | 518 | { |
| 528 | struct syscall_metadata *meta; | 519 | struct syscall_metadata *meta; |
| 529 | unsigned long addr; | 520 | unsigned long addr; |
| @@ -533,7 +524,7 @@ static int __init init_ftrace_syscalls(void) | |||
| 533 | GFP_KERNEL); | 524 | GFP_KERNEL); |
| 534 | if (!syscalls_metadata) { | 525 | if (!syscalls_metadata) { |
| 535 | WARN_ON(1); | 526 | WARN_ON(1); |
| 536 | return -ENOMEM; | 527 | return; |
| 537 | } | 528 | } |
| 538 | 529 | ||
| 539 | for (i = 0; i < NR_syscalls; i++) { | 530 | for (i = 0; i < NR_syscalls; i++) { |
| @@ -545,10 +536,7 @@ static int __init init_ftrace_syscalls(void) | |||
| 545 | meta->syscall_nr = i; | 536 | meta->syscall_nr = i; |
| 546 | syscalls_metadata[i] = meta; | 537 | syscalls_metadata[i] = meta; |
| 547 | } | 538 | } |
| 548 | |||
| 549 | return 0; | ||
| 550 | } | 539 | } |
| 551 | early_initcall(init_ftrace_syscalls); | ||
| 552 | 540 | ||
| 553 | #ifdef CONFIG_PERF_EVENTS | 541 | #ifdef CONFIG_PERF_EVENTS |
| 554 | 542 | ||
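With the early_initcall() gone, init_ftrace_syscalls() becomes a plain void function that the tracing core can call directly during early boot, before the initcall machinery runs; on allocation failure it now just warns and leaves syscalls_metadata NULL, which the lookup paths already treat as "no metadata". A userspace-style sketch of that failure convention (simplified, hypothetical names):

#include <stdio.h>
#include <stdlib.h>

#define NR_SYSCALLS 64          /* stand-in for NR_syscalls */
static void **syscalls_metadata;

static void init_syscalls_like(void)
{
    syscalls_metadata = calloc(NR_SYSCALLS, sizeof(*syscalls_metadata));
    if (!syscalls_metadata) {
        fprintf(stderr, "WARN: syscall metadata allocation failed\n");
        return;                 /* no error code: callers test the pointer */
    }
    /* ... populate the table ... */
}

static void *metadata_for(int nr)
{
    if (!syscalls_metadata)     /* init failed or has not run yet */
        return NULL;
    return syscalls_metadata[nr];
}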
| @@ -567,7 +555,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
| 567 | int size; | 555 | int size; |
| 568 | 556 | ||
| 569 | syscall_nr = trace_get_syscall_nr(current, regs); | 557 | syscall_nr = trace_get_syscall_nr(current, regs); |
| 570 | if (syscall_nr < 0) | 558 | if (syscall_nr < 0 || syscall_nr >= NR_syscalls) |
| 571 | return; | 559 | return; |
| 572 | if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) | 560 | if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) |
| 573 | return; | 561 | return; |
| @@ -641,7 +629,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
| 641 | int size; | 629 | int size; |
| 642 | 630 | ||
| 643 | syscall_nr = trace_get_syscall_nr(current, regs); | 631 | syscall_nr = trace_get_syscall_nr(current, regs); |
| 644 | if (syscall_nr < 0) | 632 | if (syscall_nr < 0 || syscall_nr >= NR_syscalls) |
| 645 | return; | 633 | return; |
| 646 | if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) | 634 | if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) |
| 647 | return; | 635 | return; |
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 33ff6a24b802..8520acc34b18 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
| @@ -552,8 +552,7 @@ error: | |||
| 552 | return ret; | 552 | return ret; |
| 553 | 553 | ||
| 554 | fail_address_parse: | 554 | fail_address_parse: |
| 555 | if (inode) | 555 | iput(inode); |
| 556 | iput(inode); | ||
| 557 | 556 | ||
| 558 | pr_info("Failed to parse address or file.\n"); | 557 | pr_info("Failed to parse address or file.\n"); |
| 559 | 558 | ||
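The dropped `if (inode)` guard relies on iput() being a no-op for a NULL inode, the same contract kfree() and free() provide, which makes unconditional cleanup at an error label safe even on paths that never obtained the inode. The same shape in portable C:

#include <stdlib.h>

static int parse_like(const char *arg)
{
    char *buf = NULL;

    if (!arg)
        goto fail;              /* reached with buf still NULL */
    buf = malloc(32);
    if (!buf)
        goto fail;
    /* ... parsing that can also jump to fail ... */
    free(buf);
    return 0;
fail:
    free(buf);                  /* free(NULL) is a no-op, like iput(NULL) */
    return -1;
}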
| @@ -606,7 +605,7 @@ static int probes_seq_show(struct seq_file *m, void *v) | |||
| 606 | for (i = 0; i < tu->tp.nr_args; i++) | 605 | for (i = 0; i < tu->tp.nr_args; i++) |
| 607 | seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); | 606 | seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); |
| 608 | 607 | ||
| 609 | seq_printf(m, "\n"); | 608 | seq_putc(m, '\n'); |
| 610 | return 0; | 609 | return 0; |
| 611 | } | 610 | } |
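The seq_printf() to seq_putc() change is a common micro-cleanup: emitting one literal byte does not need a format string scanned at runtime. The stdio analogue:

#include <stdio.h>

int main(void)
{
    fprintf(stdout, "\n");      /* parses a format string to emit one byte */
    fputc('\n', stdout);        /* writes the byte directly */
    return 0;
}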
| 612 | 611 | ||
| @@ -852,16 +851,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e | |||
| 852 | tu = container_of(event, struct trace_uprobe, tp.call.event); | 851 | tu = container_of(event, struct trace_uprobe, tp.call.event); |
| 853 | 852 | ||
| 854 | if (is_ret_probe(tu)) { | 853 | if (is_ret_probe(tu)) { |
| 855 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", | 854 | trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", |
| 856 | ftrace_event_name(&tu->tp.call), | 855 | ftrace_event_name(&tu->tp.call), |
| 857 | entry->vaddr[1], entry->vaddr[0])) | 856 | entry->vaddr[1], entry->vaddr[0]); |
| 858 | goto partial; | ||
| 859 | data = DATAOF_TRACE_ENTRY(entry, true); | 857 | data = DATAOF_TRACE_ENTRY(entry, true); |
| 860 | } else { | 858 | } else { |
| 861 | if (!trace_seq_printf(s, "%s: (0x%lx)", | 859 | trace_seq_printf(s, "%s: (0x%lx)", |
| 862 | ftrace_event_name(&tu->tp.call), | 860 | ftrace_event_name(&tu->tp.call), |
| 863 | entry->vaddr[0])) | 861 | entry->vaddr[0]); |
| 864 | goto partial; | ||
| 865 | data = DATAOF_TRACE_ENTRY(entry, false); | 862 | data = DATAOF_TRACE_ENTRY(entry, false); |
| 866 | } | 863 | } |
| 867 | 864 | ||
| @@ -869,14 +866,13 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e | |||
| 869 | struct probe_arg *parg = &tu->tp.args[i]; | 866 | struct probe_arg *parg = &tu->tp.args[i]; |
| 870 | 867 | ||
| 871 | if (!parg->type->print(s, parg->name, data + parg->offset, entry)) | 868 | if (!parg->type->print(s, parg->name, data + parg->offset, entry)) |
| 872 | goto partial; | 869 | goto out; |
| 873 | } | 870 | } |
| 874 | 871 | ||
| 875 | if (trace_seq_puts(s, "\n")) | 872 | trace_seq_putc(s, '\n'); |
| 876 | return TRACE_TYPE_HANDLED; | ||
| 877 | 873 | ||
| 878 | partial: | 874 | out: |
| 879 | return TRACE_TYPE_PARTIAL_LINE; | 875 | return trace_handle_return(s); |
| 880 | } | 876 | } |
| 881 | 877 | ||
| 882 | typedef bool (*filter_func_t)(struct uprobe_consumer *self, | 878 | typedef bool (*filter_func_t)(struct uprobe_consumer *self, |
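After this hunk, print_uprobe_event() funnels every exit, including the argument printer's failure path, through the single trace_handle_return()-style check, and the trailing trace_seq_puts(s, "\n") becomes the cheaper trace_seq_putc(). A runnable sketch of the whole shape (simplified stand-ins for the trace_seq API):

enum print_line_t { TRACE_TYPE_PARTIAL_LINE, TRACE_TYPE_HANDLED };
struct seq_like { char buf[64]; unsigned int len; int full; };

static void putc_like(struct seq_like *s, char c)
{
    if (s->full || s->len + 1 >= sizeof(s->buf)) {
        s->full = 1;
        return;
    }
    s->buf[s->len++] = c;
}

static int print_arg_like(struct seq_like *s, int i)
{
    putc_like(s, (char)('a' + i % 26));
    return !s->full;            /* mimics parg->type->print()'s result */
}

static enum print_line_t print_event_like(struct seq_like *s, int nargs)
{
    for (int i = 0; i < nargs; i++)
        if (!print_arg_like(s, i))
            goto out;           /* stop emitting, but still report below */
    putc_like(s, '\n');
out:                            /* single exit: overflow decides the verdict */
    return s->full ? TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}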
