Diffstat (limited to 'kernel')
50 files changed, 9192 insertions, 2473 deletions
diff --git a/kernel/extable.c b/kernel/extable.c
index e136ed8d82ba..0df6253730be 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -41,7 +41,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr)
 	return e;
 }
 
-__notrace_funcgraph int core_kernel_text(unsigned long addr)
+int core_kernel_text(unsigned long addr)
 {
 	if (addr >= (unsigned long)_stext &&
 	    addr <= (unsigned long)_etext)
@@ -54,7 +54,7 @@ __notrace_funcgraph int core_kernel_text(unsigned long addr)
 	return 0;
 }
 
-__notrace_funcgraph int __kernel_text_address(unsigned long addr)
+int __kernel_text_address(unsigned long addr)
 {
 	if (core_kernel_text(addr))
 		return 1;
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index f51eaee921b6..412370ab9a34 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -17,6 +17,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/rculist.h>
 #include <linux/hash.h>
+#include <trace/irq.h>
 #include <linux/bootmem.h>
 
 #include "internals.h"
@@ -329,6 +330,9 @@ irqreturn_t no_action(int cpl, void *dev_id)
 	return IRQ_NONE;
 }
 
+DEFINE_TRACE(irq_handler_entry);
+DEFINE_TRACE(irq_handler_exit);
+
 /**
  * handle_IRQ_event - irq action chain handler
  * @irq:	the interrupt number
@@ -345,7 +349,9 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	local_irq_enable_in_hardirq();
 
 	do {
+		trace_irq_handler_entry(irq, action);
 		ret = action->handler(irq, action->dev_id);
+		trace_irq_handler_exit(irq, action, ret);
 		if (ret == IRQ_HANDLED)
 			status |= action->flags;
 		retval |= ret;
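The DEFINE_TRACE()/trace_irq_handler_*() pairs above are instances of the 2.6.29-era static tracepoint API. The matching declarations live in include/trace/irq.h, which is outside this kernel/-limited diffstat; a sketch of what that side plausibly looks like, together with a hypothetical probe, follows. The DECLARE_TRACE() arguments mirror the calls in handle_IRQ_event(); the probe module is invented for illustration.

/*
 * Sketch only: include/trace/irq.h side plus a hypothetical consumer.
 */
#include <linux/tracepoint.h>
#include <linux/interrupt.h>

DECLARE_TRACE(irq_handler_entry,
	TP_PROTO(int irq, struct irqaction *action),
	TP_ARGS(irq, action));

DECLARE_TRACE(irq_handler_exit,
	TP_PROTO(int irq, struct irqaction *action, int ret),
	TP_ARGS(irq, action, ret));

/* A probe attaches with the generated register_trace_*() helper: */
static void probe_irq_entry(int irq, struct irqaction *action)
{
	/* called right before action->handler() runs */
	pr_debug("irq %d -> %s\n", irq, action->name);
}

static int __init irqtrace_probe_init(void)
{
	return register_trace_irq_handler_entry(probe_irq_entry);
}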
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 06b0c3568f0b..cb70c1db85d0 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -41,6 +41,8 @@
 #include <linux/utsname.h>
 #include <linux/hash.h>
 #include <linux/ftrace.h>
+#include <linux/stringify.h>
+#include <trace/lockdep.h>
 
 #include <asm/sections.h>
 
@@ -310,12 +312,14 @@ EXPORT_SYMBOL(lockdep_on);
 #if VERBOSE
 # define HARDIRQ_VERBOSE	1
 # define SOFTIRQ_VERBOSE	1
+# define RECLAIM_VERBOSE	1
 #else
 # define HARDIRQ_VERBOSE	0
 # define SOFTIRQ_VERBOSE	0
+# define RECLAIM_VERBOSE	0
 #endif
 
-#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
+#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE
 /*
  * Quick filtering for interesting events:
  */
@@ -443,17 +447,18 @@ atomic_t nr_find_usage_backwards_recursions;
  * Locking printouts:
  */
 
+#define __USAGE(__STATE)						\
+	[LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W",	\
+	[LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W",	\
+	[LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\
+	[LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R",
+
 static const char *usage_str[] =
 {
-	[LOCK_USED] = "initial-use ",
-	[LOCK_USED_IN_HARDIRQ] = "in-hardirq-W",
-	[LOCK_USED_IN_SOFTIRQ] = "in-softirq-W",
-	[LOCK_ENABLED_SOFTIRQS] = "softirq-on-W",
-	[LOCK_ENABLED_HARDIRQS] = "hardirq-on-W",
-	[LOCK_USED_IN_HARDIRQ_READ] = "in-hardirq-R",
-	[LOCK_USED_IN_SOFTIRQ_READ] = "in-softirq-R",
-	[LOCK_ENABLED_SOFTIRQS_READ] = "softirq-on-R",
-	[LOCK_ENABLED_HARDIRQS_READ] = "hardirq-on-R",
+#define LOCKDEP_STATE(__STATE) __USAGE(__STATE)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+	[LOCK_USED] = "INITIAL USE",
 };
 
 const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
@@ -461,46 +466,45 @@ const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
 	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
 }
 
-void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
+static inline unsigned long lock_flag(enum lock_usage_bit bit)
 {
-	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
-
-	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
-		*c1 = '+';
-	else
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
-			*c1 = '-';
+	return 1UL << bit;
+}
 
-	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
-		*c2 = '+';
-	else
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
-			*c2 = '-';
+static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)
+{
+	char c = '.';
 
-	if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-		*c3 = '-';
-	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
-		*c3 = '+';
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
-			*c3 = '?';
-	}
+	if (class->usage_mask & lock_flag(bit + 2))
+		c = '+';
+	if (class->usage_mask & lock_flag(bit)) {
+		c = '-';
+		if (class->usage_mask & lock_flag(bit + 2))
+			c = '?';
+	}
 
-	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-		*c4 = '-';
-	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
-		*c4 = '+';
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
-			*c4 = '?';
-	}
+	return c;
+}
+
+void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS])
+{
+	int i = 0;
+
+#define LOCKDEP_STATE(__STATE)						\
+	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE);	\
+	usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ);
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+
+	usage[i] = '\0';
 }
 
 static void print_lock_name(struct lock_class *class)
 {
-	char str[KSYM_NAME_LEN], c1, c2, c3, c4;
+	char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS];
 	const char *name;
 
-	get_usage_chars(class, &c1, &c2, &c3, &c4);
+	get_usage_chars(class, usage);
 
 	name = class->name;
 	if (!name) {
@@ -513,7 +517,7 @@ static void print_lock_name(struct lock_class *class)
 		if (class->subclass)
 			printk("/%d", class->subclass);
 	}
-	printk("){%c%c%c%c}", c1, c2, c3, c4);
+	printk("){%s}", usage);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
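With the three states in lockdep_states.h, get_usage_chars() now emits two characters per state (write usage, then read usage) plus a terminating NUL, which is where LOCK_USAGE_CHARS = 1 + LOCK_USAGE_STATES/2 = 7 comes from. A minimal userspace rendering of the same decoding rules, using a hypothetical usage mask:

#include <stdio.h>

/* Per-state bit offsets, mirroring the enum in lockdep_internals.h: */
enum { USED_IN = 0, USED_IN_READ = 1, ENABLED = 2, ENABLED_READ = 3 };

static char usage_char(unsigned long mask, int bit)
{
	char c = '.';

	if (mask & (1UL << (bit + 2)))	/* ever held with this ctx enabled */
		c = '+';
	if (mask & (1UL << bit)) {	/* ever acquired in this context */
		c = '-';
		if (mask & (1UL << (bit + 2)))
			c = '?';	/* both: potential inversion */
	}
	return c;
}

int main(void)
{
	/* hypothetical class: only ever taken in hardirq context (bit 0) */
	unsigned long mask = 1UL << 0;
	char usage[7];
	int i = 0, s;

	for (s = 0; s < 3; s++) {	/* HARDIRQ, SOFTIRQ, RECLAIM_FS */
		usage[i++] = usage_char(mask, s * 4 + USED_IN);
		usage[i++] = usage_char(mask, s * 4 + USED_IN_READ);
	}
	usage[i] = '\0';
	printf("{%s}\n", usage);	/* prints {-.....} */
	return 0;
}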
@@ -1263,9 +1267,49 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 			bit_backwards, bit_forwards, irqclass);
 }
 
-static int
-check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
-		struct held_lock *next)
+static const char *state_names[] = {
+#define LOCKDEP_STATE(__STATE) \
+	__stringify(__STATE),
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static const char *state_rnames[] = {
+#define LOCKDEP_STATE(__STATE) \
+	__stringify(__STATE)"-READ",
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline const char *state_name(enum lock_usage_bit bit)
+{
+	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
+}
+
+static int exclusive_bit(int new_bit)
+{
+	/*
+	 * USED_IN
+	 * USED_IN_READ
+	 * ENABLED
+	 * ENABLED_READ
+	 *
+	 * bit 0 - write/read
+	 * bit 1 - used_in/enabled
+	 * bit 2+  state
+	 */
+
+	int state = new_bit & ~3;
+	int dir = new_bit & 2;
+
+	/*
+	 * keep state, bit flip the direction and strip read.
+	 */
+	return state | (dir ^ 2);
+}
+
+static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
+			   struct held_lock *next, enum lock_usage_bit bit)
 {
 	/*
 	 * Prove that the new dependency does not connect a hardirq-safe
@@ -1273,38 +1317,34 @@ check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
 	 * lock with a hardirq-unsafe lock - to achieve this we search
 	 * the backwards-subgraph starting at <prev>, and the
 	 * forwards-subgraph starting at <next>:
 	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
-					LOCK_ENABLED_HARDIRQS, "hard"))
+	if (!check_usage(curr, prev, next, bit,
+			   exclusive_bit(bit), state_name(bit)))
 		return 0;
 
+	bit++; /* _READ */
+
 	/*
 	 * Prove that the new dependency does not connect a hardirq-safe-read
 	 * lock with a hardirq-unsafe lock - to achieve this we search
 	 * the backwards-subgraph starting at <prev>, and the
 	 * forwards-subgraph starting at <next>:
 	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
-					LOCK_ENABLED_HARDIRQS, "hard-read"))
+	if (!check_usage(curr, prev, next, bit,
+			   exclusive_bit(bit), state_name(bit)))
 		return 0;
 
-	/*
-	 * Prove that the new dependency does not connect a softirq-safe
-	 * lock with a softirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
-	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
-				LOCK_ENABLED_SOFTIRQS, "soft"))
-		return 0;
-	/*
-	 * Prove that the new dependency does not connect a softirq-safe-read
-	 * lock with a softirq-unsafe lock - to achieve this we search
-	 * the backwards-subgraph starting at <prev>, and the
-	 * forwards-subgraph starting at <next>:
-	 */
-	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
-				LOCK_ENABLED_SOFTIRQS, "soft"))
-		return 0;
+	return 1;
+}
+
+static int
+check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
+		struct held_lock *next)
+{
+#define LOCKDEP_STATE(__STATE)						\
+	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
+		return 0;
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
 
 	return 1;
 }
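The encoding that makes check_irq_usage() work: within each state, bit 0 selects read, bit 1 selects direction (USED_IN vs ENABLED), and bits 2+ select the state, so exclusive_bit() reduces to flipping the direction and stripping the read bit. Worked out for the first state, using the enum layout from lockdep_internals.h:

/*
 * Enum values for state 0 (HARDIRQ):
 *
 *	LOCK_USED_IN_HARDIRQ		= 0	(binary 00)
 *	LOCK_USED_IN_HARDIRQ_READ	= 1	(binary 01)
 *	LOCK_ENABLED_HARDIRQ		= 2	(binary 10)
 *	LOCK_ENABLED_HARDIRQ_READ	= 3	(binary 11)
 *
 * exclusive_bit() keeps bits 2+, XORs bit 1, clears bit 0:
 *
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ)      == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_USED_IN_HARDIRQ_READ) == LOCK_ENABLED_HARDIRQ
 *	exclusive_bit(LOCK_ENABLED_HARDIRQ)      == LOCK_USED_IN_HARDIRQ
 *	exclusive_bit(LOCK_ENABLED_HARDIRQ_READ) == LOCK_USED_IN_HARDIRQ
 *
 * i.e. a safe (used-in-context) bit always pairs with the unsafe
 * (enabled) write bit of the same state, and vice versa.
 */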
@@ -1861,9 +1901,9 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 		curr->comm, task_pid_nr(curr));
 	print_lock(this);
 	if (forwards)
-		printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
+		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
-		printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
+		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
 	print_lock_name(other);
 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
@@ -1933,7 +1973,7 @@ void print_irqtrace_events(struct task_struct *curr)
 	print_ip_sym(curr->softirq_disable_ip);
 }
 
-static int hardirq_verbose(struct lock_class *class)
+static int HARDIRQ_verbose(struct lock_class *class)
 {
 #if HARDIRQ_VERBOSE
 	return class_filter(class);
@@ -1941,7 +1981,7 @@ static int hardirq_verbose(struct lock_class *class)
 	return 0;
 }
 
-static int softirq_verbose(struct lock_class *class)
+static int SOFTIRQ_verbose(struct lock_class *class)
 {
 #if SOFTIRQ_VERBOSE
 	return class_filter(class);
@@ -1949,185 +1989,95 @@ static int softirq_verbose(struct lock_class *class)
 	return 0;
 }
 
+static int RECLAIM_FS_verbose(struct lock_class *class)
+{
+#if RECLAIM_VERBOSE
+	return class_filter(class);
+#endif
+	return 0;
+}
+
 #define STRICT_READ_CHECKS	1
 
-static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
+static int (*state_verbose_f[])(struct lock_class *class) = {
+#define LOCKDEP_STATE(__STATE) \
+	__STATE##_verbose,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
+static inline int state_verbose(enum lock_usage_bit bit,
+				struct lock_class *class)
+{
+	return state_verbose_f[bit >> 2](class);
+}
+
+typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
+			     enum lock_usage_bit bit, const char *name);
+
+static int
+mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		enum lock_usage_bit new_bit)
 {
-	int ret = 1;
-
-	switch(new_bit) {
-	case LOCK_USED_IN_HARDIRQ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_ENABLED_HARDIRQS_READ))
-			return 0;
-		/*
-		 * just marked it hardirq-safe, check that this lock
-		 * took no hardirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_HARDIRQS, "hard"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it hardirq-safe, check that this lock
-		 * took no hardirq-unsafe-read lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-				LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
-			return 0;
-#endif
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_USED_IN_SOFTIRQ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_ENABLED_SOFTIRQS_READ))
-			return 0;
-		/*
-		 * just marked it softirq-safe, check that this lock
-		 * took no softirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_SOFTIRQS, "soft"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it softirq-safe, check that this lock
-		 * took no softirq-unsafe-read lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-				LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
-			return 0;
-#endif
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_USED_IN_HARDIRQ_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
-			return 0;
-		/*
-		 * just marked it hardirq-read-safe, check that this lock
-		 * took no hardirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_HARDIRQS, "hard"))
-			return 0;
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_USED_IN_SOFTIRQ_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
-			return 0;
-		/*
-		 * just marked it softirq-read-safe, check that this lock
-		 * took no softirq-unsafe lock in the past:
-		 */
-		if (!check_usage_forwards(curr, this,
-					  LOCK_ENABLED_SOFTIRQS, "soft"))
-			return 0;
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_HARDIRQS:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_USED_IN_HARDIRQ_READ))
-			return 0;
-		/*
-		 * just marked it hardirq-unsafe, check that no hardirq-safe
-		 * lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_HARDIRQ, "hard"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it hardirq-unsafe, check that no
-		 * hardirq-safe-read lock in the system ever took
-		 * it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-			   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
-			return 0;
-#endif
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_SOFTIRQS:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
-			return 0;
-		if (!valid_state(curr, this, new_bit,
-				 LOCK_USED_IN_SOFTIRQ_READ))
-			return 0;
-		/*
-		 * just marked it softirq-unsafe, check that no softirq-safe
-		 * lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_SOFTIRQ, "soft"))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it softirq-unsafe, check that no
-		 * softirq-safe-read lock in the system ever took
-		 * it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-			   LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
-			return 0;
-#endif
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_HARDIRQS_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it hardirq-read-unsafe, check that no
-		 * hardirq-safe lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_HARDIRQ, "hard"))
-			return 0;
-#endif
-		if (hardirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	case LOCK_ENABLED_SOFTIRQS_READ:
-		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
-			return 0;
-#if STRICT_READ_CHECKS
-		/*
-		 * just marked it softirq-read-unsafe, check that no
-		 * softirq-safe lock in the system ever took it in the past:
-		 */
-		if (!check_usage_backwards(curr, this,
-					   LOCK_USED_IN_SOFTIRQ, "soft"))
-			return 0;
-#endif
-		if (softirq_verbose(hlock_class(this)))
-			ret = 2;
-		break;
-	default:
-		WARN_ON(1);
-		break;
-	}
-
-	return ret;
+	int excl_bit = exclusive_bit(new_bit);
+	int read = new_bit & 1;
+	int dir = new_bit & 2;
+
+	/*
+	 * mark USED_IN has to look forwards -- to ensure no dependency
+	 * has ENABLED state, which would allow recursion deadlocks.
+	 *
+	 * mark ENABLED has to look backwards -- to ensure no dependee
+	 * has USED_IN state, which, again, would allow recursion deadlocks.
+	 */
+	check_usage_f usage = dir ?
+		check_usage_backwards : check_usage_forwards;
+
+	/*
+	 * Validate that this particular lock does not have conflicting
+	 * usage states.
+	 */
+	if (!valid_state(curr, this, new_bit, excl_bit))
+		return 0;
+
+	/*
+	 * Validate that the lock dependencies don't have conflicting usage
+	 * states.
+	 */
+	if ((!read || !dir || STRICT_READ_CHECKS) &&
+			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
+		return 0;
+
+	/*
+	 * Check for read in write conflicts
+	 */
+	if (!read) {
+		if (!valid_state(curr, this, new_bit, excl_bit + 1))
+			return 0;
+
+		if (STRICT_READ_CHECKS &&
+			!usage(curr, this, excl_bit + 1,
+				state_name(new_bit + 1)))
+			return 0;
+	}
+
+	if (state_verbose(new_bit, hlock_class(this)))
+		return 2;
+
+	return 1;
 }
 
+enum mark_type {
+#define LOCKDEP_STATE(__STATE)	__STATE,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+};
+
 /*
  * Mark all held locks with a usage bit:
  */
 static int
-mark_held_locks(struct task_struct *curr, int hardirq)
+mark_held_locks(struct task_struct *curr, enum mark_type mark)
 {
 	enum lock_usage_bit usage_bit;
 	struct held_lock *hlock;
@@ -2136,17 +2086,12 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
-		if (hardirq) {
-			if (hlock->read)
-				usage_bit = LOCK_ENABLED_HARDIRQS_READ;
-			else
-				usage_bit = LOCK_ENABLED_HARDIRQS;
-		} else {
-			if (hlock->read)
-				usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
-			else
-				usage_bit = LOCK_ENABLED_SOFTIRQS;
-		}
+		usage_bit = 2 + (mark << 2); /* ENABLED */
+		if (hlock->read)
+			usage_bit += 1; /* READ */
+
+		BUG_ON(usage_bit >= LOCK_USAGE_STATES);
+
 		if (!mark_lock(curr, hlock, usage_bit))
 			return 0;
 	}
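The arithmetic leans on the four-bits-per-state enum layout, with ENABLED at offset 2 and READ at offset 1. A quick sanity check, expressed here as hypothetical BUILD_BUG_ON()s that are not part of the patch:

/* mark_held_locks() usage_bit arithmetic, worked out by hand: */
BUILD_BUG_ON(2 + (HARDIRQ    << 2) != LOCK_ENABLED_HARDIRQ);	/*  2 */
BUILD_BUG_ON(2 + (SOFTIRQ    << 2) != LOCK_ENABLED_SOFTIRQ);	/*  6 */
BUILD_BUG_ON(2 + (RECLAIM_FS << 2) != LOCK_ENABLED_RECLAIM_FS);	/* 10 */
/*
 * Adding 1 for hlock->read gives the corresponding _READ bit (3, 7, 11),
 * all below LOCK_USAGE_STATES (13), so the BUG_ON above cannot fire for
 * any valid mark_type.
 */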
@@ -2200,7 +2145,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	 * We are going to turn hardirqs on, so set the
 	 * usage bit for all held locks:
 	 */
-	if (!mark_held_locks(curr, 1))
+	if (!mark_held_locks(curr, HARDIRQ))
 		return;
 	/*
 	 * If we have softirqs enabled, then set the usage
@@ -2208,7 +2153,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	 * this bit from being set before)
 	 */
 	if (curr->softirqs_enabled)
-		if (!mark_held_locks(curr, 0))
+		if (!mark_held_locks(curr, SOFTIRQ))
 			return;
 
 	curr->hardirq_enable_ip = ip;
@@ -2288,7 +2233,7 @@ void trace_softirqs_on(unsigned long ip)
 	 * enabled too:
 	 */
 	if (curr->hardirqs_enabled)
-		mark_held_locks(curr, 0);
+		mark_held_locks(curr, SOFTIRQ);
 }
 
 /*
@@ -2317,6 +2262,31 @@ void trace_softirqs_off(unsigned long ip)
 		debug_atomic_inc(&redundant_softirqs_off);
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+	struct task_struct *curr = current;
+
+	if (unlikely(!debug_locks))
+		return;
+
+	/* no reclaim without waiting on it */
+	if (!(gfp_mask & __GFP_WAIT))
+		return;
+
+	/* this guy won't enter reclaim */
+	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
+		return;
+
+	/* We're only interested __GFP_FS allocations for now */
+	if (!(gfp_mask & __GFP_FS))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(irqs_disabled()))
+		return;
+
+	mark_held_locks(curr, RECLAIM_FS);
+}
+
 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 {
 	/*
@@ -2345,19 +2315,35 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
 	if (!hlock->hardirqs_off) {
 		if (hlock->read) {
 			if (!mark_lock(curr, hlock,
-					LOCK_ENABLED_HARDIRQS_READ))
+					LOCK_ENABLED_HARDIRQ_READ))
 				return 0;
 			if (curr->softirqs_enabled)
 				if (!mark_lock(curr, hlock,
-						LOCK_ENABLED_SOFTIRQS_READ))
+						LOCK_ENABLED_SOFTIRQ_READ))
 					return 0;
 		} else {
 			if (!mark_lock(curr, hlock,
-					LOCK_ENABLED_HARDIRQS))
+					LOCK_ENABLED_HARDIRQ))
 				return 0;
 			if (curr->softirqs_enabled)
 				if (!mark_lock(curr, hlock,
-						LOCK_ENABLED_SOFTIRQS))
+						LOCK_ENABLED_SOFTIRQ))
+					return 0;
+		}
+	}
+
+	/*
+	 * We reuse the irq context infrastructure more broadly as a general
+	 * context checking code. This tests GFP_FS recursion (a lock taken
+	 * during reclaim for a GFP_FS allocation is held over a GFP_FS
+	 * allocation).
+	 */
+	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
+		if (hlock->read) {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
+				return 0;
+		} else {
+			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
 				return 0;
 		}
 	}
@@ -2412,6 +2398,10 @@ static inline int separate_irq_context(struct task_struct *curr,
 	return 0;
 }
 
+void lockdep_trace_alloc(gfp_t gfp_mask)
+{
+}
+
 #endif
 
 /*
@@ -2445,14 +2435,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		return 0;
 
 	switch (new_bit) {
-	case LOCK_USED_IN_HARDIRQ:
-	case LOCK_USED_IN_SOFTIRQ:
-	case LOCK_USED_IN_HARDIRQ_READ:
-	case LOCK_USED_IN_SOFTIRQ_READ:
-	case LOCK_ENABLED_HARDIRQS:
-	case LOCK_ENABLED_SOFTIRQS:
-	case LOCK_ENABLED_HARDIRQS_READ:
-	case LOCK_ENABLED_SOFTIRQS_READ:
+#define LOCKDEP_STATE(__STATE)			\
+	case LOCK_USED_IN_##__STATE:		\
+	case LOCK_USED_IN_##__STATE##_READ:	\
+	case LOCK_ENABLED_##__STATE:		\
+	case LOCK_ENABLED_##__STATE##_READ:
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
 		ret = mark_lock_irq(curr, this, new_bit);
 		if (!ret)
 			return 0;
@@ -2925,6 +2914,8 @@ void lock_set_class(struct lockdep_map *lock, const char *name,
 }
 EXPORT_SYMBOL_GPL(lock_set_class);
 
+DEFINE_TRACE(lock_acquire);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
@@ -2935,6 +2926,8 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 {
 	unsigned long flags;
 
+	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -2949,11 +2942,15 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 }
 EXPORT_SYMBOL_GPL(lock_acquire);
 
+DEFINE_TRACE(lock_release);
+
 void lock_release(struct lockdep_map *lock, int nested,
 			  unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_release(lock, nested, ip);
+
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -2966,6 +2963,16 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
+void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
+{
+	current->lockdep_reclaim_gfp = gfp_mask;
+}
+
+void lockdep_clear_current_reclaim_state(void)
+{
+	current->lockdep_reclaim_gfp = 0;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static int
 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
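These setters are the other half of the reclaim checking: lockdep_trace_alloc() marks already-held locks at allocation time, while the reclaim state marks locks taken during reclaim. The call sites added by this series sit in the direct-reclaim and kswapd paths under mm/, outside this kernel/-only diffstat; a hypothetical caller, with shrink_something() standing in for the actual reclaim work, would look like:

/* Sketch only -- not the actual mm/ call sites from this series. */
static unsigned long reclaim_with_lockdep(gfp_t gfp_mask)
{
	unsigned long progress;

	/* locks acquired from here on are marked USED_IN_RECLAIM_FS */
	lockdep_set_current_reclaim_state(gfp_mask);
	progress = shrink_something(gfp_mask);
	lockdep_clear_current_reclaim_state();

	return progress;
}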
@@ -3092,10 +3099,14 @@ found_it:
 	lock->ip = ip;
 }
 
+DEFINE_TRACE(lock_contended);
+
 void lock_contended(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_contended(lock, ip);
+
 	if (unlikely(!lock_stat))
 		return;
 
@@ -3111,10 +3122,14 @@ void lock_contended(struct lockdep_map *lock, unsigned long ip)
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
+DEFINE_TRACE(lock_acquired);
+
 void lock_acquired(struct lockdep_map *lock, unsigned long ip)
 {
 	unsigned long flags;
 
+	trace_lock_acquired(lock, ip);
+
 	if (unlikely(!lock_stat))
 		return;
 
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 56b196932c08..a2cc7e9a6e84 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -7,6 +7,45 @@
  */
 
 /*
+ * Lock-class usage-state bits:
+ */
+enum lock_usage_bit {
+#define LOCKDEP_STATE(__STATE)		\
+	LOCK_USED_IN_##__STATE,		\
+	LOCK_USED_IN_##__STATE##_READ,	\
+	LOCK_ENABLED_##__STATE,		\
+	LOCK_ENABLED_##__STATE##_READ,
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+	LOCK_USED,
+	LOCK_USAGE_STATES
+};
+
+/*
+ * Usage-state bitmasks:
+ */
+#define __LOCKF(__STATE)	LOCKF_##__STATE = (1 << LOCK_##__STATE),
+
+enum {
+#define LOCKDEP_STATE(__STATE)			\
+	__LOCKF(USED_IN_##__STATE)		\
+	__LOCKF(USED_IN_##__STATE##_READ)	\
+	__LOCKF(ENABLED_##__STATE)		\
+	__LOCKF(ENABLED_##__STATE##_READ)
+#include "lockdep_states.h"
+#undef LOCKDEP_STATE
+	__LOCKF(USED)
+};
+
+#define LOCKF_ENABLED_IRQ (LOCKF_ENABLED_HARDIRQ | LOCKF_ENABLED_SOFTIRQ)
+#define LOCKF_USED_IN_IRQ (LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)
+
+#define LOCKF_ENABLED_IRQ_READ \
+		(LOCKF_ENABLED_HARDIRQ_READ | LOCKF_ENABLED_SOFTIRQ_READ)
+#define LOCKF_USED_IN_IRQ_READ \
+		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+
+/*
  * MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
  * we track.
  *
@@ -31,8 +70,10 @@
 extern struct list_head all_lock_classes;
 extern struct lock_chain lock_chains[];
 
-extern void
-get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4);
+#define LOCK_USAGE_CHARS (1+LOCK_USAGE_STATES/2)
+
+extern void get_usage_chars(struct lock_class *class,
+			    char usage[LOCK_USAGE_CHARS]);
 
 extern const char * __get_key_name(struct lockdep_subclass_key *key, char *str);
 
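For reference, expanding the X-macros above for the three states currently listed in lockdep_states.h gives the following values (worked out by hand, not literal kernel source):

/*
 *	LOCK_USED_IN_HARDIRQ		=  0
 *	LOCK_USED_IN_HARDIRQ_READ	=  1
 *	LOCK_ENABLED_HARDIRQ		=  2
 *	LOCK_ENABLED_HARDIRQ_READ	=  3
 *	LOCK_USED_IN_SOFTIRQ		=  4
 *	LOCK_USED_IN_SOFTIRQ_READ	=  5
 *	LOCK_ENABLED_SOFTIRQ		=  6
 *	LOCK_ENABLED_SOFTIRQ_READ	=  7
 *	LOCK_USED_IN_RECLAIM_FS		=  8
 *	LOCK_USED_IN_RECLAIM_FS_READ	=  9
 *	LOCK_ENABLED_RECLAIM_FS		= 10
 *	LOCK_ENABLED_RECLAIM_FS_READ	= 11
 *	LOCK_USED			= 12
 *	LOCK_USAGE_STATES		= 13
 *
 * hence LOCK_USAGE_CHARS = 1 + 13/2 = 7: two usage characters per state
 * plus the terminating NUL written by get_usage_chars().
 */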
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 13716b813896..d7135aa2d2c4 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -84,7 +84,7 @@ static int l_show(struct seq_file *m, void *v)
 {
 	struct lock_class *class = v;
 	struct lock_list *entry;
-	char c1, c2, c3, c4;
+	char usage[LOCK_USAGE_CHARS];
 
 	if (v == SEQ_START_TOKEN) {
 		seq_printf(m, "all lock classes:\n");
@@ -100,8 +100,8 @@ static int l_show(struct seq_file *m, void *v)
 	seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
 #endif
 
-	get_usage_chars(class, &c1, &c2, &c3, &c4);
-	seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
+	get_usage_chars(class, usage);
+	seq_printf(m, " %s", usage);
 
 	seq_printf(m, ": ");
 	print_name(m, class);
@@ -300,27 +300,27 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 			nr_uncategorized++;
 		if (class->usage_mask & LOCKF_USED_IN_IRQ)
 			nr_irq_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_IRQS)
+		if (class->usage_mask & LOCKF_ENABLED_IRQ)
 			nr_irq_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
 			nr_softirq_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
+		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
 			nr_softirq_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
 			nr_hardirq_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
+		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
 			nr_hardirq_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
 			nr_irq_read_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_IRQS_READ)
+		if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
 			nr_irq_read_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
 			nr_softirq_read_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
+		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
 			nr_softirq_read_unsafe++;
 		if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
 			nr_hardirq_read_safe++;
-		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
+		if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
 			nr_hardirq_read_unsafe++;
 
 #ifdef CONFIG_PROVE_LOCKING
@@ -601,6 +601,10 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 static void seq_header(struct seq_file *m)
 {
 	seq_printf(m, "lock_stat version 0.3\n");
+
+	if (unlikely(!debug_locks))
+		seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
+
 	seq_line(m, '-', 0, 40 + 1 + 10 * (14 + 1));
 	seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s "
 			"%14s %14s\n",
diff --git a/kernel/lockdep_states.h b/kernel/lockdep_states.h
new file mode 100644
index 000000000000..995b0cc2b84c
--- /dev/null
+++ b/kernel/lockdep_states.h
@@ -0,0 +1,9 @@
+/*
+ * Lockdep states,
+ *
+ * please update XXX_LOCK_USAGE_STATES in include/linux/lockdep.h whenever
+ * you add one, or come up with a nice dynamic solution.
+ */
+LOCKDEP_STATE(HARDIRQ)
+LOCKDEP_STATE(SOFTIRQ)
+LOCKDEP_STATE(RECLAIM_FS)
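This new header is a classic X-macro list: each consumer in lockdep.c and lockdep_internals.h supplies its own LOCKDEP_STATE() definition before including it, so the usage-bit enum, the bitmask enum, the usage strings, the verbose-filter table and the mark_lock() cases all stay in sync from a single list. The pattern in isolation, with hypothetical file and identifier names:

/* states.h -- single source of truth; deliberately no include guard:
 *	STATE(HARDIRQ)
 *	STATE(SOFTIRQ)
 *	STATE(RECLAIM_FS)
 */

/* consumer 1: an enum */
enum state_id {
#define STATE(s)	STATE_##s,
#include "states.h"
#undef STATE
	NR_STATES
};

/* consumer 2: matching strings, generated from the same list */
static const char *state_names[] = {
#define STATE(s)	#s,
#include "states.h"
#undef STATE
};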
diff --git a/kernel/module.c b/kernel/module.c
index 1f0657ae555b..90a6d63d9211 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2769,7 +2769,7 @@ int is_module_address(unsigned long addr)
 
 
 /* Is this a valid kernel address? */
-__notrace_funcgraph struct module *__module_text_address(unsigned long addr)
+struct module *__module_text_address(unsigned long addr)
 {
 	struct module *mod;
 
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index 1d94160eb532..50d022e5a560 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -26,11 +26,6 @@
 /*
  * Must be called with lock->wait_lock held.
  */
-void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
-{
-	lock->owner = new_owner;
-}
-
 void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
 {
 	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
@@ -59,7 +54,6 @@ void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 	/* Mark the current thread as blocked on the lock: */
 	ti->task->blocked_on = waiter;
-	waiter->lock = lock;
 }
 
 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
@@ -82,7 +76,7 @@ void debug_mutex_unlock(struct mutex *lock)
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
+	mutex_clear_owner(lock);
 }
 
 void debug_mutex_init(struct mutex *lock, const char *name,
@@ -95,7 +89,6 @@ void debug_mutex_init(struct mutex *lock, const char *name,
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
-	lock->owner = NULL;
 	lock->magic = lock;
 }
 
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index babfbdfc534b..6b2d735846a5 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -13,14 +13,6 @@
 /*
  * This must be called with lock->wait_lock held.
  */
-extern void
-debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
-
-static inline void debug_mutex_clear_owner(struct mutex *lock)
-{
-	lock->owner = NULL;
-}
-
 extern void debug_mutex_lock_common(struct mutex *lock,
 				    struct mutex_waiter *waiter);
 extern void debug_mutex_wake_waiter(struct mutex *lock,
@@ -35,6 +27,16 @@ extern void debug_mutex_unlock(struct mutex *lock);
 extern void debug_mutex_init(struct mutex *lock, const char *name,
 			     struct lock_class_key *key);
 
+static inline void mutex_set_owner(struct mutex *lock)
+{
+	lock->owner = current_thread_info();
+}
+
+static inline void mutex_clear_owner(struct mutex *lock)
+{
+	lock->owner = NULL;
+}
+
 #define spin_lock_mutex(lock, flags)			\
 	do {						\
 		struct mutex *l = container_of(lock, struct mutex, wait_lock); \
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4f45d4b658ef..5d79781394a3 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -10,6 +10,11 @@
  * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
  * David Howells for suggestions and improvements.
  *
+ *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
+ *    from the -rt tree, where it was originally implemented for rtmutexes
+ *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
+ *    and Sven Dietrich.
+ *
  * Also see Documentation/mutex-design.txt.
  */
 #include <linux/mutex.h>
@@ -46,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
 	INIT_LIST_HEAD(&lock->wait_list);
+	mutex_clear_owner(lock);
 
 	debug_mutex_init(lock, name, key);
 }
@@ -91,6 +97,7 @@ void inline __sched mutex_lock(struct mutex *lock)
 	 * 'unlocked' into 'locked' state.
 	 */
 	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
+	mutex_set_owner(lock);
 }
 
 EXPORT_SYMBOL(mutex_lock);
@@ -115,6 +122,14 @@ void __sched mutex_unlock(struct mutex *lock)
 	 * The unlocking fastpath is the 0->1 transition from 'locked'
 	 * into 'unlocked' state:
 	 */
+#ifndef CONFIG_DEBUG_MUTEXES
+	/*
+	 * When debugging is enabled we must not clear the owner before time,
+	 * the slow path will always be taken, and that clears the owner field
+	 * after verifying that it was indeed current.
+	 */
+	mutex_clear_owner(lock);
+#endif
 	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 }
 
@@ -129,21 +144,75 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
-	unsigned int old_val;
 	unsigned long flags;
 
+	preempt_disable();
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+	/*
+	 * Optimistic spinning.
+	 *
+	 * We try to spin for acquisition when we find that there are no
+	 * pending waiters and the lock owner is currently running on a
+	 * (different) CPU.
+	 *
+	 * The rationale is that if the lock owner is running, it is likely to
+	 * release the lock soon.
+	 *
+	 * Since this needs the lock owner, and this mutex implementation
+	 * doesn't track the owner atomically in the lock field, we need to
+	 * track it non-atomically.
+	 *
+	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
+	 * to serialize everything.
+	 */
+
+	for (;;) {
+		struct thread_info *owner;
+
+		/*
+		 * If there's an owner, wait for it to either
+		 * release the lock or go to sleep.
+		 */
+		owner = ACCESS_ONCE(lock->owner);
+		if (owner && !mutex_spin_on_owner(lock, owner))
+			break;
+
+		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
+			lock_acquired(&lock->dep_map, ip);
+			mutex_set_owner(lock);
+			preempt_enable();
+			return 0;
+		}
+
+		/*
+		 * When there's no owner, we might have preempted between the
+		 * owner acquiring the lock and setting the owner field. If
+		 * we're an RT task that will live-lock because we won't let
+		 * the owner complete.
+		 */
+		if (!owner && (need_resched() || rt_task(task)))
+			break;
+
+		/*
+		 * The cpu_relax() call is a compiler barrier which forces
+		 * everything in this loop to be re-loaded. We don't need
+		 * memory barriers as we'll eventually observe the right
+		 * values at the cost of a few extra spins.
+		 */
+		cpu_relax();
+	}
+#endif
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
 	waiter.task = task;
 
-	old_val = atomic_xchg(&lock->count, -1);
-	if (old_val == 1)
+	if (atomic_xchg(&lock->count, -1) == 1)
 		goto done;
 
 	lock_contended(&lock->dep_map, ip);
@@ -158,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		 * that when we release the lock, we properly wake up the
 		 * other waiters:
 		 */
-		old_val = atomic_xchg(&lock->count, -1);
-		if (old_val == 1)
+		if (atomic_xchg(&lock->count, -1) == 1)
 			break;
 
 		/*
@@ -173,21 +241,22 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
+			preempt_enable();
 			return -EINTR;
 		}
 		__set_task_state(task, state);
 
 		/* didnt get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
-		schedule();
+		__schedule();
 		spin_lock_mutex(&lock->wait_lock, flags);
 	}
 
 done:
 	lock_acquired(&lock->dep_map, ip);
 	/* got the lock - rejoice! */
-	mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-	debug_mutex_set_owner(lock, task_thread_info(task));
+	mutex_remove_waiter(lock, &waiter, current_thread_info());
+	mutex_set_owner(lock);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -196,6 +265,7 @@ done:
 	spin_unlock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_free_waiter(&waiter);
+	preempt_enable();
 
 	return 0;
 }
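The spin loop above relies on mutex_spin_on_owner(), which this series adds to the scheduler rather than to kernel/mutex.c, so it does not appear in this excerpt. Roughly, and simplified under that assumption, it keeps spinning only while the recorded owner is still actively running on another CPU; owner_is_running() below is a hypothetical stand-in for the scheduler's runqueue check:

/* Simplified sketch, not the actual kernel/sched.c implementation. */
static int mutex_spin_on_owner_sketch(struct mutex *lock,
				      struct thread_info *owner)
{
	while (ACCESS_ONCE(lock->owner) == owner) {
		/* we need the CPU ourselves: stop spinning and block */
		if (need_resched())
			return 0;

		/* owner went to sleep while holding the lock: block too */
		if (!owner_is_running(owner))
			return 0;

		cpu_relax();
	}

	/* owner changed or released the lock: worth another trylock */
	return 1;
}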
@@ -222,7 +292,8 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
+				   subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -260,8 +331,6 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
 		wake_up_process(waiter->task);
 	}
 
-	debug_mutex_clear_owner(lock);
-
 	spin_unlock_mutex(&lock->wait_lock, flags);
 }
 
@@ -298,18 +367,30 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count); | |||
298 | */ | 367 | */ |
299 | int __sched mutex_lock_interruptible(struct mutex *lock) | 368 | int __sched mutex_lock_interruptible(struct mutex *lock) |
300 | { | 369 | { |
370 | int ret; | ||
371 | |||
301 | might_sleep(); | 372 | might_sleep(); |
302 | return __mutex_fastpath_lock_retval | 373 | ret = __mutex_fastpath_lock_retval |
303 | (&lock->count, __mutex_lock_interruptible_slowpath); | 374 | (&lock->count, __mutex_lock_interruptible_slowpath); |
375 | if (!ret) | ||
376 | mutex_set_owner(lock); | ||
377 | |||
378 | return ret; | ||
304 | } | 379 | } |
305 | 380 | ||
306 | EXPORT_SYMBOL(mutex_lock_interruptible); | 381 | EXPORT_SYMBOL(mutex_lock_interruptible); |
307 | 382 | ||
308 | int __sched mutex_lock_killable(struct mutex *lock) | 383 | int __sched mutex_lock_killable(struct mutex *lock) |
309 | { | 384 | { |
385 | int ret; | ||
386 | |||
310 | might_sleep(); | 387 | might_sleep(); |
311 | return __mutex_fastpath_lock_retval | 388 | ret = __mutex_fastpath_lock_retval |
312 | (&lock->count, __mutex_lock_killable_slowpath); | 389 | (&lock->count, __mutex_lock_killable_slowpath); |
390 | if (!ret) | ||
391 | mutex_set_owner(lock); | ||
392 | |||
393 | return ret; | ||
313 | } | 394 | } |
314 | EXPORT_SYMBOL(mutex_lock_killable); | 395 | EXPORT_SYMBOL(mutex_lock_killable); |
315 | 396 | ||
@@ -352,9 +433,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count) | |||
352 | 433 | ||
353 | prev = atomic_xchg(&lock->count, -1); | 434 | prev = atomic_xchg(&lock->count, -1); |
354 | if (likely(prev == 1)) { | 435 | if (likely(prev == 1)) { |
355 | debug_mutex_set_owner(lock, current_thread_info()); | 436 | mutex_set_owner(lock); |
356 | mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); | 437 | mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
357 | } | 438 | } |
439 | |||
358 | /* Set it back to 0 if there are no waiters: */ | 440 | /* Set it back to 0 if there are no waiters: */ |
359 | if (likely(list_empty(&lock->wait_list))) | 441 | if (likely(list_empty(&lock->wait_list))) |
360 | atomic_set(&lock->count, 0); | 442 | atomic_set(&lock->count, 0); |
@@ -380,8 +462,13 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count) | |||
380 | */ | 462 | */ |
381 | int __sched mutex_trylock(struct mutex *lock) | 463 | int __sched mutex_trylock(struct mutex *lock) |
382 | { | 464 | { |
383 | return __mutex_fastpath_trylock(&lock->count, | 465 | int ret; |
384 | __mutex_trylock_slowpath); | 466 | |
467 | ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath); | ||
468 | if (ret) | ||
469 | mutex_set_owner(lock); | ||
470 | |||
471 | return ret; | ||
385 | } | 472 | } |
386 | 473 | ||
387 | EXPORT_SYMBOL(mutex_trylock); | 474 | EXPORT_SYMBOL(mutex_trylock); |
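
The paths above all rely on the classic three-state mutex count convention: 1 means unlocked, 0 locked with no waiters, -1 locked with possible waiters, which is why the slowpath claims the lock only when atomic_xchg(&lock->count, -1) returns 1. A minimal sketch of that acquisition step (helper name hypothetical):

    /* Sketch of the acquisition step, mirroring __mutex_lock_common() above. */
    static int try_acquire(struct mutex *lock)
    {
            /*
             * Mark the lock contended (-1) unconditionally; only a previous
             * value of 1 (unlocked) means we now own it. Leaving -1 behind
             * even on success forces the eventual unlock through the
             * slowpath, where any waiters get woken.
             */
            return atomic_xchg(&lock->count, -1) == 1;
    }
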
diff --git a/kernel/mutex.h b/kernel/mutex.h index a075dafbb290..67578ca48f94 100644 --- a/kernel/mutex.h +++ b/kernel/mutex.h | |||
@@ -16,8 +16,26 @@ | |||
16 | #define mutex_remove_waiter(lock, waiter, ti) \ | 16 | #define mutex_remove_waiter(lock, waiter, ti) \ |
17 | __list_del((waiter)->list.prev, (waiter)->list.next) | 17 | __list_del((waiter)->list.prev, (waiter)->list.next) |
18 | 18 | ||
19 | #define debug_mutex_set_owner(lock, new_owner) do { } while (0) | 19 | #ifdef CONFIG_SMP |
20 | #define debug_mutex_clear_owner(lock) do { } while (0) | 20 | static inline void mutex_set_owner(struct mutex *lock) |
21 | { | ||
22 | lock->owner = current_thread_info(); | ||
23 | } | ||
24 | |||
25 | static inline void mutex_clear_owner(struct mutex *lock) | ||
26 | { | ||
27 | lock->owner = NULL; | ||
28 | } | ||
29 | #else | ||
30 | static inline void mutex_set_owner(struct mutex *lock) | ||
31 | { | ||
32 | } | ||
33 | |||
34 | static inline void mutex_clear_owner(struct mutex *lock) | ||
35 | { | ||
36 | } | ||
37 | #endif | ||
38 | |||
21 | #define debug_mutex_wake_waiter(lock, waiter) do { } while (0) | 39 | #define debug_mutex_wake_waiter(lock, waiter) do { } while (0) |
22 | #define debug_mutex_free_waiter(waiter) do { } while (0) | 40 | #define debug_mutex_free_waiter(waiter) do { } while (0) |
23 | #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0) | 41 | #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0) |
diff --git a/kernel/relay.c b/kernel/relay.c index 9d79b7854fa6..edc0ba6d8160 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -677,9 +677,7 @@ int relay_late_setup_files(struct rchan *chan, | |||
677 | */ | 677 | */ |
678 | for_each_online_cpu(i) { | 678 | for_each_online_cpu(i) { |
679 | if (unlikely(!chan->buf[i])) { | 679 | if (unlikely(!chan->buf[i])) { |
680 | printk(KERN_ERR "relay_late_setup_files: CPU %u " | 680 | WARN_ONCE(1, KERN_ERR "CPU has no buffer!\n"); |
681 | "has no buffer, it must have!\n", i); | ||
682 | BUG(); | ||
683 | err = -EINVAL; | 681 | err = -EINVAL; |
684 | break; | 682 | break; |
685 | } | 683 | } |
diff --git a/kernel/sched.c b/kernel/sched.c index 0a76d0b6f215..7299083e69e7 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -4404,10 +4404,7 @@ void scheduler_tick(void) | |||
4404 | #endif | 4404 | #endif |
4405 | } | 4405 | } |
4406 | 4406 | ||
4407 | #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ | 4407 | unsigned long get_parent_ip(unsigned long addr) |
4408 | defined(CONFIG_PREEMPT_TRACER)) | ||
4409 | |||
4410 | static inline unsigned long get_parent_ip(unsigned long addr) | ||
4411 | { | 4408 | { |
4412 | if (in_lock_functions(addr)) { | 4409 | if (in_lock_functions(addr)) { |
4413 | addr = CALLER_ADDR2; | 4410 | addr = CALLER_ADDR2; |
@@ -4417,6 +4414,9 @@ static inline unsigned long get_parent_ip(unsigned long addr) | |||
4417 | return addr; | 4414 | return addr; |
4418 | } | 4415 | } |
4419 | 4416 | ||
4417 | #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ | ||
4418 | defined(CONFIG_PREEMPT_TRACER)) | ||
4419 | |||
4420 | void __kprobes add_preempt_count(int val) | 4420 | void __kprobes add_preempt_count(int val) |
4421 | { | 4421 | { |
4422 | #ifdef CONFIG_DEBUG_PREEMPT | 4422 | #ifdef CONFIG_DEBUG_PREEMPT |
@@ -4543,15 +4543,13 @@ pick_next_task(struct rq *rq, struct task_struct *prev) | |||
4543 | /* | 4543 | /* |
4544 | * schedule() is the main scheduler function. | 4544 | * schedule() is the main scheduler function. |
4545 | */ | 4545 | */ |
4546 | asmlinkage void __sched schedule(void) | 4546 | asmlinkage void __sched __schedule(void) |
4547 | { | 4547 | { |
4548 | struct task_struct *prev, *next; | 4548 | struct task_struct *prev, *next; |
4549 | unsigned long *switch_count; | 4549 | unsigned long *switch_count; |
4550 | struct rq *rq; | 4550 | struct rq *rq; |
4551 | int cpu; | 4551 | int cpu; |
4552 | 4552 | ||
4553 | need_resched: | ||
4554 | preempt_disable(); | ||
4555 | cpu = smp_processor_id(); | 4553 | cpu = smp_processor_id(); |
4556 | rq = cpu_rq(cpu); | 4554 | rq = cpu_rq(cpu); |
4557 | rcu_qsctr_inc(cpu); | 4555 | rcu_qsctr_inc(cpu); |
@@ -4608,13 +4606,80 @@ need_resched_nonpreemptible: | |||
4608 | 4606 | ||
4609 | if (unlikely(reacquire_kernel_lock(current) < 0)) | 4607 | if (unlikely(reacquire_kernel_lock(current) < 0)) |
4610 | goto need_resched_nonpreemptible; | 4608 | goto need_resched_nonpreemptible; |
4609 | } | ||
4611 | 4610 | ||
4611 | asmlinkage void __sched schedule(void) | ||
4612 | { | ||
4613 | need_resched: | ||
4614 | preempt_disable(); | ||
4615 | __schedule(); | ||
4612 | preempt_enable_no_resched(); | 4616 | preempt_enable_no_resched(); |
4613 | if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) | 4617 | if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) |
4614 | goto need_resched; | 4618 | goto need_resched; |
4615 | } | 4619 | } |
4616 | EXPORT_SYMBOL(schedule); | 4620 | EXPORT_SYMBOL(schedule); |
4617 | 4621 | ||
4622 | #ifdef CONFIG_SMP | ||
4623 | /* | ||
4624 | * Look out! "owner" is an entirely speculative pointer | ||
4625 | * access and not reliable. | ||
4626 | */ | ||
4627 | int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner) | ||
4628 | { | ||
4629 | unsigned int cpu; | ||
4630 | struct rq *rq; | ||
4631 | |||
4632 | if (!sched_feat(OWNER_SPIN)) | ||
4633 | return 0; | ||
4634 | |||
4635 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
4636 | /* | ||
4637 | * Need to access the cpu field knowing that | ||
4638 | * DEBUG_PAGEALLOC could have unmapped it if | ||
4639 | * the mutex owner just released it and exited. | ||
4640 | */ | ||
4641 | if (probe_kernel_address(&owner->cpu, cpu)) | ||
4642 | goto out; | ||
4643 | #else | ||
4644 | cpu = owner->cpu; | ||
4645 | #endif | ||
4646 | |||
4647 | /* | ||
4648 | * Even if the access succeeded (likely case), | ||
4649 | * the cpu field may no longer be valid. | ||
4650 | */ | ||
4651 | if (cpu >= nr_cpumask_bits) | ||
4652 | goto out; | ||
4653 | |||
4654 | /* | ||
4655 | * We need to validate that we can do a | ||
4656 | * get_cpu() and that we have the percpu area. | ||
4657 | */ | ||
4658 | if (!cpu_online(cpu)) | ||
4659 | goto out; | ||
4660 | |||
4661 | rq = cpu_rq(cpu); | ||
4662 | |||
4663 | for (;;) { | ||
4664 | /* | ||
4665 | * Owner changed, break to re-assess state. | ||
4666 | */ | ||
4667 | if (lock->owner != owner) | ||
4668 | break; | ||
4669 | |||
4670 | /* | ||
4671 | * Is that owner really running on that cpu? | ||
4672 | */ | ||
4673 | if (task_thread_info(rq->curr) != owner || need_resched()) | ||
4674 | return 0; | ||
4675 | |||
4676 | cpu_relax(); | ||
4677 | } | ||
4678 | out: | ||
4679 | return 1; | ||
4680 | } | ||
4681 | #endif | ||
4682 | |||
4618 | #ifdef CONFIG_PREEMPT | 4683 | #ifdef CONFIG_PREEMPT |
4619 | /* | 4684 | /* |
4620 | * this is the entry point to schedule() from in-kernel preemption | 4685 | * this is the entry point to schedule() from in-kernel preemption |
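
mutex_spin_on_owner() above is the building block for adaptive mutex spinning: a contending task can busy-wait as long as the lock owner is actively running on another CPU, since the lock is then likely to be released soon. A hedged sketch of how a mutex slowpath can drive it (simplified; helper name hypothetical):

    /* Sketch: optimistic spinning step a mutex slowpath could run first. */
    static int mutex_try_spin(struct mutex *lock)
    {
            for (;;) {
                    struct thread_info *owner;

                    /* Owner went to sleep (or we must resched): stop spinning. */
                    owner = ACCESS_ONCE(lock->owner);
                    if (owner && !mutex_spin_on_owner(lock, owner))
                            return 0;       /* caller falls back to blocking */

                    /* Lock dropped? Grab it without touching the wait list. */
                    if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
                            return 1;

                    if (need_resched())
                            return 0;

                    cpu_relax();            /* be nice to an SMT sibling */
            }
    }

Note that the spinner uses cmpxchg(1, 0) rather than xchg: a spinner must never destroy the -1 "has waiters" state that the blocking path depends on.
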
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index a0b0852414cc..7ec82c1c61c5 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c | |||
@@ -24,11 +24,12 @@ | |||
24 | * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat | 24 | * The clock: sched_clock_cpu() is monotonic per cpu, and should be somewhat |
25 | * consistent between cpus (never more than 2 jiffies difference). | 25 | * consistent between cpus (never more than 2 jiffies difference). |
26 | */ | 26 | */ |
27 | #include <linux/sched.h> | ||
28 | #include <linux/percpu.h> | ||
29 | #include <linux/spinlock.h> | 27 | #include <linux/spinlock.h> |
30 | #include <linux/ktime.h> | 28 | #include <linux/hardirq.h> |
31 | #include <linux/module.h> | 29 | #include <linux/module.h> |
30 | #include <linux/percpu.h> | ||
31 | #include <linux/ktime.h> | ||
32 | #include <linux/sched.h> | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * Scheduler clock - returns current time in nanosec units. | 35 | * Scheduler clock - returns current time in nanosec units. |
@@ -43,6 +44,10 @@ unsigned long long __attribute__((weak)) sched_clock(void) | |||
43 | static __read_mostly int sched_clock_running; | 44 | static __read_mostly int sched_clock_running; |
44 | 45 | ||
45 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | 46 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK |
47 | __read_mostly int sched_clock_stable; | ||
48 | #else | ||
49 | static const int sched_clock_stable = 1; | ||
50 | #endif | ||
46 | 51 | ||
47 | struct sched_clock_data { | 52 | struct sched_clock_data { |
48 | /* | 53 | /* |
@@ -87,7 +92,7 @@ void sched_clock_init(void) | |||
87 | } | 92 | } |
88 | 93 | ||
89 | /* | 94 | /* |
90 | * min,max except they take wrapping into account | 95 | * min, max except they take wrapping into account |
91 | */ | 96 | */ |
92 | 97 | ||
93 | static inline u64 wrap_min(u64 x, u64 y) | 98 | static inline u64 wrap_min(u64 x, u64 y) |
@@ -116,10 +121,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now) | |||
116 | if (unlikely(delta < 0)) | 121 | if (unlikely(delta < 0)) |
117 | delta = 0; | 122 | delta = 0; |
118 | 123 | ||
124 | if (unlikely(!sched_clock_running)) | ||
125 | return 0ull; | ||
126 | |||
119 | /* | 127 | /* |
120 | * scd->clock = clamp(scd->tick_gtod + delta, | 128 | * scd->clock = clamp(scd->tick_gtod + delta, |
121 | * max(scd->tick_gtod, scd->clock), | 129 | * max(scd->tick_gtod, scd->clock), |
122 | * scd->tick_gtod + TICK_NSEC); | 130 | * scd->tick_gtod + TICK_NSEC); |
123 | */ | 131 | */ |
124 | 132 | ||
125 | clock = scd->tick_gtod + delta; | 133 | clock = scd->tick_gtod + delta; |
@@ -148,8 +156,20 @@ static void lock_double_clock(struct sched_clock_data *data1, | |||
148 | 156 | ||
149 | u64 sched_clock_cpu(int cpu) | 157 | u64 sched_clock_cpu(int cpu) |
150 | { | 158 | { |
151 | struct sched_clock_data *scd = cpu_sdc(cpu); | ||
152 | u64 now, clock, this_clock, remote_clock; | 159 | u64 now, clock, this_clock, remote_clock; |
160 | struct sched_clock_data *scd; | ||
161 | |||
162 | if (sched_clock_stable) | ||
163 | return sched_clock(); | ||
164 | |||
165 | scd = cpu_sdc(cpu); | ||
166 | |||
167 | /* | ||
168 | * Normally this is not called in NMI context - but if it is, | ||
169 | * trying to do any locking here is totally lethal. | ||
170 | */ | ||
171 | if (unlikely(in_nmi())) | ||
172 | return scd->clock; | ||
153 | 173 | ||
154 | if (unlikely(!sched_clock_running)) | 174 | if (unlikely(!sched_clock_running)) |
155 | return 0ull; | 175 | return 0ull; |
@@ -193,6 +213,8 @@ u64 sched_clock_cpu(int cpu) | |||
193 | return clock; | 213 | return clock; |
194 | } | 214 | } |
195 | 215 | ||
216 | #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK | ||
217 | |||
196 | void sched_clock_tick(void) | 218 | void sched_clock_tick(void) |
197 | { | 219 | { |
198 | struct sched_clock_data *scd = this_scd(); | 220 | struct sched_clock_data *scd = this_scd(); |
@@ -235,22 +257,7 @@ void sched_clock_idle_wakeup_event(u64 delta_ns) | |||
235 | } | 257 | } |
236 | EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); | 258 | EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event); |
237 | 259 | ||
238 | #else /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ | 260 | #endif /* CONFIG_HAVE_UNSTABLE_SCHED_CLOCK */ |
239 | |||
240 | void sched_clock_init(void) | ||
241 | { | ||
242 | sched_clock_running = 1; | ||
243 | } | ||
244 | |||
245 | u64 sched_clock_cpu(int cpu) | ||
246 | { | ||
247 | if (unlikely(!sched_clock_running)) | ||
248 | return 0; | ||
249 | |||
250 | return sched_clock(); | ||
251 | } | ||
252 | |||
253 | #endif | ||
254 | 261 | ||
255 | unsigned long long cpu_clock(int cpu) | 262 | unsigned long long cpu_clock(int cpu) |
256 | { | 263 | { |
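
With sched_clock_stable set, sched_clock_cpu() now collapses to the raw sched_clock(), skipping the per-cpu filtering entirely. On unstable clocks the filter still applies the clamp quoted in the comment above, built from the wrap-safe min/max helpers defined in this file; spelled out, the update amounts to:

    /* Sketch of the clamp in __update_sched_clock(), wrap-safe u64 math. */
    static u64 clamp_clock(u64 tick_gtod, u64 old_clock, u64 delta)
    {
            u64 clock = tick_gtod + delta;

            /* never go backwards ... */
            clock = wrap_max(clock, wrap_max(tick_gtod, old_clock));
            /* ... and never run ahead of gtod by more than one tick */
            clock = wrap_min(clock, tick_gtod + TICK_NSEC);

            return clock;
    }
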
diff --git a/kernel/sched_features.h b/kernel/sched_features.h index da5d93b5d2c6..07bc02e99ab1 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h | |||
@@ -13,3 +13,4 @@ SCHED_FEAT(LB_WAKEUP_UPDATE, 1) | |||
13 | SCHED_FEAT(ASYM_EFF_LOAD, 1) | 13 | SCHED_FEAT(ASYM_EFF_LOAD, 1) |
14 | SCHED_FEAT(WAKEUP_OVERLAP, 0) | 14 | SCHED_FEAT(WAKEUP_OVERLAP, 0) |
15 | SCHED_FEAT(LAST_BUDDY, 1) | 15 | SCHED_FEAT(LAST_BUDDY, 1) |
16 | SCHED_FEAT(OWNER_SPIN, 1) | ||
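
Like the other SCHED_FEAT flags, OWNER_SPIN is runtime-toggleable, so the new adaptive-spinning heuristic can presumably be switched off on a debugfs-enabled kernel with:

    echo NO_OWNER_SPIN > /sys/kernel/debug/sched_features
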
diff --git a/kernel/softirq.c b/kernel/softirq.c index 0365b4899a3d..98dd68eea9e6 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/freezer.h> | 21 | #include <linux/freezer.h> |
22 | #include <linux/kthread.h> | 22 | #include <linux/kthread.h> |
23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
24 | #include <linux/ftrace.h> | ||
24 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
25 | #include <linux/tick.h> | 26 | #include <linux/tick.h> |
26 | 27 | ||
@@ -79,13 +80,23 @@ static void __local_bh_disable(unsigned long ip) | |||
79 | WARN_ON_ONCE(in_irq()); | 80 | WARN_ON_ONCE(in_irq()); |
80 | 81 | ||
81 | raw_local_irq_save(flags); | 82 | raw_local_irq_save(flags); |
82 | add_preempt_count(SOFTIRQ_OFFSET); | 83 | /* |
84 | * The preempt tracer hooks into add_preempt_count and will break | ||
85 | * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET | ||
86 | * is set and before current->softirq_enabled is cleared. | ||
87 | * We must manually increment preempt_count here and manually | ||
88 | * call the trace_preempt_off later. | ||
89 | */ | ||
90 | preempt_count() += SOFTIRQ_OFFSET; | ||
83 | /* | 91 | /* |
84 | * Were softirqs turned off above: | 92 | * Were softirqs turned off above: |
85 | */ | 93 | */ |
86 | if (softirq_count() == SOFTIRQ_OFFSET) | 94 | if (softirq_count() == SOFTIRQ_OFFSET) |
87 | trace_softirqs_off(ip); | 95 | trace_softirqs_off(ip); |
88 | raw_local_irq_restore(flags); | 96 | raw_local_irq_restore(flags); |
97 | |||
98 | if (preempt_count() == SOFTIRQ_OFFSET) | ||
99 | trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); | ||
89 | } | 100 | } |
90 | #else /* !CONFIG_TRACE_IRQFLAGS */ | 101 | #else /* !CONFIG_TRACE_IRQFLAGS */ |
91 | static inline void __local_bh_disable(unsigned long ip) | 102 | static inline void __local_bh_disable(unsigned long ip) |
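
The reason __local_bh_disable() open-codes the preempt_count update becomes clear from what add_preempt_count() does on a preempt-tracing kernel, roughly the following (debug checks omitted):

    void add_preempt_count(int val)
    {
            preempt_count() += val;
            /* first disable level: record where preemption went off */
            if (preempt_count() == val)
                    trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
    }

Calling that here would enter the tracer, and through it lockdep, in the window where SOFTIRQ_OFFSET is already set but the softirq bookkeeping is not yet consistent; hence the manual increment followed by an explicit trace_preempt_off().
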
diff --git a/kernel/timer.c b/kernel/timer.c index 13dd64fe143d..ef1c385bc572 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -491,14 +491,18 @@ static inline void debug_timer_free(struct timer_list *timer) | |||
491 | debug_object_free(timer, &timer_debug_descr); | 491 | debug_object_free(timer, &timer_debug_descr); |
492 | } | 492 | } |
493 | 493 | ||
494 | static void __init_timer(struct timer_list *timer); | 494 | static void __init_timer(struct timer_list *timer, |
495 | const char *name, | ||
496 | struct lock_class_key *key); | ||
495 | 497 | ||
496 | void init_timer_on_stack(struct timer_list *timer) | 498 | void init_timer_on_stack_key(struct timer_list *timer, |
499 | const char *name, | ||
500 | struct lock_class_key *key) | ||
497 | { | 501 | { |
498 | debug_object_init_on_stack(timer, &timer_debug_descr); | 502 | debug_object_init_on_stack(timer, &timer_debug_descr); |
499 | __init_timer(timer); | 503 | __init_timer(timer, name, key); |
500 | } | 504 | } |
501 | EXPORT_SYMBOL_GPL(init_timer_on_stack); | 505 | EXPORT_SYMBOL_GPL(init_timer_on_stack_key); |
502 | 506 | ||
503 | void destroy_timer_on_stack(struct timer_list *timer) | 507 | void destroy_timer_on_stack(struct timer_list *timer) |
504 | { | 508 | { |
@@ -512,7 +516,9 @@ static inline void debug_timer_activate(struct timer_list *timer) { } | |||
512 | static inline void debug_timer_deactivate(struct timer_list *timer) { } | 516 | static inline void debug_timer_deactivate(struct timer_list *timer) { } |
513 | #endif | 517 | #endif |
514 | 518 | ||
515 | static void __init_timer(struct timer_list *timer) | 519 | static void __init_timer(struct timer_list *timer, |
520 | const char *name, | ||
521 | struct lock_class_key *key) | ||
516 | { | 522 | { |
517 | timer->entry.next = NULL; | 523 | timer->entry.next = NULL; |
518 | timer->base = __raw_get_cpu_var(tvec_bases); | 524 | timer->base = __raw_get_cpu_var(tvec_bases); |
@@ -521,6 +527,7 @@ static void __init_timer(struct timer_list *timer) | |||
521 | timer->start_pid = -1; | 527 | timer->start_pid = -1; |
522 | memset(timer->start_comm, 0, TASK_COMM_LEN); | 528 | memset(timer->start_comm, 0, TASK_COMM_LEN); |
523 | #endif | 529 | #endif |
530 | lockdep_init_map(&timer->lockdep_map, name, key, 0); | ||
524 | } | 531 | } |
525 | 532 | ||
526 | /** | 533 | /** |
@@ -530,19 +537,23 @@ static void __init_timer(struct timer_list *timer) | |||
530 | * init_timer() must be done to a timer prior calling *any* of the | 537 | * init_timer() must be done to a timer prior calling *any* of the |
531 | * other timer functions. | 538 | * other timer functions. |
532 | */ | 539 | */ |
533 | void init_timer(struct timer_list *timer) | 540 | void init_timer_key(struct timer_list *timer, |
541 | const char *name, | ||
542 | struct lock_class_key *key) | ||
534 | { | 543 | { |
535 | debug_timer_init(timer); | 544 | debug_timer_init(timer); |
536 | __init_timer(timer); | 545 | __init_timer(timer, name, key); |
537 | } | 546 | } |
538 | EXPORT_SYMBOL(init_timer); | 547 | EXPORT_SYMBOL(init_timer_key); |
539 | 548 | ||
540 | void init_timer_deferrable(struct timer_list *timer) | 549 | void init_timer_deferrable_key(struct timer_list *timer, |
550 | const char *name, | ||
551 | struct lock_class_key *key) | ||
541 | { | 552 | { |
542 | init_timer(timer); | 553 | init_timer_key(timer, name, key); |
543 | timer_set_deferrable(timer); | 554 | timer_set_deferrable(timer); |
544 | } | 555 | } |
545 | EXPORT_SYMBOL(init_timer_deferrable); | 556 | EXPORT_SYMBOL(init_timer_deferrable_key); |
546 | 557 | ||
547 | static inline void detach_timer(struct timer_list *timer, | 558 | static inline void detach_timer(struct timer_list *timer, |
548 | int clear_pending) | 559 | int clear_pending) |
@@ -789,6 +800,15 @@ EXPORT_SYMBOL(try_to_del_timer_sync); | |||
789 | */ | 800 | */ |
790 | int del_timer_sync(struct timer_list *timer) | 801 | int del_timer_sync(struct timer_list *timer) |
791 | { | 802 | { |
803 | #ifdef CONFIG_LOCKDEP | ||
804 | unsigned long flags; | ||
805 | |||
806 | local_irq_save(flags); | ||
807 | lock_map_acquire(&timer->lockdep_map); | ||
808 | lock_map_release(&timer->lockdep_map); | ||
809 | local_irq_restore(flags); | ||
810 | #endif | ||
811 | |||
792 | for (;;) { | 812 | for (;;) { |
793 | int ret = try_to_del_timer_sync(timer); | 813 | int ret = try_to_del_timer_sync(timer); |
794 | if (ret >= 0) | 814 | if (ret >= 0) |
@@ -861,10 +881,36 @@ static inline void __run_timers(struct tvec_base *base) | |||
861 | 881 | ||
862 | set_running_timer(base, timer); | 882 | set_running_timer(base, timer); |
863 | detach_timer(timer, 1); | 883 | detach_timer(timer, 1); |
884 | |||
864 | spin_unlock_irq(&base->lock); | 885 | spin_unlock_irq(&base->lock); |
865 | { | 886 | { |
866 | int preempt_count = preempt_count(); | 887 | int preempt_count = preempt_count(); |
888 | |||
889 | #ifdef CONFIG_LOCKDEP | ||
890 | /* | ||
891 | * It is permissible to free the timer from | ||
892 | * inside the function that is called from | ||
893 | * it; we need to take this into account for | ||
894 | * lockdep too. To avoid bogus "held lock | ||
895 | * freed" warnings as well as problems when | ||
896 | * looking into timer->lockdep_map, make a | ||
897 | * copy and use that here. | ||
898 | */ | ||
899 | struct lockdep_map lockdep_map = | ||
900 | timer->lockdep_map; | ||
901 | #endif | ||
902 | /* | ||
903 | * Couple the lock chain with the lock chain at | ||
904 | * del_timer_sync() by acquiring the lock_map | ||
905 | * around the fn() call here and in | ||
906 | * del_timer_sync(). | ||
907 | */ | ||
908 | lock_map_acquire(&lockdep_map); | ||
909 | |||
867 | fn(data); | 910 | fn(data); |
911 | |||
912 | lock_map_release(&lockdep_map); | ||
913 | |||
868 | if (preempt_count != preempt_count()) { | 914 | if (preempt_count != preempt_count()) { |
869 | printk(KERN_ERR "huh, entered %p " | 915 | printk(KERN_ERR "huh, entered %p " |
870 | "with preempt_count %08x, exited" | 916 | "with preempt_count %08x, exited" |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig index 34e707e5ab87..5d733da5345a 100644 --- a/kernel/trace/Kconfig +++ b/kernel/trace/Kconfig | |||
@@ -9,6 +9,9 @@ config USER_STACKTRACE_SUPPORT | |||
9 | config NOP_TRACER | 9 | config NOP_TRACER |
10 | bool | 10 | bool |
11 | 11 | ||
12 | config HAVE_FTRACE_NMI_ENTER | ||
13 | bool | ||
14 | |||
12 | config HAVE_FUNCTION_TRACER | 15 | config HAVE_FUNCTION_TRACER |
13 | bool | 16 | bool |
14 | 17 | ||
@@ -37,6 +40,11 @@ config TRACER_MAX_TRACE | |||
37 | config RING_BUFFER | 40 | config RING_BUFFER |
38 | bool | 41 | bool |
39 | 42 | ||
43 | config FTRACE_NMI_ENTER | ||
44 | bool | ||
45 | depends on HAVE_FTRACE_NMI_ENTER | ||
46 | default y | ||
47 | |||
40 | config TRACING | 48 | config TRACING |
41 | bool | 49 | bool |
42 | select DEBUG_FS | 50 | select DEBUG_FS |
@@ -45,12 +53,22 @@ config TRACING | |||
45 | select TRACEPOINTS | 53 | select TRACEPOINTS |
46 | select NOP_TRACER | 54 | select NOP_TRACER |
47 | 55 | ||
56 | # | ||
57 | # Minimum requirements an architecture has to meet for us to | ||
58 | # be able to offer generic tracing facilities: | ||
59 | # | ||
60 | config TRACING_SUPPORT | ||
61 | bool | ||
62 | depends on TRACE_IRQFLAGS_SUPPORT | ||
63 | depends on STACKTRACE_SUPPORT | ||
64 | |||
65 | if TRACING_SUPPORT | ||
66 | |||
48 | menu "Tracers" | 67 | menu "Tracers" |
49 | 68 | ||
50 | config FUNCTION_TRACER | 69 | config FUNCTION_TRACER |
51 | bool "Kernel Function Tracer" | 70 | bool "Kernel Function Tracer" |
52 | depends on HAVE_FUNCTION_TRACER | 71 | depends on HAVE_FUNCTION_TRACER |
53 | depends on DEBUG_KERNEL | ||
54 | select FRAME_POINTER | 72 | select FRAME_POINTER |
55 | select KALLSYMS | 73 | select KALLSYMS |
56 | select TRACING | 74 | select TRACING |
@@ -83,7 +101,6 @@ config IRQSOFF_TRACER | |||
83 | default n | 101 | default n |
84 | depends on TRACE_IRQFLAGS_SUPPORT | 102 | depends on TRACE_IRQFLAGS_SUPPORT |
85 | depends on GENERIC_TIME | 103 | depends on GENERIC_TIME |
86 | depends on DEBUG_KERNEL | ||
87 | select TRACE_IRQFLAGS | 104 | select TRACE_IRQFLAGS |
88 | select TRACING | 105 | select TRACING |
89 | select TRACER_MAX_TRACE | 106 | select TRACER_MAX_TRACE |
@@ -106,7 +123,6 @@ config PREEMPT_TRACER | |||
106 | default n | 123 | default n |
107 | depends on GENERIC_TIME | 124 | depends on GENERIC_TIME |
108 | depends on PREEMPT | 125 | depends on PREEMPT |
109 | depends on DEBUG_KERNEL | ||
110 | select TRACING | 126 | select TRACING |
111 | select TRACER_MAX_TRACE | 127 | select TRACER_MAX_TRACE |
112 | help | 128 | help |
@@ -127,13 +143,13 @@ config SYSPROF_TRACER | |||
127 | bool "Sysprof Tracer" | 143 | bool "Sysprof Tracer" |
128 | depends on X86 | 144 | depends on X86 |
129 | select TRACING | 145 | select TRACING |
146 | select CONTEXT_SWITCH_TRACER | ||
130 | help | 147 | help |
131 | This tracer provides the trace needed by the 'Sysprof' userspace | 148 | This tracer provides the trace needed by the 'Sysprof' userspace |
132 | tool. | 149 | tool. |
133 | 150 | ||
134 | config SCHED_TRACER | 151 | config SCHED_TRACER |
135 | bool "Scheduling Latency Tracer" | 152 | bool "Scheduling Latency Tracer" |
136 | depends on DEBUG_KERNEL | ||
137 | select TRACING | 153 | select TRACING |
138 | select CONTEXT_SWITCH_TRACER | 154 | select CONTEXT_SWITCH_TRACER |
139 | select TRACER_MAX_TRACE | 155 | select TRACER_MAX_TRACE |
@@ -143,16 +159,22 @@ config SCHED_TRACER | |||
143 | 159 | ||
144 | config CONTEXT_SWITCH_TRACER | 160 | config CONTEXT_SWITCH_TRACER |
145 | bool "Trace process context switches" | 161 | bool "Trace process context switches" |
146 | depends on DEBUG_KERNEL | ||
147 | select TRACING | 162 | select TRACING |
148 | select MARKERS | 163 | select MARKERS |
149 | help | 164 | help |
150 | This tracer gets called from the context switch and records | 165 | This tracer gets called from the context switch and records |
151 | all switching of tasks. | 166 | all switching of tasks. |
152 | 167 | ||
168 | config EVENT_TRACER | ||
169 | bool "Trace various events in the kernel" | ||
170 | select TRACING | ||
171 | help | ||
172 | This tracer hooks to various trace points in the kernel | ||
173 | allowing the user to pick and choose which trace points they | ||
174 | want to trace. | ||
175 | |||
153 | config BOOT_TRACER | 176 | config BOOT_TRACER |
154 | bool "Trace boot initcalls" | 177 | bool "Trace boot initcalls" |
155 | depends on DEBUG_KERNEL | ||
156 | select TRACING | 178 | select TRACING |
157 | select CONTEXT_SWITCH_TRACER | 179 | select CONTEXT_SWITCH_TRACER |
158 | help | 180 | help |
@@ -165,13 +187,11 @@ config BOOT_TRACER | |||
165 | representation of the delays during initcalls - but the raw | 187 | representation of the delays during initcalls - but the raw |
166 | /debug/tracing/trace text output is readable too. | 188 | /debug/tracing/trace text output is readable too. |
167 | 189 | ||
168 | ( Note that tracing self tests can't be enabled if this tracer is | 190 | You must pass in ftrace=initcall to the kernel command line |
169 | selected, because the self-tests are an initcall as well and that | 191 | to enable this on bootup. |
170 | would invalidate the boot trace. ) | ||
171 | 192 | ||
172 | config TRACE_BRANCH_PROFILING | 193 | config TRACE_BRANCH_PROFILING |
173 | bool "Trace likely/unlikely profiler" | 194 | bool "Trace likely/unlikely profiler" |
174 | depends on DEBUG_KERNEL | ||
175 | select TRACING | 195 | select TRACING |
176 | help | 196 | help |
177 | This tracer profiles all the likely and unlikely macros | 197 | This tracer profiles all the likely and unlikely macros |
@@ -224,7 +244,6 @@ config BRANCH_TRACER | |||
224 | 244 | ||
225 | config POWER_TRACER | 245 | config POWER_TRACER |
226 | bool "Trace power consumption behavior" | 246 | bool "Trace power consumption behavior" |
227 | depends on DEBUG_KERNEL | ||
228 | depends on X86 | 247 | depends on X86 |
229 | select TRACING | 248 | select TRACING |
230 | help | 249 | help |
@@ -236,7 +255,6 @@ config POWER_TRACER | |||
236 | config STACK_TRACER | 255 | config STACK_TRACER |
237 | bool "Trace max stack" | 256 | bool "Trace max stack" |
238 | depends on HAVE_FUNCTION_TRACER | 257 | depends on HAVE_FUNCTION_TRACER |
239 | depends on DEBUG_KERNEL | ||
240 | select FUNCTION_TRACER | 258 | select FUNCTION_TRACER |
241 | select STACKTRACE | 259 | select STACKTRACE |
242 | select KALLSYMS | 260 | select KALLSYMS |
@@ -266,11 +284,66 @@ config HW_BRANCH_TRACER | |||
266 | This tracer records all branches on the system in a circular | 284 | This tracer records all branches on the system in a circular |
267 | buffer giving access to the last N branches for each cpu. | 285 | buffer giving access to the last N branches for each cpu. |
268 | 286 | ||
287 | config KMEMTRACE | ||
288 | bool "Trace SLAB allocations" | ||
289 | select TRACING | ||
290 | help | ||
291 | kmemtrace provides tracing for slab allocator functions, such as | ||
292 | kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected | ||
293 | data is then fed to the userspace application in order to analyse | ||
294 | allocation hotspots, internal fragmentation and so on, making it | ||
295 | possible to see how well an allocator performs, as well as debug | ||
296 | and profile kernel code. | ||
297 | |||
298 | This requires a userspace application to use it. See | ||
299 | Documentation/vm/kmemtrace.txt for more information. | ||
300 | |||
301 | Saying Y will make the kernel somewhat larger and slower. However, | ||
302 | if you disable kmemtrace at run-time or boot-time, the performance | ||
303 | impact is minimal (depending on the arch the kernel is built for). | ||
304 | |||
305 | If unsure, say N. | ||
306 | |||
307 | config WORKQUEUE_TRACER | ||
308 | bool "Trace workqueues" | ||
309 | select TRACING | ||
310 | help | ||
311 | The workqueue tracer provides some statistical information | ||
312 | about each cpu workqueue thread, such as the number of | ||
313 | works inserted and executed since their creation. It can help | ||
314 | to evaluate the amount of work each of them has to perform. | ||
315 | For example, it can help a developer decide whether to use | ||
316 | a per-cpu workqueue instead of a single-threaded one. | ||
317 | |||
318 | config BLK_DEV_IO_TRACE | ||
319 | bool "Support for tracing block io actions" | ||
320 | depends on SYSFS | ||
321 | depends on BLOCK | ||
322 | select RELAY | ||
323 | select DEBUG_FS | ||
324 | select TRACEPOINTS | ||
325 | select TRACING | ||
326 | select STACKTRACE | ||
327 | help | ||
328 | Say Y here if you want to be able to trace the block layer actions | ||
329 | on a given queue. Tracing allows you to see any traffic happening | ||
330 | on a block device queue. For more information (and the userspace | ||
331 | support tools needed), fetch the blktrace tools from: | ||
332 | |||
333 | git://git.kernel.dk/blktrace.git | ||
334 | |||
335 | Tracing is also possible using the ftrace interface, e.g.: | ||
336 | |||
337 | echo 1 > /sys/block/sda/sda1/trace/enable | ||
338 | echo blk > /sys/kernel/debug/tracing/current_tracer | ||
339 | cat /sys/kernel/debug/tracing/trace_pipe | ||
340 | |||
341 | If unsure, say N. | ||
342 | |||
269 | config DYNAMIC_FTRACE | 343 | config DYNAMIC_FTRACE |
270 | bool "enable/disable ftrace tracepoints dynamically" | 344 | bool "enable/disable ftrace tracepoints dynamically" |
271 | depends on FUNCTION_TRACER | 345 | depends on FUNCTION_TRACER |
272 | depends on HAVE_DYNAMIC_FTRACE | 346 | depends on HAVE_DYNAMIC_FTRACE |
273 | depends on DEBUG_KERNEL | ||
274 | default y | 347 | default y |
275 | help | 348 | help |
276 | This option will modify all the calls to ftrace dynamically | 349 | This option will modify all the calls to ftrace dynamically |
@@ -296,7 +369,7 @@ config FTRACE_SELFTEST | |||
296 | 369 | ||
297 | config FTRACE_STARTUP_TEST | 370 | config FTRACE_STARTUP_TEST |
298 | bool "Perform a startup test on ftrace" | 371 | bool "Perform a startup test on ftrace" |
299 | depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER | 372 | depends on TRACING |
300 | select FTRACE_SELFTEST | 373 | select FTRACE_SELFTEST |
301 | help | 374 | help |
302 | This option performs a series of startup tests on ftrace. On bootup | 375 | This option performs a series of startup tests on ftrace. On bootup |
@@ -306,7 +379,7 @@ config FTRACE_STARTUP_TEST | |||
306 | 379 | ||
307 | config MMIOTRACE | 380 | config MMIOTRACE |
308 | bool "Memory mapped IO tracing" | 381 | bool "Memory mapped IO tracing" |
309 | depends on HAVE_MMIOTRACE_SUPPORT && DEBUG_KERNEL && PCI | 382 | depends on HAVE_MMIOTRACE_SUPPORT && PCI |
310 | select TRACING | 383 | select TRACING |
311 | help | 384 | help |
312 | Mmiotrace traces Memory Mapped I/O access and is meant for | 385 | Mmiotrace traces Memory Mapped I/O access and is meant for |
@@ -328,3 +401,6 @@ config MMIOTRACE_TEST | |||
328 | Say N, unless you absolutely know what you are doing. | 401 | Say N, unless you absolutely know what you are doing. |
329 | 402 | ||
330 | endmenu | 403 | endmenu |
404 | |||
405 | endif # TRACING_SUPPORT | ||
406 | |||
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index 349d5a93653f..c931fe0560cb 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile | |||
@@ -19,6 +19,9 @@ obj-$(CONFIG_FUNCTION_TRACER) += libftrace.o | |||
19 | obj-$(CONFIG_RING_BUFFER) += ring_buffer.o | 19 | obj-$(CONFIG_RING_BUFFER) += ring_buffer.o |
20 | 20 | ||
21 | obj-$(CONFIG_TRACING) += trace.o | 21 | obj-$(CONFIG_TRACING) += trace.o |
22 | obj-$(CONFIG_TRACING) += trace_clock.o | ||
23 | obj-$(CONFIG_TRACING) += trace_output.o | ||
24 | obj-$(CONFIG_TRACING) += trace_stat.o | ||
22 | obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o | 25 | obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o |
23 | obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o | 26 | obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o |
24 | obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o | 27 | obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o |
@@ -33,5 +36,10 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o | |||
33 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o | 36 | obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o |
34 | obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o | 37 | obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o |
35 | obj-$(CONFIG_POWER_TRACER) += trace_power.o | 38 | obj-$(CONFIG_POWER_TRACER) += trace_power.o |
39 | obj-$(CONFIG_KMEMTRACE) += kmemtrace.o | ||
40 | obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o | ||
41 | obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o | ||
42 | obj-$(CONFIG_EVENT_TRACER) += trace_events.o | ||
43 | obj-$(CONFIG_EVENT_TRACER) += events.o | ||
36 | 44 | ||
37 | libftrace-y := ftrace.o | 45 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c new file mode 100644 index 000000000000..d24a10b8411a --- /dev/null +++ b/kernel/trace/blktrace.c | |||
@@ -0,0 +1,1537 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * along with this program; if not, write to the Free Software | ||
15 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
16 | * | ||
17 | */ | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/blkdev.h> | ||
20 | #include <linux/blktrace_api.h> | ||
21 | #include <linux/percpu.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/mutex.h> | ||
24 | #include <linux/debugfs.h> | ||
25 | #include <linux/time.h> | ||
26 | #include <trace/block.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | #include "trace_output.h" | ||
29 | |||
30 | static unsigned int blktrace_seq __read_mostly = 1; | ||
31 | |||
32 | static struct trace_array *blk_tr; | ||
33 | static int __read_mostly blk_tracer_enabled; | ||
34 | |||
35 | /* Select an alternative, minimalistic output than the original one */ | ||
36 | #define TRACE_BLK_OPT_CLASSIC 0x1 | ||
37 | |||
38 | static struct tracer_opt blk_tracer_opts[] = { | ||
39 | /* Default disable the minimalistic output */ | ||
40 | { TRACER_OPT(blk_classic, TRACE_BLK_OPT_CLASSIC) }, | ||
41 | { } | ||
42 | }; | ||
43 | |||
44 | static struct tracer_flags blk_tracer_flags = { | ||
45 | .val = 0, | ||
46 | .opts = blk_tracer_opts, | ||
47 | }; | ||
48 | |||
49 | /* Global reference count of probes */ | ||
50 | static DEFINE_MUTEX(blk_probe_mutex); | ||
51 | static atomic_t blk_probes_ref = ATOMIC_INIT(0); | ||
52 | |||
53 | static int blk_register_tracepoints(void); | ||
54 | static void blk_unregister_tracepoints(void); | ||
55 | |||
56 | /* | ||
57 | * Send out a notify message. | ||
58 | */ | ||
59 | static void trace_note(struct blk_trace *bt, pid_t pid, int action, | ||
60 | const void *data, size_t len) | ||
61 | { | ||
62 | struct blk_io_trace *t; | ||
63 | |||
64 | if (!bt->rchan) | ||
65 | return; | ||
66 | |||
67 | t = relay_reserve(bt->rchan, sizeof(*t) + len); | ||
68 | if (t) { | ||
69 | const int cpu = smp_processor_id(); | ||
70 | |||
71 | t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; | ||
72 | t->time = ktime_to_ns(ktime_get()); | ||
73 | t->device = bt->dev; | ||
74 | t->action = action; | ||
75 | t->pid = pid; | ||
76 | t->cpu = cpu; | ||
77 | t->pdu_len = len; | ||
78 | memcpy((void *) t + sizeof(*t), data, len); | ||
79 | } | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Send out a notify for this process, if we haven't done so since a trace | ||
84 | * started | ||
85 | */ | ||
86 | static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk) | ||
87 | { | ||
88 | tsk->btrace_seq = blktrace_seq; | ||
89 | trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm)); | ||
90 | } | ||
91 | |||
92 | static void trace_note_time(struct blk_trace *bt) | ||
93 | { | ||
94 | struct timespec now; | ||
95 | unsigned long flags; | ||
96 | u32 words[2]; | ||
97 | |||
98 | getnstimeofday(&now); | ||
99 | words[0] = now.tv_sec; | ||
100 | words[1] = now.tv_nsec; | ||
101 | |||
102 | local_irq_save(flags); | ||
103 | trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words)); | ||
104 | local_irq_restore(flags); | ||
105 | } | ||
106 | |||
107 | void __trace_note_message(struct blk_trace *bt, const char *fmt, ...) | ||
108 | { | ||
109 | int n; | ||
110 | va_list args; | ||
111 | unsigned long flags; | ||
112 | char *buf; | ||
113 | |||
114 | if (blk_tr) { | ||
115 | va_start(args, fmt); | ||
116 | ftrace_vprintk(fmt, args); | ||
117 | va_end(args); | ||
118 | return; | ||
119 | } | ||
120 | |||
121 | if (!bt->msg_data) | ||
122 | return; | ||
123 | |||
124 | local_irq_save(flags); | ||
125 | buf = per_cpu_ptr(bt->msg_data, smp_processor_id()); | ||
126 | va_start(args, fmt); | ||
127 | n = vscnprintf(buf, BLK_TN_MAX_MSG, fmt, args); | ||
128 | va_end(args); | ||
129 | |||
130 | trace_note(bt, 0, BLK_TN_MESSAGE, buf, n); | ||
131 | local_irq_restore(flags); | ||
132 | } | ||
133 | EXPORT_SYMBOL_GPL(__trace_note_message); | ||
134 | |||
135 | static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector, | ||
136 | pid_t pid) | ||
137 | { | ||
138 | if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0) | ||
139 | return 1; | ||
140 | if (sector < bt->start_lba || sector > bt->end_lba) | ||
141 | return 1; | ||
142 | if (bt->pid && pid != bt->pid) | ||
143 | return 1; | ||
144 | |||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * Data direction bit lookup | ||
150 | */ | ||
151 | static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), | ||
152 | BLK_TC_ACT(BLK_TC_WRITE) }; | ||
153 | |||
154 | /* The ilog2() calls fall out because they're constant */ | ||
155 | #define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \ | ||
156 | (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name)) | ||
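/*
 * Worked example (illustrative): with rw = (1 << BIO_RW_SYNCIO),
 * MASK_TC_BIT(rw, SYNCIO) isolates the BIO_RW_SYNCIO bit and shifts
 * it left by ilog2(BLK_TC_SYNCIO) + BLK_TC_SHIFT - BIO_RW_SYNCIO,
 * landing it exactly on the BLK_TC_ACT(BLK_TC_SYNCIO) bit of the
 * action mask. All operands are compile-time constants, so each
 * MASK_TC_BIT() use folds down to one "and" and one shift.
 */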
157 | |||
158 | /* | ||
159 | * The worker for the various blk_add_trace*() types. Fills out a | ||
160 | * blk_io_trace structure and places it in a per-cpu subbuffer. | ||
161 | */ | ||
162 | static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes, | ||
163 | int rw, u32 what, int error, int pdu_len, void *pdu_data) | ||
164 | { | ||
165 | struct task_struct *tsk = current; | ||
166 | struct ring_buffer_event *event = NULL; | ||
167 | struct blk_io_trace *t; | ||
168 | unsigned long flags = 0; | ||
169 | unsigned long *sequence; | ||
170 | pid_t pid; | ||
171 | int cpu, pc = 0; | ||
172 | |||
173 | if (unlikely(bt->trace_state != Blktrace_running || | ||
174 | !blk_tracer_enabled)) | ||
175 | return; | ||
176 | |||
177 | what |= ddir_act[rw & WRITE]; | ||
178 | what |= MASK_TC_BIT(rw, BARRIER); | ||
179 | what |= MASK_TC_BIT(rw, SYNCIO); | ||
180 | what |= MASK_TC_BIT(rw, AHEAD); | ||
181 | what |= MASK_TC_BIT(rw, META); | ||
182 | what |= MASK_TC_BIT(rw, DISCARD); | ||
183 | |||
184 | pid = tsk->pid; | ||
185 | if (unlikely(act_log_check(bt, what, sector, pid))) | ||
186 | return; | ||
187 | cpu = raw_smp_processor_id(); | ||
188 | |||
189 | if (blk_tr) { | ||
190 | tracing_record_cmdline(current); | ||
191 | |||
192 | pc = preempt_count(); | ||
193 | event = trace_buffer_lock_reserve(blk_tr, TRACE_BLK, | ||
194 | sizeof(*t) + pdu_len, | ||
195 | 0, pc); | ||
196 | if (!event) | ||
197 | return; | ||
198 | t = ring_buffer_event_data(event); | ||
199 | goto record_it; | ||
200 | } | ||
201 | |||
202 | /* | ||
203 | * A word about the locking here - we disable interrupts to reserve | ||
204 | * some space in the relay per-cpu buffer, to prevent an irq | ||
205 | * from coming in and stepping on our toes. | ||
206 | */ | ||
207 | local_irq_save(flags); | ||
208 | |||
209 | if (unlikely(tsk->btrace_seq != blktrace_seq)) | ||
210 | trace_note_tsk(bt, tsk); | ||
211 | |||
212 | t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len); | ||
213 | if (t) { | ||
214 | sequence = per_cpu_ptr(bt->sequence, cpu); | ||
215 | |||
216 | t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION; | ||
217 | t->sequence = ++(*sequence); | ||
218 | t->time = ktime_to_ns(ktime_get()); | ||
219 | record_it: | ||
220 | /* | ||
221 | * These two are not needed in ftrace as they are in the | ||
222 | * generic trace_entry, filled by tracing_generic_entry_update, | ||
223 | * but for the trace_event->bin() synthesizer benefit we do it | ||
224 | * here too. | ||
225 | */ | ||
226 | t->cpu = cpu; | ||
227 | t->pid = pid; | ||
228 | |||
229 | t->sector = sector; | ||
230 | t->bytes = bytes; | ||
231 | t->action = what; | ||
232 | t->device = bt->dev; | ||
233 | t->error = error; | ||
234 | t->pdu_len = pdu_len; | ||
235 | |||
236 | if (pdu_len) | ||
237 | memcpy((void *) t + sizeof(*t), pdu_data, pdu_len); | ||
238 | |||
239 | if (blk_tr) { | ||
240 | trace_buffer_unlock_commit(blk_tr, event, 0, pc); | ||
241 | return; | ||
242 | } | ||
243 | } | ||
244 | |||
245 | local_irq_restore(flags); | ||
246 | } | ||
247 | |||
248 | static struct dentry *blk_tree_root; | ||
249 | static DEFINE_MUTEX(blk_tree_mutex); | ||
250 | |||
251 | static void blk_trace_cleanup(struct blk_trace *bt) | ||
252 | { | ||
253 | debugfs_remove(bt->msg_file); | ||
254 | debugfs_remove(bt->dropped_file); | ||
255 | relay_close(bt->rchan); | ||
256 | free_percpu(bt->sequence); | ||
257 | free_percpu(bt->msg_data); | ||
258 | kfree(bt); | ||
259 | mutex_lock(&blk_probe_mutex); | ||
260 | if (atomic_dec_and_test(&blk_probes_ref)) | ||
261 | blk_unregister_tracepoints(); | ||
262 | mutex_unlock(&blk_probe_mutex); | ||
263 | } | ||
264 | |||
265 | int blk_trace_remove(struct request_queue *q) | ||
266 | { | ||
267 | struct blk_trace *bt; | ||
268 | |||
269 | bt = xchg(&q->blk_trace, NULL); | ||
270 | if (!bt) | ||
271 | return -EINVAL; | ||
272 | |||
273 | if (bt->trace_state == Blktrace_setup || | ||
274 | bt->trace_state == Blktrace_stopped) | ||
275 | blk_trace_cleanup(bt); | ||
276 | |||
277 | return 0; | ||
278 | } | ||
279 | EXPORT_SYMBOL_GPL(blk_trace_remove); | ||
280 | |||
281 | static int blk_dropped_open(struct inode *inode, struct file *filp) | ||
282 | { | ||
283 | filp->private_data = inode->i_private; | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | static ssize_t blk_dropped_read(struct file *filp, char __user *buffer, | ||
289 | size_t count, loff_t *ppos) | ||
290 | { | ||
291 | struct blk_trace *bt = filp->private_data; | ||
292 | char buf[16]; | ||
293 | |||
294 | snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped)); | ||
295 | |||
296 | return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf)); | ||
297 | } | ||
298 | |||
299 | static const struct file_operations blk_dropped_fops = { | ||
300 | .owner = THIS_MODULE, | ||
301 | .open = blk_dropped_open, | ||
302 | .read = blk_dropped_read, | ||
303 | }; | ||
304 | |||
305 | static int blk_msg_open(struct inode *inode, struct file *filp) | ||
306 | { | ||
307 | filp->private_data = inode->i_private; | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | static ssize_t blk_msg_write(struct file *filp, const char __user *buffer, | ||
313 | size_t count, loff_t *ppos) | ||
314 | { | ||
315 | char *msg; | ||
316 | struct blk_trace *bt; | ||
317 | |||
318 | if (count > BLK_TN_MAX_MSG) | ||
319 | return -EINVAL; | ||
320 | |||
321 | msg = kmalloc(count, GFP_KERNEL); | ||
322 | if (msg == NULL) | ||
323 | return -ENOMEM; | ||
324 | |||
325 | if (copy_from_user(msg, buffer, count)) { | ||
326 | kfree(msg); | ||
327 | return -EFAULT; | ||
328 | } | ||
329 | |||
330 | bt = filp->private_data; | ||
331 | __trace_note_message(bt, "%s", msg); | ||
332 | kfree(msg); | ||
333 | |||
334 | return count; | ||
335 | } | ||
336 | |||
337 | static const struct file_operations blk_msg_fops = { | ||
338 | .owner = THIS_MODULE, | ||
339 | .open = blk_msg_open, | ||
340 | .write = blk_msg_write, | ||
341 | }; | ||
342 | |||
343 | /* | ||
344 | * Keep track of how many times we encountered a full subbuffer, to aid | ||
345 | * the user space app in telling how many lost events there were. | ||
346 | */ | ||
347 | static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf, | ||
348 | void *prev_subbuf, size_t prev_padding) | ||
349 | { | ||
350 | struct blk_trace *bt; | ||
351 | |||
352 | if (!relay_buf_full(buf)) | ||
353 | return 1; | ||
354 | |||
355 | bt = buf->chan->private_data; | ||
356 | atomic_inc(&bt->dropped); | ||
357 | return 0; | ||
358 | } | ||
359 | |||
360 | static int blk_remove_buf_file_callback(struct dentry *dentry) | ||
361 | { | ||
362 | struct dentry *parent = dentry->d_parent; | ||
363 | debugfs_remove(dentry); | ||
364 | |||
365 | /* | ||
366 | * This will fail for all but the last file, but that is OK. What we | ||
367 | * care about is the top level buts->name directory going away when | ||
368 | * the last trace file is gone. Then we don't have to rmdir() it | ||
369 | * manually on trace stop, which nicely solves the issue with | ||
370 | * force-killing of running traces. | ||
371 | */ | ||
372 | |||
373 | debugfs_remove(parent); | ||
374 | return 0; | ||
375 | } | ||
376 | |||
377 | static struct dentry *blk_create_buf_file_callback(const char *filename, | ||
378 | struct dentry *parent, | ||
379 | int mode, | ||
380 | struct rchan_buf *buf, | ||
381 | int *is_global) | ||
382 | { | ||
383 | return debugfs_create_file(filename, mode, parent, buf, | ||
384 | &relay_file_operations); | ||
385 | } | ||
386 | |||
387 | static struct rchan_callbacks blk_relay_callbacks = { | ||
388 | .subbuf_start = blk_subbuf_start_callback, | ||
389 | .create_buf_file = blk_create_buf_file_callback, | ||
390 | .remove_buf_file = blk_remove_buf_file_callback, | ||
391 | }; | ||
392 | |||
393 | /* | ||
394 | * Setup everything required to start tracing | ||
395 | */ | ||
396 | int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | ||
397 | struct blk_user_trace_setup *buts) | ||
398 | { | ||
399 | struct blk_trace *old_bt, *bt = NULL; | ||
400 | struct dentry *dir = NULL; | ||
401 | int ret, i; | ||
402 | |||
403 | if (!buts->buf_size || !buts->buf_nr) | ||
404 | return -EINVAL; | ||
405 | |||
406 | strncpy(buts->name, name, BLKTRACE_BDEV_SIZE); | ||
407 | buts->name[BLKTRACE_BDEV_SIZE - 1] = '\0'; | ||
408 | |||
409 | /* | ||
410 | * some device names have larger paths - convert the slashes | ||
411 | * to underscores for this to work as expected | ||
412 | */ | ||
413 | for (i = 0; i < strlen(buts->name); i++) | ||
414 | if (buts->name[i] == '/') | ||
415 | buts->name[i] = '_'; | ||
416 | |||
417 | ret = -ENOMEM; | ||
418 | bt = kzalloc(sizeof(*bt), GFP_KERNEL); | ||
419 | if (!bt) | ||
420 | goto err; | ||
421 | |||
422 | bt->sequence = alloc_percpu(unsigned long); | ||
423 | if (!bt->sequence) | ||
424 | goto err; | ||
425 | |||
426 | bt->msg_data = __alloc_percpu(BLK_TN_MAX_MSG, __alignof__(char)); | ||
427 | if (!bt->msg_data) | ||
428 | goto err; | ||
429 | |||
430 | ret = -ENOENT; | ||
431 | |||
432 | if (!blk_tree_root) { | ||
433 | blk_tree_root = debugfs_create_dir("block", NULL); | ||
434 | if (!blk_tree_root) | ||
435 | return -ENOMEM; | ||
436 | } | ||
437 | |||
438 | dir = debugfs_create_dir(buts->name, blk_tree_root); | ||
439 | |||
440 | if (!dir) | ||
441 | goto err; | ||
442 | |||
443 | bt->dir = dir; | ||
444 | bt->dev = dev; | ||
445 | atomic_set(&bt->dropped, 0); | ||
446 | |||
447 | ret = -EIO; | ||
448 | bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, | ||
449 | &blk_dropped_fops); | ||
450 | if (!bt->dropped_file) | ||
451 | goto err; | ||
452 | |||
453 | bt->msg_file = debugfs_create_file("msg", 0222, dir, bt, &blk_msg_fops); | ||
454 | if (!bt->msg_file) | ||
455 | goto err; | ||
456 | |||
457 | bt->rchan = relay_open("trace", dir, buts->buf_size, | ||
458 | buts->buf_nr, &blk_relay_callbacks, bt); | ||
459 | if (!bt->rchan) | ||
460 | goto err; | ||
461 | |||
462 | bt->act_mask = buts->act_mask; | ||
463 | if (!bt->act_mask) | ||
464 | bt->act_mask = (u16) -1; | ||
465 | |||
466 | bt->start_lba = buts->start_lba; | ||
467 | bt->end_lba = buts->end_lba; | ||
468 | if (!bt->end_lba) | ||
469 | bt->end_lba = -1ULL; | ||
470 | |||
471 | bt->pid = buts->pid; | ||
472 | bt->trace_state = Blktrace_setup; | ||
473 | |||
474 | mutex_lock(&blk_probe_mutex); | ||
475 | if (atomic_add_return(1, &blk_probes_ref) == 1) { | ||
476 | ret = blk_register_tracepoints(); | ||
477 | if (ret) | ||
478 | goto probe_err; | ||
479 | } | ||
480 | mutex_unlock(&blk_probe_mutex); | ||
481 | |||
482 | ret = -EBUSY; | ||
483 | old_bt = xchg(&q->blk_trace, bt); | ||
484 | if (old_bt) { | ||
485 | (void) xchg(&q->blk_trace, old_bt); | ||
486 | goto err; | ||
487 | } | ||
488 | |||
489 | return 0; | ||
490 | probe_err: | ||
491 | atomic_dec(&blk_probes_ref); | ||
492 | mutex_unlock(&blk_probe_mutex); | ||
493 | err: | ||
494 | if (bt) { | ||
495 | if (bt->msg_file) | ||
496 | debugfs_remove(bt->msg_file); | ||
497 | if (bt->dropped_file) | ||
498 | debugfs_remove(bt->dropped_file); | ||
499 | free_percpu(bt->sequence); | ||
500 | free_percpu(bt->msg_data); | ||
501 | if (bt->rchan) | ||
502 | relay_close(bt->rchan); | ||
503 | kfree(bt); | ||
504 | } | ||
505 | return ret; | ||
506 | } | ||
507 | |||
508 | int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | ||
509 | char __user *arg) | ||
510 | { | ||
511 | struct blk_user_trace_setup buts; | ||
512 | int ret; | ||
513 | |||
514 | ret = copy_from_user(&buts, arg, sizeof(buts)); | ||
515 | if (ret) | ||
516 | return -EFAULT; | ||
517 | |||
518 | ret = do_blk_trace_setup(q, name, dev, &buts); | ||
519 | if (ret) | ||
520 | return ret; | ||
521 | |||
522 | if (copy_to_user(arg, &buts, sizeof(buts))) | ||
523 | return -EFAULT; | ||
524 | |||
525 | return 0; | ||
526 | } | ||
527 | EXPORT_SYMBOL_GPL(blk_trace_setup); | ||
528 | |||
529 | int blk_trace_startstop(struct request_queue *q, int start) | ||
530 | { | ||
531 | int ret; | ||
532 | struct blk_trace *bt = q->blk_trace; | ||
533 | |||
534 | if (bt == NULL) | ||
535 | return -EINVAL; | ||
536 | |||
537 | /* | ||
538 | * For starting a trace, we can transition from a setup or stopped | ||
539 | * trace. For stopping a trace, the state must be running | ||
540 | */ | ||
541 | ret = -EINVAL; | ||
542 | if (start) { | ||
543 | if (bt->trace_state == Blktrace_setup || | ||
544 | bt->trace_state == Blktrace_stopped) { | ||
545 | blktrace_seq++; | ||
546 | smp_mb(); | ||
547 | bt->trace_state = Blktrace_running; | ||
548 | |||
549 | trace_note_time(bt); | ||
550 | ret = 0; | ||
551 | } | ||
552 | } else { | ||
553 | if (bt->trace_state == Blktrace_running) { | ||
554 | bt->trace_state = Blktrace_stopped; | ||
555 | relay_flush(bt->rchan); | ||
556 | ret = 0; | ||
557 | } | ||
558 | } | ||
559 | |||
560 | return ret; | ||
561 | } | ||
562 | EXPORT_SYMBOL_GPL(blk_trace_startstop); | ||
563 | |||
564 | /** | ||
565 | * blk_trace_ioctl: - handle the ioctls associated with tracing | ||
566 | * @bdev: the block device | ||
567 | * @cmd: the ioctl cmd | ||
568 | * @arg: the argument data, if any | ||
569 | * | ||
570 | **/ | ||
571 | int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg) | ||
572 | { | ||
573 | struct request_queue *q; | ||
574 | int ret, start = 0; | ||
575 | char b[BDEVNAME_SIZE]; | ||
576 | |||
577 | q = bdev_get_queue(bdev); | ||
578 | if (!q) | ||
579 | return -ENXIO; | ||
580 | |||
581 | mutex_lock(&bdev->bd_mutex); | ||
582 | |||
583 | switch (cmd) { | ||
584 | case BLKTRACESETUP: | ||
585 | bdevname(bdev, b); | ||
586 | ret = blk_trace_setup(q, b, bdev->bd_dev, arg); | ||
587 | break; | ||
588 | case BLKTRACESTART: | ||
589 | start = 1; | ||
590 | case BLKTRACESTOP: | ||
591 | ret = blk_trace_startstop(q, start); | ||
592 | break; | ||
593 | case BLKTRACETEARDOWN: | ||
594 | ret = blk_trace_remove(q); | ||
595 | break; | ||
596 | default: | ||
597 | ret = -ENOTTY; | ||
598 | break; | ||
599 | } | ||
600 | |||
601 | mutex_unlock(&bdev->bd_mutex); | ||
602 | return ret; | ||
603 | } | ||
604 | |||
605 | /** | ||
606 | * blk_trace_shutdown: - stop and cleanup trace structures | ||
607 | * @q: the request queue associated with the device | ||
608 | * | ||
609 | **/ | ||
610 | void blk_trace_shutdown(struct request_queue *q) | ||
611 | { | ||
612 | if (q->blk_trace) { | ||
613 | blk_trace_startstop(q, 0); | ||
614 | blk_trace_remove(q); | ||
615 | } | ||
616 | } | ||
617 | |||
618 | /* | ||
619 | * blktrace probes | ||
620 | */ | ||
621 | |||
622 | /** | ||
623 | * blk_add_trace_rq - Add a trace for a request oriented action | ||
624 | * @q: queue the io is for | ||
625 | * @rq: the source request | ||
626 | * @what: the action | ||
627 | * | ||
628 | * Description: | ||
629 | * Records an action against a request. Will log the bio offset + size. | ||
630 | * | ||
631 | **/ | ||
632 | static void blk_add_trace_rq(struct request_queue *q, struct request *rq, | ||
633 | u32 what) | ||
634 | { | ||
635 | struct blk_trace *bt = q->blk_trace; | ||
636 | int rw = rq->cmd_flags & 0x03; | ||
637 | |||
638 | if (likely(!bt)) | ||
639 | return; | ||
640 | |||
641 | if (blk_discard_rq(rq)) | ||
642 | rw |= (1 << BIO_RW_DISCARD); | ||
643 | |||
644 | if (blk_pc_request(rq)) { | ||
645 | what |= BLK_TC_ACT(BLK_TC_PC); | ||
646 | __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, | ||
647 | sizeof(rq->cmd), rq->cmd); | ||
648 | } else { | ||
649 | what |= BLK_TC_ACT(BLK_TC_FS); | ||
650 | __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, | ||
651 | rw, what, rq->errors, 0, NULL); | ||
652 | } | ||
653 | } | ||
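
To make the action encoding concrete (my reading of the <linux/blktrace_api.h> definitions, not text from the patch): the low 16 bits of the action word name the action, the high 16 bits carry BLK_TC_* category flags, and BLK_TC_ACT(x) is (x) << BLK_TC_SHIFT with BLK_TC_SHIFT == 16. So for an issued filesystem request the function above ends up logging:

	/* BLK_TA_ISSUE already includes BLK_TC_ACT(BLK_TC_ISSUE); the
	 * FS (or PC) class is ORed in on top: */
	u32 action = BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_FS);
	/* == __BLK_TA_ISSUE | ((BLK_TC_ISSUE | BLK_TC_FS) << 16) */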
654 | |||
655 | static void blk_add_trace_rq_abort(struct request_queue *q, struct request *rq) | ||
656 | { | ||
657 | blk_add_trace_rq(q, rq, BLK_TA_ABORT); | ||
658 | } | ||
659 | |||
660 | static void blk_add_trace_rq_insert(struct request_queue *q, struct request *rq) | ||
661 | { | ||
662 | blk_add_trace_rq(q, rq, BLK_TA_INSERT); | ||
663 | } | ||
664 | |||
665 | static void blk_add_trace_rq_issue(struct request_queue *q, struct request *rq) | ||
666 | { | ||
667 | blk_add_trace_rq(q, rq, BLK_TA_ISSUE); | ||
668 | } | ||
669 | |||
670 | static void blk_add_trace_rq_requeue(struct request_queue *q, | ||
671 | struct request *rq) | ||
672 | { | ||
673 | blk_add_trace_rq(q, rq, BLK_TA_REQUEUE); | ||
674 | } | ||
675 | |||
676 | static void blk_add_trace_rq_complete(struct request_queue *q, | ||
677 | struct request *rq) | ||
678 | { | ||
679 | blk_add_trace_rq(q, rq, BLK_TA_COMPLETE); | ||
680 | } | ||
681 | |||
682 | /** | ||
683 | * blk_add_trace_bio - Add a trace for a bio-oriented action | ||
684 | * @q: queue the io is for | ||
685 | * @bio: the source bio | ||
686 | * @what: the action | ||
687 | * | ||
688 | * Description: | ||
689 | * Records an action against a bio. Will log the bio offset + size. | ||
690 | * | ||
691 | **/ | ||
692 | static void blk_add_trace_bio(struct request_queue *q, struct bio *bio, | ||
693 | u32 what) | ||
694 | { | ||
695 | struct blk_trace *bt = q->blk_trace; | ||
696 | |||
697 | if (likely(!bt)) | ||
698 | return; | ||
699 | |||
700 | __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, | ||
701 | !bio_flagged(bio, BIO_UPTODATE), 0, NULL); | ||
702 | } | ||
703 | |||
704 | static void blk_add_trace_bio_bounce(struct request_queue *q, struct bio *bio) | ||
705 | { | ||
706 | blk_add_trace_bio(q, bio, BLK_TA_BOUNCE); | ||
707 | } | ||
708 | |||
709 | static void blk_add_trace_bio_complete(struct request_queue *q, struct bio *bio) | ||
710 | { | ||
711 | blk_add_trace_bio(q, bio, BLK_TA_COMPLETE); | ||
712 | } | ||
713 | |||
714 | static void blk_add_trace_bio_backmerge(struct request_queue *q, | ||
715 | struct bio *bio) | ||
716 | { | ||
717 | blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); | ||
718 | } | ||
719 | |||
720 | static void blk_add_trace_bio_frontmerge(struct request_queue *q, | ||
721 | struct bio *bio) | ||
722 | { | ||
723 | blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); | ||
724 | } | ||
725 | |||
726 | static void blk_add_trace_bio_queue(struct request_queue *q, struct bio *bio) | ||
727 | { | ||
728 | blk_add_trace_bio(q, bio, BLK_TA_QUEUE); | ||
729 | } | ||
730 | |||
731 | static void blk_add_trace_getrq(struct request_queue *q, | ||
732 | struct bio *bio, int rw) | ||
733 | { | ||
734 | if (bio) { | ||
735 | blk_add_trace_bio(q, bio, BLK_TA_GETRQ); | ||
736 | } else { | ||
737 | struct blk_trace *bt = q->blk_trace; | ||
738 | |||
739 | if (bt) | ||
740 | __blk_add_trace(bt, 0, 0, rw, BLK_TA_GETRQ, 0, 0, NULL); | ||
741 | } | ||
742 | } | ||
743 | |||
744 | |||
745 | static void blk_add_trace_sleeprq(struct request_queue *q, | ||
746 | struct bio *bio, int rw) | ||
747 | { | ||
748 | if (bio) { | ||
749 | blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ); | ||
750 | } else { | ||
751 | struct blk_trace *bt = q->blk_trace; | ||
752 | |||
753 | if (bt) | ||
754 | __blk_add_trace(bt, 0, 0, rw, BLK_TA_SLEEPRQ, | ||
755 | 0, 0, NULL); | ||
756 | } | ||
757 | } | ||
758 | |||
759 | static void blk_add_trace_plug(struct request_queue *q) | ||
760 | { | ||
761 | struct blk_trace *bt = q->blk_trace; | ||
762 | |||
763 | if (bt) | ||
764 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); | ||
765 | } | ||
766 | |||
767 | static void blk_add_trace_unplug_io(struct request_queue *q) | ||
768 | { | ||
769 | struct blk_trace *bt = q->blk_trace; | ||
770 | |||
771 | if (bt) { | ||
772 | unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; | ||
773 | __be64 rpdu = cpu_to_be64(pdu); | ||
774 | |||
775 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0, | ||
776 | sizeof(rpdu), &rpdu); | ||
777 | } | ||
778 | } | ||
779 | |||
780 | static void blk_add_trace_unplug_timer(struct request_queue *q) | ||
781 | { | ||
782 | struct blk_trace *bt = q->blk_trace; | ||
783 | |||
784 | if (bt) { | ||
785 | unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; | ||
786 | __be64 rpdu = cpu_to_be64(pdu); | ||
787 | |||
788 | __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0, | ||
789 | sizeof(rpdu), &rpdu); | ||
790 | } | ||
791 | } | ||
792 | |||
793 | static void blk_add_trace_split(struct request_queue *q, struct bio *bio, | ||
794 | unsigned int pdu) | ||
795 | { | ||
796 | struct blk_trace *bt = q->blk_trace; | ||
797 | |||
798 | if (bt) { | ||
799 | __be64 rpdu = cpu_to_be64(pdu); | ||
800 | |||
801 | __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, | ||
802 | BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE), | ||
803 | sizeof(rpdu), &rpdu); | ||
804 | } | ||
805 | } | ||
806 | |||
807 | /** | ||
808 | * blk_add_trace_remap - Add a trace for a remap operation | ||
809 | * @q: queue the io is for | ||
810 | * @bio: the source bio | ||
811 | * @dev: target device | ||
812 | * @from: source sector | ||
813 | * @to: target sector | ||
814 | * | ||
815 | * Description: | ||
816 | * Device mapper or raid targets sometimes need to split a bio because | ||
817 | * it spans a stripe (or similar). Add a trace for that action. | ||
818 | * | ||
819 | **/ | ||
820 | static void blk_add_trace_remap(struct request_queue *q, struct bio *bio, | ||
821 | dev_t dev, sector_t from, sector_t to) | ||
822 | { | ||
823 | struct blk_trace *bt = q->blk_trace; | ||
824 | struct blk_io_trace_remap r; | ||
825 | |||
826 | if (likely(!bt)) | ||
827 | return; | ||
828 | |||
829 | r.device = cpu_to_be32(dev); | ||
830 | r.device_from = cpu_to_be32(bio->bi_bdev->bd_dev); | ||
831 | r.sector = cpu_to_be64(to); | ||
832 | |||
833 | __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, | ||
834 | !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); | ||
835 | } | ||
836 | |||
837 | /** | ||
838 | * blk_add_driver_data - Add binary message with driver-specific data | ||
839 | * @q: queue the io is for | ||
840 | * @rq: io request | ||
841 | * @data: driver-specific data | ||
842 | * @len: length of driver-specific data | ||
843 | * | ||
844 | * Description: | ||
845 | * Some drivers might want to write driver-specific data per request. | ||
846 | * | ||
847 | **/ | ||
848 | void blk_add_driver_data(struct request_queue *q, | ||
849 | struct request *rq, | ||
850 | void *data, size_t len) | ||
851 | { | ||
852 | struct blk_trace *bt = q->blk_trace; | ||
853 | |||
854 | if (likely(!bt)) | ||
855 | return; | ||
856 | |||
857 | if (blk_pc_request(rq)) | ||
858 | __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA, | ||
859 | rq->errors, len, data); | ||
860 | else | ||
861 | __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, | ||
862 | 0, BLK_TA_DRV_DATA, rq->errors, len, data); | ||
863 | } | ||
864 | EXPORT_SYMBOL_GPL(blk_add_driver_data); | ||
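
A sketch of a caller (the info struct and its fields are invented for illustration; only blk_add_driver_data() itself is from this file):

struct my_drv_rq_info {		/* hypothetical per-request HW state */
	u32 hw_queue;
	u32 fw_status;
};

static void my_drv_trace_rq(struct request_queue *q, struct request *rq,
			    u32 hw_queue, u32 fw_status)
{
	struct my_drv_rq_info info = {
		.hw_queue  = hw_queue,
		.fw_status = fw_status,
	};

	/* shows up in the stream as a BLK_TA_DRV_DATA event */
	blk_add_driver_data(q, rq, &info, sizeof(info));
}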
865 | |||
866 | static int blk_register_tracepoints(void) | ||
867 | { | ||
868 | int ret; | ||
869 | |||
870 | ret = register_trace_block_rq_abort(blk_add_trace_rq_abort); | ||
871 | WARN_ON(ret); | ||
872 | ret = register_trace_block_rq_insert(blk_add_trace_rq_insert); | ||
873 | WARN_ON(ret); | ||
874 | ret = register_trace_block_rq_issue(blk_add_trace_rq_issue); | ||
875 | WARN_ON(ret); | ||
876 | ret = register_trace_block_rq_requeue(blk_add_trace_rq_requeue); | ||
877 | WARN_ON(ret); | ||
878 | ret = register_trace_block_rq_complete(blk_add_trace_rq_complete); | ||
879 | WARN_ON(ret); | ||
880 | ret = register_trace_block_bio_bounce(blk_add_trace_bio_bounce); | ||
881 | WARN_ON(ret); | ||
882 | ret = register_trace_block_bio_complete(blk_add_trace_bio_complete); | ||
883 | WARN_ON(ret); | ||
884 | ret = register_trace_block_bio_backmerge(blk_add_trace_bio_backmerge); | ||
885 | WARN_ON(ret); | ||
886 | ret = register_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge); | ||
887 | WARN_ON(ret); | ||
888 | ret = register_trace_block_bio_queue(blk_add_trace_bio_queue); | ||
889 | WARN_ON(ret); | ||
890 | ret = register_trace_block_getrq(blk_add_trace_getrq); | ||
891 | WARN_ON(ret); | ||
892 | ret = register_trace_block_sleeprq(blk_add_trace_sleeprq); | ||
893 | WARN_ON(ret); | ||
894 | ret = register_trace_block_plug(blk_add_trace_plug); | ||
895 | WARN_ON(ret); | ||
896 | ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer); | ||
897 | WARN_ON(ret); | ||
898 | ret = register_trace_block_unplug_io(blk_add_trace_unplug_io); | ||
899 | WARN_ON(ret); | ||
900 | ret = register_trace_block_split(blk_add_trace_split); | ||
901 | WARN_ON(ret); | ||
902 | ret = register_trace_block_remap(blk_add_trace_remap); | ||
903 | WARN_ON(ret); | ||
904 | return 0; | ||
905 | } | ||
906 | |||
907 | static void blk_unregister_tracepoints(void) | ||
908 | { | ||
909 | unregister_trace_block_remap(blk_add_trace_remap); | ||
910 | unregister_trace_block_split(blk_add_trace_split); | ||
911 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io); | ||
912 | unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer); | ||
913 | unregister_trace_block_plug(blk_add_trace_plug); | ||
914 | unregister_trace_block_sleeprq(blk_add_trace_sleeprq); | ||
915 | unregister_trace_block_getrq(blk_add_trace_getrq); | ||
916 | unregister_trace_block_bio_queue(blk_add_trace_bio_queue); | ||
917 | unregister_trace_block_bio_frontmerge(blk_add_trace_bio_frontmerge); | ||
918 | unregister_trace_block_bio_backmerge(blk_add_trace_bio_backmerge); | ||
919 | unregister_trace_block_bio_complete(blk_add_trace_bio_complete); | ||
920 | unregister_trace_block_bio_bounce(blk_add_trace_bio_bounce); | ||
921 | unregister_trace_block_rq_complete(blk_add_trace_rq_complete); | ||
922 | unregister_trace_block_rq_requeue(blk_add_trace_rq_requeue); | ||
923 | unregister_trace_block_rq_issue(blk_add_trace_rq_issue); | ||
924 | unregister_trace_block_rq_insert(blk_add_trace_rq_insert); | ||
925 | unregister_trace_block_rq_abort(blk_add_trace_rq_abort); | ||
926 | |||
927 | tracepoint_synchronize_unregister(); | ||
928 | } | ||
929 | |||
930 | /* | ||
931 | * struct blk_io_tracer formatting routines | ||
932 | */ | ||
933 | |||
934 | static void fill_rwbs(char *rwbs, const struct blk_io_trace *t) | ||
935 | { | ||
936 | int i = 0; | ||
937 | int tc = t->action >> BLK_TC_SHIFT; | ||
938 | |||
939 | if (tc & BLK_TC_DISCARD) | ||
940 | rwbs[i++] = 'D'; | ||
941 | else if (tc & BLK_TC_WRITE) | ||
942 | rwbs[i++] = 'W'; | ||
943 | else if (t->bytes) | ||
944 | rwbs[i++] = 'R'; | ||
945 | else | ||
946 | rwbs[i++] = 'N'; | ||
947 | |||
948 | if (tc & BLK_TC_AHEAD) | ||
949 | rwbs[i++] = 'A'; | ||
950 | if (tc & BLK_TC_BARRIER) | ||
951 | rwbs[i++] = 'B'; | ||
952 | if (tc & BLK_TC_SYNC) | ||
953 | rwbs[i++] = 'S'; | ||
954 | if (tc & BLK_TC_META) | ||
955 | rwbs[i++] = 'M'; | ||
956 | rwbs[i] = '\0'; | ||
957 | } | ||
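
A quick sanity sketch of the output (hypothetical test, not from the patch; the BLK_TC_* category bits sit above BLK_TC_SHIFT in the action word):

static void fill_rwbs_example(void)
{
	struct blk_io_trace t = {
		.action = BLK_TC_ACT(BLK_TC_WRITE | BLK_TC_SYNC),
		.bytes  = 4096,
	};
	char rwbs[6];

	fill_rwbs(rwbs, &t);	/* rwbs now holds "WS" */
}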
958 | |||
959 | static inline | ||
960 | const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent) | ||
961 | { | ||
962 | return (const struct blk_io_trace *)ent; | ||
963 | } | ||
964 | |||
965 | static inline const void *pdu_start(const struct trace_entry *ent) | ||
966 | { | ||
967 | return te_blk_io_trace(ent) + 1; | ||
968 | } | ||
969 | |||
970 | static inline u32 t_sec(const struct trace_entry *ent) | ||
971 | { | ||
972 | return te_blk_io_trace(ent)->bytes >> 9; | ||
973 | } | ||
974 | |||
975 | static inline unsigned long long t_sector(const struct trace_entry *ent) | ||
976 | { | ||
977 | return te_blk_io_trace(ent)->sector; | ||
978 | } | ||
979 | |||
980 | static inline __u16 t_error(const struct trace_entry *ent) | ||
981 | { | ||
982 | return te_blk_io_trace(ent)->error; | ||
983 | } | ||
984 | |||
985 | static __u64 get_pdu_int(const struct trace_entry *ent) | ||
986 | { | ||
987 | const __u64 *val = pdu_start(ent); | ||
988 | return be64_to_cpu(*val); | ||
989 | } | ||
990 | |||
991 | static void get_pdu_remap(const struct trace_entry *ent, | ||
992 | struct blk_io_trace_remap *r) | ||
993 | { | ||
994 | const struct blk_io_trace_remap *__r = pdu_start(ent); | ||
995 | __u64 sector = __r->sector; | ||
996 | |||
997 | r->device = be32_to_cpu(__r->device); | ||
998 | r->device_from = be32_to_cpu(__r->device_from); | ||
999 | r->sector = be64_to_cpu(sector); | ||
1000 | } | ||
1001 | |||
1002 | static int blk_log_action_iter(struct trace_iterator *iter, const char *act) | ||
1003 | { | ||
1004 | char rwbs[6]; | ||
1005 | unsigned long long ts = ns2usecs(iter->ts); | ||
1006 | unsigned long usec_rem = do_div(ts, USEC_PER_SEC); | ||
1007 | unsigned secs = (unsigned long)ts; | ||
1008 | const struct trace_entry *ent = iter->ent; | ||
1009 | const struct blk_io_trace *t = (const struct blk_io_trace *)ent; | ||
1010 | |||
1011 | fill_rwbs(rwbs, t); | ||
1012 | |||
1013 | return trace_seq_printf(&iter->seq, | ||
1014 | "%3d,%-3d %2d %5d.%06lu %5u %2s %3s ", | ||
1015 | MAJOR(t->device), MINOR(t->device), iter->cpu, | ||
1016 | secs, usec_rem, ent->pid, act, rwbs); | ||
1017 | } | ||
1018 | |||
1019 | static int blk_log_action_seq(struct trace_seq *s, const struct blk_io_trace *t, | ||
1020 | const char *act) | ||
1021 | { | ||
1022 | char rwbs[6]; | ||
1023 | fill_rwbs(rwbs, t); | ||
1024 | return trace_seq_printf(s, "%3d,%-3d %2s %3s ", | ||
1025 | MAJOR(t->device), MINOR(t->device), act, rwbs); | ||
1026 | } | ||
1027 | |||
1028 | static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent) | ||
1029 | { | ||
1030 | const char *cmd = trace_find_cmdline(ent->pid); | ||
1031 | |||
1032 | if (t_sec(ent)) | ||
1033 | return trace_seq_printf(s, "%llu + %u [%s]\n", | ||
1034 | t_sector(ent), t_sec(ent), cmd); | ||
1035 | return trace_seq_printf(s, "[%s]\n", cmd); | ||
1036 | } | ||
1037 | |||
1038 | static int blk_log_with_error(struct trace_seq *s, | ||
1039 | const struct trace_entry *ent) | ||
1040 | { | ||
1041 | if (t_sec(ent)) | ||
1042 | return trace_seq_printf(s, "%llu + %u [%d]\n", t_sector(ent), | ||
1043 | t_sec(ent), t_error(ent)); | ||
1044 | return trace_seq_printf(s, "%llu [%d]\n", t_sector(ent), t_error(ent)); | ||
1045 | } | ||
1046 | |||
1047 | static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent) | ||
1048 | { | ||
1049 | struct blk_io_trace_remap r = { .device = 0, }; | ||
1050 | |||
1051 | get_pdu_remap(ent, &r); | ||
1052 | return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", | ||
1053 | t_sector(ent), | ||
1054 | t_sec(ent), MAJOR(r.device), MINOR(r.device), | ||
1055 | (unsigned long long)r.sector); | ||
1056 | } | ||
1057 | |||
1058 | static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent) | ||
1059 | { | ||
1060 | return trace_seq_printf(s, "[%s]\n", trace_find_cmdline(ent->pid)); | ||
1061 | } | ||
1062 | |||
1063 | static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent) | ||
1064 | { | ||
1065 | return trace_seq_printf(s, "[%s] %llu\n", trace_find_cmdline(ent->pid), | ||
1066 | get_pdu_int(ent)); | ||
1067 | } | ||
1068 | |||
1069 | static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent) | ||
1070 | { | ||
1071 | return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), | ||
1072 | get_pdu_int(ent), trace_find_cmdline(ent->pid)); | ||
1073 | } | ||
1074 | |||
1075 | /* | ||
1076 | * struct tracer operations | ||
1077 | */ | ||
1078 | |||
1079 | static void blk_tracer_print_header(struct seq_file *m) | ||
1080 | { | ||
1081 | if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) | ||
1082 | return; | ||
1083 | seq_puts(m, "# DEV CPU TIMESTAMP PID ACT FLG\n" | ||
1084 | "# | | | | | |\n"); | ||
1085 | } | ||
1086 | |||
1087 | static void blk_tracer_start(struct trace_array *tr) | ||
1088 | { | ||
1089 | mutex_lock(&blk_probe_mutex); | ||
1090 | if (atomic_add_return(1, &blk_probes_ref) == 1) | ||
1091 | if (blk_register_tracepoints()) | ||
1092 | atomic_dec(&blk_probes_ref); | ||
1093 | mutex_unlock(&blk_probe_mutex); | ||
1094 | trace_flags &= ~TRACE_ITER_CONTEXT_INFO; | ||
1095 | } | ||
1096 | |||
1097 | static int blk_tracer_init(struct trace_array *tr) | ||
1098 | { | ||
1099 | blk_tr = tr; | ||
1100 | blk_tracer_start(tr); | ||
1101 | mutex_lock(&blk_probe_mutex); | ||
1102 | blk_tracer_enabled++; | ||
1103 | mutex_unlock(&blk_probe_mutex); | ||
1104 | return 0; | ||
1105 | } | ||
1106 | |||
1107 | static void blk_tracer_stop(struct trace_array *tr) | ||
1108 | { | ||
1109 | trace_flags |= TRACE_ITER_CONTEXT_INFO; | ||
1110 | mutex_lock(&blk_probe_mutex); | ||
1111 | if (atomic_dec_and_test(&blk_probes_ref)) | ||
1112 | blk_unregister_tracepoints(); | ||
1113 | mutex_unlock(&blk_probe_mutex); | ||
1114 | } | ||
1115 | |||
1116 | static void blk_tracer_reset(struct trace_array *tr) | ||
1117 | { | ||
1118 | if (!atomic_read(&blk_probes_ref)) | ||
1119 | return; | ||
1120 | |||
1121 | mutex_lock(&blk_probe_mutex); | ||
1122 | blk_tracer_enabled--; | ||
1123 | WARN_ON(blk_tracer_enabled < 0); | ||
1124 | mutex_unlock(&blk_probe_mutex); | ||
1125 | |||
1126 | blk_tracer_stop(tr); | ||
1127 | } | ||
1128 | |||
1129 | static struct { | ||
1130 | const char *act[2]; | ||
1131 | int (*print)(struct trace_seq *s, const struct trace_entry *ent); | ||
1132 | } what2act[] __read_mostly = { | ||
1133 | [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, | ||
1134 | [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, | ||
1135 | [__BLK_TA_FRONTMERGE] = {{ "F", "frontmerge" }, blk_log_generic }, | ||
1136 | [__BLK_TA_GETRQ] = {{ "G", "getrq" }, blk_log_generic }, | ||
1137 | [__BLK_TA_SLEEPRQ] = {{ "S", "sleeprq" }, blk_log_generic }, | ||
1138 | [__BLK_TA_REQUEUE] = {{ "R", "requeue" }, blk_log_with_error }, | ||
1139 | [__BLK_TA_ISSUE] = {{ "D", "issue" }, blk_log_generic }, | ||
1140 | [__BLK_TA_COMPLETE] = {{ "C", "complete" }, blk_log_with_error }, | ||
1141 | [__BLK_TA_PLUG] = {{ "P", "plug" }, blk_log_plug }, | ||
1142 | [__BLK_TA_UNPLUG_IO] = {{ "U", "unplug_io" }, blk_log_unplug }, | ||
1143 | [__BLK_TA_UNPLUG_TIMER] = {{ "UT", "unplug_timer" }, blk_log_unplug }, | ||
1144 | [__BLK_TA_INSERT] = {{ "I", "insert" }, blk_log_generic }, | ||
1145 | [__BLK_TA_SPLIT] = {{ "X", "split" }, blk_log_split }, | ||
1146 | [__BLK_TA_BOUNCE] = {{ "B", "bounce" }, blk_log_generic }, | ||
1147 | [__BLK_TA_REMAP] = {{ "A", "remap" }, blk_log_remap }, | ||
1148 | }; | ||
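
Putting this table together with blk_log_action_iter() above, a classic-mode line comes out roughly like this (illustrative values only):

	  8,0    1     1.012345  4123  Q   R 1024 + 8 [dd]

i.e. major,minor, CPU, seconds.microseconds, pid, the act string from this table ("queue" instead of "Q" when the verbose trace option is set), the rwbs flags, then whatever the per-action print routine appends.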
1149 | |||
1150 | static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, | ||
1151 | int flags) | ||
1152 | { | ||
1153 | struct trace_seq *s = &iter->seq; | ||
1154 | const struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; | ||
1155 | const u16 what = t->action & ((1 << BLK_TC_SHIFT) - 1); | ||
1156 | int ret; | ||
1157 | |||
1158 | if (!trace_print_context(iter)) | ||
1159 | return TRACE_TYPE_PARTIAL_LINE; | ||
1160 | |||
1161 | if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) | ||
1162 | ret = trace_seq_printf(s, "Unknown action %x\n", what); | ||
1163 | else { | ||
1164 | const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE); | ||
1165 | ret = blk_log_action_seq(s, t, what2act[what].act[long_act]); | ||
1166 | if (ret) | ||
1167 | ret = what2act[what].print(s, iter->ent); | ||
1168 | } | ||
1169 | |||
1170 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | ||
1171 | } | ||
1172 | |||
1173 | static int blk_trace_synthesize_old_trace(struct trace_iterator *iter) | ||
1174 | { | ||
1175 | struct trace_seq *s = &iter->seq; | ||
1176 | struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; | ||
1177 | const int offset = offsetof(struct blk_io_trace, sector); | ||
1178 | struct blk_io_trace old = { | ||
1179 | .magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION, | ||
1180 | .time = ns2usecs(iter->ts), | ||
1181 | }; | ||
1182 | |||
1183 | if (!trace_seq_putmem(s, &old, offset)) | ||
1184 | return 0; | ||
1185 | return trace_seq_putmem(s, &t->sector, | ||
1186 | sizeof(old) - offset + t->pdu_len); | ||
1187 | } | ||
1188 | |||
1189 | static enum print_line_t | ||
1190 | blk_trace_event_print_binary(struct trace_iterator *iter, int flags) | ||
1191 | { | ||
1192 | return blk_trace_synthesize_old_trace(iter) ? | ||
1193 | TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | ||
1194 | } | ||
1195 | |||
1196 | static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) | ||
1197 | { | ||
1198 | const struct blk_io_trace *t; | ||
1199 | u16 what; | ||
1200 | int ret; | ||
1201 | |||
1202 | if (!(blk_tracer_flags.val & TRACE_BLK_OPT_CLASSIC)) | ||
1203 | return TRACE_TYPE_UNHANDLED; | ||
1204 | |||
1205 | t = (const struct blk_io_trace *)iter->ent; | ||
1206 | what = t->action & ((1 << BLK_TC_SHIFT) - 1); | ||
1207 | |||
1208 | if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) | ||
1209 | ret = trace_seq_printf(&iter->seq, "Unknown action %x\n", what); | ||
1210 | else { | ||
1211 | const bool long_act = !!(trace_flags & TRACE_ITER_VERBOSE); | ||
1212 | ret = blk_log_action_iter(iter, what2act[what].act[long_act]); | ||
1213 | if (ret) | ||
1214 | ret = what2act[what].print(&iter->seq, iter->ent); | ||
1215 | } | ||
1216 | |||
1217 | return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; | ||
1218 | } | ||
1219 | |||
1220 | static struct tracer blk_tracer __read_mostly = { | ||
1221 | .name = "blk", | ||
1222 | .init = blk_tracer_init, | ||
1223 | .reset = blk_tracer_reset, | ||
1224 | .start = blk_tracer_start, | ||
1225 | .stop = blk_tracer_stop, | ||
1226 | .print_header = blk_tracer_print_header, | ||
1227 | .print_line = blk_tracer_print_line, | ||
1228 | .flags = &blk_tracer_flags, | ||
1229 | }; | ||
1230 | |||
1231 | static struct trace_event trace_blk_event = { | ||
1232 | .type = TRACE_BLK, | ||
1233 | .trace = blk_trace_event_print, | ||
1234 | .binary = blk_trace_event_print_binary, | ||
1235 | }; | ||
1236 | |||
1237 | static int __init init_blk_tracer(void) | ||
1238 | { | ||
1239 | if (!register_ftrace_event(&trace_blk_event)) { | ||
1240 | pr_warning("Warning: could not register block events\n"); | ||
1241 | return 1; | ||
1242 | } | ||
1243 | |||
1244 | if (register_tracer(&blk_tracer) != 0) { | ||
1245 | pr_warning("Warning: could not register the block tracer\n"); | ||
1246 | unregister_ftrace_event(&trace_blk_event); | ||
1247 | return 1; | ||
1248 | } | ||
1249 | |||
1250 | return 0; | ||
1251 | } | ||
1252 | |||
1253 | device_initcall(init_blk_tracer); | ||
1254 | |||
1255 | static int blk_trace_remove_queue(struct request_queue *q) | ||
1256 | { | ||
1257 | struct blk_trace *bt; | ||
1258 | |||
1259 | bt = xchg(&q->blk_trace, NULL); | ||
1260 | if (bt == NULL) | ||
1261 | return -EINVAL; | ||
1262 | |||
1263 | kfree(bt); | ||
1264 | return 0; | ||
1265 | } | ||
1266 | |||
1267 | /* | ||
1268 | * Set up everything required to start tracing | ||
1269 | */ | ||
1270 | static int blk_trace_setup_queue(struct request_queue *q, dev_t dev) | ||
1271 | { | ||
1272 | struct blk_trace *old_bt, *bt = NULL; | ||
1273 | int ret; | ||
1274 | |||
1275 | ret = -ENOMEM; | ||
1276 | bt = kzalloc(sizeof(*bt), GFP_KERNEL); | ||
1277 | if (!bt) | ||
1278 | goto err; | ||
1279 | |||
1280 | bt->dev = dev; | ||
1281 | bt->act_mask = (u16)-1; | ||
1282 | bt->end_lba = -1ULL; | ||
1283 | bt->trace_state = Blktrace_running; | ||
1284 | |||
1285 | old_bt = xchg(&q->blk_trace, bt); | ||
1286 | if (old_bt != NULL) { | ||
1287 | (void)xchg(&q->blk_trace, old_bt); | ||
1288 | kfree(bt); | ||
1289 | return -EBUSY; | ||
1290 | } | ||
1291 | return 0; | ||
1292 | err: | ||
1293 | return ret; | ||
1294 | } | ||
1295 | |||
1296 | /* | ||
1297 | * sysfs interface to enable and configure tracing | ||
1298 | */ | ||
1299 | |||
1300 | static ssize_t sysfs_blk_trace_enable_show(struct device *dev, | ||
1301 | struct device_attribute *attr, | ||
1302 | char *buf) | ||
1303 | { | ||
1304 | struct hd_struct *p = dev_to_part(dev); | ||
1305 | struct block_device *bdev; | ||
1306 | ssize_t ret = -ENXIO; | ||
1307 | |||
1308 | lock_kernel(); | ||
1309 | bdev = bdget(part_devt(p)); | ||
1310 | if (bdev != NULL) { | ||
1311 | struct request_queue *q = bdev_get_queue(bdev); | ||
1312 | |||
1313 | if (q != NULL) { | ||
1314 | mutex_lock(&bdev->bd_mutex); | ||
1315 | ret = sprintf(buf, "%u\n", !!q->blk_trace); | ||
1316 | mutex_unlock(&bdev->bd_mutex); | ||
1317 | } | ||
1318 | |||
1319 | bdput(bdev); | ||
1320 | } | ||
1321 | |||
1322 | unlock_kernel(); | ||
1323 | return ret; | ||
1324 | } | ||
1325 | |||
1326 | static ssize_t sysfs_blk_trace_enable_store(struct device *dev, | ||
1327 | struct device_attribute *attr, | ||
1328 | const char *buf, size_t count) | ||
1329 | { | ||
1330 | struct block_device *bdev; | ||
1331 | struct request_queue *q; | ||
1332 | struct hd_struct *p; | ||
1333 | int value; | ||
1334 | ssize_t ret = -ENXIO; | ||
1335 | |||
1336 | if (count == 0 || sscanf(buf, "%d", &value) != 1) | ||
1337 | goto out; | ||
1338 | |||
1339 | lock_kernel(); | ||
1340 | p = dev_to_part(dev); | ||
1341 | bdev = bdget(part_devt(p)); | ||
1342 | if (bdev == NULL) | ||
1343 | goto out_unlock_kernel; | ||
1344 | |||
1345 | q = bdev_get_queue(bdev); | ||
1346 | if (q == NULL) | ||
1347 | goto out_bdput; | ||
1348 | |||
1349 | mutex_lock(&bdev->bd_mutex); | ||
1350 | if (value) | ||
1351 | ret = blk_trace_setup_queue(q, bdev->bd_dev); | ||
1352 | else | ||
1353 | ret = blk_trace_remove_queue(q); | ||
1354 | mutex_unlock(&bdev->bd_mutex); | ||
1355 | |||
1356 | if (ret == 0) | ||
1357 | ret = count; | ||
1358 | out_bdput: | ||
1359 | bdput(bdev); | ||
1360 | out_unlock_kernel: | ||
1361 | unlock_kernel(); | ||
1362 | out: | ||
1363 | return ret; | ||
1364 | } | ||
1365 | |||
1366 | static ssize_t sysfs_blk_trace_attr_show(struct device *dev, | ||
1367 | struct device_attribute *attr, | ||
1368 | char *buf); | ||
1369 | static ssize_t sysfs_blk_trace_attr_store(struct device *dev, | ||
1370 | struct device_attribute *attr, | ||
1371 | const char *buf, size_t count); | ||
1372 | #define BLK_TRACE_DEVICE_ATTR(_name) \ | ||
1373 | DEVICE_ATTR(_name, S_IRUGO | S_IWUSR, \ | ||
1374 | sysfs_blk_trace_attr_show, \ | ||
1375 | sysfs_blk_trace_attr_store) | ||
1376 | |||
1377 | static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR, | ||
1378 | sysfs_blk_trace_enable_show, sysfs_blk_trace_enable_store); | ||
1379 | static BLK_TRACE_DEVICE_ATTR(act_mask); | ||
1380 | static BLK_TRACE_DEVICE_ATTR(pid); | ||
1381 | static BLK_TRACE_DEVICE_ATTR(start_lba); | ||
1382 | static BLK_TRACE_DEVICE_ATTR(end_lba); | ||
1383 | |||
1384 | static struct attribute *blk_trace_attrs[] = { | ||
1385 | &dev_attr_enable.attr, | ||
1386 | &dev_attr_act_mask.attr, | ||
1387 | &dev_attr_pid.attr, | ||
1388 | &dev_attr_start_lba.attr, | ||
1389 | &dev_attr_end_lba.attr, | ||
1390 | NULL | ||
1391 | }; | ||
1392 | |||
1393 | struct attribute_group blk_trace_attr_group = { | ||
1394 | .name = "trace", | ||
1395 | .attrs = blk_trace_attrs, | ||
1396 | }; | ||
1397 | |||
1398 | static int blk_str2act_mask(const char *str) | ||
1399 | { | ||
1400 | int mask = 0; | ||
1401 | char *copy = kstrdup(str, GFP_KERNEL), *s; | ||
1402 | |||
1403 | if (copy == NULL) | ||
1404 | return -ENOMEM; | ||
1405 | |||
1406 | s = strstrip(copy); | ||
1407 | |||
1408 | while (1) { | ||
1409 | char *sep = strchr(s, ','); | ||
1410 | |||
1411 | if (sep != NULL) | ||
1412 | *sep = '\0'; | ||
1413 | |||
1414 | if (strcasecmp(s, "barrier") == 0) | ||
1415 | mask |= BLK_TC_BARRIER; | ||
1416 | else if (strcasecmp(s, "complete") == 0) | ||
1417 | mask |= BLK_TC_COMPLETE; | ||
1418 | else if (strcasecmp(s, "fs") == 0) | ||
1419 | mask |= BLK_TC_FS; | ||
1420 | else if (strcasecmp(s, "issue") == 0) | ||
1421 | mask |= BLK_TC_ISSUE; | ||
1422 | else if (strcasecmp(s, "pc") == 0) | ||
1423 | mask |= BLK_TC_PC; | ||
1424 | else if (strcasecmp(s, "queue") == 0) | ||
1425 | mask |= BLK_TC_QUEUE; | ||
1426 | else if (strcasecmp(s, "read") == 0) | ||
1427 | mask |= BLK_TC_READ; | ||
1428 | else if (strcasecmp(s, "requeue") == 0) | ||
1429 | mask |= BLK_TC_REQUEUE; | ||
1430 | else if (strcasecmp(s, "sync") == 0) | ||
1431 | mask |= BLK_TC_SYNC; | ||
1432 | else if (strcasecmp(s, "write") == 0) | ||
1433 | mask |= BLK_TC_WRITE; | ||
1434 | |||
1435 | if (sep == NULL) | ||
1436 | break; | ||
1437 | |||
1438 | s = sep + 1; | ||
1439 | } | ||
1440 | kfree(copy); | ||
1441 | |||
1442 | return mask; | ||
1443 | } | ||
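
A sketch of the parsing behaviour (illustrative; the function is static, so it is only reachable through the sysfs store path below):

	int mask = blk_str2act_mask("read,sync");
	/* mask == (BLK_TC_READ | BLK_TC_SYNC); unrecognised names are
	 * silently skipped, and a failed kstrdup() yields -ENOMEM */

In practice this lets a user write either a hex mask or a name list such as "read,sync" to the act_mask attribute.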
1444 | |||
1445 | static ssize_t sysfs_blk_trace_attr_show(struct device *dev, | ||
1446 | struct device_attribute *attr, | ||
1447 | char *buf) | ||
1448 | { | ||
1449 | struct hd_struct *p = dev_to_part(dev); | ||
1450 | struct request_queue *q; | ||
1451 | struct block_device *bdev; | ||
1452 | ssize_t ret = -ENXIO; | ||
1453 | |||
1454 | lock_kernel(); | ||
1455 | bdev = bdget(part_devt(p)); | ||
1456 | if (bdev == NULL) | ||
1457 | goto out_unlock_kernel; | ||
1458 | |||
1459 | q = bdev_get_queue(bdev); | ||
1460 | if (q == NULL) | ||
1461 | goto out_bdput; | ||
1462 | mutex_lock(&bdev->bd_mutex); | ||
1463 | if (q->blk_trace == NULL) | ||
1464 | ret = sprintf(buf, "disabled\n"); | ||
1465 | else if (attr == &dev_attr_act_mask) | ||
1466 | ret = sprintf(buf, "%#x\n", q->blk_trace->act_mask); | ||
1467 | else if (attr == &dev_attr_pid) | ||
1468 | ret = sprintf(buf, "%u\n", q->blk_trace->pid); | ||
1469 | else if (attr == &dev_attr_start_lba) | ||
1470 | ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba); | ||
1471 | else if (attr == &dev_attr_end_lba) | ||
1472 | ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba); | ||
1473 | mutex_unlock(&bdev->bd_mutex); | ||
1474 | out_bdput: | ||
1475 | bdput(bdev); | ||
1476 | out_unlock_kernel: | ||
1477 | unlock_kernel(); | ||
1478 | return ret; | ||
1479 | } | ||
1480 | |||
1481 | static ssize_t sysfs_blk_trace_attr_store(struct device *dev, | ||
1482 | struct device_attribute *attr, | ||
1483 | const char *buf, size_t count) | ||
1484 | { | ||
1485 | struct block_device *bdev; | ||
1486 | struct request_queue *q; | ||
1487 | struct hd_struct *p; | ||
1488 | u64 value; | ||
1489 | ssize_t ret = -ENXIO; | ||
1490 | |||
1491 | if (count == 0) | ||
1492 | goto out; | ||
1493 | |||
1494 | if (attr == &dev_attr_act_mask) { | ||
1495 | if (sscanf(buf, "%llx", &value) != 1) { | ||
1496 | /* Assume it is a list of trace category names */ | ||
1497 | ret = blk_str2act_mask(buf); | ||
1498 | if (ret < 0) | ||
1499 | goto out; | ||
1500 | value = ret; | ||
1501 | } | ||
1502 | } else if (sscanf(buf, "%llu", &value) != 1) | ||
1503 | goto out; | ||
1504 | ret = -ENXIO; | ||
1505 | lock_kernel(); | ||
1506 | p = dev_to_part(dev); | ||
1507 | bdev = bdget(part_devt(p)); | ||
1508 | if (bdev == NULL) | ||
1509 | goto out_unlock_kernel; | ||
1510 | q = bdev_get_queue(bdev); | ||
1511 | if (q == NULL) | ||
1512 | goto out_bdput; | ||
1513 | |||
1514 | mutex_lock(&bdev->bd_mutex); | ||
1515 | ret = 0; | ||
1516 | if (q->blk_trace == NULL) | ||
1517 | ret = blk_trace_setup_queue(q, bdev->bd_dev); | ||
1518 | |||
1519 | if (ret == 0) { | ||
1520 | if (attr == &dev_attr_act_mask) | ||
1521 | q->blk_trace->act_mask = value; | ||
1522 | else if (attr == &dev_attr_pid) | ||
1523 | q->blk_trace->pid = value; | ||
1524 | else if (attr == &dev_attr_start_lba) | ||
1525 | q->blk_trace->start_lba = value; | ||
1526 | else if (attr == &dev_attr_end_lba) | ||
1527 | q->blk_trace->end_lba = value; | ||
1528 | ret = count; | ||
1529 | } | ||
1530 | mutex_unlock(&bdev->bd_mutex); | ||
1531 | out_bdput: | ||
1532 | bdput(bdev); | ||
1533 | out_unlock_kernel: | ||
1534 | unlock_kernel(); | ||
1535 | out: | ||
1536 | return ret; | ||
1537 | } | ||
diff --git a/kernel/trace/events.c b/kernel/trace/events.c new file mode 100644 index 000000000000..f2509cbaacea --- /dev/null +++ b/kernel/trace/events.c | |||
@@ -0,0 +1,17 @@ | |||
1 | /* | ||
2 | * This is the place to register all trace points as events. | ||
3 | */ | ||
4 | |||
5 | /* someday this needs to go in a generic header */ | ||
6 | #define __STR(x) #x | ||
7 | #define STR(x) __STR(x) | ||
8 | |||
9 | #include <trace/trace_events.h> | ||
10 | |||
11 | #include "trace_output.h" | ||
12 | |||
13 | #include "trace_events_stage_1.h" | ||
14 | #include "trace_events_stage_2.h" | ||
15 | #include "trace_events_stage_3.h" | ||
16 | |||
17 | #include <trace/trace_event_types.h> | ||
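
The stage headers work by the classic include-the-list-repeatedly preprocessor trick: each stage redefines the event macros, then re-expands <trace/trace_event_types.h>. A generic, self-contained sketch of the technique (file and macro names invented for illustration):

/* events-list.h -- the single list of events */
MY_EVENT(sched_switch)
MY_EVENT(irq_handler_entry)

/* consumer, e.g. events-user.c: expand the same list twice */
#define MY_EVENT(name) EV_##name,
enum my_event_ids {
#include "events-list.h"
	EV_MAX
};
#undef MY_EVENT

#define MY_EVENT(name) #name,
static const char *my_event_names[] = {
#include "events-list.h"
};
#undef MY_EVENT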
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index fdf913dfc7e8..5a3a06b21eee 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/sysctl.h> | 27 | #include <linux/sysctl.h> |
28 | #include <linux/ctype.h> | 28 | #include <linux/ctype.h> |
29 | #include <linux/list.h> | 29 | #include <linux/list.h> |
30 | #include <linux/hash.h> | ||
30 | 31 | ||
31 | #include <asm/ftrace.h> | 32 | #include <asm/ftrace.h> |
32 | 33 | ||
@@ -44,14 +45,14 @@ | |||
44 | ftrace_kill(); \ | 45 | ftrace_kill(); \ |
45 | } while (0) | 46 | } while (0) |
46 | 47 | ||
48 | /* hash bits for specific function selection */ | ||
49 | #define FTRACE_HASH_BITS 7 | ||
50 | #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS) | ||
51 | |||
47 | /* ftrace_enabled is a method to turn ftrace on or off */ | 52 | /* ftrace_enabled is a method to turn ftrace on or off */ |
48 | int ftrace_enabled __read_mostly; | 53 | int ftrace_enabled __read_mostly; |
49 | static int last_ftrace_enabled; | 54 | static int last_ftrace_enabled; |
50 | 55 | ||
51 | /* set when tracing only a pid */ | ||
52 | struct pid *ftrace_pid_trace; | ||
53 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | ||
54 | |||
55 | /* Quick disabling of function tracer. */ | 56 | /* Quick disabling of function tracer. */ |
56 | int function_trace_stop; | 57 | int function_trace_stop; |
57 | 58 | ||
@@ -61,9 +62,7 @@ int function_trace_stop; | |||
61 | */ | 62 | */ |
62 | static int ftrace_disabled __read_mostly; | 63 | static int ftrace_disabled __read_mostly; |
63 | 64 | ||
64 | static DEFINE_SPINLOCK(ftrace_lock); | 65 | static DEFINE_MUTEX(ftrace_lock); |
65 | static DEFINE_MUTEX(ftrace_sysctl_lock); | ||
66 | static DEFINE_MUTEX(ftrace_start_lock); | ||
67 | 66 | ||
68 | static struct ftrace_ops ftrace_list_end __read_mostly = | 67 | static struct ftrace_ops ftrace_list_end __read_mostly = |
69 | { | 68 | { |
@@ -134,9 +133,6 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip) | |||
134 | 133 | ||
135 | static int __register_ftrace_function(struct ftrace_ops *ops) | 134 | static int __register_ftrace_function(struct ftrace_ops *ops) |
136 | { | 135 | { |
137 | /* should not be called from interrupt context */ | ||
138 | spin_lock(&ftrace_lock); | ||
139 | |||
140 | ops->next = ftrace_list; | 136 | ops->next = ftrace_list; |
141 | /* | 137 | /* |
142 | * We are entering ops into the ftrace_list but another | 138 | * We are entering ops into the ftrace_list but another |
@@ -172,18 +168,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops) | |||
172 | #endif | 168 | #endif |
173 | } | 169 | } |
174 | 170 | ||
175 | spin_unlock(&ftrace_lock); | ||
176 | |||
177 | return 0; | 171 | return 0; |
178 | } | 172 | } |
179 | 173 | ||
180 | static int __unregister_ftrace_function(struct ftrace_ops *ops) | 174 | static int __unregister_ftrace_function(struct ftrace_ops *ops) |
181 | { | 175 | { |
182 | struct ftrace_ops **p; | 176 | struct ftrace_ops **p; |
183 | int ret = 0; | ||
184 | |||
185 | /* should not be called from interrupt context */ | ||
186 | spin_lock(&ftrace_lock); | ||
187 | 177 | ||
188 | /* | 178 | /* |
189 | * If we are removing the last function, then simply point | 179 | * If we are removing the last function, then simply point |
@@ -192,17 +182,15 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
192 | if (ftrace_list == ops && ops->next == &ftrace_list_end) { | 182 | if (ftrace_list == ops && ops->next == &ftrace_list_end) { |
193 | ftrace_trace_function = ftrace_stub; | 183 | ftrace_trace_function = ftrace_stub; |
194 | ftrace_list = &ftrace_list_end; | 184 | ftrace_list = &ftrace_list_end; |
195 | goto out; | 185 | return 0; |
196 | } | 186 | } |
197 | 187 | ||
198 | for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) | 188 | for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next) |
199 | if (*p == ops) | 189 | if (*p == ops) |
200 | break; | 190 | break; |
201 | 191 | ||
202 | if (*p != ops) { | 192 | if (*p != ops) |
203 | ret = -1; | 193 | return -1; |
204 | goto out; | ||
205 | } | ||
206 | 194 | ||
207 | *p = (*p)->next; | 195 | *p = (*p)->next; |
208 | 196 | ||
@@ -223,18 +211,14 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops) | |||
223 | } | 211 | } |
224 | } | 212 | } |
225 | 213 | ||
226 | out: | 214 | return 0; |
227 | spin_unlock(&ftrace_lock); | ||
228 | |||
229 | return ret; | ||
230 | } | 215 | } |
231 | 216 | ||
232 | static void ftrace_update_pid_func(void) | 217 | static void ftrace_update_pid_func(void) |
233 | { | 218 | { |
234 | ftrace_func_t func; | 219 | ftrace_func_t func; |
235 | 220 | ||
236 | /* should not be called from interrupt context */ | 221 | mutex_lock(&ftrace_lock); |
237 | spin_lock(&ftrace_lock); | ||
238 | 222 | ||
239 | if (ftrace_trace_function == ftrace_stub) | 223 | if (ftrace_trace_function == ftrace_stub) |
240 | goto out; | 224 | goto out; |
@@ -256,21 +240,30 @@ static void ftrace_update_pid_func(void) | |||
256 | #endif | 240 | #endif |
257 | 241 | ||
258 | out: | 242 | out: |
259 | spin_unlock(&ftrace_lock); | 243 | mutex_unlock(&ftrace_lock); |
260 | } | 244 | } |
261 | 245 | ||
246 | /* set when tracing only a pid */ | ||
247 | struct pid *ftrace_pid_trace; | ||
248 | static struct pid * const ftrace_swapper_pid = &init_struct_pid; | ||
249 | |||
262 | #ifdef CONFIG_DYNAMIC_FTRACE | 250 | #ifdef CONFIG_DYNAMIC_FTRACE |
251 | |||
263 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD | 252 | #ifndef CONFIG_FTRACE_MCOUNT_RECORD |
264 | # error Dynamic ftrace depends on MCOUNT_RECORD | 253 | # error Dynamic ftrace depends on MCOUNT_RECORD |
265 | #endif | 254 | #endif |
266 | 255 | ||
267 | /* | 256 | static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly; |
268 | * Since MCOUNT_ADDR may point to mcount itself, we do not want | 257 | |
269 | * to get it confused by reading a reference in the code as we | 258 | struct ftrace_func_probe { |
270 | * are parsing on objcopy output of text. Use a variable for | 259 | struct hlist_node node; |
271 | * it instead. | 260 | struct ftrace_probe_ops *ops; |
272 | */ | 261 | unsigned long flags; |
273 | static unsigned long mcount_addr = MCOUNT_ADDR; | 262 | unsigned long ip; |
263 | void *data; | ||
264 | struct rcu_head rcu; | ||
265 | }; | ||
266 | |||
274 | 267 | ||
275 | enum { | 268 | enum { |
276 | FTRACE_ENABLE_CALLS = (1 << 0), | 269 | FTRACE_ENABLE_CALLS = (1 << 0), |
@@ -290,7 +283,7 @@ static DEFINE_MUTEX(ftrace_regex_lock); | |||
290 | 283 | ||
291 | struct ftrace_page { | 284 | struct ftrace_page { |
292 | struct ftrace_page *next; | 285 | struct ftrace_page *next; |
293 | unsigned long index; | 286 | int index; |
294 | struct dyn_ftrace records[]; | 287 | struct dyn_ftrace records[]; |
295 | }; | 288 | }; |
296 | 289 | ||
@@ -305,6 +298,19 @@ static struct ftrace_page *ftrace_pages; | |||
305 | 298 | ||
306 | static struct dyn_ftrace *ftrace_free_records; | 299 | static struct dyn_ftrace *ftrace_free_records; |
307 | 300 | ||
301 | /* | ||
302 | * This expands to a nested pair of for loops. Do not use 'break' to | ||
303 | * leave the iteration: it would only exit the inner loop. Use a goto. | ||
304 | */ | ||
305 | #define do_for_each_ftrace_rec(pg, rec) \ | ||
306 | for (pg = ftrace_pages_start; pg; pg = pg->next) { \ | ||
307 | int _____i; \ | ||
308 | for (_____i = 0; _____i < pg->index; _____i++) { \ | ||
309 | rec = &pg->records[_____i]; | ||
310 | |||
311 | #define while_for_each_ftrace_rec() \ | ||
312 | } \ | ||
313 | } | ||
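
A sketch of the intended usage (hypothetical caller; assumes ftrace_lock is held, since the macro walks ftrace_pages_start). Note the goto: a bare 'break' would only leave the inner loop:

static struct dyn_ftrace *find_rec_by_ip(unsigned long ip)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;

	do_for_each_ftrace_rec(pg, rec) {
		if (rec->ip == ip)
			goto found;
	} while_for_each_ftrace_rec();
	rec = NULL;
found:
	return rec;
}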
308 | 314 | ||
309 | #ifdef CONFIG_KPROBES | 315 | #ifdef CONFIG_KPROBES |
310 | 316 | ||
@@ -349,23 +355,16 @@ void ftrace_release(void *start, unsigned long size) | |||
349 | struct ftrace_page *pg; | 355 | struct ftrace_page *pg; |
350 | unsigned long s = (unsigned long)start; | 356 | unsigned long s = (unsigned long)start; |
351 | unsigned long e = s + size; | 357 | unsigned long e = s + size; |
352 | int i; | ||
353 | 358 | ||
354 | if (ftrace_disabled || !start) | 359 | if (ftrace_disabled || !start) |
355 | return; | 360 | return; |
356 | 361 | ||
357 | /* should not be called from interrupt context */ | 362 | mutex_lock(&ftrace_lock); |
358 | spin_lock(&ftrace_lock); | 363 | do_for_each_ftrace_rec(pg, rec) { |
359 | 364 | if ((rec->ip >= s) && (rec->ip < e)) | |
360 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | 365 | ftrace_free_rec(rec); |
361 | for (i = 0; i < pg->index; i++) { | 366 | } while_for_each_ftrace_rec(); |
362 | rec = &pg->records[i]; | 367 | mutex_unlock(&ftrace_lock); |
363 | |||
364 | if ((rec->ip >= s) && (rec->ip < e)) | ||
365 | ftrace_free_rec(rec); | ||
366 | } | ||
367 | } | ||
368 | spin_unlock(&ftrace_lock); | ||
369 | } | 368 | } |
370 | 369 | ||
371 | static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) | 370 | static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) |
@@ -461,10 +460,10 @@ static void ftrace_bug(int failed, unsigned long ip) | |||
461 | static int | 460 | static int |
462 | __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | 461 | __ftrace_replace_code(struct dyn_ftrace *rec, int enable) |
463 | { | 462 | { |
464 | unsigned long ip, fl; | ||
465 | unsigned long ftrace_addr; | 463 | unsigned long ftrace_addr; |
464 | unsigned long ip, fl; | ||
466 | 465 | ||
467 | ftrace_addr = (unsigned long)ftrace_caller; | 466 | ftrace_addr = (unsigned long)FTRACE_ADDR; |
468 | 467 | ||
469 | ip = rec->ip; | 468 | ip = rec->ip; |
470 | 469 | ||
@@ -473,7 +472,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
473 | * it is not enabled then do nothing. | 472 | * it is not enabled then do nothing. |
474 | * | 473 | * |
475 | * If this record is not to be traced and | 474 | * If this record is not to be traced and |
476 | * it is enabled then disabled it. | 475 | * it is enabled then disable it. |
477 | * | 476 | * |
478 | */ | 477 | */ |
479 | if (rec->flags & FTRACE_FL_NOTRACE) { | 478 | if (rec->flags & FTRACE_FL_NOTRACE) { |
@@ -493,7 +492,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
493 | if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) | 492 | if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) |
494 | return 0; | 493 | return 0; |
495 | 494 | ||
496 | /* Record is not filtered and is not enabled do nothing */ | 495 | /* Record is not filtered or enabled, do nothing */ |
497 | if (!fl) | 496 | if (!fl) |
498 | return 0; | 497 | return 0; |
499 | 498 | ||
@@ -515,7 +514,7 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
515 | 514 | ||
516 | } else { | 515 | } else { |
517 | 516 | ||
518 | /* if record is not enabled do nothing */ | 517 | /* if record is not enabled, do nothing */ |
519 | if (!(rec->flags & FTRACE_FL_ENABLED)) | 518 | if (!(rec->flags & FTRACE_FL_ENABLED)) |
520 | return 0; | 519 | return 0; |
521 | 520 | ||
@@ -531,41 +530,40 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) | |||
531 | 530 | ||
532 | static void ftrace_replace_code(int enable) | 531 | static void ftrace_replace_code(int enable) |
533 | { | 532 | { |
534 | int i, failed; | ||
535 | struct dyn_ftrace *rec; | 533 | struct dyn_ftrace *rec; |
536 | struct ftrace_page *pg; | 534 | struct ftrace_page *pg; |
535 | int failed; | ||
537 | 536 | ||
538 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | 537 | do_for_each_ftrace_rec(pg, rec) { |
539 | for (i = 0; i < pg->index; i++) { | 538 | /* |
540 | rec = &pg->records[i]; | 539 | * Skip over free records and records that have |
541 | 540 | * failed. | |
542 | /* | 541 | */ |
543 | * Skip over free records and records that have | 542 | if (rec->flags & FTRACE_FL_FREE || |
544 | * failed. | 543 | rec->flags & FTRACE_FL_FAILED) |
545 | */ | 544 | continue; |
546 | if (rec->flags & FTRACE_FL_FREE || | ||
547 | rec->flags & FTRACE_FL_FAILED) | ||
548 | continue; | ||
549 | 545 | ||
550 | /* ignore updates to this record's mcount site */ | 546 | /* ignore updates to this record's mcount site */ |
551 | if (get_kprobe((void *)rec->ip)) { | 547 | if (get_kprobe((void *)rec->ip)) { |
552 | freeze_record(rec); | 548 | freeze_record(rec); |
553 | continue; | 549 | continue; |
554 | } else { | 550 | } else { |
555 | unfreeze_record(rec); | 551 | unfreeze_record(rec); |
556 | } | 552 | } |
557 | 553 | ||
558 | failed = __ftrace_replace_code(rec, enable); | 554 | failed = __ftrace_replace_code(rec, enable); |
559 | if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { | 555 | if (failed && (rec->flags & FTRACE_FL_CONVERTED)) { |
560 | rec->flags |= FTRACE_FL_FAILED; | 556 | rec->flags |= FTRACE_FL_FAILED; |
561 | if ((system_state == SYSTEM_BOOTING) || | 557 | if ((system_state == SYSTEM_BOOTING) || |
562 | !core_kernel_text(rec->ip)) { | 558 | !core_kernel_text(rec->ip)) { |
563 | ftrace_free_rec(rec); | 559 | ftrace_free_rec(rec); |
564 | } else | 560 | } else { |
565 | ftrace_bug(failed, rec->ip); | 561 | ftrace_bug(failed, rec->ip); |
566 | } | 562 | /* Stop processing */ |
563 | return; | ||
564 | } | ||
567 | } | 565 | } |
568 | } | 566 | } while_for_each_ftrace_rec(); |
569 | } | 567 | } |
570 | 568 | ||
571 | static int | 569 | static int |
@@ -576,7 +574,7 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | |||
576 | 574 | ||
577 | ip = rec->ip; | 575 | ip = rec->ip; |
578 | 576 | ||
579 | ret = ftrace_make_nop(mod, rec, mcount_addr); | 577 | ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); |
580 | if (ret) { | 578 | if (ret) { |
581 | ftrace_bug(ret, ip); | 579 | ftrace_bug(ret, ip); |
582 | rec->flags |= FTRACE_FL_FAILED; | 580 | rec->flags |= FTRACE_FL_FAILED; |
@@ -585,6 +583,24 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec) | |||
585 | return 1; | 583 | return 1; |
586 | } | 584 | } |
587 | 585 | ||
586 | /* | ||
587 | * archs can override this function if they must do something | ||
588 | * before the code modification is performed. | ||
589 | */ | ||
590 | int __weak ftrace_arch_code_modify_prepare(void) | ||
591 | { | ||
592 | return 0; | ||
593 | } | ||
594 | |||
595 | /* | ||
596 | * archs can override this function if they must do something | ||
597 | * after the code modification is performed. | ||
598 | */ | ||
599 | int __weak ftrace_arch_code_modify_post_process(void) | ||
600 | { | ||
601 | return 0; | ||
602 | } | ||
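
A sketch of why an architecture might override these weak hooks (hypothetical; set_kernel_text_rw()/set_kernel_text_ro() stand in for whatever helpers an arch that write-protects its text would use):

int ftrace_arch_code_modify_prepare(void)
{
	set_kernel_text_rw();	/* let stop_machine() patch .text */
	return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
	set_kernel_text_ro();
	return 0;
}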
603 | |||
588 | static int __ftrace_modify_code(void *data) | 604 | static int __ftrace_modify_code(void *data) |
589 | { | 605 | { |
590 | int *command = data; | 606 | int *command = data; |
@@ -607,7 +623,17 @@ static int __ftrace_modify_code(void *data) | |||
607 | 623 | ||
608 | static void ftrace_run_update_code(int command) | 624 | static void ftrace_run_update_code(int command) |
609 | { | 625 | { |
626 | int ret; | ||
627 | |||
628 | ret = ftrace_arch_code_modify_prepare(); | ||
629 | FTRACE_WARN_ON(ret); | ||
630 | if (ret) | ||
631 | return; | ||
632 | |||
610 | stop_machine(__ftrace_modify_code, &command, NULL); | 633 | stop_machine(__ftrace_modify_code, &command, NULL); |
634 | |||
635 | ret = ftrace_arch_code_modify_post_process(); | ||
636 | FTRACE_WARN_ON(ret); | ||
611 | } | 637 | } |
612 | 638 | ||
613 | static ftrace_func_t saved_ftrace_func; | 639 | static ftrace_func_t saved_ftrace_func; |
@@ -631,13 +657,10 @@ static void ftrace_startup(int command) | |||
631 | if (unlikely(ftrace_disabled)) | 657 | if (unlikely(ftrace_disabled)) |
632 | return; | 658 | return; |
633 | 659 | ||
634 | mutex_lock(&ftrace_start_lock); | ||
635 | ftrace_start_up++; | 660 | ftrace_start_up++; |
636 | command |= FTRACE_ENABLE_CALLS; | 661 | command |= FTRACE_ENABLE_CALLS; |
637 | 662 | ||
638 | ftrace_startup_enable(command); | 663 | ftrace_startup_enable(command); |
639 | |||
640 | mutex_unlock(&ftrace_start_lock); | ||
641 | } | 664 | } |
642 | 665 | ||
643 | static void ftrace_shutdown(int command) | 666 | static void ftrace_shutdown(int command) |
@@ -645,7 +668,6 @@ static void ftrace_shutdown(int command) | |||
645 | if (unlikely(ftrace_disabled)) | 668 | if (unlikely(ftrace_disabled)) |
646 | return; | 669 | return; |
647 | 670 | ||
648 | mutex_lock(&ftrace_start_lock); | ||
649 | ftrace_start_up--; | 671 | ftrace_start_up--; |
650 | if (!ftrace_start_up) | 672 | if (!ftrace_start_up) |
651 | command |= FTRACE_DISABLE_CALLS; | 673 | command |= FTRACE_DISABLE_CALLS; |
@@ -656,11 +678,9 @@ static void ftrace_shutdown(int command) | |||
656 | } | 678 | } |
657 | 679 | ||
658 | if (!command || !ftrace_enabled) | 680 | if (!command || !ftrace_enabled) |
659 | goto out; | 681 | return; |
660 | 682 | ||
661 | ftrace_run_update_code(command); | 683 | ftrace_run_update_code(command); |
662 | out: | ||
663 | mutex_unlock(&ftrace_start_lock); | ||
664 | } | 684 | } |
665 | 685 | ||
666 | static void ftrace_startup_sysctl(void) | 686 | static void ftrace_startup_sysctl(void) |
@@ -670,7 +690,6 @@ static void ftrace_startup_sysctl(void) | |||
670 | if (unlikely(ftrace_disabled)) | 690 | if (unlikely(ftrace_disabled)) |
671 | return; | 691 | return; |
672 | 692 | ||
673 | mutex_lock(&ftrace_start_lock); | ||
674 | /* Force update next time */ | 693 | /* Force update next time */ |
675 | saved_ftrace_func = NULL; | 694 | saved_ftrace_func = NULL; |
676 | /* ftrace_start_up is true if we want ftrace running */ | 695 | /* ftrace_start_up is true if we want ftrace running */ |
@@ -678,7 +697,6 @@ static void ftrace_startup_sysctl(void) | |||
678 | command |= FTRACE_ENABLE_CALLS; | 697 | command |= FTRACE_ENABLE_CALLS; |
679 | 698 | ||
680 | ftrace_run_update_code(command); | 699 | ftrace_run_update_code(command); |
681 | mutex_unlock(&ftrace_start_lock); | ||
682 | } | 700 | } |
683 | 701 | ||
684 | static void ftrace_shutdown_sysctl(void) | 702 | static void ftrace_shutdown_sysctl(void) |
@@ -688,13 +706,11 @@ static void ftrace_shutdown_sysctl(void) | |||
688 | if (unlikely(ftrace_disabled)) | 706 | if (unlikely(ftrace_disabled)) |
689 | return; | 707 | return; |
690 | 708 | ||
691 | mutex_lock(&ftrace_start_lock); | ||
692 | /* ftrace_start_up is true if ftrace is running */ | 709 | /* ftrace_start_up is true if ftrace is running */ |
693 | if (ftrace_start_up) | 710 | if (ftrace_start_up) |
694 | command |= FTRACE_DISABLE_CALLS; | 711 | command |= FTRACE_DISABLE_CALLS; |
695 | 712 | ||
696 | ftrace_run_update_code(command); | 713 | ftrace_run_update_code(command); |
697 | mutex_unlock(&ftrace_start_lock); | ||
698 | } | 714 | } |
699 | 715 | ||
700 | static cycle_t ftrace_update_time; | 716 | static cycle_t ftrace_update_time; |
@@ -781,13 +797,16 @@ enum { | |||
781 | FTRACE_ITER_CONT = (1 << 1), | 797 | FTRACE_ITER_CONT = (1 << 1), |
782 | FTRACE_ITER_NOTRACE = (1 << 2), | 798 | FTRACE_ITER_NOTRACE = (1 << 2), |
783 | FTRACE_ITER_FAILURES = (1 << 3), | 799 | FTRACE_ITER_FAILURES = (1 << 3), |
800 | FTRACE_ITER_PRINTALL = (1 << 4), | ||
801 | FTRACE_ITER_HASH = (1 << 5), | ||
784 | }; | 802 | }; |
785 | 803 | ||
786 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ | 804 | #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */ |
787 | 805 | ||
788 | struct ftrace_iterator { | 806 | struct ftrace_iterator { |
789 | struct ftrace_page *pg; | 807 | struct ftrace_page *pg; |
790 | unsigned idx; | 808 | int hidx; |
809 | int idx; | ||
791 | unsigned flags; | 810 | unsigned flags; |
792 | unsigned char buffer[FTRACE_BUFF_MAX+1]; | 811 | unsigned char buffer[FTRACE_BUFF_MAX+1]; |
793 | unsigned buffer_idx; | 812 | unsigned buffer_idx; |
@@ -795,15 +814,89 @@ struct ftrace_iterator { | |||
795 | }; | 814 | }; |
796 | 815 | ||
797 | static void * | 816 | static void * |
817 | t_hash_next(struct seq_file *m, void *v, loff_t *pos) | ||
818 | { | ||
819 | struct ftrace_iterator *iter = m->private; | ||
820 | struct hlist_node *hnd = v; | ||
821 | struct hlist_head *hhd; | ||
822 | |||
823 | WARN_ON(!(iter->flags & FTRACE_ITER_HASH)); | ||
824 | |||
825 | (*pos)++; | ||
826 | |||
827 | retry: | ||
828 | if (iter->hidx >= FTRACE_FUNC_HASHSIZE) | ||
829 | return NULL; | ||
830 | |||
831 | hhd = &ftrace_func_hash[iter->hidx]; | ||
832 | |||
833 | if (hlist_empty(hhd)) { | ||
834 | iter->hidx++; | ||
835 | hnd = NULL; | ||
836 | goto retry; | ||
837 | } | ||
838 | |||
839 | if (!hnd) | ||
840 | hnd = hhd->first; | ||
841 | else { | ||
842 | hnd = hnd->next; | ||
843 | if (!hnd) { | ||
844 | iter->hidx++; | ||
845 | goto retry; | ||
846 | } | ||
847 | } | ||
848 | |||
849 | return hnd; | ||
850 | } | ||
851 | |||
852 | static void *t_hash_start(struct seq_file *m, loff_t *pos) | ||
853 | { | ||
854 | struct ftrace_iterator *iter = m->private; | ||
855 | void *p = NULL; | ||
856 | |||
857 | iter->flags |= FTRACE_ITER_HASH; | ||
858 | |||
859 | return t_hash_next(m, p, pos); | ||
860 | } | ||
861 | |||
862 | static int t_hash_show(struct seq_file *m, void *v) | ||
863 | { | ||
864 | struct ftrace_func_probe *rec; | ||
865 | struct hlist_node *hnd = v; | ||
866 | char str[KSYM_SYMBOL_LEN]; | ||
867 | |||
868 | rec = hlist_entry(hnd, struct ftrace_func_probe, node); | ||
869 | |||
870 | if (rec->ops->print) | ||
871 | return rec->ops->print(m, rec->ip, rec->ops, rec->data); | ||
872 | |||
873 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
874 | seq_printf(m, "%s:", str); | ||
875 | |||
876 | kallsyms_lookup((unsigned long)rec->ops->func, NULL, NULL, NULL, str); | ||
877 | seq_printf(m, "%s", str); | ||
878 | |||
879 | if (rec->data) | ||
880 | seq_printf(m, ":%p", rec->data); | ||
881 | seq_putc(m, '\n'); | ||
882 | |||
883 | return 0; | ||
884 | } | ||
885 | |||
886 | static void * | ||
798 | t_next(struct seq_file *m, void *v, loff_t *pos) | 887 | t_next(struct seq_file *m, void *v, loff_t *pos) |
799 | { | 888 | { |
800 | struct ftrace_iterator *iter = m->private; | 889 | struct ftrace_iterator *iter = m->private; |
801 | struct dyn_ftrace *rec = NULL; | 890 | struct dyn_ftrace *rec = NULL; |
802 | 891 | ||
892 | if (iter->flags & FTRACE_ITER_HASH) | ||
893 | return t_hash_next(m, v, pos); | ||
894 | |||
803 | (*pos)++; | 895 | (*pos)++; |
804 | 896 | ||
805 | /* should not be called from interrupt context */ | 897 | if (iter->flags & FTRACE_ITER_PRINTALL) |
806 | spin_lock(&ftrace_lock); | 898 | return NULL; |
899 | |||
807 | retry: | 900 | retry: |
808 | if (iter->idx >= iter->pg->index) { | 901 | if (iter->idx >= iter->pg->index) { |
809 | if (iter->pg->next) { | 902 | if (iter->pg->next) { |
@@ -832,7 +925,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos) | |||
832 | goto retry; | 925 | goto retry; |
833 | } | 926 | } |
834 | } | 927 | } |
835 | spin_unlock(&ftrace_lock); | ||
836 | 928 | ||
837 | return rec; | 929 | return rec; |
838 | } | 930 | } |
@@ -842,6 +934,23 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
842 | struct ftrace_iterator *iter = m->private; | 934 | struct ftrace_iterator *iter = m->private; |
843 | void *p = NULL; | 935 | void *p = NULL; |
844 | 936 | ||
937 | mutex_lock(&ftrace_lock); | ||
938 | /* | ||
939 | * For set_ftrace_filter reading, if we have the filter | ||
940 | * off, we can short cut and just print out that all | ||
941 | * functions are enabled. | ||
942 | */ | ||
943 | if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) { | ||
944 | if (*pos > 0) | ||
945 | return t_hash_start(m, pos); | ||
946 | iter->flags |= FTRACE_ITER_PRINTALL; | ||
947 | (*pos)++; | ||
948 | return iter; | ||
949 | } | ||
950 | |||
951 | if (iter->flags & FTRACE_ITER_HASH) | ||
952 | return t_hash_start(m, pos); | ||
953 | |||
845 | if (*pos > 0) { | 954 | if (*pos > 0) { |
846 | if (iter->idx < 0) | 955 | if (iter->idx < 0) |
847 | return p; | 956 | return p; |
@@ -851,18 +960,31 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
851 | 960 | ||
852 | p = t_next(m, p, pos); | 961 | p = t_next(m, p, pos); |
853 | 962 | ||
963 | if (!p) | ||
964 | return t_hash_start(m, pos); | ||
965 | |||
854 | return p; | 966 | return p; |
855 | } | 967 | } |
856 | 968 | ||
857 | static void t_stop(struct seq_file *m, void *p) | 969 | static void t_stop(struct seq_file *m, void *p) |
858 | { | 970 | { |
971 | mutex_unlock(&ftrace_lock); | ||
859 | } | 972 | } |
860 | 973 | ||
861 | static int t_show(struct seq_file *m, void *v) | 974 | static int t_show(struct seq_file *m, void *v) |
862 | { | 975 | { |
976 | struct ftrace_iterator *iter = m->private; | ||
863 | struct dyn_ftrace *rec = v; | 977 | struct dyn_ftrace *rec = v; |
864 | char str[KSYM_SYMBOL_LEN]; | 978 | char str[KSYM_SYMBOL_LEN]; |
865 | 979 | ||
980 | if (iter->flags & FTRACE_ITER_HASH) | ||
981 | return t_hash_show(m, v); | ||
982 | |||
983 | if (iter->flags & FTRACE_ITER_PRINTALL) { | ||
984 | seq_printf(m, "#### all functions enabled ####\n"); | ||
985 | return 0; | ||
986 | } | ||
987 | |||
866 | if (!rec) | 988 | if (!rec) |
867 | return 0; | 989 | return 0; |
868 | 990 | ||
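t_start/t_next/t_show/t_stop above implement the kernel's seq_file iterator contract. The wiring is not visible in this hunk, but it presumably follows the stock seq_file pattern below (struct and field names are the standard API; the variable name is assumed):

        static const struct seq_operations show_ftrace_seq_ops = {
                .start = t_start,       /* take ftrace_lock, position the cursor */
                .next  = t_next,        /* advance one dyn_ftrace record or hash entry */
                .stop  = t_stop,        /* drop ftrace_lock */
                .show  = t_show,        /* print one record, the PRINTALL banner, or a probe */
        };

Because t_start now takes ftrace_lock and t_stop releases it, the lock is held across one full read pass, which is what lets t_next walk the record pages without the old per-call spinlock.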
@@ -941,23 +1063,16 @@ static void ftrace_filter_reset(int enable) | |||
941 | struct ftrace_page *pg; | 1063 | struct ftrace_page *pg; |
942 | struct dyn_ftrace *rec; | 1064 | struct dyn_ftrace *rec; |
943 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 1065 | unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; |
944 | unsigned i; | ||
945 | 1066 | ||
946 | /* should not be called from interrupt context */ | 1067 | mutex_lock(&ftrace_lock); |
947 | spin_lock(&ftrace_lock); | ||
948 | if (enable) | 1068 | if (enable) |
949 | ftrace_filtered = 0; | 1069 | ftrace_filtered = 0; |
950 | pg = ftrace_pages_start; | 1070 | do_for_each_ftrace_rec(pg, rec) { |
951 | while (pg) { | 1071 | if (rec->flags & FTRACE_FL_FAILED) |
952 | for (i = 0; i < pg->index; i++) { | 1072 | continue; |
953 | rec = &pg->records[i]; | 1073 | rec->flags &= ~type; |
954 | if (rec->flags & FTRACE_FL_FAILED) | 1074 | } while_for_each_ftrace_rec(); |
955 | continue; | 1075 | mutex_unlock(&ftrace_lock); |
956 | rec->flags &= ~type; | ||
957 | } | ||
958 | pg = pg->next; | ||
959 | } | ||
960 | spin_unlock(&ftrace_lock); | ||
961 | } | 1076 | } |
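do_for_each_ftrace_rec()/while_for_each_ftrace_rec() replace the open-coded page walk deleted on the left-hand side. Judging from that removed loop, the macro pair presumably expands to the same nested iteration over the record pages, roughly:

        /* sketch of the assumed expansion; mirrors the removed pg/index loop */
        #define do_for_each_ftrace_rec(pg, rec)                                 \
                for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
                        int _____i;                                             \
                        for (_____i = 0; _____i < pg->index; _____i++) {        \
                                rec = &pg->records[_____i];

        #define while_for_each_ftrace_rec()             \
                        }                               \
                }

With this shape, a continue inside the body skips to the next record, which is exactly how the FTRACE_FL_FAILED checks above use it.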
962 | 1077 | ||
963 | static int | 1078 | static int |
@@ -1038,86 +1153,536 @@ enum { | |||
1038 | MATCH_END_ONLY, | 1153 | MATCH_END_ONLY, |
1039 | }; | 1154 | }; |
1040 | 1155 | ||
1041 | static void | 1156 | /* |
1042 | ftrace_match(unsigned char *buff, int len, int enable) | 1157 | * (static function - no need for kernel doc) |
1158 | * | ||
1159 | * Pass in a buffer containing a glob and this function will | ||
1160 | * set search to point to the search part of the buffer and | ||
1161 | * return the type of search it is (see enum above). | ||
1162 | * This does modify buff. | ||
1163 | * | ||
1164 | * Returns enum type. | ||
1165 | * search is set to the pointer to use for comparison. | ||
1166 | * not is set to 1 if buff started with a '!', | ||
1167 | * 0 otherwise. | ||
1168 | */ | ||
1169 | static int | ||
1170 | ftrace_setup_glob(char *buff, int len, char **search, int *not) | ||
1043 | { | 1171 | { |
1044 | char str[KSYM_SYMBOL_LEN]; | ||
1045 | char *search = NULL; | ||
1046 | struct ftrace_page *pg; | ||
1047 | struct dyn_ftrace *rec; | ||
1048 | int type = MATCH_FULL; | 1172 | int type = MATCH_FULL; |
1049 | unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | 1173 | int i; |
1050 | unsigned i, match = 0, search_len = 0; | ||
1051 | int not = 0; | ||
1052 | 1174 | ||
1053 | if (buff[0] == '!') { | 1175 | if (buff[0] == '!') { |
1054 | not = 1; | 1176 | *not = 1; |
1055 | buff++; | 1177 | buff++; |
1056 | len--; | 1178 | len--; |
1057 | } | 1179 | } else |
1180 | *not = 0; | ||
1181 | |||
1182 | *search = buff; | ||
1058 | 1183 | ||
1059 | for (i = 0; i < len; i++) { | 1184 | for (i = 0; i < len; i++) { |
1060 | if (buff[i] == '*') { | 1185 | if (buff[i] == '*') { |
1061 | if (!i) { | 1186 | if (!i) { |
1062 | search = buff + i + 1; | 1187 | *search = buff + 1; |
1063 | type = MATCH_END_ONLY; | 1188 | type = MATCH_END_ONLY; |
1064 | search_len = len - (i + 1); | ||
1065 | } else { | 1189 | } else { |
1066 | if (type == MATCH_END_ONLY) { | 1190 | if (type == MATCH_END_ONLY) |
1067 | type = MATCH_MIDDLE_ONLY; | 1191 | type = MATCH_MIDDLE_ONLY; |
1068 | } else { | 1192 | else |
1069 | match = i; | ||
1070 | type = MATCH_FRONT_ONLY; | 1193 | type = MATCH_FRONT_ONLY; |
1071 | } | ||
1072 | buff[i] = 0; | 1194 | buff[i] = 0; |
1073 | break; | 1195 | break; |
1074 | } | 1196 | } |
1075 | } | 1197 | } |
1076 | } | 1198 | } |
1077 | 1199 | ||
1078 | /* should not be called from interrupt context */ | 1200 | return type; |
1079 | spin_lock(&ftrace_lock); | 1201 | } |
1080 | if (enable) | 1202 | |
1081 | ftrace_filtered = 1; | 1203 | static int ftrace_match(char *str, char *regex, int len, int type) |
1082 | pg = ftrace_pages_start; | 1204 | { |
1083 | while (pg) { | 1205 | int matched = 0; |
1084 | for (i = 0; i < pg->index; i++) { | 1206 | char *ptr; |
1085 | int matched = 0; | 1207 | |
1086 | char *ptr; | 1208 | switch (type) { |
1087 | 1209 | case MATCH_FULL: | |
1088 | rec = &pg->records[i]; | 1210 | if (strcmp(str, regex) == 0) |
1089 | if (rec->flags & FTRACE_FL_FAILED) | 1211 | matched = 1; |
1212 | break; | ||
1213 | case MATCH_FRONT_ONLY: | ||
1214 | if (strncmp(str, regex, len) == 0) | ||
1215 | matched = 1; | ||
1216 | break; | ||
1217 | case MATCH_MIDDLE_ONLY: | ||
1218 | if (strstr(str, regex)) | ||
1219 | matched = 1; | ||
1220 | break; | ||
1221 | case MATCH_END_ONLY: | ||
1222 | ptr = strstr(str, regex); | ||
1223 | if (ptr && (ptr[len] == 0)) | ||
1224 | matched = 1; | ||
1225 | break; | ||
1226 | } | ||
1227 | |||
1228 | return matched; | ||
1229 | } | ||
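To see the two halves together: ftrace_setup_glob() strips the '*' and classifies the pattern, then ftrace_match() applies the corresponding string comparison. A standalone userspace demo of the same matching rules (not kernel code; the enum values mirror the ones used here):

        #include <stdio.h>
        #include <string.h>

        enum { MATCH_FULL, MATCH_FRONT_ONLY, MATCH_MIDDLE_ONLY, MATCH_END_ONLY };

        /* same contract as ftrace_match() above */
        static int match(const char *str, const char *regex, int len, int type)
        {
                const char *ptr;

                switch (type) {
                case MATCH_FULL:        return strcmp(str, regex) == 0;
                case MATCH_FRONT_ONLY:  return strncmp(str, regex, len) == 0;
                case MATCH_MIDDLE_ONLY: return strstr(str, regex) != NULL;
                case MATCH_END_ONLY:
                        ptr = strstr(str, regex);
                        return ptr && ptr[len] == '\0';
                }
                return 0;
        }

        int main(void)
        {
                /* "sched_*" -> FRONT_ONLY, "*_fair" -> END_ONLY, "*idle*" -> MIDDLE_ONLY */
                printf("%d\n", match("sched_switch", "sched_", 6, MATCH_FRONT_ONLY));      /* 1 */
                printf("%d\n", match("pick_next_task_fair", "_fair", 5, MATCH_END_ONLY));  /* 1 */
                printf("%d\n", match("cpu_idle_loop", "idle", 4, MATCH_MIDDLE_ONLY));      /* 1 */
                return 0;
        }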
1230 | |||
1231 | static int | ||
1232 | ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type) | ||
1233 | { | ||
1234 | char str[KSYM_SYMBOL_LEN]; | ||
1235 | |||
1236 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
1237 | return ftrace_match(str, regex, len, type); | ||
1238 | } | ||
1239 | |||
1240 | static void ftrace_match_records(char *buff, int len, int enable) | ||
1241 | { | ||
1242 | unsigned int search_len; | ||
1243 | struct ftrace_page *pg; | ||
1244 | struct dyn_ftrace *rec; | ||
1245 | unsigned long flag; | ||
1246 | char *search; | ||
1247 | int type; | ||
1248 | int not; | ||
1249 | |||
1250 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | ||
1251 | type = ftrace_setup_glob(buff, len, &search, ¬); | ||
1252 | |||
1253 | search_len = strlen(search); | ||
1254 | |||
1255 | mutex_lock(&ftrace_lock); | ||
1256 | do_for_each_ftrace_rec(pg, rec) { | ||
1257 | |||
1258 | if (rec->flags & FTRACE_FL_FAILED) | ||
1259 | continue; | ||
1260 | |||
1261 | if (ftrace_match_record(rec, search, search_len, type)) { | ||
1262 | if (not) | ||
1263 | rec->flags &= ~flag; | ||
1264 | else | ||
1265 | rec->flags |= flag; | ||
1266 | } | ||
1267 | /* | ||
1268 | * Only enable filtering if we have a function that | ||
1269 | * is filtered on. | ||
1270 | */ | ||
1271 | if (enable && (rec->flags & FTRACE_FL_FILTER)) | ||
1272 | ftrace_filtered = 1; | ||
1273 | } while_for_each_ftrace_rec(); | ||
1274 | mutex_unlock(&ftrace_lock); | ||
1275 | } | ||
1276 | |||
1277 | static int | ||
1278 | ftrace_match_module_record(struct dyn_ftrace *rec, char *mod, | ||
1279 | char *regex, int len, int type) | ||
1280 | { | ||
1281 | char str[KSYM_SYMBOL_LEN]; | ||
1282 | char *modname; | ||
1283 | |||
1284 | kallsyms_lookup(rec->ip, NULL, NULL, &modname, str); | ||
1285 | |||
1286 | if (!modname || strcmp(modname, mod)) | ||
1287 | return 0; | ||
1288 | |||
1289 | /* blank search means to match all funcs in the mod */ | ||
1290 | if (len) | ||
1291 | return ftrace_match(str, regex, len, type); | ||
1292 | else | ||
1293 | return 1; | ||
1294 | } | ||
1295 | |||
1296 | static void ftrace_match_module_records(char *buff, char *mod, int enable) | ||
1297 | { | ||
1298 | unsigned search_len = 0; | ||
1299 | struct ftrace_page *pg; | ||
1300 | struct dyn_ftrace *rec; | ||
1301 | int type = MATCH_FULL; | ||
1302 | char *search = buff; | ||
1303 | unsigned long flag; | ||
1304 | int not = 0; | ||
1305 | |||
1306 | flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; | ||
1307 | |||
1308 | /* blank or '*' mean the same */ | ||
1309 | if (strcmp(buff, "*") == 0) | ||
1310 | buff[0] = 0; | ||
1311 | |||
1312 | /* handle the case of 'don't filter this module' */ | ||
1313 | if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) { | ||
1314 | buff[0] = 0; | ||
1315 | not = 1; | ||
1316 | } | ||
1317 | |||
1318 | if (strlen(buff)) { | ||
1319 | type = ftrace_setup_glob(buff, strlen(buff), &search, ¬); | ||
1320 | search_len = strlen(search); | ||
1321 | } | ||
1322 | |||
1323 | mutex_lock(&ftrace_lock); | ||
1324 | do_for_each_ftrace_rec(pg, rec) { | ||
1325 | |||
1326 | if (rec->flags & FTRACE_FL_FAILED) | ||
1327 | continue; | ||
1328 | |||
1329 | if (ftrace_match_module_record(rec, mod, | ||
1330 | search, search_len, type)) { | ||
1331 | if (not) | ||
1332 | rec->flags &= ~flag; | ||
1333 | else | ||
1334 | rec->flags |= flag; | ||
1335 | } | ||
1336 | if (enable && (rec->flags & FTRACE_FL_FILTER)) | ||
1337 | ftrace_filtered = 1; | ||
1338 | |||
1339 | } while_for_each_ftrace_rec(); | ||
1340 | mutex_unlock(&ftrace_lock); | ||
1341 | } | ||
1342 | |||
1343 | /* | ||
1344 | * We register the module command as a template to show others how | ||
1345 | * to register a command as well. | ||
1346 | */ | ||
1347 | |||
1348 | static int | ||
1349 | ftrace_mod_callback(char *func, char *cmd, char *param, int enable) | ||
1350 | { | ||
1351 | char *mod; | ||
1352 | |||
1353 | /* | ||
1354 | * cmd == 'mod' because we only registered this func | ||
1355 | * for the 'mod' ftrace_func_command. | ||
1356 | * But if you register one func with multiple commands, | ||
1357 | * you can tell which command was used by the cmd | ||
1358 | * parameter. | ||
1359 | */ | ||
1360 | |||
1361 | /* we must have a module name */ | ||
1362 | if (!param) | ||
1363 | return -EINVAL; | ||
1364 | |||
1365 | mod = strsep(¶m, ":"); | ||
1366 | if (!strlen(mod)) | ||
1367 | return -EINVAL; | ||
1368 | |||
1369 | ftrace_match_module_records(func, mod, enable); | ||
1370 | return 0; | ||
1371 | } | ||
1372 | |||
1373 | static struct ftrace_func_command ftrace_mod_cmd = { | ||
1374 | .name = "mod", | ||
1375 | .func = ftrace_mod_callback, | ||
1376 | }; | ||
1377 | |||
1378 | static int __init ftrace_mod_cmd_init(void) | ||
1379 | { | ||
1380 | return register_ftrace_command(&ftrace_mod_cmd); | ||
1381 | } | ||
1382 | device_initcall(ftrace_mod_cmd_init); | ||
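Per the template note above, a third-party command follows the same three steps: a callback, a struct ftrace_func_command, and an initcall that registers it. In the sketch below only register_ftrace_command() and the struct layout come from this patch; the "foo" command and its body are invented for illustration:

        /* hypothetical command; the name and callback body are made up */
        static int
        ftrace_foo_callback(char *func, char *cmd, char *param, int enable)
        {
                /*
                 * func  = the glob before the first ':'
                 * cmd   = "foo", the name this callback was registered under
                 * param = whatever followed "foo:", or NULL if nothing did
                 */
                if (!param)
                        return -EINVAL;
                /* ... act on the functions matching 'func' here ... */
                return 0;
        }

        static struct ftrace_func_command ftrace_foo_cmd = {
                .name   = "foo",
                .func   = ftrace_foo_callback,
        };

        static int __init ftrace_foo_cmd_init(void)
        {
                return register_ftrace_command(&ftrace_foo_cmd);
        }
        device_initcall(ftrace_foo_cmd_init);

register_ftrace_command() (defined later in this patch) returns -EBUSY if another command already claimed the same name.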
1383 | |||
1384 | static void | ||
1385 | function_trace_probe_call(unsigned long ip, unsigned long parent_ip) | ||
1386 | { | ||
1387 | struct ftrace_func_probe *entry; | ||
1388 | struct hlist_head *hhd; | ||
1389 | struct hlist_node *n; | ||
1390 | unsigned long key; | ||
1391 | int resched; | ||
1392 | |||
1393 | key = hash_long(ip, FTRACE_HASH_BITS); | ||
1394 | |||
1395 | hhd = &ftrace_func_hash[key]; | ||
1396 | |||
1397 | if (hlist_empty(hhd)) | ||
1398 | return; | ||
1399 | |||
1400 | /* | ||
1401 | * Disable preemption for these calls to prevent an RCU grace | ||
1402 | * period. This syncs the hash iteration and freeing of items | ||
1403 | * on the hash. rcu_read_lock is too dangerous here. | ||
1404 | */ | ||
1405 | resched = ftrace_preempt_disable(); | ||
1406 | hlist_for_each_entry_rcu(entry, n, hhd, node) { | ||
1407 | if (entry->ip == ip) | ||
1408 | entry->ops->func(ip, parent_ip, &entry->data); | ||
1409 | } | ||
1410 | ftrace_preempt_enable(resched); | ||
1411 | } | ||
1412 | |||
1413 | static struct ftrace_ops trace_probe_ops __read_mostly = | ||
1414 | { | ||
1415 | .func = function_trace_probe_call, | ||
1416 | }; | ||
1417 | |||
1418 | static int ftrace_probe_registered; | ||
1419 | |||
1420 | static void __enable_ftrace_function_probe(void) | ||
1421 | { | ||
1422 | int i; | ||
1423 | |||
1424 | if (ftrace_probe_registered) | ||
1425 | return; | ||
1426 | |||
1427 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | ||
1428 | struct hlist_head *hhd = &ftrace_func_hash[i]; | ||
1429 | if (hhd->first) | ||
1430 | break; | ||
1431 | } | ||
1432 | /* Nothing registered? */ | ||
1433 | if (i == FTRACE_FUNC_HASHSIZE) | ||
1434 | return; | ||
1435 | |||
1436 | __register_ftrace_function(&trace_probe_ops); | ||
1437 | ftrace_startup(0); | ||
1438 | ftrace_probe_registered = 1; | ||
1439 | } | ||
1440 | |||
1441 | static void __disable_ftrace_function_probe(void) | ||
1442 | { | ||
1443 | int i; | ||
1444 | |||
1445 | if (!ftrace_probe_registered) | ||
1446 | return; | ||
1447 | |||
1448 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | ||
1449 | struct hlist_head *hhd = &ftrace_func_hash[i]; | ||
1450 | if (hhd->first) | ||
1451 | return; | ||
1452 | } | ||
1453 | |||
1454 | /* no more funcs left */ | ||
1455 | __unregister_ftrace_function(&trace_probe_ops); | ||
1456 | ftrace_shutdown(0); | ||
1457 | ftrace_probe_registered = 0; | ||
1458 | } | ||
1459 | |||
1460 | |||
1461 | static void ftrace_free_entry_rcu(struct rcu_head *rhp) | ||
1462 | { | ||
1463 | struct ftrace_func_probe *entry = | ||
1464 | container_of(rhp, struct ftrace_func_probe, rcu); | ||
1465 | |||
1466 | if (entry->ops->free) | ||
1467 | entry->ops->free(&entry->data); | ||
1468 | kfree(entry); | ||
1469 | } | ||
1470 | |||
1471 | |||
1472 | int | ||
1473 | register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
1474 | void *data) | ||
1475 | { | ||
1476 | struct ftrace_func_probe *entry; | ||
1477 | struct ftrace_page *pg; | ||
1478 | struct dyn_ftrace *rec; | ||
1479 | int type, len, not; | ||
1480 | unsigned long key; | ||
1481 | int count = 0; | ||
1482 | char *search; | ||
1483 | |||
1484 | type = ftrace_setup_glob(glob, strlen(glob), &search, ¬); | ||
1485 | len = strlen(search); | ||
1486 | |||
1487 | /* we do not support '!' for function probes */ | ||
1488 | if (WARN_ON(not)) | ||
1489 | return -EINVAL; | ||
1490 | |||
1491 | mutex_lock(&ftrace_lock); | ||
1492 | do_for_each_ftrace_rec(pg, rec) { | ||
1493 | |||
1494 | if (rec->flags & FTRACE_FL_FAILED) | ||
1495 | continue; | ||
1496 | |||
1497 | if (!ftrace_match_record(rec, search, len, type)) | ||
1498 | continue; | ||
1499 | |||
1500 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); | ||
1501 | if (!entry) { | ||
1502 | /* If we did not process any, then return error */ | ||
1503 | if (!count) | ||
1504 | count = -ENOMEM; | ||
1505 | goto out_unlock; | ||
1506 | } | ||
1507 | |||
1508 | count++; | ||
1509 | |||
1510 | entry->data = data; | ||
1511 | |||
1512 | /* | ||
1513 | * The caller might want to do something special | ||
1514 | * for each function we find. We call the callback | ||
1515 | * to give the caller an opportunity to do so. | ||
1516 | */ | ||
1517 | if (ops->callback) { | ||
1518 | if (ops->callback(rec->ip, &entry->data) < 0) { | ||
1519 | /* caller does not like this func */ | ||
1520 | kfree(entry); | ||
1090 | continue; | 1521 | continue; |
1091 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | ||
1092 | switch (type) { | ||
1093 | case MATCH_FULL: | ||
1094 | if (strcmp(str, buff) == 0) | ||
1095 | matched = 1; | ||
1096 | break; | ||
1097 | case MATCH_FRONT_ONLY: | ||
1098 | if (memcmp(str, buff, match) == 0) | ||
1099 | matched = 1; | ||
1100 | break; | ||
1101 | case MATCH_MIDDLE_ONLY: | ||
1102 | if (strstr(str, search)) | ||
1103 | matched = 1; | ||
1104 | break; | ||
1105 | case MATCH_END_ONLY: | ||
1106 | ptr = strstr(str, search); | ||
1107 | if (ptr && (ptr[search_len] == 0)) | ||
1108 | matched = 1; | ||
1109 | break; | ||
1110 | } | 1522 | } |
1111 | if (matched) { | 1523 | } |
1112 | if (not) | 1524 | |
1113 | rec->flags &= ~flag; | 1525 | entry->ops = ops; |
1114 | else | 1526 | entry->ip = rec->ip; |
1115 | rec->flags |= flag; | 1527 | |
1528 | key = hash_long(entry->ip, FTRACE_HASH_BITS); | ||
1529 | hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]); | ||
1530 | |||
1531 | } while_for_each_ftrace_rec(); | ||
1532 | __enable_ftrace_function_probe(); | ||
1533 | |||
1534 | out_unlock: | ||
1535 | mutex_unlock(&ftrace_lock); | ||
1536 | |||
1537 | return count; | ||
1538 | } | ||
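A hedged sketch of a caller of register_ftrace_function_probe(). The ops fields (.func here; .callback, .free and .print appear elsewhere in this hunk) and the register call are from this patch; the counting probe itself is invented. Note the glob must live in writable storage, since ftrace_setup_glob() truncates it in place:

        /* invented example: count how often each matched function fires */
        static void
        my_probe_func(unsigned long ip, unsigned long parent_ip, void **data)
        {
                unsigned long *count = *data;

                (*count)++;
        }

        static struct ftrace_probe_ops my_probe_ops = {
                .func = my_probe_func,
        };

        static unsigned long my_count;
        static char my_glob[] = "sched_*";      /* writable: parsed in place */

        static int __init my_probe_init(void)
        {
                int ret;

                /* returns the number of functions hooked, or a negative errno */
                ret = register_ftrace_function_probe(my_glob, &my_probe_ops,
                                                     &my_count);
                return ret < 0 ? ret : 0;
        }

The matching unregister calls below tear this down again: by ops plus data, by ops alone, or wholesale by glob.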
1539 | |||
1540 | enum { | ||
1541 | PROBE_TEST_FUNC = 1, | ||
1542 | PROBE_TEST_DATA = 2 | ||
1543 | }; | ||
1544 | |||
1545 | static void | ||
1546 | __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
1547 | void *data, int flags) | ||
1548 | { | ||
1549 | struct ftrace_func_probe *entry; | ||
1550 | struct hlist_node *n, *tmp; | ||
1551 | char str[KSYM_SYMBOL_LEN]; | ||
1552 | int type = MATCH_FULL; | ||
1553 | int i, len = 0; | ||
1554 | char *search; | ||
1555 | |||
1556 | if (glob && (strcmp(glob, "*") == 0 || !strlen(glob))) | ||
1557 | glob = NULL; | ||
1558 | else { | ||
1559 | int not; | ||
1560 | |||
1561 | type = ftrace_setup_glob(glob, strlen(glob), &search, ¬); | ||
1562 | len = strlen(search); | ||
1563 | |||
1564 | /* we do not support '!' for function probes */ | ||
1565 | if (WARN_ON(not)) | ||
1566 | return; | ||
1567 | } | ||
1568 | |||
1569 | mutex_lock(&ftrace_lock); | ||
1570 | for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { | ||
1571 | struct hlist_head *hhd = &ftrace_func_hash[i]; | ||
1572 | |||
1573 | hlist_for_each_entry_safe(entry, n, tmp, hhd, node) { | ||
1574 | |||
1575 | /* break up if statements for readability */ | ||
1576 | if ((flags & PROBE_TEST_FUNC) && entry->ops != ops) | ||
1577 | continue; | ||
1578 | |||
1579 | if ((flags & PROBE_TEST_DATA) && entry->data != data) | ||
1580 | continue; | ||
1581 | |||
1582 | /* do this last, since it is the most expensive */ | ||
1583 | if (glob) { | ||
1584 | kallsyms_lookup(entry->ip, NULL, NULL, | ||
1585 | NULL, str); | ||
1586 | if (!ftrace_match(str, glob, len, type)) | ||
1587 | continue; | ||
1116 | } | 1588 | } |
1589 | |||
1590 | hlist_del(&entry->node); | ||
1591 | call_rcu(&entry->rcu, ftrace_free_entry_rcu); | ||
1117 | } | 1592 | } |
1118 | pg = pg->next; | ||
1119 | } | 1593 | } |
1120 | spin_unlock(&ftrace_lock); | 1594 | __disable_ftrace_function_probe(); |
1595 | mutex_unlock(&ftrace_lock); | ||
1596 | } | ||
1597 | |||
1598 | void | ||
1599 | unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | ||
1600 | void *data) | ||
1601 | { | ||
1602 | __unregister_ftrace_function_probe(glob, ops, data, | ||
1603 | PROBE_TEST_FUNC | PROBE_TEST_DATA); | ||
1604 | } | ||
1605 | |||
1606 | void | ||
1607 | unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops) | ||
1608 | { | ||
1609 | __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC); | ||
1610 | } | ||
1611 | |||
1612 | void unregister_ftrace_function_probe_all(char *glob) | ||
1613 | { | ||
1614 | __unregister_ftrace_function_probe(glob, NULL, NULL, 0); | ||
1615 | } | ||
1616 | |||
1617 | static LIST_HEAD(ftrace_commands); | ||
1618 | static DEFINE_MUTEX(ftrace_cmd_mutex); | ||
1619 | |||
1620 | int register_ftrace_command(struct ftrace_func_command *cmd) | ||
1621 | { | ||
1622 | struct ftrace_func_command *p; | ||
1623 | int ret = 0; | ||
1624 | |||
1625 | mutex_lock(&ftrace_cmd_mutex); | ||
1626 | list_for_each_entry(p, &ftrace_commands, list) { | ||
1627 | if (strcmp(cmd->name, p->name) == 0) { | ||
1628 | ret = -EBUSY; | ||
1629 | goto out_unlock; | ||
1630 | } | ||
1631 | } | ||
1632 | list_add(&cmd->list, &ftrace_commands); | ||
1633 | out_unlock: | ||
1634 | mutex_unlock(&ftrace_cmd_mutex); | ||
1635 | |||
1636 | return ret; | ||
1637 | } | ||
1638 | |||
1639 | int unregister_ftrace_command(struct ftrace_func_command *cmd) | ||
1640 | { | ||
1641 | struct ftrace_func_command *p, *n; | ||
1642 | int ret = -ENODEV; | ||
1643 | |||
1644 | mutex_lock(&ftrace_cmd_mutex); | ||
1645 | list_for_each_entry_safe(p, n, &ftrace_commands, list) { | ||
1646 | if (strcmp(cmd->name, p->name) == 0) { | ||
1647 | ret = 0; | ||
1648 | list_del_init(&p->list); | ||
1649 | goto out_unlock; | ||
1650 | } | ||
1651 | } | ||
1652 | out_unlock: | ||
1653 | mutex_unlock(&ftrace_cmd_mutex); | ||
1654 | |||
1655 | return ret; | ||
1656 | } | ||
1657 | |||
1658 | static int ftrace_process_regex(char *buff, int len, int enable) | ||
1659 | { | ||
1660 | char *func, *command, *next = buff; | ||
1661 | struct ftrace_func_command *p; | ||
1662 | int ret = -EINVAL; | ||
1663 | |||
1664 | func = strsep(&next, ":"); | ||
1665 | |||
1666 | if (!next) { | ||
1667 | ftrace_match_records(func, len, enable); | ||
1668 | return 0; | ||
1669 | } | ||
1670 | |||
1671 | /* command found */ | ||
1672 | |||
1673 | command = strsep(&next, ":"); | ||
1674 | |||
1675 | mutex_lock(&ftrace_cmd_mutex); | ||
1676 | list_for_each_entry(p, &ftrace_commands, list) { | ||
1677 | if (strcmp(p->name, command) == 0) { | ||
1678 | ret = p->func(func, command, next, enable); | ||
1679 | goto out_unlock; | ||
1680 | } | ||
1681 | } | ||
1682 | out_unlock: | ||
1683 | mutex_unlock(&ftrace_cmd_mutex); | ||
1684 | |||
1685 | return ret; | ||
1121 | } | 1686 | } |
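ftrace_process_regex() thus splits the write buffer at ':' into func, command and param. The same strsep() sequence in a standalone userspace demo (strsep here is the glibc/BSD extension, not the kernel's copy):

        #define _GNU_SOURCE             /* for strsep() on glibc */
        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char buff[] = "hrtimer_*:mod:snd_hda_intel";   /* sample filter line */
                char *next = buff;
                char *func, *command;

                func = strsep(&next, ":");      /* "hrtimer_*"                    */
                command = strsep(&next, ":");   /* "mod"; next -> "snd_hda_intel" */

                printf("func=%s command=%s param=%s\n", func, command, next);
                return 0;
        }

A bare pattern with no ':' leaves next NULL after the first strsep(), which is the plain ftrace_match_records() path above.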
1122 | 1687 | ||
1123 | static ssize_t | 1688 | static ssize_t |
@@ -1187,7 +1752,10 @@ ftrace_regex_write(struct file *file, const char __user *ubuf, | |||
1187 | if (isspace(ch)) { | 1752 | if (isspace(ch)) { |
1188 | iter->filtered++; | 1753 | iter->filtered++; |
1189 | iter->buffer[iter->buffer_idx] = 0; | 1754 | iter->buffer[iter->buffer_idx] = 0; |
1190 | ftrace_match(iter->buffer, iter->buffer_idx, enable); | 1755 | ret = ftrace_process_regex(iter->buffer, |
1756 | iter->buffer_idx, enable); | ||
1757 | if (ret) | ||
1758 | goto out; | ||
1191 | iter->buffer_idx = 0; | 1759 | iter->buffer_idx = 0; |
1192 | } else | 1760 | } else |
1193 | iter->flags |= FTRACE_ITER_CONT; | 1761 | iter->flags |= FTRACE_ITER_CONT; |
@@ -1226,7 +1794,7 @@ ftrace_set_regex(unsigned char *buf, int len, int reset, int enable) | |||
1226 | if (reset) | 1794 | if (reset) |
1227 | ftrace_filter_reset(enable); | 1795 | ftrace_filter_reset(enable); |
1228 | if (buf) | 1796 | if (buf) |
1229 | ftrace_match(buf, len, enable); | 1797 | ftrace_match_records(buf, len, enable); |
1230 | mutex_unlock(&ftrace_regex_lock); | 1798 | mutex_unlock(&ftrace_regex_lock); |
1231 | } | 1799 | } |
1232 | 1800 | ||
@@ -1276,15 +1844,13 @@ ftrace_regex_release(struct inode *inode, struct file *file, int enable) | |||
1276 | if (iter->buffer_idx) { | 1844 | if (iter->buffer_idx) { |
1277 | iter->filtered++; | 1845 | iter->filtered++; |
1278 | iter->buffer[iter->buffer_idx] = 0; | 1846 | iter->buffer[iter->buffer_idx] = 0; |
1279 | ftrace_match(iter->buffer, iter->buffer_idx, enable); | 1847 | ftrace_match_records(iter->buffer, iter->buffer_idx, enable); |
1280 | } | 1848 | } |
1281 | 1849 | ||
1282 | mutex_lock(&ftrace_sysctl_lock); | 1850 | mutex_lock(&ftrace_lock); |
1283 | mutex_lock(&ftrace_start_lock); | ||
1284 | if (ftrace_start_up && ftrace_enabled) | 1851 | if (ftrace_start_up && ftrace_enabled) |
1285 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); | 1852 | ftrace_run_update_code(FTRACE_ENABLE_CALLS); |
1286 | mutex_unlock(&ftrace_start_lock); | 1853 | mutex_unlock(&ftrace_lock); |
1287 | mutex_unlock(&ftrace_sysctl_lock); | ||
1288 | 1854 | ||
1289 | kfree(iter); | 1855 | kfree(iter); |
1290 | mutex_unlock(&ftrace_regex_lock); | 1856 | mutex_unlock(&ftrace_regex_lock); |
@@ -1360,6 +1926,10 @@ static void *g_start(struct seq_file *m, loff_t *pos) | |||
1360 | 1926 | ||
1361 | mutex_lock(&graph_lock); | 1927 | mutex_lock(&graph_lock); |
1362 | 1928 | ||
1929 | /* Nothing is set; tell g_show to print that all functions are enabled */ | ||
1930 | if (!ftrace_graph_count && !*pos) | ||
1931 | return (void *)1; | ||
1932 | |||
1363 | p = g_next(m, p, pos); | 1933 | p = g_next(m, p, pos); |
1364 | 1934 | ||
1365 | return p; | 1935 | return p; |
@@ -1378,6 +1948,11 @@ static int g_show(struct seq_file *m, void *v) | |||
1378 | if (!ptr) | 1948 | if (!ptr) |
1379 | return 0; | 1949 | return 0; |
1380 | 1950 | ||
1951 | if (ptr == (unsigned long *)1) { | ||
1952 | seq_printf(m, "#### all functions enabled ####\n"); | ||
1953 | return 0; | ||
1954 | } | ||
1955 | |||
1381 | kallsyms_lookup(*ptr, NULL, NULL, NULL, str); | 1956 | kallsyms_lookup(*ptr, NULL, NULL, NULL, str); |
1382 | 1957 | ||
1383 | seq_printf(m, "%s\n", str); | 1958 | seq_printf(m, "%s\n", str); |
@@ -1431,42 +2006,52 @@ ftrace_graph_read(struct file *file, char __user *ubuf, | |||
1431 | } | 2006 | } |
1432 | 2007 | ||
1433 | static int | 2008 | static int |
1434 | ftrace_set_func(unsigned long *array, int idx, char *buffer) | 2009 | ftrace_set_func(unsigned long *array, int *idx, char *buffer) |
1435 | { | 2010 | { |
1436 | char str[KSYM_SYMBOL_LEN]; | ||
1437 | struct dyn_ftrace *rec; | 2011 | struct dyn_ftrace *rec; |
1438 | struct ftrace_page *pg; | 2012 | struct ftrace_page *pg; |
2013 | int search_len; | ||
1439 | int found = 0; | 2014 | int found = 0; |
1440 | int i, j; | 2015 | int type, not; |
2016 | char *search; | ||
2017 | bool exists; | ||
2018 | int i; | ||
1441 | 2019 | ||
1442 | if (ftrace_disabled) | 2020 | if (ftrace_disabled) |
1443 | return -ENODEV; | 2021 | return -ENODEV; |
1444 | 2022 | ||
1445 | /* should not be called from interrupt context */ | 2023 | /* decode regex */ |
1446 | spin_lock(&ftrace_lock); | 2024 | type = ftrace_setup_glob(buffer, strlen(buffer), &search, ¬); |
2025 | if (not) | ||
2026 | return -EINVAL; | ||
1447 | 2027 | ||
1448 | for (pg = ftrace_pages_start; pg; pg = pg->next) { | 2028 | search_len = strlen(search); |
1449 | for (i = 0; i < pg->index; i++) { | ||
1450 | rec = &pg->records[i]; | ||
1451 | 2029 | ||
1452 | if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) | 2030 | mutex_lock(&ftrace_lock); |
1453 | continue; | 2031 | do_for_each_ftrace_rec(pg, rec) { |
2032 | |||
2033 | if (*idx >= FTRACE_GRAPH_MAX_FUNCS) | ||
2034 | break; | ||
2035 | |||
2036 | if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE)) | ||
2037 | continue; | ||
1454 | 2038 | ||
1455 | kallsyms_lookup(rec->ip, NULL, NULL, NULL, str); | 2039 | if (ftrace_match_record(rec, search, search_len, type)) { |
1456 | if (strcmp(str, buffer) == 0) { | 2040 | /* ensure it is not already in the array */ |
2041 | exists = false; | ||
2042 | for (i = 0; i < *idx; i++) | ||
2043 | if (array[i] == rec->ip) { | ||
2044 | exists = true; | ||
2045 | break; | ||
2046 | } | ||
2047 | if (!exists) { | ||
2048 | array[(*idx)++] = rec->ip; | ||
1457 | found = 1; | 2049 | found = 1; |
1458 | for (j = 0; j < idx; j++) | ||
1459 | if (array[j] == rec->ip) { | ||
1460 | found = 0; | ||
1461 | break; | ||
1462 | } | ||
1463 | if (found) | ||
1464 | array[idx] = rec->ip; | ||
1465 | break; | ||
1466 | } | 2050 | } |
1467 | } | 2051 | } |
1468 | } | 2052 | } while_for_each_ftrace_rec(); |
1469 | spin_unlock(&ftrace_lock); | 2053 | |
2054 | mutex_unlock(&ftrace_lock); | ||
1470 | 2055 | ||
1471 | return found ? 0 : -EINVAL; | 2056 | return found ? 0 : -EINVAL; |
1472 | } | 2057 | } |
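The inner scan above is a plain insert-if-absent into a fixed array. Reduced to a standalone demo (MAX_FUNCS stands in for FTRACE_GRAPH_MAX_FUNCS):

        #include <stdio.h>

        #define MAX_FUNCS 32    /* stand-in for FTRACE_GRAPH_MAX_FUNCS */

        static int add_unique(unsigned long *array, int *idx, unsigned long ip)
        {
                int i;

                if (*idx >= MAX_FUNCS)
                        return 0;
                for (i = 0; i < *idx; i++)
                        if (array[i] == ip)     /* already present */
                                return 0;
                array[(*idx)++] = ip;
                return 1;                       /* newly added */
        }

        int main(void)
        {
                unsigned long funcs[MAX_FUNCS];
                int count = 0;

                add_unique(funcs, &count, 0xc0100000UL);
                add_unique(funcs, &count, 0xc0100000UL);        /* duplicate, ignored */
                printf("count=%d\n", count);                    /* prints count=1 */
                return 0;
        }

Since a glob can now match many functions, the duplicate check keeps repeated writes to the graph filter idempotent.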
@@ -1534,13 +2119,11 @@ ftrace_graph_write(struct file *file, const char __user *ubuf, | |||
1534 | } | 2119 | } |
1535 | buffer[index] = 0; | 2120 | buffer[index] = 0; |
1536 | 2121 | ||
1537 | /* we allow only one at a time */ | 2122 | /* we allow only one expression at a time */ |
1538 | ret = ftrace_set_func(array, ftrace_graph_count, buffer); | 2123 | ret = ftrace_set_func(array, &ftrace_graph_count, buffer); |
1539 | if (ret) | 2124 | if (ret) |
1540 | goto out; | 2125 | goto out; |
1541 | 2126 | ||
1542 | ftrace_graph_count++; | ||
1543 | |||
1544 | file->f_pos += read; | 2127 | file->f_pos += read; |
1545 | 2128 | ||
1546 | ret = read; | 2129 | ret = read; |
@@ -1604,7 +2187,7 @@ static int ftrace_convert_nops(struct module *mod, | |||
1604 | unsigned long addr; | 2187 | unsigned long addr; |
1605 | unsigned long flags; | 2188 | unsigned long flags; |
1606 | 2189 | ||
1607 | mutex_lock(&ftrace_start_lock); | 2190 | mutex_lock(&ftrace_lock); |
1608 | p = start; | 2191 | p = start; |
1609 | while (p < end) { | 2192 | while (p < end) { |
1610 | addr = ftrace_call_adjust(*p++); | 2193 | addr = ftrace_call_adjust(*p++); |
@@ -1623,7 +2206,7 @@ static int ftrace_convert_nops(struct module *mod, | |||
1623 | local_irq_save(flags); | 2206 | local_irq_save(flags); |
1624 | ftrace_update_code(mod); | 2207 | ftrace_update_code(mod); |
1625 | local_irq_restore(flags); | 2208 | local_irq_restore(flags); |
1626 | mutex_unlock(&ftrace_start_lock); | 2209 | mutex_unlock(&ftrace_lock); |
1627 | 2210 | ||
1628 | return 0; | 2211 | return 0; |
1629 | } | 2212 | } |
@@ -1796,7 +2379,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf, | |||
1796 | if (ret < 0) | 2379 | if (ret < 0) |
1797 | return ret; | 2380 | return ret; |
1798 | 2381 | ||
1799 | mutex_lock(&ftrace_start_lock); | 2382 | mutex_lock(&ftrace_lock); |
1800 | if (val < 0) { | 2383 | if (val < 0) { |
1801 | /* disable pid tracing */ | 2384 | /* disable pid tracing */ |
1802 | if (!ftrace_pid_trace) | 2385 | if (!ftrace_pid_trace) |
@@ -1835,7 +2418,7 @@ ftrace_pid_write(struct file *filp, const char __user *ubuf, | |||
1835 | ftrace_startup_enable(0); | 2418 | ftrace_startup_enable(0); |
1836 | 2419 | ||
1837 | out: | 2420 | out: |
1838 | mutex_unlock(&ftrace_start_lock); | 2421 | mutex_unlock(&ftrace_lock); |
1839 | 2422 | ||
1840 | return cnt; | 2423 | return cnt; |
1841 | } | 2424 | } |
@@ -1863,7 +2446,6 @@ static __init int ftrace_init_debugfs(void) | |||
1863 | "'set_ftrace_pid' entry\n"); | 2446 | "'set_ftrace_pid' entry\n"); |
1864 | return 0; | 2447 | return 0; |
1865 | } | 2448 | } |
1866 | |||
1867 | fs_initcall(ftrace_init_debugfs); | 2449 | fs_initcall(ftrace_init_debugfs); |
1868 | 2450 | ||
1869 | /** | 2451 | /** |
@@ -1898,17 +2480,17 @@ int register_ftrace_function(struct ftrace_ops *ops) | |||
1898 | if (unlikely(ftrace_disabled)) | 2480 | if (unlikely(ftrace_disabled)) |
1899 | return -1; | 2481 | return -1; |
1900 | 2482 | ||
1901 | mutex_lock(&ftrace_sysctl_lock); | 2483 | mutex_lock(&ftrace_lock); |
1902 | 2484 | ||
1903 | ret = __register_ftrace_function(ops); | 2485 | ret = __register_ftrace_function(ops); |
1904 | ftrace_startup(0); | 2486 | ftrace_startup(0); |
1905 | 2487 | ||
1906 | mutex_unlock(&ftrace_sysctl_lock); | 2488 | mutex_unlock(&ftrace_lock); |
1907 | return ret; | 2489 | return ret; |
1908 | } | 2490 | } |
1909 | 2491 | ||
1910 | /** | 2492 | /** |
1911 | * unregister_ftrace_function - unresgister a function for profiling. | 2493 | * unregister_ftrace_function - unregister a function for profiling. |
1912 | * @ops - ops structure that holds the function to unregister | 2494 | * @ops - ops structure that holds the function to unregister |
1913 | * | 2495 | * |
1914 | * Unregister a function that was added to be called by ftrace profiling. | 2496 | * Unregister a function that was added to be called by ftrace profiling. |
@@ -1917,10 +2499,10 @@ int unregister_ftrace_function(struct ftrace_ops *ops) | |||
1917 | { | 2499 | { |
1918 | int ret; | 2500 | int ret; |
1919 | 2501 | ||
1920 | mutex_lock(&ftrace_sysctl_lock); | 2502 | mutex_lock(&ftrace_lock); |
1921 | ret = __unregister_ftrace_function(ops); | 2503 | ret = __unregister_ftrace_function(ops); |
1922 | ftrace_shutdown(0); | 2504 | ftrace_shutdown(0); |
1923 | mutex_unlock(&ftrace_sysctl_lock); | 2505 | mutex_unlock(&ftrace_lock); |
1924 | 2506 | ||
1925 | return ret; | 2507 | return ret; |
1926 | } | 2508 | } |
@@ -1935,7 +2517,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1935 | if (unlikely(ftrace_disabled)) | 2517 | if (unlikely(ftrace_disabled)) |
1936 | return -ENODEV; | 2518 | return -ENODEV; |
1937 | 2519 | ||
1938 | mutex_lock(&ftrace_sysctl_lock); | 2520 | mutex_lock(&ftrace_lock); |
1939 | 2521 | ||
1940 | ret = proc_dointvec(table, write, file, buffer, lenp, ppos); | 2522 | ret = proc_dointvec(table, write, file, buffer, lenp, ppos); |
1941 | 2523 | ||
@@ -1964,7 +2546,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, | |||
1964 | } | 2546 | } |
1965 | 2547 | ||
1966 | out: | 2548 | out: |
1967 | mutex_unlock(&ftrace_sysctl_lock); | 2549 | mutex_unlock(&ftrace_lock); |
1968 | return ret; | 2550 | return ret; |
1969 | } | 2551 | } |
1970 | 2552 | ||
@@ -2080,7 +2662,7 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
2080 | { | 2662 | { |
2081 | int ret = 0; | 2663 | int ret = 0; |
2082 | 2664 | ||
2083 | mutex_lock(&ftrace_sysctl_lock); | 2665 | mutex_lock(&ftrace_lock); |
2084 | 2666 | ||
2085 | ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; | 2667 | ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call; |
2086 | register_pm_notifier(&ftrace_suspend_notifier); | 2668 | register_pm_notifier(&ftrace_suspend_notifier); |
@@ -2098,13 +2680,13 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc, | |||
2098 | ftrace_startup(FTRACE_START_FUNC_RET); | 2680 | ftrace_startup(FTRACE_START_FUNC_RET); |
2099 | 2681 | ||
2100 | out: | 2682 | out: |
2101 | mutex_unlock(&ftrace_sysctl_lock); | 2683 | mutex_unlock(&ftrace_lock); |
2102 | return ret; | 2684 | return ret; |
2103 | } | 2685 | } |
2104 | 2686 | ||
2105 | void unregister_ftrace_graph(void) | 2687 | void unregister_ftrace_graph(void) |
2106 | { | 2688 | { |
2107 | mutex_lock(&ftrace_sysctl_lock); | 2689 | mutex_lock(&ftrace_lock); |
2108 | 2690 | ||
2109 | atomic_dec(&ftrace_graph_active); | 2691 | atomic_dec(&ftrace_graph_active); |
2110 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; | 2692 | ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub; |
@@ -2112,7 +2694,7 @@ void unregister_ftrace_graph(void) | |||
2112 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); | 2694 | ftrace_shutdown(FTRACE_STOP_FUNC_RET); |
2113 | unregister_pm_notifier(&ftrace_suspend_notifier); | 2695 | unregister_pm_notifier(&ftrace_suspend_notifier); |
2114 | 2696 | ||
2115 | mutex_unlock(&ftrace_sysctl_lock); | 2697 | mutex_unlock(&ftrace_lock); |
2116 | } | 2698 | } |
2117 | 2699 | ||
2118 | /* Allocate a return stack for newly created task */ | 2700 | /* Allocate a return stack for newly created task */ |
diff --git a/kernel/trace/kmemtrace.c b/kernel/trace/kmemtrace.c new file mode 100644 index 000000000000..ae201b3eda89 --- /dev/null +++ b/kernel/trace/kmemtrace.c | |||
@@ -0,0 +1,339 @@ | |||
1 | /* | ||
2 | * Memory allocator tracing | ||
3 | * | ||
4 | * Copyright (C) 2008 Eduard - Gabriel Munteanu | ||
5 | * Copyright (C) 2008 Pekka Enberg <penberg@cs.helsinki.fi> | ||
6 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/dcache.h> | ||
10 | #include <linux/debugfs.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/seq_file.h> | ||
13 | #include <trace/kmemtrace.h> | ||
14 | |||
15 | #include "trace.h" | ||
16 | #include "trace_output.h" | ||
17 | |||
18 | /* Select an alternative, minimalistic output instead of the original one */ | ||
19 | #define TRACE_KMEM_OPT_MINIMAL 0x1 | ||
20 | |||
21 | static struct tracer_opt kmem_opts[] = { | ||
22 | /* The minimalistic output is disabled by default */ | ||
23 | { TRACER_OPT(kmem_minimalistic, TRACE_KMEM_OPT_MINIMAL) }, | ||
24 | { } | ||
25 | }; | ||
26 | |||
27 | static struct tracer_flags kmem_tracer_flags = { | ||
28 | .val = 0, | ||
29 | .opts = kmem_opts | ||
30 | }; | ||
31 | |||
32 | |||
33 | static bool kmem_tracing_enabled __read_mostly; | ||
34 | static struct trace_array *kmemtrace_array; | ||
35 | |||
36 | static int kmem_trace_init(struct trace_array *tr) | ||
37 | { | ||
38 | int cpu; | ||
39 | kmemtrace_array = tr; | ||
40 | |||
41 | for_each_cpu_mask(cpu, cpu_possible_map) | ||
42 | tracing_reset(tr, cpu); | ||
43 | |||
44 | kmem_tracing_enabled = true; | ||
45 | |||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static void kmem_trace_reset(struct trace_array *tr) | ||
50 | { | ||
51 | kmem_tracing_enabled = false; | ||
52 | } | ||
53 | |||
54 | static void kmemtrace_headers(struct seq_file *s) | ||
55 | { | ||
56 | /* Don't need headers for the original kmemtrace output */ | ||
57 | if (!(kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL)) | ||
58 | return; | ||
59 | |||
60 | seq_printf(s, "#\n"); | ||
61 | seq_printf(s, "# ALLOC TYPE REQ GIVEN FLAGS " | ||
62 | " POINTER NODE CALLER\n"); | ||
63 | seq_printf(s, "# FREE | | | | " | ||
64 | " | | | |\n"); | ||
65 | seq_printf(s, "# |\n\n"); | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * The following two functions give the original output from kmemtrace, | ||
70 | * or something close to it; a few details may still be missing | ||
71 | */ | ||
72 | static enum print_line_t | ||
73 | kmemtrace_print_alloc_original(struct trace_iterator *iter, | ||
74 | struct kmemtrace_alloc_entry *entry) | ||
75 | { | ||
76 | struct trace_seq *s = &iter->seq; | ||
77 | int ret; | ||
78 | |||
79 | /* Taken from the old linux/kmemtrace.h */ | ||
80 | ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu " | ||
81 | "bytes_req %lu bytes_alloc %lu gfp_flags %lu node %d\n", | ||
82 | entry->type_id, entry->call_site, (unsigned long) entry->ptr, | ||
83 | (unsigned long) entry->bytes_req, (unsigned long) entry->bytes_alloc, | ||
84 | (unsigned long) entry->gfp_flags, entry->node); | ||
85 | |||
86 | if (!ret) | ||
87 | return TRACE_TYPE_PARTIAL_LINE; | ||
88 | |||
89 | return TRACE_TYPE_HANDLED; | ||
90 | } | ||
91 | |||
92 | static enum print_line_t | ||
93 | kmemtrace_print_free_original(struct trace_iterator *iter, | ||
94 | struct kmemtrace_free_entry *entry) | ||
95 | { | ||
96 | struct trace_seq *s = &iter->seq; | ||
97 | int ret; | ||
98 | |||
99 | /* Taken from the old linux/kmemtrace.h */ | ||
100 | ret = trace_seq_printf(s, "type_id %d call_site %lu ptr %lu\n", | ||
101 | entry->type_id, entry->call_site, (unsigned long) entry->ptr); | ||
102 | |||
103 | if (!ret) | ||
104 | return TRACE_TYPE_PARTIAL_LINE; | ||
105 | |||
106 | return TRACE_TYPE_HANDLED; | ||
107 | } | ||
108 | |||
109 | |||
110 | /* The following two functions provide a more minimalistic output */ | ||
111 | static enum print_line_t | ||
112 | kmemtrace_print_alloc_compress(struct trace_iterator *iter, | ||
113 | struct kmemtrace_alloc_entry *entry) | ||
114 | { | ||
115 | struct trace_seq *s = &iter->seq; | ||
116 | int ret; | ||
117 | |||
118 | /* Alloc entry */ | ||
119 | ret = trace_seq_printf(s, " + "); | ||
120 | if (!ret) | ||
121 | return TRACE_TYPE_PARTIAL_LINE; | ||
122 | |||
123 | /* Type */ | ||
124 | switch (entry->type_id) { | ||
125 | case KMEMTRACE_TYPE_KMALLOC: | ||
126 | ret = trace_seq_printf(s, "K "); | ||
127 | break; | ||
128 | case KMEMTRACE_TYPE_CACHE: | ||
129 | ret = trace_seq_printf(s, "C "); | ||
130 | break; | ||
131 | case KMEMTRACE_TYPE_PAGES: | ||
132 | ret = trace_seq_printf(s, "P "); | ||
133 | break; | ||
134 | default: | ||
135 | ret = trace_seq_printf(s, "? "); | ||
136 | } | ||
137 | |||
138 | if (!ret) | ||
139 | return TRACE_TYPE_PARTIAL_LINE; | ||
140 | |||
141 | /* Requested */ | ||
142 | ret = trace_seq_printf(s, "%4zu ", entry->bytes_req); | ||
143 | if (!ret) | ||
144 | return TRACE_TYPE_PARTIAL_LINE; | ||
145 | |||
146 | /* Allocated */ | ||
147 | ret = trace_seq_printf(s, "%4zu ", entry->bytes_alloc); | ||
148 | if (!ret) | ||
149 | return TRACE_TYPE_PARTIAL_LINE; | ||
150 | |||
151 | /* Flags | ||
152 | * TODO: it would be better to print the names of the GFP flags | ||
153 | */ | ||
154 | ret = trace_seq_printf(s, "%08x ", entry->gfp_flags); | ||
155 | if (!ret) | ||
156 | return TRACE_TYPE_PARTIAL_LINE; | ||
157 | |||
158 | /* Pointer to allocated */ | ||
159 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | ||
160 | if (!ret) | ||
161 | return TRACE_TYPE_PARTIAL_LINE; | ||
162 | |||
163 | /* Node */ | ||
164 | ret = trace_seq_printf(s, "%4d ", entry->node); | ||
165 | if (!ret) | ||
166 | return TRACE_TYPE_PARTIAL_LINE; | ||
167 | |||
168 | /* Call site */ | ||
169 | ret = seq_print_ip_sym(s, entry->call_site, 0); | ||
170 | if (!ret) | ||
171 | return TRACE_TYPE_PARTIAL_LINE; | ||
172 | |||
173 | if (!trace_seq_printf(s, "\n")) | ||
174 | return TRACE_TYPE_PARTIAL_LINE; | ||
175 | |||
176 | return TRACE_TYPE_HANDLED; | ||
177 | } | ||
178 | |||
179 | static enum print_line_t | ||
180 | kmemtrace_print_free_compress(struct trace_iterator *iter, | ||
181 | struct kmemtrace_free_entry *entry) | ||
182 | { | ||
183 | struct trace_seq *s = &iter->seq; | ||
184 | int ret; | ||
185 | |||
186 | /* Free entry */ | ||
187 | ret = trace_seq_printf(s, " - "); | ||
188 | if (!ret) | ||
189 | return TRACE_TYPE_PARTIAL_LINE; | ||
190 | |||
191 | /* Type */ | ||
192 | switch (entry->type_id) { | ||
193 | case KMEMTRACE_TYPE_KMALLOC: | ||
194 | ret = trace_seq_printf(s, "K "); | ||
195 | break; | ||
196 | case KMEMTRACE_TYPE_CACHE: | ||
197 | ret = trace_seq_printf(s, "C "); | ||
198 | break; | ||
199 | case KMEMTRACE_TYPE_PAGES: | ||
200 | ret = trace_seq_printf(s, "P "); | ||
201 | break; | ||
202 | default: | ||
203 | ret = trace_seq_printf(s, "? "); | ||
204 | } | ||
205 | |||
206 | if (!ret) | ||
207 | return TRACE_TYPE_PARTIAL_LINE; | ||
208 | |||
209 | /* Skip requested/allocated/flags */ | ||
210 | ret = trace_seq_printf(s, " "); | ||
211 | if (!ret) | ||
212 | return TRACE_TYPE_PARTIAL_LINE; | ||
213 | |||
214 | /* Pointer to allocated */ | ||
215 | ret = trace_seq_printf(s, "0x%tx ", (ptrdiff_t)entry->ptr); | ||
216 | if (!ret) | ||
217 | return TRACE_TYPE_PARTIAL_LINE; | ||
218 | |||
219 | /* Skip node */ | ||
220 | ret = trace_seq_printf(s, " "); | ||
221 | if (!ret) | ||
222 | return TRACE_TYPE_PARTIAL_LINE; | ||
223 | |||
224 | /* Call site */ | ||
225 | ret = seq_print_ip_sym(s, entry->call_site, 0); | ||
226 | if (!ret) | ||
227 | return TRACE_TYPE_PARTIAL_LINE; | ||
228 | |||
229 | if (!trace_seq_printf(s, "\n")) | ||
230 | return TRACE_TYPE_PARTIAL_LINE; | ||
231 | |||
232 | return TRACE_TYPE_HANDLED; | ||
233 | } | ||
234 | |||
235 | static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter) | ||
236 | { | ||
237 | struct trace_entry *entry = iter->ent; | ||
238 | |||
239 | switch (entry->type) { | ||
240 | case TRACE_KMEM_ALLOC: { | ||
241 | struct kmemtrace_alloc_entry *field; | ||
242 | trace_assign_type(field, entry); | ||
243 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) | ||
244 | return kmemtrace_print_alloc_compress(iter, field); | ||
245 | else | ||
246 | return kmemtrace_print_alloc_original(iter, field); | ||
247 | } | ||
248 | |||
249 | case TRACE_KMEM_FREE: { | ||
250 | struct kmemtrace_free_entry *field; | ||
251 | trace_assign_type(field, entry); | ||
252 | if (kmem_tracer_flags.val & TRACE_KMEM_OPT_MINIMAL) | ||
253 | return kmemtrace_print_free_compress(iter, field); | ||
254 | else | ||
255 | return kmemtrace_print_free_original(iter, field); | ||
256 | } | ||
257 | |||
258 | default: | ||
259 | return TRACE_TYPE_UNHANDLED; | ||
260 | } | ||
261 | } | ||
262 | |||
263 | /* Trace allocations */ | ||
264 | void kmemtrace_mark_alloc_node(enum kmemtrace_type_id type_id, | ||
265 | unsigned long call_site, | ||
266 | const void *ptr, | ||
267 | size_t bytes_req, | ||
268 | size_t bytes_alloc, | ||
269 | gfp_t gfp_flags, | ||
270 | int node) | ||
271 | { | ||
272 | struct ring_buffer_event *event; | ||
273 | struct kmemtrace_alloc_entry *entry; | ||
274 | struct trace_array *tr = kmemtrace_array; | ||
275 | |||
276 | if (!kmem_tracing_enabled) | ||
277 | return; | ||
278 | |||
279 | event = trace_buffer_lock_reserve(tr, TRACE_KMEM_ALLOC, | ||
280 | sizeof(*entry), 0, 0); | ||
281 | if (!event) | ||
282 | return; | ||
283 | entry = ring_buffer_event_data(event); | ||
284 | |||
285 | entry->call_site = call_site; | ||
286 | entry->ptr = ptr; | ||
287 | entry->bytes_req = bytes_req; | ||
288 | entry->bytes_alloc = bytes_alloc; | ||
289 | entry->gfp_flags = gfp_flags; | ||
290 | entry->node = node; | ||
291 | |||
292 | trace_buffer_unlock_commit(tr, event, 0, 0); | ||
293 | } | ||
294 | EXPORT_SYMBOL(kmemtrace_mark_alloc_node); | ||
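A hypothetical call site, to show how the parameters line up; the real hooks live in the slab allocators, not in this file, and the -1 node convention is an assumption here:

        /* invented wrapper an allocator might call after handing out 'obj' */
        static inline void
        trace_my_kmalloc(unsigned long call_site, const void *obj,
                         size_t size, size_t allocated, gfp_t gfp_flags)
        {
                kmemtrace_mark_alloc_node(KMEMTRACE_TYPE_KMALLOC,
                                          call_site,    /* caller of the allocator */
                                          obj,          /* pointer returned to the caller */
                                          size,         /* bytes the caller requested */
                                          allocated,    /* bytes actually set aside */
                                          gfp_flags,    /* e.g. GFP_KERNEL */
                                          -1);          /* NUMA node; -1 = unspecified */
        }

The matching kmemtrace_mark_free() below records only the type, call site and pointer, which is why the free entry is so much smaller.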
295 | |||
296 | void kmemtrace_mark_free(enum kmemtrace_type_id type_id, | ||
297 | unsigned long call_site, | ||
298 | const void *ptr) | ||
299 | { | ||
300 | struct ring_buffer_event *event; | ||
301 | struct kmemtrace_free_entry *entry; | ||
302 | struct trace_array *tr = kmemtrace_array; | ||
303 | |||
304 | if (!kmem_tracing_enabled) | ||
305 | return; | ||
306 | |||
307 | event = trace_buffer_lock_reserve(tr, TRACE_KMEM_FREE, | ||
308 | sizeof(*entry), 0, 0); | ||
309 | if (!event) | ||
310 | return; | ||
311 | entry = ring_buffer_event_data(event); | ||
312 | entry->type_id = type_id; | ||
313 | entry->call_site = call_site; | ||
314 | entry->ptr = ptr; | ||
315 | |||
316 | trace_buffer_unlock_commit(tr, event, 0, 0); | ||
317 | } | ||
318 | EXPORT_SYMBOL(kmemtrace_mark_free); | ||
319 | |||
320 | static struct tracer kmem_tracer __read_mostly = { | ||
321 | .name = "kmemtrace", | ||
322 | .init = kmem_trace_init, | ||
323 | .reset = kmem_trace_reset, | ||
324 | .print_line = kmemtrace_print_line, | ||
325 | .print_header = kmemtrace_headers, | ||
326 | .flags = &kmem_tracer_flags | ||
327 | }; | ||
328 | |||
329 | void kmemtrace_init(void) | ||
330 | { | ||
331 | /* earliest opportunity to start kmem tracing */ | ||
332 | } | ||
333 | |||
334 | static int __init init_kmem_tracer(void) | ||
335 | { | ||
336 | return register_tracer(&kmem_tracer); | ||
337 | } | ||
338 | |||
339 | device_initcall(init_kmem_tracer); | ||
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index bd38c5cfd8ad..f7473645b9c6 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c | |||
@@ -4,13 +4,15 @@ | |||
4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | 4 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> |
5 | */ | 5 | */ |
6 | #include <linux/ring_buffer.h> | 6 | #include <linux/ring_buffer.h> |
7 | #include <linux/trace_clock.h> | ||
8 | #include <linux/ftrace_irq.h> | ||
7 | #include <linux/spinlock.h> | 9 | #include <linux/spinlock.h> |
8 | #include <linux/debugfs.h> | 10 | #include <linux/debugfs.h> |
9 | #include <linux/uaccess.h> | 11 | #include <linux/uaccess.h> |
12 | #include <linux/hardirq.h> | ||
10 | #include <linux/module.h> | 13 | #include <linux/module.h> |
11 | #include <linux/percpu.h> | 14 | #include <linux/percpu.h> |
12 | #include <linux/mutex.h> | 15 | #include <linux/mutex.h> |
13 | #include <linux/sched.h> /* used for sched_clock() (for now) */ | ||
14 | #include <linux/init.h> | 16 | #include <linux/init.h> |
15 | #include <linux/hash.h> | 17 | #include <linux/hash.h> |
16 | #include <linux/list.h> | 18 | #include <linux/list.h> |
@@ -57,7 +59,9 @@ enum { | |||
57 | RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, | 59 | RB_BUFFERS_DISABLED = 1 << RB_BUFFERS_DISABLED_BIT, |
58 | }; | 60 | }; |
59 | 61 | ||
60 | static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; | 62 | static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON; |
63 | |||
64 | #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data) | ||
61 | 65 | ||
62 | /** | 66 | /** |
63 | * tracing_on - enable all tracing buffers | 67 | * tracing_on - enable all tracing buffers |
@@ -89,26 +93,34 @@ EXPORT_SYMBOL_GPL(tracing_off); | |||
89 | * tracing_off_permanent - permanently disable ring buffers | 93 | * tracing_off_permanent - permanently disable ring buffers |
90 | * | 94 | * |
91 | * This function, once called, will disable all ring buffers | 95 | * This function, once called, will disable all ring buffers |
92 | * permanenty. | 96 | * permanently. |
93 | */ | 97 | */ |
94 | void tracing_off_permanent(void) | 98 | void tracing_off_permanent(void) |
95 | { | 99 | { |
96 | set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); | 100 | set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags); |
97 | } | 101 | } |
98 | 102 | ||
103 | /** | ||
104 | * tracing_is_on - show whether the ring buffers are enabled | ||
105 | */ | ||
106 | int tracing_is_on(void) | ||
107 | { | ||
108 | return ring_buffer_flags == RB_BUFFERS_ON; | ||
109 | } | ||
110 | EXPORT_SYMBOL_GPL(tracing_is_on); | ||
111 | |||
99 | #include "trace.h" | 112 | #include "trace.h" |
100 | 113 | ||
101 | /* Up this if you want to test the TIME_EXTENTS and normalization */ | 114 | /* Up this if you want to test the TIME_EXTENTS and normalization */ |
102 | #define DEBUG_SHIFT 0 | 115 | #define DEBUG_SHIFT 0 |
103 | 116 | ||
104 | /* FIXME!!! */ | ||
105 | u64 ring_buffer_time_stamp(int cpu) | 117 | u64 ring_buffer_time_stamp(int cpu) |
106 | { | 118 | { |
107 | u64 time; | 119 | u64 time; |
108 | 120 | ||
109 | preempt_disable_notrace(); | 121 | preempt_disable_notrace(); |
110 | /* shift to debug/test normalization and TIME_EXTENTS */ | 122 | /* shift to debug/test normalization and TIME_EXTENTS */ |
111 | time = sched_clock() << DEBUG_SHIFT; | 123 | time = trace_clock_local() << DEBUG_SHIFT; |
112 | preempt_enable_no_resched_notrace(); | 124 | preempt_enable_no_resched_notrace(); |
113 | 125 | ||
114 | return time; | 126 | return time; |
@@ -122,9 +134,8 @@ void ring_buffer_normalize_time_stamp(int cpu, u64 *ts) | |||
122 | } | 134 | } |
123 | EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); | 135 | EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); |
124 | 136 | ||
125 | #define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event)) | 137 | #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array)) |
126 | #define RB_ALIGNMENT_SHIFT 2 | 138 | #define RB_ALIGNMENT 4U |
127 | #define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT) | ||
128 | #define RB_MAX_SMALL_DATA 28 | 139 | #define RB_MAX_SMALL_DATA 28 |
129 | 140 | ||
130 | enum { | 141 | enum { |
@@ -133,7 +144,7 @@ enum { | |||
133 | }; | 144 | }; |
134 | 145 | ||
135 | /* inline for ring buffer fast paths */ | 146 | /* inline for ring buffer fast paths */ |
136 | static inline unsigned | 147 | static unsigned |
137 | rb_event_length(struct ring_buffer_event *event) | 148 | rb_event_length(struct ring_buffer_event *event) |
138 | { | 149 | { |
139 | unsigned length; | 150 | unsigned length; |
@@ -151,7 +162,7 @@ rb_event_length(struct ring_buffer_event *event) | |||
151 | 162 | ||
152 | case RINGBUF_TYPE_DATA: | 163 | case RINGBUF_TYPE_DATA: |
153 | if (event->len) | 164 | if (event->len) |
154 | length = event->len << RB_ALIGNMENT_SHIFT; | 165 | length = event->len * RB_ALIGNMENT; |
155 | else | 166 | else |
156 | length = event->array[0]; | 167 | length = event->array[0]; |
157 | return length + RB_EVNT_HDR_SIZE; | 168 | return length + RB_EVNT_HDR_SIZE; |
@@ -179,7 +190,7 @@ unsigned ring_buffer_event_length(struct ring_buffer_event *event) | |||
179 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); | 190 | EXPORT_SYMBOL_GPL(ring_buffer_event_length); |
180 | 191 | ||
181 | /* inline for ring buffer fast paths */ | 192 | /* inline for ring buffer fast paths */ |
182 | static inline void * | 193 | static void * |
183 | rb_event_data(struct ring_buffer_event *event) | 194 | rb_event_data(struct ring_buffer_event *event) |
184 | { | 195 | { |
185 | BUG_ON(event->type != RINGBUF_TYPE_DATA); | 196 | BUG_ON(event->type != RINGBUF_TYPE_DATA); |
@@ -209,7 +220,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data); | |||
209 | 220 | ||
210 | struct buffer_data_page { | 221 | struct buffer_data_page { |
211 | u64 time_stamp; /* page time stamp */ | 222 | u64 time_stamp; /* page time stamp */ |
212 | local_t commit; /* write commited index */ | 223 | local_t commit; /* write committed index */ |
213 | unsigned char data[]; /* data of buffer page */ | 224 | unsigned char data[]; /* data of buffer page */ |
214 | }; | 225 | }; |
215 | 226 | ||
@@ -225,14 +236,25 @@ static void rb_init_page(struct buffer_data_page *bpage) | |||
225 | local_set(&bpage->commit, 0); | 236 | local_set(&bpage->commit, 0); |
226 | } | 237 | } |
227 | 238 | ||
239 | /** | ||
240 | * ring_buffer_page_len - the size of data on the page. | ||
241 | * @page: The page to read | ||
242 | * | ||
243 | * Returns the amount of data on the page, including buffer page header. | ||
244 | */ | ||
245 | size_t ring_buffer_page_len(void *page) | ||
246 | { | ||
247 | return local_read(&((struct buffer_data_page *)page)->commit) | ||
248 | + BUF_PAGE_HDR_SIZE; | ||
249 | } | ||
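BUF_PAGE_HDR_SIZE is offsetof(struct buffer_data_page, data): the bytes occupied by time_stamp and commit before the flexible data[] array starts. A standalone illustration of the same pattern (field sizes here are illustrative, not the kernel's exact layout):

        #include <stdio.h>
        #include <stddef.h>

        struct demo_data_page {
                unsigned long long time_stamp;  /* page time stamp */
                long commit;                    /* stands in for local_t */
                unsigned char data[];           /* flexible array: the payload */
        };

        int main(void)
        {
                /* header bytes = where data[] begins; payload = page size minus this */
                printf("header = %zu bytes\n", offsetof(struct demo_data_page, data));
                return 0;
        }

This is also why BUF_PAGE_SIZE below becomes PAGE_SIZE - BUF_PAGE_HDR_SIZE: the header is carved out of each page before any events are stored.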
250 | |||
228 | /* | 251 | /* |
229 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing | 252 | * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing |
230 | * this issue out. | 253 | * this issue out. |
231 | */ | 254 | */ |
232 | static inline void free_buffer_page(struct buffer_page *bpage) | 255 | static void free_buffer_page(struct buffer_page *bpage) |
233 | { | 256 | { |
234 | if (bpage->page) | 257 | free_page((unsigned long)bpage->page); |
235 | free_page((unsigned long)bpage->page); | ||
236 | kfree(bpage); | 258 | kfree(bpage); |
237 | } | 259 | } |
238 | 260 | ||
@@ -246,7 +268,7 @@ static inline int test_time_stamp(u64 delta) | |||
246 | return 0; | 268 | return 0; |
247 | } | 269 | } |
248 | 270 | ||
249 | #define BUF_PAGE_SIZE (PAGE_SIZE - offsetof(struct buffer_data_page, data)) | 271 | #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE) |
250 | 272 | ||
251 | /* | 273 | /* |
252 | * head_page == tail_page && head == tail then buffer is empty. | 274 | * head_page == tail_page && head == tail then buffer is empty. |
@@ -260,7 +282,7 @@ struct ring_buffer_per_cpu { | |||
260 | struct list_head pages; | 282 | struct list_head pages; |
261 | struct buffer_page *head_page; /* read from head */ | 283 | struct buffer_page *head_page; /* read from head */ |
262 | struct buffer_page *tail_page; /* write to tail */ | 284 | struct buffer_page *tail_page; /* write to tail */ |
263 | struct buffer_page *commit_page; /* commited pages */ | 285 | struct buffer_page *commit_page; /* committed pages */ |
264 | struct buffer_page *reader_page; | 286 | struct buffer_page *reader_page; |
265 | unsigned long overrun; | 287 | unsigned long overrun; |
266 | unsigned long entries; | 288 | unsigned long entries; |
@@ -273,8 +295,8 @@ struct ring_buffer { | |||
273 | unsigned pages; | 295 | unsigned pages; |
274 | unsigned flags; | 296 | unsigned flags; |
275 | int cpus; | 297 | int cpus; |
276 | cpumask_var_t cpumask; | ||
277 | atomic_t record_disabled; | 298 | atomic_t record_disabled; |
299 | cpumask_var_t cpumask; | ||
278 | 300 | ||
279 | struct mutex mutex; | 301 | struct mutex mutex; |
280 | 302 | ||
@@ -303,7 +325,7 @@ struct ring_buffer_iter { | |||
303 | * check_pages - integrity check of buffer pages | 325 | * check_pages - integrity check of buffer pages |
304 | * @cpu_buffer: CPU buffer with pages to test | 326 | * @cpu_buffer: CPU buffer with pages to test |
305 | * | 327 | * |
306 | * As a safty measure we check to make sure the data pages have not | 328 | * As a safety measure we check to make sure the data pages have not |
307 | * been corrupted. | 329 | * been corrupted. |
308 | */ | 330 | */ |
309 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) | 331 | static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) |
@@ -811,7 +833,7 @@ rb_event_index(struct ring_buffer_event *event) | |||
811 | return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE); | 833 | return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE); |
812 | } | 834 | } |
813 | 835 | ||
814 | static inline int | 836 | static int |
815 | rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, | 837 | rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, |
816 | struct ring_buffer_event *event) | 838 | struct ring_buffer_event *event) |
817 | { | 839 | { |
@@ -825,7 +847,7 @@ rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer, | |||
825 | rb_commit_index(cpu_buffer) == index; | 847 | rb_commit_index(cpu_buffer) == index; |
826 | } | 848 | } |
827 | 849 | ||
828 | static inline void | 850 | static void |
829 | rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, | 851 | rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, |
830 | struct ring_buffer_event *event) | 852 | struct ring_buffer_event *event) |
831 | { | 853 | { |
@@ -850,7 +872,7 @@ rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer, | |||
850 | local_set(&cpu_buffer->commit_page->page->commit, index); | 872 | local_set(&cpu_buffer->commit_page->page->commit, index); |
851 | } | 873 | } |
852 | 874 | ||
853 | static inline void | 875 | static void |
854 | rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) | 876 | rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) |
855 | { | 877 | { |
856 | /* | 878 | /* |
@@ -896,7 +918,7 @@ static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer) | |||
896 | cpu_buffer->reader_page->read = 0; | 918 | cpu_buffer->reader_page->read = 0; |
897 | } | 919 | } |
898 | 920 | ||
899 | static inline void rb_inc_iter(struct ring_buffer_iter *iter) | 921 | static void rb_inc_iter(struct ring_buffer_iter *iter) |
900 | { | 922 | { |
901 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; | 923 | struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; |
902 | 924 | ||
@@ -926,7 +948,7 @@ static inline void rb_inc_iter(struct ring_buffer_iter *iter) | |||
926 | * and with this, we can determine what to place into the | 948 | * and with this, we can determine what to place into the |
927 | * data field. | 949 | * data field. |
928 | */ | 950 | */ |
929 | static inline void | 951 | static void |
930 | rb_update_event(struct ring_buffer_event *event, | 952 | rb_update_event(struct ring_buffer_event *event, |
931 | unsigned type, unsigned length) | 953 | unsigned type, unsigned length) |
932 | { | 954 | { |
@@ -938,15 +960,11 @@ rb_update_event(struct ring_buffer_event *event, | |||
938 | break; | 960 | break; |
939 | 961 | ||
940 | case RINGBUF_TYPE_TIME_EXTEND: | 962 | case RINGBUF_TYPE_TIME_EXTEND: |
941 | event->len = | 963 | event->len = DIV_ROUND_UP(RB_LEN_TIME_EXTEND, RB_ALIGNMENT); |
942 | (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1)) | ||
943 | >> RB_ALIGNMENT_SHIFT; | ||
944 | break; | 964 | break; |
945 | 965 | ||
946 | case RINGBUF_TYPE_TIME_STAMP: | 966 | case RINGBUF_TYPE_TIME_STAMP: |
947 | event->len = | 967 | event->len = DIV_ROUND_UP(RB_LEN_TIME_STAMP, RB_ALIGNMENT); |
948 | (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1)) | ||
949 | >> RB_ALIGNMENT_SHIFT; | ||
950 | break; | 968 | break; |
951 | 969 | ||
952 | case RINGBUF_TYPE_DATA: | 970 | case RINGBUF_TYPE_DATA: |
@@ -955,16 +973,14 @@ rb_update_event(struct ring_buffer_event *event, | |||
955 | event->len = 0; | 973 | event->len = 0; |
956 | event->array[0] = length; | 974 | event->array[0] = length; |
957 | } else | 975 | } else |
958 | event->len = | 976 | event->len = DIV_ROUND_UP(length, RB_ALIGNMENT); |
959 | (length + (RB_ALIGNMENT-1)) | ||
960 | >> RB_ALIGNMENT_SHIFT; | ||
961 | break; | 977 | break; |
962 | default: | 978 | default: |
963 | BUG(); | 979 | BUG(); |
964 | } | 980 | } |
965 | } | 981 | } |
966 | 982 | ||
967 | static inline unsigned rb_calculate_event_length(unsigned length) | 983 | static unsigned rb_calculate_event_length(unsigned length) |
968 | { | 984 | { |
969 | struct ring_buffer_event event; /* Used only for sizeof array */ | 985 | struct ring_buffer_event event; /* Used only for sizeof array */ |
970 | 986 | ||
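
The three hunks above replace hand-rolled round-up-and-shift arithmetic with DIV_ROUND_UP(). The two forms agree whenever RB_ALIGNMENT == 1 << RB_ALIGNMENT_SHIFT; a minimal standalone sketch of that identity, using an assumed 4-byte alignment (the actual constants come from ring_buffer.c, not this example):

/*
 * Sketch: DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d) in the
 * kernel. With a power-of-two divisor the compiler emits the same
 * shift the old code spelled out by hand.
 */
#include <assert.h>

#define RB_ALIGNMENT_SHIFT	2			/* assumed for illustration */
#define RB_ALIGNMENT		(1 << RB_ALIGNMENT_SHIFT)
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned length;

	for (length = 1; length < 64; length++)
		assert(DIV_ROUND_UP(length, RB_ALIGNMENT) ==
		       ((length + (RB_ALIGNMENT - 1)) >> RB_ALIGNMENT_SHIFT));
	return 0;
}
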
@@ -990,6 +1006,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
990 | struct ring_buffer *buffer = cpu_buffer->buffer; | 1006 | struct ring_buffer *buffer = cpu_buffer->buffer; |
991 | struct ring_buffer_event *event; | 1007 | struct ring_buffer_event *event; |
992 | unsigned long flags; | 1008 | unsigned long flags; |
1009 | bool lock_taken = false; | ||
993 | 1010 | ||
994 | commit_page = cpu_buffer->commit_page; | 1011 | commit_page = cpu_buffer->commit_page; |
995 | /* we just need to protect against interrupts */ | 1012 | /* we just need to protect against interrupts */ |
@@ -1003,7 +1020,30 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
1003 | struct buffer_page *next_page = tail_page; | 1020 | struct buffer_page *next_page = tail_page; |
1004 | 1021 | ||
1005 | local_irq_save(flags); | 1022 | local_irq_save(flags); |
1006 | __raw_spin_lock(&cpu_buffer->lock); | 1023 | /* |
1024 | * Since the write to the buffer is still not | ||
1025 | * fully lockless, we must be careful with NMIs. | ||
1026 | * The locks in the writers are taken when a write | ||
1027 | * crosses to a new page. The locks protect against | ||
1028 | * races with the readers (this will soon be fixed | ||
1029 | * with a lockless solution). | ||
1030 | * | ||
1031 | * Because we can not protect against NMIs, and we | ||
1032 | * want to keep traces reentrant, we need to manage | ||
1033 | * what happens when we are in an NMI. | ||
1034 | * | ||
1035 | * NMIs can happen after we take the lock. | ||
1036 | * If we are in an NMI, only take the lock | ||
1037 | * if it is not already taken. Otherwise | ||
1038 | * simply fail. | ||
1039 | */ | ||
1040 | if (unlikely(in_nmi())) { | ||
1041 | if (!__raw_spin_trylock(&cpu_buffer->lock)) | ||
1042 | goto out_reset; | ||
1043 | } else | ||
1044 | __raw_spin_lock(&cpu_buffer->lock); | ||
1045 | |||
1046 | lock_taken = true; | ||
1007 | 1047 | ||
1008 | rb_inc_page(cpu_buffer, &next_page); | 1048 | rb_inc_page(cpu_buffer, &next_page); |
1009 | 1049 | ||
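
The hunk above is the core of the NMI-safety change: a writer that crosses a page boundary must take cpu_buffer->lock, but an NMI arriving while the lock is already held on the same CPU would deadlock on an unconditional spin, so in NMI context only a trylock is attempted. A condensed sketch of the pattern, with the matching conditional unlock from the out_reset hunk further down folded in; everything named demo_* is illustrative, not part of this patch:

/*
 * Sketch: take the lock normally, but only trylock from NMI context,
 * and unlock on the error path only if the lock was actually taken.
 */
static int demo_reserve_across_page(struct demo_cpu_buffer *cpu_buffer)
{
	unsigned long flags;
	bool lock_taken = false;

	local_irq_save(flags);

	if (unlikely(in_nmi())) {
		/* Lock may already be held on this CPU: never spin in NMI */
		if (!__raw_spin_trylock(&cpu_buffer->lock))
			goto out_reset;
	} else
		__raw_spin_lock(&cpu_buffer->lock);

	lock_taken = true;

	if (demo_page_move_failed(cpu_buffer))	/* e.g. reader page collision */
		goto out_reset;

	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return 0;

 out_reset:
	/* Unlock only if we actually took the lock */
	if (likely(lock_taken))
		__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return -EBUSY;
}
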
@@ -1012,7 +1052,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
1012 | 1052 | ||
1013 | /* we grabbed the lock before incrementing */ | 1053 | /* we grabbed the lock before incrementing */ |
1014 | if (RB_WARN_ON(cpu_buffer, next_page == reader_page)) | 1054 | if (RB_WARN_ON(cpu_buffer, next_page == reader_page)) |
1015 | goto out_unlock; | 1055 | goto out_reset; |
1016 | 1056 | ||
1017 | /* | 1057 | /* |
1018 | * If for some reason, we had an interrupt storm that made | 1058 | * If for some reason, we had an interrupt storm that made |
@@ -1021,12 +1061,12 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
1021 | */ | 1061 | */ |
1022 | if (unlikely(next_page == commit_page)) { | 1062 | if (unlikely(next_page == commit_page)) { |
1023 | WARN_ON_ONCE(1); | 1063 | WARN_ON_ONCE(1); |
1024 | goto out_unlock; | 1064 | goto out_reset; |
1025 | } | 1065 | } |
1026 | 1066 | ||
1027 | if (next_page == head_page) { | 1067 | if (next_page == head_page) { |
1028 | if (!(buffer->flags & RB_FL_OVERWRITE)) | 1068 | if (!(buffer->flags & RB_FL_OVERWRITE)) |
1029 | goto out_unlock; | 1069 | goto out_reset; |
1030 | 1070 | ||
1031 | /* tail_page has not moved yet? */ | 1071 | /* tail_page has not moved yet? */ |
1032 | if (tail_page == cpu_buffer->tail_page) { | 1072 | if (tail_page == cpu_buffer->tail_page) { |
@@ -1100,12 +1140,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, | |||
1100 | 1140 | ||
1101 | return event; | 1141 | return event; |
1102 | 1142 | ||
1103 | out_unlock: | 1143 | out_reset: |
1104 | /* reset write */ | 1144 | /* reset write */ |
1105 | if (tail <= BUF_PAGE_SIZE) | 1145 | if (tail <= BUF_PAGE_SIZE) |
1106 | local_set(&tail_page->write, tail); | 1146 | local_set(&tail_page->write, tail); |
1107 | 1147 | ||
1108 | __raw_spin_unlock(&cpu_buffer->lock); | 1148 | if (likely(lock_taken)) |
1149 | __raw_spin_unlock(&cpu_buffer->lock); | ||
1109 | local_irq_restore(flags); | 1150 | local_irq_restore(flags); |
1110 | return NULL; | 1151 | return NULL; |
1111 | } | 1152 | } |
@@ -1265,7 +1306,6 @@ static DEFINE_PER_CPU(int, rb_need_resched); | |||
1265 | * ring_buffer_lock_reserve - reserve a part of the buffer | 1306 | * ring_buffer_lock_reserve - reserve a part of the buffer |
1266 | * @buffer: the ring buffer to reserve from | 1307 | * @buffer: the ring buffer to reserve from |
1267 | * @length: the length of the data to reserve (excluding event header) | 1308 | * @length: the length of the data to reserve (excluding event header) |
1268 | * @flags: a pointer to save the interrupt flags | ||
1269 | * | 1309 | * |
1270 | * Returns a reserved event on the ring buffer to copy directly to. | 1310 | * Returns a reserved event on the ring buffer to copy directly to. |
1271 | * The user of this interface will need to get the body to write into | 1311 | * The user of this interface will need to get the body to write into |
@@ -1278,9 +1318,7 @@ static DEFINE_PER_CPU(int, rb_need_resched); | |||
1278 | * If NULL is returned, then nothing has been allocated or locked. | 1318 | * If NULL is returned, then nothing has been allocated or locked. |
1279 | */ | 1319 | */ |
1280 | struct ring_buffer_event * | 1320 | struct ring_buffer_event * |
1281 | ring_buffer_lock_reserve(struct ring_buffer *buffer, | 1321 | ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length) |
1282 | unsigned long length, | ||
1283 | unsigned long *flags) | ||
1284 | { | 1322 | { |
1285 | struct ring_buffer_per_cpu *cpu_buffer; | 1323 | struct ring_buffer_per_cpu *cpu_buffer; |
1286 | struct ring_buffer_event *event; | 1324 | struct ring_buffer_event *event; |
@@ -1347,15 +1385,13 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, | |||
1347 | * ring_buffer_unlock_commit - commit a reserved | 1385 | * ring_buffer_unlock_commit - commit a reserved |
1348 | * @buffer: The buffer to commit to | 1386 | * @buffer: The buffer to commit to |
1349 | * @event: The event pointer to commit. | 1387 | * @event: The event pointer to commit. |
1350 | * @flags: the interrupt flags received from ring_buffer_lock_reserve. | ||
1351 | * | 1388 | * |
1352 | * This commits the data to the ring buffer, and releases any locks held. | 1389 | * This commits the data to the ring buffer, and releases any locks held. |
1353 | * | 1390 | * |
1354 | * Must be paired with ring_buffer_lock_reserve. | 1391 | * Must be paired with ring_buffer_lock_reserve. |
1355 | */ | 1392 | */ |
1356 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, | 1393 | int ring_buffer_unlock_commit(struct ring_buffer *buffer, |
1357 | struct ring_buffer_event *event, | 1394 | struct ring_buffer_event *event) |
1358 | unsigned long flags) | ||
1359 | { | 1395 | { |
1360 | struct ring_buffer_per_cpu *cpu_buffer; | 1396 | struct ring_buffer_per_cpu *cpu_buffer; |
1361 | int cpu = raw_smp_processor_id(); | 1397 | int cpu = raw_smp_processor_id(); |
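
The two hunks above drop the @flags parameter from both halves of the reserve/commit pair; interrupt state is now managed inside the ring buffer. A hedged sketch of a caller updated for the new signatures (my_payload is an assumed example type, not from this patch):

/*
 * Sketch: writing one record with the new two-argument API. The old
 * calls threaded an unsigned long flags value through reserve and
 * commit; that plumbing is gone.
 */
struct my_payload {
	unsigned long ip;
};

static int write_sample(struct ring_buffer *buffer, unsigned long ip)
{
	struct ring_buffer_event *event;
	struct my_payload *entry;

	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;

	return ring_buffer_unlock_commit(buffer, event);
}
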
@@ -1438,7 +1474,7 @@ int ring_buffer_write(struct ring_buffer *buffer, | |||
1438 | } | 1474 | } |
1439 | EXPORT_SYMBOL_GPL(ring_buffer_write); | 1475 | EXPORT_SYMBOL_GPL(ring_buffer_write); |
1440 | 1476 | ||
1441 | static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) | 1477 | static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) |
1442 | { | 1478 | { |
1443 | struct buffer_page *reader = cpu_buffer->reader_page; | 1479 | struct buffer_page *reader = cpu_buffer->reader_page; |
1444 | struct buffer_page *head = cpu_buffer->head_page; | 1480 | struct buffer_page *head = cpu_buffer->head_page; |
@@ -2277,9 +2313,24 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2277 | if (buffer_a->pages != buffer_b->pages) | 2313 | if (buffer_a->pages != buffer_b->pages) |
2278 | return -EINVAL; | 2314 | return -EINVAL; |
2279 | 2315 | ||
2316 | if (ring_buffer_flags != RB_BUFFERS_ON) | ||
2317 | return -EAGAIN; | ||
2318 | |||
2319 | if (atomic_read(&buffer_a->record_disabled)) | ||
2320 | return -EAGAIN; | ||
2321 | |||
2322 | if (atomic_read(&buffer_b->record_disabled)) | ||
2323 | return -EAGAIN; | ||
2324 | |||
2280 | cpu_buffer_a = buffer_a->buffers[cpu]; | 2325 | cpu_buffer_a = buffer_a->buffers[cpu]; |
2281 | cpu_buffer_b = buffer_b->buffers[cpu]; | 2326 | cpu_buffer_b = buffer_b->buffers[cpu]; |
2282 | 2327 | ||
2328 | if (atomic_read(&cpu_buffer_a->record_disabled)) | ||
2329 | return -EAGAIN; | ||
2330 | |||
2331 | if (atomic_read(&cpu_buffer_b->record_disabled)) | ||
2332 | return -EAGAIN; | ||
2333 | |||
2283 | /* | 2334 | /* |
2284 | * We can't do a synchronize_sched here because this | 2335 | * We can't do a synchronize_sched here because this |
2285 | * function can be called in atomic context. | 2336 | * function can be called in atomic context. |
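
With the checks added above, ring_buffer_swap_cpu() refuses to swap while tracing is globally off or while either buffer (or either per-cpu buffer) has recording disabled, returning -EAGAIN instead of racing. Callers are expected to treat -EAGAIN as a benign miss, which is what the WARN_ON_ONCE(ret && ret != -EAGAIN) change in trace.c further down relies on; a hedged caller sketch:

/*
 * Sketch: a max-latency style caller that tolerates the new -EAGAIN.
 * Only a failure other than -EAGAIN would indicate a real bug.
 */
static void demo_snapshot_cpu(struct ring_buffer *live,
			      struct ring_buffer *max, int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(max, live, cpu);
	if (ret == -EAGAIN)
		return;		/* recording disabled: try again later */

	WARN_ON_ONCE(ret);	/* -EINVAL etc. would be a caller bug */
}
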
@@ -2303,13 +2354,14 @@ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, | |||
2303 | EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); | 2354 | EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); |
2304 | 2355 | ||
2305 | static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, | 2356 | static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, |
2306 | struct buffer_data_page *bpage) | 2357 | struct buffer_data_page *bpage, |
2358 | unsigned int offset) | ||
2307 | { | 2359 | { |
2308 | struct ring_buffer_event *event; | 2360 | struct ring_buffer_event *event; |
2309 | unsigned long head; | 2361 | unsigned long head; |
2310 | 2362 | ||
2311 | __raw_spin_lock(&cpu_buffer->lock); | 2363 | __raw_spin_lock(&cpu_buffer->lock); |
2312 | for (head = 0; head < local_read(&bpage->commit); | 2364 | for (head = offset; head < local_read(&bpage->commit); |
2313 | head += rb_event_length(event)) { | 2365 | head += rb_event_length(event)) { |
2314 | 2366 | ||
2315 | event = __rb_data_page_index(bpage, head); | 2367 | event = __rb_data_page_index(bpage, head); |
@@ -2340,8 +2392,8 @@ static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer, | |||
2340 | */ | 2392 | */ |
2341 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) | 2393 | void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) |
2342 | { | 2394 | { |
2343 | unsigned long addr; | ||
2344 | struct buffer_data_page *bpage; | 2395 | struct buffer_data_page *bpage; |
2396 | unsigned long addr; | ||
2345 | 2397 | ||
2346 | addr = __get_free_page(GFP_KERNEL); | 2398 | addr = __get_free_page(GFP_KERNEL); |
2347 | if (!addr) | 2399 | if (!addr) |
@@ -2349,6 +2401,8 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer) | |||
2349 | 2401 | ||
2350 | bpage = (void *)addr; | 2402 | bpage = (void *)addr; |
2351 | 2403 | ||
2404 | rb_init_page(bpage); | ||
2405 | |||
2352 | return bpage; | 2406 | return bpage; |
2353 | } | 2407 | } |
2354 | 2408 | ||
@@ -2368,6 +2422,7 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) | |||
2368 | * ring_buffer_read_page - extract a page from the ring buffer | 2422 | * ring_buffer_read_page - extract a page from the ring buffer |
2369 | * @buffer: buffer to extract from | 2423 | * @buffer: buffer to extract from |
2370 | * @data_page: the page to use allocated from ring_buffer_alloc_read_page | 2424 | * @data_page: the page to use allocated from ring_buffer_alloc_read_page |
2425 | * @len: amount to extract | ||
2371 | * @cpu: the cpu of the buffer to extract | 2426 | * @cpu: the cpu of the buffer to extract |
2372 | * @full: should the extraction only happen when the page is full. | 2427 | * @full: should the extraction only happen when the page is full. |
2373 | * | 2428 | * |
@@ -2377,12 +2432,12 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) | |||
2377 | * to swap with a page in the ring buffer. | 2432 | * to swap with a page in the ring buffer. |
2378 | * | 2433 | * |
2379 | * for example: | 2434 | * for example: |
2380 | * rpage = ring_buffer_alloc_page(buffer); | 2435 | * rpage = ring_buffer_alloc_read_page(buffer); |
2381 | * if (!rpage) | 2436 | * if (!rpage) |
2382 | * return error; | 2437 | * return error; |
2383 | * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0); | 2438 | * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); |
2384 | * if (ret) | 2439 | * if (ret >= 0) |
2385 | * process_page(rpage); | 2440 | * process_page(rpage, ret); |
2386 | * | 2441 | * |
2387 | * When @full is set, the function will not consume the data unless | 2442 | * When @full is set, the function will not consume the data unless |
2388 | * the writer is off the reader page. | 2443 | * the writer is off the reader page. |
@@ -2393,69 +2448,111 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) | |||
2393 | * responsible for that. | 2448 | * responsible for that. |
2394 | * | 2449 | * |
2395 | * Returns: | 2450 | * Returns: |
2396 | * 1 if data has been transferred | 2451 | * >=0 if data has been transferred, returns the offset of consumed data. |
2397 | * 0 if no data has been transferred. | 2452 | * <0 if no data has been transferred. |
2398 | */ | 2453 | */ |
2399 | int ring_buffer_read_page(struct ring_buffer *buffer, | 2454 | int ring_buffer_read_page(struct ring_buffer *buffer, |
2400 | void **data_page, int cpu, int full) | 2455 | void **data_page, size_t len, int cpu, int full) |
2401 | { | 2456 | { |
2402 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; | 2457 | struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; |
2403 | struct ring_buffer_event *event; | 2458 | struct ring_buffer_event *event; |
2404 | struct buffer_data_page *bpage; | 2459 | struct buffer_data_page *bpage; |
2460 | struct buffer_page *reader; | ||
2405 | unsigned long flags; | 2461 | unsigned long flags; |
2406 | int ret = 0; | 2462 | unsigned int commit; |
2463 | unsigned int read; | ||
2464 | u64 save_timestamp; | ||
2465 | int ret = -1; | ||
2466 | |||
2467 | /* | ||
2468 | * If len is not big enough to hold the page header, then | ||
2469 | * we can not copy anything. | ||
2470 | */ | ||
2471 | if (len <= BUF_PAGE_HDR_SIZE) | ||
2472 | return -1; | ||
2473 | |||
2474 | len -= BUF_PAGE_HDR_SIZE; | ||
2407 | 2475 | ||
2408 | if (!data_page) | 2476 | if (!data_page) |
2409 | return 0; | 2477 | return -1; |
2410 | 2478 | ||
2411 | bpage = *data_page; | 2479 | bpage = *data_page; |
2412 | if (!bpage) | 2480 | if (!bpage) |
2413 | return 0; | 2481 | return -1; |
2414 | 2482 | ||
2415 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); | 2483 | spin_lock_irqsave(&cpu_buffer->reader_lock, flags); |
2416 | 2484 | ||
2417 | /* | 2485 | reader = rb_get_reader_page(cpu_buffer); |
2418 | * rb_buffer_peek will get the next ring buffer if | 2486 | if (!reader) |
2419 | * the current reader page is empty. | ||
2420 | */ | ||
2421 | event = rb_buffer_peek(buffer, cpu, NULL); | ||
2422 | if (!event) | ||
2423 | goto out; | 2487 | goto out; |
2424 | 2488 | ||
2425 | /* check for data */ | 2489 | event = rb_reader_event(cpu_buffer); |
2426 | if (!local_read(&cpu_buffer->reader_page->page->commit)) | 2490 | |
2427 | goto out; | 2491 | read = reader->read; |
2492 | commit = rb_page_commit(reader); | ||
2493 | |||
2428 | /* | 2494 | /* |
2429 | * If the writer is already off of the read page, then simply | 2495 | * If this page has been partially read or |
2430 | * switch the read page with the given page. Otherwise | 2496 | * if len is not big enough to read the rest of the page or |
2431 | * we need to copy the data from the reader to the writer. | 2497 | * a writer is still on the page, then |
2498 | * we must copy the data from the page to the buffer. | ||
2499 | * Otherwise, we can simply swap the page with the one passed in. | ||
2432 | */ | 2500 | */ |
2433 | if (cpu_buffer->reader_page == cpu_buffer->commit_page) { | 2501 | if (read || (len < (commit - read)) || |
2434 | unsigned int read = cpu_buffer->reader_page->read; | 2502 | cpu_buffer->reader_page == cpu_buffer->commit_page) { |
2503 | struct buffer_data_page *rpage = cpu_buffer->reader_page->page; | ||
2504 | unsigned int rpos = read; | ||
2505 | unsigned int pos = 0; | ||
2506 | unsigned int size; | ||
2435 | 2507 | ||
2436 | if (full) | 2508 | if (full) |
2437 | goto out; | 2509 | goto out; |
2438 | /* The writer is still on the reader page, we must copy */ | ||
2439 | bpage = cpu_buffer->reader_page->page; | ||
2440 | memcpy(bpage->data, | ||
2441 | cpu_buffer->reader_page->page->data + read, | ||
2442 | local_read(&bpage->commit) - read); | ||
2443 | 2510 | ||
2444 | /* consume what was read */ | 2511 | if (len > (commit - read)) |
2445 | cpu_buffer->reader_page += read; | 2512 | len = (commit - read); |
2446 | 2513 | ||
2514 | size = rb_event_length(event); | ||
2515 | |||
2516 | if (len < size) | ||
2517 | goto out; | ||
2518 | |||
2519 | /* save the current timestamp, since the user will need it */ | ||
2520 | save_timestamp = cpu_buffer->read_stamp; | ||
2521 | |||
2522 | /* Need to copy one event at a time */ | ||
2523 | do { | ||
2524 | memcpy(bpage->data + pos, rpage->data + rpos, size); | ||
2525 | |||
2526 | len -= size; | ||
2527 | |||
2528 | rb_advance_reader(cpu_buffer); | ||
2529 | rpos = reader->read; | ||
2530 | pos += size; | ||
2531 | |||
2532 | event = rb_reader_event(cpu_buffer); | ||
2533 | size = rb_event_length(event); | ||
2534 | } while (len > size); | ||
2535 | |||
2536 | /* update bpage */ | ||
2537 | local_set(&bpage->commit, pos); | ||
2538 | bpage->time_stamp = save_timestamp; | ||
2539 | |||
2540 | /* we copied everything to the beginning */ | ||
2541 | read = 0; | ||
2447 | } else { | 2542 | } else { |
2448 | /* swap the pages */ | 2543 | /* swap the pages */ |
2449 | rb_init_page(bpage); | 2544 | rb_init_page(bpage); |
2450 | bpage = cpu_buffer->reader_page->page; | 2545 | bpage = reader->page; |
2451 | cpu_buffer->reader_page->page = *data_page; | 2546 | reader->page = *data_page; |
2452 | cpu_buffer->reader_page->read = 0; | 2547 | local_set(&reader->write, 0); |
2548 | reader->read = 0; | ||
2453 | *data_page = bpage; | 2549 | *data_page = bpage; |
2550 | |||
2551 | /* update the entry counter */ | ||
2552 | rb_remove_entries(cpu_buffer, bpage, read); | ||
2454 | } | 2553 | } |
2455 | ret = 1; | 2554 | ret = read; |
2456 | 2555 | ||
2457 | /* update the entry counter */ | ||
2458 | rb_remove_entries(cpu_buffer, bpage); | ||
2459 | out: | 2556 | out: |
2460 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); | 2557 | spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); |
2461 | 2558 | ||
@@ -2466,7 +2563,7 @@ static ssize_t | |||
2466 | rb_simple_read(struct file *filp, char __user *ubuf, | 2563 | rb_simple_read(struct file *filp, char __user *ubuf, |
2467 | size_t cnt, loff_t *ppos) | 2564 | size_t cnt, loff_t *ppos) |
2468 | { | 2565 | { |
2469 | long *p = filp->private_data; | 2566 | unsigned long *p = filp->private_data; |
2470 | char buf[64]; | 2567 | char buf[64]; |
2471 | int r; | 2568 | int r; |
2472 | 2569 | ||
@@ -2482,9 +2579,9 @@ static ssize_t | |||
2482 | rb_simple_write(struct file *filp, const char __user *ubuf, | 2579 | rb_simple_write(struct file *filp, const char __user *ubuf, |
2483 | size_t cnt, loff_t *ppos) | 2580 | size_t cnt, loff_t *ppos) |
2484 | { | 2581 | { |
2485 | long *p = filp->private_data; | 2582 | unsigned long *p = filp->private_data; |
2486 | char buf[64]; | 2583 | char buf[64]; |
2487 | long val; | 2584 | unsigned long val; |
2488 | int ret; | 2585 | int ret; |
2489 | 2586 | ||
2490 | if (cnt >= sizeof(buf)) | 2587 | if (cnt >= sizeof(buf)) |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 17bb88d86ac2..c0e9c1263393 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -11,32 +11,33 @@ | |||
11 | * Copyright (C) 2004-2006 Ingo Molnar | 11 | * Copyright (C) 2004-2006 Ingo Molnar |
12 | * Copyright (C) 2004 William Lee Irwin III | 12 | * Copyright (C) 2004 William Lee Irwin III |
13 | */ | 13 | */ |
14 | #include <linux/ring_buffer.h> | ||
14 | #include <linux/utsrelease.h> | 15 | #include <linux/utsrelease.h> |
16 | #include <linux/stacktrace.h> | ||
17 | #include <linux/writeback.h> | ||
15 | #include <linux/kallsyms.h> | 18 | #include <linux/kallsyms.h> |
16 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
17 | #include <linux/notifier.h> | 20 | #include <linux/notifier.h> |
21 | #include <linux/irqflags.h> | ||
18 | #include <linux/debugfs.h> | 22 | #include <linux/debugfs.h> |
19 | #include <linux/pagemap.h> | 23 | #include <linux/pagemap.h> |
20 | #include <linux/hardirq.h> | 24 | #include <linux/hardirq.h> |
21 | #include <linux/linkage.h> | 25 | #include <linux/linkage.h> |
22 | #include <linux/uaccess.h> | 26 | #include <linux/uaccess.h> |
27 | #include <linux/kprobes.h> | ||
23 | #include <linux/ftrace.h> | 28 | #include <linux/ftrace.h> |
24 | #include <linux/module.h> | 29 | #include <linux/module.h> |
25 | #include <linux/percpu.h> | 30 | #include <linux/percpu.h> |
31 | #include <linux/splice.h> | ||
26 | #include <linux/kdebug.h> | 32 | #include <linux/kdebug.h> |
27 | #include <linux/ctype.h> | 33 | #include <linux/ctype.h> |
28 | #include <linux/init.h> | 34 | #include <linux/init.h> |
29 | #include <linux/poll.h> | 35 | #include <linux/poll.h> |
30 | #include <linux/gfp.h> | 36 | #include <linux/gfp.h> |
31 | #include <linux/fs.h> | 37 | #include <linux/fs.h> |
32 | #include <linux/kprobes.h> | ||
33 | #include <linux/writeback.h> | ||
34 | |||
35 | #include <linux/stacktrace.h> | ||
36 | #include <linux/ring_buffer.h> | ||
37 | #include <linux/irqflags.h> | ||
38 | 38 | ||
39 | #include "trace.h" | 39 | #include "trace.h" |
40 | #include "trace_output.h" | ||
40 | 41 | ||
41 | #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) | 42 | #define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE) |
42 | 43 | ||
@@ -47,11 +48,16 @@ unsigned long __read_mostly tracing_thresh; | |||
47 | * We need to change this state when a selftest is running. | 48 | * We need to change this state when a selftest is running. |
48 | * A selftest will lurk into the ring-buffer to count the | 49 | * A selftest will lurk into the ring-buffer to count the |
49 | * entries inserted during the selftest although some concurrent | 50 | * entries inserted during the selftest although some concurrent |
50 | * insertions into the ring-buffer such as ftrace_printk could occur | 51 | * insertions into the ring-buffer such as trace_printk could occur |
51 | * at the same time, giving false positive or negative results. | 52 | * at the same time, giving false positive or negative results. |
52 | */ | 53 | */ |
53 | static bool __read_mostly tracing_selftest_running; | 54 | static bool __read_mostly tracing_selftest_running; |
54 | 55 | ||
56 | /* | ||
57 | * If a tracer is running, we do not want to run SELFTEST. | ||
58 | */ | ||
59 | static bool __read_mostly tracing_selftest_disabled; | ||
60 | |||
55 | /* For tracers that don't implement custom flags */ | 61 | /* For tracers that don't implement custom flags */ |
56 | static struct tracer_opt dummy_tracer_opt[] = { | 62 | static struct tracer_opt dummy_tracer_opt[] = { |
57 | { } | 63 | { } |
@@ -73,7 +79,7 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set) | |||
73 | * of the tracer is successful. But that is the only place that sets | 79 | * of the tracer is successful. But that is the only place that sets |
74 | * this back to zero. | 80 | * this back to zero. |
75 | */ | 81 | */ |
76 | int tracing_disabled = 1; | 82 | static int tracing_disabled = 1; |
77 | 83 | ||
78 | static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); | 84 | static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); |
79 | 85 | ||
@@ -91,6 +97,9 @@ static inline void ftrace_enable_cpu(void) | |||
91 | 97 | ||
92 | static cpumask_var_t __read_mostly tracing_buffer_mask; | 98 | static cpumask_var_t __read_mostly tracing_buffer_mask; |
93 | 99 | ||
100 | /* Define which cpu buffers are currently read in trace_pipe */ | ||
101 | static cpumask_var_t tracing_reader_cpumask; | ||
102 | |||
94 | #define for_each_tracing_cpu(cpu) \ | 103 | #define for_each_tracing_cpu(cpu) \ |
95 | for_each_cpu(cpu, tracing_buffer_mask) | 104 | for_each_cpu(cpu, tracing_buffer_mask) |
96 | 105 | ||
@@ -109,14 +118,19 @@ static cpumask_var_t __read_mostly tracing_buffer_mask; | |||
109 | */ | 118 | */ |
110 | int ftrace_dump_on_oops; | 119 | int ftrace_dump_on_oops; |
111 | 120 | ||
112 | static int tracing_set_tracer(char *buf); | 121 | static int tracing_set_tracer(const char *buf); |
122 | |||
123 | #define BOOTUP_TRACER_SIZE 100 | ||
124 | static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata; | ||
125 | static char *default_bootup_tracer; | ||
113 | 126 | ||
114 | static int __init set_ftrace(char *str) | 127 | static int __init set_ftrace(char *str) |
115 | { | 128 | { |
116 | tracing_set_tracer(str); | 129 | strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE); |
130 | default_bootup_tracer = bootup_tracer_buf; | ||
117 | return 1; | 131 | return 1; |
118 | } | 132 | } |
119 | __setup("ftrace", set_ftrace); | 133 | __setup("ftrace=", set_ftrace); |
120 | 134 | ||
121 | static int __init set_ftrace_dump_on_oops(char *str) | 135 | static int __init set_ftrace_dump_on_oops(char *str) |
122 | { | 136 | { |
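
set_ftrace() can no longer call tracing_set_tracer() directly: at __setup() time the tracer it names may not be registered yet. The hunk above therefore only records the name, and register_tracer() matches and starts it later (see the hunk further down). A condensed sketch of this record-now-act-later pattern; maybe_start_boot_tracer() is an illustrative name for the logic that actually lives inside register_tracer():

/*
 * Sketch: the boot argument is copied into an __initdata buffer at
 * parse time and consumed once the named tracer registers itself.
 */
#define BOOTUP_TRACER_SIZE 100
static char bootup_tracer_buf[BOOTUP_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, BOOTUP_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	return 1;
}
__setup("ftrace=", set_ftrace);

/* Later, called with each newly registered tracer: */
static void maybe_start_boot_tracer(struct tracer *type)
{
	if (!default_bootup_tracer)
		return;
	if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE))
		return;
	tracing_set_tracer(type->name);	/* tracer exists now: safe to start */
	default_bootup_tracer = NULL;
}
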
@@ -186,9 +200,6 @@ int tracing_is_enabled(void) | |||
186 | return tracer_enabled; | 200 | return tracer_enabled; |
187 | } | 201 | } |
188 | 202 | ||
189 | /* function tracing enabled */ | ||
190 | int ftrace_function_enabled; | ||
191 | |||
192 | /* | 203 | /* |
193 | * trace_buf_size is the size in bytes that is allocated | 204 | * trace_buf_size is the size in bytes that is allocated |
194 | * for a buffer. Note, the number of bytes is always rounded | 205 | * for a buffer. Note, the number of bytes is always rounded |
@@ -229,7 +240,7 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); | |||
229 | 240 | ||
230 | /* trace_flags holds trace_options default values */ | 241 | /* trace_flags holds trace_options default values */ |
231 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | | 242 | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | |
232 | TRACE_ITER_ANNOTATE; | 243 | TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO; |
233 | 244 | ||
234 | /** | 245 | /** |
235 | * trace_wake_up - wake up tasks waiting for trace input | 246 | * trace_wake_up - wake up tasks waiting for trace input |
@@ -280,13 +291,15 @@ static const char *trace_options[] = { | |||
280 | "block", | 291 | "block", |
281 | "stacktrace", | 292 | "stacktrace", |
282 | "sched-tree", | 293 | "sched-tree", |
283 | "ftrace_printk", | 294 | "trace_printk", |
284 | "ftrace_preempt", | 295 | "ftrace_preempt", |
285 | "branch", | 296 | "branch", |
286 | "annotate", | 297 | "annotate", |
287 | "userstacktrace", | 298 | "userstacktrace", |
288 | "sym-userobj", | 299 | "sym-userobj", |
289 | "printk-msg-only", | 300 | "printk-msg-only", |
301 | "context-info", | ||
302 | "latency-format", | ||
290 | NULL | 303 | NULL |
291 | }; | 304 | }; |
292 | 305 | ||
@@ -326,146 +339,37 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
326 | data->rt_priority = tsk->rt_priority; | 339 | data->rt_priority = tsk->rt_priority; |
327 | 340 | ||
328 | /* record this task's comm */ | 341 | /* record this task's comm */ |
329 | tracing_record_cmdline(current); | 342 | tracing_record_cmdline(tsk); |
330 | } | 343 | } |
331 | 344 | ||
332 | /** | 345 | ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) |
333 | * trace_seq_printf - sequence printing of trace information | ||
334 | * @s: trace sequence descriptor | ||
335 | * @fmt: printf format string | ||
336 | * | ||
337 | * The tracer may use either sequence operations or its own | ||
338 | * copy to user routines. To simplify formating of a trace | ||
339 | * trace_seq_printf is used to store strings into a special | ||
340 | * buffer (@s). Then the output may be either used by | ||
341 | * the sequencer or pulled into another buffer. | ||
342 | */ | ||
343 | int | ||
344 | trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
345 | { | 346 | { |
346 | int len = (PAGE_SIZE - 1) - s->len; | 347 | int len; |
347 | va_list ap; | ||
348 | int ret; | 348 | int ret; |
349 | 349 | ||
350 | if (!len) | 350 | if (!cnt) |
351 | return 0; | ||
352 | |||
353 | va_start(ap, fmt); | ||
354 | ret = vsnprintf(s->buffer + s->len, len, fmt, ap); | ||
355 | va_end(ap); | ||
356 | |||
357 | /* If we can't write it all, don't bother writing anything */ | ||
358 | if (ret >= len) | ||
359 | return 0; | ||
360 | |||
361 | s->len += ret; | ||
362 | |||
363 | return len; | ||
364 | } | ||
365 | |||
366 | /** | ||
367 | * trace_seq_puts - trace sequence printing of simple string | ||
368 | * @s: trace sequence descriptor | ||
369 | * @str: simple string to record | ||
370 | * | ||
371 | * The tracer may use either the sequence operations or its own | ||
372 | * copy to user routines. This function records a simple string | ||
373 | * into a special buffer (@s) for later retrieval by a sequencer | ||
374 | * or other mechanism. | ||
375 | */ | ||
376 | static int | ||
377 | trace_seq_puts(struct trace_seq *s, const char *str) | ||
378 | { | ||
379 | int len = strlen(str); | ||
380 | |||
381 | if (len > ((PAGE_SIZE - 1) - s->len)) | ||
382 | return 0; | ||
383 | |||
384 | memcpy(s->buffer + s->len, str, len); | ||
385 | s->len += len; | ||
386 | |||
387 | return len; | ||
388 | } | ||
389 | |||
390 | static int | ||
391 | trace_seq_putc(struct trace_seq *s, unsigned char c) | ||
392 | { | ||
393 | if (s->len >= (PAGE_SIZE - 1)) | ||
394 | return 0; | ||
395 | |||
396 | s->buffer[s->len++] = c; | ||
397 | |||
398 | return 1; | ||
399 | } | ||
400 | |||
401 | static int | ||
402 | trace_seq_putmem(struct trace_seq *s, void *mem, size_t len) | ||
403 | { | ||
404 | if (len > ((PAGE_SIZE - 1) - s->len)) | ||
405 | return 0; | 351 | return 0; |
406 | 352 | ||
407 | memcpy(s->buffer + s->len, mem, len); | 353 | if (s->len <= s->readpos) |
408 | s->len += len; | 354 | return -EBUSY; |
409 | |||
410 | return len; | ||
411 | } | ||
412 | |||
413 | #define MAX_MEMHEX_BYTES 8 | ||
414 | #define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) | ||
415 | |||
416 | static int | ||
417 | trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) | ||
418 | { | ||
419 | unsigned char hex[HEX_CHARS]; | ||
420 | unsigned char *data = mem; | ||
421 | int i, j; | ||
422 | |||
423 | #ifdef __BIG_ENDIAN | ||
424 | for (i = 0, j = 0; i < len; i++) { | ||
425 | #else | ||
426 | for (i = len-1, j = 0; i >= 0; i--) { | ||
427 | #endif | ||
428 | hex[j++] = hex_asc_hi(data[i]); | ||
429 | hex[j++] = hex_asc_lo(data[i]); | ||
430 | } | ||
431 | hex[j++] = ' '; | ||
432 | |||
433 | return trace_seq_putmem(s, hex, j); | ||
434 | } | ||
435 | |||
436 | static int | ||
437 | trace_seq_path(struct trace_seq *s, struct path *path) | ||
438 | { | ||
439 | unsigned char *p; | ||
440 | 355 | ||
441 | if (s->len >= (PAGE_SIZE - 1)) | 356 | len = s->len - s->readpos; |
442 | return 0; | 357 | if (cnt > len) |
443 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); | 358 | cnt = len; |
444 | if (!IS_ERR(p)) { | 359 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); |
445 | p = mangle_path(s->buffer + s->len, p, "\n"); | 360 | if (ret == cnt) |
446 | if (p) { | 361 | return -EFAULT; |
447 | s->len = p - s->buffer; | ||
448 | return 1; | ||
449 | } | ||
450 | } else { | ||
451 | s->buffer[s->len++] = '?'; | ||
452 | return 1; | ||
453 | } | ||
454 | 362 | ||
455 | return 0; | 363 | cnt -= ret; |
456 | } | ||
457 | 364 | ||
458 | static void | 365 | s->readpos += cnt; |
459 | trace_seq_reset(struct trace_seq *s) | 366 | return cnt; |
460 | { | ||
461 | s->len = 0; | ||
462 | s->readpos = 0; | ||
463 | } | 367 | } |
464 | 368 | ||
465 | ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) | 369 | ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt) |
466 | { | 370 | { |
467 | int len; | 371 | int len; |
468 | int ret; | 372 | void *ret; |
469 | 373 | ||
470 | if (s->len <= s->readpos) | 374 | if (s->len <= s->readpos) |
471 | return -EBUSY; | 375 | return -EBUSY; |
@@ -473,11 +377,11 @@ ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt) | |||
473 | len = s->len - s->readpos; | 377 | len = s->len - s->readpos; |
474 | if (cnt > len) | 378 | if (cnt > len) |
475 | cnt = len; | 379 | cnt = len; |
476 | ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt); | 380 | ret = memcpy(buf, s->buffer + s->readpos, cnt); |
477 | if (ret) | 381 | if (!ret) |
478 | return -EFAULT; | 382 | return -EFAULT; |
479 | 383 | ||
480 | s->readpos += len; | 384 | s->readpos += cnt; |
481 | return cnt; | 385 | return cnt; |
482 | } | 386 | } |
483 | 387 | ||
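
The split above is subtle: copy_to_user() returns the number of bytes it could NOT copy, so the new trace_seq_to_user() can make partial progress and fails only when nothing reached user space, while trace_seq_to_buffer() uses memcpy(), which cannot fail partially. A small sketch of the user-copy accounting under those semantics:

/*
 * Sketch: partial-copy accounting with copy_to_user() semantics.
 * 'ret' counts the bytes not copied; only a total miss is -EFAULT.
 */
static ssize_t demo_seq_copy_out(const char *src, char __user *ubuf,
				 size_t cnt)
{
	size_t ret;

	ret = copy_to_user(ubuf, src, cnt);
	if (ret == cnt)
		return -EFAULT;		/* nothing reached user space */

	return cnt - ret;		/* bytes actually delivered */
}
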
@@ -489,7 +393,7 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s) | |||
489 | s->buffer[len] = 0; | 393 | s->buffer[len] = 0; |
490 | seq_puts(m, s->buffer); | 394 | seq_puts(m, s->buffer); |
491 | 395 | ||
492 | trace_seq_reset(s); | 396 | trace_seq_init(s); |
493 | } | 397 | } |
494 | 398 | ||
495 | /** | 399 | /** |
@@ -543,7 +447,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
543 | 447 | ||
544 | ftrace_enable_cpu(); | 448 | ftrace_enable_cpu(); |
545 | 449 | ||
546 | WARN_ON_ONCE(ret); | 450 | WARN_ON_ONCE(ret && ret != -EAGAIN); |
547 | 451 | ||
548 | __update_max_tr(tr, tsk, cpu); | 452 | __update_max_tr(tr, tsk, cpu); |
549 | __raw_spin_unlock(&ftrace_max_lock); | 453 | __raw_spin_unlock(&ftrace_max_lock); |
@@ -556,6 +460,8 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
556 | * Register a new plugin tracer. | 460 | * Register a new plugin tracer. |
557 | */ | 461 | */ |
558 | int register_tracer(struct tracer *type) | 462 | int register_tracer(struct tracer *type) |
463 | __releases(kernel_lock) | ||
464 | __acquires(kernel_lock) | ||
559 | { | 465 | { |
560 | struct tracer *t; | 466 | struct tracer *t; |
561 | int len; | 467 | int len; |
@@ -594,9 +500,12 @@ int register_tracer(struct tracer *type) | |||
594 | else | 500 | else |
595 | if (!type->flags->opts) | 501 | if (!type->flags->opts) |
596 | type->flags->opts = dummy_tracer_opt; | 502 | type->flags->opts = dummy_tracer_opt; |
503 | if (!type->wait_pipe) | ||
504 | type->wait_pipe = default_wait_pipe; | ||
505 | |||
597 | 506 | ||
598 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 507 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
599 | if (type->selftest) { | 508 | if (type->selftest && !tracing_selftest_disabled) { |
600 | struct tracer *saved_tracer = current_trace; | 509 | struct tracer *saved_tracer = current_trace; |
601 | struct trace_array *tr = &global_trace; | 510 | struct trace_array *tr = &global_trace; |
602 | int i; | 511 | int i; |
@@ -638,8 +547,26 @@ int register_tracer(struct tracer *type) | |||
638 | out: | 547 | out: |
639 | tracing_selftest_running = false; | 548 | tracing_selftest_running = false; |
640 | mutex_unlock(&trace_types_lock); | 549 | mutex_unlock(&trace_types_lock); |
641 | lock_kernel(); | ||
642 | 550 | ||
551 | if (ret || !default_bootup_tracer) | ||
552 | goto out_unlock; | ||
553 | |||
554 | if (strncmp(default_bootup_tracer, type->name, BOOTUP_TRACER_SIZE)) | ||
555 | goto out_unlock; | ||
556 | |||
557 | printk(KERN_INFO "Starting tracer '%s'\n", type->name); | ||
558 | /* Do we want this tracer to start on bootup? */ | ||
559 | tracing_set_tracer(type->name); | ||
560 | default_bootup_tracer = NULL; | ||
561 | /* disable other selftests, since this will break it. */ | ||
562 | tracing_selftest_disabled = 1; | ||
563 | #ifdef CONFIG_FTRACE_STARTUP_TEST | ||
564 | printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n", | ||
565 | type->name); | ||
566 | #endif | ||
567 | |||
568 | out_unlock: | ||
569 | lock_kernel(); | ||
643 | return ret; | 570 | return ret; |
644 | } | 571 | } |
645 | 572 | ||
@@ -658,6 +585,15 @@ void unregister_tracer(struct tracer *type) | |||
658 | 585 | ||
659 | found: | 586 | found: |
660 | *t = (*t)->next; | 587 | *t = (*t)->next; |
588 | |||
589 | if (type == current_trace && tracer_enabled) { | ||
590 | tracer_enabled = 0; | ||
591 | tracing_stop(); | ||
592 | if (current_trace->stop) | ||
593 | current_trace->stop(&global_trace); | ||
594 | current_trace = &nop_trace; | ||
595 | } | ||
596 | |||
661 | if (strlen(type->name) != max_tracer_type_len) | 597 | if (strlen(type->name) != max_tracer_type_len) |
662 | goto out; | 598 | goto out; |
663 | 599 | ||
@@ -693,10 +629,10 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1]; | |||
693 | static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; | 629 | static unsigned map_cmdline_to_pid[SAVED_CMDLINES]; |
694 | static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; | 630 | static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN]; |
695 | static int cmdline_idx; | 631 | static int cmdline_idx; |
696 | static DEFINE_SPINLOCK(trace_cmdline_lock); | 632 | static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED; |
697 | 633 | ||
698 | /* temporary disable recording */ | 634 | /* temporary disable recording */ |
699 | atomic_t trace_record_cmdline_disabled __read_mostly; | 635 | static atomic_t trace_record_cmdline_disabled __read_mostly; |
700 | 636 | ||
701 | static void trace_init_cmdlines(void) | 637 | static void trace_init_cmdlines(void) |
702 | { | 638 | { |
@@ -738,13 +674,12 @@ void tracing_start(void) | |||
738 | return; | 674 | return; |
739 | 675 | ||
740 | spin_lock_irqsave(&tracing_start_lock, flags); | 676 | spin_lock_irqsave(&tracing_start_lock, flags); |
741 | if (--trace_stop_count) | 677 | if (--trace_stop_count) { |
742 | goto out; | 678 | if (trace_stop_count < 0) { |
743 | 679 | /* Someone screwed up their debugging */ | |
744 | if (trace_stop_count < 0) { | 680 | WARN_ON_ONCE(1); |
745 | /* Someone screwed up their debugging */ | 681 | trace_stop_count = 0; |
746 | WARN_ON_ONCE(1); | 682 | } |
747 | trace_stop_count = 0; | ||
748 | goto out; | 683 | goto out; |
749 | } | 684 | } |
750 | 685 | ||
@@ -806,7 +741,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
806 | * nor do we want to disable interrupts, | 741 | * nor do we want to disable interrupts, |
807 | * so if we miss here, then better luck next time. | 742 | * so if we miss here, then better luck next time. |
808 | */ | 743 | */ |
809 | if (!spin_trylock(&trace_cmdline_lock)) | 744 | if (!__raw_spin_trylock(&trace_cmdline_lock)) |
810 | return; | 745 | return; |
811 | 746 | ||
812 | idx = map_pid_to_cmdline[tsk->pid]; | 747 | idx = map_pid_to_cmdline[tsk->pid]; |
@@ -824,7 +759,7 @@ static void trace_save_cmdline(struct task_struct *tsk) | |||
824 | 759 | ||
825 | memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); | 760 | memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN); |
826 | 761 | ||
827 | spin_unlock(&trace_cmdline_lock); | 762 | __raw_spin_unlock(&trace_cmdline_lock); |
828 | } | 763 | } |
829 | 764 | ||
830 | char *trace_find_cmdline(int pid) | 765 | char *trace_find_cmdline(int pid) |
@@ -876,78 +811,114 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, | |||
876 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); | 811 | (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); |
877 | } | 812 | } |
878 | 813 | ||
814 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, | ||
815 | unsigned char type, | ||
816 | unsigned long len, | ||
817 | unsigned long flags, int pc) | ||
818 | { | ||
819 | struct ring_buffer_event *event; | ||
820 | |||
821 | event = ring_buffer_lock_reserve(tr->buffer, len); | ||
822 | if (event != NULL) { | ||
823 | struct trace_entry *ent = ring_buffer_event_data(event); | ||
824 | |||
825 | tracing_generic_entry_update(ent, flags, pc); | ||
826 | ent->type = type; | ||
827 | } | ||
828 | |||
829 | return event; | ||
830 | } | ||
831 | static void ftrace_trace_stack(struct trace_array *tr, | ||
832 | unsigned long flags, int skip, int pc); | ||
833 | static void ftrace_trace_userstack(struct trace_array *tr, | ||
834 | unsigned long flags, int pc); | ||
835 | |||
836 | void trace_buffer_unlock_commit(struct trace_array *tr, | ||
837 | struct ring_buffer_event *event, | ||
838 | unsigned long flags, int pc) | ||
839 | { | ||
840 | ring_buffer_unlock_commit(tr->buffer, event); | ||
841 | |||
842 | ftrace_trace_stack(tr, flags, 6, pc); | ||
843 | ftrace_trace_userstack(tr, flags, pc); | ||
844 | trace_wake_up(); | ||
845 | } | ||
846 | |||
847 | struct ring_buffer_event * | ||
848 | trace_current_buffer_lock_reserve(unsigned char type, unsigned long len, | ||
849 | unsigned long flags, int pc) | ||
850 | { | ||
851 | return trace_buffer_lock_reserve(&global_trace, | ||
852 | type, len, flags, pc); | ||
853 | } | ||
854 | |||
855 | void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, | ||
856 | unsigned long flags, int pc) | ||
857 | { | ||
858 | return trace_buffer_unlock_commit(&global_trace, event, flags, pc); | ||
859 | } | ||
860 | |||
879 | void | 861 | void |
880 | trace_function(struct trace_array *tr, struct trace_array_cpu *data, | 862 | trace_function(struct trace_array *tr, |
881 | unsigned long ip, unsigned long parent_ip, unsigned long flags, | 863 | unsigned long ip, unsigned long parent_ip, unsigned long flags, |
882 | int pc) | 864 | int pc) |
883 | { | 865 | { |
884 | struct ring_buffer_event *event; | 866 | struct ring_buffer_event *event; |
885 | struct ftrace_entry *entry; | 867 | struct ftrace_entry *entry; |
886 | unsigned long irq_flags; | ||
887 | 868 | ||
888 | /* If we are reading the ring buffer, don't trace */ | 869 | /* If we are reading the ring buffer, don't trace */ |
889 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 870 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) |
890 | return; | 871 | return; |
891 | 872 | ||
892 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 873 | event = trace_buffer_lock_reserve(tr, TRACE_FN, sizeof(*entry), |
893 | &irq_flags); | 874 | flags, pc); |
894 | if (!event) | 875 | if (!event) |
895 | return; | 876 | return; |
896 | entry = ring_buffer_event_data(event); | 877 | entry = ring_buffer_event_data(event); |
897 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
898 | entry->ent.type = TRACE_FN; | ||
899 | entry->ip = ip; | 878 | entry->ip = ip; |
900 | entry->parent_ip = parent_ip; | 879 | entry->parent_ip = parent_ip; |
901 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 880 | ring_buffer_unlock_commit(tr->buffer, event); |
902 | } | 881 | } |
903 | 882 | ||
904 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | 883 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER |
905 | static void __trace_graph_entry(struct trace_array *tr, | 884 | static void __trace_graph_entry(struct trace_array *tr, |
906 | struct trace_array_cpu *data, | ||
907 | struct ftrace_graph_ent *trace, | 885 | struct ftrace_graph_ent *trace, |
908 | unsigned long flags, | 886 | unsigned long flags, |
909 | int pc) | 887 | int pc) |
910 | { | 888 | { |
911 | struct ring_buffer_event *event; | 889 | struct ring_buffer_event *event; |
912 | struct ftrace_graph_ent_entry *entry; | 890 | struct ftrace_graph_ent_entry *entry; |
913 | unsigned long irq_flags; | ||
914 | 891 | ||
915 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 892 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) |
916 | return; | 893 | return; |
917 | 894 | ||
918 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), | 895 | event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_ENT, |
919 | &irq_flags); | 896 | sizeof(*entry), flags, pc); |
920 | if (!event) | 897 | if (!event) |
921 | return; | 898 | return; |
922 | entry = ring_buffer_event_data(event); | 899 | entry = ring_buffer_event_data(event); |
923 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
924 | entry->ent.type = TRACE_GRAPH_ENT; | ||
925 | entry->graph_ent = *trace; | 900 | entry->graph_ent = *trace; |
926 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | 901 | ring_buffer_unlock_commit(global_trace.buffer, event); |
927 | } | 902 | } |
928 | 903 | ||
929 | static void __trace_graph_return(struct trace_array *tr, | 904 | static void __trace_graph_return(struct trace_array *tr, |
930 | struct trace_array_cpu *data, | ||
931 | struct ftrace_graph_ret *trace, | 905 | struct ftrace_graph_ret *trace, |
932 | unsigned long flags, | 906 | unsigned long flags, |
933 | int pc) | 907 | int pc) |
934 | { | 908 | { |
935 | struct ring_buffer_event *event; | 909 | struct ring_buffer_event *event; |
936 | struct ftrace_graph_ret_entry *entry; | 910 | struct ftrace_graph_ret_entry *entry; |
937 | unsigned long irq_flags; | ||
938 | 911 | ||
939 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) | 912 | if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled)))) |
940 | return; | 913 | return; |
941 | 914 | ||
942 | event = ring_buffer_lock_reserve(global_trace.buffer, sizeof(*entry), | 915 | event = trace_buffer_lock_reserve(&global_trace, TRACE_GRAPH_RET, |
943 | &irq_flags); | 916 | sizeof(*entry), flags, pc); |
944 | if (!event) | 917 | if (!event) |
945 | return; | 918 | return; |
946 | entry = ring_buffer_event_data(event); | 919 | entry = ring_buffer_event_data(event); |
947 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
948 | entry->ent.type = TRACE_GRAPH_RET; | ||
949 | entry->ret = *trace; | 920 | entry->ret = *trace; |
950 | ring_buffer_unlock_commit(global_trace.buffer, event, irq_flags); | 921 | ring_buffer_unlock_commit(global_trace.buffer, event); |
951 | } | 922 | } |
952 | #endif | 923 | #endif |
953 | 924 | ||
@@ -957,31 +928,23 @@ ftrace(struct trace_array *tr, struct trace_array_cpu *data, | |||
957 | int pc) | 928 | int pc) |
958 | { | 929 | { |
959 | if (likely(!atomic_read(&data->disabled))) | 930 | if (likely(!atomic_read(&data->disabled))) |
960 | trace_function(tr, data, ip, parent_ip, flags, pc); | 931 | trace_function(tr, ip, parent_ip, flags, pc); |
961 | } | 932 | } |
962 | 933 | ||
963 | static void ftrace_trace_stack(struct trace_array *tr, | 934 | static void __ftrace_trace_stack(struct trace_array *tr, |
964 | struct trace_array_cpu *data, | 935 | unsigned long flags, |
965 | unsigned long flags, | 936 | int skip, int pc) |
966 | int skip, int pc) | ||
967 | { | 937 | { |
968 | #ifdef CONFIG_STACKTRACE | 938 | #ifdef CONFIG_STACKTRACE |
969 | struct ring_buffer_event *event; | 939 | struct ring_buffer_event *event; |
970 | struct stack_entry *entry; | 940 | struct stack_entry *entry; |
971 | struct stack_trace trace; | 941 | struct stack_trace trace; |
972 | unsigned long irq_flags; | ||
973 | |||
974 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | ||
975 | return; | ||
976 | 942 | ||
977 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 943 | event = trace_buffer_lock_reserve(tr, TRACE_STACK, |
978 | &irq_flags); | 944 | sizeof(*entry), flags, pc); |
979 | if (!event) | 945 | if (!event) |
980 | return; | 946 | return; |
981 | entry = ring_buffer_event_data(event); | 947 | entry = ring_buffer_event_data(event); |
982 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
983 | entry->ent.type = TRACE_STACK; | ||
984 | |||
985 | memset(&entry->caller, 0, sizeof(entry->caller)); | 948 | memset(&entry->caller, 0, sizeof(entry->caller)); |
986 | 949 | ||
987 | trace.nr_entries = 0; | 950 | trace.nr_entries = 0; |
@@ -990,38 +953,43 @@ static void ftrace_trace_stack(struct trace_array *tr, | |||
990 | trace.entries = entry->caller; | 953 | trace.entries = entry->caller; |
991 | 954 | ||
992 | save_stack_trace(&trace); | 955 | save_stack_trace(&trace); |
993 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 956 | ring_buffer_unlock_commit(tr->buffer, event); |
994 | #endif | 957 | #endif |
995 | } | 958 | } |
996 | 959 | ||
960 | static void ftrace_trace_stack(struct trace_array *tr, | ||
961 | unsigned long flags, | ||
962 | int skip, int pc) | ||
963 | { | ||
964 | if (!(trace_flags & TRACE_ITER_STACKTRACE)) | ||
965 | return; | ||
966 | |||
967 | __ftrace_trace_stack(tr, flags, skip, pc); | ||
968 | } | ||
969 | |||
997 | void __trace_stack(struct trace_array *tr, | 970 | void __trace_stack(struct trace_array *tr, |
998 | struct trace_array_cpu *data, | ||
999 | unsigned long flags, | 971 | unsigned long flags, |
1000 | int skip) | 972 | int skip, int pc) |
1001 | { | 973 | { |
1002 | ftrace_trace_stack(tr, data, flags, skip, preempt_count()); | 974 | __ftrace_trace_stack(tr, flags, skip, pc); |
1003 | } | 975 | } |
1004 | 976 | ||
1005 | static void ftrace_trace_userstack(struct trace_array *tr, | 977 | static void ftrace_trace_userstack(struct trace_array *tr, |
1006 | struct trace_array_cpu *data, | 978 | unsigned long flags, int pc) |
1007 | unsigned long flags, int pc) | ||
1008 | { | 979 | { |
1009 | #ifdef CONFIG_STACKTRACE | 980 | #ifdef CONFIG_STACKTRACE |
1010 | struct ring_buffer_event *event; | 981 | struct ring_buffer_event *event; |
1011 | struct userstack_entry *entry; | 982 | struct userstack_entry *entry; |
1012 | struct stack_trace trace; | 983 | struct stack_trace trace; |
1013 | unsigned long irq_flags; | ||
1014 | 984 | ||
1015 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) | 985 | if (!(trace_flags & TRACE_ITER_USERSTACKTRACE)) |
1016 | return; | 986 | return; |
1017 | 987 | ||
1018 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 988 | event = trace_buffer_lock_reserve(tr, TRACE_USER_STACK, |
1019 | &irq_flags); | 989 | sizeof(*entry), flags, pc); |
1020 | if (!event) | 990 | if (!event) |
1021 | return; | 991 | return; |
1022 | entry = ring_buffer_event_data(event); | 992 | entry = ring_buffer_event_data(event); |
1023 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
1024 | entry->ent.type = TRACE_USER_STACK; | ||
1025 | 993 | ||
1026 | memset(&entry->caller, 0, sizeof(entry->caller)); | 994 | memset(&entry->caller, 0, sizeof(entry->caller)); |
1027 | 995 | ||
@@ -1031,70 +999,58 @@ static void ftrace_trace_userstack(struct trace_array *tr, | |||
1031 | trace.entries = entry->caller; | 999 | trace.entries = entry->caller; |
1032 | 1000 | ||
1033 | save_stack_trace_user(&trace); | 1001 | save_stack_trace_user(&trace); |
1034 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1002 | ring_buffer_unlock_commit(tr->buffer, event); |
1035 | #endif | 1003 | #endif |
1036 | } | 1004 | } |
1037 | 1005 | ||
1038 | void __trace_userstack(struct trace_array *tr, | 1006 | #ifdef UNUSED |
1039 | struct trace_array_cpu *data, | 1007 | static void __trace_userstack(struct trace_array *tr, unsigned long flags) |
1040 | unsigned long flags) | ||
1041 | { | 1008 | { |
1042 | ftrace_trace_userstack(tr, data, flags, preempt_count()); | 1009 | ftrace_trace_userstack(tr, flags, preempt_count()); |
1043 | } | 1010 | } |
1011 | #endif /* UNUSED */ | ||
1044 | 1012 | ||
1045 | static void | 1013 | static void |
1046 | ftrace_trace_special(void *__tr, void *__data, | 1014 | ftrace_trace_special(void *__tr, |
1047 | unsigned long arg1, unsigned long arg2, unsigned long arg3, | 1015 | unsigned long arg1, unsigned long arg2, unsigned long arg3, |
1048 | int pc) | 1016 | int pc) |
1049 | { | 1017 | { |
1050 | struct ring_buffer_event *event; | 1018 | struct ring_buffer_event *event; |
1051 | struct trace_array_cpu *data = __data; | ||
1052 | struct trace_array *tr = __tr; | 1019 | struct trace_array *tr = __tr; |
1053 | struct special_entry *entry; | 1020 | struct special_entry *entry; |
1054 | unsigned long irq_flags; | ||
1055 | 1021 | ||
1056 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 1022 | event = trace_buffer_lock_reserve(tr, TRACE_SPECIAL, |
1057 | &irq_flags); | 1023 | sizeof(*entry), 0, pc); |
1058 | if (!event) | 1024 | if (!event) |
1059 | return; | 1025 | return; |
1060 | entry = ring_buffer_event_data(event); | 1026 | entry = ring_buffer_event_data(event); |
1061 | tracing_generic_entry_update(&entry->ent, 0, pc); | ||
1062 | entry->ent.type = TRACE_SPECIAL; | ||
1063 | entry->arg1 = arg1; | 1027 | entry->arg1 = arg1; |
1064 | entry->arg2 = arg2; | 1028 | entry->arg2 = arg2; |
1065 | entry->arg3 = arg3; | 1029 | entry->arg3 = arg3; |
1066 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1030 | trace_buffer_unlock_commit(tr, event, 0, pc); |
1067 | ftrace_trace_stack(tr, data, irq_flags, 4, pc); | ||
1068 | ftrace_trace_userstack(tr, data, irq_flags, pc); | ||
1069 | |||
1070 | trace_wake_up(); | ||
1071 | } | 1031 | } |
1072 | 1032 | ||
1073 | void | 1033 | void |
1074 | __trace_special(void *__tr, void *__data, | 1034 | __trace_special(void *__tr, void *__data, |
1075 | unsigned long arg1, unsigned long arg2, unsigned long arg3) | 1035 | unsigned long arg1, unsigned long arg2, unsigned long arg3) |
1076 | { | 1036 | { |
1077 | ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count()); | 1037 | ftrace_trace_special(__tr, arg1, arg2, arg3, preempt_count()); |
1078 | } | 1038 | } |
1079 | 1039 | ||
1080 | void | 1040 | void |
1081 | tracing_sched_switch_trace(struct trace_array *tr, | 1041 | tracing_sched_switch_trace(struct trace_array *tr, |
1082 | struct trace_array_cpu *data, | ||
1083 | struct task_struct *prev, | 1042 | struct task_struct *prev, |
1084 | struct task_struct *next, | 1043 | struct task_struct *next, |
1085 | unsigned long flags, int pc) | 1044 | unsigned long flags, int pc) |
1086 | { | 1045 | { |
1087 | struct ring_buffer_event *event; | 1046 | struct ring_buffer_event *event; |
1088 | struct ctx_switch_entry *entry; | 1047 | struct ctx_switch_entry *entry; |
1089 | unsigned long irq_flags; | ||
1090 | 1048 | ||
1091 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 1049 | event = trace_buffer_lock_reserve(tr, TRACE_CTX, |
1092 | &irq_flags); | 1050 | sizeof(*entry), flags, pc); |
1093 | if (!event) | 1051 | if (!event) |
1094 | return; | 1052 | return; |
1095 | entry = ring_buffer_event_data(event); | 1053 | entry = ring_buffer_event_data(event); |
1096 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
1097 | entry->ent.type = TRACE_CTX; | ||
1098 | entry->prev_pid = prev->pid; | 1054 | entry->prev_pid = prev->pid; |
1099 | entry->prev_prio = prev->prio; | 1055 | entry->prev_prio = prev->prio; |
1100 | entry->prev_state = prev->state; | 1056 | entry->prev_state = prev->state; |
@@ -1102,29 +1058,23 @@ tracing_sched_switch_trace(struct trace_array *tr, | |||
1102 | entry->next_prio = next->prio; | 1058 | entry->next_prio = next->prio; |
1103 | entry->next_state = next->state; | 1059 | entry->next_state = next->state; |
1104 | entry->next_cpu = task_cpu(next); | 1060 | entry->next_cpu = task_cpu(next); |
1105 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 1061 | trace_buffer_unlock_commit(tr, event, flags, pc); |
1106 | ftrace_trace_stack(tr, data, flags, 5, pc); | ||
1107 | ftrace_trace_userstack(tr, data, flags, pc); | ||
1108 | } | 1062 | } |
1109 | 1063 | ||
1110 | void | 1064 | void |
1111 | tracing_sched_wakeup_trace(struct trace_array *tr, | 1065 | tracing_sched_wakeup_trace(struct trace_array *tr, |
1112 | struct trace_array_cpu *data, | ||
1113 | struct task_struct *wakee, | 1066 | struct task_struct *wakee, |
1114 | struct task_struct *curr, | 1067 | struct task_struct *curr, |
1115 | unsigned long flags, int pc) | 1068 | unsigned long flags, int pc) |
1116 | { | 1069 | { |
1117 | struct ring_buffer_event *event; | 1070 | struct ring_buffer_event *event; |
1118 | struct ctx_switch_entry *entry; | 1071 | struct ctx_switch_entry *entry; |
1119 | unsigned long irq_flags; | ||
1120 | 1072 | ||
1121 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 1073 | event = trace_buffer_lock_reserve(tr, TRACE_WAKE, |
1122 | &irq_flags); | 1074 | sizeof(*entry), flags, pc); |
1123 | if (!event) | 1075 | if (!event) |
1124 | return; | 1076 | return; |
1125 | entry = ring_buffer_event_data(event); | 1077 | entry = ring_buffer_event_data(event); |
1126 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
1127 | entry->ent.type = TRACE_WAKE; | ||
1128 | entry->prev_pid = curr->pid; | 1078 | entry->prev_pid = curr->pid; |
1129 | entry->prev_prio = curr->prio; | 1079 | entry->prev_prio = curr->prio; |
1130 | entry->prev_state = curr->state; | 1080 | entry->prev_state = curr->state; |
@@ -1132,11 +1082,10 @@ tracing_sched_wakeup_trace(struct trace_array *tr, | |||
1132 | entry->next_prio = wakee->prio; | 1082 | entry->next_prio = wakee->prio; |
1133 | entry->next_state = wakee->state; | 1083 | entry->next_state = wakee->state; |
1134 | entry->next_cpu = task_cpu(wakee); | 1084 | entry->next_cpu = task_cpu(wakee); |
1135 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
1136 | ftrace_trace_stack(tr, data, flags, 6, pc); | ||
1137 | ftrace_trace_userstack(tr, data, flags, pc); | ||
1138 | 1085 | ||
1139 | trace_wake_up(); | 1086 | ring_buffer_unlock_commit(tr->buffer, event); |
1087 | ftrace_trace_stack(tr, flags, 6, pc); | ||
1088 | ftrace_trace_userstack(tr, flags, pc); | ||
1140 | } | 1089 | } |
1141 | 1090 | ||
1142 | void | 1091 | void |
@@ -1157,66 +1106,7 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) | |||
1157 | data = tr->data[cpu]; | 1106 | data = tr->data[cpu]; |
1158 | 1107 | ||
1159 | if (likely(atomic_inc_return(&data->disabled) == 1)) | 1108 | if (likely(atomic_inc_return(&data->disabled) == 1)) |
1160 | ftrace_trace_special(tr, data, arg1, arg2, arg3, pc); | 1109 | ftrace_trace_special(tr, arg1, arg2, arg3, pc); |
1161 | |||
1162 | atomic_dec(&data->disabled); | ||
1163 | local_irq_restore(flags); | ||
1164 | } | ||
1165 | |||
1166 | #ifdef CONFIG_FUNCTION_TRACER | ||
1167 | static void | ||
1168 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | ||
1169 | { | ||
1170 | struct trace_array *tr = &global_trace; | ||
1171 | struct trace_array_cpu *data; | ||
1172 | unsigned long flags; | ||
1173 | long disabled; | ||
1174 | int cpu, resched; | ||
1175 | int pc; | ||
1176 | |||
1177 | if (unlikely(!ftrace_function_enabled)) | ||
1178 | return; | ||
1179 | |||
1180 | pc = preempt_count(); | ||
1181 | resched = ftrace_preempt_disable(); | ||
1182 | local_save_flags(flags); | ||
1183 | cpu = raw_smp_processor_id(); | ||
1184 | data = tr->data[cpu]; | ||
1185 | disabled = atomic_inc_return(&data->disabled); | ||
1186 | |||
1187 | if (likely(disabled == 1)) | ||
1188 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
1189 | |||
1190 | atomic_dec(&data->disabled); | ||
1191 | ftrace_preempt_enable(resched); | ||
1192 | } | ||
1193 | |||
1194 | static void | ||
1195 | function_trace_call(unsigned long ip, unsigned long parent_ip) | ||
1196 | { | ||
1197 | struct trace_array *tr = &global_trace; | ||
1198 | struct trace_array_cpu *data; | ||
1199 | unsigned long flags; | ||
1200 | long disabled; | ||
1201 | int cpu; | ||
1202 | int pc; | ||
1203 | |||
1204 | if (unlikely(!ftrace_function_enabled)) | ||
1205 | return; | ||
1206 | |||
1207 | /* | ||
1208 | * Need to use raw, since this must be called before the | ||
1209 | * recursive protection is performed. | ||
1210 | */ | ||
1211 | local_irq_save(flags); | ||
1212 | cpu = raw_smp_processor_id(); | ||
1213 | data = tr->data[cpu]; | ||
1214 | disabled = atomic_inc_return(&data->disabled); | ||
1215 | |||
1216 | if (likely(disabled == 1)) { | ||
1217 | pc = preempt_count(); | ||
1218 | trace_function(tr, data, ip, parent_ip, flags, pc); | ||
1219 | } | ||
1220 | 1110 | ||
1221 | atomic_dec(&data->disabled); | 1111 | atomic_dec(&data->disabled); |
1222 | local_irq_restore(flags); | 1112 | local_irq_restore(flags); |
@@ -1244,7 +1134,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace) | |||
1244 | disabled = atomic_inc_return(&data->disabled); | 1134 | disabled = atomic_inc_return(&data->disabled); |
1245 | if (likely(disabled == 1)) { | 1135 | if (likely(disabled == 1)) { |
1246 | pc = preempt_count(); | 1136 | pc = preempt_count(); |
1247 | __trace_graph_entry(tr, data, trace, flags, pc); | 1137 | __trace_graph_entry(tr, trace, flags, pc); |
1248 | } | 1138 | } |
1249 | /* Only do the atomic if it is not already set */ | 1139 | /* Only do the atomic if it is not already set */ |
1250 | if (!test_tsk_trace_graph(current)) | 1140 | if (!test_tsk_trace_graph(current)) |
@@ -1270,7 +1160,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace) | |||
1270 | disabled = atomic_inc_return(&data->disabled); | 1160 | disabled = atomic_inc_return(&data->disabled); |
1271 | if (likely(disabled == 1)) { | 1161 | if (likely(disabled == 1)) { |
1272 | pc = preempt_count(); | 1162 | pc = preempt_count(); |
1273 | __trace_graph_return(tr, data, trace, flags, pc); | 1163 | __trace_graph_return(tr, trace, flags, pc); |
1274 | } | 1164 | } |
1275 | if (!trace->depth) | 1165 | if (!trace->depth) |
1276 | clear_tsk_trace_graph(current); | 1166 | clear_tsk_trace_graph(current); |
@@ -1279,31 +1169,6 @@ void trace_graph_return(struct ftrace_graph_ret *trace) | |||
1279 | } | 1169 | } |
1280 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 1170 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
1281 | 1171 | ||
1282 | static struct ftrace_ops trace_ops __read_mostly = | ||
1283 | { | ||
1284 | .func = function_trace_call, | ||
1285 | }; | ||
1286 | |||
1287 | void tracing_start_function_trace(void) | ||
1288 | { | ||
1289 | ftrace_function_enabled = 0; | ||
1290 | |||
1291 | if (trace_flags & TRACE_ITER_PREEMPTONLY) | ||
1292 | trace_ops.func = function_trace_call_preempt_only; | ||
1293 | else | ||
1294 | trace_ops.func = function_trace_call; | ||
1295 | |||
1296 | register_ftrace_function(&trace_ops); | ||
1297 | ftrace_function_enabled = 1; | ||
1298 | } | ||
1299 | |||
1300 | void tracing_stop_function_trace(void) | ||
1301 | { | ||
1302 | ftrace_function_enabled = 0; | ||
1303 | unregister_ftrace_function(&trace_ops); | ||
1304 | } | ||
1305 | #endif | ||
1306 | |||
1307 | enum trace_file_type { | 1172 | enum trace_file_type { |
1308 | TRACE_FILE_LAT_FMT = 1, | 1173 | TRACE_FILE_LAT_FMT = 1, |
1309 | TRACE_FILE_ANNOTATE = 2, | 1174 | TRACE_FILE_ANNOTATE = 2, |
@@ -1345,10 +1210,25 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1345 | { | 1210 | { |
1346 | struct ring_buffer *buffer = iter->tr->buffer; | 1211 | struct ring_buffer *buffer = iter->tr->buffer; |
1347 | struct trace_entry *ent, *next = NULL; | 1212 | struct trace_entry *ent, *next = NULL; |
1213 | int cpu_file = iter->cpu_file; | ||
1348 | u64 next_ts = 0, ts; | 1214 | u64 next_ts = 0, ts; |
1349 | int next_cpu = -1; | 1215 | int next_cpu = -1; |
1350 | int cpu; | 1216 | int cpu; |
1351 | 1217 | ||
1218 | /* | ||
1219 | * If we are in a per_cpu trace file, don't bother iterating over | ||
1220 | * all CPUs; peek at the requested CPU directly. | ||
1221 | */ | ||
1222 | if (cpu_file > TRACE_PIPE_ALL_CPU) { | ||
1223 | if (ring_buffer_empty_cpu(buffer, cpu_file)) | ||
1224 | return NULL; | ||
1225 | ent = peek_next_entry(iter, cpu_file, ent_ts); | ||
1226 | if (ent_cpu) | ||
1227 | *ent_cpu = cpu_file; | ||
1228 | |||
1229 | return ent; | ||
1230 | } | ||
1231 | |||
1352 | for_each_tracing_cpu(cpu) { | 1232 | for_each_tracing_cpu(cpu) { |
1353 | 1233 | ||
1354 | if (ring_buffer_empty_cpu(buffer, cpu)) | 1234 | if (ring_buffer_empty_cpu(buffer, cpu)) |
@@ -1376,8 +1256,8 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | |||
1376 | } | 1256 | } |
1377 | 1257 | ||
1378 | /* Find the next real entry, without updating the iterator itself */ | 1258 | /* Find the next real entry, without updating the iterator itself */ |
1379 | static struct trace_entry * | 1259 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, |
1380 | find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts) | 1260 | int *ent_cpu, u64 *ent_ts) |
1381 | { | 1261 | { |
1382 | return __find_next_entry(iter, ent_cpu, ent_ts); | 1262 | return __find_next_entry(iter, ent_cpu, ent_ts); |
1383 | } | 1263 | } |
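With the fast path above, a per-CPU trace file never scans every buffer. A condensed sketch of the dispatch, assuming a hypothetical wrapper name (peek_next_entry, __find_next_entry and the TRACE_PIPE_ALL_CPU sentinel are the ones used in this diff):

	static struct trace_entry *
	peek_for_file(struct trace_iterator *iter, u64 *ent_ts)
	{
		/* cpu_file > TRACE_PIPE_ALL_CPU means a per_cpu/cpuN file;
		 * the real code also checks ring_buffer_empty_cpu() first */
		if (iter->cpu_file > TRACE_PIPE_ALL_CPU)
			return peek_next_entry(iter, iter->cpu_file, ent_ts);

		/* combined file: find the oldest entry across all CPUs */
		return __find_next_entry(iter, NULL, ent_ts);
	}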
@@ -1426,19 +1306,32 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos) | |||
1426 | return ent; | 1306 | return ent; |
1427 | } | 1307 | } |
1428 | 1308 | ||
1309 | /* | ||
1310 | * No locking is necessary here. The worst thing that can | ||
1311 | * happen is losing events consumed at the same time | ||
1312 | * by a trace_pipe reader. | ||
1313 | * Other than that, we don't risk crashing the ring buffer | ||
1314 | * because it serializes the readers. | ||
1315 | * | ||
1316 | * The current tracer is copied to avoid global locking | ||
1317 | * all around. | ||
1318 | */ | ||
1429 | static void *s_start(struct seq_file *m, loff_t *pos) | 1319 | static void *s_start(struct seq_file *m, loff_t *pos) |
1430 | { | 1320 | { |
1431 | struct trace_iterator *iter = m->private; | 1321 | struct trace_iterator *iter = m->private; |
1322 | static struct tracer *old_tracer; | ||
1323 | int cpu_file = iter->cpu_file; | ||
1432 | void *p = NULL; | 1324 | void *p = NULL; |
1433 | loff_t l = 0; | 1325 | loff_t l = 0; |
1434 | int cpu; | 1326 | int cpu; |
1435 | 1327 | ||
1328 | /* copy the tracer to avoid using a global lock all around */ | ||
1436 | mutex_lock(&trace_types_lock); | 1329 | mutex_lock(&trace_types_lock); |
1437 | 1330 | if (unlikely(old_tracer != current_trace && current_trace)) { | |
1438 | if (!current_trace || current_trace != iter->trace) { | 1331 | old_tracer = current_trace; |
1439 | mutex_unlock(&trace_types_lock); | 1332 | *iter->trace = *current_trace; |
1440 | return NULL; | ||
1441 | } | 1333 | } |
1334 | mutex_unlock(&trace_types_lock); | ||
1442 | 1335 | ||
1443 | atomic_inc(&trace_record_cmdline_disabled); | 1336 | atomic_inc(&trace_record_cmdline_disabled); |
1444 | 1337 | ||
@@ -1449,9 +1342,12 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1449 | 1342 | ||
1450 | ftrace_disable_cpu(); | 1343 | ftrace_disable_cpu(); |
1451 | 1344 | ||
1452 | for_each_tracing_cpu(cpu) { | 1345 | if (cpu_file == TRACE_PIPE_ALL_CPU) { |
1453 | ring_buffer_iter_reset(iter->buffer_iter[cpu]); | 1346 | for_each_tracing_cpu(cpu) |
1454 | } | 1347 | ring_buffer_iter_reset(iter->buffer_iter[cpu]); |
1348 | } else | ||
1349 | ring_buffer_iter_reset(iter->buffer_iter[cpu_file]); | ||
1350 | |||
1455 | 1351 | ||
1456 | ftrace_enable_cpu(); | 1352 | ftrace_enable_cpu(); |
1457 | 1353 | ||
@@ -1469,155 +1365,6 @@ static void *s_start(struct seq_file *m, loff_t *pos) | |||
1469 | static void s_stop(struct seq_file *m, void *p) | 1365 | static void s_stop(struct seq_file *m, void *p) |
1470 | { | 1366 | { |
1471 | atomic_dec(&trace_record_cmdline_disabled); | 1367 | atomic_dec(&trace_record_cmdline_disabled); |
1472 | mutex_unlock(&trace_types_lock); | ||
1473 | } | ||
1474 | |||
1475 | #ifdef CONFIG_KRETPROBES | ||
1476 | static inline const char *kretprobed(const char *name) | ||
1477 | { | ||
1478 | static const char tramp_name[] = "kretprobe_trampoline"; | ||
1479 | int size = sizeof(tramp_name); | ||
1480 | |||
1481 | if (strncmp(tramp_name, name, size) == 0) | ||
1482 | return "[unknown/kretprobe'd]"; | ||
1483 | return name; | ||
1484 | } | ||
1485 | #else | ||
1486 | static inline const char *kretprobed(const char *name) | ||
1487 | { | ||
1488 | return name; | ||
1489 | } | ||
1490 | #endif /* CONFIG_KRETPROBES */ | ||
1491 | |||
1492 | static int | ||
1493 | seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | ||
1494 | { | ||
1495 | #ifdef CONFIG_KALLSYMS | ||
1496 | char str[KSYM_SYMBOL_LEN]; | ||
1497 | const char *name; | ||
1498 | |||
1499 | kallsyms_lookup(address, NULL, NULL, NULL, str); | ||
1500 | |||
1501 | name = kretprobed(str); | ||
1502 | |||
1503 | return trace_seq_printf(s, fmt, name); | ||
1504 | #endif | ||
1505 | return 1; | ||
1506 | } | ||
1507 | |||
1508 | static int | ||
1509 | seq_print_sym_offset(struct trace_seq *s, const char *fmt, | ||
1510 | unsigned long address) | ||
1511 | { | ||
1512 | #ifdef CONFIG_KALLSYMS | ||
1513 | char str[KSYM_SYMBOL_LEN]; | ||
1514 | const char *name; | ||
1515 | |||
1516 | sprint_symbol(str, address); | ||
1517 | name = kretprobed(str); | ||
1518 | |||
1519 | return trace_seq_printf(s, fmt, name); | ||
1520 | #endif | ||
1521 | return 1; | ||
1522 | } | ||
1523 | |||
1524 | #ifndef CONFIG_64BIT | ||
1525 | # define IP_FMT "%08lx" | ||
1526 | #else | ||
1527 | # define IP_FMT "%016lx" | ||
1528 | #endif | ||
1529 | |||
1530 | int | ||
1531 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | ||
1532 | { | ||
1533 | int ret; | ||
1534 | |||
1535 | if (!ip) | ||
1536 | return trace_seq_printf(s, "0"); | ||
1537 | |||
1538 | if (sym_flags & TRACE_ITER_SYM_OFFSET) | ||
1539 | ret = seq_print_sym_offset(s, "%s", ip); | ||
1540 | else | ||
1541 | ret = seq_print_sym_short(s, "%s", ip); | ||
1542 | |||
1543 | if (!ret) | ||
1544 | return 0; | ||
1545 | |||
1546 | if (sym_flags & TRACE_ITER_SYM_ADDR) | ||
1547 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
1548 | return ret; | ||
1549 | } | ||
1550 | |||
1551 | static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | ||
1552 | unsigned long ip, unsigned long sym_flags) | ||
1553 | { | ||
1554 | struct file *file = NULL; | ||
1555 | unsigned long vmstart = 0; | ||
1556 | int ret = 1; | ||
1557 | |||
1558 | if (mm) { | ||
1559 | const struct vm_area_struct *vma; | ||
1560 | |||
1561 | down_read(&mm->mmap_sem); | ||
1562 | vma = find_vma(mm, ip); | ||
1563 | if (vma) { | ||
1564 | file = vma->vm_file; | ||
1565 | vmstart = vma->vm_start; | ||
1566 | } | ||
1567 | if (file) { | ||
1568 | ret = trace_seq_path(s, &file->f_path); | ||
1569 | if (ret) | ||
1570 | ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart); | ||
1571 | } | ||
1572 | up_read(&mm->mmap_sem); | ||
1573 | } | ||
1574 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) | ||
1575 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
1576 | return ret; | ||
1577 | } | ||
1578 | |||
1579 | static int | ||
1580 | seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | ||
1581 | unsigned long sym_flags) | ||
1582 | { | ||
1583 | struct mm_struct *mm = NULL; | ||
1584 | int ret = 1; | ||
1585 | unsigned int i; | ||
1586 | |||
1587 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { | ||
1588 | struct task_struct *task; | ||
1589 | /* | ||
1590 | * we do the lookup on the thread group leader, | ||
1591 | * since individual threads might have already quit! | ||
1592 | */ | ||
1593 | rcu_read_lock(); | ||
1594 | task = find_task_by_vpid(entry->ent.tgid); | ||
1595 | if (task) | ||
1596 | mm = get_task_mm(task); | ||
1597 | rcu_read_unlock(); | ||
1598 | } | ||
1599 | |||
1600 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
1601 | unsigned long ip = entry->caller[i]; | ||
1602 | |||
1603 | if (ip == ULONG_MAX || !ret) | ||
1604 | break; | ||
1605 | if (i && ret) | ||
1606 | ret = trace_seq_puts(s, " <- "); | ||
1607 | if (!ip) { | ||
1608 | if (ret) | ||
1609 | ret = trace_seq_puts(s, "??"); | ||
1610 | continue; | ||
1611 | } | ||
1612 | if (!ret) | ||
1613 | break; | ||
1614 | if (ret) | ||
1615 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | ||
1616 | } | ||
1617 | |||
1618 | if (mm) | ||
1619 | mmput(mm); | ||
1620 | return ret; | ||
1621 | } | 1368 | } |
1622 | 1369 | ||
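Because s_start() now snapshots current_trace into iter->trace under trace_types_lock, the whole seq_file print path can use the copy without taking the lock. A sketch of the consumer side, condensed from print_trace_line() later in this file (print_line is an existing struct tracer callback):

	/* no trace_types_lock needed: iter->trace is our private copy */
	if (iter->trace && iter->trace->print_line) {
		enum print_line_t ret = iter->trace->print_line(iter);
		if (ret != TRACE_TYPE_UNHANDLED)
			return ret;
	}

The pointer comparison against the static old_tracer keeps the structure copy off the common path; it only happens after a tracer switch.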
1623 | static void print_lat_help_header(struct seq_file *m) | 1370 | static void print_lat_help_header(struct seq_file *m) |
@@ -1704,103 +1451,6 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter) | |||
1704 | seq_puts(m, "\n"); | 1451 | seq_puts(m, "\n"); |
1705 | } | 1452 | } |
1706 | 1453 | ||
1707 | static void | ||
1708 | lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | ||
1709 | { | ||
1710 | int hardirq, softirq; | ||
1711 | char *comm; | ||
1712 | |||
1713 | comm = trace_find_cmdline(entry->pid); | ||
1714 | |||
1715 | trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); | ||
1716 | trace_seq_printf(s, "%3d", cpu); | ||
1717 | trace_seq_printf(s, "%c%c", | ||
1718 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : | ||
1719 | (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' : '.', | ||
1720 | ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); | ||
1721 | |||
1722 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; | ||
1723 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; | ||
1724 | if (hardirq && softirq) { | ||
1725 | trace_seq_putc(s, 'H'); | ||
1726 | } else { | ||
1727 | if (hardirq) { | ||
1728 | trace_seq_putc(s, 'h'); | ||
1729 | } else { | ||
1730 | if (softirq) | ||
1731 | trace_seq_putc(s, 's'); | ||
1732 | else | ||
1733 | trace_seq_putc(s, '.'); | ||
1734 | } | ||
1735 | } | ||
1736 | |||
1737 | if (entry->preempt_count) | ||
1738 | trace_seq_printf(s, "%x", entry->preempt_count); | ||
1739 | else | ||
1740 | trace_seq_puts(s, "."); | ||
1741 | } | ||
1742 | |||
1743 | unsigned long preempt_mark_thresh = 100; | ||
1744 | |||
1745 | static void | ||
1746 | lat_print_timestamp(struct trace_seq *s, u64 abs_usecs, | ||
1747 | unsigned long rel_usecs) | ||
1748 | { | ||
1749 | trace_seq_printf(s, " %4lldus", abs_usecs); | ||
1750 | if (rel_usecs > preempt_mark_thresh) | ||
1751 | trace_seq_puts(s, "!: "); | ||
1752 | else if (rel_usecs > 1) | ||
1753 | trace_seq_puts(s, "+: "); | ||
1754 | else | ||
1755 | trace_seq_puts(s, " : "); | ||
1756 | } | ||
1757 | |||
1758 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; | ||
1759 | |||
1760 | static int task_state_char(unsigned long state) | ||
1761 | { | ||
1762 | int bit = state ? __ffs(state) + 1 : 0; | ||
1763 | |||
1764 | return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?'; | ||
1765 | } | ||
1766 | |||
1767 | /* | ||
1768 | * The message is supposed to contain an ending newline. | ||
1769 | * If the printing stops prematurely, try to add a newline of our own. | ||
1770 | */ | ||
1771 | void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter) | ||
1772 | { | ||
1773 | struct trace_entry *ent; | ||
1774 | struct trace_field_cont *cont; | ||
1775 | bool ok = true; | ||
1776 | |||
1777 | ent = peek_next_entry(iter, iter->cpu, NULL); | ||
1778 | if (!ent || ent->type != TRACE_CONT) { | ||
1779 | trace_seq_putc(s, '\n'); | ||
1780 | return; | ||
1781 | } | ||
1782 | |||
1783 | do { | ||
1784 | cont = (struct trace_field_cont *)ent; | ||
1785 | if (ok) | ||
1786 | ok = (trace_seq_printf(s, "%s", cont->buf) > 0); | ||
1787 | |||
1788 | ftrace_disable_cpu(); | ||
1789 | |||
1790 | if (iter->buffer_iter[iter->cpu]) | ||
1791 | ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); | ||
1792 | else | ||
1793 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); | ||
1794 | |||
1795 | ftrace_enable_cpu(); | ||
1796 | |||
1797 | ent = peek_next_entry(iter, iter->cpu, NULL); | ||
1798 | } while (ent && ent->type == TRACE_CONT); | ||
1799 | |||
1800 | if (!ok) | ||
1801 | trace_seq_putc(s, '\n'); | ||
1802 | } | ||
1803 | |||
1804 | static void test_cpu_buff_start(struct trace_iterator *iter) | 1454 | static void test_cpu_buff_start(struct trace_iterator *iter) |
1805 | { | 1455 | { |
1806 | struct trace_seq *s = &iter->seq; | 1456 | struct trace_seq *s = &iter->seq; |
@@ -1818,452 +1468,88 @@ static void test_cpu_buff_start(struct trace_iterator *iter) | |||
1818 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); | 1468 | trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu); |
1819 | } | 1469 | } |
1820 | 1470 | ||
1821 | static enum print_line_t | ||
1822 | print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) | ||
1823 | { | ||
1824 | struct trace_seq *s = &iter->seq; | ||
1825 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | ||
1826 | struct trace_entry *next_entry; | ||
1827 | unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); | ||
1828 | struct trace_entry *entry = iter->ent; | ||
1829 | unsigned long abs_usecs; | ||
1830 | unsigned long rel_usecs; | ||
1831 | u64 next_ts; | ||
1832 | char *comm; | ||
1833 | int S, T; | ||
1834 | int i; | ||
1835 | |||
1836 | if (entry->type == TRACE_CONT) | ||
1837 | return TRACE_TYPE_HANDLED; | ||
1838 | |||
1839 | test_cpu_buff_start(iter); | ||
1840 | |||
1841 | next_entry = find_next_entry(iter, NULL, &next_ts); | ||
1842 | if (!next_entry) | ||
1843 | next_ts = iter->ts; | ||
1844 | rel_usecs = ns2usecs(next_ts - iter->ts); | ||
1845 | abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); | ||
1846 | |||
1847 | if (verbose) { | ||
1848 | comm = trace_find_cmdline(entry->pid); | ||
1849 | trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]" | ||
1850 | " %ld.%03ldms (+%ld.%03ldms): ", | ||
1851 | comm, | ||
1852 | entry->pid, cpu, entry->flags, | ||
1853 | entry->preempt_count, trace_idx, | ||
1854 | ns2usecs(iter->ts), | ||
1855 | abs_usecs/1000, | ||
1856 | abs_usecs % 1000, rel_usecs/1000, | ||
1857 | rel_usecs % 1000); | ||
1858 | } else { | ||
1859 | lat_print_generic(s, entry, cpu); | ||
1860 | lat_print_timestamp(s, abs_usecs, rel_usecs); | ||
1861 | } | ||
1862 | switch (entry->type) { | ||
1863 | case TRACE_FN: { | ||
1864 | struct ftrace_entry *field; | ||
1865 | |||
1866 | trace_assign_type(field, entry); | ||
1867 | |||
1868 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
1869 | trace_seq_puts(s, " ("); | ||
1870 | seq_print_ip_sym(s, field->parent_ip, sym_flags); | ||
1871 | trace_seq_puts(s, ")\n"); | ||
1872 | break; | ||
1873 | } | ||
1874 | case TRACE_CTX: | ||
1875 | case TRACE_WAKE: { | ||
1876 | struct ctx_switch_entry *field; | ||
1877 | |||
1878 | trace_assign_type(field, entry); | ||
1879 | |||
1880 | T = task_state_char(field->next_state); | ||
1881 | S = task_state_char(field->prev_state); | ||
1882 | comm = trace_find_cmdline(field->next_pid); | ||
1883 | trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", | ||
1884 | field->prev_pid, | ||
1885 | field->prev_prio, | ||
1886 | S, entry->type == TRACE_CTX ? "==>" : " +", | ||
1887 | field->next_cpu, | ||
1888 | field->next_pid, | ||
1889 | field->next_prio, | ||
1890 | T, comm); | ||
1891 | break; | ||
1892 | } | ||
1893 | case TRACE_SPECIAL: { | ||
1894 | struct special_entry *field; | ||
1895 | |||
1896 | trace_assign_type(field, entry); | ||
1897 | |||
1898 | trace_seq_printf(s, "# %ld %ld %ld\n", | ||
1899 | field->arg1, | ||
1900 | field->arg2, | ||
1901 | field->arg3); | ||
1902 | break; | ||
1903 | } | ||
1904 | case TRACE_STACK: { | ||
1905 | struct stack_entry *field; | ||
1906 | |||
1907 | trace_assign_type(field, entry); | ||
1908 | |||
1909 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
1910 | if (i) | ||
1911 | trace_seq_puts(s, " <= "); | ||
1912 | seq_print_ip_sym(s, field->caller[i], sym_flags); | ||
1913 | } | ||
1914 | trace_seq_puts(s, "\n"); | ||
1915 | break; | ||
1916 | } | ||
1917 | case TRACE_PRINT: { | ||
1918 | struct print_entry *field; | ||
1919 | |||
1920 | trace_assign_type(field, entry); | ||
1921 | |||
1922 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
1923 | trace_seq_printf(s, ": %s", field->buf); | ||
1924 | if (entry->flags & TRACE_FLAG_CONT) | ||
1925 | trace_seq_print_cont(s, iter); | ||
1926 | break; | ||
1927 | } | ||
1928 | case TRACE_BRANCH: { | ||
1929 | struct trace_branch *field; | ||
1930 | |||
1931 | trace_assign_type(field, entry); | ||
1932 | |||
1933 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | ||
1934 | field->correct ? " ok " : " MISS ", | ||
1935 | field->func, | ||
1936 | field->file, | ||
1937 | field->line); | ||
1938 | break; | ||
1939 | } | ||
1940 | case TRACE_USER_STACK: { | ||
1941 | struct userstack_entry *field; | ||
1942 | |||
1943 | trace_assign_type(field, entry); | ||
1944 | |||
1945 | seq_print_userip_objs(field, s, sym_flags); | ||
1946 | trace_seq_putc(s, '\n'); | ||
1947 | break; | ||
1948 | } | ||
1949 | default: | ||
1950 | trace_seq_printf(s, "Unknown type %d\n", entry->type); | ||
1951 | } | ||
1952 | return TRACE_TYPE_HANDLED; | ||
1953 | } | ||
1954 | |||
1955 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) | 1471 | static enum print_line_t print_trace_fmt(struct trace_iterator *iter) |
1956 | { | 1472 | { |
1957 | struct trace_seq *s = &iter->seq; | 1473 | struct trace_seq *s = &iter->seq; |
1958 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); | 1474 | unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); |
1959 | struct trace_entry *entry; | 1475 | struct trace_entry *entry; |
1960 | unsigned long usec_rem; | 1476 | struct trace_event *event; |
1961 | unsigned long long t; | ||
1962 | unsigned long secs; | ||
1963 | char *comm; | ||
1964 | int ret; | ||
1965 | int S, T; | ||
1966 | int i; | ||
1967 | 1477 | ||
1968 | entry = iter->ent; | 1478 | entry = iter->ent; |
1969 | 1479 | ||
1970 | if (entry->type == TRACE_CONT) | ||
1971 | return TRACE_TYPE_HANDLED; | ||
1972 | |||
1973 | test_cpu_buff_start(iter); | 1480 | test_cpu_buff_start(iter); |
1974 | 1481 | ||
1975 | comm = trace_find_cmdline(iter->ent->pid); | 1482 | event = ftrace_find_event(entry->type); |
1976 | 1483 | ||
1977 | t = ns2usecs(iter->ts); | 1484 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
1978 | usec_rem = do_div(t, 1000000ULL); | 1485 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) { |
1979 | secs = (unsigned long)t; | 1486 | if (!trace_print_lat_context(iter)) |
1980 | 1487 | goto partial; | |
1981 | ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); | 1488 | } else { |
1982 | if (!ret) | 1489 | if (!trace_print_context(iter)) |
1983 | return TRACE_TYPE_PARTIAL_LINE; | 1490 | goto partial; |
1984 | ret = trace_seq_printf(s, "[%03d] ", iter->cpu); | ||
1985 | if (!ret) | ||
1986 | return TRACE_TYPE_PARTIAL_LINE; | ||
1987 | ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); | ||
1988 | if (!ret) | ||
1989 | return TRACE_TYPE_PARTIAL_LINE; | ||
1990 | |||
1991 | switch (entry->type) { | ||
1992 | case TRACE_FN: { | ||
1993 | struct ftrace_entry *field; | ||
1994 | |||
1995 | trace_assign_type(field, entry); | ||
1996 | |||
1997 | ret = seq_print_ip_sym(s, field->ip, sym_flags); | ||
1998 | if (!ret) | ||
1999 | return TRACE_TYPE_PARTIAL_LINE; | ||
2000 | if ((sym_flags & TRACE_ITER_PRINT_PARENT) && | ||
2001 | field->parent_ip) { | ||
2002 | ret = trace_seq_printf(s, " <-"); | ||
2003 | if (!ret) | ||
2004 | return TRACE_TYPE_PARTIAL_LINE; | ||
2005 | ret = seq_print_ip_sym(s, | ||
2006 | field->parent_ip, | ||
2007 | sym_flags); | ||
2008 | if (!ret) | ||
2009 | return TRACE_TYPE_PARTIAL_LINE; | ||
2010 | } | ||
2011 | ret = trace_seq_printf(s, "\n"); | ||
2012 | if (!ret) | ||
2013 | return TRACE_TYPE_PARTIAL_LINE; | ||
2014 | break; | ||
2015 | } | ||
2016 | case TRACE_CTX: | ||
2017 | case TRACE_WAKE: { | ||
2018 | struct ctx_switch_entry *field; | ||
2019 | |||
2020 | trace_assign_type(field, entry); | ||
2021 | |||
2022 | T = task_state_char(field->next_state); | ||
2023 | S = task_state_char(field->prev_state); | ||
2024 | ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n", | ||
2025 | field->prev_pid, | ||
2026 | field->prev_prio, | ||
2027 | S, | ||
2028 | entry->type == TRACE_CTX ? "==>" : " +", | ||
2029 | field->next_cpu, | ||
2030 | field->next_pid, | ||
2031 | field->next_prio, | ||
2032 | T); | ||
2033 | if (!ret) | ||
2034 | return TRACE_TYPE_PARTIAL_LINE; | ||
2035 | break; | ||
2036 | } | ||
2037 | case TRACE_SPECIAL: { | ||
2038 | struct special_entry *field; | ||
2039 | |||
2040 | trace_assign_type(field, entry); | ||
2041 | |||
2042 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", | ||
2043 | field->arg1, | ||
2044 | field->arg2, | ||
2045 | field->arg3); | ||
2046 | if (!ret) | ||
2047 | return TRACE_TYPE_PARTIAL_LINE; | ||
2048 | break; | ||
2049 | } | ||
2050 | case TRACE_STACK: { | ||
2051 | struct stack_entry *field; | ||
2052 | |||
2053 | trace_assign_type(field, entry); | ||
2054 | |||
2055 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
2056 | if (i) { | ||
2057 | ret = trace_seq_puts(s, " <= "); | ||
2058 | if (!ret) | ||
2059 | return TRACE_TYPE_PARTIAL_LINE; | ||
2060 | } | ||
2061 | ret = seq_print_ip_sym(s, field->caller[i], | ||
2062 | sym_flags); | ||
2063 | if (!ret) | ||
2064 | return TRACE_TYPE_PARTIAL_LINE; | ||
2065 | } | 1491 | } |
2066 | ret = trace_seq_puts(s, "\n"); | ||
2067 | if (!ret) | ||
2068 | return TRACE_TYPE_PARTIAL_LINE; | ||
2069 | break; | ||
2070 | } | ||
2071 | case TRACE_PRINT: { | ||
2072 | struct print_entry *field; | ||
2073 | |||
2074 | trace_assign_type(field, entry); | ||
2075 | |||
2076 | seq_print_ip_sym(s, field->ip, sym_flags); | ||
2077 | trace_seq_printf(s, ": %s", field->buf); | ||
2078 | if (entry->flags & TRACE_FLAG_CONT) | ||
2079 | trace_seq_print_cont(s, iter); | ||
2080 | break; | ||
2081 | } | ||
2082 | case TRACE_GRAPH_RET: { | ||
2083 | return print_graph_function(iter); | ||
2084 | } | ||
2085 | case TRACE_GRAPH_ENT: { | ||
2086 | return print_graph_function(iter); | ||
2087 | } | 1492 | } |
2088 | case TRACE_BRANCH: { | ||
2089 | struct trace_branch *field; | ||
2090 | 1493 | ||
2091 | trace_assign_type(field, entry); | 1494 | if (event) |
1495 | return event->trace(iter, sym_flags); | ||
2092 | 1496 | ||
2093 | trace_seq_printf(s, "[%s] %s:%s:%d\n", | 1497 | if (!trace_seq_printf(s, "Unknown type %d\n", entry->type)) |
2094 | field->correct ? " ok " : " MISS ", | 1498 | goto partial; |
2095 | field->func, | ||
2096 | field->file, | ||
2097 | field->line); | ||
2098 | break; | ||
2099 | } | ||
2100 | case TRACE_USER_STACK: { | ||
2101 | struct userstack_entry *field; | ||
2102 | 1499 | ||
2103 | trace_assign_type(field, entry); | ||
2104 | |||
2105 | ret = seq_print_userip_objs(field, s, sym_flags); | ||
2106 | if (!ret) | ||
2107 | return TRACE_TYPE_PARTIAL_LINE; | ||
2108 | ret = trace_seq_putc(s, '\n'); | ||
2109 | if (!ret) | ||
2110 | return TRACE_TYPE_PARTIAL_LINE; | ||
2111 | break; | ||
2112 | } | ||
2113 | } | ||
2114 | return TRACE_TYPE_HANDLED; | 1500 | return TRACE_TYPE_HANDLED; |
1501 | partial: | ||
1502 | return TRACE_TYPE_PARTIAL_LINE; | ||
2115 | } | 1503 | } |
2116 | 1504 | ||
2117 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) | 1505 | static enum print_line_t print_raw_fmt(struct trace_iterator *iter) |
2118 | { | 1506 | { |
2119 | struct trace_seq *s = &iter->seq; | 1507 | struct trace_seq *s = &iter->seq; |
2120 | struct trace_entry *entry; | 1508 | struct trace_entry *entry; |
2121 | int ret; | 1509 | struct trace_event *event; |
2122 | int S, T; | ||
2123 | 1510 | ||
2124 | entry = iter->ent; | 1511 | entry = iter->ent; |
2125 | 1512 | ||
2126 | if (entry->type == TRACE_CONT) | 1513 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
2127 | return TRACE_TYPE_HANDLED; | 1514 | if (!trace_seq_printf(s, "%d %d %llu ", |
2128 | 1515 | entry->pid, iter->cpu, iter->ts)) | |
2129 | ret = trace_seq_printf(s, "%d %d %llu ", | 1516 | goto partial; |
2130 | entry->pid, iter->cpu, iter->ts); | ||
2131 | if (!ret) | ||
2132 | return TRACE_TYPE_PARTIAL_LINE; | ||
2133 | |||
2134 | switch (entry->type) { | ||
2135 | case TRACE_FN: { | ||
2136 | struct ftrace_entry *field; | ||
2137 | |||
2138 | trace_assign_type(field, entry); | ||
2139 | |||
2140 | ret = trace_seq_printf(s, "%x %x\n", | ||
2141 | field->ip, | ||
2142 | field->parent_ip); | ||
2143 | if (!ret) | ||
2144 | return TRACE_TYPE_PARTIAL_LINE; | ||
2145 | break; | ||
2146 | } | ||
2147 | case TRACE_CTX: | ||
2148 | case TRACE_WAKE: { | ||
2149 | struct ctx_switch_entry *field; | ||
2150 | |||
2151 | trace_assign_type(field, entry); | ||
2152 | |||
2153 | T = task_state_char(field->next_state); | ||
2154 | S = entry->type == TRACE_WAKE ? '+' : | ||
2155 | task_state_char(field->prev_state); | ||
2156 | ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n", | ||
2157 | field->prev_pid, | ||
2158 | field->prev_prio, | ||
2159 | S, | ||
2160 | field->next_cpu, | ||
2161 | field->next_pid, | ||
2162 | field->next_prio, | ||
2163 | T); | ||
2164 | if (!ret) | ||
2165 | return TRACE_TYPE_PARTIAL_LINE; | ||
2166 | break; | ||
2167 | } | 1517 | } |
2168 | case TRACE_SPECIAL: | ||
2169 | case TRACE_USER_STACK: | ||
2170 | case TRACE_STACK: { | ||
2171 | struct special_entry *field; | ||
2172 | 1518 | ||
2173 | trace_assign_type(field, entry); | 1519 | event = ftrace_find_event(entry->type); |
1520 | if (event) | ||
1521 | return event->raw(iter, 0); | ||
2174 | 1522 | ||
2175 | ret = trace_seq_printf(s, "# %ld %ld %ld\n", | 1523 | if (!trace_seq_printf(s, "%d ?\n", entry->type)) |
2176 | field->arg1, | 1524 | goto partial; |
2177 | field->arg2, | ||
2178 | field->arg3); | ||
2179 | if (!ret) | ||
2180 | return TRACE_TYPE_PARTIAL_LINE; | ||
2181 | break; | ||
2182 | } | ||
2183 | case TRACE_PRINT: { | ||
2184 | struct print_entry *field; | ||
2185 | 1525 | ||
2186 | trace_assign_type(field, entry); | ||
2187 | |||
2188 | trace_seq_printf(s, "# %lx %s", field->ip, field->buf); | ||
2189 | if (entry->flags & TRACE_FLAG_CONT) | ||
2190 | trace_seq_print_cont(s, iter); | ||
2191 | break; | ||
2192 | } | ||
2193 | } | ||
2194 | return TRACE_TYPE_HANDLED; | 1526 | return TRACE_TYPE_HANDLED; |
1527 | partial: | ||
1528 | return TRACE_TYPE_PARTIAL_LINE; | ||
2195 | } | 1529 | } |
2196 | 1530 | ||
2197 | #define SEQ_PUT_FIELD_RET(s, x) \ | ||
2198 | do { \ | ||
2199 | if (!trace_seq_putmem(s, &(x), sizeof(x))) \ | ||
2200 | return 0; \ | ||
2201 | } while (0) | ||
2202 | |||
2203 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ | ||
2204 | do { \ | ||
2205 | BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \ | ||
2206 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ | ||
2207 | return 0; \ | ||
2208 | } while (0) | ||
2209 | |||
2210 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) | 1531 | static enum print_line_t print_hex_fmt(struct trace_iterator *iter) |
2211 | { | 1532 | { |
2212 | struct trace_seq *s = &iter->seq; | 1533 | struct trace_seq *s = &iter->seq; |
2213 | unsigned char newline = '\n'; | 1534 | unsigned char newline = '\n'; |
2214 | struct trace_entry *entry; | 1535 | struct trace_entry *entry; |
2215 | int S, T; | 1536 | struct trace_event *event; |
2216 | 1537 | ||
2217 | entry = iter->ent; | 1538 | entry = iter->ent; |
2218 | 1539 | ||
2219 | if (entry->type == TRACE_CONT) | 1540 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
2220 | return TRACE_TYPE_HANDLED; | 1541 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); |
2221 | 1542 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | |
2222 | SEQ_PUT_HEX_FIELD_RET(s, entry->pid); | 1543 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); |
2223 | SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); | ||
2224 | SEQ_PUT_HEX_FIELD_RET(s, iter->ts); | ||
2225 | |||
2226 | switch (entry->type) { | ||
2227 | case TRACE_FN: { | ||
2228 | struct ftrace_entry *field; | ||
2229 | |||
2230 | trace_assign_type(field, entry); | ||
2231 | |||
2232 | SEQ_PUT_HEX_FIELD_RET(s, field->ip); | ||
2233 | SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); | ||
2234 | break; | ||
2235 | } | ||
2236 | case TRACE_CTX: | ||
2237 | case TRACE_WAKE: { | ||
2238 | struct ctx_switch_entry *field; | ||
2239 | |||
2240 | trace_assign_type(field, entry); | ||
2241 | |||
2242 | T = task_state_char(field->next_state); | ||
2243 | S = entry->type == TRACE_WAKE ? '+' : | ||
2244 | task_state_char(field->prev_state); | ||
2245 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); | ||
2246 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); | ||
2247 | SEQ_PUT_HEX_FIELD_RET(s, S); | ||
2248 | SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); | ||
2249 | SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); | ||
2250 | SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); | ||
2251 | SEQ_PUT_HEX_FIELD_RET(s, T); | ||
2252 | break; | ||
2253 | } | 1544 | } |
2254 | case TRACE_SPECIAL: | ||
2255 | case TRACE_USER_STACK: | ||
2256 | case TRACE_STACK: { | ||
2257 | struct special_entry *field; | ||
2258 | |||
2259 | trace_assign_type(field, entry); | ||
2260 | 1545 | ||
2261 | SEQ_PUT_HEX_FIELD_RET(s, field->arg1); | 1546 | event = ftrace_find_event(entry->type); |
2262 | SEQ_PUT_HEX_FIELD_RET(s, field->arg2); | 1547 | if (event) { |
2263 | SEQ_PUT_HEX_FIELD_RET(s, field->arg3); | 1548 | enum print_line_t ret = event->hex(iter, 0); |
2264 | break; | 1549 | if (ret != TRACE_TYPE_HANDLED) |
2265 | } | 1550 | return ret; |
2266 | } | 1551 | } |
1552 | |||
2267 | SEQ_PUT_FIELD_RET(s, newline); | 1553 | SEQ_PUT_FIELD_RET(s, newline); |
2268 | 1554 | ||
2269 | return TRACE_TYPE_HANDLED; | 1555 | return TRACE_TYPE_HANDLED; |
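The per-type switch statements deleted above collapse into a registry lookup: ftrace_find_event() maps an entry type to a struct trace_event whose trace/raw/hex/binary callbacks each render one entry in one output mode. A hedged sketch of what a registered event might look like (the field names and callback signature follow the dispatch sites above; the handler body is illustrative, and registration itself goes through the trace_output code, which is not part of this excerpt):

	static enum print_line_t
	special_trace_output(struct trace_iterator *iter, int flags)
	{
		struct special_entry *field;

		trace_assign_type(field, iter->ent);
		if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n",
				      field->arg1, field->arg2, field->arg3))
			return TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_HANDLED;
	}

	static struct trace_event trace_special_event = {
		.type	= TRACE_SPECIAL,
		.trace	= special_trace_output,
		/* .raw, .hex and .binary take the same signature */
	};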
@@ -2278,13 +1564,10 @@ static enum print_line_t print_printk_msg_only(struct trace_iterator *iter) | |||
2278 | 1564 | ||
2279 | trace_assign_type(field, entry); | 1565 | trace_assign_type(field, entry); |
2280 | 1566 | ||
2281 | ret = trace_seq_printf(s, field->buf); | 1567 | ret = trace_seq_printf(s, "%s", field->buf); |
2282 | if (!ret) | 1568 | if (!ret) |
2283 | return TRACE_TYPE_PARTIAL_LINE; | 1569 | return TRACE_TYPE_PARTIAL_LINE; |
2284 | 1570 | ||
2285 | if (entry->flags & TRACE_FLAG_CONT) | ||
2286 | trace_seq_print_cont(s, iter); | ||
2287 | |||
2288 | return TRACE_TYPE_HANDLED; | 1571 | return TRACE_TYPE_HANDLED; |
2289 | } | 1572 | } |
2290 | 1573 | ||
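The "%s" change above closes a format-string hazard: field->buf comes from trace printk users and can itself contain '%' conversions. A minimal illustration with hypothetical buffer contents:

	/* had field->buf held "loaded 95% done", the old
	 *	trace_seq_printf(s, field->buf);
	 * would parse "% d" as a conversion and read a nonexistent
	 * vararg; the fixed form prints the buffer verbatim */
	trace_seq_printf(s, "%s", field->buf);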
@@ -2292,53 +1575,18 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter) | |||
2292 | { | 1575 | { |
2293 | struct trace_seq *s = &iter->seq; | 1576 | struct trace_seq *s = &iter->seq; |
2294 | struct trace_entry *entry; | 1577 | struct trace_entry *entry; |
1578 | struct trace_event *event; | ||
2295 | 1579 | ||
2296 | entry = iter->ent; | 1580 | entry = iter->ent; |
2297 | 1581 | ||
2298 | if (entry->type == TRACE_CONT) | 1582 | if (trace_flags & TRACE_ITER_CONTEXT_INFO) { |
2299 | return TRACE_TYPE_HANDLED; | 1583 | SEQ_PUT_FIELD_RET(s, entry->pid); |
2300 | 1584 | SEQ_PUT_FIELD_RET(s, iter->cpu); | |
2301 | SEQ_PUT_FIELD_RET(s, entry->pid); | 1585 | SEQ_PUT_FIELD_RET(s, iter->ts); |
2302 | SEQ_PUT_FIELD_RET(s, entry->cpu); | ||
2303 | SEQ_PUT_FIELD_RET(s, iter->ts); | ||
2304 | |||
2305 | switch (entry->type) { | ||
2306 | case TRACE_FN: { | ||
2307 | struct ftrace_entry *field; | ||
2308 | |||
2309 | trace_assign_type(field, entry); | ||
2310 | |||
2311 | SEQ_PUT_FIELD_RET(s, field->ip); | ||
2312 | SEQ_PUT_FIELD_RET(s, field->parent_ip); | ||
2313 | break; | ||
2314 | } | ||
2315 | case TRACE_CTX: { | ||
2316 | struct ctx_switch_entry *field; | ||
2317 | |||
2318 | trace_assign_type(field, entry); | ||
2319 | |||
2320 | SEQ_PUT_FIELD_RET(s, field->prev_pid); | ||
2321 | SEQ_PUT_FIELD_RET(s, field->prev_prio); | ||
2322 | SEQ_PUT_FIELD_RET(s, field->prev_state); | ||
2323 | SEQ_PUT_FIELD_RET(s, field->next_pid); | ||
2324 | SEQ_PUT_FIELD_RET(s, field->next_prio); | ||
2325 | SEQ_PUT_FIELD_RET(s, field->next_state); | ||
2326 | break; | ||
2327 | } | 1586 | } |
2328 | case TRACE_SPECIAL: | ||
2329 | case TRACE_USER_STACK: | ||
2330 | case TRACE_STACK: { | ||
2331 | struct special_entry *field; | ||
2332 | 1587 | ||
2333 | trace_assign_type(field, entry); | 1588 | event = ftrace_find_event(entry->type); |
2334 | 1589 | return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED; | |
2335 | SEQ_PUT_FIELD_RET(s, field->arg1); | ||
2336 | SEQ_PUT_FIELD_RET(s, field->arg2); | ||
2337 | SEQ_PUT_FIELD_RET(s, field->arg3); | ||
2338 | break; | ||
2339 | } | ||
2340 | } | ||
2341 | return 1; | ||
2342 | } | 1590 | } |
2343 | 1591 | ||
2344 | static int trace_empty(struct trace_iterator *iter) | 1592 | static int trace_empty(struct trace_iterator *iter) |
@@ -2382,9 +1630,6 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter) | |||
2382 | if (trace_flags & TRACE_ITER_RAW) | 1630 | if (trace_flags & TRACE_ITER_RAW) |
2383 | return print_raw_fmt(iter); | 1631 | return print_raw_fmt(iter); |
2384 | 1632 | ||
2385 | if (iter->iter_flags & TRACE_FILE_LAT_FMT) | ||
2386 | return print_lat_fmt(iter, iter->idx, iter->cpu); | ||
2387 | |||
2388 | return print_trace_fmt(iter); | 1633 | return print_trace_fmt(iter); |
2389 | } | 1634 | } |
2390 | 1635 | ||
@@ -2426,30 +1671,40 @@ static struct seq_operations tracer_seq_ops = { | |||
2426 | }; | 1671 | }; |
2427 | 1672 | ||
2428 | static struct trace_iterator * | 1673 | static struct trace_iterator * |
2429 | __tracing_open(struct inode *inode, struct file *file, int *ret) | 1674 | __tracing_open(struct inode *inode, struct file *file) |
2430 | { | 1675 | { |
1676 | long cpu_file = (long) inode->i_private; | ||
1677 | void *fail_ret = ERR_PTR(-ENOMEM); | ||
2431 | struct trace_iterator *iter; | 1678 | struct trace_iterator *iter; |
2432 | struct seq_file *m; | 1679 | struct seq_file *m; |
2433 | int cpu; | 1680 | int cpu, ret; |
2434 | 1681 | ||
2435 | if (tracing_disabled) { | 1682 | if (tracing_disabled) |
2436 | *ret = -ENODEV; | 1683 | return ERR_PTR(-ENODEV); |
2437 | return NULL; | ||
2438 | } | ||
2439 | 1684 | ||
2440 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 1685 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
2441 | if (!iter) { | 1686 | if (!iter) |
2442 | *ret = -ENOMEM; | 1687 | return ERR_PTR(-ENOMEM); |
2443 | goto out; | ||
2444 | } | ||
2445 | 1688 | ||
1689 | /* | ||
1690 | * We make a copy of the current tracer to avoid concurrent | ||
1691 | * changes to it while we are reading. | ||
1692 | */ | ||
2446 | mutex_lock(&trace_types_lock); | 1693 | mutex_lock(&trace_types_lock); |
1694 | iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL); | ||
1695 | if (!iter->trace) | ||
1696 | goto fail; | ||
1697 | |||
1698 | if (current_trace) | ||
1699 | *iter->trace = *current_trace; | ||
1700 | |||
2447 | if (current_trace && current_trace->print_max) | 1701 | if (current_trace && current_trace->print_max) |
2448 | iter->tr = &max_tr; | 1702 | iter->tr = &max_tr; |
2449 | else | 1703 | else |
2450 | iter->tr = inode->i_private; | 1704 | iter->tr = &global_trace; |
2451 | iter->trace = current_trace; | ||
2452 | iter->pos = -1; | 1705 | iter->pos = -1; |
1706 | mutex_init(&iter->mutex); | ||
1707 | iter->cpu_file = cpu_file; | ||
2453 | 1708 | ||
2454 | /* Notify the tracer early, before we stop tracing. */ | 1709 | /* Notify the tracer early, before we stop tracing. */ |
2455 | if (iter->trace && iter->trace->open) | 1710 | if (iter->trace && iter->trace->open) |
@@ -2459,20 +1714,30 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
2459 | if (ring_buffer_overruns(iter->tr->buffer)) | 1714 | if (ring_buffer_overruns(iter->tr->buffer)) |
2460 | iter->iter_flags |= TRACE_FILE_ANNOTATE; | 1715 | iter->iter_flags |= TRACE_FILE_ANNOTATE; |
2461 | 1716 | ||
1717 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) { | ||
1718 | for_each_tracing_cpu(cpu) { | ||
2462 | 1719 | ||
2463 | for_each_tracing_cpu(cpu) { | 1720 | iter->buffer_iter[cpu] = |
1721 | ring_buffer_read_start(iter->tr->buffer, cpu); | ||
2464 | 1722 | ||
1723 | if (!iter->buffer_iter[cpu]) | ||
1724 | goto fail_buffer; | ||
1725 | } | ||
1726 | } else { | ||
1727 | cpu = iter->cpu_file; | ||
2465 | iter->buffer_iter[cpu] = | 1728 | iter->buffer_iter[cpu] = |
2466 | ring_buffer_read_start(iter->tr->buffer, cpu); | 1729 | ring_buffer_read_start(iter->tr->buffer, cpu); |
2467 | 1730 | ||
2468 | if (!iter->buffer_iter[cpu]) | 1731 | if (!iter->buffer_iter[cpu]) |
2469 | goto fail_buffer; | 1732 | goto fail; |
2470 | } | 1733 | } |
2471 | 1734 | ||
2472 | /* TODO stop tracer */ | 1735 | /* TODO stop tracer */ |
2473 | *ret = seq_open(file, &tracer_seq_ops); | 1736 | ret = seq_open(file, &tracer_seq_ops); |
2474 | if (*ret) | 1737 | if (ret < 0) { |
1738 | fail_ret = ERR_PTR(ret); | ||
2475 | goto fail_buffer; | 1739 | goto fail_buffer; |
1740 | } | ||
2476 | 1741 | ||
2477 | m = file->private_data; | 1742 | m = file->private_data; |
2478 | m->private = iter; | 1743 | m->private = iter; |
@@ -2482,7 +1747,6 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
2482 | 1747 | ||
2483 | mutex_unlock(&trace_types_lock); | 1748 | mutex_unlock(&trace_types_lock); |
2484 | 1749 | ||
2485 | out: | ||
2486 | return iter; | 1750 | return iter; |
2487 | 1751 | ||
2488 | fail_buffer: | 1752 | fail_buffer: |
@@ -2490,10 +1754,12 @@ __tracing_open(struct inode *inode, struct file *file, int *ret) | |||
2490 | if (iter->buffer_iter[cpu]) | 1754 | if (iter->buffer_iter[cpu]) |
2491 | ring_buffer_read_finish(iter->buffer_iter[cpu]); | 1755 | ring_buffer_read_finish(iter->buffer_iter[cpu]); |
2492 | } | 1756 | } |
1757 | fail: | ||
2493 | mutex_unlock(&trace_types_lock); | 1758 | mutex_unlock(&trace_types_lock); |
1759 | kfree(iter->trace); | ||
2494 | kfree(iter); | 1760 | kfree(iter); |
2495 | 1761 | ||
2496 | return ERR_PTR(-ENOMEM); | 1762 | return fail_ret; |
2497 | } | 1763 | } |
2498 | 1764 | ||
2499 | int tracing_open_generic(struct inode *inode, struct file *filp) | 1765 | int tracing_open_generic(struct inode *inode, struct file *filp) |
@@ -2505,7 +1771,7 @@ int tracing_open_generic(struct inode *inode, struct file *filp) | |||
2505 | return 0; | 1771 | return 0; |
2506 | } | 1772 | } |
2507 | 1773 | ||
2508 | int tracing_release(struct inode *inode, struct file *file) | 1774 | static int tracing_release(struct inode *inode, struct file *file) |
2509 | { | 1775 | { |
2510 | struct seq_file *m = (struct seq_file *)file->private_data; | 1776 | struct seq_file *m = (struct seq_file *)file->private_data; |
2511 | struct trace_iterator *iter = m->private; | 1777 | struct trace_iterator *iter = m->private; |
@@ -2525,33 +1791,26 @@ int tracing_release(struct inode *inode, struct file *file) | |||
2525 | mutex_unlock(&trace_types_lock); | 1791 | mutex_unlock(&trace_types_lock); |
2526 | 1792 | ||
2527 | seq_release(inode, file); | 1793 | seq_release(inode, file); |
1794 | mutex_destroy(&iter->mutex); | ||
1795 | kfree(iter->trace); | ||
2528 | kfree(iter); | 1796 | kfree(iter); |
2529 | return 0; | 1797 | return 0; |
2530 | } | 1798 | } |
2531 | 1799 | ||
2532 | static int tracing_open(struct inode *inode, struct file *file) | 1800 | static int tracing_open(struct inode *inode, struct file *file) |
2533 | { | 1801 | { |
2534 | int ret; | ||
2535 | |||
2536 | __tracing_open(inode, file, &ret); | ||
2537 | |||
2538 | return ret; | ||
2539 | } | ||
2540 | |||
2541 | static int tracing_lt_open(struct inode *inode, struct file *file) | ||
2542 | { | ||
2543 | struct trace_iterator *iter; | 1802 | struct trace_iterator *iter; |
2544 | int ret; | 1803 | int ret = 0; |
2545 | |||
2546 | iter = __tracing_open(inode, file, &ret); | ||
2547 | 1804 | ||
2548 | if (!ret) | 1805 | iter = __tracing_open(inode, file); |
1806 | if (IS_ERR(iter)) | ||
1807 | ret = PTR_ERR(iter); | ||
1808 | else if (trace_flags & TRACE_ITER_LATENCY_FMT) | ||
2549 | iter->iter_flags |= TRACE_FILE_LAT_FMT; | 1809 | iter->iter_flags |= TRACE_FILE_LAT_FMT; |
2550 | 1810 | ||
2551 | return ret; | 1811 | return ret; |
2552 | } | 1812 | } |
2553 | 1813 | ||
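__tracing_open() now reports failure through the returned pointer itself, and tracing_open() above unpacks it with IS_ERR()/PTR_ERR(). A minimal, self-contained sketch of the convention for any constructor refactored this way (struct foo and foo_open() are illustrative; ERR_PTR/IS_ERR/PTR_ERR come from <linux/err.h>):

	#include <linux/err.h>
	#include <linux/slab.h>

	struct foo { int x; };

	static struct foo *foo_open(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

		if (!f)
			return ERR_PTR(-ENOMEM);  /* error encoded in pointer */
		return f;			  /* valid pointer on success */
	}

	/* caller */
	struct foo *f = foo_open();
	if (IS_ERR(f))
		return PTR_ERR(f);	/* recovers -ENOMEM, -ENODEV, ... */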
2554 | |||
2555 | static void * | 1814 | static void * |
2556 | t_next(struct seq_file *m, void *v, loff_t *pos) | 1815 | t_next(struct seq_file *m, void *v, loff_t *pos) |
2557 | { | 1816 | { |
@@ -2630,13 +1889,6 @@ static struct file_operations tracing_fops = { | |||
2630 | .release = tracing_release, | 1889 | .release = tracing_release, |
2631 | }; | 1890 | }; |
2632 | 1891 | ||
2633 | static struct file_operations tracing_lt_fops = { | ||
2634 | .open = tracing_lt_open, | ||
2635 | .read = seq_read, | ||
2636 | .llseek = seq_lseek, | ||
2637 | .release = tracing_release, | ||
2638 | }; | ||
2639 | |||
2640 | static struct file_operations show_traces_fops = { | 1892 | static struct file_operations show_traces_fops = { |
2641 | .open = show_traces_open, | 1893 | .open = show_traces_open, |
2642 | .read = seq_read, | 1894 | .read = seq_read, |
@@ -2740,57 +1992,62 @@ static ssize_t | |||
2740 | tracing_trace_options_read(struct file *filp, char __user *ubuf, | 1992 | tracing_trace_options_read(struct file *filp, char __user *ubuf, |
2741 | size_t cnt, loff_t *ppos) | 1993 | size_t cnt, loff_t *ppos) |
2742 | { | 1994 | { |
2743 | int i; | 1995 | struct tracer_opt *trace_opts; |
1996 | u32 tracer_flags; | ||
1997 | int len = 0; | ||
2744 | char *buf; | 1998 | char *buf; |
2745 | int r = 0; | 1999 | int r = 0; |
2746 | int len = 0; | 2000 | int i; |
2747 | u32 tracer_flags = current_trace->flags->val; | ||
2748 | struct tracer_opt *trace_opts = current_trace->flags->opts; | ||
2749 | 2001 | ||
2750 | 2002 | ||
2751 | /* calulate max size */ | 2003 | /* calculate max size */ |
2752 | for (i = 0; trace_options[i]; i++) { | 2004 | for (i = 0; trace_options[i]; i++) { |
2753 | len += strlen(trace_options[i]); | 2005 | len += strlen(trace_options[i]); |
2754 | len += 3; /* "no" and space */ | 2006 | len += 3; /* "no" and newline */ |
2755 | } | 2007 | } |
2756 | 2008 | ||
2009 | mutex_lock(&trace_types_lock); | ||
2010 | tracer_flags = current_trace->flags->val; | ||
2011 | trace_opts = current_trace->flags->opts; | ||
2012 | |||
2757 | /* | 2013 | /* |
2758 | * Increase the size with names of options specific | 2014 | * Increase the size with names of options specific |
2759 | * to the current tracer. | 2015 | * to the current tracer. |
2760 | */ | 2016 | */ |
2761 | for (i = 0; trace_opts[i].name; i++) { | 2017 | for (i = 0; trace_opts[i].name; i++) { |
2762 | len += strlen(trace_opts[i].name); | 2018 | len += strlen(trace_opts[i].name); |
2763 | len += 3; /* "no" and space */ | 2019 | len += 3; /* "no" and newline */ |
2764 | } | 2020 | } |
2765 | 2021 | ||
2766 | /* +2 for \n and \0 */ | 2022 | /* +2 for \n and \0 */ |
2767 | buf = kmalloc(len + 2, GFP_KERNEL); | 2023 | buf = kmalloc(len + 2, GFP_KERNEL); |
2768 | if (!buf) | 2024 | if (!buf) { |
2025 | mutex_unlock(&trace_types_lock); | ||
2769 | return -ENOMEM; | 2026 | return -ENOMEM; |
2027 | } | ||
2770 | 2028 | ||
2771 | for (i = 0; trace_options[i]; i++) { | 2029 | for (i = 0; trace_options[i]; i++) { |
2772 | if (trace_flags & (1 << i)) | 2030 | if (trace_flags & (1 << i)) |
2773 | r += sprintf(buf + r, "%s ", trace_options[i]); | 2031 | r += sprintf(buf + r, "%s\n", trace_options[i]); |
2774 | else | 2032 | else |
2775 | r += sprintf(buf + r, "no%s ", trace_options[i]); | 2033 | r += sprintf(buf + r, "no%s\n", trace_options[i]); |
2776 | } | 2034 | } |
2777 | 2035 | ||
2778 | for (i = 0; trace_opts[i].name; i++) { | 2036 | for (i = 0; trace_opts[i].name; i++) { |
2779 | if (tracer_flags & trace_opts[i].bit) | 2037 | if (tracer_flags & trace_opts[i].bit) |
2780 | r += sprintf(buf + r, "%s ", | 2038 | r += sprintf(buf + r, "%s\n", |
2781 | trace_opts[i].name); | 2039 | trace_opts[i].name); |
2782 | else | 2040 | else |
2783 | r += sprintf(buf + r, "no%s ", | 2041 | r += sprintf(buf + r, "no%s\n", |
2784 | trace_opts[i].name); | 2042 | trace_opts[i].name); |
2785 | } | 2043 | } |
2044 | mutex_unlock(&trace_types_lock); | ||
2786 | 2045 | ||
2787 | r += sprintf(buf + r, "\n"); | ||
2788 | WARN_ON(r >= len + 2); | 2046 | WARN_ON(r >= len + 2); |
2789 | 2047 | ||
2790 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2048 | r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2791 | 2049 | ||
2792 | kfree(buf); | 2050 | kfree(buf); |
2793 | |||
2794 | return r; | 2051 | return r; |
2795 | } | 2052 | } |
2796 | 2053 | ||
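A worked example of the sizing arithmetic above, using the real option name "print-parent" (12 characters):

	/* worst case written per option: "noprint-parent\n", i.e.
	 * 12 + 3 bytes ("no" is 2 and '\n' is 1); the trailing +2 is a
	 * holdover from when a final '\n' was appended and now simply
	 * guarantees room for the terminating NUL */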
@@ -2865,7 +2122,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
2865 | 2122 | ||
2866 | /* If no option could be set, test the specific tracer options */ | 2123 | /* If no option could be set, test the specific tracer options */ |
2867 | if (!trace_options[i]) { | 2124 | if (!trace_options[i]) { |
2125 | mutex_lock(&trace_types_lock); | ||
2868 | ret = set_tracer_option(current_trace, cmp, neg); | 2126 | ret = set_tracer_option(current_trace, cmp, neg); |
2127 | mutex_unlock(&trace_types_lock); | ||
2869 | if (ret) | 2128 | if (ret) |
2870 | return ret; | 2129 | return ret; |
2871 | } | 2130 | } |
@@ -2930,7 +2189,7 @@ tracing_ctrl_write(struct file *filp, const char __user *ubuf, | |||
2930 | { | 2189 | { |
2931 | struct trace_array *tr = filp->private_data; | 2190 | struct trace_array *tr = filp->private_data; |
2932 | char buf[64]; | 2191 | char buf[64]; |
2933 | long val; | 2192 | unsigned long val; |
2934 | int ret; | 2193 | int ret; |
2935 | 2194 | ||
2936 | if (cnt >= sizeof(buf)) | 2195 | if (cnt >= sizeof(buf)) |
@@ -2985,8 +2244,23 @@ tracing_set_trace_read(struct file *filp, char __user *ubuf, | |||
2985 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | 2244 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); |
2986 | } | 2245 | } |
2987 | 2246 | ||
2988 | static int tracing_set_tracer(char *buf) | 2247 | int tracer_init(struct tracer *t, struct trace_array *tr) |
2248 | { | ||
2249 | tracing_reset_online_cpus(tr); | ||
2250 | return t->init(tr); | ||
2251 | } | ||
2252 | |||
2253 | struct trace_option_dentry; | ||
2254 | |||
2255 | static struct trace_option_dentry * | ||
2256 | create_trace_option_files(struct tracer *tracer); | ||
2257 | |||
2258 | static void | ||
2259 | destroy_trace_option_files(struct trace_option_dentry *topts); | ||
2260 | |||
2261 | static int tracing_set_tracer(const char *buf) | ||
2989 | { | 2262 | { |
2263 | static struct trace_option_dentry *topts; | ||
2990 | struct trace_array *tr = &global_trace; | 2264 | struct trace_array *tr = &global_trace; |
2991 | struct tracer *t; | 2265 | struct tracer *t; |
2992 | int ret = 0; | 2266 | int ret = 0; |
@@ -3007,9 +2281,14 @@ static int tracing_set_tracer(char *buf) | |||
3007 | if (current_trace && current_trace->reset) | 2281 | if (current_trace && current_trace->reset) |
3008 | current_trace->reset(tr); | 2282 | current_trace->reset(tr); |
3009 | 2283 | ||
2284 | destroy_trace_option_files(topts); | ||
2285 | |||
3010 | current_trace = t; | 2286 | current_trace = t; |
2287 | |||
2288 | topts = create_trace_option_files(current_trace); | ||
2289 | |||
3011 | if (t->init) { | 2290 | if (t->init) { |
3012 | ret = t->init(tr); | 2291 | ret = tracer_init(t, tr); |
3013 | if (ret) | 2292 | if (ret) |
3014 | goto out; | 2293 | goto out; |
3015 | } | 2294 | } |
@@ -3072,9 +2351,9 @@ static ssize_t | |||
3072 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, | 2351 | tracing_max_lat_write(struct file *filp, const char __user *ubuf, |
3073 | size_t cnt, loff_t *ppos) | 2352 | size_t cnt, loff_t *ppos) |
3074 | { | 2353 | { |
3075 | long *ptr = filp->private_data; | 2354 | unsigned long *ptr = filp->private_data; |
3076 | char buf[64]; | 2355 | char buf[64]; |
3077 | long val; | 2356 | unsigned long val; |
3078 | int ret; | 2357 | int ret; |
3079 | 2358 | ||
3080 | if (cnt >= sizeof(buf)) | 2359 | if (cnt >= sizeof(buf)) |
@@ -3094,54 +2373,96 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf, | |||
3094 | return cnt; | 2373 | return cnt; |
3095 | } | 2374 | } |
3096 | 2375 | ||
3097 | static atomic_t tracing_reader; | ||
3098 | |||
3099 | static int tracing_open_pipe(struct inode *inode, struct file *filp) | 2376 | static int tracing_open_pipe(struct inode *inode, struct file *filp) |
3100 | { | 2377 | { |
2378 | long cpu_file = (long) inode->i_private; | ||
3101 | struct trace_iterator *iter; | 2379 | struct trace_iterator *iter; |
2380 | int ret = 0; | ||
3102 | 2381 | ||
3103 | if (tracing_disabled) | 2382 | if (tracing_disabled) |
3104 | return -ENODEV; | 2383 | return -ENODEV; |
3105 | 2384 | ||
3106 | /* We only allow for reader of the pipe */ | 2385 | mutex_lock(&trace_types_lock); |
3107 | if (atomic_inc_return(&tracing_reader) != 1) { | 2386 | |
3108 | atomic_dec(&tracing_reader); | 2387 | /* We only allow one reader per cpu */ |
3109 | return -EBUSY; | 2388 | if (cpu_file == TRACE_PIPE_ALL_CPU) { |
2389 | if (!cpumask_empty(tracing_reader_cpumask)) { | ||
2390 | ret = -EBUSY; | ||
2391 | goto out; | ||
2392 | } | ||
2393 | cpumask_setall(tracing_reader_cpumask); | ||
2394 | } else { | ||
2395 | if (!cpumask_test_cpu(cpu_file, tracing_reader_cpumask)) | ||
2396 | cpumask_set_cpu(cpu_file, tracing_reader_cpumask); | ||
2397 | else { | ||
2398 | ret = -EBUSY; | ||
2399 | goto out; | ||
2400 | } | ||
3110 | } | 2401 | } |
3111 | 2402 | ||
3112 | /* create a buffer to store the information to pass to userspace */ | 2403 | /* create a buffer to store the information to pass to userspace */ |
3113 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); | 2404 | iter = kzalloc(sizeof(*iter), GFP_KERNEL); |
3114 | if (!iter) | 2405 | if (!iter) { |
3115 | return -ENOMEM; | 2406 | ret = -ENOMEM; |
2407 | goto out; | ||
2408 | } | ||
3116 | 2409 | ||
3117 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { | 2410 | /* |
3118 | kfree(iter); | 2411 | * We make a copy of the current tracer to avoid concurrent |
3119 | return -ENOMEM; | 2412 | * changes to it while we are reading. |
2413 | */ | ||
2414 | iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL); | ||
2415 | if (!iter->trace) { | ||
2416 | ret = -ENOMEM; | ||
2417 | goto fail; | ||
3120 | } | 2418 | } |
2419 | if (current_trace) | ||
2420 | *iter->trace = *current_trace; | ||
3121 | 2421 | ||
3122 | mutex_lock(&trace_types_lock); | 2422 | if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) { |
2423 | ret = -ENOMEM; | ||
2424 | goto fail; | ||
2425 | } | ||
3123 | 2426 | ||
3124 | /* trace pipe does not show start of buffer */ | 2427 | /* trace pipe does not show start of buffer */ |
3125 | cpumask_setall(iter->started); | 2428 | cpumask_setall(iter->started); |
3126 | 2429 | ||
2430 | iter->cpu_file = cpu_file; | ||
3127 | iter->tr = &global_trace; | 2431 | iter->tr = &global_trace; |
3128 | iter->trace = current_trace; | 2432 | mutex_init(&iter->mutex); |
3129 | filp->private_data = iter; | 2433 | filp->private_data = iter; |
3130 | 2434 | ||
3131 | if (iter->trace->pipe_open) | 2435 | if (iter->trace->pipe_open) |
3132 | iter->trace->pipe_open(iter); | 2436 | iter->trace->pipe_open(iter); |
2437 | |||
2438 | out: | ||
3133 | mutex_unlock(&trace_types_lock); | 2439 | mutex_unlock(&trace_types_lock); |
2440 | return ret; | ||
3134 | 2441 | ||
3135 | return 0; | 2442 | fail: |
2443 | kfree(iter->trace); | ||
2444 | kfree(iter); | ||
2445 | mutex_unlock(&trace_types_lock); | ||
2446 | return ret; | ||
3136 | } | 2447 | } |
3137 | 2448 | ||
3138 | static int tracing_release_pipe(struct inode *inode, struct file *file) | 2449 | static int tracing_release_pipe(struct inode *inode, struct file *file) |
3139 | { | 2450 | { |
3140 | struct trace_iterator *iter = file->private_data; | 2451 | struct trace_iterator *iter = file->private_data; |
3141 | 2452 | ||
2453 | mutex_lock(&trace_types_lock); | ||
2454 | |||
2455 | if (iter->cpu_file == TRACE_PIPE_ALL_CPU) | ||
2456 | cpumask_clear(tracing_reader_cpumask); | ||
2457 | else | ||
2458 | cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask); | ||
2459 | |||
2460 | mutex_unlock(&trace_types_lock); | ||
2461 | |||
3142 | free_cpumask_var(iter->started); | 2462 | free_cpumask_var(iter->started); |
2463 | mutex_destroy(&iter->mutex); | ||
2464 | kfree(iter->trace); | ||
3143 | kfree(iter); | 2465 | kfree(iter); |
3144 | atomic_dec(&tracing_reader); | ||
3145 | 2466 | ||
3146 | return 0; | 2467 | return 0; |
3147 | } | 2468 | } |
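
The cpumask bookkeeping above enforces one reader per cpu: the all-CPU pipe claims every bit, a per-cpu pipe claims its own, and a second open() is refused. A small demonstration, as a sketch that assumes the per_cpu/cpuN debugfs layout created later in this patch:

    /* Hypothetical sketch: a second reader is refused with EBUSY. */
    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            const char *p =
                "/sys/kernel/debug/tracing/per_cpu/cpu0/trace_pipe";
            int a = open(p, O_RDONLY);
            int b = open(p, O_RDONLY); /* expected: -1, errno == EBUSY */

            printf("first: %d, second: %d (errno %d)\n", a, b, errno);
            if (a >= 0)
                    close(a);
            return 0;
    }
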
@@ -3167,67 +2488,57 @@ tracing_poll_pipe(struct file *filp, poll_table *poll_table) | |||
3167 | } | 2488 | } |
3168 | } | 2489 | } |
3169 | 2490 | ||
3170 | /* | 2491 | |
3171 | * Consumer reader. | 2492 | void default_wait_pipe(struct trace_iterator *iter) |
3172 | */ | ||
3173 | static ssize_t | ||
3174 | tracing_read_pipe(struct file *filp, char __user *ubuf, | ||
3175 | size_t cnt, loff_t *ppos) | ||
3176 | { | 2493 | { |
3177 | struct trace_iterator *iter = filp->private_data; | 2494 | DEFINE_WAIT(wait); |
3178 | ssize_t sret; | ||
3179 | 2495 | ||
3180 | /* return any leftover data */ | 2496 | prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE); |
3181 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | ||
3182 | if (sret != -EBUSY) | ||
3183 | return sret; | ||
3184 | 2497 | ||
3185 | trace_seq_reset(&iter->seq); | 2498 | if (trace_empty(iter)) |
2499 | schedule(); | ||
3186 | 2500 | ||
3187 | mutex_lock(&trace_types_lock); | 2501 | finish_wait(&trace_wait, &wait); |
3188 | if (iter->trace->read) { | 2502 | } |
3189 | sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); | 2503 | |
3190 | if (sret) | 2504 | /* |
3191 | goto out; | 2505 | * This is a makeshift waitqueue. |
3192 | } | 2506 | * A tracer might use this callback in some rare cases: |
2507 | * | ||
2508 | * 1) the current tracer might hold the runqueue lock when it wakes up | ||
2509 | * a reader, hence a deadlock (sched, function, and function graph tracers) | ||
2510 | * 2) the function tracers trace all functions; we don't want | ||
2511 | * the overhead of calling wake_up and friends | ||
2512 | * (and tracing them too) | ||
2513 | * | ||
2514 | * Anyway, this is really a very primitive wakeup. | ||
2515 | */ | ||
2516 | void poll_wait_pipe(struct trace_iterator *iter) | ||
2517 | { | ||
2518 | set_current_state(TASK_INTERRUPTIBLE); | ||
2519 | /* sleep for 100 msecs, and try again. */ | ||
2520 | schedule_timeout(HZ / 10); | ||
2521 | } | ||
2522 | |||
2523 | /* Must be called with trace_types_lock mutex held. */ | ||
2524 | static int tracing_wait_pipe(struct file *filp) | ||
2525 | { | ||
2526 | struct trace_iterator *iter = filp->private_data; | ||
3193 | 2527 | ||
3194 | waitagain: | ||
3195 | sret = 0; | ||
3196 | while (trace_empty(iter)) { | 2528 | while (trace_empty(iter)) { |
3197 | 2529 | ||
3198 | if ((filp->f_flags & O_NONBLOCK)) { | 2530 | if ((filp->f_flags & O_NONBLOCK)) { |
3199 | sret = -EAGAIN; | 2531 | return -EAGAIN; |
3200 | goto out; | ||
3201 | } | 2532 | } |
3202 | 2533 | ||
3203 | /* | 2534 | mutex_unlock(&iter->mutex); |
3204 | * This is a make-shift waitqueue. The reason we don't use | ||
3205 | * an actual wait queue is because: | ||
3206 | * 1) we only ever have one waiter | ||
3207 | * 2) the tracing, traces all functions, we don't want | ||
3208 | * the overhead of calling wake_up and friends | ||
3209 | * (and tracing them too) | ||
3210 | * Anyway, this is really very primitive wakeup. | ||
3211 | */ | ||
3212 | set_current_state(TASK_INTERRUPTIBLE); | ||
3213 | iter->tr->waiter = current; | ||
3214 | |||
3215 | mutex_unlock(&trace_types_lock); | ||
3216 | |||
3217 | /* sleep for 100 msecs, and try again. */ | ||
3218 | schedule_timeout(HZ/10); | ||
3219 | 2535 | ||
3220 | mutex_lock(&trace_types_lock); | 2536 | iter->trace->wait_pipe(iter); |
3221 | |||
3222 | iter->tr->waiter = NULL; | ||
3223 | 2537 | ||
3224 | if (signal_pending(current)) { | 2538 | mutex_lock(&iter->mutex); |
3225 | sret = -EINTR; | ||
3226 | goto out; | ||
3227 | } | ||
3228 | 2539 | ||
3229 | if (iter->trace != current_trace) | 2540 | if (signal_pending(current)) |
3230 | goto out; | 2541 | return -EINTR; |
3231 | 2542 | ||
3232 | /* | 2543 | /* |
3233 | * We block until we read something and tracing is disabled. | 2544 | * We block until we read something and tracing is disabled. |
@@ -3240,13 +2551,59 @@ waitagain: | |||
3240 | */ | 2551 | */ |
3241 | if (!tracer_enabled && iter->pos) | 2552 | if (!tracer_enabled && iter->pos) |
3242 | break; | 2553 | break; |
2554 | } | ||
2555 | |||
2556 | return 1; | ||
2557 | } | ||
2558 | |||
2559 | /* | ||
2560 | * Consumer reader. | ||
2561 | */ | ||
2562 | static ssize_t | ||
2563 | tracing_read_pipe(struct file *filp, char __user *ubuf, | ||
2564 | size_t cnt, loff_t *ppos) | ||
2565 | { | ||
2566 | struct trace_iterator *iter = filp->private_data; | ||
2567 | static struct tracer *old_tracer; | ||
2568 | ssize_t sret; | ||
2569 | |||
2570 | /* return any leftover data */ | ||
2571 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | ||
2572 | if (sret != -EBUSY) | ||
2573 | return sret; | ||
2574 | |||
2575 | trace_seq_init(&iter->seq); | ||
2576 | |||
2577 | /* copy the tracer to avoid using a global lock all around */ | ||
2578 | mutex_lock(&trace_types_lock); | ||
2579 | if (unlikely(old_tracer != current_trace && current_trace)) { | ||
2580 | old_tracer = current_trace; | ||
2581 | *iter->trace = *current_trace; | ||
2582 | } | ||
2583 | mutex_unlock(&trace_types_lock); | ||
3243 | 2584 | ||
3244 | continue; | 2585 | /* |
2586 | * Avoid more than one consumer on a single file descriptor. | ||
2587 | * This is just a matter of trace coherency; the ring buffer itself | ||
2588 | * is protected. | ||
2589 | */ | ||
2590 | mutex_lock(&iter->mutex); | ||
2591 | if (iter->trace->read) { | ||
2592 | sret = iter->trace->read(iter, filp, ubuf, cnt, ppos); | ||
2593 | if (sret) | ||
2594 | goto out; | ||
3245 | } | 2595 | } |
3246 | 2596 | ||
2597 | waitagain: | ||
2598 | sret = tracing_wait_pipe(filp); | ||
2599 | if (sret <= 0) | ||
2600 | goto out; | ||
2601 | |||
3247 | /* stop when tracing is finished */ | 2602 | /* stop when tracing is finished */ |
3248 | if (trace_empty(iter)) | 2603 | if (trace_empty(iter)) { |
2604 | sret = 0; | ||
3249 | goto out; | 2605 | goto out; |
2606 | } | ||
3250 | 2607 | ||
3251 | if (cnt >= PAGE_SIZE) | 2608 | if (cnt >= PAGE_SIZE) |
3252 | cnt = PAGE_SIZE - 1; | 2609 | cnt = PAGE_SIZE - 1; |
@@ -3267,8 +2624,8 @@ waitagain: | |||
3267 | iter->seq.len = len; | 2624 | iter->seq.len = len; |
3268 | break; | 2625 | break; |
3269 | } | 2626 | } |
3270 | 2627 | if (ret != TRACE_TYPE_NO_CONSUME) | |
3271 | trace_consume(iter); | 2628 | trace_consume(iter); |
3272 | 2629 | ||
3273 | if (iter->seq.len >= cnt) | 2630 | if (iter->seq.len >= cnt) |
3274 | break; | 2631 | break; |
@@ -3277,7 +2634,7 @@ waitagain: | |||
3277 | /* Now copy what we have to the user */ | 2634 | /* Now copy what we have to the user */ |
3278 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); | 2635 | sret = trace_seq_to_user(&iter->seq, ubuf, cnt); |
3279 | if (iter->seq.readpos >= iter->seq.len) | 2636 | if (iter->seq.readpos >= iter->seq.len) |
3280 | trace_seq_reset(&iter->seq); | 2637 | trace_seq_init(&iter->seq); |
3281 | 2638 | ||
3282 | /* | 2639 | /* |
3283 | * If there was nothing to send to user, in spite of consuming trace | 2640 | * If there was nothing to send to user, in spite of consuming trace |
@@ -3287,11 +2644,148 @@ waitagain: | |||
3287 | goto waitagain; | 2644 | goto waitagain; |
3288 | 2645 | ||
3289 | out: | 2646 | out: |
3290 | mutex_unlock(&trace_types_lock); | 2647 | mutex_unlock(&iter->mutex); |
3291 | 2648 | ||
3292 | return sret; | 2649 | return sret; |
3293 | } | 2650 | } |
3294 | 2651 | ||
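
tracing_read_pipe() is the consuming side: entries it hands out disappear from the ring buffer, an empty buffer parks the caller in tracing_wait_pipe() via the tracer's wait_pipe() callback, and O_NONBLOCK readers get -EAGAIN instead. A minimal blocking consumer, as a sketch assuming the usual debugfs mount point:

    /* Hypothetical sketch: a blocking trace_pipe consumer. */
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/sys/kernel/debug/tracing/trace_pipe",
                          O_RDONLY);

            if (fd < 0)
                    return 1;
            /* read() sleeps until entries arrive, then consumes them */
            while ((n = read(fd, buf, sizeof(buf))) > 0)
                    write(STDOUT_FILENO, buf, n);
            close(fd);
            return 0;
    }
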
2652 | static void tracing_pipe_buf_release(struct pipe_inode_info *pipe, | ||
2653 | struct pipe_buffer *buf) | ||
2654 | { | ||
2655 | __free_page(buf->page); | ||
2656 | } | ||
2657 | |||
2658 | static void tracing_spd_release_pipe(struct splice_pipe_desc *spd, | ||
2659 | unsigned int idx) | ||
2660 | { | ||
2661 | __free_page(spd->pages[idx]); | ||
2662 | } | ||
2663 | |||
2664 | static struct pipe_buf_operations tracing_pipe_buf_ops = { | ||
2665 | .can_merge = 0, | ||
2666 | .map = generic_pipe_buf_map, | ||
2667 | .unmap = generic_pipe_buf_unmap, | ||
2668 | .confirm = generic_pipe_buf_confirm, | ||
2669 | .release = tracing_pipe_buf_release, | ||
2670 | .steal = generic_pipe_buf_steal, | ||
2671 | .get = generic_pipe_buf_get, | ||
2672 | }; | ||
2673 | |||
2674 | static size_t | ||
2675 | tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter) | ||
2676 | { | ||
2677 | size_t count; | ||
2678 | int ret; | ||
2679 | |||
2680 | /* Seq buffer is page-sized, exactly what we need. */ | ||
2681 | for (;;) { | ||
2682 | count = iter->seq.len; | ||
2683 | ret = print_trace_line(iter); | ||
2684 | count = iter->seq.len - count; | ||
2685 | if (rem < count) { | ||
2686 | rem = 0; | ||
2687 | iter->seq.len -= count; | ||
2688 | break; | ||
2689 | } | ||
2690 | if (ret == TRACE_TYPE_PARTIAL_LINE) { | ||
2691 | iter->seq.len -= count; | ||
2692 | break; | ||
2693 | } | ||
2694 | |||
2695 | trace_consume(iter); | ||
2696 | rem -= count; | ||
2697 | if (!find_next_entry_inc(iter)) { | ||
2698 | rem = 0; | ||
2699 | iter->ent = NULL; | ||
2700 | break; | ||
2701 | } | ||
2702 | } | ||
2703 | |||
2704 | return rem; | ||
2705 | } | ||
2706 | |||
2707 | static ssize_t tracing_splice_read_pipe(struct file *filp, | ||
2708 | loff_t *ppos, | ||
2709 | struct pipe_inode_info *pipe, | ||
2710 | size_t len, | ||
2711 | unsigned int flags) | ||
2712 | { | ||
2713 | struct page *pages[PIPE_BUFFERS]; | ||
2714 | struct partial_page partial[PIPE_BUFFERS]; | ||
2715 | struct trace_iterator *iter = filp->private_data; | ||
2716 | struct splice_pipe_desc spd = { | ||
2717 | .pages = pages, | ||
2718 | .partial = partial, | ||
2719 | .nr_pages = 0, /* This gets updated below. */ | ||
2720 | .flags = flags, | ||
2721 | .ops = &tracing_pipe_buf_ops, | ||
2722 | .spd_release = tracing_spd_release_pipe, | ||
2723 | }; | ||
2724 | static struct tracer *old_tracer; | ||
2725 | ssize_t ret; | ||
2726 | size_t rem; | ||
2727 | unsigned int i; | ||
2728 | |||
2729 | /* copy the tracer to avoid using a global lock all around */ | ||
2730 | mutex_lock(&trace_types_lock); | ||
2731 | if (unlikely(old_tracer != current_trace && current_trace)) { | ||
2732 | old_tracer = current_trace; | ||
2733 | *iter->trace = *current_trace; | ||
2734 | } | ||
2735 | mutex_unlock(&trace_types_lock); | ||
2736 | |||
2737 | mutex_lock(&iter->mutex); | ||
2738 | |||
2739 | if (iter->trace->splice_read) { | ||
2740 | ret = iter->trace->splice_read(iter, filp, | ||
2741 | ppos, pipe, len, flags); | ||
2742 | if (ret) | ||
2743 | goto out_err; | ||
2744 | } | ||
2745 | |||
2746 | ret = tracing_wait_pipe(filp); | ||
2747 | if (ret <= 0) | ||
2748 | goto out_err; | ||
2749 | |||
2750 | if (!iter->ent && !find_next_entry_inc(iter)) { | ||
2751 | ret = -EFAULT; | ||
2752 | goto out_err; | ||
2753 | } | ||
2754 | |||
2755 | /* Fill as many pages as possible. */ | ||
2756 | for (i = 0, rem = len; i < PIPE_BUFFERS && rem; i++) { | ||
2757 | pages[i] = alloc_page(GFP_KERNEL); | ||
2758 | if (!pages[i]) | ||
2759 | break; | ||
2760 | |||
2761 | rem = tracing_fill_pipe_page(rem, iter); | ||
2762 | |||
2763 | /* Copy the data into the page, so we can start over. */ | ||
2764 | ret = trace_seq_to_buffer(&iter->seq, | ||
2765 | page_address(pages[i]), | ||
2766 | iter->seq.len); | ||
2767 | if (ret < 0) { | ||
2768 | __free_page(pages[i]); | ||
2769 | break; | ||
2770 | } | ||
2771 | partial[i].offset = 0; | ||
2772 | partial[i].len = iter->seq.len; | ||
2773 | |||
2774 | trace_seq_init(&iter->seq); | ||
2775 | } | ||
2776 | |||
2777 | mutex_unlock(&iter->mutex); | ||
2778 | |||
2779 | spd.nr_pages = i; | ||
2780 | |||
2781 | return splice_to_pipe(pipe, &spd); | ||
2782 | |||
2783 | out_err: | ||
2784 | mutex_unlock(&iter->mutex); | ||
2785 | |||
2786 | return ret; | ||
2787 | } | ||
2788 | |||
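
tracing_fill_pipe_page() renders whole lines into the page-sized trace_seq buffer, and tracing_splice_read_pipe() hands the filled pages to splice_to_pipe(), so formatted trace text can reach another pipe without a round trip through a userspace buffer. A consumer sketch (stdout must be a pipe, e.g. run piped into cat; the 64 KiB chunk size is arbitrary):

    /* Hypothetical sketch: splice trace text into a pipe. */
    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/sys/kernel/debug/tracing/trace_pipe",
                          O_RDONLY);
            ssize_t n;

            if (fd < 0)
                    return 1;
            do {    /* kernel side: tracing_splice_read_pipe() above */
                    n = splice(fd, NULL, STDOUT_FILENO, NULL, 65536, 0);
            } while (n > 0);
            close(fd);
            return n < 0;
    }
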
3295 | static ssize_t | 2789 | static ssize_t |
3296 | tracing_entries_read(struct file *filp, char __user *ubuf, | 2790 | tracing_entries_read(struct file *filp, char __user *ubuf, |
3297 | size_t cnt, loff_t *ppos) | 2791 | size_t cnt, loff_t *ppos) |
@@ -3455,6 +2949,7 @@ static struct file_operations tracing_pipe_fops = { | |||
3455 | .open = tracing_open_pipe, | 2949 | .open = tracing_open_pipe, |
3456 | .poll = tracing_poll_pipe, | 2950 | .poll = tracing_poll_pipe, |
3457 | .read = tracing_read_pipe, | 2951 | .read = tracing_read_pipe, |
2952 | .splice_read = tracing_splice_read_pipe, | ||
3458 | .release = tracing_release_pipe, | 2953 | .release = tracing_release_pipe, |
3459 | }; | 2954 | }; |
3460 | 2955 | ||
@@ -3469,6 +2964,251 @@ static struct file_operations tracing_mark_fops = { | |||
3469 | .write = tracing_mark_write, | 2964 | .write = tracing_mark_write, |
3470 | }; | 2965 | }; |
3471 | 2966 | ||
2967 | struct ftrace_buffer_info { | ||
2968 | struct trace_array *tr; | ||
2969 | void *spare; | ||
2970 | int cpu; | ||
2971 | unsigned int read; | ||
2972 | }; | ||
2973 | |||
2974 | static int tracing_buffers_open(struct inode *inode, struct file *filp) | ||
2975 | { | ||
2976 | int cpu = (int)(long)inode->i_private; | ||
2977 | struct ftrace_buffer_info *info; | ||
2978 | |||
2979 | if (tracing_disabled) | ||
2980 | return -ENODEV; | ||
2981 | |||
2982 | info = kzalloc(sizeof(*info), GFP_KERNEL); | ||
2983 | if (!info) | ||
2984 | return -ENOMEM; | ||
2985 | |||
2986 | info->tr = &global_trace; | ||
2987 | info->cpu = cpu; | ||
2988 | info->spare = ring_buffer_alloc_read_page(info->tr->buffer); | ||
2989 | if (!info->spare) | ||
2990 | goto out; | ||
2991 | /* Force reading ring buffer for first read */ | ||
2992 | info->read = (unsigned int)-1; | ||
2993 | |||
2994 | filp->private_data = info; | ||
2995 | |||
2996 | return 0; | ||
2997 | |||
2998 | out: | ||
2999 | kfree(info); | ||
3000 | return -ENOMEM; | ||
3001 | } | ||
3002 | |||
3003 | static ssize_t | ||
3004 | tracing_buffers_read(struct file *filp, char __user *ubuf, | ||
3005 | size_t count, loff_t *ppos) | ||
3006 | { | ||
3007 | struct ftrace_buffer_info *info = filp->private_data; | ||
3008 | unsigned int pos; | ||
3009 | ssize_t ret; | ||
3010 | size_t size; | ||
3011 | |||
3012 | if (!count) | ||
3013 | return 0; | ||
3014 | |||
3015 | /* Is there data left over from a previous read? */ | ||
3016 | if (info->read < PAGE_SIZE) | ||
3017 | goto read; | ||
3018 | |||
3019 | info->read = 0; | ||
3020 | |||
3021 | ret = ring_buffer_read_page(info->tr->buffer, | ||
3022 | &info->spare, | ||
3023 | count, | ||
3024 | info->cpu, 0); | ||
3025 | if (ret < 0) | ||
3026 | return 0; | ||
3027 | |||
3028 | pos = ring_buffer_page_len(info->spare); | ||
3029 | |||
3030 | if (pos < PAGE_SIZE) | ||
3031 | memset(info->spare + pos, 0, PAGE_SIZE - pos); | ||
3032 | |||
3033 | read: | ||
3034 | size = PAGE_SIZE - info->read; | ||
3035 | if (size > count) | ||
3036 | size = count; | ||
3037 | |||
3038 | ret = copy_to_user(ubuf, info->spare + info->read, size); | ||
3039 | if (ret == size) | ||
3040 | return -EFAULT; | ||
3041 | size -= ret; | ||
3042 | |||
3043 | *ppos += size; | ||
3044 | info->read += size; | ||
3045 | |||
3046 | return size; | ||
3047 | } | ||
3048 | |||
3049 | static int tracing_buffers_release(struct inode *inode, struct file *file) | ||
3050 | { | ||
3051 | struct ftrace_buffer_info *info = file->private_data; | ||
3052 | |||
3053 | ring_buffer_free_read_page(info->tr->buffer, info->spare); | ||
3054 | kfree(info); | ||
3055 | |||
3056 | return 0; | ||
3057 | } | ||
3058 | |||
3059 | struct buffer_ref { | ||
3060 | struct ring_buffer *buffer; | ||
3061 | void *page; | ||
3062 | int ref; | ||
3063 | }; | ||
3064 | |||
3065 | static void buffer_pipe_buf_release(struct pipe_inode_info *pipe, | ||
3066 | struct pipe_buffer *buf) | ||
3067 | { | ||
3068 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | ||
3069 | |||
3070 | if (--ref->ref) | ||
3071 | return; | ||
3072 | |||
3073 | ring_buffer_free_read_page(ref->buffer, ref->page); | ||
3074 | kfree(ref); | ||
3075 | buf->private = 0; | ||
3076 | } | ||
3077 | |||
3078 | static int buffer_pipe_buf_steal(struct pipe_inode_info *pipe, | ||
3079 | struct pipe_buffer *buf) | ||
3080 | { | ||
3081 | return 1; | ||
3082 | } | ||
3083 | |||
3084 | static void buffer_pipe_buf_get(struct pipe_inode_info *pipe, | ||
3085 | struct pipe_buffer *buf) | ||
3086 | { | ||
3087 | struct buffer_ref *ref = (struct buffer_ref *)buf->private; | ||
3088 | |||
3089 | ref->ref++; | ||
3090 | } | ||
3091 | |||
3092 | /* Pipe buffer operations for a buffer. */ | ||
3093 | static struct pipe_buf_operations buffer_pipe_buf_ops = { | ||
3094 | .can_merge = 0, | ||
3095 | .map = generic_pipe_buf_map, | ||
3096 | .unmap = generic_pipe_buf_unmap, | ||
3097 | .confirm = generic_pipe_buf_confirm, | ||
3098 | .release = buffer_pipe_buf_release, | ||
3099 | .steal = buffer_pipe_buf_steal, | ||
3100 | .get = buffer_pipe_buf_get, | ||
3101 | }; | ||
3102 | |||
3103 | /* | ||
3104 | * Callback from splice_to_pipe(), if we need to release some pages | ||
3105 | * at the end of the spd in case we errored out while filling the pipe. | ||
3106 | */ | ||
3107 | static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i) | ||
3108 | { | ||
3109 | struct buffer_ref *ref = | ||
3110 | (struct buffer_ref *)spd->partial[i].private; | ||
3111 | |||
3112 | if (--ref->ref) | ||
3113 | return; | ||
3114 | |||
3115 | ring_buffer_free_read_page(ref->buffer, ref->page); | ||
3116 | kfree(ref); | ||
3117 | spd->partial[i].private = 0; | ||
3118 | } | ||
3119 | |||
3120 | static ssize_t | ||
3121 | tracing_buffers_splice_read(struct file *file, loff_t *ppos, | ||
3122 | struct pipe_inode_info *pipe, size_t len, | ||
3123 | unsigned int flags) | ||
3124 | { | ||
3125 | struct ftrace_buffer_info *info = file->private_data; | ||
3126 | struct partial_page partial[PIPE_BUFFERS]; | ||
3127 | struct page *pages[PIPE_BUFFERS]; | ||
3128 | struct splice_pipe_desc spd = { | ||
3129 | .pages = pages, | ||
3130 | .partial = partial, | ||
3131 | .flags = flags, | ||
3132 | .ops = &buffer_pipe_buf_ops, | ||
3133 | .spd_release = buffer_spd_release, | ||
3134 | }; | ||
3135 | struct buffer_ref *ref; | ||
3136 | int size, i; | ||
3137 | ssize_t ret; | ||
3138 | |||
3139 | /* | ||
3140 | * We can't seek on a buffer input | ||
3141 | */ | ||
3142 | if (unlikely(*ppos)) | ||
3143 | return -ESPIPE; | ||
3144 | |||
3145 | |||
3146 | for (i = 0; i < PIPE_BUFFERS && len; i++, len -= size) { | ||
3147 | struct page *page; | ||
3148 | int r; | ||
3149 | |||
3150 | ref = kzalloc(sizeof(*ref), GFP_KERNEL); | ||
3151 | if (!ref) | ||
3152 | break; | ||
3153 | |||
3154 | ref->buffer = info->tr->buffer; | ||
3155 | ref->page = ring_buffer_alloc_read_page(ref->buffer); | ||
3156 | if (!ref->page) { | ||
3157 | kfree(ref); | ||
3158 | break; | ||
3159 | } | ||
3160 | |||
3161 | r = ring_buffer_read_page(ref->buffer, &ref->page, | ||
3162 | len, info->cpu, 0); | ||
3163 | if (r < 0) { | ||
3164 | ring_buffer_free_read_page(ref->buffer, | ||
3165 | ref->page); | ||
3166 | kfree(ref); | ||
3167 | break; | ||
3168 | } | ||
3169 | |||
3170 | /* | ||
3171 | * zero out any leftover data; this is going to | ||
3172 | * user land. | ||
3173 | */ | ||
3174 | size = ring_buffer_page_len(ref->page); | ||
3175 | if (size < PAGE_SIZE) | ||
3176 | memset(ref->page + size, 0, PAGE_SIZE - size); | ||
3177 | |||
3178 | page = virt_to_page(ref->page); | ||
3179 | |||
3180 | spd.pages[i] = page; | ||
3181 | spd.partial[i].len = PAGE_SIZE; | ||
3182 | spd.partial[i].offset = 0; | ||
3183 | spd.partial[i].private = (unsigned long)ref; | ||
3184 | spd.nr_pages++; | ||
3185 | } | ||
3186 | |||
3187 | spd.nr_pages = i; | ||
3188 | |||
3189 | /* did we read anything? */ | ||
3190 | if (!spd.nr_pages) { | ||
3191 | if (flags & SPLICE_F_NONBLOCK) | ||
3192 | ret = -EAGAIN; | ||
3193 | else | ||
3194 | ret = 0; | ||
3195 | /* TODO: block */ | ||
3196 | return ret; | ||
3197 | } | ||
3198 | |||
3199 | ret = splice_to_pipe(pipe, &spd); | ||
3200 | |||
3201 | return ret; | ||
3202 | } | ||
3203 | |||
3204 | static const struct file_operations tracing_buffers_fops = { | ||
3205 | .open = tracing_buffers_open, | ||
3206 | .read = tracing_buffers_read, | ||
3207 | .release = tracing_buffers_release, | ||
3208 | .splice_read = tracing_buffers_splice_read, | ||
3209 | .llseek = no_llseek, | ||
3210 | }; | ||
3211 | |||
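
Unlike trace_pipe, these files export raw ring buffer pages: each read drains one page through ring_buffer_read_page() and zero-fills the unused tail, so consumers should read in page-sized units. A capture sketch (the 4096-byte page size and the debugfs path are assumptions):

    /* Hypothetical sketch: capture raw ring buffer pages for cpu 0. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char page[4096];
            ssize_t n;
            int fd = open("/sys/kernel/debug/tracing/binary_buffers/0",
                          O_RDONLY);

            if (fd < 0)
                    return 1;
            while ((n = read(fd, page, sizeof(page))) > 0)
                    fwrite(page, 1, n, stdout); /* binary, not text */
            close(fd);
            return 0;
    }
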
3472 | #ifdef CONFIG_DYNAMIC_FTRACE | 3212 | #ifdef CONFIG_DYNAMIC_FTRACE |
3473 | 3213 | ||
3474 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) | 3214 | int __weak ftrace_arch_read_dyn_info(char *buf, int size) |
@@ -3526,15 +3266,346 @@ struct dentry *tracing_init_dentry(void) | |||
3526 | return d_tracer; | 3266 | return d_tracer; |
3527 | } | 3267 | } |
3528 | 3268 | ||
3269 | static struct dentry *d_percpu; | ||
3270 | |||
3271 | struct dentry *tracing_dentry_percpu(void) | ||
3272 | { | ||
3273 | static int once; | ||
3274 | struct dentry *d_tracer; | ||
3275 | |||
3276 | if (d_percpu) | ||
3277 | return d_percpu; | ||
3278 | |||
3279 | d_tracer = tracing_init_dentry(); | ||
3280 | |||
3281 | if (!d_tracer) | ||
3282 | return NULL; | ||
3283 | |||
3284 | d_percpu = debugfs_create_dir("per_cpu", d_tracer); | ||
3285 | |||
3286 | if (!d_percpu && !once) { | ||
3287 | once = 1; | ||
3288 | pr_warning("Could not create debugfs directory 'per_cpu'\n"); | ||
3289 | return NULL; | ||
3290 | } | ||
3291 | |||
3292 | return d_percpu; | ||
3293 | } | ||
3294 | |||
3295 | static void tracing_init_debugfs_percpu(long cpu) | ||
3296 | { | ||
3297 | struct dentry *d_percpu = tracing_dentry_percpu(); | ||
3298 | struct dentry *entry, *d_cpu; | ||
3299 | /* strlen("cpu") + up to 3 decimal digits + '\0' */ | ||
3300 | char cpu_dir[7]; | ||
3301 | |||
3302 | if (cpu > 999 || cpu < 0) | ||
3303 | return; | ||
3304 | |||
3305 | sprintf(cpu_dir, "cpu%ld", cpu); | ||
3306 | d_cpu = debugfs_create_dir(cpu_dir, d_percpu); | ||
3307 | if (!d_cpu) { | ||
3308 | pr_warning("Could not create debugfs '%s' entry\n", cpu_dir); | ||
3309 | return; | ||
3310 | } | ||
3311 | |||
3312 | /* per cpu trace_pipe */ | ||
3313 | entry = debugfs_create_file("trace_pipe", 0444, d_cpu, | ||
3314 | (void *) cpu, &tracing_pipe_fops); | ||
3315 | if (!entry) | ||
3316 | pr_warning("Could not create debugfs 'trace_pipe' entry\n"); | ||
3317 | |||
3318 | /* per cpu trace */ | ||
3319 | entry = debugfs_create_file("trace", 0444, d_cpu, | ||
3320 | (void *) cpu, &tracing_fops); | ||
3321 | if (!entry) | ||
3322 | pr_warning("Could not create debugfs 'trace' entry\n"); | ||
3323 | } | ||
3324 | |||
3529 | #ifdef CONFIG_FTRACE_SELFTEST | 3325 | #ifdef CONFIG_FTRACE_SELFTEST |
3530 | /* Let selftest have access to static functions in this file */ | 3326 | /* Let selftest have access to static functions in this file */ |
3531 | #include "trace_selftest.c" | 3327 | #include "trace_selftest.c" |
3532 | #endif | 3328 | #endif |
3533 | 3329 | ||
3330 | struct trace_option_dentry { | ||
3331 | struct tracer_opt *opt; | ||
3332 | struct tracer_flags *flags; | ||
3333 | struct dentry *entry; | ||
3334 | }; | ||
3335 | |||
3336 | static ssize_t | ||
3337 | trace_options_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
3338 | loff_t *ppos) | ||
3339 | { | ||
3340 | struct trace_option_dentry *topt = filp->private_data; | ||
3341 | char *buf; | ||
3342 | |||
3343 | if (topt->flags->val & topt->opt->bit) | ||
3344 | buf = "1\n"; | ||
3345 | else | ||
3346 | buf = "0\n"; | ||
3347 | |||
3348 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | ||
3349 | } | ||
3350 | |||
3351 | static ssize_t | ||
3352 | trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt, | ||
3353 | loff_t *ppos) | ||
3354 | { | ||
3355 | struct trace_option_dentry *topt = filp->private_data; | ||
3356 | unsigned long val; | ||
3357 | char buf[64]; | ||
3358 | int ret; | ||
3359 | |||
3360 | if (cnt >= sizeof(buf)) | ||
3361 | return -EINVAL; | ||
3362 | |||
3363 | if (copy_from_user(&buf, ubuf, cnt)) | ||
3364 | return -EFAULT; | ||
3365 | |||
3366 | buf[cnt] = 0; | ||
3367 | |||
3368 | ret = strict_strtoul(buf, 10, &val); | ||
3369 | if (ret < 0) | ||
3370 | return ret; | ||
3371 | |||
3372 | ret = 0; | ||
3373 | switch (val) { | ||
3374 | case 0: | ||
3375 | /* do nothing if already cleared */ | ||
3376 | if (!(topt->flags->val & topt->opt->bit)) | ||
3377 | break; | ||
3378 | |||
3379 | mutex_lock(&trace_types_lock); | ||
3380 | if (current_trace->set_flag) | ||
3381 | ret = current_trace->set_flag(topt->flags->val, | ||
3382 | topt->opt->bit, 0); | ||
3383 | mutex_unlock(&trace_types_lock); | ||
3384 | if (ret) | ||
3385 | return ret; | ||
3386 | topt->flags->val &= ~topt->opt->bit; | ||
3387 | break; | ||
3388 | case 1: | ||
3389 | /* do nothing if already set */ | ||
3390 | if (topt->flags->val & topt->opt->bit) | ||
3391 | break; | ||
3392 | |||
3393 | mutex_lock(&trace_types_lock); | ||
3394 | if (current_trace->set_flag) | ||
3395 | ret = current_trace->set_flag(topt->flags->val, | ||
3396 | topt->opt->bit, 1); | ||
3397 | mutex_unlock(&trace_types_lock); | ||
3398 | if (ret) | ||
3399 | return ret; | ||
3400 | topt->flags->val |= topt->opt->bit; | ||
3401 | break; | ||
3402 | |||
3403 | default: | ||
3404 | return -EINVAL; | ||
3405 | } | ||
3406 | |||
3407 | *ppos += cnt; | ||
3408 | |||
3409 | return cnt; | ||
3410 | } | ||
3411 | |||
3412 | |||
3413 | static const struct file_operations trace_options_fops = { | ||
3414 | .open = tracing_open_generic, | ||
3415 | .read = trace_options_read, | ||
3416 | .write = trace_options_write, | ||
3417 | }; | ||
3418 | |||
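
Both flavors of option file parse their input with strict_strtoul() and accept only 0 or 1, so toggling a flag is a one-byte write. A sketch (the option name "sym-offset" is merely an assumed example):

    /* Hypothetical sketch: flip a tracing option on or off. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int set_option(const char *opt, int on)
    {
            char path[128];
            int fd;

            snprintf(path, sizeof(path),
                     "/sys/kernel/debug/tracing/options/%s", opt);
            fd = open(path, O_WRONLY);
            if (fd < 0)
                    return -1;
            /* trace_options_write() rejects anything but "0"/"1" */
            write(fd, on ? "1" : "0", 1);
            close(fd);
            return 0;
    }

    int main(void)
    {
            return set_option("sym-offset", 1);
    }
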
3419 | static ssize_t | ||
3420 | trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
3421 | loff_t *ppos) | ||
3422 | { | ||
3423 | long index = (long)filp->private_data; | ||
3424 | char *buf; | ||
3425 | |||
3426 | if (trace_flags & (1 << index)) | ||
3427 | buf = "1\n"; | ||
3428 | else | ||
3429 | buf = "0\n"; | ||
3430 | |||
3431 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | ||
3432 | } | ||
3433 | |||
3434 | static ssize_t | ||
3435 | trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | ||
3436 | loff_t *ppos) | ||
3437 | { | ||
3438 | long index = (long)filp->private_data; | ||
3439 | char buf[64]; | ||
3440 | unsigned long val; | ||
3441 | int ret; | ||
3442 | |||
3443 | if (cnt >= sizeof(buf)) | ||
3444 | return -EINVAL; | ||
3445 | |||
3446 | if (copy_from_user(&buf, ubuf, cnt)) | ||
3447 | return -EFAULT; | ||
3448 | |||
3449 | buf[cnt] = 0; | ||
3450 | |||
3451 | ret = strict_strtoul(buf, 10, &val); | ||
3452 | if (ret < 0) | ||
3453 | return ret; | ||
3454 | |||
3455 | switch (val) { | ||
3456 | case 0: | ||
3457 | trace_flags &= ~(1 << index); | ||
3458 | break; | ||
3459 | case 1: | ||
3460 | trace_flags |= 1 << index; | ||
3461 | break; | ||
3462 | |||
3463 | default: | ||
3464 | return -EINVAL; | ||
3465 | } | ||
3466 | |||
3467 | *ppos += cnt; | ||
3468 | |||
3469 | return cnt; | ||
3470 | } | ||
3471 | |||
3472 | static const struct file_operations trace_options_core_fops = { | ||
3473 | .open = tracing_open_generic, | ||
3474 | .read = trace_options_core_read, | ||
3475 | .write = trace_options_core_write, | ||
3476 | }; | ||
3477 | |||
3478 | static struct dentry *trace_options_init_dentry(void) | ||
3479 | { | ||
3480 | struct dentry *d_tracer; | ||
3481 | static struct dentry *t_options; | ||
3482 | |||
3483 | if (t_options) | ||
3484 | return t_options; | ||
3485 | |||
3486 | d_tracer = tracing_init_dentry(); | ||
3487 | if (!d_tracer) | ||
3488 | return NULL; | ||
3489 | |||
3490 | t_options = debugfs_create_dir("options", d_tracer); | ||
3491 | if (!t_options) { | ||
3492 | pr_warning("Could not create debugfs directory 'options'\n"); | ||
3493 | return NULL; | ||
3494 | } | ||
3495 | |||
3496 | return t_options; | ||
3497 | } | ||
3498 | |||
3499 | static void | ||
3500 | create_trace_option_file(struct trace_option_dentry *topt, | ||
3501 | struct tracer_flags *flags, | ||
3502 | struct tracer_opt *opt) | ||
3503 | { | ||
3504 | struct dentry *t_options; | ||
3505 | struct dentry *entry; | ||
3506 | |||
3507 | t_options = trace_options_init_dentry(); | ||
3508 | if (!t_options) | ||
3509 | return; | ||
3510 | |||
3511 | topt->flags = flags; | ||
3512 | topt->opt = opt; | ||
3513 | |||
3514 | entry = debugfs_create_file(opt->name, 0644, t_options, topt, | ||
3515 | &trace_options_fops); | ||
3516 | |||
3517 | topt->entry = entry; | ||
3518 | |||
3519 | } | ||
3520 | |||
3521 | static struct trace_option_dentry * | ||
3522 | create_trace_option_files(struct tracer *tracer) | ||
3523 | { | ||
3524 | struct trace_option_dentry *topts; | ||
3525 | struct tracer_flags *flags; | ||
3526 | struct tracer_opt *opts; | ||
3527 | int cnt; | ||
3528 | |||
3529 | if (!tracer) | ||
3530 | return NULL; | ||
3531 | |||
3532 | flags = tracer->flags; | ||
3533 | |||
3534 | if (!flags || !flags->opts) | ||
3535 | return NULL; | ||
3536 | |||
3537 | opts = flags->opts; | ||
3538 | |||
3539 | for (cnt = 0; opts[cnt].name; cnt++) | ||
3540 | ; | ||
3541 | |||
3542 | topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL); | ||
3543 | if (!topts) | ||
3544 | return NULL; | ||
3545 | |||
3546 | for (cnt = 0; opts[cnt].name; cnt++) | ||
3547 | create_trace_option_file(&topts[cnt], flags, | ||
3548 | &opts[cnt]); | ||
3549 | |||
3550 | return topts; | ||
3551 | } | ||
3552 | |||
3553 | static void | ||
3554 | destroy_trace_option_files(struct trace_option_dentry *topts) | ||
3555 | { | ||
3556 | int cnt; | ||
3557 | |||
3558 | if (!topts) | ||
3559 | return; | ||
3560 | |||
3561 | for (cnt = 0; topts[cnt].opt; cnt++) { | ||
3562 | if (topts[cnt].entry) | ||
3563 | debugfs_remove(topts[cnt].entry); | ||
3564 | } | ||
3565 | |||
3566 | kfree(topts); | ||
3567 | } | ||
3568 | |||
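
create_trace_option_files() sizes its result by walking opts up to the entry with a NULL name, then allocates one extra zeroed slot so destroy_trace_option_files() can stop at the first element whose opt pointer is NULL. The same sentinel-terminated pattern in miniature, with hypothetical names:

    /* Sketch of the sentinel-terminated array pattern used above. */
    #include <stdlib.h>

    struct opt  { const char *name; };
    struct dent { const struct opt *opt; };

    static struct dent *create(const struct opt *opts)
    {
            struct dent *d;
            int cnt;

            for (cnt = 0; opts[cnt].name; cnt++)
                    ;
            d = calloc(cnt + 1, sizeof(*d)); /* +1: zeroed terminator */
            if (!d)
                    return NULL;
            for (cnt = 0; opts[cnt].name; cnt++)
                    d[cnt].opt = &opts[cnt];
            return d;       /* free with free() after walking to NULL */
    }
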
3569 | static struct dentry * | ||
3570 | create_trace_option_core_file(const char *option, long index) | ||
3571 | { | ||
3572 | struct dentry *t_options; | ||
3573 | struct dentry *entry; | ||
3574 | |||
3575 | t_options = trace_options_init_dentry(); | ||
3576 | if (!t_options) | ||
3577 | return NULL; | ||
3578 | |||
3579 | entry = debugfs_create_file(option, 0644, t_options, (void *)index, | ||
3580 | &trace_options_core_fops); | ||
3581 | |||
3582 | return entry; | ||
3583 | } | ||
3584 | |||
3585 | static __init void create_trace_options_dir(void) | ||
3586 | { | ||
3587 | struct dentry *t_options; | ||
3588 | struct dentry *entry; | ||
3589 | int i; | ||
3590 | |||
3591 | t_options = trace_options_init_dentry(); | ||
3592 | if (!t_options) | ||
3593 | return; | ||
3594 | |||
3595 | for (i = 0; trace_options[i]; i++) { | ||
3596 | entry = create_trace_option_core_file(trace_options[i], i); | ||
3597 | if (!entry) | ||
3598 | pr_warning("Could not create debugfs %s entry\n", | ||
3599 | trace_options[i]); | ||
3600 | } | ||
3601 | } | ||
3602 | |||
3534 | static __init int tracer_init_debugfs(void) | 3603 | static __init int tracer_init_debugfs(void) |
3535 | { | 3604 | { |
3536 | struct dentry *d_tracer; | 3605 | struct dentry *d_tracer; |
3606 | struct dentry *buffers; | ||
3537 | struct dentry *entry; | 3607 | struct dentry *entry; |
3608 | int cpu; | ||
3538 | 3609 | ||
3539 | d_tracer = tracing_init_dentry(); | 3610 | d_tracer = tracing_init_dentry(); |
3540 | 3611 | ||
@@ -3548,18 +3619,15 @@ static __init int tracer_init_debugfs(void) | |||
3548 | if (!entry) | 3619 | if (!entry) |
3549 | pr_warning("Could not create debugfs 'trace_options' entry\n"); | 3620 | pr_warning("Could not create debugfs 'trace_options' entry\n"); |
3550 | 3621 | ||
3622 | create_trace_options_dir(); | ||
3623 | |||
3551 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, | 3624 | entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer, |
3552 | NULL, &tracing_cpumask_fops); | 3625 | NULL, &tracing_cpumask_fops); |
3553 | if (!entry) | 3626 | if (!entry) |
3554 | pr_warning("Could not create debugfs 'tracing_cpumask' entry\n"); | 3627 | pr_warning("Could not create debugfs 'tracing_cpumask' entry\n"); |
3555 | 3628 | ||
3556 | entry = debugfs_create_file("latency_trace", 0444, d_tracer, | ||
3557 | &global_trace, &tracing_lt_fops); | ||
3558 | if (!entry) | ||
3559 | pr_warning("Could not create debugfs 'latency_trace' entry\n"); | ||
3560 | |||
3561 | entry = debugfs_create_file("trace", 0444, d_tracer, | 3629 | entry = debugfs_create_file("trace", 0444, d_tracer, |
3562 | &global_trace, &tracing_fops); | 3630 | (void *) TRACE_PIPE_ALL_CPU, &tracing_fops); |
3563 | if (!entry) | 3631 | if (!entry) |
3564 | pr_warning("Could not create debugfs 'trace' entry\n"); | 3632 | pr_warning("Could not create debugfs 'trace' entry\n"); |
3565 | 3633 | ||
@@ -3590,8 +3658,8 @@ static __init int tracer_init_debugfs(void) | |||
3590 | if (!entry) | 3658 | if (!entry) |
3591 | pr_warning("Could not create debugfs 'README' entry\n"); | 3659 | pr_warning("Could not create debugfs 'README' entry\n"); |
3592 | 3660 | ||
3593 | entry = debugfs_create_file("trace_pipe", 0644, d_tracer, | 3661 | entry = debugfs_create_file("trace_pipe", 0444, d_tracer, |
3594 | NULL, &tracing_pipe_fops); | 3662 | (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops); |
3595 | if (!entry) | 3663 | if (!entry) |
3596 | pr_warning("Could not create debugfs " | 3664 | pr_warning("Could not create debugfs " |
3597 | "'trace_pipe' entry\n"); | 3665 | "'trace_pipe' entry\n"); |
@@ -3608,6 +3676,26 @@ static __init int tracer_init_debugfs(void) | |||
3608 | pr_warning("Could not create debugfs " | 3676 | pr_warning("Could not create debugfs " |
3609 | "'trace_marker' entry\n"); | 3677 | "'trace_marker' entry\n"); |
3610 | 3678 | ||
3679 | buffers = debugfs_create_dir("binary_buffers", d_tracer); | ||
3680 | |||
3681 | if (!buffers) | ||
3682 | pr_warning("Could not create buffers directory\n"); | ||
3683 | else { | ||
3684 | int cpu; | ||
3685 | char buf[64]; | ||
3686 | |||
3687 | for_each_tracing_cpu(cpu) { | ||
3688 | sprintf(buf, "%d", cpu); | ||
3689 | |||
3690 | entry = debugfs_create_file(buf, 0444, buffers, | ||
3691 | (void *)(long)cpu, | ||
3692 | &tracing_buffers_fops); | ||
3693 | if (!entry) | ||
3694 | pr_warning("Could not create debugfs buffers " | ||
3695 | "'%s' entry\n", buf); | ||
3696 | } | ||
3697 | } | ||
3698 | |||
3611 | #ifdef CONFIG_DYNAMIC_FTRACE | 3699 | #ifdef CONFIG_DYNAMIC_FTRACE |
3612 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, | 3700 | entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, |
3613 | &ftrace_update_tot_cnt, | 3701 | &ftrace_update_tot_cnt, |
@@ -3619,12 +3707,16 @@ static __init int tracer_init_debugfs(void) | |||
3619 | #ifdef CONFIG_SYSPROF_TRACER | 3707 | #ifdef CONFIG_SYSPROF_TRACER |
3620 | init_tracer_sysprof_debugfs(d_tracer); | 3708 | init_tracer_sysprof_debugfs(d_tracer); |
3621 | #endif | 3709 | #endif |
3710 | |||
3711 | for_each_tracing_cpu(cpu) | ||
3712 | tracing_init_debugfs_percpu(cpu); | ||
3713 | |||
3622 | return 0; | 3714 | return 0; |
3623 | } | 3715 | } |
3624 | 3716 | ||
3625 | int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) | 3717 | int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) |
3626 | { | 3718 | { |
3627 | static DEFINE_SPINLOCK(trace_buf_lock); | 3719 | static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED; |
3628 | static char trace_buf[TRACE_BUF_SIZE]; | 3720 | static char trace_buf[TRACE_BUF_SIZE]; |
3629 | 3721 | ||
3630 | struct ring_buffer_event *event; | 3722 | struct ring_buffer_event *event; |
@@ -3646,28 +3738,28 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) | |||
3646 | goto out; | 3738 | goto out; |
3647 | 3739 | ||
3648 | pause_graph_tracing(); | 3740 | pause_graph_tracing(); |
3649 | spin_lock_irqsave(&trace_buf_lock, irq_flags); | 3741 | raw_local_irq_save(irq_flags); |
3742 | __raw_spin_lock(&trace_buf_lock); | ||
3650 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); | 3743 | len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args); |
3651 | 3744 | ||
3652 | len = min(len, TRACE_BUF_SIZE-1); | 3745 | len = min(len, TRACE_BUF_SIZE-1); |
3653 | trace_buf[len] = 0; | 3746 | trace_buf[len] = 0; |
3654 | 3747 | ||
3655 | size = sizeof(*entry) + len + 1; | 3748 | size = sizeof(*entry) + len + 1; |
3656 | event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags); | 3749 | event = trace_buffer_lock_reserve(tr, TRACE_PRINT, size, irq_flags, pc); |
3657 | if (!event) | 3750 | if (!event) |
3658 | goto out_unlock; | 3751 | goto out_unlock; |
3659 | entry = ring_buffer_event_data(event); | 3752 | entry = ring_buffer_event_data(event); |
3660 | tracing_generic_entry_update(&entry->ent, irq_flags, pc); | ||
3661 | entry->ent.type = TRACE_PRINT; | ||
3662 | entry->ip = ip; | 3753 | entry->ip = ip; |
3663 | entry->depth = depth; | 3754 | entry->depth = depth; |
3664 | 3755 | ||
3665 | memcpy(&entry->buf, trace_buf, len); | 3756 | memcpy(&entry->buf, trace_buf, len); |
3666 | entry->buf[len] = 0; | 3757 | entry->buf[len] = 0; |
3667 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 3758 | ring_buffer_unlock_commit(tr->buffer, event); |
3668 | 3759 | ||
3669 | out_unlock: | 3760 | out_unlock: |
3670 | spin_unlock_irqrestore(&trace_buf_lock, irq_flags); | 3761 | __raw_spin_unlock(&trace_buf_lock); |
3762 | raw_local_irq_restore(irq_flags); | ||
3671 | unpause_graph_tracing(); | 3763 | unpause_graph_tracing(); |
3672 | out: | 3764 | out: |
3673 | preempt_enable_notrace(); | 3765 | preempt_enable_notrace(); |
@@ -3676,7 +3768,7 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args) | |||
3676 | } | 3768 | } |
3677 | EXPORT_SYMBOL_GPL(trace_vprintk); | 3769 | EXPORT_SYMBOL_GPL(trace_vprintk); |
3678 | 3770 | ||
3679 | int __ftrace_printk(unsigned long ip, const char *fmt, ...) | 3771 | int __trace_printk(unsigned long ip, const char *fmt, ...) |
3680 | { | 3772 | { |
3681 | int ret; | 3773 | int ret; |
3682 | va_list ap; | 3774 | va_list ap; |
@@ -3689,7 +3781,16 @@ int __ftrace_printk(unsigned long ip, const char *fmt, ...) | |||
3689 | va_end(ap); | 3781 | va_end(ap); |
3690 | return ret; | 3782 | return ret; |
3691 | } | 3783 | } |
3692 | EXPORT_SYMBOL_GPL(__ftrace_printk); | 3784 | EXPORT_SYMBOL_GPL(__trace_printk); |
3785 | |||
3786 | int __ftrace_vprintk(unsigned long ip, const char *fmt, va_list ap) | ||
3787 | { | ||
3788 | if (!(trace_flags & TRACE_ITER_PRINTK)) | ||
3789 | return 0; | ||
3790 | |||
3791 | return trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); | ||
3792 | } | ||
3793 | EXPORT_SYMBOL_GPL(__ftrace_vprintk); | ||
3693 | 3794 | ||
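
Because trace_vprintk() now disables interrupts and takes a raw spinlock around the shared format buffer, it can be called from nearly any context. Callers normally go through the printing macros rather than these exported symbols; a hedged kernel-side sketch (the wrapper plumbing and _THIS_IP_ usage are assumed from the rest of this series):

    /* Hypothetical sketch: emit a TRACE_PRINT entry from kernel code. */
    static void report_slow_irq(int irq, unsigned long ns)
    {
            if (ns > 1000000)
                    __trace_printk(_THIS_IP_,
                                   "irq %d took %lu ns\n", irq, ns);
    }
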
3694 | static int trace_panic_handler(struct notifier_block *this, | 3795 | static int trace_panic_handler(struct notifier_block *this, |
3695 | unsigned long event, void *unused) | 3796 | unsigned long event, void *unused) |
@@ -3750,7 +3851,7 @@ trace_printk_seq(struct trace_seq *s) | |||
3750 | 3851 | ||
3751 | printk(KERN_TRACE "%s", s->buffer); | 3852 | printk(KERN_TRACE "%s", s->buffer); |
3752 | 3853 | ||
3753 | trace_seq_reset(s); | 3854 | trace_seq_init(s); |
3754 | } | 3855 | } |
3755 | 3856 | ||
3756 | void ftrace_dump(void) | 3857 | void ftrace_dump(void) |
@@ -3782,8 +3883,10 @@ void ftrace_dump(void) | |||
3782 | 3883 | ||
3783 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); | 3884 | printk(KERN_TRACE "Dumping ftrace buffer:\n"); |
3784 | 3885 | ||
3886 | /* Simulate the iterator */ | ||
3785 | iter.tr = &global_trace; | 3887 | iter.tr = &global_trace; |
3786 | iter.trace = current_trace; | 3888 | iter.trace = current_trace; |
3889 | iter.cpu_file = TRACE_PIPE_ALL_CPU; | ||
3787 | 3890 | ||
3788 | /* | 3891 | /* |
3789 | * We need to stop all tracing on all CPUS to read the | 3892 | * We need to stop all tracing on all CPUS to read the |
@@ -3835,8 +3938,12 @@ __init static int tracer_alloc_buffers(void) | |||
3835 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 3938 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) |
3836 | goto out_free_buffer_mask; | 3939 | goto out_free_buffer_mask; |
3837 | 3940 | ||
3941 | if (!alloc_cpumask_var(&tracing_reader_cpumask, GFP_KERNEL)) | ||
3942 | goto out_free_tracing_cpumask; | ||
3943 | |||
3838 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 3944 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); |
3839 | cpumask_copy(tracing_cpumask, cpu_all_mask); | 3945 | cpumask_copy(tracing_cpumask, cpu_all_mask); |
3946 | cpumask_clear(tracing_reader_cpumask); | ||
3840 | 3947 | ||
3841 | /* TODO: make the number of buffers hot pluggable with CPUS */ | 3948 | /* TODO: make the number of buffers hot pluggable with CPUS */ |
3842 | global_trace.buffer = ring_buffer_alloc(trace_buf_size, | 3949 | global_trace.buffer = ring_buffer_alloc(trace_buf_size, |
@@ -3871,14 +3978,10 @@ __init static int tracer_alloc_buffers(void) | |||
3871 | trace_init_cmdlines(); | 3978 | trace_init_cmdlines(); |
3872 | 3979 | ||
3873 | register_tracer(&nop_trace); | 3980 | register_tracer(&nop_trace); |
3981 | current_trace = &nop_trace; | ||
3874 | #ifdef CONFIG_BOOT_TRACER | 3982 | #ifdef CONFIG_BOOT_TRACER |
3875 | register_tracer(&boot_tracer); | 3983 | register_tracer(&boot_tracer); |
3876 | current_trace = &boot_tracer; | ||
3877 | current_trace->init(&global_trace); | ||
3878 | #else | ||
3879 | current_trace = &nop_trace; | ||
3880 | #endif | 3984 | #endif |
3881 | |||
3882 | /* All seems OK, enable tracing */ | 3985 | /* All seems OK, enable tracing */ |
3883 | tracing_disabled = 0; | 3986 | tracing_disabled = 0; |
3884 | 3987 | ||
@@ -3889,11 +3992,34 @@ __init static int tracer_alloc_buffers(void) | |||
3889 | ret = 0; | 3992 | ret = 0; |
3890 | 3993 | ||
3891 | out_free_cpumask: | 3994 | out_free_cpumask: |
3995 | free_cpumask_var(tracing_reader_cpumask); | ||
3996 | out_free_tracing_cpumask: | ||
3892 | free_cpumask_var(tracing_cpumask); | 3997 | free_cpumask_var(tracing_cpumask); |
3893 | out_free_buffer_mask: | 3998 | out_free_buffer_mask: |
3894 | free_cpumask_var(tracing_buffer_mask); | 3999 | free_cpumask_var(tracing_buffer_mask); |
3895 | out: | 4000 | out: |
3896 | return ret; | 4001 | return ret; |
3897 | } | 4002 | } |
4003 | |||
4004 | __init static int clear_boot_tracer(void) | ||
4005 | { | ||
4006 | /* | ||
4007 | * The default bootup tracer name is stored in an init section. | ||
4008 | * This function is called at late_initcall time. If we did not | ||
4009 | * find the boot tracer, then clear it out, to prevent | ||
4010 | * later registration from accessing the buffer that is | ||
4011 | * about to be freed. | ||
4012 | */ | ||
4013 | if (!default_bootup_tracer) | ||
4014 | return 0; | ||
4015 | |||
4016 | printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n", | ||
4017 | default_bootup_tracer); | ||
4018 | default_bootup_tracer = NULL; | ||
4019 | |||
4020 | return 0; | ||
4021 | } | ||
4022 | |||
3898 | early_initcall(tracer_alloc_buffers); | 4023 | early_initcall(tracer_alloc_buffers); |
3899 | fs_initcall(tracer_init_debugfs); | 4024 | fs_initcall(tracer_init_debugfs); |
4025 | late_initcall(clear_boot_tracer); | ||
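
The three initcall levels encode the required ordering: the ring buffers exist before anything else, the debugfs files appear once the fs layer is up, and the bootup-tracer check runs last, after every built-in tracer has had its chance to register. The same ordering in miniature, with hypothetical names:

    /* Sketch of initcall ordering, mirroring the tail of trace.c. */
    #include <linux/init.h>

    static __init int alloc_step(void)   { return 0; } /* runs first */
    static __init int debugfs_step(void) { return 0; }
    static __init int cleanup_step(void) { return 0; } /* runs last */

    early_initcall(alloc_step);
    fs_initcall(debugfs_step);
    late_initcall(cleanup_step);
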
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 4d3d381bfd95..8beff03fda68 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -9,6 +9,8 @@ | |||
9 | #include <linux/mmiotrace.h> | 9 | #include <linux/mmiotrace.h> |
10 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
11 | #include <trace/boot.h> | 11 | #include <trace/boot.h> |
12 | #include <trace/kmemtrace.h> | ||
13 | #include <trace/power.h> | ||
12 | 14 | ||
13 | enum trace_type { | 15 | enum trace_type { |
14 | __TRACE_FIRST_TYPE = 0, | 16 | __TRACE_FIRST_TYPE = 0, |
@@ -16,7 +18,6 @@ enum trace_type { | |||
16 | TRACE_FN, | 18 | TRACE_FN, |
17 | TRACE_CTX, | 19 | TRACE_CTX, |
18 | TRACE_WAKE, | 20 | TRACE_WAKE, |
19 | TRACE_CONT, | ||
20 | TRACE_STACK, | 21 | TRACE_STACK, |
21 | TRACE_PRINT, | 22 | TRACE_PRINT, |
22 | TRACE_SPECIAL, | 23 | TRACE_SPECIAL, |
@@ -29,9 +30,12 @@ enum trace_type { | |||
29 | TRACE_GRAPH_ENT, | 30 | TRACE_GRAPH_ENT, |
30 | TRACE_USER_STACK, | 31 | TRACE_USER_STACK, |
31 | TRACE_HW_BRANCHES, | 32 | TRACE_HW_BRANCHES, |
33 | TRACE_KMEM_ALLOC, | ||
34 | TRACE_KMEM_FREE, | ||
32 | TRACE_POWER, | 35 | TRACE_POWER, |
36 | TRACE_BLK, | ||
33 | 37 | ||
34 | __TRACE_LAST_TYPE | 38 | __TRACE_LAST_TYPE, |
35 | }; | 39 | }; |
36 | 40 | ||
37 | /* | 41 | /* |
@@ -42,7 +46,6 @@ enum trace_type { | |||
42 | */ | 46 | */ |
43 | struct trace_entry { | 47 | struct trace_entry { |
44 | unsigned char type; | 48 | unsigned char type; |
45 | unsigned char cpu; | ||
46 | unsigned char flags; | 49 | unsigned char flags; |
47 | unsigned char preempt_count; | 50 | unsigned char preempt_count; |
48 | int pid; | 51 | int pid; |
@@ -60,13 +63,13 @@ struct ftrace_entry { | |||
60 | 63 | ||
61 | /* Function call entry */ | 64 | /* Function call entry */ |
62 | struct ftrace_graph_ent_entry { | 65 | struct ftrace_graph_ent_entry { |
63 | struct trace_entry ent; | 66 | struct trace_entry ent; |
64 | struct ftrace_graph_ent graph_ent; | 67 | struct ftrace_graph_ent graph_ent; |
65 | }; | 68 | }; |
66 | 69 | ||
67 | /* Function return entry */ | 70 | /* Function return entry */ |
68 | struct ftrace_graph_ret_entry { | 71 | struct ftrace_graph_ret_entry { |
69 | struct trace_entry ent; | 72 | struct trace_entry ent; |
70 | struct ftrace_graph_ret ret; | 73 | struct ftrace_graph_ret ret; |
71 | }; | 74 | }; |
72 | extern struct tracer boot_tracer; | 75 | extern struct tracer boot_tracer; |
@@ -112,7 +115,7 @@ struct userstack_entry { | |||
112 | }; | 115 | }; |
113 | 116 | ||
114 | /* | 117 | /* |
115 | * ftrace_printk entry: | 118 | * trace_printk entry: |
116 | */ | 119 | */ |
117 | struct print_entry { | 120 | struct print_entry { |
118 | struct trace_entry ent; | 121 | struct trace_entry ent; |
@@ -170,6 +173,24 @@ struct trace_power { | |||
170 | struct power_trace state_data; | 173 | struct power_trace state_data; |
171 | }; | 174 | }; |
172 | 175 | ||
176 | struct kmemtrace_alloc_entry { | ||
177 | struct trace_entry ent; | ||
178 | enum kmemtrace_type_id type_id; | ||
179 | unsigned long call_site; | ||
180 | const void *ptr; | ||
181 | size_t bytes_req; | ||
182 | size_t bytes_alloc; | ||
183 | gfp_t gfp_flags; | ||
184 | int node; | ||
185 | }; | ||
186 | |||
187 | struct kmemtrace_free_entry { | ||
188 | struct trace_entry ent; | ||
189 | enum kmemtrace_type_id type_id; | ||
190 | unsigned long call_site; | ||
191 | const void *ptr; | ||
192 | }; | ||
193 | |||
173 | /* | 194 | /* |
174 | * trace_flag_type is an enumeration that holds different | 195 | * trace_flag_type is an enumeration that holds different |
175 | * states when a trace occurs. These are: | 196 | * states when a trace occurs. These are: |
@@ -178,7 +199,6 @@ struct trace_power { | |||
178 | * NEED_RESCED - reschedule is requested | 199 | * NEED_RESCED - reschedule is requested |
179 | * HARDIRQ - inside an interrupt handler | 200 | * HARDIRQ - inside an interrupt handler |
180 | * SOFTIRQ - inside a softirq handler | 201 | * SOFTIRQ - inside a softirq handler |
181 | * CONT - multiple entries hold the trace item | ||
182 | */ | 202 | */ |
183 | enum trace_flag_type { | 203 | enum trace_flag_type { |
184 | TRACE_FLAG_IRQS_OFF = 0x01, | 204 | TRACE_FLAG_IRQS_OFF = 0x01, |
@@ -186,7 +206,6 @@ enum trace_flag_type { | |||
186 | TRACE_FLAG_NEED_RESCHED = 0x04, | 206 | TRACE_FLAG_NEED_RESCHED = 0x04, |
187 | TRACE_FLAG_HARDIRQ = 0x08, | 207 | TRACE_FLAG_HARDIRQ = 0x08, |
188 | TRACE_FLAG_SOFTIRQ = 0x10, | 208 | TRACE_FLAG_SOFTIRQ = 0x10, |
189 | TRACE_FLAG_CONT = 0x20, | ||
190 | }; | 209 | }; |
191 | 210 | ||
192 | #define TRACE_BUF_SIZE 1024 | 211 | #define TRACE_BUF_SIZE 1024 |
@@ -198,6 +217,7 @@ enum trace_flag_type { | |||
198 | */ | 217 | */ |
199 | struct trace_array_cpu { | 218 | struct trace_array_cpu { |
200 | atomic_t disabled; | 219 | atomic_t disabled; |
220 | void *buffer_page; /* ring buffer spare */ | ||
201 | 221 | ||
202 | /* these fields get copied into max-trace: */ | 222 | /* these fields get copied into max-trace: */ |
203 | unsigned long trace_idx; | 223 | unsigned long trace_idx; |
@@ -262,7 +282,6 @@ extern void __ftrace_bad_type(void); | |||
262 | do { \ | 282 | do { \ |
263 | IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ | 283 | IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \ |
264 | IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ | 284 | IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \ |
265 | IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \ | ||
266 | IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ | 285 | IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \ |
267 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ | 286 | IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ |
268 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ | 287 | IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ |
@@ -280,6 +299,10 @@ extern void __ftrace_bad_type(void); | |||
280 | TRACE_GRAPH_RET); \ | 299 | TRACE_GRAPH_RET); \ |
281 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ | 300 | IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\ |
282 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ | 301 | IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \ |
302 | IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry, \ | ||
303 | TRACE_KMEM_ALLOC); \ | ||
304 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ | ||
305 | TRACE_KMEM_FREE); \ | ||
283 | __ftrace_bad_type(); \ | 306 | __ftrace_bad_type(); \ |
284 | } while (0) | 307 | } while (0) |
285 | 308 | ||
@@ -287,7 +310,8 @@ extern void __ftrace_bad_type(void); | |||
287 | enum print_line_t { | 310 | enum print_line_t { |
288 | TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */ | 311 | TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */ |
289 | TRACE_TYPE_HANDLED = 1, | 312 | TRACE_TYPE_HANDLED = 1, |
290 | TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */ | 313 | TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */ |
314 | TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ | ||
291 | }; | 315 | }; |
292 | 316 | ||
293 | 317 | ||
@@ -313,22 +337,45 @@ struct tracer_flags { | |||
313 | /* Makes it easier to define a tracer opt */ | 337 | /* Makes it easier to define a tracer opt */ |
314 | #define TRACER_OPT(s, b) .name = #s, .bit = b | 338 | #define TRACER_OPT(s, b) .name = #s, .bit = b |
315 | 339 | ||
316 | /* | 340 | |
317 | * A specific tracer, represented by methods that operate on a trace array: | 341 | /** |
342 | * struct tracer - a specific tracer and its callbacks to interact with debugfs | ||
343 | * @name: the name chosen to select it on the available_tracers file | ||
344 | * @init: called when one switches to this tracer (echo name > current_tracer) | ||
345 | * @reset: called when one switches to another tracer | ||
346 | * @start: called when tracing is unpaused (echo 1 > tracing_enabled) | ||
347 | * @stop: called when tracing is paused (echo 0 > tracing_enabled) | ||
348 | * @open: called when the trace file is opened | ||
349 | * @pipe_open: called when the trace_pipe file is opened | ||
350 | * @wait_pipe: override how the user waits for traces on trace_pipe | ||
351 | * @close: called when the trace file is released | ||
352 | * @read: override the default read callback on trace_pipe | ||
353 | * @splice_read: override the default splice_read callback on trace_pipe | ||
354 | * @selftest: selftest to run on boot (see trace_selftest.c) | ||
355 | * @print_headers: override the first lines that describe your columns | ||
356 | * @print_line: callback that prints a trace | ||
357 | * @set_flag: signals one of your private flags changed (trace_options file) | ||
358 | * @flags: your private flags | ||
318 | */ | 359 | */ |
319 | struct tracer { | 360 | struct tracer { |
320 | const char *name; | 361 | const char *name; |
321 | /* Your tracer should raise a warning if init fails */ | ||
322 | int (*init)(struct trace_array *tr); | 362 | int (*init)(struct trace_array *tr); |
323 | void (*reset)(struct trace_array *tr); | 363 | void (*reset)(struct trace_array *tr); |
324 | void (*start)(struct trace_array *tr); | 364 | void (*start)(struct trace_array *tr); |
325 | void (*stop)(struct trace_array *tr); | 365 | void (*stop)(struct trace_array *tr); |
326 | void (*open)(struct trace_iterator *iter); | 366 | void (*open)(struct trace_iterator *iter); |
327 | void (*pipe_open)(struct trace_iterator *iter); | 367 | void (*pipe_open)(struct trace_iterator *iter); |
368 | void (*wait_pipe)(struct trace_iterator *iter); | ||
328 | void (*close)(struct trace_iterator *iter); | 369 | void (*close)(struct trace_iterator *iter); |
329 | ssize_t (*read)(struct trace_iterator *iter, | 370 | ssize_t (*read)(struct trace_iterator *iter, |
330 | struct file *filp, char __user *ubuf, | 371 | struct file *filp, char __user *ubuf, |
331 | size_t cnt, loff_t *ppos); | 372 | size_t cnt, loff_t *ppos); |
373 | ssize_t (*splice_read)(struct trace_iterator *iter, | ||
374 | struct file *filp, | ||
375 | loff_t *ppos, | ||
376 | struct pipe_inode_info *pipe, | ||
377 | size_t len, | ||
378 | unsigned int flags); | ||
332 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 379 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
333 | int (*selftest)(struct tracer *trace, | 380 | int (*selftest)(struct tracer *trace, |
334 | struct trace_array *tr); | 381 | struct trace_array *tr); |
@@ -340,6 +387,7 @@ struct tracer { | |||
340 | struct tracer *next; | 387 | struct tracer *next; |
341 | int print_max; | 388 | int print_max; |
342 | struct tracer_flags *flags; | 389 | struct tracer_flags *flags; |
390 | struct tracer_stat *stats; | ||
343 | }; | 391 | }; |
344 | 392 | ||
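
Given the kerneldoc above, only name and init are needed in practice; the rest can stay NULL (register_tracer() is expected to fall back to default_wait_pipe when wait_pipe is unset, though it is set explicitly here). A hedged sketch of the smallest tracer that fits this struct, assuming it is built into the kernel beside trace.c, since register_tracer() is not exported to modules:

    /* Hypothetical sketch: a minimal tracer. */
    static int minimal_init(struct trace_array *tr)
    {
            return 0;               /* nothing to set up */
    }

    static struct tracer minimal_tracer __read_mostly = {
            .name      = "minimal",
            .init      = minimal_init,
            .wait_pipe = default_wait_pipe, /* sleep on trace_wait */
    };

    static __init int minimal_reg(void)
    {
            return register_tracer(&minimal_tracer);
    }
    device_initcall(minimal_reg);
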
345 | struct trace_seq { | 393 | struct trace_seq { |
@@ -348,6 +396,16 @@ struct trace_seq { | |||
348 | unsigned int readpos; | 396 | unsigned int readpos; |
349 | }; | 397 | }; |
350 | 398 | ||
399 | static inline void | ||
400 | trace_seq_init(struct trace_seq *s) | ||
401 | { | ||
402 | s->len = 0; | ||
403 | s->readpos = 0; | ||
404 | } | ||
405 | |||
406 | |||
407 | #define TRACE_PIPE_ALL_CPU -1 | ||
408 | |||
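A short usage fragment for trace_seq: a print_line callback appends to iter->seq with trace_seq_printf(), which returns nonzero on success and 0 once the page-sized buffer is full (the pipe code performs the trace_seq_init() reset between records). Sketch, assuming an iterator as in trace_branch_print() further down in this patch:

	struct trace_seq *s = &iter->seq;

	if (!trace_seq_printf(s, "cpu=%d\n", iter->cpu))
		return TRACE_TYPE_PARTIAL_LINE;	/* buffer full */
	return TRACE_TYPE_HANDLED;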
351 | /* | 409 | /* |
352 | * Trace iterator - used by printout routines that present trace | 410 | * Trace iterator - used by printout routines that present trace |
353 | * results to users and that might sleep, etc: | 411 | * results to users and that might sleep, etc: |
@@ -356,6 +414,8 @@ struct trace_iterator { | |||
356 | struct trace_array *tr; | 414 | struct trace_array *tr; |
357 | struct tracer *trace; | 415 | struct tracer *trace; |
358 | void *private; | 416 | void *private; |
417 | int cpu_file; | ||
418 | struct mutex mutex; | ||
359 | struct ring_buffer_iter *buffer_iter[NR_CPUS]; | 419 | struct ring_buffer_iter *buffer_iter[NR_CPUS]; |
360 | 420 | ||
361 | /* The below is zeroed out in pipe_read */ | 421 | /* The below is zeroed out in pipe_read */ |
@@ -371,6 +431,7 @@ struct trace_iterator { | |||
371 | cpumask_var_t started; | 431 | cpumask_var_t started; |
372 | }; | 432 | }; |
373 | 433 | ||
434 | int tracer_init(struct tracer *t, struct trace_array *tr); | ||
374 | int tracing_is_enabled(void); | 435 | int tracing_is_enabled(void); |
375 | void trace_wake_up(void); | 436 | void trace_wake_up(void); |
376 | void tracing_reset(struct trace_array *tr, int cpu); | 437 | void tracing_reset(struct trace_array *tr, int cpu); |
@@ -379,26 +440,48 @@ int tracing_open_generic(struct inode *inode, struct file *filp); | |||
379 | struct dentry *tracing_init_dentry(void); | 440 | struct dentry *tracing_init_dentry(void); |
380 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); | 441 | void init_tracer_sysprof_debugfs(struct dentry *d_tracer); |
381 | 442 | ||
443 | struct ring_buffer_event; | ||
444 | |||
445 | struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, | ||
446 | unsigned char type, | ||
447 | unsigned long len, | ||
448 | unsigned long flags, | ||
449 | int pc); | ||
450 | void trace_buffer_unlock_commit(struct trace_array *tr, | ||
451 | struct ring_buffer_event *event, | ||
452 | unsigned long flags, int pc); | ||
453 | |||
454 | struct ring_buffer_event * | ||
455 | trace_current_buffer_lock_reserve(unsigned char type, unsigned long len, | ||
456 | unsigned long flags, int pc); | ||
457 | void trace_current_buffer_unlock_commit(struct ring_buffer_event *event, | ||
458 | unsigned long flags, int pc); | ||
459 | |||
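The conversions further down in this patch (trace_boot.c, trace_branch.c) all follow the same pattern with these two helpers; a hedged sketch, where TRACE_MY_TYPE, struct my_entry and my_field are hypothetical and the entry type embeds a struct trace_entry as its first member:

	struct ring_buffer_event *event;
	struct my_entry *entry;
	unsigned long flags;
	int pc = preempt_count();

	local_save_flags(flags);
	event = trace_buffer_lock_reserve(tr, TRACE_MY_TYPE,
					  sizeof(*entry), flags, pc);
	if (!event)
		return;		/* ring buffer full or tracing disabled */
	entry = ring_buffer_event_data(event);
	entry->my_field = 42;	/* fill the payload after the header */
	trace_buffer_unlock_commit(tr, event, flags, pc);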
382 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, | 460 | struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, |
383 | struct trace_array_cpu *data); | 461 | struct trace_array_cpu *data); |
462 | |||
463 | struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, | ||
464 | int *ent_cpu, u64 *ent_ts); | ||
465 | |||
384 | void tracing_generic_entry_update(struct trace_entry *entry, | 466 | void tracing_generic_entry_update(struct trace_entry *entry, |
385 | unsigned long flags, | 467 | unsigned long flags, |
386 | int pc); | 468 | int pc); |
387 | 469 | ||
470 | void default_wait_pipe(struct trace_iterator *iter); | ||
471 | void poll_wait_pipe(struct trace_iterator *iter); | ||
472 | |||
388 | void ftrace(struct trace_array *tr, | 473 | void ftrace(struct trace_array *tr, |
389 | struct trace_array_cpu *data, | 474 | struct trace_array_cpu *data, |
390 | unsigned long ip, | 475 | unsigned long ip, |
391 | unsigned long parent_ip, | 476 | unsigned long parent_ip, |
392 | unsigned long flags, int pc); | 477 | unsigned long flags, int pc); |
393 | void tracing_sched_switch_trace(struct trace_array *tr, | 478 | void tracing_sched_switch_trace(struct trace_array *tr, |
394 | struct trace_array_cpu *data, | ||
395 | struct task_struct *prev, | 479 | struct task_struct *prev, |
396 | struct task_struct *next, | 480 | struct task_struct *next, |
397 | unsigned long flags, int pc); | 481 | unsigned long flags, int pc); |
398 | void tracing_record_cmdline(struct task_struct *tsk); | 482 | void tracing_record_cmdline(struct task_struct *tsk); |
399 | 483 | ||
400 | void tracing_sched_wakeup_trace(struct trace_array *tr, | 484 | void tracing_sched_wakeup_trace(struct trace_array *tr, |
401 | struct trace_array_cpu *data, | ||
402 | struct task_struct *wakee, | 485 | struct task_struct *wakee, |
403 | struct task_struct *cur, | 486 | struct task_struct *cur, |
404 | unsigned long flags, int pc); | 487 | unsigned long flags, int pc); |
@@ -408,14 +491,12 @@ void trace_special(struct trace_array *tr, | |||
408 | unsigned long arg2, | 491 | unsigned long arg2, |
409 | unsigned long arg3, int pc); | 492 | unsigned long arg3, int pc); |
410 | void trace_function(struct trace_array *tr, | 493 | void trace_function(struct trace_array *tr, |
411 | struct trace_array_cpu *data, | ||
412 | unsigned long ip, | 494 | unsigned long ip, |
413 | unsigned long parent_ip, | 495 | unsigned long parent_ip, |
414 | unsigned long flags, int pc); | 496 | unsigned long flags, int pc); |
415 | 497 | ||
416 | void trace_graph_return(struct ftrace_graph_ret *trace); | 498 | void trace_graph_return(struct ftrace_graph_ret *trace); |
417 | int trace_graph_entry(struct ftrace_graph_ent *trace); | 499 | int trace_graph_entry(struct ftrace_graph_ent *trace); |
418 | void trace_hw_branch(struct trace_array *tr, u64 from, u64 to); | ||
419 | 500 | ||
420 | void tracing_start_cmdline_record(void); | 501 | void tracing_start_cmdline_record(void); |
421 | void tracing_stop_cmdline_record(void); | 502 | void tracing_stop_cmdline_record(void); |
@@ -434,15 +515,11 @@ void update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu); | |||
434 | void update_max_tr_single(struct trace_array *tr, | 515 | void update_max_tr_single(struct trace_array *tr, |
435 | struct task_struct *tsk, int cpu); | 516 | struct task_struct *tsk, int cpu); |
436 | 517 | ||
437 | extern cycle_t ftrace_now(int cpu); | 518 | void __trace_stack(struct trace_array *tr, |
519 | unsigned long flags, | ||
520 | int skip, int pc); | ||
438 | 521 | ||
439 | #ifdef CONFIG_FUNCTION_TRACER | 522 | extern cycle_t ftrace_now(int cpu); |
440 | void tracing_start_function_trace(void); | ||
441 | void tracing_stop_function_trace(void); | ||
442 | #else | ||
443 | # define tracing_start_function_trace() do { } while (0) | ||
444 | # define tracing_stop_function_trace() do { } while (0) | ||
445 | #endif | ||
446 | 523 | ||
447 | #ifdef CONFIG_CONTEXT_SWITCH_TRACER | 524 | #ifdef CONFIG_CONTEXT_SWITCH_TRACER |
448 | typedef void | 525 | typedef void |
@@ -456,10 +533,10 @@ struct tracer_switch_ops { | |||
456 | void *private; | 533 | void *private; |
457 | struct tracer_switch_ops *next; | 534 | struct tracer_switch_ops *next; |
458 | }; | 535 | }; |
459 | |||
460 | char *trace_find_cmdline(int pid); | ||
461 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ | 536 | #endif /* CONFIG_CONTEXT_SWITCH_TRACER */ |
462 | 537 | ||
538 | extern char *trace_find_cmdline(int pid); | ||
539 | |||
463 | #ifdef CONFIG_DYNAMIC_FTRACE | 540 | #ifdef CONFIG_DYNAMIC_FTRACE |
464 | extern unsigned long ftrace_update_tot_cnt; | 541 | extern unsigned long ftrace_update_tot_cnt; |
465 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func | 542 | #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func |
@@ -469,6 +546,8 @@ extern int DYN_FTRACE_TEST_NAME(void); | |||
469 | #ifdef CONFIG_FTRACE_STARTUP_TEST | 546 | #ifdef CONFIG_FTRACE_STARTUP_TEST |
470 | extern int trace_selftest_startup_function(struct tracer *trace, | 547 | extern int trace_selftest_startup_function(struct tracer *trace, |
471 | struct trace_array *tr); | 548 | struct trace_array *tr); |
549 | extern int trace_selftest_startup_function_graph(struct tracer *trace, | ||
550 | struct trace_array *tr); | ||
472 | extern int trace_selftest_startup_irqsoff(struct tracer *trace, | 551 | extern int trace_selftest_startup_irqsoff(struct tracer *trace, |
473 | struct trace_array *tr); | 552 | struct trace_array *tr); |
474 | extern int trace_selftest_startup_preemptoff(struct tracer *trace, | 553 | extern int trace_selftest_startup_preemptoff(struct tracer *trace, |
@@ -488,15 +567,6 @@ extern int trace_selftest_startup_branch(struct tracer *trace, | |||
488 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ | 567 | #endif /* CONFIG_FTRACE_STARTUP_TEST */ |
489 | 568 | ||
490 | extern void *head_page(struct trace_array_cpu *data); | 569 | extern void *head_page(struct trace_array_cpu *data); |
491 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); | ||
492 | extern void trace_seq_print_cont(struct trace_seq *s, | ||
493 | struct trace_iterator *iter); | ||
494 | |||
495 | extern int | ||
496 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, | ||
497 | unsigned long sym_flags); | ||
498 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | ||
499 | size_t cnt); | ||
500 | extern long ns2usecs(cycle_t nsec); | 570 | extern long ns2usecs(cycle_t nsec); |
501 | extern int | 571 | extern int |
502 | trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args); | 572 | trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args); |
@@ -580,7 +650,9 @@ enum trace_iterator_flags { | |||
580 | TRACE_ITER_ANNOTATE = 0x2000, | 650 | TRACE_ITER_ANNOTATE = 0x2000, |
581 | TRACE_ITER_USERSTACKTRACE = 0x4000, | 651 | TRACE_ITER_USERSTACKTRACE = 0x4000, |
582 | TRACE_ITER_SYM_USEROBJ = 0x8000, | 652 | TRACE_ITER_SYM_USEROBJ = 0x8000, |
583 | TRACE_ITER_PRINTK_MSGONLY = 0x10000 | 653 | TRACE_ITER_PRINTK_MSGONLY = 0x10000, |
654 | TRACE_ITER_CONTEXT_INFO = 0x20000, /* Print pid/cpu/time */ | ||
655 | TRACE_ITER_LATENCY_FMT = 0x40000, | ||
584 | }; | 656 | }; |
585 | 657 | ||
586 | /* | 658 | /* |
@@ -601,12 +673,12 @@ extern struct tracer nop_trace; | |||
601 | * preempt_enable (after a disable), a schedule might take place | 673 | * preempt_enable (after a disable), a schedule might take place |
602 | * causing an infinite recursion. | 674 | * causing an infinite recursion. |
603 | * | 675 | * |
604 | * To prevent this, we read the need_recshed flag before | 676 | * To prevent this, we read the need_resched flag before |
605 | * disabling preemption. When we want to enable preemption we | 677 | * disabling preemption. When we want to enable preemption we |
606 | * check the flag, if it is set, then we call preempt_enable_no_resched. | 678 | * check the flag, if it is set, then we call preempt_enable_no_resched. |
607 | * Otherwise, we call preempt_enable. | 679 | * Otherwise, we call preempt_enable. |
608 | * | 680 | * |
609 | * The rationale for doing the above is that if need resched is set | 681 | * The rationale for doing the above is that if need_resched is set |
610 | * and we have yet to reschedule, we are either in an atomic location | 682 | * and we have yet to reschedule, we are either in an atomic location |
611 | * (where we do not need to check for scheduling) or we are inside | 683 | * (where we do not need to check for scheduling) or we are inside |
612 | * the scheduler and do not want to resched. | 684 | * the scheduler and do not want to resched. |
@@ -627,7 +699,7 @@ static inline int ftrace_preempt_disable(void) | |||
627 | * | 699 | * |
628 | * This is a scheduler safe way to enable preemption and not miss | 700 | * This is a scheduler safe way to enable preemption and not miss |
629 | * any preemption checks. The earlier disable saved the state of preemption. | 701 | * any preemption checks. The earlier disable saved the state of preemption. |
630 | * If resched is set, then we were either inside an atomic or | 702 | * If resched is set, then we are either inside an atomic or |
631 | * are inside the scheduler (we would have already scheduled | 703 | * are inside the scheduler (we would have already scheduled |
632 | * otherwise). In this case, we do not want to call normal | 704 | * otherwise). In this case, we do not want to call normal |
633 | * preempt_enable, but preempt_enable_no_resched instead. | 705 | * preempt_enable, but preempt_enable_no_resched instead. |
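A sketch of the pairing this comment describes, using the two trace.h helpers it documents:

	int resched;

	resched = ftrace_preempt_disable();
	/* ... record the trace entry; must not schedule here ... */
	ftrace_preempt_enable(resched);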
@@ -664,4 +736,31 @@ static inline void trace_branch_disable(void) | |||
664 | } | 736 | } |
665 | #endif /* CONFIG_BRANCH_TRACER */ | 737 | #endif /* CONFIG_BRANCH_TRACER */ |
666 | 738 | ||
739 | /* trace event type bit fields, not numeric */ | ||
740 | enum { | ||
741 | TRACE_EVENT_TYPE_PRINTF = 1, | ||
742 | TRACE_EVENT_TYPE_RAW = 2, | ||
743 | }; | ||
744 | |||
745 | struct ftrace_event_call { | ||
746 | char *name; | ||
747 | char *system; | ||
748 | struct dentry *dir; | ||
749 | int enabled; | ||
750 | int (*regfunc)(void); | ||
751 | void (*unregfunc)(void); | ||
752 | int id; | ||
753 | struct dentry *raw_dir; | ||
754 | int raw_enabled; | ||
755 | int type; | ||
756 | int (*raw_init)(void); | ||
757 | int (*raw_reg)(void); | ||
758 | void (*raw_unreg)(void); | ||
759 | int (*show_format)(struct trace_seq *s); | ||
760 | }; | ||
761 | |||
762 | void event_trace_printk(unsigned long ip, const char *fmt, ...); | ||
763 | extern struct ftrace_event_call __start_ftrace_events[]; | ||
764 | extern struct ftrace_event_call __stop_ftrace_events[]; | ||
765 | |||
667 | #endif /* _LINUX_KERNEL_TRACE_H */ | 766 | #endif /* _LINUX_KERNEL_TRACE_H */ |
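The __start/__stop symbols above bracket a linker-section array that the new event files iterate. A hedged sketch of how an entry could land there; the section name and attribute placement are assumptions (the real definitions come from the trace event macro stages, not from this header):

static int my_event_reg(void)
{
	/* hook the tracepoint up */
	return 0;
}

static void my_event_unreg(void)
{
	/* tear the tracepoint down */
}

static struct ftrace_event_call __used
__attribute__((section("_ftrace_events"))) my_event_call = {
	.name	   = "my_event",
	.system	   = "my_subsystem",
	.regfunc   = my_event_reg,
	.unregfunc = my_event_unreg,
};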
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c index 366c8c333e13..7a30fc4c3642 100644 --- a/kernel/trace/trace_boot.c +++ b/kernel/trace/trace_boot.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/kallsyms.h> | 11 | #include <linux/kallsyms.h> |
12 | 12 | ||
13 | #include "trace.h" | 13 | #include "trace.h" |
14 | #include "trace_output.h" | ||
14 | 15 | ||
15 | static struct trace_array *boot_trace; | 16 | static struct trace_array *boot_trace; |
16 | static bool pre_initcalls_finished; | 17 | static bool pre_initcalls_finished; |
@@ -27,13 +28,13 @@ void start_boot_trace(void) | |||
27 | 28 | ||
28 | void enable_boot_trace(void) | 29 | void enable_boot_trace(void) |
29 | { | 30 | { |
30 | if (pre_initcalls_finished) | 31 | if (boot_trace && pre_initcalls_finished) |
31 | tracing_start_sched_switch_record(); | 32 | tracing_start_sched_switch_record(); |
32 | } | 33 | } |
33 | 34 | ||
34 | void disable_boot_trace(void) | 35 | void disable_boot_trace(void) |
35 | { | 36 | { |
36 | if (pre_initcalls_finished) | 37 | if (boot_trace && pre_initcalls_finished) |
37 | tracing_stop_sched_switch_record(); | 38 | tracing_stop_sched_switch_record(); |
38 | } | 39 | } |
39 | 40 | ||
@@ -42,6 +43,9 @@ static int boot_trace_init(struct trace_array *tr) | |||
42 | int cpu; | 43 | int cpu; |
43 | boot_trace = tr; | 44 | boot_trace = tr; |
44 | 45 | ||
46 | if (!tr) | ||
47 | return 0; | ||
48 | |||
45 | for_each_cpu(cpu, cpu_possible_mask) | 49 | for_each_cpu(cpu, cpu_possible_mask) |
46 | tracing_reset(tr, cpu); | 50 | tracing_reset(tr, cpu); |
47 | 51 | ||
@@ -128,10 +132,9 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) | |||
128 | { | 132 | { |
129 | struct ring_buffer_event *event; | 133 | struct ring_buffer_event *event; |
130 | struct trace_boot_call *entry; | 134 | struct trace_boot_call *entry; |
131 | unsigned long irq_flags; | ||
132 | struct trace_array *tr = boot_trace; | 135 | struct trace_array *tr = boot_trace; |
133 | 136 | ||
134 | if (!pre_initcalls_finished) | 137 | if (!tr || !pre_initcalls_finished) |
135 | return; | 138 | return; |
136 | 139 | ||
137 | /* Get its name now since this function could | 140 | /* Get its name now since this function could |
@@ -140,18 +143,13 @@ void trace_boot_call(struct boot_trace_call *bt, initcall_t fn) | |||
140 | sprint_symbol(bt->func, (unsigned long)fn); | 143 | sprint_symbol(bt->func, (unsigned long)fn); |
141 | preempt_disable(); | 144 | preempt_disable(); |
142 | 145 | ||
143 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 146 | event = trace_buffer_lock_reserve(tr, TRACE_BOOT_CALL, |
144 | &irq_flags); | 147 | sizeof(*entry), 0, 0); |
145 | if (!event) | 148 | if (!event) |
146 | goto out; | 149 | goto out; |
147 | entry = ring_buffer_event_data(event); | 150 | entry = ring_buffer_event_data(event); |
148 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
149 | entry->ent.type = TRACE_BOOT_CALL; | ||
150 | entry->boot_call = *bt; | 151 | entry->boot_call = *bt; |
151 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 152 | trace_buffer_unlock_commit(tr, event, 0, 0); |
152 | |||
153 | trace_wake_up(); | ||
154 | |||
155 | out: | 153 | out: |
156 | preempt_enable(); | 154 | preempt_enable(); |
157 | } | 155 | } |
@@ -160,27 +158,21 @@ void trace_boot_ret(struct boot_trace_ret *bt, initcall_t fn) | |||
160 | { | 158 | { |
161 | struct ring_buffer_event *event; | 159 | struct ring_buffer_event *event; |
162 | struct trace_boot_ret *entry; | 160 | struct trace_boot_ret *entry; |
163 | unsigned long irq_flags; | ||
164 | struct trace_array *tr = boot_trace; | 161 | struct trace_array *tr = boot_trace; |
165 | 162 | ||
166 | if (!pre_initcalls_finished) | 163 | if (!tr || !pre_initcalls_finished) |
167 | return; | 164 | return; |
168 | 165 | ||
169 | sprint_symbol(bt->func, (unsigned long)fn); | 166 | sprint_symbol(bt->func, (unsigned long)fn); |
170 | preempt_disable(); | 167 | preempt_disable(); |
171 | 168 | ||
172 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 169 | event = trace_buffer_lock_reserve(tr, TRACE_BOOT_RET, |
173 | &irq_flags); | 170 | sizeof(*entry), 0, 0); |
174 | if (!event) | 171 | if (!event) |
175 | goto out; | 172 | goto out; |
176 | entry = ring_buffer_event_data(event); | 173 | entry = ring_buffer_event_data(event); |
177 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
178 | entry->ent.type = TRACE_BOOT_RET; | ||
179 | entry->boot_ret = *bt; | 174 | entry->boot_ret = *bt; |
180 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 175 | trace_buffer_unlock_commit(tr, event, 0, 0); |
181 | |||
182 | trace_wake_up(); | ||
183 | |||
184 | out: | 176 | out: |
185 | preempt_enable(); | 177 | preempt_enable(); |
186 | } | 178 | } |
diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 6c00feb3bac7..aaa0755268b9 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c | |||
@@ -14,12 +14,17 @@ | |||
14 | #include <linux/hash.h> | 14 | #include <linux/hash.h> |
15 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
16 | #include <asm/local.h> | 16 | #include <asm/local.h> |
17 | |||
17 | #include "trace.h" | 18 | #include "trace.h" |
19 | #include "trace_stat.h" | ||
20 | #include "trace_output.h" | ||
18 | 21 | ||
19 | #ifdef CONFIG_BRANCH_TRACER | 22 | #ifdef CONFIG_BRANCH_TRACER |
20 | 23 | ||
24 | static struct tracer branch_trace; | ||
21 | static int branch_tracing_enabled __read_mostly; | 25 | static int branch_tracing_enabled __read_mostly; |
22 | static DEFINE_MUTEX(branch_tracing_mutex); | 26 | static DEFINE_MUTEX(branch_tracing_mutex); |
27 | |||
23 | static struct trace_array *branch_tracer; | 28 | static struct trace_array *branch_tracer; |
24 | 29 | ||
25 | static void | 30 | static void |
@@ -28,7 +33,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
28 | struct trace_array *tr = branch_tracer; | 33 | struct trace_array *tr = branch_tracer; |
29 | struct ring_buffer_event *event; | 34 | struct ring_buffer_event *event; |
30 | struct trace_branch *entry; | 35 | struct trace_branch *entry; |
31 | unsigned long flags, irq_flags; | 36 | unsigned long flags; |
32 | int cpu, pc; | 37 | int cpu, pc; |
33 | const char *p; | 38 | const char *p; |
34 | 39 | ||
@@ -47,15 +52,13 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
47 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) | 52 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) |
48 | goto out; | 53 | goto out; |
49 | 54 | ||
50 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 55 | pc = preempt_count(); |
51 | &irq_flags); | 56 | event = trace_buffer_lock_reserve(tr, TRACE_BRANCH, |
57 | sizeof(*entry), flags, pc); | ||
52 | if (!event) | 58 | if (!event) |
53 | goto out; | 59 | goto out; |
54 | 60 | ||
55 | pc = preempt_count(); | ||
56 | entry = ring_buffer_event_data(event); | 61 | entry = ring_buffer_event_data(event); |
57 | tracing_generic_entry_update(&entry->ent, flags, pc); | ||
58 | entry->ent.type = TRACE_BRANCH; | ||
59 | 62 | ||
60 | /* Strip off the path, only save the file */ | 63 | /* Strip off the path, only save the file */ |
61 | p = f->file + strlen(f->file); | 64 | p = f->file + strlen(f->file); |
@@ -70,7 +73,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
70 | entry->line = f->line; | 73 | entry->line = f->line; |
71 | entry->correct = val == expect; | 74 | entry->correct = val == expect; |
72 | 75 | ||
73 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 76 | ring_buffer_unlock_commit(tr->buffer, event); |
74 | 77 | ||
75 | out: | 78 | out: |
76 | atomic_dec(&tr->data[cpu]->disabled); | 79 | atomic_dec(&tr->data[cpu]->disabled); |
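The disabled counter in this hunk is the usual per-cpu reentrancy guard for probes; stripped to its essentials the pattern is:

	cpu = raw_smp_processor_id();
	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
		goto out;	/* already inside a probe on this cpu */
	/* ... safe to write to the ring buffer here ... */
 out:
	atomic_dec(&tr->data[cpu]->disabled);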
@@ -88,8 +91,6 @@ void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) | |||
88 | 91 | ||
89 | int enable_branch_tracing(struct trace_array *tr) | 92 | int enable_branch_tracing(struct trace_array *tr) |
90 | { | 93 | { |
91 | int ret = 0; | ||
92 | |||
93 | mutex_lock(&branch_tracing_mutex); | 94 | mutex_lock(&branch_tracing_mutex); |
94 | branch_tracer = tr; | 95 | branch_tracer = tr; |
95 | /* | 96 | /* |
@@ -100,7 +101,7 @@ int enable_branch_tracing(struct trace_array *tr) | |||
100 | branch_tracing_enabled++; | 101 | branch_tracing_enabled++; |
101 | mutex_unlock(&branch_tracing_mutex); | 102 | mutex_unlock(&branch_tracing_mutex); |
102 | 103 | ||
103 | return ret; | 104 | return 0; |
104 | } | 105 | } |
105 | 106 | ||
106 | void disable_branch_tracing(void) | 107 | void disable_branch_tracing(void) |
@@ -128,11 +129,6 @@ static void stop_branch_trace(struct trace_array *tr) | |||
128 | 129 | ||
129 | static int branch_trace_init(struct trace_array *tr) | 130 | static int branch_trace_init(struct trace_array *tr) |
130 | { | 131 | { |
131 | int cpu; | ||
132 | |||
133 | for_each_online_cpu(cpu) | ||
134 | tracing_reset(tr, cpu); | ||
135 | |||
136 | start_branch_trace(tr); | 132 | start_branch_trace(tr); |
137 | return 0; | 133 | return 0; |
138 | } | 134 | } |
@@ -142,22 +138,53 @@ static void branch_trace_reset(struct trace_array *tr) | |||
142 | stop_branch_trace(tr); | 138 | stop_branch_trace(tr); |
143 | } | 139 | } |
144 | 140 | ||
145 | struct tracer branch_trace __read_mostly = | 141 | static enum print_line_t trace_branch_print(struct trace_iterator *iter, |
142 | int flags) | ||
143 | { | ||
144 | struct trace_branch *field; | ||
145 | |||
146 | trace_assign_type(field, iter->ent); | ||
147 | |||
148 | if (!trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n", |
149 | field->correct ? " ok " : " MISS ", | ||
150 | field->func, | ||
151 | field->file, | ||
152 | field->line)) | ||
153 | return TRACE_TYPE_PARTIAL_LINE; | ||
154 | |||
155 | return TRACE_TYPE_HANDLED; | ||
156 | } | ||
157 | |||
158 | |||
159 | static struct trace_event trace_branch_event = { | ||
160 | .type = TRACE_BRANCH, | ||
161 | .trace = trace_branch_print, | ||
162 | }; | ||
163 | |||
164 | static struct tracer branch_trace __read_mostly = | ||
146 | { | 165 | { |
147 | .name = "branch", | 166 | .name = "branch", |
148 | .init = branch_trace_init, | 167 | .init = branch_trace_init, |
149 | .reset = branch_trace_reset, | 168 | .reset = branch_trace_reset, |
150 | #ifdef CONFIG_FTRACE_SELFTEST | 169 | #ifdef CONFIG_FTRACE_SELFTEST |
151 | .selftest = trace_selftest_startup_branch, | 170 | .selftest = trace_selftest_startup_branch, |
152 | #endif | 171 | #endif /* CONFIG_FTRACE_SELFTEST */ |
153 | }; | 172 | }; |
154 | 173 | ||
155 | __init static int init_branch_trace(void) | 174 | __init static int init_branch_tracer(void) |
156 | { | 175 | { |
176 | int ret; | ||
177 | |||
178 | ret = register_ftrace_event(&trace_branch_event); | ||
179 | if (!ret) { | ||
180 | printk(KERN_WARNING "Warning: could not register " | ||
181 | "branch events\n"); | ||
182 | return 1; | ||
183 | } | ||
157 | return register_tracer(&branch_trace); | 184 | return register_tracer(&branch_trace); |
158 | } | 185 | } |
186 | device_initcall(init_branch_tracer); | ||
159 | 187 | ||
160 | device_initcall(init_branch_trace); | ||
161 | #else | 188 | #else |
162 | static inline | 189 | static inline |
163 | void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) | 190 | void trace_likely_condition(struct ftrace_branch_data *f, int val, int expect) |
@@ -183,66 +210,39 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect) | |||
183 | } | 210 | } |
184 | EXPORT_SYMBOL(ftrace_likely_update); | 211 | EXPORT_SYMBOL(ftrace_likely_update); |
185 | 212 | ||
186 | struct ftrace_pointer { | 213 | extern unsigned long __start_annotated_branch_profile[]; |
187 | void *start; | 214 | extern unsigned long __stop_annotated_branch_profile[]; |
188 | void *stop; | ||
189 | int hit; | ||
190 | }; | ||
191 | 215 | ||
192 | static void * | 216 | static int annotated_branch_stat_headers(struct seq_file *m) |
193 | t_next(struct seq_file *m, void *v, loff_t *pos) | ||
194 | { | 217 | { |
195 | const struct ftrace_pointer *f = m->private; | 218 | seq_printf(m, " correct incorrect %% "); |
196 | struct ftrace_branch_data *p = v; | 219 | seq_printf(m, " Function " |
197 | 220 | " File Line\n" | |
198 | (*pos)++; | 221 | " ------- --------- - " |
199 | 222 | " -------- " | |
200 | if (v == (void *)1) | 223 | " ---- ----\n"); |
201 | return f->start; | 224 | return 0; |
202 | |||
203 | ++p; | ||
204 | |||
205 | if ((void *)p >= (void *)f->stop) | ||
206 | return NULL; | ||
207 | |||
208 | return p; | ||
209 | } | 225 | } |
210 | 226 | ||
211 | static void *t_start(struct seq_file *m, loff_t *pos) | 227 | static inline long get_incorrect_percent(struct ftrace_branch_data *p) |
212 | { | 228 | { |
213 | void *t = (void *)1; | 229 | long percent; |
214 | loff_t l = 0; | ||
215 | |||
216 | for (; t && l < *pos; t = t_next(m, t, &l)) | ||
217 | ; | ||
218 | 230 | ||
219 | return t; | 231 | if (p->correct) { |
220 | } | 232 | percent = p->incorrect * 100; |
233 | percent /= p->correct + p->incorrect; | ||
234 | } else | ||
235 | percent = p->incorrect ? 100 : -1; | ||
221 | 236 | ||
222 | static void t_stop(struct seq_file *m, void *p) | 237 | return percent; |
223 | { | ||
224 | } | 238 | } |
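Worked example: a branch predicted correctly 3 times and incorrectly once yields 1 * 100 / (3 + 1) = 25 percent incorrect; a branch with no correct hits reports 100 if it ever fired and -1 if it never did.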
225 | 239 | ||
226 | static int t_show(struct seq_file *m, void *v) | 240 | static int branch_stat_show(struct seq_file *m, void *v) |
227 | { | 241 | { |
228 | const struct ftrace_pointer *fp = m->private; | ||
229 | struct ftrace_branch_data *p = v; | 242 | struct ftrace_branch_data *p = v; |
230 | const char *f; | 243 | const char *f; |
231 | long percent; | 244 | long percent; |
232 | 245 | ||
233 | if (v == (void *)1) { | ||
234 | if (fp->hit) | ||
235 | seq_printf(m, " miss hit %% "); | ||
236 | else | ||
237 | seq_printf(m, " correct incorrect %% "); | ||
238 | seq_printf(m, " Function " | ||
239 | " File Line\n" | ||
240 | " ------- --------- - " | ||
241 | " -------- " | ||
242 | " ---- ----\n"); | ||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | /* Only print the file, not the path */ | 246 | /* Only print the file, not the path */ |
247 | f = p->file + strlen(p->file); | 247 | f = p->file + strlen(p->file); |
248 | while (f >= p->file && *f != '/') | 248 | while (f >= p->file && *f != '/') |
@@ -252,11 +252,7 @@ static int t_show(struct seq_file *m, void *v) | |||
252 | /* | 252 | /* |
253 | * The miss is overlayed on correct, and hit on incorrect. | 253 | * The miss is overlayed on correct, and hit on incorrect. |
254 | */ | 254 | */ |
255 | if (p->correct) { | 255 | percent = get_incorrect_percent(p); |
256 | percent = p->incorrect * 100; | ||
257 | percent /= p->correct + p->incorrect; | ||
258 | } else | ||
259 | percent = p->incorrect ? 100 : -1; | ||
260 | 256 | ||
261 | seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); | 257 | seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); |
262 | if (percent < 0) | 258 | if (percent < 0) |
@@ -267,76 +263,118 @@ static int t_show(struct seq_file *m, void *v) | |||
267 | return 0; | 263 | return 0; |
268 | } | 264 | } |
269 | 265 | ||
270 | static struct seq_operations tracing_likely_seq_ops = { | 266 | static void *annotated_branch_stat_start(void) |
271 | .start = t_start, | 267 | { |
272 | .next = t_next, | 268 | return __start_annotated_branch_profile; |
273 | .stop = t_stop, | 269 | } |
274 | .show = t_show, | 270 | |
271 | static void * | ||
272 | annotated_branch_stat_next(void *v, int idx) | ||
273 | { | ||
274 | struct ftrace_branch_data *p = v; | ||
275 | |||
276 | ++p; | ||
277 | |||
278 | if ((void *)p >= (void *)__stop_annotated_branch_profile) | ||
279 | return NULL; | ||
280 | |||
281 | return p; | ||
282 | } | ||
283 | |||
284 | static int annotated_branch_stat_cmp(void *p1, void *p2) | ||
285 | { | ||
286 | struct ftrace_branch_data *a = p1; | ||
287 | struct ftrace_branch_data *b = p2; | ||
288 | |||
289 | long percent_a, percent_b; | ||
290 | |||
291 | percent_a = get_incorrect_percent(a); | ||
292 | percent_b = get_incorrect_percent(b); | ||
293 | |||
294 | if (percent_a < percent_b) | ||
295 | return -1; | ||
296 | if (percent_a > percent_b) | ||
297 | return 1; | ||
298 | else | ||
299 | return 0; | ||
300 | } | ||
301 | |||
302 | static struct tracer_stat annotated_branch_stats = { | ||
303 | .name = "branch_annotated", | ||
304 | .stat_start = annotated_branch_stat_start, | ||
305 | .stat_next = annotated_branch_stat_next, | ||
306 | .stat_cmp = annotated_branch_stat_cmp, | ||
307 | .stat_headers = annotated_branch_stat_headers, | ||
308 | .stat_show = branch_stat_show | ||
275 | }; | 309 | }; |
276 | 310 | ||
277 | static int tracing_branch_open(struct inode *inode, struct file *file) | 311 | __init static int init_annotated_branch_stats(void) |
278 | { | 312 | { |
279 | int ret; | 313 | int ret; |
280 | 314 | ||
281 | ret = seq_open(file, &tracing_likely_seq_ops); | 315 | ret = register_stat_tracer(&annotated_branch_stats); |
282 | if (!ret) { | 316 | if (ret) { |
283 | struct seq_file *m = file->private_data; | 317 | printk(KERN_WARNING "Warning: could not register " |
284 | m->private = (void *)inode->i_private; | 318 | "annotated branches stats\n"); |
319 | return 1; | ||
285 | } | 320 | } |
286 | 321 | return 0; | |
287 | return ret; | ||
288 | } | 322 | } |
289 | 323 | fs_initcall(init_annotated_branch_stats); | |
290 | static const struct file_operations tracing_branch_fops = { | ||
291 | .open = tracing_branch_open, | ||
292 | .read = seq_read, | ||
293 | .llseek = seq_lseek, | ||
294 | }; | ||
295 | 324 | ||
296 | #ifdef CONFIG_PROFILE_ALL_BRANCHES | 325 | #ifdef CONFIG_PROFILE_ALL_BRANCHES |
326 | |||
297 | extern unsigned long __start_branch_profile[]; | 327 | extern unsigned long __start_branch_profile[]; |
298 | extern unsigned long __stop_branch_profile[]; | 328 | extern unsigned long __stop_branch_profile[]; |
299 | 329 | ||
300 | static const struct ftrace_pointer ftrace_branch_pos = { | 330 | static int all_branch_stat_headers(struct seq_file *m) |
301 | .start = __start_branch_profile, | 331 | { |
302 | .stop = __stop_branch_profile, | 332 | seq_printf(m, " miss hit %% "); |
303 | .hit = 1, | 333 | seq_printf(m, " Function " |
304 | }; | 334 | " File Line\n" |
335 | " ------- --------- - " | ||
336 | " -------- " | ||
337 | " ---- ----\n"); | ||
338 | return 0; | ||
339 | } | ||
305 | 340 | ||
306 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ | 341 | static void *all_branch_stat_start(void) |
342 | { | ||
343 | return __start_branch_profile; | ||
344 | } | ||
307 | 345 | ||
308 | extern unsigned long __start_annotated_branch_profile[]; | 346 | static void * |
309 | extern unsigned long __stop_annotated_branch_profile[]; | 347 | all_branch_stat_next(void *v, int idx) |
348 | { | ||
349 | struct ftrace_branch_data *p = v; | ||
310 | 350 | ||
311 | static const struct ftrace_pointer ftrace_annotated_branch_pos = { | 351 | ++p; |
312 | .start = __start_annotated_branch_profile, | ||
313 | .stop = __stop_annotated_branch_profile, | ||
314 | }; | ||
315 | 352 | ||
316 | static __init int ftrace_branch_init(void) | 353 | if ((void *)p >= (void *)__stop_branch_profile) |
317 | { | 354 | return NULL; |
318 | struct dentry *d_tracer; | ||
319 | struct dentry *entry; | ||
320 | 355 | ||
321 | d_tracer = tracing_init_dentry(); | 356 | return p; |
357 | } | ||
322 | 358 | ||
323 | entry = debugfs_create_file("profile_annotated_branch", 0444, d_tracer, | 359 | static struct tracer_stat all_branch_stats = { |
324 | (void *)&ftrace_annotated_branch_pos, | 360 | .name = "branch_all", |
325 | &tracing_branch_fops); | 361 | .stat_start = all_branch_stat_start, |
326 | if (!entry) | 362 | .stat_next = all_branch_stat_next, |
327 | pr_warning("Could not create debugfs " | 363 | .stat_headers = all_branch_stat_headers, |
328 | "'profile_annotatet_branch' entry\n"); | 364 | .stat_show = branch_stat_show |
365 | }; | ||
329 | 366 | ||
330 | #ifdef CONFIG_PROFILE_ALL_BRANCHES | 367 | __init static int all_annotated_branch_stats(void) |
331 | entry = debugfs_create_file("profile_branch", 0444, d_tracer, | 368 | { |
332 | (void *)&ftrace_branch_pos, | 369 | int ret; |
333 | &tracing_branch_fops); | ||
334 | if (!entry) | ||
335 | pr_warning("Could not create debugfs" | ||
336 | " 'profile_branch' entry\n"); | ||
337 | #endif | ||
338 | 370 | ||
371 | ret = register_stat_tracer(&all_branch_stats); | ||
372 | if (ret) { |
373 | printk(KERN_WARNING "Warning: could not register " | ||
374 | "all branches stats\n"); | ||
375 | return 1; | ||
376 | } | ||
339 | return 0; | 377 | return 0; |
340 | } | 378 | } |
341 | 379 | fs_initcall(all_annotated_branch_stats); | |
342 | device_initcall(ftrace_branch_init); | 380 | #endif /* CONFIG_PROFILE_ALL_BRANCHES */ |
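Both registrations above use the new trace_stat API; a minimal hedged skeleton of a stat source (the my_stat_* names and the static array are hypothetical) looks like:

static struct my_stat {
	unsigned long hits;
} my_stats[16];

static void *my_stat_start(void)
{
	return &my_stats[0];
}

static void *my_stat_next(void *v, int idx)
{
	struct my_stat *p = v;

	return ++p < &my_stats[16] ? (void *)p : NULL;
}

static int my_stat_show(struct seq_file *m, void *v)
{
	struct my_stat *p = v;

	seq_printf(m, "%lu\n", p->hits);
	return 0;
}

static struct tracer_stat my_stat_source = {
	.name	    = "my_stats",
	.stat_start = my_stat_start,
	.stat_next  = my_stat_next,
	.stat_show  = my_stat_show,
};

/* register_stat_tracer(&my_stat_source) then exposes it under trace_stat/ */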
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c new file mode 100644 index 000000000000..2d4953f93560 --- /dev/null +++ b/kernel/trace/trace_clock.c | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * tracing clocks | ||
3 | * | ||
4 | * Copyright (C) 2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> | ||
5 | * | ||
6 | * Implements 3 trace clock variants, with differing scalability/precision | ||
7 | * tradeoffs: | ||
8 | * | ||
9 | * - local: CPU-local trace clock | ||
10 | * - medium: scalable global clock with some jitter | ||
11 | * - global: globally monotonic, serialized clock | ||
12 | * | ||
13 | * Tracer plugins will choose a default from these clocks. |
14 | */ | ||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/hardirq.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/percpu.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/ktime.h> | ||
21 | |||
22 | /* | ||
23 | * trace_clock_local(): the simplest and least coherent tracing clock. | ||
24 | * | ||
25 | * Useful for tracing that does not cross to other CPUs nor | ||
26 | * does it go through idle events. | ||
27 | */ | ||
28 | u64 notrace trace_clock_local(void) | ||
29 | { | ||
30 | /* | ||
31 | * sched_clock() is an architecture implemented, fast, scalable, | ||
32 | * lockless clock. It is not guaranteed to be coherent across | ||
33 | * CPUs, nor across CPU idle events. | ||
34 | */ | ||
35 | return sched_clock(); | ||
36 | } | ||
37 | |||
38 | /* | ||
39 | * trace_clock(): 'in-between' trace clock. Not completely serialized, |
40 | * but not completely incorrect when crossing CPUs either. | ||
41 | * | ||
42 | * This is based on cpu_clock(), which will allow at most ~1 jiffy of | ||
43 | * jitter between CPUs. So it's a pretty scalable clock, but there | ||
44 | * can be offsets in the trace data. | ||
45 | */ | ||
46 | u64 notrace trace_clock(void) | ||
47 | { | ||
48 | return cpu_clock(raw_smp_processor_id()); | ||
49 | } | ||
50 | |||
51 | |||
52 | /* | ||
53 | * trace_clock_global(): special globally coherent trace clock | ||
54 | * | ||
55 | * It has higher overhead than the other trace clocks but is still | ||
56 | * an order of magnitude faster than GTOD derived hardware clocks. | ||
57 | * | ||
58 | * Used by plugins that need globally coherent timestamps. | ||
59 | */ | ||
60 | |||
61 | static u64 prev_trace_clock_time; | ||
62 | |||
63 | static raw_spinlock_t trace_clock_lock ____cacheline_aligned_in_smp = | ||
64 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | ||
65 | |||
66 | u64 notrace trace_clock_global(void) | ||
67 | { | ||
68 | unsigned long flags; | ||
69 | int this_cpu; | ||
70 | u64 now; | ||
71 | |||
72 | raw_local_irq_save(flags); | ||
73 | |||
74 | this_cpu = raw_smp_processor_id(); | ||
75 | now = cpu_clock(this_cpu); | ||
76 | /* | ||
77 | * If in an NMI context then don't risk lockups and return the |
78 | * cpu_clock() time: | ||
79 | */ | ||
80 | if (unlikely(in_nmi())) | ||
81 | goto out; | ||
82 | |||
83 | __raw_spin_lock(&trace_clock_lock); | ||
84 | |||
85 | /* | ||
86 | * TODO: if this happens often then maybe we should reset | ||
87 | * my_scd->clock to prev_trace_clock_time+1, to make sure | ||
88 | * we start ticking with the local clock from now on? | ||
89 | */ | ||
90 | if ((s64)(now - prev_trace_clock_time) < 0) | ||
91 | now = prev_trace_clock_time + 1; | ||
92 | |||
93 | prev_trace_clock_time = now; | ||
94 | |||
95 | __raw_spin_unlock(&trace_clock_lock); | ||
96 | |||
97 | out: | ||
98 | raw_local_irq_restore(flags); | ||
99 | |||
100 | return now; | ||
101 | } | ||
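A hedged sketch of the tradeoff between the three clocks; do_something() is a stand-in:

	u64 t0, t1;

	t0 = trace_clock_local();	/* fast, but may drift across cpus */
	do_something();
	t1 = trace_clock_local();
	/*
	 * t1 - t0 is only meaningful if both reads ran on the same cpu;
	 * use trace_clock_global() when timestamps are compared across cpus.
	 */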
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c new file mode 100644 index 000000000000..210e71ff82db --- /dev/null +++ b/kernel/trace/trace_events.c | |||
@@ -0,0 +1,731 @@ | |||
1 | /* | ||
2 | * event tracer | ||
3 | * | ||
4 | * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> | ||
5 | * | ||
6 | * - Added format output of fields of the trace point. | ||
7 | * This was based off of work by Tom Zanussi <tzanussi@gmail.com>. | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/debugfs.h> | ||
12 | #include <linux/uaccess.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/ctype.h> | ||
15 | |||
16 | #include "trace_output.h" | ||
17 | |||
18 | #define TRACE_SYSTEM "TRACE_SYSTEM" | ||
19 | |||
20 | static DEFINE_MUTEX(event_mutex); | ||
21 | |||
22 | #define events_for_each(event) \ | ||
23 | for (event = __start_ftrace_events; \ | ||
24 | (unsigned long)event < (unsigned long)__stop_ftrace_events; \ | ||
25 | event++) | ||
26 | |||
27 | void event_trace_printk(unsigned long ip, const char *fmt, ...) | ||
28 | { | ||
29 | va_list ap; | ||
30 | |||
31 | va_start(ap, fmt); | ||
32 | tracing_record_cmdline(current); | ||
33 | trace_vprintk(ip, task_curr_ret_stack(current), fmt, ap); | ||
34 | va_end(ap); | ||
35 | } | ||
36 | |||
37 | static void ftrace_clear_events(void) | ||
38 | { | ||
39 | struct ftrace_event_call *call = (void *)__start_ftrace_events; | ||
40 | |||
41 | |||
42 | while ((unsigned long)call < (unsigned long)__stop_ftrace_events) { | ||
43 | |||
44 | if (call->enabled) { | ||
45 | call->enabled = 0; | ||
46 | call->unregfunc(); | ||
47 | } | ||
48 | call++; | ||
49 | } | ||
50 | } | ||
51 | |||
52 | static void ftrace_event_enable_disable(struct ftrace_event_call *call, | ||
53 | int enable) | ||
54 | { | ||
55 | |||
56 | switch (enable) { | ||
57 | case 0: | ||
58 | if (call->enabled) { | ||
59 | call->enabled = 0; | ||
60 | call->unregfunc(); | ||
61 | } | ||
62 | if (call->raw_enabled) { | ||
63 | call->raw_enabled = 0; | ||
64 | call->raw_unreg(); | ||
65 | } | ||
66 | break; | ||
67 | case 1: | ||
68 | if (!call->enabled && | ||
69 | (call->type & TRACE_EVENT_TYPE_PRINTF)) { | ||
70 | call->enabled = 1; | ||
71 | call->regfunc(); | ||
72 | } | ||
73 | if (!call->raw_enabled && | ||
74 | (call->type & TRACE_EVENT_TYPE_RAW)) { | ||
75 | call->raw_enabled = 1; | ||
76 | call->raw_reg(); | ||
77 | } | ||
78 | break; | ||
79 | } | ||
80 | } | ||
81 | |||
82 | static int ftrace_set_clr_event(char *buf, int set) | ||
83 | { | ||
84 | struct ftrace_event_call *call = __start_ftrace_events; | ||
85 | char *event = NULL, *sub = NULL, *match; | ||
86 | int ret = -EINVAL; | ||
87 | |||
88 | /* | ||
89 | * The buf format can be <subsystem>:<event-name> | ||
90 | * *:<event-name> means any event by that name. | ||
91 | * :<event-name> is the same. | ||
92 | * | ||
93 | * <subsystem>:* means all events in that subsystem | ||
94 | * <subsystem>: means the same. | ||
95 | * | ||
96 | * <name> (no ':') means all events in a subsystem with | ||
97 | * the name <name> or any event that matches <name> | ||
98 | */ | ||
99 | |||
100 | match = strsep(&buf, ":"); | ||
101 | if (buf) { | ||
102 | sub = match; | ||
103 | event = buf; | ||
104 | match = NULL; | ||
105 | |||
106 | if (!strlen(sub) || strcmp(sub, "*") == 0) | ||
107 | sub = NULL; | ||
108 | if (!strlen(event) || strcmp(event, "*") == 0) | ||
109 | event = NULL; | ||
110 | } | ||
111 | |||
112 | mutex_lock(&event_mutex); | ||
113 | events_for_each(call) { | ||
114 | |||
115 | if (!call->name) | ||
116 | continue; | ||
117 | |||
118 | if (match && | ||
119 | strcmp(match, call->name) != 0 && | ||
120 | strcmp(match, call->system) != 0) | ||
121 | continue; | ||
122 | |||
123 | if (sub && strcmp(sub, call->system) != 0) | ||
124 | continue; | ||
125 | |||
126 | if (event && strcmp(event, call->name) != 0) | ||
127 | continue; | ||
128 | |||
129 | ftrace_event_enable_disable(call, set); | ||
130 | |||
131 | ret = 0; | ||
132 | } | ||
133 | mutex_unlock(&event_mutex); | ||
134 | |||
135 | return ret; | ||
136 | } | ||
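Concretely, these are the strings user space writes to the set_event file, for example (illustrative event names; the path assumes the usual debugfs mount):

	echo irq:irq_handler_entry > /sys/kernel/debug/tracing/set_event
	echo irq: > /sys/kernel/debug/tracing/set_event		(whole subsystem)
	echo '*:irq_handler_entry' > /sys/kernel/debug/tracing/set_event
	echo '!irq:*' > /sys/kernel/debug/tracing/set_event	(disable, via the '!' prefix parsed in ftrace_event_write() below)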
137 | |||
138 | /* 128 should be much more than enough */ | ||
139 | #define EVENT_BUF_SIZE 127 | ||
140 | |||
141 | static ssize_t | ||
142 | ftrace_event_write(struct file *file, const char __user *ubuf, | ||
143 | size_t cnt, loff_t *ppos) | ||
144 | { | ||
145 | size_t read = 0; | ||
146 | int i, set = 1; | ||
147 | ssize_t ret; | ||
148 | char *buf; | ||
149 | char ch; | ||
150 | |||
151 | if (!cnt) |
152 | return 0; | ||
153 | |||
154 | ret = get_user(ch, ubuf++); | ||
155 | if (ret) | ||
156 | return ret; | ||
157 | read++; | ||
158 | cnt--; | ||
159 | |||
160 | /* skip white space */ | ||
161 | while (cnt && isspace(ch)) { | ||
162 | ret = get_user(ch, ubuf++); | ||
163 | if (ret) | ||
164 | return ret; | ||
165 | read++; | ||
166 | cnt--; | ||
167 | } | ||
168 | |||
169 | /* Only white space found? */ | ||
170 | if (isspace(ch)) { | ||
171 | file->f_pos += read; | ||
172 | ret = read; | ||
173 | return ret; | ||
174 | } | ||
175 | |||
176 | buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL); | ||
177 | if (!buf) | ||
178 | return -ENOMEM; | ||
179 | |||
180 | if (cnt > EVENT_BUF_SIZE) | ||
181 | cnt = EVENT_BUF_SIZE; | ||
182 | |||
183 | i = 0; | ||
184 | while (cnt && !isspace(ch)) { | ||
185 | if (!i && ch == '!') | ||
186 | set = 0; | ||
187 | else | ||
188 | buf[i++] = ch; | ||
189 | |||
190 | ret = get_user(ch, ubuf++); | ||
191 | if (ret) | ||
192 | goto out_free; | ||
193 | read++; | ||
194 | cnt--; | ||
195 | } | ||
196 | buf[i] = 0; | ||
197 | |||
198 | file->f_pos += read; | ||
199 | |||
200 | ret = ftrace_set_clr_event(buf, set); | ||
201 | if (ret) | ||
202 | goto out_free; | ||
203 | |||
204 | ret = read; | ||
205 | |||
206 | out_free: | ||
207 | kfree(buf); | ||
208 | |||
209 | return ret; | ||
210 | } | ||
211 | |||
212 | static void * | ||
213 | t_next(struct seq_file *m, void *v, loff_t *pos) | ||
214 | { | ||
215 | struct ftrace_event_call *call = m->private; | ||
216 | struct ftrace_event_call *next = call; | ||
217 | |||
218 | (*pos)++; | ||
219 | |||
220 | if ((unsigned long)call >= (unsigned long)__stop_ftrace_events) | ||
221 | return NULL; | ||
222 | |||
223 | m->private = ++next; | ||
224 | |||
225 | return call; | ||
226 | } | ||
227 | |||
228 | static void *t_start(struct seq_file *m, loff_t *pos) | ||
229 | { | ||
230 | return t_next(m, NULL, pos); | ||
231 | } | ||
232 | |||
233 | static void * | ||
234 | s_next(struct seq_file *m, void *v, loff_t *pos) | ||
235 | { | ||
236 | struct ftrace_event_call *call = m->private; | ||
237 | struct ftrace_event_call *next; | ||
238 | |||
239 | (*pos)++; | ||
240 | |||
241 | retry: | ||
242 | if ((unsigned long)call >= (unsigned long)__stop_ftrace_events) | ||
243 | return NULL; | ||
244 | |||
245 | if (!call->enabled) { | ||
246 | call++; | ||
247 | goto retry; | ||
248 | } | ||
249 | |||
250 | next = call; | ||
251 | m->private = ++next; | ||
252 | |||
253 | return call; | ||
254 | } | ||
255 | |||
256 | static void *s_start(struct seq_file *m, loff_t *pos) | ||
257 | { | ||
258 | return s_next(m, NULL, pos); | ||
259 | } | ||
260 | |||
261 | static int t_show(struct seq_file *m, void *v) | ||
262 | { | ||
263 | struct ftrace_event_call *call = v; | ||
264 | |||
265 | if (strcmp(call->system, TRACE_SYSTEM) != 0) | ||
266 | seq_printf(m, "%s:", call->system); | ||
267 | seq_printf(m, "%s\n", call->name); | ||
268 | |||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | static void t_stop(struct seq_file *m, void *p) | ||
273 | { | ||
274 | } | ||
275 | |||
276 | static int | ||
277 | ftrace_event_seq_open(struct inode *inode, struct file *file) | ||
278 | { | ||
279 | int ret; | ||
280 | const struct seq_operations *seq_ops; | ||
281 | |||
282 | if ((file->f_mode & FMODE_WRITE) && | ||
283 | !(file->f_flags & O_APPEND)) | ||
284 | ftrace_clear_events(); | ||
285 | |||
286 | seq_ops = inode->i_private; | ||
287 | ret = seq_open(file, seq_ops); | ||
288 | if (!ret) { | ||
289 | struct seq_file *m = file->private_data; | ||
290 | |||
291 | m->private = __start_ftrace_events; | ||
292 | } | ||
293 | return ret; | ||
294 | } | ||
295 | |||
296 | static ssize_t | ||
297 | event_enable_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
298 | loff_t *ppos) | ||
299 | { | ||
300 | struct ftrace_event_call *call = filp->private_data; | ||
301 | char *buf; | ||
302 | |||
303 | if (call->enabled || call->raw_enabled) | ||
304 | buf = "1\n"; | ||
305 | else | ||
306 | buf = "0\n"; | ||
307 | |||
308 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2); | ||
309 | } | ||
310 | |||
311 | static ssize_t | ||
312 | event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt, | ||
313 | loff_t *ppos) | ||
314 | { | ||
315 | struct ftrace_event_call *call = filp->private_data; | ||
316 | char buf[64]; | ||
317 | unsigned long val; | ||
318 | int ret; | ||
319 | |||
320 | if (cnt >= sizeof(buf)) | ||
321 | return -EINVAL; | ||
322 | |||
323 | if (copy_from_user(&buf, ubuf, cnt)) | ||
324 | return -EFAULT; | ||
325 | |||
326 | buf[cnt] = 0; | ||
327 | |||
328 | ret = strict_strtoul(buf, 10, &val); | ||
329 | if (ret < 0) | ||
330 | return ret; | ||
331 | |||
332 | switch (val) { | ||
333 | case 0: | ||
334 | case 1: | ||
335 | mutex_lock(&event_mutex); | ||
336 | ftrace_event_enable_disable(call, val); | ||
337 | mutex_unlock(&event_mutex); | ||
338 | break; | ||
339 | |||
340 | default: | ||
341 | return -EINVAL; | ||
342 | } | ||
343 | |||
344 | *ppos += cnt; | ||
345 | |||
346 | return cnt; | ||
347 | } | ||
348 | |||
349 | static ssize_t | ||
350 | event_type_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
351 | loff_t *ppos) | ||
352 | { | ||
353 | struct ftrace_event_call *call = filp->private_data; | ||
354 | char buf[16]; | ||
355 | int r = 0; | ||
356 | |||
357 | if (call->type & TRACE_EVENT_TYPE_PRINTF) | ||
358 | r += sprintf(buf, "printf\n"); | ||
359 | |||
360 | if (call->type & TRACE_EVENT_TYPE_RAW) | ||
361 | r += sprintf(buf+r, "raw\n"); | ||
362 | |||
363 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
364 | } | ||
365 | |||
366 | static ssize_t | ||
367 | event_type_write(struct file *filp, const char __user *ubuf, size_t cnt, | ||
368 | loff_t *ppos) | ||
369 | { | ||
370 | struct ftrace_event_call *call = filp->private_data; | ||
371 | char buf[64]; | ||
372 | |||
373 | /* | ||
374 | * If there's only one type, we can't change it. | ||
375 | * Currently we always have the printf type, and we |
376 | * may or may not have the raw type. |
377 | * |
378 | * This check is redundant: the file should already be |
379 | * read-only if this is the case. |
380 | */ | ||
381 | |||
382 | if (!call->raw_init) | ||
383 | return -EPERM; | ||
384 | |||
385 | if (cnt >= sizeof(buf)) | ||
386 | return -EINVAL; | ||
387 | |||
388 | if (copy_from_user(&buf, ubuf, cnt)) | ||
389 | return -EFAULT; | ||
390 | |||
391 | buf[cnt] = 0; | ||
392 | |||
393 | if (!strncmp(buf, "printf", 6) && | ||
394 | (!buf[6] || isspace(buf[6]))) { | ||
395 | |||
396 | call->type = TRACE_EVENT_TYPE_PRINTF; | ||
397 | |||
398 | /* | ||
399 | * If raw is enabled, then disable it and enable |
400 | * the printf type. |
401 | */ | ||
402 | if (call->raw_enabled) { | ||
403 | call->raw_enabled = 0; | ||
404 | call->raw_unreg(); | ||
405 | |||
406 | call->enabled = 1; | ||
407 | call->regfunc(); | ||
408 | } | ||
409 | |||
410 | } else if (!strncmp(buf, "raw", 3) && | ||
411 | (!buf[3] || isspace(buf[3]))) { | ||
412 | |||
413 | call->type = TRACE_EVENT_TYPE_RAW; | ||
414 | |||
415 | /* | ||
416 | * If printf enabled, the disable it and enable | ||
417 | * raw type. | ||
418 | */ | ||
419 | if (call->enabled) { | ||
420 | call->enabled = 0; | ||
421 | call->unregfunc(); | ||
422 | |||
423 | call->raw_enabled = 1; | ||
424 | call->raw_reg(); | ||
425 | } | ||
426 | } else | ||
427 | return -EINVAL; | ||
428 | |||
429 | *ppos += cnt; | ||
430 | |||
431 | return cnt; | ||
432 | } | ||
433 | |||
434 | static ssize_t | ||
435 | event_available_types_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
436 | loff_t *ppos) | ||
437 | { | ||
438 | struct ftrace_event_call *call = filp->private_data; | ||
439 | char buf[16]; | ||
440 | int r = 0; | ||
441 | |||
442 | r += sprintf(buf, "printf\n"); | ||
443 | |||
444 | if (call->raw_init) | ||
445 | r += sprintf(buf+r, "raw\n"); | ||
446 | |||
447 | return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); | ||
448 | } | ||
449 | |||
450 | #undef FIELD | ||
451 | #define FIELD(type, name) \ | ||
452 | #type, #name, offsetof(typeof(field), name), sizeof(field.name) | ||
453 | |||
454 | static int trace_write_header(struct trace_seq *s) | ||
455 | { | ||
456 | struct trace_entry field; | ||
457 | |||
458 | /* struct trace_entry */ | ||
459 | return trace_seq_printf(s, | ||
460 | "\tfield:%s %s;\toffset:%lu;\tsize:%lu;\n" | ||
461 | "\tfield:%s %s;\toffset:%lu;\tsize:%lu;\n" | ||
462 | "\tfield:%s %s;\toffset:%lu;\tsize:%lu;\n" | ||
463 | "\tfield:%s %s;\toffset:%lu;\tsize:%lu;\n" | ||
464 | "\tfield:%s %s;\toffset:%lu;\tsize:%lu;\n" | ||
465 | "\n", | ||
466 | FIELD(unsigned char, type), | ||
467 | FIELD(unsigned char, flags), | ||
468 | FIELD(unsigned char, preempt_count), | ||
469 | FIELD(int, pid), | ||
470 | FIELD(int, tgid)); | ||
471 | } | ||
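To make FIELD() concrete: FIELD(int, pid) expands to the four arguments "int", "pid", offsetof(struct trace_entry, pid) and sizeof(field.pid), so the header comes out as lines of roughly this shape (offsets are illustrative and depend on architecture padding):

	field:unsigned char type;	offset:0;	size:1;
	field:unsigned char flags;	offset:1;	size:1;
	field:unsigned char preempt_count;	offset:2;	size:1;
	field:int pid;	offset:4;	size:4;
	field:int tgid;	offset:8;	size:4;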
472 | static ssize_t | ||
473 | event_format_read(struct file *filp, char __user *ubuf, size_t cnt, | ||
474 | loff_t *ppos) | ||
475 | { | ||
476 | struct ftrace_event_call *call = filp->private_data; | ||
477 | struct trace_seq *s; | ||
478 | char *buf; | ||
479 | int r; | ||
480 | |||
481 | if (*ppos) |
482 | return 0; |
483 |||
484 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
485 | if (!s) |
486 | return -ENOMEM; |
487 |||
488 | trace_seq_init(s); |
489 | |||
490 | /* If any of the first writes fail, so will the show_format. */ | ||
491 | |||
492 | trace_seq_printf(s, "name: %s\n", call->name); | ||
493 | trace_seq_printf(s, "ID: %d\n", call->id); | ||
494 | trace_seq_printf(s, "format:\n"); | ||
495 | trace_write_header(s); | ||
496 | |||
497 | r = call->show_format(s); | ||
498 | if (!r) { | ||
499 | /* | ||
500 | * ug! The format output is bigger than a PAGE!! | ||
501 | */ | ||
502 | buf = "FORMAT TOO BIG\n"; | ||
503 | r = simple_read_from_buffer(ubuf, cnt, ppos, | ||
504 | buf, strlen(buf)); | ||
505 | goto out; | ||
506 | } | ||
507 | |||
508 | r = simple_read_from_buffer(ubuf, cnt, ppos, | ||
509 | s->buffer, s->len); | ||
510 | out: | ||
511 | kfree(s); | ||
512 | return r; | ||
513 | } | ||
514 | |||
515 | static const struct seq_operations show_event_seq_ops = { | ||
516 | .start = t_start, | ||
517 | .next = t_next, | ||
518 | .show = t_show, | ||
519 | .stop = t_stop, | ||
520 | }; | ||
521 | |||
522 | static const struct seq_operations show_set_event_seq_ops = { | ||
523 | .start = s_start, | ||
524 | .next = s_next, | ||
525 | .show = t_show, | ||
526 | .stop = t_stop, | ||
527 | }; | ||
528 | |||
529 | static const struct file_operations ftrace_avail_fops = { | ||
530 | .open = ftrace_event_seq_open, | ||
531 | .read = seq_read, | ||
532 | .llseek = seq_lseek, | ||
533 | .release = seq_release, | ||
534 | }; | ||
535 | |||
536 | static const struct file_operations ftrace_set_event_fops = { | ||
537 | .open = ftrace_event_seq_open, | ||
538 | .read = seq_read, | ||
539 | .write = ftrace_event_write, | ||
540 | .llseek = seq_lseek, | ||
541 | .release = seq_release, | ||
542 | }; | ||
543 | |||
544 | static const struct file_operations ftrace_enable_fops = { | ||
545 | .open = tracing_open_generic, | ||
546 | .read = event_enable_read, | ||
547 | .write = event_enable_write, | ||
548 | }; | ||
549 | |||
550 | static const struct file_operations ftrace_type_fops = { | ||
551 | .open = tracing_open_generic, | ||
552 | .read = event_type_read, | ||
553 | .write = event_type_write, | ||
554 | }; | ||
555 | |||
556 | static const struct file_operations ftrace_available_types_fops = { | ||
557 | .open = tracing_open_generic, | ||
558 | .read = event_available_types_read, | ||
559 | }; | ||
560 | |||
561 | static const struct file_operations ftrace_event_format_fops = { | ||
562 | .open = tracing_open_generic, | ||
563 | .read = event_format_read, | ||
564 | }; | ||
565 | |||
566 | static struct dentry *event_trace_events_dir(void) | ||
567 | { | ||
568 | static struct dentry *d_tracer; | ||
569 | static struct dentry *d_events; | ||
570 | |||
571 | if (d_events) | ||
572 | return d_events; | ||
573 | |||
574 | d_tracer = tracing_init_dentry(); | ||
575 | if (!d_tracer) | ||
576 | return NULL; | ||
577 | |||
578 | d_events = debugfs_create_dir("events", d_tracer); | ||
579 | if (!d_events) | ||
580 | pr_warning("Could not create debugfs " | ||
581 | "'events' directory\n"); | ||
582 | |||
583 | return d_events; | ||
584 | } | ||
585 | |||
586 | struct event_subsystem { | ||
587 | struct list_head list; | ||
588 | const char *name; | ||
589 | struct dentry *entry; | ||
590 | }; | ||
591 | |||
592 | static LIST_HEAD(event_subsystems); | ||
593 | |||
594 | static struct dentry * | ||
595 | event_subsystem_dir(const char *name, struct dentry *d_events) | ||
596 | { | ||
597 | struct event_subsystem *system; | ||
598 | |||
599 | /* First see if we already created this dir */ |
600 | list_for_each_entry(system, &event_subsystems, list) { | ||
601 | if (strcmp(system->name, name) == 0) | ||
602 | return system->entry; | ||
603 | } | ||
604 | |||
605 | /* need to create new entry */ | ||
606 | system = kmalloc(sizeof(*system), GFP_KERNEL); | ||
607 | if (!system) { | ||
608 | pr_warning("No memory to create event subsystem %s\n", | ||
609 | name); | ||
610 | return d_events; | ||
611 | } | ||
612 | |||
613 | system->entry = debugfs_create_dir(name, d_events); | ||
614 | if (!system->entry) { | ||
615 | pr_warning("Could not create event subsystem %s\n", | ||
616 | name); | ||
617 | kfree(system); | ||
618 | return d_events; | ||
619 | } | ||
620 | |||
621 | system->name = name; | ||
622 | list_add(&system->list, &event_subsystems); | ||
623 | |||
624 | return system->entry; | ||
625 | } | ||
626 | |||
627 | static int | ||
628 | event_create_dir(struct ftrace_event_call *call, struct dentry *d_events) | ||
629 | { | ||
630 | struct dentry *entry; | ||
631 | int ret; | ||
632 | |||
633 | /* | ||
634 | * If the trace point header did not define TRACE_SYSTEM | ||
635 | * then the system would be called "TRACE_SYSTEM". | ||
636 | */ | ||
637 | if (strcmp(call->system, "TRACE_SYSTEM") != 0) | ||
638 | d_events = event_subsystem_dir(call->system, d_events); | ||
639 | |||
640 | if (call->raw_init) { | ||
641 | ret = call->raw_init(); | ||
642 | if (ret < 0) { | ||
643 | pr_warning("Could not initialize trace point" | ||
644 | " events/%s\n", call->name); | ||
645 | return ret; | ||
646 | } | ||
647 | } | ||
648 | |||
649 | /* default the output to printf */ | ||
650 | call->type = TRACE_EVENT_TYPE_PRINTF; | ||
651 | |||
652 | call->dir = debugfs_create_dir(call->name, d_events); | ||
653 | if (!call->dir) { | ||
654 | pr_warning("Could not create debugfs " | ||
655 | "'%s' directory\n", call->name); | ||
656 | return -1; | ||
657 | } | ||
658 | |||
659 | entry = debugfs_create_file("enable", 0644, call->dir, call, | ||
660 | &ftrace_enable_fops); | ||
661 | if (!entry) | ||
662 | pr_warning("Could not create debugfs " | ||
663 | "'%s/enable' entry\n", call->name); | ||
664 | |||
665 | /* Only let type be writable if we can change it */ | ||
666 | entry = debugfs_create_file("type", | ||
667 | call->raw_init ? 0644 : 0444, | ||
668 | call->dir, call, | ||
669 | &ftrace_type_fops); | ||
670 | if (!entry) | ||
671 | pr_warning("Could not create debugfs " | ||
672 | "'%s/type' entry\n", call->name); | ||
673 | |||
674 | entry = debugfs_create_file("available_types", 0444, call->dir, call, | ||
675 | &ftrace_available_types_fops); | ||
676 | if (!entry) | ||
677 | pr_warning("Could not create debugfs " | ||
678 | "'%s/available_types' entry\n", call->name); | ||
679 | |||
680 | /* A trace may not want to export its format */ | ||
681 | if (!call->show_format) | ||
682 | return 0; | ||
683 | |||
684 | entry = debugfs_create_file("format", 0444, call->dir, call, | ||
685 | &ftrace_event_format_fops); | ||
686 | if (!entry) | ||
687 | pr_warning("Could not create debugfs " | ||
688 | "'%s/format' entry\n", call->name); | ||
689 | |||
690 | return 0; | ||
691 | } | ||
692 | |||
693 | static __init int event_trace_init(void) | ||
694 | { | ||
695 | struct ftrace_event_call *call = __start_ftrace_events; | ||
696 | struct dentry *d_tracer; | ||
697 | struct dentry *entry; | ||
698 | struct dentry *d_events; | ||
699 | |||
700 | d_tracer = tracing_init_dentry(); | ||
701 | if (!d_tracer) | ||
702 | return 0; | ||
703 | |||
704 | entry = debugfs_create_file("available_events", 0444, d_tracer, | ||
705 | (void *)&show_event_seq_ops, | ||
706 | &ftrace_avail_fops); | ||
707 | if (!entry) | ||
708 | pr_warning("Could not create debugfs " | ||
709 | "'available_events' entry\n"); | ||
710 | |||
711 | entry = debugfs_create_file("set_event", 0644, d_tracer, | ||
712 | (void *)&show_set_event_seq_ops, | ||
713 | &ftrace_set_event_fops); | ||
714 | if (!entry) | ||
715 | pr_warning("Could not create debugfs " | ||
716 | "'set_event' entry\n"); | ||
717 | |||
718 | d_events = event_trace_events_dir(); | ||
719 | if (!d_events) | ||
720 | return 0; | ||
721 | |||
722 | events_for_each(call) { | ||
723 | /* The linker may leave blanks */ | ||
724 | if (!call->name) | ||
725 | continue; | ||
726 | event_create_dir(call, d_events); | ||
727 | } | ||
728 | |||
729 | return 0; | ||
730 | } | ||
731 | fs_initcall(event_trace_init); | ||
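The initcall above ends up exposing a per-event debugfs tree of the form tracing/events/&lt;system&gt;/&lt;event&gt;/{enable,type,available_types,format}. As a minimal user-space sketch (the mount point and the irq/irq_handler_entry path are assumptions about the running system, not something this patch guarantees):

    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
            int fd;

            /* Path is an assumption: it depends on where debugfs is
             * mounted and on which events the kernel registered. */
            fd = open("/sys/kernel/debug/tracing/events/irq/irq_handler_entry/enable",
                      O_WRONLY);
            if (fd < 0)
                    return 1;

            /* event_enable_write() parses the '1' and attaches the
             * event's probe via its regfunc. */
            if (write(fd, "1", 1) != 1) {
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }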
diff --git a/kernel/trace/trace_events_stage_1.h b/kernel/trace/trace_events_stage_1.h new file mode 100644 index 000000000000..3830a731424c --- /dev/null +++ b/kernel/trace/trace_events_stage_1.h | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Stage 1 of the trace events. | ||
3 | * | ||
4 | * Override the macros in <trace/trace_event_types.h> to include the following: | ||
5 | * | ||
6 | * struct ftrace_raw_<call> { | ||
7 | * struct trace_entry ent; | ||
8 | * <type> <item>; | ||
9 | * [...] | ||
10 | * }; | ||
11 | * | ||
12 | * The <type> <item> is created by the TRACE_FIELD(type, item, assign) | ||
13 | * macro. We simply do "type item;", and that will create the fields | ||
14 | * in the structure. | ||
15 | */ | ||
16 | |||
17 | #undef TRACE_FORMAT | ||
18 | #define TRACE_FORMAT(call, proto, args, fmt) | ||
19 | |||
20 | #undef TRACE_EVENT_FORMAT | ||
21 | #define TRACE_EVENT_FORMAT(name, proto, args, fmt, tstruct, tpfmt) \ | ||
22 | struct ftrace_raw_##name { \ | ||
23 | struct trace_entry ent; \ | ||
24 | tstruct \ | ||
25 | }; \ | ||
26 | static struct ftrace_event_call event_##name | ||
27 | |||
28 | #undef TRACE_STRUCT | ||
29 | #define TRACE_STRUCT(args...) args | ||
30 | |||
31 | #define TRACE_FIELD(type, item, assign) \ | ||
32 | type item; | ||
33 | #define TRACE_FIELD_SPECIAL(type_item, item, cmd) \ | ||
34 | type_item; | ||
35 | |||
36 | #include <trace/trace_event_types.h> | ||
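To make the expansion concrete, here is what stage 1 would emit for a hypothetical event; the name foo_bar and its two fields are invented for illustration:

    /* Hypothetical definition in <trace/trace_event_types.h>:
     *
     *   TRACE_EVENT_FORMAT(foo_bar, TPPROTO(int a, int b), TPARGS(a, b),
     *           TPFMT("a=%d b=%d", a, b),
     *           TRACE_STRUCT(
     *                   TRACE_FIELD(int, a, a)
     *                   TRACE_FIELD(int, b, b)
     *           ),
     *           TPRAWFMT("a %d b %d"));
     *
     * After the stage 1 overrides, the preprocessor emits: */

    struct ftrace_raw_foo_bar {
            struct trace_entry ent;
            int a;
            int b;
    };
    static struct ftrace_event_call event_foo_bar;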
diff --git a/kernel/trace/trace_events_stage_2.h b/kernel/trace/trace_events_stage_2.h new file mode 100644 index 000000000000..b1cebba1d9b4 --- /dev/null +++ b/kernel/trace/trace_events_stage_2.h | |||
@@ -0,0 +1,130 @@ | |||
1 | /* | ||
2 | * Stage 2 of the trace events. | ||
3 | * | ||
4 | * Override the macros in <trace/trace_event_types.h> to include the following: | ||
5 | * | ||
6 | * enum print_line_t | ||
7 | * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags) | ||
8 | * { | ||
9 | * struct trace_seq *s = &iter->seq; | ||
10 | * struct ftrace_raw_<call> *field; <-- defined in stage 1 | ||
11 | * struct trace_entry *entry; | ||
12 | * int ret; | ||
13 | * | ||
14 | * entry = iter->ent; | ||
15 | * | ||
16 | * if (entry->type != event_<call>.id) { | ||
17 | * WARN_ON_ONCE(1); | ||
18 | * return TRACE_TYPE_UNHANDLED; | ||
19 | * } | ||
20 | * | ||
21 | * field = (typeof(field))entry; | ||
22 | * | ||
23 | * ret = trace_seq_printf(s, <TPRAWFMT> "%s", <ARGS> "\n"); | ||
24 | * if (!ret) | ||
25 | * return TRACE_TYPE_PARTIAL_LINE; | ||
26 | * | ||
27 | * return TRACE_TYPE_HANDLED; | ||
28 | * } | ||
29 | * | ||
30 | * This is the method used to print the raw event to the trace | ||
31 | * output format. Note, this is not needed if the data is read | ||
32 | * in binary. | ||
33 | */ | ||
34 | |||
35 | #undef TRACE_STRUCT | ||
36 | #define TRACE_STRUCT(args...) args | ||
37 | |||
38 | #undef TRACE_FIELD | ||
39 | #define TRACE_FIELD(type, item, assign) \ | ||
40 | field->item, | ||
41 | |||
42 | #undef TRACE_FIELD_SPECIAL | ||
43 | #define TRACE_FIELD_SPECIAL(type_item, item, cmd) \ | ||
44 | field->item, | ||
45 | |||
46 | |||
47 | #undef TPRAWFMT | ||
48 | #define TPRAWFMT(args...) args | ||
49 | |||
50 | #undef TRACE_EVENT_FORMAT | ||
51 | #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ | ||
52 | enum print_line_t \ | ||
53 | ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \ | ||
54 | { \ | ||
55 | struct trace_seq *s = &iter->seq; \ | ||
56 | struct ftrace_raw_##call *field; \ | ||
57 | struct trace_entry *entry; \ | ||
58 | int ret; \ | ||
59 | \ | ||
60 | entry = iter->ent; \ | ||
61 | \ | ||
62 | if (entry->type != event_##call.id) { \ | ||
63 | WARN_ON_ONCE(1); \ | ||
64 | return TRACE_TYPE_UNHANDLED; \ | ||
65 | } \ | ||
66 | \ | ||
67 | field = (typeof(field))entry; \ | ||
68 | \ | ||
69 | ret = trace_seq_printf(s, tpfmt "%s", tstruct "\n"); \ | ||
70 | if (!ret) \ | ||
71 | return TRACE_TYPE_PARTIAL_LINE; \ | ||
72 | \ | ||
73 | return TRACE_TYPE_HANDLED; \ | ||
74 | } | ||
75 | |||
76 | #include <trace/trace_event_types.h> | ||
77 | |||
78 | /* | ||
79 | * Set up the display format of the trace point. | ||
80 | * | ||
81 | * int | ||
82 | * ftrace_format_##call(struct trace_seq *s) | ||
83 | * { | ||
84 | * struct ftrace_raw_##call field; | ||
85 | * int ret; | ||
86 | * | ||
87 | * ret = trace_seq_printf(s, #type " " #item ";" | ||
88 | * " size:%d; offset:%d;\n", | ||
89 | * sizeof(field.type), | ||
90 | * offsetof(struct ftrace_raw_##call, | ||
91 | * item)); | ||
92 | * | ||
93 | * } | ||
94 | */ | ||
95 | |||
96 | #undef TRACE_FIELD | ||
97 | #define TRACE_FIELD(type, item, assign) \ | ||
98 | ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \ | ||
99 | "offset:%lu;\tsize:%lu;\n", \ | ||
100 | offsetof(typeof(field), item), \ | ||
101 | sizeof(field.item)); \ | ||
102 | if (!ret) \ | ||
103 | return 0; | ||
104 | |||
105 | |||
106 | #undef TRACE_FIELD_SPECIAL | ||
107 | #define TRACE_FIELD_SPECIAL(type_item, item, cmd) \ | ||
108 | ret = trace_seq_printf(s, "\tfield special:" #type_item ";\t" \ | ||
109 | "offset:%lu;\tsize:%lu;\n", \ | ||
110 | offsetof(typeof(field), item), \ | ||
111 | sizeof(field.item)); \ | ||
112 | if (!ret) \ | ||
113 | return 0; | ||
114 | |||
115 | #undef TRACE_EVENT_FORMAT | ||
116 | #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ | ||
117 | int \ | ||
118 | ftrace_format_##call(struct trace_seq *s) \ | ||
119 | { \ | ||
120 | struct ftrace_raw_##call field; \ | ||
121 | int ret; \ | ||
122 | \ | ||
123 | tstruct; \ | ||
124 | \ | ||
125 | ret = trace_seq_printf(s, "\nprint fmt: \"%s\"\n", tpfmt); \ | ||
126 | \ | ||
127 | return ret; \ | ||
128 | } | ||
129 | |||
130 | #include <trace/trace_event_types.h> | ||
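Continuing the hypothetical foo_bar event from the stage 1 sketch, the ftrace_format_##call expansion here would come out to roughly the following (the exact offsets depend on the size of struct trace_entry):

    int ftrace_format_foo_bar(struct trace_seq *s)
    {
            struct ftrace_raw_foo_bar field;
            int ret;

            /* One line per TRACE_FIELD() in the event definition */
            ret = trace_seq_printf(s, "\tfield:int a;\toffset:%lu;\tsize:%lu;\n",
                                   offsetof(typeof(field), a), sizeof(field.a));
            if (!ret)
                    return 0;
            ret = trace_seq_printf(s, "\tfield:int b;\toffset:%lu;\tsize:%lu;\n",
                                   offsetof(typeof(field), b), sizeof(field.b));
            if (!ret)
                    return 0;

            ret = trace_seq_printf(s, "\nprint fmt: \"%s\"\n", "a %d b %d");

            return ret;
    }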
diff --git a/kernel/trace/trace_events_stage_3.h b/kernel/trace/trace_events_stage_3.h new file mode 100644 index 000000000000..2c8d76c7dbed --- /dev/null +++ b/kernel/trace/trace_events_stage_3.h | |||
@@ -0,0 +1,235 @@ | |||
1 | /* | ||
2 | * Stage 3 of the trace events. | ||
3 | * | ||
4 | * Override the macros in <trace/trace_event_types.h> to include the following: | ||
5 | * | ||
6 | * static void ftrace_event_<call>(proto) | ||
7 | * { | ||
8 | * event_trace_printk(_RET_IP_, "<call>: " <fmt>); | ||
9 | * } | ||
10 | * | ||
11 | * static int ftrace_reg_event_<call>(void) | ||
12 | * { | ||
13 | * int ret; | ||
14 | * | ||
15 | * ret = register_trace_<call>(ftrace_event_<call>); | ||
16 | * if (ret) | ||
17 | * pr_info("event trace: Could not activate trace point " | ||
18 | * "probe to <call>"); | ||
19 | * return ret; | ||
20 | * } | ||
21 | * | ||
22 | * static void ftrace_unreg_event_<call>(void) | ||
23 | * { | ||
24 | * unregister_trace_<call>(ftrace_event_<call>); | ||
25 | * } | ||
26 | * | ||
27 | * For those macros defined with TRACE_FORMAT: | ||
28 | * | ||
29 | * static struct ftrace_event_call __used | ||
30 | * __attribute__((__aligned__(4))) | ||
31 | * __attribute__((section("_ftrace_events"))) event_<call> = { | ||
32 | * .name = "<call>", | ||
33 | * .regfunc = ftrace_reg_event_<call>, | ||
34 | * .unregfunc = ftrace_unreg_event_<call>, | ||
35 | * } | ||
36 | * | ||
37 | * | ||
38 | * For those macros defined with TRACE_EVENT_FORMAT: | ||
39 | * | ||
40 | * static struct ftrace_event_call event_<call>; | ||
41 | * | ||
42 | * static void ftrace_raw_event_<call>(proto) | ||
43 | * { | ||
44 | * struct ring_buffer_event *event; | ||
45 | * struct ftrace_raw_<call> *entry; <-- defined in stage 1 | ||
46 | * unsigned long irq_flags; | ||
47 | * int pc; | ||
48 | * | ||
49 | * local_save_flags(irq_flags); | ||
50 | * pc = preempt_count(); | ||
51 | * | ||
52 | * event = trace_current_buffer_lock_reserve(event_<call>.id, | ||
53 | * sizeof(struct ftrace_raw_<call>), | ||
54 | * irq_flags, pc); | ||
55 | * if (!event) | ||
56 | * return; | ||
57 | * entry = ring_buffer_event_data(event); | ||
58 | * | ||
59 | * <tstruct>; <-- Here we assign the entries by the TRACE_FIELD. | ||
60 | * | ||
61 | * trace_current_buffer_unlock_commit(event, irq_flags, pc); | ||
62 | * } | ||
63 | * | ||
64 | * static int ftrace_raw_reg_event_<call>(void) | ||
65 | * { | ||
66 | * int ret; | ||
67 | * | ||
68 | * ret = register_trace_<call>(ftrace_raw_event_<call>); | ||
69 | * if (ret) | ||
70 | * pr_info("event trace: Could not activate trace point " | ||
71 | * "probe to <call>"); | ||
72 | * return ret; | ||
73 | * } | ||
74 | * | ||
75 | * static void ftrace_raw_unreg_event_<call>(void) | ||
76 | * { | ||
77 | * unregister_trace_<call>(ftrace_raw_event_<call>); | ||
78 | * } | ||
79 | * | ||
80 | * static struct trace_event ftrace_event_type_<call> = { | ||
81 | * .trace = ftrace_raw_output_<call>, <-- stage 2 | ||
82 | * }; | ||
83 | * | ||
84 | * static int ftrace_raw_init_event_<call>(void) | ||
85 | * { | ||
86 | * int id; | ||
87 | * | ||
88 | * id = register_ftrace_event(&ftrace_event_type_<call>); | ||
89 | * if (!id) | ||
90 | * return -ENODEV; | ||
91 | * event_<call>.id = id; | ||
92 | * return 0; | ||
93 | * } | ||
94 | * | ||
95 | * static struct ftrace_event_call __used | ||
96 | * __attribute__((__aligned__(4))) | ||
97 | * __attribute__((section("_ftrace_events"))) event_<call> = { | ||
98 | * .name = "<call>", | ||
99 | * .regfunc = ftrace_reg_event_<call>, | ||
100 | * .unregfunc = ftrace_unreg_event_<call>, | ||
101 | * .raw_init = ftrace_raw_init_event_<call>, | ||
102 | * .raw_reg = ftrace_raw_reg_event_<call>, | ||
103 | * .raw_unreg = ftrace_raw_unreg_event_<call>, | ||
104 | * .show_format = ftrace_format_<call>, | ||
105 | * } | ||
106 | * | ||
107 | */ | ||
108 | |||
109 | #undef TPFMT | ||
110 | #define TPFMT(fmt, args...) fmt "\n", ##args | ||
111 | |||
112 | #define _TRACE_FORMAT(call, proto, args, fmt) \ | ||
113 | static void ftrace_event_##call(proto) \ | ||
114 | { \ | ||
115 | event_trace_printk(_RET_IP_, #call ": " fmt); \ | ||
116 | } \ | ||
117 | \ | ||
118 | static int ftrace_reg_event_##call(void) \ | ||
119 | { \ | ||
120 | int ret; \ | ||
121 | \ | ||
122 | ret = register_trace_##call(ftrace_event_##call); \ | ||
123 | if (ret) \ | ||
124 | pr_info("event trace: Could not activate trace point " \ | ||
125 | "probe to " #call "\n"); \ | ||
126 | return ret; \ | ||
127 | } \ | ||
128 | \ | ||
129 | static void ftrace_unreg_event_##call(void) \ | ||
130 | { \ | ||
131 | unregister_trace_##call(ftrace_event_##call); \ | ||
132 | } \ | ||
133 | |||
134 | |||
135 | #undef TRACE_FORMAT | ||
136 | #define TRACE_FORMAT(call, proto, args, fmt) \ | ||
137 | _TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \ | ||
138 | static struct ftrace_event_call __used \ | ||
139 | __attribute__((__aligned__(4))) \ | ||
140 | __attribute__((section("_ftrace_events"))) event_##call = { \ | ||
141 | .name = #call, \ | ||
142 | .system = STR(TRACE_SYSTEM), \ | ||
143 | .regfunc = ftrace_reg_event_##call, \ | ||
144 | .unregfunc = ftrace_unreg_event_##call, \ | ||
145 | } | ||
146 | |||
147 | #undef TRACE_FIELD | ||
148 | #define TRACE_FIELD(type, item, assign)\ | ||
149 | entry->item = assign; | ||
154 | |||
155 | #undef TPCMD | ||
156 | #define TPCMD(cmd...) cmd | ||
157 | |||
158 | #undef TRACE_ENTRY | ||
159 | #define TRACE_ENTRY entry | ||
160 | |||
161 | #undef TRACE_FIELD_SPECIAL | ||
162 | #define TRACE_FIELD_SPECIAL(type_item, item, cmd) \ | ||
163 | cmd; | ||
164 | |||
165 | #undef TRACE_EVENT_FORMAT | ||
166 | #define TRACE_EVENT_FORMAT(call, proto, args, fmt, tstruct, tpfmt) \ | ||
167 | _TRACE_FORMAT(call, PARAMS(proto), PARAMS(args), PARAMS(fmt)) \ | ||
168 | \ | ||
169 | static struct ftrace_event_call event_##call; \ | ||
170 | \ | ||
171 | static void ftrace_raw_event_##call(proto) \ | ||
172 | { \ | ||
173 | struct ring_buffer_event *event; \ | ||
174 | struct ftrace_raw_##call *entry; \ | ||
175 | unsigned long irq_flags; \ | ||
176 | int pc; \ | ||
177 | \ | ||
178 | local_save_flags(irq_flags); \ | ||
179 | pc = preempt_count(); \ | ||
180 | \ | ||
181 | event = trace_current_buffer_lock_reserve(event_##call.id, \ | ||
182 | sizeof(struct ftrace_raw_##call), \ | ||
183 | irq_flags, pc); \ | ||
184 | if (!event) \ | ||
185 | return; \ | ||
186 | entry = ring_buffer_event_data(event); \ | ||
187 | \ | ||
188 | tstruct; \ | ||
189 | \ | ||
190 | trace_current_buffer_unlock_commit(event, irq_flags, pc); \ | ||
191 | } \ | ||
192 | \ | ||
193 | static int ftrace_raw_reg_event_##call(void) \ | ||
194 | { \ | ||
195 | int ret; \ | ||
196 | \ | ||
197 | ret = register_trace_##call(ftrace_raw_event_##call); \ | ||
198 | if (ret) \ | ||
199 | pr_info("event trace: Could not activate trace point " \ | ||
200 | "probe to " #call "\n"); \ | ||
201 | return ret; \ | ||
202 | } \ | ||
203 | \ | ||
204 | static void ftrace_raw_unreg_event_##call(void) \ | ||
205 | { \ | ||
206 | unregister_trace_##call(ftrace_raw_event_##call); \ | ||
207 | } \ | ||
208 | \ | ||
209 | static struct trace_event ftrace_event_type_##call = { \ | ||
210 | .trace = ftrace_raw_output_##call, \ | ||
211 | }; \ | ||
212 | \ | ||
213 | static int ftrace_raw_init_event_##call(void) \ | ||
214 | { \ | ||
215 | int id; \ | ||
216 | \ | ||
217 | id = register_ftrace_event(&ftrace_event_type_##call); \ | ||
218 | if (!id) \ | ||
219 | return -ENODEV; \ | ||
220 | event_##call.id = id; \ | ||
221 | return 0; \ | ||
222 | } \ | ||
223 | \ | ||
224 | static struct ftrace_event_call __used \ | ||
225 | __attribute__((__aligned__(4))) \ | ||
226 | __attribute__((section("_ftrace_events"))) event_##call = { \ | ||
227 | .name = #call, \ | ||
228 | .system = STR(TRACE_SYSTEM), \ | ||
229 | .regfunc = ftrace_reg_event_##call, \ | ||
230 | .unregfunc = ftrace_unreg_event_##call, \ | ||
231 | .raw_init = ftrace_raw_init_event_##call, \ | ||
232 | .raw_reg = ftrace_raw_reg_event_##call, \ | ||
233 | .raw_unreg = ftrace_raw_unreg_event_##call, \ | ||
234 | .show_format = ftrace_format_##call, \ | ||
235 | } | ||
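All three stage headers are included over the same event definitions, so a single TRACE_EVENT_FORMAT() yields the stage 1 struct, the stage 2 output/format helpers, and the stage 3 probes. Continuing the hypothetical foo_bar event, the raw probe generated here would expand to roughly:

    static void ftrace_raw_event_foo_bar(int a, int b)
    {
            struct ring_buffer_event *event;
            struct ftrace_raw_foo_bar *entry;
            unsigned long irq_flags;
            int pc;

            local_save_flags(irq_flags);
            pc = preempt_count();

            event = trace_current_buffer_lock_reserve(event_foo_bar.id,
                                                      sizeof(struct ftrace_raw_foo_bar),
                                                      irq_flags, pc);
            if (!event)
                    return;
            entry = ring_buffer_event_data(event);

            /* TRACE_FIELD(int, a, a) and TRACE_FIELD(int, b, b) expand to: */
            entry->a = a;
            entry->b = b;

            trace_current_buffer_unlock_commit(event, irq_flags, pc);
    }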
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c index 9236d7e25a16..c9a0b7df44ff 100644 --- a/kernel/trace/trace_functions.c +++ b/kernel/trace/trace_functions.c | |||
@@ -9,6 +9,7 @@ | |||
9 | * Copyright (C) 2004-2006 Ingo Molnar | 9 | * Copyright (C) 2004-2006 Ingo Molnar |
10 | * Copyright (C) 2004 William Lee Irwin III | 10 | * Copyright (C) 2004 William Lee Irwin III |
11 | */ | 11 | */ |
12 | #include <linux/ring_buffer.h> | ||
12 | #include <linux/debugfs.h> | 13 | #include <linux/debugfs.h> |
13 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
14 | #include <linux/ftrace.h> | 15 | #include <linux/ftrace.h> |
@@ -16,52 +17,388 @@ | |||
16 | 17 | ||
17 | #include "trace.h" | 18 | #include "trace.h" |
18 | 19 | ||
19 | static void start_function_trace(struct trace_array *tr) | 20 | /* function tracing enabled */ |
21 | static int ftrace_function_enabled; | ||
22 | |||
23 | static struct trace_array *func_trace; | ||
24 | |||
25 | static void tracing_start_function_trace(void); | ||
26 | static void tracing_stop_function_trace(void); | ||
27 | |||
28 | static int function_trace_init(struct trace_array *tr) | ||
20 | { | 29 | { |
30 | func_trace = tr; | ||
21 | tr->cpu = get_cpu(); | 31 | tr->cpu = get_cpu(); |
22 | tracing_reset_online_cpus(tr); | ||
23 | put_cpu(); | 32 | put_cpu(); |
24 | 33 | ||
25 | tracing_start_cmdline_record(); | 34 | tracing_start_cmdline_record(); |
26 | tracing_start_function_trace(); | 35 | tracing_start_function_trace(); |
36 | return 0; | ||
27 | } | 37 | } |
28 | 38 | ||
29 | static void stop_function_trace(struct trace_array *tr) | 39 | static void function_trace_reset(struct trace_array *tr) |
30 | { | 40 | { |
31 | tracing_stop_function_trace(); | 41 | tracing_stop_function_trace(); |
32 | tracing_stop_cmdline_record(); | 42 | tracing_stop_cmdline_record(); |
33 | } | 43 | } |
34 | 44 | ||
35 | static int function_trace_init(struct trace_array *tr) | 45 | static void function_trace_start(struct trace_array *tr) |
36 | { | 46 | { |
37 | start_function_trace(tr); | 47 | tracing_reset_online_cpus(tr); |
38 | return 0; | ||
39 | } | 48 | } |
40 | 49 | ||
41 | static void function_trace_reset(struct trace_array *tr) | 50 | static void |
51 | function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) | ||
52 | { | ||
53 | struct trace_array *tr = func_trace; | ||
54 | struct trace_array_cpu *data; | ||
55 | unsigned long flags; | ||
56 | long disabled; | ||
57 | int cpu, resched; | ||
58 | int pc; | ||
59 | |||
60 | if (unlikely(!ftrace_function_enabled)) | ||
61 | return; | ||
62 | |||
63 | pc = preempt_count(); | ||
64 | resched = ftrace_preempt_disable(); | ||
65 | local_save_flags(flags); | ||
66 | cpu = raw_smp_processor_id(); | ||
67 | data = tr->data[cpu]; | ||
68 | disabled = atomic_inc_return(&data->disabled); | ||
69 | |||
70 | if (likely(disabled == 1)) | ||
71 | trace_function(tr, ip, parent_ip, flags, pc); | ||
72 | |||
73 | atomic_dec(&data->disabled); | ||
74 | ftrace_preempt_enable(resched); | ||
75 | } | ||
76 | |||
77 | static void | ||
78 | function_trace_call(unsigned long ip, unsigned long parent_ip) | ||
42 | { | 79 | { |
43 | stop_function_trace(tr); | 80 | struct trace_array *tr = func_trace; |
81 | struct trace_array_cpu *data; | ||
82 | unsigned long flags; | ||
83 | long disabled; | ||
84 | int cpu; | ||
85 | int pc; | ||
86 | |||
87 | if (unlikely(!ftrace_function_enabled)) | ||
88 | return; | ||
89 | |||
90 | /* | ||
91 | * Need to use raw, since this must be called before the | ||
92 | * recursive protection is performed. | ||
93 | */ | ||
94 | local_irq_save(flags); | ||
95 | cpu = raw_smp_processor_id(); | ||
96 | data = tr->data[cpu]; | ||
97 | disabled = atomic_inc_return(&data->disabled); | ||
98 | |||
99 | if (likely(disabled == 1)) { | ||
100 | pc = preempt_count(); | ||
101 | trace_function(tr, ip, parent_ip, flags, pc); | ||
102 | } | ||
103 | |||
104 | atomic_dec(&data->disabled); | ||
105 | local_irq_restore(flags); | ||
44 | } | 106 | } |
45 | 107 | ||
46 | static void function_trace_start(struct trace_array *tr) | 108 | static void |
109 | function_stack_trace_call(unsigned long ip, unsigned long parent_ip) | ||
47 | { | 110 | { |
48 | tracing_reset_online_cpus(tr); | 111 | struct trace_array *tr = func_trace; |
112 | struct trace_array_cpu *data; | ||
113 | unsigned long flags; | ||
114 | long disabled; | ||
115 | int cpu; | ||
116 | int pc; | ||
117 | |||
118 | if (unlikely(!ftrace_function_enabled)) | ||
119 | return; | ||
120 | |||
121 | /* | ||
122 | * Need to use raw, since this must be called before the | ||
123 | * recursive protection is performed. | ||
124 | */ | ||
125 | local_irq_save(flags); | ||
126 | cpu = raw_smp_processor_id(); | ||
127 | data = tr->data[cpu]; | ||
128 | disabled = atomic_inc_return(&data->disabled); | ||
129 | |||
130 | if (likely(disabled == 1)) { | ||
131 | pc = preempt_count(); | ||
132 | trace_function(tr, ip, parent_ip, flags, pc); | ||
133 | /* | ||
134 | * skip over 5 funcs: | ||
135 | * __ftrace_trace_stack, | ||
136 | * __trace_stack, | ||
137 | * function_stack_trace_call, | ||
138 | * ftrace_list_func, | ||
139 | * ftrace_call | ||
140 | */ | ||
141 | __trace_stack(tr, flags, 5, pc); | ||
142 | } | ||
143 | |||
144 | atomic_dec(&data->disabled); | ||
145 | local_irq_restore(flags); | ||
146 | } | ||
147 | |||
148 | |||
149 | static struct ftrace_ops trace_ops __read_mostly = | ||
150 | { | ||
151 | .func = function_trace_call, | ||
152 | }; | ||
153 | |||
154 | static struct ftrace_ops trace_stack_ops __read_mostly = | ||
155 | { | ||
156 | .func = function_stack_trace_call, | ||
157 | }; | ||
158 | |||
159 | /* Our tracer options */ | ||
160 | enum { | ||
161 | TRACE_FUNC_OPT_STACK = 0x1, | ||
162 | }; | ||
163 | |||
164 | static struct tracer_opt func_opts[] = { | ||
165 | #ifdef CONFIG_STACKTRACE | ||
166 | { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) }, | ||
167 | #endif | ||
168 | { } /* Always set a last empty entry */ | ||
169 | }; | ||
170 | |||
171 | static struct tracer_flags func_flags = { | ||
172 | .val = 0, /* By default: all flags disabled */ | ||
173 | .opts = func_opts | ||
174 | }; | ||
175 | |||
176 | static void tracing_start_function_trace(void) | ||
177 | { | ||
178 | ftrace_function_enabled = 0; | ||
179 | |||
180 | if (trace_flags & TRACE_ITER_PREEMPTONLY) | ||
181 | trace_ops.func = function_trace_call_preempt_only; | ||
182 | else | ||
183 | trace_ops.func = function_trace_call; | ||
184 | |||
185 | if (func_flags.val & TRACE_FUNC_OPT_STACK) | ||
186 | register_ftrace_function(&trace_stack_ops); | ||
187 | else | ||
188 | register_ftrace_function(&trace_ops); | ||
189 | |||
190 | ftrace_function_enabled = 1; | ||
191 | } | ||
192 | |||
193 | static void tracing_stop_function_trace(void) | ||
194 | { | ||
195 | ftrace_function_enabled = 0; | ||
196 | /* OK if they are not registered */ | ||
197 | unregister_ftrace_function(&trace_stack_ops); | ||
198 | unregister_ftrace_function(&trace_ops); | ||
199 | } | ||
200 | |||
201 | static int func_set_flag(u32 old_flags, u32 bit, int set) | ||
202 | { | ||
203 | if (bit == TRACE_FUNC_OPT_STACK) { | ||
204 | /* do nothing if already set */ | ||
205 | if (!!set == !!(func_flags.val & TRACE_FUNC_OPT_STACK)) | ||
206 | return 0; | ||
207 | |||
208 | if (set) { | ||
209 | unregister_ftrace_function(&trace_ops); | ||
210 | register_ftrace_function(&trace_stack_ops); | ||
211 | } else { | ||
212 | unregister_ftrace_function(&trace_stack_ops); | ||
213 | register_ftrace_function(&trace_ops); | ||
214 | } | ||
215 | |||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | return -EINVAL; | ||
49 | } | 220 | } |
50 | 221 | ||
51 | static struct tracer function_trace __read_mostly = | 222 | static struct tracer function_trace __read_mostly = |
52 | { | 223 | { |
53 | .name = "function", | 224 | .name = "function", |
54 | .init = function_trace_init, | 225 | .init = function_trace_init, |
55 | .reset = function_trace_reset, | 226 | .reset = function_trace_reset, |
56 | .start = function_trace_start, | 227 | .start = function_trace_start, |
228 | .wait_pipe = poll_wait_pipe, | ||
229 | .flags = &func_flags, | ||
230 | .set_flag = func_set_flag, | ||
57 | #ifdef CONFIG_FTRACE_SELFTEST | 231 | #ifdef CONFIG_FTRACE_SELFTEST |
58 | .selftest = trace_selftest_startup_function, | 232 | .selftest = trace_selftest_startup_function, |
59 | #endif | 233 | #endif |
60 | }; | 234 | }; |
61 | 235 | ||
236 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
237 | static void | ||
238 | ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data) | ||
239 | { | ||
240 | long *count = (long *)data; | ||
241 | |||
242 | if (tracing_is_on()) | ||
243 | return; | ||
244 | |||
245 | if (!*count) | ||
246 | return; | ||
247 | |||
248 | if (*count != -1) | ||
249 | (*count)--; | ||
250 | |||
251 | tracing_on(); | ||
252 | } | ||
253 | |||
254 | static void | ||
255 | ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data) | ||
256 | { | ||
257 | long *count = (long *)data; | ||
258 | |||
259 | if (!tracing_is_on()) | ||
260 | return; | ||
261 | |||
262 | if (!*count) | ||
263 | return; | ||
264 | |||
265 | if (*count != -1) | ||
266 | (*count)--; | ||
267 | |||
268 | tracing_off(); | ||
269 | } | ||
270 | |||
271 | static int | ||
272 | ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip, | ||
273 | struct ftrace_probe_ops *ops, void *data); | ||
274 | |||
275 | static struct ftrace_probe_ops traceon_probe_ops = { | ||
276 | .func = ftrace_traceon, | ||
277 | .print = ftrace_trace_onoff_print, | ||
278 | }; | ||
279 | |||
280 | static struct ftrace_probe_ops traceoff_probe_ops = { | ||
281 | .func = ftrace_traceoff, | ||
282 | .print = ftrace_trace_onoff_print, | ||
283 | }; | ||
284 | |||
285 | static int | ||
286 | ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip, | ||
287 | struct ftrace_probe_ops *ops, void *data) | ||
288 | { | ||
289 | char str[KSYM_SYMBOL_LEN]; | ||
290 | long count = (long)data; | ||
291 | |||
292 | kallsyms_lookup(ip, NULL, NULL, NULL, str); | ||
293 | seq_printf(m, "%s:", str); | ||
294 | |||
295 | if (ops == &traceon_probe_ops) | ||
296 | seq_printf(m, "traceon"); | ||
297 | else | ||
298 | seq_printf(m, "traceoff"); | ||
299 | |||
300 | if (count == -1) | ||
301 | seq_printf(m, ":unlimited"); | ||
302 | else | ||
303 | seq_printf(m, ":count=%ld", count); | ||
304 | seq_putc(m, '\n'); | ||
305 | |||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | static int | ||
310 | ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param) | ||
311 | { | ||
312 | struct ftrace_probe_ops *ops; | ||
313 | |||
314 | /* we register both traceon and traceoff to this callback */ | ||
315 | if (strcmp(cmd, "traceon") == 0) | ||
316 | ops = &traceon_probe_ops; | ||
317 | else | ||
318 | ops = &traceoff_probe_ops; | ||
319 | |||
320 | unregister_ftrace_function_probe_func(glob, ops); | ||
321 | |||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | static int | ||
326 | ftrace_trace_onoff_callback(char *glob, char *cmd, char *param, int enable) | ||
327 | { | ||
328 | struct ftrace_probe_ops *ops; | ||
329 | void *count = (void *)-1; | ||
330 | char *number; | ||
331 | int ret; | ||
332 | |||
333 | /* hash funcs only work with set_ftrace_filter */ | ||
334 | if (!enable) | ||
335 | return -EINVAL; | ||
336 | |||
337 | if (glob[0] == '!') | ||
338 | return ftrace_trace_onoff_unreg(glob+1, cmd, param); | ||
339 | |||
340 | /* we register both traceon and traceoff to this callback */ | ||
341 | if (strcmp(cmd, "traceon") == 0) | ||
342 | ops = &traceon_probe_ops; | ||
343 | else | ||
344 | ops = &traceoff_probe_ops; | ||
345 | |||
346 | if (!param) | ||
347 | goto out_reg; | ||
348 | |||
349 | number = strsep(¶m, ":"); | ||
350 | |||
351 | if (!strlen(number)) | ||
352 | goto out_reg; | ||
353 | |||
354 | /* | ||
355 | * We use the callback data field (which is a pointer) | ||
356 | * as our counter. | ||
357 | */ | ||
358 | ret = strict_strtoul(number, 0, (unsigned long *)&count); | ||
359 | if (ret) | ||
360 | return ret; | ||
361 | |||
362 | out_reg: | ||
363 | ret = register_ftrace_function_probe(glob, ops, count); | ||
364 | |||
365 | return ret; | ||
366 | } | ||
367 | |||
368 | static struct ftrace_func_command ftrace_traceon_cmd = { | ||
369 | .name = "traceon", | ||
370 | .func = ftrace_trace_onoff_callback, | ||
371 | }; | ||
372 | |||
373 | static struct ftrace_func_command ftrace_traceoff_cmd = { | ||
374 | .name = "traceoff", | ||
375 | .func = ftrace_trace_onoff_callback, | ||
376 | }; | ||
377 | |||
378 | static int __init init_func_cmd_traceon(void) | ||
379 | { | ||
380 | int ret; | ||
381 | |||
382 | ret = register_ftrace_command(&ftrace_traceoff_cmd); | ||
383 | if (ret) | ||
384 | return ret; | ||
385 | |||
386 | ret = register_ftrace_command(&ftrace_traceon_cmd); | ||
387 | if (ret) | ||
388 | unregister_ftrace_command(&ftrace_traceoff_cmd); | ||
389 | return ret; | ||
390 | } | ||
391 | #else | ||
392 | static inline int init_func_cmd_traceon(void) | ||
393 | { | ||
394 | return 0; | ||
395 | } | ||
396 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
397 | |||
62 | static __init int init_function_trace(void) | 398 | static __init int init_function_trace(void) |
63 | { | 399 | { |
400 | init_func_cmd_traceon(); | ||
64 | return register_tracer(&function_trace); | 401 | return register_tracer(&function_trace); |
65 | } | 402 | } |
66 | |||
67 | device_initcall(init_function_trace); | 403 | device_initcall(init_function_trace); |
404 | |||
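The traceon/traceoff commands registered here extend set_ftrace_filter's "&lt;func&gt;:&lt;cmd&gt;[:&lt;count&gt;]" syntax. A minimal user-space sketch (the debugfs path and the schedule symbol are assumptions) that arms a probe to switch tracing off after three hits:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            /* Parsed by ftrace_trace_onoff_callback(): glob "schedule",
             * cmd "traceoff", count 3.  Prefixing '!' would unregister. */
            const char cmd[] = "schedule:traceoff:3";
            int fd = open("/sys/kernel/debug/tracing/set_ftrace_filter",
                          O_WRONLY);

            if (fd < 0)
                    return 1;
            if (write(fd, cmd, strlen(cmd)) < 0) {
                    close(fd);
                    return 1;
            }
            close(fd);
            return 0;
    }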
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 930c08e5b38e..e527f2f66c73 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * | 2 | * |
3 | * Function graph tracer. | 3 | * Function graph tracer. |
4 | * Copyright (c) 2008 Frederic Weisbecker <fweisbec@gmail.com> | 4 | * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com> |
5 | * Mostly borrowed from function tracer which | 5 | * Mostly borrowed from function tracer which |
6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> | 6 | * is Copyright (c) Steven Rostedt <srostedt@redhat.com> |
7 | * | 7 | * |
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
13 | 13 | ||
14 | #include "trace.h" | 14 | #include "trace.h" |
15 | #include "trace_output.h" | ||
15 | 16 | ||
16 | #define TRACE_GRAPH_INDENT 2 | 17 | #define TRACE_GRAPH_INDENT 2 |
17 | 18 | ||
@@ -20,9 +21,11 @@ | |||
20 | #define TRACE_GRAPH_PRINT_CPU 0x2 | 21 | #define TRACE_GRAPH_PRINT_CPU 0x2 |
21 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 | 22 | #define TRACE_GRAPH_PRINT_OVERHEAD 0x4 |
22 | #define TRACE_GRAPH_PRINT_PROC 0x8 | 23 | #define TRACE_GRAPH_PRINT_PROC 0x8 |
24 | #define TRACE_GRAPH_PRINT_DURATION 0x10 | ||
25 | #define TRACE_GRAPH_PRINT_ABS_TIME 0x20 | ||
23 | 26 | ||
24 | static struct tracer_opt trace_opts[] = { | 27 | static struct tracer_opt trace_opts[] = { |
25 | /* Display overruns ? */ | 28 | /* Display overruns? (for self-debugging purposes) */ |
26 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, | 29 | { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) }, |
27 | /* Display CPU ? */ | 30 | /* Display CPU ? */ |
28 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, | 31 | { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) }, |
@@ -30,26 +33,101 @@ static struct tracer_opt trace_opts[] = { | |||
30 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, | 33 | { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) }, |
31 | /* Display proc name/pid */ | 34 | /* Display proc name/pid */ |
32 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, | 35 | { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) }, |
36 | /* Display duration of execution */ | ||
37 | { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) }, | ||
38 | /* Display absolute time of an entry */ | ||
39 | { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) }, | ||
33 | { } /* Empty entry */ | 40 | { } /* Empty entry */ |
34 | }; | 41 | }; |
35 | 42 | ||
36 | static struct tracer_flags tracer_flags = { | 43 | static struct tracer_flags tracer_flags = { |
37 | /* Don't display overruns and proc by default */ | 44 | /* Don't display overruns and proc by default */ |
38 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD, | 45 | .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD | |
46 | TRACE_GRAPH_PRINT_DURATION, | ||
39 | .opts = trace_opts | 47 | .opts = trace_opts |
40 | }; | 48 | }; |
41 | 49 | ||
42 | /* pid on the last trace processed */ | 50 | /* pid on the last trace processed */ |
43 | static pid_t last_pid[NR_CPUS] = { [0 ... NR_CPUS-1] = -1 }; | ||
44 | 51 | ||
45 | static int graph_trace_init(struct trace_array *tr) | 52 | |
53 | /* Add a function return address to the trace stack on thread info. */ | ||
54 | int | ||
55 | ftrace_push_return_trace(unsigned long ret, unsigned long long time, | ||
56 | unsigned long func, int *depth) | ||
57 | { | ||
58 | int index; | ||
59 | |||
60 | if (!current->ret_stack) | ||
61 | return -EBUSY; | ||
62 | |||
63 | /* The return trace stack is full */ | ||
64 | if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) { | ||
65 | atomic_inc(¤t->trace_overrun); | ||
66 | return -EBUSY; | ||
67 | } | ||
68 | |||
69 | index = ++current->curr_ret_stack; | ||
70 | barrier(); | ||
71 | current->ret_stack[index].ret = ret; | ||
72 | current->ret_stack[index].func = func; | ||
73 | current->ret_stack[index].calltime = time; | ||
74 | *depth = index; | ||
75 | |||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | /* Retrieve a function return address from the trace stack on thread info. */ | ||
80 | void | ||
81 | ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) | ||
46 | { | 82 | { |
47 | int cpu, ret; | 83 | int index; |
84 | |||
85 | index = current->curr_ret_stack; | ||
48 | 86 | ||
49 | for_each_online_cpu(cpu) | 87 | if (unlikely(index < 0)) { |
50 | tracing_reset(tr, cpu); | 88 | ftrace_graph_stop(); |
89 | WARN_ON(1); | ||
90 | /* Might as well panic, otherwise we have nowhere to go */ | ||
91 | *ret = (unsigned long)panic; | ||
92 | return; | ||
93 | } | ||
94 | |||
95 | *ret = current->ret_stack[index].ret; | ||
96 | trace->func = current->ret_stack[index].func; | ||
97 | trace->calltime = current->ret_stack[index].calltime; | ||
98 | trace->overrun = atomic_read(¤t->trace_overrun); | ||
99 | trace->depth = index; | ||
100 | barrier(); | ||
101 | current->curr_ret_stack--; | ||
102 | |||
103 | } | ||
104 | |||
105 | /* | ||
106 | * Send the trace to the ring-buffer. | ||
107 | * @return the original return address. | ||
108 | */ | ||
109 | unsigned long ftrace_return_to_handler(void) | ||
110 | { | ||
111 | struct ftrace_graph_ret trace; | ||
112 | unsigned long ret; | ||
113 | |||
114 | ftrace_pop_return_trace(&trace, &ret); | ||
115 | trace.rettime = trace_clock_local(); | ||
116 | ftrace_graph_return(&trace); | ||
117 | |||
118 | if (unlikely(!ret)) { | ||
119 | ftrace_graph_stop(); | ||
120 | WARN_ON(1); | ||
121 | /* Might as well panic. What else can we do? */ | ||
122 | ret = (unsigned long)panic; | ||
123 | } | ||
124 | |||
125 | return ret; | ||
126 | } | ||
51 | 127 | ||
52 | ret = register_ftrace_graph(&trace_graph_return, | 128 | static int graph_trace_init(struct trace_array *tr) |
129 | { | ||
130 | int ret = register_ftrace_graph(&trace_graph_return, | ||
53 | &trace_graph_entry); | 131 | &trace_graph_entry); |
54 | if (ret) | 132 | if (ret) |
55 | return ret; | 133 | return ret; |
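The push/pop pair above maintains a shadow stack of hijacked return addresses on the traced task. The sketch below is a minimal user-space model of that bookkeeping, with invented addresses and a single global stack standing in for the per-task state (the kernel version also guards against a negative index and counts overruns):

    #include <stdio.h>

    #define RET_DEPTH 50    /* stands in for FTRACE_RETFUNC_DEPTH */

    struct ret_entry {
            unsigned long ret;              /* real return address */
            unsigned long func;             /* function being traced */
            unsigned long long calltime;
    };

    static struct ret_entry ret_stack[RET_DEPTH];
    static int curr_ret = -1;

    static int push_return(unsigned long ret, unsigned long func,
                           unsigned long long time)
    {
            if (curr_ret == RET_DEPTH - 1)
                    return -1;              /* full: like the -EBUSY path above */
            curr_ret++;
            ret_stack[curr_ret].ret = ret;
            ret_stack[curr_ret].func = func;
            ret_stack[curr_ret].calltime = time;
            return 0;
    }

    static unsigned long pop_return(unsigned long long *calltime)
    {
            struct ret_entry *e = &ret_stack[curr_ret--];

            *calltime = e->calltime;
            return e->ret;          /* address the arch code jumps back to */
    }

    int main(void)
    {
            unsigned long long t;

            /* "Enter" a function at 0x2000 whose caller sits at 0x1000 */
            push_return(0x1000, 0x2000, 5);
            printf("return to %#lx, entered at t=%llu\n", pop_return(&t), t);
            return 0;
    }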
@@ -153,17 +231,25 @@ print_graph_proc(struct trace_seq *s, pid_t pid) | |||
153 | 231 | ||
154 | /* If the pid changed since the last trace, output this event */ | 232 | /* If the pid changed since the last trace, output this event */ |
155 | static enum print_line_t | 233 | static enum print_line_t |
156 | verif_pid(struct trace_seq *s, pid_t pid, int cpu) | 234 | verif_pid(struct trace_seq *s, pid_t pid, int cpu, pid_t *last_pids_cpu) |
157 | { | 235 | { |
158 | pid_t prev_pid; | 236 | pid_t prev_pid; |
237 | pid_t *last_pid; | ||
159 | int ret; | 238 | int ret; |
160 | 239 | ||
161 | if (last_pid[cpu] != -1 && last_pid[cpu] == pid) | 240 | if (!last_pids_cpu) |
241 | return TRACE_TYPE_HANDLED; | ||
242 | |||
243 | last_pid = per_cpu_ptr(last_pids_cpu, cpu); | ||
244 | |||
245 | if (*last_pid == pid) | ||
162 | return TRACE_TYPE_HANDLED; | 246 | return TRACE_TYPE_HANDLED; |
163 | 247 | ||
164 | prev_pid = last_pid[cpu]; | 248 | prev_pid = *last_pid; |
165 | last_pid[cpu] = pid; | 249 | *last_pid = pid; |
166 | 250 | ||
251 | if (prev_pid == -1) | ||
252 | return TRACE_TYPE_HANDLED; | ||
167 | /* | 253 | /* |
168 | * Context-switch trace line: | 254 | * Context-switch trace line: |
169 | 255 | ||
@@ -175,34 +261,34 @@ verif_pid(struct trace_seq *s, pid_t pid, int cpu) | |||
175 | ret = trace_seq_printf(s, | 261 | ret = trace_seq_printf(s, |
176 | " ------------------------------------------\n"); | 262 | " ------------------------------------------\n"); |
177 | if (!ret) | 263 | if (!ret) |
178 | TRACE_TYPE_PARTIAL_LINE; | 264 | return TRACE_TYPE_PARTIAL_LINE; |
179 | 265 | ||
180 | ret = print_graph_cpu(s, cpu); | 266 | ret = print_graph_cpu(s, cpu); |
181 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 267 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
182 | TRACE_TYPE_PARTIAL_LINE; | 268 | return TRACE_TYPE_PARTIAL_LINE; |
183 | 269 | ||
184 | ret = print_graph_proc(s, prev_pid); | 270 | ret = print_graph_proc(s, prev_pid); |
185 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 271 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
186 | TRACE_TYPE_PARTIAL_LINE; | 272 | return TRACE_TYPE_PARTIAL_LINE; |
187 | 273 | ||
188 | ret = trace_seq_printf(s, " => "); | 274 | ret = trace_seq_printf(s, " => "); |
189 | if (!ret) | 275 | if (!ret) |
190 | TRACE_TYPE_PARTIAL_LINE; | 276 | return TRACE_TYPE_PARTIAL_LINE; |
191 | 277 | ||
192 | ret = print_graph_proc(s, pid); | 278 | ret = print_graph_proc(s, pid); |
193 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 279 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
194 | TRACE_TYPE_PARTIAL_LINE; | 280 | return TRACE_TYPE_PARTIAL_LINE; |
195 | 281 | ||
196 | ret = trace_seq_printf(s, | 282 | ret = trace_seq_printf(s, |
197 | "\n ------------------------------------------\n\n"); | 283 | "\n ------------------------------------------\n\n"); |
198 | if (!ret) | 284 | if (!ret) |
199 | TRACE_TYPE_PARTIAL_LINE; | 285 | return TRACE_TYPE_PARTIAL_LINE; |
200 | 286 | ||
201 | return ret; | 287 | return TRACE_TYPE_HANDLED; |
202 | } | 288 | } |
203 | 289 | ||
204 | static bool | 290 | static struct ftrace_graph_ret_entry * |
205 | trace_branch_is_leaf(struct trace_iterator *iter, | 291 | get_return_for_leaf(struct trace_iterator *iter, |
206 | struct ftrace_graph_ent_entry *curr) | 292 | struct ftrace_graph_ent_entry *curr) |
207 | { | 293 | { |
208 | struct ring_buffer_iter *ring_iter; | 294 | struct ring_buffer_iter *ring_iter; |
@@ -211,65 +297,123 @@ trace_branch_is_leaf(struct trace_iterator *iter, | |||
211 | 297 | ||
212 | ring_iter = iter->buffer_iter[iter->cpu]; | 298 | ring_iter = iter->buffer_iter[iter->cpu]; |
213 | 299 | ||
214 | if (!ring_iter) | 300 | /* First peek to compare current entry and the next one */ |
215 | return false; | 301 | if (ring_iter) |
216 | 302 | event = ring_buffer_iter_peek(ring_iter, NULL); | |
217 | event = ring_buffer_iter_peek(ring_iter, NULL); | 303 | else { |
304 | /* We need to consume the current entry to see the next one */ | ||
305 | ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL); | ||
306 | event = ring_buffer_peek(iter->tr->buffer, iter->cpu, | ||
307 | NULL); | ||
308 | } | ||
218 | 309 | ||
219 | if (!event) | 310 | if (!event) |
220 | return false; | 311 | return NULL; |
221 | 312 | ||
222 | next = ring_buffer_event_data(event); | 313 | next = ring_buffer_event_data(event); |
223 | 314 | ||
224 | if (next->ent.type != TRACE_GRAPH_RET) | 315 | if (next->ent.type != TRACE_GRAPH_RET) |
225 | return false; | 316 | return NULL; |
226 | 317 | ||
227 | if (curr->ent.pid != next->ent.pid || | 318 | if (curr->ent.pid != next->ent.pid || |
228 | curr->graph_ent.func != next->ret.func) | 319 | curr->graph_ent.func != next->ret.func) |
229 | return false; | 320 | return NULL; |
321 | |||
322 | /* this is a leaf, now advance the iterator */ | ||
323 | if (ring_iter) | ||
324 | ring_buffer_read(ring_iter, NULL); | ||
325 | |||
326 | return next; | ||
327 | } | ||
328 | |||
329 | /* Signal an overhead of execution time to the output */ | ||
330 | static int | ||
331 | print_graph_overhead(unsigned long long duration, struct trace_seq *s) | ||
332 | { | ||
333 | /* If the duration column is disabled, we don't need anything */ | ||
334 | if (!(tracer_flags.val & TRACE_GRAPH_PRINT_DURATION)) | ||
335 | return 1; | ||
336 | |||
337 | /* Non-nested entry or return */ | ||
338 | if (duration == -1) | ||
339 | return trace_seq_printf(s, " "); | ||
340 | |||
341 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | ||
342 | /* Duration exceeded 100 usecs */ | ||
343 | if (duration > 100000ULL) | ||
344 | return trace_seq_printf(s, "! "); | ||
345 | |||
346 | /* Duration exceeded 10 usecs */ | ||
347 | if (duration > 10000ULL) | ||
348 | return trace_seq_printf(s, "+ "); | ||
349 | } | ||
230 | 350 | ||
231 | return true; | 351 | return trace_seq_printf(s, " "); |
352 | } | ||
353 | |||
354 | static int print_graph_abs_time(u64 t, struct trace_seq *s) | ||
355 | { | ||
356 | unsigned long usecs_rem; | ||
357 | |||
358 | usecs_rem = do_div(t, NSEC_PER_SEC); | ||
359 | usecs_rem /= 1000; | ||
360 | |||
361 | return trace_seq_printf(s, "%5lu.%06lu | ", | ||
362 | (unsigned long)t, usecs_rem); | ||
232 | } | 363 | } |
233 | 364 | ||
234 | static enum print_line_t | 365 | static enum print_line_t |
235 | print_graph_irq(struct trace_seq *s, unsigned long addr, | 366 | print_graph_irq(struct trace_iterator *iter, unsigned long addr, |
236 | enum trace_type type, int cpu, pid_t pid) | 367 | enum trace_type type, int cpu, pid_t pid) |
237 | { | 368 | { |
238 | int ret; | 369 | int ret; |
370 | struct trace_seq *s = &iter->seq; | ||
239 | 371 | ||
240 | if (addr < (unsigned long)__irqentry_text_start || | 372 | if (addr < (unsigned long)__irqentry_text_start || |
241 | addr >= (unsigned long)__irqentry_text_end) | 373 | addr >= (unsigned long)__irqentry_text_end) |
242 | return TRACE_TYPE_UNHANDLED; | 374 | return TRACE_TYPE_UNHANDLED; |
243 | 375 | ||
244 | if (type == TRACE_GRAPH_ENT) { | 376 | /* Absolute time */ |
245 | ret = trace_seq_printf(s, "==========> | "); | 377 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { |
246 | } else { | 378 | ret = print_graph_abs_time(iter->ts, s); |
247 | /* Cpu */ | 379 | if (!ret) |
248 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 380 | return TRACE_TYPE_PARTIAL_LINE; |
249 | ret = print_graph_cpu(s, cpu); | 381 | } |
250 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
251 | return TRACE_TYPE_PARTIAL_LINE; | ||
252 | } | ||
253 | /* Proc */ | ||
254 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | ||
255 | ret = print_graph_proc(s, pid); | ||
256 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
257 | return TRACE_TYPE_PARTIAL_LINE; | ||
258 | 382 | ||
259 | ret = trace_seq_printf(s, " | "); | 383 | /* Cpu */ |
260 | if (!ret) | 384 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
261 | return TRACE_TYPE_PARTIAL_LINE; | 385 | ret = print_graph_cpu(s, cpu); |
262 | } | 386 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
387 | return TRACE_TYPE_PARTIAL_LINE; | ||
388 | } | ||
389 | /* Proc */ | ||
390 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) { | ||
391 | ret = print_graph_proc(s, pid); | ||
392 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
393 | return TRACE_TYPE_PARTIAL_LINE; | ||
394 | ret = trace_seq_printf(s, " | "); | ||
395 | if (!ret) | ||
396 | return TRACE_TYPE_PARTIAL_LINE; | ||
397 | } | ||
263 | 398 | ||
264 | /* No overhead */ | 399 | /* No overhead */ |
265 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 400 | ret = print_graph_overhead(-1, s); |
266 | ret = trace_seq_printf(s, " "); | 401 | if (!ret) |
267 | if (!ret) | 402 | return TRACE_TYPE_PARTIAL_LINE; |
268 | return TRACE_TYPE_PARTIAL_LINE; | 403 | |
269 | } | 404 | if (type == TRACE_GRAPH_ENT) |
405 | ret = trace_seq_printf(s, "==========>"); | ||
406 | else | ||
407 | ret = trace_seq_printf(s, "<=========="); | ||
408 | |||
409 | if (!ret) | ||
410 | return TRACE_TYPE_PARTIAL_LINE; | ||
411 | |||
412 | /* Don't close the duration column if we don't have one */ | ||
413 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) | ||
414 | trace_seq_printf(s, " |"); | ||
415 | ret = trace_seq_printf(s, "\n"); | ||
270 | 416 | ||
271 | ret = trace_seq_printf(s, "<========== |\n"); | ||
272 | } | ||
273 | if (!ret) | 417 | if (!ret) |
274 | return TRACE_TYPE_PARTIAL_LINE; | 418 | return TRACE_TYPE_PARTIAL_LINE; |
275 | return TRACE_TYPE_HANDLED; | 419 | return TRACE_TYPE_HANDLED; |
@@ -288,7 +432,7 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
288 | sprintf(msecs_str, "%lu", (unsigned long) duration); | 432 | sprintf(msecs_str, "%lu", (unsigned long) duration); |
289 | 433 | ||
290 | /* Print msecs */ | 434 | /* Print msecs */ |
291 | ret = trace_seq_printf(s, msecs_str); | 435 | ret = trace_seq_printf(s, "%s", msecs_str); |
292 | if (!ret) | 436 | if (!ret) |
293 | return TRACE_TYPE_PARTIAL_LINE; | 437 | return TRACE_TYPE_PARTIAL_LINE; |
294 | 438 | ||
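The switch to trace_seq_printf(s, "%s", msecs_str) in this hunk is the usual format-string hardening: msecs_str is built by sprintf("%lu") and so holds only digits, but passing a data buffer as the format argument misbehaves the moment it contains a '%'. A self-contained user-space illustration of the hazard (buffer contents invented):

    #include <stdio.h>

    int main(void)
    {
            char buf[32];

            snprintf(buf, sizeof(buf), "%s", "100%guaranteed");

            /* Fragile: buf would be interpreted as a format string, so
             * the embedded "%g" reads a nonexistent vararg (undefined
             * behavior).  Left commented out on purpose:
             *
             *      printf(buf);
             */

            /* Safe: buf is always printed literally. */
            printf("%s\n", buf);
            return 0;
    }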
@@ -321,51 +465,33 @@ print_graph_duration(unsigned long long duration, struct trace_seq *s) | |||
321 | 465 | ||
322 | } | 466 | } |
323 | 467 | ||
324 | /* Signal a overhead of time execution to the output */ | ||
325 | static int | ||
326 | print_graph_overhead(unsigned long long duration, struct trace_seq *s) | ||
327 | { | ||
328 | /* Duration exceeded 100 msecs */ | ||
329 | if (duration > 100000ULL) | ||
330 | return trace_seq_printf(s, "! "); | ||
331 | |||
332 | /* Duration exceeded 10 msecs */ | ||
333 | if (duration > 10000ULL) | ||
334 | return trace_seq_printf(s, "+ "); | ||
335 | |||
336 | return trace_seq_printf(s, " "); | ||
337 | } | ||
338 | |||
339 | /* Case of a leaf function on its call entry */ | 468 | /* Case of a leaf function on its call entry */ |
340 | static enum print_line_t | 469 | static enum print_line_t |
341 | print_graph_entry_leaf(struct trace_iterator *iter, | 470 | print_graph_entry_leaf(struct trace_iterator *iter, |
342 | struct ftrace_graph_ent_entry *entry, struct trace_seq *s) | 471 | struct ftrace_graph_ent_entry *entry, |
472 | struct ftrace_graph_ret_entry *ret_entry, struct trace_seq *s) | ||
343 | { | 473 | { |
344 | struct ftrace_graph_ret_entry *ret_entry; | ||
345 | struct ftrace_graph_ret *graph_ret; | 474 | struct ftrace_graph_ret *graph_ret; |
346 | struct ring_buffer_event *event; | ||
347 | struct ftrace_graph_ent *call; | 475 | struct ftrace_graph_ent *call; |
348 | unsigned long long duration; | 476 | unsigned long long duration; |
349 | int ret; | 477 | int ret; |
350 | int i; | 478 | int i; |
351 | 479 | ||
352 | event = ring_buffer_read(iter->buffer_iter[iter->cpu], NULL); | ||
353 | ret_entry = ring_buffer_event_data(event); | ||
354 | graph_ret = &ret_entry->ret; | 480 | graph_ret = &ret_entry->ret; |
355 | call = &entry->graph_ent; | 481 | call = &entry->graph_ent; |
356 | duration = graph_ret->rettime - graph_ret->calltime; | 482 | duration = graph_ret->rettime - graph_ret->calltime; |
357 | 483 | ||
358 | /* Overhead */ | 484 | /* Overhead */ |
359 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 485 | ret = print_graph_overhead(duration, s); |
360 | ret = print_graph_overhead(duration, s); | 486 | if (!ret) |
361 | if (!ret) | 487 | return TRACE_TYPE_PARTIAL_LINE; |
362 | return TRACE_TYPE_PARTIAL_LINE; | ||
363 | } | ||
364 | 488 | ||
365 | /* Duration */ | 489 | /* Duration */ |
366 | ret = print_graph_duration(duration, s); | 490 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
367 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 491 | ret = print_graph_duration(duration, s); |
368 | return TRACE_TYPE_PARTIAL_LINE; | 492 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
493 | return TRACE_TYPE_PARTIAL_LINE; | ||
494 | } | ||
369 | 495 | ||
370 | /* Function */ | 496 | /* Function */ |
371 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 497 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
@@ -394,25 +520,17 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, | |||
394 | struct ftrace_graph_ent *call = &entry->graph_ent; | 520 | struct ftrace_graph_ent *call = &entry->graph_ent; |
395 | 521 | ||
396 | /* No overhead */ | 522 | /* No overhead */ |
397 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 523 | ret = print_graph_overhead(-1, s); |
398 | ret = trace_seq_printf(s, " "); | 524 | if (!ret) |
399 | if (!ret) | 525 | return TRACE_TYPE_PARTIAL_LINE; |
400 | return TRACE_TYPE_PARTIAL_LINE; | ||
401 | } | ||
402 | 526 | ||
403 | /* Interrupt */ | 527 | /* No time */ |
404 | ret = print_graph_irq(s, call->func, TRACE_GRAPH_ENT, cpu, pid); | 528 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
405 | if (ret == TRACE_TYPE_UNHANDLED) { | ||
406 | /* No time */ | ||
407 | ret = trace_seq_printf(s, " | "); | 529 | ret = trace_seq_printf(s, " | "); |
408 | if (!ret) | 530 | if (!ret) |
409 | return TRACE_TYPE_PARTIAL_LINE; | 531 | return TRACE_TYPE_PARTIAL_LINE; |
410 | } else { | ||
411 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
412 | return TRACE_TYPE_PARTIAL_LINE; | ||
413 | } | 532 | } |
414 | 533 | ||
415 | |||
416 | /* Function */ | 534 | /* Function */ |
417 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { | 535 | for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++) { |
418 | ret = trace_seq_printf(s, " "); | 536 | ret = trace_seq_printf(s, " "); |
@@ -428,20 +546,40 @@ print_graph_entry_nested(struct ftrace_graph_ent_entry *entry, | |||
428 | if (!ret) | 546 | if (!ret) |
429 | return TRACE_TYPE_PARTIAL_LINE; | 547 | return TRACE_TYPE_PARTIAL_LINE; |
430 | 548 | ||
431 | return TRACE_TYPE_HANDLED; | 549 | /* |
550 | * we already consumed the current entry to check the next one | ||
551 | * and see if this is a leaf. | ||
552 | */ | ||
553 | return TRACE_TYPE_NO_CONSUME; | ||
432 | } | 554 | } |
433 | 555 | ||
434 | static enum print_line_t | 556 | static enum print_line_t |
435 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | 557 | print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, |
436 | struct trace_iterator *iter, int cpu) | 558 | struct trace_iterator *iter) |
437 | { | 559 | { |
438 | int ret; | 560 | int ret; |
561 | int cpu = iter->cpu; | ||
562 | pid_t *last_entry = iter->private; | ||
439 | struct trace_entry *ent = iter->ent; | 563 | struct trace_entry *ent = iter->ent; |
564 | struct ftrace_graph_ent *call = &field->graph_ent; | ||
565 | struct ftrace_graph_ret_entry *leaf_ret; | ||
440 | 566 | ||
441 | /* Pid */ | 567 | /* Pid */ |
442 | if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) | 568 | if (verif_pid(s, ent->pid, cpu, last_entry) == TRACE_TYPE_PARTIAL_LINE) |
443 | return TRACE_TYPE_PARTIAL_LINE; | 569 | return TRACE_TYPE_PARTIAL_LINE; |
444 | 570 | ||
571 | /* Interrupt */ | ||
572 | ret = print_graph_irq(iter, call->func, TRACE_GRAPH_ENT, cpu, ent->pid); | ||
573 | if (ret == TRACE_TYPE_PARTIAL_LINE) | ||
574 | return TRACE_TYPE_PARTIAL_LINE; | ||
575 | |||
576 | /* Absolute time */ | ||
577 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | ||
578 | ret = print_graph_abs_time(iter->ts, s); | ||
579 | if (!ret) | ||
580 | return TRACE_TYPE_PARTIAL_LINE; | ||
581 | } | ||
582 | |||
445 | /* Cpu */ | 583 | /* Cpu */ |
446 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 584 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
447 | ret = print_graph_cpu(s, cpu); | 585 | ret = print_graph_cpu(s, cpu); |
@@ -460,8 +598,9 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
460 | return TRACE_TYPE_PARTIAL_LINE; | 598 | return TRACE_TYPE_PARTIAL_LINE; |
461 | } | 599 | } |
462 | 600 | ||
463 | if (trace_branch_is_leaf(iter, field)) | 601 | leaf_ret = get_return_for_leaf(iter, field); |
464 | return print_graph_entry_leaf(iter, field, s); | 602 | if (leaf_ret) |
603 | return print_graph_entry_leaf(iter, field, leaf_ret, s); | ||
465 | else | 604 | else |
466 | return print_graph_entry_nested(field, s, iter->ent->pid, cpu); | 605 | return print_graph_entry_nested(field, s, iter->ent->pid, cpu); |
467 | 606 | ||
@@ -469,16 +608,25 @@ print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s, | |||
469 | 608 | ||
470 | static enum print_line_t | 609 | static enum print_line_t |
471 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | 610 | print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, |
472 | struct trace_entry *ent, int cpu) | 611 | struct trace_entry *ent, struct trace_iterator *iter) |
473 | { | 612 | { |
474 | int i; | 613 | int i; |
475 | int ret; | 614 | int ret; |
615 | int cpu = iter->cpu; | ||
616 | pid_t *last_pid = iter->private, pid = ent->pid; | ||
476 | unsigned long long duration = trace->rettime - trace->calltime; | 617 | unsigned long long duration = trace->rettime - trace->calltime; |
477 | 618 | ||
478 | /* Pid */ | 619 | /* Pid */ |
479 | if (verif_pid(s, ent->pid, cpu) == TRACE_TYPE_PARTIAL_LINE) | 620 | if (verif_pid(s, pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE) |
480 | return TRACE_TYPE_PARTIAL_LINE; | 621 | return TRACE_TYPE_PARTIAL_LINE; |
481 | 622 | ||
623 | /* Absolute time */ | ||
624 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | ||
625 | ret = print_graph_abs_time(iter->ts, s); | ||
626 | if (!ret) | ||
627 | return TRACE_TYPE_PARTIAL_LINE; | ||
628 | } | ||
629 | |||
482 | /* Cpu */ | 630 | /* Cpu */ |
483 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 631 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
484 | ret = print_graph_cpu(s, cpu); | 632 | ret = print_graph_cpu(s, cpu); |
@@ -498,16 +646,16 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
498 | } | 646 | } |
499 | 647 | ||
500 | /* Overhead */ | 648 | /* Overhead */ |
501 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 649 | ret = print_graph_overhead(duration, s); |
502 | ret = print_graph_overhead(duration, s); | 650 | if (!ret) |
503 | if (!ret) | 651 | return TRACE_TYPE_PARTIAL_LINE; |
504 | return TRACE_TYPE_PARTIAL_LINE; | ||
505 | } | ||
506 | 652 | ||
507 | /* Duration */ | 653 | /* Duration */ |
508 | ret = print_graph_duration(duration, s); | 654 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { |
509 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 655 | ret = print_graph_duration(duration, s); |
510 | return TRACE_TYPE_PARTIAL_LINE; | 656 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
657 | return TRACE_TYPE_PARTIAL_LINE; | ||
658 | } | ||
511 | 659 | ||
512 | /* Closing brace */ | 660 | /* Closing brace */ |
513 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { | 661 | for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++) { |
@@ -528,7 +676,7 @@ print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s, | |||
528 | return TRACE_TYPE_PARTIAL_LINE; | 676 | return TRACE_TYPE_PARTIAL_LINE; |
529 | } | 677 | } |
530 | 678 | ||
531 | ret = print_graph_irq(s, trace->func, TRACE_GRAPH_RET, cpu, ent->pid); | 679 | ret = print_graph_irq(iter, trace->func, TRACE_GRAPH_RET, cpu, pid); |
532 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 680 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
533 | return TRACE_TYPE_PARTIAL_LINE; | 681 | return TRACE_TYPE_PARTIAL_LINE; |
534 | 682 | ||
@@ -541,14 +689,23 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s, | |||
541 | { | 689 | { |
542 | int i; | 690 | int i; |
543 | int ret; | 691 | int ret; |
692 | int cpu = iter->cpu; | ||
693 | pid_t *last_pid = iter->private; | ||
544 | 694 | ||
545 | /* Pid */ | 695 | /* Pid */ |
546 | if (verif_pid(s, ent->pid, iter->cpu) == TRACE_TYPE_PARTIAL_LINE) | 696 | if (verif_pid(s, ent->pid, cpu, last_pid) == TRACE_TYPE_PARTIAL_LINE) |
547 | return TRACE_TYPE_PARTIAL_LINE; | 697 | return TRACE_TYPE_PARTIAL_LINE; |
548 | 698 | ||
699 | /* Absolute time */ | ||
700 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) { | ||
701 | ret = print_graph_abs_time(iter->ts, s); | ||
702 | if (!ret) | ||
703 | return TRACE_TYPE_PARTIAL_LINE; | ||
704 | } | ||
705 | |||
549 | /* Cpu */ | 706 | /* Cpu */ |
550 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { | 707 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) { |
551 | ret = print_graph_cpu(s, iter->cpu); | 708 | ret = print_graph_cpu(s, cpu); |
552 | if (ret == TRACE_TYPE_PARTIAL_LINE) | 709 | if (ret == TRACE_TYPE_PARTIAL_LINE) |
553 | return TRACE_TYPE_PARTIAL_LINE; | 710 | return TRACE_TYPE_PARTIAL_LINE; |
554 | } | 711 | } |
@@ -565,17 +722,17 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s, | |||
565 | } | 722 | } |
566 | 723 | ||
567 | /* No overhead */ | 724 | /* No overhead */ |
568 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 725 | ret = print_graph_overhead(-1, s); |
569 | ret = trace_seq_printf(s, " "); | 726 | if (!ret) |
727 | return TRACE_TYPE_PARTIAL_LINE; | ||
728 | |||
729 | /* No time */ | ||
730 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) { | ||
731 | ret = trace_seq_printf(s, " | "); | ||
570 | if (!ret) | 732 | if (!ret) |
571 | return TRACE_TYPE_PARTIAL_LINE; | 733 | return TRACE_TYPE_PARTIAL_LINE; |
572 | } | 734 | } |
573 | 735 | ||
574 | /* No time */ | ||
575 | ret = trace_seq_printf(s, " | "); | ||
576 | if (!ret) | ||
577 | return TRACE_TYPE_PARTIAL_LINE; | ||
578 | |||
579 | /* Indentation */ | 736 | /* Indentation */ |
580 | if (trace->depth > 0) | 737 | if (trace->depth > 0) |
581 | for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) { | 738 | for (i = 0; i < (trace->depth + 1) * TRACE_GRAPH_INDENT; i++) { |
@@ -589,8 +746,11 @@ print_graph_comment(struct print_entry *trace, struct trace_seq *s, | |||
589 | if (!ret) | 746 | if (!ret) |
590 | return TRACE_TYPE_PARTIAL_LINE; | 747 | return TRACE_TYPE_PARTIAL_LINE; |
591 | 748 | ||
592 | if (ent->flags & TRACE_FLAG_CONT) | 749 | /* Strip ending newline */ |
593 | trace_seq_print_cont(s, iter); | 750 | if (s->buffer[s->len - 1] == '\n') { |
751 | s->buffer[s->len - 1] = '\0'; | ||
752 | s->len--; | ||
753 | } | ||
594 | 754 | ||
595 | ret = trace_seq_printf(s, " */\n"); | 755 | ret = trace_seq_printf(s, " */\n"); |
596 | if (!ret) | 756 | if (!ret) |
@@ -610,13 +770,12 @@ print_graph_function(struct trace_iterator *iter) | |||
610 | case TRACE_GRAPH_ENT: { | 770 | case TRACE_GRAPH_ENT: { |
611 | struct ftrace_graph_ent_entry *field; | 771 | struct ftrace_graph_ent_entry *field; |
612 | trace_assign_type(field, entry); | 772 | trace_assign_type(field, entry); |
613 | return print_graph_entry(field, s, iter, | 773 | return print_graph_entry(field, s, iter); |
614 | iter->cpu); | ||
615 | } | 774 | } |
616 | case TRACE_GRAPH_RET: { | 775 | case TRACE_GRAPH_RET: { |
617 | struct ftrace_graph_ret_entry *field; | 776 | struct ftrace_graph_ret_entry *field; |
618 | trace_assign_type(field, entry); | 777 | trace_assign_type(field, entry); |
619 | return print_graph_return(&field->ret, s, entry, iter->cpu); | 778 | return print_graph_return(&field->ret, s, entry, iter); |
620 | } | 779 | } |
621 | case TRACE_PRINT: { | 780 | case TRACE_PRINT: { |
622 | struct print_entry *field; | 781 | struct print_entry *field; |
@@ -632,33 +791,64 @@ static void print_graph_headers(struct seq_file *s) | |||
632 | { | 791 | { |
633 | /* 1st line */ | 792 | /* 1st line */ |
634 | seq_printf(s, "# "); | 793 | seq_printf(s, "# "); |
794 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | ||
795 | seq_printf(s, " TIME "); | ||
635 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 796 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) |
636 | seq_printf(s, "CPU "); | 797 | seq_printf(s, "CPU"); |
637 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 798 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) |
638 | seq_printf(s, "TASK/PID "); | 799 | seq_printf(s, " TASK/PID "); |
639 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) | 800 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) |
640 | seq_printf(s, "OVERHEAD/"); | 801 | seq_printf(s, " DURATION "); |
641 | seq_printf(s, "DURATION FUNCTION CALLS\n"); | 802 | seq_printf(s, " FUNCTION CALLS\n"); |
642 | 803 | ||
643 | /* 2nd line */ | 804 | /* 2nd line */ |
644 | seq_printf(s, "# "); | 805 | seq_printf(s, "# "); |
806 | if (tracer_flags.val & TRACE_GRAPH_PRINT_ABS_TIME) | ||
807 | seq_printf(s, " | "); | ||
645 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) | 808 | if (tracer_flags.val & TRACE_GRAPH_PRINT_CPU) |
646 | seq_printf(s, "| "); | 809 | seq_printf(s, "| "); |
647 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) | 810 | if (tracer_flags.val & TRACE_GRAPH_PRINT_PROC) |
648 | seq_printf(s, "| | "); | 811 | seq_printf(s, " | | "); |
649 | if (tracer_flags.val & TRACE_GRAPH_PRINT_OVERHEAD) { | 812 | if (tracer_flags.val & TRACE_GRAPH_PRINT_DURATION) |
650 | seq_printf(s, "| "); | 813 | seq_printf(s, " | | "); |
651 | seq_printf(s, "| | | | |\n"); | 814 | seq_printf(s, " | | | |\n"); |
652 | } else | ||
653 | seq_printf(s, " | | | | |\n"); | ||
654 | } | 815 | } |
816 | |||
817 | static void graph_trace_open(struct trace_iterator *iter) | ||
818 | { | ||
819 | /* pid on the last trace processed */ | ||
820 | pid_t *last_pid = alloc_percpu(pid_t); | ||
821 | int cpu; | ||
822 | |||
823 | if (!last_pid) | ||
824 | pr_warning("function graph tracer: not enough memory\n"); | ||
825 | else | ||
826 | for_each_possible_cpu(cpu) { | ||
827 | pid_t *pid = per_cpu_ptr(last_pid, cpu); | ||
828 | *pid = -1; | ||
829 | } | ||
830 | |||
831 | iter->private = last_pid; | ||
832 | } | ||
833 | |||
834 | static void graph_trace_close(struct trace_iterator *iter) | ||
835 | { | ||
836 | percpu_free(iter->private); | ||
837 | } | ||
838 | |||
655 | static struct tracer graph_trace __read_mostly = { | 839 | static struct tracer graph_trace __read_mostly = { |
656 | .name = "function_graph", | 840 | .name = "function_graph", |
841 | .open = graph_trace_open, | ||
842 | .close = graph_trace_close, | ||
843 | .wait_pipe = poll_wait_pipe, | ||
657 | .init = graph_trace_init, | 844 | .init = graph_trace_init, |
658 | .reset = graph_trace_reset, | 845 | .reset = graph_trace_reset, |
659 | .print_line = print_graph_function, | 846 | .print_line = print_graph_function, |
660 | .print_header = print_graph_headers, | 847 | .print_header = print_graph_headers, |
661 | .flags = &tracer_flags, | 848 | .flags = &tracer_flags, |
849 | #ifdef CONFIG_FTRACE_SELFTEST | ||
850 | .selftest = trace_selftest_startup_function_graph, | ||
851 | #endif | ||
662 | }; | 852 | }; |
663 | 853 | ||
664 | static __init int init_graph_trace(void) | 854 | static __init int init_graph_trace(void) |
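The graph_trace_open()/graph_trace_close() pair added above moves the "last pid printed" state out of a static and into per-reader, per-cpu storage hung off iter->private. A sketch of that allocation pattern in isolation, assuming the alloc_percpu()/per_cpu_ptr() API of this era (note the hunk pairs it with the older percpu_free() wrapper on close):

	#include <linux/percpu.h>

	/* Sketch: allocate and initialise per-cpu reader state, as
	 * graph_trace_open() does for its last_pid tracking. */
	static pid_t *open_last_pid_state(void)
	{
		pid_t *last_pid = alloc_percpu(pid_t);
		int cpu;

		if (!last_pid)
			return NULL;	/* tracer degrades, does not fail */

		for_each_possible_cpu(cpu)
			*per_cpu_ptr(last_pid, cpu) = -1; /* no pid seen yet */

		return last_pid;
	}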
diff --git a/kernel/trace/trace_hw_branches.c b/kernel/trace/trace_hw_branches.c index 649df22d435f..7bfdf4c2347f 100644 --- a/kernel/trace/trace_hw_branches.c +++ b/kernel/trace/trace_hw_branches.c | |||
@@ -1,30 +1,53 @@ | |||
1 | /* | 1 | /* |
2 | * h/w branch tracer for x86 based on bts | 2 | * h/w branch tracer for x86 based on bts |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Markus Metzger <markus.t.metzger@gmail.com> | 4 | * Copyright (C) 2008-2009 Intel Corporation. |
5 | * | 5 | * Markus Metzger <markus.t.metzger@gmail.com>, 2008-2009 |
6 | */ | 6 | */ |
7 | 7 | #include <linux/spinlock.h> | |
8 | #include <linux/module.h> | 8 | #include <linux/kallsyms.h> |
9 | #include <linux/fs.h> | ||
10 | #include <linux/debugfs.h> | 9 | #include <linux/debugfs.h> |
11 | #include <linux/ftrace.h> | 10 | #include <linux/ftrace.h> |
12 | #include <linux/kallsyms.h> | 11 | #include <linux/module.h> |
12 | #include <linux/cpu.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/fs.h> | ||
13 | 15 | ||
14 | #include <asm/ds.h> | 16 | #include <asm/ds.h> |
15 | 17 | ||
16 | #include "trace.h" | 18 | #include "trace.h" |
19 | #include "trace_output.h" | ||
17 | 20 | ||
18 | 21 | ||
19 | #define SIZEOF_BTS (1 << 13) | 22 | #define SIZEOF_BTS (1 << 13) |
20 | 23 | ||
24 | /* | ||
25 | * The tracer lock protects the below per-cpu tracer array. | ||
26 | * It needs to be held to: | ||
27 | * - start tracing on all cpus | ||
28 | * - stop tracing on all cpus | ||
29 | * - start tracing on a single hotplug cpu | ||
30 | * - stop tracing on a single hotplug cpu | ||
31 | * - read the trace from all cpus | ||
32 | * - read the trace from a single cpu | ||
33 | */ | ||
34 | static DEFINE_SPINLOCK(bts_tracer_lock); | ||
21 | static DEFINE_PER_CPU(struct bts_tracer *, tracer); | 35 | static DEFINE_PER_CPU(struct bts_tracer *, tracer); |
22 | static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer); | 36 | static DEFINE_PER_CPU(unsigned char[SIZEOF_BTS], buffer); |
23 | 37 | ||
24 | #define this_tracer per_cpu(tracer, smp_processor_id()) | 38 | #define this_tracer per_cpu(tracer, smp_processor_id()) |
25 | #define this_buffer per_cpu(buffer, smp_processor_id()) | 39 | #define this_buffer per_cpu(buffer, smp_processor_id()) |
26 | 40 | ||
41 | static int __read_mostly trace_hw_branches_enabled; | ||
42 | static struct trace_array *hw_branch_trace __read_mostly; | ||
43 | |||
27 | 44 | ||
45 | /* | ||
46 | * Start tracing on the current cpu. | ||
47 | * The argument is ignored. | ||
48 | * | ||
49 | * pre: bts_tracer_lock must be locked. | ||
50 | */ | ||
28 | static void bts_trace_start_cpu(void *arg) | 51 | static void bts_trace_start_cpu(void *arg) |
29 | { | 52 | { |
30 | if (this_tracer) | 53 | if (this_tracer) |
@@ -42,14 +65,20 @@ static void bts_trace_start_cpu(void *arg) | |||
42 | 65 | ||
43 | static void bts_trace_start(struct trace_array *tr) | 66 | static void bts_trace_start(struct trace_array *tr) |
44 | { | 67 | { |
45 | int cpu; | 68 | spin_lock(&bts_tracer_lock); |
46 | 69 | ||
47 | tracing_reset_online_cpus(tr); | 70 | on_each_cpu(bts_trace_start_cpu, NULL, 1); |
71 | trace_hw_branches_enabled = 1; | ||
48 | 72 | ||
49 | for_each_cpu(cpu, cpu_possible_mask) | 73 | spin_unlock(&bts_tracer_lock); |
50 | smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); | ||
51 | } | 74 | } |
52 | 75 | ||
76 | /* | ||
77 | * Stop tracing on the current cpu. | ||
78 | * The argument is ignored. | ||
79 | * | ||
80 | * pre: bts_tracer_lock must be locked. | ||
81 | */ | ||
53 | static void bts_trace_stop_cpu(void *arg) | 82 | static void bts_trace_stop_cpu(void *arg) |
54 | { | 83 | { |
55 | if (this_tracer) { | 84 | if (this_tracer) { |
@@ -60,26 +89,60 @@ static void bts_trace_stop_cpu(void *arg) | |||
60 | 89 | ||
61 | static void bts_trace_stop(struct trace_array *tr) | 90 | static void bts_trace_stop(struct trace_array *tr) |
62 | { | 91 | { |
63 | int cpu; | 92 | spin_lock(&bts_tracer_lock); |
93 | |||
94 | trace_hw_branches_enabled = 0; | ||
95 | on_each_cpu(bts_trace_stop_cpu, NULL, 1); | ||
96 | |||
97 | spin_unlock(&bts_tracer_lock); | ||
98 | } | ||
99 | |||
100 | static int __cpuinit bts_hotcpu_handler(struct notifier_block *nfb, | ||
101 | unsigned long action, void *hcpu) | ||
102 | { | ||
103 | unsigned int cpu = (unsigned long)hcpu; | ||
64 | 104 | ||
65 | for_each_cpu(cpu, cpu_possible_mask) | 105 | spin_lock(&bts_tracer_lock); |
106 | |||
107 | if (!trace_hw_branches_enabled) | ||
108 | goto out; | ||
109 | |||
110 | switch (action) { | ||
111 | case CPU_ONLINE: | ||
112 | case CPU_DOWN_FAILED: | ||
113 | smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1); | ||
114 | break; | ||
115 | case CPU_DOWN_PREPARE: | ||
66 | smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); | 116 | smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1); |
117 | break; | ||
118 | } | ||
119 | |||
120 | out: | ||
121 | spin_unlock(&bts_tracer_lock); | ||
122 | return NOTIFY_DONE; | ||
67 | } | 123 | } |
68 | 124 | ||
125 | static struct notifier_block bts_hotcpu_notifier __cpuinitdata = { | ||
126 | .notifier_call = bts_hotcpu_handler | ||
127 | }; | ||
128 | |||
69 | static int bts_trace_init(struct trace_array *tr) | 129 | static int bts_trace_init(struct trace_array *tr) |
70 | { | 130 | { |
71 | tracing_reset_online_cpus(tr); | 131 | hw_branch_trace = tr; |
132 | |||
72 | bts_trace_start(tr); | 133 | bts_trace_start(tr); |
73 | 134 | ||
74 | return 0; | 135 | return 0; |
75 | } | 136 | } |
76 | 137 | ||
138 | static void bts_trace_reset(struct trace_array *tr) | ||
139 | { | ||
140 | bts_trace_stop(tr); | ||
141 | } | ||
142 | |||
77 | static void bts_trace_print_header(struct seq_file *m) | 143 | static void bts_trace_print_header(struct seq_file *m) |
78 | { | 144 | { |
79 | seq_puts(m, | 145 | seq_puts(m, "# CPU# TO <- FROM\n"); |
80 | "# CPU# FROM TO FUNCTION\n"); | ||
81 | seq_puts(m, | ||
82 | "# | | | |\n"); | ||
83 | } | 146 | } |
84 | 147 | ||
85 | static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) | 148 | static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) |
@@ -87,15 +150,15 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) | |||
87 | struct trace_entry *entry = iter->ent; | 150 | struct trace_entry *entry = iter->ent; |
88 | struct trace_seq *seq = &iter->seq; | 151 | struct trace_seq *seq = &iter->seq; |
89 | struct hw_branch_entry *it; | 152 | struct hw_branch_entry *it; |
153 | unsigned long symflags = TRACE_ITER_SYM_OFFSET; | ||
90 | 154 | ||
91 | trace_assign_type(it, entry); | 155 | trace_assign_type(it, entry); |
92 | 156 | ||
93 | if (entry->type == TRACE_HW_BRANCHES) { | 157 | if (entry->type == TRACE_HW_BRANCHES) { |
94 | if (trace_seq_printf(seq, "%4d ", entry->cpu) && | 158 | if (trace_seq_printf(seq, "%4d ", iter->cpu) && |
95 | trace_seq_printf(seq, "0x%016llx -> 0x%016llx ", | 159 | seq_print_ip_sym(seq, it->to, symflags) && |
96 | it->from, it->to) && | 160 | trace_seq_printf(seq, "\t <- ") && |
97 | (!it->from || | 161 | seq_print_ip_sym(seq, it->from, symflags) && |
98 | seq_print_ip_sym(seq, it->from, /* sym_flags = */ 0)) && | ||
99 | trace_seq_printf(seq, "\n")) | 162 | trace_seq_printf(seq, "\n")) |
100 | return TRACE_TYPE_HANDLED; | 163 | return TRACE_TYPE_HANDLED; |
101 | return TRACE_TYPE_PARTIAL_LINE; | 164 | return TRACE_TYPE_PARTIAL_LINE; |
@@ -103,26 +166,42 @@ static enum print_line_t bts_trace_print_line(struct trace_iterator *iter) | |||
103 | return TRACE_TYPE_UNHANDLED; | 166 | return TRACE_TYPE_UNHANDLED; |
104 | } | 167 | } |
105 | 168 | ||
106 | void trace_hw_branch(struct trace_array *tr, u64 from, u64 to) | 169 | void trace_hw_branch(u64 from, u64 to) |
107 | { | 170 | { |
171 | struct trace_array *tr = hw_branch_trace; | ||
108 | struct ring_buffer_event *event; | 172 | struct ring_buffer_event *event; |
109 | struct hw_branch_entry *entry; | 173 | struct hw_branch_entry *entry; |
110 | unsigned long irq; | 174 | unsigned long irq1; |
175 | int cpu; | ||
111 | 176 | ||
112 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq); | 177 | if (unlikely(!tr)) |
113 | if (!event) | ||
114 | return; | 178 | return; |
179 | |||
180 | if (unlikely(!trace_hw_branches_enabled)) | ||
181 | return; | ||
182 | |||
183 | local_irq_save(irq1); | ||
184 | cpu = raw_smp_processor_id(); | ||
185 | if (atomic_inc_return(&tr->data[cpu]->disabled) != 1) | ||
186 | goto out; | ||
187 | |||
188 | event = trace_buffer_lock_reserve(tr, TRACE_HW_BRANCHES, | ||
189 | sizeof(*entry), 0, 0); | ||
190 | if (!event) | ||
191 | goto out; | ||
115 | entry = ring_buffer_event_data(event); | 192 | entry = ring_buffer_event_data(event); |
116 | tracing_generic_entry_update(&entry->ent, 0, from); | 193 | tracing_generic_entry_update(&entry->ent, 0, from); |
117 | entry->ent.type = TRACE_HW_BRANCHES; | 194 | entry->ent.type = TRACE_HW_BRANCHES; |
118 | entry->ent.cpu = smp_processor_id(); | ||
119 | entry->from = from; | 195 | entry->from = from; |
120 | entry->to = to; | 196 | entry->to = to; |
121 | ring_buffer_unlock_commit(tr->buffer, event, irq); | 197 | trace_buffer_unlock_commit(tr, event, 0, 0); |
198 | |||
199 | out: | ||
200 | atomic_dec(&tr->data[cpu]->disabled); | ||
201 | local_irq_restore(irq1); | ||
122 | } | 202 | } |
123 | 203 | ||
124 | static void trace_bts_at(struct trace_array *tr, | 204 | static void trace_bts_at(const struct bts_trace *trace, void *at) |
125 | const struct bts_trace *trace, void *at) | ||
126 | { | 205 | { |
127 | struct bts_struct bts; | 206 | struct bts_struct bts; |
128 | int err = 0; | 207 | int err = 0; |
@@ -137,18 +216,29 @@ static void trace_bts_at(struct trace_array *tr, | |||
137 | 216 | ||
138 | switch (bts.qualifier) { | 217 | switch (bts.qualifier) { |
139 | case BTS_BRANCH: | 218 | case BTS_BRANCH: |
140 | trace_hw_branch(tr, bts.variant.lbr.from, bts.variant.lbr.to); | 219 | trace_hw_branch(bts.variant.lbr.from, bts.variant.lbr.to); |
141 | break; | 220 | break; |
142 | } | 221 | } |
143 | } | 222 | } |
144 | 223 | ||
224 | /* | ||
225 | * Collect the trace on the current cpu and write it into the ftrace buffer. | ||
226 | * | ||
227 | * pre: bts_tracer_lock must be locked | ||
228 | */ | ||
145 | static void trace_bts_cpu(void *arg) | 229 | static void trace_bts_cpu(void *arg) |
146 | { | 230 | { |
147 | struct trace_array *tr = (struct trace_array *) arg; | 231 | struct trace_array *tr = (struct trace_array *) arg; |
148 | const struct bts_trace *trace; | 232 | const struct bts_trace *trace; |
149 | unsigned char *at; | 233 | unsigned char *at; |
150 | 234 | ||
151 | if (!this_tracer) | 235 | if (unlikely(!tr)) |
236 | return; | ||
237 | |||
238 | if (unlikely(atomic_read(&tr->data[raw_smp_processor_id()]->disabled))) | ||
239 | return; | ||
240 | |||
241 | if (unlikely(!this_tracer)) | ||
152 | return; | 242 | return; |
153 | 243 | ||
154 | ds_suspend_bts(this_tracer); | 244 | ds_suspend_bts(this_tracer); |
@@ -158,11 +248,11 @@ static void trace_bts_cpu(void *arg) | |||
158 | 248 | ||
159 | for (at = trace->ds.top; (void *)at < trace->ds.end; | 249 | for (at = trace->ds.top; (void *)at < trace->ds.end; |
160 | at += trace->ds.size) | 250 | at += trace->ds.size) |
161 | trace_bts_at(tr, trace, at); | 251 | trace_bts_at(trace, at); |
162 | 252 | ||
163 | for (at = trace->ds.begin; (void *)at < trace->ds.top; | 253 | for (at = trace->ds.begin; (void *)at < trace->ds.top; |
164 | at += trace->ds.size) | 254 | at += trace->ds.size) |
165 | trace_bts_at(tr, trace, at); | 255 | trace_bts_at(trace, at); |
166 | 256 | ||
167 | out: | 257 | out: |
168 | ds_resume_bts(this_tracer); | 258 | ds_resume_bts(this_tracer); |
@@ -170,26 +260,43 @@ out: | |||
170 | 260 | ||
171 | static void trace_bts_prepare(struct trace_iterator *iter) | 261 | static void trace_bts_prepare(struct trace_iterator *iter) |
172 | { | 262 | { |
173 | int cpu; | 263 | spin_lock(&bts_tracer_lock); |
264 | |||
265 | on_each_cpu(trace_bts_cpu, iter->tr, 1); | ||
266 | |||
267 | spin_unlock(&bts_tracer_lock); | ||
268 | } | ||
269 | |||
270 | static void trace_bts_close(struct trace_iterator *iter) | ||
271 | { | ||
272 | tracing_reset_online_cpus(iter->tr); | ||
273 | } | ||
274 | |||
275 | void trace_hw_branch_oops(void) | ||
276 | { | ||
277 | spin_lock(&bts_tracer_lock); | ||
278 | |||
279 | trace_bts_cpu(hw_branch_trace); | ||
174 | 280 | ||
175 | for_each_cpu(cpu, cpu_possible_mask) | 281 | spin_unlock(&bts_tracer_lock); |
176 | smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1); | ||
177 | } | 282 | } |
178 | 283 | ||
179 | struct tracer bts_tracer __read_mostly = | 284 | struct tracer bts_tracer __read_mostly = |
180 | { | 285 | { |
181 | .name = "hw-branch-tracer", | 286 | .name = "hw-branch-tracer", |
182 | .init = bts_trace_init, | 287 | .init = bts_trace_init, |
183 | .reset = bts_trace_stop, | 288 | .reset = bts_trace_reset, |
184 | .print_header = bts_trace_print_header, | 289 | .print_header = bts_trace_print_header, |
185 | .print_line = bts_trace_print_line, | 290 | .print_line = bts_trace_print_line, |
186 | .start = bts_trace_start, | 291 | .start = bts_trace_start, |
187 | .stop = bts_trace_stop, | 292 | .stop = bts_trace_stop, |
188 | .open = trace_bts_prepare | 293 | .open = trace_bts_prepare, |
294 | .close = trace_bts_close | ||
189 | }; | 295 | }; |
190 | 296 | ||
191 | __init static int init_bts_trace(void) | 297 | __init static int init_bts_trace(void) |
192 | { | 298 | { |
299 | register_hotcpu_notifier(&bts_hotcpu_notifier); | ||
193 | return register_tracer(&bts_tracer); | 300 | return register_tracer(&bts_tracer); |
194 | } | 301 | } |
195 | device_initcall(init_bts_trace); | 302 | device_initcall(init_bts_trace); |
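The BTS tracer now participates in CPU hotplug through the standard notifier pattern shown above: a notifier_block registered from the initcall, whose callback starts or stops the per-cpu tracer under bts_tracer_lock as CPUs come and go. A generic sketch of that shape, assuming the 2.6.29-era hotplug API; my_start_cpu()/my_stop_cpu() are hypothetical stand-ins for the real per-cpu work:

	#include <linux/cpu.h>
	#include <linux/notifier.h>
	#include <linux/smp.h>

	static void my_start_cpu(void *ignored) { /* bring per-cpu state up */ }
	static void my_stop_cpu(void *ignored)  { /* tear per-cpu state down */ }

	/* Sketch: hotplug callback in the style of bts_hotcpu_handler(). */
	static int __cpuinit my_hotcpu_handler(struct notifier_block *nfb,
					       unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action) {
		case CPU_ONLINE:
		case CPU_DOWN_FAILED:	/* cpu came (back) up: start it */
			smp_call_function_single(cpu, my_start_cpu, NULL, 1);
			break;
		case CPU_DOWN_PREPARE:	/* cpu about to vanish: stop it */
			smp_call_function_single(cpu, my_stop_cpu, NULL, 1);
			break;
		}
		return NOTIFY_DONE;
	}

	static struct notifier_block my_hotcpu_notifier __cpuinitdata = {
		.notifier_call = my_hotcpu_handler
	};
	/* registered once: register_hotcpu_notifier(&my_hotcpu_notifier); */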
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 62a78d943534..b923d13e2fad 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * trace irqs off criticall timings | 2 | * trace irqs off critical timings |
3 | * | 3 | * |
4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | 4 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> |
5 | * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> | 5 | * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com> |
@@ -32,6 +32,8 @@ enum { | |||
32 | 32 | ||
33 | static int trace_type __read_mostly; | 33 | static int trace_type __read_mostly; |
34 | 34 | ||
35 | static int save_lat_flag; | ||
36 | |||
35 | #ifdef CONFIG_PREEMPT_TRACER | 37 | #ifdef CONFIG_PREEMPT_TRACER |
36 | static inline int | 38 | static inline int |
37 | preempt_trace(void) | 39 | preempt_trace(void) |
@@ -95,7 +97,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
95 | disabled = atomic_inc_return(&data->disabled); | 97 | disabled = atomic_inc_return(&data->disabled); |
96 | 98 | ||
97 | if (likely(disabled == 1)) | 99 | if (likely(disabled == 1)) |
98 | trace_function(tr, data, ip, parent_ip, flags, preempt_count()); | 100 | trace_function(tr, ip, parent_ip, flags, preempt_count()); |
99 | 101 | ||
100 | atomic_dec(&data->disabled); | 102 | atomic_dec(&data->disabled); |
101 | } | 103 | } |
@@ -153,7 +155,7 @@ check_critical_timing(struct trace_array *tr, | |||
153 | if (!report_latency(delta)) | 155 | if (!report_latency(delta)) |
154 | goto out_unlock; | 156 | goto out_unlock; |
155 | 157 | ||
156 | trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc); | 158 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
157 | 159 | ||
158 | latency = nsecs_to_usecs(delta); | 160 | latency = nsecs_to_usecs(delta); |
159 | 161 | ||
@@ -177,7 +179,7 @@ out: | |||
177 | data->critical_sequence = max_sequence; | 179 | data->critical_sequence = max_sequence; |
178 | data->preempt_timestamp = ftrace_now(cpu); | 180 | data->preempt_timestamp = ftrace_now(cpu); |
179 | tracing_reset(tr, cpu); | 181 | tracing_reset(tr, cpu); |
180 | trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc); | 182 | trace_function(tr, CALLER_ADDR0, parent_ip, flags, pc); |
181 | } | 183 | } |
182 | 184 | ||
183 | static inline void | 185 | static inline void |
@@ -210,7 +212,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
210 | 212 | ||
211 | local_save_flags(flags); | 213 | local_save_flags(flags); |
212 | 214 | ||
213 | trace_function(tr, data, ip, parent_ip, flags, preempt_count()); | 215 | trace_function(tr, ip, parent_ip, flags, preempt_count()); |
214 | 216 | ||
215 | per_cpu(tracing_cpu, cpu) = 1; | 217 | per_cpu(tracing_cpu, cpu) = 1; |
216 | 218 | ||
@@ -244,7 +246,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip) | |||
244 | atomic_inc(&data->disabled); | 246 | atomic_inc(&data->disabled); |
245 | 247 | ||
246 | local_save_flags(flags); | 248 | local_save_flags(flags); |
247 | trace_function(tr, data, ip, parent_ip, flags, preempt_count()); | 249 | trace_function(tr, ip, parent_ip, flags, preempt_count()); |
248 | check_critical_timing(tr, data, parent_ip ? : ip, cpu); | 250 | check_critical_timing(tr, data, parent_ip ? : ip, cpu); |
249 | data->critical_start = 0; | 251 | data->critical_start = 0; |
250 | atomic_dec(&data->disabled); | 252 | atomic_dec(&data->disabled); |
@@ -353,33 +355,26 @@ void trace_preempt_off(unsigned long a0, unsigned long a1) | |||
353 | } | 355 | } |
354 | #endif /* CONFIG_PREEMPT_TRACER */ | 356 | #endif /* CONFIG_PREEMPT_TRACER */ |
355 | 357 | ||
356 | /* | ||
357 | * save_tracer_enabled is used to save the state of the tracer_enabled | ||
358 | * variable when we disable it when we open a trace output file. | ||
359 | */ | ||
360 | static int save_tracer_enabled; | ||
361 | |||
362 | static void start_irqsoff_tracer(struct trace_array *tr) | 358 | static void start_irqsoff_tracer(struct trace_array *tr) |
363 | { | 359 | { |
364 | register_ftrace_function(&trace_ops); | 360 | register_ftrace_function(&trace_ops); |
365 | if (tracing_is_enabled()) { | 361 | if (tracing_is_enabled()) |
366 | tracer_enabled = 1; | 362 | tracer_enabled = 1; |
367 | save_tracer_enabled = 1; | 363 | else |
368 | } else { | ||
369 | tracer_enabled = 0; | 364 | tracer_enabled = 0; |
370 | save_tracer_enabled = 0; | ||
371 | } | ||
372 | } | 365 | } |
373 | 366 | ||
374 | static void stop_irqsoff_tracer(struct trace_array *tr) | 367 | static void stop_irqsoff_tracer(struct trace_array *tr) |
375 | { | 368 | { |
376 | tracer_enabled = 0; | 369 | tracer_enabled = 0; |
377 | save_tracer_enabled = 0; | ||
378 | unregister_ftrace_function(&trace_ops); | 370 | unregister_ftrace_function(&trace_ops); |
379 | } | 371 | } |
380 | 372 | ||
381 | static void __irqsoff_tracer_init(struct trace_array *tr) | 373 | static void __irqsoff_tracer_init(struct trace_array *tr) |
382 | { | 374 | { |
375 | save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; | ||
376 | trace_flags |= TRACE_ITER_LATENCY_FMT; | ||
377 | |||
383 | tracing_max_latency = 0; | 378 | tracing_max_latency = 0; |
384 | irqsoff_trace = tr; | 379 | irqsoff_trace = tr; |
385 | /* make sure that the tracer is visible */ | 380 | /* make sure that the tracer is visible */ |
@@ -390,30 +385,19 @@ static void __irqsoff_tracer_init(struct trace_array *tr) | |||
390 | static void irqsoff_tracer_reset(struct trace_array *tr) | 385 | static void irqsoff_tracer_reset(struct trace_array *tr) |
391 | { | 386 | { |
392 | stop_irqsoff_tracer(tr); | 387 | stop_irqsoff_tracer(tr); |
388 | |||
389 | if (!save_lat_flag) | ||
390 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; | ||
393 | } | 391 | } |
394 | 392 | ||
395 | static void irqsoff_tracer_start(struct trace_array *tr) | 393 | static void irqsoff_tracer_start(struct trace_array *tr) |
396 | { | 394 | { |
397 | tracer_enabled = 1; | 395 | tracer_enabled = 1; |
398 | save_tracer_enabled = 1; | ||
399 | } | 396 | } |
400 | 397 | ||
401 | static void irqsoff_tracer_stop(struct trace_array *tr) | 398 | static void irqsoff_tracer_stop(struct trace_array *tr) |
402 | { | 399 | { |
403 | tracer_enabled = 0; | 400 | tracer_enabled = 0; |
404 | save_tracer_enabled = 0; | ||
405 | } | ||
406 | |||
407 | static void irqsoff_tracer_open(struct trace_iterator *iter) | ||
408 | { | ||
409 | /* stop the trace while dumping */ | ||
410 | tracer_enabled = 0; | ||
411 | } | ||
412 | |||
413 | static void irqsoff_tracer_close(struct trace_iterator *iter) | ||
414 | { | ||
415 | /* restart tracing */ | ||
416 | tracer_enabled = save_tracer_enabled; | ||
417 | } | 401 | } |
418 | 402 | ||
419 | #ifdef CONFIG_IRQSOFF_TRACER | 403 | #ifdef CONFIG_IRQSOFF_TRACER |
@@ -431,8 +415,6 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
431 | .reset = irqsoff_tracer_reset, | 415 | .reset = irqsoff_tracer_reset, |
432 | .start = irqsoff_tracer_start, | 416 | .start = irqsoff_tracer_start, |
433 | .stop = irqsoff_tracer_stop, | 417 | .stop = irqsoff_tracer_stop, |
434 | .open = irqsoff_tracer_open, | ||
435 | .close = irqsoff_tracer_close, | ||
436 | .print_max = 1, | 418 | .print_max = 1, |
437 | #ifdef CONFIG_FTRACE_SELFTEST | 419 | #ifdef CONFIG_FTRACE_SELFTEST |
438 | .selftest = trace_selftest_startup_irqsoff, | 420 | .selftest = trace_selftest_startup_irqsoff, |
@@ -459,8 +441,6 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
459 | .reset = irqsoff_tracer_reset, | 441 | .reset = irqsoff_tracer_reset, |
460 | .start = irqsoff_tracer_start, | 442 | .start = irqsoff_tracer_start, |
461 | .stop = irqsoff_tracer_stop, | 443 | .stop = irqsoff_tracer_stop, |
462 | .open = irqsoff_tracer_open, | ||
463 | .close = irqsoff_tracer_close, | ||
464 | .print_max = 1, | 444 | .print_max = 1, |
465 | #ifdef CONFIG_FTRACE_SELFTEST | 445 | #ifdef CONFIG_FTRACE_SELFTEST |
466 | .selftest = trace_selftest_startup_preemptoff, | 446 | .selftest = trace_selftest_startup_preemptoff, |
@@ -489,8 +469,6 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
489 | .reset = irqsoff_tracer_reset, | 469 | .reset = irqsoff_tracer_reset, |
490 | .start = irqsoff_tracer_start, | 470 | .start = irqsoff_tracer_start, |
491 | .stop = irqsoff_tracer_stop, | 471 | .stop = irqsoff_tracer_stop, |
492 | .open = irqsoff_tracer_open, | ||
493 | .close = irqsoff_tracer_close, | ||
494 | .print_max = 1, | 472 | .print_max = 1, |
495 | #ifdef CONFIG_FTRACE_SELFTEST | 473 | #ifdef CONFIG_FTRACE_SELFTEST |
496 | .selftest = trace_selftest_startup_preemptirqsoff, | 474 | .selftest = trace_selftest_startup_preemptirqsoff, |
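In place of the open()/close() callbacks that used to toggle tracer_enabled, the irqsoff family now forces the latency output format for its whole lifetime: init saves whether the user already had TRACE_ITER_LATENCY_FMT set, and reset restores that choice. The save/restore idiom, sketched on its own:

	/* Sketch: force a trace_flags bit while a tracer is active and
	 * restore the user's setting afterwards (cf. save_lat_flag). */
	static int saved_lat_bit;

	static void my_tracer_init_flags(void)
	{
		saved_lat_bit = trace_flags & TRACE_ITER_LATENCY_FMT;
		trace_flags |= TRACE_ITER_LATENCY_FMT;
	}

	static void my_tracer_reset_flags(void)
	{
		if (!saved_lat_bit)	/* user had it off: put it back */
			trace_flags &= ~TRACE_ITER_LATENCY_FMT;
	}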
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c index 80e503ef6136..c401b908e805 100644 --- a/kernel/trace/trace_mmiotrace.c +++ b/kernel/trace/trace_mmiotrace.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <asm/atomic.h> | 12 | #include <asm/atomic.h> |
13 | 13 | ||
14 | #include "trace.h" | 14 | #include "trace.h" |
15 | #include "trace_output.h" | ||
15 | 16 | ||
16 | struct header_iter { | 17 | struct header_iter { |
17 | struct pci_dev *dev; | 18 | struct pci_dev *dev; |
@@ -183,21 +184,22 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) | |||
183 | switch (rw->opcode) { | 184 | switch (rw->opcode) { |
184 | case MMIO_READ: | 185 | case MMIO_READ: |
185 | ret = trace_seq_printf(s, | 186 | ret = trace_seq_printf(s, |
186 | "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", | 187 | "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", |
187 | rw->width, secs, usec_rem, rw->map_id, | 188 | rw->width, secs, usec_rem, rw->map_id, |
188 | (unsigned long long)rw->phys, | 189 | (unsigned long long)rw->phys, |
189 | rw->value, rw->pc, 0); | 190 | rw->value, rw->pc, 0); |
190 | break; | 191 | break; |
191 | case MMIO_WRITE: | 192 | case MMIO_WRITE: |
192 | ret = trace_seq_printf(s, | 193 | ret = trace_seq_printf(s, |
193 | "W %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", | 194 | "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", |
194 | rw->width, secs, usec_rem, rw->map_id, | 195 | rw->width, secs, usec_rem, rw->map_id, |
195 | (unsigned long long)rw->phys, | 196 | (unsigned long long)rw->phys, |
196 | rw->value, rw->pc, 0); | 197 | rw->value, rw->pc, 0); |
197 | break; | 198 | break; |
198 | case MMIO_UNKNOWN_OP: | 199 | case MMIO_UNKNOWN_OP: |
199 | ret = trace_seq_printf(s, | 200 | ret = trace_seq_printf(s, |
200 | "UNKNOWN %lu.%06lu %d 0x%llx %02x,%02x,%02x 0x%lx %d\n", | 201 | "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx," |
202 | "%02lx 0x%lx %d\n", | ||
201 | secs, usec_rem, rw->map_id, | 203 | secs, usec_rem, rw->map_id, |
202 | (unsigned long long)rw->phys, | 204 | (unsigned long long)rw->phys, |
203 | (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff, | 205 | (rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff, |
@@ -229,14 +231,14 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter) | |||
229 | switch (m->opcode) { | 231 | switch (m->opcode) { |
230 | case MMIO_PROBE: | 232 | case MMIO_PROBE: |
231 | ret = trace_seq_printf(s, | 233 | ret = trace_seq_printf(s, |
232 | "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", | 234 | "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", |
233 | secs, usec_rem, m->map_id, | 235 | secs, usec_rem, m->map_id, |
234 | (unsigned long long)m->phys, m->virt, m->len, | 236 | (unsigned long long)m->phys, m->virt, m->len, |
235 | 0UL, 0); | 237 | 0UL, 0); |
236 | break; | 238 | break; |
237 | case MMIO_UNPROBE: | 239 | case MMIO_UNPROBE: |
238 | ret = trace_seq_printf(s, | 240 | ret = trace_seq_printf(s, |
239 | "UNMAP %lu.%06lu %d 0x%lx %d\n", | 241 | "UNMAP %u.%06lu %d 0x%lx %d\n", |
240 | secs, usec_rem, m->map_id, 0UL, 0); | 242 | secs, usec_rem, m->map_id, 0UL, 0); |
241 | break; | 243 | break; |
242 | default: | 244 | default: |
@@ -260,13 +262,10 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter) | |||
260 | int ret; | 262 | int ret; |
261 | 263 | ||
262 | /* The trailing newline must be in the message. */ | 264 | /* The trailing newline must be in the message. */ |
263 | ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg); | 265 | ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg); |
264 | if (!ret) | 266 | if (!ret) |
265 | return TRACE_TYPE_PARTIAL_LINE; | 267 | return TRACE_TYPE_PARTIAL_LINE; |
266 | 268 | ||
267 | if (entry->flags & TRACE_FLAG_CONT) | ||
268 | trace_seq_print_cont(s, iter); | ||
269 | |||
270 | return TRACE_TYPE_HANDLED; | 269 | return TRACE_TYPE_HANDLED; |
271 | } | 270 | } |
272 | 271 | ||
@@ -308,21 +307,17 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, | |||
308 | { | 307 | { |
309 | struct ring_buffer_event *event; | 308 | struct ring_buffer_event *event; |
310 | struct trace_mmiotrace_rw *entry; | 309 | struct trace_mmiotrace_rw *entry; |
311 | unsigned long irq_flags; | 310 | int pc = preempt_count(); |
312 | 311 | ||
313 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 312 | event = trace_buffer_lock_reserve(tr, TRACE_MMIO_RW, |
314 | &irq_flags); | 313 | sizeof(*entry), 0, pc); |
315 | if (!event) { | 314 | if (!event) { |
316 | atomic_inc(&dropped_count); | 315 | atomic_inc(&dropped_count); |
317 | return; | 316 | return; |
318 | } | 317 | } |
319 | entry = ring_buffer_event_data(event); | 318 | entry = ring_buffer_event_data(event); |
320 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); | ||
321 | entry->ent.type = TRACE_MMIO_RW; | ||
322 | entry->rw = *rw; | 319 | entry->rw = *rw; |
323 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 320 | trace_buffer_unlock_commit(tr, event, 0, pc); |
324 | |||
325 | trace_wake_up(); | ||
326 | } | 321 | } |
327 | 322 | ||
328 | void mmio_trace_rw(struct mmiotrace_rw *rw) | 323 | void mmio_trace_rw(struct mmiotrace_rw *rw) |
@@ -338,21 +333,17 @@ static void __trace_mmiotrace_map(struct trace_array *tr, | |||
338 | { | 333 | { |
339 | struct ring_buffer_event *event; | 334 | struct ring_buffer_event *event; |
340 | struct trace_mmiotrace_map *entry; | 335 | struct trace_mmiotrace_map *entry; |
341 | unsigned long irq_flags; | 336 | int pc = preempt_count(); |
342 | 337 | ||
343 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | 338 | event = trace_buffer_lock_reserve(tr, TRACE_MMIO_MAP, |
344 | &irq_flags); | 339 | sizeof(*entry), 0, pc); |
345 | if (!event) { | 340 | if (!event) { |
346 | atomic_inc(&dropped_count); | 341 | atomic_inc(&dropped_count); |
347 | return; | 342 | return; |
348 | } | 343 | } |
349 | entry = ring_buffer_event_data(event); | 344 | entry = ring_buffer_event_data(event); |
350 | tracing_generic_entry_update(&entry->ent, 0, preempt_count()); | ||
351 | entry->ent.type = TRACE_MMIO_MAP; | ||
352 | entry->map = *map; | 345 | entry->map = *map; |
353 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | 346 | trace_buffer_unlock_commit(tr, event, 0, pc); |
354 | |||
355 | trace_wake_up(); | ||
356 | } | 347 | } |
357 | 348 | ||
358 | void mmio_trace_mapping(struct mmiotrace_map *map) | 349 | void mmio_trace_mapping(struct mmiotrace_map *map) |
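Both mmiotrace emitters now funnel through trace_buffer_lock_reserve()/trace_buffer_unlock_commit(), which fold in the generic-entry update, the type assignment and the reader wakeup that each caller previously open-coded. The resulting call shape, sketched for a hypothetical record type (MY_EVENT and struct my_entry are made up):

	struct my_entry {
		struct trace_entry	ent;	/* required common header */
		unsigned long		payload;
	};

	/* Sketch: reserve, fill, commit — the shape __trace_mmiotrace_rw()
	 * now has. */
	static void emit_my_event(struct trace_array *tr, unsigned long payload)
	{
		struct ring_buffer_event *event;
		struct my_entry *entry;
		int pc = preempt_count();

		event = trace_buffer_lock_reserve(tr, MY_EVENT,
						  sizeof(*entry), 0, pc);
		if (!event)
			return;		/* buffer full: count it as dropped */
		entry = ring_buffer_event_data(event);
		entry->payload = payload;
		trace_buffer_unlock_commit(tr, event, 0, pc);
	}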
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c index b9767acd30ac..9aa84bde23cd 100644 --- a/kernel/trace/trace_nop.c +++ b/kernel/trace/trace_nop.c | |||
@@ -47,12 +47,7 @@ static void stop_nop_trace(struct trace_array *tr) | |||
47 | 47 | ||
48 | static int nop_trace_init(struct trace_array *tr) | 48 | static int nop_trace_init(struct trace_array *tr) |
49 | { | 49 | { |
50 | int cpu; | ||
51 | ctx_trace = tr; | 50 | ctx_trace = tr; |
52 | |||
53 | for_each_online_cpu(cpu) | ||
54 | tracing_reset(tr, cpu); | ||
55 | |||
56 | start_nop_trace(tr); | 51 | start_nop_trace(tr); |
57 | return 0; | 52 | return 0; |
58 | } | 53 | } |
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c new file mode 100644 index 000000000000..306fef84c503 --- /dev/null +++ b/kernel/trace/trace_output.c | |||
@@ -0,0 +1,887 @@ | |||
1 | /* | ||
2 | * trace_output.c | ||
3 | * | ||
4 | * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/module.h> | ||
9 | #include <linux/mutex.h> | ||
10 | #include <linux/ftrace.h> | ||
11 | |||
12 | #include "trace_output.h" | ||
13 | |||
14 | /* must be a power of 2 */ | ||
15 | #define EVENT_HASHSIZE 128 | ||
16 | |||
17 | static DEFINE_MUTEX(trace_event_mutex); | ||
18 | static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; | ||
19 | |||
20 | static int next_event_type = __TRACE_LAST_TYPE + 1; | ||
21 | |||
22 | /** | ||
23 | * trace_seq_printf - sequence printing of trace information | ||
24 | * @s: trace sequence descriptor | ||
25 | * @fmt: printf format string | ||
26 | * | ||
27 | * The tracer may use either sequence operations or its own | ||
28 | * copy to user routines. To simplify formatting of a trace, | ||
29 | * trace_seq_printf is used to store strings into a special | ||
30 | * buffer (@s). Then the output may be either used by | ||
31 | * the sequencer or pulled into another buffer. | ||
32 | */ | ||
33 | int | ||
34 | trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
35 | { | ||
36 | int len = (PAGE_SIZE - 1) - s->len; | ||
37 | va_list ap; | ||
38 | int ret; | ||
39 | |||
40 | if (!len) | ||
41 | return 0; | ||
42 | |||
43 | va_start(ap, fmt); | ||
44 | ret = vsnprintf(s->buffer + s->len, len, fmt, ap); | ||
45 | va_end(ap); | ||
46 | |||
47 | /* If we can't write it all, don't bother writing anything */ | ||
48 | if (ret >= len) | ||
49 | return 0; | ||
50 | |||
51 | s->len += ret; | ||
52 | |||
53 | return len; | ||
54 | } | ||
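trace_seq_printf() is deliberately all-or-nothing: if the formatted string does not fit in the remaining page, nothing is written and 0 comes back, which callers translate into TRACE_TYPE_PARTIAL_LINE. A hedged usage sketch:

	/* Sketch: a zero return means the line did not fit; the caller
	 * reports a partial line and the record is printed again later. */
	static int emit_ip_line(struct trace_seq *s, unsigned long ip)
	{
		if (!trace_seq_printf(s, "ip=%08lx\n", ip))
			return 0;	/* seq buffer full */
		return 1;
	}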
55 | |||
56 | /** | ||
57 | * trace_seq_puts - trace sequence printing of simple string | ||
58 | * @s: trace sequence descriptor | ||
59 | * @str: simple string to record | ||
60 | * | ||
61 | * The tracer may use either the sequence operations or its own | ||
62 | * copy to user routines. This function records a simple string | ||
63 | * into a special buffer (@s) for later retrieval by a sequencer | ||
64 | * or other mechanism. | ||
65 | */ | ||
66 | int trace_seq_puts(struct trace_seq *s, const char *str) | ||
67 | { | ||
68 | int len = strlen(str); | ||
69 | |||
70 | if (len > ((PAGE_SIZE - 1) - s->len)) | ||
71 | return 0; | ||
72 | |||
73 | memcpy(s->buffer + s->len, str, len); | ||
74 | s->len += len; | ||
75 | |||
76 | return len; | ||
77 | } | ||
78 | |||
79 | int trace_seq_putc(struct trace_seq *s, unsigned char c) | ||
80 | { | ||
81 | if (s->len >= (PAGE_SIZE - 1)) | ||
82 | return 0; | ||
83 | |||
84 | s->buffer[s->len++] = c; | ||
85 | |||
86 | return 1; | ||
87 | } | ||
88 | |||
89 | int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len) | ||
90 | { | ||
91 | if (len > ((PAGE_SIZE - 1) - s->len)) | ||
92 | return 0; | ||
93 | |||
94 | memcpy(s->buffer + s->len, mem, len); | ||
95 | s->len += len; | ||
96 | |||
97 | return len; | ||
98 | } | ||
99 | |||
100 | int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) | ||
101 | { | ||
102 | unsigned char hex[HEX_CHARS]; | ||
103 | unsigned char *data = mem; | ||
104 | int i, j; | ||
105 | |||
106 | #ifdef __BIG_ENDIAN | ||
107 | for (i = 0, j = 0; i < len; i++) { | ||
108 | #else | ||
109 | for (i = len-1, j = 0; i >= 0; i--) { | ||
110 | #endif | ||
111 | hex[j++] = hex_asc_hi(data[i]); | ||
112 | hex[j++] = hex_asc_lo(data[i]); | ||
113 | } | ||
114 | hex[j++] = ' '; | ||
115 | |||
116 | return trace_seq_putmem(s, hex, j); | ||
117 | } | ||
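The endian-dependent loop above makes the hex dump read as the natural numeric value on either byte order. An illustration, assuming an initialized trace_seq:

	static void demo_hex(struct trace_seq *s)
	{
		u16 v = 0x1234;
		/* little-endian memory holds {0x34, 0x12}; walking the bytes
		 * high-to-low prints "1234 ", byte-for-byte the same text a
		 * big-endian machine produces by walking low-to-high. */
		trace_seq_putmem_hex(s, &v, sizeof(v));
	}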
118 | |||
119 | int trace_seq_path(struct trace_seq *s, struct path *path) | ||
120 | { | ||
121 | unsigned char *p; | ||
122 | |||
123 | if (s->len >= (PAGE_SIZE - 1)) | ||
124 | return 0; | ||
125 | p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); | ||
126 | if (!IS_ERR(p)) { | ||
127 | p = mangle_path(s->buffer + s->len, p, "\n"); | ||
128 | if (p) { | ||
129 | s->len = p - s->buffer; | ||
130 | return 1; | ||
131 | } | ||
132 | } else { | ||
133 | s->buffer[s->len++] = '?'; | ||
134 | return 1; | ||
135 | } | ||
136 | |||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | #ifdef CONFIG_KRETPROBES | ||
141 | static inline const char *kretprobed(const char *name) | ||
142 | { | ||
143 | static const char tramp_name[] = "kretprobe_trampoline"; | ||
144 | int size = sizeof(tramp_name); | ||
145 | |||
146 | if (strncmp(tramp_name, name, size) == 0) | ||
147 | return "[unknown/kretprobe'd]"; | ||
148 | return name; | ||
149 | } | ||
150 | #else | ||
151 | static inline const char *kretprobed(const char *name) | ||
152 | { | ||
153 | return name; | ||
154 | } | ||
155 | #endif /* CONFIG_KRETPROBES */ | ||
156 | |||
157 | static int | ||
158 | seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address) | ||
159 | { | ||
160 | #ifdef CONFIG_KALLSYMS | ||
161 | char str[KSYM_SYMBOL_LEN]; | ||
162 | const char *name; | ||
163 | |||
164 | kallsyms_lookup(address, NULL, NULL, NULL, str); | ||
165 | |||
166 | name = kretprobed(str); | ||
167 | |||
168 | return trace_seq_printf(s, fmt, name); | ||
169 | #endif | ||
170 | return 1; | ||
171 | } | ||
172 | |||
173 | static int | ||
174 | seq_print_sym_offset(struct trace_seq *s, const char *fmt, | ||
175 | unsigned long address) | ||
176 | { | ||
177 | #ifdef CONFIG_KALLSYMS | ||
178 | char str[KSYM_SYMBOL_LEN]; | ||
179 | const char *name; | ||
180 | |||
181 | sprint_symbol(str, address); | ||
182 | name = kretprobed(str); | ||
183 | |||
184 | return trace_seq_printf(s, fmt, name); | ||
185 | #endif | ||
186 | return 1; | ||
187 | } | ||
188 | |||
189 | #ifndef CONFIG_64BIT | ||
190 | # define IP_FMT "%08lx" | ||
191 | #else | ||
192 | # define IP_FMT "%016lx" | ||
193 | #endif | ||
194 | |||
195 | int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | ||
196 | unsigned long ip, unsigned long sym_flags) | ||
197 | { | ||
198 | struct file *file = NULL; | ||
199 | unsigned long vmstart = 0; | ||
200 | int ret = 1; | ||
201 | |||
202 | if (mm) { | ||
203 | const struct vm_area_struct *vma; | ||
204 | |||
205 | down_read(&mm->mmap_sem); | ||
206 | vma = find_vma(mm, ip); | ||
207 | if (vma) { | ||
208 | file = vma->vm_file; | ||
209 | vmstart = vma->vm_start; | ||
210 | } | ||
211 | if (file) { | ||
212 | ret = trace_seq_path(s, &file->f_path); | ||
213 | if (ret) | ||
214 | ret = trace_seq_printf(s, "[+0x%lx]", | ||
215 | ip - vmstart); | ||
216 | } | ||
217 | up_read(&mm->mmap_sem); | ||
218 | } | ||
219 | if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file)) | ||
220 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
221 | return ret; | ||
222 | } | ||
223 | |||
224 | int | ||
225 | seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s, | ||
226 | unsigned long sym_flags) | ||
227 | { | ||
228 | struct mm_struct *mm = NULL; | ||
229 | int ret = 1; | ||
230 | unsigned int i; | ||
231 | |||
232 | if (trace_flags & TRACE_ITER_SYM_USEROBJ) { | ||
233 | struct task_struct *task; | ||
234 | /* | ||
235 | * we do the lookup on the thread group leader, | ||
236 | * since individual threads might have already quit! | ||
237 | */ | ||
238 | rcu_read_lock(); | ||
239 | task = find_task_by_vpid(entry->ent.tgid); | ||
240 | if (task) | ||
241 | mm = get_task_mm(task); | ||
242 | rcu_read_unlock(); | ||
243 | } | ||
244 | |||
245 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
246 | unsigned long ip = entry->caller[i]; | ||
247 | |||
248 | if (ip == ULONG_MAX || !ret) | ||
249 | break; | ||
250 | if (i && ret) | ||
251 | ret = trace_seq_puts(s, " <- "); | ||
252 | if (!ip) { | ||
253 | if (ret) | ||
254 | ret = trace_seq_puts(s, "??"); | ||
255 | continue; | ||
256 | } | ||
257 | if (!ret) | ||
258 | break; | ||
259 | if (ret) | ||
260 | ret = seq_print_user_ip(s, mm, ip, sym_flags); | ||
261 | } | ||
262 | |||
263 | if (mm) | ||
264 | mmput(mm); | ||
265 | return ret; | ||
266 | } | ||
267 | |||
268 | int | ||
269 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags) | ||
270 | { | ||
271 | int ret; | ||
272 | |||
273 | if (!ip) | ||
274 | return trace_seq_printf(s, "0"); | ||
275 | |||
276 | if (sym_flags & TRACE_ITER_SYM_OFFSET) | ||
277 | ret = seq_print_sym_offset(s, "%s", ip); | ||
278 | else | ||
279 | ret = seq_print_sym_short(s, "%s", ip); | ||
280 | |||
281 | if (!ret) | ||
282 | return 0; | ||
283 | |||
284 | if (sym_flags & TRACE_ITER_SYM_ADDR) | ||
285 | ret = trace_seq_printf(s, " <" IP_FMT ">", ip); | ||
286 | return ret; | ||
287 | } | ||
288 | |||
289 | static int | ||
290 | lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) | ||
291 | { | ||
292 | int hardirq, softirq; | ||
293 | char *comm; | ||
294 | |||
295 | comm = trace_find_cmdline(entry->pid); | ||
296 | hardirq = entry->flags & TRACE_FLAG_HARDIRQ; | ||
297 | softirq = entry->flags & TRACE_FLAG_SOFTIRQ; | ||
298 | |||
299 | if (!trace_seq_printf(s, "%8.8s-%-5d %3d%c%c%c", | ||
300 | comm, entry->pid, cpu, | ||
301 | (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : | ||
302 | (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? | ||
303 | 'X' : '.', | ||
304 | (entry->flags & TRACE_FLAG_NEED_RESCHED) ? | ||
305 | 'N' : '.', | ||
306 | (hardirq && softirq) ? 'H' : | ||
307 | hardirq ? 'h' : softirq ? 's' : '.')) | ||
308 | return 0; | ||
309 | |||
310 | if (entry->preempt_count) | ||
311 | return trace_seq_printf(s, "%x", entry->preempt_count); | ||
312 | return trace_seq_puts(s, "."); | ||
313 | } | ||
314 | |||
315 | static unsigned long preempt_mark_thresh = 100; | ||
316 | |||
317 | static int | ||
318 | lat_print_timestamp(struct trace_seq *s, u64 abs_usecs, | ||
319 | unsigned long rel_usecs) | ||
320 | { | ||
321 | return trace_seq_printf(s, " %4lldus%c: ", abs_usecs, | ||
322 | rel_usecs > preempt_mark_thresh ? '!' : | ||
323 | rel_usecs > 1 ? '+' : ' '); | ||
324 | } | ||
325 | |||
326 | int trace_print_context(struct trace_iterator *iter) | ||
327 | { | ||
328 | struct trace_seq *s = &iter->seq; | ||
329 | struct trace_entry *entry = iter->ent; | ||
330 | char *comm = trace_find_cmdline(entry->pid); | ||
331 | unsigned long long t = ns2usecs(iter->ts); | ||
332 | unsigned long usec_rem = do_div(t, USEC_PER_SEC); | ||
333 | unsigned long secs = (unsigned long)t; | ||
334 | |||
335 | return trace_seq_printf(s, "%16s-%-5d [%03d] %5lu.%06lu: ", | ||
336 | comm, entry->pid, iter->cpu, secs, usec_rem); | ||
337 | } | ||
338 | |||
339 | int trace_print_lat_context(struct trace_iterator *iter) | ||
340 | { | ||
341 | u64 next_ts; | ||
342 | int ret; | ||
343 | struct trace_seq *s = &iter->seq; | ||
344 | struct trace_entry *entry = iter->ent, | ||
345 | *next_entry = trace_find_next_entry(iter, NULL, | ||
346 | &next_ts); | ||
347 | unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); | ||
348 | unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start); | ||
349 | unsigned long rel_usecs; | ||
350 | |||
351 | if (!next_entry) | ||
352 | next_ts = iter->ts; | ||
353 | rel_usecs = ns2usecs(next_ts - iter->ts); | ||
354 | |||
355 | if (verbose) { | ||
356 | char *comm = trace_find_cmdline(entry->pid); | ||
357 | ret = trace_seq_printf(s, "%16s %5d %3d %d %08x %08lx [%08lx]" | ||
358 | " %ld.%03ldms (+%ld.%03ldms): ", comm, | ||
359 | entry->pid, iter->cpu, entry->flags, | ||
360 | entry->preempt_count, iter->idx, | ||
361 | ns2usecs(iter->ts), | ||
362 | abs_usecs / USEC_PER_MSEC, | ||
363 | abs_usecs % USEC_PER_MSEC, | ||
364 | rel_usecs / USEC_PER_MSEC, | ||
365 | rel_usecs % USEC_PER_MSEC); | ||
366 | } else { | ||
367 | ret = lat_print_generic(s, entry, iter->cpu); | ||
368 | if (ret) | ||
369 | ret = lat_print_timestamp(s, abs_usecs, rel_usecs); | ||
370 | } | ||
371 | |||
372 | return ret; | ||
373 | } | ||
374 | |||
375 | static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; | ||
376 | |||
377 | static int task_state_char(unsigned long state) | ||
378 | { | ||
379 | int bit = state ? __ffs(state) + 1 : 0; | ||
380 | |||
381 | return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?'; | ||
382 | } | ||
383 | |||
384 | /** | ||
385 | * ftrace_find_event - find a registered event | ||
386 | * @type: the type of event to look for | ||
387 | * | ||
388 | * Returns an event of type @type otherwise NULL | ||
389 | */ | ||
390 | struct trace_event *ftrace_find_event(int type) | ||
391 | { | ||
392 | struct trace_event *event; | ||
393 | struct hlist_node *n; | ||
394 | unsigned key; | ||
395 | |||
396 | key = type & (EVENT_HASHSIZE - 1); | ||
397 | |||
398 | hlist_for_each_entry_rcu(event, n, &event_hash[key], node) { | ||
399 | if (event->type == type) | ||
400 | return event; | ||
401 | } | ||
402 | |||
403 | return NULL; | ||
404 | } | ||
405 | |||
406 | /** | ||
407 | * register_ftrace_event - register output for an event type | ||
408 | * @event: the event type to register | ||
409 | * | ||
410 | * Event types are stored in a hash and this hash is used to | ||
411 | * find a way to print an event. If the @event->type is set | ||
412 | * then it will use that type, otherwise it will assign a | ||
413 | * type to use. | ||
414 | * | ||
415 | * If you assign your own type, please make sure it is added | ||
416 | * to the trace_type enum in trace.h, to avoid collisions | ||
417 | * with the dynamic types. | ||
418 | * | ||
419 | * Returns the event type number or zero on error. | ||
420 | */ | ||
421 | int register_ftrace_event(struct trace_event *event) | ||
422 | { | ||
423 | unsigned key; | ||
424 | int ret = 0; | ||
425 | |||
426 | mutex_lock(&trace_event_mutex); | ||
427 | |||
428 | if (!event->type) | ||
429 | event->type = next_event_type++; | ||
430 | else if (event->type > __TRACE_LAST_TYPE) { | ||
431 | printk(KERN_WARNING "Need to add type to trace.h\n"); | ||
432 | WARN_ON(1); | ||
433 | } | ||
434 | |||
435 | if (ftrace_find_event(event->type)) | ||
436 | goto out; | ||
437 | |||
438 | if (event->trace == NULL) | ||
439 | event->trace = trace_nop_print; | ||
440 | if (event->raw == NULL) | ||
441 | event->raw = trace_nop_print; | ||
442 | if (event->hex == NULL) | ||
443 | event->hex = trace_nop_print; | ||
444 | if (event->binary == NULL) | ||
445 | event->binary = trace_nop_print; | ||
446 | |||
447 | key = event->type & (EVENT_HASHSIZE - 1); | ||
448 | |||
449 | hlist_add_head_rcu(&event->node, &event_hash[key]); | ||
450 | |||
451 | ret = event->type; | ||
452 | out: | ||
453 | mutex_unlock(&trace_event_mutex); | ||
454 | |||
455 | return ret; | ||
456 | } | ||
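A tracer plugs its own formatter in by filling a struct trace_event and registering it; callbacks left NULL fall back to trace_nop_print, and a type of 0 asks the core to hand out the next dynamic number from the hash above. A hedged usage sketch with hypothetical names:

	/* Sketch: registering a custom output handler. */
	static enum print_line_t my_event_trace(struct trace_iterator *iter,
						int flags)
	{
		if (!trace_seq_printf(&iter->seq, "my event\n"))
			return TRACE_TYPE_PARTIAL_LINE;
		return TRACE_TYPE_HANDLED;
	}

	static struct trace_event my_event = {
		.type	= 0,		/* 0: core assigns a dynamic type */
		.trace	= my_event_trace,
	};

	/* at init time: int type = register_ftrace_event(&my_event); */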
457 | |||
458 | /** | ||
459 | * unregister_ftrace_event - remove a no longer used event | ||
460 | * @event: the event to remove | ||
461 | */ | ||
462 | int unregister_ftrace_event(struct trace_event *event) | ||
463 | { | ||
464 | mutex_lock(&trace_event_mutex); | ||
465 | hlist_del(&event->node); | ||
466 | mutex_unlock(&trace_event_mutex); | ||
467 | |||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | /* | ||
472 | * Standard events | ||
473 | */ | ||
474 | |||
475 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags) | ||
476 | { | ||
477 | return TRACE_TYPE_HANDLED; | ||
478 | } | ||
479 | |||
480 | /* TRACE_FN */ | ||
481 | static enum print_line_t trace_fn_trace(struct trace_iterator *iter, int flags) | ||
482 | { | ||
483 | struct ftrace_entry *field; | ||
484 | struct trace_seq *s = &iter->seq; | ||
485 | |||
486 | trace_assign_type(field, iter->ent); | ||
487 | |||
488 | if (!seq_print_ip_sym(s, field->ip, flags)) | ||
489 | goto partial; | ||
490 | |||
491 | if ((flags & TRACE_ITER_PRINT_PARENT) && field->parent_ip) { | ||
492 | if (!trace_seq_printf(s, " <-")) | ||
493 | goto partial; | ||
494 | if (!seq_print_ip_sym(s, | ||
495 | field->parent_ip, | ||
496 | flags)) | ||
497 | goto partial; | ||
498 | } | ||
499 | if (!trace_seq_printf(s, "\n")) | ||
500 | goto partial; | ||
501 | |||
502 | return TRACE_TYPE_HANDLED; | ||
503 | |||
504 | partial: | ||
505 | return TRACE_TYPE_PARTIAL_LINE; | ||
506 | } | ||
507 | |||
508 | static enum print_line_t trace_fn_raw(struct trace_iterator *iter, int flags) | ||
509 | { | ||
510 | struct ftrace_entry *field; | ||
511 | |||
512 | trace_assign_type(field, iter->ent); | ||
513 | |||
514 | if (!trace_seq_printf(&iter->seq, "%lx %lx\n", | ||
515 | field->ip, | ||
516 | field->parent_ip)) | ||
517 | return TRACE_TYPE_PARTIAL_LINE; | ||
518 | |||
519 | return TRACE_TYPE_HANDLED; | ||
520 | } | ||
521 | |||
522 | static enum print_line_t trace_fn_hex(struct trace_iterator *iter, int flags) | ||
523 | { | ||
524 | struct ftrace_entry *field; | ||
525 | struct trace_seq *s = &iter->seq; | ||
526 | |||
527 | trace_assign_type(field, iter->ent); | ||
528 | |||
529 | SEQ_PUT_HEX_FIELD_RET(s, field->ip); | ||
530 | SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip); | ||
531 | |||
532 | return TRACE_TYPE_HANDLED; | ||
533 | } | ||
534 | |||
535 | static enum print_line_t trace_fn_bin(struct trace_iterator *iter, int flags) | ||
536 | { | ||
537 | struct ftrace_entry *field; | ||
538 | struct trace_seq *s = &iter->seq; | ||
539 | |||
540 | trace_assign_type(field, iter->ent); | ||
541 | |||
542 | SEQ_PUT_FIELD_RET(s, field->ip); | ||
543 | SEQ_PUT_FIELD_RET(s, field->parent_ip); | ||
544 | |||
545 | return TRACE_TYPE_HANDLED; | ||
546 | } | ||
547 | |||
548 | static struct trace_event trace_fn_event = { | ||
549 | .type = TRACE_FN, | ||
550 | .trace = trace_fn_trace, | ||
551 | .raw = trace_fn_raw, | ||
552 | .hex = trace_fn_hex, | ||
553 | .binary = trace_fn_bin, | ||
554 | }; | ||
555 | |||
556 | /* TRACE_CTX and TRACE_WAKE */ | ||
557 | static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter, | ||
558 | char *delim) | ||
559 | { | ||
560 | struct ctx_switch_entry *field; | ||
561 | char *comm; | ||
562 | int S, T; | ||
563 | |||
564 | trace_assign_type(field, iter->ent); | ||
565 | |||
566 | T = task_state_char(field->next_state); | ||
567 | S = task_state_char(field->prev_state); | ||
568 | comm = trace_find_cmdline(field->next_pid); | ||
569 | if (!trace_seq_printf(&iter->seq, | ||
570 | " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", | ||
571 | field->prev_pid, | ||
572 | field->prev_prio, | ||
573 | S, delim, | ||
574 | field->next_cpu, | ||
575 | field->next_pid, | ||
576 | field->next_prio, | ||
577 | T, comm)) | ||
578 | return TRACE_TYPE_PARTIAL_LINE; | ||
579 | |||
580 | return TRACE_TYPE_HANDLED; | ||
581 | } | ||
582 | |||
583 | static enum print_line_t trace_ctx_print(struct trace_iterator *iter, int flags) | ||
584 | { | ||
585 | return trace_ctxwake_print(iter, "==>"); | ||
586 | } | ||
587 | |||
588 | static enum print_line_t trace_wake_print(struct trace_iterator *iter, | ||
589 | int flags) | ||
590 | { | ||
591 | return trace_ctxwake_print(iter, " +"); | ||
592 | } | ||
593 | |||
594 | static int trace_ctxwake_raw(struct trace_iterator *iter, char S) | ||
595 | { | ||
596 | struct ctx_switch_entry *field; | ||
597 | int T; | ||
598 | |||
599 | trace_assign_type(field, iter->ent); | ||
600 | |||
601 | if (!S) | ||
602 | S = task_state_char(field->prev_state); | ||
603 | T = task_state_char(field->next_state); | ||
604 | if (!trace_seq_printf(&iter->seq, "%d %d %c %d %d %d %c\n", | ||
605 | field->prev_pid, | ||
606 | field->prev_prio, | ||
607 | S, | ||
608 | field->next_cpu, | ||
609 | field->next_pid, | ||
610 | field->next_prio, | ||
611 | T)) | ||
612 | return TRACE_TYPE_PARTIAL_LINE; | ||
613 | |||
614 | return TRACE_TYPE_HANDLED; | ||
615 | } | ||
616 | |||
617 | static enum print_line_t trace_ctx_raw(struct trace_iterator *iter, int flags) | ||
618 | { | ||
619 | return trace_ctxwake_raw(iter, 0); | ||
620 | } | ||
621 | |||
622 | static enum print_line_t trace_wake_raw(struct trace_iterator *iter, int flags) | ||
623 | { | ||
624 | return trace_ctxwake_raw(iter, '+'); | ||
625 | } | ||
626 | |||
627 | |||
628 | static int trace_ctxwake_hex(struct trace_iterator *iter, char S) | ||
629 | { | ||
630 | struct ctx_switch_entry *field; | ||
631 | struct trace_seq *s = &iter->seq; | ||
632 | int T; | ||
633 | |||
634 | trace_assign_type(field, iter->ent); | ||
635 | |||
636 | if (!S) | ||
637 | S = task_state_char(field->prev_state); | ||
638 | T = task_state_char(field->next_state); | ||
639 | |||
640 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid); | ||
641 | SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio); | ||
642 | SEQ_PUT_HEX_FIELD_RET(s, S); | ||
643 | SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu); | ||
644 | SEQ_PUT_HEX_FIELD_RET(s, field->next_pid); | ||
645 | SEQ_PUT_HEX_FIELD_RET(s, field->next_prio); | ||
646 | SEQ_PUT_HEX_FIELD_RET(s, T); | ||
647 | |||
648 | return TRACE_TYPE_HANDLED; | ||
649 | } | ||
650 | |||
651 | static enum print_line_t trace_ctx_hex(struct trace_iterator *iter, int flags) | ||
652 | { | ||
653 | return trace_ctxwake_hex(iter, 0); | ||
654 | } | ||
655 | |||
656 | static enum print_line_t trace_wake_hex(struct trace_iterator *iter, int flags) | ||
657 | { | ||
658 | return trace_ctxwake_hex(iter, '+'); | ||
659 | } | ||
660 | |||
661 | static enum print_line_t trace_ctxwake_bin(struct trace_iterator *iter, | ||
662 | int flags) | ||
663 | { | ||
664 | struct ctx_switch_entry *field; | ||
665 | struct trace_seq *s = &iter->seq; | ||
666 | |||
667 | trace_assign_type(field, iter->ent); | ||
668 | |||
669 | SEQ_PUT_FIELD_RET(s, field->prev_pid); | ||
670 | SEQ_PUT_FIELD_RET(s, field->prev_prio); | ||
671 | SEQ_PUT_FIELD_RET(s, field->prev_state); | ||
672 | SEQ_PUT_FIELD_RET(s, field->next_pid); | ||
673 | SEQ_PUT_FIELD_RET(s, field->next_prio); | ||
674 | SEQ_PUT_FIELD_RET(s, field->next_state); | ||
675 | |||
676 | return TRACE_TYPE_HANDLED; | ||
677 | } | ||
678 | |||
679 | static struct trace_event trace_ctx_event = { | ||
680 | .type = TRACE_CTX, | ||
681 | .trace = trace_ctx_print, | ||
682 | .raw = trace_ctx_raw, | ||
683 | .hex = trace_ctx_hex, | ||
684 | .binary = trace_ctxwake_bin, | ||
685 | }; | ||
686 | |||
687 | static struct trace_event trace_wake_event = { | ||
688 | .type = TRACE_WAKE, | ||
689 | .trace = trace_wake_print, | ||
690 | .raw = trace_wake_raw, | ||
691 | .hex = trace_wake_hex, | ||
692 | .binary = trace_ctxwake_bin, | ||
693 | }; | ||
694 | |||
695 | /* TRACE_SPECIAL */ | ||
696 | static enum print_line_t trace_special_print(struct trace_iterator *iter, | ||
697 | int flags) | ||
698 | { | ||
699 | struct special_entry *field; | ||
700 | |||
701 | trace_assign_type(field, iter->ent); | ||
702 | |||
703 | if (!trace_seq_printf(&iter->seq, "# %ld %ld %ld\n", | ||
704 | field->arg1, | ||
705 | field->arg2, | ||
706 | field->arg3)) | ||
707 | return TRACE_TYPE_PARTIAL_LINE; | ||
708 | |||
709 | return TRACE_TYPE_HANDLED; | ||
710 | } | ||
711 | |||
712 | static enum print_line_t trace_special_hex(struct trace_iterator *iter, | ||
713 | int flags) | ||
714 | { | ||
715 | struct special_entry *field; | ||
716 | struct trace_seq *s = &iter->seq; | ||
717 | |||
718 | trace_assign_type(field, iter->ent); | ||
719 | |||
720 | SEQ_PUT_HEX_FIELD_RET(s, field->arg1); | ||
721 | SEQ_PUT_HEX_FIELD_RET(s, field->arg2); | ||
722 | SEQ_PUT_HEX_FIELD_RET(s, field->arg3); | ||
723 | |||
724 | return TRACE_TYPE_HANDLED; | ||
725 | } | ||
726 | |||
727 | static enum print_line_t trace_special_bin(struct trace_iterator *iter, | ||
728 | int flags) | ||
729 | { | ||
730 | struct special_entry *field; | ||
731 | struct trace_seq *s = &iter->seq; | ||
732 | |||
733 | trace_assign_type(field, iter->ent); | ||
734 | |||
735 | SEQ_PUT_FIELD_RET(s, field->arg1); | ||
736 | SEQ_PUT_FIELD_RET(s, field->arg2); | ||
737 | SEQ_PUT_FIELD_RET(s, field->arg3); | ||
738 | |||
739 | return TRACE_TYPE_HANDLED; | ||
740 | } | ||
741 | |||
742 | static struct trace_event trace_special_event = { | ||
743 | .type = TRACE_SPECIAL, | ||
744 | .trace = trace_special_print, | ||
745 | .raw = trace_special_print, | ||
746 | .hex = trace_special_hex, | ||
747 | .binary = trace_special_bin, | ||
748 | }; | ||
749 | |||
750 | /* TRACE_STACK */ | ||
751 | |||
752 | static enum print_line_t trace_stack_print(struct trace_iterator *iter, | ||
753 | int flags) | ||
754 | { | ||
755 | struct stack_entry *field; | ||
756 | struct trace_seq *s = &iter->seq; | ||
757 | int i; | ||
758 | |||
759 | trace_assign_type(field, iter->ent); | ||
760 | |||
761 | for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { | ||
762 | if (i) { | ||
763 | if (!trace_seq_puts(s, " <= ")) | ||
764 | goto partial; | ||
765 | } | ||
766 | if (!seq_print_ip_sym(s, field->caller[i], flags)) | ||
767 | goto partial; | ||
768 | } | ||
769 | if (!trace_seq_puts(s, "\n")) | ||
770 | goto partial; | ||
772 | |||
773 | return TRACE_TYPE_HANDLED; | ||
774 | |||
775 | partial: | ||
776 | return TRACE_TYPE_PARTIAL_LINE; | ||
777 | } | ||
778 | |||
779 | static struct trace_event trace_stack_event = { | ||
780 | .type = TRACE_STACK, | ||
781 | .trace = trace_stack_print, | ||
782 | .raw = trace_special_print, | ||
783 | .hex = trace_special_hex, | ||
784 | .binary = trace_special_bin, | ||
785 | }; | ||
786 | |||
787 | /* TRACE_USER_STACK */ | ||
788 | static enum print_line_t trace_user_stack_print(struct trace_iterator *iter, | ||
789 | int flags) | ||
790 | { | ||
791 | struct userstack_entry *field; | ||
792 | struct trace_seq *s = &iter->seq; | ||
793 | |||
794 | trace_assign_type(field, iter->ent); | ||
795 | |||
796 | if (!seq_print_userip_objs(field, s, flags)) | ||
797 | goto partial; | ||
798 | |||
799 | if (!trace_seq_putc(s, '\n')) | ||
800 | goto partial; | ||
801 | |||
802 | return TRACE_TYPE_HANDLED; | ||
803 | |||
804 | partial: | ||
805 | return TRACE_TYPE_PARTIAL_LINE; | ||
806 | } | ||
807 | |||
808 | static struct trace_event trace_user_stack_event = { | ||
809 | .type = TRACE_USER_STACK, | ||
810 | .trace = trace_user_stack_print, | ||
811 | .raw = trace_special_print, | ||
812 | .hex = trace_special_hex, | ||
813 | .binary = trace_special_bin, | ||
814 | }; | ||
815 | |||
816 | /* TRACE_PRINT */ | ||
817 | static enum print_line_t trace_print_print(struct trace_iterator *iter, | ||
818 | int flags) | ||
819 | { | ||
820 | struct print_entry *field; | ||
821 | struct trace_seq *s = &iter->seq; | ||
822 | |||
823 | trace_assign_type(field, iter->ent); | ||
824 | |||
825 | if (!seq_print_ip_sym(s, field->ip, flags)) | ||
826 | goto partial; | ||
827 | |||
828 | if (!trace_seq_printf(s, ": %s", field->buf)) | ||
829 | goto partial; | ||
830 | |||
831 | return TRACE_TYPE_HANDLED; | ||
832 | |||
833 | partial: | ||
834 | return TRACE_TYPE_PARTIAL_LINE; | ||
835 | } | ||
836 | |||
837 | static enum print_line_t trace_print_raw(struct trace_iterator *iter, int flags) | ||
838 | { | ||
839 | struct print_entry *field; | ||
840 | |||
841 | trace_assign_type(field, iter->ent); | ||
842 | |||
843 | if (!trace_seq_printf(&iter->seq, "# %lx %s", field->ip, field->buf)) | ||
844 | goto partial; | ||
845 | |||
846 | return TRACE_TYPE_HANDLED; | ||
847 | |||
848 | partial: | ||
849 | return TRACE_TYPE_PARTIAL_LINE; | ||
850 | } | ||
851 | |||
852 | static struct trace_event trace_print_event = { | ||
853 | .type = TRACE_PRINT, | ||
854 | .trace = trace_print_print, | ||
855 | .raw = trace_print_raw, | ||
856 | }; | ||
857 | |||
858 | static struct trace_event *events[] __initdata = { | ||
859 | &trace_fn_event, | ||
860 | &trace_ctx_event, | ||
861 | &trace_wake_event, | ||
862 | &trace_special_event, | ||
863 | &trace_stack_event, | ||
864 | &trace_user_stack_event, | ||
865 | &trace_print_event, | ||
866 | NULL | ||
867 | }; | ||
868 | |||
869 | __init static int init_events(void) | ||
870 | { | ||
871 | struct trace_event *event; | ||
872 | int i, ret; | ||
873 | |||
874 | for (i = 0; events[i]; i++) { | ||
875 | event = events[i]; | ||
876 | |||
877 | ret = register_ftrace_event(event); | ||
878 | if (!ret) { | ||
879 | printk(KERN_WARNING "event %d failed to register\n", | ||
880 | event->type); | ||
881 | WARN_ON_ONCE(1); | ||
882 | } | ||
883 | } | ||
884 | |||
885 | return 0; | ||
886 | } | ||
887 | device_initcall(init_events); | ||
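
For reference: register_ftrace_event() keys each of these printer sets to its entry type, and trace_output.h (added below) declares trace_nop_print(), which presumably backfills any callback left NULL. A minimal sketch of an extra printer against this interface, with hypothetical TRACE_EXAMPLE/example_* names that are not part of this patch:

static enum print_line_t example_print(struct trace_iterator *iter, int flags)
{
	/* emit one line into the iterator's output buffer */
	if (!trace_seq_printf(&iter->seq, "example event\n"))
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

static struct trace_event example_event = {
	.type	= TRACE_EXAMPLE,	/* hypothetical entry type */
	.trace	= example_print,	/* .raw/.hex/.binary left to the default */
};
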
diff --git a/kernel/trace/trace_output.h b/kernel/trace/trace_output.h new file mode 100644 index 000000000000..8a34d688ed63 --- /dev/null +++ b/kernel/trace/trace_output.h | |||
@@ -0,0 +1,61 @@ | |||
1 | #ifndef __TRACE_EVENTS_H | ||
2 | #define __TRACE_EVENTS_H | ||
3 | |||
4 | #include "trace.h" | ||
5 | |||
6 | typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, | ||
7 | int flags); | ||
8 | |||
9 | struct trace_event { | ||
10 | struct hlist_node node; | ||
11 | int type; | ||
12 | trace_print_func trace; | ||
13 | trace_print_func raw; | ||
14 | trace_print_func hex; | ||
15 | trace_print_func binary; | ||
16 | }; | ||
17 | |||
18 | extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) | ||
19 | __attribute__ ((format (printf, 2, 3))); | ||
20 | extern int | ||
21 | seq_print_ip_sym(struct trace_seq *s, unsigned long ip, | ||
22 | unsigned long sym_flags); | ||
23 | extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, | ||
24 | size_t cnt); | ||
25 | int trace_seq_puts(struct trace_seq *s, const char *str); | ||
26 | int trace_seq_putc(struct trace_seq *s, unsigned char c); | ||
27 | int trace_seq_putmem(struct trace_seq *s, void *mem, size_t len); | ||
28 | int trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len); | ||
29 | int trace_seq_path(struct trace_seq *s, struct path *path); | ||
30 | int seq_print_userip_objs(const struct userstack_entry *entry, | ||
31 | struct trace_seq *s, unsigned long sym_flags); | ||
32 | int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm, | ||
33 | unsigned long ip, unsigned long sym_flags); | ||
34 | |||
35 | int trace_print_context(struct trace_iterator *iter); | ||
36 | int trace_print_lat_context(struct trace_iterator *iter); | ||
37 | |||
38 | struct trace_event *ftrace_find_event(int type); | ||
39 | int register_ftrace_event(struct trace_event *event); | ||
40 | int unregister_ftrace_event(struct trace_event *event); | ||
41 | |||
42 | enum print_line_t trace_nop_print(struct trace_iterator *iter, int flags); | ||
43 | |||
44 | #define MAX_MEMHEX_BYTES 8 | ||
45 | #define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1) | ||
46 | |||
47 | #define SEQ_PUT_FIELD_RET(s, x) \ | ||
48 | do { \ | ||
49 | if (!trace_seq_putmem(s, &(x), sizeof(x))) \ | ||
50 | return TRACE_TYPE_PARTIAL_LINE; \ | ||
51 | } while (0) | ||
52 | |||
53 | #define SEQ_PUT_HEX_FIELD_RET(s, x) \ | ||
54 | do { \ | ||
55 | BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \ | ||
56 | if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ | ||
57 | return TRACE_TYPE_PARTIAL_LINE; \ | ||
58 | } while (0) | ||
59 | |||
60 | #endif | ||
61 | |||
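
The two *_RET macros above hide the early-return boilerplate: each writes one fixed-size field into the trace_seq and makes the enclosing printer return TRACE_TYPE_PARTIAL_LINE when the buffer fills, which is why the hex and binary printers earlier in this patch read as straight-line code. A sketch of the intended usage, with a hypothetical my_entry layout:

static enum print_line_t my_entry_bin(struct trace_iterator *iter, int flags)
{
	struct my_entry *field;			/* hypothetical entry type */
	struct trace_seq *s = &iter->seq;

	trace_assign_type(field, iter->ent);

	SEQ_PUT_FIELD_RET(s, field->a);		/* raw bytes */
	SEQ_PUT_HEX_FIELD_RET(s, field->b);	/* hex, at most 8 bytes */

	return TRACE_TYPE_HANDLED;
}
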
diff --git a/kernel/trace/trace_power.c b/kernel/trace/trace_power.c index 7bda248daf55..91ce672fb037 100644 --- a/kernel/trace/trace_power.c +++ b/kernel/trace/trace_power.c | |||
@@ -11,24 +11,126 @@ | |||
11 | 11 | ||
12 | #include <linux/init.h> | 12 | #include <linux/init.h> |
13 | #include <linux/debugfs.h> | 13 | #include <linux/debugfs.h> |
14 | #include <linux/ftrace.h> | 14 | #include <trace/power.h> |
15 | #include <linux/kallsyms.h> | 15 | #include <linux/kallsyms.h> |
16 | #include <linux/module.h> | 16 | #include <linux/module.h> |
17 | 17 | ||
18 | #include "trace.h" | 18 | #include "trace.h" |
19 | #include "trace_output.h" | ||
19 | 20 | ||
20 | static struct trace_array *power_trace; | 21 | static struct trace_array *power_trace; |
21 | static int __read_mostly trace_power_enabled; | 22 | static int __read_mostly trace_power_enabled; |
22 | 23 | ||
24 | static void probe_power_start(struct power_trace *it, unsigned int type, | ||
25 | unsigned int level) | ||
26 | { | ||
27 | if (!trace_power_enabled) | ||
28 | return; | ||
29 | |||
30 | memset(it, 0, sizeof(struct power_trace)); | ||
31 | it->state = level; | ||
32 | it->type = type; | ||
33 | it->stamp = ktime_get(); | ||
34 | } | ||
35 | |||
36 | |||
37 | static void probe_power_end(struct power_trace *it) | ||
38 | { | ||
39 | struct ring_buffer_event *event; | ||
40 | struct trace_power *entry; | ||
41 | struct trace_array_cpu *data; | ||
42 | struct trace_array *tr = power_trace; | ||
43 | |||
44 | if (!trace_power_enabled) | ||
45 | return; | ||
46 | |||
47 | preempt_disable(); | ||
48 | it->end = ktime_get(); | ||
49 | data = tr->data[smp_processor_id()]; | ||
50 | |||
51 | event = trace_buffer_lock_reserve(tr, TRACE_POWER, | ||
52 | sizeof(*entry), 0, 0); | ||
53 | if (!event) | ||
54 | goto out; | ||
55 | entry = ring_buffer_event_data(event); | ||
56 | entry->state_data = *it; | ||
57 | trace_buffer_unlock_commit(tr, event, 0, 0); | ||
58 | out: | ||
59 | preempt_enable(); | ||
60 | } | ||
61 | |||
62 | static void probe_power_mark(struct power_trace *it, unsigned int type, | ||
63 | unsigned int level) | ||
64 | { | ||
65 | struct ring_buffer_event *event; | ||
66 | struct trace_power *entry; | ||
67 | struct trace_array_cpu *data; | ||
68 | struct trace_array *tr = power_trace; | ||
69 | |||
70 | if (!trace_power_enabled) | ||
71 | return; | ||
72 | |||
73 | memset(it, 0, sizeof(struct power_trace)); | ||
74 | it->state = level; | ||
75 | it->type = type; | ||
76 | it->stamp = ktime_get(); | ||
77 | preempt_disable(); | ||
78 | it->end = it->stamp; | ||
79 | data = tr->data[smp_processor_id()]; | ||
80 | |||
81 | event = trace_buffer_lock_reserve(tr, TRACE_POWER, | ||
82 | sizeof(*entry), 0, 0); | ||
83 | if (!event) | ||
84 | goto out; | ||
85 | entry = ring_buffer_event_data(event); | ||
86 | entry->state_data = *it; | ||
87 | trace_buffer_unlock_commit(tr, event, 0, 0); | ||
88 | out: | ||
89 | preempt_enable(); | ||
90 | } | ||
91 | |||
92 | static int tracing_power_register(void) | ||
93 | { | ||
94 | int ret; | ||
95 | |||
96 | ret = register_trace_power_start(probe_power_start); | ||
97 | if (ret) { | ||
98 | pr_info("power trace: Couldn't activate tracepoint" | ||
99 | " probe to trace_power_start\n"); | ||
100 | return ret; | ||
101 | } | ||
102 | ret = register_trace_power_end(probe_power_end); | ||
103 | if (ret) { | ||
104 | pr_info("power trace: Couldn't activate tracepoint" | ||
105 | " probe to trace_power_end\n"); | ||
106 | goto fail_start; | ||
107 | } | ||
108 | ret = register_trace_power_mark(probe_power_mark); | ||
109 | if (ret) { | ||
110 | pr_info("power trace: Couldn't activate tracepoint" | ||
111 | " probe to trace_power_mark\n"); | ||
112 | goto fail_end; | ||
113 | } | ||
114 | return ret; | ||
115 | fail_end: | ||
116 | unregister_trace_power_end(probe_power_end); | ||
117 | fail_start: | ||
118 | unregister_trace_power_start(probe_power_start); | ||
119 | return ret; | ||
120 | } | ||
23 | 121 | ||
24 | static void start_power_trace(struct trace_array *tr) | 122 | static void start_power_trace(struct trace_array *tr) |
25 | { | 123 | { |
26 | trace_power_enabled = 1; | 124 | trace_power_enabled = 1; |
125 | tracing_power_register(); | ||
27 | } | 126 | } |
28 | 127 | ||
29 | static void stop_power_trace(struct trace_array *tr) | 128 | static void stop_power_trace(struct trace_array *tr) |
30 | { | 129 | { |
31 | trace_power_enabled = 0; | 130 | trace_power_enabled = 0; |
131 | unregister_trace_power_start(probe_power_start); | ||
132 | unregister_trace_power_end(probe_power_end); | ||
133 | unregister_trace_power_mark(probe_power_mark); | ||
32 | } | 134 | } |
33 | 135 | ||
34 | 136 | ||
@@ -38,6 +140,7 @@ static int power_trace_init(struct trace_array *tr) | |||
38 | power_trace = tr; | 140 | power_trace = tr; |
39 | 141 | ||
40 | trace_power_enabled = 1; | 142 | trace_power_enabled = 1; |
143 | tracing_power_register(); | ||
41 | 144 | ||
42 | for_each_cpu(cpu, cpu_possible_mask) | 145 | for_each_cpu(cpu, cpu_possible_mask) |
43 | tracing_reset(tr, cpu); | 146 | tracing_reset(tr, cpu); |
@@ -94,86 +197,3 @@ static int init_power_trace(void) | |||
94 | return register_tracer(&power_tracer); | 197 | return register_tracer(&power_tracer); |
95 | } | 198 | } |
96 | device_initcall(init_power_trace); | 199 | device_initcall(init_power_trace); |
97 | |||
98 | void trace_power_start(struct power_trace *it, unsigned int type, | ||
99 | unsigned int level) | ||
100 | { | ||
101 | if (!trace_power_enabled) | ||
102 | return; | ||
103 | |||
104 | memset(it, 0, sizeof(struct power_trace)); | ||
105 | it->state = level; | ||
106 | it->type = type; | ||
107 | it->stamp = ktime_get(); | ||
108 | } | ||
109 | EXPORT_SYMBOL_GPL(trace_power_start); | ||
110 | |||
111 | |||
112 | void trace_power_end(struct power_trace *it) | ||
113 | { | ||
114 | struct ring_buffer_event *event; | ||
115 | struct trace_power *entry; | ||
116 | struct trace_array_cpu *data; | ||
117 | unsigned long irq_flags; | ||
118 | struct trace_array *tr = power_trace; | ||
119 | |||
120 | if (!trace_power_enabled) | ||
121 | return; | ||
122 | |||
123 | preempt_disable(); | ||
124 | it->end = ktime_get(); | ||
125 | data = tr->data[smp_processor_id()]; | ||
126 | |||
127 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
128 | &irq_flags); | ||
129 | if (!event) | ||
130 | goto out; | ||
131 | entry = ring_buffer_event_data(event); | ||
132 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
133 | entry->ent.type = TRACE_POWER; | ||
134 | entry->state_data = *it; | ||
135 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
136 | |||
137 | trace_wake_up(); | ||
138 | |||
139 | out: | ||
140 | preempt_enable(); | ||
141 | } | ||
142 | EXPORT_SYMBOL_GPL(trace_power_end); | ||
143 | |||
144 | void trace_power_mark(struct power_trace *it, unsigned int type, | ||
145 | unsigned int level) | ||
146 | { | ||
147 | struct ring_buffer_event *event; | ||
148 | struct trace_power *entry; | ||
149 | struct trace_array_cpu *data; | ||
150 | unsigned long irq_flags; | ||
151 | struct trace_array *tr = power_trace; | ||
152 | |||
153 | if (!trace_power_enabled) | ||
154 | return; | ||
155 | |||
156 | memset(it, 0, sizeof(struct power_trace)); | ||
157 | it->state = level; | ||
158 | it->type = type; | ||
159 | it->stamp = ktime_get(); | ||
160 | preempt_disable(); | ||
161 | it->end = it->stamp; | ||
162 | data = tr->data[smp_processor_id()]; | ||
163 | |||
164 | event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), | ||
165 | &irq_flags); | ||
166 | if (!event) | ||
167 | goto out; | ||
168 | entry = ring_buffer_event_data(event); | ||
169 | tracing_generic_entry_update(&entry->ent, 0, 0); | ||
170 | entry->ent.type = TRACE_POWER; | ||
171 | entry->state_data = *it; | ||
172 | ring_buffer_unlock_commit(tr->buffer, event, irq_flags); | ||
173 | |||
174 | trace_wake_up(); | ||
175 | |||
176 | out: | ||
177 | preempt_enable(); | ||
178 | } | ||
179 | EXPORT_SYMBOL_GPL(trace_power_mark); | ||
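
The conversion above turns the exported trace_power_* helpers into static tracepoint probes and replaces the open-coded ring-buffer sequence with the new trace_buffer_lock_reserve()/trace_buffer_unlock_commit() helpers, which (judging from the calls) fill in the generic entry header and handle the reader wakeup themselves. Condensed:

/*
 * old:
 *	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry), &irq_flags);
 *	entry = ring_buffer_event_data(event);
 *	tracing_generic_entry_update(&entry->ent, 0, 0);
 *	entry->ent.type = TRACE_POWER;
 *	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 *	trace_wake_up();
 *
 * new:
 *	event = trace_buffer_lock_reserve(tr, TRACE_POWER, sizeof(*entry), 0, 0);
 *	entry = ring_buffer_event_data(event);
 *	trace_buffer_unlock_commit(tr, event, 0, 0);
 */
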
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c index df175cb4564f..77132c2cf3d9 100644 --- a/kernel/trace/trace_sched_switch.c +++ b/kernel/trace/trace_sched_switch.c | |||
@@ -43,7 +43,7 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev, | |||
43 | data = ctx_trace->data[cpu]; | 43 | data = ctx_trace->data[cpu]; |
44 | 44 | ||
45 | if (likely(!atomic_read(&data->disabled))) | 45 | if (likely(!atomic_read(&data->disabled))) |
46 | tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc); | 46 | tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc); |
47 | 47 | ||
48 | local_irq_restore(flags); | 48 | local_irq_restore(flags); |
49 | } | 49 | } |
@@ -66,7 +66,7 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee, int success) | |||
66 | data = ctx_trace->data[cpu]; | 66 | data = ctx_trace->data[cpu]; |
67 | 67 | ||
68 | if (likely(!atomic_read(&data->disabled))) | 68 | if (likely(!atomic_read(&data->disabled))) |
69 | tracing_sched_wakeup_trace(ctx_trace, data, wakee, current, | 69 | tracing_sched_wakeup_trace(ctx_trace, wakee, current, |
70 | flags, pc); | 70 | flags, pc); |
71 | 71 | ||
72 | local_irq_restore(flags); | 72 | local_irq_restore(flags); |
@@ -93,7 +93,7 @@ static int tracing_sched_register(void) | |||
93 | ret = register_trace_sched_switch(probe_sched_switch); | 93 | ret = register_trace_sched_switch(probe_sched_switch); |
94 | if (ret) { | 94 | if (ret) { |
95 | pr_info("sched trace: Couldn't activate tracepoint" | 95 | pr_info("sched trace: Couldn't activate tracepoint" |
96 | " probe to kernel_sched_schedule\n"); | 96 | " probe to kernel_sched_switch\n"); |
97 | goto fail_deprobe_wake_new; | 97 | goto fail_deprobe_wake_new; |
98 | } | 98 | } |
99 | 99 | ||
@@ -185,12 +185,6 @@ void tracing_sched_switch_assign_trace(struct trace_array *tr) | |||
185 | ctx_trace = tr; | 185 | ctx_trace = tr; |
186 | } | 186 | } |
187 | 187 | ||
188 | static void start_sched_trace(struct trace_array *tr) | ||
189 | { | ||
190 | tracing_reset_online_cpus(tr); | ||
191 | tracing_start_sched_switch_record(); | ||
192 | } | ||
193 | |||
194 | static void stop_sched_trace(struct trace_array *tr) | 188 | static void stop_sched_trace(struct trace_array *tr) |
195 | { | 189 | { |
196 | tracing_stop_sched_switch_record(); | 190 | tracing_stop_sched_switch_record(); |
@@ -199,7 +193,7 @@ static void stop_sched_trace(struct trace_array *tr) | |||
199 | static int sched_switch_trace_init(struct trace_array *tr) | 193 | static int sched_switch_trace_init(struct trace_array *tr) |
200 | { | 194 | { |
201 | ctx_trace = tr; | 195 | ctx_trace = tr; |
202 | start_sched_trace(tr); | 196 | tracing_start_sched_switch_record(); |
203 | return 0; | 197 | return 0; |
204 | } | 198 | } |
205 | 199 | ||
@@ -227,6 +221,7 @@ static struct tracer sched_switch_trace __read_mostly = | |||
227 | .reset = sched_switch_trace_reset, | 221 | .reset = sched_switch_trace_reset, |
228 | .start = sched_switch_trace_start, | 222 | .start = sched_switch_trace_start, |
229 | .stop = sched_switch_trace_stop, | 223 | .stop = sched_switch_trace_stop, |
224 | .wait_pipe = poll_wait_pipe, | ||
230 | #ifdef CONFIG_FTRACE_SELFTEST | 225 | #ifdef CONFIG_FTRACE_SELFTEST |
231 | .selftest = trace_selftest_startup_sched_switch, | 226 | .selftest = trace_selftest_startup_sched_switch, |
232 | #endif | 227 | #endif |
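
The recurring change in this file is the dropped per-cpu data argument: tracing_sched_switch_trace() and tracing_sched_wakeup_trace() now take only the trace_array, so callers keep the data pointer solely for the disabled check. Sketched:

/*
 * old: tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
 * new: tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
 *
 * (presumably the helper resolves tr->data[cpu] itself now)
 */
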
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 42ae1e77b6b3..3c5ad6b2ec84 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c | |||
@@ -25,12 +25,15 @@ static int __read_mostly tracer_enabled; | |||
25 | static struct task_struct *wakeup_task; | 25 | static struct task_struct *wakeup_task; |
26 | static int wakeup_cpu; | 26 | static int wakeup_cpu; |
27 | static unsigned wakeup_prio = -1; | 27 | static unsigned wakeup_prio = -1; |
28 | static int wakeup_rt; | ||
28 | 29 | ||
29 | static raw_spinlock_t wakeup_lock = | 30 | static raw_spinlock_t wakeup_lock = |
30 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; | 31 | (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; |
31 | 32 | ||
32 | static void __wakeup_reset(struct trace_array *tr); | 33 | static void __wakeup_reset(struct trace_array *tr); |
33 | 34 | ||
35 | static int save_lat_flag; | ||
36 | |||
34 | #ifdef CONFIG_FUNCTION_TRACER | 37 | #ifdef CONFIG_FUNCTION_TRACER |
35 | /* | 38 | /* |
36 | * irqsoff uses its own tracer function to keep the overhead down: | 39 | * irqsoff uses its own tracer function to keep the overhead down: |
@@ -71,7 +74,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) | |||
71 | if (task_cpu(wakeup_task) != cpu) | 74 | if (task_cpu(wakeup_task) != cpu) |
72 | goto unlock; | 75 | goto unlock; |
73 | 76 | ||
74 | trace_function(tr, data, ip, parent_ip, flags, pc); | 77 | trace_function(tr, ip, parent_ip, flags, pc); |
75 | 78 | ||
76 | unlock: | 79 | unlock: |
77 | __raw_spin_unlock(&wakeup_lock); | 80 | __raw_spin_unlock(&wakeup_lock); |
@@ -151,7 +154,8 @@ probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev, | |||
151 | if (unlikely(!tracer_enabled || next != wakeup_task)) | 154 | if (unlikely(!tracer_enabled || next != wakeup_task)) |
152 | goto out_unlock; | 155 | goto out_unlock; |
153 | 156 | ||
154 | trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc); | 157 | trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); |
158 | tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); | ||
155 | 159 | ||
156 | /* | 160 | /* |
157 | * usecs conversion is slow so we try to delay the conversion | 161 | * usecs conversion is slow so we try to delay the conversion |
@@ -182,13 +186,10 @@ out: | |||
182 | 186 | ||
183 | static void __wakeup_reset(struct trace_array *tr) | 187 | static void __wakeup_reset(struct trace_array *tr) |
184 | { | 188 | { |
185 | struct trace_array_cpu *data; | ||
186 | int cpu; | 189 | int cpu; |
187 | 190 | ||
188 | for_each_possible_cpu(cpu) { | 191 | for_each_possible_cpu(cpu) |
189 | data = tr->data[cpu]; | ||
190 | tracing_reset(tr, cpu); | 192 | tracing_reset(tr, cpu); |
191 | } | ||
192 | 193 | ||
193 | wakeup_cpu = -1; | 194 | wakeup_cpu = -1; |
194 | wakeup_prio = -1; | 195 | wakeup_prio = -1; |
@@ -213,6 +214,7 @@ static void wakeup_reset(struct trace_array *tr) | |||
213 | static void | 214 | static void |
214 | probe_wakeup(struct rq *rq, struct task_struct *p, int success) | 215 | probe_wakeup(struct rq *rq, struct task_struct *p, int success) |
215 | { | 216 | { |
217 | struct trace_array_cpu *data; | ||
216 | int cpu = smp_processor_id(); | 218 | int cpu = smp_processor_id(); |
217 | unsigned long flags; | 219 | unsigned long flags; |
218 | long disabled; | 220 | long disabled; |
@@ -224,7 +226,7 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) | |||
224 | tracing_record_cmdline(p); | 226 | tracing_record_cmdline(p); |
225 | tracing_record_cmdline(current); | 227 | tracing_record_cmdline(current); |
226 | 228 | ||
227 | if (likely(!rt_task(p)) || | 229 | if ((wakeup_rt && !rt_task(p)) || |
228 | p->prio >= wakeup_prio || | 230 | p->prio >= wakeup_prio || |
229 | p->prio >= current->prio) | 231 | p->prio >= current->prio) |
230 | return; | 232 | return; |
@@ -252,9 +254,10 @@ probe_wakeup(struct rq *rq, struct task_struct *p, int success) | |||
252 | 254 | ||
253 | local_save_flags(flags); | 255 | local_save_flags(flags); |
254 | 256 | ||
255 | wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); | 257 | data = wakeup_trace->data[wakeup_cpu]; |
256 | trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu], | 258 | data->preempt_timestamp = ftrace_now(cpu); |
257 | CALLER_ADDR1, CALLER_ADDR2, flags, pc); | 259 | tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); |
260 | trace_function(wakeup_trace, CALLER_ADDR1, CALLER_ADDR2, flags, pc); | ||
258 | 261 | ||
259 | out_locked: | 262 | out_locked: |
260 | __raw_spin_unlock(&wakeup_lock); | 263 | __raw_spin_unlock(&wakeup_lock); |
@@ -262,12 +265,6 @@ out: | |||
262 | atomic_dec(&wakeup_trace->data[cpu]->disabled); | 265 | atomic_dec(&wakeup_trace->data[cpu]->disabled); |
263 | } | 266 | } |
264 | 267 | ||
265 | /* | ||
266 | * save_tracer_enabled is used to save the state of the tracer_enabled | ||
267 | * variable when we disable it when we open a trace output file. | ||
268 | */ | ||
269 | static int save_tracer_enabled; | ||
270 | |||
271 | static void start_wakeup_tracer(struct trace_array *tr) | 268 | static void start_wakeup_tracer(struct trace_array *tr) |
272 | { | 269 | { |
273 | int ret; | 270 | int ret; |
@@ -289,7 +286,7 @@ static void start_wakeup_tracer(struct trace_array *tr) | |||
289 | ret = register_trace_sched_switch(probe_wakeup_sched_switch); | 286 | ret = register_trace_sched_switch(probe_wakeup_sched_switch); |
290 | if (ret) { | 287 | if (ret) { |
291 | pr_info("sched trace: Couldn't activate tracepoint" | 288 | pr_info("sched trace: Couldn't activate tracepoint" |
292 | " probe to kernel_sched_schedule\n"); | 289 | " probe to kernel_sched_switch\n"); |
293 | goto fail_deprobe_wake_new; | 290 | goto fail_deprobe_wake_new; |
294 | } | 291 | } |
295 | 292 | ||
@@ -306,13 +303,10 @@ static void start_wakeup_tracer(struct trace_array *tr) | |||
306 | 303 | ||
307 | register_ftrace_function(&trace_ops); | 304 | register_ftrace_function(&trace_ops); |
308 | 305 | ||
309 | if (tracing_is_enabled()) { | 306 | if (tracing_is_enabled()) |
310 | tracer_enabled = 1; | 307 | tracer_enabled = 1; |
311 | save_tracer_enabled = 1; | 308 | else |
312 | } else { | ||
313 | tracer_enabled = 0; | 309 | tracer_enabled = 0; |
314 | save_tracer_enabled = 0; | ||
315 | } | ||
316 | 310 | ||
317 | return; | 311 | return; |
318 | fail_deprobe_wake_new: | 312 | fail_deprobe_wake_new: |
@@ -324,54 +318,54 @@ fail_deprobe: | |||
324 | static void stop_wakeup_tracer(struct trace_array *tr) | 318 | static void stop_wakeup_tracer(struct trace_array *tr) |
325 | { | 319 | { |
326 | tracer_enabled = 0; | 320 | tracer_enabled = 0; |
327 | save_tracer_enabled = 0; | ||
328 | unregister_ftrace_function(&trace_ops); | 321 | unregister_ftrace_function(&trace_ops); |
329 | unregister_trace_sched_switch(probe_wakeup_sched_switch); | 322 | unregister_trace_sched_switch(probe_wakeup_sched_switch); |
330 | unregister_trace_sched_wakeup_new(probe_wakeup); | 323 | unregister_trace_sched_wakeup_new(probe_wakeup); |
331 | unregister_trace_sched_wakeup(probe_wakeup); | 324 | unregister_trace_sched_wakeup(probe_wakeup); |
332 | } | 325 | } |
333 | 326 | ||
334 | static int wakeup_tracer_init(struct trace_array *tr) | 327 | static int __wakeup_tracer_init(struct trace_array *tr) |
335 | { | 328 | { |
329 | save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; | ||
330 | trace_flags |= TRACE_ITER_LATENCY_FMT; | ||
331 | |||
336 | tracing_max_latency = 0; | 332 | tracing_max_latency = 0; |
337 | wakeup_trace = tr; | 333 | wakeup_trace = tr; |
338 | start_wakeup_tracer(tr); | 334 | start_wakeup_tracer(tr); |
339 | return 0; | 335 | return 0; |
340 | } | 336 | } |
341 | 337 | ||
338 | static int wakeup_tracer_init(struct trace_array *tr) | ||
339 | { | ||
340 | wakeup_rt = 0; | ||
341 | return __wakeup_tracer_init(tr); | ||
342 | } | ||
343 | |||
344 | static int wakeup_rt_tracer_init(struct trace_array *tr) | ||
345 | { | ||
346 | wakeup_rt = 1; | ||
347 | return __wakeup_tracer_init(tr); | ||
348 | } | ||
349 | |||
342 | static void wakeup_tracer_reset(struct trace_array *tr) | 350 | static void wakeup_tracer_reset(struct trace_array *tr) |
343 | { | 351 | { |
344 | stop_wakeup_tracer(tr); | 352 | stop_wakeup_tracer(tr); |
345 | /* make sure we put back any tasks we are tracing */ | 353 | /* make sure we put back any tasks we are tracing */ |
346 | wakeup_reset(tr); | 354 | wakeup_reset(tr); |
355 | |||
356 | if (!save_lat_flag) | ||
357 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; | ||
347 | } | 358 | } |
348 | 359 | ||
349 | static void wakeup_tracer_start(struct trace_array *tr) | 360 | static void wakeup_tracer_start(struct trace_array *tr) |
350 | { | 361 | { |
351 | wakeup_reset(tr); | 362 | wakeup_reset(tr); |
352 | tracer_enabled = 1; | 363 | tracer_enabled = 1; |
353 | save_tracer_enabled = 1; | ||
354 | } | 364 | } |
355 | 365 | ||
356 | static void wakeup_tracer_stop(struct trace_array *tr) | 366 | static void wakeup_tracer_stop(struct trace_array *tr) |
357 | { | 367 | { |
358 | tracer_enabled = 0; | 368 | tracer_enabled = 0; |
359 | save_tracer_enabled = 0; | ||
360 | } | ||
361 | |||
362 | static void wakeup_tracer_open(struct trace_iterator *iter) | ||
363 | { | ||
364 | /* stop the trace while dumping */ | ||
365 | tracer_enabled = 0; | ||
366 | } | ||
367 | |||
368 | static void wakeup_tracer_close(struct trace_iterator *iter) | ||
369 | { | ||
370 | /* forget about any processes we were recording */ | ||
371 | if (save_tracer_enabled) { | ||
372 | wakeup_reset(iter->tr); | ||
373 | tracer_enabled = 1; | ||
374 | } | ||
375 | } | 369 | } |
376 | 370 | ||
377 | static struct tracer wakeup_tracer __read_mostly = | 371 | static struct tracer wakeup_tracer __read_mostly = |
@@ -381,8 +375,20 @@ static struct tracer wakeup_tracer __read_mostly = | |||
381 | .reset = wakeup_tracer_reset, | 375 | .reset = wakeup_tracer_reset, |
382 | .start = wakeup_tracer_start, | 376 | .start = wakeup_tracer_start, |
383 | .stop = wakeup_tracer_stop, | 377 | .stop = wakeup_tracer_stop, |
384 | .open = wakeup_tracer_open, | 378 | .print_max = 1, |
385 | .close = wakeup_tracer_close, | 379 | #ifdef CONFIG_FTRACE_SELFTEST |
380 | .selftest = trace_selftest_startup_wakeup, | ||
381 | #endif | ||
382 | }; | ||
383 | |||
384 | static struct tracer wakeup_rt_tracer __read_mostly = | ||
385 | { | ||
386 | .name = "wakeup_rt", | ||
387 | .init = wakeup_rt_tracer_init, | ||
388 | .reset = wakeup_tracer_reset, | ||
389 | .start = wakeup_tracer_start, | ||
390 | .stop = wakeup_tracer_stop, | ||
391 | .wait_pipe = poll_wait_pipe, | ||
386 | .print_max = 1, | 392 | .print_max = 1, |
387 | #ifdef CONFIG_FTRACE_SELFTEST | 393 | #ifdef CONFIG_FTRACE_SELFTEST |
388 | .selftest = trace_selftest_startup_wakeup, | 394 | .selftest = trace_selftest_startup_wakeup, |
@@ -397,6 +403,10 @@ __init static int init_wakeup_tracer(void) | |||
397 | if (ret) | 403 | if (ret) |
398 | return ret; | 404 | return ret; |
399 | 405 | ||
406 | ret = register_tracer(&wakeup_rt_tracer); | ||
407 | if (ret) | ||
408 | return ret; | ||
409 | |||
400 | return 0; | 410 | return 0; |
401 | } | 411 | } |
402 | device_initcall(init_wakeup_tracer); | 412 | device_initcall(init_wakeup_tracer); |
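
Note the split: wakeup and wakeup_rt now share all machinery through __wakeup_tracer_init(), differing only in the wakeup_rt flag, and the probe's filter becomes (wakeup_rt && !rt_task(p)) so non-RT tasks are skipped only in RT mode. The pattern, reduced to a sketch with hypothetical demo_* names:

static int demo_rt_only;		/* flavor flag, set before shared init */

static int demo_common_init(void)
{
	/* shared setup for both tracer flavors goes here */
	return 0;
}

static int demo_init(void)		/* the "wakeup" flavor */
{
	demo_rt_only = 0;
	return demo_common_init();
}

static int demo_rt_init(void)		/* the "wakeup_rt" flavor */
{
	demo_rt_only = 1;
	return demo_common_init();
}
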
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index bc8e80a86bca..7238646b8723 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c | |||
@@ -9,11 +9,12 @@ static inline int trace_valid_entry(struct trace_entry *entry) | |||
9 | case TRACE_FN: | 9 | case TRACE_FN: |
10 | case TRACE_CTX: | 10 | case TRACE_CTX: |
11 | case TRACE_WAKE: | 11 | case TRACE_WAKE: |
12 | case TRACE_CONT: | ||
13 | case TRACE_STACK: | 12 | case TRACE_STACK: |
14 | case TRACE_PRINT: | 13 | case TRACE_PRINT: |
15 | case TRACE_SPECIAL: | 14 | case TRACE_SPECIAL: |
16 | case TRACE_BRANCH: | 15 | case TRACE_BRANCH: |
16 | case TRACE_GRAPH_ENT: | ||
17 | case TRACE_GRAPH_RET: | ||
17 | return 1; | 18 | return 1; |
18 | } | 19 | } |
19 | return 0; | 20 | return 0; |
@@ -125,9 +126,9 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
125 | func(); | 126 | func(); |
126 | 127 | ||
127 | /* | 128 | /* |
128 | * Some archs *cough*PowerPC*cough* add charachters to the | 129 | * Some archs *cough*PowerPC*cough* add characters to the |
129 | * start of the function names. We simply put a '*' to | 130 | * start of the function names. We simply put a '*' to |
130 | * accomodate them. | 131 | * accommodate them. |
131 | */ | 132 | */ |
132 | func_name = "*" STR(DYN_FTRACE_TEST_NAME); | 133 | func_name = "*" STR(DYN_FTRACE_TEST_NAME); |
133 | 134 | ||
@@ -135,7 +136,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, | |||
135 | ftrace_set_filter(func_name, strlen(func_name), 1); | 136 | ftrace_set_filter(func_name, strlen(func_name), 1); |
136 | 137 | ||
137 | /* enable tracing */ | 138 | /* enable tracing */ |
138 | ret = trace->init(tr); | 139 | ret = tracer_init(trace, tr); |
139 | if (ret) { | 140 | if (ret) { |
140 | warn_failed_init_tracer(trace, ret); | 141 | warn_failed_init_tracer(trace, ret); |
141 | goto out; | 142 | goto out; |
@@ -209,7 +210,7 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
209 | ftrace_enabled = 1; | 210 | ftrace_enabled = 1; |
210 | tracer_enabled = 1; | 211 | tracer_enabled = 1; |
211 | 212 | ||
212 | ret = trace->init(tr); | 213 | ret = tracer_init(trace, tr); |
213 | if (ret) { | 214 | if (ret) { |
214 | warn_failed_init_tracer(trace, ret); | 215 | warn_failed_init_tracer(trace, ret); |
215 | goto out; | 216 | goto out; |
@@ -247,6 +248,54 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) | |||
247 | } | 248 | } |
248 | #endif /* CONFIG_FUNCTION_TRACER */ | 249 | #endif /* CONFIG_FUNCTION_TRACER */ |
249 | 250 | ||
251 | |||
252 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
253 | /* | ||
254 | * Pretty much the same as the function tracer, from which this | ||
255 | * selftest has been borrowed. | ||
256 | */ | ||
257 | int | ||
258 | trace_selftest_startup_function_graph(struct tracer *trace, | ||
259 | struct trace_array *tr) | ||
260 | { | ||
261 | int ret; | ||
262 | unsigned long count; | ||
263 | |||
264 | ret = tracer_init(trace, tr); | ||
265 | if (ret) { | ||
266 | warn_failed_init_tracer(trace, ret); | ||
267 | goto out; | ||
268 | } | ||
269 | |||
270 | /* Sleep for 1/10 of a second */ | ||
271 | msleep(100); | ||
272 | |||
273 | tracing_stop(); | ||
274 | |||
275 | /* check the trace buffer */ | ||
276 | ret = trace_test_buffer(tr, &count); | ||
277 | |||
278 | trace->reset(tr); | ||
279 | tracing_start(); | ||
280 | |||
281 | if (!ret && !count) { | ||
282 | printk(KERN_CONT ".. no entries found .."); | ||
283 | ret = -1; | ||
284 | goto out; | ||
285 | } | ||
286 | |||
287 | /* Don't test dynamic tracing, the function tracer already did */ | ||
288 | |||
289 | out: | ||
290 | /* Stop it if we failed */ | ||
291 | if (ret) | ||
292 | ftrace_graph_stop(); | ||
293 | |||
294 | return ret; | ||
295 | } | ||
296 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
297 | |||
298 | |||
250 | #ifdef CONFIG_IRQSOFF_TRACER | 299 | #ifdef CONFIG_IRQSOFF_TRACER |
251 | int | 300 | int |
252 | trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) | 301 | trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) |
@@ -256,7 +305,7 @@ trace_selftest_startup_irqsoff(struct tracer *trace, struct trace_array *tr) | |||
256 | int ret; | 305 | int ret; |
257 | 306 | ||
258 | /* start the tracing */ | 307 | /* start the tracing */ |
259 | ret = trace->init(tr); | 308 | ret = tracer_init(trace, tr); |
260 | if (ret) { | 309 | if (ret) { |
261 | warn_failed_init_tracer(trace, ret); | 310 | warn_failed_init_tracer(trace, ret); |
262 | return ret; | 311 | return ret; |
@@ -310,7 +359,7 @@ trace_selftest_startup_preemptoff(struct tracer *trace, struct trace_array *tr) | |||
310 | } | 359 | } |
311 | 360 | ||
312 | /* start the tracing */ | 361 | /* start the tracing */ |
313 | ret = trace->init(tr); | 362 | ret = tracer_init(trace, tr); |
314 | if (ret) { | 363 | if (ret) { |
315 | warn_failed_init_tracer(trace, ret); | 364 | warn_failed_init_tracer(trace, ret); |
316 | return ret; | 365 | return ret; |
@@ -364,7 +413,7 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array * | |||
364 | } | 413 | } |
365 | 414 | ||
366 | /* start the tracing */ | 415 | /* start the tracing */ |
367 | ret = trace->init(tr); | 416 | ret = tracer_init(trace, tr); |
368 | if (ret) { | 417 | if (ret) { |
369 | warn_failed_init_tracer(trace, ret); | 418 | warn_failed_init_tracer(trace, ret); |
370 | goto out; | 419 | goto out; |
@@ -496,7 +545,7 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr) | |||
496 | wait_for_completion(&isrt); | 545 | wait_for_completion(&isrt); |
497 | 546 | ||
498 | /* start the tracing */ | 547 | /* start the tracing */ |
499 | ret = trace->init(tr); | 548 | ret = tracer_init(trace, tr); |
500 | if (ret) { | 549 | if (ret) { |
501 | warn_failed_init_tracer(trace, ret); | 550 | warn_failed_init_tracer(trace, ret); |
502 | return ret; | 551 | return ret; |
@@ -557,7 +606,7 @@ trace_selftest_startup_sched_switch(struct tracer *trace, struct trace_array *tr | |||
557 | int ret; | 606 | int ret; |
558 | 607 | ||
559 | /* start the tracing */ | 608 | /* start the tracing */ |
560 | ret = trace->init(tr); | 609 | ret = tracer_init(trace, tr); |
561 | if (ret) { | 610 | if (ret) { |
562 | warn_failed_init_tracer(trace, ret); | 611 | warn_failed_init_tracer(trace, ret); |
563 | return ret; | 612 | return ret; |
@@ -589,10 +638,10 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr) | |||
589 | int ret; | 638 | int ret; |
590 | 639 | ||
591 | /* start the tracing */ | 640 | /* start the tracing */ |
592 | ret = trace->init(tr); | 641 | ret = tracer_init(trace, tr); |
593 | if (ret) { | 642 | if (ret) { |
594 | warn_failed_init_tracer(trace, ret); | 643 | warn_failed_init_tracer(trace, ret); |
595 | return 0; | 644 | return ret; |
596 | } | 645 | } |
597 | 646 | ||
598 | /* Sleep for 1/10 of a second */ | 647 |
@@ -604,6 +653,11 @@ trace_selftest_startup_sysprof(struct tracer *trace, struct trace_array *tr) | |||
604 | trace->reset(tr); | 653 | trace->reset(tr); |
605 | tracing_start(); | 654 | tracing_start(); |
606 | 655 | ||
656 | if (!ret && !count) { | ||
657 | printk(KERN_CONT ".. no entries found .."); | ||
658 | ret = -1; | ||
659 | } | ||
660 | |||
607 | return ret; | 661 | return ret; |
608 | } | 662 | } |
609 | #endif /* CONFIG_SYSPROF_TRACER */ | 663 | #endif /* CONFIG_SYSPROF_TRACER */ |
@@ -616,7 +670,7 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) | |||
616 | int ret; | 670 | int ret; |
617 | 671 | ||
618 | /* start the tracing */ | 672 | /* start the tracing */ |
619 | ret = trace->init(tr); | 673 | ret = tracer_init(trace, tr); |
620 | if (ret) { | 674 | if (ret) { |
621 | warn_failed_init_tracer(trace, ret); | 675 | warn_failed_init_tracer(trace, ret); |
622 | return ret; | 676 | return ret; |
@@ -631,6 +685,11 @@ trace_selftest_startup_branch(struct tracer *trace, struct trace_array *tr) | |||
631 | trace->reset(tr); | 685 | trace->reset(tr); |
632 | tracing_start(); | 686 | tracing_start(); |
633 | 687 | ||
688 | if (!ret && !count) { | ||
689 | printk(KERN_CONT ".. no entries found .."); | ||
690 | ret = -1; | ||
691 | } | ||
692 | |||
634 | return ret; | 693 | return ret; |
635 | } | 694 | } |
636 | #endif /* CONFIG_BRANCH_TRACER */ | 695 | #endif /* CONFIG_BRANCH_TRACER */ |
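
Every selftest in this file now starts its tracer through tracer_init(trace, tr) instead of calling trace->init(tr) directly, the sysprof test no longer swallows init failures, and sysprof and branch gain the empty-buffer check the other tests already had. The shared shape, condensed into one hypothetical demo_selftest() that is not part of the patch:

static int demo_selftest(struct tracer *trace, struct trace_array *tr)
{
	unsigned long count;
	int ret;

	ret = tracer_init(trace, tr);	/* was: trace->init(tr) */
	if (ret) {
		warn_failed_init_tracer(trace, ret);
		return ret;		/* sysprof used to return 0 here */
	}

	/* Sleep for 1/10 of a second so the tracer records entries */
	msleep(100);

	tracing_stop();
	ret = trace_test_buffer(tr, &count);
	trace->reset(tr);
	tracing_start();

	if (!ret && !count) {
		printk(KERN_CONT ".. no entries found ..");
		ret = -1;
	}
	return ret;
}
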
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c new file mode 100644 index 000000000000..39310e3434ee --- /dev/null +++ b/kernel/trace/trace_stat.c | |||
@@ -0,0 +1,319 @@ | |||
1 | /* | ||
2 | * Infrastructure for statistic tracing (histogram output). | ||
3 | * | ||
4 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
5 | * | ||
6 | * Based on the code from trace_branch.c which is | ||
7 | * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com> | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | |||
12 | #include <linux/list.h> | ||
13 | #include <linux/debugfs.h> | ||
14 | #include "trace_stat.h" | ||
15 | #include "trace.h" | ||
16 | |||
17 | |||
18 | /* List of stat entries from a tracer */ | ||
19 | struct trace_stat_list { | ||
20 | struct list_head list; | ||
21 | void *stat; | ||
22 | }; | ||
23 | |||
24 | /* A stat session is the stats output in one file */ | ||
25 | struct tracer_stat_session { | ||
26 | struct list_head session_list; | ||
27 | struct tracer_stat *ts; | ||
28 | struct list_head stat_list; | ||
29 | struct mutex stat_mutex; | ||
30 | struct dentry *file; | ||
31 | }; | ||
32 | |||
33 | /* All of the sessions currently in use. Each stat file embeds one session */ | ||
34 | static LIST_HEAD(all_stat_sessions); | ||
35 | static DEFINE_MUTEX(all_stat_sessions_mutex); | ||
36 | |||
37 | /* The root directory for all stat files */ | ||
38 | static struct dentry *stat_dir; | ||
39 | |||
40 | |||
41 | static void reset_stat_session(struct tracer_stat_session *session) | ||
42 | { | ||
43 | struct trace_stat_list *node, *next; | ||
44 | |||
45 | list_for_each_entry_safe(node, next, &session->stat_list, list) | ||
46 | kfree(node); | ||
47 | |||
48 | INIT_LIST_HEAD(&session->stat_list); | ||
49 | } | ||
50 | |||
51 | static void destroy_session(struct tracer_stat_session *session) | ||
52 | { | ||
53 | debugfs_remove(session->file); | ||
54 | reset_stat_session(session); | ||
55 | mutex_destroy(&session->stat_mutex); | ||
56 | kfree(session); | ||
57 | } | ||
58 | |||
59 | /* | ||
60 | * For tracers that don't provide a stat_cmp callback. | ||
61 | * This one forces an immediate insertion at the tail of | ||
62 | * the list. | ||
63 | */ | ||
64 | static int dummy_cmp(void *p1, void *p2) | ||
65 | { | ||
66 | return -1; | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Initialize the stat list at each trace_stat file opening. | ||
71 | * All of this copying and sorting is required on every open, | ||
72 | * since the stats could have changed between two file sessions. | ||
73 | */ | ||
74 | static int stat_seq_init(struct tracer_stat_session *session) | ||
75 | { | ||
76 | struct trace_stat_list *iter_entry, *new_entry; | ||
77 | struct tracer_stat *ts = session->ts; | ||
78 | void *prev_stat; | ||
79 | int ret = 0; | ||
80 | int i; | ||
81 | |||
82 | mutex_lock(&session->stat_mutex); | ||
83 | reset_stat_session(session); | ||
84 | |||
85 | if (!ts->stat_cmp) | ||
86 | ts->stat_cmp = dummy_cmp; | ||
87 | |||
88 | /* | ||
89 | * The first stat entry. (The stat_list head itself carries no | ||
90 | * data, so it doesn't count as an entry.) | ||
91 | */ | ||
92 | new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL); | ||
93 | if (!new_entry) { | ||
94 | ret = -ENOMEM; | ||
95 | goto exit; | ||
96 | } | ||
97 | |||
98 | INIT_LIST_HEAD(&new_entry->list); | ||
99 | |||
100 | list_add(&new_entry->list, &session->stat_list); | ||
101 | |||
102 | new_entry->stat = ts->stat_start(); | ||
103 | prev_stat = new_entry->stat; | ||
104 | |||
105 | /* | ||
106 | * Iterate over the tracer stat entries and store them in a sorted | ||
107 | * list. | ||
108 | */ | ||
109 | for (i = 1; ; i++) { | ||
110 | new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL); | ||
111 | if (!new_entry) { | ||
112 | ret = -ENOMEM; | ||
113 | goto exit_free_list; | ||
114 | } | ||
115 | |||
116 | INIT_LIST_HEAD(&new_entry->list); | ||
117 | new_entry->stat = ts->stat_next(prev_stat, i); | ||
118 | |||
119 | /* End of insertion */ | ||
120 | if (!new_entry->stat) | ||
121 | break; | ||
122 | |||
123 | list_for_each_entry(iter_entry, &session->stat_list, list) { | ||
124 | |||
125 | /* Insert in descending order */ | ||
126 | if (ts->stat_cmp(new_entry->stat, | ||
127 | iter_entry->stat) > 0) { | ||
128 | |||
129 | list_add_tail(&new_entry->list, | ||
130 | &iter_entry->list); | ||
131 | break; | ||
132 | |||
133 | /* The new entry is the smallest so far: put it at the tail */ | ||
134 | } else if (list_is_last(&iter_entry->list, | ||
135 | &session->stat_list)) { | ||
136 | list_add(&new_entry->list, &iter_entry->list); | ||
137 | break; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | prev_stat = new_entry->stat; | ||
142 | } | ||
143 | exit: | ||
144 | mutex_unlock(&session->stat_mutex); | ||
145 | return ret; | ||
146 | |||
147 | exit_free_list: | ||
148 | reset_stat_session(session); | ||
149 | mutex_unlock(&session->stat_mutex); | ||
150 | return ret; | ||
151 | } | ||
152 | |||
153 | |||
154 | static void *stat_seq_start(struct seq_file *s, loff_t *pos) | ||
155 | { | ||
156 | struct tracer_stat_session *session = s->private; | ||
157 | |||
158 | /* Prevent tracer switches and stat_list modifications */ | ||
159 | mutex_lock(&session->stat_mutex); | ||
160 | |||
161 | /* If we are at the beginning of the file, print the headers */ | ||
162 | if (!*pos && session->ts->stat_headers) | ||
163 | session->ts->stat_headers(s); | ||
164 | |||
165 | return seq_list_start(&session->stat_list, *pos); | ||
166 | } | ||
167 | |||
168 | static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos) | ||
169 | { | ||
170 | struct tracer_stat_session *session = s->private; | ||
171 | |||
172 | return seq_list_next(p, &session->stat_list, pos); | ||
173 | } | ||
174 | |||
175 | static void stat_seq_stop(struct seq_file *s, void *p) | ||
176 | { | ||
177 | struct tracer_stat_session *session = s->private; | ||
178 | mutex_unlock(&session->stat_mutex); | ||
179 | } | ||
180 | |||
181 | static int stat_seq_show(struct seq_file *s, void *v) | ||
182 | { | ||
183 | struct tracer_stat_session *session = s->private; | ||
184 | struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list); | ||
185 | |||
186 | return session->ts->stat_show(s, l->stat); | ||
187 | } | ||
188 | |||
189 | static const struct seq_operations trace_stat_seq_ops = { | ||
190 | .start = stat_seq_start, | ||
191 | .next = stat_seq_next, | ||
192 | .stop = stat_seq_stop, | ||
193 | .show = stat_seq_show | ||
194 | }; | ||
195 | |||
196 | /* The session's stat list is refilled and re-sorted each time the file is opened */ | ||
197 | static int tracing_stat_open(struct inode *inode, struct file *file) | ||
198 | { | ||
199 | int ret; | ||
200 | |||
201 | struct tracer_stat_session *session = inode->i_private; | ||
202 | |||
203 | ret = seq_open(file, &trace_stat_seq_ops); | ||
204 | if (!ret) { | ||
205 | struct seq_file *m = file->private_data; | ||
206 | m->private = session; | ||
207 | ret = stat_seq_init(session); | ||
208 | } | ||
209 | |||
210 | return ret; | ||
211 | } | ||
212 | |||
213 | /* | ||
214 | * Avoid consuming memory with our now useless list. | ||
215 | */ | ||
216 | static int tracing_stat_release(struct inode *i, struct file *f) | ||
217 | { | ||
218 | struct tracer_stat_session *session = i->i_private; | ||
219 | |||
220 | mutex_lock(&session->stat_mutex); | ||
221 | reset_stat_session(session); | ||
222 | mutex_unlock(&session->stat_mutex); | ||
223 | |||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | static const struct file_operations tracing_stat_fops = { | ||
228 | .open = tracing_stat_open, | ||
229 | .read = seq_read, | ||
230 | .llseek = seq_lseek, | ||
231 | .release = tracing_stat_release | ||
232 | }; | ||
233 | |||
234 | static int tracing_stat_init(void) | ||
235 | { | ||
236 | struct dentry *d_tracing; | ||
237 | |||
238 | d_tracing = tracing_init_dentry(); | ||
239 | |||
240 | stat_dir = debugfs_create_dir("trace_stat", d_tracing); | ||
241 | if (!stat_dir) | ||
242 | pr_warning("Could not create debugfs " | ||
243 | "'trace_stat' entry\n"); | ||
244 | return 0; | ||
245 | } | ||
246 | |||
247 | static int init_stat_file(struct tracer_stat_session *session) | ||
248 | { | ||
249 | if (!stat_dir && tracing_stat_init()) | ||
250 | return -ENODEV; | ||
251 | |||
252 | session->file = debugfs_create_file(session->ts->name, 0644, | ||
253 | stat_dir, | ||
254 | session, &tracing_stat_fops); | ||
255 | if (!session->file) | ||
256 | return -ENOMEM; | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | int register_stat_tracer(struct tracer_stat *trace) | ||
261 | { | ||
262 | struct tracer_stat_session *session, *node, *tmp; | ||
263 | int ret; | ||
264 | |||
265 | if (!trace) | ||
266 | return -EINVAL; | ||
267 | |||
268 | if (!trace->stat_start || !trace->stat_next || !trace->stat_show) | ||
269 | return -EINVAL; | ||
270 | |||
271 | /* Already registered? */ | ||
272 | mutex_lock(&all_stat_sessions_mutex); | ||
273 | list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) { | ||
274 | if (node->ts == trace) { | ||
275 | mutex_unlock(&all_stat_sessions_mutex); | ||
276 | return -EINVAL; | ||
277 | } | ||
278 | } | ||
279 | mutex_unlock(&all_stat_sessions_mutex); | ||
280 | |||
281 | /* Init the session */ | ||
282 | session = kmalloc(sizeof(struct tracer_stat_session), GFP_KERNEL); | ||
283 | if (!session) | ||
284 | return -ENOMEM; | ||
285 | |||
286 | session->ts = trace; | ||
287 | INIT_LIST_HEAD(&session->session_list); | ||
288 | INIT_LIST_HEAD(&session->stat_list); | ||
289 | mutex_init(&session->stat_mutex); | ||
290 | session->file = NULL; | ||
291 | |||
292 | ret = init_stat_file(session); | ||
293 | if (ret) { | ||
294 | destroy_session(session); | ||
295 | return ret; | ||
296 | } | ||
297 | |||
298 | /* Register */ | ||
299 | mutex_lock(&all_stat_sessions_mutex); | ||
300 | list_add_tail(&session->session_list, &all_stat_sessions); | ||
301 | mutex_unlock(&all_stat_sessions_mutex); | ||
302 | |||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | void unregister_stat_tracer(struct tracer_stat *trace) | ||
307 | { | ||
308 | struct tracer_stat_session *node, *tmp; | ||
309 | |||
310 | mutex_lock(&all_stat_sessions_mutex); | ||
311 | list_for_each_entry_safe(node, tmp, &all_stat_sessions, session_list) { | ||
312 | if (node->ts == trace) { | ||
313 | list_del(&node->session_list); | ||
314 | destroy_session(node); | ||
315 | break; | ||
316 | } | ||
317 | } | ||
318 | mutex_unlock(&all_stat_sessions_mutex); | ||
319 | } | ||
diff --git a/kernel/trace/trace_stat.h b/kernel/trace/trace_stat.h new file mode 100644 index 000000000000..202274cf7f3d --- /dev/null +++ b/kernel/trace/trace_stat.h | |||
@@ -0,0 +1,31 @@ | |||
1 | #ifndef __TRACE_STAT_H | ||
2 | #define __TRACE_STAT_H | ||
3 | |||
4 | #include <linux/seq_file.h> | ||
5 | |||
6 | /* | ||
7 | * If you want to provide a stat file (one-shot statistics), fill | ||
8 | * in the stat_start/stat_next iterator and a stat_show callback. | ||
9 | * The other callbacks are optional. | ||
10 | */ | ||
11 | struct tracer_stat { | ||
12 | /* The name of your stat file */ | ||
13 | const char *name; | ||
14 | /* Iteration over statistic entries */ | ||
15 | void *(*stat_start)(void); | ||
16 | void *(*stat_next)(void *prev, int idx); | ||
17 | /* Compare two entries for stats sorting */ | ||
18 | int (*stat_cmp)(void *p1, void *p2); | ||
19 | /* Print a stat entry */ | ||
20 | int (*stat_show)(struct seq_file *s, void *p); | ||
21 | /* Print the headers of your stat entries */ | ||
22 | int (*stat_headers)(struct seq_file *s); | ||
23 | }; | ||
24 | |||
25 | /* | ||
26 | * Destroy or create a stat file | ||
27 | */ | ||
28 | extern int register_stat_tracer(struct tracer_stat *trace); | ||
29 | extern void unregister_stat_tracer(struct tracer_stat *trace); | ||
30 | |||
31 | #endif /* __TRACE_STAT_H */ | ||
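
As a usage sketch of this interface: stat_start() yields the first entry, stat_next(prev, idx) yields entry idx (NULL ends the walk), and register_stat_tracer() creates debugfs trace_stat/<name>. The demo_* names below are hypothetical, not part of this patch:

static unsigned long demo_counts[4];

static void *demo_stat_start(void)
{
	return &demo_counts[0];
}

static void *demo_stat_next(void *prev, int idx)
{
	if (idx >= 4)
		return NULL;		/* NULL terminates the iteration */
	return &demo_counts[idx];
}

static int demo_stat_show(struct seq_file *s, void *p)
{
	return seq_printf(s, "%lu\n", *(unsigned long *)p);
}

static struct tracer_stat demo_stats = {
	.name		= "demo",	/* file appears as trace_stat/demo */
	.stat_start	= demo_stat_start,
	.stat_next	= demo_stat_next,
	.stat_show	= demo_stat_show,
	/* stat_cmp and stat_headers are optional */
};

/* register_stat_tracer(&demo_stats) would create the stat file */
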
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c index eaca5ad803ff..c771af4e8f1a 100644 --- a/kernel/trace/trace_sysprof.c +++ b/kernel/trace/trace_sysprof.c | |||
@@ -88,7 +88,7 @@ static void backtrace_address(void *data, unsigned long addr, int reliable) | |||
88 | } | 88 | } |
89 | } | 89 | } |
90 | 90 | ||
91 | const static struct stacktrace_ops backtrace_ops = { | 91 | static const struct stacktrace_ops backtrace_ops = { |
92 | .warning = backtrace_warning, | 92 | .warning = backtrace_warning, |
93 | .warning_symbol = backtrace_warning_symbol, | 93 | .warning_symbol = backtrace_warning_symbol, |
94 | .stack = backtrace_stack, | 94 | .stack = backtrace_stack, |
@@ -226,15 +226,6 @@ static void stop_stack_timers(void) | |||
226 | stop_stack_timer(cpu); | 226 | stop_stack_timer(cpu); |
227 | } | 227 | } |
228 | 228 | ||
229 | static void start_stack_trace(struct trace_array *tr) | ||
230 | { | ||
231 | mutex_lock(&sample_timer_lock); | ||
232 | tracing_reset_online_cpus(tr); | ||
233 | start_stack_timers(); | ||
234 | tracer_enabled = 1; | ||
235 | mutex_unlock(&sample_timer_lock); | ||
236 | } | ||
237 | |||
238 | static void stop_stack_trace(struct trace_array *tr) | 229 | static void stop_stack_trace(struct trace_array *tr) |
239 | { | 230 | { |
240 | mutex_lock(&sample_timer_lock); | 231 | mutex_lock(&sample_timer_lock); |
@@ -247,12 +238,18 @@ static int stack_trace_init(struct trace_array *tr) | |||
247 | { | 238 | { |
248 | sysprof_trace = tr; | 239 | sysprof_trace = tr; |
249 | 240 | ||
250 | start_stack_trace(tr); | 241 | tracing_start_cmdline_record(); |
242 | |||
243 | mutex_lock(&sample_timer_lock); | ||
244 | start_stack_timers(); | ||
245 | tracer_enabled = 1; | ||
246 | mutex_unlock(&sample_timer_lock); | ||
251 | return 0; | 247 | return 0; |
252 | } | 248 | } |
253 | 249 | ||
254 | static void stack_trace_reset(struct trace_array *tr) | 250 | static void stack_trace_reset(struct trace_array *tr) |
255 | { | 251 | { |
252 | tracing_stop_cmdline_record(); | ||
256 | stop_stack_trace(tr); | 253 | stop_stack_trace(tr); |
257 | } | 254 | } |
258 | 255 | ||
@@ -330,5 +327,5 @@ void init_tracer_sysprof_debugfs(struct dentry *d_tracer) | |||
330 | d_tracer, NULL, &sysprof_sample_fops); | 327 | d_tracer, NULL, &sysprof_sample_fops); |
331 | if (entry) | 328 | if (entry) |
332 | return; | 329 | return; |
333 | pr_warning("Could not create debugfs 'dyn_ftrace_total_info' entry\n"); | 330 | pr_warning("Could not create debugfs 'sysprof_sample_period' entry\n"); |
334 | } | 331 | } |
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c new file mode 100644 index 000000000000..4664990fe9c5 --- /dev/null +++ b/kernel/trace/trace_workqueue.c | |||
@@ -0,0 +1,281 @@ | |||
1 | /* | ||
2 | * Workqueue statistical tracer. | ||
3 | * | ||
4 | * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | |||
9 | #include <trace/workqueue.h> | ||
10 | #include <linux/list.h> | ||
11 | #include <linux/percpu.h> | ||
12 | #include "trace_stat.h" | ||
13 | #include "trace.h" | ||
14 | |||
15 | |||
16 | /* A cpu workqueue thread */ | ||
17 | struct cpu_workqueue_stats { | ||
18 | struct list_head list; | ||
19 | /* Set for the first entry of a cpu: tells us to print the cpu header */ | ||
20 | bool first_entry; | ||
21 | int cpu; | ||
22 | pid_t pid; | ||
23 | /* Can be inserted from interrupt or user context, needs to be atomic */ | ||
24 | atomic_t inserted; | ||
25 | /* | ||
26 | * Doesn't need to be atomic: works are serialized by a single workqueue | ||
27 | * thread on a single CPU. | ||
28 | */ | ||
29 | unsigned int executed; | ||
30 | }; | ||
31 | |||
32 | /* List of workqueue threads on one cpu */ | ||
33 | struct workqueue_global_stats { | ||
34 | struct list_head list; | ||
35 | spinlock_t lock; | ||
36 | }; | ||
37 | |||
38 | /* No global lock is needed: these are allocated before any workqueue is | ||
39 | * created, and are never freed. | ||
40 | */ | ||
41 | static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat); | ||
42 | #define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu)) | ||
43 | |||
44 | /* Insertion of a work */ | ||
45 | static void | ||
46 | probe_workqueue_insertion(struct task_struct *wq_thread, | ||
47 | struct work_struct *work) | ||
48 | { | ||
49 | int cpu = cpumask_first(&wq_thread->cpus_allowed); | ||
50 | struct cpu_workqueue_stats *node, *next; | ||
51 | unsigned long flags; | ||
52 | |||
53 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
54 | list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list, | ||
55 | list) { | ||
56 | if (node->pid == wq_thread->pid) { | ||
57 | atomic_inc(&node->inserted); | ||
58 | goto found; | ||
59 | } | ||
60 | } | ||
61 | pr_debug("trace_workqueue: entry not found\n"); | ||
62 | found: | ||
63 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
64 | } | ||
65 | |||
66 | /* Execution of a work */ | ||
67 | static void | ||
68 | probe_workqueue_execution(struct task_struct *wq_thread, | ||
69 | struct work_struct *work) | ||
70 | { | ||
71 | int cpu = cpumask_first(&wq_thread->cpus_allowed); | ||
72 | struct cpu_workqueue_stats *node, *next; | ||
73 | unsigned long flags; | ||
74 | |||
75 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
76 | list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list, | ||
77 | list) { | ||
78 | if (node->pid == wq_thread->pid) { | ||
79 | node->executed++; | ||
80 | goto found; | ||
81 | } | ||
82 | } | ||
83 | pr_debug("trace_workqueue: entry not found\n"); | ||
84 | found: | ||
85 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
86 | } | ||
87 | |||
88 | /* Creation of a cpu workqueue thread */ | ||
89 | static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu) | ||
90 | { | ||
91 | struct cpu_workqueue_stats *cws; | ||
92 | unsigned long flags; | ||
93 | |||
94 | WARN_ON(cpu < 0 || cpu >= num_possible_cpus()); | ||
95 | |||
96 | /* Workqueues are sometimes created in atomic context */ | ||
97 | cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC); | ||
98 | if (!cws) { | ||
99 | pr_warning("trace_workqueue: not enough memory\n"); | ||
100 | return; | ||
101 | } | ||
102 | tracing_record_cmdline(wq_thread); | ||
103 | |||
104 | INIT_LIST_HEAD(&cws->list); | ||
105 | cws->cpu = cpu; | ||
106 | |||
107 | cws->pid = wq_thread->pid; | ||
108 | |||
109 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
110 | if (list_empty(&workqueue_cpu_stat(cpu)->list)) | ||
111 | cws->first_entry = true; | ||
112 | list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list); | ||
113 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
114 | } | ||
115 | |||
116 | /* Destruction of a cpu workqueue thread */ | ||
117 | static void probe_workqueue_destruction(struct task_struct *wq_thread) | ||
118 | { | ||
119 | /* A workqueue thread executes on only one cpu */ | ||
120 | int cpu = cpumask_first(&wq_thread->cpus_allowed); | ||
121 | struct cpu_workqueue_stats *node, *next; | ||
122 | unsigned long flags; | ||
123 | |||
124 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
125 | list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list, | ||
126 | list) { | ||
127 | if (node->pid == wq_thread->pid) { | ||
128 | list_del(&node->list); | ||
129 | kfree(node); | ||
130 | goto found; | ||
131 | } | ||
132 | } | ||
133 | |||
134 | pr_debug("trace_workqueue: couldn't find workqueue to destroy\n"); | ||
135 | found: | ||
136 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
137 | |||
138 | } | ||
139 | |||
140 | static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu) | ||
141 | { | ||
142 | unsigned long flags; | ||
143 | struct cpu_workqueue_stats *ret = NULL; | ||
144 | |||
145 | |||
146 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
147 | |||
148 | if (!list_empty(&workqueue_cpu_stat(cpu)->list)) | ||
149 | ret = list_entry(workqueue_cpu_stat(cpu)->list.next, | ||
150 | struct cpu_workqueue_stats, list); | ||
151 | |||
152 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
153 | |||
154 | return ret; | ||
155 | } | ||
156 | |||
157 | static void *workqueue_stat_start(void) | ||
158 | { | ||
159 | int cpu; | ||
160 | void *ret = NULL; | ||
161 | |||
162 | for_each_possible_cpu(cpu) { | ||
163 | ret = workqueue_stat_start_cpu(cpu); | ||
164 | if (ret) | ||
165 | return ret; | ||
166 | } | ||
167 | return NULL; | ||
168 | } | ||
169 | |||
170 | static void *workqueue_stat_next(void *prev, int idx) | ||
171 | { | ||
172 | struct cpu_workqueue_stats *prev_cws = prev; | ||
173 | int cpu = prev_cws->cpu; | ||
174 | unsigned long flags; | ||
175 | void *ret = NULL; | ||
176 | |||
177 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
178 | if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) { | ||
179 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
180 | for (++cpu; cpu < num_possible_cpus(); cpu++) { | ||
181 | ret = workqueue_stat_start_cpu(cpu); | ||
182 | if (ret) | ||
183 | return ret; | ||
184 | } | ||
185 | return NULL; | ||
186 | } | ||
187 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
188 | |||
189 | return list_entry(prev_cws->list.next, struct cpu_workqueue_stats, | ||
190 | list); | ||
191 | } | ||
192 | |||
193 | static int workqueue_stat_show(struct seq_file *s, void *p) | ||
194 | { | ||
195 | struct cpu_workqueue_stats *cws = p; | ||
196 | unsigned long flags; | ||
197 | int cpu = cws->cpu; | ||
198 | |||
199 | seq_printf(s, "%3d %6d %6u %s\n", cws->cpu, | ||
200 | atomic_read(&cws->inserted), | ||
201 | cws->executed, | ||
202 | trace_find_cmdline(cws->pid)); | ||
203 | |||
204 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
205 | if (&cws->list == workqueue_cpu_stat(cpu)->list.next) | ||
206 | seq_printf(s, "\n"); | ||
207 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
208 | |||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static int workqueue_stat_headers(struct seq_file *s) | ||
213 | { | ||
214 | seq_printf(s, "# CPU INSERTED EXECUTED NAME\n"); | ||
215 | seq_printf(s, "# | | | |\n\n"); | ||
216 | return 0; | ||
217 | } | ||
218 | |||
219 | struct tracer_stat workqueue_stats __read_mostly = { | ||
220 | .name = "workqueues", | ||
221 | .stat_start = workqueue_stat_start, | ||
222 | .stat_next = workqueue_stat_next, | ||
223 | .stat_show = workqueue_stat_show, | ||
224 | .stat_headers = workqueue_stat_headers | ||
225 | }; | ||
226 | |||
227 | |||
228 | int __init stat_workqueue_init(void) | ||
229 | { | ||
230 | if (register_stat_tracer(&workqueue_stats)) { | ||
231 | pr_warning("Unable to register workqueue stat tracer\n"); | ||
232 | return 1; | ||
233 | } | ||
234 | |||
235 | return 0; | ||
236 | } | ||
237 | fs_initcall(stat_workqueue_init); | ||
238 | |||
239 | /* | ||
240 | * Workqueues are created very early, just after pre-smp initcalls. | ||
241 | * So we must register our tracepoints at this stage. | ||
242 | */ | ||
243 | int __init trace_workqueue_early_init(void) | ||
244 | { | ||
245 | int ret, cpu; | ||
246 | |||
247 | ret = register_trace_workqueue_insertion(probe_workqueue_insertion); | ||
248 | if (ret) | ||
249 | goto out; | ||
250 | |||
251 | ret = register_trace_workqueue_execution(probe_workqueue_execution); | ||
252 | if (ret) | ||
253 | goto no_insertion; | ||
254 | |||
255 | ret = register_trace_workqueue_creation(probe_workqueue_creation); | ||
256 | if (ret) | ||
257 | goto no_execution; | ||
258 | |||
259 | ret = register_trace_workqueue_destruction(probe_workqueue_destruction); | ||
260 | if (ret) | ||
261 | goto no_creation; | ||
262 | |||
263 | for_each_possible_cpu(cpu) { | ||
264 | spin_lock_init(&workqueue_cpu_stat(cpu)->lock); | ||
265 | INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list); | ||
266 | } | ||
267 | |||
268 | return 0; | ||
269 | |||
270 | no_creation: | ||
271 | unregister_trace_workqueue_creation(probe_workqueue_creation); | ||
272 | no_execution: | ||
273 | unregister_trace_workqueue_execution(probe_workqueue_execution); | ||
274 | no_insertion: | ||
275 | unregister_trace_workqueue_insertion(probe_workqueue_insertion); | ||
276 | out: | ||
277 | pr_warning("trace_workqueue: unable to trace workqueues\n"); | ||
278 | |||
279 | return 1; | ||
280 | } | ||
281 | early_initcall(trace_workqueue_early_init); | ||
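workqueue_stat_start/next/show above implement a simple iterator contract:
stat_start() returns the first per-cpu entry (or NULL), stat_next() walks the
rest of that cpu's list and then falls through to the next possible cpu, and
stat_show() prints one row. A minimal sketch of how the stat core is assumed
to drive a registered tracer_stat (dump_one_stat and its caller are
hypothetical; only the four callbacks come from the patch):

    #include <linux/seq_file.h>
    #include "trace_stat.h"

    static void dump_one_stat(struct tracer_stat *ts, struct seq_file *s)
    {
            void *entry;
            int idx = 0;

            if (ts->stat_headers)
                    ts->stat_headers(s);    /* "# CPU INSERTED EXECUTED NAME" */

            /* walk entries until the tracer signals the end with NULL */
            for (entry = ts->stat_start(); entry;
                 entry = ts->stat_next(entry, idx++))
                    ts->stat_show(s, entry);
    }

With the fs_initcall() registration above, the resulting stat file is
expected to appear as 'workqueues' under the tracing debugfs directory
(trace_stat/workqueues in this kernel generation).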
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 1f0c509b40d3..e53ee18ef431 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/kallsyms.h> | 33 | #include <linux/kallsyms.h> |
34 | #include <linux/debug_locks.h> | 34 | #include <linux/debug_locks.h> |
35 | #include <linux/lockdep.h> | 35 | #include <linux/lockdep.h> |
36 | #include <trace/workqueue.h> | ||
36 | 37 | ||
37 | /* | 38 | /* |
38 | * The per-CPU workqueue (if single thread, we always use the first | 39 | * The per-CPU workqueue (if single thread, we always use the first |
@@ -125,9 +126,13 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work) | |||
125 | return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); | 126 | return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK); |
126 | } | 127 | } |
127 | 128 | ||
129 | DEFINE_TRACE(workqueue_insertion); | ||
130 | |||
128 | static void insert_work(struct cpu_workqueue_struct *cwq, | 131 | static void insert_work(struct cpu_workqueue_struct *cwq, |
129 | struct work_struct *work, struct list_head *head) | 132 | struct work_struct *work, struct list_head *head) |
130 | { | 133 | { |
134 | trace_workqueue_insertion(cwq->thread, work); | ||
135 | |||
131 | set_wq_data(work, cwq); | 136 | set_wq_data(work, cwq); |
132 | /* | 137 | /* |
133 | * Ensure that we get the right work->data if we see the | 138 | * Ensure that we get the right work->data if we see the |
@@ -259,6 +264,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
259 | } | 264 | } |
260 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); | 265 | EXPORT_SYMBOL_GPL(queue_delayed_work_on); |
261 | 266 | ||
267 | DEFINE_TRACE(workqueue_execution); | ||
268 | |||
262 | static void run_workqueue(struct cpu_workqueue_struct *cwq) | 269 | static void run_workqueue(struct cpu_workqueue_struct *cwq) |
263 | { | 270 | { |
264 | spin_lock_irq(&cwq->lock); | 271 | spin_lock_irq(&cwq->lock); |
@@ -284,7 +291,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq) | |||
284 | */ | 291 | */ |
285 | struct lockdep_map lockdep_map = work->lockdep_map; | 292 | struct lockdep_map lockdep_map = work->lockdep_map; |
286 | #endif | 293 | #endif |
287 | 294 | trace_workqueue_execution(cwq->thread, work); | |
288 | cwq->current_work = work; | 295 | cwq->current_work = work; |
289 | list_del_init(cwq->worklist.next); | 296 | list_del_init(cwq->worklist.next); |
290 | spin_unlock_irq(&cwq->lock); | 297 | spin_unlock_irq(&cwq->lock); |
@@ -765,6 +772,8 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu) | |||
765 | return cwq; | 772 | return cwq; |
766 | } | 773 | } |
767 | 774 | ||
775 | DEFINE_TRACE(workqueue_creation); | ||
776 | |||
768 | static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | 777 | static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) |
769 | { | 778 | { |
770 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; | 779 | struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; |
@@ -787,6 +796,8 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | |||
787 | sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); | 796 | sched_setscheduler_nocheck(p, SCHED_FIFO, ¶m); |
788 | cwq->thread = p; | 797 | cwq->thread = p; |
789 | 798 | ||
799 | trace_workqueue_creation(cwq->thread, cpu); | ||
800 | |||
790 | return 0; | 801 | return 0; |
791 | } | 802 | } |
792 | 803 | ||
@@ -868,6 +879,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name, | |||
868 | } | 879 | } |
869 | EXPORT_SYMBOL_GPL(__create_workqueue_key); | 880 | EXPORT_SYMBOL_GPL(__create_workqueue_key); |
870 | 881 | ||
882 | DEFINE_TRACE(workqueue_destruction); | ||
883 | |||
871 | static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) | 884 | static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) |
872 | { | 885 | { |
873 | /* | 886 | /* |
@@ -891,6 +904,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) | |||
891 | * checks list_empty(), and a "normal" queue_work() can't use | 904 | * checks list_empty(), and a "normal" queue_work() can't use |
892 | * a dead CPU. | 905 | * a dead CPU. |
893 | */ | 906 | */ |
907 | trace_workqueue_destruction(cwq->thread); | ||
894 | kthread_stop(cwq->thread); | 908 | kthread_stop(cwq->thread); |
895 | cwq->thread = NULL; | 909 | cwq->thread = NULL; |
896 | } | 910 | } |
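The four DEFINE_TRACE() lines in this file pair with declarations in the new
<trace/workqueue.h> header, which is not part of this hunk. A sketch of what
it presumably contains, using the DECLARE_TRACE()/TPPROTO()/TPARGS()
tracepoint helpers of this kernel generation (treat the exact header contents
as an assumption; the prototypes are taken from the probes registered in
trace_workqueue.c above):

    #ifndef __TRACE_WORKQUEUE_H
    #define __TRACE_WORKQUEUE_H

    #include <linux/tracepoint.h>
    #include <linux/workqueue.h>
    #include <linux/sched.h>

    /* probes receive the workqueue thread plus the work being queued/run */
    DECLARE_TRACE(workqueue_insertion,
            TPPROTO(struct task_struct *wq_thread, struct work_struct *work),
            TPARGS(wq_thread, work));

    DECLARE_TRACE(workqueue_execution,
            TPPROTO(struct task_struct *wq_thread, struct work_struct *work),
            TPARGS(wq_thread, work));

    /* creation also carries the cpu the new thread is bound to */
    DECLARE_TRACE(workqueue_creation,
            TPPROTO(struct task_struct *wq_thread, int cpu),
            TPARGS(wq_thread, cpu));

    DECLARE_TRACE(workqueue_destruction,
            TPPROTO(struct task_struct *wq_thread),
            TPARGS(wq_thread));

    #endif /* __TRACE_WORKQUEUE_H */

A mismatch between a DECLARE_TRACE() prototype and a probe's signature would
be caught at compile time when the corresponding register_trace_workqueue_*()
call is built.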