Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/blktrace.c              | 48
-rw-r--r--  kernel/trace/ftrace.c                |  7
-rw-r--r--  kernel/trace/ring_buffer.c           |  4
-rw-r--r--  kernel/trace/trace.c                 |  2
-rw-r--r--  kernel/trace/trace_clock.c           |  2
-rw-r--r--  kernel/trace/trace_entries.h         |  2
-rw-r--r--  kernel/trace/trace_functions_graph.c |  2
-rw-r--r--  kernel/trace/trace_irqsoff.c         |  2
-rw-r--r--  kernel/trace/trace_kprobe.c          |  2

9 files changed, 26 insertions(+), 45 deletions(-)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index cbafed7d4f38..6957aa298dfa 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -703,28 +703,21 @@ void blk_trace_shutdown(struct request_queue *q)
  *
  **/
 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 			     u32 what)
 {
 	struct blk_trace *bt = q->blk_trace;
-	int rw = rq->cmd_flags & 0x03;
 
 	if (likely(!bt))
 		return;
 
-	if (rq->cmd_flags & REQ_DISCARD)
-		rw |= REQ_DISCARD;
-
-	if (rq->cmd_flags & REQ_SECURE)
-		rw |= REQ_SECURE;
-
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
-		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
+		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
 				what, rq->errors, rq->cmd_len, rq->cmd);
 	} else {
 		what |= BLK_TC_ACT(BLK_TC_FS);
-		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
-				what, rq->errors, 0, NULL);
+		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+				rq->cmd_flags, what, rq->errors, 0, NULL);
 	}
 }
 
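The lines removed above rebuilt a private rw mask from rq->cmd_flags and then OR-ed REQ_DISCARD and REQ_SECURE back in — bits that were copied out of cmd_flags in the first place, so passing cmd_flags straight to __blk_add_trace() loses nothing. A self-contained illustration of that redundancy (the flag values are placeholders, not the kernel's real bit positions):

#include <assert.h>

#define REQ_DISCARD	(1u << 7)	/* placeholder bit positions */
#define REQ_SECURE	(1u << 8)

int main(void)
{
	unsigned int cmd_flags = 0x03 | REQ_DISCARD | REQ_SECURE;
	unsigned int rw = cmd_flags & 0x03;	/* what the old code built */

	if (cmd_flags & REQ_DISCARD)
		rw |= REQ_DISCARD;
	if (cmd_flags & REQ_SECURE)
		rw |= REQ_SECURE;

	/* everything the old code reconstructed is already in cmd_flags */
	assert((cmd_flags & rw) == rw);
	return 0;
}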
@@ -857,29 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
 		__blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
+static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+				 unsigned int depth, bool explicit)
 {
 	struct blk_trace *bt = q->blk_trace;
 
 	if (bt) {
-		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-		__be64 rpdu = cpu_to_be64(pdu);
-
-		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
-				sizeof(rpdu), &rpdu);
-	}
-}
+		__be64 rpdu = cpu_to_be64(depth);
+		u32 what;
 
-static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
-{
-	struct blk_trace *bt = q->blk_trace;
-
-	if (bt) {
-		unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-		__be64 rpdu = cpu_to_be64(pdu);
+		if (explicit)
+			what = BLK_TA_UNPLUG_IO;
+		else
+			what = BLK_TA_UNPLUG_TIMER;
 
-		__blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
-				sizeof(rpdu), &rpdu);
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
 	}
 }
 
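Both unplug flavours now share one payload format: the merged handler stores the caller-supplied queue depth as a big-endian 64-bit PDU via cpu_to_be64(), instead of recomputing it from q->rq.count[]. A hypothetical userspace decoder — the function name and buffer handling are illustrative assumptions, not blktrace's actual parser:

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Return the queue depth carried in an unplug event's PDU. */
static uint64_t unplug_depth(const void *pdu_data, size_t pdu_len)
{
	uint64_t be_depth = 0;

	if (pdu_len >= sizeof(be_depth))
		memcpy(&be_depth, pdu_data, sizeof(be_depth));
	return be64toh(be_depth);	/* wire format is big-endian */
}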
@@ -1022,9 +1007,7 @@ static void blk_register_tracepoints(void)
 	WARN_ON(ret);
 	ret = register_trace_block_plug(blk_add_trace_plug, NULL);
 	WARN_ON(ret);
-	ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
-	WARN_ON(ret);
-	ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+	ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
 	WARN_ON(ret);
 	ret = register_trace_block_split(blk_add_trace_split, NULL);
 	WARN_ON(ret);
@@ -1039,8 +1022,7 @@ static void blk_unregister_tracepoints(void)
 	unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
 	unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
 	unregister_trace_block_split(blk_add_trace_split, NULL);
-	unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
-	unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
+	unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
 	unregister_trace_block_plug(blk_add_trace_plug, NULL);
 	unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
 	unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
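The single register_trace_block_unplug() call only works because the tracepoint itself gained the depth/explicit arguments the new probe expects. A sketch of the matching declaration — abbreviated, and its exact form in include/trace/events/block.h is an assumption here:

TRACE_EVENT(block_unplug,
	TP_PROTO(struct request_queue *q, unsigned int depth, bool explicit),
	TP_ARGS(q, depth, explicit),
	/* TP_STRUCT__entry / TP_fast_assign / TP_printk bodies omitted */
);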
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 888b611897d3..ee24fa1935ac 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1268,7 +1268,7 @@ static int ftrace_update_code(struct module *mod)
 		p->flags = 0L;
 
 		/*
-		 * Do the initial record convertion from mcount jump
+		 * Do the initial record conversion from mcount jump
 		 * to the NOP instructions.
 		 */
 		if (!ftrace_code_disable(mod, p)) {
@@ -1467,7 +1467,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		return t_hash_next(m, pos);
 
 	(*pos)++;
-	iter->pos = *pos;
+	iter->pos = iter->func_pos = *pos;
 
 	if (iter->flags & FTRACE_ITER_PRINTALL)
 		return t_hash_start(m, pos);
@@ -1502,7 +1502,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	if (!rec)
 		return t_hash_start(m, pos);
 
-	iter->func_pos = *pos;
 	iter->func = rec;
 
 	return iter;
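The two t_next() hunks are one fix: func_pos used to be updated only after the record walk, so the early returns into t_hash_start() could leave it stale; advancing both cursors together at the top closes that window. A self-contained analogue of the pattern (plain C with illustrative names, not kernel code):

#include <stdio.h>

struct iter { long pos, func_pos; };

static void step(struct iter *it, long *pos, int early_exit)
{
	(*pos)++;
	it->pos = it->func_pos = *pos;	/* advance both cursors up front */

	if (early_exit)
		return;		/* func_pos is already correct here */

	/* ... the record walk that used to be the only place
	 * func_pos was updated ... */
}

int main(void)
{
	struct iter it = { 0, 0 };
	long pos = 0;

	step(&it, &pos, 1);
	printf("pos=%ld func_pos=%ld\n", it.pos, it.func_pos);	/* 1 1 */
	return 0;
}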
@@ -3426,7 +3425,7 @@ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
 	atomic_set(&t->tracing_graph_pause, 0);
 	atomic_set(&t->trace_overrun, 0);
 	t->ftrace_timestamp = 0;
-	/* make curr_ret_stack visable before we add the ret_stack */
+	/* make curr_ret_stack visible before we add the ret_stack */
 	smp_wmb();
 	t->ret_stack = ret_stack;
 }
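Beyond the spelling fix, the comment describes a publish pattern: initialize everything, issue the write barrier, then expose the pointer, so a racing reader that sees t->ret_stack non-NULL also sees the initialized fields. A minimal user-space analogue with C11 release/acquire standing in for smp_wmb() and its read-side pairing (an illustration, not the kernel's implementation):

#include <stdatomic.h>

struct ret_stack { int curr_ret_stack; };

static struct ret_stack slot;
static _Atomic(struct ret_stack *) published;

void publisher(void)
{
	slot.curr_ret_stack = -1;		/* initialize first ...    */
	atomic_store_explicit(&published, &slot,
			      memory_order_release);	/* ... then publish */
}

struct ret_stack *consumer(void)
{
	/* acquire pairs with the release store in publisher() */
	return atomic_load_explicit(&published, memory_order_acquire);
}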
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d9c8bcafb120..0ef7b4b2a1f7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1478,7 +1478,7 @@ static inline unsigned long rb_page_entries(struct buffer_page *bpage)
 	return local_read(&bpage->entries) & RB_WRITE_MASK;
 }
 
-/* Size is determined by what has been commited */
+/* Size is determined by what has been committed */
 static inline unsigned rb_page_size(struct buffer_page *bpage)
 {
 	return rb_page_commit(bpage);
@@ -2932,7 +2932,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	/*
 	 * cpu_buffer->pages just needs to point to the buffer, it
 	 * has no specific buffer page to point to. Lets move it out
-	 * of our way so we don't accidently swap it.
+	 * of our way so we don't accidentally swap it.
 	 */
 	cpu_buffer->pages = reader->list.prev;
 
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9541c27c1cf2..d38c16a06a6f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3239,7 +3239,7 @@ waitagain:
 	trace_seq_init(&iter->seq);
 
 	/*
-	 * If there was nothing to send to user, inspite of consuming trace
+	 * If there was nothing to send to user, in spite of consuming trace
 	 * entries, go back to wait for more entries.
 	 */
 	if (sret == -EBUSY)
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 685a67d55db0..6302747a1398 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -46,7 +46,7 @@ u64 notrace trace_clock_local(void)
 }
 
 /*
- * trace_clock(): 'inbetween' trace clock. Not completely serialized,
+ * trace_clock(): 'between' trace clock. Not completely serialized,
  * but not completely incorrect when crossing CPUs either.
  *
  * This is based on cpu_clock(), which will allow at most ~1 jiffy of
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 1516cb3ec549..e32744c84d94 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -27,7 +27,7 @@
  *   in the structure.
  *
  * * for structures within structures, the format of the internal
- *   structure is layed out. This allows the internal structure
+ *   structure is laid out. This allows the internal structure
  *   to be deciphered for the format file. Although these macros
  *   may become out of sync with the internal structure, they
  *   will create a compile error if it happens. Since the
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 76b05980225c..962cdb24ed81 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -905,7 +905,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
  *
  * returns 1 if
  *  - we are inside irq code
- *  - we just extered irq code
+ *  - we just entered irq code
  *
  * retunns 0 if
  *  - funcgraph-interrupts option is set
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 92b6e1e12d98..a4969b47afc1 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -80,7 +80,7 @@ static struct tracer_flags tracer_flags = {
  * skip the latency if the sequence has changed - some other section
  * did a maximum and could disturb our measurement with serial console
  * printouts, etc. Truly coinciding maximum latencies should be rare
- * and what happens together happens separately as well, so this doesnt
+ * and what happens together happens separately as well, so this doesn't
  * decrease the validity of the maximum found:
  */
 static __cacheline_aligned_in_smp unsigned long max_sequence;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8435b43b1782..35d55a386145 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1839,7 +1839,7 @@ static void unregister_probe_event(struct trace_probe *tp)
 	kfree(tp->call.print_fmt);
 }
 
-/* Make a debugfs interface for controling probe points */
+/* Make a debugfs interface for controlling probe points */
 static __init int init_kprobe_trace(void)
 {
 	struct dentry *d_tracer;