Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/Kconfig             2
-rw-r--r--  kernel/trace/Makefile            1
-rw-r--r--  kernel/trace/trace_workqueue.c 300
3 files changed, 1 insertion(+), 302 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index d81a1a532994..f347ac91292d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -271,7 +271,7 @@ config PROFILE_ANNOTATED_BRANCHES
 	bool "Trace likely/unlikely profiler"
 	select TRACE_BRANCH_PROFILING
 	help
-	  This tracer profiles all the the likely and unlikely macros
+	  This tracer profiles all likely and unlikely macros
 	  in the kernel. It will display the results in:
 
 	  /sys/kernel/debug/tracing/trace_stat/branch_annotated
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 5f39a07fe5ea..b3afe0e76f79 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -41,7 +41,6 @@ obj-$(CONFIG_STACK_TRACER) += trace_stack.o
 obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
 obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += trace_functions_graph.o
 obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
-obj-$(CONFIG_WORKQUEUE_TRACER) += trace_workqueue.o
 obj-$(CONFIG_BLK_DEV_IO_TRACE) += blktrace.o
 ifeq ($(CONFIG_BLOCK),y)
 obj-$(CONFIG_EVENT_TRACING) += blktrace.o
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
deleted file mode 100644
index 209b379a4721..000000000000
--- a/kernel/trace/trace_workqueue.c
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Workqueue statistical tracer.
- *
- * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
- *
- */
-
-
-#include <trace/events/workqueue.h>
-#include <linux/list.h>
-#include <linux/percpu.h>
-#include <linux/slab.h>
-#include <linux/kref.h>
-#include "trace_stat.h"
-#include "trace.h"
-
-
-/* A cpu workqueue thread */
-struct cpu_workqueue_stats {
-	struct list_head list;
-	struct kref kref;
-	int cpu;
-	pid_t pid;
-/* Can be inserted from interrupt or user context, need to be atomic */
-	atomic_t inserted;
-/*
- * Don't need to be atomic, works are serialized in a single workqueue thread
- * on a single CPU.
- */
-	unsigned int executed;
-};
-
-/* List of workqueue threads on one cpu */
-struct workqueue_global_stats {
-	struct list_head list;
-	spinlock_t lock;
-};
-
-/* Don't need a global lock because allocated before the workqueues, and
- * never freed.
- */
-static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
-#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
-
-static void cpu_workqueue_stat_free(struct kref *kref)
-{
-	kfree(container_of(kref, struct cpu_workqueue_stats, kref));
-}
-
-/* Insertion of a work */
-static void
-probe_workqueue_insertion(void *ignore,
-			  struct task_struct *wq_thread,
-			  struct work_struct *work)
-{
-	int cpu = cpumask_first(&wq_thread->cpus_allowed);
-	struct cpu_workqueue_stats *node;
-	unsigned long flags;
-
-	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
-		if (node->pid == wq_thread->pid) {
-			atomic_inc(&node->inserted);
-			goto found;
-		}
-	}
-	pr_debug("trace_workqueue: entry not found\n");
-found:
-	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-}
-
-/* Execution of a work */
-static void
-probe_workqueue_execution(void *ignore,
-			  struct task_struct *wq_thread,
-			  struct work_struct *work)
-{
-	int cpu = cpumask_first(&wq_thread->cpus_allowed);
-	struct cpu_workqueue_stats *node;
-	unsigned long flags;
-
-	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-	list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
-		if (node->pid == wq_thread->pid) {
-			node->executed++;
-			goto found;
-		}
-	}
-	pr_debug("trace_workqueue: entry not found\n");
-found:
-	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-}
-
-/* Creation of a cpu workqueue thread */
-static void probe_workqueue_creation(void *ignore,
-				     struct task_struct *wq_thread, int cpu)
-{
-	struct cpu_workqueue_stats *cws;
-	unsigned long flags;
-
-	WARN_ON(cpu < 0);
-
-	/* Workqueues are sometimes created in atomic context */
-	cws = kzalloc(sizeof(struct cpu_workqueue_stats), GFP_ATOMIC);
-	if (!cws) {
-		pr_warning("trace_workqueue: not enough memory\n");
-		return;
-	}
-	INIT_LIST_HEAD(&cws->list);
-	kref_init(&cws->kref);
-	cws->cpu = cpu;
-	cws->pid = wq_thread->pid;
-
-	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
-	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-}
-
-/* Destruction of a cpu workqueue thread */
-static void
-probe_workqueue_destruction(void *ignore, struct task_struct *wq_thread)
-{
-	/* Workqueue only execute on one cpu */
-	int cpu = cpumask_first(&wq_thread->cpus_allowed);
-	struct cpu_workqueue_stats *node, *next;
-	unsigned long flags;
-
-	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
-				 list) {
-		if (node->pid == wq_thread->pid) {
-			list_del(&node->list);
-			kref_put(&node->kref, cpu_workqueue_stat_free);
-			goto found;
-		}
-	}
-
-	pr_debug("trace_workqueue: don't find workqueue to destroy\n");
-found:
-	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-
-}
-
-static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
-{
-	unsigned long flags;
-	struct cpu_workqueue_stats *ret = NULL;
-
-
-	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-
-	if (!list_empty(&workqueue_cpu_stat(cpu)->list)) {
-		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
-				 struct cpu_workqueue_stats, list);
-		kref_get(&ret->kref);
-	}
-
-	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-
-	return ret;
-}
-
-static void *workqueue_stat_start(struct tracer_stat *trace)
-{
-	int cpu;
-	void *ret = NULL;
-
-	for_each_possible_cpu(cpu) {
-		ret = workqueue_stat_start_cpu(cpu);
-		if (ret)
-			return ret;
-	}
-	return NULL;
-}
-
-static void *workqueue_stat_next(void *prev, int idx)
-{
-	struct cpu_workqueue_stats *prev_cws = prev;
-	struct cpu_workqueue_stats *ret;
-	int cpu = prev_cws->cpu;
-	unsigned long flags;
-
-	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
-	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
-		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-		do {
-			cpu = cpumask_next(cpu, cpu_possible_mask);
-			if (cpu >= nr_cpu_ids)
-				return NULL;
-		} while (!(ret = workqueue_stat_start_cpu(cpu)));
-		return ret;
-	} else {
-		ret = list_entry(prev_cws->list.next,
-				 struct cpu_workqueue_stats, list);
-		kref_get(&ret->kref);
-	}
-	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
-
-	return ret;
-}
-
-static int workqueue_stat_show(struct seq_file *s, void *p)
-{
-	struct cpu_workqueue_stats *cws = p;
-	struct pid *pid;
-	struct task_struct *tsk;
-
-	pid = find_get_pid(cws->pid);
-	if (pid) {
-		tsk = get_pid_task(pid, PIDTYPE_PID);
-		if (tsk) {
-			seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
-				   atomic_read(&cws->inserted), cws->executed,
-				   tsk->comm);
-			put_task_struct(tsk);
-		}
-		put_pid(pid);
-	}
-
-	return 0;
-}
-
-static void workqueue_stat_release(void *stat)
-{
-	struct cpu_workqueue_stats *node = stat;
-
-	kref_put(&node->kref, cpu_workqueue_stat_free);
-}
-
-static int workqueue_stat_headers(struct seq_file *s)
-{
-	seq_printf(s, "# CPU INSERTED EXECUTED NAME\n");
-	seq_printf(s, "# |      |        |       |\n");
-	return 0;
-}
-
-struct tracer_stat workqueue_stats __read_mostly = {
-	.name = "workqueues",
-	.stat_start = workqueue_stat_start,
-	.stat_next = workqueue_stat_next,
-	.stat_show = workqueue_stat_show,
-	.stat_release = workqueue_stat_release,
-	.stat_headers = workqueue_stat_headers
-};
-
-
-int __init stat_workqueue_init(void)
-{
-	if (register_stat_tracer(&workqueue_stats)) {
-		pr_warning("Unable to register workqueue stat tracer\n");
-		return 1;
-	}
-
-	return 0;
-}
-fs_initcall(stat_workqueue_init);
-
-/*
- * Workqueues are created very early, just after pre-smp initcalls.
- * So we must register our tracepoints at this stage.
- */
-int __init trace_workqueue_early_init(void)
-{
-	int ret, cpu;
-
-	for_each_possible_cpu(cpu) {
-		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
-		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
-	}
-
-	ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
-	if (ret)
-		goto out;
-
-	ret = register_trace_workqueue_execution(probe_workqueue_execution, NULL);
-	if (ret)
-		goto no_insertion;
-
-	ret = register_trace_workqueue_creation(probe_workqueue_creation, NULL);
-	if (ret)
-		goto no_execution;
-
-	ret = register_trace_workqueue_destruction(probe_workqueue_destruction, NULL);
-	if (ret)
-		goto no_creation;
-
-	return 0;
-
-no_creation:
-	unregister_trace_workqueue_creation(probe_workqueue_creation, NULL);
-no_execution:
-	unregister_trace_workqueue_execution(probe_workqueue_execution, NULL);
-no_insertion:
-	unregister_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
-out:
-	pr_warning("trace_workqueue: unable to trace workqueues\n");
-
-	return 1;
-}
-early_initcall(trace_workqueue_early_init);
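
Note: for comparison only, below is a minimal sketch (not part of this commit) of how executed work items could still be counted through the generic workqueue_execute_start tracepoint declared in <trace/events/workqueue.h>, the header the removed tracer already included. It assumes built-in (non-modular) code, as the removed tracer was, and all names other than the tracepoint and kernel APIs (wq_exec_count, probe_execute_start) are hypothetical.

/*
 * Hypothetical sketch: count every work item as it starts executing,
 * using the generic workqueue_execute_start tracepoint.
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <trace/events/workqueue.h>

static atomic_t wq_exec_count = ATOMIC_INIT(0);

/* Probe signature: the private data cookie first, then the TP_PROTO args. */
static void probe_execute_start(void *ignore, struct work_struct *work)
{
	atomic_inc(&wq_exec_count);
}

static int __init wq_exec_count_init(void)
{
	int ret;

	ret = register_trace_workqueue_execute_start(probe_execute_start, NULL);
	if (ret)
		pr_warning("wq_exec_count: unable to register tracepoint probe\n");
	return ret;
}
early_initcall(wq_exec_count_init);

The same event is also visible from userspace under /sys/kernel/debug/tracing/events/workqueue/ when the tracing infrastructure is enabled.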