diff options
| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2009-09-14 00:16:56 -0400 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2009-09-14 00:16:56 -0400 |
| commit | fc8e1ead9314cf0e0f1922e661428b93d3a50d88 (patch) | |
| tree | f3cb97c4769b74f6627a59769f1ed5c92a13c58a /kernel/trace/trace_workqueue.c | |
| parent | 2bcaa6a4238094c5695d5b1943078388d82d3004 (diff) | |
| parent | 9de48cc300fb10f7d9faa978670becf5e352462a (diff) | |
Merge branch 'next' into for-linus
Diffstat (limited to 'kernel/trace/trace_workqueue.c')
| -rw-r--r-- | kernel/trace/trace_workqueue.c | 25 |
1 file changed, 6 insertions, 19 deletions
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c index 797201e4a137..97fcea4acce1 100644 --- a/kernel/trace/trace_workqueue.c +++ b/kernel/trace/trace_workqueue.c | |||
| @@ -6,7 +6,7 @@ | |||
| 6 | */ | 6 | */ |
| 7 | 7 | ||
| 8 | 8 | ||
| 9 | #include <trace/workqueue.h> | 9 | #include <trace/events/workqueue.h> |
| 10 | #include <linux/list.h> | 10 | #include <linux/list.h> |
| 11 | #include <linux/percpu.h> | 11 | #include <linux/percpu.h> |
| 12 | #include "trace_stat.h" | 12 | #include "trace_stat.h" |
| @@ -16,8 +16,6 @@ | |||
| 16 | /* A cpu workqueue thread */ | 16 | /* A cpu workqueue thread */ |
| 17 | struct cpu_workqueue_stats { | 17 | struct cpu_workqueue_stats { |
| 18 | struct list_head list; | 18 | struct list_head list; |
| 19 | /* Useful to know if we print the cpu headers */ | ||
| 20 | bool first_entry; | ||
| 21 | int cpu; | 19 | int cpu; |
| 22 | pid_t pid; | 20 | pid_t pid; |
| 23 | /* Can be inserted from interrupt or user context, need to be atomic */ | 21 | /* Can be inserted from interrupt or user context, need to be atomic */ |
| @@ -47,12 +45,11 @@ probe_workqueue_insertion(struct task_struct *wq_thread, | |||
| 47 | struct work_struct *work) | 45 | struct work_struct *work) |
| 48 | { | 46 | { |
| 49 | int cpu = cpumask_first(&wq_thread->cpus_allowed); | 47 | int cpu = cpumask_first(&wq_thread->cpus_allowed); |
| 50 | struct cpu_workqueue_stats *node, *next; | 48 | struct cpu_workqueue_stats *node; |
| 51 | unsigned long flags; | 49 | unsigned long flags; |
| 52 | 50 | ||
| 53 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | 51 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); |
| 54 | list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list, | 52 | list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) { |
| 55 | list) { | ||
| 56 | if (node->pid == wq_thread->pid) { | 53 | if (node->pid == wq_thread->pid) { |
| 57 | atomic_inc(&node->inserted); | 54 | atomic_inc(&node->inserted); |
| 58 | goto found; | 55 | goto found; |
| @@ -69,12 +66,11 @@ probe_workqueue_execution(struct task_struct *wq_thread, | |||
| 69 | struct work_struct *work) | 66 | struct work_struct *work) |
| 70 | { | 67 | { |
| 71 | int cpu = cpumask_first(&wq_thread->cpus_allowed); | 68 | int cpu = cpumask_first(&wq_thread->cpus_allowed); |
| 72 | struct cpu_workqueue_stats *node, *next; | 69 | struct cpu_workqueue_stats *node; |
| 73 | unsigned long flags; | 70 | unsigned long flags; |
| 74 | 71 | ||
| 75 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | 72 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); |
| 76 | list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list, | 73 | list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) { |
| 77 | list) { | ||
| 78 | if (node->pid == wq_thread->pid) { | 74 | if (node->pid == wq_thread->pid) { |
| 79 | node->executed++; | 75 | node->executed++; |
| 80 | goto found; | 76 | goto found; |
| @@ -105,8 +101,6 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu) | |||
| 105 | cws->pid = wq_thread->pid; | 101 | cws->pid = wq_thread->pid; |
| 106 | 102 | ||
| 107 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | 103 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); |
| 108 | if (list_empty(&workqueue_cpu_stat(cpu)->list)) | ||
| 109 | cws->first_entry = true; | ||
| 110 | list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list); | 104 | list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list); |
| 111 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | 105 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); |
| 112 | } | 106 | } |
| @@ -152,7 +146,7 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu) | |||
| 152 | return ret; | 146 | return ret; |
| 153 | } | 147 | } |
| 154 | 148 | ||
| 155 | static void *workqueue_stat_start(void) | 149 | static void *workqueue_stat_start(struct tracer_stat *trace) |
| 156 | { | 150 | { |
| 157 | int cpu; | 151 | int cpu; |
| 158 | void *ret = NULL; | 152 | void *ret = NULL; |
| @@ -191,16 +185,9 @@ static void *workqueue_stat_next(void *prev, int idx) | |||
| 191 | static int workqueue_stat_show(struct seq_file *s, void *p) | 185 | static int workqueue_stat_show(struct seq_file *s, void *p) |
| 192 | { | 186 | { |
| 193 | struct cpu_workqueue_stats *cws = p; | 187 | struct cpu_workqueue_stats *cws = p; |
| 194 | unsigned long flags; | ||
| 195 | int cpu = cws->cpu; | ||
| 196 | struct pid *pid; | 188 | struct pid *pid; |
| 197 | struct task_struct *tsk; | 189 | struct task_struct *tsk; |
| 198 | 190 | ||
| 199 | spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 200 | if (&cws->list == workqueue_cpu_stat(cpu)->list.next) | ||
| 201 | seq_printf(s, "\n"); | ||
| 202 | spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags); | ||
| 203 | |||
| 204 | pid = find_get_pid(cws->pid); | 191 | pid = find_get_pid(cws->pid); |
| 205 | if (pid) { | 192 | if (pid) { |
| 206 | tsk = get_pid_task(pid, PIDTYPE_PID); | 193 | tsk = get_pid_task(pid, PIDTYPE_PID); |
