author	Lai Jiangshan <laijs@cn.fujitsu.com>	2009-01-16 03:32:25 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-20 07:06:59 -0500
commit	3690b5e6fd9daa030039ae9bda69044228bd476d (patch)
tree	0c728e624889c4fb32e028786c5db91bbdb5def1 /kernel/trace/trace_workqueue.c
parent	ce5e5540c0e839781e7cd134517d5d2e9e819636 (diff)
trace_workqueue: use percpu data for workqueue stat
Impact: use percpu data instead of a global structure

Use:

   static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);

instead of allocating a global structure.

percpu data also works well on NUMA.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
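The core of the change, shown in isolation (all identifiers below are taken from the patch itself):

	/* Before: one slot per possible CPU in a kmalloc'd array,
	 * allocated in trace_workqueue_early_init() and never freed. */
	static struct workqueue_global_stats *all_workqueue_stat;

	/* After: static per-CPU storage plus an accessor macro. Nothing is
	 * allocated at runtime, so the init-time failure path goes away,
	 * and per-CPU data placement works well on NUMA. */
	static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
	#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))

	/* Each former all_workqueue_stat[cpu].lock / .list access becomes
	 * workqueue_cpu_stat(cpu)->lock / workqueue_cpu_stat(cpu)->list. */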
Diffstat (limited to 'kernel/trace/trace_workqueue.c')
-rw-r--r--	kernel/trace/trace_workqueue.c	64
1 file changed, 29 insertions(+), 35 deletions(-)
diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
index f8118d39ca9b..4664990fe9c5 100644
--- a/kernel/trace/trace_workqueue.c
+++ b/kernel/trace/trace_workqueue.c
@@ -8,6 +8,7 @@
 
 #include <trace/workqueue.h>
 #include <linux/list.h>
+#include <linux/percpu.h>
 #include "trace_stat.h"
 #include "trace.h"
 
@@ -37,7 +38,8 @@ struct workqueue_global_stats {
 /* Don't need a global lock because allocated before the workqueues, and
  * never freed.
  */
-static struct workqueue_global_stats *all_workqueue_stat;
+static DEFINE_PER_CPU(struct workqueue_global_stats, all_workqueue_stat);
+#define workqueue_cpu_stat(cpu) (&per_cpu(all_workqueue_stat, cpu))
 
 /* Insertion of a work */
 static void
@@ -48,8 +50,8 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
 	struct cpu_workqueue_stats *node, *next;
 	unsigned long flags;
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
 			list) {
 		if (node->pid == wq_thread->pid) {
 			atomic_inc(&node->inserted);
@@ -58,7 +60,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
 	}
 	pr_debug("trace_workqueue: entry not found\n");
 found:
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Execution of a work */
@@ -70,8 +72,8 @@ probe_workqueue_execution(struct task_struct *wq_thread,
 	struct cpu_workqueue_stats *node, *next;
 	unsigned long flags;
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
 			list) {
 		if (node->pid == wq_thread->pid) {
 			node->executed++;
@@ -80,7 +82,7 @@ probe_workqueue_execution(struct task_struct *wq_thread,
 	}
 	pr_debug("trace_workqueue: entry not found\n");
 found:
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Creation of a cpu workqueue thread */
@@ -104,11 +106,11 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
 
 	cws->pid = wq_thread->pid;
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	if (list_empty(&all_workqueue_stat[cpu].list))
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	if (list_empty(&workqueue_cpu_stat(cpu)->list))
 		cws->first_entry = true;
-	list_add_tail(&cws->list, &all_workqueue_stat[cpu].list);
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	list_add_tail(&cws->list, &workqueue_cpu_stat(cpu)->list);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 }
 
 /* Destruction of a cpu workqueue thread */
@@ -119,8 +121,8 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
 	struct cpu_workqueue_stats *node, *next;
 	unsigned long flags;
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	list_for_each_entry_safe(node, next, &all_workqueue_stat[cpu].list,
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	list_for_each_entry_safe(node, next, &workqueue_cpu_stat(cpu)->list,
 			list) {
 		if (node->pid == wq_thread->pid) {
 			list_del(&node->list);
@@ -131,7 +133,7 @@ static void probe_workqueue_destruction(struct task_struct *wq_thread)
 
 	pr_debug("trace_workqueue: don't find workqueue to destroy\n");
 found:
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
 }
 
@@ -141,13 +143,13 @@ static struct cpu_workqueue_stats *workqueue_stat_start_cpu(int cpu)
 	struct cpu_workqueue_stats *ret = NULL;
 
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
 
-	if (!list_empty(&all_workqueue_stat[cpu].list))
-		ret = list_entry(all_workqueue_stat[cpu].list.next,
+	if (!list_empty(&workqueue_cpu_stat(cpu)->list))
+		ret = list_entry(workqueue_cpu_stat(cpu)->list.next,
 			struct cpu_workqueue_stats, list);
 
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
 	return ret;
 }
@@ -172,9 +174,9 @@ static void *workqueue_stat_next(void *prev, int idx)
 	unsigned long flags;
 	void *ret = NULL;
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	if (list_is_last(&prev_cws->list, &all_workqueue_stat[cpu].list)) {
-		spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	if (list_is_last(&prev_cws->list, &workqueue_cpu_stat(cpu)->list)) {
+		spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 		for (++cpu ; cpu < num_possible_cpus(); cpu++) {
 			ret = workqueue_stat_start_cpu(cpu);
 			if (ret)
@@ -182,7 +184,7 @@ static void *workqueue_stat_next(void *prev, int idx)
 		}
 		return NULL;
 	}
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
 	return list_entry(prev_cws->list.next, struct cpu_workqueue_stats,
 		list);
@@ -199,10 +201,10 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
 		   cws->executed,
 		   trace_find_cmdline(cws->pid));
 
-	spin_lock_irqsave(&all_workqueue_stat[cpu].lock, flags);
-	if (&cws->list == all_workqueue_stat[cpu].list.next)
+	spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
+	if (&cws->list == workqueue_cpu_stat(cpu)->list.next)
 		seq_printf(s, "\n");
-	spin_unlock_irqrestore(&all_workqueue_stat[cpu].lock, flags);
+	spin_unlock_irqrestore(&workqueue_cpu_stat(cpu)->lock, flags);
 
 	return 0;
 }
@@ -258,17 +260,9 @@ int __init trace_workqueue_early_init(void)
 	if (ret)
 		goto no_creation;
 
-	all_workqueue_stat = kmalloc(sizeof(struct workqueue_global_stats)
-				     * num_possible_cpus(), GFP_KERNEL);
-
-	if (!all_workqueue_stat) {
-		pr_warning("trace_workqueue: not enough memory\n");
-		goto no_creation;
-	}
-
 	for_each_possible_cpu(cpu) {
-		spin_lock_init(&all_workqueue_stat[cpu].lock);
-		INIT_LIST_HEAD(&all_workqueue_stat[cpu].list);
+		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
+		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
 	}
 
 	return 0;
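With static per-CPU storage there is nothing to allocate at init time, so the old pr_warning/goto no_creation failure path disappears from trace_workqueue_early_init(); a condensed view of what remains (taken from the final hunk above):

	/* Per-CPU storage exists from boot onward, so setup cannot fail here;
	 * only each CPU's lock and list head need to be initialized. */
	for_each_possible_cpu(cpu) {
		spin_lock_init(&workqueue_cpu_stat(cpu)->lock);
		INIT_LIST_HEAD(&workqueue_cpu_stat(cpu)->list);
	}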