Diffstat (limited to 'kernel')

 kernel/trace/Kconfig | 11 -----------
 kernel/workqueue.c   | 14 +++-----------
 2 files changed, 3 insertions(+), 22 deletions(-)
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 8b1797c4545b..a0d95c1f3f82 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -391,17 +391,6 @@ config KMEMTRACE
 
 	  If unsure, say N.
 
-config WORKQUEUE_TRACER
-	bool "Trace workqueues"
-	select GENERIC_TRACER
-	help
-	  The workqueue tracer provides some statistical information
-	  about each cpu workqueue thread such as the number of the
-	  works inserted and executed since their creation. It can help
-	  to evaluate the amount of work each of them has to perform.
-	  For example it can help a developer to decide whether he should
-	  choose a per-cpu workqueue instead of a singlethreaded one.
-
 config BLK_DEV_IO_TRACE
 	bool "Support for tracing block IO actions"
 	depends on SYSFS
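
Note: the removed Kconfig help above describes simple per-worker bookkeeping -- how many work items each cpu workqueue thread has had inserted and has executed since its creation. As a rough user-space illustration of that idea (not kernel code; the names wq_stats, wq_record_insertion and wq_record_execution are invented for this sketch and do not exist in the tree), such counters could look like:

/*
 * Illustrative sketch only -- ordinary user-space C, not kernel code.
 * All identifiers here are made up for the example.
 */
#include <stdio.h>

struct wq_stats {
	const char   *name;	/* worker thread identifier, e.g. "events/0" */
	unsigned long inserted;	/* work items queued since creation */
	unsigned long executed;	/* work items completed since creation */
};

static void wq_record_insertion(struct wq_stats *s) { s->inserted++; }
static void wq_record_execution(struct wq_stats *s) { s->executed++; }

int main(void)
{
	struct wq_stats cpu0 = { .name = "events/0" };

	/* simulate three works queued, two of them already run */
	wq_record_insertion(&cpu0);
	wq_record_insertion(&cpu0);
	wq_record_insertion(&cpu0);
	wq_record_execution(&cpu0);
	wq_record_execution(&cpu0);

	printf("%s: inserted=%lu executed=%lu pending=%lu\n",
	       cpu0.name, cpu0.inserted, cpu0.executed,
	       cpu0.inserted - cpu0.executed);
	return 0;
}
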
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8e3082b76c7f..f7ab703285a6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,8 +33,6 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
-#define CREATE_TRACE_POINTS
-#include <trace/events/workqueue.h>
 
 /*
  * Structure fields follow one of the following exclusion rules.
@@ -243,10 +241,10 @@ static inline void clear_wq_data(struct work_struct *work)
 	atomic_long_set(&work->data, work_static(work));
 }
 
-static inline
-struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
+static inline struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 {
-	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
+	return (void *)(atomic_long_read(&work->data) &
+			WORK_STRUCT_WQ_DATA_MASK);
 }
 
 /**
@@ -265,8 +263,6 @@ static void insert_work(struct cpu_workqueue_struct *cwq,
 			struct work_struct *work, struct list_head *head,
 			unsigned int extra_flags)
 {
-	trace_workqueue_insertion(cwq->thread, work);
-
 	/* we own @work, set data and link */
 	set_wq_data(work, cwq, extra_flags);
 
@@ -431,7 +427,6 @@ static void process_one_work(struct cpu_workqueue_struct *cwq,
 	struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
 	/* claim and process */
-	trace_workqueue_execution(cwq->thread, work);
 	debug_work_deactivate(work);
 	cwq->current_work = work;
 	list_del_init(&work->entry);
@@ -1017,8 +1012,6 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 		return PTR_ERR(p);
 	cwq->thread = p;
 
-	trace_workqueue_creation(cwq->thread, cpu);
-
 	return 0;
 }
 
@@ -1123,7 +1116,6 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	 * checks list_empty(), and a "normal" queue_work() can't use
 	 * a dead CPU.
 	 */
-	trace_workqueue_destruction(cwq->thread);
 	kthread_stop(cwq->thread);
 	cwq->thread = NULL;
 }