-rw-r--r--  block/Kconfig                 |  12
-rw-r--r--  block/Makefile                |   2
-rw-r--r--  block/blktrace.c              | 538
-rw-r--r--  block/elevator.c              |   4
-rw-r--r--  block/ioctl.c                 |   6
-rw-r--r--  block/ll_rw_blk.c             |  44
-rw-r--r--  drivers/block/cciss.c         |   2
-rw-r--r--  drivers/md/dm.c               |  13
-rw-r--r--  fs/bio.c                      |   4
-rw-r--r--  fs/compat_ioctl.c             |   1
-rw-r--r--  include/linux/blkdev.h        |   3
-rw-r--r--  include/linux/blktrace_api.h  | 277
-rw-r--r--  include/linux/compat_ioctl.h  |   4
-rw-r--r--  include/linux/fs.h            |   4
-rw-r--r--  include/linux/sched.h         |   1
-rw-r--r--  kernel/fork.c                 |   1
-rw-r--r--  mm/highmem.c                  |   3
17 files changed, 916 insertions(+), 3 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 377f6dd20e17..96783645092d 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -11,4 +11,16 @@ config LBD
 	  your machine, or if you want to have a raid or loopback device
 	  bigger than 2TB. Otherwise say N.
 
+config BLK_DEV_IO_TRACE
+	bool "Support for tracing block io actions"
+	select RELAY
+	select DEBUG_FS
+	help
+	  Say Y here if you want to be able to trace the block layer actions
+	  on a given queue. Tracing allows you to see any traffic happening
+	  on a block device queue. For more information (and the user space
+	  support tools needed), fetch the blktrace app from:
+
+	  git://brick.kernel.dk/data/git/blktrace.git
+
 source block/Kconfig.iosched
diff --git a/block/Makefile b/block/Makefile
index 7e4f93e2b44e..c05de0e0037f 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -8,3 +8,5 @@ obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
 obj-$(CONFIG_IOSCHED_AS)	+= as-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
+
+obj-$(CONFIG_BLK_DEV_IO_TRACE)	+= blktrace.o
diff --git a/block/blktrace.c b/block/blktrace.c
new file mode 100644
index 000000000000..36f3a172275f
--- /dev/null
+++ b/block/blktrace.c
@@ -0,0 +1,538 @@
+/*
+ * Copyright (C) 2006 Jens Axboe <axboe@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/blkdev.h>
+#include <linux/blktrace_api.h>
+#include <linux/percpu.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <asm/uaccess.h>
+
+static DEFINE_PER_CPU(unsigned long long, blk_trace_cpu_offset) = { 0, };
+static unsigned int blktrace_seq __read_mostly = 1;
+
+/*
+ * Send out a notify for this process, if we haven't done so since a trace
+ * started
+ */
+static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
+{
+	struct blk_io_trace *t;
+
+	t = relay_reserve(bt->rchan, sizeof(*t) + sizeof(tsk->comm));
+	if (t) {
+		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+		t->device = bt->dev;
+		t->action = BLK_TC_ACT(BLK_TC_NOTIFY);
+		t->pid = tsk->pid;
+		t->cpu = smp_processor_id();
+		t->pdu_len = sizeof(tsk->comm);
+		memcpy((void *) t + sizeof(*t), tsk->comm, t->pdu_len);
+		tsk->btrace_seq = blktrace_seq;
+	}
+}
+
+static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
+			 pid_t pid)
+{
+	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
+		return 1;
+	if (sector < bt->start_lba || sector > bt->end_lba)
+		return 1;
+	if (bt->pid && pid != bt->pid)
+		return 1;
+
+	return 0;
+}
+
+/*
+ * Data direction bit lookup
+ */
+static u32 ddir_act[2] __read_mostly = { BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE) };
+
+/*
+ * Bio action bits of interest
+ */
+static u32 bio_act[3] __read_mostly = { 0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC) };
+
+/*
+ * More could be added as needed, taking care to increment the decrementer
+ * to get correct indexing
+ */
+#define trace_barrier_bit(rw)	\
+	(((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
+#define trace_sync_bit(rw)	\
+	(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
+
+/*
+ * The worker for the various blk_add_trace*() types. Fills out a
+ * blk_io_trace structure and places it in a per-cpu subbuffer.
+ */
+void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
+{
+	struct task_struct *tsk = current;
+	struct blk_io_trace *t;
+	unsigned long flags;
+	unsigned long *sequence;
+	pid_t pid;
+	int cpu;
+
+	if (unlikely(bt->trace_state != Blktrace_running))
+		return;
+
+	what |= ddir_act[rw & WRITE];
+	what |= bio_act[trace_barrier_bit(rw)];
+	what |= bio_act[trace_sync_bit(rw)];
+
+	pid = tsk->pid;
+	if (unlikely(act_log_check(bt, what, sector, pid)))
+		return;
+
+	/*
+	 * A word about the locking here - we disable interrupts to reserve
+	 * some space in the relay per-cpu buffer, to prevent an irq
+	 * from coming in and stepping on our toes. Once reserved, it's
+	 * enough to get preemption disabled to prevent read of this data
+	 * before we are through filling it. get_cpu()/put_cpu() does this
+	 * for us
+	 */
+	local_irq_save(flags);
+
+	if (unlikely(tsk->btrace_seq != blktrace_seq))
+		trace_note_tsk(bt, tsk);
+
+	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
+	if (t) {
+		cpu = smp_processor_id();
+		sequence = per_cpu_ptr(bt->sequence, cpu);
+
+		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
+		t->sequence = ++(*sequence);
+		t->time = sched_clock() - per_cpu(blk_trace_cpu_offset, cpu);
+		t->sector = sector;
+		t->bytes = bytes;
+		t->action = what;
+		t->pid = pid;
+		t->device = bt->dev;
+		t->cpu = cpu;
+		t->error = error;
+		t->pdu_len = pdu_len;
+
+		if (pdu_len)
+			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
+	}
+
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(__blk_add_trace);
+
+static struct dentry *blk_tree_root;
+static struct mutex blk_tree_mutex;
+static unsigned int root_users;
+
+static inline void blk_remove_root(void)
+{
+	if (blk_tree_root) {
+		debugfs_remove(blk_tree_root);
+		blk_tree_root = NULL;
+	}
+}
+
+static void blk_remove_tree(struct dentry *dir)
+{
+	mutex_lock(&blk_tree_mutex);
+	debugfs_remove(dir);
+	if (--root_users == 0)
+		blk_remove_root();
+	mutex_unlock(&blk_tree_mutex);
+}
+
+static struct dentry *blk_create_tree(const char *blk_name)
+{
+	struct dentry *dir = NULL;
+
+	mutex_lock(&blk_tree_mutex);
+
+	if (!blk_tree_root) {
+		blk_tree_root = debugfs_create_dir("block", NULL);
+		if (!blk_tree_root)
+			goto err;
+	}
+
+	dir = debugfs_create_dir(blk_name, blk_tree_root);
+	if (dir)
+		root_users++;
+	else
+		blk_remove_root();
+
+err:
+	mutex_unlock(&blk_tree_mutex);
+	return dir;
+}
+
+static void blk_trace_cleanup(struct blk_trace *bt)
+{
+	relay_close(bt->rchan);
+	debugfs_remove(bt->dropped_file);
+	blk_remove_tree(bt->dir);
+	free_percpu(bt->sequence);
+	kfree(bt);
+}
+
+static int blk_trace_remove(request_queue_t *q)
+{
+	struct blk_trace *bt;
+
+	bt = xchg(&q->blk_trace, NULL);
+	if (!bt)
+		return -EINVAL;
+
+	if (bt->trace_state == Blktrace_setup ||
+	    bt->trace_state == Blktrace_stopped)
+		blk_trace_cleanup(bt);
+
+	return 0;
+}
+
+static int blk_dropped_open(struct inode *inode, struct file *filp)
+{
+	filp->private_data = inode->u.generic_ip;
+
+	return 0;
+}
+
+static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
+				size_t count, loff_t *ppos)
+{
+	struct blk_trace *bt = filp->private_data;
+	char buf[16];
+
+	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
+
+	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+}
+
+static struct file_operations blk_dropped_fops = {
+	.owner =	THIS_MODULE,
+	.open =		blk_dropped_open,
+	.read =		blk_dropped_read,
+};
+
+/*
+ * Keep track of how many times we encountered a full subbuffer, to aid
+ * the user space app in telling how many lost events there were.
+ */
+static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
+				     void *prev_subbuf, size_t prev_padding)
+{
+	struct blk_trace *bt;
+
+	if (!relay_buf_full(buf))
+		return 1;
+
+	bt = buf->chan->private_data;
+	atomic_inc(&bt->dropped);
+	return 0;
+}
+
+static int blk_remove_buf_file_callback(struct dentry *dentry)
+{
+	debugfs_remove(dentry);
+	return 0;
+}
+
+static struct dentry *blk_create_buf_file_callback(const char *filename,
+						   struct dentry *parent,
+						   int mode,
+						   struct rchan_buf *buf,
+						   int *is_global)
+{
+	return debugfs_create_file(filename, mode, parent, buf,
+					&relay_file_operations);
+}
+
+static struct rchan_callbacks blk_relay_callbacks = {
+	.subbuf_start		= blk_subbuf_start_callback,
+	.create_buf_file	= blk_create_buf_file_callback,
+	.remove_buf_file	= blk_remove_buf_file_callback,
+};
+
+/*
+ * Setup everything required to start tracing
+ */
+static int blk_trace_setup(request_queue_t *q, struct block_device *bdev,
+			   char __user *arg)
+{
+	struct blk_user_trace_setup buts;
+	struct blk_trace *old_bt, *bt = NULL;
+	struct dentry *dir = NULL;
+	char b[BDEVNAME_SIZE];
+	int ret, i;
+
+	if (copy_from_user(&buts, arg, sizeof(buts)))
+		return -EFAULT;
+
+	if (!buts.buf_size || !buts.buf_nr)
+		return -EINVAL;
+
+	strcpy(buts.name, bdevname(bdev, b));
+
+	/*
+	 * some device names have larger paths - convert the slashes
+	 * to underscores for this to work as expected
+	 */
+	for (i = 0; i < strlen(buts.name); i++)
+		if (buts.name[i] == '/')
+			buts.name[i] = '_';
+
+	if (copy_to_user(arg, &buts, sizeof(buts)))
+		return -EFAULT;
+
+	ret = -ENOMEM;
+	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
+	if (!bt)
+		goto err;
+
+	bt->sequence = alloc_percpu(unsigned long);
+	if (!bt->sequence)
+		goto err;
+
+	ret = -ENOENT;
+	dir = blk_create_tree(buts.name);
+	if (!dir)
+		goto err;
+
+	bt->dir = dir;
+	bt->dev = bdev->bd_dev;
+	atomic_set(&bt->dropped, 0);
+
+	ret = -EIO;
+	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt, &blk_dropped_fops);
+	if (!bt->dropped_file)
+		goto err;
+
+	bt->rchan = relay_open("trace", dir, buts.buf_size, buts.buf_nr, &blk_relay_callbacks);
+	if (!bt->rchan)
+		goto err;
+	bt->rchan->private_data = bt;
+
+	bt->act_mask = buts.act_mask;
+	if (!bt->act_mask)
+		bt->act_mask = (u16) -1;
+
+	bt->start_lba = buts.start_lba;
+	bt->end_lba = buts.end_lba;
+	if (!bt->end_lba)
+		bt->end_lba = -1ULL;
+
+	bt->pid = buts.pid;
+	bt->trace_state = Blktrace_setup;
+
+	ret = -EBUSY;
+	old_bt = xchg(&q->blk_trace, bt);
+	if (old_bt) {
+		(void) xchg(&q->blk_trace, old_bt);
+		goto err;
+	}
+
+	return 0;
+err:
+	if (dir)
+		blk_remove_tree(dir);
+	if (bt) {
+		if (bt->dropped_file)
+			debugfs_remove(bt->dropped_file);
+		if (bt->sequence)
+			free_percpu(bt->sequence);
+		if (bt->rchan)
+			relay_close(bt->rchan);
+		kfree(bt);
+	}
+	return ret;
+}
+
+static int blk_trace_startstop(request_queue_t *q, int start)
+{
+	struct blk_trace *bt;
+	int ret;
+
+	if ((bt = q->blk_trace) == NULL)
+		return -EINVAL;
+
+	/*
+	 * For starting a trace, we can transition from a setup or stopped
+	 * trace. For stopping a trace, the state must be running
+	 */
+	ret = -EINVAL;
+	if (start) {
+		if (bt->trace_state == Blktrace_setup ||
+		    bt->trace_state == Blktrace_stopped) {
+			blktrace_seq++;
+			smp_mb();
+			bt->trace_state = Blktrace_running;
+			ret = 0;
+		}
+	} else {
+		if (bt->trace_state == Blktrace_running) {
+			bt->trace_state = Blktrace_stopped;
+			relay_flush(bt->rchan);
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * blk_trace_ioctl: - handle the ioctls associated with tracing
+ * @bdev:	the block device
+ * @cmd:	the ioctl cmd
+ * @arg:	the argument data, if any
+ *
+ **/
+int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
+{
+	request_queue_t *q;
+	int ret, start = 0;
+
+	q = bdev_get_queue(bdev);
+	if (!q)
+		return -ENXIO;
+
+	mutex_lock(&bdev->bd_mutex);
+
+	switch (cmd) {
+	case BLKTRACESETUP:
+		ret = blk_trace_setup(q, bdev, arg);
+		break;
+	case BLKTRACESTART:
+		start = 1;
+	case BLKTRACESTOP:
+		ret = blk_trace_startstop(q, start);
+		break;
+	case BLKTRACETEARDOWN:
+		ret = blk_trace_remove(q);
+		break;
+	default:
+		ret = -ENOTTY;
+		break;
+	}
+
+	mutex_unlock(&bdev->bd_mutex);
+	return ret;
+}
+
+/**
+ * blk_trace_shutdown: - stop and cleanup trace structures
+ * @q:    the request queue associated with the device
+ *
+ **/
+void blk_trace_shutdown(request_queue_t *q)
+{
+	blk_trace_startstop(q, 0);
+	blk_trace_remove(q);
+}
+
+/*
+ * Average offset over two calls to sched_clock() with a gettimeofday()
+ * in the middle
+ */
+static void blk_check_time(unsigned long long *t)
+{
+	unsigned long long a, b;
+	struct timeval tv;
+
+	a = sched_clock();
+	do_gettimeofday(&tv);
+	b = sched_clock();
+
+	*t = tv.tv_sec * 1000000000 + tv.tv_usec * 1000;
+	*t -= (a + b) / 2;
+}
+
+static void blk_trace_check_cpu_time(void *data)
+{
+	unsigned long long *t;
+	int cpu = get_cpu();
+
+	t = &per_cpu(blk_trace_cpu_offset, cpu);
+
+	/*
+	 * Just call it twice, hopefully the second call will be cache hot
+	 * and a little more precise
+	 */
+	blk_check_time(t);
+	blk_check_time(t);
+
+	put_cpu();
+}
+
+/*
+ * Call blk_trace_check_cpu_time() on each CPU to calibrate our inter-CPU
+ * timings
+ */
+static void blk_trace_calibrate_offsets(void)
+{
+	unsigned long flags;
+
+	smp_call_function(blk_trace_check_cpu_time, NULL, 1, 1);
+	local_irq_save(flags);
+	blk_trace_check_cpu_time(NULL);
+	local_irq_restore(flags);
+}
+
+static void blk_trace_set_ht_offsets(void)
+{
+#if defined(CONFIG_SCHED_SMT)
+	int cpu, i;
+
+	/*
+	 * now make sure HT siblings have the same time offset
+	 */
+	preempt_disable();
+	for_each_online_cpu(cpu) {
+		unsigned long long *cpu_off, *sibling_off;
+
+		for_each_cpu_mask(i, cpu_sibling_map[cpu]) {
+			if (i == cpu)
+				continue;
+
+			cpu_off = &per_cpu(blk_trace_cpu_offset, cpu);
+			sibling_off = &per_cpu(blk_trace_cpu_offset, i);
+			*sibling_off = *cpu_off;
+		}
+	}
+	preempt_enable();
+#endif
+}
+
+static __init int blk_trace_init(void)
+{
+	mutex_init(&blk_tree_mutex);
+	blk_trace_calibrate_offsets();
+	blk_trace_set_ht_offsets();
+
+	return 0;
+}
+
+module_init(blk_trace_init);
+
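[Editor's note: for context, user space drives the interface above roughly as follows. This is a minimal sketch, not part of the patch, assuming debugfs is mounted at /sys/kernel/debug and that the patched <linux/fs.h> and <linux/blktrace_api.h> definitions are visible to a user-space build (the real consumer, the blktrace tool from the URL in the Kconfig help, carries its own copies). The buf_size/buf_nr values are arbitrary illustrative choices.

/* blktrace-mini.c - hedged illustration, not part of the patch */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>			/* BLKTRACE* ioctl numbers (patched) */
#include <linux/blktrace_api.h>		/* struct blk_user_trace_setup (patched) */

int main(int argc, char **argv)
{
	struct blk_user_trace_setup buts;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0) {
		perror("open");		/* e.g. ./blktrace-mini /dev/sda */
		return 1;
	}

	memset(&buts, 0, sizeof(buts));
	buts.buf_size = 512 * 1024;	/* bytes per relay sub-buffer */
	buts.buf_nr = 4;		/* sub-buffers per cpu */
	buts.act_mask = 0;		/* 0: kernel substitutes "all actions" */

	if (ioctl(fd, BLKTRACESETUP, &buts) < 0) {
		perror("BLKTRACESETUP");
		return 1;
	}

	/*
	 * The kernel filled in buts.name; per-cpu event streams are now
	 * readable from /sys/kernel/debug/block/<name>/trace<cpu>, and the
	 * lost-event count from .../dropped.
	 */
	printf("tracing %s for 10s\n", buts.name);
	ioctl(fd, BLKTRACESTART);
	sleep(10);			/* a real tool consumes the relay files here */
	ioctl(fd, BLKTRACESTOP);
	ioctl(fd, BLKTRACETEARDOWN);
	close(fd);
	return 0;
}

Note how this matches blk_trace_setup() above: act_mask of 0 becomes (u16) -1, end_lba of 0 becomes -1ULL, and a second BLKTRACESETUP without a teardown fails with -EBUSY.]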
diff --git a/block/elevator.c b/block/elevator.c
index db3d0d8296a0..5e558c4689a4 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -33,6 +33,7 @@
 #include <linux/init.h>
 #include <linux/compiler.h>
 #include <linux/delay.h>
+#include <linux/blktrace_api.h>
 
 #include <asm/uaccess.h>
 
@@ -333,6 +334,8 @@ void elv_insert(request_queue_t *q, struct request *rq, int where)
 	struct list_head *pos;
 	unsigned ordseq;
 
+	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+
 	rq->q = q;
 
 	switch (where) {
@@ -499,6 +502,7 @@ struct request *elv_next_request(request_queue_t *q)
 			 * not be passed by new incoming requests
 			 */
			rq->flags |= REQ_STARTED;
+			blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
 		}
 
 		if (!q->boundary_rq || q->boundary_rq == rq) {
diff --git a/block/ioctl.c b/block/ioctl.c
index 35fdb7dc6512..9cfa2e1ecb24 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -5,6 +5,7 @@
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/smp_lock.h>
+#include <linux/blktrace_api.h>
 #include <asm/uaccess.h>
 
 static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user *arg)
@@ -189,6 +190,11 @@ static int blkdev_locked_ioctl(struct file *file, struct block_device *bdev,
 		return put_ulong(arg, bdev->bd_inode->i_size >> 9);
 	case BLKGETSIZE64:
 		return put_u64(arg, bdev->bd_inode->i_size);
+	case BLKTRACESTART:
+	case BLKTRACESTOP:
+	case BLKTRACESETUP:
+	case BLKTRACETEARDOWN:
+		return blk_trace_ioctl(bdev, cmd, (char __user *) arg);
 	}
 	return -ENOIOCTLCMD;
 }
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 6c793b196aa9..062067fa7ead 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -28,6 +28,7 @@
 #include <linux/writeback.h>
 #include <linux/interrupt.h>
 #include <linux/cpu.h>
+#include <linux/blktrace_api.h>
 
 /*
  * for max sense size
@@ -1556,8 +1557,10 @@ void blk_plug_device(request_queue_t *q)
 	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
 		return;
 
-	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags))
+	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
 		mod_timer(&q->unplug_timer, jiffies + q->unplug_delay);
+		blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG);
+	}
 }
 
 EXPORT_SYMBOL(blk_plug_device);
@@ -1621,14 +1624,21 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 	/*
 	 * devices don't necessarily have an ->unplug_fn defined
 	 */
-	if (q->unplug_fn)
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+					q->rq.count[READ] + q->rq.count[WRITE]);
+
 		q->unplug_fn(q);
+	}
 }
 
 static void blk_unplug_work(void *data)
 {
 	request_queue_t *q = data;
 
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+				q->rq.count[READ] + q->rq.count[WRITE]);
+
 	q->unplug_fn(q);
 }
 
@@ -1636,6 +1646,9 @@ static void blk_unplug_timeout(unsigned long data)
 {
 	request_queue_t *q = (request_queue_t *)data;
 
+	blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_TIMER, NULL,
+				q->rq.count[READ] + q->rq.count[WRITE]);
+
 	kblockd_schedule_work(&q->unplug_work);
 }
 
@@ -1753,6 +1766,9 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
+	if (q->blk_trace)
+		blk_trace_shutdown(q);
+
 	kmem_cache_free(requestq_cachep, q);
 }
 
@@ -2129,6 +2145,8 @@ rq_starved:
 
 	rq_init(q, rq);
 	rq->rl = rl;
+
+	blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ);
 out:
 	return rq;
 }
@@ -2157,6 +2175,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		if (!rq) {
 			struct io_context *ioc;
 
+			blk_add_trace_generic(q, bio, rw, BLK_TA_SLEEPRQ);
+
 			__generic_unplug_device(q);
 			spin_unlock_irq(q->queue_lock);
 			io_schedule();
@@ -2210,6 +2230,8 @@ EXPORT_SYMBOL(blk_get_request);
 */
 void blk_requeue_request(request_queue_t *q, struct request *rq)
 {
+	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+
 	if (blk_rq_tagged(rq))
 		blk_queue_end_tag(q, rq);
 
@@ -2844,6 +2866,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 			if (!q->back_merge_fn(q, req, bio))
 				break;
 
+			blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+
 			req->biotail->bi_next = bio;
 			req->biotail = bio;
 			req->nr_sectors = req->hard_nr_sectors += nr_sectors;
@@ -2859,6 +2883,8 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 			if (!q->front_merge_fn(q, req, bio))
 				break;
 
+			blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+
 			bio->bi_next = req->bio;
 			req->bio = bio;
 
@@ -2976,6 +3002,7 @@ void generic_make_request(struct bio *bio)
 	request_queue_t *q;
 	sector_t maxsector;
 	int ret, nr_sectors = bio_sectors(bio);
+	dev_t old_dev;
 
 	might_sleep();
 	/* Test device or partition size, when known. */
@@ -3002,6 +3029,8 @@ void generic_make_request(struct bio *bio)
 	 * NOTE: we don't repeat the blk_size check for each new device.
 	 * Stacking drivers are expected to know what they are doing.
 	 */
+	maxsector = -1;
+	old_dev = 0;
 	do {
 		char b[BDEVNAME_SIZE];
 
@@ -3034,6 +3063,15 @@ end_io:
 		 */
 		blk_partition_remap(bio);
 
+		if (maxsector != -1)
+			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
+					    maxsector);
+
+		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+
+		maxsector = bio->bi_sector;
+		old_dev = bio->bi_bdev->bd_dev;
+
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
 }
@@ -3153,6 +3191,8 @@ static int __end_that_request_first(struct request *req, int uptodate,
 	int total_bytes, bio_nbytes, error, next_idx = 0;
 	struct bio *bio;
 
+	blk_add_trace_rq(req->q, req, BLK_TA_COMPLETE);
+
 	/*
 	 * extend uptodate bool to allow < 0 value to be direct io error
 	 */
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index e29b8926f80e..1f2890989b56 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -38,6 +38,7 @@
 #include <linux/hdreg.h>
 #include <linux/spinlock.h>
 #include <linux/compat.h>
+#include <linux/blktrace_api.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 
@@ -2331,6 +2332,7 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
 
 	cmd->rq->completion_data = cmd;
 	cmd->rq->errors = status;
+	blk_add_trace_rq(cmd->rq->q, cmd->rq, BLK_TA_COMPLETE);
 	blk_complete_request(cmd->rq);
 }
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 26b08ee425c7..8c82373f7ff3 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -17,6 +17,7 @@
 #include <linux/mempool.h>
 #include <linux/slab.h>
 #include <linux/idr.h>
+#include <linux/blktrace_api.h>
 
 static const char *_name = DM_NAME;
 
@@ -334,6 +335,8 @@ static void dec_pending(struct dm_io *io, int error)
 		/* nudge anyone waiting on suspend queue */
 		wake_up(&io->md->wait);
 
+		blk_add_trace_bio(io->md->queue, io->bio, BLK_TA_COMPLETE);
+
 		bio_endio(io->bio, io->bio->bi_size, io->error);
 		free_io(io->md, io);
 	}
@@ -392,6 +395,7 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 		      struct target_io *tio)
 {
 	int r;
+	sector_t sector;
 
 	/*
 	 * Sanity checks.
@@ -407,10 +411,17 @@ static void __map_bio(struct dm_target *ti, struct bio *clone,
 	 * this io.
 	 */
 	atomic_inc(&tio->io->io_count);
+	sector = clone->bi_sector;
 	r = ti->type->map(ti, clone, &tio->info);
-	if (r > 0)
+	if (r > 0) {
 		/* the bio has been remapped so dispatch it */
+
+		blk_add_trace_remap(bdev_get_queue(clone->bi_bdev), clone,
+				    tio->io->bio->bi_bdev->bd_dev, sector,
+				    clone->bi_sector);
+
 		generic_make_request(clone);
+	}
 
 	else if (r < 0) {
 		/* error the io and bail out */
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -25,6 +25,7 @@
 #include <linux/module.h>
 #include <linux/mempool.h>
 #include <linux/workqueue.h>
+#include <linux/blktrace_api.h>
 #include <scsi/sg.h>		/* for struct sg_iovec */
 
 #define BIO_POOL_SIZE 256
@@ -1095,6 +1096,9 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors)
 	if (!bp)
 		return bp;
 
+	blk_add_trace_pdu_int(bdev_get_queue(bi->bi_bdev), BLK_TA_SPLIT, bi,
+				bi->bi_sector + first_sectors);
+
 	BUG_ON(bi->bi_vcnt != 1);
 	BUG_ON(bi->bi_idx != 0);
 	atomic_set(&bp->cnt, 3);
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index c666769a875d..7c031f00fd79 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -72,6 +72,7 @@
 #include <linux/i2c-dev.h>
 #include <linux/wireless.h>
 #include <linux/atalk.h>
+#include <linux/blktrace_api.h>
 
 #include <net/sock.h>		/* siocdevprivate_ioctl */
 #include <net/bluetooth/bluetooth.h>
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 56bb6a4e15f3..c179966f1a2f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -22,6 +22,7 @@ typedef struct request_queue request_queue_t;
 struct elevator_queue;
 typedef struct elevator_queue elevator_t;
 struct request_pm_state;
+struct blk_trace;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -416,6 +417,8 @@ struct request_queue
 	unsigned int		sg_reserved_size;
 	int			node;
 
+	struct blk_trace	*blk_trace;
+
 	/*
 	 * reserved for flush operations
 	 */
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
new file mode 100644
index 000000000000..b34d3e73d5ea
--- /dev/null
+++ b/include/linux/blktrace_api.h
@@ -0,0 +1,277 @@
+#ifndef BLKTRACE_H
+#define BLKTRACE_H
+
+#include <linux/config.h>
+#include <linux/blkdev.h>
+#include <linux/relay.h>
+
+/*
+ * Trace categories
+ */
+enum blktrace_cat {
+	BLK_TC_READ	= 1 << 0,	/* reads */
+	BLK_TC_WRITE	= 1 << 1,	/* writes */
+	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
+	BLK_TC_SYNC	= 1 << 3,	/* sync */
+	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
+	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
+	BLK_TC_ISSUE	= 1 << 6,	/* issue */
+	BLK_TC_COMPLETE	= 1 << 7,	/* completions */
+	BLK_TC_FS	= 1 << 8,	/* fs requests */
+	BLK_TC_PC	= 1 << 9,	/* pc requests */
+	BLK_TC_NOTIFY	= 1 << 10,	/* special message */
+
+	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
+};
+
+#define BLK_TC_SHIFT		(16)
+#define BLK_TC_ACT(act)		((act) << BLK_TC_SHIFT)
+
+/*
+ * Basic trace actions
+ */
+enum blktrace_act {
+	__BLK_TA_QUEUE = 1,		/* queued */
+	__BLK_TA_BACKMERGE,		/* back merged to existing rq */
+	__BLK_TA_FRONTMERGE,		/* front merge to existing rq */
+	__BLK_TA_GETRQ,			/* allocated new request */
+	__BLK_TA_SLEEPRQ,		/* sleeping on rq allocation */
+	__BLK_TA_REQUEUE,		/* request requeued */
+	__BLK_TA_ISSUE,			/* sent to driver */
+	__BLK_TA_COMPLETE,		/* completed by driver */
+	__BLK_TA_PLUG,			/* queue was plugged */
+	__BLK_TA_UNPLUG_IO,		/* queue was unplugged by io */
+	__BLK_TA_UNPLUG_TIMER,		/* queue was unplugged by timer */
+	__BLK_TA_INSERT,		/* insert request */
+	__BLK_TA_SPLIT,			/* bio was split */
+	__BLK_TA_BOUNCE,		/* bio was bounced */
+	__BLK_TA_REMAP,			/* bio was remapped */
+};
+
+/*
+ * Trace actions in full. Additionally, read or write is masked
+ */
+#define BLK_TA_QUEUE		(__BLK_TA_QUEUE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_BACKMERGE	(__BLK_TA_BACKMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_FRONTMERGE	(__BLK_TA_FRONTMERGE | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_GETRQ		(__BLK_TA_GETRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SLEEPRQ		(__BLK_TA_SLEEPRQ | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_REQUEUE		(__BLK_TA_REQUEUE | BLK_TC_ACT(BLK_TC_REQUEUE))
+#define BLK_TA_ISSUE		(__BLK_TA_ISSUE | BLK_TC_ACT(BLK_TC_ISSUE))
+#define BLK_TA_COMPLETE		(__BLK_TA_COMPLETE | BLK_TC_ACT(BLK_TC_COMPLETE))
+#define BLK_TA_PLUG		(__BLK_TA_PLUG | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_UNPLUG_IO	(__BLK_TA_UNPLUG_IO | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_UNPLUG_TIMER	(__BLK_TA_UNPLUG_TIMER | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_INSERT		(__BLK_TA_INSERT | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_SPLIT		(__BLK_TA_SPLIT)
+#define BLK_TA_BOUNCE		(__BLK_TA_BOUNCE)
+#define BLK_TA_REMAP		(__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
+
+#define BLK_IO_TRACE_MAGIC	0x65617400
+#define BLK_IO_TRACE_VERSION	0x07
+
+/*
+ * The trace itself
+ */
+struct blk_io_trace {
+	u32 magic;		/* MAGIC << 8 | version */
+	u32 sequence;		/* event number */
+	u64 time;		/* in nanoseconds */
+	u64 sector;		/* disk offset */
+	u32 bytes;		/* transfer length */
+	u32 action;		/* what happened */
+	u32 pid;		/* who did it */
+	u32 device;		/* device number */
+	u32 cpu;		/* on what cpu did it happen */
+	u16 error;		/* completion error */
+	u16 pdu_len;		/* length of data after this trace */
+};
+
+/*
+ * The remap event
+ */
+struct blk_io_trace_remap {
+	u32 device;
+	u32 __pad;
+	u64 sector;
+};
+
+enum {
+	Blktrace_setup = 1,
+	Blktrace_running,
+	Blktrace_stopped,
+};
+
+struct blk_trace {
+	int trace_state;
+	struct rchan *rchan;
+	unsigned long *sequence;
+	u16 act_mask;
+	u64 start_lba;
+	u64 end_lba;
+	u32 pid;
+	u32 dev;
+	struct dentry *dir;
+	struct dentry *dropped_file;
+	atomic_t dropped;
+};
+
+/*
+ * User setup structure passed with BLKTRACESETUP
+ */
+struct blk_user_trace_setup {
+	char name[BDEVNAME_SIZE];	/* output */
+	u16 act_mask;			/* input */
+	u32 buf_size;			/* input */
+	u32 buf_nr;			/* input */
+	u64 start_lba;
+	u64 end_lba;
+	u32 pid;
+};
+
+#if defined(CONFIG_BLK_DEV_IO_TRACE)
+extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
+extern void blk_trace_shutdown(request_queue_t *);
+extern void __blk_add_trace(struct blk_trace *, sector_t, int, int, u32, int, int, void *);
+
+/**
+ * blk_add_trace_rq - Add a trace for a request oriented action
+ * @q:		queue the io is for
+ * @rq:		the source request
+ * @what:	the action
+ *
+ * Description:
+ *     Records an action against a request. Will log the bio offset + size.
+ *
+ **/
+static inline void blk_add_trace_rq(struct request_queue *q, struct request *rq,
+				    u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+	int rw = rq->flags & 0x07;
+
+	if (likely(!bt))
+		return;
+
+	if (blk_pc_request(rq)) {
+		what |= BLK_TC_ACT(BLK_TC_PC);
+		__blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, sizeof(rq->cmd), rq->cmd);
+	} else {
+		what |= BLK_TC_ACT(BLK_TC_FS);
+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, rw, what, rq->errors, 0, NULL);
+	}
+}
+
+/**
+ * blk_add_trace_bio - Add a trace for a bio oriented action
+ * @q:		queue the io is for
+ * @bio:	the source bio
+ * @what:	the action
+ *
+ * Description:
+ *     Records an action against a bio. Will log the bio offset + size.
+ *
+ **/
+static inline void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
+				     u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+}
+
+/**
+ * blk_add_trace_generic - Add a trace for a generic action
+ * @q:		queue the io is for
+ * @bio:	the source bio
+ * @rw:		the data direction
+ * @what:	the action
+ *
+ * Description:
+ *     Records a simple trace
+ *
+ **/
+static inline void blk_add_trace_generic(struct request_queue *q,
+					 struct bio *bio, int rw, u32 what)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	if (bio)
+		blk_add_trace_bio(q, bio, what);
+	else
+		__blk_add_trace(bt, 0, 0, rw, what, 0, 0, NULL);
+}
+
+/**
+ * blk_add_trace_pdu_int - Add a trace for a bio with an integer payload
+ * @q:		queue the io is for
+ * @what:	the action
+ * @bio:	the source bio
+ * @pdu:	the integer payload
+ *
+ * Description:
+ *     Adds a trace with some integer payload. This might be an unplug
+ *     option given as the action, with the depth at unplug time given
+ *     as the payload
+ *
+ **/
+static inline void blk_add_trace_pdu_int(struct request_queue *q, u32 what,
+					 struct bio *bio, unsigned int pdu)
+{
+	struct blk_trace *bt = q->blk_trace;
+	u64 rpdu = cpu_to_be64(pdu);
+
+	if (likely(!bt))
+		return;
+
+	if (bio)
+		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what, !bio_flagged(bio, BIO_UPTODATE), sizeof(rpdu), &rpdu);
+	else
+		__blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
+}
+
+/**
+ * blk_add_trace_remap - Add a trace for a remap operation
+ * @q:		queue the io is for
+ * @bio:	the source bio
+ * @dev:	target device
+ * @from:	source sector
+ * @to:		target sector
+ *
+ * Description:
+ *     Device mapper or raid target sometimes need to split a bio because
+ *     it spans a stripe (or similar). Add a trace for that action.
+ *
+ **/
+static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
+				       dev_t dev, sector_t from, sector_t to)
+{
+	struct blk_trace *bt = q->blk_trace;
+	struct blk_io_trace_remap r;
+
+	if (likely(!bt))
+		return;
+
+	r.device = cpu_to_be32(dev);
+	r.sector = cpu_to_be64(to);
+
+	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
+}
+
+#else /* !CONFIG_BLK_DEV_IO_TRACE */
+#define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
+#define blk_trace_shutdown(q)			do { } while (0)
+#define blk_add_trace_rq(q, rq, what)		do { } while (0)
+#define blk_add_trace_bio(q, rq, what)		do { } while (0)
+#define blk_add_trace_generic(q, rq, rw, what)	do { } while (0)
+#define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
+#define blk_add_trace_remap(q, bio, dev, f, t)	do { } while (0)
+#endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+#endif
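[Editor's note: a word on consuming these records. Everything in struct blk_io_trace is written in CPU-native endianness; only the pdu payloads built by blk_add_trace_pdu_int() and blk_add_trace_remap() above are converted to big-endian. A minimal decoder sketch under that assumption, not part of the patch; the mini_* names are hypothetical and re-declare the layout above with fixed-width types for a user-space build:

/* mini-decode.c - hedged illustration of parsing one relay record */
#include <stdint.h>
#include <stdio.h>

#define MINI_BLK_IO_TRACE_MAGIC	0x65617400	/* mirrors BLK_IO_TRACE_MAGIC */
#define MINI_BLK_TC_SHIFT	16		/* mirrors BLK_TC_SHIFT */

struct mini_blk_io_trace {
	uint32_t magic;		/* MAGIC << 8 | version */
	uint32_t sequence;	/* per-cpu event number */
	uint64_t time;		/* nanoseconds, sched_clock() based */
	uint64_t sector;	/* disk offset */
	uint32_t bytes;		/* transfer length */
	uint32_t action;	/* category bits << 16 | action */
	uint32_t pid;
	uint32_t device;
	uint32_t cpu;
	uint16_t error;
	uint16_t pdu_len;	/* payload bytes following this record */
};

/* returns pdu_len so the caller can skip the payload, or -1 on bad magic */
static int mini_dump_event(const struct mini_blk_io_trace *t)
{
	if ((t->magic & 0xffffff00) != MINI_BLK_IO_TRACE_MAGIC)
		return -1;	/* stream out of sync */

	printf("cpu%u seq%u pid%u: sector %llu, %u bytes, act 0x%x cat 0x%x err %u\n",
	       t->cpu, t->sequence, t->pid,
	       (unsigned long long) t->sector, t->bytes,
	       t->action & 0xffff, t->action >> MINI_BLK_TC_SHIFT, t->error);

	return t->pdu_len;
}

Interpreting the payload depends on the action: for BLK_TA_REMAP it is a struct blk_io_trace_remap (big-endian device and destination sector), for the two unplug events it is a big-endian u64 queue depth, and for BLK_TC_NOTIFY it is the process comm[] emitted by trace_note_tsk(). The blktrace/blkparse tools do this for real.]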
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index ae7dfb790df3..efb518f16bb3 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -97,6 +97,10 @@ COMPATIBLE_IOCTL(BLKRRPART)
 COMPATIBLE_IOCTL(BLKFLSBUF)
 COMPATIBLE_IOCTL(BLKSECTSET)
 COMPATIBLE_IOCTL(BLKSSZGET)
+COMPATIBLE_IOCTL(BLKTRACESTART)
+COMPATIBLE_IOCTL(BLKTRACESTOP)
+COMPATIBLE_IOCTL(BLKTRACESETUP)
+COMPATIBLE_IOCTL(BLKTRACETEARDOWN)
 ULONG_IOCTL(BLKRASET)
 ULONG_IOCTL(BLKFRASET)
 /* RAID */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f9c9dea636d0..9b34a1b03455 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -197,6 +197,10 @@ extern int dir_notify_enable;
 #define BLKBSZGET  _IOR(0x12,112,size_t)
 #define BLKBSZSET  _IOW(0x12,113,size_t)
 #define BLKGETSIZE64 _IOR(0x12,114,size_t)	/* return device size in bytes (u64 *arg) */
+#define BLKTRACESETUP _IOWR(0x12,115,struct blk_user_trace_setup)
+#define BLKTRACESTART _IO(0x12,116)
+#define BLKTRACESTOP _IO(0x12,117)
+#define BLKTRACETEARDOWN _IO(0x12,118)
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 62e6314382f0..e60a91d5b369 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -706,6 +706,7 @@ struct task_struct {
 	prio_array_t *array;
 
 	unsigned short ioprio;
+	unsigned int btrace_seq;
 
 	unsigned long sleep_avg;
 	unsigned long long timestamp, last_ran;
diff --git a/kernel/fork.c b/kernel/fork.c
index c79ae0b19a49..c21bae8c93b9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -181,6 +181,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	/* One for us, one for whoever does the "release_task()" (usually parent) */
 	atomic_set(&tsk->usage,2);
 	atomic_set(&tsk->fs_excl, 0);
+	tsk->btrace_seq = 0;
 	return tsk;
 }
 
diff --git a/mm/highmem.c b/mm/highmem.c
index ce2e7e8bbfa7..d0ea1eec6a9a 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -26,6 +26,7 @@
 #include <linux/init.h>
 #include <linux/hash.h>
 #include <linux/highmem.h>
+#include <linux/blktrace_api.h>
 #include <asm/tlbflush.h>
 
 static mempool_t *page_pool, *isa_page_pool;
@@ -483,6 +484,8 @@ void blk_queue_bounce(request_queue_t *q, struct bio **bio_orig)
 		pool = isa_page_pool;
 	}
 
+	blk_add_trace_bio(q, *bio_orig, BLK_TA_BOUNCE);
+
 	/*
 	 * slow path
 	 */