about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--  fs/fs-writeback.c              5
-rw-r--r--  include/trace/events/writeback.h  64
-rw-r--r--  mm/page-writeback.c            4
3 files changed, 73 insertions, 0 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 73acab4dc2b7..bf10cbf379dd 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -656,10 +656,14 @@ static long wb_writeback(struct bdi_writeback *wb,
656 wbc.more_io = 0; 656 wbc.more_io = 0;
657 wbc.nr_to_write = MAX_WRITEBACK_PAGES; 657 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
658 wbc.pages_skipped = 0; 658 wbc.pages_skipped = 0;
659
660 trace_wbc_writeback_start(&wbc, wb->bdi);
659 if (work->sb) 661 if (work->sb)
660 __writeback_inodes_sb(work->sb, wb, &wbc); 662 __writeback_inodes_sb(work->sb, wb, &wbc);
661 else 663 else
662 writeback_inodes_wb(wb, &wbc); 664 writeback_inodes_wb(wb, &wbc);
665 trace_wbc_writeback_written(&wbc, wb->bdi);
666
663 work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; 667 work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
664 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; 668 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
665 669
@@ -687,6 +691,7 @@ static long wb_writeback(struct bdi_writeback *wb,
687 if (!list_empty(&wb->b_more_io)) { 691 if (!list_empty(&wb->b_more_io)) {
688 inode = list_entry(wb->b_more_io.prev, 692 inode = list_entry(wb->b_more_io.prev,
689 struct inode, i_list); 693 struct inode, i_list);
694 trace_wbc_writeback_wait(&wbc, wb->bdi);
690 inode_wait_for_writeback(inode); 695 inode_wait_for_writeback(inode);
691 } 696 }
692 spin_unlock(&inode_lock); 697 spin_unlock(&inode_lock);
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 562fcae10d9d..0be26acae064 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -85,6 +85,70 @@ DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
85DEFINE_WRITEBACK_EVENT(writeback_thread_start); 85DEFINE_WRITEBACK_EVENT(writeback_thread_start);
86DEFINE_WRITEBACK_EVENT(writeback_thread_stop); 86DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
87 87
88DECLARE_EVENT_CLASS(wbc_class,
89 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
90 TP_ARGS(wbc, bdi),
91 TP_STRUCT__entry(
92 __array(char, name, 32)
93 __field(long, nr_to_write)
94 __field(long, pages_skipped)
95 __field(int, sync_mode)
96 __field(int, nonblocking)
97 __field(int, encountered_congestion)
98 __field(int, for_kupdate)
99 __field(int, for_background)
100 __field(int, for_reclaim)
101 __field(int, range_cyclic)
102 __field(int, more_io)
103 __field(unsigned long, older_than_this)
104 __field(long, range_start)
105 __field(long, range_end)
106 ),
107
108 TP_fast_assign(
109 strncpy(__entry->name, dev_name(bdi->dev), 32);
110 __entry->nr_to_write = wbc->nr_to_write;
111 __entry->pages_skipped = wbc->pages_skipped;
112 __entry->sync_mode = wbc->sync_mode;
113 __entry->for_kupdate = wbc->for_kupdate;
114 __entry->for_background = wbc->for_background;
115 __entry->for_reclaim = wbc->for_reclaim;
116 __entry->range_cyclic = wbc->range_cyclic;
117 __entry->more_io = wbc->more_io;
118 __entry->older_than_this = wbc->older_than_this ?
119 *wbc->older_than_this : 0;
120 __entry->range_start = (long)wbc->range_start;
121 __entry->range_end = (long)wbc->range_end;
122 ),
123
124 TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
125 "bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx "
126 "start=0x%lx end=0x%lx",
127 __entry->name,
128 __entry->nr_to_write,
129 __entry->pages_skipped,
130 __entry->sync_mode,
131 __entry->for_kupdate,
132 __entry->for_background,
133 __entry->for_reclaim,
134 __entry->range_cyclic,
135 __entry->more_io,
136 __entry->older_than_this,
137 __entry->range_start,
138 __entry->range_end)
139)
140
141#define DEFINE_WBC_EVENT(name) \
142DEFINE_EVENT(wbc_class, name, \
143 TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \
144 TP_ARGS(wbc, bdi))
145DEFINE_WBC_EVENT(wbc_writeback_start);
146DEFINE_WBC_EVENT(wbc_writeback_written);
147DEFINE_WBC_EVENT(wbc_writeback_wait);
148DEFINE_WBC_EVENT(wbc_balance_dirty_start);
149DEFINE_WBC_EVENT(wbc_balance_dirty_written);
150DEFINE_WBC_EVENT(wbc_balance_dirty_wait);
151
88#endif /* _TRACE_WRITEBACK_H */ 152#endif /* _TRACE_WRITEBACK_H */
89 153
90/* This part must be outside protection */ 154/* This part must be outside protection */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 37498ef61548..d556cd829af6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -34,6 +34,7 @@
34#include <linux/syscalls.h> 34#include <linux/syscalls.h>
35#include <linux/buffer_head.h> 35#include <linux/buffer_head.h>
36#include <linux/pagevec.h> 36#include <linux/pagevec.h>
37#include <trace/events/writeback.h>
37 38
38/* 39/*
39 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited 40 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
@@ -535,11 +536,13 @@ static void balance_dirty_pages(struct address_space *mapping,
535 * threshold otherwise wait until the disk writes catch 536 * threshold otherwise wait until the disk writes catch
536 * up. 537 * up.
537 */ 538 */
539 trace_wbc_balance_dirty_start(&wbc, bdi);
538 if (bdi_nr_reclaimable > bdi_thresh) { 540 if (bdi_nr_reclaimable > bdi_thresh) {
539 writeback_inodes_wb(&bdi->wb, &wbc); 541 writeback_inodes_wb(&bdi->wb, &wbc);
540 pages_written += write_chunk - wbc.nr_to_write; 542 pages_written += write_chunk - wbc.nr_to_write;
541 get_dirty_limits(&background_thresh, &dirty_thresh, 543 get_dirty_limits(&background_thresh, &dirty_thresh,
542 &bdi_thresh, bdi); 544 &bdi_thresh, bdi);
545 trace_wbc_balance_dirty_written(&wbc, bdi);
543 } 546 }
544 547
545 /* 548 /*
@@ -565,6 +568,7 @@ static void balance_dirty_pages(struct address_space *mapping,
565 if (pages_written >= write_chunk) 568 if (pages_written >= write_chunk)
566 break; /* We've done our duty */ 569 break; /* We've done our duty */
567 570
571 trace_wbc_balance_dirty_wait(&wbc, bdi);
568 __set_current_state(TASK_INTERRUPTIBLE); 572 __set_current_state(TASK_INTERRUPTIBLE);
569 io_schedule_timeout(pause); 573 io_schedule_timeout(pause);
570 574