aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid Howells <dhowells@redhat.com>2017-11-02 11:27:53 -0400
committerDavid Howells <dhowells@redhat.com>2017-11-13 10:38:21 -0500
commit13524ab3c6f41bcd257d28644414297bea8282b7 (patch)
tree9ac5ab8bfd44026bb13783cf540f4fdb57d72d44
parent1cf7a1518aefa69ac6ba0c3f9206073e4221e3c8 (diff)
afs: Trace page dirty/clean
Add a trace event that logs the dirtying and cleaning of pages attached to AFS inodes.

Signed-off-by: David Howells <dhowells@redhat.com>
-rw-r--r--fs/afs/file.c10
-rw-r--r--fs/afs/write.c34
-rw-r--r--include/trace/events/afs.h39
3 files changed, 70 insertions, 13 deletions
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 675c5c268a52..a39192ced99e 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -583,6 +583,9 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
583static void afs_invalidatepage(struct page *page, unsigned int offset, 583static void afs_invalidatepage(struct page *page, unsigned int offset,
584 unsigned int length) 584 unsigned int length)
585{ 585{
586 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
587 unsigned long priv;
588
586 _enter("{%lu},%u,%u", page->index, offset, length); 589 _enter("{%lu},%u,%u", page->index, offset, length);
587 590
588 BUG_ON(!PageLocked(page)); 591 BUG_ON(!PageLocked(page));
@@ -598,6 +601,9 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
598#endif 601#endif
599 602
600 if (PagePrivate(page)) { 603 if (PagePrivate(page)) {
604 priv = page_private(page);
605 trace_afs_page_dirty(vnode, tracepoint_string("inval"),
606 page->index, priv);
601 set_page_private(page, 0); 607 set_page_private(page, 0);
602 ClearPagePrivate(page); 608 ClearPagePrivate(page);
603 } 609 }
@@ -613,6 +619,7 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
613static int afs_releasepage(struct page *page, gfp_t gfp_flags) 619static int afs_releasepage(struct page *page, gfp_t gfp_flags)
614{ 620{
615 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host); 621 struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
622 unsigned long priv;
616 623
617 _enter("{{%x:%u}[%lu],%lx},%x", 624 _enter("{{%x:%u}[%lu],%lx},%x",
618 vnode->fid.vid, vnode->fid.vnode, page->index, page->flags, 625 vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
@@ -628,6 +635,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
628#endif 635#endif
629 636
630 if (PagePrivate(page)) { 637 if (PagePrivate(page)) {
638 priv = page_private(page);
639 trace_afs_page_dirty(vnode, tracepoint_string("rel"),
640 page->index, priv);
631 set_page_private(page, 0); 641 set_page_private(page, 0);
632 ClearPagePrivate(page); 642 ClearPagePrivate(page);
633 } 643 }
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 6807277ef956..4472882f06df 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -18,19 +18,6 @@
18#include "internal.h" 18#include "internal.h"
19 19
20/* 20/*
21 * We use page->private to hold the amount of the page that we've written to,
22 * splitting the field into two parts. However, we need to represent a range
23 * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
24 */
25#if PAGE_SIZE > 32768
26#define AFS_PRIV_MAX 0xffffffff
27#define AFS_PRIV_SHIFT 32
28#else
29#define AFS_PRIV_MAX 0xffff
30#define AFS_PRIV_SHIFT 16
31#endif
32
33/*
34 * mark a page as having been made dirty and thus needing writeback 21 * mark a page as having been made dirty and thus needing writeback
35 */ 22 */
36int afs_set_page_dirty(struct page *page) 23int afs_set_page_dirty(struct page *page)
@@ -145,6 +132,8 @@ try_again:
145 132
146 priv = (unsigned long)t << AFS_PRIV_SHIFT; 133 priv = (unsigned long)t << AFS_PRIV_SHIFT;
147 priv |= f; 134 priv |= f;
135 trace_afs_page_dirty(vnode, tracepoint_string("begin"),
136 page->index, priv);
148 SetPagePrivate(page); 137 SetPagePrivate(page);
149 set_page_private(page, priv); 138 set_page_private(page, priv);
150 _leave(" = 0"); 139 _leave(" = 0");
@@ -386,6 +375,7 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
386 struct page *primary_page, 375 struct page *primary_page,
387 pgoff_t final_page) 376 pgoff_t final_page)
388{ 377{
378 struct afs_vnode *vnode = AFS_FS_I(mapping->host);
389 struct page *pages[8], *page; 379 struct page *pages[8], *page;
390 unsigned long count, priv; 380 unsigned long count, priv;
391 unsigned n, offset, to, f, t; 381 unsigned n, offset, to, f, t;
@@ -407,8 +397,13 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
407 priv = page_private(primary_page); 397 priv = page_private(primary_page);
408 offset = priv & AFS_PRIV_MAX; 398 offset = priv & AFS_PRIV_MAX;
409 to = priv >> AFS_PRIV_SHIFT; 399 to = priv >> AFS_PRIV_SHIFT;
400 trace_afs_page_dirty(vnode, tracepoint_string("store"),
401 primary_page->index, priv);
410 402
411 WARN_ON(offset == to); 403 WARN_ON(offset == to);
404 if (offset == to)
405 trace_afs_page_dirty(vnode, tracepoint_string("WARN"),
406 primary_page->index, priv);
412 407
413 if (start >= final_page || to < PAGE_SIZE) 408 if (start >= final_page || to < PAGE_SIZE)
414 goto no_more; 409 goto no_more;
@@ -452,6 +447,9 @@ static int afs_write_back_from_locked_page(struct address_space *mapping,
452 } 447 }
453 to = t; 448 to = t;
454 449
450 trace_afs_page_dirty(vnode, tracepoint_string("store+"),
451 page->index, priv);
452
455 if (!clear_page_dirty_for_io(page)) 453 if (!clear_page_dirty_for_io(page))
456 BUG(); 454 BUG();
457 if (test_set_page_writeback(page)) 455 if (test_set_page_writeback(page))
@@ -657,6 +655,7 @@ int afs_writepages(struct address_space *mapping,
657void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call) 655void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
658{ 656{
659 struct pagevec pv; 657 struct pagevec pv;
658 unsigned long priv;
660 unsigned count, loop; 659 unsigned count, loop;
661 pgoff_t first = call->first, last = call->last; 660 pgoff_t first = call->first, last = call->last;
662 661
@@ -676,6 +675,9 @@ void afs_pages_written_back(struct afs_vnode *vnode, struct afs_call *call)
676 ASSERTCMP(pv.nr, ==, count); 675 ASSERTCMP(pv.nr, ==, count);
677 676
678 for (loop = 0; loop < count; loop++) { 677 for (loop = 0; loop < count; loop++) {
678 priv = page_private(pv.pages[loop]);
679 trace_afs_page_dirty(vnode, tracepoint_string("clear"),
680 pv.pages[loop]->index, priv);
679 set_page_private(pv.pages[loop], 0); 681 set_page_private(pv.pages[loop], 0);
680 end_page_writeback(pv.pages[loop]); 682 end_page_writeback(pv.pages[loop]);
681 } 683 }
@@ -783,6 +785,8 @@ int afs_page_mkwrite(struct vm_fault *vmf)
783 785
784 priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */ 786 priv = (unsigned long)PAGE_SIZE << AFS_PRIV_SHIFT; /* To */
785 priv |= 0; /* From */ 787 priv |= 0; /* From */
788 trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"),
789 vmf->page->index, priv);
786 SetPagePrivate(vmf->page); 790 SetPagePrivate(vmf->page);
787 set_page_private(vmf->page, priv); 791 set_page_private(vmf->page, priv);
788 792
@@ -840,9 +844,13 @@ int afs_launder_page(struct page *page)
840 t = priv >> AFS_PRIV_SHIFT; 844 t = priv >> AFS_PRIV_SHIFT;
841 } 845 }
842 846
847 trace_afs_page_dirty(vnode, tracepoint_string("launder"),
848 page->index, priv);
843 ret = afs_store_data(mapping, page->index, page->index, t, f); 849 ret = afs_store_data(mapping, page->index, page->index, t, f);
844 } 850 }
845 851
852 trace_afs_page_dirty(vnode, tracepoint_string("laundered"),
853 page->index, priv);
846 set_page_private(page, 0); 854 set_page_private(page, 0);
847 ClearPagePrivate(page); 855 ClearPagePrivate(page);
848 856
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index 9cfb7657b72c..5f4e8193932d 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -402,6 +402,45 @@ TRACE_EVENT(afs_dir_check_failed,
402 __entry->vnode, __entry->off, __entry->i_size) 402 __entry->vnode, __entry->off, __entry->i_size)
403 ); 403 );
404 404
405/*
406 * We use page->private to hold the amount of the page that we've written to,
407 * splitting the field into two parts. However, we need to represent a range
408 * 0...PAGE_SIZE inclusive, so we can't support 64K pages on a 32-bit system.
409 */
410#if PAGE_SIZE > 32768
411#define AFS_PRIV_MAX 0xffffffff
412#define AFS_PRIV_SHIFT 32
413#else
414#define AFS_PRIV_MAX 0xffff
415#define AFS_PRIV_SHIFT 16
416#endif
417
418TRACE_EVENT(afs_page_dirty,
419 TP_PROTO(struct afs_vnode *vnode, const char *where,
420 pgoff_t page, unsigned long priv),
421
422 TP_ARGS(vnode, where, page, priv),
423
424 TP_STRUCT__entry(
425 __field(struct afs_vnode *, vnode )
426 __field(const char *, where )
427 __field(pgoff_t, page )
428 __field(unsigned long, priv )
429 ),
430
431 TP_fast_assign(
432 __entry->vnode = vnode;
433 __entry->where = where;
434 __entry->page = page;
435 __entry->priv = priv;
436 ),
437
438 TP_printk("vn=%p %lx %s %lu-%lu",
439 __entry->vnode, __entry->page, __entry->where,
440 __entry->priv & AFS_PRIV_MAX,
441 __entry->priv >> AFS_PRIV_SHIFT)
442 );
443
405#endif /* _TRACE_AFS_H */ 444#endif /* _TRACE_AFS_H */
406 445
407/* This part must be outside protection */ 446/* This part must be outside protection */