author		Linus Torvalds <torvalds@linux-foundation.org>	2011-03-25 12:57:40 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-03-25 12:57:41 -0400
commit		ae005cbed12d0b340b04b59d6f5c56e710b3895d (patch)
tree		d464865bcc97bea05eab4eba0d10bcad4ec89b93
parent		3961cdf85b749f6bab50ad31ee97e9277e7a3b70 (diff)
parent		0ba0851714beebb800992e5105a79dc3a4c504b0 (diff)
Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4
* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (43 commits)
  ext4: fix a BUG in mb_mark_used during trim.
  ext4: unused variables cleanup in fs/ext4/extents.c
  ext4: remove redundant set_buffer_mapped() in ext4_da_get_block_prep()
  ext4: add more tracepoints and use dev_t in the trace buffer
  ext4: don't kfree uninitialized s_group_info members
  ext4: add missing space in printk's in __ext4_grp_locked_error()
  ext4: add FITRIM to compat_ioctl.
  ext4: handle errors in ext4_clear_blocks()
  ext4: unify the ext4_handle_release_buffer() api
  ext4: handle errors in ext4_rename
  jbd2: add COW fields to struct jbd2_journal_handle
  jbd2: add the b_cow_tid field to journal_head struct
  ext4: Initialize fsync transaction ids in ext4_new_inode()
  ext4: Use single thread to perform DIO unwritten convertion
  ext4: optimize ext4_bio_write_page() when no extent conversion is needed
  ext4: skip orphan cleanup if fs has unknown ROCOMPAT features
  ext4: use the nblocks arg to ext4_truncate_restart_trans()
  ext4: fix missing iput of root inode for some mount error paths
  ext4: make FIEMAP and delayed allocation play well together
  ext4: suppress verbose debugging information if malloc-debug is off
  ...

Fix up conflicts in fs/ext4/super.c due to workqueue changes
-rw-r--r--  Documentation/ABI/testing/sysfs-fs-ext4    13
-rw-r--r--  Documentation/filesystems/ext4.txt        207
-rw-r--r--  fs/ext4/balloc.c                            3
-rw-r--r--  fs/ext4/ext4_jbd2.h                         7
-rw-r--r--  fs/ext4/extents.c                         213
-rw-r--r--  fs/ext4/fsync.c                            14
-rw-r--r--  fs/ext4/ialloc.c                            8
-rw-r--r--  fs/ext4/inode.c                           410
-rw-r--r--  fs/ext4/ioctl.c                             7
-rw-r--r--  fs/ext4/mballoc.c                          34
-rw-r--r--  fs/ext4/mballoc.h                           2
-rw-r--r--  fs/ext4/migrate.c                          10
-rw-r--r--  fs/ext4/namei.c                            13
-rw-r--r--  fs/ext4/page-io.c                          13
-rw-r--r--  fs/ext4/resize.c                           12
-rw-r--r--  fs/ext4/super.c                            48
-rw-r--r--  fs/ext4/xattr.c                             4
-rw-r--r--  include/linux/jbd2.h                       28
-rw-r--r--  include/linux/journal-head.h                7
-rw-r--r--  include/trace/events/ext4.h               775
-rw-r--r--  include/trace/events/jbd2.h                78
21 files changed, 1307 insertions, 599 deletions
diff --git a/Documentation/ABI/testing/sysfs-fs-ext4 b/Documentation/ABI/testing/sysfs-fs-ext4
index 5fb709997d9..f22ac0872ae 100644
--- a/Documentation/ABI/testing/sysfs-fs-ext4
+++ b/Documentation/ABI/testing/sysfs-fs-ext4
@@ -48,7 +48,7 @@ Description:
48 will have its blocks allocated out of its own unique 48 will have its blocks allocated out of its own unique
49 preallocation pool. 49 preallocation pool.
50 50
51What: /sys/fs/ext4/<disk>/inode_readahead 51What: /sys/fs/ext4/<disk>/inode_readahead_blks
52Date: March 2008 52Date: March 2008
53Contact: "Theodore Ts'o" <tytso@mit.edu> 53Contact: "Theodore Ts'o" <tytso@mit.edu>
54Description: 54Description:
@@ -85,7 +85,14 @@ Date: June 2008
85Contact: "Theodore Ts'o" <tytso@mit.edu> 85Contact: "Theodore Ts'o" <tytso@mit.edu>
86Description: 86Description:
87 Tuning parameter which (if non-zero) controls the goal 87 Tuning parameter which (if non-zero) controls the goal
88 inode used by the inode allocator in p0reference to 88 inode used by the inode allocator in preference to
89 all other allocation hueristics. This is intended for 89 all other allocation heuristics. This is intended for
90 debugging use only, and should be 0 on production 90 debugging use only, and should be 0 on production
91 systems. 91 systems.
92
93What: /sys/fs/ext4/<disk>/max_writeback_mb_bump
94Date: September 2009
95Contact: "Theodore Ts'o" <tytso@mit.edu>
96Description:
97 The maximum number of megabytes the writeback code will
 98		try to write out before moving on to another inode.
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index 6ab9442d7ee..6b050464a90 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -367,12 +367,47 @@ init_itable=n The lazy itable init code will wait n times the
367 minimizes the impact on the system performance 367 minimizes the impact on the system performance
368 while file system's inode table is being initialized. 368 while file system's inode table is being initialized.
369 369
370discard Controls whether ext4 should issue discard/TRIM 370discard Controls whether ext4 should issue discard/TRIM
371nodiscard(*) commands to the underlying block device when 371nodiscard(*) commands to the underlying block device when
372 blocks are freed. This is useful for SSD devices 372 blocks are freed. This is useful for SSD devices
373 and sparse/thinly-provisioned LUNs, but it is off 373 and sparse/thinly-provisioned LUNs, but it is off
374 by default until sufficient testing has been done. 374 by default until sufficient testing has been done.
375 375
376nouid32 Disables 32-bit UIDs and GIDs. This is for
377 interoperability with older kernels which only
378 store and expect 16-bit values.
379
380resize           Allows resizing the filesystem to the end of the
381                  last existing block group; further resizing has to
382                  be done with resize2fs, either online or offline.
383                  It can be used only in conjunction with remount.
384
385block_validity   This option enables/disables the in-kernel
386noblock_validity  facility for tracking filesystem metadata blocks
387                  within internal data structures. This allows the
388                  multi-block allocator and other routines to quickly
389                  locate extents which might overlap with filesystem
390                  metadata blocks. This option is intended for
391                  debugging purposes and, since it negatively affects
392                  performance, it is off by default.
393
394dioread_lock     Controls whether or not ext4 should use the DIO read
395dioread_nolock   locking. If the dioread_nolock option is specified,
396                  ext4 will allocate an uninitialized extent before a
397                  buffer write and convert the extent to initialized
398                  after the IO completes. This approach allows the ext4
399                  code to avoid using the inode mutex, which improves
400                  scalability on high-speed storage. However, this does
401                  not work with the nobh option and the mount will fail.
402                  Nor does it work with data journaling; the
403                  dioread_nolock option will be ignored with a kernel
404                  warning. Note that the dioread_nolock code path is
405                  only used for extent-based files. Because of these
406                  restrictions it is off by default (i.e. dioread_lock).
407
408i_version Enable 64-bit inode version support. This option is
409 off by default.
410
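
As a quick, hypothetical illustration of the mount options listed above (not part of this patch), the fragment below passes a couple of them to mount(2); the device node and mount point are made-up placeholders.

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Example only: enable discard and the lockless DIO read path. */
            if (mount("/dev/sdb1", "/mnt/test", "ext4", 0,
                      "discard,dioread_nolock") != 0) {
                    perror("mount");
                    return 1;
            }
            return 0;
    }

Filesystem-specific option strings like this are handed to ext4 through the data argument of mount(2), exactly as "-o discard,dioread_nolock" would be on the mount command line.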
376Data Mode 411Data Mode
377========= 412=========
378There are 3 different data modes: 413There are 3 different data modes:
@@ -400,6 +435,176 @@ needs to be read from and written to disk at the same time where it
400outperforms all others modes. Currently ext4 does not have delayed 435outperforms all others modes. Currently ext4 does not have delayed
401allocation support if this data journalling mode is selected. 436allocation support if this data journalling mode is selected.
402 437
438/proc entries
439=============
440
441Information about mounted ext4 file systems can be found in
442/proc/fs/ext4. Each mounted filesystem will have a directory in
443/proc/fs/ext4 based on its device name (e.g., /proc/fs/ext4/hdc or
444/proc/fs/ext4/dm-0). The files in each per-device directory are shown
445in the table below.
446
447Files in /proc/fs/ext4/<devname>
448..............................................................................
449 File Content
450 mb_groups details of multiblock allocator buddy cache of free blocks
451..............................................................................
452
453/sys entries
454============
455
456Information about mounted ext4 file systems can be found in
457/sys/fs/ext4. Each mounted filesystem will have a directory in
458/sys/fs/ext4 based on its device name (e.g., /sys/fs/ext4/hdc or
459/sys/fs/ext4/dm-0). The files in each per-device directory are shown
460in the table below.
461
462Files in /sys/fs/ext4/<devname>
463(see also Documentation/ABI/testing/sysfs-fs-ext4)
464..............................................................................
465 File Content
466
467 delayed_allocation_blocks This file is read-only and shows the number of
468 blocks that are dirty in the page cache, but
469 which do not have their location in the
470 filesystem allocated yet.
471
472 inode_goal Tuning parameter which (if non-zero) controls
473 the goal inode used by the inode allocator in
474 preference to all other allocation heuristics.
475 This is intended for debugging use only, and
476 should be 0 on production systems.
477
478 inode_readahead_blks Tuning parameter which controls the maximum
479 number of inode table blocks that ext4's inode
480 table readahead algorithm will pre-read into
481 the buffer cache
482
483 lifetime_write_kbytes This file is read-only and shows the number of
484 kilobytes of data that have been written to this
485 filesystem since it was created.
486
487 max_writeback_mb_bump The maximum number of megabytes the writeback
488                               code will try to write out before moving on to
489 another inode.
490
491 mb_group_prealloc The multiblock allocator will round up allocation
492 requests to a multiple of this tuning parameter if
493 the stripe size is not set in the ext4 superblock
494
495 mb_max_to_scan The maximum number of extents the multiblock
496 allocator will search to find the best extent
497
498 mb_min_to_scan The minimum number of extents the multiblock
499 allocator will search to find the best extent
500
501 mb_order2_req Tuning parameter which controls the minimum size
502 for requests (as a power of 2) where the buddy
503 cache is used
504
505 mb_stats Controls whether the multiblock allocator should
506 collect statistics, which are shown during the
507 unmount. 1 means to collect statistics, 0 means
508 not to collect statistics
509
510 mb_stream_req Files which have fewer blocks than this tunable
511 parameter will have their blocks allocated out
512 of a block group specific preallocation pool, so
513 that small files are packed closely together.
514 Each large file will have its blocks allocated
515 out of its own unique preallocation pool.
516
517 session_write_kbytes This file is read-only and shows the number of
518 kilobytes of data that have been written to this
519 filesystem since it was mounted.
520..............................................................................
521
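
For illustration only (this example is not in the kernel tree), the per-device files above can be read and written like any other /proc or /sys text file; the directory name dm-0 below is an assumption, so substitute whatever appears under /sys/fs/ext4 for your device.

    #include <stdio.h>

    int main(void)
    {
            char buf[64];
            FILE *f;

            /* Read a read-only statistic. */
            f = fopen("/sys/fs/ext4/dm-0/lifetime_write_kbytes", "r");
            if (f) {
                    if (fgets(buf, sizeof(buf), f))
                            printf("lifetime_write_kbytes: %s", buf);
                    fclose(f);
            }

            /* Adjust a tunable (requires root). */
            f = fopen("/sys/fs/ext4/dm-0/inode_readahead_blks", "w");
            if (f) {
                    fprintf(f, "64\n");
                    fclose(f);
            }
            return 0;
    }

The same open/read pattern applies to the /proc/fs/ext4/<devname>/mb_groups file described earlier.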
522Ioctls
523======
524
525There is some Ext4 specific functionality which can be accessed by applications
526through the system call interfaces. The list of all Ext4 specific ioctls is
527shown in the table below.
528
529Table of Ext4 specific ioctls
530..............................................................................
531 Ioctl Description
532 EXT4_IOC_GETFLAGS Get additional attributes associated with inode.
533 The ioctl argument is an integer bitfield, with
534 bit values described in ext4.h. This ioctl is an
535 alias for FS_IOC_GETFLAGS.
536
537 EXT4_IOC_SETFLAGS Set additional attributes associated with inode.
538 The ioctl argument is an integer bitfield, with
539 bit values described in ext4.h. This ioctl is an
540 alias for FS_IOC_SETFLAGS.
541
542 EXT4_IOC_GETVERSION
543 EXT4_IOC_GETVERSION_OLD
544 Get the inode i_generation number stored for
545 each inode. The i_generation number is normally
546                               changed only when a new inode is created and it is
547 particularly useful for network filesystems. The
548 '_OLD' version of this ioctl is an alias for
549 FS_IOC_GETVERSION.
550
551 EXT4_IOC_SETVERSION
552 EXT4_IOC_SETVERSION_OLD
553 Set the inode i_generation number stored for
554 each inode. The '_OLD' version of this ioctl
555 is an alias for FS_IOC_SETVERSION.
556
557 EXT4_IOC_GROUP_EXTEND This ioctl has the same purpose as the resize
558                               mount option. It allows resizing the filesystem
559                               to the end of the last existing block group;
560                               further resizing has to be done with resize2fs,
561                               either online or offline. The argument points
562                               to the unsigned long number representing the
563                               filesystem's new block count.
564
565 EXT4_IOC_MOVE_EXT Move the block extents from orig_fd (the one
566 this ioctl is pointing to) to the donor_fd (the
567 one specified in move_extent structure passed
568 as an argument to this ioctl). Then, exchange
569 inode metadata between orig_fd and donor_fd.
570 This is especially useful for online
571 defragmentation, because the allocator has the
572 opportunity to allocate moved blocks better,
573 ideally into one contiguous extent.
574
575 EXT4_IOC_GROUP_ADD Add a new group descriptor to an existing or
576 new group descriptor block. The new group
577 descriptor is described by ext4_new_group_input
578 structure, which is passed as an argument to
579 this ioctl. This is especially useful in
580 conjunction with EXT4_IOC_GROUP_EXTEND,
581 which allows online resize of the filesystem
582 to the end of the last existing block group.
583                               These two ioctls combined are used by the
584                               userspace online resize tool (e.g. resize2fs).
585
586 EXT4_IOC_MIGRATE This ioctl operates on the filesystem itself.
587 It converts (migrates) ext3 indirect block mapped
588 inode to ext4 extent mapped inode by walking
589 through indirect block mapping of the original
590 inode and converting contiguous block ranges
591 into ext4 extents of the temporary inode. Then,
592                               inodes are swapped. This ioctl might help when
593                               migrating from an ext3 to an ext4 filesystem;
594                               however, the suggestion is to create a fresh
595                               ext4 filesystem and copy data from the backup.
596                               Note that the filesystem has to support extents
597                               for this ioctl to work.
598
599 EXT4_IOC_ALLOC_DA_BLKS Force all of the delay allocated blocks to be
600 allocated to preserve application-expected ext3
601 behaviour. Note that this will also start
602 triggering a write of the data blocks, but this
603 behaviour may change in the future as it is
604 not necessary and has been done this way only
605                               for the sake of simplicity.
606..............................................................................
607
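As a hedged sketch of how an application drives the ioctls in the table above (not taken from this commit), the snippet below uses the FS_IOC_GETFLAGS/FS_IOC_SETFLAGS aliases; the file path and the choice of FS_NOATIME_FL are assumptions made purely for illustration.

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(void)
    {
            int flags;
            int fd = open("/mnt/test/somefile", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
                    flags |= FS_NOATIME_FL;  /* illustrative: set the no-atime flag */
                    if (ioctl(fd, FS_IOC_SETFLAGS, &flags) != 0)
                            perror("FS_IOC_SETFLAGS");
            } else {
                    perror("FS_IOC_GETFLAGS");
            }
            close(fd);
            return 0;
    }

According to the table, EXT4_IOC_GETFLAGS and EXT4_IOC_SETFLAGS are aliases for the FS_IOC_* pair used here, so the generic names work against an ext4 file descriptor.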
403References 608References
404========== 609==========
405 610
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index adf96b82278..97b970e7dd1 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -21,6 +21,8 @@
21#include "ext4_jbd2.h" 21#include "ext4_jbd2.h"
22#include "mballoc.h" 22#include "mballoc.h"
23 23
24#include <trace/events/ext4.h>
25
24/* 26/*
25 * balloc.c contains the blocks allocation and deallocation routines 27 * balloc.c contains the blocks allocation and deallocation routines
26 */ 28 */
@@ -342,6 +344,7 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
342 * We do it here so the bitmap uptodate bit 344 * We do it here so the bitmap uptodate bit
343 * get set with buffer lock held. 345 * get set with buffer lock held.
344 */ 346 */
347 trace_ext4_read_block_bitmap_load(sb, block_group);
345 set_bitmap_uptodate(bh); 348 set_bitmap_uptodate(bh);
346 if (bh_submit_read(bh) < 0) { 349 if (bh_submit_read(bh) < 0) {
347 put_bh(bh); 350 put_bh(bh);
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index d8b992e658c..e25e99bf7ee 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -202,13 +202,6 @@ static inline int ext4_handle_has_enough_credits(handle_t *handle, int needed)
202 return 1; 202 return 1;
203} 203}
204 204
205static inline void ext4_journal_release_buffer(handle_t *handle,
206 struct buffer_head *bh)
207{
208 if (ext4_handle_valid(handle))
209 jbd2_journal_release_buffer(handle, bh);
210}
211
212static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks) 205static inline handle_t *ext4_journal_start(struct inode *inode, int nblocks)
213{ 206{
214 return ext4_journal_start_sb(inode->i_sb, nblocks); 207 return ext4_journal_start_sb(inode->i_sb, nblocks);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 7516fb9c0bd..dd2cb5076ff 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -44,6 +44,8 @@
44#include "ext4_jbd2.h" 44#include "ext4_jbd2.h"
45#include "ext4_extents.h" 45#include "ext4_extents.h"
46 46
47#include <trace/events/ext4.h>
48
47static int ext4_ext_truncate_extend_restart(handle_t *handle, 49static int ext4_ext_truncate_extend_restart(handle_t *handle,
48 struct inode *inode, 50 struct inode *inode,
49 int needed) 51 int needed)
@@ -664,6 +666,8 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
664 if (unlikely(!bh)) 666 if (unlikely(!bh))
665 goto err; 667 goto err;
666 if (!bh_uptodate_or_lock(bh)) { 668 if (!bh_uptodate_or_lock(bh)) {
669 trace_ext4_ext_load_extent(inode, block,
670 path[ppos].p_block);
667 if (bh_submit_read(bh) < 0) { 671 if (bh_submit_read(bh) < 0) {
668 put_bh(bh); 672 put_bh(bh);
669 goto err; 673 goto err;
@@ -1034,7 +1038,7 @@ cleanup:
1034 for (i = 0; i < depth; i++) { 1038 for (i = 0; i < depth; i++) {
1035 if (!ablocks[i]) 1039 if (!ablocks[i])
1036 continue; 1040 continue;
1037 ext4_free_blocks(handle, inode, 0, ablocks[i], 1, 1041 ext4_free_blocks(handle, inode, NULL, ablocks[i], 1,
1038 EXT4_FREE_BLOCKS_METADATA); 1042 EXT4_FREE_BLOCKS_METADATA);
1039 } 1043 }
1040 } 1044 }
@@ -2059,7 +2063,7 @@ static int ext4_ext_rm_idx(handle_t *handle, struct inode *inode,
2059 if (err) 2063 if (err)
2060 return err; 2064 return err;
2061 ext_debug("index is empty, remove it, free block %llu\n", leaf); 2065 ext_debug("index is empty, remove it, free block %llu\n", leaf);
2062 ext4_free_blocks(handle, inode, 0, leaf, 1, 2066 ext4_free_blocks(handle, inode, NULL, leaf, 1,
2063 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2067 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
2064 return err; 2068 return err;
2065} 2069}
@@ -2156,7 +2160,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2156 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2160 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2157 start = ext4_ext_pblock(ex) + ee_len - num; 2161 start = ext4_ext_pblock(ex) + ee_len - num;
2158 ext_debug("free last %u blocks starting %llu\n", num, start); 2162 ext_debug("free last %u blocks starting %llu\n", num, start);
2159 ext4_free_blocks(handle, inode, 0, start, num, flags); 2163 ext4_free_blocks(handle, inode, NULL, start, num, flags);
2160 } else if (from == le32_to_cpu(ex->ee_block) 2164 } else if (from == le32_to_cpu(ex->ee_block)
2161 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { 2165 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) {
2162 printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n", 2166 printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n",
@@ -3108,14 +3112,13 @@ static int check_eofblocks_fl(handle_t *handle, struct inode *inode,
3108{ 3112{
3109 int i, depth; 3113 int i, depth;
3110 struct ext4_extent_header *eh; 3114 struct ext4_extent_header *eh;
3111 struct ext4_extent *ex, *last_ex; 3115 struct ext4_extent *last_ex;
3112 3116
3113 if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) 3117 if (!ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
3114 return 0; 3118 return 0;
3115 3119
3116 depth = ext_depth(inode); 3120 depth = ext_depth(inode);
3117 eh = path[depth].p_hdr; 3121 eh = path[depth].p_hdr;
3118 ex = path[depth].p_ext;
3119 3122
3120 if (unlikely(!eh->eh_entries)) { 3123 if (unlikely(!eh->eh_entries)) {
3121 EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and " 3124 EXT4_ERROR_INODE(inode, "eh->eh_entries == 0 and "
@@ -3295,9 +3298,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3295 struct ext4_map_blocks *map, int flags) 3298 struct ext4_map_blocks *map, int flags)
3296{ 3299{
3297 struct ext4_ext_path *path = NULL; 3300 struct ext4_ext_path *path = NULL;
3298 struct ext4_extent_header *eh;
3299 struct ext4_extent newex, *ex; 3301 struct ext4_extent newex, *ex;
3300 ext4_fsblk_t newblock; 3302 ext4_fsblk_t newblock = 0;
3301 int err = 0, depth, ret; 3303 int err = 0, depth, ret;
3302 unsigned int allocated = 0; 3304 unsigned int allocated = 0;
3303 struct ext4_allocation_request ar; 3305 struct ext4_allocation_request ar;
@@ -3305,6 +3307,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3305 3307
3306 ext_debug("blocks %u/%u requested for inode %lu\n", 3308 ext_debug("blocks %u/%u requested for inode %lu\n",
3307 map->m_lblk, map->m_len, inode->i_ino); 3309 map->m_lblk, map->m_len, inode->i_ino);
3310 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
3308 3311
3309 /* check in cache */ 3312 /* check in cache */
3310 if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { 3313 if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) {
@@ -3352,7 +3355,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3352 err = -EIO; 3355 err = -EIO;
3353 goto out2; 3356 goto out2;
3354 } 3357 }
3355 eh = path[depth].p_hdr;
3356 3358
3357 ex = path[depth].p_ext; 3359 ex = path[depth].p_ext;
3358 if (ex) { 3360 if (ex) {
@@ -3485,7 +3487,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
3485 /* not a good idea to call discard here directly, 3487 /* not a good idea to call discard here directly,
3486 * but otherwise we'd need to call it every free() */ 3488 * but otherwise we'd need to call it every free() */
3487 ext4_discard_preallocations(inode); 3489 ext4_discard_preallocations(inode);
3488 ext4_free_blocks(handle, inode, 0, ext4_ext_pblock(&newex), 3490 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex),
3489 ext4_ext_get_actual_len(&newex), 0); 3491 ext4_ext_get_actual_len(&newex), 0);
3490 goto out2; 3492 goto out2;
3491 } 3493 }
@@ -3525,6 +3527,8 @@ out2:
3525 ext4_ext_drop_refs(path); 3527 ext4_ext_drop_refs(path);
3526 kfree(path); 3528 kfree(path);
3527 } 3529 }
3530 trace_ext4_ext_map_blocks_exit(inode, map->m_lblk,
3531 newblock, map->m_len, err ? err : allocated);
3528 return err ? err : allocated; 3532 return err ? err : allocated;
3529} 3533}
3530 3534
@@ -3658,6 +3662,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3658 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 3662 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
3659 return -EOPNOTSUPP; 3663 return -EOPNOTSUPP;
3660 3664
3665 trace_ext4_fallocate_enter(inode, offset, len, mode);
3661 map.m_lblk = offset >> blkbits; 3666 map.m_lblk = offset >> blkbits;
3662 /* 3667 /*
3663 * We can't just convert len to max_blocks because 3668 * We can't just convert len to max_blocks because
@@ -3673,6 +3678,7 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3673 ret = inode_newsize_ok(inode, (len + offset)); 3678 ret = inode_newsize_ok(inode, (len + offset));
3674 if (ret) { 3679 if (ret) {
3675 mutex_unlock(&inode->i_mutex); 3680 mutex_unlock(&inode->i_mutex);
3681 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret);
3676 return ret; 3682 return ret;
3677 } 3683 }
3678retry: 3684retry:
@@ -3717,6 +3723,8 @@ retry:
3717 goto retry; 3723 goto retry;
3718 } 3724 }
3719 mutex_unlock(&inode->i_mutex); 3725 mutex_unlock(&inode->i_mutex);
3726 trace_ext4_fallocate_exit(inode, offset, max_blocks,
3727 ret > 0 ? ret2 : ret);
3720 return ret > 0 ? ret2 : ret; 3728 return ret > 0 ? ret2 : ret;
3721} 3729}
3722 3730
@@ -3775,6 +3783,7 @@ int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
3775 } 3783 }
3776 return ret > 0 ? ret2 : ret; 3784 return ret > 0 ? ret2 : ret;
3777} 3785}
3786
3778/* 3787/*
3779 * Callback function called for each extent to gather FIEMAP information. 3788 * Callback function called for each extent to gather FIEMAP information.
3780 */ 3789 */
@@ -3782,38 +3791,162 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3782 struct ext4_ext_cache *newex, struct ext4_extent *ex, 3791 struct ext4_ext_cache *newex, struct ext4_extent *ex,
3783 void *data) 3792 void *data)
3784{ 3793{
3785 struct fiemap_extent_info *fieinfo = data;
3786 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits;
3787 __u64 logical; 3794 __u64 logical;
3788 __u64 physical; 3795 __u64 physical;
3789 __u64 length; 3796 __u64 length;
3797 loff_t size;
3790 __u32 flags = 0; 3798 __u32 flags = 0;
3791 int error; 3799 int ret = 0;
3800 struct fiemap_extent_info *fieinfo = data;
3801 unsigned char blksize_bits;
3792 3802
3793 logical = (__u64)newex->ec_block << blksize_bits; 3803 blksize_bits = inode->i_sb->s_blocksize_bits;
3804 logical = (__u64)newex->ec_block << blksize_bits;
3794 3805
3795 if (newex->ec_start == 0) { 3806 if (newex->ec_start == 0) {
3796 pgoff_t offset; 3807 /*
3797 struct page *page; 3808 * No extent in extent-tree contains block @newex->ec_start,
3809 * then the block may stay in 1)a hole or 2)delayed-extent.
3810 *
3811 * Holes or delayed-extents are processed as follows.
3812 * 1. lookup dirty pages with specified range in pagecache.
3813 * If no page is got, then there is no delayed-extent and
3814 * return with EXT_CONTINUE.
3815 * 2. find the 1st mapped buffer,
3816 * 3. check if the mapped buffer is both in the request range
3817 * and a delayed buffer. If not, there is no delayed-extent,
3818 * then return.
3819 * 4. a delayed-extent is found, the extent will be collected.
3820 */
3821 ext4_lblk_t end = 0;
3822 pgoff_t last_offset;
3823 pgoff_t offset;
3824 pgoff_t index;
3825 struct page **pages = NULL;
3798 struct buffer_head *bh = NULL; 3826 struct buffer_head *bh = NULL;
3827 struct buffer_head *head = NULL;
3828 unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *);
3829
3830 pages = kmalloc(PAGE_SIZE, GFP_KERNEL);
3831 if (pages == NULL)
3832 return -ENOMEM;
3799 3833
3800 offset = logical >> PAGE_SHIFT; 3834 offset = logical >> PAGE_SHIFT;
3801 page = find_get_page(inode->i_mapping, offset); 3835repeat:
3802 if (!page || !page_has_buffers(page)) 3836 last_offset = offset;
3803 return EXT_CONTINUE; 3837 head = NULL;
3838 ret = find_get_pages_tag(inode->i_mapping, &offset,
3839 PAGECACHE_TAG_DIRTY, nr_pages, pages);
3840
3841 if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
3842 /* First time, try to find a mapped buffer. */
3843 if (ret == 0) {
3844out:
3845 for (index = 0; index < ret; index++)
3846 page_cache_release(pages[index]);
3847 /* just a hole. */
3848 kfree(pages);
3849 return EXT_CONTINUE;
3850 }
3804 3851
3805 bh = page_buffers(page); 3852 /* Try to find the 1st mapped buffer. */
3853 end = ((__u64)pages[0]->index << PAGE_SHIFT) >>
3854 blksize_bits;
3855 if (!page_has_buffers(pages[0]))
3856 goto out;
3857 head = page_buffers(pages[0]);
3858 if (!head)
3859 goto out;
3806 3860
3807 if (!bh) 3861 bh = head;
3808 return EXT_CONTINUE; 3862 do {
3863 if (buffer_mapped(bh)) {
3864 /* get the 1st mapped buffer. */
3865 if (end > newex->ec_block +
3866 newex->ec_len)
3867 /* The buffer is out of
3868 * the request range.
3869 */
3870 goto out;
3871 goto found_mapped_buffer;
3872 }
3873 bh = bh->b_this_page;
3874 end++;
3875 } while (bh != head);
3809 3876
3810 if (buffer_delay(bh)) { 3877 /* No mapped buffer found. */
3811 flags |= FIEMAP_EXTENT_DELALLOC; 3878 goto out;
3812 page_cache_release(page);
3813 } else { 3879 } else {
3814 page_cache_release(page); 3880 /*Find contiguous delayed buffers. */
3815 return EXT_CONTINUE; 3881 if (ret > 0 && pages[0]->index == last_offset)
3882 head = page_buffers(pages[0]);
3883 bh = head;
3816 } 3884 }
3885
3886found_mapped_buffer:
3887 if (bh != NULL && buffer_delay(bh)) {
3888 /* 1st or contiguous delayed buffer found. */
3889 if (!(flags & FIEMAP_EXTENT_DELALLOC)) {
3890 /*
3891 * 1st delayed buffer found, record
3892 * the start of extent.
3893 */
3894 flags |= FIEMAP_EXTENT_DELALLOC;
3895 newex->ec_block = end;
3896 logical = (__u64)end << blksize_bits;
3897 }
3898 /* Find contiguous delayed buffers. */
3899 do {
3900 if (!buffer_delay(bh))
3901 goto found_delayed_extent;
3902 bh = bh->b_this_page;
3903 end++;
3904 } while (bh != head);
3905
3906 for (index = 1; index < ret; index++) {
3907 if (!page_has_buffers(pages[index])) {
3908 bh = NULL;
3909 break;
3910 }
3911 head = page_buffers(pages[index]);
3912 if (!head) {
3913 bh = NULL;
3914 break;
3915 }
3916 if (pages[index]->index !=
3917 pages[0]->index + index) {
3918 /* Blocks are not contiguous. */
3919 bh = NULL;
3920 break;
3921 }
3922 bh = head;
3923 do {
3924 if (!buffer_delay(bh))
3925 /* Delayed-extent ends. */
3926 goto found_delayed_extent;
3927 bh = bh->b_this_page;
3928 end++;
3929 } while (bh != head);
3930 }
3931 } else if (!(flags & FIEMAP_EXTENT_DELALLOC))
3932 /* a hole found. */
3933 goto out;
3934
3935found_delayed_extent:
3936 newex->ec_len = min(end - newex->ec_block,
3937 (ext4_lblk_t)EXT_INIT_MAX_LEN);
3938 if (ret == nr_pages && bh != NULL &&
3939 newex->ec_len < EXT_INIT_MAX_LEN &&
3940 buffer_delay(bh)) {
3941 /* Have not collected an extent and continue. */
3942 for (index = 0; index < ret; index++)
3943 page_cache_release(pages[index]);
3944 goto repeat;
3945 }
3946
3947 for (index = 0; index < ret; index++)
3948 page_cache_release(pages[index]);
3949 kfree(pages);
3817 } 3950 }
3818 3951
3819 physical = (__u64)newex->ec_start << blksize_bits; 3952 physical = (__u64)newex->ec_start << blksize_bits;
@@ -3822,32 +3955,16 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path,
3822 if (ex && ext4_ext_is_uninitialized(ex)) 3955 if (ex && ext4_ext_is_uninitialized(ex))
3823 flags |= FIEMAP_EXTENT_UNWRITTEN; 3956 flags |= FIEMAP_EXTENT_UNWRITTEN;
3824 3957
3825 /* 3958 size = i_size_read(inode);
3826 * If this extent reaches EXT_MAX_BLOCK, it must be last. 3959 if (logical + length >= size)
3827 *
3828 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK,
3829 * this also indicates no more allocated blocks.
3830 *
3831 * XXX this might miss a single-block extent at EXT_MAX_BLOCK
3832 */
3833 if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK ||
3834 newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) {
3835 loff_t size = i_size_read(inode);
3836 loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb);
3837
3838 flags |= FIEMAP_EXTENT_LAST; 3960 flags |= FIEMAP_EXTENT_LAST;
3839 if ((flags & FIEMAP_EXTENT_DELALLOC) &&
3840 logical+length > size)
3841 length = (size - logical + bs - 1) & ~(bs-1);
3842 }
3843 3961
3844 error = fiemap_fill_next_extent(fieinfo, logical, physical, 3962 ret = fiemap_fill_next_extent(fieinfo, logical, physical,
3845 length, flags); 3963 length, flags);
3846 if (error < 0) 3964 if (ret < 0)
3847 return error; 3965 return ret;
3848 if (error == 1) 3966 if (ret == 1)
3849 return EXT_BREAK; 3967 return EXT_BREAK;
3850
3851 return EXT_CONTINUE; 3968 return EXT_CONTINUE;
3852} 3969}
3853 3970
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c
index 7829b287822..7f74019d6d7 100644
--- a/fs/ext4/fsync.c
+++ b/fs/ext4/fsync.c
@@ -164,20 +164,20 @@ int ext4_sync_file(struct file *file, int datasync)
164 164
165 J_ASSERT(ext4_journal_current_handle() == NULL); 165 J_ASSERT(ext4_journal_current_handle() == NULL);
166 166
167 trace_ext4_sync_file(file, datasync); 167 trace_ext4_sync_file_enter(file, datasync);
168 168
169 if (inode->i_sb->s_flags & MS_RDONLY) 169 if (inode->i_sb->s_flags & MS_RDONLY)
170 return 0; 170 return 0;
171 171
172 ret = ext4_flush_completed_IO(inode); 172 ret = ext4_flush_completed_IO(inode);
173 if (ret < 0) 173 if (ret < 0)
174 return ret; 174 goto out;
175 175
176 if (!journal) { 176 if (!journal) {
177 ret = generic_file_fsync(file, datasync); 177 ret = generic_file_fsync(file, datasync);
178 if (!ret && !list_empty(&inode->i_dentry)) 178 if (!ret && !list_empty(&inode->i_dentry))
179 ext4_sync_parent(inode); 179 ext4_sync_parent(inode);
180 return ret; 180 goto out;
181 } 181 }
182 182
183 /* 183 /*
@@ -194,8 +194,10 @@ int ext4_sync_file(struct file *file, int datasync)
194 * (they were dirtied by commit). But that's OK - the blocks are 194 * (they were dirtied by commit). But that's OK - the blocks are
195 * safe in-journal, which is all fsync() needs to ensure. 195 * safe in-journal, which is all fsync() needs to ensure.
196 */ 196 */
197 if (ext4_should_journal_data(inode)) 197 if (ext4_should_journal_data(inode)) {
198 return ext4_force_commit(inode->i_sb); 198 ret = ext4_force_commit(inode->i_sb);
199 goto out;
200 }
199 201
200 commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid; 202 commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid;
201 if (jbd2_log_start_commit(journal, commit_tid)) { 203 if (jbd2_log_start_commit(journal, commit_tid)) {
@@ -215,5 +217,7 @@ int ext4_sync_file(struct file *file, int datasync)
215 ret = jbd2_log_wait_commit(journal, commit_tid); 217 ret = jbd2_log_wait_commit(journal, commit_tid);
216 } else if (journal->j_flags & JBD2_BARRIER) 218 } else if (journal->j_flags & JBD2_BARRIER)
217 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); 219 blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
220 out:
221 trace_ext4_sync_file_exit(inode, ret);
218 return ret; 222 return ret;
219} 223}
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 78b79e1bd7e..21bb2f61e50 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -152,6 +152,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
152 * We do it here so the bitmap uptodate bit 152 * We do it here so the bitmap uptodate bit
153 * get set with buffer lock held. 153 * get set with buffer lock held.
154 */ 154 */
155 trace_ext4_load_inode_bitmap(sb, block_group);
155 set_bitmap_uptodate(bh); 156 set_bitmap_uptodate(bh);
156 if (bh_submit_read(bh) < 0) { 157 if (bh_submit_read(bh) < 0) {
157 put_bh(bh); 158 put_bh(bh);
@@ -649,7 +650,7 @@ static int find_group_other(struct super_block *sb, struct inode *parent,
649 *group = parent_group + flex_size; 650 *group = parent_group + flex_size;
650 if (*group > ngroups) 651 if (*group > ngroups)
651 *group = 0; 652 *group = 0;
652 return find_group_orlov(sb, parent, group, mode, 0); 653 return find_group_orlov(sb, parent, group, mode, NULL);
653 } 654 }
654 655
655 /* 656 /*
@@ -1054,6 +1055,11 @@ got:
1054 } 1055 }
1055 } 1056 }
1056 1057
1058 if (ext4_handle_valid(handle)) {
1059 ei->i_sync_tid = handle->h_transaction->t_tid;
1060 ei->i_datasync_tid = handle->h_transaction->t_tid;
1061 }
1062
1057 err = ext4_mark_inode_dirty(handle, inode); 1063 err = ext4_mark_inode_dirty(handle, inode);
1058 if (err) { 1064 if (err) {
1059 ext4_std_error(sb, err); 1065 ext4_std_error(sb, err);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 9297ad46c46..1a86282b902 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -173,7 +173,7 @@ int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
173 BUG_ON(EXT4_JOURNAL(inode) == NULL); 173 BUG_ON(EXT4_JOURNAL(inode) == NULL);
174 jbd_debug(2, "restarting handle %p\n", handle); 174 jbd_debug(2, "restarting handle %p\n", handle);
175 up_write(&EXT4_I(inode)->i_data_sem); 175 up_write(&EXT4_I(inode)->i_data_sem);
176 ret = ext4_journal_restart(handle, blocks_for_truncate(inode)); 176 ret = ext4_journal_restart(handle, nblocks);
177 down_write(&EXT4_I(inode)->i_data_sem); 177 down_write(&EXT4_I(inode)->i_data_sem);
178 ext4_discard_preallocations(inode); 178 ext4_discard_preallocations(inode);
179 179
@@ -720,7 +720,7 @@ allocated:
720 return ret; 720 return ret;
721failed_out: 721failed_out:
722 for (i = 0; i < index; i++) 722 for (i = 0; i < index; i++)
723 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0); 723 ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
724 return ret; 724 return ret;
725} 725}
726 726
@@ -823,20 +823,20 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
823 return err; 823 return err;
824failed: 824failed:
825 /* Allocation failed, free what we already allocated */ 825 /* Allocation failed, free what we already allocated */
826 ext4_free_blocks(handle, inode, 0, new_blocks[0], 1, 0); 826 ext4_free_blocks(handle, inode, NULL, new_blocks[0], 1, 0);
827 for (i = 1; i <= n ; i++) { 827 for (i = 1; i <= n ; i++) {
828 /* 828 /*
829 * branch[i].bh is newly allocated, so there is no 829 * branch[i].bh is newly allocated, so there is no
830 * need to revoke the block, which is why we don't 830 * need to revoke the block, which is why we don't
831 * need to set EXT4_FREE_BLOCKS_METADATA. 831 * need to set EXT4_FREE_BLOCKS_METADATA.
832 */ 832 */
833 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 833 ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1,
834 EXT4_FREE_BLOCKS_FORGET); 834 EXT4_FREE_BLOCKS_FORGET);
835 } 835 }
836 for (i = n+1; i < indirect_blks; i++) 836 for (i = n+1; i < indirect_blks; i++)
837 ext4_free_blocks(handle, inode, 0, new_blocks[i], 1, 0); 837 ext4_free_blocks(handle, inode, NULL, new_blocks[i], 1, 0);
838 838
839 ext4_free_blocks(handle, inode, 0, new_blocks[i], num, 0); 839 ext4_free_blocks(handle, inode, NULL, new_blocks[i], num, 0);
840 840
841 return err; 841 return err;
842} 842}
@@ -924,7 +924,7 @@ err_out:
924 ext4_free_blocks(handle, inode, where[i].bh, 0, 1, 924 ext4_free_blocks(handle, inode, where[i].bh, 0, 1,
925 EXT4_FREE_BLOCKS_FORGET); 925 EXT4_FREE_BLOCKS_FORGET);
926 } 926 }
927 ext4_free_blocks(handle, inode, 0, le32_to_cpu(where[num].key), 927 ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key),
928 blks, 0); 928 blks, 0);
929 929
930 return err; 930 return err;
@@ -973,6 +973,7 @@ static int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
973 int count = 0; 973 int count = 0;
974 ext4_fsblk_t first_block = 0; 974 ext4_fsblk_t first_block = 0;
975 975
976 trace_ext4_ind_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
976 J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))); 977 J_ASSERT(!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)));
977 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0); 978 J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
978 depth = ext4_block_to_path(inode, map->m_lblk, offsets, 979 depth = ext4_block_to_path(inode, map->m_lblk, offsets,
@@ -1058,6 +1059,8 @@ cleanup:
1058 partial--; 1059 partial--;
1059 } 1060 }
1060out: 1061out:
1062 trace_ext4_ind_map_blocks_exit(inode, map->m_lblk,
1063 map->m_pblk, map->m_len, err);
1061 return err; 1064 return err;
1062} 1065}
1063 1066
@@ -2060,7 +2063,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
2060 if (nr_pages == 0) 2063 if (nr_pages == 0)
2061 break; 2064 break;
2062 for (i = 0; i < nr_pages; i++) { 2065 for (i = 0; i < nr_pages; i++) {
2063 int commit_write = 0, redirty_page = 0; 2066 int commit_write = 0, skip_page = 0;
2064 struct page *page = pvec.pages[i]; 2067 struct page *page = pvec.pages[i];
2065 2068
2066 index = page->index; 2069 index = page->index;
@@ -2086,14 +2089,12 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
2086 * If the page does not have buffers (for 2089 * If the page does not have buffers (for
2087 * whatever reason), try to create them using 2090 * whatever reason), try to create them using
2088 * __block_write_begin. If this fails, 2091 * __block_write_begin. If this fails,
2089 * redirty the page and move on. 2092 * skip the page and move on.
2090 */ 2093 */
2091 if (!page_has_buffers(page)) { 2094 if (!page_has_buffers(page)) {
2092 if (__block_write_begin(page, 0, len, 2095 if (__block_write_begin(page, 0, len,
2093 noalloc_get_block_write)) { 2096 noalloc_get_block_write)) {
2094 redirty_page: 2097 skip_page:
2095 redirty_page_for_writepage(mpd->wbc,
2096 page);
2097 unlock_page(page); 2098 unlock_page(page);
2098 continue; 2099 continue;
2099 } 2100 }
@@ -2104,7 +2105,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
2104 block_start = 0; 2105 block_start = 0;
2105 do { 2106 do {
2106 if (!bh) 2107 if (!bh)
2107 goto redirty_page; 2108 goto skip_page;
2108 if (map && (cur_logical >= map->m_lblk) && 2109 if (map && (cur_logical >= map->m_lblk) &&
2109 (cur_logical <= (map->m_lblk + 2110 (cur_logical <= (map->m_lblk +
2110 (map->m_len - 1)))) { 2111 (map->m_len - 1)))) {
@@ -2120,22 +2121,23 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
2120 clear_buffer_unwritten(bh); 2121 clear_buffer_unwritten(bh);
2121 } 2122 }
2122 2123
2123 /* redirty page if block allocation undone */ 2124 /* skip page if block allocation undone */
2124 if (buffer_delay(bh) || buffer_unwritten(bh)) 2125 if (buffer_delay(bh) || buffer_unwritten(bh))
2125 redirty_page = 1; 2126 skip_page = 1;
2126 bh = bh->b_this_page; 2127 bh = bh->b_this_page;
2127 block_start += bh->b_size; 2128 block_start += bh->b_size;
2128 cur_logical++; 2129 cur_logical++;
2129 pblock++; 2130 pblock++;
2130 } while (bh != page_bufs); 2131 } while (bh != page_bufs);
2131 2132
2132 if (redirty_page) 2133 if (skip_page)
2133 goto redirty_page; 2134 goto skip_page;
2134 2135
2135 if (commit_write) 2136 if (commit_write)
2136 /* mark the buffer_heads as dirty & uptodate */ 2137 /* mark the buffer_heads as dirty & uptodate */
2137 block_commit_write(page, 0, len); 2138 block_commit_write(page, 0, len);
2138 2139
2140 clear_page_dirty_for_io(page);
2139 /* 2141 /*
2140 * Delalloc doesn't support data journalling, 2142 * Delalloc doesn't support data journalling,
2141 * but eventually maybe we'll lift this 2143 * but eventually maybe we'll lift this
@@ -2165,8 +2167,7 @@ static int mpage_da_submit_io(struct mpage_da_data *mpd,
2165 return ret; 2167 return ret;
2166} 2168}
2167 2169
2168static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd, 2170static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
2169 sector_t logical, long blk_cnt)
2170{ 2171{
2171 int nr_pages, i; 2172 int nr_pages, i;
2172 pgoff_t index, end; 2173 pgoff_t index, end;
@@ -2174,9 +2175,8 @@ static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd,
2174 struct inode *inode = mpd->inode; 2175 struct inode *inode = mpd->inode;
2175 struct address_space *mapping = inode->i_mapping; 2176 struct address_space *mapping = inode->i_mapping;
2176 2177
2177 index = logical >> (PAGE_CACHE_SHIFT - inode->i_blkbits); 2178 index = mpd->first_page;
2178 end = (logical + blk_cnt - 1) >> 2179 end = mpd->next_page - 1;
2179 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2180 while (index <= end) { 2180 while (index <= end) {
2181 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE); 2181 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
2182 if (nr_pages == 0) 2182 if (nr_pages == 0)
@@ -2279,9 +2279,8 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
2279 err = blks; 2279 err = blks;
2280 /* 2280 /*
2281 * If get block returns EAGAIN or ENOSPC and there 2281 * If get block returns EAGAIN or ENOSPC and there
2282 * appears to be free blocks we will call 2282 * appears to be free blocks we will just let
2283 * ext4_writepage() for all of the pages which will 2283 * mpage_da_submit_io() unlock all of the pages.
2284 * just redirty the pages.
2285 */ 2284 */
2286 if (err == -EAGAIN) 2285 if (err == -EAGAIN)
2287 goto submit_io; 2286 goto submit_io;
@@ -2312,8 +2311,10 @@ static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
2312 ext4_print_free_blocks(mpd->inode); 2311 ext4_print_free_blocks(mpd->inode);
2313 } 2312 }
2314 /* invalidate all the pages */ 2313 /* invalidate all the pages */
2315 ext4_da_block_invalidatepages(mpd, next, 2314 ext4_da_block_invalidatepages(mpd);
2316 mpd->b_size >> mpd->inode->i_blkbits); 2315
2316 /* Mark this page range as having been completed */
2317 mpd->io_done = 1;
2317 return; 2318 return;
2318 } 2319 }
2319 BUG_ON(blks == 0); 2320 BUG_ON(blks == 0);
@@ -2438,102 +2439,6 @@ static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
2438} 2439}
2439 2440
2440/* 2441/*
2441 * __mpage_da_writepage - finds extent of pages and blocks
2442 *
2443 * @page: page to consider
2444 * @wbc: not used, we just follow rules
2445 * @data: context
2446 *
2447 * The function finds extents of pages and scan them for all blocks.
2448 */
2449static int __mpage_da_writepage(struct page *page,
2450 struct writeback_control *wbc,
2451 struct mpage_da_data *mpd)
2452{
2453 struct inode *inode = mpd->inode;
2454 struct buffer_head *bh, *head;
2455 sector_t logical;
2456
2457 /*
2458 * Can we merge this page to current extent?
2459 */
2460 if (mpd->next_page != page->index) {
2461 /*
2462 * Nope, we can't. So, we map non-allocated blocks
2463 * and start IO on them
2464 */
2465 if (mpd->next_page != mpd->first_page) {
2466 mpage_da_map_and_submit(mpd);
2467 /*
2468 * skip rest of the page in the page_vec
2469 */
2470 redirty_page_for_writepage(wbc, page);
2471 unlock_page(page);
2472 return MPAGE_DA_EXTENT_TAIL;
2473 }
2474
2475 /*
2476 * Start next extent of pages ...
2477 */
2478 mpd->first_page = page->index;
2479
2480 /*
2481 * ... and blocks
2482 */
2483 mpd->b_size = 0;
2484 mpd->b_state = 0;
2485 mpd->b_blocknr = 0;
2486 }
2487
2488 mpd->next_page = page->index + 1;
2489 logical = (sector_t) page->index <<
2490 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2491
2492 if (!page_has_buffers(page)) {
2493 mpage_add_bh_to_extent(mpd, logical, PAGE_CACHE_SIZE,
2494 (1 << BH_Dirty) | (1 << BH_Uptodate));
2495 if (mpd->io_done)
2496 return MPAGE_DA_EXTENT_TAIL;
2497 } else {
2498 /*
2499 * Page with regular buffer heads, just add all dirty ones
2500 */
2501 head = page_buffers(page);
2502 bh = head;
2503 do {
2504 BUG_ON(buffer_locked(bh));
2505 /*
2506 * We need to try to allocate
2507 * unmapped blocks in the same page.
2508 * Otherwise we won't make progress
2509 * with the page in ext4_writepage
2510 */
2511 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2512 mpage_add_bh_to_extent(mpd, logical,
2513 bh->b_size,
2514 bh->b_state);
2515 if (mpd->io_done)
2516 return MPAGE_DA_EXTENT_TAIL;
2517 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2518 /*
2519 * mapped dirty buffer. We need to update
2520 * the b_state because we look at
2521 * b_state in mpage_da_map_blocks. We don't
2522 * update b_size because if we find an
2523 * unmapped buffer_head later we need to
2524 * use the b_state flag of that buffer_head.
2525 */
2526 if (mpd->b_size == 0)
2527 mpd->b_state = bh->b_state & BH_FLAGS;
2528 }
2529 logical++;
2530 } while ((bh = bh->b_this_page) != head);
2531 }
2532
2533 return 0;
2534}
2535
2536/*
2537 * This is a special get_blocks_t callback which is used by 2442 * This is a special get_blocks_t callback which is used by
2538 * ext4_da_write_begin(). It will either return mapped block or 2443 * ext4_da_write_begin(). It will either return mapped block or
2539 * reserve space for a single block. 2444 * reserve space for a single block.
@@ -2597,7 +2502,6 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
2597 * for partial write. 2502 * for partial write.
2598 */ 2503 */
2599 set_buffer_new(bh); 2504 set_buffer_new(bh);
2600 set_buffer_mapped(bh);
2601 } 2505 }
2602 return 0; 2506 return 0;
2603} 2507}
@@ -2811,27 +2715,27 @@ static int ext4_da_writepages_trans_blocks(struct inode *inode)
2811 2715
2812/* 2716/*
2813 * write_cache_pages_da - walk the list of dirty pages of the given 2717 * write_cache_pages_da - walk the list of dirty pages of the given
2814 * address space and call the callback function (which usually writes 2718 * address space and accumulate pages that need writing, and call
2815 * the pages). 2719 * mpage_da_map_and_submit to map a single contiguous memory region
2816 * 2720 * and then write them.
2817 * This is a forked version of write_cache_pages(). Differences:
2818 * Range cyclic is ignored.
2819 * no_nrwrite_index_update is always presumed true
2820 */ 2721 */
2821static int write_cache_pages_da(struct address_space *mapping, 2722static int write_cache_pages_da(struct address_space *mapping,
2822 struct writeback_control *wbc, 2723 struct writeback_control *wbc,
2823 struct mpage_da_data *mpd, 2724 struct mpage_da_data *mpd,
2824 pgoff_t *done_index) 2725 pgoff_t *done_index)
2825{ 2726{
2826 int ret = 0; 2727 struct buffer_head *bh, *head;
2827 int done = 0; 2728 struct inode *inode = mapping->host;
2828 struct pagevec pvec; 2729 struct pagevec pvec;
2829 unsigned nr_pages; 2730 unsigned int nr_pages;
2830 pgoff_t index; 2731 sector_t logical;
2831 pgoff_t end; /* Inclusive */ 2732 pgoff_t index, end;
2832 long nr_to_write = wbc->nr_to_write; 2733 long nr_to_write = wbc->nr_to_write;
2833 int tag; 2734 int i, tag, ret = 0;
2834 2735
2736 memset(mpd, 0, sizeof(struct mpage_da_data));
2737 mpd->wbc = wbc;
2738 mpd->inode = inode;
2835 pagevec_init(&pvec, 0); 2739 pagevec_init(&pvec, 0);
2836 index = wbc->range_start >> PAGE_CACHE_SHIFT; 2740 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2837 end = wbc->range_end >> PAGE_CACHE_SHIFT; 2741 end = wbc->range_end >> PAGE_CACHE_SHIFT;
@@ -2842,13 +2746,11 @@ static int write_cache_pages_da(struct address_space *mapping,
2842 tag = PAGECACHE_TAG_DIRTY; 2746 tag = PAGECACHE_TAG_DIRTY;
2843 2747
2844 *done_index = index; 2748 *done_index = index;
2845 while (!done && (index <= end)) { 2749 while (index <= end) {
2846 int i;
2847
2848 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag, 2750 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2849 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1); 2751 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2850 if (nr_pages == 0) 2752 if (nr_pages == 0)
2851 break; 2753 return 0;
2852 2754
2853 for (i = 0; i < nr_pages; i++) { 2755 for (i = 0; i < nr_pages; i++) {
2854 struct page *page = pvec.pages[i]; 2756 struct page *page = pvec.pages[i];
@@ -2860,60 +2762,100 @@ static int write_cache_pages_da(struct address_space *mapping,
2860 * mapping. However, page->index will not change 2762 * mapping. However, page->index will not change
2861 * because we have a reference on the page. 2763 * because we have a reference on the page.
2862 */ 2764 */
2863 if (page->index > end) { 2765 if (page->index > end)
2864 done = 1; 2766 goto out;
2865 break;
2866 }
2867 2767
2868 *done_index = page->index + 1; 2768 *done_index = page->index + 1;
2869 2769
2770 /*
2771 * If we can't merge this page, and we have
2772 * accumulated an contiguous region, write it
2773 */
2774 if ((mpd->next_page != page->index) &&
2775 (mpd->next_page != mpd->first_page)) {
2776 mpage_da_map_and_submit(mpd);
2777 goto ret_extent_tail;
2778 }
2779
2870 lock_page(page); 2780 lock_page(page);
2871 2781
2872 /* 2782 /*
2873 * Page truncated or invalidated. We can freely skip it 2783 * If the page is no longer dirty, or its
2874 * then, even for data integrity operations: the page 2784 * mapping no longer corresponds to inode we
2875 * has disappeared concurrently, so there could be no 2785 * are writing (which means it has been
2876 * real expectation of this data interity operation 2786 * truncated or invalidated), or the page is
2877 * even if there is now a new, dirty page at the same 2787 * already under writeback and we are not
2878 * pagecache address. 2788 * doing a data integrity writeback, skip the page
2879 */ 2789 */
2880 if (unlikely(page->mapping != mapping)) { 2790 if (!PageDirty(page) ||
2881continue_unlock: 2791 (PageWriteback(page) &&
2792 (wbc->sync_mode == WB_SYNC_NONE)) ||
2793 unlikely(page->mapping != mapping)) {
2882 unlock_page(page); 2794 unlock_page(page);
2883 continue; 2795 continue;
2884 } 2796 }
2885 2797
2886 if (!PageDirty(page)) { 2798 if (PageWriteback(page))
2887 /* someone wrote it for us */ 2799 wait_on_page_writeback(page);
2888 goto continue_unlock;
2889 }
2890
2891 if (PageWriteback(page)) {
2892 if (wbc->sync_mode != WB_SYNC_NONE)
2893 wait_on_page_writeback(page);
2894 else
2895 goto continue_unlock;
2896 }
2897 2800
2898 BUG_ON(PageWriteback(page)); 2801 BUG_ON(PageWriteback(page));
2899 if (!clear_page_dirty_for_io(page))
2900 goto continue_unlock;
2901 2802
2902 ret = __mpage_da_writepage(page, wbc, mpd); 2803 if (mpd->next_page != page->index)
2903 if (unlikely(ret)) { 2804 mpd->first_page = page->index;
2904 if (ret == AOP_WRITEPAGE_ACTIVATE) { 2805 mpd->next_page = page->index + 1;
2905 unlock_page(page); 2806 logical = (sector_t) page->index <<
2906 ret = 0; 2807 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2907 } else { 2808
2908 done = 1; 2809 if (!page_has_buffers(page)) {
2909 break; 2810 mpage_add_bh_to_extent(mpd, logical,
2910 } 2811 PAGE_CACHE_SIZE,
2812 (1 << BH_Dirty) | (1 << BH_Uptodate));
2813 if (mpd->io_done)
2814 goto ret_extent_tail;
2815 } else {
2816 /*
2817 * Page with regular buffer heads,
2818 * just add all dirty ones
2819 */
2820 head = page_buffers(page);
2821 bh = head;
2822 do {
2823 BUG_ON(buffer_locked(bh));
2824 /*
2825 * We need to try to allocate
2826 * unmapped blocks in the same page.
2827 * Otherwise we won't make progress
2828 * with the page in ext4_writepage
2829 */
2830 if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2831 mpage_add_bh_to_extent(mpd, logical,
2832 bh->b_size,
2833 bh->b_state);
2834 if (mpd->io_done)
2835 goto ret_extent_tail;
2836 } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2837 /*
2838 * mapped dirty buffer. We need
2839 * to update the b_state
2840 * because we look at b_state
2841 * in mpage_da_map_blocks. We
2842 * don't update b_size because
2843 * if we find an unmapped
2844 * buffer_head later we need to
2845 * use the b_state flag of that
2846 * buffer_head.
2847 */
2848 if (mpd->b_size == 0)
2849 mpd->b_state = bh->b_state & BH_FLAGS;
2850 }
2851 logical++;
2852 } while ((bh = bh->b_this_page) != head);
2911 } 2853 }
2912 2854
2913 if (nr_to_write > 0) { 2855 if (nr_to_write > 0) {
2914 nr_to_write--; 2856 nr_to_write--;
2915 if (nr_to_write == 0 && 2857 if (nr_to_write == 0 &&
2916 wbc->sync_mode == WB_SYNC_NONE) { 2858 wbc->sync_mode == WB_SYNC_NONE)
2917 /* 2859 /*
2918 * We stop writing back only if we are 2860 * We stop writing back only if we are
2919 * not doing integrity sync. In case of 2861 * not doing integrity sync. In case of
@@ -2924,14 +2866,18 @@ continue_unlock:
2924 * pages, but have not synced all of the 2866 * pages, but have not synced all of the
2925 * old dirty pages. 2867 * old dirty pages.
2926 */ 2868 */
2927 done = 1; 2869 goto out;
2928 break;
2929 }
2930 } 2870 }
2931 } 2871 }
2932 pagevec_release(&pvec); 2872 pagevec_release(&pvec);
2933 cond_resched(); 2873 cond_resched();
2934 } 2874 }
2875 return 0;
2876ret_extent_tail:
2877 ret = MPAGE_DA_EXTENT_TAIL;
2878out:
2879 pagevec_release(&pvec);
2880 cond_resched();
2935 return ret; 2881 return ret;
2936} 2882}
2937 2883
@@ -2945,7 +2891,6 @@ static int ext4_da_writepages(struct address_space *mapping,
2945 struct mpage_da_data mpd; 2891 struct mpage_da_data mpd;
2946 struct inode *inode = mapping->host; 2892 struct inode *inode = mapping->host;
2947 int pages_written = 0; 2893 int pages_written = 0;
2948 long pages_skipped;
2949 unsigned int max_pages; 2894 unsigned int max_pages;
2950 int range_cyclic, cycled = 1, io_done = 0; 2895 int range_cyclic, cycled = 1, io_done = 0;
2951 int needed_blocks, ret = 0; 2896 int needed_blocks, ret = 0;
@@ -3028,11 +2973,6 @@ static int ext4_da_writepages(struct address_space *mapping,
3028 wbc->nr_to_write = desired_nr_to_write; 2973 wbc->nr_to_write = desired_nr_to_write;
3029 } 2974 }
3030 2975
3031 mpd.wbc = wbc;
3032 mpd.inode = mapping->host;
3033
3034 pages_skipped = wbc->pages_skipped;
3035
3036retry: 2976retry:
3037 if (wbc->sync_mode == WB_SYNC_ALL) 2977 if (wbc->sync_mode == WB_SYNC_ALL)
3038 tag_pages_for_writeback(mapping, index, end); 2978 tag_pages_for_writeback(mapping, index, end);
@@ -3059,22 +2999,10 @@ retry:
3059 } 2999 }
3060 3000
3061 /* 3001 /*
3062 * Now call __mpage_da_writepage to find the next 3002 * Now call write_cache_pages_da() to find the next
3063 * contiguous region of logical blocks that need 3003 * contiguous region of logical blocks that need
3064 * blocks to be allocated by ext4. We don't actually 3004 * blocks to be allocated by ext4 and submit them.
3065 * submit the blocks for I/O here, even though
3066 * write_cache_pages thinks it will, and will set the
3067 * pages as clean for write before calling
3068 * __mpage_da_writepage().
3069 */ 3005 */
3070 mpd.b_size = 0;
3071 mpd.b_state = 0;
3072 mpd.b_blocknr = 0;
3073 mpd.first_page = 0;
3074 mpd.next_page = 0;
3075 mpd.io_done = 0;
3076 mpd.pages_written = 0;
3077 mpd.retval = 0;
3078 ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index); 3006 ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
3079 /* 3007 /*
3080 * If we have a contiguous extent of pages and we 3008 * If we have a contiguous extent of pages and we
@@ -3096,7 +3024,6 @@ retry:
3096 * and try again 3024 * and try again
3097 */ 3025 */
3098 jbd2_journal_force_commit_nested(sbi->s_journal); 3026 jbd2_journal_force_commit_nested(sbi->s_journal);
3099 wbc->pages_skipped = pages_skipped;
3100 ret = 0; 3027 ret = 0;
3101 } else if (ret == MPAGE_DA_EXTENT_TAIL) { 3028 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
3102 /* 3029 /*
@@ -3104,7 +3031,6 @@ retry:
3104 * rest of the pages 3031 * rest of the pages
3105 */ 3032 */
3106 pages_written += mpd.pages_written; 3033 pages_written += mpd.pages_written;
3107 wbc->pages_skipped = pages_skipped;
3108 ret = 0; 3034 ret = 0;
3109 io_done = 1; 3035 io_done = 1;
3110 } else if (wbc->nr_to_write) 3036 } else if (wbc->nr_to_write)
@@ -3122,11 +3048,6 @@ retry:
3122 wbc->range_end = mapping->writeback_index - 1; 3048 wbc->range_end = mapping->writeback_index - 1;
3123 goto retry; 3049 goto retry;
3124 } 3050 }
3125 if (pages_skipped != wbc->pages_skipped)
3126 ext4_msg(inode->i_sb, KERN_CRIT,
3127 "This should not happen leaving %s "
3128 "with nr_to_write = %ld ret = %d",
3129 __func__, wbc->nr_to_write, ret);
3130 3051
3131 /* Update index */ 3052 /* Update index */
3132 wbc->range_cyclic = range_cyclic; 3053 wbc->range_cyclic = range_cyclic;
@@ -3460,6 +3381,7 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
3460 3381
3461static int ext4_readpage(struct file *file, struct page *page) 3382static int ext4_readpage(struct file *file, struct page *page)
3462{ 3383{
3384 trace_ext4_readpage(page);
3463 return mpage_readpage(page, ext4_get_block); 3385 return mpage_readpage(page, ext4_get_block);
3464} 3386}
3465 3387
@@ -3494,6 +3416,8 @@ static void ext4_invalidatepage(struct page *page, unsigned long offset)
3494{ 3416{
3495 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3417 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3496 3418
3419 trace_ext4_invalidatepage(page, offset);
3420
3497 /* 3421 /*
3498 * free any io_end structure allocated for buffers to be discarded 3422 * free any io_end structure allocated for buffers to be discarded
3499 */ 3423 */
@@ -3515,6 +3439,8 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
3515{ 3439{
3516 journal_t *journal = EXT4_JOURNAL(page->mapping->host); 3440 journal_t *journal = EXT4_JOURNAL(page->mapping->host);
3517 3441
3442 trace_ext4_releasepage(page);
3443
3518 WARN_ON(PageChecked(page)); 3444 WARN_ON(PageChecked(page));
3519 if (!page_has_buffers(page)) 3445 if (!page_has_buffers(page))
3520 return 0; 3446 return 0;
@@ -3873,11 +3799,16 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3873{ 3799{
3874 struct file *file = iocb->ki_filp; 3800 struct file *file = iocb->ki_filp;
3875 struct inode *inode = file->f_mapping->host; 3801 struct inode *inode = file->f_mapping->host;
3802 ssize_t ret;
3876 3803
3804 trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3877 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 3805 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3878 return ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs); 3806 ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3879 3807 else
3880 return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs); 3808 ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3809 trace_ext4_direct_IO_exit(inode, offset,
3810 iov_length(iov, nr_segs), rw, ret);
3811 return ret;
3881} 3812}
3882 3813
3883/* 3814/*
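
The hunk above folds the two early returns in ext4_direct_IO() into a single ret so the new enter/exit tracepoints always bracket the call. A minimal standalone sketch of that shape, with hypothetical stand-ins for the tracepoints and the two I/O paths (nothing below is from the patch itself):

#include <stdio.h>

/* Stand-ins for trace_ext4_direct_IO_enter/_exit and the two I/O paths. */
static void trace_enter(long long off, long len, int rw)
{
	printf("dio enter: off=%lld len=%ld rw=%d\n", off, len, rw);
}

static void trace_exit(long long off, long len, int rw, long ret)
{
	printf("dio exit:  off=%lld len=%ld rw=%d ret=%ld\n", off, len, rw, ret);
}

static long extent_dio(long len)   { return len; }
static long indirect_dio(long len) { return len; }

static long direct_io(int extents, long long off, long len, int rw)
{
	long ret;

	trace_enter(off, len, rw);
	if (extents)
		ret = extent_dio(len);
	else
		ret = indirect_dio(len);
	trace_exit(off, len, rw, ret);	/* one exit point sees every result */
	return ret;
}

int main(void)
{
	return direct_io(1, 0, 4096, 1) == 4096 ? 0 : 1;
}
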
@@ -4173,6 +4104,9 @@ no_top:
4173 * 4104 *
4174 * We release `count' blocks on disk, but (last - first) may be greater 4105 * We release `count' blocks on disk, but (last - first) may be greater
4175 * than `count' because there can be holes in there. 4106 * than `count' because there can be holes in there.
4107 *
4108 * Return 0 on success, 1 on invalid block range
4109 * and < 0 on fatal error.
4176 */ 4110 */
4177static int ext4_clear_blocks(handle_t *handle, struct inode *inode, 4111static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
4178 struct buffer_head *bh, 4112 struct buffer_head *bh,
@@ -4199,33 +4133,32 @@ static int ext4_clear_blocks(handle_t *handle, struct inode *inode,
4199 if (bh) { 4133 if (bh) {
4200 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 4134 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4201 err = ext4_handle_dirty_metadata(handle, inode, bh); 4135 err = ext4_handle_dirty_metadata(handle, inode, bh);
4202 if (unlikely(err)) { 4136 if (unlikely(err))
4203 ext4_std_error(inode->i_sb, err); 4137 goto out_err;
4204 return 1;
4205 }
4206 } 4138 }
4207 err = ext4_mark_inode_dirty(handle, inode); 4139 err = ext4_mark_inode_dirty(handle, inode);
4208 if (unlikely(err)) { 4140 if (unlikely(err))
4209 ext4_std_error(inode->i_sb, err); 4141 goto out_err;
4210 return 1;
4211 }
4212 err = ext4_truncate_restart_trans(handle, inode, 4142 err = ext4_truncate_restart_trans(handle, inode,
4213 blocks_for_truncate(inode)); 4143 blocks_for_truncate(inode));
4214 if (unlikely(err)) { 4144 if (unlikely(err))
4215 ext4_std_error(inode->i_sb, err); 4145 goto out_err;
4216 return 1;
4217 }
4218 if (bh) { 4146 if (bh) {
4219 BUFFER_TRACE(bh, "retaking write access"); 4147 BUFFER_TRACE(bh, "retaking write access");
4220 ext4_journal_get_write_access(handle, bh); 4148 err = ext4_journal_get_write_access(handle, bh);
4149 if (unlikely(err))
4150 goto out_err;
4221 } 4151 }
4222 } 4152 }
4223 4153
4224 for (p = first; p < last; p++) 4154 for (p = first; p < last; p++)
4225 *p = 0; 4155 *p = 0;
4226 4156
4227 ext4_free_blocks(handle, inode, 0, block_to_free, count, flags); 4157 ext4_free_blocks(handle, inode, NULL, block_to_free, count, flags);
4228 return 0; 4158 return 0;
4159out_err:
4160 ext4_std_error(inode->i_sb, err);
4161 return err;
4229} 4162}
4230 4163
4231/** 4164/**
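
The ext4_clear_blocks() hunk above collapses three copies of ext4_std_error()-then-return-1 into one out_err label, starts checking ext4_journal_get_write_access(), and returns the raw negative error so callers can tell fatal failures apart. A simplified sketch of that error-handling shape, using hypothetical helper names rather than the real jbd2 calls:

#include <stdio.h>

/* Hypothetical stand-ins for the jbd2/ext4 calls made by ext4_clear_blocks(). */
static int dirty_metadata(void)      { return 0; }
static int mark_inode_dirty(void)    { return 0; }
static int restart_transaction(void) { return -5; }	/* simulate -EIO */
static int get_write_access(void)    { return 0; }

static int clear_blocks(void)
{
	int err;

	err = dirty_metadata();
	if (err)
		goto out_err;
	err = mark_inode_dirty();
	if (err)
		goto out_err;
	err = restart_transaction();
	if (err)
		goto out_err;
	err = get_write_access();
	if (err)
		goto out_err;
	return 0;

out_err:
	/* One place reports the error; the negative code reaches the caller,
	 * which treats < 0 as fatal (mirroring the ext4_free_data() hunk below). */
	fprintf(stderr, "clear_blocks failed: %d\n", err);
	return err;
}

int main(void)
{
	printf("clear_blocks() -> %d\n", clear_blocks());
	return 0;
}
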
@@ -4259,7 +4192,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
4259 ext4_fsblk_t nr; /* Current block # */ 4192 ext4_fsblk_t nr; /* Current block # */
4260 __le32 *p; /* Pointer into inode/ind 4193 __le32 *p; /* Pointer into inode/ind
4261 for current block */ 4194 for current block */
4262 int err; 4195 int err = 0;
4263 4196
4264 if (this_bh) { /* For indirect block */ 4197 if (this_bh) { /* For indirect block */
4265 BUFFER_TRACE(this_bh, "get_write_access"); 4198 BUFFER_TRACE(this_bh, "get_write_access");
@@ -4281,9 +4214,10 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
4281 } else if (nr == block_to_free + count) { 4214 } else if (nr == block_to_free + count) {
4282 count++; 4215 count++;
4283 } else { 4216 } else {
4284 if (ext4_clear_blocks(handle, inode, this_bh, 4217 err = ext4_clear_blocks(handle, inode, this_bh,
4285 block_to_free, count, 4218 block_to_free, count,
4286 block_to_free_p, p)) 4219 block_to_free_p, p);
4220 if (err)
4287 break; 4221 break;
4288 block_to_free = nr; 4222 block_to_free = nr;
4289 block_to_free_p = p; 4223 block_to_free_p = p;
@@ -4292,9 +4226,12 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
4292 } 4226 }
4293 } 4227 }
4294 4228
4295 if (count > 0) 4229 if (!err && count > 0)
4296 ext4_clear_blocks(handle, inode, this_bh, block_to_free, 4230 err = ext4_clear_blocks(handle, inode, this_bh, block_to_free,
4297 count, block_to_free_p, p); 4231 count, block_to_free_p, p);
4232 if (err < 0)
4233 /* fatal error */
4234 return;
4298 4235
4299 if (this_bh) { 4236 if (this_bh) {
4300 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata"); 4237 BUFFER_TRACE(this_bh, "call ext4_handle_dirty_metadata");
@@ -4412,7 +4349,7 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode,
4412 * transaction where the data blocks are 4349 * transaction where the data blocks are
4413 * actually freed. 4350 * actually freed.
4414 */ 4351 */
4415 ext4_free_blocks(handle, inode, 0, nr, 1, 4352 ext4_free_blocks(handle, inode, NULL, nr, 1,
4416 EXT4_FREE_BLOCKS_METADATA| 4353 EXT4_FREE_BLOCKS_METADATA|
4417 EXT4_FREE_BLOCKS_FORGET); 4354 EXT4_FREE_BLOCKS_FORGET);
4418 4355
@@ -4496,6 +4433,8 @@ void ext4_truncate(struct inode *inode)
4496 ext4_lblk_t last_block; 4433 ext4_lblk_t last_block;
4497 unsigned blocksize = inode->i_sb->s_blocksize; 4434 unsigned blocksize = inode->i_sb->s_blocksize;
4498 4435
4436 trace_ext4_truncate_enter(inode);
4437
4499 if (!ext4_can_truncate(inode)) 4438 if (!ext4_can_truncate(inode))
4500 return; 4439 return;
4501 4440
@@ -4506,6 +4445,7 @@ void ext4_truncate(struct inode *inode)
4506 4445
4507 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { 4446 if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4508 ext4_ext_truncate(inode); 4447 ext4_ext_truncate(inode);
4448 trace_ext4_truncate_exit(inode);
4509 return; 4449 return;
4510 } 4450 }
4511 4451
@@ -4635,6 +4575,7 @@ out_stop:
4635 ext4_orphan_del(handle, inode); 4575 ext4_orphan_del(handle, inode);
4636 4576
4637 ext4_journal_stop(handle); 4577 ext4_journal_stop(handle);
4578 trace_ext4_truncate_exit(inode);
4638} 4579}
4639 4580
4640/* 4581/*
@@ -4766,6 +4707,7 @@ make_io:
4766 * has in-inode xattrs, or we don't have this inode in memory. 4707 * has in-inode xattrs, or we don't have this inode in memory.
4767 * Read the block from disk. 4708 * Read the block from disk.
4768 */ 4709 */
4710 trace_ext4_load_inode(inode);
4769 get_bh(bh); 4711 get_bh(bh);
4770 bh->b_end_io = end_buffer_read_sync; 4712 bh->b_end_io = end_buffer_read_sync;
4771 submit_bh(READ_META, bh); 4713 submit_bh(READ_META, bh);
@@ -4871,7 +4813,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4871 return inode; 4813 return inode;
4872 4814
4873 ei = EXT4_I(inode); 4815 ei = EXT4_I(inode);
4874 iloc.bh = 0; 4816 iloc.bh = NULL;
4875 4817
4876 ret = __ext4_get_inode_loc(inode, &iloc, 0); 4818 ret = __ext4_get_inode_loc(inode, &iloc, 0);
4877 if (ret < 0) 4819 if (ret < 0)
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index a84faa110bc..808c554e773 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -334,16 +334,22 @@ mext_out:
334 case FITRIM: 334 case FITRIM:
335 { 335 {
336 struct super_block *sb = inode->i_sb; 336 struct super_block *sb = inode->i_sb;
337 struct request_queue *q = bdev_get_queue(sb->s_bdev);
337 struct fstrim_range range; 338 struct fstrim_range range;
338 int ret = 0; 339 int ret = 0;
339 340
340 if (!capable(CAP_SYS_ADMIN)) 341 if (!capable(CAP_SYS_ADMIN))
341 return -EPERM; 342 return -EPERM;
342 343
344 if (!blk_queue_discard(q))
345 return -EOPNOTSUPP;
346
343 if (copy_from_user(&range, (struct fstrim_range *)arg, 347 if (copy_from_user(&range, (struct fstrim_range *)arg,
344 sizeof(range))) 348 sizeof(range)))
345 return -EFAULT; 349 return -EFAULT;
346 350
351 range.minlen = max((unsigned int)range.minlen,
352 q->limits.discard_granularity);
347 ret = ext4_trim_fs(sb, &range); 353 ret = ext4_trim_fs(sb, &range);
348 if (ret < 0) 354 if (ret < 0)
349 return ret; 355 return ret;
@@ -421,6 +427,7 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
421 return err; 427 return err;
422 } 428 }
423 case EXT4_IOC_MOVE_EXT: 429 case EXT4_IOC_MOVE_EXT:
430 case FITRIM:
424 break; 431 break;
425 default: 432 default:
426 return -ENOIOCTLCMD; 433 return -ENOIOCTLCMD;
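
With the FITRIM case now rejecting devices that lack discard support, clamping minlen to the discard granularity, and being reachable through compat_ioctl, a minimal userspace sketch (not part of the patch, and it needs CAP_SYS_ADMIN) of issuing the ioctl against a mounted ext4 filesystem looks like this:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* FITRIM, struct fstrim_range */

int main(int argc, char **argv)
{
	struct fstrim_range range;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <ext4 mountpoint>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&range, 0, sizeof(range));
	range.len = (unsigned long long)-1;	/* whole filesystem */
	range.minlen = 0;	/* the kernel raises this to the discard granularity */

	if (ioctl(fd, FITRIM, &range) < 0)
		perror("FITRIM");	/* EOPNOTSUPP if the device has no discard support */
	else
		printf("FITRIM ok, range.len now %llu\n",
		       (unsigned long long)range.len);
	return 0;
}
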
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index d1fe09aea73..a5837a837a8 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -432,9 +432,10 @@ static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
432 } 432 }
433 433
434 /* at order 0 we see each particular block */ 434 /* at order 0 we see each particular block */
435 *max = 1 << (e4b->bd_blkbits + 3); 435 if (order == 0) {
436 if (order == 0) 436 *max = 1 << (e4b->bd_blkbits + 3);
437 return EXT4_MB_BITMAP(e4b); 437 return EXT4_MB_BITMAP(e4b);
438 }
438 439
439 bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order]; 440 bb = EXT4_MB_BUDDY(e4b) + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
440 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order]; 441 *max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];
@@ -616,7 +617,6 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
616 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); 617 MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);
617 618
618 grp = ext4_get_group_info(sb, e4b->bd_group); 619 grp = ext4_get_group_info(sb, e4b->bd_group);
619 buddy = mb_find_buddy(e4b, 0, &max);
620 list_for_each(cur, &grp->bb_prealloc_list) { 620 list_for_each(cur, &grp->bb_prealloc_list) {
621 ext4_group_t groupnr; 621 ext4_group_t groupnr;
622 struct ext4_prealloc_space *pa; 622 struct ext4_prealloc_space *pa;
@@ -635,7 +635,12 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
635#define mb_check_buddy(e4b) 635#define mb_check_buddy(e4b)
636#endif 636#endif
637 637
638/* FIXME!! need more doc */ 638/*
639 * Divide blocks started from @first with length @len into
640 * smaller chunks with power of 2 blocks.
641 * Clear the bits in the bitmap that the blocks of the chunk(s) cover,
642 * then increase bb_counters[] for the corresponding chunk size.
643 */
639static void ext4_mb_mark_free_simple(struct super_block *sb, 644static void ext4_mb_mark_free_simple(struct super_block *sb,
640 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len, 645 void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
641 struct ext4_group_info *grp) 646 struct ext4_group_info *grp)
@@ -2381,7 +2386,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
2381 /* An 8TB filesystem with 64-bit pointers requires a 4096 byte 2386 /* An 8TB filesystem with 64-bit pointers requires a 4096 byte
2382 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem. 2387 * kmalloc. A 128kb malloc should suffice for a 256TB filesystem.
2383 * So a two level scheme suffices for now. */ 2388 * So a two level scheme suffices for now. */
2384 sbi->s_group_info = kmalloc(array_size, GFP_KERNEL); 2389 sbi->s_group_info = kzalloc(array_size, GFP_KERNEL);
2385 if (sbi->s_group_info == NULL) { 2390 if (sbi->s_group_info == NULL) {
2386 printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n"); 2391 printk(KERN_ERR "EXT4-fs: can't allocate buddy meta group\n");
2387 return -ENOMEM; 2392 return -ENOMEM;
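
The kmalloc()-to-kzalloc() switch above is what lets the error paths avoid freeing uninitialized s_group_info members: with the array zeroed up front, cleanup can walk every slot and free only what was really allocated. A userspace analogy (not ext4 code) using calloc():

#include <stdlib.h>

int main(void)
{
	size_t n = 8;
	int **groups = calloc(n, sizeof(*groups));	/* every slot starts NULL */

	if (!groups)
		return 1;
	for (size_t i = 0; i < n; i++) {
		groups[i] = malloc(sizeof(int));
		if (!groups[i])
			break;			/* simulate a mid-way failure */
		*groups[i] = (int)i;
	}
	/* Cleanup can walk the whole array: NULL entries are simply skipped. */
	for (size_t i = 0; i < n; i++)
		free(groups[i]);
	free(groups);
	return 0;
}
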
@@ -3208,7 +3213,7 @@ ext4_mb_check_group_pa(ext4_fsblk_t goal_block,
3208 cur_distance = abs(goal_block - cpa->pa_pstart); 3213 cur_distance = abs(goal_block - cpa->pa_pstart);
3209 new_distance = abs(goal_block - pa->pa_pstart); 3214 new_distance = abs(goal_block - pa->pa_pstart);
3210 3215
3211 if (cur_distance < new_distance) 3216 if (cur_distance <= new_distance)
3212 return cpa; 3217 return cpa;
3213 3218
3214 /* drop the previous reference */ 3219 /* drop the previous reference */
@@ -3907,7 +3912,8 @@ static void ext4_mb_show_ac(struct ext4_allocation_context *ac)
3907 struct super_block *sb = ac->ac_sb; 3912 struct super_block *sb = ac->ac_sb;
3908 ext4_group_t ngroups, i; 3913 ext4_group_t ngroups, i;
3909 3914
3910 if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED) 3915 if (!mb_enable_debug ||
3916 (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED))
3911 return; 3917 return;
3912 3918
3913 printk(KERN_ERR "EXT4-fs: Can't allocate:" 3919 printk(KERN_ERR "EXT4-fs: Can't allocate:"
@@ -4753,7 +4759,8 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count,
4753 * bitmap. Then issue a TRIM command on this extent and free the extent in 4759 * bitmap. Then issue a TRIM command on this extent and free the extent in
4754 * the group buddy bitmap. This is done until whole group is scanned. 4760 * the group buddy bitmap. This is done until whole group is scanned.
4755 */ 4761 */
4756ext4_grpblk_t ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b, 4762static ext4_grpblk_t
4763ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b,
4757 ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks) 4764 ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks)
4758{ 4765{
4759 void *bitmap; 4766 void *bitmap;
@@ -4863,10 +4870,15 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range)
4863 break; 4870 break;
4864 } 4871 }
4865 4872
4866 if (len >= EXT4_BLOCKS_PER_GROUP(sb)) 4873 /*
4867 len -= (EXT4_BLOCKS_PER_GROUP(sb) - first_block); 4874 * For all the groups except the last one, last block will
4868 else 4875 * always be EXT4_BLOCKS_PER_GROUP(sb), so we only need to
4876 * change it for the last group in which case start +
4877 * len < EXT4_BLOCKS_PER_GROUP(sb).
4878 */
4879 if (first_block + len < EXT4_BLOCKS_PER_GROUP(sb))
4869 last_block = first_block + len; 4880 last_block = first_block + len;
4881 len -= last_block - first_block;
4870 4882
4871 if (e4b.bd_info->bb_free >= minlen) { 4883 if (e4b.bd_info->bb_free >= minlen) {
4872 cnt = ext4_trim_all_free(sb, &e4b, first_block, 4884 cnt = ext4_trim_all_free(sb, &e4b, first_block,
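
A small standalone sketch (with made-up block counts, not ext4 code) of how the remaining trim length is consumed after this fix: every group except the last spans the full EXT4_BLOCKS_PER_GROUP(sb), and only the final group can end before the group boundary.

#include <stdio.h>

#define BLOCKS_PER_GROUP 32768ULL	/* stand-in for EXT4_BLOCKS_PER_GROUP(sb) */

int main(void)
{
	unsigned long long first_block = 1000;	/* offset inside the first group */
	unsigned long long len = 100000;	/* blocks left to trim */
	unsigned int group = 0;

	while (len > 0) {
		unsigned long long last_block = BLOCKS_PER_GROUP;

		/* Only the final group can end before the group boundary. */
		if (first_block + len < BLOCKS_PER_GROUP)
			last_block = first_block + len;
		len -= last_block - first_block;

		printf("group %u: trim [%llu, %llu), %llu blocks left\n",
		       group++, first_block, last_block, len);
		first_block = 0;	/* later groups start at block 0 */
	}
	return 0;
}
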
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index b619322c76f..22bd4d7f289 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -169,7 +169,7 @@ struct ext4_allocation_context {
169 /* original request */ 169 /* original request */
170 struct ext4_free_extent ac_o_ex; 170 struct ext4_free_extent ac_o_ex;
171 171
172 /* goal request (after normalization) */ 172 /* goal request (normalized ac_o_ex) */
173 struct ext4_free_extent ac_g_ex; 173 struct ext4_free_extent ac_g_ex;
174 174
175 /* the best found extent */ 175 /* the best found extent */
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index b0a126f23c2..d1bafa57f48 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -263,7 +263,7 @@ static int free_dind_blocks(handle_t *handle,
263 for (i = 0; i < max_entries; i++) { 263 for (i = 0; i < max_entries; i++) {
264 if (tmp_idata[i]) { 264 if (tmp_idata[i]) {
265 extend_credit_for_blkdel(handle, inode); 265 extend_credit_for_blkdel(handle, inode);
266 ext4_free_blocks(handle, inode, 0, 266 ext4_free_blocks(handle, inode, NULL,
267 le32_to_cpu(tmp_idata[i]), 1, 267 le32_to_cpu(tmp_idata[i]), 1,
268 EXT4_FREE_BLOCKS_METADATA | 268 EXT4_FREE_BLOCKS_METADATA |
269 EXT4_FREE_BLOCKS_FORGET); 269 EXT4_FREE_BLOCKS_FORGET);
@@ -271,7 +271,7 @@ static int free_dind_blocks(handle_t *handle,
271 } 271 }
272 put_bh(bh); 272 put_bh(bh);
273 extend_credit_for_blkdel(handle, inode); 273 extend_credit_for_blkdel(handle, inode);
274 ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1, 274 ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
275 EXT4_FREE_BLOCKS_METADATA | 275 EXT4_FREE_BLOCKS_METADATA |
276 EXT4_FREE_BLOCKS_FORGET); 276 EXT4_FREE_BLOCKS_FORGET);
277 return 0; 277 return 0;
@@ -302,7 +302,7 @@ static int free_tind_blocks(handle_t *handle,
302 } 302 }
303 put_bh(bh); 303 put_bh(bh);
304 extend_credit_for_blkdel(handle, inode); 304 extend_credit_for_blkdel(handle, inode);
305 ext4_free_blocks(handle, inode, 0, le32_to_cpu(i_data), 1, 305 ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
306 EXT4_FREE_BLOCKS_METADATA | 306 EXT4_FREE_BLOCKS_METADATA |
307 EXT4_FREE_BLOCKS_FORGET); 307 EXT4_FREE_BLOCKS_FORGET);
308 return 0; 308 return 0;
@@ -315,7 +315,7 @@ static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
315 /* ei->i_data[EXT4_IND_BLOCK] */ 315 /* ei->i_data[EXT4_IND_BLOCK] */
316 if (i_data[0]) { 316 if (i_data[0]) {
317 extend_credit_for_blkdel(handle, inode); 317 extend_credit_for_blkdel(handle, inode);
318 ext4_free_blocks(handle, inode, 0, 318 ext4_free_blocks(handle, inode, NULL,
319 le32_to_cpu(i_data[0]), 1, 319 le32_to_cpu(i_data[0]), 1,
320 EXT4_FREE_BLOCKS_METADATA | 320 EXT4_FREE_BLOCKS_METADATA |
321 EXT4_FREE_BLOCKS_FORGET); 321 EXT4_FREE_BLOCKS_FORGET);
@@ -428,7 +428,7 @@ static int free_ext_idx(handle_t *handle, struct inode *inode,
428 } 428 }
429 put_bh(bh); 429 put_bh(bh);
430 extend_credit_for_blkdel(handle, inode); 430 extend_credit_for_blkdel(handle, inode);
431 ext4_free_blocks(handle, inode, 0, block, 1, 431 ext4_free_blocks(handle, inode, NULL, block, 1,
432 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 432 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
433 return retval; 433 return retval;
434} 434}
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index e781b7ea563..67fd0b02585 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -40,6 +40,7 @@
40#include "xattr.h" 40#include "xattr.h"
41#include "acl.h" 41#include "acl.h"
42 42
43#include <trace/events/ext4.h>
43/* 44/*
44 * define how far ahead to read directories while searching them. 45 * define how far ahead to read directories while searching them.
45 */ 46 */
@@ -2183,6 +2184,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
2183 struct ext4_dir_entry_2 *de; 2184 struct ext4_dir_entry_2 *de;
2184 handle_t *handle; 2185 handle_t *handle;
2185 2186
2187 trace_ext4_unlink_enter(dir, dentry);
2186 /* Initialize quotas before so that eventual writes go 2188 /* Initialize quotas before so that eventual writes go
2187 * in separate transaction */ 2189 * in separate transaction */
2188 dquot_initialize(dir); 2190 dquot_initialize(dir);
@@ -2228,6 +2230,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
2228end_unlink: 2230end_unlink:
2229 ext4_journal_stop(handle); 2231 ext4_journal_stop(handle);
2230 brelse(bh); 2232 brelse(bh);
2233 trace_ext4_unlink_exit(dentry, retval);
2231 return retval; 2234 return retval;
2232} 2235}
2233 2236
@@ -2402,6 +2405,10 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2402 if (!new_inode && new_dir != old_dir && 2405 if (!new_inode && new_dir != old_dir &&
2403 EXT4_DIR_LINK_MAX(new_dir)) 2406 EXT4_DIR_LINK_MAX(new_dir))
2404 goto end_rename; 2407 goto end_rename;
2408 BUFFER_TRACE(dir_bh, "get_write_access");
2409 retval = ext4_journal_get_write_access(handle, dir_bh);
2410 if (retval)
2411 goto end_rename;
2405 } 2412 }
2406 if (!new_bh) { 2413 if (!new_bh) {
2407 retval = ext4_add_entry(handle, new_dentry, old_inode); 2414 retval = ext4_add_entry(handle, new_dentry, old_inode);
@@ -2409,7 +2416,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2409 goto end_rename; 2416 goto end_rename;
2410 } else { 2417 } else {
2411 BUFFER_TRACE(new_bh, "get write access"); 2418 BUFFER_TRACE(new_bh, "get write access");
2412 ext4_journal_get_write_access(handle, new_bh); 2419 retval = ext4_journal_get_write_access(handle, new_bh);
2420 if (retval)
2421 goto end_rename;
2413 new_de->inode = cpu_to_le32(old_inode->i_ino); 2422 new_de->inode = cpu_to_le32(old_inode->i_ino);
2414 if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb, 2423 if (EXT4_HAS_INCOMPAT_FEATURE(new_dir->i_sb,
2415 EXT4_FEATURE_INCOMPAT_FILETYPE)) 2424 EXT4_FEATURE_INCOMPAT_FILETYPE))
@@ -2470,8 +2479,6 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2470 old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir); 2479 old_dir->i_ctime = old_dir->i_mtime = ext4_current_time(old_dir);
2471 ext4_update_dx_flag(old_dir); 2480 ext4_update_dx_flag(old_dir);
2472 if (dir_bh) { 2481 if (dir_bh) {
2473 BUFFER_TRACE(dir_bh, "get_write_access");
2474 ext4_journal_get_write_access(handle, dir_bh);
2475 PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) = 2482 PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
2476 cpu_to_le32(new_dir->i_ino); 2483 cpu_to_le32(new_dir->i_ino);
2477 BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata"); 2484 BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
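
The ext4_rename() hunks above request journal write access on the parent directory block before modifying it and actually check the return value; previously the access was requested after the fact and any error was ignored. A simplified sketch of that ordering, with hypothetical helpers standing in for the jbd2 calls:

#include <stdio.h>

/* Hypothetical stand-in for ext4_journal_get_write_access(). */
static int journal_get_write_access(const char *what)
{
	printf("get_write_access(%s)\n", what);
	return 0;			/* 0 on success, -errno on failure */
}

static int rename_update_parent(void)
{
	int retval;

	retval = journal_get_write_access("dir_bh");
	if (retval)
		return retval;		/* bail out before the block is modified */

	/* Only now is the '..' entry rewritten and the buffer marked dirty. */
	printf("update PARENT_INO and call ext4_handle_dirty_metadata()\n");
	return 0;
}

int main(void)
{
	return rename_update_parent() ? 1 : 0;
}
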
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index e2cd90e4bb7..b6dbd056fcb 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -259,6 +259,11 @@ static void ext4_end_bio(struct bio *bio, int error)
259 bi_sector >> (inode->i_blkbits - 9)); 259 bi_sector >> (inode->i_blkbits - 9));
260 } 260 }
261 261
262 if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
263 ext4_free_io_end(io_end);
264 return;
265 }
266
262 /* Add the io_end to per-inode completed io list*/ 267 /* Add the io_end to per-inode completed io list*/
263 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags); 268 spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
264 list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list); 269 list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
@@ -279,9 +284,9 @@ void ext4_io_submit(struct ext4_io_submit *io)
279 BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP)); 284 BUG_ON(bio_flagged(io->io_bio, BIO_EOPNOTSUPP));
280 bio_put(io->io_bio); 285 bio_put(io->io_bio);
281 } 286 }
282 io->io_bio = 0; 287 io->io_bio = NULL;
283 io->io_op = 0; 288 io->io_op = 0;
284 io->io_end = 0; 289 io->io_end = NULL;
285} 290}
286 291
287static int io_submit_init(struct ext4_io_submit *io, 292static int io_submit_init(struct ext4_io_submit *io,
@@ -380,8 +385,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
380 385
381 BUG_ON(!PageLocked(page)); 386 BUG_ON(!PageLocked(page));
382 BUG_ON(PageWriteback(page)); 387 BUG_ON(PageWriteback(page));
383 set_page_writeback(page);
384 ClearPageError(page);
385 388
386 io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS); 389 io_page = kmem_cache_alloc(io_page_cachep, GFP_NOFS);
387 if (!io_page) { 390 if (!io_page) {
@@ -392,6 +395,8 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
392 io_page->p_page = page; 395 io_page->p_page = page;
393 atomic_set(&io_page->p_count, 1); 396 atomic_set(&io_page->p_count, 1);
394 get_page(page); 397 get_page(page);
398 set_page_writeback(page);
399 ClearPageError(page);
395 400
396 for (bh = head = page_buffers(page), block_start = 0; 401 for (bh = head = page_buffers(page), block_start = 0;
397 bh != head || !block_start; 402 bh != head || !block_start;
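
In ext4_bio_write_page() the page is now flagged for writeback only after the io_page allocation, the one step here that can fail, has succeeded, so an early -ENOMEM no longer leaves the page stuck in the writeback state. A toy sketch of that ordering, with stand-in types rather than the real page machinery:

#include <stdio.h>
#include <stdlib.h>

struct io_page { int p_count; };	/* stand-in for the real io_page */

static void set_page_writeback(void)
{
	puts("page marked as under writeback");
}

static int bio_write_page(void)
{
	struct io_page *io_page = malloc(sizeof(*io_page));

	if (!io_page)
		return -12;	/* -ENOMEM: fail before any page state changed */
	io_page->p_count = 1;

	set_page_writeback();	/* only flagged once the allocation succeeded */
	/* ... attach buffers, build the bio, submit ... */
	free(io_page);
	return 0;
}

int main(void)
{
	return bio_write_page() ? 1 : 0;
}
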
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 3ecc6e45d2f..80bbc9c60c2 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -230,7 +230,7 @@ static int setup_new_group_blocks(struct super_block *sb,
230 } 230 }
231 231
232 /* Zero out all of the reserved backup group descriptor table blocks */ 232 /* Zero out all of the reserved backup group descriptor table blocks */
233 ext4_debug("clear inode table blocks %#04llx -> %#04llx\n", 233 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
234 block, sbi->s_itb_per_group); 234 block, sbi->s_itb_per_group);
235 err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb, 235 err = sb_issue_zeroout(sb, gdblocks + start + 1, reserved_gdb,
236 GFP_NOFS); 236 GFP_NOFS);
@@ -248,7 +248,7 @@ static int setup_new_group_blocks(struct super_block *sb,
248 248
249 /* Zero out all of the inode table blocks */ 249 /* Zero out all of the inode table blocks */
250 block = input->inode_table; 250 block = input->inode_table;
251 ext4_debug("clear inode table blocks %#04llx -> %#04llx\n", 251 ext4_debug("clear inode table blocks %#04llx -> %#04lx\n",
252 block, sbi->s_itb_per_group); 252 block, sbi->s_itb_per_group);
253 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS); 253 err = sb_issue_zeroout(sb, block, sbi->s_itb_per_group, GFP_NOFS);
254 if (err) 254 if (err)
@@ -499,12 +499,12 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
499 return err; 499 return err;
500 500
501exit_inode: 501exit_inode:
502 /* ext4_journal_release_buffer(handle, iloc.bh); */ 502 /* ext4_handle_release_buffer(handle, iloc.bh); */
503 brelse(iloc.bh); 503 brelse(iloc.bh);
504exit_dindj: 504exit_dindj:
505 /* ext4_journal_release_buffer(handle, dind); */ 505 /* ext4_handle_release_buffer(handle, dind); */
506exit_sbh: 506exit_sbh:
507 /* ext4_journal_release_buffer(handle, EXT4_SB(sb)->s_sbh); */ 507 /* ext4_handle_release_buffer(handle, EXT4_SB(sb)->s_sbh); */
508exit_dind: 508exit_dind:
509 brelse(dind); 509 brelse(dind);
510exit_bh: 510exit_bh:
@@ -586,7 +586,7 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
586 /* 586 /*
587 int j; 587 int j;
588 for (j = 0; j < i; j++) 588 for (j = 0; j < i; j++)
589 ext4_journal_release_buffer(handle, primary[j]); 589 ext4_handle_release_buffer(handle, primary[j]);
590 */ 590 */
591 goto exit_bh; 591 goto exit_bh;
592 } 592 }
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 203f9e4a70b..22546ad7f0a 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -54,9 +54,9 @@
54 54
55static struct proc_dir_entry *ext4_proc_root; 55static struct proc_dir_entry *ext4_proc_root;
56static struct kset *ext4_kset; 56static struct kset *ext4_kset;
57struct ext4_lazy_init *ext4_li_info; 57static struct ext4_lazy_init *ext4_li_info;
58struct mutex ext4_li_mtx; 58static struct mutex ext4_li_mtx;
59struct ext4_features *ext4_feat; 59static struct ext4_features *ext4_feat;
60 60
61static int ext4_load_journal(struct super_block *, struct ext4_super_block *, 61static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
62 unsigned long journal_devnum); 62 unsigned long journal_devnum);
@@ -75,6 +75,7 @@ static void ext4_write_super(struct super_block *sb);
75static int ext4_freeze(struct super_block *sb); 75static int ext4_freeze(struct super_block *sb);
76static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, 76static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags,
77 const char *dev_name, void *data); 77 const char *dev_name, void *data);
78static int ext4_feature_set_ok(struct super_block *sb, int readonly);
78static void ext4_destroy_lazyinit_thread(void); 79static void ext4_destroy_lazyinit_thread(void);
79static void ext4_unregister_li_request(struct super_block *sb); 80static void ext4_unregister_li_request(struct super_block *sb);
80static void ext4_clear_request_list(void); 81static void ext4_clear_request_list(void);
@@ -594,7 +595,7 @@ __acquires(bitlock)
594 595
595 vaf.fmt = fmt; 596 vaf.fmt = fmt;
596 vaf.va = &args; 597 vaf.va = &args;
597 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u", 598 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u, ",
598 sb->s_id, function, line, grp); 599 sb->s_id, function, line, grp);
599 if (ino) 600 if (ino)
600 printk(KERN_CONT "inode %lu: ", ino); 601 printk(KERN_CONT "inode %lu: ", ino);
@@ -997,13 +998,10 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
997 if (test_opt(sb, OLDALLOC)) 998 if (test_opt(sb, OLDALLOC))
998 seq_puts(seq, ",oldalloc"); 999 seq_puts(seq, ",oldalloc");
999#ifdef CONFIG_EXT4_FS_XATTR 1000#ifdef CONFIG_EXT4_FS_XATTR
1000 if (test_opt(sb, XATTR_USER) && 1001 if (test_opt(sb, XATTR_USER))
1001 !(def_mount_opts & EXT4_DEFM_XATTR_USER))
1002 seq_puts(seq, ",user_xattr"); 1002 seq_puts(seq, ",user_xattr");
1003 if (!test_opt(sb, XATTR_USER) && 1003 if (!test_opt(sb, XATTR_USER))
1004 (def_mount_opts & EXT4_DEFM_XATTR_USER)) {
1005 seq_puts(seq, ",nouser_xattr"); 1004 seq_puts(seq, ",nouser_xattr");
1006 }
1007#endif 1005#endif
1008#ifdef CONFIG_EXT4_FS_POSIX_ACL 1006#ifdef CONFIG_EXT4_FS_POSIX_ACL
1009 if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL)) 1007 if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL))
@@ -1041,8 +1039,8 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
1041 !(def_mount_opts & EXT4_DEFM_NODELALLOC)) 1039 !(def_mount_opts & EXT4_DEFM_NODELALLOC))
1042 seq_puts(seq, ",nodelalloc"); 1040 seq_puts(seq, ",nodelalloc");
1043 1041
1044 if (test_opt(sb, MBLK_IO_SUBMIT)) 1042 if (!test_opt(sb, MBLK_IO_SUBMIT))
1045 seq_puts(seq, ",mblk_io_submit"); 1043 seq_puts(seq, ",nomblk_io_submit");
1046 if (sbi->s_stripe) 1044 if (sbi->s_stripe)
1047 seq_printf(seq, ",stripe=%lu", sbi->s_stripe); 1045 seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
1048 /* 1046 /*
@@ -1451,7 +1449,7 @@ static int parse_options(char *options, struct super_block *sb,
1451 * Initialize args struct so we know whether arg was 1449 * Initialize args struct so we know whether arg was
1452 * found; some options take optional arguments. 1450 * found; some options take optional arguments.
1453 */ 1451 */
1454 args[0].to = args[0].from = 0; 1452 args[0].to = args[0].from = NULL;
1455 token = match_token(p, tokens, args); 1453 token = match_token(p, tokens, args);
1456 switch (token) { 1454 switch (token) {
1457 case Opt_bsd_df: 1455 case Opt_bsd_df:
@@ -1771,7 +1769,7 @@ set_qf_format:
1771 return 0; 1769 return 0;
1772 if (option < 0 || option > (1 << 30)) 1770 if (option < 0 || option > (1 << 30))
1773 return 0; 1771 return 0;
1774 if (!is_power_of_2(option)) { 1772 if (option && !is_power_of_2(option)) {
1775 ext4_msg(sb, KERN_ERR, 1773 ext4_msg(sb, KERN_ERR,
1776 "EXT4-fs: inode_readahead_blks" 1774 "EXT4-fs: inode_readahead_blks"
1777 " must be a power of 2"); 1775 " must be a power of 2");
@@ -2120,6 +2118,13 @@ static void ext4_orphan_cleanup(struct super_block *sb,
2120 return; 2118 return;
2121 } 2119 }
2122 2120
2121 /* Check if feature set would not allow a r/w mount */
2122 if (!ext4_feature_set_ok(sb, 0)) {
2123 ext4_msg(sb, KERN_INFO, "Skipping orphan cleanup due to "
2124 "unknown ROCOMPAT features");
2125 return;
2126 }
2127
2123 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { 2128 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
2124 if (es->s_last_orphan) 2129 if (es->s_last_orphan)
2125 jbd_debug(1, "Errors on filesystem, " 2130 jbd_debug(1, "Errors on filesystem, "
@@ -2412,7 +2417,7 @@ static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
2412 if (parse_strtoul(buf, 0x40000000, &t)) 2417 if (parse_strtoul(buf, 0x40000000, &t))
2413 return -EINVAL; 2418 return -EINVAL;
2414 2419
2415 if (!is_power_of_2(t)) 2420 if (t && !is_power_of_2(t))
2416 return -EINVAL; 2421 return -EINVAL;
2417 2422
2418 sbi->s_inode_readahead_blks = t; 2423 sbi->s_inode_readahead_blks = t;
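
Both inode_readahead_blks hunks relax the same validation: zero is now accepted and simply disables inode-table readahead, while any non-zero value must still be a power of two no larger than 2^30. A minimal sketch of the check:

#include <stdbool.h>
#include <stdio.h>

static bool is_power_of_2(unsigned long n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

static bool inode_readahead_blks_valid(long option)
{
	if (option < 0 || option > (1 << 30))
		return false;
	/* 0 is allowed and simply turns the readahead off. */
	return option == 0 || is_power_of_2((unsigned long)option);
}

int main(void)
{
	long samples[] = { 0, 1, 3, 32, 4096, (1 << 30), (1 << 30) + 1, -8 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%ld -> %s\n", samples[i],
		       inode_readahead_blks_valid(samples[i]) ? "accepted" : "rejected");
	return 0;
}
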
@@ -3095,14 +3100,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3095 } 3100 }
3096 if (def_mount_opts & EXT4_DEFM_UID16) 3101 if (def_mount_opts & EXT4_DEFM_UID16)
3097 set_opt(sb, NO_UID32); 3102 set_opt(sb, NO_UID32);
3103 /* xattr user namespace & acls are now defaulted on */
3098#ifdef CONFIG_EXT4_FS_XATTR 3104#ifdef CONFIG_EXT4_FS_XATTR
3099 if (def_mount_opts & EXT4_DEFM_XATTR_USER) 3105 set_opt(sb, XATTR_USER);
3100 set_opt(sb, XATTR_USER);
3101#endif 3106#endif
3102#ifdef CONFIG_EXT4_FS_POSIX_ACL 3107#ifdef CONFIG_EXT4_FS_POSIX_ACL
3103 if (def_mount_opts & EXT4_DEFM_ACL) 3108 set_opt(sb, POSIX_ACL);
3104 set_opt(sb, POSIX_ACL);
3105#endif 3109#endif
3110 set_opt(sb, MBLK_IO_SUBMIT);
3106 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) 3111 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
3107 set_opt(sb, JOURNAL_DATA); 3112 set_opt(sb, JOURNAL_DATA);
3108 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) 3113 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
@@ -3516,7 +3521,7 @@ no_journal:
3516 * concurrency isn't really necessary. Limit it to 1. 3521 * concurrency isn't really necessary. Limit it to 1.
3517 */ 3522 */
3518 EXT4_SB(sb)->dio_unwritten_wq = 3523 EXT4_SB(sb)->dio_unwritten_wq =
3519 alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM, 1); 3524 alloc_workqueue("ext4-dio-unwritten", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
3520 if (!EXT4_SB(sb)->dio_unwritten_wq) { 3525 if (!EXT4_SB(sb)->dio_unwritten_wq) {
3521 printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n"); 3526 printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
3522 goto failed_mount_wq; 3527 goto failed_mount_wq;
@@ -3531,17 +3536,16 @@ no_journal:
3531 if (IS_ERR(root)) { 3536 if (IS_ERR(root)) {
3532 ext4_msg(sb, KERN_ERR, "get root inode failed"); 3537 ext4_msg(sb, KERN_ERR, "get root inode failed");
3533 ret = PTR_ERR(root); 3538 ret = PTR_ERR(root);
3539 root = NULL;
3534 goto failed_mount4; 3540 goto failed_mount4;
3535 } 3541 }
3536 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) { 3542 if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
3537 iput(root);
3538 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck"); 3543 ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
3539 goto failed_mount4; 3544 goto failed_mount4;
3540 } 3545 }
3541 sb->s_root = d_alloc_root(root); 3546 sb->s_root = d_alloc_root(root);
3542 if (!sb->s_root) { 3547 if (!sb->s_root) {
3543 ext4_msg(sb, KERN_ERR, "get root dentry failed"); 3548 ext4_msg(sb, KERN_ERR, "get root dentry failed");
3544 iput(root);
3545 ret = -ENOMEM; 3549 ret = -ENOMEM;
3546 goto failed_mount4; 3550 goto failed_mount4;
3547 } 3551 }
@@ -3657,6 +3661,8 @@ cantfind_ext4:
3657 goto failed_mount; 3661 goto failed_mount;
3658 3662
3659failed_mount4: 3663failed_mount4:
3664 iput(root);
3665 sb->s_root = NULL;
3660 ext4_msg(sb, KERN_ERR, "mount failed"); 3666 ext4_msg(sb, KERN_ERR, "mount failed");
3661 destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq); 3667 destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
3662failed_mount_wq: 3668failed_mount_wq:
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fc32176eee3..b545ca1c459 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -735,7 +735,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
735 int offset = (char *)s->here - bs->bh->b_data; 735 int offset = (char *)s->here - bs->bh->b_data;
736 736
737 unlock_buffer(bs->bh); 737 unlock_buffer(bs->bh);
738 jbd2_journal_release_buffer(handle, bs->bh); 738 ext4_handle_release_buffer(handle, bs->bh);
739 if (ce) { 739 if (ce) {
740 mb_cache_entry_release(ce); 740 mb_cache_entry_release(ce);
741 ce = NULL; 741 ce = NULL;
@@ -833,7 +833,7 @@ inserted:
833 new_bh = sb_getblk(sb, block); 833 new_bh = sb_getblk(sb, block);
834 if (!new_bh) { 834 if (!new_bh) {
835getblk_failed: 835getblk_failed:
836 ext4_free_blocks(handle, inode, 0, block, 1, 836 ext4_free_blocks(handle, inode, NULL, block, 1,
837 EXT4_FREE_BLOCKS_METADATA); 837 EXT4_FREE_BLOCKS_METADATA);
838 error = -EIO; 838 error = -EIO;
839 goto cleanup; 839 goto cleanup;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 27e79c27ba0..a32dcaec04e 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -432,13 +432,35 @@ struct jbd2_journal_handle
432 int h_err; 432 int h_err;
433 433
434 /* Flags [no locking] */ 434 /* Flags [no locking] */
435 unsigned int h_sync: 1; /* sync-on-close */ 435 unsigned int h_sync:1; /* sync-on-close */
436 unsigned int h_jdata: 1; /* force data journaling */ 436 unsigned int h_jdata:1; /* force data journaling */
437 unsigned int h_aborted: 1; /* fatal error on handle */ 437 unsigned int h_aborted:1; /* fatal error on handle */
438 unsigned int h_cowing:1; /* COWing block to snapshot */
439
440 /* Number of buffers requested by user:
441 * (before adding the COW credits factor) */
442 unsigned int h_base_credits:14;
443
444 /* Number of buffers the user is allowed to dirty:
445 * (counts only buffers dirtied when !h_cowing) */
446 unsigned int h_user_credits:14;
447
438 448
439#ifdef CONFIG_DEBUG_LOCK_ALLOC 449#ifdef CONFIG_DEBUG_LOCK_ALLOC
440 struct lockdep_map h_lockdep_map; 450 struct lockdep_map h_lockdep_map;
441#endif 451#endif
452
453#ifdef CONFIG_JBD2_DEBUG
454 /* COW debugging counters: */
455 unsigned int h_cow_moved; /* blocks moved to snapshot */
456 unsigned int h_cow_copied; /* blocks copied to snapshot */
457 unsigned int h_cow_ok_jh; /* blocks already COWed during current
458 transaction */
459 unsigned int h_cow_ok_bitmap; /* blocks not set in COW bitmap */
460 unsigned int h_cow_ok_mapped;/* blocks already mapped in snapshot */
461 unsigned int h_cow_bitmaps; /* COW bitmaps created */
462 unsigned int h_cow_excluded; /* blocks set in exclude bitmap */
463#endif
442}; 464};
443 465
444 466
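
The new COW fields pack into the same word as the existing handle flags: 1+1+1+1+14+14 bits is exactly 32, so the two credit counters top out at 16383. A standalone sketch of the layout, where only the field names mirror the struct above:

#include <stdio.h>

struct handle_flags {			/* mirrors the bit-fields added above */
	unsigned int h_sync:1;
	unsigned int h_jdata:1;
	unsigned int h_aborted:1;
	unsigned int h_cowing:1;
	unsigned int h_base_credits:14;
	unsigned int h_user_credits:14;
};

int main(void)
{
	struct handle_flags h = { .h_cowing = 1, .h_base_credits = (1 << 14) - 1 };

	/* 1+1+1+1+14+14 = 32 bits, so this typically packs into 4 bytes. */
	printf("sizeof(struct handle_flags) = %zu\n", sizeof(h));
	printf("max credits per 14-bit counter = %u\n", (1u << 14) - 1);
	printf("h_cowing=%u h_base_credits=%u\n", h.h_cowing, h.h_base_credits);
	return 0;
}
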
diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h
index 525aac3c97d..44e95d0a721 100644
--- a/include/linux/journal-head.h
+++ b/include/linux/journal-head.h
@@ -41,6 +41,13 @@ struct journal_head {
41 unsigned b_modified; 41 unsigned b_modified;
42 42
43 /* 43 /*
44 * This field tracks the last transaction id in which this buffer
45 * has been COWed
46 * [jbd_lock_bh_state()]
47 */
48 unsigned b_cow_tid;
49
50 /*
44 * Copy of the buffer data frozen for writing to the log. 51 * Copy of the buffer data frozen for writing to the log.
45 * [jbd_lock_bh_state()] 52 * [jbd_lock_bh_state()]
46 */ 53 */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index e5e345fb2a5..e09592d2f91 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -21,8 +21,7 @@ TRACE_EVENT(ext4_free_inode,
21 TP_ARGS(inode), 21 TP_ARGS(inode),
22 22
23 TP_STRUCT__entry( 23 TP_STRUCT__entry(
24 __field( int, dev_major ) 24 __field( dev_t, dev )
25 __field( int, dev_minor )
26 __field( ino_t, ino ) 25 __field( ino_t, ino )
27 __field( umode_t, mode ) 26 __field( umode_t, mode )
28 __field( uid_t, uid ) 27 __field( uid_t, uid )
@@ -31,8 +30,7 @@ TRACE_EVENT(ext4_free_inode,
31 ), 30 ),
32 31
33 TP_fast_assign( 32 TP_fast_assign(
34 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 33 __entry->dev = inode->i_sb->s_dev;
35 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
36 __entry->ino = inode->i_ino; 34 __entry->ino = inode->i_ino;
37 __entry->mode = inode->i_mode; 35 __entry->mode = inode->i_mode;
38 __entry->uid = inode->i_uid; 36 __entry->uid = inode->i_uid;
@@ -41,9 +39,9 @@ TRACE_EVENT(ext4_free_inode,
41 ), 39 ),
42 40
43 TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu", 41 TP_printk("dev %d,%d ino %lu mode 0%o uid %u gid %u blocks %llu",
44 __entry->dev_major, __entry->dev_minor, 42 MAJOR(__entry->dev), MINOR(__entry->dev),
45 (unsigned long) __entry->ino, __entry->mode, 43 (unsigned long) __entry->ino,
46 __entry->uid, __entry->gid, 44 __entry->mode, __entry->uid, __entry->gid,
47 (unsigned long long) __entry->blocks) 45 (unsigned long long) __entry->blocks)
48); 46);
49 47
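
The tracepoint conversion that starts here stores a single dev_t per event instead of separate major/minor ints and splits it only in TP_printk(); the same pattern repeats for every ext4 event that follows. A userspace sketch of the decode using the glibc macros:

#include <stdio.h>
#include <sys/sysmacros.h>	/* major(), minor(), makedev() */
#include <sys/types.h>

int main(void)
{
	dev_t dev = makedev(8, 3);	/* e.g. a SCSI disk partition */

	/* Old layout: two ints recorded per event. */
	int dev_major = major(dev), dev_minor = minor(dev);
	printf("old: dev %d,%d\n", dev_major, dev_minor);

	/* New layout: one dev_t recorded, decoded only when printed. */
	printf("new: dev %u,%u\n", major(dev), minor(dev));
	return 0;
}
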
@@ -53,21 +51,19 @@ TRACE_EVENT(ext4_request_inode,
53 TP_ARGS(dir, mode), 51 TP_ARGS(dir, mode),
54 52
55 TP_STRUCT__entry( 53 TP_STRUCT__entry(
56 __field( int, dev_major ) 54 __field( dev_t, dev )
57 __field( int, dev_minor )
58 __field( ino_t, dir ) 55 __field( ino_t, dir )
59 __field( umode_t, mode ) 56 __field( umode_t, mode )
60 ), 57 ),
61 58
62 TP_fast_assign( 59 TP_fast_assign(
63 __entry->dev_major = MAJOR(dir->i_sb->s_dev); 60 __entry->dev = dir->i_sb->s_dev;
64 __entry->dev_minor = MINOR(dir->i_sb->s_dev);
65 __entry->dir = dir->i_ino; 61 __entry->dir = dir->i_ino;
66 __entry->mode = mode; 62 __entry->mode = mode;
67 ), 63 ),
68 64
69 TP_printk("dev %d,%d dir %lu mode 0%o", 65 TP_printk("dev %d,%d dir %lu mode 0%o",
70 __entry->dev_major, __entry->dev_minor, 66 MAJOR(__entry->dev), MINOR(__entry->dev),
71 (unsigned long) __entry->dir, __entry->mode) 67 (unsigned long) __entry->dir, __entry->mode)
72); 68);
73 69
@@ -77,23 +73,21 @@ TRACE_EVENT(ext4_allocate_inode,
77 TP_ARGS(inode, dir, mode), 73 TP_ARGS(inode, dir, mode),
78 74
79 TP_STRUCT__entry( 75 TP_STRUCT__entry(
80 __field( int, dev_major ) 76 __field( dev_t, dev )
81 __field( int, dev_minor )
82 __field( ino_t, ino ) 77 __field( ino_t, ino )
83 __field( ino_t, dir ) 78 __field( ino_t, dir )
84 __field( umode_t, mode ) 79 __field( umode_t, mode )
85 ), 80 ),
86 81
87 TP_fast_assign( 82 TP_fast_assign(
88 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 83 __entry->dev = inode->i_sb->s_dev;
89 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
90 __entry->ino = inode->i_ino; 84 __entry->ino = inode->i_ino;
91 __entry->dir = dir->i_ino; 85 __entry->dir = dir->i_ino;
92 __entry->mode = mode; 86 __entry->mode = mode;
93 ), 87 ),
94 88
95 TP_printk("dev %d,%d ino %lu dir %lu mode 0%o", 89 TP_printk("dev %d,%d ino %lu dir %lu mode 0%o",
96 __entry->dev_major, __entry->dev_minor, 90 MAJOR(__entry->dev), MINOR(__entry->dev),
97 (unsigned long) __entry->ino, 91 (unsigned long) __entry->ino,
98 (unsigned long) __entry->dir, __entry->mode) 92 (unsigned long) __entry->dir, __entry->mode)
99); 93);
@@ -104,21 +98,19 @@ TRACE_EVENT(ext4_evict_inode,
104 TP_ARGS(inode), 98 TP_ARGS(inode),
105 99
106 TP_STRUCT__entry( 100 TP_STRUCT__entry(
107 __field( int, dev_major ) 101 __field( dev_t, dev )
108 __field( int, dev_minor )
109 __field( ino_t, ino ) 102 __field( ino_t, ino )
110 __field( int, nlink ) 103 __field( int, nlink )
111 ), 104 ),
112 105
113 TP_fast_assign( 106 TP_fast_assign(
114 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 107 __entry->dev = inode->i_sb->s_dev;
115 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
116 __entry->ino = inode->i_ino; 108 __entry->ino = inode->i_ino;
117 __entry->nlink = inode->i_nlink; 109 __entry->nlink = inode->i_nlink;
118 ), 110 ),
119 111
120 TP_printk("dev %d,%d ino %lu nlink %d", 112 TP_printk("dev %d,%d ino %lu nlink %d",
121 __entry->dev_major, __entry->dev_minor, 113 MAJOR(__entry->dev), MINOR(__entry->dev),
122 (unsigned long) __entry->ino, __entry->nlink) 114 (unsigned long) __entry->ino, __entry->nlink)
123); 115);
124 116
@@ -128,21 +120,19 @@ TRACE_EVENT(ext4_drop_inode,
128 TP_ARGS(inode, drop), 120 TP_ARGS(inode, drop),
129 121
130 TP_STRUCT__entry( 122 TP_STRUCT__entry(
131 __field( int, dev_major ) 123 __field( dev_t, dev )
132 __field( int, dev_minor )
133 __field( ino_t, ino ) 124 __field( ino_t, ino )
134 __field( int, drop ) 125 __field( int, drop )
135 ), 126 ),
136 127
137 TP_fast_assign( 128 TP_fast_assign(
138 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 129 __entry->dev = inode->i_sb->s_dev;
139 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
140 __entry->ino = inode->i_ino; 130 __entry->ino = inode->i_ino;
141 __entry->drop = drop; 131 __entry->drop = drop;
142 ), 132 ),
143 133
144 TP_printk("dev %d,%d ino %lu drop %d", 134 TP_printk("dev %d,%d ino %lu drop %d",
145 __entry->dev_major, __entry->dev_minor, 135 MAJOR(__entry->dev), MINOR(__entry->dev),
146 (unsigned long) __entry->ino, __entry->drop) 136 (unsigned long) __entry->ino, __entry->drop)
147); 137);
148 138
@@ -152,21 +142,19 @@ TRACE_EVENT(ext4_mark_inode_dirty,
152 TP_ARGS(inode, IP), 142 TP_ARGS(inode, IP),
153 143
154 TP_STRUCT__entry( 144 TP_STRUCT__entry(
155 __field( int, dev_major ) 145 __field( dev_t, dev )
156 __field( int, dev_minor )
157 __field( ino_t, ino ) 146 __field( ino_t, ino )
158 __field(unsigned long, ip ) 147 __field(unsigned long, ip )
159 ), 148 ),
160 149
161 TP_fast_assign( 150 TP_fast_assign(
162 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 151 __entry->dev = inode->i_sb->s_dev;
163 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
164 __entry->ino = inode->i_ino; 152 __entry->ino = inode->i_ino;
165 __entry->ip = IP; 153 __entry->ip = IP;
166 ), 154 ),
167 155
168 TP_printk("dev %d,%d ino %lu caller %pF", 156 TP_printk("dev %d,%d ino %lu caller %pF",
169 __entry->dev_major, __entry->dev_minor, 157 MAJOR(__entry->dev), MINOR(__entry->dev),
170 (unsigned long) __entry->ino, (void *)__entry->ip) 158 (unsigned long) __entry->ino, (void *)__entry->ip)
171); 159);
172 160
@@ -176,21 +164,19 @@ TRACE_EVENT(ext4_begin_ordered_truncate,
176 TP_ARGS(inode, new_size), 164 TP_ARGS(inode, new_size),
177 165
178 TP_STRUCT__entry( 166 TP_STRUCT__entry(
179 __field( int, dev_major ) 167 __field( dev_t, dev )
180 __field( int, dev_minor )
181 __field( ino_t, ino ) 168 __field( ino_t, ino )
182 __field( loff_t, new_size ) 169 __field( loff_t, new_size )
183 ), 170 ),
184 171
185 TP_fast_assign( 172 TP_fast_assign(
186 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 173 __entry->dev = inode->i_sb->s_dev;
187 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
188 __entry->ino = inode->i_ino; 174 __entry->ino = inode->i_ino;
189 __entry->new_size = new_size; 175 __entry->new_size = new_size;
190 ), 176 ),
191 177
192 TP_printk("dev %d,%d ino %lu new_size %lld", 178 TP_printk("dev %d,%d ino %lu new_size %lld",
193 __entry->dev_major, __entry->dev_minor, 179 MAJOR(__entry->dev), MINOR(__entry->dev),
194 (unsigned long) __entry->ino, 180 (unsigned long) __entry->ino,
195 (long long) __entry->new_size) 181 (long long) __entry->new_size)
196); 182);
@@ -203,8 +189,7 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
203 TP_ARGS(inode, pos, len, flags), 189 TP_ARGS(inode, pos, len, flags),
204 190
205 TP_STRUCT__entry( 191 TP_STRUCT__entry(
206 __field( int, dev_major ) 192 __field( dev_t, dev )
207 __field( int, dev_minor )
208 __field( ino_t, ino ) 193 __field( ino_t, ino )
209 __field( loff_t, pos ) 194 __field( loff_t, pos )
210 __field( unsigned int, len ) 195 __field( unsigned int, len )
@@ -212,8 +197,7 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
212 ), 197 ),
213 198
214 TP_fast_assign( 199 TP_fast_assign(
215 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 200 __entry->dev = inode->i_sb->s_dev;
216 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
217 __entry->ino = inode->i_ino; 201 __entry->ino = inode->i_ino;
218 __entry->pos = pos; 202 __entry->pos = pos;
219 __entry->len = len; 203 __entry->len = len;
@@ -221,7 +205,7 @@ DECLARE_EVENT_CLASS(ext4__write_begin,
221 ), 205 ),
222 206
223 TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u", 207 TP_printk("dev %d,%d ino %lu pos %llu len %u flags %u",
224 __entry->dev_major, __entry->dev_minor, 208 MAJOR(__entry->dev), MINOR(__entry->dev),
225 (unsigned long) __entry->ino, 209 (unsigned long) __entry->ino,
226 __entry->pos, __entry->len, __entry->flags) 210 __entry->pos, __entry->len, __entry->flags)
227); 211);
@@ -249,8 +233,7 @@ DECLARE_EVENT_CLASS(ext4__write_end,
249 TP_ARGS(inode, pos, len, copied), 233 TP_ARGS(inode, pos, len, copied),
250 234
251 TP_STRUCT__entry( 235 TP_STRUCT__entry(
252 __field( int, dev_major ) 236 __field( dev_t, dev )
253 __field( int, dev_minor )
254 __field( ino_t, ino ) 237 __field( ino_t, ino )
255 __field( loff_t, pos ) 238 __field( loff_t, pos )
256 __field( unsigned int, len ) 239 __field( unsigned int, len )
@@ -258,8 +241,7 @@ DECLARE_EVENT_CLASS(ext4__write_end,
258 ), 241 ),
259 242
260 TP_fast_assign( 243 TP_fast_assign(
261 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 244 __entry->dev = inode->i_sb->s_dev;
262 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
263 __entry->ino = inode->i_ino; 245 __entry->ino = inode->i_ino;
264 __entry->pos = pos; 246 __entry->pos = pos;
265 __entry->len = len; 247 __entry->len = len;
@@ -267,9 +249,9 @@ DECLARE_EVENT_CLASS(ext4__write_end,
267 ), 249 ),
268 250
269 TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u", 251 TP_printk("dev %d,%d ino %lu pos %llu len %u copied %u",
270 __entry->dev_major, __entry->dev_minor, 252 MAJOR(__entry->dev), MINOR(__entry->dev),
271 (unsigned long) __entry->ino, __entry->pos, 253 (unsigned long) __entry->ino,
272 __entry->len, __entry->copied) 254 __entry->pos, __entry->len, __entry->copied)
273); 255);
274 256
275DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end, 257DEFINE_EVENT(ext4__write_end, ext4_ordered_write_end,
@@ -310,22 +292,20 @@ TRACE_EVENT(ext4_writepage,
310 TP_ARGS(inode, page), 292 TP_ARGS(inode, page),
311 293
312 TP_STRUCT__entry( 294 TP_STRUCT__entry(
313 __field( int, dev_major ) 295 __field( dev_t, dev )
314 __field( int, dev_minor )
315 __field( ino_t, ino ) 296 __field( ino_t, ino )
316 __field( pgoff_t, index ) 297 __field( pgoff_t, index )
317 298
318 ), 299 ),
319 300
320 TP_fast_assign( 301 TP_fast_assign(
321 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 302 __entry->dev = inode->i_sb->s_dev;
322 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
323 __entry->ino = inode->i_ino; 303 __entry->ino = inode->i_ino;
324 __entry->index = page->index; 304 __entry->index = page->index;
325 ), 305 ),
326 306
327 TP_printk("dev %d,%d ino %lu page_index %lu", 307 TP_printk("dev %d,%d ino %lu page_index %lu",
328 __entry->dev_major, __entry->dev_minor, 308 MAJOR(__entry->dev), MINOR(__entry->dev),
329 (unsigned long) __entry->ino, __entry->index) 309 (unsigned long) __entry->ino, __entry->index)
330); 310);
331 311
@@ -335,43 +315,39 @@ TRACE_EVENT(ext4_da_writepages,
335 TP_ARGS(inode, wbc), 315 TP_ARGS(inode, wbc),
336 316
337 TP_STRUCT__entry( 317 TP_STRUCT__entry(
338 __field( int, dev_major ) 318 __field( dev_t, dev )
339 __field( int, dev_minor )
340 __field( ino_t, ino ) 319 __field( ino_t, ino )
341 __field( long, nr_to_write ) 320 __field( long, nr_to_write )
342 __field( long, pages_skipped ) 321 __field( long, pages_skipped )
343 __field( loff_t, range_start ) 322 __field( loff_t, range_start )
344 __field( loff_t, range_end ) 323 __field( loff_t, range_end )
324 __field( int, sync_mode )
345 __field( char, for_kupdate ) 325 __field( char, for_kupdate )
346 __field( char, for_reclaim )
347 __field( char, range_cyclic ) 326 __field( char, range_cyclic )
348 __field( pgoff_t, writeback_index ) 327 __field( pgoff_t, writeback_index )
349 ), 328 ),
350 329
351 TP_fast_assign( 330 TP_fast_assign(
352 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 331 __entry->dev = inode->i_sb->s_dev;
353 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
354 __entry->ino = inode->i_ino; 332 __entry->ino = inode->i_ino;
355 __entry->nr_to_write = wbc->nr_to_write; 333 __entry->nr_to_write = wbc->nr_to_write;
356 __entry->pages_skipped = wbc->pages_skipped; 334 __entry->pages_skipped = wbc->pages_skipped;
357 __entry->range_start = wbc->range_start; 335 __entry->range_start = wbc->range_start;
358 __entry->range_end = wbc->range_end; 336 __entry->range_end = wbc->range_end;
337 __entry->sync_mode = wbc->sync_mode;
359 __entry->for_kupdate = wbc->for_kupdate; 338 __entry->for_kupdate = wbc->for_kupdate;
360 __entry->for_reclaim = wbc->for_reclaim;
361 __entry->range_cyclic = wbc->range_cyclic; 339 __entry->range_cyclic = wbc->range_cyclic;
362 __entry->writeback_index = inode->i_mapping->writeback_index; 340 __entry->writeback_index = inode->i_mapping->writeback_index;
363 ), 341 ),
364 342
365 TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld " 343 TP_printk("dev %d,%d ino %lu nr_to_write %ld pages_skipped %ld "
366 "range_start %llu range_end %llu " 344 "range_start %llu range_end %llu sync_mode %d "
367 "for_kupdate %d for_reclaim %d " 345 "for_kupdate %d range_cyclic %d writeback_index %lu",
368 "range_cyclic %d writeback_index %lu", 346 MAJOR(__entry->dev), MINOR(__entry->dev),
369 __entry->dev_major, __entry->dev_minor,
370 (unsigned long) __entry->ino, __entry->nr_to_write, 347 (unsigned long) __entry->ino, __entry->nr_to_write,
371 __entry->pages_skipped, __entry->range_start, 348 __entry->pages_skipped, __entry->range_start,
372 __entry->range_end, 349 __entry->range_end, __entry->sync_mode,
373 __entry->for_kupdate, __entry->for_reclaim, 350 __entry->for_kupdate, __entry->range_cyclic,
374 __entry->range_cyclic,
375 (unsigned long) __entry->writeback_index) 351 (unsigned long) __entry->writeback_index)
376); 352);
377 353
@@ -381,8 +357,7 @@ TRACE_EVENT(ext4_da_write_pages,
381 TP_ARGS(inode, mpd), 357 TP_ARGS(inode, mpd),
382 358
383 TP_STRUCT__entry( 359 TP_STRUCT__entry(
384 __field( int, dev_major ) 360 __field( dev_t, dev )
385 __field( int, dev_minor )
386 __field( ino_t, ino ) 361 __field( ino_t, ino )
387 __field( __u64, b_blocknr ) 362 __field( __u64, b_blocknr )
388 __field( __u32, b_size ) 363 __field( __u32, b_size )
@@ -390,11 +365,11 @@ TRACE_EVENT(ext4_da_write_pages,
390 __field( unsigned long, first_page ) 365 __field( unsigned long, first_page )
391 __field( int, io_done ) 366 __field( int, io_done )
392 __field( int, pages_written ) 367 __field( int, pages_written )
368 __field( int, sync_mode )
393 ), 369 ),
394 370
395 TP_fast_assign( 371 TP_fast_assign(
396 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 372 __entry->dev = inode->i_sb->s_dev;
397 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
398 __entry->ino = inode->i_ino; 373 __entry->ino = inode->i_ino;
399 __entry->b_blocknr = mpd->b_blocknr; 374 __entry->b_blocknr = mpd->b_blocknr;
400 __entry->b_size = mpd->b_size; 375 __entry->b_size = mpd->b_size;
@@ -402,14 +377,18 @@ TRACE_EVENT(ext4_da_write_pages,
402 __entry->first_page = mpd->first_page; 377 __entry->first_page = mpd->first_page;
403 __entry->io_done = mpd->io_done; 378 __entry->io_done = mpd->io_done;
404 __entry->pages_written = mpd->pages_written; 379 __entry->pages_written = mpd->pages_written;
380 __entry->sync_mode = mpd->wbc->sync_mode;
405 ), 381 ),
406 382
407 TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x first_page %lu io_done %d pages_written %d", 383 TP_printk("dev %d,%d ino %lu b_blocknr %llu b_size %u b_state 0x%04x "
408 __entry->dev_major, __entry->dev_minor, 384 "first_page %lu io_done %d pages_written %d sync_mode %d",
385 MAJOR(__entry->dev), MINOR(__entry->dev),
409 (unsigned long) __entry->ino, 386 (unsigned long) __entry->ino,
410 __entry->b_blocknr, __entry->b_size, 387 __entry->b_blocknr, __entry->b_size,
411 __entry->b_state, __entry->first_page, 388 __entry->b_state, __entry->first_page,
412 __entry->io_done, __entry->pages_written) 389 __entry->io_done, __entry->pages_written,
390 __entry->sync_mode
391 )
413); 392);
414 393
415TRACE_EVENT(ext4_da_writepages_result, 394TRACE_EVENT(ext4_da_writepages_result,
@@ -419,35 +398,100 @@ TRACE_EVENT(ext4_da_writepages_result,
419 TP_ARGS(inode, wbc, ret, pages_written), 398 TP_ARGS(inode, wbc, ret, pages_written),
420 399
421 TP_STRUCT__entry( 400 TP_STRUCT__entry(
422 __field( int, dev_major ) 401 __field( dev_t, dev )
423 __field( int, dev_minor )
424 __field( ino_t, ino ) 402 __field( ino_t, ino )
425 __field( int, ret ) 403 __field( int, ret )
426 __field( int, pages_written ) 404 __field( int, pages_written )
427 __field( long, pages_skipped ) 405 __field( long, pages_skipped )
406 __field( int, sync_mode )
428 __field( char, more_io ) 407 __field( char, more_io )
429 __field( pgoff_t, writeback_index ) 408 __field( pgoff_t, writeback_index )
430 ), 409 ),
431 410
432 TP_fast_assign( 411 TP_fast_assign(
433 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 412 __entry->dev = inode->i_sb->s_dev;
434 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
435 __entry->ino = inode->i_ino; 413 __entry->ino = inode->i_ino;
436 __entry->ret = ret; 414 __entry->ret = ret;
437 __entry->pages_written = pages_written; 415 __entry->pages_written = pages_written;
438 __entry->pages_skipped = wbc->pages_skipped; 416 __entry->pages_skipped = wbc->pages_skipped;
417 __entry->sync_mode = wbc->sync_mode;
439 __entry->more_io = wbc->more_io; 418 __entry->more_io = wbc->more_io;
440 __entry->writeback_index = inode->i_mapping->writeback_index; 419 __entry->writeback_index = inode->i_mapping->writeback_index;
441 ), 420 ),
442 421
443 TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld more_io %d writeback_index %lu", 422 TP_printk("dev %d,%d ino %lu ret %d pages_written %d pages_skipped %ld "
444 __entry->dev_major, __entry->dev_minor, 423 " more_io %d sync_mode %d writeback_index %lu",
424 MAJOR(__entry->dev), MINOR(__entry->dev),
445 (unsigned long) __entry->ino, __entry->ret, 425 (unsigned long) __entry->ino, __entry->ret,
446 __entry->pages_written, __entry->pages_skipped, 426 __entry->pages_written, __entry->pages_skipped,
447 __entry->more_io, 427 __entry->more_io, __entry->sync_mode,
448 (unsigned long) __entry->writeback_index) 428 (unsigned long) __entry->writeback_index)
449); 429);
450 430
431DECLARE_EVENT_CLASS(ext4__page_op,
432 TP_PROTO(struct page *page),
433
434 TP_ARGS(page),
435
436 TP_STRUCT__entry(
437 __field( pgoff_t, index )
438 __field( ino_t, ino )
439 __field( dev_t, dev )
440
441 ),
442
443 TP_fast_assign(
444 __entry->index = page->index;
445 __entry->ino = page->mapping->host->i_ino;
446 __entry->dev = page->mapping->host->i_sb->s_dev;
447 ),
448
449 TP_printk("dev %d,%d ino %lu page_index %lu",
450 MAJOR(__entry->dev), MINOR(__entry->dev),
451 (unsigned long) __entry->ino,
452 __entry->index)
453);
454
455DEFINE_EVENT(ext4__page_op, ext4_readpage,
456
457 TP_PROTO(struct page *page),
458
459 TP_ARGS(page)
460);
461
462DEFINE_EVENT(ext4__page_op, ext4_releasepage,
463
464 TP_PROTO(struct page *page),
465
466 TP_ARGS(page)
467);
468
469TRACE_EVENT(ext4_invalidatepage,
470 TP_PROTO(struct page *page, unsigned long offset),
471
472 TP_ARGS(page, offset),
473
474 TP_STRUCT__entry(
475 __field( pgoff_t, index )
476 __field( unsigned long, offset )
477 __field( ino_t, ino )
478 __field( dev_t, dev )
479
480 ),
481
482 TP_fast_assign(
483 __entry->index = page->index;
484 __entry->offset = offset;
485 __entry->ino = page->mapping->host->i_ino;
486 __entry->dev = page->mapping->host->i_sb->s_dev;
487 ),
488
489 TP_printk("dev %d,%d ino %lu page_index %lu offset %lu",
490 MAJOR(__entry->dev), MINOR(__entry->dev),
491 (unsigned long) __entry->ino,
492 __entry->index, __entry->offset)
493);
494
451TRACE_EVENT(ext4_discard_blocks, 495TRACE_EVENT(ext4_discard_blocks,
452 TP_PROTO(struct super_block *sb, unsigned long long blk, 496 TP_PROTO(struct super_block *sb, unsigned long long blk,
453 unsigned long long count), 497 unsigned long long count),
@@ -455,22 +499,20 @@ TRACE_EVENT(ext4_discard_blocks,
455 TP_ARGS(sb, blk, count), 499 TP_ARGS(sb, blk, count),
456 500
457 TP_STRUCT__entry( 501 TP_STRUCT__entry(
458 __field( int, dev_major ) 502 __field( dev_t, dev )
459 __field( int, dev_minor )
460 __field( __u64, blk ) 503 __field( __u64, blk )
461 __field( __u64, count ) 504 __field( __u64, count )
462 505
463 ), 506 ),
464 507
465 TP_fast_assign( 508 TP_fast_assign(
466 __entry->dev_major = MAJOR(sb->s_dev); 509 __entry->dev = sb->s_dev;
467 __entry->dev_minor = MINOR(sb->s_dev);
468 __entry->blk = blk; 510 __entry->blk = blk;
469 __entry->count = count; 511 __entry->count = count;
470 ), 512 ),
471 513
472 TP_printk("dev %d,%d blk %llu count %llu", 514 TP_printk("dev %d,%d blk %llu count %llu",
473 __entry->dev_major, __entry->dev_minor, 515 MAJOR(__entry->dev), MINOR(__entry->dev),
474 __entry->blk, __entry->count) 516 __entry->blk, __entry->count)
475); 517);
476 518
@@ -481,8 +523,7 @@ DECLARE_EVENT_CLASS(ext4__mb_new_pa,
481 TP_ARGS(ac, pa), 523 TP_ARGS(ac, pa),
482 524
483 TP_STRUCT__entry( 525 TP_STRUCT__entry(
484 __field( int, dev_major ) 526 __field( dev_t, dev )
485 __field( int, dev_minor )
486 __field( ino_t, ino ) 527 __field( ino_t, ino )
487 __field( __u64, pa_pstart ) 528 __field( __u64, pa_pstart )
488 __field( __u32, pa_len ) 529 __field( __u32, pa_len )
@@ -491,8 +532,7 @@ DECLARE_EVENT_CLASS(ext4__mb_new_pa,
491 ), 532 ),
492 533
493 TP_fast_assign( 534 TP_fast_assign(
494 __entry->dev_major = MAJOR(ac->ac_sb->s_dev); 535 __entry->dev = ac->ac_sb->s_dev;
495 __entry->dev_minor = MINOR(ac->ac_sb->s_dev);
496 __entry->ino = ac->ac_inode->i_ino; 536 __entry->ino = ac->ac_inode->i_ino;
497 __entry->pa_pstart = pa->pa_pstart; 537 __entry->pa_pstart = pa->pa_pstart;
498 __entry->pa_len = pa->pa_len; 538 __entry->pa_len = pa->pa_len;
@@ -500,9 +540,9 @@ DECLARE_EVENT_CLASS(ext4__mb_new_pa,
500 ), 540 ),
501 541
502 TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu", 542 TP_printk("dev %d,%d ino %lu pstart %llu len %u lstart %llu",
503 __entry->dev_major, __entry->dev_minor, 543 MAJOR(__entry->dev), MINOR(__entry->dev),
504 (unsigned long) __entry->ino, __entry->pa_pstart, 544 (unsigned long) __entry->ino,
505 __entry->pa_len, __entry->pa_lstart) 545 __entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
506); 546);
507 547
508DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa, 548DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa,
@@ -530,8 +570,7 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
530 TP_ARGS(sb, inode, pa, block, count), 570 TP_ARGS(sb, inode, pa, block, count),
531 571
532 TP_STRUCT__entry( 572 TP_STRUCT__entry(
533 __field( int, dev_major ) 573 __field( dev_t, dev )
534 __field( int, dev_minor )
535 __field( ino_t, ino ) 574 __field( ino_t, ino )
536 __field( __u64, block ) 575 __field( __u64, block )
537 __field( __u32, count ) 576 __field( __u32, count )
@@ -539,16 +578,16 @@ TRACE_EVENT(ext4_mb_release_inode_pa,
539 ), 578 ),
540 579
541 TP_fast_assign( 580 TP_fast_assign(
542 __entry->dev_major = MAJOR(sb->s_dev); 581 __entry->dev = sb->s_dev;
543 __entry->dev_minor = MINOR(sb->s_dev);
544 __entry->ino = inode->i_ino; 582 __entry->ino = inode->i_ino;
545 __entry->block = block; 583 __entry->block = block;
546 __entry->count = count; 584 __entry->count = count;
547 ), 585 ),
548 586
549 TP_printk("dev %d,%d ino %lu block %llu count %u", 587 TP_printk("dev %d,%d ino %lu block %llu count %u",
550 __entry->dev_major, __entry->dev_minor, 588 MAJOR(__entry->dev), MINOR(__entry->dev),
551 (unsigned long) __entry->ino, __entry->block, __entry->count) 589 (unsigned long) __entry->ino,
590 __entry->block, __entry->count)
552); 591);
553 592
554TRACE_EVENT(ext4_mb_release_group_pa, 593TRACE_EVENT(ext4_mb_release_group_pa,
@@ -558,22 +597,20 @@ TRACE_EVENT(ext4_mb_release_group_pa,
558 TP_ARGS(sb, pa), 597 TP_ARGS(sb, pa),
559 598
560 TP_STRUCT__entry( 599 TP_STRUCT__entry(
561 __field( int, dev_major ) 600 __field( dev_t, dev )
562 __field( int, dev_minor )
563 __field( __u64, pa_pstart ) 601 __field( __u64, pa_pstart )
564 __field( __u32, pa_len ) 602 __field( __u32, pa_len )
565 603
566 ), 604 ),
567 605
568 TP_fast_assign( 606 TP_fast_assign(
569 __entry->dev_major = MAJOR(sb->s_dev); 607 __entry->dev = sb->s_dev;
570 __entry->dev_minor = MINOR(sb->s_dev);
571 __entry->pa_pstart = pa->pa_pstart; 608 __entry->pa_pstart = pa->pa_pstart;
572 __entry->pa_len = pa->pa_len; 609 __entry->pa_len = pa->pa_len;
573 ), 610 ),
574 611
575 TP_printk("dev %d,%d pstart %llu len %u", 612 TP_printk("dev %d,%d pstart %llu len %u",
576 __entry->dev_major, __entry->dev_minor, 613 MAJOR(__entry->dev), MINOR(__entry->dev),
577 __entry->pa_pstart, __entry->pa_len) 614 __entry->pa_pstart, __entry->pa_len)
578); 615);
579 616
@@ -583,20 +620,18 @@ TRACE_EVENT(ext4_discard_preallocations,
583 TP_ARGS(inode), 620 TP_ARGS(inode),
584 621
585 TP_STRUCT__entry( 622 TP_STRUCT__entry(
586 __field( int, dev_major ) 623 __field( dev_t, dev )
587 __field( int, dev_minor )
588 __field( ino_t, ino ) 624 __field( ino_t, ino )
589 625
590 ), 626 ),
591 627
592 TP_fast_assign( 628 TP_fast_assign(
593 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 629 __entry->dev = inode->i_sb->s_dev;
594 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
595 __entry->ino = inode->i_ino; 630 __entry->ino = inode->i_ino;
596 ), 631 ),
597 632
598 TP_printk("dev %d,%d ino %lu", 633 TP_printk("dev %d,%d ino %lu",
599 __entry->dev_major, __entry->dev_minor, 634 MAJOR(__entry->dev), MINOR(__entry->dev),
600 (unsigned long) __entry->ino) 635 (unsigned long) __entry->ino)
601); 636);
602 637
@@ -606,20 +641,19 @@ TRACE_EVENT(ext4_mb_discard_preallocations,
606 TP_ARGS(sb, needed), 641 TP_ARGS(sb, needed),
607 642
608 TP_STRUCT__entry( 643 TP_STRUCT__entry(
609 __field( int, dev_major ) 644 __field( dev_t, dev )
610 __field( int, dev_minor )
611 __field( int, needed ) 645 __field( int, needed )
612 646
613 ), 647 ),
614 648
615 TP_fast_assign( 649 TP_fast_assign(
616 __entry->dev_major = MAJOR(sb->s_dev); 650 __entry->dev = sb->s_dev;
617 __entry->dev_minor = MINOR(sb->s_dev);
618 __entry->needed = needed; 651 __entry->needed = needed;
619 ), 652 ),
620 653
621 TP_printk("dev %d,%d needed %d", 654 TP_printk("dev %d,%d needed %d",
622 __entry->dev_major, __entry->dev_minor, __entry->needed) 655 MAJOR(__entry->dev), MINOR(__entry->dev),
656 __entry->needed)
623); 657);
624 658
625TRACE_EVENT(ext4_request_blocks, 659TRACE_EVENT(ext4_request_blocks,
@@ -628,8 +662,7 @@ TRACE_EVENT(ext4_request_blocks,
628 TP_ARGS(ar), 662 TP_ARGS(ar),
629 663
630 TP_STRUCT__entry( 664 TP_STRUCT__entry(
631 __field( int, dev_major ) 665 __field( dev_t, dev )
632 __field( int, dev_minor )
633 __field( ino_t, ino ) 666 __field( ino_t, ino )
634 __field( unsigned int, flags ) 667 __field( unsigned int, flags )
635 __field( unsigned int, len ) 668 __field( unsigned int, len )
@@ -642,8 +675,7 @@ TRACE_EVENT(ext4_request_blocks,
642 ), 675 ),
643 676
644 TP_fast_assign( 677 TP_fast_assign(
645 __entry->dev_major = MAJOR(ar->inode->i_sb->s_dev); 678 __entry->dev = ar->inode->i_sb->s_dev;
646 __entry->dev_minor = MINOR(ar->inode->i_sb->s_dev);
647 __entry->ino = ar->inode->i_ino; 679 __entry->ino = ar->inode->i_ino;
648 __entry->flags = ar->flags; 680 __entry->flags = ar->flags;
649 __entry->len = ar->len; 681 __entry->len = ar->len;
@@ -655,8 +687,9 @@ TRACE_EVENT(ext4_request_blocks,
655 __entry->pright = ar->pright; 687 __entry->pright = ar->pright;
656 ), 688 ),
657 689
658 TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", 690 TP_printk("dev %d,%d ino %lu flags %u len %u lblk %llu goal %llu "
659 __entry->dev_major, __entry->dev_minor, 691 "lleft %llu lright %llu pleft %llu pright %llu ",
692 MAJOR(__entry->dev), MINOR(__entry->dev),
660 (unsigned long) __entry->ino, 693 (unsigned long) __entry->ino,
661 __entry->flags, __entry->len, 694 __entry->flags, __entry->len,
662 (unsigned long long) __entry->logical, 695 (unsigned long long) __entry->logical,
@@ -673,8 +706,7 @@ TRACE_EVENT(ext4_allocate_blocks,
673 TP_ARGS(ar, block), 706 TP_ARGS(ar, block),
674 707
675 TP_STRUCT__entry( 708 TP_STRUCT__entry(
676 __field( int, dev_major ) 709 __field( dev_t, dev )
677 __field( int, dev_minor )
678 __field( ino_t, ino ) 710 __field( ino_t, ino )
679 __field( __u64, block ) 711 __field( __u64, block )
680 __field( unsigned int, flags ) 712 __field( unsigned int, flags )
@@ -688,8 +720,7 @@ TRACE_EVENT(ext4_allocate_blocks,
688 ), 720 ),
689 721
690 TP_fast_assign( 722 TP_fast_assign(
691 __entry->dev_major = MAJOR(ar->inode->i_sb->s_dev); 723 __entry->dev = ar->inode->i_sb->s_dev;
692 __entry->dev_minor = MINOR(ar->inode->i_sb->s_dev);
693 __entry->ino = ar->inode->i_ino; 724 __entry->ino = ar->inode->i_ino;
694 __entry->block = block; 725 __entry->block = block;
695 __entry->flags = ar->flags; 726 __entry->flags = ar->flags;
@@ -702,10 +733,11 @@ TRACE_EVENT(ext4_allocate_blocks,
702 __entry->pright = ar->pright; 733 __entry->pright = ar->pright;
703 ), 734 ),
704 735
705 TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu goal %llu lleft %llu lright %llu pleft %llu pright %llu ", 736 TP_printk("dev %d,%d ino %lu flags %u len %u block %llu lblk %llu "
706 __entry->dev_major, __entry->dev_minor, 737 "goal %llu lleft %llu lright %llu pleft %llu pright %llu",
707 (unsigned long) __entry->ino, __entry->flags, 738 MAJOR(__entry->dev), MINOR(__entry->dev),
708 __entry->len, __entry->block, 739 (unsigned long) __entry->ino,
740 __entry->flags, __entry->len, __entry->block,
709 (unsigned long long) __entry->logical, 741 (unsigned long long) __entry->logical,
710 (unsigned long long) __entry->goal, 742 (unsigned long long) __entry->goal,
711 (unsigned long long) __entry->lleft, 743 (unsigned long long) __entry->lleft,
@@ -721,8 +753,7 @@ TRACE_EVENT(ext4_free_blocks,
721 TP_ARGS(inode, block, count, flags), 753 TP_ARGS(inode, block, count, flags),
722 754
723 TP_STRUCT__entry( 755 TP_STRUCT__entry(
724 __field( int, dev_major ) 756 __field( dev_t, dev )
725 __field( int, dev_minor )
726 __field( ino_t, ino ) 757 __field( ino_t, ino )
727 __field( umode_t, mode ) 758 __field( umode_t, mode )
728 __field( __u64, block ) 759 __field( __u64, block )
@@ -731,8 +762,7 @@ TRACE_EVENT(ext4_free_blocks,
731 ), 762 ),
732 763
733 TP_fast_assign( 764 TP_fast_assign(
734 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 765 __entry->dev = inode->i_sb->s_dev;
735 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
736 __entry->ino = inode->i_ino; 766 __entry->ino = inode->i_ino;
737 __entry->mode = inode->i_mode; 767 __entry->mode = inode->i_mode;
738 __entry->block = block; 768 __entry->block = block;
@@ -741,20 +771,19 @@ TRACE_EVENT(ext4_free_blocks,
741 ), 771 ),
742 772
743 TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d", 773 TP_printk("dev %d,%d ino %lu mode 0%o block %llu count %lu flags %d",
744 __entry->dev_major, __entry->dev_minor, 774 MAJOR(__entry->dev), MINOR(__entry->dev),
745 (unsigned long) __entry->ino, 775 (unsigned long) __entry->ino,
746 __entry->mode, __entry->block, __entry->count, 776 __entry->mode, __entry->block, __entry->count,
747 __entry->flags) 777 __entry->flags)
748); 778);
749 779
750TRACE_EVENT(ext4_sync_file, 780TRACE_EVENT(ext4_sync_file_enter,
751 TP_PROTO(struct file *file, int datasync), 781 TP_PROTO(struct file *file, int datasync),
752 782
753 TP_ARGS(file, datasync), 783 TP_ARGS(file, datasync),
754 784
755 TP_STRUCT__entry( 785 TP_STRUCT__entry(
756 __field( int, dev_major ) 786 __field( dev_t, dev )
757 __field( int, dev_minor )
758 __field( ino_t, ino ) 787 __field( ino_t, ino )
759 __field( ino_t, parent ) 788 __field( ino_t, parent )
760 __field( int, datasync ) 789 __field( int, datasync )
@@ -763,39 +792,60 @@ TRACE_EVENT(ext4_sync_file,
763 TP_fast_assign( 792 TP_fast_assign(
764 struct dentry *dentry = file->f_path.dentry; 793 struct dentry *dentry = file->f_path.dentry;
765 794
766 __entry->dev_major = MAJOR(dentry->d_inode->i_sb->s_dev); 795 __entry->dev = dentry->d_inode->i_sb->s_dev;
767 __entry->dev_minor = MINOR(dentry->d_inode->i_sb->s_dev);
768 __entry->ino = dentry->d_inode->i_ino; 796 __entry->ino = dentry->d_inode->i_ino;
769 __entry->datasync = datasync; 797 __entry->datasync = datasync;
770 __entry->parent = dentry->d_parent->d_inode->i_ino; 798 __entry->parent = dentry->d_parent->d_inode->i_ino;
771 ), 799 ),
772 800
773 TP_printk("dev %d,%d ino %ld parent %ld datasync %d ", 801 TP_printk("dev %d,%d ino %ld parent %ld datasync %d ",
774 __entry->dev_major, __entry->dev_minor, 802 MAJOR(__entry->dev), MINOR(__entry->dev),
775 (unsigned long) __entry->ino, 803 (unsigned long) __entry->ino,
776 (unsigned long) __entry->parent, __entry->datasync) 804 (unsigned long) __entry->parent, __entry->datasync)
777); 805);
778 806
807TRACE_EVENT(ext4_sync_file_exit,
808 TP_PROTO(struct inode *inode, int ret),
809
810 TP_ARGS(inode, ret),
811
812 TP_STRUCT__entry(
813 __field( int, ret )
814 __field( ino_t, ino )
815 __field( dev_t, dev )
816 ),
817
818 TP_fast_assign(
819 __entry->ret = ret;
820 __entry->ino = inode->i_ino;
821 __entry->dev = inode->i_sb->s_dev;
822 ),
823
824 TP_printk("dev %d,%d ino %ld ret %d",
825 MAJOR(__entry->dev), MINOR(__entry->dev),
826 (unsigned long) __entry->ino,
827 __entry->ret)
828);
829
779TRACE_EVENT(ext4_sync_fs, 830TRACE_EVENT(ext4_sync_fs,
780 TP_PROTO(struct super_block *sb, int wait), 831 TP_PROTO(struct super_block *sb, int wait),
781 832
782 TP_ARGS(sb, wait), 833 TP_ARGS(sb, wait),
783 834
784 TP_STRUCT__entry( 835 TP_STRUCT__entry(
785 __field( int, dev_major ) 836 __field( dev_t, dev )
786 __field( int, dev_minor )
787 __field( int, wait ) 837 __field( int, wait )
788 838
789 ), 839 ),
790 840
791 TP_fast_assign( 841 TP_fast_assign(
792 __entry->dev_major = MAJOR(sb->s_dev); 842 __entry->dev = sb->s_dev;
793 __entry->dev_minor = MINOR(sb->s_dev);
794 __entry->wait = wait; 843 __entry->wait = wait;
795 ), 844 ),
796 845
797 TP_printk("dev %d,%d wait %d", __entry->dev_major, 846 TP_printk("dev %d,%d wait %d",
798 __entry->dev_minor, __entry->wait) 847 MAJOR(__entry->dev), MINOR(__entry->dev),
848 __entry->wait)
799); 849);
800 850
801TRACE_EVENT(ext4_alloc_da_blocks, 851TRACE_EVENT(ext4_alloc_da_blocks,
@@ -804,23 +854,21 @@ TRACE_EVENT(ext4_alloc_da_blocks,
804 TP_ARGS(inode), 854 TP_ARGS(inode),
805 855
806 TP_STRUCT__entry( 856 TP_STRUCT__entry(
807 __field( int, dev_major ) 857 __field( dev_t, dev )
808 __field( int, dev_minor )
809 __field( ino_t, ino ) 858 __field( ino_t, ino )
810 __field( unsigned int, data_blocks ) 859 __field( unsigned int, data_blocks )
811 __field( unsigned int, meta_blocks ) 860 __field( unsigned int, meta_blocks )
812 ), 861 ),
813 862
814 TP_fast_assign( 863 TP_fast_assign(
815 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 864 __entry->dev = inode->i_sb->s_dev;
816 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
817 __entry->ino = inode->i_ino; 865 __entry->ino = inode->i_ino;
818 __entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks; 866 __entry->data_blocks = EXT4_I(inode)->i_reserved_data_blocks;
819 __entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; 867 __entry->meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
820 ), 868 ),
821 869
822 TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u", 870 TP_printk("dev %d,%d ino %lu data_blocks %u meta_blocks %u",
823 __entry->dev_major, __entry->dev_minor, 871 MAJOR(__entry->dev), MINOR(__entry->dev),
824 (unsigned long) __entry->ino, 872 (unsigned long) __entry->ino,
825 __entry->data_blocks, __entry->meta_blocks) 873 __entry->data_blocks, __entry->meta_blocks)
826); 874);
@@ -831,8 +879,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
831 TP_ARGS(ac), 879 TP_ARGS(ac),
832 880
833 TP_STRUCT__entry( 881 TP_STRUCT__entry(
834 __field( int, dev_major ) 882 __field( dev_t, dev )
835 __field( int, dev_minor )
836 __field( ino_t, ino ) 883 __field( ino_t, ino )
837 __field( __u16, found ) 884 __field( __u16, found )
838 __field( __u16, groups ) 885 __field( __u16, groups )
@@ -855,8 +902,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
855 ), 902 ),
856 903
857 TP_fast_assign( 904 TP_fast_assign(
858 __entry->dev_major = MAJOR(ac->ac_inode->i_sb->s_dev); 905 __entry->dev = ac->ac_inode->i_sb->s_dev;
859 __entry->dev_minor = MINOR(ac->ac_inode->i_sb->s_dev);
860 __entry->ino = ac->ac_inode->i_ino; 906 __entry->ino = ac->ac_inode->i_ino;
861 __entry->found = ac->ac_found; 907 __entry->found = ac->ac_found;
862 __entry->flags = ac->ac_flags; 908 __entry->flags = ac->ac_flags;
@@ -881,7 +927,7 @@ TRACE_EVENT(ext4_mballoc_alloc,
881 TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u " 927 TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u goal %u/%d/%u@%u "
882 "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x " 928 "result %u/%d/%u@%u blks %u grps %u cr %u flags 0x%04x "
883 "tail %u broken %u", 929 "tail %u broken %u",
884 __entry->dev_major, __entry->dev_minor, 930 MAJOR(__entry->dev), MINOR(__entry->dev),
885 (unsigned long) __entry->ino, 931 (unsigned long) __entry->ino,
886 __entry->orig_group, __entry->orig_start, 932 __entry->orig_group, __entry->orig_start,
887 __entry->orig_len, __entry->orig_logical, 933 __entry->orig_len, __entry->orig_logical,
@@ -900,8 +946,7 @@ TRACE_EVENT(ext4_mballoc_prealloc,
900 TP_ARGS(ac), 946 TP_ARGS(ac),
901 947
902 TP_STRUCT__entry( 948 TP_STRUCT__entry(
903 __field( int, dev_major ) 949 __field( dev_t, dev )
904 __field( int, dev_minor )
905 __field( ino_t, ino ) 950 __field( ino_t, ino )
906 __field( __u32, orig_logical ) 951 __field( __u32, orig_logical )
907 __field( int, orig_start ) 952 __field( int, orig_start )
@@ -914,8 +959,7 @@ TRACE_EVENT(ext4_mballoc_prealloc,
914 ), 959 ),
915 960
916 TP_fast_assign( 961 TP_fast_assign(
917 __entry->dev_major = MAJOR(ac->ac_inode->i_sb->s_dev); 962 __entry->dev = ac->ac_inode->i_sb->s_dev;
918 __entry->dev_minor = MINOR(ac->ac_inode->i_sb->s_dev);
919 __entry->ino = ac->ac_inode->i_ino; 963 __entry->ino = ac->ac_inode->i_ino;
920 __entry->orig_logical = ac->ac_o_ex.fe_logical; 964 __entry->orig_logical = ac->ac_o_ex.fe_logical;
921 __entry->orig_start = ac->ac_o_ex.fe_start; 965 __entry->orig_start = ac->ac_o_ex.fe_start;
@@ -928,7 +972,7 @@ TRACE_EVENT(ext4_mballoc_prealloc,
928 ), 972 ),
929 973
930 TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u", 974 TP_printk("dev %d,%d inode %lu orig %u/%d/%u@%u result %u/%d/%u@%u",
931 __entry->dev_major, __entry->dev_minor, 975 MAJOR(__entry->dev), MINOR(__entry->dev),
932 (unsigned long) __entry->ino, 976 (unsigned long) __entry->ino,
933 __entry->orig_group, __entry->orig_start, 977 __entry->orig_group, __entry->orig_start,
934 __entry->orig_len, __entry->orig_logical, 978 __entry->orig_len, __entry->orig_logical,
@@ -946,8 +990,7 @@ DECLARE_EVENT_CLASS(ext4__mballoc,
946 TP_ARGS(sb, inode, group, start, len), 990 TP_ARGS(sb, inode, group, start, len),
947 991
948 TP_STRUCT__entry( 992 TP_STRUCT__entry(
949 __field( int, dev_major ) 993 __field( dev_t, dev )
950 __field( int, dev_minor )
951 __field( ino_t, ino ) 994 __field( ino_t, ino )
952 __field( int, result_start ) 995 __field( int, result_start )
953 __field( __u32, result_group ) 996 __field( __u32, result_group )
@@ -955,8 +998,7 @@ DECLARE_EVENT_CLASS(ext4__mballoc,
955 ), 998 ),
956 999
957 TP_fast_assign( 1000 TP_fast_assign(
958 __entry->dev_major = MAJOR(sb->s_dev); 1001 __entry->dev = sb->s_dev;
959 __entry->dev_minor = MINOR(sb->s_dev);
960 __entry->ino = inode ? inode->i_ino : 0; 1002 __entry->ino = inode ? inode->i_ino : 0;
961 __entry->result_start = start; 1003 __entry->result_start = start;
962 __entry->result_group = group; 1004 __entry->result_group = group;
@@ -964,7 +1006,7 @@ DECLARE_EVENT_CLASS(ext4__mballoc,
964 ), 1006 ),
965 1007
966 TP_printk("dev %d,%d inode %lu extent %u/%d/%u ", 1008 TP_printk("dev %d,%d inode %lu extent %u/%d/%u ",
967 __entry->dev_major, __entry->dev_minor, 1009 MAJOR(__entry->dev), MINOR(__entry->dev),
968 (unsigned long) __entry->ino, 1010 (unsigned long) __entry->ino,
969 __entry->result_group, __entry->result_start, 1011 __entry->result_group, __entry->result_start,
970 __entry->result_len) 1012 __entry->result_len)
@@ -998,8 +1040,7 @@ TRACE_EVENT(ext4_forget,
998 TP_ARGS(inode, is_metadata, block), 1040 TP_ARGS(inode, is_metadata, block),
999 1041
1000 TP_STRUCT__entry( 1042 TP_STRUCT__entry(
1001 __field( int, dev_major ) 1043 __field( dev_t, dev )
1002 __field( int, dev_minor )
1003 __field( ino_t, ino ) 1044 __field( ino_t, ino )
1004 __field( umode_t, mode ) 1045 __field( umode_t, mode )
1005 __field( int, is_metadata ) 1046 __field( int, is_metadata )
@@ -1007,8 +1048,7 @@ TRACE_EVENT(ext4_forget,
1007 ), 1048 ),
1008 1049
1009 TP_fast_assign( 1050 TP_fast_assign(
1010 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 1051 __entry->dev = inode->i_sb->s_dev;
1011 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
1012 __entry->ino = inode->i_ino; 1052 __entry->ino = inode->i_ino;
1013 __entry->mode = inode->i_mode; 1053 __entry->mode = inode->i_mode;
1014 __entry->is_metadata = is_metadata; 1054 __entry->is_metadata = is_metadata;
@@ -1016,9 +1056,9 @@ TRACE_EVENT(ext4_forget,
1016 ), 1056 ),
1017 1057
1018 TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu", 1058 TP_printk("dev %d,%d ino %lu mode 0%o is_metadata %d block %llu",
1019 __entry->dev_major, __entry->dev_minor, 1059 MAJOR(__entry->dev), MINOR(__entry->dev),
1020 (unsigned long) __entry->ino, __entry->mode, 1060 (unsigned long) __entry->ino,
1021 __entry->is_metadata, __entry->block) 1061 __entry->mode, __entry->is_metadata, __entry->block)
1022); 1062);
1023 1063
1024TRACE_EVENT(ext4_da_update_reserve_space, 1064TRACE_EVENT(ext4_da_update_reserve_space,
@@ -1027,8 +1067,7 @@ TRACE_EVENT(ext4_da_update_reserve_space,
1027 TP_ARGS(inode, used_blocks), 1067 TP_ARGS(inode, used_blocks),
1028 1068
1029 TP_STRUCT__entry( 1069 TP_STRUCT__entry(
1030 __field( int, dev_major ) 1070 __field( dev_t, dev )
1031 __field( int, dev_minor )
1032 __field( ino_t, ino ) 1071 __field( ino_t, ino )
1033 __field( umode_t, mode ) 1072 __field( umode_t, mode )
1034 __field( __u64, i_blocks ) 1073 __field( __u64, i_blocks )
@@ -1039,8 +1078,7 @@ TRACE_EVENT(ext4_da_update_reserve_space,
1039 ), 1078 ),
1040 1079
1041 TP_fast_assign( 1080 TP_fast_assign(
1042 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 1081 __entry->dev = inode->i_sb->s_dev;
1043 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
1044 __entry->ino = inode->i_ino; 1082 __entry->ino = inode->i_ino;
1045 __entry->mode = inode->i_mode; 1083 __entry->mode = inode->i_mode;
1046 __entry->i_blocks = inode->i_blocks; 1084 __entry->i_blocks = inode->i_blocks;
@@ -1050,10 +1088,12 @@ TRACE_EVENT(ext4_da_update_reserve_space,
1050 __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks; 1088 __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
1051 ), 1089 ),
1052 1090
1053 TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d", 1091 TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu used_blocks %d "
1054 __entry->dev_major, __entry->dev_minor, 1092 "reserved_data_blocks %d reserved_meta_blocks %d "
1055 (unsigned long) __entry->ino, __entry->mode, 1093 "allocated_meta_blocks %d",
1056 (unsigned long long) __entry->i_blocks, 1094 MAJOR(__entry->dev), MINOR(__entry->dev),
1095 (unsigned long) __entry->ino,
1096 __entry->mode, (unsigned long long) __entry->i_blocks,
1057 __entry->used_blocks, __entry->reserved_data_blocks, 1097 __entry->used_blocks, __entry->reserved_data_blocks,
1058 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks) 1098 __entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
1059); 1099);
@@ -1064,8 +1104,7 @@ TRACE_EVENT(ext4_da_reserve_space,
1064 TP_ARGS(inode, md_needed), 1104 TP_ARGS(inode, md_needed),
1065 1105
1066 TP_STRUCT__entry( 1106 TP_STRUCT__entry(
1067 __field( int, dev_major ) 1107 __field( dev_t, dev )
1068 __field( int, dev_minor )
1069 __field( ino_t, ino ) 1108 __field( ino_t, ino )
1070 __field( umode_t, mode ) 1109 __field( umode_t, mode )
1071 __field( __u64, i_blocks ) 1110 __field( __u64, i_blocks )
@@ -1075,8 +1114,7 @@ TRACE_EVENT(ext4_da_reserve_space,
1075 ), 1114 ),
1076 1115
1077 TP_fast_assign( 1116 TP_fast_assign(
1078 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 1117 __entry->dev = inode->i_sb->s_dev;
1079 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
1080 __entry->ino = inode->i_ino; 1118 __entry->ino = inode->i_ino;
1081 __entry->mode = inode->i_mode; 1119 __entry->mode = inode->i_mode;
1082 __entry->i_blocks = inode->i_blocks; 1120 __entry->i_blocks = inode->i_blocks;
@@ -1085,8 +1123,9 @@ TRACE_EVENT(ext4_da_reserve_space,
1085 __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks; 1123 __entry->reserved_meta_blocks = EXT4_I(inode)->i_reserved_meta_blocks;
1086 ), 1124 ),
1087 1125
1088 TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d reserved_data_blocks %d reserved_meta_blocks %d", 1126 TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu md_needed %d "
1089 __entry->dev_major, __entry->dev_minor, 1127 "reserved_data_blocks %d reserved_meta_blocks %d",
1128 MAJOR(__entry->dev), MINOR(__entry->dev),
1090 (unsigned long) __entry->ino, 1129 (unsigned long) __entry->ino,
1091 __entry->mode, (unsigned long long) __entry->i_blocks, 1130 __entry->mode, (unsigned long long) __entry->i_blocks,
1092 __entry->md_needed, __entry->reserved_data_blocks, 1131 __entry->md_needed, __entry->reserved_data_blocks,
@@ -1099,8 +1138,7 @@ TRACE_EVENT(ext4_da_release_space,
1099 TP_ARGS(inode, freed_blocks), 1138 TP_ARGS(inode, freed_blocks),
1100 1139
1101 TP_STRUCT__entry( 1140 TP_STRUCT__entry(
1102 __field( int, dev_major ) 1141 __field( dev_t, dev )
1103 __field( int, dev_minor )
1104 __field( ino_t, ino ) 1142 __field( ino_t, ino )
1105 __field( umode_t, mode ) 1143 __field( umode_t, mode )
1106 __field( __u64, i_blocks ) 1144 __field( __u64, i_blocks )
@@ -1111,8 +1149,7 @@ TRACE_EVENT(ext4_da_release_space,
1111 ), 1149 ),
1112 1150
1113 TP_fast_assign( 1151 TP_fast_assign(
1114 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 1152 __entry->dev = inode->i_sb->s_dev;
1115 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
1116 __entry->ino = inode->i_ino; 1153 __entry->ino = inode->i_ino;
1117 __entry->mode = inode->i_mode; 1154 __entry->mode = inode->i_mode;
1118 __entry->i_blocks = inode->i_blocks; 1155 __entry->i_blocks = inode->i_blocks;
@@ -1122,8 +1159,10 @@ TRACE_EVENT(ext4_da_release_space,
1122 __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks; 1159 __entry->allocated_meta_blocks = EXT4_I(inode)->i_allocated_meta_blocks;
1123 ), 1160 ),
1124 1161
1125 TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d reserved_data_blocks %d reserved_meta_blocks %d allocated_meta_blocks %d", 1162 TP_printk("dev %d,%d ino %lu mode 0%o i_blocks %llu freed_blocks %d "
1126 __entry->dev_major, __entry->dev_minor, 1163 "reserved_data_blocks %d reserved_meta_blocks %d "
1164 "allocated_meta_blocks %d",
1165 MAJOR(__entry->dev), MINOR(__entry->dev),
1127 (unsigned long) __entry->ino, 1166 (unsigned long) __entry->ino,
1128 __entry->mode, (unsigned long long) __entry->i_blocks, 1167 __entry->mode, (unsigned long long) __entry->i_blocks,
1129 __entry->freed_blocks, __entry->reserved_data_blocks, 1168 __entry->freed_blocks, __entry->reserved_data_blocks,
@@ -1136,20 +1175,19 @@ DECLARE_EVENT_CLASS(ext4__bitmap_load,
1136 TP_ARGS(sb, group), 1175 TP_ARGS(sb, group),
1137 1176
1138 TP_STRUCT__entry( 1177 TP_STRUCT__entry(
1139 __field( int, dev_major ) 1178 __field( dev_t, dev )
1140 __field( int, dev_minor )
1141 __field( __u32, group ) 1179 __field( __u32, group )
1142 1180
1143 ), 1181 ),
1144 1182
1145 TP_fast_assign( 1183 TP_fast_assign(
1146 __entry->dev_major = MAJOR(sb->s_dev); 1184 __entry->dev = sb->s_dev;
1147 __entry->dev_minor = MINOR(sb->s_dev);
1148 __entry->group = group; 1185 __entry->group = group;
1149 ), 1186 ),
1150 1187
1151 TP_printk("dev %d,%d group %u", 1188 TP_printk("dev %d,%d group %u",
1152 __entry->dev_major, __entry->dev_minor, __entry->group) 1189 MAJOR(__entry->dev), MINOR(__entry->dev),
1190 __entry->group)
1153); 1191);
1154 1192
1155DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load, 1193DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load,
@@ -1166,6 +1204,349 @@ DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,
1166 TP_ARGS(sb, group) 1204 TP_ARGS(sb, group)
1167); 1205);
1168 1206
1207DEFINE_EVENT(ext4__bitmap_load, ext4_read_block_bitmap_load,
1208
1209 TP_PROTO(struct super_block *sb, unsigned long group),
1210
1211 TP_ARGS(sb, group)
1212);
1213
1214DEFINE_EVENT(ext4__bitmap_load, ext4_load_inode_bitmap,
1215
1216 TP_PROTO(struct super_block *sb, unsigned long group),
1217
1218 TP_ARGS(sb, group)
1219);
1220
1221TRACE_EVENT(ext4_direct_IO_enter,
1222 TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw),
1223
1224 TP_ARGS(inode, offset, len, rw),
1225
1226 TP_STRUCT__entry(
1227 __field( ino_t, ino )
1228 __field( dev_t, dev )
1229 __field( loff_t, pos )
1230 __field( unsigned long, len )
1231 __field( int, rw )
1232 ),
1233
1234 TP_fast_assign(
1235 __entry->ino = inode->i_ino;
1236 __entry->dev = inode->i_sb->s_dev;
1237 __entry->pos = offset;
1238 __entry->len = len;
1239 __entry->rw = rw;
1240 ),
1241
1242 TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d",
1243 MAJOR(__entry->dev), MINOR(__entry->dev),
1244 (unsigned long) __entry->ino,
1245 (unsigned long long) __entry->pos, __entry->len, __entry->rw)
1246);
1247
1248TRACE_EVENT(ext4_direct_IO_exit,
1249 TP_PROTO(struct inode *inode, loff_t offset, unsigned long len, int rw, int ret),
1250
1251 TP_ARGS(inode, offset, len, rw, ret),
1252
1253 TP_STRUCT__entry(
1254 __field( ino_t, ino )
1255 __field( dev_t, dev )
1256 __field( loff_t, pos )
1257 __field( unsigned long, len )
1258 __field( int, rw )
1259 __field( int, ret )
1260 ),
1261
1262 TP_fast_assign(
1263 __entry->ino = inode->i_ino;
1264 __entry->dev = inode->i_sb->s_dev;
1265 __entry->pos = offset;
1266 __entry->len = len;
1267 __entry->rw = rw;
1268 __entry->ret = ret;
1269 ),
1270
1271 TP_printk("dev %d,%d ino %lu pos %llu len %lu rw %d ret %d",
1272 MAJOR(__entry->dev), MINOR(__entry->dev),
1273 (unsigned long) __entry->ino,
1274 (unsigned long long) __entry->pos, __entry->len,
1275 __entry->rw, __entry->ret)
1276);
1277
1278TRACE_EVENT(ext4_fallocate_enter,
1279 TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int mode),
1280
1281 TP_ARGS(inode, offset, len, mode),
1282
1283 TP_STRUCT__entry(
1284 __field( ino_t, ino )
1285 __field( dev_t, dev )
1286 __field( loff_t, pos )
1287 __field( loff_t, len )
1288 __field( int, mode )
1289 ),
1290
1291 TP_fast_assign(
1292 __entry->ino = inode->i_ino;
1293 __entry->dev = inode->i_sb->s_dev;
1294 __entry->pos = offset;
1295 __entry->len = len;
1296 __entry->mode = mode;
1297 ),
1298
1299 TP_printk("dev %d,%d ino %ld pos %llu len %llu mode %d",
1300 MAJOR(__entry->dev), MINOR(__entry->dev),
1301 (unsigned long) __entry->ino,
1302 (unsigned long long) __entry->pos,
1303 (unsigned long long) __entry->len, __entry->mode)
1304);
1305
1306TRACE_EVENT(ext4_fallocate_exit,
1307 TP_PROTO(struct inode *inode, loff_t offset, unsigned int max_blocks, int ret),
1308
1309 TP_ARGS(inode, offset, max_blocks, ret),
1310
1311 TP_STRUCT__entry(
1312 __field( ino_t, ino )
1313 __field( dev_t, dev )
1314 __field( loff_t, pos )
1315 __field( unsigned, blocks )
1316 __field( int, ret )
1317 ),
1318
1319 TP_fast_assign(
1320 __entry->ino = inode->i_ino;
1321 __entry->dev = inode->i_sb->s_dev;
1322 __entry->pos = offset;
1323 __entry->blocks = max_blocks;
1324 __entry->ret = ret;
1325 ),
1326
1327 TP_printk("dev %d,%d ino %ld pos %llu blocks %d ret %d",
1328 MAJOR(__entry->dev), MINOR(__entry->dev),
1329 (unsigned long) __entry->ino,
1330 (unsigned long long) __entry->pos, __entry->blocks,
1331 __entry->ret)
1332);
1333
1334TRACE_EVENT(ext4_unlink_enter,
1335 TP_PROTO(struct inode *parent, struct dentry *dentry),
1336
1337 TP_ARGS(parent, dentry),
1338
1339 TP_STRUCT__entry(
1340 __field( ino_t, parent )
1341 __field( ino_t, ino )
1342 __field( loff_t, size )
1343 __field( dev_t, dev )
1344 ),
1345
1346 TP_fast_assign(
1347 __entry->parent = parent->i_ino;
1348 __entry->ino = dentry->d_inode->i_ino;
1349 __entry->size = dentry->d_inode->i_size;
1350 __entry->dev = dentry->d_inode->i_sb->s_dev;
1351 ),
1352
1353 TP_printk("dev %d,%d ino %ld size %lld parent %ld",
1354 MAJOR(__entry->dev), MINOR(__entry->dev),
1355 (unsigned long) __entry->ino, __entry->size,
1356 (unsigned long) __entry->parent)
1357);
1358
1359TRACE_EVENT(ext4_unlink_exit,
1360 TP_PROTO(struct dentry *dentry, int ret),
1361
1362 TP_ARGS(dentry, ret),
1363
1364 TP_STRUCT__entry(
1365 __field( ino_t, ino )
1366 __field( dev_t, dev )
1367 __field( int, ret )
1368 ),
1369
1370 TP_fast_assign(
1371 __entry->ino = dentry->d_inode->i_ino;
1372 __entry->dev = dentry->d_inode->i_sb->s_dev;
1373 __entry->ret = ret;
1374 ),
1375
1376 TP_printk("dev %d,%d ino %ld ret %d",
1377 MAJOR(__entry->dev), MINOR(__entry->dev),
1378 (unsigned long) __entry->ino,
1379 __entry->ret)
1380);
1381
1382DECLARE_EVENT_CLASS(ext4__truncate,
1383 TP_PROTO(struct inode *inode),
1384
1385 TP_ARGS(inode),
1386
1387 TP_STRUCT__entry(
1388 __field( ino_t, ino )
1389 __field( dev_t, dev )
1390 __field( blkcnt_t, blocks )
1391 ),
1392
1393 TP_fast_assign(
1394 __entry->ino = inode->i_ino;
1395 __entry->dev = inode->i_sb->s_dev;
1396 __entry->blocks = inode->i_blocks;
1397 ),
1398
1399 TP_printk("dev %d,%d ino %lu blocks %lu",
1400 MAJOR(__entry->dev), MINOR(__entry->dev),
1401 (unsigned long) __entry->ino, (unsigned long) __entry->blocks)
1402);
1403
1404DEFINE_EVENT(ext4__truncate, ext4_truncate_enter,
1405
1406 TP_PROTO(struct inode *inode),
1407
1408 TP_ARGS(inode)
1409);
1410
1411DEFINE_EVENT(ext4__truncate, ext4_truncate_exit,
1412
1413 TP_PROTO(struct inode *inode),
1414
1415 TP_ARGS(inode)
1416);
1417
1418DECLARE_EVENT_CLASS(ext4__map_blocks_enter,
1419 TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
1420 unsigned len, unsigned flags),
1421
1422 TP_ARGS(inode, lblk, len, flags),
1423
1424 TP_STRUCT__entry(
1425 __field( ino_t, ino )
1426 __field( dev_t, dev )
1427 __field( ext4_lblk_t, lblk )
1428 __field( unsigned, len )
1429 __field( unsigned, flags )
1430 ),
1431
1432 TP_fast_assign(
1433 __entry->ino = inode->i_ino;
1434 __entry->dev = inode->i_sb->s_dev;
1435 __entry->lblk = lblk;
1436 __entry->len = len;
1437 __entry->flags = flags;
1438 ),
1439
1440 TP_printk("dev %d,%d ino %lu lblk %u len %u flags %u",
1441 MAJOR(__entry->dev), MINOR(__entry->dev),
1442 (unsigned long) __entry->ino,
1443 (unsigned) __entry->lblk, __entry->len, __entry->flags)
1444);
1445
1446DEFINE_EVENT(ext4__map_blocks_enter, ext4_ext_map_blocks_enter,
1447 TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
1448 unsigned len, unsigned flags),
1449
1450 TP_ARGS(inode, lblk, len, flags)
1451);
1452
1453DEFINE_EVENT(ext4__map_blocks_enter, ext4_ind_map_blocks_enter,
1454 TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
1455 unsigned len, unsigned flags),
1456
1457 TP_ARGS(inode, lblk, len, flags)
1458);
1459
1460DECLARE_EVENT_CLASS(ext4__map_blocks_exit,
1461 TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
1462 ext4_fsblk_t pblk, unsigned len, int ret),
1463
1464 TP_ARGS(inode, lblk, pblk, len, ret),
1465
1466 TP_STRUCT__entry(
1467 __field( ino_t, ino )
1468 __field( dev_t, dev )
1469 __field( ext4_lblk_t, lblk )
1470 __field( ext4_fsblk_t, pblk )
1471 __field( unsigned, len )
1472 __field( int, ret )
1473 ),
1474
1475 TP_fast_assign(
1476 __entry->ino = inode->i_ino;
1477 __entry->dev = inode->i_sb->s_dev;
1478 __entry->lblk = lblk;
1479 __entry->pblk = pblk;
1480 __entry->len = len;
1481 __entry->ret = ret;
1482 ),
1483
1484 TP_printk("dev %d,%d ino %lu lblk %u pblk %llu len %u ret %d",
1485 MAJOR(__entry->dev), MINOR(__entry->dev),
1486 (unsigned long) __entry->ino,
1487 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk,
1488 __entry->len, __entry->ret)
1489);
1490
1491DEFINE_EVENT(ext4__map_blocks_exit, ext4_ext_map_blocks_exit,
1492 TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
1493 ext4_fsblk_t pblk, unsigned len, int ret),
1494
1495 TP_ARGS(inode, lblk, pblk, len, ret)
1496);
1497
1498DEFINE_EVENT(ext4__map_blocks_exit, ext4_ind_map_blocks_exit,
1499 TP_PROTO(struct inode *inode, ext4_lblk_t lblk,
1500 ext4_fsblk_t pblk, unsigned len, int ret),
1501
1502 TP_ARGS(inode, lblk, pblk, len, ret)
1503);
1504
1505TRACE_EVENT(ext4_ext_load_extent,
1506 TP_PROTO(struct inode *inode, ext4_lblk_t lblk, ext4_fsblk_t pblk),
1507
1508 TP_ARGS(inode, lblk, pblk),
1509
1510 TP_STRUCT__entry(
1511 __field( ino_t, ino )
1512 __field( dev_t, dev )
1513 __field( ext4_lblk_t, lblk )
1514 __field( ext4_fsblk_t, pblk )
1515 ),
1516
1517 TP_fast_assign(
1518 __entry->ino = inode->i_ino;
1519 __entry->dev = inode->i_sb->s_dev;
1520 __entry->lblk = lblk;
1521 __entry->pblk = pblk;
1522 ),
1523
1524 TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
1525 MAJOR(__entry->dev), MINOR(__entry->dev),
1526 (unsigned long) __entry->ino,
1527 (unsigned) __entry->lblk, (unsigned long long) __entry->pblk)
1528);
1529
1530TRACE_EVENT(ext4_load_inode,
1531 TP_PROTO(struct inode *inode),
1532
1533 TP_ARGS(inode),
1534
1535 TP_STRUCT__entry(
1536 __field( ino_t, ino )
1537 __field( dev_t, dev )
1538 ),
1539
1540 TP_fast_assign(
1541 __entry->ino = inode->i_ino;
1542 __entry->dev = inode->i_sb->s_dev;
1543 ),
1544
1545 TP_printk("dev %d,%d ino %ld",
1546 MAJOR(__entry->dev), MINOR(__entry->dev),
1547 (unsigned long) __entry->ino)
1548);
1549
1169#endif /* _TRACE_EXT4_H */ 1550#endif /* _TRACE_EXT4_H */
1170 1551
1171/* This part must be outside protection */ 1552/* This part must be outside protection */
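The common thread in the ext4.h hunks above is mechanical: each event's dev_major/dev_minor pair of ints is collapsed into a single dev_t field, and the split into major and minor numbers is deferred to TP_printk() via MAJOR()/MINOR(), so the trace ring buffer stores one packed device word per event instead of two ints. As a rough userspace illustration of that encode-once, decode-at-print idea (this is not kernel code: it relies on the glibc makedev()/major()/minor() helpers from <sys/sysmacros.h>, which only mirror the kernel MAJOR()/MINOR() macros, and the 8,1 device number is an assumed example):

    /* Userspace sketch: store one packed dev_t, decode it only for display,
     * the way the reworked TP_printk() lines do with MAJOR()/MINOR(). */
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/types.h>
    #include <sys/sysmacros.h>      /* makedev(), major(), minor() */

    int main(void)
    {
            dev_t dev = makedev(8, 1);      /* assumed example device */

            /* What the trace entry now carries: a single packed value. */
            printf("stored dev_t: %ju\n", (uintmax_t) dev);

            /* What the formatted trace line shows: "dev %d,%d". */
            printf("dev %u,%u\n", major(dev), minor(dev));
            return 0;
    }

Deferring the decode to print time is what lets the per-event struct shrink without losing anything in the formatted output.
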
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 7447ea9305b..bf16545cc97 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -17,19 +17,17 @@ TRACE_EVENT(jbd2_checkpoint,
17 TP_ARGS(journal, result), 17 TP_ARGS(journal, result),
18 18
19 TP_STRUCT__entry( 19 TP_STRUCT__entry(
20 __field( int, dev_major ) 20 __field( dev_t, dev )
21 __field( int, dev_minor )
22 __field( int, result ) 21 __field( int, result )
23 ), 22 ),
24 23
25 TP_fast_assign( 24 TP_fast_assign(
26 __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); 25 __entry->dev = journal->j_fs_dev->bd_dev;
27 __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev);
28 __entry->result = result; 26 __entry->result = result;
29 ), 27 ),
30 28
31 TP_printk("dev %d,%d result %d", 29 TP_printk("dev %s result %d",
32 __entry->dev_major, __entry->dev_minor, __entry->result) 30 jbd2_dev_to_name(__entry->dev), __entry->result)
33); 31);
34 32
35DECLARE_EVENT_CLASS(jbd2_commit, 33DECLARE_EVENT_CLASS(jbd2_commit,
@@ -39,22 +37,20 @@ DECLARE_EVENT_CLASS(jbd2_commit,
39 TP_ARGS(journal, commit_transaction), 37 TP_ARGS(journal, commit_transaction),
40 38
41 TP_STRUCT__entry( 39 TP_STRUCT__entry(
42 __field( int, dev_major ) 40 __field( dev_t, dev )
43 __field( int, dev_minor )
44 __field( char, sync_commit ) 41 __field( char, sync_commit )
45 __field( int, transaction ) 42 __field( int, transaction )
46 ), 43 ),
47 44
48 TP_fast_assign( 45 TP_fast_assign(
49 __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); 46 __entry->dev = journal->j_fs_dev->bd_dev;
50 __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev);
51 __entry->sync_commit = commit_transaction->t_synchronous_commit; 47 __entry->sync_commit = commit_transaction->t_synchronous_commit;
52 __entry->transaction = commit_transaction->t_tid; 48 __entry->transaction = commit_transaction->t_tid;
53 ), 49 ),
54 50
55 TP_printk("dev %d,%d transaction %d sync %d", 51 TP_printk("dev %s transaction %d sync %d",
56 __entry->dev_major, __entry->dev_minor, 52 jbd2_dev_to_name(__entry->dev), __entry->transaction,
57 __entry->transaction, __entry->sync_commit) 53 __entry->sync_commit)
58); 54);
59 55
60DEFINE_EVENT(jbd2_commit, jbd2_start_commit, 56DEFINE_EVENT(jbd2_commit, jbd2_start_commit,
@@ -91,24 +87,22 @@ TRACE_EVENT(jbd2_end_commit,
91 TP_ARGS(journal, commit_transaction), 87 TP_ARGS(journal, commit_transaction),
92 88
93 TP_STRUCT__entry( 89 TP_STRUCT__entry(
94 __field( int, dev_major ) 90 __field( dev_t, dev )
95 __field( int, dev_minor )
96 __field( char, sync_commit ) 91 __field( char, sync_commit )
97 __field( int, transaction ) 92 __field( int, transaction )
98 __field( int, head ) 93 __field( int, head )
99 ), 94 ),
100 95
101 TP_fast_assign( 96 TP_fast_assign(
102 __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); 97 __entry->dev = journal->j_fs_dev->bd_dev;
103 __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev);
104 __entry->sync_commit = commit_transaction->t_synchronous_commit; 98 __entry->sync_commit = commit_transaction->t_synchronous_commit;
105 __entry->transaction = commit_transaction->t_tid; 99 __entry->transaction = commit_transaction->t_tid;
106 __entry->head = journal->j_tail_sequence; 100 __entry->head = journal->j_tail_sequence;
107 ), 101 ),
108 102
109 TP_printk("dev %d,%d transaction %d sync %d head %d", 103 TP_printk("dev %s transaction %d sync %d head %d",
110 __entry->dev_major, __entry->dev_minor, 104 jbd2_dev_to_name(__entry->dev), __entry->transaction,
111 __entry->transaction, __entry->sync_commit, __entry->head) 105 __entry->sync_commit, __entry->head)
112); 106);
113 107
114TRACE_EVENT(jbd2_submit_inode_data, 108TRACE_EVENT(jbd2_submit_inode_data,
@@ -117,20 +111,17 @@ TRACE_EVENT(jbd2_submit_inode_data,
117 TP_ARGS(inode), 111 TP_ARGS(inode),
118 112
119 TP_STRUCT__entry( 113 TP_STRUCT__entry(
120 __field( int, dev_major ) 114 __field( dev_t, dev )
121 __field( int, dev_minor )
122 __field( ino_t, ino ) 115 __field( ino_t, ino )
123 ), 116 ),
124 117
125 TP_fast_assign( 118 TP_fast_assign(
126 __entry->dev_major = MAJOR(inode->i_sb->s_dev); 119 __entry->dev = inode->i_sb->s_dev;
127 __entry->dev_minor = MINOR(inode->i_sb->s_dev);
128 __entry->ino = inode->i_ino; 120 __entry->ino = inode->i_ino;
129 ), 121 ),
130 122
131 TP_printk("dev %d,%d ino %lu", 123 TP_printk("dev %s ino %lu",
132 __entry->dev_major, __entry->dev_minor, 124 jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino)
133 (unsigned long) __entry->ino)
134); 125);
135 126
136TRACE_EVENT(jbd2_run_stats, 127TRACE_EVENT(jbd2_run_stats,
@@ -140,8 +131,7 @@ TRACE_EVENT(jbd2_run_stats,
140 TP_ARGS(dev, tid, stats), 131 TP_ARGS(dev, tid, stats),
141 132
142 TP_STRUCT__entry( 133 TP_STRUCT__entry(
143 __field( int, dev_major ) 134 __field( dev_t, dev )
144 __field( int, dev_minor )
145 __field( unsigned long, tid ) 135 __field( unsigned long, tid )
146 __field( unsigned long, wait ) 136 __field( unsigned long, wait )
147 __field( unsigned long, running ) 137 __field( unsigned long, running )
@@ -154,8 +144,7 @@ TRACE_EVENT(jbd2_run_stats,
154 ), 144 ),
155 145
156 TP_fast_assign( 146 TP_fast_assign(
157 __entry->dev_major = MAJOR(dev); 147 __entry->dev = dev;
158 __entry->dev_minor = MINOR(dev);
159 __entry->tid = tid; 148 __entry->tid = tid;
160 __entry->wait = stats->rs_wait; 149 __entry->wait = stats->rs_wait;
161 __entry->running = stats->rs_running; 150 __entry->running = stats->rs_running;
@@ -167,9 +156,9 @@ TRACE_EVENT(jbd2_run_stats,
167 __entry->blocks_logged = stats->rs_blocks_logged; 156 __entry->blocks_logged = stats->rs_blocks_logged;
168 ), 157 ),
169 158
170 TP_printk("dev %d,%d tid %lu wait %u running %u locked %u flushing %u " 159 TP_printk("dev %s tid %lu wait %u running %u locked %u flushing %u "
171 "logging %u handle_count %u blocks %u blocks_logged %u", 160 "logging %u handle_count %u blocks %u blocks_logged %u",
172 __entry->dev_major, __entry->dev_minor, __entry->tid, 161 jbd2_dev_to_name(__entry->dev), __entry->tid,
173 jiffies_to_msecs(__entry->wait), 162 jiffies_to_msecs(__entry->wait),
174 jiffies_to_msecs(__entry->running), 163 jiffies_to_msecs(__entry->running),
175 jiffies_to_msecs(__entry->locked), 164 jiffies_to_msecs(__entry->locked),
@@ -186,8 +175,7 @@ TRACE_EVENT(jbd2_checkpoint_stats,
186 TP_ARGS(dev, tid, stats), 175 TP_ARGS(dev, tid, stats),
187 176
188 TP_STRUCT__entry( 177 TP_STRUCT__entry(
189 __field( int, dev_major ) 178 __field( dev_t, dev )
190 __field( int, dev_minor )
191 __field( unsigned long, tid ) 179 __field( unsigned long, tid )
192 __field( unsigned long, chp_time ) 180 __field( unsigned long, chp_time )
193 __field( __u32, forced_to_close ) 181 __field( __u32, forced_to_close )
@@ -196,8 +184,7 @@ TRACE_EVENT(jbd2_checkpoint_stats,
196 ), 184 ),
197 185
198 TP_fast_assign( 186 TP_fast_assign(
199 __entry->dev_major = MAJOR(dev); 187 __entry->dev = dev;
200 __entry->dev_minor = MINOR(dev);
201 __entry->tid = tid; 188 __entry->tid = tid;
202 __entry->chp_time = stats->cs_chp_time; 189 __entry->chp_time = stats->cs_chp_time;
203 __entry->forced_to_close= stats->cs_forced_to_close; 190 __entry->forced_to_close= stats->cs_forced_to_close;
@@ -205,9 +192,9 @@ TRACE_EVENT(jbd2_checkpoint_stats,
205 __entry->dropped = stats->cs_dropped; 192 __entry->dropped = stats->cs_dropped;
206 ), 193 ),
207 194
208 TP_printk("dev %d,%d tid %lu chp_time %u forced_to_close %u " 195 TP_printk("dev %s tid %lu chp_time %u forced_to_close %u "
209 "written %u dropped %u", 196 "written %u dropped %u",
210 __entry->dev_major, __entry->dev_minor, __entry->tid, 197 jbd2_dev_to_name(__entry->dev), __entry->tid,
211 jiffies_to_msecs(__entry->chp_time), 198 jiffies_to_msecs(__entry->chp_time),
212 __entry->forced_to_close, __entry->written, __entry->dropped) 199 __entry->forced_to_close, __entry->written, __entry->dropped)
213); 200);
@@ -220,8 +207,7 @@ TRACE_EVENT(jbd2_cleanup_journal_tail,
220 TP_ARGS(journal, first_tid, block_nr, freed), 207 TP_ARGS(journal, first_tid, block_nr, freed),
221 208
222 TP_STRUCT__entry( 209 TP_STRUCT__entry(
223 __field( int, dev_major ) 210 __field( dev_t, dev )
224 __field( int, dev_minor )
225 __field( tid_t, tail_sequence ) 211 __field( tid_t, tail_sequence )
226 __field( tid_t, first_tid ) 212 __field( tid_t, first_tid )
227 __field(unsigned long, block_nr ) 213 __field(unsigned long, block_nr )
@@ -229,18 +215,16 @@ TRACE_EVENT(jbd2_cleanup_journal_tail,
229 ), 215 ),
230 216
231 TP_fast_assign( 217 TP_fast_assign(
232 __entry->dev_major = MAJOR(journal->j_fs_dev->bd_dev); 218 __entry->dev = journal->j_fs_dev->bd_dev;
233 __entry->dev_minor = MINOR(journal->j_fs_dev->bd_dev);
234 __entry->tail_sequence = journal->j_tail_sequence; 219 __entry->tail_sequence = journal->j_tail_sequence;
235 __entry->first_tid = first_tid; 220 __entry->first_tid = first_tid;
236 __entry->block_nr = block_nr; 221 __entry->block_nr = block_nr;
237 __entry->freed = freed; 222 __entry->freed = freed;
238 ), 223 ),
239 224
240 TP_printk("dev %d,%d from %u to %u offset %lu freed %lu", 225 TP_printk("dev %s from %u to %u offset %lu freed %lu",
241 __entry->dev_major, __entry->dev_minor, 226 jbd2_dev_to_name(__entry->dev), __entry->tail_sequence,
242 __entry->tail_sequence, __entry->first_tid, 227 __entry->first_tid, __entry->block_nr, __entry->freed)
243 __entry->block_nr, __entry->freed)
244); 228);
245 229
246#endif /* _TRACE_JBD2_H */ 230#endif /* _TRACE_JBD2_H */
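
The jbd2.h conversion takes a different route for the output format: the stored field becomes a dev_t here as well, but the TP_printk() lines switch to "dev %s" and pass the value through jbd2_dev_to_name() rather than printing a MAJOR()/MINOR() pair, so a reader of the text trace sees a device name for jbd2 events and a major,minor pair for ext4 events. A minimal, hypothetical consumer-side sketch that copes with both shapes (the sample lines below are invented; only the "dev %d,%d" and "dev %s" prefixes are taken from the TP_printk() formats in this diff):

    /* Hypothetical trace-line consumer: accept either device field shape. */
    #include <stdio.h>

    static void parse_dev_field(const char *line)
    {
            unsigned int maj, min;
            char name[32];

            if (sscanf(line, "dev %u,%u", &maj, &min) == 2)
                    printf("numeric form: major=%u minor=%u\n", maj, min);
            else if (sscanf(line, "dev %31s", name) == 1)
                    printf("string form: %s\n", name);
            else
                    printf("no dev field recognized\n");
    }

    int main(void)
    {
            parse_dev_field("dev 8,1 ino 12 page_index 3");      /* ext4-style */
            parse_dev_field("dev sda1 transaction 42 sync 0");    /* jbd2-style */
            return 0;
    }

The sketch only shows what a consumer has to handle once the two headers format the device differently; it implies nothing about which format is preferable.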