-rw-r--r--  Documentation/ABI/testing/sysfs-fs-f2fs  |   8
-rw-r--r--  Documentation/filesystems/f2fs.txt       | 133
-rw-r--r--  fs/f2fs/checkpoint.c                     | 107
-rw-r--r--  fs/f2fs/data.c                           | 249
-rw-r--r--  fs/f2fs/debug.c                          |   7
-rw-r--r--  fs/f2fs/dir.c                            |  16
-rw-r--r--  fs/f2fs/extent_cache.c                   |   7
-rw-r--r--  fs/f2fs/f2fs.h                           | 129
-rw-r--r--  fs/f2fs/file.c                           | 302
-rw-r--r--  fs/f2fs/gc.c                             | 196
-rw-r--r--  fs/f2fs/inline.c                         |  16
-rw-r--r--  fs/f2fs/inode.c                          |  78
-rw-r--r--  fs/f2fs/namei.c                          |  10
-rw-r--r--  fs/f2fs/node.c                           |  38
-rw-r--r--  fs/f2fs/recovery.c                       |  43
-rw-r--r--  fs/f2fs/segment.c                        | 170
-rw-r--r--  fs/f2fs/segment.h                        |  16
-rw-r--r--  fs/f2fs/super.c                          | 610
-rw-r--r--  fs/f2fs/sysfs.c                          |  22
-rw-r--r--  fs/f2fs/xattr.c                          |  10
-rw-r--r--  include/trace/events/f2fs.h              |  11
21 files changed, 1405 insertions, 773 deletions
diff --git a/Documentation/ABI/testing/sysfs-fs-f2fs b/Documentation/ABI/testing/sysfs-fs-f2fs
index 91822ce25831..dca326e0ee3e 100644
--- a/Documentation/ABI/testing/sysfs-fs-f2fs
+++ b/Documentation/ABI/testing/sysfs-fs-f2fs
@@ -243,3 +243,11 @@ Description:
243 - Del: echo '[h/c]!extension' > /sys/fs/f2fs/<disk>/extension_list 243 - Del: echo '[h/c]!extension' > /sys/fs/f2fs/<disk>/extension_list
244 - [h] means add/del hot file extension 244 - [h] means add/del hot file extension
245 - [c] means add/del cold file extension 245 - [c] means add/del cold file extension
246
247What: /sys/fs/f2fs/<disk>/unusable
248Date: April 2019
249Contact: "Daniel Rosenberg" <drosen@google.com>
250Description:
251 If checkpoint=disable, it displays the number of blocks that are unusable.
252 If checkpoint=enable, it displays the number of blocks that would be unusable
253 if checkpoint=disable were to be set.
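
The unusable attribute described above is an ordinary read-only sysfs file, so it can be polled from userspace. A minimal sketch of reading it, assuming a hypothetical device name "sda1" (the path is illustrative, not part of the patch):

#include <stdio.h>
#include <stdlib.h>

/* Read /sys/fs/f2fs/<disk>/unusable and print the block count.
 * "sda1" is a hypothetical device name; substitute the mounted device. */
int main(void)
{
    FILE *f = fopen("/sys/fs/f2fs/sda1/unusable", "r");
    unsigned long long unusable;

    if (!f || fscanf(f, "%llu", &unusable) != 1) {
        fprintf(stderr, "failed to read unusable\n");
        return EXIT_FAILURE;
    }
    fclose(f);
    printf("unusable blocks: %llu\n", unusable);
    return EXIT_SUCCESS;
}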
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index f7b5e4ff0de3..496fa28b2492 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -214,11 +214,22 @@ fsync_mode=%s Control the policy of fsync. Currently supports "posix",
214 non-atomic files likewise "nobarrier" mount option. 214 non-atomic files likewise "nobarrier" mount option.
215test_dummy_encryption Enable dummy encryption, which provides a fake fscrypt 215test_dummy_encryption Enable dummy encryption, which provides a fake fscrypt
216 context. The fake fscrypt context is used by xfstests. 216 context. The fake fscrypt context is used by xfstests.
217checkpoint=%s Set to "disable" to turn off checkpointing. Set to "enable" 217checkpoint=%s[:%u[%]] Set to "disable" to turn off checkpointing. Set to "enable"
218 to reenable checkpointing. Is enabled by default. While 218 to reenable checkpointing. Is enabled by default. While
219 disabled, any unmounting or unexpected shutdowns will cause 219 disabled, any unmounting or unexpected shutdowns will cause
220 the filesystem contents to appear as they did when the 220 the filesystem contents to appear as they did when the
221 filesystem was mounted with that option. 221 filesystem was mounted with that option.
222 While mounting with checkpoint=disable, the filesystem must
223 run garbage collection to ensure that all available space can
224 be used. If this takes too much time, the mount may return
225 EAGAIN. You may optionally add a value to indicate how much
226 of the disk you would be willing to temporarily give up to
227 avoid additional garbage collection. This can be given as a
228 number of blocks, or as a percent. For instance, mounting
229 with checkpoint=disable:100% would always succeed, but it may
230 hide up to all remaining free space. The actual space that
231 would be unusable can be viewed at /sys/fs/f2fs/<disk>/unusable.
232 This space is reclaimed once checkpoint=enable is set.
222 233
223================================================================================ 234================================================================================
224DEBUGFS ENTRIES 235DEBUGFS ENTRIES
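
As an aside on the checkpoint=%s[:%u[%]] syntax documented just above, the same option string can be passed through mount(2). A minimal sketch, with a hypothetical device and mountpoint; it only demonstrates handling the EAGAIN case the text mentions:

#include <errno.h>
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
    /* Device and mountpoint are placeholders, not taken from the patch. */
    const char *dev = "/dev/sda1";
    const char *mnt = "/mnt/f2fs";

    /* Allow up to 10% of the space to become temporarily unusable so the
     * mount does not have to wait for further garbage collection. */
    if (mount(dev, mnt, "f2fs", 0, "checkpoint=disable:10%") < 0) {
        if (errno == EAGAIN)
            fprintf(stderr, "mount needs more GC; retry or raise the cap\n");
        else
            perror("mount");
        return 1;
    }
    return 0;
}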
@@ -246,11 +257,14 @@ Files in /sys/fs/f2fs/<devname>
246.............................................................................. 257..............................................................................
247 File Content 258 File Content
248 259
249 gc_max_sleep_time This tuning parameter controls the maximum sleep 260 gc_urgent_sleep_time This parameter controls sleep time for gc_urgent.
261 500 ms is set by default. See above gc_urgent.
262
263 gc_min_sleep_time This tuning parameter controls the minimum sleep
250 time for the garbage collection thread. Time is 264 time for the garbage collection thread. Time is
251 in milliseconds. 265 in milliseconds.
252 266
253 gc_min_sleep_time This tuning parameter controls the minimum sleep 267 gc_max_sleep_time This tuning parameter controls the maximum sleep
254 time for the garbage collection thread. Time is 268 time for the garbage collection thread. Time is
255 in milliseconds. 269 in milliseconds.
256 270
@@ -270,9 +284,6 @@ Files in /sys/fs/f2fs/<devname>
270 to 1, background thread starts to do GC by given 284 to 1, background thread starts to do GC by given
271 gc_urgent_sleep_time interval. 285 gc_urgent_sleep_time interval.
272 286
273 gc_urgent_sleep_time This parameter controls sleep time for gc_urgent.
274 500 ms is set by default. See above gc_urgent.
275
276 reclaim_segments This parameter controls the number of prefree 287 reclaim_segments This parameter controls the number of prefree
277 segments to be reclaimed. If the number of prefree 288 segments to be reclaimed. If the number of prefree
278 segments is larger than the number of segments 289 segments is larger than the number of segments
@@ -287,7 +298,16 @@ Files in /sys/fs/f2fs/<devname>
287 checkpoint is triggered, and issued during the 298 checkpoint is triggered, and issued during the
288 checkpoint. By default, it is disabled with 0. 299 checkpoint. By default, it is disabled with 0.
289 300
290 trim_sections This parameter controls the number of sections 301 discard_granularity This parameter controls the granularity of discard
302 command size. It will issue discard commands only if
303 the size is larger than the given granularity. Its
304 unit size is 4KB, and 4 (=16KB) is set by default.
305 The maximum value is 128 (=512KB).
306
307 reserved_blocks This parameter indicates the number of blocks that
308 f2fs reserves internally for root.
309
310 batched_trim_sections This parameter controls the number of sections
291 to be trimmed out in batch mode when FITRIM 311 to be trimmed out in batch mode when FITRIM
292 conducts. 32 sections is set by default. 312 conducts. 32 sections is set by default.
293 313
@@ -309,11 +329,35 @@ Files in /sys/fs/f2fs/<devname>
309 the number is less than this value, it triggers 329 the number is less than this value, it triggers
310 in-place-updates. 330 in-place-updates.
311 331
332 min_seq_blocks This parameter controls the threshold to serialize
333 write IOs issued by multiple threads in parallel.
334
335 min_hot_blocks This parameter controls the threshold to allocate
336 a hot data log for pending data blocks to write.
337
338 min_ssr_sections This parameter sets the threshold used when deciding
339 on SSR block allocation. If this is large, SSR mode
340 will be enabled earlier.
341
342 ram_thresh This parameter controls the memory footprint used
343 by free nids and cached nat entries. By default,
344 10 is set, which indicates 10 MB / 1 GB RAM.
345
346 ra_nid_pages When building free nids, F2FS reads NAT blocks
347 ahead to speed this up. Default is 0.
348
349 dirty_nats_ratio Given a dirty ratio of cached nat entries, F2FS
350 determines whether to flush them in the background.
351
312 max_victim_search This parameter controls the number of trials to 352 max_victim_search This parameter controls the number of trials to
313 find a victim segment when conducting SSR and 353 find a victim segment when conducting SSR and
314 cleaning operations. The default value is 4096 354 cleaning operations. The default value is 4096
315 which covers 8GB block address range. 355 which covers 8GB block address range.
316 356
357 migration_granularity For large-sized sections, F2FS can stop GC given
358 this granularity instead of reclaiming the entire
359 section.
360
317 dir_level This parameter controls the directory level to 361 dir_level This parameter controls the directory level to
318 support large directory. If a directory has a 362 support large directory. If a directory has a
319 number of files, it can reduce the file lookup 363 number of files, it can reduce the file lookup
@@ -321,9 +365,53 @@ Files in /sys/fs/f2fs/<devname>
321 Otherwise, it needs to decrease this value to 365 Otherwise, it needs to decrease this value to
322 reduce the space overhead. The default value is 0. 366 reduce the space overhead. The default value is 0.
323 367
324 ram_thresh This parameter controls the memory footprint used 368 cp_interval F2FS tries to do checkpoint periodically, 60 secs
325 by free nids and cached nat entries. By default, 369 by default.
326 10 is set, which indicates 10 MB / 1 GB RAM. 370
371 idle_interval F2FS considers the system idle if there are no
372 F2FS operations during the given interval. Default
373 is 5 secs.
374
375 discard_idle_interval F2FS considers the discard thread idle after the
376 given time interval. Default is 5 secs.
377
378 gc_idle_interval F2FS considers the GC thread idle after the given
379 time interval. Default is 5 secs.
380
381 umount_discard_timeout When unmounting the disk, F2FS waits for queued
382 discard commands to finish, which can take a long
383 time. This sets a timeout for that wait, 5 secs by default.
384
385 iostat_enable This enables/disables iostat in F2FS.
386
387 readdir_ra This enables/disables readahead of inode blocks
388 in readdir. Default is enabled.
389
390 gc_pin_file_thresh This indicates how many GC failures are allowed
391 for a pinned file. If the count exceeds this, F2FS
392 no longer guarantees its pinned state. 2048 trials
393 is set by default.
394
395 extension_list This allows changing the extension_list for
396 hot/cold files at runtime.
397
398 inject_rate This controls injection rate of arbitrary faults.
399
400 inject_type This controls injection type of arbitrary faults.
401
402 dirty_segments This shows # of dirty segments.
403
404 lifetime_write_kbytes This shows # of data written to the disk.
405
406 features This shows current features enabled on F2FS.
407
408 current_reserved_blocks This shows # of blocks currently reserved.
409
410 unusable If checkpoint=disable, this shows the number of
411 blocks that are unusable.
412 If checkpoint=enable, it shows the number of blocks
413 that would be unusable if checkpoint=disable were
414 to be set.
327 415
328================================================================================ 416================================================================================
329USAGE 417USAGE
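
All of the knobs listed above are plain sysfs attributes under /sys/fs/f2fs/<disk>/. Purely as an illustration, the sketch below turns on urgent GC using the gc_urgent and gc_urgent_sleep_time entries described earlier; the device name "sda1" and the small helper are hypothetical:

#include <stdio.h>

/* Write one value to an f2fs sysfs tunable; "sda1" is a placeholder. */
static int f2fs_sysfs_write(const char *attr, const char *val)
{
    char path[256];
    FILE *f;

    snprintf(path, sizeof(path), "/sys/fs/f2fs/sda1/%s", attr);
    f = fopen(path, "w");
    if (!f)
        return -1;
    fprintf(f, "%s\n", val);
    return fclose(f);
}

int main(void)
{
    /* 500 ms is the documented default interval for urgent GC. */
    if (f2fs_sysfs_write("gc_urgent_sleep_time", "500") ||
        f2fs_sysfs_write("gc_urgent", "1")) {
        perror("f2fs sysfs write");
        return 1;
    }
    return 0;
}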
@@ -716,3 +804,28 @@ WRITE_LIFE_NOT_SET WARM_DATA WRITE_LIFE_NOT_SET
716WRITE_LIFE_NONE " WRITE_LIFE_NONE 804WRITE_LIFE_NONE " WRITE_LIFE_NONE
717WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM 805WRITE_LIFE_MEDIUM " WRITE_LIFE_MEDIUM
718WRITE_LIFE_LONG " WRITE_LIFE_LONG 806WRITE_LIFE_LONG " WRITE_LIFE_LONG
807
808Fallocate(2) Policy
809-------------------
810
811The default policy follows the POSIX rule below.
812
813Allocating disk space
814 The default operation (i.e., mode is zero) of fallocate() allocates
815 the disk space within the range specified by offset and len. The
816 file size (as reported by stat(2)) will be changed if offset+len is
817 greater than the file size. Any subregion within the range specified
818 by offset and len that did not contain data before the call will be
819 initialized to zero. This default behavior closely resembles the
820 behavior of the posix_fallocate(3) library function, and is intended
821 as a method of optimally implementing that function.
822
823However, once F2FS receives ioctl(fd, F2FS_IOC_SET_PIN_FILE) prior to
824fallocate(fd, DEFAULT_MODE), it allocates on-disk block addresses having
825zero or random data, which is useful for the below scenario, where:
826 1. create(fd)
827 2. ioctl(fd, F2FS_IOC_SET_PIN_FILE)
828 3. fallocate(fd, 0, 0, size)
829 4. address = fibmap(fd, offset)
830 5. open(blkdev)
831 6. write(blkdev, address)
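
A hedged userspace sketch of the six-step scenario above: the ioctl definition mirrors the F2FS_IOC_SET_PIN_FILE line added to fs/f2fs/f2fs.h later in this diff, the 0xf5 magic is assumed from the kernel header, and the file path and size are placeholders. FIBMAP (step 4) is the generic ioctl from <linux/fs.h> and needs root privileges:

#define _GNU_SOURCE             /* for fallocate() */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>           /* FIBMAP */
#include <linux/types.h>        /* __u32 */

/* Mirrors fs/f2fs/f2fs.h in this series; the 0xf5 magic is an assumption. */
#define F2FS_IOCTL_MAGIC        0xf5
#define F2FS_IOC_SET_PIN_FILE   _IOW(F2FS_IOCTL_MAGIC, 13, __u32)

int main(void)
{
    const char *path = "/mnt/f2fs/pinned.img";  /* hypothetical path */
    __u32 pin = 1;
    int block = 0;                              /* file block 0 */
    int fd;

    /* steps 1-3: create, pin, then preallocate so block addresses stay fixed */
    fd = open(path, O_CREAT | O_RDWR, 0600);
    if (fd < 0 || ioctl(fd, F2FS_IOC_SET_PIN_FILE, &pin) < 0 ||
        fallocate(fd, 0, 0, 16 << 20) < 0) {
        perror("pin + fallocate");
        return EXIT_FAILURE;
    }

    /* step 4: FIBMAP maps a file block to an on-disk block (needs root) */
    if (ioctl(fd, FIBMAP, &block) < 0) {
        perror("FIBMAP");
        return EXIT_FAILURE;
    }
    printf("file block 0 -> disk block %d\n", block);

    close(fd);
    return 0;
}

Steps 5 and 6, writing through the raw block device at the returned address, are left out here since they bypass the filesystem entirely.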
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index ed70b68b2b38..a0eef95b9e0e 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -146,8 +146,8 @@ static bool __is_bitmap_valid(struct f2fs_sb_info *sbi, block_t blkaddr,
146 146
147 exist = f2fs_test_bit(offset, se->cur_valid_map); 147 exist = f2fs_test_bit(offset, se->cur_valid_map);
148 if (!exist && type == DATA_GENERIC_ENHANCE) { 148 if (!exist && type == DATA_GENERIC_ENHANCE) {
149 f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error " 149 f2fs_err(sbi, "Inconsistent error blkaddr:%u, sit bitmap:%d",
150 "blkaddr:%u, sit bitmap:%d", blkaddr, exist); 150 blkaddr, exist);
151 set_sbi_flag(sbi, SBI_NEED_FSCK); 151 set_sbi_flag(sbi, SBI_NEED_FSCK);
152 WARN_ON(1); 152 WARN_ON(1);
153 } 153 }
@@ -184,8 +184,8 @@ bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
184 case DATA_GENERIC_ENHANCE_READ: 184 case DATA_GENERIC_ENHANCE_READ:
185 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) || 185 if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
186 blkaddr < MAIN_BLKADDR(sbi))) { 186 blkaddr < MAIN_BLKADDR(sbi))) {
187 f2fs_msg(sbi->sb, KERN_WARNING, 187 f2fs_warn(sbi, "access invalid blkaddr:%u",
188 "access invalid blkaddr:%u", blkaddr); 188 blkaddr);
189 set_sbi_flag(sbi, SBI_NEED_FSCK); 189 set_sbi_flag(sbi, SBI_NEED_FSCK);
190 WARN_ON(1); 190 WARN_ON(1);
191 return false; 191 return false;
@@ -657,9 +657,8 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
657 657
658err_out: 658err_out:
659 set_sbi_flag(sbi, SBI_NEED_FSCK); 659 set_sbi_flag(sbi, SBI_NEED_FSCK);
660 f2fs_msg(sbi->sb, KERN_WARNING, 660 f2fs_warn(sbi, "%s: orphan failed (ino=%x), run fsck to fix.",
661 "%s: orphan failed (ino=%x), run fsck to fix.", 661 __func__, ino);
662 __func__, ino);
663 return err; 662 return err;
664} 663}
665 664
@@ -676,13 +675,12 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
676 return 0; 675 return 0;
677 676
678 if (bdev_read_only(sbi->sb->s_bdev)) { 677 if (bdev_read_only(sbi->sb->s_bdev)) {
679 f2fs_msg(sbi->sb, KERN_INFO, "write access " 678 f2fs_info(sbi, "write access unavailable, skipping orphan cleanup");
680 "unavailable, skipping orphan cleanup");
681 return 0; 679 return 0;
682 } 680 }
683 681
684 if (s_flags & SB_RDONLY) { 682 if (s_flags & SB_RDONLY) {
685 f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs"); 683 f2fs_info(sbi, "orphan cleanup on readonly fs");
686 sbi->sb->s_flags &= ~SB_RDONLY; 684 sbi->sb->s_flags &= ~SB_RDONLY;
687 } 685 }
688 686
@@ -827,26 +825,14 @@ static int get_checkpoint_version(struct f2fs_sb_info *sbi, block_t cp_addr,
827 if (crc_offset < CP_MIN_CHKSUM_OFFSET || 825 if (crc_offset < CP_MIN_CHKSUM_OFFSET ||
828 crc_offset > CP_CHKSUM_OFFSET) { 826 crc_offset > CP_CHKSUM_OFFSET) {
829 f2fs_put_page(*cp_page, 1); 827 f2fs_put_page(*cp_page, 1);
830 f2fs_msg(sbi->sb, KERN_WARNING, 828 f2fs_warn(sbi, "invalid crc_offset: %zu", crc_offset);
831 "invalid crc_offset: %zu", crc_offset);
832 return -EINVAL; 829 return -EINVAL;
833 } 830 }
834 831
835 if (__is_set_ckpt_flags(*cp_block, CP_LARGE_NAT_BITMAP_FLAG)) {
836 if (crc_offset != CP_MIN_CHKSUM_OFFSET) {
837 f2fs_put_page(*cp_page, 1);
838 f2fs_msg(sbi->sb, KERN_WARNING,
839 "layout of large_nat_bitmap is deprecated, "
840 "run fsck to repair, chksum_offset: %zu",
841 crc_offset);
842 return -EINVAL;
843 }
844 }
845
846 crc = f2fs_checkpoint_chksum(sbi, *cp_block); 832 crc = f2fs_checkpoint_chksum(sbi, *cp_block);
847 if (crc != cur_cp_crc(*cp_block)) { 833 if (crc != cur_cp_crc(*cp_block)) {
848 f2fs_put_page(*cp_page, 1); 834 f2fs_put_page(*cp_page, 1);
849 f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value"); 835 f2fs_warn(sbi, "invalid crc value");
850 return -EINVAL; 836 return -EINVAL;
851 } 837 }
852 838
@@ -869,9 +855,8 @@ static struct page *validate_checkpoint(struct f2fs_sb_info *sbi,
869 855
870 if (le32_to_cpu(cp_block->cp_pack_total_block_count) > 856 if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
871 sbi->blocks_per_seg) { 857 sbi->blocks_per_seg) {
872 f2fs_msg(sbi->sb, KERN_WARNING, 858 f2fs_warn(sbi, "invalid cp_pack_total_block_count:%u",
873 "invalid cp_pack_total_block_count:%u", 859 le32_to_cpu(cp_block->cp_pack_total_block_count));
874 le32_to_cpu(cp_block->cp_pack_total_block_count));
875 goto invalid_cp; 860 goto invalid_cp;
876 } 861 }
877 pre_version = *version; 862 pre_version = *version;
@@ -905,6 +890,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
905 unsigned int cp_blks = 1 + __cp_payload(sbi); 890 unsigned int cp_blks = 1 + __cp_payload(sbi);
906 block_t cp_blk_no; 891 block_t cp_blk_no;
907 int i; 892 int i;
893 int err;
908 894
909 sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks), 895 sbi->ckpt = f2fs_kzalloc(sbi, array_size(blk_size, cp_blks),
910 GFP_KERNEL); 896 GFP_KERNEL);
@@ -932,6 +918,7 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
932 } else if (cp2) { 918 } else if (cp2) {
933 cur_page = cp2; 919 cur_page = cp2;
934 } else { 920 } else {
921 err = -EFSCORRUPTED;
935 goto fail_no_cp; 922 goto fail_no_cp;
936 } 923 }
937 924
@@ -944,8 +931,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
944 sbi->cur_cp_pack = 2; 931 sbi->cur_cp_pack = 2;
945 932
946 /* Sanity checking of checkpoint */ 933 /* Sanity checking of checkpoint */
947 if (f2fs_sanity_check_ckpt(sbi)) 934 if (f2fs_sanity_check_ckpt(sbi)) {
935 err = -EFSCORRUPTED;
948 goto free_fail_no_cp; 936 goto free_fail_no_cp;
937 }
949 938
950 if (cp_blks <= 1) 939 if (cp_blks <= 1)
951 goto done; 940 goto done;
@@ -959,8 +948,10 @@ int f2fs_get_valid_checkpoint(struct f2fs_sb_info *sbi)
959 unsigned char *ckpt = (unsigned char *)sbi->ckpt; 948 unsigned char *ckpt = (unsigned char *)sbi->ckpt;
960 949
961 cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i); 950 cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
962 if (IS_ERR(cur_page)) 951 if (IS_ERR(cur_page)) {
952 err = PTR_ERR(cur_page);
963 goto free_fail_no_cp; 953 goto free_fail_no_cp;
954 }
964 sit_bitmap_ptr = page_address(cur_page); 955 sit_bitmap_ptr = page_address(cur_page);
965 memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size); 956 memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
966 f2fs_put_page(cur_page, 1); 957 f2fs_put_page(cur_page, 1);
@@ -975,7 +966,7 @@ free_fail_no_cp:
975 f2fs_put_page(cp2, 1); 966 f2fs_put_page(cp2, 1);
976fail_no_cp: 967fail_no_cp:
977 kvfree(sbi->ckpt); 968 kvfree(sbi->ckpt);
978 return -EINVAL; 969 return err;
979} 970}
980 971
981static void __add_dirty_inode(struct inode *inode, enum inode_type type) 972static void __add_dirty_inode(struct inode *inode, enum inode_type type)
@@ -1142,17 +1133,24 @@ static void __prepare_cp_block(struct f2fs_sb_info *sbi)
1142 1133
1143static bool __need_flush_quota(struct f2fs_sb_info *sbi) 1134static bool __need_flush_quota(struct f2fs_sb_info *sbi)
1144{ 1135{
1136 bool ret = false;
1137
1145 if (!is_journalled_quota(sbi)) 1138 if (!is_journalled_quota(sbi))
1146 return false; 1139 return false;
1147 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) 1140
1148 return false; 1141 down_write(&sbi->quota_sem);
1149 if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) 1142 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
1150 return false; 1143 ret = false;
1151 if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) 1144 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
1152 return true; 1145 ret = false;
1153 if (get_pages(sbi, F2FS_DIRTY_QDATA)) 1146 } else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_FLUSH)) {
1154 return true; 1147 clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
1155 return false; 1148 ret = true;
1149 } else if (get_pages(sbi, F2FS_DIRTY_QDATA)) {
1150 ret = true;
1151 }
1152 up_write(&sbi->quota_sem);
1153 return ret;
1156} 1154}
1157 1155
1158/* 1156/*
@@ -1171,26 +1169,22 @@ static int block_operations(struct f2fs_sb_info *sbi)
1171 blk_start_plug(&plug); 1169 blk_start_plug(&plug);
1172 1170
1173retry_flush_quotas: 1171retry_flush_quotas:
1172 f2fs_lock_all(sbi);
1174 if (__need_flush_quota(sbi)) { 1173 if (__need_flush_quota(sbi)) {
1175 int locked; 1174 int locked;
1176 1175
1177 if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) { 1176 if (++cnt > DEFAULT_RETRY_QUOTA_FLUSH_COUNT) {
1178 set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH); 1177 set_sbi_flag(sbi, SBI_QUOTA_SKIP_FLUSH);
1179 f2fs_lock_all(sbi); 1178 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
1180 goto retry_flush_dents; 1179 goto retry_flush_dents;
1181 } 1180 }
1182 clear_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); 1181 f2fs_unlock_all(sbi);
1183 1182
1184 /* only failed during mount/umount/freeze/quotactl */ 1183 /* only failed during mount/umount/freeze/quotactl */
1185 locked = down_read_trylock(&sbi->sb->s_umount); 1184 locked = down_read_trylock(&sbi->sb->s_umount);
1186 f2fs_quota_sync(sbi->sb, -1); 1185 f2fs_quota_sync(sbi->sb, -1);
1187 if (locked) 1186 if (locked)
1188 up_read(&sbi->sb->s_umount); 1187 up_read(&sbi->sb->s_umount);
1189 }
1190
1191 f2fs_lock_all(sbi);
1192 if (__need_flush_quota(sbi)) {
1193 f2fs_unlock_all(sbi);
1194 cond_resched(); 1188 cond_resched();
1195 goto retry_flush_quotas; 1189 goto retry_flush_quotas;
1196 } 1190 }
@@ -1212,12 +1206,6 @@ retry_flush_dents:
1212 */ 1206 */
1213 down_write(&sbi->node_change); 1207 down_write(&sbi->node_change);
1214 1208
1215 if (__need_flush_quota(sbi)) {
1216 up_write(&sbi->node_change);
1217 f2fs_unlock_all(sbi);
1218 goto retry_flush_quotas;
1219 }
1220
1221 if (get_pages(sbi, F2FS_DIRTY_IMETA)) { 1209 if (get_pages(sbi, F2FS_DIRTY_IMETA)) {
1222 up_write(&sbi->node_change); 1210 up_write(&sbi->node_change);
1223 f2fs_unlock_all(sbi); 1211 f2fs_unlock_all(sbi);
@@ -1313,7 +1301,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1313 else 1301 else
1314 __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG); 1302 __clear_ckpt_flags(ckpt, CP_ORPHAN_PRESENT_FLAG);
1315 1303
1316 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) 1304 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK) ||
1305 is_sbi_flag_set(sbi, SBI_IS_RESIZEFS))
1317 __set_ckpt_flags(ckpt, CP_FSCK_FLAG); 1306 __set_ckpt_flags(ckpt, CP_FSCK_FLAG);
1318 1307
1319 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED)) 1308 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
@@ -1328,10 +1317,8 @@ static void update_ckpt_flags(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1328 1317
1329 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) 1318 if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH))
1330 __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG); 1319 __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
1331 /* 1320 else
1332 * TODO: we count on fsck.f2fs to clear this flag until we figure out 1321 __clear_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
1333 * missing cases which clear it incorrectly.
1334 */
1335 1322
1336 if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) 1323 if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR))
1337 __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG); 1324 __set_ckpt_flags(ckpt, CP_QUOTA_NEED_FSCK_FLAG);
@@ -1571,8 +1558,7 @@ int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
1571 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 1558 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
1572 if (cpc->reason != CP_PAUSE) 1559 if (cpc->reason != CP_PAUSE)
1573 return 0; 1560 return 0;
1574 f2fs_msg(sbi->sb, KERN_WARNING, 1561 f2fs_warn(sbi, "Start checkpoint disabled!");
1575 "Start checkpoint disabled!");
1576 } 1562 }
1577 mutex_lock(&sbi->cp_mutex); 1563 mutex_lock(&sbi->cp_mutex);
1578 1564
@@ -1638,8 +1624,7 @@ stop:
1638 stat_inc_cp_count(sbi->stat_info); 1624 stat_inc_cp_count(sbi->stat_info);
1639 1625
1640 if (cpc->reason & CP_RECOVERY) 1626 if (cpc->reason & CP_RECOVERY)
1641 f2fs_msg(sbi->sb, KERN_NOTICE, 1627 f2fs_notice(sbi, "checkpoint: version = %llx", ckpt_ver);
1642 "checkpoint: version = %llx", ckpt_ver);
1643 1628
1644 /* do checkpoint periodically */ 1629 /* do checkpoint periodically */
1645 f2fs_update_time(sbi, CP_TIME); 1630 f2fs_update_time(sbi, CP_TIME);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index a546ac8685ea..0ca530afc684 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -14,6 +14,7 @@
14#include <linux/pagevec.h> 14#include <linux/pagevec.h>
15#include <linux/blkdev.h> 15#include <linux/blkdev.h>
16#include <linux/bio.h> 16#include <linux/bio.h>
17#include <linux/swap.h>
17#include <linux/prefetch.h> 18#include <linux/prefetch.h>
18#include <linux/uio.h> 19#include <linux/uio.h>
19#include <linux/cleancache.h> 20#include <linux/cleancache.h>
@@ -54,7 +55,7 @@ static bool __is_cp_guaranteed(struct page *page)
54 55
55static enum count_type __read_io_type(struct page *page) 56static enum count_type __read_io_type(struct page *page)
56{ 57{
57 struct address_space *mapping = page->mapping; 58 struct address_space *mapping = page_file_mapping(page);
58 59
59 if (mapping) { 60 if (mapping) {
60 struct inode *inode = mapping->host; 61 struct inode *inode = mapping->host;
@@ -347,20 +348,20 @@ static void __submit_merged_bio(struct f2fs_bio_info *io)
347 io->bio = NULL; 348 io->bio = NULL;
348} 349}
349 350
350static bool __has_merged_page(struct f2fs_bio_info *io, struct inode *inode, 351static bool __has_merged_page(struct bio *bio, struct inode *inode,
351 struct page *page, nid_t ino) 352 struct page *page, nid_t ino)
352{ 353{
353 struct bio_vec *bvec; 354 struct bio_vec *bvec;
354 struct page *target; 355 struct page *target;
355 struct bvec_iter_all iter_all; 356 struct bvec_iter_all iter_all;
356 357
357 if (!io->bio) 358 if (!bio)
358 return false; 359 return false;
359 360
360 if (!inode && !page && !ino) 361 if (!inode && !page && !ino)
361 return true; 362 return true;
362 363
363 bio_for_each_segment_all(bvec, io->bio, iter_all) { 364 bio_for_each_segment_all(bvec, bio, iter_all) {
364 365
365 target = bvec->bv_page; 366 target = bvec->bv_page;
366 if (fscrypt_is_bounce_page(target)) 367 if (fscrypt_is_bounce_page(target))
@@ -410,7 +411,7 @@ static void __submit_merged_write_cond(struct f2fs_sb_info *sbi,
410 struct f2fs_bio_info *io = sbi->write_io[btype] + temp; 411 struct f2fs_bio_info *io = sbi->write_io[btype] + temp;
411 412
412 down_read(&io->io_rwsem); 413 down_read(&io->io_rwsem);
413 ret = __has_merged_page(io, inode, page, ino); 414 ret = __has_merged_page(io->bio, inode, page, ino);
414 up_read(&io->io_rwsem); 415 up_read(&io->io_rwsem);
415 } 416 }
416 if (ret) 417 if (ret)
@@ -454,7 +455,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
454 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr, 455 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
455 fio->is_por ? META_POR : (__is_meta_io(fio) ? 456 fio->is_por ? META_POR : (__is_meta_io(fio) ?
456 META_GENERIC : DATA_GENERIC_ENHANCE))) 457 META_GENERIC : DATA_GENERIC_ENHANCE)))
457 return -EFAULT; 458 return -EFSCORRUPTED;
458 459
459 trace_f2fs_submit_page_bio(page, fio); 460 trace_f2fs_submit_page_bio(page, fio);
460 f2fs_trace_ios(fio, 0); 461 f2fs_trace_ios(fio, 0);
@@ -480,6 +481,61 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
480 return 0; 481 return 0;
481} 482}
482 483
484int f2fs_merge_page_bio(struct f2fs_io_info *fio)
485{
486 struct bio *bio = *fio->bio;
487 struct page *page = fio->encrypted_page ?
488 fio->encrypted_page : fio->page;
489
490 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
491 __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
492 return -EFSCORRUPTED;
493
494 trace_f2fs_submit_page_bio(page, fio);
495 f2fs_trace_ios(fio, 0);
496
497 if (bio && (*fio->last_block + 1 != fio->new_blkaddr ||
498 !__same_bdev(fio->sbi, fio->new_blkaddr, bio))) {
499 __submit_bio(fio->sbi, bio, fio->type);
500 bio = NULL;
501 }
502alloc_new:
503 if (!bio) {
504 bio = __bio_alloc(fio->sbi, fio->new_blkaddr, fio->io_wbc,
505 BIO_MAX_PAGES, false, fio->type, fio->temp);
506 bio_set_op_attrs(bio, fio->op, fio->op_flags);
507 }
508
509 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
510 __submit_bio(fio->sbi, bio, fio->type);
511 bio = NULL;
512 goto alloc_new;
513 }
514
515 if (fio->io_wbc)
516 wbc_account_io(fio->io_wbc, page, PAGE_SIZE);
517
518 inc_page_count(fio->sbi, WB_DATA_TYPE(page));
519
520 *fio->last_block = fio->new_blkaddr;
521 *fio->bio = bio;
522
523 return 0;
524}
525
526static void f2fs_submit_ipu_bio(struct f2fs_sb_info *sbi, struct bio **bio,
527 struct page *page)
528{
529 if (!bio)
530 return;
531
532 if (!__has_merged_page(*bio, NULL, page, 0))
533 return;
534
535 __submit_bio(sbi, *bio, DATA);
536 *bio = NULL;
537}
538
483void f2fs_submit_page_write(struct f2fs_io_info *fio) 539void f2fs_submit_page_write(struct f2fs_io_info *fio)
484{ 540{
485 struct f2fs_sb_info *sbi = fio->sbi; 541 struct f2fs_sb_info *sbi = fio->sbi;
@@ -733,7 +789,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
733 dn.data_blkaddr = ei.blk + index - ei.fofs; 789 dn.data_blkaddr = ei.blk + index - ei.fofs;
734 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr, 790 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), dn.data_blkaddr,
735 DATA_GENERIC_ENHANCE_READ)) { 791 DATA_GENERIC_ENHANCE_READ)) {
736 err = -EFAULT; 792 err = -EFSCORRUPTED;
737 goto put_err; 793 goto put_err;
738 } 794 }
739 goto got_it; 795 goto got_it;
@@ -753,7 +809,7 @@ struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
753 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode), 809 !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
754 dn.data_blkaddr, 810 dn.data_blkaddr,
755 DATA_GENERIC_ENHANCE)) { 811 DATA_GENERIC_ENHANCE)) {
756 err = -EFAULT; 812 err = -EFSCORRUPTED;
757 goto put_err; 813 goto put_err;
758 } 814 }
759got_it: 815got_it:
@@ -1099,7 +1155,7 @@ next_block:
1099 1155
1100 if (__is_valid_data_blkaddr(blkaddr) && 1156 if (__is_valid_data_blkaddr(blkaddr) &&
1101 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) { 1157 !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC_ENHANCE)) {
1102 err = -EFAULT; 1158 err = -EFSCORRUPTED;
1103 goto sync_out; 1159 goto sync_out;
1104 } 1160 }
1105 1161
@@ -1529,7 +1585,7 @@ static int f2fs_read_single_page(struct inode *inode, struct page *page,
1529 sector_t block_nr; 1585 sector_t block_nr;
1530 int ret = 0; 1586 int ret = 0;
1531 1587
1532 block_in_file = (sector_t)page->index; 1588 block_in_file = (sector_t)page_index(page);
1533 last_block = block_in_file + nr_pages; 1589 last_block = block_in_file + nr_pages;
1534 last_block_in_file = (i_size_read(inode) + blocksize - 1) >> 1590 last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
1535 blkbits; 1591 blkbits;
@@ -1562,14 +1618,15 @@ got_it:
1562 block_nr = map->m_pblk + block_in_file - map->m_lblk; 1618 block_nr = map->m_pblk + block_in_file - map->m_lblk;
1563 SetPageMappedToDisk(page); 1619 SetPageMappedToDisk(page);
1564 1620
1565 if (!PageUptodate(page) && !cleancache_get_page(page)) { 1621 if (!PageUptodate(page) && (!PageSwapCache(page) &&
1622 !cleancache_get_page(page))) {
1566 SetPageUptodate(page); 1623 SetPageUptodate(page);
1567 goto confused; 1624 goto confused;
1568 } 1625 }
1569 1626
1570 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr, 1627 if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
1571 DATA_GENERIC_ENHANCE_READ)) { 1628 DATA_GENERIC_ENHANCE_READ)) {
1572 ret = -EFAULT; 1629 ret = -EFSCORRUPTED;
1573 goto out; 1630 goto out;
1574 } 1631 }
1575 } else { 1632 } else {
@@ -1660,7 +1717,7 @@ static int f2fs_mpage_readpages(struct address_space *mapping,
1660 prefetchw(&page->flags); 1717 prefetchw(&page->flags);
1661 list_del(&page->lru); 1718 list_del(&page->lru);
1662 if (add_to_page_cache_lru(page, mapping, 1719 if (add_to_page_cache_lru(page, mapping,
1663 page->index, 1720 page_index(page),
1664 readahead_gfp_mask(mapping))) 1721 readahead_gfp_mask(mapping)))
1665 goto next_page; 1722 goto next_page;
1666 } 1723 }
@@ -1684,7 +1741,7 @@ next_page:
1684 1741
1685static int f2fs_read_data_page(struct file *file, struct page *page) 1742static int f2fs_read_data_page(struct file *file, struct page *page)
1686{ 1743{
1687 struct inode *inode = page->mapping->host; 1744 struct inode *inode = page_file_mapping(page)->host;
1688 int ret = -EAGAIN; 1745 int ret = -EAGAIN;
1689 1746
1690 trace_f2fs_readpage(page, DATA); 1747 trace_f2fs_readpage(page, DATA);
@@ -1693,7 +1750,8 @@ static int f2fs_read_data_page(struct file *file, struct page *page)
1693 if (f2fs_has_inline_data(inode)) 1750 if (f2fs_has_inline_data(inode))
1694 ret = f2fs_read_inline_data(inode, page); 1751 ret = f2fs_read_inline_data(inode, page);
1695 if (ret == -EAGAIN) 1752 if (ret == -EAGAIN)
1696 ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false); 1753 ret = f2fs_mpage_readpages(page_file_mapping(page),
1754 NULL, page, 1, false);
1697 return ret; 1755 return ret;
1698} 1756}
1699 1757
@@ -1851,7 +1909,7 @@ int f2fs_do_write_data_page(struct f2fs_io_info *fio)
1851 1909
1852 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, 1910 if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
1853 DATA_GENERIC_ENHANCE)) 1911 DATA_GENERIC_ENHANCE))
1854 return -EFAULT; 1912 return -EFSCORRUPTED;
1855 1913
1856 ipu_force = true; 1914 ipu_force = true;
1857 fio->need_lock = LOCK_DONE; 1915 fio->need_lock = LOCK_DONE;
@@ -1878,7 +1936,7 @@ got_it:
1878 if (__is_valid_data_blkaddr(fio->old_blkaddr) && 1936 if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
1879 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr, 1937 !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
1880 DATA_GENERIC_ENHANCE)) { 1938 DATA_GENERIC_ENHANCE)) {
1881 err = -EFAULT; 1939 err = -EFSCORRUPTED;
1882 goto out_writepage; 1940 goto out_writepage;
1883 } 1941 }
1884 /* 1942 /*
@@ -1946,6 +2004,8 @@ out:
1946} 2004}
1947 2005
1948static int __write_data_page(struct page *page, bool *submitted, 2006static int __write_data_page(struct page *page, bool *submitted,
2007 struct bio **bio,
2008 sector_t *last_block,
1949 struct writeback_control *wbc, 2009 struct writeback_control *wbc,
1950 enum iostat_type io_type) 2010 enum iostat_type io_type)
1951{ 2011{
@@ -1971,6 +2031,8 @@ static int __write_data_page(struct page *page, bool *submitted,
1971 .need_lock = LOCK_RETRY, 2031 .need_lock = LOCK_RETRY,
1972 .io_type = io_type, 2032 .io_type = io_type,
1973 .io_wbc = wbc, 2033 .io_wbc = wbc,
2034 .bio = bio,
2035 .last_block = last_block,
1974 }; 2036 };
1975 2037
1976 trace_f2fs_writepage(page, DATA); 2038 trace_f2fs_writepage(page, DATA);
@@ -2069,10 +2131,13 @@ out:
2069 2131
2070 unlock_page(page); 2132 unlock_page(page);
2071 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) && 2133 if (!S_ISDIR(inode->i_mode) && !IS_NOQUOTA(inode) &&
2072 !F2FS_I(inode)->cp_task) 2134 !F2FS_I(inode)->cp_task) {
2135 f2fs_submit_ipu_bio(sbi, bio, page);
2073 f2fs_balance_fs(sbi, need_balance_fs); 2136 f2fs_balance_fs(sbi, need_balance_fs);
2137 }
2074 2138
2075 if (unlikely(f2fs_cp_error(sbi))) { 2139 if (unlikely(f2fs_cp_error(sbi))) {
2140 f2fs_submit_ipu_bio(sbi, bio, page);
2076 f2fs_submit_merged_write(sbi, DATA); 2141 f2fs_submit_merged_write(sbi, DATA);
2077 submitted = NULL; 2142 submitted = NULL;
2078 } 2143 }
@@ -2099,7 +2164,7 @@ redirty_out:
2099static int f2fs_write_data_page(struct page *page, 2164static int f2fs_write_data_page(struct page *page,
2100 struct writeback_control *wbc) 2165 struct writeback_control *wbc)
2101{ 2166{
2102 return __write_data_page(page, NULL, wbc, FS_DATA_IO); 2167 return __write_data_page(page, NULL, NULL, NULL, wbc, FS_DATA_IO);
2103} 2168}
2104 2169
2105/* 2170/*
@@ -2115,6 +2180,8 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
2115 int done = 0; 2180 int done = 0;
2116 struct pagevec pvec; 2181 struct pagevec pvec;
2117 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping); 2182 struct f2fs_sb_info *sbi = F2FS_M_SB(mapping);
2183 struct bio *bio = NULL;
2184 sector_t last_block;
2118 int nr_pages; 2185 int nr_pages;
2119 pgoff_t uninitialized_var(writeback_index); 2186 pgoff_t uninitialized_var(writeback_index);
2120 pgoff_t index; 2187 pgoff_t index;
@@ -2191,17 +2258,20 @@ continue_unlock:
2191 } 2258 }
2192 2259
2193 if (PageWriteback(page)) { 2260 if (PageWriteback(page)) {
2194 if (wbc->sync_mode != WB_SYNC_NONE) 2261 if (wbc->sync_mode != WB_SYNC_NONE) {
2195 f2fs_wait_on_page_writeback(page, 2262 f2fs_wait_on_page_writeback(page,
2196 DATA, true, true); 2263 DATA, true, true);
2197 else 2264 f2fs_submit_ipu_bio(sbi, &bio, page);
2265 } else {
2198 goto continue_unlock; 2266 goto continue_unlock;
2267 }
2199 } 2268 }
2200 2269
2201 if (!clear_page_dirty_for_io(page)) 2270 if (!clear_page_dirty_for_io(page))
2202 goto continue_unlock; 2271 goto continue_unlock;
2203 2272
2204 ret = __write_data_page(page, &submitted, wbc, io_type); 2273 ret = __write_data_page(page, &submitted, &bio,
2274 &last_block, wbc, io_type);
2205 if (unlikely(ret)) { 2275 if (unlikely(ret)) {
2206 /* 2276 /*
2207 * keep nr_to_write, since vfs uses this to 2277 * keep nr_to_write, since vfs uses this to
@@ -2250,6 +2320,9 @@ continue_unlock:
2250 if (nwritten) 2320 if (nwritten)
2251 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host, 2321 f2fs_submit_merged_write_cond(F2FS_M_SB(mapping), mapping->host,
2252 NULL, 0, DATA); 2322 NULL, 0, DATA);
2323 /* submit cached bio of IPU write */
2324 if (bio)
2325 __submit_bio(sbi, bio, DATA);
2253 2326
2254 return ret; 2327 return ret;
2255} 2328}
@@ -2261,6 +2334,9 @@ static inline bool __should_serialize_io(struct inode *inode,
2261 return false; 2334 return false;
2262 if (IS_NOQUOTA(inode)) 2335 if (IS_NOQUOTA(inode))
2263 return false; 2336 return false;
2337 /* to avoid deadlock in path of data flush */
2338 if (F2FS_I(inode)->cp_task)
2339 return false;
2264 if (wbc->sync_mode != WB_SYNC_ALL) 2340 if (wbc->sync_mode != WB_SYNC_ALL)
2265 return true; 2341 return true;
2266 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks) 2342 if (get_dirty_pages(inode) >= SM_I(F2FS_I_SB(inode))->min_seq_blocks)
@@ -2532,7 +2608,7 @@ repeat:
2532 } else { 2608 } else {
2533 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, 2609 if (!f2fs_is_valid_blkaddr(sbi, blkaddr,
2534 DATA_GENERIC_ENHANCE_READ)) { 2610 DATA_GENERIC_ENHANCE_READ)) {
2535 err = -EFAULT; 2611 err = -EFSCORRUPTED;
2536 goto fail; 2612 goto fail;
2537 } 2613 }
2538 err = f2fs_submit_page_read(inode, page, blkaddr); 2614 err = f2fs_submit_page_read(inode, page, blkaddr);
@@ -2777,13 +2853,14 @@ int f2fs_release_page(struct page *page, gfp_t wait)
2777 2853
2778static int f2fs_set_data_page_dirty(struct page *page) 2854static int f2fs_set_data_page_dirty(struct page *page)
2779{ 2855{
2780 struct address_space *mapping = page->mapping; 2856 struct inode *inode = page_file_mapping(page)->host;
2781 struct inode *inode = mapping->host;
2782 2857
2783 trace_f2fs_set_page_dirty(page, DATA); 2858 trace_f2fs_set_page_dirty(page, DATA);
2784 2859
2785 if (!PageUptodate(page)) 2860 if (!PageUptodate(page))
2786 SetPageUptodate(page); 2861 SetPageUptodate(page);
2862 if (PageSwapCache(page))
2863 return __set_page_dirty_nobuffers(page);
2787 2864
2788 if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) { 2865 if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
2789 if (!IS_ATOMIC_WRITTEN_PAGE(page)) { 2866 if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
@@ -2875,6 +2952,126 @@ int f2fs_migrate_page(struct address_space *mapping,
2875} 2952}
2876#endif 2953#endif
2877 2954
2955#ifdef CONFIG_SWAP
2956/* Copied from generic_swapfile_activate() to check any holes */
2957static int check_swap_activate(struct file *swap_file, unsigned int max)
2958{
2959 struct address_space *mapping = swap_file->f_mapping;
2960 struct inode *inode = mapping->host;
2961 unsigned blocks_per_page;
2962 unsigned long page_no;
2963 unsigned blkbits;
2964 sector_t probe_block;
2965 sector_t last_block;
2966 sector_t lowest_block = -1;
2967 sector_t highest_block = 0;
2968
2969 blkbits = inode->i_blkbits;
2970 blocks_per_page = PAGE_SIZE >> blkbits;
2971
2972 /*
2973 * Map all the blocks into the extent list. This code doesn't try
2974 * to be very smart.
2975 */
2976 probe_block = 0;
2977 page_no = 0;
2978 last_block = i_size_read(inode) >> blkbits;
2979 while ((probe_block + blocks_per_page) <= last_block && page_no < max) {
2980 unsigned block_in_page;
2981 sector_t first_block;
2982
2983 cond_resched();
2984
2985 first_block = bmap(inode, probe_block);
2986 if (first_block == 0)
2987 goto bad_bmap;
2988
2989 /*
2990 * It must be PAGE_SIZE aligned on-disk
2991 */
2992 if (first_block & (blocks_per_page - 1)) {
2993 probe_block++;
2994 goto reprobe;
2995 }
2996
2997 for (block_in_page = 1; block_in_page < blocks_per_page;
2998 block_in_page++) {
2999 sector_t block;
3000
3001 block = bmap(inode, probe_block + block_in_page);
3002 if (block == 0)
3003 goto bad_bmap;
3004 if (block != first_block + block_in_page) {
3005 /* Discontiguity */
3006 probe_block++;
3007 goto reprobe;
3008 }
3009 }
3010
3011 first_block >>= (PAGE_SHIFT - blkbits);
3012 if (page_no) { /* exclude the header page */
3013 if (first_block < lowest_block)
3014 lowest_block = first_block;
3015 if (first_block > highest_block)
3016 highest_block = first_block;
3017 }
3018
3019 page_no++;
3020 probe_block += blocks_per_page;
3021reprobe:
3022 continue;
3023 }
3024 return 0;
3025
3026bad_bmap:
3027 pr_err("swapon: swapfile has holes\n");
3028 return -EINVAL;
3029}
3030
3031static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
3032 sector_t *span)
3033{
3034 struct inode *inode = file_inode(file);
3035 int ret;
3036
3037 if (!S_ISREG(inode->i_mode))
3038 return -EINVAL;
3039
3040 if (f2fs_readonly(F2FS_I_SB(inode)->sb))
3041 return -EROFS;
3042
3043 ret = f2fs_convert_inline_inode(inode);
3044 if (ret)
3045 return ret;
3046
3047 ret = check_swap_activate(file, sis->max);
3048 if (ret)
3049 return ret;
3050
3051 set_inode_flag(inode, FI_PIN_FILE);
3052 f2fs_precache_extents(inode);
3053 f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
3054 return 0;
3055}
3056
3057static void f2fs_swap_deactivate(struct file *file)
3058{
3059 struct inode *inode = file_inode(file);
3060
3061 clear_inode_flag(inode, FI_PIN_FILE);
3062}
3063#else
3064static int f2fs_swap_activate(struct swap_info_struct *sis, struct file *file,
3065 sector_t *span)
3066{
3067 return -EOPNOTSUPP;
3068}
3069
3070static void f2fs_swap_deactivate(struct file *file)
3071{
3072}
3073#endif
3074
2878const struct address_space_operations f2fs_dblock_aops = { 3075const struct address_space_operations f2fs_dblock_aops = {
2879 .readpage = f2fs_read_data_page, 3076 .readpage = f2fs_read_data_page,
2880 .readpages = f2fs_read_data_pages, 3077 .readpages = f2fs_read_data_pages,
@@ -2887,6 +3084,8 @@ const struct address_space_operations f2fs_dblock_aops = {
2887 .releasepage = f2fs_release_page, 3084 .releasepage = f2fs_release_page,
2888 .direct_IO = f2fs_direct_IO, 3085 .direct_IO = f2fs_direct_IO,
2889 .bmap = f2fs_bmap, 3086 .bmap = f2fs_bmap,
3087 .swap_activate = f2fs_swap_activate,
3088 .swap_deactivate = f2fs_swap_deactivate,
2890#ifdef CONFIG_MIGRATION 3089#ifdef CONFIG_MIGRATION
2891 .migratepage = f2fs_migrate_page, 3090 .migratepage = f2fs_migrate_page,
2892#endif 3091#endif
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 99e9a5c37b71..7706049d23bf 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -27,8 +27,15 @@ static DEFINE_MUTEX(f2fs_stat_mutex);
27static void update_general_status(struct f2fs_sb_info *sbi) 27static void update_general_status(struct f2fs_sb_info *sbi)
28{ 28{
29 struct f2fs_stat_info *si = F2FS_STAT(sbi); 29 struct f2fs_stat_info *si = F2FS_STAT(sbi);
30 struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
30 int i; 31 int i;
31 32
33 /* these will be changed if online resize is done */
34 si->main_area_segs = le32_to_cpu(raw_super->segment_count_main);
35 si->main_area_sections = le32_to_cpu(raw_super->section_count);
36 si->main_area_zones = si->main_area_sections /
37 le32_to_cpu(raw_super->secs_per_zone);
38
32 /* validation check of the segment numbers */ 39 /* validation check of the segment numbers */
33 si->hit_largest = atomic64_read(&sbi->read_hit_largest); 40 si->hit_largest = atomic64_read(&sbi->read_hit_largest);
34 si->hit_cached = atomic64_read(&sbi->read_hit_cached); 41 si->hit_cached = atomic64_read(&sbi->read_hit_cached);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 59bc46017855..85a1528f319f 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -218,9 +218,8 @@ struct f2fs_dir_entry *__f2fs_find_entry(struct inode *dir,
218 218
219 max_depth = F2FS_I(dir)->i_current_depth; 219 max_depth = F2FS_I(dir)->i_current_depth;
220 if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) { 220 if (unlikely(max_depth > MAX_DIR_HASH_DEPTH)) {
221 f2fs_msg(F2FS_I_SB(dir)->sb, KERN_WARNING, 221 f2fs_warn(F2FS_I_SB(dir), "Corrupted max_depth of %lu: %u",
222 "Corrupted max_depth of %lu: %u", 222 dir->i_ino, max_depth);
223 dir->i_ino, max_depth);
224 max_depth = MAX_DIR_HASH_DEPTH; 223 max_depth = MAX_DIR_HASH_DEPTH;
225 f2fs_i_depth_write(dir, max_depth); 224 f2fs_i_depth_write(dir, max_depth);
226 } 225 }
@@ -816,11 +815,10 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
816 bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len)); 815 bit_pos += GET_DENTRY_SLOTS(le16_to_cpu(de->name_len));
817 if (unlikely(bit_pos > d->max || 816 if (unlikely(bit_pos > d->max ||
818 le16_to_cpu(de->name_len) > F2FS_NAME_LEN)) { 817 le16_to_cpu(de->name_len) > F2FS_NAME_LEN)) {
819 f2fs_msg(sbi->sb, KERN_WARNING, 818 f2fs_warn(sbi, "%s: corrupted namelen=%d, run fsck to fix.",
820 "%s: corrupted namelen=%d, run fsck to fix.", 819 __func__, le16_to_cpu(de->name_len));
821 __func__, le16_to_cpu(de->name_len));
822 set_sbi_flag(sbi, SBI_NEED_FSCK); 820 set_sbi_flag(sbi, SBI_NEED_FSCK);
823 err = -EINVAL; 821 err = -EFSCORRUPTED;
824 goto out; 822 goto out;
825 } 823 }
826 824
@@ -828,8 +826,8 @@ int f2fs_fill_dentries(struct dir_context *ctx, struct f2fs_dentry_ptr *d,
828 int save_len = fstr->len; 826 int save_len = fstr->len;
829 827
830 err = fscrypt_fname_disk_to_usr(d->inode, 828 err = fscrypt_fname_disk_to_usr(d->inode,
831 (u32)de->hash_code, 0, 829 (u32)le32_to_cpu(de->hash_code),
832 &de_name, fstr); 830 0, &de_name, fstr);
833 if (err) 831 if (err)
834 goto out; 832 goto out;
835 833
diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
index caf77fe8ac07..e60078460ad1 100644
--- a/fs/f2fs/extent_cache.c
+++ b/fs/f2fs/extent_cache.c
@@ -184,10 +184,9 @@ bool f2fs_check_rb_tree_consistence(struct f2fs_sb_info *sbi,
184 next_re = rb_entry(next, struct rb_entry, rb_node); 184 next_re = rb_entry(next, struct rb_entry, rb_node);
185 185
186 if (cur_re->ofs + cur_re->len > next_re->ofs) { 186 if (cur_re->ofs + cur_re->len > next_re->ofs) {
187 f2fs_msg(sbi->sb, KERN_INFO, "inconsistent rbtree, " 187 f2fs_info(sbi, "inconsistent rbtree, cur(%u, %u) next(%u, %u)",
188 "cur(%u, %u) next(%u, %u)", 188 cur_re->ofs, cur_re->len,
189 cur_re->ofs, cur_re->len, 189 next_re->ofs, next_re->len);
190 next_re->ofs, next_re->len);
191 return false; 190 return false;
192 } 191 }
193 192
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 06b89a9862ab..17382da7f0bd 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -136,6 +136,9 @@ struct f2fs_mount_info {
136 int alloc_mode; /* segment allocation policy */ 136 int alloc_mode; /* segment allocation policy */
137 int fsync_mode; /* fsync policy */ 137 int fsync_mode; /* fsync policy */
138 bool test_dummy_encryption; /* test dummy encryption */ 138 bool test_dummy_encryption; /* test dummy encryption */
139 block_t unusable_cap; /* Amount of space allowed to be
140 * unusable when disabling checkpoint
141 */
139}; 142};
140 143
141#define F2FS_FEATURE_ENCRYPT 0x0001 144#define F2FS_FEATURE_ENCRYPT 0x0001
@@ -412,6 +415,7 @@ static inline bool __has_cursum_space(struct f2fs_journal *journal,
412#define F2FS_IOC_SET_PIN_FILE _IOW(F2FS_IOCTL_MAGIC, 13, __u32) 415#define F2FS_IOC_SET_PIN_FILE _IOW(F2FS_IOCTL_MAGIC, 13, __u32)
413#define F2FS_IOC_GET_PIN_FILE _IOR(F2FS_IOCTL_MAGIC, 14, __u32) 416#define F2FS_IOC_GET_PIN_FILE _IOR(F2FS_IOCTL_MAGIC, 14, __u32)
414#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15) 417#define F2FS_IOC_PRECACHE_EXTENTS _IO(F2FS_IOCTL_MAGIC, 15)
418#define F2FS_IOC_RESIZE_FS _IOW(F2FS_IOCTL_MAGIC, 16, __u64)
415 419
416#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY 420#define F2FS_IOC_SET_ENCRYPTION_POLICY FS_IOC_SET_ENCRYPTION_POLICY
417#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY 421#define F2FS_IOC_GET_ENCRYPTION_POLICY FS_IOC_GET_ENCRYPTION_POLICY
@@ -476,8 +480,8 @@ static inline int get_inline_xattr_addrs(struct inode *inode);
476#define NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \ 480#define NR_INLINE_DENTRY(inode) (MAX_INLINE_DATA(inode) * BITS_PER_BYTE / \
477 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ 481 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
478 BITS_PER_BYTE + 1)) 482 BITS_PER_BYTE + 1))
479#define INLINE_DENTRY_BITMAP_SIZE(inode) ((NR_INLINE_DENTRY(inode) + \ 483#define INLINE_DENTRY_BITMAP_SIZE(inode) \
480 BITS_PER_BYTE - 1) / BITS_PER_BYTE) 484 DIV_ROUND_UP(NR_INLINE_DENTRY(inode), BITS_PER_BYTE)
481#define INLINE_RESERVED_SIZE(inode) (MAX_INLINE_DATA(inode) - \ 485#define INLINE_RESERVED_SIZE(inode) (MAX_INLINE_DATA(inode) - \
482 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \ 486 ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * \
483 NR_INLINE_DENTRY(inode) + \ 487 NR_INLINE_DENTRY(inode) + \
@@ -1052,6 +1056,8 @@ struct f2fs_io_info {
1052 bool retry; /* need to reallocate block address */ 1056 bool retry; /* need to reallocate block address */
1053 enum iostat_type io_type; /* io type */ 1057 enum iostat_type io_type; /* io type */
1054 struct writeback_control *io_wbc; /* writeback control */ 1058 struct writeback_control *io_wbc; /* writeback control */
1059 struct bio **bio; /* bio for ipu */
1060 sector_t *last_block; /* last block number in bio */
1055 unsigned char version; /* version of the node */ 1061 unsigned char version; /* version of the node */
1056}; 1062};
1057 1063
@@ -1111,6 +1117,7 @@ enum {
1111 SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */ 1117 SBI_QUOTA_NEED_FLUSH, /* need to flush quota info in CP */
1112 SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */ 1118 SBI_QUOTA_SKIP_FLUSH, /* skip flushing quota in current CP */
1113 SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */ 1119 SBI_QUOTA_NEED_REPAIR, /* quota file may be corrupted */
1120 SBI_IS_RESIZEFS, /* resizefs is in process */
1114}; 1121};
1115 1122
1116enum { 1123enum {
@@ -1207,6 +1214,7 @@ struct f2fs_sb_info {
1207 /* for inode management */ 1214 /* for inode management */
1208 struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */ 1215 struct list_head inode_list[NR_INODE_TYPE]; /* dirty inode list */
1209 spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */ 1216 spinlock_t inode_lock[NR_INODE_TYPE]; /* for dirty inode list lock */
1217 struct mutex flush_lock; /* for flush exclusion */
1210 1218
1211 /* for extent tree cache */ 1219 /* for extent tree cache */
1212 struct radix_tree_root extent_tree_root;/* cache extent cache entries */ 1220 struct radix_tree_root extent_tree_root;/* cache extent cache entries */
@@ -1230,6 +1238,7 @@ struct f2fs_sb_info {
1230 unsigned int segs_per_sec; /* segments per section */ 1238 unsigned int segs_per_sec; /* segments per section */
1231 unsigned int secs_per_zone; /* sections per zone */ 1239 unsigned int secs_per_zone; /* sections per zone */
1232 unsigned int total_sections; /* total section count */ 1240 unsigned int total_sections; /* total section count */
1241 struct mutex resize_mutex; /* for resize exclusion */
1233 unsigned int total_node_count; /* total node block count */ 1242 unsigned int total_node_count; /* total node block count */
1234 unsigned int total_valid_node_count; /* valid node block count */ 1243 unsigned int total_valid_node_count; /* valid node block count */
1235 loff_t max_file_blocks; /* max block index of file */ 1244 loff_t max_file_blocks; /* max block index of file */
@@ -1247,6 +1256,7 @@ struct f2fs_sb_info {
1247 block_t unusable_block_count; /* # of blocks saved by last cp */ 1256 block_t unusable_block_count; /* # of blocks saved by last cp */
1248 1257
1249 unsigned int nquota_files; /* # of quota sysfile */ 1258 unsigned int nquota_files; /* # of quota sysfile */
1259 struct rw_semaphore quota_sem; /* blocking cp for flags */
1250 1260
1251 /* # of pages, see count_type */ 1261 /* # of pages, see count_type */
1252 atomic_t nr_pages[NR_COUNT_TYPE]; 1262 atomic_t nr_pages[NR_COUNT_TYPE];
@@ -1488,7 +1498,7 @@ static inline struct f2fs_sb_info *F2FS_M_SB(struct address_space *mapping)
1488 1498
1489static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page) 1499static inline struct f2fs_sb_info *F2FS_P_SB(struct page *page)
1490{ 1500{
1491 return F2FS_M_SB(page->mapping); 1501 return F2FS_M_SB(page_file_mapping(page));
1492} 1502}
1493 1503
1494static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi) 1504static inline struct f2fs_super_block *F2FS_RAW_SUPER(struct f2fs_sb_info *sbi)
@@ -1766,8 +1776,12 @@ static inline int inc_valid_block_count(struct f2fs_sb_info *sbi,
1766 1776
1767 if (!__allow_reserved_blocks(sbi, inode, true)) 1777 if (!__allow_reserved_blocks(sbi, inode, true))
1768 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks; 1778 avail_user_block_count -= F2FS_OPTION(sbi).root_reserved_blocks;
1769 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 1779 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
1770 avail_user_block_count -= sbi->unusable_block_count; 1780 if (avail_user_block_count > sbi->unusable_block_count)
1781 avail_user_block_count -= sbi->unusable_block_count;
1782 else
1783 avail_user_block_count = 0;
1784 }
1771 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) { 1785 if (unlikely(sbi->total_valid_block_count > avail_user_block_count)) {
1772 diff = sbi->total_valid_block_count - avail_user_block_count; 1786 diff = sbi->total_valid_block_count - avail_user_block_count;
1773 if (diff > *count) 1787 if (diff > *count)
@@ -1795,7 +1809,20 @@ enospc:
1795 return -ENOSPC; 1809 return -ENOSPC;
1796} 1810}
1797 1811
1798void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...); 1812__printf(2, 3)
1813void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);
1814
1815#define f2fs_err(sbi, fmt, ...) \
1816 f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
1817#define f2fs_warn(sbi, fmt, ...) \
1818 f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
1819#define f2fs_notice(sbi, fmt, ...) \
1820 f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
1821#define f2fs_info(sbi, fmt, ...) \
1822 f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
1823#define f2fs_debug(sbi, fmt, ...) \
1824 f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)
1825
1799static inline void dec_valid_block_count(struct f2fs_sb_info *sbi, 1826static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
1800 struct inode *inode, 1827 struct inode *inode,
1801 block_t count) 1828 block_t count)
@@ -1811,11 +1838,10 @@ static inline void dec_valid_block_count(struct f2fs_sb_info *sbi,
1811 sbi->current_reserved_blocks + count); 1838 sbi->current_reserved_blocks + count);
1812 spin_unlock(&sbi->stat_lock); 1839 spin_unlock(&sbi->stat_lock);
1813 if (unlikely(inode->i_blocks < sectors)) { 1840 if (unlikely(inode->i_blocks < sectors)) {
1814 f2fs_msg(sbi->sb, KERN_WARNING, 1841 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu",
1815 "Inconsistent i_blocks, ino:%lu, iblocks:%llu, sectors:%llu", 1842 inode->i_ino,
1816 inode->i_ino, 1843 (unsigned long long)inode->i_blocks,
1817 (unsigned long long)inode->i_blocks, 1844 (unsigned long long)sectors);
1818 (unsigned long long)sectors);
1819 set_sbi_flag(sbi, SBI_NEED_FSCK); 1845 set_sbi_flag(sbi, SBI_NEED_FSCK);
1820 return; 1846 return;
1821 } 1847 }
@@ -1967,7 +1993,7 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
1967 struct inode *inode, bool is_inode) 1993 struct inode *inode, bool is_inode)
1968{ 1994{
1969 block_t valid_block_count; 1995 block_t valid_block_count;
1970 unsigned int valid_node_count; 1996 unsigned int valid_node_count, user_block_count;
1971 int err; 1997 int err;
1972 1998
1973 if (is_inode) { 1999 if (is_inode) {
@@ -1994,10 +2020,11 @@ static inline int inc_valid_node_count(struct f2fs_sb_info *sbi,
1994 2020
1995 if (!__allow_reserved_blocks(sbi, inode, false)) 2021 if (!__allow_reserved_blocks(sbi, inode, false))
1996 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks; 2022 valid_block_count += F2FS_OPTION(sbi).root_reserved_blocks;
2023 user_block_count = sbi->user_block_count;
1997 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) 2024 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED)))
1998 valid_block_count += sbi->unusable_block_count; 2025 user_block_count -= sbi->unusable_block_count;
1999 2026
2000 if (unlikely(valid_block_count > sbi->user_block_count)) { 2027 if (unlikely(valid_block_count > user_block_count)) {
2001 spin_unlock(&sbi->stat_lock); 2028 spin_unlock(&sbi->stat_lock);
2002 goto enospc; 2029 goto enospc;
2003 } 2030 }
@@ -2052,10 +2079,9 @@ static inline void dec_valid_node_count(struct f2fs_sb_info *sbi,
2052 dquot_free_inode(inode); 2079 dquot_free_inode(inode);
2053 } else { 2080 } else {
2054 if (unlikely(inode->i_blocks == 0)) { 2081 if (unlikely(inode->i_blocks == 0)) {
2055 f2fs_msg(sbi->sb, KERN_WARNING, 2082 f2fs_warn(sbi, "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
2056 "Inconsistent i_blocks, ino:%lu, iblocks:%llu", 2083 inode->i_ino,
2057 inode->i_ino, 2084 (unsigned long long)inode->i_blocks);
2058 (unsigned long long)inode->i_blocks);
2059 set_sbi_flag(sbi, SBI_NEED_FSCK); 2085 set_sbi_flag(sbi, SBI_NEED_FSCK);
2060 return; 2086 return;
2061 } 2087 }
@@ -2191,6 +2217,9 @@ static inline struct bio *f2fs_bio_alloc(struct f2fs_sb_info *sbi,
2191 2217
2192static inline bool is_idle(struct f2fs_sb_info *sbi, int type) 2218static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2193{ 2219{
2220 if (sbi->gc_mode == GC_URGENT)
2221 return true;
2222
2194 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) || 2223 if (get_pages(sbi, F2FS_RD_DATA) || get_pages(sbi, F2FS_RD_NODE) ||
2195 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) || 2224 get_pages(sbi, F2FS_RD_META) || get_pages(sbi, F2FS_WB_DATA) ||
2196 get_pages(sbi, F2FS_WB_CP_DATA) || 2225 get_pages(sbi, F2FS_WB_CP_DATA) ||
@@ -2198,7 +2227,7 @@ static inline bool is_idle(struct f2fs_sb_info *sbi, int type)
2198 get_pages(sbi, F2FS_DIO_WRITE)) 2227 get_pages(sbi, F2FS_DIO_WRITE))
2199 return false; 2228 return false;
2200 2229
2201 if (SM_I(sbi) && SM_I(sbi)->dcc_info && 2230 if (type != DISCARD_TIME && SM_I(sbi) && SM_I(sbi)->dcc_info &&
2202 atomic_read(&SM_I(sbi)->dcc_info->queued_discard)) 2231 atomic_read(&SM_I(sbi)->dcc_info->queued_discard))
2203 return false; 2232 return false;
2204 2233
@@ -2320,57 +2349,23 @@ static inline void f2fs_change_bit(unsigned int nr, char *addr)
2320} 2349}
2321 2350
2322/* 2351/*
2323 * Inode flags 2352 * On-disk inode flags (f2fs_inode::i_flags)
2324 */ 2353 */
2325#define F2FS_SECRM_FL 0x00000001 /* Secure deletion */
2326#define F2FS_UNRM_FL 0x00000002 /* Undelete */
2327#define F2FS_COMPR_FL 0x00000004 /* Compress file */
2328#define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */ 2354#define F2FS_SYNC_FL 0x00000008 /* Synchronous updates */
2329#define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */ 2355#define F2FS_IMMUTABLE_FL 0x00000010 /* Immutable file */
2330#define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */ 2356#define F2FS_APPEND_FL 0x00000020 /* writes to file may only append */
2331#define F2FS_NODUMP_FL 0x00000040 /* do not dump file */ 2357#define F2FS_NODUMP_FL 0x00000040 /* do not dump file */
2332#define F2FS_NOATIME_FL 0x00000080 /* do not update atime */ 2358#define F2FS_NOATIME_FL 0x00000080 /* do not update atime */
2333/* Reserved for compression usage... */
2334#define F2FS_DIRTY_FL 0x00000100
2335#define F2FS_COMPRBLK_FL 0x00000200 /* One or more compressed clusters */
2336#define F2FS_NOCOMPR_FL 0x00000400 /* Don't compress */
2337#define F2FS_ENCRYPT_FL 0x00000800 /* encrypted file */
2338/* End compression flags --- maybe not all used */
2339#define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */ 2359#define F2FS_INDEX_FL 0x00001000 /* hash-indexed directory */
2340#define F2FS_IMAGIC_FL 0x00002000 /* AFS directory */
2341#define F2FS_JOURNAL_DATA_FL 0x00004000 /* file data should be journaled */
2342#define F2FS_NOTAIL_FL 0x00008000 /* file tail should not be merged */
2343#define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */ 2360#define F2FS_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
2344#define F2FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
2345#define F2FS_HUGE_FILE_FL 0x00040000 /* Set to each huge file */
2346#define F2FS_EXTENTS_FL 0x00080000 /* Inode uses extents */
2347#define F2FS_EA_INODE_FL 0x00200000 /* Inode used for large EA */
2348#define F2FS_EOFBLOCKS_FL 0x00400000 /* Blocks allocated beyond EOF */
2349#define F2FS_NOCOW_FL 0x00800000 /* Do not cow file */
2350#define F2FS_INLINE_DATA_FL 0x10000000 /* Inode has inline data. */
2351#define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */ 2361#define F2FS_PROJINHERIT_FL 0x20000000 /* Create with parents projid */
2352#define F2FS_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
2353
2354#define F2FS_FL_USER_VISIBLE 0x30CBDFFF /* User visible flags */
2355#define F2FS_FL_USER_MODIFIABLE 0x204BC0FF /* User modifiable flags */
2356
2357/* Flags we can manipulate with through F2FS_IOC_FSSETXATTR */
2358#define F2FS_FL_XFLAG_VISIBLE (F2FS_SYNC_FL | \
2359 F2FS_IMMUTABLE_FL | \
2360 F2FS_APPEND_FL | \
2361 F2FS_NODUMP_FL | \
2362 F2FS_NOATIME_FL | \
2363 F2FS_PROJINHERIT_FL)
2364 2362
2365/* Flags that should be inherited by new inodes from their parent. */ 2363/* Flags that should be inherited by new inodes from their parent. */
2366#define F2FS_FL_INHERITED (F2FS_SECRM_FL | F2FS_UNRM_FL | F2FS_COMPR_FL |\ 2364#define F2FS_FL_INHERITED (F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL | \
2367 F2FS_SYNC_FL | F2FS_NODUMP_FL | F2FS_NOATIME_FL |\ 2365 F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL)
2368 F2FS_NOCOMPR_FL | F2FS_JOURNAL_DATA_FL |\
2369 F2FS_NOTAIL_FL | F2FS_DIRSYNC_FL |\
2370 F2FS_PROJINHERIT_FL)
2371 2366
2372/* Flags that are appropriate for regular files (all but dir-specific ones). */ 2367/* Flags that are appropriate for regular files (all but dir-specific ones). */
2373#define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_TOPDIR_FL)) 2368#define F2FS_REG_FLMASK (~(F2FS_DIRSYNC_FL | F2FS_PROJINHERIT_FL))
2374 2369
2375/* Flags that are appropriate for non-directories/regular files. */ 2370/* Flags that are appropriate for non-directories/regular files. */
2376#define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL) 2371#define F2FS_OTHER_FLMASK (F2FS_NODUMP_FL | F2FS_NOATIME_FL)
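With the on-disk flag list trimmed to the bits f2fs actually persists, F2FS_FL_INHERITED plus the two FLMASK macros describe which parent flags a new inode may pick up. A hedged sketch of that filtering, using only the macros defined directly above (the real path runs through f2fs_mask_flags(); this shows just the mask logic):

/* Illustrative helper, not the kernel's: directories keep every inherited
 * bit, regular files drop the dir-only bits, everything else keeps only
 * NODUMP/NOATIME.
 */
static unsigned int inherit_flags(unsigned int parent_flags,
                                  int is_dir, int is_reg)
{
        unsigned int flags = parent_flags & F2FS_FL_INHERITED;

        if (is_dir)
                return flags;
        if (is_reg)
                return flags & F2FS_REG_FLMASK;
        return flags & F2FS_OTHER_FLMASK;
}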
@@ -2856,9 +2851,8 @@ static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
2856 block_t blkaddr, int type) 2851 block_t blkaddr, int type)
2857{ 2852{
2858 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) { 2853 if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
2859 f2fs_msg(sbi->sb, KERN_ERR, 2854 f2fs_err(sbi, "invalid blkaddr: %u, type: %d, run fsck to fix.",
2860 "invalid blkaddr: %u, type: %d, run fsck to fix.", 2855 blkaddr, type);
2861 blkaddr, type);
2862 f2fs_bug_on(sbi, 1); 2856 f2fs_bug_on(sbi, 1);
2863 } 2857 }
2864} 2858}
@@ -2989,8 +2983,6 @@ int f2fs_quota_sync(struct super_block *sb, int type);
2989void f2fs_quota_off_umount(struct super_block *sb); 2983void f2fs_quota_off_umount(struct super_block *sb);
2990int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover); 2984int f2fs_commit_super(struct f2fs_sb_info *sbi, bool recover);
2991int f2fs_sync_fs(struct super_block *sb, int sync); 2985int f2fs_sync_fs(struct super_block *sb, int sync);
2992extern __printf(3, 4)
2993void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
2994int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi); 2986int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi);
2995 2987
2996/* 2988/*
@@ -3074,9 +3066,12 @@ bool f2fs_issue_discard_timeout(struct f2fs_sb_info *sbi);
3074void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi, 3066void f2fs_clear_prefree_segments(struct f2fs_sb_info *sbi,
3075 struct cp_control *cpc); 3067 struct cp_control *cpc);
3076void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi); 3068void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi);
3077int f2fs_disable_cp_again(struct f2fs_sb_info *sbi); 3069block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi);
3070int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable);
3078void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi); 3071void f2fs_release_discard_addrs(struct f2fs_sb_info *sbi);
3079int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra); 3072int f2fs_npages_for_summary_flush(struct f2fs_sb_info *sbi, bool for_ra);
3073void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
3074 unsigned int start, unsigned int end);
3080void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi); 3075void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi);
3081int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range); 3076int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range);
3082bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi, 3077bool f2fs_exist_trim_candidates(struct f2fs_sb_info *sbi,
@@ -3169,6 +3164,7 @@ void f2fs_submit_merged_write_cond(struct f2fs_sb_info *sbi,
3169 nid_t ino, enum page_type type); 3164 nid_t ino, enum page_type type);
3170void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi); 3165void f2fs_flush_merged_writes(struct f2fs_sb_info *sbi);
3171int f2fs_submit_page_bio(struct f2fs_io_info *fio); 3166int f2fs_submit_page_bio(struct f2fs_io_info *fio);
3167int f2fs_merge_page_bio(struct f2fs_io_info *fio);
3172void f2fs_submit_page_write(struct f2fs_io_info *fio); 3168void f2fs_submit_page_write(struct f2fs_io_info *fio);
3173struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi, 3169struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
3174 block_t blk_addr, struct bio *bio); 3170 block_t blk_addr, struct bio *bio);
@@ -3214,6 +3210,7 @@ block_t f2fs_start_bidx_of_node(unsigned int node_ofs, struct inode *inode);
3214int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background, 3210int f2fs_gc(struct f2fs_sb_info *sbi, bool sync, bool background,
3215 unsigned int segno); 3211 unsigned int segno);
3216void f2fs_build_gc_manager(struct f2fs_sb_info *sbi); 3212void f2fs_build_gc_manager(struct f2fs_sb_info *sbi);
3213int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count);
3217 3214
3218/* 3215/*
3219 * recovery.c 3216 * recovery.c
@@ -3686,7 +3683,8 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
3686 if (test_opt(sbi, LFS) && (rw == WRITE) && 3683 if (test_opt(sbi, LFS) && (rw == WRITE) &&
3687 block_unaligned_IO(inode, iocb, iter)) 3684 block_unaligned_IO(inode, iocb, iter))
3688 return true; 3685 return true;
3689 if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED)) 3686 if (is_sbi_flag_set(F2FS_I_SB(inode), SBI_CP_DISABLED) &&
3687 !(inode->i_flags & S_SWAPFILE))
3690 return true; 3688 return true;
3691 3689
3692 return false; 3690 return false;
@@ -3712,4 +3710,7 @@ static inline bool is_journalled_quota(struct f2fs_sb_info *sbi)
3712 return false; 3710 return false;
3713} 3711}
3714 3712
3713#define EFSBADCRC EBADMSG /* Bad CRC detected */
3714#define EFSCORRUPTED EUCLEAN /* Filesystem is corrupted */
3715
3715#endif /* _LINUX_F2FS_H */ 3716#endif /* _LINUX_F2FS_H */
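EFSBADCRC and EFSCORRUPTED are aliases for existing errno values rather than new error codes, so userspace that hits them sees the stock EBADMSG and EUCLEAN strings. A quick standalone check of what strerror() reports for the two values:

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        printf("EFSBADCRC    (EBADMSG) -> %s\n", strerror(EBADMSG));
        printf("EFSCORRUPTED (EUCLEAN) -> %s\n", strerror(EUCLEAN));
        return 0;
}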
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 45b45f37d347..f8d46df8fa9e 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -707,11 +707,9 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
707 stat->btime.tv_nsec = fi->i_crtime.tv_nsec; 707 stat->btime.tv_nsec = fi->i_crtime.tv_nsec;
708 } 708 }
709 709
710 flags = fi->i_flags & F2FS_FL_USER_VISIBLE; 710 flags = fi->i_flags;
711 if (flags & F2FS_APPEND_FL) 711 if (flags & F2FS_APPEND_FL)
712 stat->attributes |= STATX_ATTR_APPEND; 712 stat->attributes |= STATX_ATTR_APPEND;
713 if (flags & F2FS_COMPR_FL)
714 stat->attributes |= STATX_ATTR_COMPRESSED;
715 if (IS_ENCRYPTED(inode)) 713 if (IS_ENCRYPTED(inode))
716 stat->attributes |= STATX_ATTR_ENCRYPTED; 714 stat->attributes |= STATX_ATTR_ENCRYPTED;
717 if (flags & F2FS_IMMUTABLE_FL) 715 if (flags & F2FS_IMMUTABLE_FL)
@@ -720,7 +718,6 @@ int f2fs_getattr(const struct path *path, struct kstat *stat,
720 stat->attributes |= STATX_ATTR_NODUMP; 718 stat->attributes |= STATX_ATTR_NODUMP;
721 719
722 stat->attributes_mask |= (STATX_ATTR_APPEND | 720 stat->attributes_mask |= (STATX_ATTR_APPEND |
723 STATX_ATTR_COMPRESSED |
724 STATX_ATTR_ENCRYPTED | 721 STATX_ATTR_ENCRYPTED |
725 STATX_ATTR_IMMUTABLE | 722 STATX_ATTR_IMMUTABLE |
726 STATX_ATTR_NODUMP); 723 STATX_ATTR_NODUMP);
@@ -1026,7 +1023,7 @@ next_dnode:
1026 !f2fs_is_valid_blkaddr(sbi, *blkaddr, 1023 !f2fs_is_valid_blkaddr(sbi, *blkaddr,
1027 DATA_GENERIC_ENHANCE)) { 1024 DATA_GENERIC_ENHANCE)) {
1028 f2fs_put_dnode(&dn); 1025 f2fs_put_dnode(&dn);
1029 return -EFAULT; 1026 return -EFSCORRUPTED;
1030 } 1027 }
1031 1028
1032 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) { 1029 if (!f2fs_is_checkpointed_data(sbi, *blkaddr)) {
@@ -1214,7 +1211,7 @@ roll_back:
1214static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len) 1211static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
1215{ 1212{
1216 struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1213 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
1217 pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE; 1214 pgoff_t nrpages = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1218 pgoff_t start = offset >> PAGE_SHIFT; 1215 pgoff_t start = offset >> PAGE_SHIFT;
1219 pgoff_t end = (offset + len) >> PAGE_SHIFT; 1216 pgoff_t end = (offset + len) >> PAGE_SHIFT;
1220 int ret; 1217 int ret;
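DIV_ROUND_UP(n, d) expands to ((n) + (d) - 1) / (d) in the kernel, so this conversion (and the matching ones in f2fs_insert_range() and f2fs_defragment_range() below) only replaces the open-coded form with the macro. A quick standalone check of the equivalence:

#include <assert.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned long long i_size = 12289;      /* three 4 KiB pages plus one byte */
        unsigned long page_size = 4096;

        assert(DIV_ROUND_UP(i_size, page_size) ==
               (i_size + page_size - 1) / page_size);
        printf("%llu bytes -> %llu pages\n", i_size,
               DIV_ROUND_UP(i_size, page_size));
        return 0;
}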
@@ -1467,7 +1464,7 @@ static int f2fs_insert_range(struct inode *inode, loff_t offset, loff_t len)
1467 pg_start = offset >> PAGE_SHIFT; 1464 pg_start = offset >> PAGE_SHIFT;
1468 pg_end = (offset + len) >> PAGE_SHIFT; 1465 pg_end = (offset + len) >> PAGE_SHIFT;
1469 delta = pg_end - pg_start; 1466 delta = pg_end - pg_start;
1470 idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE; 1467 idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);
1471 1468
1472 /* avoid gc operation during block exchange */ 1469 /* avoid gc operation during block exchange */
1473 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1470 down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -1531,7 +1528,12 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
1531 if (off_end) 1528 if (off_end)
1532 map.m_len++; 1529 map.m_len++;
1533 1530
1534 err = f2fs_map_blocks(inode, &map, 1, F2FS_GET_BLOCK_PRE_AIO); 1531 if (f2fs_is_pinned_file(inode))
1532 map.m_seg_type = CURSEG_COLD_DATA;
1533
1534 err = f2fs_map_blocks(inode, &map, 1, (f2fs_is_pinned_file(inode) ?
1535 F2FS_GET_BLOCK_PRE_DIO :
1536 F2FS_GET_BLOCK_PRE_AIO));
1535 if (err) { 1537 if (err) {
1536 pgoff_t last_off; 1538 pgoff_t last_off;
1537 1539
@@ -1648,44 +1650,22 @@ static int f2fs_file_flush(struct file *file, fl_owner_t id)
1648 return 0; 1650 return 0;
1649} 1651}
1650 1652
1651static int f2fs_ioc_getflags(struct file *filp, unsigned long arg) 1653static int f2fs_setflags_common(struct inode *inode, u32 iflags, u32 mask)
1652{ 1654{
1653 struct inode *inode = file_inode(filp);
1654 struct f2fs_inode_info *fi = F2FS_I(inode); 1655 struct f2fs_inode_info *fi = F2FS_I(inode);
1655 unsigned int flags = fi->i_flags; 1656 u32 oldflags;
1656
1657 if (IS_ENCRYPTED(inode))
1658 flags |= F2FS_ENCRYPT_FL;
1659 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1660 flags |= F2FS_INLINE_DATA_FL;
1661 if (is_inode_flag_set(inode, FI_PIN_FILE))
1662 flags |= F2FS_NOCOW_FL;
1663
1664 flags &= F2FS_FL_USER_VISIBLE;
1665
1666 return put_user(flags, (int __user *)arg);
1667}
1668
1669static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
1670{
1671 struct f2fs_inode_info *fi = F2FS_I(inode);
1672 unsigned int oldflags;
1673 1657
1674 /* Is it quota file? Do not allow user to mess with it */ 1658 /* Is it quota file? Do not allow user to mess with it */
1675 if (IS_NOQUOTA(inode)) 1659 if (IS_NOQUOTA(inode))
1676 return -EPERM; 1660 return -EPERM;
1677 1661
1678 flags = f2fs_mask_flags(inode->i_mode, flags);
1679
1680 oldflags = fi->i_flags; 1662 oldflags = fi->i_flags;
1681 1663
1682 if ((flags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL)) 1664 if ((iflags ^ oldflags) & (F2FS_APPEND_FL | F2FS_IMMUTABLE_FL))
1683 if (!capable(CAP_LINUX_IMMUTABLE)) 1665 if (!capable(CAP_LINUX_IMMUTABLE))
1684 return -EPERM; 1666 return -EPERM;
1685 1667
1686 flags = flags & F2FS_FL_USER_MODIFIABLE; 1668 fi->i_flags = iflags | (oldflags & ~mask);
1687 flags |= oldflags & ~F2FS_FL_USER_MODIFIABLE;
1688 fi->i_flags = flags;
1689 1669
1690 if (fi->i_flags & F2FS_PROJINHERIT_FL) 1670 if (fi->i_flags & F2FS_PROJINHERIT_FL)
1691 set_inode_flag(inode, FI_PROJ_INHERIT); 1671 set_inode_flag(inode, FI_PROJ_INHERIT);
@@ -1698,26 +1678,124 @@ static int __f2fs_ioc_setflags(struct inode *inode, unsigned int flags)
1698 return 0; 1678 return 0;
1699} 1679}
1700 1680
1681/* FS_IOC_GETFLAGS and FS_IOC_SETFLAGS support */
1682
1683/*
1684 * To make a new on-disk f2fs i_flag gettable via FS_IOC_GETFLAGS, add an entry
1685 * for it to f2fs_fsflags_map[], and add its FS_*_FL equivalent to
1686 * F2FS_GETTABLE_FS_FL. To also make it settable via FS_IOC_SETFLAGS, also add
1687 * its FS_*_FL equivalent to F2FS_SETTABLE_FS_FL.
1688 */
1689
1690static const struct {
1691 u32 iflag;
1692 u32 fsflag;
1693} f2fs_fsflags_map[] = {
1694 { F2FS_SYNC_FL, FS_SYNC_FL },
1695 { F2FS_IMMUTABLE_FL, FS_IMMUTABLE_FL },
1696 { F2FS_APPEND_FL, FS_APPEND_FL },
1697 { F2FS_NODUMP_FL, FS_NODUMP_FL },
1698 { F2FS_NOATIME_FL, FS_NOATIME_FL },
1699 { F2FS_INDEX_FL, FS_INDEX_FL },
1700 { F2FS_DIRSYNC_FL, FS_DIRSYNC_FL },
1701 { F2FS_PROJINHERIT_FL, FS_PROJINHERIT_FL },
1702};
1703
1704#define F2FS_GETTABLE_FS_FL ( \
1705 FS_SYNC_FL | \
1706 FS_IMMUTABLE_FL | \
1707 FS_APPEND_FL | \
1708 FS_NODUMP_FL | \
1709 FS_NOATIME_FL | \
1710 FS_INDEX_FL | \
1711 FS_DIRSYNC_FL | \
1712 FS_PROJINHERIT_FL | \
1713 FS_ENCRYPT_FL | \
1714 FS_INLINE_DATA_FL | \
1715 FS_NOCOW_FL)
1716
1717#define F2FS_SETTABLE_FS_FL ( \
1718 FS_SYNC_FL | \
1719 FS_IMMUTABLE_FL | \
1720 FS_APPEND_FL | \
1721 FS_NODUMP_FL | \
1722 FS_NOATIME_FL | \
1723 FS_DIRSYNC_FL | \
1724 FS_PROJINHERIT_FL)
1725
1726/* Convert f2fs on-disk i_flags to FS_IOC_{GET,SET}FLAGS flags */
1727static inline u32 f2fs_iflags_to_fsflags(u32 iflags)
1728{
1729 u32 fsflags = 0;
1730 int i;
1731
1732 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1733 if (iflags & f2fs_fsflags_map[i].iflag)
1734 fsflags |= f2fs_fsflags_map[i].fsflag;
1735
1736 return fsflags;
1737}
1738
1739/* Convert FS_IOC_{GET,SET}FLAGS flags to f2fs on-disk i_flags */
1740static inline u32 f2fs_fsflags_to_iflags(u32 fsflags)
1741{
1742 u32 iflags = 0;
1743 int i;
1744
1745 for (i = 0; i < ARRAY_SIZE(f2fs_fsflags_map); i++)
1746 if (fsflags & f2fs_fsflags_map[i].fsflag)
1747 iflags |= f2fs_fsflags_map[i].iflag;
1748
1749 return iflags;
1750}
1751
1752static int f2fs_ioc_getflags(struct file *filp, unsigned long arg)
1753{
1754 struct inode *inode = file_inode(filp);
1755 struct f2fs_inode_info *fi = F2FS_I(inode);
1756 u32 fsflags = f2fs_iflags_to_fsflags(fi->i_flags);
1757
1758 if (IS_ENCRYPTED(inode))
1759 fsflags |= FS_ENCRYPT_FL;
1760 if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
1761 fsflags |= FS_INLINE_DATA_FL;
1762 if (is_inode_flag_set(inode, FI_PIN_FILE))
1763 fsflags |= FS_NOCOW_FL;
1764
1765 fsflags &= F2FS_GETTABLE_FS_FL;
1766
1767 return put_user(fsflags, (int __user *)arg);
1768}
1769
1701static int f2fs_ioc_setflags(struct file *filp, unsigned long arg) 1770static int f2fs_ioc_setflags(struct file *filp, unsigned long arg)
1702{ 1771{
1703 struct inode *inode = file_inode(filp); 1772 struct inode *inode = file_inode(filp);
1704 unsigned int flags; 1773 u32 fsflags;
1774 u32 iflags;
1705 int ret; 1775 int ret;
1706 1776
1707 if (!inode_owner_or_capable(inode)) 1777 if (!inode_owner_or_capable(inode))
1708 return -EACCES; 1778 return -EACCES;
1709 1779
1710 if (get_user(flags, (int __user *)arg)) 1780 if (get_user(fsflags, (int __user *)arg))
1711 return -EFAULT; 1781 return -EFAULT;
1712 1782
1783 if (fsflags & ~F2FS_GETTABLE_FS_FL)
1784 return -EOPNOTSUPP;
1785 fsflags &= F2FS_SETTABLE_FS_FL;
1786
1787 iflags = f2fs_fsflags_to_iflags(fsflags);
1788 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
1789 return -EOPNOTSUPP;
1790
1713 ret = mnt_want_write_file(filp); 1791 ret = mnt_want_write_file(filp);
1714 if (ret) 1792 if (ret)
1715 return ret; 1793 return ret;
1716 1794
1717 inode_lock(inode); 1795 inode_lock(inode);
1718 1796
1719 ret = __f2fs_ioc_setflags(inode, flags); 1797 ret = f2fs_setflags_common(inode, iflags,
1720 1798 f2fs_fsflags_to_iflags(F2FS_SETTABLE_FS_FL));
1721 inode_unlock(inode); 1799 inode_unlock(inode);
1722 mnt_drop_write_file(filp); 1800 mnt_drop_write_file(filp);
1723 return ret; 1801 return ret;
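After this rework, FS_IOC_GETFLAGS and FS_IOC_SETFLAGS speak the generic FS_*_FL values and f2fs translates them to its on-disk bits through f2fs_fsflags_map[]. A hedged userspace sketch of the round trip; the mount point and file name are made up and error handling is minimal:

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/mnt/f2fs/testfile", O_RDONLY);  /* illustrative path */
        int flags;

        if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
                perror("FS_IOC_GETFLAGS");
                return 1;
        }

        flags |= FS_NOATIME_FL;         /* one of the settable bits above */
        if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0)
                perror("FS_IOC_SETFLAGS");

        close(fd);
        return 0;
}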
@@ -1764,9 +1842,8 @@ static int f2fs_ioc_start_atomic_write(struct file *filp)
1764 * f2fs_is_atomic_file. 1842 * f2fs_is_atomic_file.
1765 */ 1843 */
1766 if (get_dirty_pages(inode)) 1844 if (get_dirty_pages(inode))
1767 f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING, 1845 f2fs_warn(F2FS_I_SB(inode), "Unexpected flush for atomic writes: ino=%lu, npages=%u",
1768 "Unexpected flush for atomic writes: ino=%lu, npages=%u", 1846 inode->i_ino, get_dirty_pages(inode));
1769 inode->i_ino, get_dirty_pages(inode));
1770 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX); 1847 ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
1771 if (ret) { 1848 if (ret) {
1772 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]); 1849 up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
@@ -2201,8 +2278,7 @@ static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
2201 return -EROFS; 2278 return -EROFS;
2202 2279
2203 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) { 2280 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED))) {
2204 f2fs_msg(sbi->sb, KERN_INFO, 2281 f2fs_info(sbi, "Skipping Checkpoint. Checkpoints currently disabled.");
2205 "Skipping Checkpoint. Checkpoints currently disabled.");
2206 return -EINVAL; 2282 return -EINVAL;
2207 } 2283 }
2208 2284
@@ -2291,7 +2367,7 @@ static int f2fs_defragment_range(struct f2fs_sb_info *sbi,
2291 if (!fragmented) 2367 if (!fragmented)
2292 goto out; 2368 goto out;
2293 2369
2294 sec_num = (total + BLKS_PER_SEC(sbi) - 1) / BLKS_PER_SEC(sbi); 2370 sec_num = DIV_ROUND_UP(total, BLKS_PER_SEC(sbi));
2295 2371
2296 /* 2372 /*
2297 * make sure there are enough free section for LFS allocation, this can 2373 * make sure there are enough free section for LFS allocation, this can
@@ -2587,10 +2663,8 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
2587 2663
2588 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num || 2664 if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
2589 __is_large_section(sbi)) { 2665 __is_large_section(sbi)) {
2590 f2fs_msg(sbi->sb, KERN_WARNING, 2666 f2fs_warn(sbi, "Can't flush %u in %d for segs_per_sec %u != 1",
2591 "Can't flush %u in %d for segs_per_sec %u != 1", 2667 range.dev_num, sbi->s_ndevs, sbi->segs_per_sec);
2592 range.dev_num, sbi->s_ndevs,
2593 sbi->segs_per_sec);
2594 return -EINVAL; 2668 return -EINVAL;
2595 } 2669 }
2596 2670
@@ -2727,47 +2801,56 @@ static int f2fs_ioc_setproject(struct file *filp, __u32 projid)
2727} 2801}
2728#endif 2802#endif
2729 2803
2730/* Transfer internal flags to xflags */ 2804/* FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR support */
2731static inline __u32 f2fs_iflags_to_xflags(unsigned long iflags) 2805
2732{ 2806/*
2733 __u32 xflags = 0; 2807 * To make a new on-disk f2fs i_flag gettable via FS_IOC_FSGETXATTR and settable
2734 2808 * via FS_IOC_FSSETXATTR, add an entry for it to f2fs_xflags_map[], and add its
2735 if (iflags & F2FS_SYNC_FL) 2809 * FS_XFLAG_* equivalent to F2FS_SUPPORTED_XFLAGS.
2736 xflags |= FS_XFLAG_SYNC; 2810 */
2737 if (iflags & F2FS_IMMUTABLE_FL) 2811
2738 xflags |= FS_XFLAG_IMMUTABLE; 2812static const struct {
2739 if (iflags & F2FS_APPEND_FL) 2813 u32 iflag;
2740 xflags |= FS_XFLAG_APPEND; 2814 u32 xflag;
2741 if (iflags & F2FS_NODUMP_FL) 2815} f2fs_xflags_map[] = {
2742 xflags |= FS_XFLAG_NODUMP; 2816 { F2FS_SYNC_FL, FS_XFLAG_SYNC },
2743 if (iflags & F2FS_NOATIME_FL) 2817 { F2FS_IMMUTABLE_FL, FS_XFLAG_IMMUTABLE },
2744 xflags |= FS_XFLAG_NOATIME; 2818 { F2FS_APPEND_FL, FS_XFLAG_APPEND },
2745 if (iflags & F2FS_PROJINHERIT_FL) 2819 { F2FS_NODUMP_FL, FS_XFLAG_NODUMP },
2746 xflags |= FS_XFLAG_PROJINHERIT; 2820 { F2FS_NOATIME_FL, FS_XFLAG_NOATIME },
2821 { F2FS_PROJINHERIT_FL, FS_XFLAG_PROJINHERIT },
2822};
2823
2824#define F2FS_SUPPORTED_XFLAGS ( \
2825 FS_XFLAG_SYNC | \
2826 FS_XFLAG_IMMUTABLE | \
2827 FS_XFLAG_APPEND | \
2828 FS_XFLAG_NODUMP | \
2829 FS_XFLAG_NOATIME | \
2830 FS_XFLAG_PROJINHERIT)
2831
2832/* Convert f2fs on-disk i_flags to FS_IOC_FS{GET,SET}XATTR flags */
2833static inline u32 f2fs_iflags_to_xflags(u32 iflags)
2834{
2835 u32 xflags = 0;
2836 int i;
2837
2838 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
2839 if (iflags & f2fs_xflags_map[i].iflag)
2840 xflags |= f2fs_xflags_map[i].xflag;
2841
2747 return xflags; 2842 return xflags;
2748} 2843}
2749 2844
2750#define F2FS_SUPPORTED_FS_XFLAGS (FS_XFLAG_SYNC | FS_XFLAG_IMMUTABLE | \ 2845/* Convert FS_IOC_FS{GET,SET}XATTR flags to f2fs on-disk i_flags */
2751 FS_XFLAG_APPEND | FS_XFLAG_NODUMP | \ 2846static inline u32 f2fs_xflags_to_iflags(u32 xflags)
2752 FS_XFLAG_NOATIME | FS_XFLAG_PROJINHERIT)
2753
2754/* Transfer xflags flags to internal */
2755static inline unsigned long f2fs_xflags_to_iflags(__u32 xflags)
2756{ 2847{
2757 unsigned long iflags = 0; 2848 u32 iflags = 0;
2849 int i;
2758 2850
2759 if (xflags & FS_XFLAG_SYNC) 2851 for (i = 0; i < ARRAY_SIZE(f2fs_xflags_map); i++)
2760 iflags |= F2FS_SYNC_FL; 2852 if (xflags & f2fs_xflags_map[i].xflag)
2761 if (xflags & FS_XFLAG_IMMUTABLE) 2853 iflags |= f2fs_xflags_map[i].iflag;
2762 iflags |= F2FS_IMMUTABLE_FL;
2763 if (xflags & FS_XFLAG_APPEND)
2764 iflags |= F2FS_APPEND_FL;
2765 if (xflags & FS_XFLAG_NODUMP)
2766 iflags |= F2FS_NODUMP_FL;
2767 if (xflags & FS_XFLAG_NOATIME)
2768 iflags |= F2FS_NOATIME_FL;
2769 if (xflags & FS_XFLAG_PROJINHERIT)
2770 iflags |= F2FS_PROJINHERIT_FL;
2771 2854
2772 return iflags; 2855 return iflags;
2773} 2856}
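The same table-driven translation backs FS_IOC_FSGETXATTR and FS_IOC_FSSETXATTR through f2fs_xflags_map[]. A hedged userspace sketch that reads the xflags and project ID back; the path is illustrative:

#include <fcntl.h>
#include <linux/fs.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        struct fsxattr fa;
        int fd = open("/mnt/f2fs/testfile", O_RDONLY);  /* illustrative path */

        if (fd < 0 || ioctl(fd, FS_IOC_FSGETXATTR, &fa) < 0) {
                perror("FS_IOC_FSGETXATTR");
                return 1;
        }

        printf("fsx_xflags=0x%x fsx_projid=%u\n", fa.fsx_xflags, fa.fsx_projid);
        close(fd);
        return 0;
}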
@@ -2779,8 +2862,7 @@ static int f2fs_ioc_fsgetxattr(struct file *filp, unsigned long arg)
2779 struct fsxattr fa; 2862 struct fsxattr fa;
2780 2863
2781 memset(&fa, 0, sizeof(struct fsxattr)); 2864 memset(&fa, 0, sizeof(struct fsxattr));
2782 fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags & 2865 fa.fsx_xflags = f2fs_iflags_to_xflags(fi->i_flags);
2783 F2FS_FL_USER_VISIBLE);
2784 2866
2785 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode))) 2867 if (f2fs_sb_has_project_quota(F2FS_I_SB(inode)))
2786 fa.fsx_projid = (__u32)from_kprojid(&init_user_ns, 2868 fa.fsx_projid = (__u32)from_kprojid(&init_user_ns,
@@ -2818,9 +2900,8 @@ static int f2fs_ioctl_check_project(struct inode *inode, struct fsxattr *fa)
2818static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg) 2900static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
2819{ 2901{
2820 struct inode *inode = file_inode(filp); 2902 struct inode *inode = file_inode(filp);
2821 struct f2fs_inode_info *fi = F2FS_I(inode);
2822 struct fsxattr fa; 2903 struct fsxattr fa;
2823 unsigned int flags; 2904 u32 iflags;
2824 int err; 2905 int err;
2825 2906
2826 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa))) 2907 if (copy_from_user(&fa, (struct fsxattr __user *)arg, sizeof(fa)))
@@ -2830,11 +2911,11 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
2830 if (!inode_owner_or_capable(inode)) 2911 if (!inode_owner_or_capable(inode))
2831 return -EACCES; 2912 return -EACCES;
2832 2913
2833 if (fa.fsx_xflags & ~F2FS_SUPPORTED_FS_XFLAGS) 2914 if (fa.fsx_xflags & ~F2FS_SUPPORTED_XFLAGS)
2834 return -EOPNOTSUPP; 2915 return -EOPNOTSUPP;
2835 2916
2836 flags = f2fs_xflags_to_iflags(fa.fsx_xflags); 2917 iflags = f2fs_xflags_to_iflags(fa.fsx_xflags);
2837 if (f2fs_mask_flags(inode->i_mode, flags) != flags) 2918 if (f2fs_mask_flags(inode->i_mode, iflags) != iflags)
2838 return -EOPNOTSUPP; 2919 return -EOPNOTSUPP;
2839 2920
2840 err = mnt_want_write_file(filp); 2921 err = mnt_want_write_file(filp);
@@ -2845,9 +2926,8 @@ static int f2fs_ioc_fssetxattr(struct file *filp, unsigned long arg)
2845 err = f2fs_ioctl_check_project(inode, &fa); 2926 err = f2fs_ioctl_check_project(inode, &fa);
2846 if (err) 2927 if (err)
2847 goto out; 2928 goto out;
2848 flags = (fi->i_flags & ~F2FS_FL_XFLAG_VISIBLE) | 2929 err = f2fs_setflags_common(inode, iflags,
2849 (flags & F2FS_FL_XFLAG_VISIBLE); 2930 f2fs_xflags_to_iflags(F2FS_SUPPORTED_XFLAGS));
2850 err = __f2fs_ioc_setflags(inode, flags);
2851 if (err) 2931 if (err)
2852 goto out; 2932 goto out;
2853 2933
@@ -2869,10 +2949,9 @@ int f2fs_pin_file_control(struct inode *inode, bool inc)
2869 fi->i_gc_failures[GC_FAILURE_PIN] + 1); 2949 fi->i_gc_failures[GC_FAILURE_PIN] + 1);
2870 2950
2871 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) { 2951 if (fi->i_gc_failures[GC_FAILURE_PIN] > sbi->gc_pin_file_threshold) {
2872 f2fs_msg(sbi->sb, KERN_WARNING, 2952 f2fs_warn(sbi, "%s: Enable GC = ino %lx after %x GC trials",
2873 "%s: Enable GC = ino %lx after %x GC trials", 2953 __func__, inode->i_ino,
2874 __func__, inode->i_ino, 2954 fi->i_gc_failures[GC_FAILURE_PIN]);
2875 fi->i_gc_failures[GC_FAILURE_PIN]);
2876 clear_inode_flag(inode, FI_PIN_FILE); 2955 clear_inode_flag(inode, FI_PIN_FILE);
2877 return -EAGAIN; 2956 return -EAGAIN;
2878 } 2957 }
@@ -2885,9 +2964,6 @@ static int f2fs_ioc_set_pin_file(struct file *filp, unsigned long arg)
2885 __u32 pin; 2964 __u32 pin;
2886 int ret = 0; 2965 int ret = 0;
2887 2966
2888 if (!capable(CAP_SYS_ADMIN))
2889 return -EPERM;
2890
2891 if (get_user(pin, (__u32 __user *)arg)) 2967 if (get_user(pin, (__u32 __user *)arg))
2892 return -EFAULT; 2968 return -EFAULT;
2893 2969
@@ -2980,6 +3056,27 @@ static int f2fs_ioc_precache_extents(struct file *filp, unsigned long arg)
2980 return f2fs_precache_extents(file_inode(filp)); 3056 return f2fs_precache_extents(file_inode(filp));
2981} 3057}
2982 3058
3059static int f2fs_ioc_resize_fs(struct file *filp, unsigned long arg)
3060{
3061 struct f2fs_sb_info *sbi = F2FS_I_SB(file_inode(filp));
3062 __u64 block_count;
3063 int ret;
3064
3065 if (!capable(CAP_SYS_ADMIN))
3066 return -EPERM;
3067
3068 if (f2fs_readonly(sbi->sb))
3069 return -EROFS;
3070
3071 if (copy_from_user(&block_count, (void __user *)arg,
3072 sizeof(block_count)))
3073 return -EFAULT;
3074
3075 ret = f2fs_resize_fs(sbi, block_count);
3076
3077 return ret;
3078}
3079
2983long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 3080long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2984{ 3081{
2985 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp))))) 3082 if (unlikely(f2fs_cp_error(F2FS_I_SB(file_inode(filp)))))
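f2fs_ioc_resize_fs() copies a __u64 target block count from userspace and hands it to f2fs_resize_fs(). A hedged sketch of invoking it: the ioctl definition is assumed to mirror the one this series adds to fs/f2fs/f2fs.h, so verify the value against your headers before relying on it, and the mount point is illustrative.

#include <fcntl.h>
#include <linux/ioctl.h>
#include <linux/types.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#ifndef F2FS_IOC_RESIZE_FS
/* Assumed definitions; check fs/f2fs/f2fs.h from this series. */
#define F2FS_IOCTL_MAGIC        0xf5
#define F2FS_IOC_RESIZE_FS      _IOW(F2FS_IOCTL_MAGIC, 16, __u64)
#endif

int main(void)
{
        __u64 new_block_count = 25690112;       /* section-aligned shrink target */
        int fd = open("/mnt/f2fs", O_RDONLY);   /* illustrative mount point */

        if (fd < 0 || ioctl(fd, F2FS_IOC_RESIZE_FS, &new_block_count) < 0) {
                perror("F2FS_IOC_RESIZE_FS");
                return 1;
        }
        close(fd);
        return 0;
}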
@@ -3036,6 +3133,8 @@ long f2fs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
3036 return f2fs_ioc_set_pin_file(filp, arg); 3133 return f2fs_ioc_set_pin_file(filp, arg);
3037 case F2FS_IOC_PRECACHE_EXTENTS: 3134 case F2FS_IOC_PRECACHE_EXTENTS:
3038 return f2fs_ioc_precache_extents(filp, arg); 3135 return f2fs_ioc_precache_extents(filp, arg);
3136 case F2FS_IOC_RESIZE_FS:
3137 return f2fs_ioc_resize_fs(filp, arg);
3039 default: 3138 default:
3040 return -ENOTTY; 3139 return -ENOTTY;
3041 } 3140 }
@@ -3149,6 +3248,7 @@ long f2fs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3149 case F2FS_IOC_GET_PIN_FILE: 3248 case F2FS_IOC_GET_PIN_FILE:
3150 case F2FS_IOC_SET_PIN_FILE: 3249 case F2FS_IOC_SET_PIN_FILE:
3151 case F2FS_IOC_PRECACHE_EXTENTS: 3250 case F2FS_IOC_PRECACHE_EXTENTS:
3251 case F2FS_IOC_RESIZE_FS:
3152 break; 3252 break;
3153 default: 3253 default:
3154 return -ENOIOCTLCMD; 3254 return -ENOIOCTLCMD;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 963fb4571fd9..6691f526fa40 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -311,10 +311,11 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
311 struct sit_info *sm = SIT_I(sbi); 311 struct sit_info *sm = SIT_I(sbi);
312 struct victim_sel_policy p; 312 struct victim_sel_policy p;
313 unsigned int secno, last_victim; 313 unsigned int secno, last_victim;
314 unsigned int last_segment = MAIN_SEGS(sbi); 314 unsigned int last_segment;
315 unsigned int nsearched = 0; 315 unsigned int nsearched = 0;
316 316
317 mutex_lock(&dirty_i->seglist_lock); 317 mutex_lock(&dirty_i->seglist_lock);
318 last_segment = MAIN_SECS(sbi) * sbi->segs_per_sec;
318 319
319 p.alloc_mode = alloc_mode; 320 p.alloc_mode = alloc_mode;
320 select_policy(sbi, gc_type, type, &p); 321 select_policy(sbi, gc_type, type, &p);
@@ -387,7 +388,8 @@ static int get_victim_by_default(struct f2fs_sb_info *sbi,
387 goto next; 388 goto next;
388 /* Don't touch checkpointed data */ 389 /* Don't touch checkpointed data */
389 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) && 390 if (unlikely(is_sbi_flag_set(sbi, SBI_CP_DISABLED) &&
390 get_ckpt_valid_blocks(sbi, segno))) 391 get_ckpt_valid_blocks(sbi, segno) &&
392 p.alloc_mode != SSR))
391 goto next; 393 goto next;
392 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap)) 394 if (gc_type == BG_GC && test_bit(secno, dirty_i->victim_secmap))
393 goto next; 395 goto next;
@@ -404,7 +406,8 @@ next:
404 sm->last_victim[p.gc_mode] = last_victim + 1; 406 sm->last_victim[p.gc_mode] = last_victim + 1;
405 else 407 else
406 sm->last_victim[p.gc_mode] = segno + 1; 408 sm->last_victim[p.gc_mode] = segno + 1;
407 sm->last_victim[p.gc_mode] %= MAIN_SEGS(sbi); 409 sm->last_victim[p.gc_mode] %=
410 (MAIN_SECS(sbi) * sbi->segs_per_sec);
408 break; 411 break;
409 } 412 }
410 } 413 }
@@ -615,9 +618,8 @@ static bool is_alive(struct f2fs_sb_info *sbi, struct f2fs_summary *sum,
615 } 618 }
616 619
617 if (sum->version != dni->version) { 620 if (sum->version != dni->version) {
618 f2fs_msg(sbi->sb, KERN_WARNING, 621 f2fs_warn(sbi, "%s: valid data with mismatched node version.",
619 "%s: valid data with mismatched node version.", 622 __func__);
620 __func__);
621 set_sbi_flag(sbi, SBI_NEED_FSCK); 623 set_sbi_flag(sbi, SBI_NEED_FSCK);
622 } 624 }
623 625
@@ -658,7 +660,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
658 dn.data_blkaddr = ei.blk + index - ei.fofs; 660 dn.data_blkaddr = ei.blk + index - ei.fofs;
659 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, 661 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
660 DATA_GENERIC_ENHANCE_READ))) { 662 DATA_GENERIC_ENHANCE_READ))) {
661 err = -EFAULT; 663 err = -EFSCORRUPTED;
662 goto put_page; 664 goto put_page;
663 } 665 }
664 goto got_it; 666 goto got_it;
@@ -676,7 +678,7 @@ static int ra_data_block(struct inode *inode, pgoff_t index)
676 } 678 }
677 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr, 679 if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
678 DATA_GENERIC_ENHANCE))) { 680 DATA_GENERIC_ENHANCE))) {
679 err = -EFAULT; 681 err = -EFSCORRUPTED;
680 goto put_page; 682 goto put_page;
681 } 683 }
682got_it: 684got_it:
@@ -1180,9 +1182,8 @@ static int do_garbage_collect(struct f2fs_sb_info *sbi,
1180 1182
1181 sum = page_address(sum_page); 1183 sum = page_address(sum_page);
1182 if (type != GET_SUM_TYPE((&sum->footer))) { 1184 if (type != GET_SUM_TYPE((&sum->footer))) {
1183 f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent segment (%u) " 1185 f2fs_err(sbi, "Inconsistent segment (%u) type [%d, %d] in SSA and SIT",
1184 "type [%d, %d] in SSA and SIT", 1186 segno, type, GET_SUM_TYPE((&sum->footer)));
1185 segno, type, GET_SUM_TYPE((&sum->footer)));
1186 set_sbi_flag(sbi, SBI_NEED_FSCK); 1187 set_sbi_flag(sbi, SBI_NEED_FSCK);
1187 f2fs_stop_checkpoint(sbi, false); 1188 f2fs_stop_checkpoint(sbi, false);
1188 goto skip; 1189 goto skip;
@@ -1360,3 +1361,176 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
1360 SIT_I(sbi)->last_victim[ALLOC_NEXT] = 1361 SIT_I(sbi)->last_victim[ALLOC_NEXT] =
1361 GET_SEGNO(sbi, FDEV(0).end_blk) + 1; 1362 GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
1362} 1363}
1364
1365static int free_segment_range(struct f2fs_sb_info *sbi, unsigned int start,
1366 unsigned int end)
1367{
1368 int type;
1369 unsigned int segno, next_inuse;
1370 int err = 0;
1371
1372 /* Move out cursegs from the target range */
1373 for (type = CURSEG_HOT_DATA; type < NR_CURSEG_TYPE; type++)
1374 allocate_segment_for_resize(sbi, type, start, end);
1375
1376 /* do GC to move out valid blocks in the range */
1377 for (segno = start; segno <= end; segno += sbi->segs_per_sec) {
1378 struct gc_inode_list gc_list = {
1379 .ilist = LIST_HEAD_INIT(gc_list.ilist),
1380 .iroot = RADIX_TREE_INIT(gc_list.iroot, GFP_NOFS),
1381 };
1382
1383 mutex_lock(&sbi->gc_mutex);
1384 do_garbage_collect(sbi, segno, &gc_list, FG_GC);
1385 mutex_unlock(&sbi->gc_mutex);
1386 put_gc_inode(&gc_list);
1387
1388 if (get_valid_blocks(sbi, segno, true))
1389 return -EAGAIN;
1390 }
1391
1392 err = f2fs_sync_fs(sbi->sb, 1);
1393 if (err)
1394 return err;
1395
1396 next_inuse = find_next_inuse(FREE_I(sbi), end + 1, start);
1397 if (next_inuse <= end) {
1398 f2fs_err(sbi, "segno %u should be free but still inuse!",
1399 next_inuse);
1400 f2fs_bug_on(sbi, 1);
1401 }
1402 return err;
1403}
1404
1405static void update_sb_metadata(struct f2fs_sb_info *sbi, int secs)
1406{
1407 struct f2fs_super_block *raw_sb = F2FS_RAW_SUPER(sbi);
1408 int section_count = le32_to_cpu(raw_sb->section_count);
1409 int segment_count = le32_to_cpu(raw_sb->segment_count);
1410 int segment_count_main = le32_to_cpu(raw_sb->segment_count_main);
1411 long long block_count = le64_to_cpu(raw_sb->block_count);
1412 int segs = secs * sbi->segs_per_sec;
1413
1414 raw_sb->section_count = cpu_to_le32(section_count + secs);
1415 raw_sb->segment_count = cpu_to_le32(segment_count + segs);
1416 raw_sb->segment_count_main = cpu_to_le32(segment_count_main + segs);
1417 raw_sb->block_count = cpu_to_le64(block_count +
1418 (long long)segs * sbi->blocks_per_seg);
1419}
1420
1421static void update_fs_metadata(struct f2fs_sb_info *sbi, int secs)
1422{
1423 int segs = secs * sbi->segs_per_sec;
1424 long long user_block_count =
1425 le64_to_cpu(F2FS_CKPT(sbi)->user_block_count);
1426
1427 SM_I(sbi)->segment_count = (int)SM_I(sbi)->segment_count + segs;
1428 MAIN_SEGS(sbi) = (int)MAIN_SEGS(sbi) + segs;
1429 FREE_I(sbi)->free_sections = (int)FREE_I(sbi)->free_sections + secs;
1430 FREE_I(sbi)->free_segments = (int)FREE_I(sbi)->free_segments + segs;
1431 F2FS_CKPT(sbi)->user_block_count = cpu_to_le64(user_block_count +
1432 (long long)segs * sbi->blocks_per_seg);
1433}
1434
1435int f2fs_resize_fs(struct f2fs_sb_info *sbi, __u64 block_count)
1436{
1437 __u64 old_block_count, shrunk_blocks;
1438 unsigned int secs;
1439 int gc_mode, gc_type;
1440 int err = 0;
1441 __u32 rem;
1442
1443 old_block_count = le64_to_cpu(F2FS_RAW_SUPER(sbi)->block_count);
1444 if (block_count > old_block_count)
1445 return -EINVAL;
1446
1447 /* new fs size should align to section size */
1448 div_u64_rem(block_count, BLKS_PER_SEC(sbi), &rem);
1449 if (rem)
1450 return -EINVAL;
1451
1452 if (block_count == old_block_count)
1453 return 0;
1454
1455 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
1456 f2fs_err(sbi, "Should run fsck to repair first.");
1457 return -EFSCORRUPTED;
1458 }
1459
1460 if (test_opt(sbi, DISABLE_CHECKPOINT)) {
1461 f2fs_err(sbi, "Checkpoint should be enabled.");
1462 return -EINVAL;
1463 }
1464
1465 freeze_bdev(sbi->sb->s_bdev);
1466
1467 shrunk_blocks = old_block_count - block_count;
1468 secs = div_u64(shrunk_blocks, BLKS_PER_SEC(sbi));
1469 spin_lock(&sbi->stat_lock);
1470 if (shrunk_blocks + valid_user_blocks(sbi) +
1471 sbi->current_reserved_blocks + sbi->unusable_block_count +
1472 F2FS_OPTION(sbi).root_reserved_blocks > sbi->user_block_count)
1473 err = -ENOSPC;
1474 else
1475 sbi->user_block_count -= shrunk_blocks;
1476 spin_unlock(&sbi->stat_lock);
1477 if (err) {
1478 thaw_bdev(sbi->sb->s_bdev, sbi->sb);
1479 return err;
1480 }
1481
1482 mutex_lock(&sbi->resize_mutex);
1483 set_sbi_flag(sbi, SBI_IS_RESIZEFS);
1484
1485 mutex_lock(&DIRTY_I(sbi)->seglist_lock);
1486
1487 MAIN_SECS(sbi) -= secs;
1488
1489 for (gc_mode = 0; gc_mode < MAX_GC_POLICY; gc_mode++)
1490 if (SIT_I(sbi)->last_victim[gc_mode] >=
1491 MAIN_SECS(sbi) * sbi->segs_per_sec)
1492 SIT_I(sbi)->last_victim[gc_mode] = 0;
1493
1494 for (gc_type = BG_GC; gc_type <= FG_GC; gc_type++)
1495 if (sbi->next_victim_seg[gc_type] >=
1496 MAIN_SECS(sbi) * sbi->segs_per_sec)
1497 sbi->next_victim_seg[gc_type] = NULL_SEGNO;
1498
1499 mutex_unlock(&DIRTY_I(sbi)->seglist_lock);
1500
1501 err = free_segment_range(sbi, MAIN_SECS(sbi) * sbi->segs_per_sec,
1502 MAIN_SEGS(sbi) - 1);
1503 if (err)
1504 goto out;
1505
1506 update_sb_metadata(sbi, -secs);
1507
1508 err = f2fs_commit_super(sbi, false);
1509 if (err) {
1510 update_sb_metadata(sbi, secs);
1511 goto out;
1512 }
1513
1514 update_fs_metadata(sbi, -secs);
1515 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
1516 err = f2fs_sync_fs(sbi->sb, 1);
1517 if (err) {
1518 update_fs_metadata(sbi, secs);
1519 update_sb_metadata(sbi, secs);
1520 f2fs_commit_super(sbi, false);
1521 }
1522out:
1523 if (err) {
1524 set_sbi_flag(sbi, SBI_NEED_FSCK);
1525 f2fs_err(sbi, "resize_fs failed, should run fsck to repair!");
1526
1527 MAIN_SECS(sbi) += secs;
1528 spin_lock(&sbi->stat_lock);
1529 sbi->user_block_count += shrunk_blocks;
1530 spin_unlock(&sbi->stat_lock);
1531 }
1532 clear_sbi_flag(sbi, SBI_IS_RESIZEFS);
1533 mutex_unlock(&sbi->resize_mutex);
1534 thaw_bdev(sbi->sb->s_bdev, sbi->sb);
1535 return err;
1536}
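f2fs_resize_fs() only shrinks the filesystem: the new block count must not exceed the current one and must be a multiple of BLKS_PER_SEC(sbi), i.e. blocks_per_seg * segs_per_sec, which is what the div_u64_rem() check above enforces. A small worked sketch of deriving a valid target from the number of sections to give back; the geometry values are illustrative defaults (4 KiB blocks, 512-block segments, one segment per section):

#include <stdio.h>

int main(void)
{
        unsigned long long old_block_count = 26214400;  /* e.g. 100 GiB of 4 KiB blocks */
        unsigned long long blks_per_sec = 512 * 1;      /* blocks_per_seg * segs_per_sec */
        unsigned long long drop_secs = 1024;            /* sections to return to the device */
        unsigned long long new_block_count =
                old_block_count - drop_secs * blks_per_sec;

        if (new_block_count % blks_per_sec || new_block_count > old_block_count)
                fprintf(stderr, "invalid resize target\n");
        else
                printf("resize target: %llu blocks\n", new_block_count);
        return 0;
}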
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 404d2462a0fe..3613efca8c00 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -140,11 +140,9 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
140 if (unlikely(dn->data_blkaddr != NEW_ADDR)) { 140 if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
141 f2fs_put_dnode(dn); 141 f2fs_put_dnode(dn);
142 set_sbi_flag(fio.sbi, SBI_NEED_FSCK); 142 set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
143 f2fs_msg(fio.sbi->sb, KERN_WARNING, 143 f2fs_warn(fio.sbi, "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
144 "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, " 144 __func__, dn->inode->i_ino, dn->data_blkaddr);
145 "run fsck to fix.", 145 return -EFSCORRUPTED;
146 __func__, dn->inode->i_ino, dn->data_blkaddr);
147 return -EINVAL;
148 } 146 }
149 147
150 f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page)); 148 f2fs_bug_on(F2FS_P_SB(page), PageWriteback(page));
@@ -383,11 +381,9 @@ static int f2fs_move_inline_dirents(struct inode *dir, struct page *ipage,
383 if (unlikely(dn.data_blkaddr != NEW_ADDR)) { 381 if (unlikely(dn.data_blkaddr != NEW_ADDR)) {
384 f2fs_put_dnode(&dn); 382 f2fs_put_dnode(&dn);
385 set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK); 383 set_sbi_flag(F2FS_P_SB(page), SBI_NEED_FSCK);
386 f2fs_msg(F2FS_P_SB(page)->sb, KERN_WARNING, 384 f2fs_warn(F2FS_P_SB(page), "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, run fsck to fix.",
387 "%s: corrupted inline inode ino=%lx, i_addr[0]:0x%x, " 385 __func__, dir->i_ino, dn.data_blkaddr);
388 "run fsck to fix.", 386 err = -EFSCORRUPTED;
389 __func__, dir->i_ino, dn.data_blkaddr);
390 err = -EINVAL;
391 goto out; 387 goto out;
392 } 388 }
393 389
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index ccb02226dd2c..a33d7a849b2d 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -74,7 +74,7 @@ static int __written_first_block(struct f2fs_sb_info *sbi,
74 if (!__is_valid_data_blkaddr(addr)) 74 if (!__is_valid_data_blkaddr(addr))
75 return 1; 75 return 1;
76 if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE)) 76 if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC_ENHANCE))
77 return -EFAULT; 77 return -EFSCORRUPTED;
78 return 0; 78 return 0;
79} 79}
80 80
@@ -176,9 +176,8 @@ bool f2fs_inode_chksum_verify(struct f2fs_sb_info *sbi, struct page *page)
176 calculated = f2fs_inode_chksum(sbi, page); 176 calculated = f2fs_inode_chksum(sbi, page);
177 177
178 if (provided != calculated) 178 if (provided != calculated)
179 f2fs_msg(sbi->sb, KERN_WARNING, 179 f2fs_warn(sbi, "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x",
180 "checksum invalid, nid = %lu, ino_of_node = %x, %x vs. %x", 180 page->index, ino_of_node(page), provided, calculated);
181 page->index, ino_of_node(page), provided, calculated);
182 181
183 return provided == calculated; 182 return provided == calculated;
184} 183}
@@ -202,50 +201,41 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
202 iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks); 201 iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
203 if (!iblocks) { 202 if (!iblocks) {
204 set_sbi_flag(sbi, SBI_NEED_FSCK); 203 set_sbi_flag(sbi, SBI_NEED_FSCK);
205 f2fs_msg(sbi->sb, KERN_WARNING, 204 f2fs_warn(sbi, "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, run fsck to fix.",
206 "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, " 205 __func__, inode->i_ino, iblocks);
207 "run fsck to fix.",
208 __func__, inode->i_ino, iblocks);
209 return false; 206 return false;
210 } 207 }
211 208
212 if (ino_of_node(node_page) != nid_of_node(node_page)) { 209 if (ino_of_node(node_page) != nid_of_node(node_page)) {
213 set_sbi_flag(sbi, SBI_NEED_FSCK); 210 set_sbi_flag(sbi, SBI_NEED_FSCK);
214 f2fs_msg(sbi->sb, KERN_WARNING, 211 f2fs_warn(sbi, "%s: corrupted inode footer i_ino=%lx, ino,nid: [%u, %u] run fsck to fix.",
215 "%s: corrupted inode footer i_ino=%lx, ino,nid: " 212 __func__, inode->i_ino,
216 "[%u, %u] run fsck to fix.", 213 ino_of_node(node_page), nid_of_node(node_page));
217 __func__, inode->i_ino,
218 ino_of_node(node_page), nid_of_node(node_page));
219 return false; 214 return false;
220 } 215 }
221 216
222 if (f2fs_sb_has_flexible_inline_xattr(sbi) 217 if (f2fs_sb_has_flexible_inline_xattr(sbi)
223 && !f2fs_has_extra_attr(inode)) { 218 && !f2fs_has_extra_attr(inode)) {
224 set_sbi_flag(sbi, SBI_NEED_FSCK); 219 set_sbi_flag(sbi, SBI_NEED_FSCK);
225 f2fs_msg(sbi->sb, KERN_WARNING, 220 f2fs_warn(sbi, "%s: corrupted inode ino=%lx, run fsck to fix.",
226 "%s: corrupted inode ino=%lx, run fsck to fix.", 221 __func__, inode->i_ino);
227 __func__, inode->i_ino);
228 return false; 222 return false;
229 } 223 }
230 224
231 if (f2fs_has_extra_attr(inode) && 225 if (f2fs_has_extra_attr(inode) &&
232 !f2fs_sb_has_extra_attr(sbi)) { 226 !f2fs_sb_has_extra_attr(sbi)) {
233 set_sbi_flag(sbi, SBI_NEED_FSCK); 227 set_sbi_flag(sbi, SBI_NEED_FSCK);
234 f2fs_msg(sbi->sb, KERN_WARNING, 228 f2fs_warn(sbi, "%s: inode (ino=%lx) is with extra_attr, but extra_attr feature is off",
235 "%s: inode (ino=%lx) is with extra_attr, " 229 __func__, inode->i_ino);
236 "but extra_attr feature is off",
237 __func__, inode->i_ino);
238 return false; 230 return false;
239 } 231 }
240 232
241 if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE || 233 if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
242 fi->i_extra_isize % sizeof(__le32)) { 234 fi->i_extra_isize % sizeof(__le32)) {
243 set_sbi_flag(sbi, SBI_NEED_FSCK); 235 set_sbi_flag(sbi, SBI_NEED_FSCK);
244 f2fs_msg(sbi->sb, KERN_WARNING, 236 f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, max: %zu",
245 "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, " 237 __func__, inode->i_ino, fi->i_extra_isize,
246 "max: %zu", 238 F2FS_TOTAL_EXTRA_ATTR_SIZE);
247 __func__, inode->i_ino, fi->i_extra_isize,
248 F2FS_TOTAL_EXTRA_ATTR_SIZE);
249 return false; 239 return false;
250 } 240 }
251 241
@@ -255,11 +245,9 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
255 (!fi->i_inline_xattr_size || 245 (!fi->i_inline_xattr_size ||
256 fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) { 246 fi->i_inline_xattr_size > MAX_INLINE_XATTR_SIZE)) {
257 set_sbi_flag(sbi, SBI_NEED_FSCK); 247 set_sbi_flag(sbi, SBI_NEED_FSCK);
258 f2fs_msg(sbi->sb, KERN_WARNING, 248 f2fs_warn(sbi, "%s: inode (ino=%lx) has corrupted i_inline_xattr_size: %d, max: %zu",
259 "%s: inode (ino=%lx) has corrupted " 249 __func__, inode->i_ino, fi->i_inline_xattr_size,
260 "i_inline_xattr_size: %d, max: %zu", 250 MAX_INLINE_XATTR_SIZE);
261 __func__, inode->i_ino, fi->i_inline_xattr_size,
262 MAX_INLINE_XATTR_SIZE);
263 return false; 251 return false;
264 } 252 }
265 253
@@ -272,11 +260,9 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
272 !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1, 260 !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
273 DATA_GENERIC_ENHANCE))) { 261 DATA_GENERIC_ENHANCE))) {
274 set_sbi_flag(sbi, SBI_NEED_FSCK); 262 set_sbi_flag(sbi, SBI_NEED_FSCK);
275 f2fs_msg(sbi->sb, KERN_WARNING, 263 f2fs_warn(sbi, "%s: inode (ino=%lx) extent info [%u, %u, %u] is incorrect, run fsck to fix",
276 "%s: inode (ino=%lx) extent info [%u, %u, %u] " 264 __func__, inode->i_ino,
277 "is incorrect, run fsck to fix", 265 ei->blk, ei->fofs, ei->len);
278 __func__, inode->i_ino,
279 ei->blk, ei->fofs, ei->len);
280 return false; 266 return false;
281 } 267 }
282 } 268 }
@@ -284,19 +270,15 @@ static bool sanity_check_inode(struct inode *inode, struct page *node_page)
284 if (f2fs_has_inline_data(inode) && 270 if (f2fs_has_inline_data(inode) &&
285 (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) { 271 (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
286 set_sbi_flag(sbi, SBI_NEED_FSCK); 272 set_sbi_flag(sbi, SBI_NEED_FSCK);
287 f2fs_msg(sbi->sb, KERN_WARNING, 273 f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_data, run fsck to fix",
288 "%s: inode (ino=%lx, mode=%u) should not have " 274 __func__, inode->i_ino, inode->i_mode);
289 "inline_data, run fsck to fix",
290 __func__, inode->i_ino, inode->i_mode);
291 return false; 275 return false;
292 } 276 }
293 277
294 if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) { 278 if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
295 set_sbi_flag(sbi, SBI_NEED_FSCK); 279 set_sbi_flag(sbi, SBI_NEED_FSCK);
296 f2fs_msg(sbi->sb, KERN_WARNING, 280 f2fs_warn(sbi, "%s: inode (ino=%lx, mode=%u) should not have inline_dentry, run fsck to fix",
297 "%s: inode (ino=%lx, mode=%u) should not have " 281 __func__, inode->i_ino, inode->i_mode);
298 "inline_dentry, run fsck to fix",
299 __func__, inode->i_ino, inode->i_mode);
300 return false; 282 return false;
301 } 283 }
302 284
@@ -343,6 +325,8 @@ static int do_read_inode(struct inode *inode)
343 le16_to_cpu(ri->i_gc_failures); 325 le16_to_cpu(ri->i_gc_failures);
344 fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid); 326 fi->i_xattr_nid = le32_to_cpu(ri->i_xattr_nid);
345 fi->i_flags = le32_to_cpu(ri->i_flags); 327 fi->i_flags = le32_to_cpu(ri->i_flags);
328 if (S_ISREG(inode->i_mode))
329 fi->i_flags &= ~F2FS_PROJINHERIT_FL;
346 fi->flags = 0; 330 fi->flags = 0;
347 fi->i_advise = ri->i_advise; 331 fi->i_advise = ri->i_advise;
348 fi->i_pino = le32_to_cpu(ri->i_pino); 332 fi->i_pino = le32_to_cpu(ri->i_pino);
@@ -374,7 +358,7 @@ static int do_read_inode(struct inode *inode)
374 358
375 if (!sanity_check_inode(inode, node_page)) { 359 if (!sanity_check_inode(inode, node_page)) {
376 f2fs_put_page(node_page, 1); 360 f2fs_put_page(node_page, 1);
377 return -EINVAL; 361 return -EFSCORRUPTED;
378 } 362 }
379 363
380 /* check data exist */ 364 /* check data exist */
@@ -783,8 +767,7 @@ void f2fs_handle_failed_inode(struct inode *inode)
783 err = f2fs_get_node_info(sbi, inode->i_ino, &ni); 767 err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
784 if (err) { 768 if (err) {
785 set_sbi_flag(sbi, SBI_NEED_FSCK); 769 set_sbi_flag(sbi, SBI_NEED_FSCK);
786 f2fs_msg(sbi->sb, KERN_WARNING, 770 f2fs_warn(sbi, "May loss orphan inode, run fsck to fix.");
787 "May loss orphan inode, run fsck to fix.");
788 goto out; 771 goto out;
789 } 772 }
790 773
@@ -792,8 +775,7 @@ void f2fs_handle_failed_inode(struct inode *inode)
792 err = f2fs_acquire_orphan_inode(sbi); 775 err = f2fs_acquire_orphan_inode(sbi);
793 if (err) { 776 if (err) {
794 set_sbi_flag(sbi, SBI_NEED_FSCK); 777 set_sbi_flag(sbi, SBI_NEED_FSCK);
795 f2fs_msg(sbi->sb, KERN_WARNING, 778 f2fs_warn(sbi, "Too many orphan inodes, run fsck to fix.");
796 "Too many orphan inodes, run fsck to fix.");
797 } else { 779 } else {
798 f2fs_add_orphan_inode(inode); 780 f2fs_add_orphan_inode(inode);
799 } 781 }
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 0f77f9242751..c5b99042e6f2 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -385,9 +385,8 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
385 int err = 0; 385 int err = 0;
386 386
387 if (f2fs_readonly(sbi->sb)) { 387 if (f2fs_readonly(sbi->sb)) {
388 f2fs_msg(sbi->sb, KERN_INFO, 388 f2fs_info(sbi, "skip recovering inline_dots inode (ino:%lu, pino:%u) in readonly mountpoint",
389 "skip recovering inline_dots inode (ino:%lu, pino:%u) " 389 dir->i_ino, pino);
390 "in readonly mountpoint", dir->i_ino, pino);
391 return 0; 390 return 0;
392 } 391 }
393 392
@@ -484,9 +483,8 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry,
484 if (IS_ENCRYPTED(dir) && 483 if (IS_ENCRYPTED(dir) &&
485 (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) && 484 (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
486 !fscrypt_has_permitted_context(dir, inode)) { 485 !fscrypt_has_permitted_context(dir, inode)) {
487 f2fs_msg(inode->i_sb, KERN_WARNING, 486 f2fs_warn(F2FS_I_SB(inode), "Inconsistent encryption contexts: %lu/%lu",
488 "Inconsistent encryption contexts: %lu/%lu", 487 dir->i_ino, inode->i_ino);
489 dir->i_ino, inode->i_ino);
490 err = -EPERM; 488 err = -EPERM;
491 goto out_iput; 489 goto out_iput;
492 } 490 }
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 18a038a2a9fa..a18b2a895771 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -34,10 +34,9 @@ int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
34{ 34{
35 if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) { 35 if (unlikely(nid < F2FS_ROOT_INO(sbi) || nid >= NM_I(sbi)->max_nid)) {
36 set_sbi_flag(sbi, SBI_NEED_FSCK); 36 set_sbi_flag(sbi, SBI_NEED_FSCK);
37 f2fs_msg(sbi->sb, KERN_WARNING, 37 f2fs_warn(sbi, "%s: out-of-range nid=%x, run fsck to fix.",
38 "%s: out-of-range nid=%x, run fsck to fix.", 38 __func__, nid);
39 __func__, nid); 39 return -EFSCORRUPTED;
40 return -EINVAL;
41 } 40 }
42 return 0; 41 return 0;
43} 42}
@@ -1189,10 +1188,8 @@ int f2fs_remove_inode_page(struct inode *inode)
1189 } 1188 }
1190 1189
1191 if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) { 1190 if (unlikely(inode->i_blocks != 0 && inode->i_blocks != 8)) {
1192 f2fs_msg(F2FS_I_SB(inode)->sb, KERN_WARNING, 1191 f2fs_warn(F2FS_I_SB(inode), "Inconsistent i_blocks, ino:%lu, iblocks:%llu",
1193 "Inconsistent i_blocks, ino:%lu, iblocks:%llu", 1192 inode->i_ino, (unsigned long long)inode->i_blocks);
1194 inode->i_ino,
1195 (unsigned long long)inode->i_blocks);
1196 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK); 1193 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
1197 } 1194 }
1198 1195
@@ -1291,7 +1288,7 @@ static int read_node_page(struct page *page, int op_flags)
1291 if (PageUptodate(page)) { 1288 if (PageUptodate(page)) {
1292 if (!f2fs_inode_chksum_verify(sbi, page)) { 1289 if (!f2fs_inode_chksum_verify(sbi, page)) {
1293 ClearPageUptodate(page); 1290 ClearPageUptodate(page);
1294 return -EBADMSG; 1291 return -EFSBADCRC;
1295 } 1292 }
1296 return LOCKED_PAGE; 1293 return LOCKED_PAGE;
1297 } 1294 }
@@ -1375,16 +1372,15 @@ repeat:
1375 } 1372 }
1376 1373
1377 if (!f2fs_inode_chksum_verify(sbi, page)) { 1374 if (!f2fs_inode_chksum_verify(sbi, page)) {
1378 err = -EBADMSG; 1375 err = -EFSBADCRC;
1379 goto out_err; 1376 goto out_err;
1380 } 1377 }
1381page_hit: 1378page_hit:
1382 if(unlikely(nid != nid_of_node(page))) { 1379 if(unlikely(nid != nid_of_node(page))) {
1383 f2fs_msg(sbi->sb, KERN_WARNING, "inconsistent node block, " 1380 f2fs_warn(sbi, "inconsistent node block, nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]",
1384 "nid:%lu, node_footer[nid:%u,ino:%u,ofs:%u,cpver:%llu,blkaddr:%u]", 1381 nid, nid_of_node(page), ino_of_node(page),
1385 nid, nid_of_node(page), ino_of_node(page), 1382 ofs_of_node(page), cpver_of_node(page),
1386 ofs_of_node(page), cpver_of_node(page), 1383 next_blkaddr_of_node(page));
1387 next_blkaddr_of_node(page));
1388 err = -EINVAL; 1384 err = -EINVAL;
1389out_err: 1385out_err:
1390 ClearPageUptodate(page); 1386 ClearPageUptodate(page);
@@ -1752,9 +1748,8 @@ continue_unlock:
1752 break; 1748 break;
1753 } 1749 }
1754 if (!ret && atomic && !marked) { 1750 if (!ret && atomic && !marked) {
1755 f2fs_msg(sbi->sb, KERN_DEBUG, 1751 f2fs_debug(sbi, "Retry to write fsync mark: ino=%u, idx=%lx",
1756 "Retry to write fsync mark: ino=%u, idx=%lx", 1752 ino, last_page->index);
1757 ino, last_page->index);
1758 lock_page(last_page); 1753 lock_page(last_page);
1759 f2fs_wait_on_page_writeback(last_page, NODE, true, true); 1754 f2fs_wait_on_page_writeback(last_page, NODE, true, true);
1760 set_page_dirty(last_page); 1755 set_page_dirty(last_page);
@@ -2304,8 +2299,7 @@ static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
2304 if (ret) { 2299 if (ret) {
2305 up_read(&nm_i->nat_tree_lock); 2300 up_read(&nm_i->nat_tree_lock);
2306 f2fs_bug_on(sbi, !mount); 2301 f2fs_bug_on(sbi, !mount);
2307 f2fs_msg(sbi->sb, KERN_ERR, 2302 f2fs_err(sbi, "NAT is corrupt, run fsck to fix it");
2308 "NAT is corrupt, run fsck to fix it");
2309 return ret; 2303 return ret;
2310 } 2304 }
2311 } 2305 }
@@ -2725,7 +2719,7 @@ static void __update_nat_bits(struct f2fs_sb_info *sbi, nid_t start_nid,
2725 i = 1; 2719 i = 1;
2726 } 2720 }
2727 for (; i < NAT_ENTRY_PER_BLOCK; i++) { 2721 for (; i < NAT_ENTRY_PER_BLOCK; i++) {
2728 if (nat_blk->entries[i].block_addr != NULL_ADDR) 2722 if (le32_to_cpu(nat_blk->entries[i].block_addr) != NULL_ADDR)
2729 valid++; 2723 valid++;
2730 } 2724 }
2731 if (valid == 0) { 2725 if (valid == 0) {
@@ -2915,7 +2909,7 @@ static int __get_nat_bitmaps(struct f2fs_sb_info *sbi)
2915 nm_i->full_nat_bits = nm_i->nat_bits + 8; 2909 nm_i->full_nat_bits = nm_i->nat_bits + 8;
2916 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes; 2910 nm_i->empty_nat_bits = nm_i->full_nat_bits + nat_bits_bytes;
2917 2911
2918 f2fs_msg(sbi->sb, KERN_NOTICE, "Found nat_bits in checkpoint"); 2912 f2fs_notice(sbi, "Found nat_bits in checkpoint");
2919 return 0; 2913 return 0;
2920} 2914}
2921 2915
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index e04f82b3f4fc..783773e4560d 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -188,10 +188,9 @@ out:
188 name = "<encrypted>"; 188 name = "<encrypted>";
189 else 189 else
190 name = raw_inode->i_name; 190 name = raw_inode->i_name;
191 f2fs_msg(inode->i_sb, KERN_NOTICE, 191 f2fs_notice(F2FS_I_SB(inode), "%s: ino = %x, name = %s, dir = %lx, err = %d",
192 "%s: ino = %x, name = %s, dir = %lx, err = %d", 192 __func__, ino_of_node(ipage), name,
193 __func__, ino_of_node(ipage), name, 193 IS_ERR(dir) ? 0 : dir->i_ino, err);
194 IS_ERR(dir) ? 0 : dir->i_ino, err);
195 return err; 194 return err;
196} 195}
197 196
@@ -292,9 +291,8 @@ static int recover_inode(struct inode *inode, struct page *page)
292 else 291 else
293 name = F2FS_INODE(page)->i_name; 292 name = F2FS_INODE(page)->i_name;
294 293
295 f2fs_msg(inode->i_sb, KERN_NOTICE, 294 f2fs_notice(F2FS_I_SB(inode), "recover_inode: ino = %x, name = %s, inline = %x",
296 "recover_inode: ino = %x, name = %s, inline = %x", 295 ino_of_node(page), name, raw->i_inline);
297 ino_of_node(page), name, raw->i_inline);
298 return 0; 296 return 0;
299} 297}
300 298
@@ -371,10 +369,9 @@ next:
371 /* sanity check in order to detect looped node chain */ 369 /* sanity check in order to detect looped node chain */
372 if (++loop_cnt >= free_blocks || 370 if (++loop_cnt >= free_blocks ||
373 blkaddr == next_blkaddr_of_node(page)) { 371 blkaddr == next_blkaddr_of_node(page)) {
374 f2fs_msg(sbi->sb, KERN_NOTICE, 372 f2fs_notice(sbi, "%s: detect looped node chain, blkaddr:%u, next:%u",
375 "%s: detect looped node chain, " 373 __func__, blkaddr,
376 "blkaddr:%u, next:%u", 374 next_blkaddr_of_node(page));
377 __func__, blkaddr, next_blkaddr_of_node(page));
378 f2fs_put_page(page, 1); 375 f2fs_put_page(page, 1);
379 err = -EINVAL; 376 err = -EINVAL;
380 break; 377 break;
@@ -553,11 +550,10 @@ retry_dn:
553 f2fs_bug_on(sbi, ni.ino != ino_of_node(page)); 550 f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
554 551
555 if (ofs_of_node(dn.node_page) != ofs_of_node(page)) { 552 if (ofs_of_node(dn.node_page) != ofs_of_node(page)) {
556 f2fs_msg(sbi->sb, KERN_WARNING, 553 f2fs_warn(sbi, "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u",
557 "Inconsistent ofs_of_node, ino:%lu, ofs:%u, %u", 554 inode->i_ino, ofs_of_node(dn.node_page),
558 inode->i_ino, ofs_of_node(dn.node_page), 555 ofs_of_node(page));
559 ofs_of_node(page)); 556 err = -EFSCORRUPTED;
560 err = -EFAULT;
561 goto err; 557 goto err;
562 } 558 }
563 559
@@ -569,13 +565,13 @@ retry_dn:
569 565
570 if (__is_valid_data_blkaddr(src) && 566 if (__is_valid_data_blkaddr(src) &&
571 !f2fs_is_valid_blkaddr(sbi, src, META_POR)) { 567 !f2fs_is_valid_blkaddr(sbi, src, META_POR)) {
572 err = -EFAULT; 568 err = -EFSCORRUPTED;
573 goto err; 569 goto err;
574 } 570 }
575 571
576 if (__is_valid_data_blkaddr(dest) && 572 if (__is_valid_data_blkaddr(dest) &&
577 !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) { 573 !f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
578 err = -EFAULT; 574 err = -EFSCORRUPTED;
579 goto err; 575 goto err;
580 } 576 }
581 577
@@ -642,11 +638,9 @@ retry_prev:
642err: 638err:
643 f2fs_put_dnode(&dn); 639 f2fs_put_dnode(&dn);
644out: 640out:
645 f2fs_msg(sbi->sb, KERN_NOTICE, 641 f2fs_notice(sbi, "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d",
646 "recover_data: ino = %lx (i_size: %s) recovered = %d, err = %d", 642 inode->i_ino, file_keep_isize(inode) ? "keep" : "recover",
647 inode->i_ino, 643 recovered, err);
648 file_keep_isize(inode) ? "keep" : "recover",
649 recovered, err);
650 return err; 644 return err;
651} 645}
652 646
@@ -734,8 +728,7 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
734#endif 728#endif
735 729
736 if (s_flags & SB_RDONLY) { 730 if (s_flags & SB_RDONLY) {
737 f2fs_msg(sbi->sb, KERN_INFO, 731 f2fs_info(sbi, "recover fsync data on readonly fs");
738 "recover fsync data on readonly fs");
739 sbi->sb->s_flags &= ~SB_RDONLY; 732 sbi->sb->s_flags &= ~SB_RDONLY;
740 } 733 }
741 734
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 8dee063c833f..a661ac32e829 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -546,9 +546,13 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi)
546 if (test_opt(sbi, DATA_FLUSH)) { 546 if (test_opt(sbi, DATA_FLUSH)) {
547 struct blk_plug plug; 547 struct blk_plug plug;
548 548
549 mutex_lock(&sbi->flush_lock);
550
549 blk_start_plug(&plug); 551 blk_start_plug(&plug);
550 f2fs_sync_dirty_inodes(sbi, FILE_INODE); 552 f2fs_sync_dirty_inodes(sbi, FILE_INODE);
551 blk_finish_plug(&plug); 553 blk_finish_plug(&plug);
554
555 mutex_unlock(&sbi->flush_lock);
552 } 556 }
553 f2fs_sync_fs(sbi->sb, true); 557 f2fs_sync_fs(sbi->sb, true);
554 stat_inc_bg_cp_count(sbi->stat_info); 558 stat_inc_bg_cp_count(sbi->stat_info);
@@ -869,11 +873,14 @@ void f2fs_dirty_to_prefree(struct f2fs_sb_info *sbi)
869 mutex_unlock(&dirty_i->seglist_lock); 873 mutex_unlock(&dirty_i->seglist_lock);
870} 874}
871 875
872int f2fs_disable_cp_again(struct f2fs_sb_info *sbi) 876block_t f2fs_get_unusable_blocks(struct f2fs_sb_info *sbi)
873{ 877{
878 int ovp_hole_segs =
879 (overprovision_segments(sbi) - reserved_segments(sbi));
880 block_t ovp_holes = ovp_hole_segs << sbi->log_blocks_per_seg;
874 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi); 881 struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
875 block_t ovp = overprovision_segments(sbi) << sbi->log_blocks_per_seg;
876 block_t holes[2] = {0, 0}; /* DATA and NODE */ 882 block_t holes[2] = {0, 0}; /* DATA and NODE */
883 block_t unusable;
877 struct seg_entry *se; 884 struct seg_entry *se;
878 unsigned int segno; 885 unsigned int segno;
879 886
@@ -887,10 +894,20 @@ int f2fs_disable_cp_again(struct f2fs_sb_info *sbi)
887 } 894 }
888 mutex_unlock(&dirty_i->seglist_lock); 895 mutex_unlock(&dirty_i->seglist_lock);
889 896
890 if (holes[DATA] > ovp || holes[NODE] > ovp) 897 unusable = holes[DATA] > holes[NODE] ? holes[DATA] : holes[NODE];
898 if (unusable > ovp_holes)
899 return unusable - ovp_holes;
900 return 0;
901}
902
903int f2fs_disable_cp_again(struct f2fs_sb_info *sbi, block_t unusable)
904{
905 int ovp_hole_segs =
906 (overprovision_segments(sbi) - reserved_segments(sbi));
907 if (unusable > F2FS_OPTION(sbi).unusable_cap)
891 return -EAGAIN; 908 return -EAGAIN;
892 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) && 909 if (is_sbi_flag_set(sbi, SBI_CP_DISABLED_QUICK) &&
893 dirty_segments(sbi) > overprovision_segments(sbi)) 910 dirty_segments(sbi) > ovp_hole_segs)
894 return -EAGAIN; 911 return -EAGAIN;
895 return 0; 912 return 0;
896} 913}
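To make the new accounting concrete, a rough worked example with made-up numbers: with log_blocks_per_seg = 9 (512 blocks per segment), overprovision_segments() = 60 and reserved_segments() = 40, the filesystem absorbs ovp_holes = (60 - 40) << 9 = 10240 blocks of holes before anything counts as unusable. If the dirty-segment scan then finds holes[DATA] = 30000 and holes[NODE] = 12000, f2fs_get_unusable_blocks() reports max(30000, 12000) - 10240 = 19760 blocks, and f2fs_disable_cp_again() only returns -EAGAIN when that figure exceeds the cap stored in F2FS_OPTION(sbi).unusable_cap (set by the checkpoint=disable mount options handled in super.c below).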
@@ -1480,6 +1497,10 @@ static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
1480 list_for_each_entry_safe(dc, tmp, pend_list, list) { 1497 list_for_each_entry_safe(dc, tmp, pend_list, list) {
1481 f2fs_bug_on(sbi, dc->state != D_PREP); 1498 f2fs_bug_on(sbi, dc->state != D_PREP);
1482 1499
1500 if (dpolicy->timeout != 0 &&
1501 f2fs_time_over(sbi, dpolicy->timeout))
1502 break;
1503
1483 if (dpolicy->io_aware && i < dpolicy->io_aware_gran && 1504 if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
1484 !is_idle(sbi, DISCARD_TIME)) { 1505 !is_idle(sbi, DISCARD_TIME)) {
1485 io_interrupted = true; 1506 io_interrupted = true;
@@ -1740,8 +1761,7 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1740 devi = f2fs_target_device_index(sbi, blkstart); 1761 devi = f2fs_target_device_index(sbi, blkstart);
1741 if (blkstart < FDEV(devi).start_blk || 1762 if (blkstart < FDEV(devi).start_blk ||
1742 blkstart > FDEV(devi).end_blk) { 1763 blkstart > FDEV(devi).end_blk) {
1743 f2fs_msg(sbi->sb, KERN_ERR, "Invalid block %x", 1764 f2fs_err(sbi, "Invalid block %x", blkstart);
1744 blkstart);
1745 return -EIO; 1765 return -EIO;
1746 } 1766 }
1747 blkstart -= FDEV(devi).start_blk; 1767 blkstart -= FDEV(devi).start_blk;
@@ -1754,10 +1774,9 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
1754 1774
1755 if (sector & (bdev_zone_sectors(bdev) - 1) || 1775 if (sector & (bdev_zone_sectors(bdev) - 1) ||
1756 nr_sects != bdev_zone_sectors(bdev)) { 1776 nr_sects != bdev_zone_sectors(bdev)) {
1757 f2fs_msg(sbi->sb, KERN_ERR, 1777 f2fs_err(sbi, "(%d) %s: Unaligned zone reset attempted (block %x + %x)",
1758 "(%d) %s: Unaligned zone reset attempted (block %x + %x)", 1778 devi, sbi->s_ndevs ? FDEV(devi).path : "",
1759 devi, sbi->s_ndevs ? FDEV(devi).path: "", 1779 blkstart, blklen);
1760 blkstart, blklen);
1761 return -EIO; 1780 return -EIO;
1762 } 1781 }
1763 trace_f2fs_issue_reset_zone(bdev, blkstart); 1782 trace_f2fs_issue_reset_zone(bdev, blkstart);
@@ -2121,15 +2140,14 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2121 mir_exist = f2fs_test_and_set_bit(offset, 2140 mir_exist = f2fs_test_and_set_bit(offset,
2122 se->cur_valid_map_mir); 2141 se->cur_valid_map_mir);
2123 if (unlikely(exist != mir_exist)) { 2142 if (unlikely(exist != mir_exist)) {
2124 f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error " 2143 f2fs_err(sbi, "Inconsistent error when setting bitmap, blk:%u, old bit:%d",
2125 "when setting bitmap, blk:%u, old bit:%d", 2144 blkaddr, exist);
2126 blkaddr, exist);
2127 f2fs_bug_on(sbi, 1); 2145 f2fs_bug_on(sbi, 1);
2128 } 2146 }
2129#endif 2147#endif
2130 if (unlikely(exist)) { 2148 if (unlikely(exist)) {
2131 f2fs_msg(sbi->sb, KERN_ERR, 2149 f2fs_err(sbi, "Bitmap was wrongly set, blk:%u",
2132 "Bitmap was wrongly set, blk:%u", blkaddr); 2150 blkaddr);
2133 f2fs_bug_on(sbi, 1); 2151 f2fs_bug_on(sbi, 1);
2134 se->valid_blocks--; 2152 se->valid_blocks--;
2135 del = 0; 2153 del = 0;
@@ -2150,15 +2168,14 @@ static void update_sit_entry(struct f2fs_sb_info *sbi, block_t blkaddr, int del)
2150 mir_exist = f2fs_test_and_clear_bit(offset, 2168 mir_exist = f2fs_test_and_clear_bit(offset,
2151 se->cur_valid_map_mir); 2169 se->cur_valid_map_mir);
2152 if (unlikely(exist != mir_exist)) { 2170 if (unlikely(exist != mir_exist)) {
2153 f2fs_msg(sbi->sb, KERN_ERR, "Inconsistent error " 2171 f2fs_err(sbi, "Inconsistent error when clearing bitmap, blk:%u, old bit:%d",
2154 "when clearing bitmap, blk:%u, old bit:%d", 2172 blkaddr, exist);
2155 blkaddr, exist);
2156 f2fs_bug_on(sbi, 1); 2173 f2fs_bug_on(sbi, 1);
2157 } 2174 }
2158#endif 2175#endif
2159 if (unlikely(!exist)) { 2176 if (unlikely(!exist)) {
2160 f2fs_msg(sbi->sb, KERN_ERR, 2177 f2fs_err(sbi, "Bitmap was wrongly cleared, blk:%u",
2161 "Bitmap was wrongly cleared, blk:%u", blkaddr); 2178 blkaddr);
2162 f2fs_bug_on(sbi, 1); 2179 f2fs_bug_on(sbi, 1);
2163 se->valid_blocks++; 2180 se->valid_blocks++;
2164 del = 0; 2181 del = 0;
@@ -2640,6 +2657,39 @@ static void allocate_segment_by_default(struct f2fs_sb_info *sbi,
2640 stat_inc_seg_type(sbi, curseg); 2657 stat_inc_seg_type(sbi, curseg);
2641} 2658}
2642 2659
2660void allocate_segment_for_resize(struct f2fs_sb_info *sbi, int type,
2661 unsigned int start, unsigned int end)
2662{
2663 struct curseg_info *curseg = CURSEG_I(sbi, type);
2664 unsigned int segno;
2665
2666 down_read(&SM_I(sbi)->curseg_lock);
2667 mutex_lock(&curseg->curseg_mutex);
2668 down_write(&SIT_I(sbi)->sentry_lock);
2669
2670 segno = CURSEG_I(sbi, type)->segno;
2671 if (segno < start || segno > end)
2672 goto unlock;
2673
2674 if (f2fs_need_SSR(sbi) && get_ssr_segment(sbi, type))
2675 change_curseg(sbi, type);
2676 else
2677 new_curseg(sbi, type, true);
2678
2679 stat_inc_seg_type(sbi, curseg);
2680
2681 locate_dirty_segment(sbi, segno);
2682unlock:
2683 up_write(&SIT_I(sbi)->sentry_lock);
2684
2685 if (segno != curseg->segno)
2686 f2fs_notice(sbi, "For resize: curseg of type %d: %u ==> %u",
2687 type, segno, curseg->segno);
2688
2689 mutex_unlock(&curseg->curseg_mutex);
2690 up_read(&SM_I(sbi)->curseg_lock);
2691}
2692
2643void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi) 2693void f2fs_allocate_new_segments(struct f2fs_sb_info *sbi)
2644{ 2694{
2645 struct curseg_info *curseg; 2695 struct curseg_info *curseg;
@@ -2772,9 +2822,8 @@ int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
2772 goto out; 2822 goto out;
2773 2823
2774 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) { 2824 if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
2775 f2fs_msg(sbi->sb, KERN_WARNING, 2825 f2fs_warn(sbi, "Found FS corruption, run fsck to fix.");
2776 "Found FS corruption, run fsck to fix."); 2826 return -EFSCORRUPTED;
2777 return -EIO;
2778 } 2827 }
2779 2828
2780 /* start/end segment number in main_area */ 2829 /* start/end segment number in main_area */
@@ -3197,12 +3246,17 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
3197 3246
3198 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) { 3247 if (!IS_DATASEG(get_seg_entry(sbi, segno)->type)) {
3199 set_sbi_flag(sbi, SBI_NEED_FSCK); 3248 set_sbi_flag(sbi, SBI_NEED_FSCK);
3200 return -EFAULT; 3249 f2fs_warn(sbi, "%s: incorrect segment(%u) type, run fsck to fix.",
3250 __func__, segno);
3251 return -EFSCORRUPTED;
3201 } 3252 }
3202 3253
3203 stat_inc_inplace_blocks(fio->sbi); 3254 stat_inc_inplace_blocks(fio->sbi);
3204 3255
3205 err = f2fs_submit_page_bio(fio); 3256 if (fio->bio)
3257 err = f2fs_merge_page_bio(fio);
3258 else
3259 err = f2fs_submit_page_bio(fio);
3206 if (!err) { 3260 if (!err) {
3207 update_device_state(fio); 3261 update_device_state(fio);
3208 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE); 3262 f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
@@ -3393,6 +3447,11 @@ static int read_compacted_summaries(struct f2fs_sb_info *sbi)
3393 seg_i = CURSEG_I(sbi, i); 3447 seg_i = CURSEG_I(sbi, i);
3394 segno = le32_to_cpu(ckpt->cur_data_segno[i]); 3448 segno = le32_to_cpu(ckpt->cur_data_segno[i]);
3395 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]); 3449 blk_off = le16_to_cpu(ckpt->cur_data_blkoff[i]);
3450 if (blk_off > ENTRIES_IN_SUM) {
3451 f2fs_bug_on(sbi, 1);
3452 f2fs_put_page(page, 1);
3453 return -EFAULT;
3454 }
3396 seg_i->next_segno = segno; 3455 seg_i->next_segno = segno;
3397 reset_curseg(sbi, i, 0); 3456 reset_curseg(sbi, i, 0);
3398 seg_i->alloc_type = ckpt->alloc_type[i]; 3457 seg_i->alloc_type = ckpt->alloc_type[i];
@@ -3530,8 +3589,11 @@ static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
3530 3589
3531 /* sanity check for summary blocks */ 3590 /* sanity check for summary blocks */
3532 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES || 3591 if (nats_in_cursum(nat_j) > NAT_JOURNAL_ENTRIES ||
3533 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) 3592 sits_in_cursum(sit_j) > SIT_JOURNAL_ENTRIES) {
3593 f2fs_err(sbi, "invalid journal entries nats %u sits %u\n",
3594 nats_in_cursum(nat_j), sits_in_cursum(sit_j));
3534 return -EINVAL; 3595 return -EINVAL;
3596 }
3535 3597
3536 return 0; 3598 return 0;
3537} 3599}
@@ -3762,7 +3824,7 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3762 struct f2fs_journal *journal = curseg->journal; 3824 struct f2fs_journal *journal = curseg->journal;
3763 struct sit_entry_set *ses, *tmp; 3825 struct sit_entry_set *ses, *tmp;
3764 struct list_head *head = &SM_I(sbi)->sit_entry_set; 3826 struct list_head *head = &SM_I(sbi)->sit_entry_set;
3765 bool to_journal = true; 3827 bool to_journal = !is_sbi_flag_set(sbi, SBI_IS_RESIZEFS);
3766 struct seg_entry *se; 3828 struct seg_entry *se;
3767 3829
3768 down_write(&sit_i->sentry_lock); 3830 down_write(&sit_i->sentry_lock);
@@ -3781,7 +3843,8 @@ void f2fs_flush_sit_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc)
3781 * entries, remove all entries from journal and add and account 3843 * entries, remove all entries from journal and add and account
3782 * them in sit entry set. 3844 * them in sit entry set.
3783 */ 3845 */
3784 if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL)) 3846 if (!__has_cursum_space(journal, sit_i->dirty_sentries, SIT_JOURNAL) ||
3847 !to_journal)
3785 remove_sits_in_journal(sbi); 3848 remove_sits_in_journal(sbi);
3786 3849
3787 /* 3850 /*
@@ -4096,11 +4159,10 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
4096 4159
4097 start = le32_to_cpu(segno_in_journal(journal, i)); 4160 start = le32_to_cpu(segno_in_journal(journal, i));
4098 if (start >= MAIN_SEGS(sbi)) { 4161 if (start >= MAIN_SEGS(sbi)) {
4099 f2fs_msg(sbi->sb, KERN_ERR, 4162 f2fs_err(sbi, "Wrong journal entry on segno %u",
4100 "Wrong journal entry on segno %u", 4163 start);
4101 start);
4102 set_sbi_flag(sbi, SBI_NEED_FSCK); 4164 set_sbi_flag(sbi, SBI_NEED_FSCK);
4103 err = -EINVAL; 4165 err = -EFSCORRUPTED;
4104 break; 4166 break;
4105 } 4167 }
4106 4168
@@ -4137,11 +4199,10 @@ static int build_sit_entries(struct f2fs_sb_info *sbi)
4137 up_read(&curseg->journal_rwsem); 4199 up_read(&curseg->journal_rwsem);
4138 4200
4139 if (!err && total_node_blocks != valid_node_count(sbi)) { 4201 if (!err && total_node_blocks != valid_node_count(sbi)) {
4140 f2fs_msg(sbi->sb, KERN_ERR, 4202 f2fs_err(sbi, "SIT is corrupted node# %u vs %u",
4141 "SIT is corrupted node# %u vs %u", 4203 total_node_blocks, valid_node_count(sbi));
4142 total_node_blocks, valid_node_count(sbi));
4143 set_sbi_flag(sbi, SBI_NEED_FSCK); 4204 set_sbi_flag(sbi, SBI_NEED_FSCK);
4144 err = -EINVAL; 4205 err = -EFSCORRUPTED;
4145 } 4206 }
4146 4207
4147 return err; 4208 return err;
@@ -4232,6 +4293,39 @@ static int build_dirty_segmap(struct f2fs_sb_info *sbi)
4232 return init_victim_secmap(sbi); 4293 return init_victim_secmap(sbi);
4233} 4294}
4234 4295
4296static int sanity_check_curseg(struct f2fs_sb_info *sbi)
4297{
4298 int i;
4299
4300 /*
4301 * In LFS/SSR curseg, .next_blkoff should point to an unused blkaddr;
4302 * In LFS curseg, all blkaddr after .next_blkoff should be unused.
4303 */
4304 for (i = 0; i < NO_CHECK_TYPE; i++) {
4305 struct curseg_info *curseg = CURSEG_I(sbi, i);
4306 struct seg_entry *se = get_seg_entry(sbi, curseg->segno);
4307 unsigned int blkofs = curseg->next_blkoff;
4308
4309 if (f2fs_test_bit(blkofs, se->cur_valid_map))
4310 goto out;
4311
4312 if (curseg->alloc_type == SSR)
4313 continue;
4314
4315 for (blkofs += 1; blkofs < sbi->blocks_per_seg; blkofs++) {
4316 if (!f2fs_test_bit(blkofs, se->cur_valid_map))
4317 continue;
4318out:
4319 f2fs_err(sbi,
4320 "Current segment's next free block offset is inconsistent with bitmap, logtype:%u, segno:%u, type:%u, next_blkoff:%u, blkofs:%u",
4321 i, curseg->segno, curseg->alloc_type,
4322 curseg->next_blkoff, blkofs);
4323 return -EFSCORRUPTED;
4324 }
4325 }
4326 return 0;
4327}
4328
4235/* 4329/*
4236 * Update min, max modified time for cost-benefit GC algorithm 4330 * Update min, max modified time for cost-benefit GC algorithm
4237 */ 4331 */
@@ -4327,6 +4421,10 @@ int f2fs_build_segment_manager(struct f2fs_sb_info *sbi)
4327 if (err) 4421 if (err)
4328 return err; 4422 return err;
4329 4423
4424 err = sanity_check_curseg(sbi);
4425 if (err)
4426 return err;
4427
4330 init_min_max_mtime(sbi); 4428 init_min_max_mtime(sbi);
4331 return 0; 4429 return 0;
4332} 4430}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 429007b8036e..b74602813a05 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -109,7 +109,7 @@
109#define START_SEGNO(segno) \ 109#define START_SEGNO(segno) \
110 (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK) 110 (SIT_BLOCK_OFFSET(segno) * SIT_ENTRY_PER_BLOCK)
111#define SIT_BLK_CNT(sbi) \ 111#define SIT_BLK_CNT(sbi) \
112 ((MAIN_SEGS(sbi) + SIT_ENTRY_PER_BLOCK - 1) / SIT_ENTRY_PER_BLOCK) 112 DIV_ROUND_UP(MAIN_SEGS(sbi), SIT_ENTRY_PER_BLOCK)
113#define f2fs_bitmap_size(nr) \ 113#define f2fs_bitmap_size(nr) \
114 (BITS_TO_LONGS(nr) * sizeof(unsigned long)) 114 (BITS_TO_LONGS(nr) * sizeof(unsigned long))
115 115
@@ -693,21 +693,19 @@ static inline int check_block_count(struct f2fs_sb_info *sbi,
693 } while (cur_pos < sbi->blocks_per_seg); 693 } while (cur_pos < sbi->blocks_per_seg);
694 694
695 if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) { 695 if (unlikely(GET_SIT_VBLOCKS(raw_sit) != valid_blocks)) {
696 f2fs_msg(sbi->sb, KERN_ERR, 696 f2fs_err(sbi, "Mismatch valid blocks %d vs. %d",
697 "Mismatch valid blocks %d vs. %d", 697 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
698 GET_SIT_VBLOCKS(raw_sit), valid_blocks);
699 set_sbi_flag(sbi, SBI_NEED_FSCK); 698 set_sbi_flag(sbi, SBI_NEED_FSCK);
700 return -EINVAL; 699 return -EFSCORRUPTED;
701 } 700 }
702 701
703 /* check segment usage, and check boundary of a given segment number */ 702 /* check segment usage, and check boundary of a given segment number */
704 if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg 703 if (unlikely(GET_SIT_VBLOCKS(raw_sit) > sbi->blocks_per_seg
705 || segno > TOTAL_SEGS(sbi) - 1)) { 704 || segno > TOTAL_SEGS(sbi) - 1)) {
706 f2fs_msg(sbi->sb, KERN_ERR, 705 f2fs_err(sbi, "Wrong valid blocks %d or segno %u",
707 "Wrong valid blocks %d or segno %u", 706 GET_SIT_VBLOCKS(raw_sit), segno);
708 GET_SIT_VBLOCKS(raw_sit), segno);
709 set_sbi_flag(sbi, SBI_NEED_FSCK); 707 set_sbi_flag(sbi, SBI_NEED_FSCK);
710 return -EINVAL; 708 return -EFSCORRUPTED;
711 } 709 }
712 return 0; 710 return 0;
713} 711}
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 6b959bbb336a..d95a681ef7c9 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -136,7 +136,10 @@ enum {
136 Opt_alloc, 136 Opt_alloc,
137 Opt_fsync, 137 Opt_fsync,
138 Opt_test_dummy_encryption, 138 Opt_test_dummy_encryption,
139 Opt_checkpoint, 139 Opt_checkpoint_disable,
140 Opt_checkpoint_disable_cap,
141 Opt_checkpoint_disable_cap_perc,
142 Opt_checkpoint_enable,
140 Opt_err, 143 Opt_err,
141}; 144};
142 145
@@ -195,45 +198,52 @@ static match_table_t f2fs_tokens = {
195 {Opt_alloc, "alloc_mode=%s"}, 198 {Opt_alloc, "alloc_mode=%s"},
196 {Opt_fsync, "fsync_mode=%s"}, 199 {Opt_fsync, "fsync_mode=%s"},
197 {Opt_test_dummy_encryption, "test_dummy_encryption"}, 200 {Opt_test_dummy_encryption, "test_dummy_encryption"},
198 {Opt_checkpoint, "checkpoint=%s"}, 201 {Opt_checkpoint_disable, "checkpoint=disable"},
202 {Opt_checkpoint_disable_cap, "checkpoint=disable:%u"},
203 {Opt_checkpoint_disable_cap_perc, "checkpoint=disable:%u%%"},
204 {Opt_checkpoint_enable, "checkpoint=enable"},
199 {Opt_err, NULL}, 205 {Opt_err, NULL},
200}; 206};
201 207
202void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) 208void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
203{ 209{
204 struct va_format vaf; 210 struct va_format vaf;
205 va_list args; 211 va_list args;
212 int level;
206 213
207 va_start(args, fmt); 214 va_start(args, fmt);
208 vaf.fmt = fmt; 215
216 level = printk_get_level(fmt);
217 vaf.fmt = printk_skip_level(fmt);
209 vaf.va = &args; 218 vaf.va = &args;
210 printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); 219 printk("%c%cF2FS-fs (%s): %pV\n",
220 KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
221
211 va_end(args); 222 va_end(args);
212} 223}
213 224
214static inline void limit_reserve_root(struct f2fs_sb_info *sbi) 225static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
215{ 226{
216 block_t limit = (sbi->user_block_count << 1) / 1000; 227 block_t limit = min((sbi->user_block_count << 1) / 1000,
228 sbi->user_block_count - sbi->reserved_blocks);
217 229
218 /* limit is 0.2% */ 230 /* limit is 0.2% */
219 if (test_opt(sbi, RESERVE_ROOT) && 231 if (test_opt(sbi, RESERVE_ROOT) &&
220 F2FS_OPTION(sbi).root_reserved_blocks > limit) { 232 F2FS_OPTION(sbi).root_reserved_blocks > limit) {
221 F2FS_OPTION(sbi).root_reserved_blocks = limit; 233 F2FS_OPTION(sbi).root_reserved_blocks = limit;
222 f2fs_msg(sbi->sb, KERN_INFO, 234 f2fs_info(sbi, "Reduce reserved blocks for root = %u",
223 "Reduce reserved blocks for root = %u", 235 F2FS_OPTION(sbi).root_reserved_blocks);
224 F2FS_OPTION(sbi).root_reserved_blocks);
225 } 236 }
226 if (!test_opt(sbi, RESERVE_ROOT) && 237 if (!test_opt(sbi, RESERVE_ROOT) &&
227 (!uid_eq(F2FS_OPTION(sbi).s_resuid, 238 (!uid_eq(F2FS_OPTION(sbi).s_resuid,
228 make_kuid(&init_user_ns, F2FS_DEF_RESUID)) || 239 make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
229 !gid_eq(F2FS_OPTION(sbi).s_resgid, 240 !gid_eq(F2FS_OPTION(sbi).s_resgid,
230 make_kgid(&init_user_ns, F2FS_DEF_RESGID)))) 241 make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
231 f2fs_msg(sbi->sb, KERN_INFO, 242 f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
232 "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", 243 from_kuid_munged(&init_user_ns,
233 from_kuid_munged(&init_user_ns, 244 F2FS_OPTION(sbi).s_resuid),
234 F2FS_OPTION(sbi).s_resuid), 245 from_kgid_munged(&init_user_ns,
235 from_kgid_munged(&init_user_ns, 246 F2FS_OPTION(sbi).s_resgid));
236 F2FS_OPTION(sbi).s_resgid));
237} 247}
238 248
239static void init_once(void *foo) 249static void init_once(void *foo)
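f2fs_printk() now derives the severity from a KERN_* prefix embedded in the format string, which is why the converted call sites pass no level argument. The level-specific wrappers are expected to live in fs/f2fs/f2fs.h (not shown in these hunks), roughly as:

	__printf(2, 3)
	void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);

	#define f2fs_err(sbi, fmt, ...)					\
		f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
	#define f2fs_warn(sbi, fmt, ...)				\
		f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
	#define f2fs_notice(sbi, fmt, ...)				\
		f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
	#define f2fs_info(sbi, fmt, ...)				\
		f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)
	#define f2fs_debug(sbi, fmt, ...)				\
		f2fs_printk(sbi, KERN_DEBUG fmt, ##__VA_ARGS__)

printk_get_level()/printk_skip_level() then split that prefix back off, so each message is emitted once, at the requested level, behind a single "F2FS-fs (%s):" prefix.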
@@ -254,35 +264,29 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype,
254 int ret = -EINVAL; 264 int ret = -EINVAL;
255 265
256 if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) { 266 if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
257 f2fs_msg(sb, KERN_ERR, 267 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
258 "Cannot change journaled "
259 "quota options when quota turned on");
260 return -EINVAL; 268 return -EINVAL;
261 } 269 }
262 if (f2fs_sb_has_quota_ino(sbi)) { 270 if (f2fs_sb_has_quota_ino(sbi)) {
263 f2fs_msg(sb, KERN_INFO, 271 f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
264 "QUOTA feature is enabled, so ignore qf_name");
265 return 0; 272 return 0;
266 } 273 }
267 274
268 qname = match_strdup(args); 275 qname = match_strdup(args);
269 if (!qname) { 276 if (!qname) {
270 f2fs_msg(sb, KERN_ERR, 277 f2fs_err(sbi, "Not enough memory for storing quotafile name");
271 "Not enough memory for storing quotafile name");
272 return -ENOMEM; 278 return -ENOMEM;
273 } 279 }
274 if (F2FS_OPTION(sbi).s_qf_names[qtype]) { 280 if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
275 if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0) 281 if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
276 ret = 0; 282 ret = 0;
277 else 283 else
278 f2fs_msg(sb, KERN_ERR, 284 f2fs_err(sbi, "%s quota file already specified",
279 "%s quota file already specified",
280 QTYPE2NAME(qtype)); 285 QTYPE2NAME(qtype));
281 goto errout; 286 goto errout;
282 } 287 }
283 if (strchr(qname, '/')) { 288 if (strchr(qname, '/')) {
284 f2fs_msg(sb, KERN_ERR, 289 f2fs_err(sbi, "quotafile must be on filesystem root");
285 "quotafile must be on filesystem root");
286 goto errout; 290 goto errout;
287 } 291 }
288 F2FS_OPTION(sbi).s_qf_names[qtype] = qname; 292 F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
@@ -298,8 +302,7 @@ static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
298 struct f2fs_sb_info *sbi = F2FS_SB(sb); 302 struct f2fs_sb_info *sbi = F2FS_SB(sb);
299 303
300 if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) { 304 if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
301 f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options" 305 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
302 " when quota turned on");
303 return -EINVAL; 306 return -EINVAL;
304 } 307 }
305 kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]); 308 kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
@@ -315,8 +318,7 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
315 * to support legacy quotas in quota files. 318 * to support legacy quotas in quota files.
316 */ 319 */
317 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) { 320 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
318 f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. " 321 f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
319 "Cannot enable project quota enforcement.");
320 return -1; 322 return -1;
321 } 323 }
322 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || 324 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
@@ -336,21 +338,18 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
336 338
337 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) || 339 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
338 test_opt(sbi, PRJQUOTA)) { 340 test_opt(sbi, PRJQUOTA)) {
339 f2fs_msg(sbi->sb, KERN_ERR, "old and new quota " 341 f2fs_err(sbi, "old and new quota format mixing");
340 "format mixing");
341 return -1; 342 return -1;
342 } 343 }
343 344
344 if (!F2FS_OPTION(sbi).s_jquota_fmt) { 345 if (!F2FS_OPTION(sbi).s_jquota_fmt) {
345 f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format " 346 f2fs_err(sbi, "journaled quota format not specified");
346 "not specified");
347 return -1; 347 return -1;
348 } 348 }
349 } 349 }
350 350
351 if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) { 351 if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
352 f2fs_msg(sbi->sb, KERN_INFO, 352 f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
353 "QUOTA feature is enabled, so ignore jquota_fmt");
354 F2FS_OPTION(sbi).s_jquota_fmt = 0; 353 F2FS_OPTION(sbi).s_jquota_fmt = 0;
355 } 354 }
356 return 0; 355 return 0;
@@ -418,8 +417,7 @@ static int parse_options(struct super_block *sb, char *options)
418 break; 417 break;
419 case Opt_nodiscard: 418 case Opt_nodiscard:
420 if (f2fs_sb_has_blkzoned(sbi)) { 419 if (f2fs_sb_has_blkzoned(sbi)) {
421 f2fs_msg(sb, KERN_WARNING, 420 f2fs_warn(sbi, "discard is required for zoned block devices");
422 "discard is required for zoned block devices");
423 return -EINVAL; 421 return -EINVAL;
424 } 422 }
425 clear_opt(sbi, DISCARD); 423 clear_opt(sbi, DISCARD);
@@ -451,20 +449,16 @@ static int parse_options(struct super_block *sb, char *options)
451 break; 449 break;
452#else 450#else
453 case Opt_user_xattr: 451 case Opt_user_xattr:
454 f2fs_msg(sb, KERN_INFO, 452 f2fs_info(sbi, "user_xattr options not supported");
455 "user_xattr options not supported");
456 break; 453 break;
457 case Opt_nouser_xattr: 454 case Opt_nouser_xattr:
458 f2fs_msg(sb, KERN_INFO, 455 f2fs_info(sbi, "nouser_xattr options not supported");
459 "nouser_xattr options not supported");
460 break; 456 break;
461 case Opt_inline_xattr: 457 case Opt_inline_xattr:
462 f2fs_msg(sb, KERN_INFO, 458 f2fs_info(sbi, "inline_xattr options not supported");
463 "inline_xattr options not supported");
464 break; 459 break;
465 case Opt_noinline_xattr: 460 case Opt_noinline_xattr:
466 f2fs_msg(sb, KERN_INFO, 461 f2fs_info(sbi, "noinline_xattr options not supported");
467 "noinline_xattr options not supported");
468 break; 462 break;
469#endif 463#endif
470#ifdef CONFIG_F2FS_FS_POSIX_ACL 464#ifdef CONFIG_F2FS_FS_POSIX_ACL
@@ -476,10 +470,10 @@ static int parse_options(struct super_block *sb, char *options)
476 break; 470 break;
477#else 471#else
478 case Opt_acl: 472 case Opt_acl:
479 f2fs_msg(sb, KERN_INFO, "acl options not supported"); 473 f2fs_info(sbi, "acl options not supported");
480 break; 474 break;
481 case Opt_noacl: 475 case Opt_noacl:
482 f2fs_msg(sb, KERN_INFO, "noacl options not supported"); 476 f2fs_info(sbi, "noacl options not supported");
483 break; 477 break;
484#endif 478#endif
485 case Opt_active_logs: 479 case Opt_active_logs:
@@ -529,9 +523,8 @@ static int parse_options(struct super_block *sb, char *options)
529 if (args->from && match_int(args, &arg)) 523 if (args->from && match_int(args, &arg))
530 return -EINVAL; 524 return -EINVAL;
531 if (test_opt(sbi, RESERVE_ROOT)) { 525 if (test_opt(sbi, RESERVE_ROOT)) {
532 f2fs_msg(sb, KERN_INFO, 526 f2fs_info(sbi, "Preserve previous reserve_root=%u",
533 "Preserve previous reserve_root=%u", 527 F2FS_OPTION(sbi).root_reserved_blocks);
534 F2FS_OPTION(sbi).root_reserved_blocks);
535 } else { 528 } else {
536 F2FS_OPTION(sbi).root_reserved_blocks = arg; 529 F2FS_OPTION(sbi).root_reserved_blocks = arg;
537 set_opt(sbi, RESERVE_ROOT); 530 set_opt(sbi, RESERVE_ROOT);
@@ -542,8 +535,7 @@ static int parse_options(struct super_block *sb, char *options)
542 return -EINVAL; 535 return -EINVAL;
543 uid = make_kuid(current_user_ns(), arg); 536 uid = make_kuid(current_user_ns(), arg);
544 if (!uid_valid(uid)) { 537 if (!uid_valid(uid)) {
545 f2fs_msg(sb, KERN_ERR, 538 f2fs_err(sbi, "Invalid uid value %d", arg);
546 "Invalid uid value %d", arg);
547 return -EINVAL; 539 return -EINVAL;
548 } 540 }
549 F2FS_OPTION(sbi).s_resuid = uid; 541 F2FS_OPTION(sbi).s_resuid = uid;
@@ -553,8 +545,7 @@ static int parse_options(struct super_block *sb, char *options)
553 return -EINVAL; 545 return -EINVAL;
554 gid = make_kgid(current_user_ns(), arg); 546 gid = make_kgid(current_user_ns(), arg);
555 if (!gid_valid(gid)) { 547 if (!gid_valid(gid)) {
556 f2fs_msg(sb, KERN_ERR, 548 f2fs_err(sbi, "Invalid gid value %d", arg);
557 "Invalid gid value %d", arg);
558 return -EINVAL; 549 return -EINVAL;
559 } 550 }
560 F2FS_OPTION(sbi).s_resgid = gid; 551 F2FS_OPTION(sbi).s_resgid = gid;
@@ -567,9 +558,7 @@ static int parse_options(struct super_block *sb, char *options)
567 if (strlen(name) == 8 && 558 if (strlen(name) == 8 &&
568 !strncmp(name, "adaptive", 8)) { 559 !strncmp(name, "adaptive", 8)) {
569 if (f2fs_sb_has_blkzoned(sbi)) { 560 if (f2fs_sb_has_blkzoned(sbi)) {
570 f2fs_msg(sb, KERN_WARNING, 561 f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
571 "adaptive mode is not allowed with "
572 "zoned block device feature");
573 kvfree(name); 562 kvfree(name);
574 return -EINVAL; 563 return -EINVAL;
575 } 564 }
@@ -587,9 +576,8 @@ static int parse_options(struct super_block *sb, char *options)
587 if (args->from && match_int(args, &arg)) 576 if (args->from && match_int(args, &arg))
588 return -EINVAL; 577 return -EINVAL;
589 if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) { 578 if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
590 f2fs_msg(sb, KERN_WARNING, 579 f2fs_warn(sbi, "Not support %d, larger than %d",
591 "Not support %d, larger than %d", 580 1 << arg, BIO_MAX_PAGES);
592 1 << arg, BIO_MAX_PAGES);
593 return -EINVAL; 581 return -EINVAL;
594 } 582 }
595 F2FS_OPTION(sbi).write_io_size_bits = arg; 583 F2FS_OPTION(sbi).write_io_size_bits = arg;
@@ -610,13 +598,11 @@ static int parse_options(struct super_block *sb, char *options)
610 break; 598 break;
611#else 599#else
612 case Opt_fault_injection: 600 case Opt_fault_injection:
613 f2fs_msg(sb, KERN_INFO, 601 f2fs_info(sbi, "fault_injection options not supported");
614 "fault_injection options not supported");
615 break; 602 break;
616 603
617 case Opt_fault_type: 604 case Opt_fault_type:
618 f2fs_msg(sb, KERN_INFO, 605 f2fs_info(sbi, "fault_type options not supported");
619 "fault_type options not supported");
620 break; 606 break;
621#endif 607#endif
622 case Opt_lazytime: 608 case Opt_lazytime:
@@ -696,8 +682,7 @@ static int parse_options(struct super_block *sb, char *options)
696 case Opt_jqfmt_vfsv0: 682 case Opt_jqfmt_vfsv0:
697 case Opt_jqfmt_vfsv1: 683 case Opt_jqfmt_vfsv1:
698 case Opt_noquota: 684 case Opt_noquota:
699 f2fs_msg(sb, KERN_INFO, 685 f2fs_info(sbi, "quota operations not supported");
700 "quota operations not supported");
701 break; 686 break;
702#endif 687#endif
703 case Opt_whint: 688 case Opt_whint:
@@ -759,39 +744,44 @@ static int parse_options(struct super_block *sb, char *options)
759 case Opt_test_dummy_encryption: 744 case Opt_test_dummy_encryption:
760#ifdef CONFIG_FS_ENCRYPTION 745#ifdef CONFIG_FS_ENCRYPTION
761 if (!f2fs_sb_has_encrypt(sbi)) { 746 if (!f2fs_sb_has_encrypt(sbi)) {
762 f2fs_msg(sb, KERN_ERR, "Encrypt feature is off"); 747 f2fs_err(sbi, "Encrypt feature is off");
763 return -EINVAL; 748 return -EINVAL;
764 } 749 }
765 750
766 F2FS_OPTION(sbi).test_dummy_encryption = true; 751 F2FS_OPTION(sbi).test_dummy_encryption = true;
767 f2fs_msg(sb, KERN_INFO, 752 f2fs_info(sbi, "Test dummy encryption mode enabled");
768 "Test dummy encryption mode enabled");
769#else 753#else
770 f2fs_msg(sb, KERN_INFO, 754 f2fs_info(sbi, "Test dummy encryption mount option ignored");
771 "Test dummy encryption mount option ignored");
772#endif 755#endif
773 break; 756 break;
774 case Opt_checkpoint: 757 case Opt_checkpoint_disable_cap_perc:
775 name = match_strdup(&args[0]); 758 if (args->from && match_int(args, &arg))
776 if (!name)
777 return -ENOMEM;
778
779 if (strlen(name) == 6 &&
780 !strncmp(name, "enable", 6)) {
781 clear_opt(sbi, DISABLE_CHECKPOINT);
782 } else if (strlen(name) == 7 &&
783 !strncmp(name, "disable", 7)) {
784 set_opt(sbi, DISABLE_CHECKPOINT);
785 } else {
786 kvfree(name);
787 return -EINVAL; 759 return -EINVAL;
788 } 760 if (arg < 0 || arg > 100)
789 kvfree(name); 761 return -EINVAL;
762 if (arg == 100)
763 F2FS_OPTION(sbi).unusable_cap =
764 sbi->user_block_count;
765 else
766 F2FS_OPTION(sbi).unusable_cap =
767 (sbi->user_block_count / 100) * arg;
768 set_opt(sbi, DISABLE_CHECKPOINT);
769 break;
770 case Opt_checkpoint_disable_cap:
771 if (args->from && match_int(args, &arg))
772 return -EINVAL;
773 F2FS_OPTION(sbi).unusable_cap = arg;
774 set_opt(sbi, DISABLE_CHECKPOINT);
775 break;
776 case Opt_checkpoint_disable:
777 set_opt(sbi, DISABLE_CHECKPOINT);
778 break;
779 case Opt_checkpoint_enable:
780 clear_opt(sbi, DISABLE_CHECKPOINT);
790 break; 781 break;
791 default: 782 default:
792 f2fs_msg(sb, KERN_ERR, 783 f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
793 "Unrecognized mount option \"%s\" or missing value", 784 p);
794 p);
795 return -EINVAL; 785 return -EINVAL;
796 } 786 }
797 } 787 }
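Both cap variants feed F2FS_OPTION(sbi).unusable_cap, which f2fs_disable_cp_again() later compares against f2fs_get_unusable_blocks() (see the segment.c hunks above). As an illustration with made-up numbers: on a filesystem with user_block_count = 1,000,000, checkpoint=disable:10% resolves to unusable_cap = (1000000 / 100) * 10 = 100,000 blocks, checkpoint=disable:4096 caps it at 4096 blocks directly, and a bare checkpoint=disable leaves the default cap of 0, so any nonzero unusable count makes f2fs_disable_checkpoint() bail out with -EAGAIN.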
@@ -800,23 +790,18 @@ static int parse_options(struct super_block *sb, char *options)
800 return -EINVAL; 790 return -EINVAL;
801#else 791#else
802 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) { 792 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
803 f2fs_msg(sbi->sb, KERN_INFO, 793 f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
804 "Filesystem with quota feature cannot be mounted RDWR "
805 "without CONFIG_QUOTA");
806 return -EINVAL; 794 return -EINVAL;
807 } 795 }
808 if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) { 796 if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
809 f2fs_msg(sb, KERN_ERR, 797 f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
810 "Filesystem with project quota feature cannot be "
811 "mounted RDWR without CONFIG_QUOTA");
812 return -EINVAL; 798 return -EINVAL;
813 } 799 }
814#endif 800#endif
815 801
816 if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) { 802 if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
817 f2fs_msg(sb, KERN_ERR, 803 f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
818 "Should set mode=lfs with %uKB-sized IO", 804 F2FS_IO_SIZE_KB(sbi));
819 F2FS_IO_SIZE_KB(sbi));
820 return -EINVAL; 805 return -EINVAL;
821 } 806 }
822 807
@@ -825,15 +810,11 @@ static int parse_options(struct super_block *sb, char *options)
825 810
826 if (!f2fs_sb_has_extra_attr(sbi) || 811 if (!f2fs_sb_has_extra_attr(sbi) ||
827 !f2fs_sb_has_flexible_inline_xattr(sbi)) { 812 !f2fs_sb_has_flexible_inline_xattr(sbi)) {
828 f2fs_msg(sb, KERN_ERR, 813 f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
829 "extra_attr or flexible_inline_xattr "
830 "feature is off");
831 return -EINVAL; 814 return -EINVAL;
832 } 815 }
833 if (!test_opt(sbi, INLINE_XATTR)) { 816 if (!test_opt(sbi, INLINE_XATTR)) {
834 f2fs_msg(sb, KERN_ERR, 817 f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
835 "inline_xattr_size option should be "
836 "set with inline_xattr option");
837 return -EINVAL; 818 return -EINVAL;
838 } 819 }
839 820
@@ -842,16 +823,14 @@ static int parse_options(struct super_block *sb, char *options)
842 823
843 if (F2FS_OPTION(sbi).inline_xattr_size < min_size || 824 if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
844 F2FS_OPTION(sbi).inline_xattr_size > max_size) { 825 F2FS_OPTION(sbi).inline_xattr_size > max_size) {
845 f2fs_msg(sb, KERN_ERR, 826 f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
846 "inline xattr size is out of range: %d ~ %d", 827 min_size, max_size);
847 min_size, max_size);
848 return -EINVAL; 828 return -EINVAL;
849 } 829 }
850 } 830 }
851 831
852 if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) { 832 if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
853 f2fs_msg(sb, KERN_ERR, 833 f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n");
854 "LFS not compatible with checkpoint=disable\n");
855 return -EINVAL; 834 return -EINVAL;
856 } 835 }
857 836
@@ -1313,6 +1292,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
1313 seq_puts(seq, ",disable_roll_forward"); 1292 seq_puts(seq, ",disable_roll_forward");
1314 if (test_opt(sbi, DISCARD)) 1293 if (test_opt(sbi, DISCARD))
1315 seq_puts(seq, ",discard"); 1294 seq_puts(seq, ",discard");
1295 else
1296 seq_puts(seq, ",nodiscard");
1316 if (test_opt(sbi, NOHEAP)) 1297 if (test_opt(sbi, NOHEAP))
1317 seq_puts(seq, ",no_heap"); 1298 seq_puts(seq, ",no_heap");
1318 else 1299 else
@@ -1409,8 +1390,8 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
1409 seq_printf(seq, ",alloc_mode=%s", "reuse"); 1390 seq_printf(seq, ",alloc_mode=%s", "reuse");
1410 1391
1411 if (test_opt(sbi, DISABLE_CHECKPOINT)) 1392 if (test_opt(sbi, DISABLE_CHECKPOINT))
1412 seq_puts(seq, ",checkpoint=disable"); 1393 seq_printf(seq, ",checkpoint=disable:%u",
1413 1394 F2FS_OPTION(sbi).unusable_cap);
1414 if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX) 1395 if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_POSIX)
1415 seq_printf(seq, ",fsync_mode=%s", "posix"); 1396 seq_printf(seq, ",fsync_mode=%s", "posix");
1416 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT) 1397 else if (F2FS_OPTION(sbi).fsync_mode == FSYNC_MODE_STRICT)
@@ -1439,6 +1420,7 @@ static void default_options(struct f2fs_sb_info *sbi)
1439 set_opt(sbi, EXTENT_CACHE); 1420 set_opt(sbi, EXTENT_CACHE);
1440 set_opt(sbi, NOHEAP); 1421 set_opt(sbi, NOHEAP);
1441 clear_opt(sbi, DISABLE_CHECKPOINT); 1422 clear_opt(sbi, DISABLE_CHECKPOINT);
1423 F2FS_OPTION(sbi).unusable_cap = 0;
1442 sbi->sb->s_flags |= SB_LAZYTIME; 1424 sbi->sb->s_flags |= SB_LAZYTIME;
1443 set_opt(sbi, FLUSH_MERGE); 1425 set_opt(sbi, FLUSH_MERGE);
1444 set_opt(sbi, DISCARD); 1426 set_opt(sbi, DISCARD);
@@ -1467,10 +1449,10 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
1467 struct cp_control cpc; 1449 struct cp_control cpc;
1468 int err = 0; 1450 int err = 0;
1469 int ret; 1451 int ret;
1452 block_t unusable;
1470 1453
1471 if (s_flags & SB_RDONLY) { 1454 if (s_flags & SB_RDONLY) {
1472 f2fs_msg(sbi->sb, KERN_ERR, 1455 f2fs_err(sbi, "checkpoint=disable on readonly fs");
1473 "checkpoint=disable on readonly fs");
1474 return -EINVAL; 1456 return -EINVAL;
1475 } 1457 }
1476 sbi->sb->s_flags |= SB_ACTIVE; 1458 sbi->sb->s_flags |= SB_ACTIVE;
@@ -1494,7 +1476,8 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
1494 goto restore_flag; 1476 goto restore_flag;
1495 } 1477 }
1496 1478
1497 if (f2fs_disable_cp_again(sbi)) { 1479 unusable = f2fs_get_unusable_blocks(sbi);
1480 if (f2fs_disable_cp_again(sbi, unusable)) {
1498 err = -EAGAIN; 1481 err = -EAGAIN;
1499 goto restore_flag; 1482 goto restore_flag;
1500 } 1483 }
@@ -1507,7 +1490,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
1507 goto out_unlock; 1490 goto out_unlock;
1508 1491
1509 spin_lock(&sbi->stat_lock); 1492 spin_lock(&sbi->stat_lock);
1510 sbi->unusable_block_count = 0; 1493 sbi->unusable_block_count = unusable;
1511 spin_unlock(&sbi->stat_lock); 1494 spin_unlock(&sbi->stat_lock);
1512 1495
1513out_unlock: 1496out_unlock:
@@ -1572,8 +1555,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
1572 /* recover superblocks we couldn't write due to previous RO mount */ 1555 /* recover superblocks we couldn't write due to previous RO mount */
1573 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { 1556 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
1574 err = f2fs_commit_super(sbi, false); 1557 err = f2fs_commit_super(sbi, false);
1575 f2fs_msg(sb, KERN_INFO, 1558 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
1576 "Try to recover all the superblocks, ret: %d", err); 1559 err);
1577 if (!err) 1560 if (!err)
1578 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE); 1561 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
1579 } 1562 }
@@ -1614,15 +1597,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
1614 /* disallow enable/disable extent_cache dynamically */ 1597 /* disallow enable/disable extent_cache dynamically */
1615 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) { 1598 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
1616 err = -EINVAL; 1599 err = -EINVAL;
1617 f2fs_msg(sbi->sb, KERN_WARNING, 1600 f2fs_warn(sbi, "switch extent_cache option is not allowed");
1618 "switch extent_cache option is not allowed");
1619 goto restore_opts; 1601 goto restore_opts;
1620 } 1602 }
1621 1603
1622 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) { 1604 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
1623 err = -EINVAL; 1605 err = -EINVAL;
1624 f2fs_msg(sbi->sb, KERN_WARNING, 1606 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
1625 "disabling checkpoint not compatible with read-only");
1626 goto restore_opts; 1607 goto restore_opts;
1627 } 1608 }
1628 1609
@@ -1692,8 +1673,7 @@ skip:
1692restore_gc: 1673restore_gc:
1693 if (need_restart_gc) { 1674 if (need_restart_gc) {
1694 if (f2fs_start_gc_thread(sbi)) 1675 if (f2fs_start_gc_thread(sbi))
1695 f2fs_msg(sbi->sb, KERN_WARNING, 1676 f2fs_warn(sbi, "background gc thread has stopped");
1696 "background gc thread has stopped");
1697 } else if (need_stop_gc) { 1677 } else if (need_stop_gc) {
1698 f2fs_stop_gc_thread(sbi); 1678 f2fs_stop_gc_thread(sbi);
1699 } 1679 }
@@ -1832,8 +1812,7 @@ static qsize_t *f2fs_get_reserved_space(struct inode *inode)
1832static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) 1812static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
1833{ 1813{
1834 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) { 1814 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
1835 f2fs_msg(sbi->sb, KERN_ERR, 1815 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
1836 "quota sysfile may be corrupted, skip loading it");
1837 return 0; 1816 return 0;
1838 } 1817 }
1839 1818
@@ -1849,8 +1828,7 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
1849 if (f2fs_sb_has_quota_ino(sbi) && rdonly) { 1828 if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
1850 err = f2fs_enable_quotas(sbi->sb); 1829 err = f2fs_enable_quotas(sbi->sb);
1851 if (err) { 1830 if (err) {
1852 f2fs_msg(sbi->sb, KERN_ERR, 1831 f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
1853 "Cannot turn on quota_ino: %d", err);
1854 return 0; 1832 return 0;
1855 } 1833 }
1856 return 1; 1834 return 1;
@@ -1863,8 +1841,8 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
1863 enabled = 1; 1841 enabled = 1;
1864 continue; 1842 continue;
1865 } 1843 }
1866 f2fs_msg(sbi->sb, KERN_ERR, 1844 f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
1867 "Cannot turn on quotas: %d on %d", err, i); 1845 err, i);
1868 } 1846 }
1869 } 1847 }
1870 return enabled; 1848 return enabled;
@@ -1885,8 +1863,7 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
1885 1863
1886 qf_inode = f2fs_iget(sb, qf_inum); 1864 qf_inode = f2fs_iget(sb, qf_inum);
1887 if (IS_ERR(qf_inode)) { 1865 if (IS_ERR(qf_inode)) {
1888 f2fs_msg(sb, KERN_ERR, 1866 f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
1889 "Bad quota inode %u:%lu", type, qf_inum);
1890 return PTR_ERR(qf_inode); 1867 return PTR_ERR(qf_inode);
1891 } 1868 }
1892 1869
@@ -1899,17 +1876,17 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
1899 1876
1900static int f2fs_enable_quotas(struct super_block *sb) 1877static int f2fs_enable_quotas(struct super_block *sb)
1901{ 1878{
1879 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1902 int type, err = 0; 1880 int type, err = 0;
1903 unsigned long qf_inum; 1881 unsigned long qf_inum;
1904 bool quota_mopt[MAXQUOTAS] = { 1882 bool quota_mopt[MAXQUOTAS] = {
1905 test_opt(F2FS_SB(sb), USRQUOTA), 1883 test_opt(sbi, USRQUOTA),
1906 test_opt(F2FS_SB(sb), GRPQUOTA), 1884 test_opt(sbi, GRPQUOTA),
1907 test_opt(F2FS_SB(sb), PRJQUOTA), 1885 test_opt(sbi, PRJQUOTA),
1908 }; 1886 };
1909 1887
1910 if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) { 1888 if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
1911 f2fs_msg(sb, KERN_ERR, 1889 f2fs_err(sbi, "quota file may be corrupted, skip loading it");
1912 "quota file may be corrupted, skip loading it");
1913 return 0; 1890 return 0;
1914 } 1891 }
1915 1892
@@ -1922,10 +1899,8 @@ static int f2fs_enable_quotas(struct super_block *sb)
1922 DQUOT_USAGE_ENABLED | 1899 DQUOT_USAGE_ENABLED |
1923 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); 1900 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
1924 if (err) { 1901 if (err) {
1925 f2fs_msg(sb, KERN_ERR, 1902 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
1926 "Failed to enable quota tracking " 1903 type, err);
1927 "(type=%d, err=%d). Please run "
1928 "fsck to fix.", type, err);
1929 for (type--; type >= 0; type--) 1904 for (type--; type >= 0; type--)
1930 dquot_quota_off(sb, type); 1905 dquot_quota_off(sb, type);
1931 set_sbi_flag(F2FS_SB(sb), 1906 set_sbi_flag(F2FS_SB(sb),
@@ -1944,6 +1919,18 @@ int f2fs_quota_sync(struct super_block *sb, int type)
1944 int cnt; 1919 int cnt;
1945 int ret; 1920 int ret;
1946 1921
1922 /*
1923 * do_quotactl
1924 * f2fs_quota_sync
1925 * down_read(quota_sem)
1926 * dquot_writeback_dquots()
1927 * f2fs_dquot_commit
1928 * block_operation
1929 * down_read(quota_sem)
1930 */
1931 f2fs_lock_op(sbi);
1932
1933 down_read(&sbi->quota_sem);
1947 ret = dquot_writeback_dquots(sb, type); 1934 ret = dquot_writeback_dquots(sb, type);
1948 if (ret) 1935 if (ret)
1949 goto out; 1936 goto out;
@@ -1981,6 +1968,8 @@ int f2fs_quota_sync(struct super_block *sb, int type)
1981out: 1968out:
1982 if (ret) 1969 if (ret)
1983 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); 1970 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
1971 up_read(&sbi->quota_sem);
1972 f2fs_unlock_op(sbi);
1984 return ret; 1973 return ret;
1985} 1974}
1986 1975
@@ -2045,10 +2034,8 @@ void f2fs_quota_off_umount(struct super_block *sb)
2045 if (err) { 2034 if (err) {
2046 int ret = dquot_quota_off(sb, type); 2035 int ret = dquot_quota_off(sb, type);
2047 2036
2048 f2fs_msg(sb, KERN_ERR, 2037 f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
2049 "Fail to turn off disk quota " 2038 type, err, ret);
2050 "(type: %d, err: %d, ret:%d), Please "
2051 "run fsck to fix it.", type, err, ret);
2052 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); 2039 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2053 } 2040 }
2054 } 2041 }
@@ -2074,32 +2061,40 @@ static void f2fs_truncate_quota_inode_pages(struct super_block *sb)
2074 2061
2075static int f2fs_dquot_commit(struct dquot *dquot) 2062static int f2fs_dquot_commit(struct dquot *dquot)
2076{ 2063{
2064 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2077 int ret; 2065 int ret;
2078 2066
2067 down_read(&sbi->quota_sem);
2079 ret = dquot_commit(dquot); 2068 ret = dquot_commit(dquot);
2080 if (ret < 0) 2069 if (ret < 0)
2081 set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR); 2070 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2071 up_read(&sbi->quota_sem);
2082 return ret; 2072 return ret;
2083} 2073}
2084 2074
2085static int f2fs_dquot_acquire(struct dquot *dquot) 2075static int f2fs_dquot_acquire(struct dquot *dquot)
2086{ 2076{
2077 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2087 int ret; 2078 int ret;
2088 2079
2080 down_read(&sbi->quota_sem);
2089 ret = dquot_acquire(dquot); 2081 ret = dquot_acquire(dquot);
2090 if (ret < 0) 2082 if (ret < 0)
2091 set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR); 2083 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2092 2084 up_read(&sbi->quota_sem);
2093 return ret; 2085 return ret;
2094} 2086}
2095 2087
2096static int f2fs_dquot_release(struct dquot *dquot) 2088static int f2fs_dquot_release(struct dquot *dquot)
2097{ 2089{
2090 struct f2fs_sb_info *sbi = F2FS_SB(dquot->dq_sb);
2098 int ret; 2091 int ret;
2099 2092
2093 down_read(&sbi->quota_sem);
2100 ret = dquot_release(dquot); 2094 ret = dquot_release(dquot);
2101 if (ret < 0) 2095 if (ret < 0)
2102 set_sbi_flag(F2FS_SB(dquot->dq_sb), SBI_QUOTA_NEED_REPAIR); 2096 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2097 up_read(&sbi->quota_sem);
2103 return ret; 2098 return ret;
2104} 2099}
2105 2100
@@ -2109,22 +2104,27 @@ static int f2fs_dquot_mark_dquot_dirty(struct dquot *dquot)
2109 struct f2fs_sb_info *sbi = F2FS_SB(sb); 2104 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2110 int ret; 2105 int ret;
2111 2106
2107 down_read(&sbi->quota_sem);
2112 ret = dquot_mark_dquot_dirty(dquot); 2108 ret = dquot_mark_dquot_dirty(dquot);
2113 2109
2114 /* if we are using journalled quota */ 2110 /* if we are using journalled quota */
2115 if (is_journalled_quota(sbi)) 2111 if (is_journalled_quota(sbi))
2116 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH); 2112 set_sbi_flag(sbi, SBI_QUOTA_NEED_FLUSH);
2117 2113
2114 up_read(&sbi->quota_sem);
2118 return ret; 2115 return ret;
2119} 2116}
2120 2117
2121static int f2fs_dquot_commit_info(struct super_block *sb, int type) 2118static int f2fs_dquot_commit_info(struct super_block *sb, int type)
2122{ 2119{
2120 struct f2fs_sb_info *sbi = F2FS_SB(sb);
2123 int ret; 2121 int ret;
2124 2122
2123 down_read(&sbi->quota_sem);
2125 ret = dquot_commit_info(sb, type); 2124 ret = dquot_commit_info(sb, type);
2126 if (ret < 0) 2125 if (ret < 0)
2127 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); 2126 set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
2127 up_read(&sbi->quota_sem);
2128 return ret; 2128 return ret;
2129} 2129}
2130 2130
@@ -2341,55 +2341,49 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
2341 (segment_count << log_blocks_per_seg); 2341 (segment_count << log_blocks_per_seg);
2342 2342
2343 if (segment0_blkaddr != cp_blkaddr) { 2343 if (segment0_blkaddr != cp_blkaddr) {
2344 f2fs_msg(sb, KERN_INFO, 2344 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
2345 "Mismatch start address, segment0(%u) cp_blkaddr(%u)", 2345 segment0_blkaddr, cp_blkaddr);
2346 segment0_blkaddr, cp_blkaddr);
2347 return true; 2346 return true;
2348 } 2347 }
2349 2348
2350 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) != 2349 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
2351 sit_blkaddr) { 2350 sit_blkaddr) {
2352 f2fs_msg(sb, KERN_INFO, 2351 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
2353 "Wrong CP boundary, start(%u) end(%u) blocks(%u)", 2352 cp_blkaddr, sit_blkaddr,
2354 cp_blkaddr, sit_blkaddr, 2353 segment_count_ckpt << log_blocks_per_seg);
2355 segment_count_ckpt << log_blocks_per_seg);
2356 return true; 2354 return true;
2357 } 2355 }
2358 2356
2359 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) != 2357 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
2360 nat_blkaddr) { 2358 nat_blkaddr) {
2361 f2fs_msg(sb, KERN_INFO, 2359 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
2362 "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", 2360 sit_blkaddr, nat_blkaddr,
2363 sit_blkaddr, nat_blkaddr, 2361 segment_count_sit << log_blocks_per_seg);
2364 segment_count_sit << log_blocks_per_seg);
2365 return true; 2362 return true;
2366 } 2363 }
2367 2364
2368 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) != 2365 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
2369 ssa_blkaddr) { 2366 ssa_blkaddr) {
2370 f2fs_msg(sb, KERN_INFO, 2367 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
2371 "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", 2368 nat_blkaddr, ssa_blkaddr,
2372 nat_blkaddr, ssa_blkaddr, 2369 segment_count_nat << log_blocks_per_seg);
2373 segment_count_nat << log_blocks_per_seg);
2374 return true; 2370 return true;
2375 } 2371 }
2376 2372
2377 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) != 2373 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
2378 main_blkaddr) { 2374 main_blkaddr) {
2379 f2fs_msg(sb, KERN_INFO, 2375 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
2380 "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", 2376 ssa_blkaddr, main_blkaddr,
2381 ssa_blkaddr, main_blkaddr, 2377 segment_count_ssa << log_blocks_per_seg);
2382 segment_count_ssa << log_blocks_per_seg);
2383 return true; 2378 return true;
2384 } 2379 }
2385 2380
2386 if (main_end_blkaddr > seg_end_blkaddr) { 2381 if (main_end_blkaddr > seg_end_blkaddr) {
2387 f2fs_msg(sb, KERN_INFO, 2382 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
2388 "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)", 2383 main_blkaddr,
2389 main_blkaddr, 2384 segment0_blkaddr +
2390 segment0_blkaddr + 2385 (segment_count << log_blocks_per_seg),
2391 (segment_count << log_blocks_per_seg), 2386 segment_count_main << log_blocks_per_seg);
2392 segment_count_main << log_blocks_per_seg);
2393 return true; 2387 return true;
2394 } else if (main_end_blkaddr < seg_end_blkaddr) { 2388 } else if (main_end_blkaddr < seg_end_blkaddr) {
2395 int err = 0; 2389 int err = 0;
@@ -2406,12 +2400,11 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
2406 err = __f2fs_commit_super(bh, NULL); 2400 err = __f2fs_commit_super(bh, NULL);
2407 res = err ? "failed" : "done"; 2401 res = err ? "failed" : "done";
2408 } 2402 }
2409 f2fs_msg(sb, KERN_INFO, 2403 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%u) block(%u)",
2410 "Fix alignment : %s, start(%u) end(%u) block(%u)", 2404 res, main_blkaddr,
2411 res, main_blkaddr, 2405 segment0_blkaddr +
2412 segment0_blkaddr + 2406 (segment_count << log_blocks_per_seg),
2413 (segment_count << log_blocks_per_seg), 2407 segment_count_main << log_blocks_per_seg);
2414 segment_count_main << log_blocks_per_seg);
2415 if (err) 2408 if (err)
2416 return true; 2409 return true;
2417 } 2410 }
@@ -2425,7 +2418,6 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2425 block_t total_sections, blocks_per_seg; 2418 block_t total_sections, blocks_per_seg;
2426 struct f2fs_super_block *raw_super = (struct f2fs_super_block *) 2419 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2427 (bh->b_data + F2FS_SUPER_OFFSET); 2420 (bh->b_data + F2FS_SUPER_OFFSET);
2428 struct super_block *sb = sbi->sb;
2429 unsigned int blocksize; 2421 unsigned int blocksize;
2430 size_t crc_offset = 0; 2422 size_t crc_offset = 0;
2431 __u32 crc = 0; 2423 __u32 crc = 0;
@@ -2435,48 +2427,42 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2435 crc_offset = le32_to_cpu(raw_super->checksum_offset); 2427 crc_offset = le32_to_cpu(raw_super->checksum_offset);
2436 if (crc_offset != 2428 if (crc_offset !=
2437 offsetof(struct f2fs_super_block, crc)) { 2429 offsetof(struct f2fs_super_block, crc)) {
2438 f2fs_msg(sb, KERN_INFO, 2430 f2fs_info(sbi, "Invalid SB checksum offset: %zu",
2439 "Invalid SB checksum offset: %zu", 2431 crc_offset);
2440 crc_offset);
2441 return 1; 2432 return 1;
2442 } 2433 }
2443 crc = le32_to_cpu(raw_super->crc); 2434 crc = le32_to_cpu(raw_super->crc);
2444 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { 2435 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
2445 f2fs_msg(sb, KERN_INFO, 2436 f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
2446 "Invalid SB checksum value: %u", crc);
2447 return 1; 2437 return 1;
2448 } 2438 }
2449 } 2439 }
2450 2440
2451 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { 2441 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
2452 f2fs_msg(sb, KERN_INFO, 2442 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
2453 "Magic Mismatch, valid(0x%x) - read(0x%x)", 2443 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2454 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2455 return 1; 2444 return 1;
2456 } 2445 }
2457 2446
2458 /* Currently, support only 4KB page cache size */ 2447 /* Currently, support only 4KB page cache size */
2459 if (F2FS_BLKSIZE != PAGE_SIZE) { 2448 if (F2FS_BLKSIZE != PAGE_SIZE) {
2460 f2fs_msg(sb, KERN_INFO, 2449 f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
2461 "Invalid page_cache_size (%lu), supports only 4KB", 2450 PAGE_SIZE);
2462 PAGE_SIZE);
2463 return 1; 2451 return 1;
2464 } 2452 }
2465 2453
2466 /* Currently, support only 4KB block size */ 2454 /* Currently, support only 4KB block size */
2467 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); 2455 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
2468 if (blocksize != F2FS_BLKSIZE) { 2456 if (blocksize != F2FS_BLKSIZE) {
2469 f2fs_msg(sb, KERN_INFO, 2457 f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
2470 "Invalid blocksize (%u), supports only 4KB", 2458 blocksize);
2471 blocksize);
2472 return 1; 2459 return 1;
2473 } 2460 }
2474 2461
2475 /* check log blocks per segment */ 2462 /* check log blocks per segment */
2476 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) { 2463 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
2477 f2fs_msg(sb, KERN_INFO, 2464 f2fs_info(sbi, "Invalid log blocks per segment (%u)",
2478 "Invalid log blocks per segment (%u)", 2465 le32_to_cpu(raw_super->log_blocks_per_seg));
2479 le32_to_cpu(raw_super->log_blocks_per_seg));
2480 return 1; 2466 return 1;
2481 } 2467 }
2482 2468
@@ -2485,17 +2471,16 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2485 F2FS_MAX_LOG_SECTOR_SIZE || 2471 F2FS_MAX_LOG_SECTOR_SIZE ||
2486 le32_to_cpu(raw_super->log_sectorsize) < 2472 le32_to_cpu(raw_super->log_sectorsize) <
2487 F2FS_MIN_LOG_SECTOR_SIZE) { 2473 F2FS_MIN_LOG_SECTOR_SIZE) {
2488 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)", 2474 f2fs_info(sbi, "Invalid log sectorsize (%u)",
2489 le32_to_cpu(raw_super->log_sectorsize)); 2475 le32_to_cpu(raw_super->log_sectorsize));
2490 return 1; 2476 return 1;
2491 } 2477 }
2492 if (le32_to_cpu(raw_super->log_sectors_per_block) + 2478 if (le32_to_cpu(raw_super->log_sectors_per_block) +
2493 le32_to_cpu(raw_super->log_sectorsize) != 2479 le32_to_cpu(raw_super->log_sectorsize) !=
2494 F2FS_MAX_LOG_SECTOR_SIZE) { 2480 F2FS_MAX_LOG_SECTOR_SIZE) {
2495 f2fs_msg(sb, KERN_INFO, 2481 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
2496 "Invalid log sectors per block(%u) log sectorsize(%u)", 2482 le32_to_cpu(raw_super->log_sectors_per_block),
2497 le32_to_cpu(raw_super->log_sectors_per_block), 2483 le32_to_cpu(raw_super->log_sectorsize));
2498 le32_to_cpu(raw_super->log_sectorsize));
2499 return 1; 2484 return 1;
2500 } 2485 }
2501 2486
@@ -2509,59 +2494,51 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2509 2494
2510 if (segment_count > F2FS_MAX_SEGMENT || 2495 if (segment_count > F2FS_MAX_SEGMENT ||
2511 segment_count < F2FS_MIN_SEGMENTS) { 2496 segment_count < F2FS_MIN_SEGMENTS) {
2512 f2fs_msg(sb, KERN_INFO, 2497 f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
2513 "Invalid segment count (%u)",
2514 segment_count);
2515 return 1; 2498 return 1;
2516 } 2499 }
2517 2500
2518 if (total_sections > segment_count || 2501 if (total_sections > segment_count ||
2519 total_sections < F2FS_MIN_SEGMENTS || 2502 total_sections < F2FS_MIN_SEGMENTS ||
2520 segs_per_sec > segment_count || !segs_per_sec) { 2503 segs_per_sec > segment_count || !segs_per_sec) {
2521 f2fs_msg(sb, KERN_INFO, 2504 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
2522 "Invalid segment/section count (%u, %u x %u)", 2505 segment_count, total_sections, segs_per_sec);
2523 segment_count, total_sections, segs_per_sec);
2524 return 1; 2506 return 1;
2525 } 2507 }
2526 2508
2527 if ((segment_count / segs_per_sec) < total_sections) { 2509 if ((segment_count / segs_per_sec) < total_sections) {
2528 f2fs_msg(sb, KERN_INFO, 2510 f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
2529 "Small segment_count (%u < %u * %u)", 2511 segment_count, segs_per_sec, total_sections);
2530 segment_count, segs_per_sec, total_sections);
2531 return 1; 2512 return 1;
2532 } 2513 }
2533 2514
2534 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) { 2515 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
2535 f2fs_msg(sb, KERN_INFO, 2516 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
2536 "Wrong segment_count / block_count (%u > %llu)", 2517 segment_count, le64_to_cpu(raw_super->block_count));
2537 segment_count, le64_to_cpu(raw_super->block_count));
2538 return 1; 2518 return 1;
2539 } 2519 }
2540 2520
2541 if (secs_per_zone > total_sections || !secs_per_zone) { 2521 if (secs_per_zone > total_sections || !secs_per_zone) {
2542 f2fs_msg(sb, KERN_INFO, 2522 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
2543 "Wrong secs_per_zone / total_sections (%u, %u)", 2523 secs_per_zone, total_sections);
2544 secs_per_zone, total_sections);
2545 return 1; 2524 return 1;
2546 } 2525 }
2547 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION || 2526 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
2548 raw_super->hot_ext_count > F2FS_MAX_EXTENSION || 2527 raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
2549 (le32_to_cpu(raw_super->extension_count) + 2528 (le32_to_cpu(raw_super->extension_count) +
2550 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) { 2529 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
2551 f2fs_msg(sb, KERN_INFO, 2530 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
2552 "Corrupted extension count (%u + %u > %u)", 2531 le32_to_cpu(raw_super->extension_count),
2553 le32_to_cpu(raw_super->extension_count), 2532 raw_super->hot_ext_count,
2554 raw_super->hot_ext_count, 2533 F2FS_MAX_EXTENSION);
2555 F2FS_MAX_EXTENSION);
2556 return 1; 2534 return 1;
2557 } 2535 }
2558 2536
2559 if (le32_to_cpu(raw_super->cp_payload) > 2537 if (le32_to_cpu(raw_super->cp_payload) >
2560 (blocks_per_seg - F2FS_CP_PACKS)) { 2538 (blocks_per_seg - F2FS_CP_PACKS)) {
2561 f2fs_msg(sb, KERN_INFO, 2539 f2fs_info(sbi, "Insane cp_payload (%u > %u)",
2562 "Insane cp_payload (%u > %u)", 2540 le32_to_cpu(raw_super->cp_payload),
2563 le32_to_cpu(raw_super->cp_payload), 2541 blocks_per_seg - F2FS_CP_PACKS);
2564 blocks_per_seg - F2FS_CP_PACKS);
2565 return 1; 2542 return 1;
2566 } 2543 }
2567 2544
@@ -2569,11 +2546,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2569 if (le32_to_cpu(raw_super->node_ino) != 1 || 2546 if (le32_to_cpu(raw_super->node_ino) != 1 ||
2570 le32_to_cpu(raw_super->meta_ino) != 2 || 2547 le32_to_cpu(raw_super->meta_ino) != 2 ||
2571 le32_to_cpu(raw_super->root_ino) != 3) { 2548 le32_to_cpu(raw_super->root_ino) != 3) {
2572 f2fs_msg(sb, KERN_INFO, 2549 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
2573 "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)", 2550 le32_to_cpu(raw_super->node_ino),
2574 le32_to_cpu(raw_super->node_ino), 2551 le32_to_cpu(raw_super->meta_ino),
2575 le32_to_cpu(raw_super->meta_ino), 2552 le32_to_cpu(raw_super->root_ino));
2576 le32_to_cpu(raw_super->root_ino));
2577 return 1; 2553 return 1;
2578 } 2554 }
2579 2555
@@ -2617,8 +2593,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2617 2593
2618 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS || 2594 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
2619 ovp_segments == 0 || reserved_segments == 0)) { 2595 ovp_segments == 0 || reserved_segments == 0)) {
2620 f2fs_msg(sbi->sb, KERN_ERR, 2596 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
2621 "Wrong layout: check mkfs.f2fs version");
2622 return 1; 2597 return 1;
2623 } 2598 }
2624 2599
@@ -2627,16 +2602,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2627 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); 2602 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2628 if (!user_block_count || user_block_count >= 2603 if (!user_block_count || user_block_count >=
2629 segment_count_main << log_blocks_per_seg) { 2604 segment_count_main << log_blocks_per_seg) {
2630 f2fs_msg(sbi->sb, KERN_ERR, 2605 f2fs_err(sbi, "Wrong user_block_count: %u",
2631 "Wrong user_block_count: %u", user_block_count); 2606 user_block_count);
2632 return 1; 2607 return 1;
2633 } 2608 }
2634 2609
2635 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count); 2610 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
2636 if (valid_user_blocks > user_block_count) { 2611 if (valid_user_blocks > user_block_count) {
2637 f2fs_msg(sbi->sb, KERN_ERR, 2612 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
2638 "Wrong valid_user_blocks: %u, user_block_count: %u", 2613 valid_user_blocks, user_block_count);
2639 valid_user_blocks, user_block_count);
2640 return 1; 2614 return 1;
2641 } 2615 }
2642 2616
@@ -2644,9 +2618,8 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2644 avail_node_count = sbi->total_node_count - sbi->nquota_files - 2618 avail_node_count = sbi->total_node_count - sbi->nquota_files -
2645 F2FS_RESERVED_NODE_NUM; 2619 F2FS_RESERVED_NODE_NUM;
2646 if (valid_node_count > avail_node_count) { 2620 if (valid_node_count > avail_node_count) {
2647 f2fs_msg(sbi->sb, KERN_ERR, 2621 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
2648 "Wrong valid_node_count: %u, avail_node_count: %u", 2622 valid_node_count, avail_node_count);
2649 valid_node_count, avail_node_count);
2650 return 1; 2623 return 1;
2651 } 2624 }
2652 2625
@@ -2660,10 +2633,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2660 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) { 2633 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
2661 if (le32_to_cpu(ckpt->cur_node_segno[i]) == 2634 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
2662 le32_to_cpu(ckpt->cur_node_segno[j])) { 2635 le32_to_cpu(ckpt->cur_node_segno[j])) {
2663 f2fs_msg(sbi->sb, KERN_ERR, 2636 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
2664 "Node segment (%u, %u) has the same " 2637 i, j,
2665 "segno: %u", i, j, 2638 le32_to_cpu(ckpt->cur_node_segno[i]));
2666 le32_to_cpu(ckpt->cur_node_segno[i]));
2667 return 1; 2639 return 1;
2668 } 2640 }
2669 } 2641 }
@@ -2675,10 +2647,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2675 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) { 2647 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
2676 if (le32_to_cpu(ckpt->cur_data_segno[i]) == 2648 if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
2677 le32_to_cpu(ckpt->cur_data_segno[j])) { 2649 le32_to_cpu(ckpt->cur_data_segno[j])) {
2678 f2fs_msg(sbi->sb, KERN_ERR, 2650 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
2679 "Data segment (%u, %u) has the same " 2651 i, j,
2680 "segno: %u", i, j, 2652 le32_to_cpu(ckpt->cur_data_segno[i]));
2681 le32_to_cpu(ckpt->cur_data_segno[i]));
2682 return 1; 2653 return 1;
2683 } 2654 }
2684 } 2655 }
@@ -2687,10 +2658,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2687 for (j = i; j < NR_CURSEG_DATA_TYPE; j++) { 2658 for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
2688 if (le32_to_cpu(ckpt->cur_node_segno[i]) == 2659 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
2689 le32_to_cpu(ckpt->cur_data_segno[j])) { 2660 le32_to_cpu(ckpt->cur_data_segno[j])) {
2690 f2fs_msg(sbi->sb, KERN_ERR, 2661 f2fs_err(sbi, "Data segment (%u) and Data segment (%u) has the same segno: %u",
2691 "Data segment (%u) and Data segment (%u)" 2662 i, j,
2692 " has the same segno: %u", i, j, 2663 le32_to_cpu(ckpt->cur_node_segno[i]));
2693 le32_to_cpu(ckpt->cur_node_segno[i]));
2694 return 1; 2664 return 1;
2695 } 2665 }
2696 } 2666 }
@@ -2701,9 +2671,8 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2701 2671
2702 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 || 2672 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
2703 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) { 2673 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
2704 f2fs_msg(sbi->sb, KERN_ERR, 2674 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
2705 "Wrong bitmap size: sit: %u, nat:%u", 2675 sit_bitmap_size, nat_bitmap_size);
2706 sit_bitmap_size, nat_bitmap_size);
2707 return 1; 2676 return 1;
2708 } 2677 }
2709 2678
@@ -2712,14 +2681,22 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2712 if (cp_pack_start_sum < cp_payload + 1 || 2681 if (cp_pack_start_sum < cp_payload + 1 ||
2713 cp_pack_start_sum > blocks_per_seg - 1 - 2682 cp_pack_start_sum > blocks_per_seg - 1 -
2714 NR_CURSEG_TYPE) { 2683 NR_CURSEG_TYPE) {
2715 f2fs_msg(sbi->sb, KERN_ERR, 2684 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
2716 "Wrong cp_pack_start_sum: %u", 2685 cp_pack_start_sum);
2717 cp_pack_start_sum); 2686 return 1;
2687 }
2688
2689 if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
2690 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
2691 f2fs_warn(sbi, "using deprecated layout of large_nat_bitmap, "
2692 "please run fsck v1.13.0 or higher to repair, chksum_offset: %u, "
2693 "fixed with patch: \"f2fs-tools: relocate chksum_offset for large_nat_bitmap feature\"",
2694 le32_to_cpu(ckpt->checksum_offset));
2718 return 1; 2695 return 1;
2719 } 2696 }
2720 2697
2721 if (unlikely(f2fs_cp_error(sbi))) { 2698 if (unlikely(f2fs_cp_error(sbi))) {
2722 f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); 2699 f2fs_err(sbi, "A bug case: need to run fsck");
2723 return 1; 2700 return 1;
2724 } 2701 }
2725 return 0; 2702 return 0;
@@ -2888,18 +2865,17 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
2888 for (block = 0; block < 2; block++) { 2865 for (block = 0; block < 2; block++) {
2889 bh = sb_bread(sb, block); 2866 bh = sb_bread(sb, block);
2890 if (!bh) { 2867 if (!bh) {
2891 f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock", 2868 f2fs_err(sbi, "Unable to read %dth superblock",
2892 block + 1); 2869 block + 1);
2893 err = -EIO; 2870 err = -EIO;
2894 continue; 2871 continue;
2895 } 2872 }
2896 2873
2897 /* sanity checking of raw super */ 2874 /* sanity checking of raw super */
2898 if (sanity_check_raw_super(sbi, bh)) { 2875 if (sanity_check_raw_super(sbi, bh)) {
2899 f2fs_msg(sb, KERN_ERR, 2876 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
2900 "Can't find valid F2FS filesystem in %dth superblock", 2877 block + 1);
2901 block + 1); 2878 err = -EFSCORRUPTED;
2902 err = -EINVAL;
2903 brelse(bh); 2879 brelse(bh);
2904 continue; 2880 continue;
2905 } 2881 }
@@ -3028,36 +3004,32 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
3028#ifdef CONFIG_BLK_DEV_ZONED 3004#ifdef CONFIG_BLK_DEV_ZONED
3029 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM && 3005 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
3030 !f2fs_sb_has_blkzoned(sbi)) { 3006 !f2fs_sb_has_blkzoned(sbi)) {
3031 f2fs_msg(sbi->sb, KERN_ERR, 3007 f2fs_err(sbi, "Zoned block device feature not enabled\n");
3032 "Zoned block device feature not enabled\n");
3033 return -EINVAL; 3008 return -EINVAL;
3034 } 3009 }
3035 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) { 3010 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
3036 if (init_blkz_info(sbi, i)) { 3011 if (init_blkz_info(sbi, i)) {
3037 f2fs_msg(sbi->sb, KERN_ERR, 3012 f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
3038 "Failed to initialize F2FS blkzone information");
3039 return -EINVAL; 3013 return -EINVAL;
3040 } 3014 }
3041 if (max_devices == 1) 3015 if (max_devices == 1)
3042 break; 3016 break;
3043 f2fs_msg(sbi->sb, KERN_INFO, 3017 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
3044 "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)", 3018 i, FDEV(i).path,
3045 i, FDEV(i).path, 3019 FDEV(i).total_segments,
3046 FDEV(i).total_segments, 3020 FDEV(i).start_blk, FDEV(i).end_blk,
3047 FDEV(i).start_blk, FDEV(i).end_blk, 3021 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
3048 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ? 3022 "Host-aware" : "Host-managed");
3049 "Host-aware" : "Host-managed");
3050 continue; 3023 continue;
3051 } 3024 }
3052#endif 3025#endif
3053 f2fs_msg(sbi->sb, KERN_INFO, 3026 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
3054 "Mount Device [%2d]: %20s, %8u, %8x - %8x", 3027 i, FDEV(i).path,
3055 i, FDEV(i).path, 3028 FDEV(i).total_segments,
3056 FDEV(i).total_segments, 3029 FDEV(i).start_blk, FDEV(i).end_blk);
3057 FDEV(i).start_blk, FDEV(i).end_blk); 3030 }
3058 } 3031 f2fs_info(sbi,
3059 f2fs_msg(sbi->sb, KERN_INFO, 3032 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
3060 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
3061 return 0; 3033 return 0;
3062} 3034}
3063 3035
@@ -3103,7 +3075,7 @@ try_onemore:
3103 /* Load the checksum driver */ 3075 /* Load the checksum driver */
3104 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0); 3076 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
3105 if (IS_ERR(sbi->s_chksum_driver)) { 3077 if (IS_ERR(sbi->s_chksum_driver)) {
3106 f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver."); 3078 f2fs_err(sbi, "Cannot load crc32 driver.");
3107 err = PTR_ERR(sbi->s_chksum_driver); 3079 err = PTR_ERR(sbi->s_chksum_driver);
3108 sbi->s_chksum_driver = NULL; 3080 sbi->s_chksum_driver = NULL;
3109 goto free_sbi; 3081 goto free_sbi;
@@ -3111,7 +3083,7 @@ try_onemore:
3111 3083
3112 /* set a block size */ 3084 /* set a block size */
3113 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) { 3085 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
3114 f2fs_msg(sb, KERN_ERR, "unable to set blocksize"); 3086 f2fs_err(sbi, "unable to set blocksize");
3115 goto free_sbi; 3087 goto free_sbi;
3116 } 3088 }
3117 3089
@@ -3135,8 +3107,7 @@ try_onemore:
3135 */ 3107 */
3136#ifndef CONFIG_BLK_DEV_ZONED 3108#ifndef CONFIG_BLK_DEV_ZONED
3137 if (f2fs_sb_has_blkzoned(sbi)) { 3109 if (f2fs_sb_has_blkzoned(sbi)) {
3138 f2fs_msg(sb, KERN_ERR, 3110 f2fs_err(sbi, "Zoned block device support is not enabled");
3139 "Zoned block device support is not enabled");
3140 err = -EOPNOTSUPP; 3111 err = -EOPNOTSUPP;
3141 goto free_sb_buf; 3112 goto free_sb_buf;
3142 } 3113 }
@@ -3160,10 +3131,7 @@ try_onemore:
3160 3131
3161#ifdef CONFIG_QUOTA 3132#ifdef CONFIG_QUOTA
3162 sb->dq_op = &f2fs_quota_operations; 3133 sb->dq_op = &f2fs_quota_operations;
3163 if (f2fs_sb_has_quota_ino(sbi)) 3134 sb->s_qcop = &f2fs_quotactl_ops;
3164 sb->s_qcop = &dquot_quotactl_sysfile_ops;
3165 else
3166 sb->s_qcop = &f2fs_quotactl_ops;
3167 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ; 3135 sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP | QTYPE_MASK_PRJ;
3168 3136
3169 if (f2fs_sb_has_quota_ino(sbi)) { 3137 if (f2fs_sb_has_quota_ino(sbi)) {
@@ -3192,6 +3160,7 @@ try_onemore:
3192 mutex_init(&sbi->gc_mutex); 3160 mutex_init(&sbi->gc_mutex);
3193 mutex_init(&sbi->writepages); 3161 mutex_init(&sbi->writepages);
3194 mutex_init(&sbi->cp_mutex); 3162 mutex_init(&sbi->cp_mutex);
3163 mutex_init(&sbi->resize_mutex);
3195 init_rwsem(&sbi->node_write); 3164 init_rwsem(&sbi->node_write);
3196 init_rwsem(&sbi->node_change); 3165 init_rwsem(&sbi->node_change);
3197 3166
@@ -3227,6 +3196,7 @@ try_onemore:
3227 } 3196 }
3228 3197
3229 init_rwsem(&sbi->cp_rwsem); 3198 init_rwsem(&sbi->cp_rwsem);
3199 init_rwsem(&sbi->quota_sem);
3230 init_waitqueue_head(&sbi->cp_wait); 3200 init_waitqueue_head(&sbi->cp_wait);
3231 init_sb_info(sbi); 3201 init_sb_info(sbi);
3232 3202
@@ -3246,14 +3216,14 @@ try_onemore:
3246 /* get an inode for meta space */ 3216 /* get an inode for meta space */
3247 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); 3217 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
3248 if (IS_ERR(sbi->meta_inode)) { 3218 if (IS_ERR(sbi->meta_inode)) {
3249 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode"); 3219 f2fs_err(sbi, "Failed to read F2FS meta data inode");
3250 err = PTR_ERR(sbi->meta_inode); 3220 err = PTR_ERR(sbi->meta_inode);
3251 goto free_io_dummy; 3221 goto free_io_dummy;
3252 } 3222 }
3253 3223
3254 err = f2fs_get_valid_checkpoint(sbi); 3224 err = f2fs_get_valid_checkpoint(sbi);
3255 if (err) { 3225 if (err) {
3256 f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint"); 3226 f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
3257 goto free_meta_inode; 3227 goto free_meta_inode;
3258 } 3228 }
3259 3229
@@ -3264,10 +3234,13 @@ try_onemore:
3264 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL; 3234 sbi->interval_time[DISABLE_TIME] = DEF_DISABLE_QUICK_INTERVAL;
3265 } 3235 }
3266 3236
3237 if (__is_set_ckpt_flags(F2FS_CKPT(sbi), CP_FSCK_FLAG))
3238 set_sbi_flag(sbi, SBI_NEED_FSCK);
3239
3267 /* Initialize device list */ 3240 /* Initialize device list */
3268 err = f2fs_scan_devices(sbi); 3241 err = f2fs_scan_devices(sbi);
3269 if (err) { 3242 if (err) {
3270 f2fs_msg(sb, KERN_ERR, "Failed to find devices"); 3243 f2fs_err(sbi, "Failed to find devices");
3271 goto free_devices; 3244 goto free_devices;
3272 } 3245 }
3273 3246
@@ -3287,6 +3260,7 @@ try_onemore:
3287 INIT_LIST_HEAD(&sbi->inode_list[i]); 3260 INIT_LIST_HEAD(&sbi->inode_list[i]);
3288 spin_lock_init(&sbi->inode_lock[i]); 3261 spin_lock_init(&sbi->inode_lock[i]);
3289 } 3262 }
3263 mutex_init(&sbi->flush_lock);
3290 3264
3291 f2fs_init_extent_cache_info(sbi); 3265 f2fs_init_extent_cache_info(sbi);
3292 3266
@@ -3297,14 +3271,14 @@ try_onemore:
3297 /* setup f2fs internal modules */ 3271 /* setup f2fs internal modules */
3298 err = f2fs_build_segment_manager(sbi); 3272 err = f2fs_build_segment_manager(sbi);
3299 if (err) { 3273 if (err) {
3300 f2fs_msg(sb, KERN_ERR, 3274 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
3301 "Failed to initialize F2FS segment manager"); 3275 err);
3302 goto free_sm; 3276 goto free_sm;
3303 } 3277 }
3304 err = f2fs_build_node_manager(sbi); 3278 err = f2fs_build_node_manager(sbi);
3305 if (err) { 3279 if (err) {
3306 f2fs_msg(sb, KERN_ERR, 3280 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
3307 "Failed to initialize F2FS node manager"); 3281 err);
3308 goto free_nm; 3282 goto free_nm;
3309 } 3283 }
3310 3284
@@ -3329,7 +3303,7 @@ try_onemore:
3329 /* get an inode for node space */ 3303 /* get an inode for node space */
3330 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); 3304 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
3331 if (IS_ERR(sbi->node_inode)) { 3305 if (IS_ERR(sbi->node_inode)) {
3332 f2fs_msg(sb, KERN_ERR, "Failed to read node inode"); 3306 f2fs_err(sbi, "Failed to read node inode");
3333 err = PTR_ERR(sbi->node_inode); 3307 err = PTR_ERR(sbi->node_inode);
3334 goto free_stats; 3308 goto free_stats;
3335 } 3309 }
@@ -3337,7 +3311,7 @@ try_onemore:
3337 /* read root inode and dentry */ 3311 /* read root inode and dentry */
3338 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); 3312 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
3339 if (IS_ERR(root)) { 3313 if (IS_ERR(root)) {
3340 f2fs_msg(sb, KERN_ERR, "Failed to read root inode"); 3314 f2fs_err(sbi, "Failed to read root inode");
3341 err = PTR_ERR(root); 3315 err = PTR_ERR(root);
3342 goto free_node_inode; 3316 goto free_node_inode;
3343 } 3317 }
@@ -3363,8 +3337,7 @@ try_onemore:
3363 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) { 3337 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
3364 err = f2fs_enable_quotas(sb); 3338 err = f2fs_enable_quotas(sb);
3365 if (err) 3339 if (err)
3366 f2fs_msg(sb, KERN_ERR, 3340 f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
3367 "Cannot turn on quotas: error %d", err);
3368 } 3341 }
3369#endif 3342#endif
3370 /* if there are nt orphan nodes free them */ 3343 /* if there are nt orphan nodes free them */
@@ -3384,13 +3357,10 @@ try_onemore:
3384 if (f2fs_hw_is_readonly(sbi)) { 3357 if (f2fs_hw_is_readonly(sbi)) {
3385 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { 3358 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
3386 err = -EROFS; 3359 err = -EROFS;
3387 f2fs_msg(sb, KERN_ERR, 3360 f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
3388 "Need to recover fsync data, but "
3389 "write access unavailable");
3390 goto free_meta; 3361 goto free_meta;
3391 } 3362 }
3392 f2fs_msg(sbi->sb, KERN_INFO, "write access " 3363 f2fs_info(sbi, "write access unavailable, skipping recovery");
3393 "unavailable, skipping recovery");
3394 goto reset_checkpoint; 3364 goto reset_checkpoint;
3395 } 3365 }
3396 3366
@@ -3405,8 +3375,8 @@ try_onemore:
3405 if (err != -ENOMEM) 3375 if (err != -ENOMEM)
3406 skip_recovery = true; 3376 skip_recovery = true;
3407 need_fsck = true; 3377 need_fsck = true;
3408 f2fs_msg(sb, KERN_ERR, 3378 f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
3409 "Cannot recover all fsync data errno=%d", err); 3379 err);
3410 goto free_meta; 3380 goto free_meta;
3411 } 3381 }
3412 } else { 3382 } else {
@@ -3414,8 +3384,7 @@ try_onemore:
3414 3384
3415 if (!f2fs_readonly(sb) && err > 0) { 3385 if (!f2fs_readonly(sb) && err > 0) {
3416 err = -EINVAL; 3386 err = -EINVAL;
3417 f2fs_msg(sb, KERN_ERR, 3387 f2fs_err(sbi, "Need to recover fsync data");
3418 "Need to recover fsync data");
3419 goto free_meta; 3388 goto free_meta;
3420 } 3389 }
3421 } 3390 }
@@ -3446,17 +3415,16 @@ reset_checkpoint:
3446 /* recover broken superblock */ 3415 /* recover broken superblock */
3447 if (recovery) { 3416 if (recovery) {
3448 err = f2fs_commit_super(sbi, true); 3417 err = f2fs_commit_super(sbi, true);
3449 f2fs_msg(sb, KERN_INFO, 3418 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
3450 "Try to recover %dth superblock, ret: %d", 3419 sbi->valid_super_block ? 1 : 2, err);
3451 sbi->valid_super_block ? 1 : 2, err);
3452 } 3420 }
3453 3421
3454 f2fs_join_shrinker(sbi); 3422 f2fs_join_shrinker(sbi);
3455 3423
3456 f2fs_tuning_parameters(sbi); 3424 f2fs_tuning_parameters(sbi);
3457 3425
3458 f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx", 3426 f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
3459 cur_cp_version(F2FS_CKPT(sbi))); 3427 cur_cp_version(F2FS_CKPT(sbi)));
3460 f2fs_update_time(sbi, CP_TIME); 3428 f2fs_update_time(sbi, CP_TIME);
3461 f2fs_update_time(sbi, REQ_TIME); 3429 f2fs_update_time(sbi, REQ_TIME);
3462 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); 3430 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);
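All of the super.c conversions above replace f2fs_msg(sb, KERN_<level>, ...) with level-specific helpers that take the sbi directly. The f2fs.h hunk that introduces them is not part of this excerpt; a sketch of how such wrappers could be built, using the names seen at the call sites and an assumed prefix format, looks roughly like this:

/*
 * Sketch only; the real definitions live in the f2fs.h hunk of this
 * series, which is not shown in this excerpt.
 */
#define f2fs_printk_sketch(sbi, level, fmt, ...)			\
	printk(level "F2FS-fs (%s): " fmt "\n",				\
	       (sbi)->sb->s_id, ##__VA_ARGS__)

#define f2fs_err(sbi, fmt, ...)						\
	f2fs_printk_sketch(sbi, KERN_ERR, fmt, ##__VA_ARGS__)
#define f2fs_warn(sbi, fmt, ...)					\
	f2fs_printk_sketch(sbi, KERN_WARNING, fmt, ##__VA_ARGS__)
#define f2fs_notice(sbi, fmt, ...)					\
	f2fs_printk_sketch(sbi, KERN_NOTICE, fmt, ##__VA_ARGS__)
#define f2fs_info(sbi, fmt, ...)					\
	f2fs_printk_sketch(sbi, KERN_INFO, fmt, ##__VA_ARGS__)

Baking the level and the device prefix into each helper is what lets the call sites above collapse the old multi-line f2fs_msg() calls into single statements.
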
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index 5c85166677d4..3aeacd0aacfd 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -68,6 +68,20 @@ static ssize_t dirty_segments_show(struct f2fs_attr *a,
68 (unsigned long long)(dirty_segments(sbi))); 68 (unsigned long long)(dirty_segments(sbi)));
69} 69}
70 70
71static ssize_t unusable_show(struct f2fs_attr *a,
72 struct f2fs_sb_info *sbi, char *buf)
73{
74 block_t unusable;
75
76 if (test_opt(sbi, DISABLE_CHECKPOINT))
77 unusable = sbi->unusable_block_count;
78 else
79 unusable = f2fs_get_unusable_blocks(sbi);
80 return snprintf(buf, PAGE_SIZE, "%llu\n",
81 (unsigned long long)unusable);
82}
83
84
71static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a, 85static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
72 struct f2fs_sb_info *sbi, char *buf) 86 struct f2fs_sb_info *sbi, char *buf)
73{ 87{
@@ -440,6 +454,7 @@ F2FS_GENERAL_RO_ATTR(dirty_segments);
440F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes); 454F2FS_GENERAL_RO_ATTR(lifetime_write_kbytes);
441F2FS_GENERAL_RO_ATTR(features); 455F2FS_GENERAL_RO_ATTR(features);
442F2FS_GENERAL_RO_ATTR(current_reserved_blocks); 456F2FS_GENERAL_RO_ATTR(current_reserved_blocks);
457F2FS_GENERAL_RO_ATTR(unusable);
443 458
444#ifdef CONFIG_FS_ENCRYPTION 459#ifdef CONFIG_FS_ENCRYPTION
445F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO); 460F2FS_FEATURE_RO_ATTR(encryption, FEAT_CRYPTO);
@@ -495,6 +510,7 @@ static struct attribute *f2fs_attrs[] = {
495 ATTR_LIST(inject_type), 510 ATTR_LIST(inject_type),
496#endif 511#endif
497 ATTR_LIST(dirty_segments), 512 ATTR_LIST(dirty_segments),
513 ATTR_LIST(unusable),
498 ATTR_LIST(lifetime_write_kbytes), 514 ATTR_LIST(lifetime_write_kbytes),
499 ATTR_LIST(features), 515 ATTR_LIST(features),
500 ATTR_LIST(reserved_blocks), 516 ATTR_LIST(reserved_blocks),
@@ -568,8 +584,7 @@ static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
568 584
569 if ((i % 10) == 0) 585 if ((i % 10) == 0)
570 seq_printf(seq, "%-10d", i); 586 seq_printf(seq, "%-10d", i);
571 seq_printf(seq, "%d|%-3u", se->type, 587 seq_printf(seq, "%d|%-3u", se->type, se->valid_blocks);
572 get_valid_blocks(sbi, i, false));
573 if ((i % 10) == 9 || i == (total_segs - 1)) 588 if ((i % 10) == 9 || i == (total_segs - 1))
574 seq_putc(seq, '\n'); 589 seq_putc(seq, '\n');
575 else 590 else
@@ -595,8 +610,7 @@ static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
595 struct seg_entry *se = get_seg_entry(sbi, i); 610 struct seg_entry *se = get_seg_entry(sbi, i);
596 611
597 seq_printf(seq, "%-10d", i); 612 seq_printf(seq, "%-10d", i);
598 seq_printf(seq, "%d|%-3u|", se->type, 613 seq_printf(seq, "%d|%-3u|", se->type, se->valid_blocks);
599 get_valid_blocks(sbi, i, false));
600 for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++) 614 for (j = 0; j < SIT_VBLOCK_MAP_SIZE; j++)
601 seq_printf(seq, " %.2x", se->cur_valid_map[j]); 615 seq_printf(seq, " %.2x", se->cur_valid_map[j]);
602 seq_putc(seq, '\n'); 616 seq_putc(seq, '\n');
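With the new read-only attribute wired into f2fs_attrs above, the unusable-block count can be read from userspace. Below is a small illustrative reader; the sysfs path follows the ABI entry added by this patch, and the device name is only an example.

/* Illustrative userspace reader; not part of the patch. */
#include <stdio.h>

static long long f2fs_read_unusable(const char *disk)
{
	char path[128];
	long long val = -1;
	FILE *f;

	snprintf(path, sizeof(path), "/sys/fs/f2fs/%s/unusable", disk);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	/* "sda1" is just an example device name */
	printf("unusable blocks: %lld\n", f2fs_read_unusable("sda1"));
	return 0;
}

As unusable_show() above indicates, the value is the live counter while checkpoint=disable is in effect and is otherwise recomputed via f2fs_get_unusable_blocks().
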
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index e791741d193b..b32c45621679 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -346,7 +346,10 @@ static int lookup_all_xattrs(struct inode *inode, struct page *ipage,
346 346
347 *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name); 347 *xe = __find_xattr(cur_addr, last_txattr_addr, index, len, name);
348 if (!*xe) { 348 if (!*xe) {
349 err = -EFAULT; 349 f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
350 inode->i_ino);
351 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
352 err = -EFSCORRUPTED;
350 goto out; 353 goto out;
351 } 354 }
352check: 355check:
@@ -622,7 +625,10 @@ static int __f2fs_setxattr(struct inode *inode, int index,
622 /* find entry with wanted name. */ 625 /* find entry with wanted name. */
623 here = __find_xattr(base_addr, last_base_addr, index, len, name); 626 here = __find_xattr(base_addr, last_base_addr, index, len, name);
624 if (!here) { 627 if (!here) {
625 error = -EFAULT; 628 f2fs_err(F2FS_I_SB(inode), "inode (%lu) has corrupted xattr",
629 inode->i_ino);
630 set_sbi_flag(F2FS_I_SB(inode), SBI_NEED_FSCK);
631 error = -EFSCORRUPTED;
626 goto exit; 632 goto exit;
627 } 633 }
628 634
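Both xattr lookups above now flag the filesystem for fsck and return -EFSCORRUPTED instead of -EFAULT when the entry that must exist cannot be found. The f2fs definition of EFSCORRUPTED is not part of this excerpt; by convention in other filesystems (ext4, xfs) it is an alias for the existing "structure needs cleaning" errno, along these lines:

/* Convention used elsewhere; assumed to be what f2fs adopts as well. */
#include <linux/errno.h>

#define EFSCORRUPTED	EUCLEAN		/* Filesystem is corrupted */
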
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index e3dc031af7f5..1796ff99c3e9 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -1019,8 +1019,8 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
1019 ), 1019 ),
1020 1020
1021 TP_fast_assign( 1021 TP_fast_assign(
1022 __entry->dev = page->mapping->host->i_sb->s_dev; 1022 __entry->dev = page_file_mapping(page)->host->i_sb->s_dev;
1023 __entry->ino = page->mapping->host->i_ino; 1023 __entry->ino = page_file_mapping(page)->host->i_ino;
1024 __entry->index = page->index; 1024 __entry->index = page->index;
1025 __entry->old_blkaddr = fio->old_blkaddr; 1025 __entry->old_blkaddr = fio->old_blkaddr;
1026 __entry->new_blkaddr = fio->new_blkaddr; 1026 __entry->new_blkaddr = fio->new_blkaddr;
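The tracepoints switch from page->mapping to page_file_mapping(page) so that pages sitting in the swap cache still resolve to a valid address_space. Paraphrased from include/linux/mm.h of roughly this era (exact form may differ), the helper behaves like this:

/* Paraphrase for reference; the real helper lives in include/linux/mm.h. */
static inline struct address_space *page_file_mapping(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return __page_file_mapping(page);	/* swap address_space */

	return page->mapping;
}
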
@@ -1207,10 +1207,11 @@ DECLARE_EVENT_CLASS(f2fs__page,
1207 ), 1207 ),
1208 1208
1209 TP_fast_assign( 1209 TP_fast_assign(
1210 __entry->dev = page->mapping->host->i_sb->s_dev; 1210 __entry->dev = page_file_mapping(page)->host->i_sb->s_dev;
1211 __entry->ino = page->mapping->host->i_ino; 1211 __entry->ino = page_file_mapping(page)->host->i_ino;
1212 __entry->type = type; 1212 __entry->type = type;
1213 __entry->dir = S_ISDIR(page->mapping->host->i_mode); 1213 __entry->dir =
1214 S_ISDIR(page_file_mapping(page)->host->i_mode);
1214 __entry->index = page->index; 1215 __entry->index = page->index;
1215 __entry->dirty = PageDirty(page); 1216 __entry->dirty = PageDirty(page);
1216 __entry->uptodate = PageUptodate(page); 1217 __entry->uptodate = PageUptodate(page);