author	Joe Perches <joe@perches.com>	2019-06-18 05:48:42 -0400
committer	Jaegeuk Kim <jaegeuk@kernel.org>	2019-07-02 18:40:40 -0400
commit	dcbb4c10e6d9693cc9d6fa493b4d130b66a60c7d (patch)
tree	b725d2166c9325de23807cc204abc7c153813aa4 /fs/f2fs/super.c
parent	8740edc3e5cca78e20ac3ba0127f61ab7cdb4d2f (diff)
f2fs: introduce f2fs_<level> macros to wrap f2fs_printk()
- Add and use f2fs_<level> macros
- Convert f2fs_msg to f2fs_printk
- Remove level from f2fs_printk and embed the level in the format
- Coalesce formats and align multi-line arguments
- Remove unnecessary duplicate extern f2fs_msg from f2fs.h

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Diffstat (limited to 'fs/f2fs/super.c')
-rw-r--r--	fs/f2fs/super.c	493
1 file changed, 200 insertions, 293 deletions
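The f2fs_<level> wrappers themselves are added in f2fs.h, which is outside this
diff (it is limited to super.c). Judging from the converted call sites below,
they are presumably thin macros that prepend the printk level to the format
string, roughly along these lines (a sketch, not the actual f2fs.h hunk):

	void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...);

	/* each wrapper embeds the level in the format, e.g. KERN_ERR "..." */
	#define f2fs_err(sbi, fmt, ...)					\
		f2fs_printk(sbi, KERN_ERR fmt, ##__VA_ARGS__)
	#define f2fs_warn(sbi, fmt, ...)				\
		f2fs_printk(sbi, KERN_WARNING fmt, ##__VA_ARGS__)
	#define f2fs_notice(sbi, fmt, ...)				\
		f2fs_printk(sbi, KERN_NOTICE fmt, ##__VA_ARGS__)
	#define f2fs_info(sbi, fmt, ...)				\
		f2fs_printk(sbi, KERN_INFO fmt, ##__VA_ARGS__)

With that, a call such as f2fs_msg(sb, KERN_ERR, "Invalid uid value %d", arg)
becomes f2fs_err(sbi, "Invalid uid value %d", arg): the level moves out of the
argument list and into the format string, and the super_block argument is
replaced by the sbi pointer.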
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 049a5957532e..2a40944d0810 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -205,15 +205,20 @@ static match_table_t f2fs_tokens = {
205 {Opt_err, NULL}, 205 {Opt_err, NULL},
206}; 206};
207 207
208void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...) 208void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...)
209{ 209{
210 struct va_format vaf; 210 struct va_format vaf;
211 va_list args; 211 va_list args;
212 int level;
212 213
213 va_start(args, fmt); 214 va_start(args, fmt);
214 vaf.fmt = fmt; 215
216 level = printk_get_level(fmt);
217 vaf.fmt = printk_skip_level(fmt);
215 vaf.va = &args; 218 vaf.va = &args;
216 printk("%sF2FS-fs (%s): %pV\n", level, sb->s_id, &vaf); 219 printk("%c%cF2FS-fs (%s): %pV\n",
220 KERN_SOH_ASCII, level, sbi->sb->s_id, &vaf);
221
217 va_end(args); 222 va_end(args);
218} 223}
219 224
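The rewritten f2fs_printk() pulls the level back out of the format with
printk_get_level() and printk_skip_level() from include/linux/printk.h, then
re-emits it as a "\001<level>" prefix via KERN_SOH_ASCII. As a rough standalone
illustration of how those helpers behave on a KERN_ERR-prefixed format
(simplified, not the kernel implementation):

	#include <stdio.h>

	#define KERN_SOH_ASCII	'\001'
	#define KERN_ERR	"\001" "3"	/* as in include/linux/kern_levels.h */

	/* simplified: the real helper also whitelists '0'..'7' and 'c' */
	static int get_level(const char *buf)
	{
		return (buf[0] == KERN_SOH_ASCII && buf[1]) ? buf[1] : 0;
	}

	static const char *skip_level(const char *buf)
	{
		return get_level(buf) ? buf + 2 : buf;
	}

	int main(void)
	{
		const char *fmt = KERN_ERR "Invalid uid value %d";

		/* prints: level=3 fmt="Invalid uid value %d" */
		printf("level=%c fmt=\"%s\"\n", get_level(fmt), skip_level(fmt));
		return 0;
	}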
@@ -226,21 +231,19 @@ static inline void limit_reserve_root(struct f2fs_sb_info *sbi)
226 if (test_opt(sbi, RESERVE_ROOT) && 231 if (test_opt(sbi, RESERVE_ROOT) &&
227 F2FS_OPTION(sbi).root_reserved_blocks > limit) { 232 F2FS_OPTION(sbi).root_reserved_blocks > limit) {
228 F2FS_OPTION(sbi).root_reserved_blocks = limit; 233 F2FS_OPTION(sbi).root_reserved_blocks = limit;
229 f2fs_msg(sbi->sb, KERN_INFO, 234 f2fs_info(sbi, "Reduce reserved blocks for root = %u",
230 "Reduce reserved blocks for root = %u", 235 F2FS_OPTION(sbi).root_reserved_blocks);
231 F2FS_OPTION(sbi).root_reserved_blocks);
232 } 236 }
233 if (!test_opt(sbi, RESERVE_ROOT) && 237 if (!test_opt(sbi, RESERVE_ROOT) &&
234 (!uid_eq(F2FS_OPTION(sbi).s_resuid, 238 (!uid_eq(F2FS_OPTION(sbi).s_resuid,
235 make_kuid(&init_user_ns, F2FS_DEF_RESUID)) || 239 make_kuid(&init_user_ns, F2FS_DEF_RESUID)) ||
236 !gid_eq(F2FS_OPTION(sbi).s_resgid, 240 !gid_eq(F2FS_OPTION(sbi).s_resgid,
237 make_kgid(&init_user_ns, F2FS_DEF_RESGID)))) 241 make_kgid(&init_user_ns, F2FS_DEF_RESGID))))
238 f2fs_msg(sbi->sb, KERN_INFO, 242 f2fs_info(sbi, "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root",
239 "Ignore s_resuid=%u, s_resgid=%u w/o reserve_root", 243 from_kuid_munged(&init_user_ns,
240 from_kuid_munged(&init_user_ns, 244 F2FS_OPTION(sbi).s_resuid),
241 F2FS_OPTION(sbi).s_resuid), 245 from_kgid_munged(&init_user_ns,
242 from_kgid_munged(&init_user_ns, 246 F2FS_OPTION(sbi).s_resgid));
243 F2FS_OPTION(sbi).s_resgid));
244} 247}
245 248
246static void init_once(void *foo) 249static void init_once(void *foo)
@@ -261,35 +264,29 @@ static int f2fs_set_qf_name(struct super_block *sb, int qtype,
261 int ret = -EINVAL; 264 int ret = -EINVAL;
262 265
263 if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) { 266 if (sb_any_quota_loaded(sb) && !F2FS_OPTION(sbi).s_qf_names[qtype]) {
264 f2fs_msg(sb, KERN_ERR, 267 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
265 "Cannot change journaled "
266 "quota options when quota turned on");
267 return -EINVAL; 268 return -EINVAL;
268 } 269 }
269 if (f2fs_sb_has_quota_ino(sbi)) { 270 if (f2fs_sb_has_quota_ino(sbi)) {
270 f2fs_msg(sb, KERN_INFO, 271 f2fs_info(sbi, "QUOTA feature is enabled, so ignore qf_name");
271 "QUOTA feature is enabled, so ignore qf_name");
272 return 0; 272 return 0;
273 } 273 }
274 274
275 qname = match_strdup(args); 275 qname = match_strdup(args);
276 if (!qname) { 276 if (!qname) {
277 f2fs_msg(sb, KERN_ERR, 277 f2fs_err(sbi, "Not enough memory for storing quotafile name");
278 "Not enough memory for storing quotafile name");
279 return -ENOMEM; 278 return -ENOMEM;
280 } 279 }
281 if (F2FS_OPTION(sbi).s_qf_names[qtype]) { 280 if (F2FS_OPTION(sbi).s_qf_names[qtype]) {
282 if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0) 281 if (strcmp(F2FS_OPTION(sbi).s_qf_names[qtype], qname) == 0)
283 ret = 0; 282 ret = 0;
284 else 283 else
285 f2fs_msg(sb, KERN_ERR, 284 f2fs_err(sbi, "%s quota file already specified",
286 "%s quota file already specified",
287 QTYPE2NAME(qtype)); 285 QTYPE2NAME(qtype));
288 goto errout; 286 goto errout;
289 } 287 }
290 if (strchr(qname, '/')) { 288 if (strchr(qname, '/')) {
291 f2fs_msg(sb, KERN_ERR, 289 f2fs_err(sbi, "quotafile must be on filesystem root");
292 "quotafile must be on filesystem root");
293 goto errout; 290 goto errout;
294 } 291 }
295 F2FS_OPTION(sbi).s_qf_names[qtype] = qname; 292 F2FS_OPTION(sbi).s_qf_names[qtype] = qname;
@@ -305,8 +302,7 @@ static int f2fs_clear_qf_name(struct super_block *sb, int qtype)
305 struct f2fs_sb_info *sbi = F2FS_SB(sb); 302 struct f2fs_sb_info *sbi = F2FS_SB(sb);
306 303
307 if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) { 304 if (sb_any_quota_loaded(sb) && F2FS_OPTION(sbi).s_qf_names[qtype]) {
308 f2fs_msg(sb, KERN_ERR, "Cannot change journaled quota options" 305 f2fs_err(sbi, "Cannot change journaled quota options when quota turned on");
309 " when quota turned on");
310 return -EINVAL; 306 return -EINVAL;
311 } 307 }
312 kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]); 308 kvfree(F2FS_OPTION(sbi).s_qf_names[qtype]);
@@ -322,8 +318,7 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
322 * to support legacy quotas in quota files. 318 * to support legacy quotas in quota files.
323 */ 319 */
324 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) { 320 if (test_opt(sbi, PRJQUOTA) && !f2fs_sb_has_project_quota(sbi)) {
325 f2fs_msg(sbi->sb, KERN_ERR, "Project quota feature not enabled. " 321 f2fs_err(sbi, "Project quota feature not enabled. Cannot enable project quota enforcement.");
326 "Cannot enable project quota enforcement.");
327 return -1; 322 return -1;
328 } 323 }
329 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] || 324 if (F2FS_OPTION(sbi).s_qf_names[USRQUOTA] ||
@@ -343,21 +338,18 @@ static int f2fs_check_quota_options(struct f2fs_sb_info *sbi)
343 338
344 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) || 339 if (test_opt(sbi, GRPQUOTA) || test_opt(sbi, USRQUOTA) ||
345 test_opt(sbi, PRJQUOTA)) { 340 test_opt(sbi, PRJQUOTA)) {
346 f2fs_msg(sbi->sb, KERN_ERR, "old and new quota " 341 f2fs_err(sbi, "old and new quota format mixing");
347 "format mixing");
348 return -1; 342 return -1;
349 } 343 }
350 344
351 if (!F2FS_OPTION(sbi).s_jquota_fmt) { 345 if (!F2FS_OPTION(sbi).s_jquota_fmt) {
352 f2fs_msg(sbi->sb, KERN_ERR, "journaled quota format " 346 f2fs_err(sbi, "journaled quota format not specified");
353 "not specified");
354 return -1; 347 return -1;
355 } 348 }
356 } 349 }
357 350
358 if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) { 351 if (f2fs_sb_has_quota_ino(sbi) && F2FS_OPTION(sbi).s_jquota_fmt) {
359 f2fs_msg(sbi->sb, KERN_INFO, 352 f2fs_info(sbi, "QUOTA feature is enabled, so ignore jquota_fmt");
360 "QUOTA feature is enabled, so ignore jquota_fmt");
361 F2FS_OPTION(sbi).s_jquota_fmt = 0; 353 F2FS_OPTION(sbi).s_jquota_fmt = 0;
362 } 354 }
363 return 0; 355 return 0;
@@ -425,8 +417,7 @@ static int parse_options(struct super_block *sb, char *options)
425 break; 417 break;
426 case Opt_nodiscard: 418 case Opt_nodiscard:
427 if (f2fs_sb_has_blkzoned(sbi)) { 419 if (f2fs_sb_has_blkzoned(sbi)) {
428 f2fs_msg(sb, KERN_WARNING, 420 f2fs_warn(sbi, "discard is required for zoned block devices");
429 "discard is required for zoned block devices");
430 return -EINVAL; 421 return -EINVAL;
431 } 422 }
432 clear_opt(sbi, DISCARD); 423 clear_opt(sbi, DISCARD);
@@ -458,20 +449,16 @@ static int parse_options(struct super_block *sb, char *options)
458 break; 449 break;
459#else 450#else
460 case Opt_user_xattr: 451 case Opt_user_xattr:
461 f2fs_msg(sb, KERN_INFO, 452 f2fs_info(sbi, "user_xattr options not supported");
462 "user_xattr options not supported");
463 break; 453 break;
464 case Opt_nouser_xattr: 454 case Opt_nouser_xattr:
465 f2fs_msg(sb, KERN_INFO, 455 f2fs_info(sbi, "nouser_xattr options not supported");
466 "nouser_xattr options not supported");
467 break; 456 break;
468 case Opt_inline_xattr: 457 case Opt_inline_xattr:
469 f2fs_msg(sb, KERN_INFO, 458 f2fs_info(sbi, "inline_xattr options not supported");
470 "inline_xattr options not supported");
471 break; 459 break;
472 case Opt_noinline_xattr: 460 case Opt_noinline_xattr:
473 f2fs_msg(sb, KERN_INFO, 461 f2fs_info(sbi, "noinline_xattr options not supported");
474 "noinline_xattr options not supported");
475 break; 462 break;
476#endif 463#endif
477#ifdef CONFIG_F2FS_FS_POSIX_ACL 464#ifdef CONFIG_F2FS_FS_POSIX_ACL
@@ -483,10 +470,10 @@ static int parse_options(struct super_block *sb, char *options)
483 break; 470 break;
484#else 471#else
485 case Opt_acl: 472 case Opt_acl:
486 f2fs_msg(sb, KERN_INFO, "acl options not supported"); 473 f2fs_info(sbi, "acl options not supported");
487 break; 474 break;
488 case Opt_noacl: 475 case Opt_noacl:
489 f2fs_msg(sb, KERN_INFO, "noacl options not supported"); 476 f2fs_info(sbi, "noacl options not supported");
490 break; 477 break;
491#endif 478#endif
492 case Opt_active_logs: 479 case Opt_active_logs:
@@ -536,9 +523,8 @@ static int parse_options(struct super_block *sb, char *options)
536 if (args->from && match_int(args, &arg)) 523 if (args->from && match_int(args, &arg))
537 return -EINVAL; 524 return -EINVAL;
538 if (test_opt(sbi, RESERVE_ROOT)) { 525 if (test_opt(sbi, RESERVE_ROOT)) {
539 f2fs_msg(sb, KERN_INFO, 526 f2fs_info(sbi, "Preserve previous reserve_root=%u",
540 "Preserve previous reserve_root=%u", 527 F2FS_OPTION(sbi).root_reserved_blocks);
541 F2FS_OPTION(sbi).root_reserved_blocks);
542 } else { 528 } else {
543 F2FS_OPTION(sbi).root_reserved_blocks = arg; 529 F2FS_OPTION(sbi).root_reserved_blocks = arg;
544 set_opt(sbi, RESERVE_ROOT); 530 set_opt(sbi, RESERVE_ROOT);
@@ -549,8 +535,7 @@ static int parse_options(struct super_block *sb, char *options)
549 return -EINVAL; 535 return -EINVAL;
550 uid = make_kuid(current_user_ns(), arg); 536 uid = make_kuid(current_user_ns(), arg);
551 if (!uid_valid(uid)) { 537 if (!uid_valid(uid)) {
552 f2fs_msg(sb, KERN_ERR, 538 f2fs_err(sbi, "Invalid uid value %d", arg);
553 "Invalid uid value %d", arg);
554 return -EINVAL; 539 return -EINVAL;
555 } 540 }
556 F2FS_OPTION(sbi).s_resuid = uid; 541 F2FS_OPTION(sbi).s_resuid = uid;
@@ -560,8 +545,7 @@ static int parse_options(struct super_block *sb, char *options)
560 return -EINVAL; 545 return -EINVAL;
561 gid = make_kgid(current_user_ns(), arg); 546 gid = make_kgid(current_user_ns(), arg);
562 if (!gid_valid(gid)) { 547 if (!gid_valid(gid)) {
563 f2fs_msg(sb, KERN_ERR, 548 f2fs_err(sbi, "Invalid gid value %d", arg);
564 "Invalid gid value %d", arg);
565 return -EINVAL; 549 return -EINVAL;
566 } 550 }
567 F2FS_OPTION(sbi).s_resgid = gid; 551 F2FS_OPTION(sbi).s_resgid = gid;
@@ -574,9 +558,7 @@ static int parse_options(struct super_block *sb, char *options)
574 if (strlen(name) == 8 && 558 if (strlen(name) == 8 &&
575 !strncmp(name, "adaptive", 8)) { 559 !strncmp(name, "adaptive", 8)) {
576 if (f2fs_sb_has_blkzoned(sbi)) { 560 if (f2fs_sb_has_blkzoned(sbi)) {
577 f2fs_msg(sb, KERN_WARNING, 561 f2fs_warn(sbi, "adaptive mode is not allowed with zoned block device feature");
578 "adaptive mode is not allowed with "
579 "zoned block device feature");
580 kvfree(name); 562 kvfree(name);
581 return -EINVAL; 563 return -EINVAL;
582 } 564 }
@@ -594,9 +576,8 @@ static int parse_options(struct super_block *sb, char *options)
594 if (args->from && match_int(args, &arg)) 576 if (args->from && match_int(args, &arg))
595 return -EINVAL; 577 return -EINVAL;
596 if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) { 578 if (arg <= 0 || arg > __ilog2_u32(BIO_MAX_PAGES)) {
597 f2fs_msg(sb, KERN_WARNING, 579 f2fs_warn(sbi, "Not support %d, larger than %d",
598 "Not support %d, larger than %d", 580 1 << arg, BIO_MAX_PAGES);
599 1 << arg, BIO_MAX_PAGES);
600 return -EINVAL; 581 return -EINVAL;
601 } 582 }
602 F2FS_OPTION(sbi).write_io_size_bits = arg; 583 F2FS_OPTION(sbi).write_io_size_bits = arg;
@@ -617,13 +598,11 @@ static int parse_options(struct super_block *sb, char *options)
617 break; 598 break;
618#else 599#else
619 case Opt_fault_injection: 600 case Opt_fault_injection:
620 f2fs_msg(sb, KERN_INFO, 601 f2fs_info(sbi, "fault_injection options not supported");
621 "fault_injection options not supported");
622 break; 602 break;
623 603
624 case Opt_fault_type: 604 case Opt_fault_type:
625 f2fs_msg(sb, KERN_INFO, 605 f2fs_info(sbi, "fault_type options not supported");
626 "fault_type options not supported");
627 break; 606 break;
628#endif 607#endif
629 case Opt_lazytime: 608 case Opt_lazytime:
@@ -703,8 +682,7 @@ static int parse_options(struct super_block *sb, char *options)
703 case Opt_jqfmt_vfsv0: 682 case Opt_jqfmt_vfsv0:
704 case Opt_jqfmt_vfsv1: 683 case Opt_jqfmt_vfsv1:
705 case Opt_noquota: 684 case Opt_noquota:
706 f2fs_msg(sb, KERN_INFO, 685 f2fs_info(sbi, "quota operations not supported");
707 "quota operations not supported");
708 break; 686 break;
709#endif 687#endif
710 case Opt_whint: 688 case Opt_whint:
@@ -766,16 +744,14 @@ static int parse_options(struct super_block *sb, char *options)
766 case Opt_test_dummy_encryption: 744 case Opt_test_dummy_encryption:
767#ifdef CONFIG_FS_ENCRYPTION 745#ifdef CONFIG_FS_ENCRYPTION
768 if (!f2fs_sb_has_encrypt(sbi)) { 746 if (!f2fs_sb_has_encrypt(sbi)) {
769 f2fs_msg(sb, KERN_ERR, "Encrypt feature is off"); 747 f2fs_err(sbi, "Encrypt feature is off");
770 return -EINVAL; 748 return -EINVAL;
771 } 749 }
772 750
773 F2FS_OPTION(sbi).test_dummy_encryption = true; 751 F2FS_OPTION(sbi).test_dummy_encryption = true;
774 f2fs_msg(sb, KERN_INFO, 752 f2fs_info(sbi, "Test dummy encryption mode enabled");
775 "Test dummy encryption mode enabled");
776#else 753#else
777 f2fs_msg(sb, KERN_INFO, 754 f2fs_info(sbi, "Test dummy encryption mount option ignored");
778 "Test dummy encryption mount option ignored");
779#endif 755#endif
780 break; 756 break;
781 case Opt_checkpoint_disable_cap_perc: 757 case Opt_checkpoint_disable_cap_perc:
@@ -804,9 +780,8 @@ static int parse_options(struct super_block *sb, char *options)
804 clear_opt(sbi, DISABLE_CHECKPOINT); 780 clear_opt(sbi, DISABLE_CHECKPOINT);
805 break; 781 break;
806 default: 782 default:
807 f2fs_msg(sb, KERN_ERR, 783 f2fs_err(sbi, "Unrecognized mount option \"%s\" or missing value",
808 "Unrecognized mount option \"%s\" or missing value", 784 p);
809 p);
810 return -EINVAL; 785 return -EINVAL;
811 } 786 }
812 } 787 }
@@ -815,23 +790,18 @@ static int parse_options(struct super_block *sb, char *options)
815 return -EINVAL; 790 return -EINVAL;
816#else 791#else
817 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) { 792 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sbi->sb)) {
818 f2fs_msg(sbi->sb, KERN_INFO, 793 f2fs_info(sbi, "Filesystem with quota feature cannot be mounted RDWR without CONFIG_QUOTA");
819 "Filesystem with quota feature cannot be mounted RDWR "
820 "without CONFIG_QUOTA");
821 return -EINVAL; 794 return -EINVAL;
822 } 795 }
823 if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) { 796 if (f2fs_sb_has_project_quota(sbi) && !f2fs_readonly(sbi->sb)) {
824 f2fs_msg(sb, KERN_ERR, 797 f2fs_err(sbi, "Filesystem with project quota feature cannot be mounted RDWR without CONFIG_QUOTA");
825 "Filesystem with project quota feature cannot be "
826 "mounted RDWR without CONFIG_QUOTA");
827 return -EINVAL; 798 return -EINVAL;
828 } 799 }
829#endif 800#endif
830 801
831 if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) { 802 if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
832 f2fs_msg(sb, KERN_ERR, 803 f2fs_err(sbi, "Should set mode=lfs with %uKB-sized IO",
833 "Should set mode=lfs with %uKB-sized IO", 804 F2FS_IO_SIZE_KB(sbi));
834 F2FS_IO_SIZE_KB(sbi));
835 return -EINVAL; 805 return -EINVAL;
836 } 806 }
837 807
@@ -840,15 +810,11 @@ static int parse_options(struct super_block *sb, char *options)
840 810
841 if (!f2fs_sb_has_extra_attr(sbi) || 811 if (!f2fs_sb_has_extra_attr(sbi) ||
842 !f2fs_sb_has_flexible_inline_xattr(sbi)) { 812 !f2fs_sb_has_flexible_inline_xattr(sbi)) {
843 f2fs_msg(sb, KERN_ERR, 813 f2fs_err(sbi, "extra_attr or flexible_inline_xattr feature is off");
844 "extra_attr or flexible_inline_xattr "
845 "feature is off");
846 return -EINVAL; 814 return -EINVAL;
847 } 815 }
848 if (!test_opt(sbi, INLINE_XATTR)) { 816 if (!test_opt(sbi, INLINE_XATTR)) {
849 f2fs_msg(sb, KERN_ERR, 817 f2fs_err(sbi, "inline_xattr_size option should be set with inline_xattr option");
850 "inline_xattr_size option should be "
851 "set with inline_xattr option");
852 return -EINVAL; 818 return -EINVAL;
853 } 819 }
854 820
@@ -857,16 +823,14 @@ static int parse_options(struct super_block *sb, char *options)
857 823
858 if (F2FS_OPTION(sbi).inline_xattr_size < min_size || 824 if (F2FS_OPTION(sbi).inline_xattr_size < min_size ||
859 F2FS_OPTION(sbi).inline_xattr_size > max_size) { 825 F2FS_OPTION(sbi).inline_xattr_size > max_size) {
860 f2fs_msg(sb, KERN_ERR, 826 f2fs_err(sbi, "inline xattr size is out of range: %d ~ %d",
861 "inline xattr size is out of range: %d ~ %d", 827 min_size, max_size);
862 min_size, max_size);
863 return -EINVAL; 828 return -EINVAL;
864 } 829 }
865 } 830 }
866 831
867 if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) { 832 if (test_opt(sbi, DISABLE_CHECKPOINT) && test_opt(sbi, LFS)) {
868 f2fs_msg(sb, KERN_ERR, 833 f2fs_err(sbi, "LFS not compatible with checkpoint=disable\n");
869 "LFS not compatible with checkpoint=disable\n");
870 return -EINVAL; 834 return -EINVAL;
871 } 835 }
872 836
@@ -1488,8 +1452,7 @@ static int f2fs_disable_checkpoint(struct f2fs_sb_info *sbi)
1488 block_t unusable; 1452 block_t unusable;
1489 1453
1490 if (s_flags & SB_RDONLY) { 1454 if (s_flags & SB_RDONLY) {
1491 f2fs_msg(sbi->sb, KERN_ERR, 1455 f2fs_err(sbi, "checkpoint=disable on readonly fs");
1492 "checkpoint=disable on readonly fs");
1493 return -EINVAL; 1456 return -EINVAL;
1494 } 1457 }
1495 sbi->sb->s_flags |= SB_ACTIVE; 1458 sbi->sb->s_flags |= SB_ACTIVE;
@@ -1592,8 +1555,8 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
1592 /* recover superblocks we couldn't write due to previous RO mount */ 1555 /* recover superblocks we couldn't write due to previous RO mount */
1593 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) { 1556 if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
1594 err = f2fs_commit_super(sbi, false); 1557 err = f2fs_commit_super(sbi, false);
1595 f2fs_msg(sb, KERN_INFO, 1558 f2fs_info(sbi, "Try to recover all the superblocks, ret: %d",
1596 "Try to recover all the superblocks, ret: %d", err); 1559 err);
1597 if (!err) 1560 if (!err)
1598 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE); 1561 clear_sbi_flag(sbi, SBI_NEED_SB_WRITE);
1599 } 1562 }
@@ -1634,15 +1597,13 @@ static int f2fs_remount(struct super_block *sb, int *flags, char *data)
1634 /* disallow enable/disable extent_cache dynamically */ 1597 /* disallow enable/disable extent_cache dynamically */
1635 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) { 1598 if (no_extent_cache == !!test_opt(sbi, EXTENT_CACHE)) {
1636 err = -EINVAL; 1599 err = -EINVAL;
1637 f2fs_msg(sbi->sb, KERN_WARNING, 1600 f2fs_warn(sbi, "switch extent_cache option is not allowed");
1638 "switch extent_cache option is not allowed");
1639 goto restore_opts; 1601 goto restore_opts;
1640 } 1602 }
1641 1603
1642 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) { 1604 if ((*flags & SB_RDONLY) && test_opt(sbi, DISABLE_CHECKPOINT)) {
1643 err = -EINVAL; 1605 err = -EINVAL;
1644 f2fs_msg(sbi->sb, KERN_WARNING, 1606 f2fs_warn(sbi, "disabling checkpoint not compatible with read-only");
1645 "disabling checkpoint not compatible with read-only");
1646 goto restore_opts; 1607 goto restore_opts;
1647 } 1608 }
1648 1609
@@ -1712,8 +1673,7 @@ skip:
1712restore_gc: 1673restore_gc:
1713 if (need_restart_gc) { 1674 if (need_restart_gc) {
1714 if (f2fs_start_gc_thread(sbi)) 1675 if (f2fs_start_gc_thread(sbi))
1715 f2fs_msg(sbi->sb, KERN_WARNING, 1676 f2fs_warn(sbi, "background gc thread has stopped");
1716 "background gc thread has stopped");
1717 } else if (need_stop_gc) { 1677 } else if (need_stop_gc) {
1718 f2fs_stop_gc_thread(sbi); 1678 f2fs_stop_gc_thread(sbi);
1719 } 1679 }
@@ -1852,8 +1812,7 @@ static qsize_t *f2fs_get_reserved_space(struct inode *inode)
1852static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type) 1812static int f2fs_quota_on_mount(struct f2fs_sb_info *sbi, int type)
1853{ 1813{
1854 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) { 1814 if (is_set_ckpt_flags(sbi, CP_QUOTA_NEED_FSCK_FLAG)) {
1855 f2fs_msg(sbi->sb, KERN_ERR, 1815 f2fs_err(sbi, "quota sysfile may be corrupted, skip loading it");
1856 "quota sysfile may be corrupted, skip loading it");
1857 return 0; 1816 return 0;
1858 } 1817 }
1859 1818
@@ -1869,8 +1828,7 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
1869 if (f2fs_sb_has_quota_ino(sbi) && rdonly) { 1828 if (f2fs_sb_has_quota_ino(sbi) && rdonly) {
1870 err = f2fs_enable_quotas(sbi->sb); 1829 err = f2fs_enable_quotas(sbi->sb);
1871 if (err) { 1830 if (err) {
1872 f2fs_msg(sbi->sb, KERN_ERR, 1831 f2fs_err(sbi, "Cannot turn on quota_ino: %d", err);
1873 "Cannot turn on quota_ino: %d", err);
1874 return 0; 1832 return 0;
1875 } 1833 }
1876 return 1; 1834 return 1;
@@ -1883,8 +1841,8 @@ int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly)
1883 enabled = 1; 1841 enabled = 1;
1884 continue; 1842 continue;
1885 } 1843 }
1886 f2fs_msg(sbi->sb, KERN_ERR, 1844 f2fs_err(sbi, "Cannot turn on quotas: %d on %d",
1887 "Cannot turn on quotas: %d on %d", err, i); 1845 err, i);
1888 } 1846 }
1889 } 1847 }
1890 return enabled; 1848 return enabled;
@@ -1905,8 +1863,7 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
1905 1863
1906 qf_inode = f2fs_iget(sb, qf_inum); 1864 qf_inode = f2fs_iget(sb, qf_inum);
1907 if (IS_ERR(qf_inode)) { 1865 if (IS_ERR(qf_inode)) {
1908 f2fs_msg(sb, KERN_ERR, 1866 f2fs_err(F2FS_SB(sb), "Bad quota inode %u:%lu", type, qf_inum);
1909 "Bad quota inode %u:%lu", type, qf_inum);
1910 return PTR_ERR(qf_inode); 1867 return PTR_ERR(qf_inode);
1911 } 1868 }
1912 1869
@@ -1919,17 +1876,17 @@ static int f2fs_quota_enable(struct super_block *sb, int type, int format_id,
1919 1876
1920static int f2fs_enable_quotas(struct super_block *sb) 1877static int f2fs_enable_quotas(struct super_block *sb)
1921{ 1878{
1879 struct f2fs_sb_info *sbi = F2FS_SB(sb);
1922 int type, err = 0; 1880 int type, err = 0;
1923 unsigned long qf_inum; 1881 unsigned long qf_inum;
1924 bool quota_mopt[MAXQUOTAS] = { 1882 bool quota_mopt[MAXQUOTAS] = {
1925 test_opt(F2FS_SB(sb), USRQUOTA), 1883 test_opt(sbi, USRQUOTA),
1926 test_opt(F2FS_SB(sb), GRPQUOTA), 1884 test_opt(sbi, GRPQUOTA),
1927 test_opt(F2FS_SB(sb), PRJQUOTA), 1885 test_opt(sbi, PRJQUOTA),
1928 }; 1886 };
1929 1887
1930 if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) { 1888 if (is_set_ckpt_flags(F2FS_SB(sb), CP_QUOTA_NEED_FSCK_FLAG)) {
1931 f2fs_msg(sb, KERN_ERR, 1889 f2fs_err(sbi, "quota file may be corrupted, skip loading it");
1932 "quota file may be corrupted, skip loading it");
1933 return 0; 1890 return 0;
1934 } 1891 }
1935 1892
@@ -1942,10 +1899,8 @@ static int f2fs_enable_quotas(struct super_block *sb)
1942 DQUOT_USAGE_ENABLED | 1899 DQUOT_USAGE_ENABLED |
1943 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0)); 1900 (quota_mopt[type] ? DQUOT_LIMITS_ENABLED : 0));
1944 if (err) { 1901 if (err) {
1945 f2fs_msg(sb, KERN_ERR, 1902 f2fs_err(sbi, "Failed to enable quota tracking (type=%d, err=%d). Please run fsck to fix.",
1946 "Failed to enable quota tracking " 1903 type, err);
1947 "(type=%d, err=%d). Please run "
1948 "fsck to fix.", type, err);
1949 for (type--; type >= 0; type--) 1904 for (type--; type >= 0; type--)
1950 dquot_quota_off(sb, type); 1905 dquot_quota_off(sb, type);
1951 set_sbi_flag(F2FS_SB(sb), 1906 set_sbi_flag(F2FS_SB(sb),
@@ -2065,10 +2020,8 @@ void f2fs_quota_off_umount(struct super_block *sb)
2065 if (err) { 2020 if (err) {
2066 int ret = dquot_quota_off(sb, type); 2021 int ret = dquot_quota_off(sb, type);
2067 2022
2068 f2fs_msg(sb, KERN_ERR, 2023 f2fs_err(F2FS_SB(sb), "Fail to turn off disk quota (type: %d, err: %d, ret:%d), Please run fsck to fix it.",
2069 "Fail to turn off disk quota " 2024 type, err, ret);
2070 "(type: %d, err: %d, ret:%d), Please "
2071 "run fsck to fix it.", type, err, ret);
2072 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR); 2025 set_sbi_flag(F2FS_SB(sb), SBI_QUOTA_NEED_REPAIR);
2073 } 2026 }
2074 } 2027 }
@@ -2361,55 +2314,49 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
2361 (segment_count << log_blocks_per_seg); 2314 (segment_count << log_blocks_per_seg);
2362 2315
2363 if (segment0_blkaddr != cp_blkaddr) { 2316 if (segment0_blkaddr != cp_blkaddr) {
2364 f2fs_msg(sb, KERN_INFO, 2317 f2fs_info(sbi, "Mismatch start address, segment0(%u) cp_blkaddr(%u)",
2365 "Mismatch start address, segment0(%u) cp_blkaddr(%u)", 2318 segment0_blkaddr, cp_blkaddr);
2366 segment0_blkaddr, cp_blkaddr);
2367 return true; 2319 return true;
2368 } 2320 }
2369 2321
2370 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) != 2322 if (cp_blkaddr + (segment_count_ckpt << log_blocks_per_seg) !=
2371 sit_blkaddr) { 2323 sit_blkaddr) {
2372 f2fs_msg(sb, KERN_INFO, 2324 f2fs_info(sbi, "Wrong CP boundary, start(%u) end(%u) blocks(%u)",
2373 "Wrong CP boundary, start(%u) end(%u) blocks(%u)", 2325 cp_blkaddr, sit_blkaddr,
2374 cp_blkaddr, sit_blkaddr, 2326 segment_count_ckpt << log_blocks_per_seg);
2375 segment_count_ckpt << log_blocks_per_seg);
2376 return true; 2327 return true;
2377 } 2328 }
2378 2329
2379 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) != 2330 if (sit_blkaddr + (segment_count_sit << log_blocks_per_seg) !=
2380 nat_blkaddr) { 2331 nat_blkaddr) {
2381 f2fs_msg(sb, KERN_INFO, 2332 f2fs_info(sbi, "Wrong SIT boundary, start(%u) end(%u) blocks(%u)",
2382 "Wrong SIT boundary, start(%u) end(%u) blocks(%u)", 2333 sit_blkaddr, nat_blkaddr,
2383 sit_blkaddr, nat_blkaddr, 2334 segment_count_sit << log_blocks_per_seg);
2384 segment_count_sit << log_blocks_per_seg);
2385 return true; 2335 return true;
2386 } 2336 }
2387 2337
2388 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) != 2338 if (nat_blkaddr + (segment_count_nat << log_blocks_per_seg) !=
2389 ssa_blkaddr) { 2339 ssa_blkaddr) {
2390 f2fs_msg(sb, KERN_INFO, 2340 f2fs_info(sbi, "Wrong NAT boundary, start(%u) end(%u) blocks(%u)",
2391 "Wrong NAT boundary, start(%u) end(%u) blocks(%u)", 2341 nat_blkaddr, ssa_blkaddr,
2392 nat_blkaddr, ssa_blkaddr, 2342 segment_count_nat << log_blocks_per_seg);
2393 segment_count_nat << log_blocks_per_seg);
2394 return true; 2343 return true;
2395 } 2344 }
2396 2345
2397 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) != 2346 if (ssa_blkaddr + (segment_count_ssa << log_blocks_per_seg) !=
2398 main_blkaddr) { 2347 main_blkaddr) {
2399 f2fs_msg(sb, KERN_INFO, 2348 f2fs_info(sbi, "Wrong SSA boundary, start(%u) end(%u) blocks(%u)",
2400 "Wrong SSA boundary, start(%u) end(%u) blocks(%u)", 2349 ssa_blkaddr, main_blkaddr,
2401 ssa_blkaddr, main_blkaddr, 2350 segment_count_ssa << log_blocks_per_seg);
2402 segment_count_ssa << log_blocks_per_seg);
2403 return true; 2351 return true;
2404 } 2352 }
2405 2353
2406 if (main_end_blkaddr > seg_end_blkaddr) { 2354 if (main_end_blkaddr > seg_end_blkaddr) {
2407 f2fs_msg(sb, KERN_INFO, 2355 f2fs_info(sbi, "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)",
2408 "Wrong MAIN_AREA boundary, start(%u) end(%u) block(%u)", 2356 main_blkaddr,
2409 main_blkaddr, 2357 segment0_blkaddr +
2410 segment0_blkaddr + 2358 (segment_count << log_blocks_per_seg),
2411 (segment_count << log_blocks_per_seg), 2359 segment_count_main << log_blocks_per_seg);
2412 segment_count_main << log_blocks_per_seg);
2413 return true; 2360 return true;
2414 } else if (main_end_blkaddr < seg_end_blkaddr) { 2361 } else if (main_end_blkaddr < seg_end_blkaddr) {
2415 int err = 0; 2362 int err = 0;
@@ -2426,12 +2373,11 @@ static inline bool sanity_check_area_boundary(struct f2fs_sb_info *sbi,
2426 err = __f2fs_commit_super(bh, NULL); 2373 err = __f2fs_commit_super(bh, NULL);
2427 res = err ? "failed" : "done"; 2374 res = err ? "failed" : "done";
2428 } 2375 }
2429 f2fs_msg(sb, KERN_INFO, 2376 f2fs_info(sbi, "Fix alignment : %s, start(%u) end(%u) block(%u)",
2430 "Fix alignment : %s, start(%u) end(%u) block(%u)", 2377 res, main_blkaddr,
2431 res, main_blkaddr, 2378 segment0_blkaddr +
2432 segment0_blkaddr + 2379 (segment_count << log_blocks_per_seg),
2433 (segment_count << log_blocks_per_seg), 2380 segment_count_main << log_blocks_per_seg);
2434 segment_count_main << log_blocks_per_seg);
2435 if (err) 2381 if (err)
2436 return true; 2382 return true;
2437 } 2383 }
@@ -2445,7 +2391,6 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2445 block_t total_sections, blocks_per_seg; 2391 block_t total_sections, blocks_per_seg;
2446 struct f2fs_super_block *raw_super = (struct f2fs_super_block *) 2392 struct f2fs_super_block *raw_super = (struct f2fs_super_block *)
2447 (bh->b_data + F2FS_SUPER_OFFSET); 2393 (bh->b_data + F2FS_SUPER_OFFSET);
2448 struct super_block *sb = sbi->sb;
2449 unsigned int blocksize; 2394 unsigned int blocksize;
2450 size_t crc_offset = 0; 2395 size_t crc_offset = 0;
2451 __u32 crc = 0; 2396 __u32 crc = 0;
@@ -2455,48 +2400,42 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2455 crc_offset = le32_to_cpu(raw_super->checksum_offset); 2400 crc_offset = le32_to_cpu(raw_super->checksum_offset);
2456 if (crc_offset != 2401 if (crc_offset !=
2457 offsetof(struct f2fs_super_block, crc)) { 2402 offsetof(struct f2fs_super_block, crc)) {
2458 f2fs_msg(sb, KERN_INFO, 2403 f2fs_info(sbi, "Invalid SB checksum offset: %zu",
2459 "Invalid SB checksum offset: %zu", 2404 crc_offset);
2460 crc_offset);
2461 return 1; 2405 return 1;
2462 } 2406 }
2463 crc = le32_to_cpu(raw_super->crc); 2407 crc = le32_to_cpu(raw_super->crc);
2464 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) { 2408 if (!f2fs_crc_valid(sbi, crc, raw_super, crc_offset)) {
2465 f2fs_msg(sb, KERN_INFO, 2409 f2fs_info(sbi, "Invalid SB checksum value: %u", crc);
2466 "Invalid SB checksum value: %u", crc);
2467 return 1; 2410 return 1;
2468 } 2411 }
2469 } 2412 }
2470 2413
2471 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) { 2414 if (F2FS_SUPER_MAGIC != le32_to_cpu(raw_super->magic)) {
2472 f2fs_msg(sb, KERN_INFO, 2415 f2fs_info(sbi, "Magic Mismatch, valid(0x%x) - read(0x%x)",
2473 "Magic Mismatch, valid(0x%x) - read(0x%x)", 2416 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2474 F2FS_SUPER_MAGIC, le32_to_cpu(raw_super->magic));
2475 return 1; 2417 return 1;
2476 } 2418 }
2477 2419
2478 /* Currently, support only 4KB page cache size */ 2420 /* Currently, support only 4KB page cache size */
2479 if (F2FS_BLKSIZE != PAGE_SIZE) { 2421 if (F2FS_BLKSIZE != PAGE_SIZE) {
2480 f2fs_msg(sb, KERN_INFO, 2422 f2fs_info(sbi, "Invalid page_cache_size (%lu), supports only 4KB",
2481 "Invalid page_cache_size (%lu), supports only 4KB", 2423 PAGE_SIZE);
2482 PAGE_SIZE);
2483 return 1; 2424 return 1;
2484 } 2425 }
2485 2426
2486 /* Currently, support only 4KB block size */ 2427 /* Currently, support only 4KB block size */
2487 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize); 2428 blocksize = 1 << le32_to_cpu(raw_super->log_blocksize);
2488 if (blocksize != F2FS_BLKSIZE) { 2429 if (blocksize != F2FS_BLKSIZE) {
2489 f2fs_msg(sb, KERN_INFO, 2430 f2fs_info(sbi, "Invalid blocksize (%u), supports only 4KB",
2490 "Invalid blocksize (%u), supports only 4KB", 2431 blocksize);
2491 blocksize);
2492 return 1; 2432 return 1;
2493 } 2433 }
2494 2434
2495 /* check log blocks per segment */ 2435 /* check log blocks per segment */
2496 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) { 2436 if (le32_to_cpu(raw_super->log_blocks_per_seg) != 9) {
2497 f2fs_msg(sb, KERN_INFO, 2437 f2fs_info(sbi, "Invalid log blocks per segment (%u)",
2498 "Invalid log blocks per segment (%u)", 2438 le32_to_cpu(raw_super->log_blocks_per_seg));
2499 le32_to_cpu(raw_super->log_blocks_per_seg));
2500 return 1; 2439 return 1;
2501 } 2440 }
2502 2441
@@ -2505,17 +2444,16 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2505 F2FS_MAX_LOG_SECTOR_SIZE || 2444 F2FS_MAX_LOG_SECTOR_SIZE ||
2506 le32_to_cpu(raw_super->log_sectorsize) < 2445 le32_to_cpu(raw_super->log_sectorsize) <
2507 F2FS_MIN_LOG_SECTOR_SIZE) { 2446 F2FS_MIN_LOG_SECTOR_SIZE) {
2508 f2fs_msg(sb, KERN_INFO, "Invalid log sectorsize (%u)", 2447 f2fs_info(sbi, "Invalid log sectorsize (%u)",
2509 le32_to_cpu(raw_super->log_sectorsize)); 2448 le32_to_cpu(raw_super->log_sectorsize));
2510 return 1; 2449 return 1;
2511 } 2450 }
2512 if (le32_to_cpu(raw_super->log_sectors_per_block) + 2451 if (le32_to_cpu(raw_super->log_sectors_per_block) +
2513 le32_to_cpu(raw_super->log_sectorsize) != 2452 le32_to_cpu(raw_super->log_sectorsize) !=
2514 F2FS_MAX_LOG_SECTOR_SIZE) { 2453 F2FS_MAX_LOG_SECTOR_SIZE) {
2515 f2fs_msg(sb, KERN_INFO, 2454 f2fs_info(sbi, "Invalid log sectors per block(%u) log sectorsize(%u)",
2516 "Invalid log sectors per block(%u) log sectorsize(%u)", 2455 le32_to_cpu(raw_super->log_sectors_per_block),
2517 le32_to_cpu(raw_super->log_sectors_per_block), 2456 le32_to_cpu(raw_super->log_sectorsize));
2518 le32_to_cpu(raw_super->log_sectorsize));
2519 return 1; 2457 return 1;
2520 } 2458 }
2521 2459
@@ -2529,59 +2467,51 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2529 2467
2530 if (segment_count > F2FS_MAX_SEGMENT || 2468 if (segment_count > F2FS_MAX_SEGMENT ||
2531 segment_count < F2FS_MIN_SEGMENTS) { 2469 segment_count < F2FS_MIN_SEGMENTS) {
2532 f2fs_msg(sb, KERN_INFO, 2470 f2fs_info(sbi, "Invalid segment count (%u)", segment_count);
2533 "Invalid segment count (%u)",
2534 segment_count);
2535 return 1; 2471 return 1;
2536 } 2472 }
2537 2473
2538 if (total_sections > segment_count || 2474 if (total_sections > segment_count ||
2539 total_sections < F2FS_MIN_SEGMENTS || 2475 total_sections < F2FS_MIN_SEGMENTS ||
2540 segs_per_sec > segment_count || !segs_per_sec) { 2476 segs_per_sec > segment_count || !segs_per_sec) {
2541 f2fs_msg(sb, KERN_INFO, 2477 f2fs_info(sbi, "Invalid segment/section count (%u, %u x %u)",
2542 "Invalid segment/section count (%u, %u x %u)", 2478 segment_count, total_sections, segs_per_sec);
2543 segment_count, total_sections, segs_per_sec);
2544 return 1; 2479 return 1;
2545 } 2480 }
2546 2481
2547 if ((segment_count / segs_per_sec) < total_sections) { 2482 if ((segment_count / segs_per_sec) < total_sections) {
2548 f2fs_msg(sb, KERN_INFO, 2483 f2fs_info(sbi, "Small segment_count (%u < %u * %u)",
2549 "Small segment_count (%u < %u * %u)", 2484 segment_count, segs_per_sec, total_sections);
2550 segment_count, segs_per_sec, total_sections);
2551 return 1; 2485 return 1;
2552 } 2486 }
2553 2487
2554 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) { 2488 if (segment_count > (le64_to_cpu(raw_super->block_count) >> 9)) {
2555 f2fs_msg(sb, KERN_INFO, 2489 f2fs_info(sbi, "Wrong segment_count / block_count (%u > %llu)",
2556 "Wrong segment_count / block_count (%u > %llu)", 2490 segment_count, le64_to_cpu(raw_super->block_count));
2557 segment_count, le64_to_cpu(raw_super->block_count));
2558 return 1; 2491 return 1;
2559 } 2492 }
2560 2493
2561 if (secs_per_zone > total_sections || !secs_per_zone) { 2494 if (secs_per_zone > total_sections || !secs_per_zone) {
2562 f2fs_msg(sb, KERN_INFO, 2495 f2fs_info(sbi, "Wrong secs_per_zone / total_sections (%u, %u)",
2563 "Wrong secs_per_zone / total_sections (%u, %u)", 2496 secs_per_zone, total_sections);
2564 secs_per_zone, total_sections);
2565 return 1; 2497 return 1;
2566 } 2498 }
2567 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION || 2499 if (le32_to_cpu(raw_super->extension_count) > F2FS_MAX_EXTENSION ||
2568 raw_super->hot_ext_count > F2FS_MAX_EXTENSION || 2500 raw_super->hot_ext_count > F2FS_MAX_EXTENSION ||
2569 (le32_to_cpu(raw_super->extension_count) + 2501 (le32_to_cpu(raw_super->extension_count) +
2570 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) { 2502 raw_super->hot_ext_count) > F2FS_MAX_EXTENSION) {
2571 f2fs_msg(sb, KERN_INFO, 2503 f2fs_info(sbi, "Corrupted extension count (%u + %u > %u)",
2572 "Corrupted extension count (%u + %u > %u)", 2504 le32_to_cpu(raw_super->extension_count),
2573 le32_to_cpu(raw_super->extension_count), 2505 raw_super->hot_ext_count,
2574 raw_super->hot_ext_count, 2506 F2FS_MAX_EXTENSION);
2575 F2FS_MAX_EXTENSION);
2576 return 1; 2507 return 1;
2577 } 2508 }
2578 2509
2579 if (le32_to_cpu(raw_super->cp_payload) > 2510 if (le32_to_cpu(raw_super->cp_payload) >
2580 (blocks_per_seg - F2FS_CP_PACKS)) { 2511 (blocks_per_seg - F2FS_CP_PACKS)) {
2581 f2fs_msg(sb, KERN_INFO, 2512 f2fs_info(sbi, "Insane cp_payload (%u > %u)",
2582 "Insane cp_payload (%u > %u)", 2513 le32_to_cpu(raw_super->cp_payload),
2583 le32_to_cpu(raw_super->cp_payload), 2514 blocks_per_seg - F2FS_CP_PACKS);
2584 blocks_per_seg - F2FS_CP_PACKS);
2585 return 1; 2515 return 1;
2586 } 2516 }
2587 2517
@@ -2589,11 +2519,10 @@ static int sanity_check_raw_super(struct f2fs_sb_info *sbi,
2589 if (le32_to_cpu(raw_super->node_ino) != 1 || 2519 if (le32_to_cpu(raw_super->node_ino) != 1 ||
2590 le32_to_cpu(raw_super->meta_ino) != 2 || 2520 le32_to_cpu(raw_super->meta_ino) != 2 ||
2591 le32_to_cpu(raw_super->root_ino) != 3) { 2521 le32_to_cpu(raw_super->root_ino) != 3) {
2592 f2fs_msg(sb, KERN_INFO, 2522 f2fs_info(sbi, "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)",
2593 "Invalid Fs Meta Ino: node(%u) meta(%u) root(%u)", 2523 le32_to_cpu(raw_super->node_ino),
2594 le32_to_cpu(raw_super->node_ino), 2524 le32_to_cpu(raw_super->meta_ino),
2595 le32_to_cpu(raw_super->meta_ino), 2525 le32_to_cpu(raw_super->root_ino));
2596 le32_to_cpu(raw_super->root_ino));
2597 return 1; 2526 return 1;
2598 } 2527 }
2599 2528
@@ -2637,8 +2566,7 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2637 2566
2638 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS || 2567 if (unlikely(fsmeta < F2FS_MIN_SEGMENTS ||
2639 ovp_segments == 0 || reserved_segments == 0)) { 2568 ovp_segments == 0 || reserved_segments == 0)) {
2640 f2fs_msg(sbi->sb, KERN_ERR, 2569 f2fs_err(sbi, "Wrong layout: check mkfs.f2fs version");
2641 "Wrong layout: check mkfs.f2fs version");
2642 return 1; 2570 return 1;
2643 } 2571 }
2644 2572
@@ -2647,16 +2575,15 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2647 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg); 2575 log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
2648 if (!user_block_count || user_block_count >= 2576 if (!user_block_count || user_block_count >=
2649 segment_count_main << log_blocks_per_seg) { 2577 segment_count_main << log_blocks_per_seg) {
2650 f2fs_msg(sbi->sb, KERN_ERR, 2578 f2fs_err(sbi, "Wrong user_block_count: %u",
2651 "Wrong user_block_count: %u", user_block_count); 2579 user_block_count);
2652 return 1; 2580 return 1;
2653 } 2581 }
2654 2582
2655 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count); 2583 valid_user_blocks = le64_to_cpu(ckpt->valid_block_count);
2656 if (valid_user_blocks > user_block_count) { 2584 if (valid_user_blocks > user_block_count) {
2657 f2fs_msg(sbi->sb, KERN_ERR, 2585 f2fs_err(sbi, "Wrong valid_user_blocks: %u, user_block_count: %u",
2658 "Wrong valid_user_blocks: %u, user_block_count: %u", 2586 valid_user_blocks, user_block_count);
2659 valid_user_blocks, user_block_count);
2660 return 1; 2587 return 1;
2661 } 2588 }
2662 2589
@@ -2664,9 +2591,8 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2664 avail_node_count = sbi->total_node_count - sbi->nquota_files - 2591 avail_node_count = sbi->total_node_count - sbi->nquota_files -
2665 F2FS_RESERVED_NODE_NUM; 2592 F2FS_RESERVED_NODE_NUM;
2666 if (valid_node_count > avail_node_count) { 2593 if (valid_node_count > avail_node_count) {
2667 f2fs_msg(sbi->sb, KERN_ERR, 2594 f2fs_err(sbi, "Wrong valid_node_count: %u, avail_node_count: %u",
2668 "Wrong valid_node_count: %u, avail_node_count: %u", 2595 valid_node_count, avail_node_count);
2669 valid_node_count, avail_node_count);
2670 return 1; 2596 return 1;
2671 } 2597 }
2672 2598
@@ -2680,10 +2606,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2680 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) { 2606 for (j = i + 1; j < NR_CURSEG_NODE_TYPE; j++) {
2681 if (le32_to_cpu(ckpt->cur_node_segno[i]) == 2607 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
2682 le32_to_cpu(ckpt->cur_node_segno[j])) { 2608 le32_to_cpu(ckpt->cur_node_segno[j])) {
2683 f2fs_msg(sbi->sb, KERN_ERR, 2609 f2fs_err(sbi, "Node segment (%u, %u) has the same segno: %u",
2684 "Node segment (%u, %u) has the same " 2610 i, j,
2685 "segno: %u", i, j, 2611 le32_to_cpu(ckpt->cur_node_segno[i]));
2686 le32_to_cpu(ckpt->cur_node_segno[i]));
2687 return 1; 2612 return 1;
2688 } 2613 }
2689 } 2614 }
@@ -2695,10 +2620,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2695 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) { 2620 for (j = i + 1; j < NR_CURSEG_DATA_TYPE; j++) {
2696 if (le32_to_cpu(ckpt->cur_data_segno[i]) == 2621 if (le32_to_cpu(ckpt->cur_data_segno[i]) ==
2697 le32_to_cpu(ckpt->cur_data_segno[j])) { 2622 le32_to_cpu(ckpt->cur_data_segno[j])) {
2698 f2fs_msg(sbi->sb, KERN_ERR, 2623 f2fs_err(sbi, "Data segment (%u, %u) has the same segno: %u",
2699 "Data segment (%u, %u) has the same " 2624 i, j,
2700 "segno: %u", i, j, 2625 le32_to_cpu(ckpt->cur_data_segno[i]));
2701 le32_to_cpu(ckpt->cur_data_segno[i]));
2702 return 1; 2626 return 1;
2703 } 2627 }
2704 } 2628 }
@@ -2707,10 +2631,9 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2707 for (j = i; j < NR_CURSEG_DATA_TYPE; j++) { 2631 for (j = i; j < NR_CURSEG_DATA_TYPE; j++) {
2708 if (le32_to_cpu(ckpt->cur_node_segno[i]) == 2632 if (le32_to_cpu(ckpt->cur_node_segno[i]) ==
2709 le32_to_cpu(ckpt->cur_data_segno[j])) { 2633 le32_to_cpu(ckpt->cur_data_segno[j])) {
2710 f2fs_msg(sbi->sb, KERN_ERR, 2634 f2fs_err(sbi, "Data segment (%u) and Data segment (%u) has the same segno: %u",
2711 "Data segment (%u) and Data segment (%u)" 2635 i, j,
2712 " has the same segno: %u", i, j, 2636 le32_to_cpu(ckpt->cur_node_segno[i]));
2713 le32_to_cpu(ckpt->cur_node_segno[i]));
2714 return 1; 2637 return 1;
2715 } 2638 }
2716 } 2639 }
@@ -2721,9 +2644,8 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2721 2644
2722 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 || 2645 if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
2723 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) { 2646 nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
2724 f2fs_msg(sbi->sb, KERN_ERR, 2647 f2fs_err(sbi, "Wrong bitmap size: sit: %u, nat:%u",
2725 "Wrong bitmap size: sit: %u, nat:%u", 2648 sit_bitmap_size, nat_bitmap_size);
2726 sit_bitmap_size, nat_bitmap_size);
2727 return 1; 2649 return 1;
2728 } 2650 }
2729 2651
@@ -2732,23 +2654,20 @@ int f2fs_sanity_check_ckpt(struct f2fs_sb_info *sbi)
2732 if (cp_pack_start_sum < cp_payload + 1 || 2654 if (cp_pack_start_sum < cp_payload + 1 ||
2733 cp_pack_start_sum > blocks_per_seg - 1 - 2655 cp_pack_start_sum > blocks_per_seg - 1 -
2734 NR_CURSEG_TYPE) { 2656 NR_CURSEG_TYPE) {
2735 f2fs_msg(sbi->sb, KERN_ERR, 2657 f2fs_err(sbi, "Wrong cp_pack_start_sum: %u",
2736 "Wrong cp_pack_start_sum: %u", 2658 cp_pack_start_sum);
2737 cp_pack_start_sum);
2738 return 1; 2659 return 1;
2739 } 2660 }
2740 2661
2741 if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) && 2662 if (__is_set_ckpt_flags(ckpt, CP_LARGE_NAT_BITMAP_FLAG) &&
2742 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) { 2663 le32_to_cpu(ckpt->checksum_offset) != CP_MIN_CHKSUM_OFFSET) {
2743 f2fs_msg(sbi->sb, KERN_WARNING, 2664 f2fs_warn(sbi, "layout of large_nat_bitmap is deprecated, run fsck to repair, chksum_offset: %u",
2744 "layout of large_nat_bitmap is deprecated, " 2665 le32_to_cpu(ckpt->checksum_offset));
2745 "run fsck to repair, chksum_offset: %u",
2746 le32_to_cpu(ckpt->checksum_offset));
2747 return 1; 2666 return 1;
2748 } 2667 }
2749 2668
2750 if (unlikely(f2fs_cp_error(sbi))) { 2669 if (unlikely(f2fs_cp_error(sbi))) {
2751 f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck"); 2670 f2fs_err(sbi, "A bug case: need to run fsck");
2752 return 1; 2671 return 1;
2753 } 2672 }
2754 return 0; 2673 return 0;
@@ -2917,17 +2836,16 @@ static int read_raw_super_block(struct f2fs_sb_info *sbi,
2917 for (block = 0; block < 2; block++) { 2836 for (block = 0; block < 2; block++) {
2918 bh = sb_bread(sb, block); 2837 bh = sb_bread(sb, block);
2919 if (!bh) { 2838 if (!bh) {
2920 f2fs_msg(sb, KERN_ERR, "Unable to read %dth superblock", 2839 f2fs_err(sbi, "Unable to read %dth superblock",
2921 block + 1); 2840 block + 1);
2922 err = -EIO; 2841 err = -EIO;
2923 continue; 2842 continue;
2924 } 2843 }
2925 2844
2926 /* sanity checking of raw super */ 2845 /* sanity checking of raw super */
2927 if (sanity_check_raw_super(sbi, bh)) { 2846 if (sanity_check_raw_super(sbi, bh)) {
2928 f2fs_msg(sb, KERN_ERR, 2847 f2fs_err(sbi, "Can't find valid F2FS filesystem in %dth superblock",
2929 "Can't find valid F2FS filesystem in %dth superblock", 2848 block + 1);
2930 block + 1);
2931 err = -EINVAL; 2849 err = -EINVAL;
2932 brelse(bh); 2850 brelse(bh);
2933 continue; 2851 continue;
@@ -3057,36 +2975,32 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
3057#ifdef CONFIG_BLK_DEV_ZONED 2975#ifdef CONFIG_BLK_DEV_ZONED
3058 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM && 2976 if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
3059 !f2fs_sb_has_blkzoned(sbi)) { 2977 !f2fs_sb_has_blkzoned(sbi)) {
3060 f2fs_msg(sbi->sb, KERN_ERR, 2978 f2fs_err(sbi, "Zoned block device feature not enabled\n");
3061 "Zoned block device feature not enabled\n");
3062 return -EINVAL; 2979 return -EINVAL;
3063 } 2980 }
3064 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) { 2981 if (bdev_zoned_model(FDEV(i).bdev) != BLK_ZONED_NONE) {
3065 if (init_blkz_info(sbi, i)) { 2982 if (init_blkz_info(sbi, i)) {
3066 f2fs_msg(sbi->sb, KERN_ERR, 2983 f2fs_err(sbi, "Failed to initialize F2FS blkzone information");
3067 "Failed to initialize F2FS blkzone information");
3068 return -EINVAL; 2984 return -EINVAL;
3069 } 2985 }
3070 if (max_devices == 1) 2986 if (max_devices == 1)
3071 break; 2987 break;
3072 f2fs_msg(sbi->sb, KERN_INFO, 2988 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)",
3073 "Mount Device [%2d]: %20s, %8u, %8x - %8x (zone: %s)", 2989 i, FDEV(i).path,
3074 i, FDEV(i).path, 2990 FDEV(i).total_segments,
3075 FDEV(i).total_segments, 2991 FDEV(i).start_blk, FDEV(i).end_blk,
3076 FDEV(i).start_blk, FDEV(i).end_blk, 2992 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ?
3077 bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HA ? 2993 "Host-aware" : "Host-managed");
3078 "Host-aware" : "Host-managed");
3079 continue; 2994 continue;
3080 } 2995 }
3081#endif 2996#endif
3082 f2fs_msg(sbi->sb, KERN_INFO, 2997 f2fs_info(sbi, "Mount Device [%2d]: %20s, %8u, %8x - %8x",
3083 "Mount Device [%2d]: %20s, %8u, %8x - %8x", 2998 i, FDEV(i).path,
3084 i, FDEV(i).path, 2999 FDEV(i).total_segments,
3085 FDEV(i).total_segments, 3000 FDEV(i).start_blk, FDEV(i).end_blk);
3086 FDEV(i).start_blk, FDEV(i).end_blk); 3001 }
3087 } 3002 f2fs_info(sbi,
3088 f2fs_msg(sbi->sb, KERN_INFO, 3003 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
3089 "IO Block Size: %8d KB", F2FS_IO_SIZE_KB(sbi));
3090 return 0; 3004 return 0;
3091} 3005}
3092 3006
@@ -3132,7 +3046,7 @@ try_onemore:
3132 /* Load the checksum driver */ 3046 /* Load the checksum driver */
3133 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0); 3047 sbi->s_chksum_driver = crypto_alloc_shash("crc32", 0, 0);
3134 if (IS_ERR(sbi->s_chksum_driver)) { 3048 if (IS_ERR(sbi->s_chksum_driver)) {
3135 f2fs_msg(sb, KERN_ERR, "Cannot load crc32 driver."); 3049 f2fs_err(sbi, "Cannot load crc32 driver.");
3136 err = PTR_ERR(sbi->s_chksum_driver); 3050 err = PTR_ERR(sbi->s_chksum_driver);
3137 sbi->s_chksum_driver = NULL; 3051 sbi->s_chksum_driver = NULL;
3138 goto free_sbi; 3052 goto free_sbi;
@@ -3140,7 +3054,7 @@ try_onemore:
3140 3054
3141 /* set a block size */ 3055 /* set a block size */
3142 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) { 3056 if (unlikely(!sb_set_blocksize(sb, F2FS_BLKSIZE))) {
3143 f2fs_msg(sb, KERN_ERR, "unable to set blocksize"); 3057 f2fs_err(sbi, "unable to set blocksize");
3144 goto free_sbi; 3058 goto free_sbi;
3145 } 3059 }
3146 3060
@@ -3164,8 +3078,7 @@ try_onemore:
3164 */ 3078 */
3165#ifndef CONFIG_BLK_DEV_ZONED 3079#ifndef CONFIG_BLK_DEV_ZONED
3166 if (f2fs_sb_has_blkzoned(sbi)) { 3080 if (f2fs_sb_has_blkzoned(sbi)) {
3167 f2fs_msg(sb, KERN_ERR, 3081 f2fs_err(sbi, "Zoned block device support is not enabled");
3168 "Zoned block device support is not enabled");
3169 err = -EOPNOTSUPP; 3082 err = -EOPNOTSUPP;
3170 goto free_sb_buf; 3083 goto free_sb_buf;
3171 } 3084 }
@@ -3273,14 +3186,14 @@ try_onemore:
3273 /* get an inode for meta space */ 3186 /* get an inode for meta space */
3274 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi)); 3187 sbi->meta_inode = f2fs_iget(sb, F2FS_META_INO(sbi));
3275 if (IS_ERR(sbi->meta_inode)) { 3188 if (IS_ERR(sbi->meta_inode)) {
3276 f2fs_msg(sb, KERN_ERR, "Failed to read F2FS meta data inode"); 3189 f2fs_err(sbi, "Failed to read F2FS meta data inode");
3277 err = PTR_ERR(sbi->meta_inode); 3190 err = PTR_ERR(sbi->meta_inode);
3278 goto free_io_dummy; 3191 goto free_io_dummy;
3279 } 3192 }
3280 3193
3281 err = f2fs_get_valid_checkpoint(sbi); 3194 err = f2fs_get_valid_checkpoint(sbi);
3282 if (err) { 3195 if (err) {
3283 f2fs_msg(sb, KERN_ERR, "Failed to get valid F2FS checkpoint"); 3196 f2fs_err(sbi, "Failed to get valid F2FS checkpoint");
3284 goto free_meta_inode; 3197 goto free_meta_inode;
3285 } 3198 }
3286 3199
@@ -3297,7 +3210,7 @@ try_onemore:
3297 /* Initialize device list */ 3210 /* Initialize device list */
3298 err = f2fs_scan_devices(sbi); 3211 err = f2fs_scan_devices(sbi);
3299 if (err) { 3212 if (err) {
3300 f2fs_msg(sb, KERN_ERR, "Failed to find devices"); 3213 f2fs_err(sbi, "Failed to find devices");
3301 goto free_devices; 3214 goto free_devices;
3302 } 3215 }
3303 3216
@@ -3328,14 +3241,14 @@ try_onemore:
3328 /* setup f2fs internal modules */ 3241 /* setup f2fs internal modules */
3329 err = f2fs_build_segment_manager(sbi); 3242 err = f2fs_build_segment_manager(sbi);
3330 if (err) { 3243 if (err) {
3331 f2fs_msg(sb, KERN_ERR, 3244 f2fs_err(sbi, "Failed to initialize F2FS segment manager (%d)",
3332 "Failed to initialize F2FS segment manager (%d)", err); 3245 err);
3333 goto free_sm; 3246 goto free_sm;
3334 } 3247 }
3335 err = f2fs_build_node_manager(sbi); 3248 err = f2fs_build_node_manager(sbi);
3336 if (err) { 3249 if (err) {
3337 f2fs_msg(sb, KERN_ERR, 3250 f2fs_err(sbi, "Failed to initialize F2FS node manager (%d)",
3338 "Failed to initialize F2FS node manager (%d)", err); 3251 err);
3339 goto free_nm; 3252 goto free_nm;
3340 } 3253 }
3341 3254
@@ -3360,7 +3273,7 @@ try_onemore:
3360 /* get an inode for node space */ 3273 /* get an inode for node space */
3361 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi)); 3274 sbi->node_inode = f2fs_iget(sb, F2FS_NODE_INO(sbi));
3362 if (IS_ERR(sbi->node_inode)) { 3275 if (IS_ERR(sbi->node_inode)) {
3363 f2fs_msg(sb, KERN_ERR, "Failed to read node inode"); 3276 f2fs_err(sbi, "Failed to read node inode");
3364 err = PTR_ERR(sbi->node_inode); 3277 err = PTR_ERR(sbi->node_inode);
3365 goto free_stats; 3278 goto free_stats;
3366 } 3279 }
@@ -3368,7 +3281,7 @@ try_onemore:
3368 /* read root inode and dentry */ 3281 /* read root inode and dentry */
3369 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi)); 3282 root = f2fs_iget(sb, F2FS_ROOT_INO(sbi));
3370 if (IS_ERR(root)) { 3283 if (IS_ERR(root)) {
3371 f2fs_msg(sb, KERN_ERR, "Failed to read root inode"); 3284 f2fs_err(sbi, "Failed to read root inode");
3372 err = PTR_ERR(root); 3285 err = PTR_ERR(root);
3373 goto free_node_inode; 3286 goto free_node_inode;
3374 } 3287 }
@@ -3394,8 +3307,7 @@ try_onemore:
3394 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) { 3307 if (f2fs_sb_has_quota_ino(sbi) && !f2fs_readonly(sb)) {
3395 err = f2fs_enable_quotas(sb); 3308 err = f2fs_enable_quotas(sb);
3396 if (err) 3309 if (err)
3397 f2fs_msg(sb, KERN_ERR, 3310 f2fs_err(sbi, "Cannot turn on quotas: error %d", err);
3398 "Cannot turn on quotas: error %d", err);
3399 } 3311 }
3400#endif 3312#endif
3401 /* if there are nt orphan nodes free them */ 3313 /* if there are nt orphan nodes free them */
@@ -3415,13 +3327,10 @@ try_onemore:
3415 if (f2fs_hw_is_readonly(sbi)) { 3327 if (f2fs_hw_is_readonly(sbi)) {
3416 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) { 3328 if (!is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
3417 err = -EROFS; 3329 err = -EROFS;
3418 f2fs_msg(sb, KERN_ERR, 3330 f2fs_err(sbi, "Need to recover fsync data, but write access unavailable");
3419 "Need to recover fsync data, but "
3420 "write access unavailable");
3421 goto free_meta; 3331 goto free_meta;
3422 } 3332 }
3423 f2fs_msg(sbi->sb, KERN_INFO, "write access " 3333 f2fs_info(sbi, "write access unavailable, skipping recovery");
3424 "unavailable, skipping recovery");
3425 goto reset_checkpoint; 3334 goto reset_checkpoint;
3426 } 3335 }
3427 3336
@@ -3436,8 +3345,8 @@ try_onemore:
3436 if (err != -ENOMEM) 3345 if (err != -ENOMEM)
3437 skip_recovery = true; 3346 skip_recovery = true;
3438 need_fsck = true; 3347 need_fsck = true;
3439 f2fs_msg(sb, KERN_ERR, 3348 f2fs_err(sbi, "Cannot recover all fsync data errno=%d",
3440 "Cannot recover all fsync data errno=%d", err); 3349 err);
3441 goto free_meta; 3350 goto free_meta;
3442 } 3351 }
3443 } else { 3352 } else {
@@ -3445,8 +3354,7 @@ try_onemore:
3445 3354
3446 if (!f2fs_readonly(sb) && err > 0) { 3355 if (!f2fs_readonly(sb) && err > 0) {
3447 err = -EINVAL; 3356 err = -EINVAL;
3448 f2fs_msg(sb, KERN_ERR, 3357 f2fs_err(sbi, "Need to recover fsync data");
3449 "Need to recover fsync data");
3450 goto free_meta; 3358 goto free_meta;
3451 } 3359 }
3452 } 3360 }
@@ -3477,17 +3385,16 @@ reset_checkpoint:
3477 /* recover broken superblock */ 3385 /* recover broken superblock */
3478 if (recovery) { 3386 if (recovery) {
3479 err = f2fs_commit_super(sbi, true); 3387 err = f2fs_commit_super(sbi, true);
3480 f2fs_msg(sb, KERN_INFO, 3388 f2fs_info(sbi, "Try to recover %dth superblock, ret: %d",
3481 "Try to recover %dth superblock, ret: %d", 3389 sbi->valid_super_block ? 1 : 2, err);
3482 sbi->valid_super_block ? 1 : 2, err);
3483 } 3390 }
3484 3391
3485 f2fs_join_shrinker(sbi); 3392 f2fs_join_shrinker(sbi);
3486 3393
3487 f2fs_tuning_parameters(sbi); 3394 f2fs_tuning_parameters(sbi);
3488 3395
3489 f2fs_msg(sbi->sb, KERN_NOTICE, "Mounted with checkpoint version = %llx", 3396 f2fs_notice(sbi, "Mounted with checkpoint version = %llx",
3490 cur_cp_version(F2FS_CKPT(sbi))); 3397 cur_cp_version(F2FS_CKPT(sbi)));
3491 f2fs_update_time(sbi, CP_TIME); 3398 f2fs_update_time(sbi, CP_TIME);
3492 f2fs_update_time(sbi, REQ_TIME); 3399 f2fs_update_time(sbi, REQ_TIME);
3493 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK); 3400 clear_sbi_flag(sbi, SBI_CP_DISABLED_QUICK);