-rw-r--r--	fs/nilfs2/btree.c	27
-rw-r--r--	fs/nilfs2/cpfile.c	38
-rw-r--r--	fs/nilfs2/dat.c	15
-rw-r--r--	fs/nilfs2/direct.c	13
-rw-r--r--	fs/nilfs2/inode.c	19
-rw-r--r--	fs/nilfs2/ioctl.c	63
-rw-r--r--	fs/nilfs2/mdt.c	4
-rw-r--r--	fs/nilfs2/nilfs.h	1
-rw-r--r--	fs/nilfs2/page.c	10
-rw-r--r--	fs/nilfs2/recovery.c	3
-rw-r--r--	fs/nilfs2/segment.c	78
-rw-r--r--	fs/nilfs2/sufile.c	25
-rw-r--r--	fs/nilfs2/super.c	8
13 files changed, 144 insertions(+), 160 deletions(-)
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 53f0d4c31cb0..6b37a2767293 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -425,7 +425,6 @@ static int nilfs_btree_node_lookup(const struct nilfs_btree *btree,
 	index++;
 
  out:
-	BUG_ON(indexp == NULL);
 	*indexp = index;
 
 	return s == 0;
@@ -477,8 +476,6 @@ static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
 	__u64 ptr;
 	int level, index, found, ret;
 
-	BUG_ON(minlevel <= NILFS_BTREE_LEVEL_DATA);
-
 	node = nilfs_btree_get_root(btree);
 	level = nilfs_btree_node_get_level(btree, node);
 	if ((level < minlevel) ||
@@ -505,7 +502,7 @@ static int nilfs_btree_do_lookup(const struct nilfs_btree *btree,
 	if (index < nilfs_btree_node_nchildren_max(btree, node))
 		ptr = nilfs_btree_node_get_ptr(btree, node, index);
 	else {
-		BUG_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
+		WARN_ON(found || level != NILFS_BTREE_LEVEL_NODE_MIN);
 		/* insert */
 		ptr = NILFS_BMAP_INVALID_PTR;
 	}
@@ -1366,7 +1363,7 @@ static int nilfs_btree_prepare_delete(struct nilfs_btree *btree,
 	} else {
 		/* no siblings */
 		/* the only child of the root node */
-		BUG_ON(level != nilfs_btree_height(btree) - 2);
+		WARN_ON(level != nilfs_btree_height(btree) - 2);
 		if (nilfs_btree_node_get_nchildren(btree, node) - 1 <=
 		    NILFS_BTREE_ROOT_NCHILDREN_MAX) {
 			path[level].bp_op = nilfs_btree_shrink;
@@ -1543,7 +1540,7 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *bmap,
 		break;
 	case 3:
 		nchildren = nilfs_btree_node_get_nchildren(btree, root);
-		BUG_ON(nchildren > 1);
+		WARN_ON(nchildren > 1);
 		ptr = nilfs_btree_node_get_ptr(btree, root, nchildren - 1);
 		ret = nilfs_bmap_get_block(bmap, ptr, &bh);
 		if (ret < 0)
@@ -1552,7 +1549,7 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *bmap,
 		break;
 	default:
 		node = NULL;
-		BUG();
+		return -EINVAL;
 	}
 
 	nchildren = nilfs_btree_node_get_nchildren(btree, node);
@@ -1833,14 +1830,13 @@ static int nilfs_btree_prepare_propagate_v(struct nilfs_btree *btree,
 	while ((++level < nilfs_btree_height(btree) - 1) &&
 	       !buffer_dirty(path[level].bp_bh)) {
 
-		BUG_ON(buffer_nilfs_volatile(path[level].bp_bh));
+		WARN_ON(buffer_nilfs_volatile(path[level].bp_bh));
 		ret = nilfs_btree_prepare_update_v(btree, path, level);
 		if (ret < 0)
 			goto out;
 	}
 
 	/* success */
-	BUG_ON(maxlevelp == NULL);
 	*maxlevelp = level - 1;
 	return 0;
 
@@ -1909,7 +1905,7 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
 	__u64 key;
 	int level, ret;
 
-	BUG_ON(!buffer_dirty(bh));
+	WARN_ON(!buffer_dirty(bh));
 
 	btree = (struct nilfs_btree *)bmap;
 	path = nilfs_btree_alloc_path(btree);
@@ -1928,12 +1924,9 @@ static int nilfs_btree_propagate(const struct nilfs_bmap *bmap,
 
 	ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
 	if (ret < 0) {
-		/* BUG_ON(ret == -ENOENT); */
-		if (ret == -ENOENT) {
+		if (unlikely(ret == -ENOENT))
 			printk(KERN_CRIT "%s: key = %llu, level == %d\n",
 			       __func__, (unsigned long long)key, level);
-			BUG();
-		}
 		goto out;
 	}
 
@@ -2117,7 +2110,7 @@ static int nilfs_btree_assign(struct nilfs_bmap *bmap,
 
 	ret = nilfs_btree_do_lookup(btree, path, key, NULL, level + 1);
 	if (ret < 0) {
-		BUG_ON(ret == -ENOENT);
+		WARN_ON(ret == -ENOENT);
 		goto out;
 	}
 
@@ -2175,12 +2168,12 @@ static int nilfs_btree_mark(struct nilfs_bmap *bmap, __u64 key, int level)
 
 	ret = nilfs_btree_do_lookup(btree, path, key, &ptr, level + 1);
 	if (ret < 0) {
-		BUG_ON(ret == -ENOENT);
+		WARN_ON(ret == -ENOENT);
 		goto out;
 	}
 	ret = nilfs_bmap_get_block(&btree->bt_bmap, ptr, &bh);
 	if (ret < 0) {
-		BUG_ON(ret == -ENOENT);
+		WARN_ON(ret == -ENOENT);
 		goto out;
 	}
 
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index 218b34418508..e90b60dfced9 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -40,10 +40,7 @@ nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
 static unsigned long
 nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
 {
-	__u64 tcno;
-
-	BUG_ON(cno == 0); /* checkpoint number 0 is invalid */
-	tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
+	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;
 	do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
 	return (unsigned long)tcno;
 }
@@ -96,7 +93,7 @@ nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
 	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
 	unsigned int count;
 
-	BUG_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
+	WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
 	count = le32_to_cpu(cp->cp_checkpoints_count) - n;
 	cp->cp_checkpoints_count = cpu_to_le32(count);
 	return count;
@@ -178,6 +175,8 @@ static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
  * %-ENOMEM - Insufficient amount of memory available.
  *
  * %-ENOENT - No such checkpoint.
+ *
+ * %-EINVAL - invalid checkpoint.
  */
 int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
 				__u64 cno,
@@ -191,8 +190,9 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
 	void *kaddr;
 	int ret;
 
-	BUG_ON(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
-	       (cno < nilfs_mdt_cno(cpfile) && create));
+	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
+		     (cno < nilfs_mdt_cno(cpfile) && create)))
+		return -EINVAL;
 
 	down_write(&NILFS_MDT(cpfile)->mi_sem);
 
@@ -288,12 +288,11 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
 	unsigned long tnicps;
 	int ret, ncps, nicps, count, i;
 
-	if ((start == 0) || (start > end)) {
-		printk(KERN_CRIT "%s: start = %llu, end = %llu\n",
-		       __func__,
-		       (unsigned long long)start,
-		       (unsigned long long)end);
-		BUG();
+	if (unlikely(start == 0 || start > end)) {
+		printk(KERN_ERR "%s: invalid range of checkpoint numbers: "
+		       "[%llu, %llu)\n", __func__,
+		       (unsigned long long)start, (unsigned long long)end);
+		return -EINVAL;
 	}
 
 	/* cannot delete the latest checkpoint */
@@ -323,7 +322,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
 				cpfile, cno, cp_bh, kaddr);
 		nicps = 0;
 		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
-			BUG_ON(nilfs_checkpoint_snapshot(cp));
+			WARN_ON(nilfs_checkpoint_snapshot(cp));
 			if (!nilfs_checkpoint_invalid(cp)) {
 				nilfs_checkpoint_set_invalid(cp);
 				nicps++;
@@ -393,6 +392,8 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
 	int n, ret;
 	int ncps, i;
 
+	if (cno == 0)
+		return -ENOENT; /* checkpoint number 0 is invalid */
 	down_read(&NILFS_MDT(cpfile)->mi_sem);
 
 	for (n = 0; cno < cur_cno && n < nci; cno += ncps) {
@@ -532,9 +533,6 @@ int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
 	ssize_t nci;
 	int ret;
 
-	/* checkpoint number 0 is invalid */
-	if (cno == 0)
-		return -ENOENT;
 	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, 1);
 	if (nci < 0)
 		return nci;
@@ -582,6 +580,8 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
 	void *kaddr;
 	int ret;
 
+	if (cno == 0)
+		return -ENOENT; /* checkpoint number 0 is invalid */
 	down_write(&NILFS_MDT(cpfile)->mi_sem);
 
 	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
@@ -698,6 +698,8 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
 	void *kaddr;
 	int ret;
 
+	if (cno == 0)
+		return -ENOENT; /* checkpoint number 0 is invalid */
 	down_write(&NILFS_MDT(cpfile)->mi_sem);
 
 	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
@@ -813,6 +815,8 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
 	void *kaddr;
 	int ret;
 
+	if (cno == 0)
+		return -ENOENT; /* checkpoint number 0 is invalid */
 	down_read(&NILFS_MDT(cpfile)->mi_sem);
 
 	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index 9360920f7d38..bb8a5818e7f1 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -135,7 +135,7 @@ int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req)
 	int ret;
 
 	ret = nilfs_dat_prepare_entry(dat, req, 0);
-	BUG_ON(ret == -ENOENT);
+	WARN_ON(ret == -ENOENT);
 	return ret;
 }
 
@@ -157,7 +157,6 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
 		       (unsigned long long)le64_to_cpu(entry->de_start),
 		       (unsigned long long)le64_to_cpu(entry->de_end),
 		       (unsigned long long)le64_to_cpu(entry->de_blocknr));
-		BUG();
 	}
 	entry->de_blocknr = cpu_to_le64(blocknr);
 	kunmap_atomic(kaddr, KM_USER0);
@@ -180,7 +179,7 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
 
 	ret = nilfs_dat_prepare_entry(dat, req, 0);
 	if (ret < 0) {
-		BUG_ON(ret == -ENOENT);
+		WARN_ON(ret == -ENOENT);
 		return ret;
 	}
 
@@ -216,7 +215,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
 	end = start = le64_to_cpu(entry->de_start);
 	if (!dead) {
 		end = nilfs_mdt_cno(dat);
-		BUG_ON(start > end);
+		WARN_ON(start > end);
 	}
 	entry->de_end = cpu_to_le64(end);
 	blocknr = le64_to_cpu(entry->de_blocknr);
@@ -324,14 +323,16 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
 		return ret;
 	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
 	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
-	if (entry->de_blocknr == cpu_to_le64(0)) {
+	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
 		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
 		       (unsigned long long)vblocknr,
 		       (unsigned long long)le64_to_cpu(entry->de_start),
 		       (unsigned long long)le64_to_cpu(entry->de_end));
-		BUG();
+		kunmap_atomic(kaddr, KM_USER0);
+		brelse(entry_bh);
+		return -EINVAL;
 	}
-	BUG_ON(blocknr == 0);
+	WARN_ON(blocknr == 0);
 	entry->de_blocknr = cpu_to_le64(blocknr);
 	kunmap_atomic(kaddr, KM_USER0);
 
diff --git a/fs/nilfs2/direct.c b/fs/nilfs2/direct.c
index e3ec24850089..c6379e482781 100644
--- a/fs/nilfs2/direct.c
+++ b/fs/nilfs2/direct.c
@@ -210,7 +210,6 @@ static int nilfs_direct_last_key(const struct nilfs_bmap *bmap, __u64 *keyp)
 	if (lastkey == NILFS_DIRECT_KEY_MAX + 1)
 		return -ENOENT;
 
-	BUG_ON(keyp == NULL);
 	*keyp = lastkey;
 
 	return 0;
@@ -366,9 +365,17 @@ static int nilfs_direct_assign(struct nilfs_bmap *bmap,
 
 	direct = (struct nilfs_direct *)bmap;
 	key = nilfs_bmap_data_get_key(bmap, *bh);
-	BUG_ON(key > NILFS_DIRECT_KEY_MAX);
+	if (unlikely(key > NILFS_DIRECT_KEY_MAX)) {
+		printk(KERN_CRIT "%s: invalid key: %llu\n", __func__,
+		       (unsigned long long)key);
+		return -EINVAL;
+	}
 	ptr = nilfs_direct_get_ptr(direct, key);
-	BUG_ON(ptr == NILFS_BMAP_INVALID_PTR);
+	if (unlikely(ptr == NILFS_BMAP_INVALID_PTR)) {
+		printk(KERN_CRIT "%s: invalid pointer: %llu\n", __func__,
+		       (unsigned long long)ptr);
+		return -EINVAL;
+	}
 
 	return direct->d_ops->dop_assign(direct, key, ptr, bh,
 					 blocknr, binfo);
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 4bf1e2c5bac6..b6536bb2a324 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -61,12 +61,6 @@ int nilfs_get_block(struct inode *inode, sector_t blkoff,
 		map_bh(bh_result, inode->i_sb, blknum);
 		goto out;
 	}
-	if (unlikely(ret == 1)) {
-		printk(KERN_ERR "nilfs_get_block: bmap_lookup returns "
-		       "buffer_head pointer (blkoff=%llu, blknum=%lu)\n",
-		       (unsigned long long)blkoff, blknum);
-		BUG();
-	}
 	/* data block was not found */
 	if (ret == -ENOENT && create) {
 		struct nilfs_transaction_info ti;
@@ -85,14 +79,14 @@
 				 * However, the page having this block must
 				 * be locked in this case.
 				 */
-				printk(KERN_ERR
+				printk(KERN_WARNING
 				       "nilfs_get_block: a race condition "
 				       "while inserting a data block. "
 				       "(inode number=%lu, file block "
 				       "offset=%llu)\n",
 				       inode->i_ino,
 				       (unsigned long long)blkoff);
-				BUG();
+				err = 0;
 			} else if (err == -EINVAL) {
 				nilfs_error(inode->i_sb, __func__,
 					    "broken bmap (inode=%lu)\n",
@@ -621,7 +615,6 @@ void nilfs_truncate(struct inode *inode)
 	struct nilfs_transaction_info ti;
 	struct super_block *sb = inode->i_sb;
 	struct nilfs_inode_info *ii = NILFS_I(inode);
-	int ret;
 
 	if (!test_bit(NILFS_I_BMAP, &ii->i_state))
 		return;
@@ -630,8 +623,7 @@
 
 	blocksize = sb->s_blocksize;
 	blkoff = (inode->i_size + blocksize - 1) >> sb->s_blocksize_bits;
-	ret = nilfs_transaction_begin(sb, &ti, 0);
-	BUG_ON(ret);
+	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
 
 	block_truncate_page(inode->i_mapping, inode->i_size, nilfs_get_block);
 
@@ -652,7 +644,6 @@ void nilfs_delete_inode(struct inode *inode)
 	struct nilfs_transaction_info ti;
 	struct super_block *sb = inode->i_sb;
 	struct nilfs_inode_info *ii = NILFS_I(inode);
-	int err;
 
 	if (unlikely(is_bad_inode(inode))) {
 		if (inode->i_data.nrpages)
@@ -660,8 +651,8 @@ void nilfs_delete_inode(struct inode *inode)
 		clear_inode(inode);
 		return;
 	}
-	err = nilfs_transaction_begin(sb, &ti, 0);
-	BUG_ON(err);
+	nilfs_transaction_begin(sb, &ti, 0); /* never fails */
+
 	if (inode->i_data.nrpages)
 		truncate_inode_pages(&inode->i_data, 0);
 
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index cfb27892ffe8..108d281ebca5 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -489,14 +489,14 @@ nilfs_ioctl_do_mark_blocks_dirty(struct the_nilfs *nilfs, __u64 *posp,
 			ret = nilfs_mdt_mark_block_dirty(dat,
 							 bdescs[i].bd_offset);
 			if (ret < 0) {
-				BUG_ON(ret == -ENOENT);
+				WARN_ON(ret == -ENOENT);
 				return ret;
 			}
 		} else {
 			ret = nilfs_bmap_mark(bmap, bdescs[i].bd_offset,
 					      bdescs[i].bd_level);
 			if (ret < 0) {
-				BUG_ON(ret == -ENOENT);
+				WARN_ON(ret == -ENOENT);
 				return ret;
 			}
 		}
@@ -519,7 +519,8 @@ nilfs_ioctl_do_free_segments(struct the_nilfs *nilfs, __u64 *posp, int flags,
 	struct nilfs_sb_info *sbi = nilfs_get_writer(nilfs);
 	int ret;
 
-	BUG_ON(!sbi);
+	if (unlikely(!sbi))
+		return -EROFS;
 	ret = nilfs_segctor_add_segments_to_be_freed(
 		NILFS_SC(sbi), buf, nmembs);
 	nilfs_put_writer(nilfs);
@@ -539,6 +540,7 @@ int nilfs_ioctl_prepare_clean_segments(struct the_nilfs *nilfs,
 				       void __user *argp)
 {
 	struct nilfs_argv argv[5];
+	const char *msg;
 	int dir, ret;
 
 	if (copy_from_user(argv, argp, sizeof(argv)))
@@ -546,31 +548,50 @@
 
 	dir = _IOC_WRITE;
 	ret = nilfs_ioctl_move_blocks(nilfs, &argv[0], dir);
-	if (ret < 0)
-		goto out_move_blks;
+	if (ret < 0) {
+		msg = "cannot read source blocks";
+		goto failed;
+	}
 	ret = nilfs_ioctl_delete_checkpoints(nilfs, &argv[1], dir);
-	if (ret < 0)
-		goto out_del_cps;
+	if (ret < 0) {
+		/*
+		 * can safely abort because checkpoints can be removed
+		 * independently.
+		 */
+		msg = "cannot delete checkpoints";
+		goto failed;
+	}
 	ret = nilfs_ioctl_free_vblocknrs(nilfs, &argv[2], dir);
-	if (ret < 0)
-		goto out_free_vbns;
+	if (ret < 0) {
+		/*
+		 * can safely abort because DAT file is updated atomically
+		 * using a copy-on-write technique.
+		 */
+		msg = "cannot delete virtual blocks from DAT file";
+		goto failed;
+	}
 	ret = nilfs_ioctl_mark_blocks_dirty(nilfs, &argv[3], dir);
-	if (ret < 0)
-		goto out_free_vbns;
+	if (ret < 0) {
+		/*
+		 * can safely abort because the operation is nondestructive.
+		 */
+		msg = "cannot mark copying blocks dirty";
+		goto failed;
+	}
 	ret = nilfs_ioctl_free_segments(nilfs, &argv[4], dir);
-	if (ret < 0)
-		goto out_free_segs;
-
+	if (ret < 0) {
+		/*
+		 * can safely abort because this operation is atomic.
+		 */
+		msg = "cannot set segments to be freed";
+		goto failed;
+	}
 	return 0;
 
- out_free_segs:
-	BUG(); /* XXX: not implemented yet */
- out_free_vbns:
-	BUG();/* XXX: not implemented yet */
- out_del_cps:
-	BUG();/* XXX: not implemented yet */
- out_move_blks:
+ failed:
 	nilfs_remove_all_gcinode(nilfs);
+	printk(KERN_ERR "NILFS: GC failed during preparation: %s: err=%d\n",
+	       msg, ret);
 	return ret;
 }
 
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index e0a632b86feb..47dd815433fd 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -154,10 +154,8 @@ nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
 			ret = -EBUSY;
 			goto failed_bh;
 		}
-	} else {
-		BUG_ON(mode != READ);
+	} else /* mode == READ */
 		lock_buffer(bh);
-	}
 
 	if (buffer_uptodate(bh)) {
 		unlock_buffer(bh);
diff --git a/fs/nilfs2/nilfs.h b/fs/nilfs2/nilfs.h
index d08fb1ce5012..a7f5bc724e33 100644
--- a/fs/nilfs2/nilfs.h
+++ b/fs/nilfs2/nilfs.h
@@ -173,7 +173,6 @@ static inline void nilfs_set_transaction_flag(unsigned int flag)
 {
 	struct nilfs_transaction_info *ti = current->journal_info;
 
-	BUG_ON(!ti);
 	ti->ti_flags |= flag;
 }
 
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 7b18be8cd47a..1bfbba9c0e9a 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -417,7 +417,7 @@ repeat:
 		dpage = find_lock_page(dmap, offset);
 		if (dpage) {
 			/* override existing page on the destination cache */
-			BUG_ON(PageDirty(dpage));
+			WARN_ON(PageDirty(dpage));
 			nilfs_copy_page(dpage, page, 0);
 			unlock_page(dpage);
 			page_cache_release(dpage);
@@ -427,17 +427,15 @@ repeat:
 		/* move the page to the destination cache */
 		spin_lock_irq(&smap->tree_lock);
 		page2 = radix_tree_delete(&smap->page_tree, offset);
-		if (unlikely(page2 != page))
-			NILFS_PAGE_BUG(page, "page removal failed "
-				       "(offset=%lu, page2=%p)",
-				       offset, page2);
+		WARN_ON(page2 != page);
+
 		smap->nrpages--;
 		spin_unlock_irq(&smap->tree_lock);
 
 		spin_lock_irq(&dmap->tree_lock);
 		err = radix_tree_insert(&dmap->page_tree, offset, page);
 		if (unlikely(err < 0)) {
-			BUG_ON(err == -EEXIST);
+			WARN_ON(err == -EEXIST);
 			page->mapping = NULL;
 			page_cache_release(page); /* for cache */
 		} else {
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index a4253f34e138..ef387b19682c 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -92,9 +92,6 @@ static int nilfs_warn_segment_error(int err)
 		printk(KERN_WARNING
 		       "NILFS warning: No super root in the last segment\n");
 		break;
-	case NILFS_SEG_VALID:
-	default:
-		BUG();
 	}
 	return -EINVAL;
 }
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 24d0fbd4271c..9a87410985b9 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -334,8 +334,7 @@ static void nilfs_transaction_lock(struct nilfs_sb_info *sbi,
 {
 	struct nilfs_transaction_info *cur_ti = current->journal_info;
 
-	BUG_ON(cur_ti);
-	BUG_ON(!ti);
+	WARN_ON(cur_ti);
 	ti->ti_flags = NILFS_TI_WRITER;
 	ti->ti_count = 0;
 	ti->ti_save = cur_ti;
@@ -546,8 +545,6 @@ static int nilfs_collect_file_data(struct nilfs_sc_info *sci,
 {
 	int err;
 
-	/* BUG_ON(!buffer_dirty(bh)); */
-	/* excluded by scan_dirty_data_buffers() */
 	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 	if (unlikely(err < 0))
 		return nilfs_handle_bmap_error(err, __func__, inode,
@@ -566,8 +563,6 @@ static int nilfs_collect_file_node(struct nilfs_sc_info *sci,
 {
 	int err;
 
-	/* BUG_ON(!buffer_dirty(bh)); */
-	/* excluded by scan_dirty_node_buffers() */
 	err = nilfs_bmap_propagate(NILFS_I(inode)->i_bmap, bh);
 	if (unlikely(err < 0))
 		return nilfs_handle_bmap_error(err, __func__, inode,
@@ -579,7 +574,7 @@ static int nilfs_collect_file_bmap(struct nilfs_sc_info *sci,
 				   struct buffer_head *bh,
 				   struct inode *inode)
 {
-	BUG_ON(!buffer_dirty(bh));
+	WARN_ON(!buffer_dirty(bh));
 	return nilfs_segctor_add_file_block(sci, bh, inode, sizeof(__le64));
 }
 
@@ -628,7 +623,7 @@ static int nilfs_collect_dat_data(struct nilfs_sc_info *sci,
 static int nilfs_collect_dat_bmap(struct nilfs_sc_info *sci,
 				  struct buffer_head *bh, struct inode *inode)
 {
-	BUG_ON(!buffer_dirty(bh));
+	WARN_ON(!buffer_dirty(bh));
 	return nilfs_segctor_add_file_block(sci, bh, inode,
 					    sizeof(struct nilfs_binfo_dat));
 }
@@ -862,9 +857,9 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
 		nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
 		nilfs_cpfile_put_checkpoint(
 			nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
-	} else {
-		BUG_ON(err == -EINVAL || err == -ENOENT);
-	}
+	} else
+		WARN_ON(err == -EINVAL || err == -ENOENT);
+
 	return err;
 }
 
@@ -879,7 +874,7 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
 	err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
 					  &raw_cp, &bh_cp);
 	if (unlikely(err)) {
-		BUG_ON(err == -EINVAL || err == -ENOENT);
+		WARN_ON(err == -EINVAL || err == -ENOENT);
 		goto failed_ibh;
 	}
 	raw_cp->cp_snapshot_list.ssl_next = 0;
@@ -944,7 +939,6 @@ static void nilfs_fill_in_super_root_crc(struct buffer_head *bh_sr, u32 seed)
 		(struct nilfs_super_root *)bh_sr->b_data;
 	u32 crc;
 
-	BUG_ON(NILFS_SR_BYTES > bh_sr->b_size);
 	crc = crc32_le(seed,
 		       (unsigned char *)raw_sr + sizeof(raw_sr->sr_sum),
 		       NILFS_SR_BYTES - sizeof(raw_sr->sr_sum));
@@ -1022,8 +1016,7 @@ static void nilfs_segctor_cancel_free_segments(struct nilfs_sc_info *sci,
 		if (!(ent->flags & NILFS_SLH_FREED))
 			break;
 		err = nilfs_sufile_cancel_free(sufile, ent->segnum);
-		BUG_ON(err);
-
+		WARN_ON(err); /* do not happen */
 		ent->flags &= ~NILFS_SLH_FREED;
 	}
 }
@@ -1472,7 +1465,7 @@ static int nilfs_segctor_extend_segments(struct nilfs_sc_info *sci,
  failed:
 	list_for_each_entry_safe(segbuf, n, &list, sb_list) {
 		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
-		BUG_ON(ret);
+		WARN_ON(ret); /* never fails */
 		list_del_init(&segbuf->sb_list);
 		nilfs_segbuf_free(segbuf);
 	}
@@ -1488,7 +1481,7 @@ static void nilfs_segctor_free_incomplete_segments(struct nilfs_sc_info *sci,
 	segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
 	if (nilfs->ns_nextnum != segbuf->sb_nextnum) {
 		ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
-		BUG_ON(ret);
+		WARN_ON(ret); /* never fails */
 	}
 	if (segbuf->sb_io_error) {
 		/* Case 1: The first segment failed */
@@ -1504,7 +1497,7 @@ static void nilfs_segctor_free_incomplete_segments(struct nilfs_sc_info *sci,
 
 	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
 		ret = nilfs_sufile_free(nilfs->ns_sufile, segbuf->sb_nextnum);
-		BUG_ON(ret);
+		WARN_ON(ret); /* never fails */
 		if (!done && segbuf->sb_io_error) {
 			if (segbuf->sb_segnum != nilfs->ns_nextnum)
 				/* Case 2: extended segment (!= next) failed */
@@ -1558,7 +1551,7 @@ static void nilfs_segctor_update_segusage(struct nilfs_sc_info *sci,
 	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
 		ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
 						     &raw_su, &bh_su);
-		BUG_ON(ret); /* always succeed because bh_su is dirty */
+		WARN_ON(ret); /* always succeed because bh_su is dirty */
 		live_blocks = segbuf->sb_sum.nblocks +
 			(segbuf->sb_pseg_start - segbuf->sb_fseg_start);
 		raw_su->su_lastmod = cpu_to_le64(sci->sc_seg_ctime);
@@ -1579,7 +1572,7 @@ static void nilfs_segctor_cancel_segusage(struct nilfs_sc_info *sci,
 	segbuf = NILFS_FIRST_SEGBUF(&sci->sc_segbufs);
 	ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
 					     &raw_su, &bh_su);
-	BUG_ON(ret); /* always succeed because bh_su is dirty */
+	WARN_ON(ret); /* always succeed because bh_su is dirty */
 	raw_su->su_nblocks = cpu_to_le32(segbuf->sb_pseg_start -
 					 segbuf->sb_fseg_start);
 	nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum, bh_su);
@@ -1587,7 +1580,7 @@ static void nilfs_segctor_cancel_segusage(struct nilfs_sc_info *sci,
 	list_for_each_entry_continue(segbuf, &sci->sc_segbufs, sb_list) {
 		ret = nilfs_sufile_get_segment_usage(sufile, segbuf->sb_segnum,
 						     &raw_su, &bh_su);
-		BUG_ON(ret); /* always succeed */
+		WARN_ON(ret); /* always succeed */
 		raw_su->su_nblocks = 0;
 		nilfs_sufile_put_segment_usage(sufile, segbuf->sb_segnum,
 					       bh_su);
@@ -1606,7 +1599,7 @@ static void nilfs_segctor_truncate_segments(struct nilfs_sc_info *sci,
 		list_del_init(&segbuf->sb_list);
 		sci->sc_segbuf_nblocks -= segbuf->sb_rest_blocks;
 		ret = nilfs_sufile_free(sufile, segbuf->sb_nextnum);
-		BUG_ON(ret);
+		WARN_ON(ret);
 		nilfs_segbuf_free(segbuf);
 	}
 }
@@ -1923,7 +1916,6 @@ static int nilfs_page_has_uncleared_buffer(struct page *page)
 
 static void __nilfs_end_page_io(struct page *page, int err)
 {
-	/* BUG_ON(err > 0); */
 	if (!err) {
 		if (!nilfs_page_buffers_clean(page))
 			__set_page_dirty_nobuffers(page);
@@ -2262,7 +2254,7 @@ static int nilfs_segctor_deactivate_segments(struct nilfs_sc_info *sci,
 		if (unlikely(err))
 			goto failed;
 		nilfs_segment_usage_clear_active(ent->raw_su);
-		BUG_ON(!buffer_dirty(ent->bh_su));
+		WARN_ON(!buffer_dirty(ent->bh_su));
 	}
 	return 0;
 
@@ -2340,7 +2332,6 @@ static int nilfs_segctor_do_construct(struct nilfs_sc_info *sci, int mode)
 	/* Avoid empty segment */
 	if (sci->sc_stage.scnt == NILFS_ST_DONE &&
 	    NILFS_SEG_EMPTY(&sci->sc_curseg->sb_sum)) {
-		BUG_ON(mode == SC_LSEG_SR);
 		nilfs_segctor_end_construction(sci, nilfs, 1);
 		goto out;
 	}
@@ -2479,9 +2470,8 @@ int nilfs_segctor_add_segments_to_be_freed(struct nilfs_sc_info *sci,
 	struct inode *sufile = nilfs->ns_sufile;
 	LIST_HEAD(list);
 	__u64 *pnum;
-	const char *flag_name;
 	size_t i;
-	int err, err2 = 0;
+	int err;
 
 	for (pnum = segnum, i = 0; i < nsegs; pnum++, i++) {
 		ent = nilfs_alloc_segment_entry(*pnum);
@@ -2495,32 +2485,12 @@ int nilfs_segctor_add_segments_to_be_freed(struct nilfs_sc_info *sci,
 		if (unlikely(err))
 			goto failed;
 
-		if (unlikely(le32_to_cpu(ent->raw_su->su_flags) !=
-			     (1UL << NILFS_SEGMENT_USAGE_DIRTY))) {
-			if (nilfs_segment_usage_clean(ent->raw_su))
-				flag_name = "clean";
-			else if (nilfs_segment_usage_active(ent->raw_su))
-				flag_name = "active";
-			else if (nilfs_segment_usage_volatile_active(
-					 ent->raw_su))
-				flag_name = "volatile active";
-			else if (!nilfs_segment_usage_dirty(ent->raw_su))
-				flag_name = "non-dirty";
-			else
-				flag_name = "erroneous";
-
-			printk(KERN_ERR
-			       "NILFS: %s segment is requested to be cleaned "
-			       "(segnum=%llu)\n",
-			       flag_name, (unsigned long long)ent->segnum);
-			err2 = -EINVAL;
-		}
+		if (unlikely(!nilfs_segment_usage_dirty(ent->raw_su)))
+			printk(KERN_WARNING "NILFS: unused segment is "
+			       "requested to be cleaned (segnum=%llu)\n",
+			       (unsigned long long)ent->segnum);
 		nilfs_close_segment_entry(ent, sufile);
 	}
-	if (unlikely(err2)) {
-		err = err2;
-		goto failed;
-	}
 	list_splice(&list, sci->sc_cleaning_segments.prev);
 	return 0;
 
@@ -2705,8 +2675,6 @@ struct nilfs_segctor_req {
 static void nilfs_segctor_accept(struct nilfs_sc_info *sci,
 				 struct nilfs_segctor_req *req)
 {
-	BUG_ON(!sci);
-
 	req->sc_err = req->sb_err = 0;
 	spin_lock(&sci->sc_state_lock);
 	req->seq_accepted = sci->sc_seq_request;
@@ -3107,7 +3075,7 @@ static void nilfs_segctor_destroy(struct nilfs_sc_info *sci)
 	if (flag || nilfs_segctor_confirm(sci))
 		nilfs_segctor_write_out(sci);
 
-	BUG_ON(!list_empty(&sci->sc_copied_buffers));
+	WARN_ON(!list_empty(&sci->sc_copied_buffers));
 
 	if (!list_empty(&sci->sc_dirty_files)) {
 		nilfs_warning(sbi->s_super, __func__,
@@ -3120,7 +3088,7 @@
 	if (!list_empty(&sci->sc_cleaning_segments))
 		nilfs_dispose_segment_list(&sci->sc_cleaning_segments);
 
-	BUG_ON(!list_empty(&sci->sc_segbufs));
+	WARN_ON(!list_empty(&sci->sc_segbufs));
 
 	if (sci->sc_sketch_inode) {
 		iput(sci->sc_sketch_inode);
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index cc714c72b138..4cf47e03a3ab 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -231,10 +231,11 @@ int nilfs_sufile_cancel_free(struct inode *sufile, __u64 segnum)
 	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
 	su = nilfs_sufile_block_get_segment_usage(
 		sufile, segnum, su_bh, kaddr);
-	if (!nilfs_segment_usage_clean(su)) {
-		printk(KERN_CRIT "%s: segment %llu must be clean\n",
+	if (unlikely(!nilfs_segment_usage_clean(su))) {
+		printk(KERN_WARNING "%s: segment %llu must be clean\n",
 		       __func__, (unsigned long long)segnum);
-		BUG();
+		kunmap_atomic(kaddr, KM_USER0);
+		goto out_su_bh;
 	}
 	nilfs_segment_usage_set_dirty(su);
 	kunmap_atomic(kaddr, KM_USER0);
@@ -249,11 +250,10 @@ int nilfs_sufile_cancel_free(struct inode *sufile, __u64 segnum)
 	nilfs_mdt_mark_buffer_dirty(su_bh);
 	nilfs_mdt_mark_dirty(sufile);
 
+ out_su_bh:
 	brelse(su_bh);
-
  out_header:
 	brelse(header_bh);
-
  out_sem:
 	up_write(&NILFS_MDT(sufile)->mi_sem);
 	return ret;
@@ -317,7 +317,7 @@ int nilfs_sufile_freev(struct inode *sufile, __u64 *segnum, size_t nsegs)
 		kaddr = kmap_atomic(su_bh[i]->b_page, KM_USER0);
 		su = nilfs_sufile_block_get_segment_usage(
 			sufile, segnum[i], su_bh[i], kaddr);
-		BUG_ON(nilfs_segment_usage_error(su));
+		WARN_ON(nilfs_segment_usage_error(su));
 		nilfs_segment_usage_set_clean(su);
 		kunmap_atomic(kaddr, KM_USER0);
 		nilfs_mdt_mark_buffer_dirty(su_bh[i]);
@@ -385,8 +385,8 @@ int nilfs_sufile_get_segment_usage(struct inode *sufile, __u64 segnum,
 	int ret;
 
 	/* segnum is 0 origin */
-	BUG_ON(segnum >= nilfs_sufile_get_nsegments(sufile));
-
+	if (segnum >= nilfs_sufile_get_nsegments(sufile))
+		return -EINVAL;
 	down_write(&NILFS_MDT(sufile)->mi_sem);
 	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1, &bh);
 	if (ret < 0)
@@ -515,6 +515,8 @@ int nilfs_sufile_get_ncleansegs(struct inode *sufile, unsigned long *nsegsp)
  * %-EIO - I/O error.
  *
  * %-ENOMEM - Insufficient amount of memory available.
+ *
+ * %-EINVAL - Invalid segment usage number.
  */
 int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
 {
@@ -524,8 +526,11 @@ int nilfs_sufile_set_error(struct inode *sufile, __u64 segnum)
 	void *kaddr;
 	int ret;
 
-	BUG_ON(segnum >= nilfs_sufile_get_nsegments(sufile));
-
+	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
+		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
+		       __func__, (unsigned long long)segnum);
+		return -EINVAL;
+	}
 	down_write(&NILFS_MDT(sufile)->mi_sem);
 
 	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 2f0e9f7bf152..d0639a6aae9e 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -841,8 +841,11 @@ nilfs_fill_super(struct super_block *sb, void *data, int silent,
 
 	if (sb->s_flags & MS_RDONLY) {
 		if (nilfs_test_opt(sbi, SNAPSHOT)) {
-			if (!nilfs_cpfile_is_snapshot(nilfs->ns_cpfile,
-						      sbi->s_snapshot_cno)) {
+			err = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile,
+						       sbi->s_snapshot_cno);
+			if (err < 0)
+				goto failed_sbi;
+			if (!err) {
 				printk(KERN_ERR
 				       "NILFS: The specified checkpoint is "
 				       "not a snapshot "
@@ -1163,7 +1166,6 @@ nilfs_get_sb(struct file_system_type *fs_type, int flags,
 	} else {
 		struct nilfs_sb_info *sbi = NILFS_SB(s);
 
-		BUG_ON(!sbi || !sbi->s_nilfs);
 		/*
 		 * s_umount protects super_block from unmount process;
 		 * It covers pointers of nilfs_sb_info and the_nilfs.