about summary refs log tree commit diff stats
path: root/fs/f2fs
diff options
context:
space:
mode:
authorJaegeuk Kim <jaegeuk.kim@samsung.com>2013-05-07 07:47:40 -0400
committerJaegeuk Kim <jaegeuk.kim@samsung.com>2013-05-08 06:54:22 -0400
commit59bbd474abb9dd6a0c1a74df758ec29c7a8b150f (patch)
tree0e1f7f74108b0e9cc4a7aa8a9d80e333344a366a /fs/f2fs
parent23d38844276680abbf33624f56b6779d43f53633 (diff)
f2fs: cover free_nid management with spin_lock
After build_free_nids() searches for free nid candidates in the nat pages and the current journal blocks, it checks whether each candidate has already been allocated, i.e., whether the nat cache holds that nid with an allocated block address. Previously, this check iterated with list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list), but the traversal was not covered by free_nid_list_lock, resulting in a null pointer bug. This patch moves the checking routine inside add_free_nid() so that the unprotected list traversal — and thus the need for the spin_lock there — is eliminated. Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Diffstat (limited to 'fs/f2fs')
-rw-r--r--fs/f2fs/node.c34
1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index e42934e689c5..3df43b4efd89 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1249,9 +1249,11 @@ static void __del_from_free_nid_list(struct free_nid *i)
1249 kmem_cache_free(free_nid_slab, i); 1249 kmem_cache_free(free_nid_slab, i);
1250} 1250}
1251 1251
1252static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid) 1252static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
1253{ 1253{
1254 struct free_nid *i; 1254 struct free_nid *i;
1255 struct nat_entry *ne;
1256 bool allocated = false;
1255 1257
1256 if (nm_i->fcnt > 2 * MAX_FREE_NIDS) 1258 if (nm_i->fcnt > 2 * MAX_FREE_NIDS)
1257 return -1; 1259 return -1;
@@ -1259,6 +1261,18 @@ static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid)
1259 /* 0 nid should not be used */ 1261 /* 0 nid should not be used */
1260 if (nid == 0) 1262 if (nid == 0)
1261 return 0; 1263 return 0;
1264
1265 if (!build)
1266 goto retry;
1267
1268 /* do not add allocated nids */
1269 read_lock(&nm_i->nat_tree_lock);
1270 ne = __lookup_nat_cache(nm_i, nid);
1271 if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
1272 allocated = true;
1273 read_unlock(&nm_i->nat_tree_lock);
1274 if (allocated)
1275 return 0;
1262retry: 1276retry:
1263 i = kmem_cache_alloc(free_nid_slab, GFP_NOFS); 1277 i = kmem_cache_alloc(free_nid_slab, GFP_NOFS);
1264 if (!i) { 1278 if (!i) {
@@ -1309,7 +1323,7 @@ static void scan_nat_page(struct f2fs_nm_info *nm_i,
1309 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr); 1323 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
1310 BUG_ON(blk_addr == NEW_ADDR); 1324 BUG_ON(blk_addr == NEW_ADDR);
1311 if (blk_addr == NULL_ADDR) { 1325 if (blk_addr == NULL_ADDR) {
1312 if (add_free_nid(nm_i, start_nid) < 0) 1326 if (add_free_nid(nm_i, start_nid, true) < 0)
1313 break; 1327 break;
1314 } 1328 }
1315 } 1329 }
@@ -1317,7 +1331,6 @@ static void scan_nat_page(struct f2fs_nm_info *nm_i,
1317 1331
1318static void build_free_nids(struct f2fs_sb_info *sbi) 1332static void build_free_nids(struct f2fs_sb_info *sbi)
1319{ 1333{
1320 struct free_nid *fnid, *next_fnid;
1321 struct f2fs_nm_info *nm_i = NM_I(sbi); 1334 struct f2fs_nm_info *nm_i = NM_I(sbi);
1322 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA); 1335 struct curseg_info *curseg = CURSEG_I(sbi, CURSEG_HOT_DATA);
1323 struct f2fs_summary_block *sum = curseg->sum_blk; 1336 struct f2fs_summary_block *sum = curseg->sum_blk;
@@ -1354,22 +1367,11 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
1354 block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr); 1367 block_t addr = le32_to_cpu(nat_in_journal(sum, i).block_addr);
1355 nid = le32_to_cpu(nid_in_journal(sum, i)); 1368 nid = le32_to_cpu(nid_in_journal(sum, i));
1356 if (addr == NULL_ADDR) 1369 if (addr == NULL_ADDR)
1357 add_free_nid(nm_i, nid); 1370 add_free_nid(nm_i, nid, true);
1358 else 1371 else
1359 remove_free_nid(nm_i, nid); 1372 remove_free_nid(nm_i, nid);
1360 } 1373 }
1361 mutex_unlock(&curseg->curseg_mutex); 1374 mutex_unlock(&curseg->curseg_mutex);
1362
1363 /* remove the free nids from current allocated nids */
1364 list_for_each_entry_safe(fnid, next_fnid, &nm_i->free_nid_list, list) {
1365 struct nat_entry *ne;
1366
1367 read_lock(&nm_i->nat_tree_lock);
1368 ne = __lookup_nat_cache(nm_i, fnid->nid);
1369 if (ne && nat_get_blkaddr(ne) != NULL_ADDR)
1370 remove_free_nid(nm_i, fnid->nid);
1371 read_unlock(&nm_i->nat_tree_lock);
1372 }
1373} 1375}
1374 1376
1375/* 1377/*
@@ -1659,7 +1661,7 @@ flush_now:
1659 } 1661 }
1660 1662
1661 if (nat_get_blkaddr(ne) == NULL_ADDR && 1663 if (nat_get_blkaddr(ne) == NULL_ADDR &&
1662 add_free_nid(NM_I(sbi), nid) <= 0) { 1664 add_free_nid(NM_I(sbi), nid, false) <= 0) {
1663 write_lock(&nm_i->nat_tree_lock); 1665 write_lock(&nm_i->nat_tree_lock);
1664 __del_from_nat_cache(nm_i, ne); 1666 __del_from_nat_cache(nm_i, ne);
1665 write_unlock(&nm_i->nat_tree_lock); 1667 write_unlock(&nm_i->nat_tree_lock);