path: root/fs/f2fs/node.c
Diffstat (limited to 'fs/f2fs/node.c')
-rw-r--r--	fs/f2fs/node.c	163
1 file changed, 84 insertions(+), 79 deletions(-)
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 44b8afef43d9..f83326ca32ef 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -31,22 +31,38 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct sysinfo val;
+	unsigned long avail_ram;
 	unsigned long mem_size = 0;
 	bool res = false;
 
 	si_meminfo(&val);
-	/* give 25%, 25%, 50% memory for each components respectively */
+
+	/* only uses low memory */
+	avail_ram = val.totalram - val.totalhigh;
+
+	/* give 25%, 25%, 50%, 50% memory for each components respectively */
 	if (type == FREE_NIDS) {
-		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >> 12;
-		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
+		mem_size = (nm_i->fcnt * sizeof(struct free_nid)) >>
+							PAGE_CACHE_SHIFT;
+		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 	} else if (type == NAT_ENTRIES) {
-		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
-		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
+		mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >>
+							PAGE_CACHE_SHIFT;
+		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 2);
 	} else if (type == DIRTY_DENTS) {
 		if (sbi->sb->s_bdi->dirty_exceeded)
 			return false;
 		mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
-		res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
+		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
+	} else if (type == INO_ENTRIES) {
+		int i;
+
+		if (sbi->sb->s_bdi->dirty_exceeded)
+			return false;
+		for (i = 0; i <= UPDATE_INO; i++)
+			mem_size += (sbi->im[i].ino_num *
+				sizeof(struct ino_entry)) >> PAGE_CACHE_SHIFT;
+		res = mem_size < ((avail_ram * nm_i->ram_thresh / 100) >> 1);
 	}
 	return res;
 }
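
The thresholds above are all computed against low memory only: each cache may grow until it occupies its share (25% for FREE_NIDS and NAT_ENTRIES, 50% for DIRTY_DENTS and INO_ENTRIES) of ram_thresh percent of available low RAM. A standalone userspace sketch of the arithmetic, with a ram_thresh of 10 and hypothetical entry counts and sizes (all values are illustrative, not f2fs defaults):

#include <stdio.h>

#define PAGE_SHIFT	12	/* assumes 4 KiB pages, i.e. PAGE_CACHE_SHIFT == 12 */

int main(void)
{
	unsigned long avail_ram = 262144;	/* low-memory pages: 1 GiB / 4 KiB */
	unsigned long ram_thresh = 10;		/* percent of RAM usable by caches */
	unsigned long nr_free_nids = 100000;	/* hypothetical cached free nids */
	unsigned long entry_bytes = 24;		/* hypothetical sizeof(struct free_nid) */

	/* pages consumed by the cache, rounded down as in the kernel code */
	unsigned long mem_size = (nr_free_nids * entry_bytes) >> PAGE_SHIFT;
	/* 25% share of ram_thresh% of low memory, matching the ">> 2" case */
	unsigned long limit = (avail_ram * ram_thresh / 100) >> 2;

	printf("cache uses %lu pages, limit %lu pages -> %s\n",
	       mem_size, limit, mem_size < limit ? "ok" : "shrink");
	return 0;
}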
@@ -131,7 +147,7 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 
 	if (get_nat_flag(ne, IS_DIRTY))
 		return;
-retry:
+
 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
 	if (!head) {
 		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);
@@ -140,11 +156,7 @@ retry:
 		INIT_LIST_HEAD(&head->set_list);
 		head->set = set;
 		head->entry_cnt = 0;
-
-		if (radix_tree_insert(&nm_i->nat_set_root, set, head)) {
-			cond_resched();
-			goto retry;
-		}
+		f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
 	}
 	list_move_tail(&ne->list, &head->entry_list);
 	nm_i->dirty_nat_cnt++;
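
The open-coded retry loop (insert, cond_resched(), try again) moves into the f2fs_radix_tree_insert() helper. Its definition is not part of this diff; judging from the loop it subsumes, it plausibly looks like the sketch below.

/*
 * Sketch inferred from the removed retry loop, not the verbatim f2fs.h
 * definition: keep retrying until radix_tree_insert() succeeds, yielding
 * the CPU between attempts. Only sound where the key cannot already be
 * present and the caller is allowed to reschedule.
 */
static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
				unsigned long index, void *item)
{
	while (radix_tree_insert(root, index, item))
		cond_resched();
}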
@@ -155,7 +167,7 @@ retry:
 static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 						struct nat_entry *ne)
 {
-	nid_t set = ne->ni.nid / NAT_ENTRY_PER_BLOCK;
+	nid_t set = NAT_BLOCK_OFFSET(ne->ni.nid);
 	struct nat_entry_set *head;
 
 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
@@ -180,11 +192,11 @@ bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid)
 	struct nat_entry *e;
 	bool is_cp = true;
 
-	read_lock(&nm_i->nat_tree_lock);
+	down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (e && !get_nat_flag(e, IS_CHECKPOINTED))
 		is_cp = false;
-	read_unlock(&nm_i->nat_tree_lock);
+	up_read(&nm_i->nat_tree_lock);
 	return is_cp;
 }
 
@@ -194,11 +206,11 @@ bool has_fsynced_inode(struct f2fs_sb_info *sbi, nid_t ino)
 	struct nat_entry *e;
 	bool fsynced = false;
 
-	read_lock(&nm_i->nat_tree_lock);
+	down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, ino);
 	if (e && get_nat_flag(e, HAS_FSYNCED_INODE))
 		fsynced = true;
-	read_unlock(&nm_i->nat_tree_lock);
+	up_read(&nm_i->nat_tree_lock);
 	return fsynced;
 }
 
@@ -208,13 +220,13 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino)
 	struct nat_entry *e;
 	bool need_update = true;
 
-	read_lock(&nm_i->nat_tree_lock);
+	down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, ino);
 	if (e && get_nat_flag(e, HAS_LAST_FSYNC) &&
 			(get_nat_flag(e, IS_CHECKPOINTED) ||
 			 get_nat_flag(e, HAS_FSYNCED_INODE)))
 		need_update = false;
-	read_unlock(&nm_i->nat_tree_lock);
+	up_read(&nm_i->nat_tree_lock);
 	return need_update;
 }
 
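
These hunks, together with the write-side hunks below, convert nat_tree_lock from a spinning rwlock_t (read_lock/write_lock) to a sleeping rw_semaphore (down_read/down_write). That conversion is what makes the rest of the patch possible: once the lock holder may sleep, the unlock-and-retry dances around allocation can be deleted. A minimal sketch of the API swap, using a hypothetical lock name (f2fs itself initializes the field with init_rwsem() further down):

#include <linux/rwsem.h>

static DECLARE_RWSEM(example_lock);	/* hypothetical; statically initialized */

static void read_side(void)
{
	down_read(&example_lock);	/* was read_lock(); holder may now sleep */
	/* ... lookups ... */
	up_read(&example_lock);		/* was read_unlock() */
}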
@@ -222,13 +234,8 @@ static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
 {
 	struct nat_entry *new;
 
-	new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
-	if (!new)
-		return NULL;
-	if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
-		kmem_cache_free(nat_entry_slab, new);
-		return NULL;
-	}
+	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
+	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
 	memset(new, 0, sizeof(struct nat_entry));
 	nat_set_nid(new, nid);
 	nat_reset_flag(new);
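
grab_nat_entry() loses both failure paths because f2fs_kmem_cache_alloc() never returns NULL. The wrapper lives in f2fs.h; based on the checks this hunk deletes, it presumably retries until the slab allocation succeeds, roughly:

/*
 * Rough sketch, not the verbatim f2fs.h definition: retry the slab
 * allocation, rescheduling between attempts, so callers never see NULL.
 * Only usable in contexts that may sleep, which the rwsem conversion
 * above now guarantees for the NAT-cache write paths.
 */
static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
				gfp_t flags)
{
	void *entry;

	while (!(entry = kmem_cache_alloc(cachep, flags)))
		cond_resched();
	return entry;
}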
@@ -241,18 +248,14 @@ static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
 						struct f2fs_nat_entry *ne)
 {
 	struct nat_entry *e;
-retry:
-	write_lock(&nm_i->nat_tree_lock);
+
+	down_write(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (!e) {
 		e = grab_nat_entry(nm_i, nid);
-		if (!e) {
-			write_unlock(&nm_i->nat_tree_lock);
-			goto retry;
-		}
 		node_info_from_raw_nat(&e->ni, ne);
 	}
-	write_unlock(&nm_i->nat_tree_lock);
+	up_write(&nm_i->nat_tree_lock);
 }
 
 static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
@@ -260,15 +263,11 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
 {
 	struct f2fs_nm_info *nm_i = NM_I(sbi);
 	struct nat_entry *e;
-retry:
-	write_lock(&nm_i->nat_tree_lock);
+
+	down_write(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, ni->nid);
 	if (!e) {
 		e = grab_nat_entry(nm_i, ni->nid);
-		if (!e) {
-			write_unlock(&nm_i->nat_tree_lock);
-			goto retry;
-		}
 		e->ni = *ni;
 		f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
 	} else if (new_blkaddr == NEW_ADDR) {
@@ -310,7 +309,7 @@ retry:
 		set_nat_flag(e, HAS_FSYNCED_INODE, true);
 		set_nat_flag(e, HAS_LAST_FSYNC, fsync_done);
 	}
-	write_unlock(&nm_i->nat_tree_lock);
+	up_write(&nm_i->nat_tree_lock);
 }
 
 int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
@@ -320,7 +319,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 	if (available_free_memory(sbi, NAT_ENTRIES))
 		return 0;
 
-	write_lock(&nm_i->nat_tree_lock);
+	down_write(&nm_i->nat_tree_lock);
 	while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
 		struct nat_entry *ne;
 		ne = list_first_entry(&nm_i->nat_entries,
@@ -328,7 +327,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink)
 		__del_from_nat_cache(nm_i, ne);
 		nr_shrink--;
 	}
-	write_unlock(&nm_i->nat_tree_lock);
+	up_write(&nm_i->nat_tree_lock);
 	return nr_shrink;
 }
 
@@ -351,14 +350,14 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni)
 	ni->nid = nid;
 
 	/* Check nat cache */
-	read_lock(&nm_i->nat_tree_lock);
+	down_read(&nm_i->nat_tree_lock);
 	e = __lookup_nat_cache(nm_i, nid);
 	if (e) {
 		ni->ino = nat_get_ino(e);
 		ni->blk_addr = nat_get_blkaddr(e);
 		ni->version = nat_get_version(e);
 	}
-	read_unlock(&nm_i->nat_tree_lock);
+	up_read(&nm_i->nat_tree_lock);
 	if (e)
 		return;
 
@@ -1298,16 +1297,22 @@ static int f2fs_write_node_page(struct page *page,
 		return 0;
 	}
 
-	if (wbc->for_reclaim)
-		goto redirty_out;
-
-	down_read(&sbi->node_write);
+	if (wbc->for_reclaim) {
+		if (!down_read_trylock(&sbi->node_write))
+			goto redirty_out;
+	} else {
+		down_read(&sbi->node_write);
+	}
 	set_page_writeback(page);
 	write_node_page(sbi, page, &fio, nid, ni.blk_addr, &new_addr);
 	set_node_addr(sbi, &ni, new_addr, is_fsync_dnode(page));
 	dec_page_count(sbi, F2FS_DIRTY_NODES);
 	up_read(&sbi->node_write);
 	unlock_page(page);
+
+	if (wbc->for_reclaim)
+		f2fs_submit_merged_bio(sbi, NODE, WRITE);
+
 	return 0;
 
 redirty_out:
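
Two behavioral changes here for reclaim-driven writeback: instead of refusing outright, the page is written when node_write can be taken without blocking (sleeping on it from reclaim risks deadlocking against a checkpoint that holds the semaphore), and the merged NODE bio is submitted immediately afterwards so the page's I/O does not sit in the merge buffer while reclaim waits on it. The locking shape, restated with comments:

/* Annotated restatement of the hunk above, not new logic: */
if (wbc->for_reclaim) {
	/* reclaim context: never block on node_write */
	if (!down_read_trylock(&sbi->node_write))
		goto redirty_out;	/* busy; redirty and retry later */
} else {
	down_read(&sbi->node_write);	/* normal writeback may sleep */
}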
@@ -1410,13 +1415,13 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 
 	if (build) {
 		/* do not add allocated nids */
-		read_lock(&nm_i->nat_tree_lock);
+		down_read(&nm_i->nat_tree_lock);
 		ne = __lookup_nat_cache(nm_i, nid);
 		if (ne &&
 			(!get_nat_flag(ne, IS_CHECKPOINTED) ||
 				nat_get_blkaddr(ne) != NULL_ADDR))
 			allocated = true;
-		read_unlock(&nm_i->nat_tree_lock);
+		up_read(&nm_i->nat_tree_lock);
 		if (allocated)
 			return 0;
 	}
@@ -1425,15 +1430,22 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build)
 	i->nid = nid;
 	i->state = NID_NEW;
 
+	if (radix_tree_preload(GFP_NOFS)) {
+		kmem_cache_free(free_nid_slab, i);
+		return 0;
+	}
+
 	spin_lock(&nm_i->free_nid_list_lock);
 	if (radix_tree_insert(&nm_i->free_nid_root, i->nid, i)) {
 		spin_unlock(&nm_i->free_nid_list_lock);
+		radix_tree_preload_end();
 		kmem_cache_free(free_nid_slab, i);
 		return 0;
 	}
 	list_add_tail(&i->list, &nm_i->free_nid_list);
 	nm_i->fcnt++;
 	spin_unlock(&nm_i->free_nid_list_lock);
+	radix_tree_preload_end();
 	return 1;
 }
 
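
add_free_nid() adopts the standard radix-tree preload pattern: radix_tree_insert() must not allocate tree nodes while free_nid_list_lock is held, so the nodes are preallocated with radix_tree_preload(GFP_NOFS) before taking the spinlock and released with radix_tree_preload_end() afterwards (which also re-enables the preemption that preload disabled). The canonical shape, with generic identifiers rather than f2fs's:

/* Canonical kernel pattern for inserting into a radix tree under a spinlock. */
if (radix_tree_preload(GFP_NOFS))
	return -ENOMEM;		/* preallocation failed; nothing is held */

spin_lock(&lock);
err = radix_tree_insert(&root, index, item);	/* cannot fail on -ENOMEM now */
spin_unlock(&lock);
radix_tree_preload_end();	/* drop preloaded nodes, re-enable preemption */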
@@ -1804,21 +1816,15 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
 		nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
 
 		raw_ne = nat_in_journal(sum, i);
-retry:
-		write_lock(&nm_i->nat_tree_lock);
-		ne = __lookup_nat_cache(nm_i, nid);
-		if (ne)
-			goto found;
 
-		ne = grab_nat_entry(nm_i, nid);
+		down_write(&nm_i->nat_tree_lock);
+		ne = __lookup_nat_cache(nm_i, nid);
 		if (!ne) {
-			write_unlock(&nm_i->nat_tree_lock);
-			goto retry;
+			ne = grab_nat_entry(nm_i, nid);
+			node_info_from_raw_nat(&ne->ni, &raw_ne);
 		}
-		node_info_from_raw_nat(&ne->ni, &raw_ne);
-found:
 		__set_nat_cache_dirty(nm_i, ne);
-		write_unlock(&nm_i->nat_tree_lock);
+		up_write(&nm_i->nat_tree_lock);
 	}
 	update_nats_in_cursum(sum, -i);
 	mutex_unlock(&curseg->curseg_mutex);
@@ -1889,10 +1895,10 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 		}
 		raw_nat_from_node_info(raw_ne, &ne->ni);
 
-		write_lock(&NM_I(sbi)->nat_tree_lock);
+		down_write(&NM_I(sbi)->nat_tree_lock);
 		nat_reset_flag(ne);
 		__clear_nat_cache_dirty(NM_I(sbi), ne);
-		write_unlock(&NM_I(sbi)->nat_tree_lock);
+		up_write(&NM_I(sbi)->nat_tree_lock);
 
 		if (nat_get_blkaddr(ne) == NULL_ADDR)
 			add_free_nid(sbi, nid, false);
@@ -1903,10 +1909,10 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi,
 	else
 		f2fs_put_page(page, 1);
 
-	if (!set->entry_cnt) {
-		radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
-		kmem_cache_free(nat_entry_set_slab, set);
-	}
+	f2fs_bug_on(sbi, set->entry_cnt);
+
+	radix_tree_delete(&NM_I(sbi)->nat_set_root, set->set);
+	kmem_cache_free(nat_entry_set_slab, set);
 }
 
 /*
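
The conditional teardown becomes unconditional plus an assertion: after the flush loop every dirty entry must have been cleared by __clear_nat_cache_dirty(), which decrements set->entry_cnt, so a nonzero count here is a bug rather than a state to tolerate. f2fs_bug_on() is f2fs's assertion macro from f2fs.h; its exact definition is not shown in this diff and has varied across releases, but conceptually:

/* Conceptual sketch only; the real f2fs.h macro has in some releases also
 * flagged the filesystem as needing fsck instead of just warning. */
#define f2fs_bug_on(sbi, condition)	WARN_ON(condition)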
@@ -1923,6 +1929,8 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
 	nid_t set_idx = 0;
 	LIST_HEAD(sets);
 
+	if (!nm_i->dirty_nat_cnt)
+		return;
 	/*
 	 * if there are no enough space in journal to store dirty nat
 	 * entries, remove all entries from journal and merge them
@@ -1931,9 +1939,6 @@ void flush_nat_entries(struct f2fs_sb_info *sbi)
 	if (!__has_cursum_space(sum, nm_i->dirty_nat_cnt, NAT_JOURNAL))
 		remove_nats_in_journal(sbi);
 
-	if (!nm_i->dirty_nat_cnt)
-		return;
-
 	while ((found = __gang_lookup_nat_set(nm_i,
 					set_idx, NATVEC_SIZE, setvec))) {
 		unsigned idx;
@@ -1973,13 +1978,13 @@ static int init_node_manager(struct f2fs_sb_info *sbi)
 
 	INIT_RADIX_TREE(&nm_i->free_nid_root, GFP_ATOMIC);
 	INIT_LIST_HEAD(&nm_i->free_nid_list);
-	INIT_RADIX_TREE(&nm_i->nat_root, GFP_ATOMIC);
-	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_ATOMIC);
+	INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
+	INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
 	INIT_LIST_HEAD(&nm_i->nat_entries);
 
 	mutex_init(&nm_i->build_lock);
 	spin_lock_init(&nm_i->free_nid_list_lock);
-	rwlock_init(&nm_i->nat_tree_lock);
+	init_rwsem(&nm_i->nat_tree_lock);
 
 	nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid);
 	nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP);
@@ -2035,7 +2040,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 	spin_unlock(&nm_i->free_nid_list_lock);
 
 	/* destroy nat cache */
-	write_lock(&nm_i->nat_tree_lock);
+	down_write(&nm_i->nat_tree_lock);
 	while ((found = __gang_lookup_nat_cache(nm_i,
 			nid, NATVEC_SIZE, natvec))) {
 		unsigned idx;
@@ -2044,7 +2049,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi)
 		__del_from_nat_cache(nm_i, natvec[idx]);
 	}
 	f2fs_bug_on(sbi, nm_i->nat_cnt);
-	write_unlock(&nm_i->nat_tree_lock);
+	up_write(&nm_i->nat_tree_lock);
 
 	kfree(nm_i->nat_bitmap);
 	sbi->nm_info = NULL;
@@ -2061,17 +2066,17 @@ int __init create_node_manager_caches(void)
 	free_nid_slab = f2fs_kmem_cache_create("free_nid",
 			sizeof(struct free_nid));
 	if (!free_nid_slab)
-		goto destory_nat_entry;
+		goto destroy_nat_entry;
 
 	nat_entry_set_slab = f2fs_kmem_cache_create("nat_entry_set",
 			sizeof(struct nat_entry_set));
 	if (!nat_entry_set_slab)
-		goto destory_free_nid;
+		goto destroy_free_nid;
 	return 0;
 
-destory_free_nid:
+destroy_free_nid:
 	kmem_cache_destroy(free_nid_slab);
-destory_nat_entry:
+destroy_nat_entry:
 	kmem_cache_destroy(nat_entry_slab);
 fail:
 	return -ENOMEM;