diff options
author | Jaegeuk Kim <jaegeuk@kernel.org> | 2014-12-04 00:15:10 -0500 |
---|---|---|
committer | Jaegeuk Kim <jaegeuk@kernel.org> | 2014-12-04 00:23:29 -0500 |
commit | 8b26ef98da3387eb57a8a5c1747c6e628948ee0c (patch) | |
tree | 3a36e9ee3b1483c049a91e32f7eda6e54f71e0fc /fs/f2fs | |
parent | 4634d71ed190c99e42ebee450f9a6897d20ee22c (diff) |
f2fs: use rw_semaphore for nat entry lock
Previously, we used rwlock for nat_entry lock.
But, now we have a lot of complex operations in set_node_addr.
(e.g., allocating kernel memories, handling radix_trees, and so on)
So, this patch tries to change the rwlock to an rw_semaphore to yield the CPU to
other threads.
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
Diffstat (limited to 'fs/f2fs')
-rw-r--r-- | fs/f2fs/f2fs.h | 2 | ||||
-rw-r--r-- | fs/f2fs/node.c | 52 |
2 files changed, 27 insertions, 27 deletions
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d04281319dbe..c87314099d26 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h | |||
@@ -332,7 +332,7 @@ struct f2fs_nm_info { | |||
332 | /* NAT cache management */ | 332 | /* NAT cache management */ |
333 | struct radix_tree_root nat_root;/* root of the nat entry cache */ | 333 | struct radix_tree_root nat_root;/* root of the nat entry cache */ |
334 | struct radix_tree_root nat_set_root;/* root of the nat set cache */ | 334 | struct radix_tree_root nat_set_root;/* root of the nat set cache */ |
335 | rwlock_t nat_tree_lock; /* protect nat_tree_lock */ | 335 | struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */ |
336 | struct list_head nat_entries; /* cached nat entry list (clean) */ | 336 | struct list_head nat_entries; /* cached nat entry list (clean) */ |
337 | unsigned int nat_cnt; /* the # of cached nat entries */ | 337 | unsigned int nat_cnt; /* the # of cached nat entries */ |
338 | unsigned int dirty_nat_cnt; /* total num of nat entries in set */ | 338 | unsigned int dirty_nat_cnt; /* total num of nat entries in set */ |
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c index c59341d5539c..b47555fe175a 100644 --- a/fs/f2fs/node.c +++ b/fs/f2fs/node.c | |||
@@ -196,11 +196,11 @@ bool is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid) | |||
196 | struct nat_entry *e; | 196 | struct nat_entry *e; |
197 | bool is_cp = true; | 197 | bool is_cp = true; |
198 | 198 | ||
199 | read_lock(&nm_i->nat_tree_lock); | 199 | down_read(&nm_i->nat_tree_lock); |
200 | e = __lookup_nat_cache(nm_i, nid); | 200 | e = __lookup_nat_cache(nm_i, nid); |
201 | if (e && !get_nat_flag(e, IS_CHECKPOINTED)) | 201 | if (e && !get_nat_flag(e, IS_CHECKPOINTED)) |
202 | is_cp = false; | 202 | is_cp = false; |
203 | read_unlock(&nm_i->nat_tree_lock); | 203 | up_read(&nm_i->nat_tree_lock); |
204 | return is_cp; | 204 | return is_cp; |
205 | } | 205 | } |
206 | 206 | ||
@@ -210,11 +210,11 @@ bool has_fsynced_inode(struct f2fs_sb_info *sbi, nid_t ino) | |||
210 | struct nat_entry *e; | 210 | struct nat_entry *e; |
211 | bool fsynced = false; | 211 | bool fsynced = false; |
212 | 212 | ||
213 | read_lock(&nm_i->nat_tree_lock); | 213 | down_read(&nm_i->nat_tree_lock); |
214 | e = __lookup_nat_cache(nm_i, ino); | 214 | e = __lookup_nat_cache(nm_i, ino); |
215 | if (e && get_nat_flag(e, HAS_FSYNCED_INODE)) | 215 | if (e && get_nat_flag(e, HAS_FSYNCED_INODE)) |
216 | fsynced = true; | 216 | fsynced = true; |
217 | read_unlock(&nm_i->nat_tree_lock); | 217 | up_read(&nm_i->nat_tree_lock); |
218 | return fsynced; | 218 | return fsynced; |
219 | } | 219 | } |
220 | 220 | ||
@@ -224,13 +224,13 @@ bool need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino) | |||
224 | struct nat_entry *e; | 224 | struct nat_entry *e; |
225 | bool need_update = true; | 225 | bool need_update = true; |
226 | 226 | ||
227 | read_lock(&nm_i->nat_tree_lock); | 227 | down_read(&nm_i->nat_tree_lock); |
228 | e = __lookup_nat_cache(nm_i, ino); | 228 | e = __lookup_nat_cache(nm_i, ino); |
229 | if (e && get_nat_flag(e, HAS_LAST_FSYNC) && | 229 | if (e && get_nat_flag(e, HAS_LAST_FSYNC) && |
230 | (get_nat_flag(e, IS_CHECKPOINTED) || | 230 | (get_nat_flag(e, IS_CHECKPOINTED) || |
231 | get_nat_flag(e, HAS_FSYNCED_INODE))) | 231 | get_nat_flag(e, HAS_FSYNCED_INODE))) |
232 | need_update = false; | 232 | need_update = false; |
233 | read_unlock(&nm_i->nat_tree_lock); | 233 | up_read(&nm_i->nat_tree_lock); |
234 | return need_update; | 234 | return need_update; |
235 | } | 235 | } |
236 | 236 | ||
@@ -258,17 +258,17 @@ static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid, | |||
258 | { | 258 | { |
259 | struct nat_entry *e; | 259 | struct nat_entry *e; |
260 | retry: | 260 | retry: |
261 | write_lock(&nm_i->nat_tree_lock); | 261 | down_write(&nm_i->nat_tree_lock); |
262 | e = __lookup_nat_cache(nm_i, nid); | 262 | e = __lookup_nat_cache(nm_i, nid); |
263 | if (!e) { | 263 | if (!e) { |
264 | e = grab_nat_entry(nm_i, nid); | 264 | e = grab_nat_entry(nm_i, nid); |
265 | if (!e) { | 265 | if (!e) { |
266 | write_unlock(&nm_i->nat_tree_lock); | 266 | up_write(&nm_i->nat_tree_lock); |
267 | goto retry; | 267 | goto retry; |
268 | } | 268 | } |
269 | node_info_from_raw_nat(&e->ni, ne); | 269 | node_info_from_raw_nat(&e->ni, ne); |
270 | } | 270 | } |
271 | write_unlock(&nm_i->nat_tree_lock); | 271 | up_write(&nm_i->nat_tree_lock); |
272 | } | 272 | } |
273 | 273 | ||
274 | static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, | 274 | static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, |
@@ -277,12 +277,12 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni, | |||
277 | struct f2fs_nm_info *nm_i = NM_I(sbi); | 277 | struct f2fs_nm_info *nm_i = NM_I(sbi); |
278 | struct nat_entry *e; | 278 | struct nat_entry *e; |
279 | retry: | 279 | retry: |
280 | write_lock(&nm_i->nat_tree_lock); | 280 | down_write(&nm_i->nat_tree_lock); |
281 | e = __lookup_nat_cache(nm_i, ni->nid); | 281 | e = __lookup_nat_cache(nm_i, ni->nid); |
282 | if (!e) { | 282 | if (!e) { |
283 | e = grab_nat_entry(nm_i, ni->nid); | 283 | e = grab_nat_entry(nm_i, ni->nid); |
284 | if (!e) { | 284 | if (!e) { |
285 | write_unlock(&nm_i->nat_tree_lock); | 285 | up_write(&nm_i->nat_tree_lock); |
286 | goto retry; | 286 | goto retry; |
287 | } | 287 | } |
288 | e->ni = *ni; | 288 | e->ni = *ni; |
@@ -326,7 +326,7 @@ retry: | |||
326 | set_nat_flag(e, HAS_FSYNCED_INODE, true); | 326 | set_nat_flag(e, HAS_FSYNCED_INODE, true); |
327 | set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); | 327 | set_nat_flag(e, HAS_LAST_FSYNC, fsync_done); |
328 | } | 328 | } |
329 | write_unlock(&nm_i->nat_tree_lock); | 329 | up_write(&nm_i->nat_tree_lock); |
330 | } | 330 | } |
331 | 331 | ||
332 | int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) | 332 | int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) |
@@ -336,7 +336,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) | |||
336 | if (available_free_memory(sbi, NAT_ENTRIES)) | 336 | if (available_free_memory(sbi, NAT_ENTRIES)) |
337 | return 0; | 337 | return 0; |
338 | 338 | ||
339 | write_lock(&nm_i->nat_tree_lock); | 339 | down_write(&nm_i->nat_tree_lock); |
340 | while (nr_shrink && !list_empty(&nm_i->nat_entries)) { | 340 | while (nr_shrink && !list_empty(&nm_i->nat_entries)) { |
341 | struct nat_entry *ne; | 341 | struct nat_entry *ne; |
342 | ne = list_first_entry(&nm_i->nat_entries, | 342 | ne = list_first_entry(&nm_i->nat_entries, |
@@ -344,7 +344,7 @@ int try_to_free_nats(struct f2fs_sb_info *sbi, int nr_shrink) | |||
344 | __del_from_nat_cache(nm_i, ne); | 344 | __del_from_nat_cache(nm_i, ne); |
345 | nr_shrink--; | 345 | nr_shrink--; |
346 | } | 346 | } |
347 | write_unlock(&nm_i->nat_tree_lock); | 347 | up_write(&nm_i->nat_tree_lock); |
348 | return nr_shrink; | 348 | return nr_shrink; |
349 | } | 349 | } |
350 | 350 | ||
@@ -367,14 +367,14 @@ void get_node_info(struct f2fs_sb_info *sbi, nid_t nid, struct node_info *ni) | |||
367 | ni->nid = nid; | 367 | ni->nid = nid; |
368 | 368 | ||
369 | /* Check nat cache */ | 369 | /* Check nat cache */ |
370 | read_lock(&nm_i->nat_tree_lock); | 370 | down_read(&nm_i->nat_tree_lock); |
371 | e = __lookup_nat_cache(nm_i, nid); | 371 | e = __lookup_nat_cache(nm_i, nid); |
372 | if (e) { | 372 | if (e) { |
373 | ni->ino = nat_get_ino(e); | 373 | ni->ino = nat_get_ino(e); |
374 | ni->blk_addr = nat_get_blkaddr(e); | 374 | ni->blk_addr = nat_get_blkaddr(e); |
375 | ni->version = nat_get_version(e); | 375 | ni->version = nat_get_version(e); |
376 | } | 376 | } |
377 | read_unlock(&nm_i->nat_tree_lock); | 377 | up_read(&nm_i->nat_tree_lock); |
378 | if (e) | 378 | if (e) |
379 | return; | 379 | return; |
380 | 380 | ||
@@ -1432,13 +1432,13 @@ static int add_free_nid(struct f2fs_sb_info *sbi, nid_t nid, bool build) | |||
1432 | 1432 | ||
1433 | if (build) { | 1433 | if (build) { |
1434 | /* do not add allocated nids */ | 1434 | /* do not add allocated nids */ |
1435 | read_lock(&nm_i->nat_tree_lock); | 1435 | down_read(&nm_i->nat_tree_lock); |
1436 | ne = __lookup_nat_cache(nm_i, nid); | 1436 | ne = __lookup_nat_cache(nm_i, nid); |
1437 | if (ne && | 1437 | if (ne && |
1438 | (!get_nat_flag(ne, IS_CHECKPOINTED) || | 1438 | (!get_nat_flag(ne, IS_CHECKPOINTED) || |
1439 | nat_get_blkaddr(ne) != NULL_ADDR)) | 1439 | nat_get_blkaddr(ne) != NULL_ADDR)) |
1440 | allocated = true; | 1440 | allocated = true; |
1441 | read_unlock(&nm_i->nat_tree_lock); | 1441 | up_read(&nm_i->nat_tree_lock); |
1442 | if (allocated) | 1442 | if (allocated) |
1443 | return 0; | 1443 | return 0; |
1444 | } | 1444 | } |
@@ -1827,20 +1827,20 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi) | |||
1827 | 1827 | ||
1828 | raw_ne = nat_in_journal(sum, i); | 1828 | raw_ne = nat_in_journal(sum, i); |
1829 | retry: | 1829 | retry: |
1830 | write_lock(&nm_i->nat_tree_lock); | 1830 | down_write(&nm_i->nat_tree_lock); |
1831 | ne = __lookup_nat_cache(nm_i, nid); | 1831 | ne = __lookup_nat_cache(nm_i, nid); |
1832 | if (ne) | 1832 | if (ne) |
1833 | goto found; | 1833 | goto found; |
1834 | 1834 | ||
1835 | ne = grab_nat_entry(nm_i, nid); | 1835 | ne = grab_nat_entry(nm_i, nid); |
1836 | if (!ne) { | 1836 | if (!ne) { |
1837 | write_unlock(&nm_i->nat_tree_lock); | 1837 | up_write(&nm_i->nat_tree_lock); |
1838 | goto retry; | 1838 | goto retry; |
1839 | } | 1839 | } |
1840 | node_info_from_raw_nat(&ne->ni, &raw_ne); | 1840 | node_info_from_raw_nat(&ne->ni, &raw_ne); |
1841 | found: | 1841 | found: |
1842 | __set_nat_cache_dirty(nm_i, ne); | 1842 | __set_nat_cache_dirty(nm_i, ne); |
1843 | write_unlock(&nm_i->nat_tree_lock); | 1843 | up_write(&nm_i->nat_tree_lock); |
1844 | } | 1844 | } |
1845 | update_nats_in_cursum(sum, -i); | 1845 | update_nats_in_cursum(sum, -i); |
1846 | mutex_unlock(&curseg->curseg_mutex); | 1846 | mutex_unlock(&curseg->curseg_mutex); |
@@ -1911,10 +1911,10 @@ static void __flush_nat_entry_set(struct f2fs_sb_info *sbi, | |||
1911 | } | 1911 | } |
1912 | raw_nat_from_node_info(raw_ne, &ne->ni); | 1912 | raw_nat_from_node_info(raw_ne, &ne->ni); |
1913 | 1913 | ||
1914 | write_lock(&NM_I(sbi)->nat_tree_lock); | 1914 | down_write(&NM_I(sbi)->nat_tree_lock); |
1915 | nat_reset_flag(ne); | 1915 | nat_reset_flag(ne); |
1916 | __clear_nat_cache_dirty(NM_I(sbi), ne); | 1916 | __clear_nat_cache_dirty(NM_I(sbi), ne); |
1917 | write_unlock(&NM_I(sbi)->nat_tree_lock); | 1917 | up_write(&NM_I(sbi)->nat_tree_lock); |
1918 | 1918 | ||
1919 | if (nat_get_blkaddr(ne) == NULL_ADDR) | 1919 | if (nat_get_blkaddr(ne) == NULL_ADDR) |
1920 | add_free_nid(sbi, nid, false); | 1920 | add_free_nid(sbi, nid, false); |
@@ -2000,7 +2000,7 @@ static int init_node_manager(struct f2fs_sb_info *sbi) | |||
2000 | 2000 | ||
2001 | mutex_init(&nm_i->build_lock); | 2001 | mutex_init(&nm_i->build_lock); |
2002 | spin_lock_init(&nm_i->free_nid_list_lock); | 2002 | spin_lock_init(&nm_i->free_nid_list_lock); |
2003 | rwlock_init(&nm_i->nat_tree_lock); | 2003 | init_rwsem(&nm_i->nat_tree_lock); |
2004 | 2004 | ||
2005 | nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); | 2005 | nm_i->next_scan_nid = le32_to_cpu(sbi->ckpt->next_free_nid); |
2006 | nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); | 2006 | nm_i->bitmap_size = __bitmap_size(sbi, NAT_BITMAP); |
@@ -2056,7 +2056,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) | |||
2056 | spin_unlock(&nm_i->free_nid_list_lock); | 2056 | spin_unlock(&nm_i->free_nid_list_lock); |
2057 | 2057 | ||
2058 | /* destroy nat cache */ | 2058 | /* destroy nat cache */ |
2059 | write_lock(&nm_i->nat_tree_lock); | 2059 | down_write(&nm_i->nat_tree_lock); |
2060 | while ((found = __gang_lookup_nat_cache(nm_i, | 2060 | while ((found = __gang_lookup_nat_cache(nm_i, |
2061 | nid, NATVEC_SIZE, natvec))) { | 2061 | nid, NATVEC_SIZE, natvec))) { |
2062 | unsigned idx; | 2062 | unsigned idx; |
@@ -2065,7 +2065,7 @@ void destroy_node_manager(struct f2fs_sb_info *sbi) | |||
2065 | __del_from_nat_cache(nm_i, natvec[idx]); | 2065 | __del_from_nat_cache(nm_i, natvec[idx]); |
2066 | } | 2066 | } |
2067 | f2fs_bug_on(sbi, nm_i->nat_cnt); | 2067 | f2fs_bug_on(sbi, nm_i->nat_cnt); |
2068 | write_unlock(&nm_i->nat_tree_lock); | 2068 | up_write(&nm_i->nat_tree_lock); |
2069 | 2069 | ||
2070 | kfree(nm_i->nat_bitmap); | 2070 | kfree(nm_i->nat_bitmap); |
2071 | sbi->nm_info = NULL; | 2071 | sbi->nm_info = NULL; |