author     Chao Yu <chao2.yu@samsung.com>          2013-12-05 04:15:22 -0500
committer  Jaegeuk Kim <jaegeuk.kim@samsung.com>   2013-12-22 20:18:06 -0500
commit     cfb271d485d0ec31eb92b51f4fbe54bf6542e8e6
tree       1565b575545b152af61ba0a18c2860070bd69348
parent     b9987a277f1ec9dba203d04c3a20d967c01a1fba
f2fs: add unlikely() macro for compiler optimization
As we know, some of our branch conditions will rarely be true, so we can
annotate them with 'unlikely' to let the compiler optimize this code; this
way we drop unneeded 'jump' assembly and improve performance.

change log:
o add *unlikely* in as many places as possible across the whole source
  files at once, as suggested by Jaegeuk Kim.

Suggested-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Signed-off-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
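For reference, likely() and unlikely() are thin wrappers around GCC's
__builtin_expect(); the sketch below is simplified from
include/linux/compiler.h. The double negation !!(x) folds any nonzero
value to exactly 0 or 1 so it can be compared against the expected
constant, and the compiler uses the hint to lay out the expected path as
straight-line, fall-through code.

        /* Simplified from include/linux/compiler.h: branch-prediction
         * hints.  !!(x) normalizes any truthy value to 0 or 1 before it
         * is compared with the value the branch is expected to take. */
        #define likely(x)       __builtin_expect(!!(x), 1)
        #define unlikely(x)     __builtin_expect(!!(x), 0)

With optimization enabled, GCC typically places the unlikely(...) body out
of the hot path, so the common case needs no taken jump: that is the
'jump' the message above refers to dropping.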
-rw-r--r--  fs/f2fs/checkpoint.c  26
-rw-r--r--  fs/f2fs/data.c         4
-rw-r--r--  fs/f2fs/dir.c          4
-rw-r--r--  fs/f2fs/f2fs.h         8
-rw-r--r--  fs/f2fs/node.c        16
-rw-r--r--  fs/f2fs/segment.h      2

6 files changed, 33 insertions(+), 27 deletions(-)
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 38f4a2245085..6b2106685b7a 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -82,13 +82,12 @@ static int f2fs_write_meta_page(struct page *page,
         struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 
         /* Should not write any meta pages, if any IO error was occurred */
-        if (wbc->for_reclaim || sbi->por_doing ||
-                        is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)) {
-                dec_page_count(sbi, F2FS_DIRTY_META);
-                wbc->pages_skipped++;
-                set_page_dirty(page);
-                return AOP_WRITEPAGE_ACTIVATE;
-        }
+        if (unlikely(sbi->por_doing ||
+                        is_set_ckpt_flags(F2FS_CKPT(sbi), CP_ERROR_FLAG)))
+                goto redirty_out;
+
+        if (wbc->for_reclaim)
+                goto redirty_out;
 
         wait_on_page_writeback(page);
 
@@ -96,6 +95,12 @@ static int f2fs_write_meta_page(struct page *page,
         dec_page_count(sbi, F2FS_DIRTY_META);
         unlock_page(page);
         return 0;
+
+redirty_out:
+        dec_page_count(sbi, F2FS_DIRTY_META);
+        wbc->pages_skipped++;
+        set_page_dirty(page);
+        return AOP_WRITEPAGE_ACTIVATE;
 }
 
 static int f2fs_write_meta_pages(struct address_space *mapping,
@@ -137,7 +142,7 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
                 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                 PAGECACHE_TAG_DIRTY,
                                 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
-                if (nr_pages == 0)
+                if (unlikely(nr_pages == 0))
                         break;
 
                 for (i = 0; i < nr_pages; i++) {
@@ -150,7 +155,8 @@ long sync_meta_pages(struct f2fs_sb_info *sbi, enum page_type type,
                                 unlock_page(page);
                                 break;
                         }
-                        if (nwritten++ >= nr_to_write)
+                        nwritten++;
+                        if (unlikely(nwritten >= nr_to_write))
                                 break;
                 }
                 pagevec_release(&pvec);
@@ -200,7 +206,7 @@ int acquire_orphan_inode(struct f2fs_sb_info *sbi)
         max_orphans = (sbi->blocks_per_seg - 2 - NR_CURSEG_TYPE)
                                 * F2FS_ORPHANS_PER_BLOCK;
         mutex_lock(&sbi->orphan_inode_mutex);
-        if (sbi->n_orphans >= max_orphans)
+        if (unlikely(sbi->n_orphans >= max_orphans))
                 err = -ENOSPC;
         else
                 sbi->n_orphans++;
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 4e2fc09f0e4f..2ce5a9ef508b 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -251,7 +251,7 @@ int reserve_new_block(struct dnode_of_data *dn)
 
         if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
                 return -EPERM;
-        if (!inc_valid_block_count(sbi, dn->inode, 1))
+        if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
                 return -ENOSPC;
 
         trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);
@@ -711,7 +711,7 @@ static int f2fs_write_data_page(struct page *page,
 
         zero_user_segment(page, offset, PAGE_CACHE_SIZE);
 write:
-        if (sbi->por_doing) {
+        if (unlikely(sbi->por_doing)) {
                 err = AOP_WRITEPAGE_ACTIVATE;
                 goto redirty_out;
         }
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 594fc1bb64ef..0cc26ba07c3b 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -190,7 +190,7 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
         unsigned int max_depth;
         unsigned int level;
 
-        if (namelen > F2FS_NAME_LEN)
+        if (unlikely(namelen > F2FS_NAME_LEN))
                 return NULL;
 
         if (npages == 0)
@@ -461,7 +461,7 @@ int __f2fs_add_link(struct inode *dir, const struct qstr *name, struct inode *in
         }
 
 start:
-        if (current_depth == MAX_DIR_HASH_DEPTH)
+        if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
                 return -ENOSPC;
 
         /* Increase the depth, if required */
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 10eca022e1e1..dca18b3bcc62 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -574,7 +574,7 @@ static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
 static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 {
         WARN_ON((nid >= NM_I(sbi)->max_nid));
-        if (nid >= NM_I(sbi)->max_nid)
+        if (unlikely(nid >= NM_I(sbi)->max_nid))
                 return -EINVAL;
         return 0;
 }
@@ -600,7 +600,7 @@ static inline bool inc_valid_block_count(struct f2fs_sb_info *sbi,
         spin_lock(&sbi->stat_lock);
         valid_block_count =
                 sbi->total_valid_block_count + (block_t)count;
-        if (valid_block_count > sbi->user_block_count) {
+        if (unlikely(valid_block_count > sbi->user_block_count)) {
                 spin_unlock(&sbi->stat_lock);
                 return false;
         }
@@ -719,13 +719,13 @@ static inline bool inc_valid_node_count(struct f2fs_sb_info *sbi,
         spin_lock(&sbi->stat_lock);
 
         valid_block_count = sbi->total_valid_block_count + 1;
-        if (valid_block_count > sbi->user_block_count) {
+        if (unlikely(valid_block_count > sbi->user_block_count)) {
                 spin_unlock(&sbi->stat_lock);
                 return false;
         }
 
         valid_node_count = sbi->total_valid_node_count + 1;
-        if (valid_node_count > sbi->total_node_count) {
+        if (unlikely(valid_node_count > sbi->total_node_count)) {
                 spin_unlock(&sbi->stat_lock);
                 return false;
         }
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 099f06f84e29..2e41636be476 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -94,7 +94,7 @@ static void ra_nat_pages(struct f2fs_sb_info *sbi, int nid)
         int i;
 
         for (i = 0; i < FREE_NID_PAGES; i++, nid += NAT_ENTRY_PER_BLOCK) {
-                if (nid >= nm_i->max_nid)
+                if (unlikely(nid >= nm_i->max_nid))
                         nid = 0;
                 index = current_nat_addr(sbi, nid);
 
@@ -1160,7 +1160,7 @@ int wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
                         struct page *page = pvec.pages[i];
 
                         /* until radix tree lookup accepts end_index */
-                        if (page->index > end)
+                        if (unlikely(page->index > end))
                                 continue;
 
                         if (ino && ino_of_node(page) == ino) {
@@ -1190,7 +1190,7 @@ static int f2fs_write_node_page(struct page *page,
         block_t new_addr;
         struct node_info ni;
 
-        if (sbi->por_doing)
+        if (unlikely(sbi->por_doing))
                 goto redirty_out;
 
         wait_on_page_writeback(page);
@@ -1326,7 +1326,7 @@ static int add_free_nid(struct f2fs_nm_info *nm_i, nid_t nid, bool build)
                 return -1;
 
         /* 0 nid should not be used */
-        if (nid == 0)
+        if (unlikely(nid == 0))
                 return 0;
 
         if (build) {
@@ -1379,7 +1379,7 @@ static void scan_nat_page(struct f2fs_nm_info *nm_i,
 
         for (; i < NAT_ENTRY_PER_BLOCK; i++, start_nid++) {
 
-                if (start_nid >= nm_i->max_nid)
+                if (unlikely(start_nid >= nm_i->max_nid))
                         break;
 
                 blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
@@ -1413,7 +1413,7 @@ static void build_free_nids(struct f2fs_sb_info *sbi)
                 f2fs_put_page(page, 1);
 
                 nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
-                if (nid >= nm_i->max_nid)
+                if (unlikely(nid >= nm_i->max_nid))
                         nid = 0;
 
                 if (i++ == FREE_NID_PAGES)
@@ -1447,7 +1447,7 @@ bool alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid)
         struct free_nid *i = NULL;
         struct list_head *this;
 retry:
-        if (sbi->total_valid_node_count + 1 >= nm_i->max_nid)
+        if (unlikely(sbi->total_valid_node_count + 1 >= nm_i->max_nid))
                 return false;
 
         spin_lock(&nm_i->free_nid_list_lock);
@@ -1557,7 +1557,7 @@ int recover_inode_page(struct f2fs_sb_info *sbi, struct page *page)
         new_ni = old_ni;
         new_ni.ino = ino;
 
-        if (!inc_valid_node_count(sbi, NULL))
+        if (unlikely(!inc_valid_node_count(sbi, NULL)))
                 WARN_ON(1);
         set_node_addr(sbi, &new_ni, NEW_ADDR);
         inc_valid_inode_count(sbi);
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 26812fc0fa12..ea563760f4b7 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -457,7 +457,7 @@ static inline bool has_not_enough_free_secs(struct f2fs_sb_info *sbi, int freed)
         int node_secs = get_blocktype_secs(sbi, F2FS_DIRTY_NODES);
         int dent_secs = get_blocktype_secs(sbi, F2FS_DIRTY_DENTS);
 
-        if (sbi->por_doing)
+        if (unlikely(sbi->por_doing))
                 return false;
 
         return ((free_sections(sbi) + freed) <= (node_secs + 2 * dent_secs +
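
A minimal, self-contained userspace sketch of the same pattern (the
function and names here are hypothetical, for illustration only; they are
not part of this patch): marking the failure branch unlikely keeps the
success path as the fall-through.

        #include <stdio.h>

        /* Same expansion the kernel uses, reproduced for a userspace build. */
        #define unlikely(x)     __builtin_expect(!!(x), 0)

        /* Hypothetical helper mirroring the error-path style of this patch:
         * the out-of-space check is expected to fail only rarely. */
        static int reserve_block(int free_blocks)
        {
                if (unlikely(free_blocks == 0))
                        return -1;      /* rare: no space left */
                return 0;               /* common case: falls straight through */
        }

        int main(void)
        {
                printf("reserve: %d\n", reserve_block(100));    /* prints 0 */
                return 0;
        }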