author     Jaegeuk Kim <jaegeuk@kernel.org>    2014-12-05 13:39:49 -0500
committer  Jaegeuk Kim <jaegeuk@kernel.org>    2014-12-08 13:35:05 -0500
commit     9be32d72becca41d7d9b010d7d9be1d39489414f (patch)
tree       7ac34f8b38e3db9411a175b4e5f0ee810c57d7ef /fs/f2fs
parent     769ec6e5b7d4a8115447736871be8bffaaba3a7d (diff)
f2fs: do retry operations with cond_resched
This patch revisits the retry paths in f2fs. The basic idea is to use cond_resched instead of retrying from the very early stage.

Suggested-by: Gu Zheng <guz.fnst@cn.fujitsu.com>
Reviewed-by: Chao Yu <chao2.yu@samsung.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
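As an illustration of the idea (a minimal before/after sketch, not the exact f2fs code; my_slab, my_root and index are placeholder identifiers), the retry loop moves from re-running the allocation to yielding the CPU and retrying only the insert:

        /* before: a failed insert frees the object and restarts from the allocation */
retry:
        entry = f2fs_kmem_cache_alloc(my_slab, GFP_NOFS);
        if (radix_tree_insert(&my_root, index, entry)) {
                kmem_cache_free(my_slab, entry);
                goto retry;
        }

        /* after: keep the object; cond_resched() yields so the transient failure can clear */
        entry = f2fs_kmem_cache_alloc(my_slab, GFP_NOFS);
retry:
        if (radix_tree_insert(&my_root, index, entry)) {
                cond_resched();
                goto retry;
        }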
Diffstat (limited to 'fs/f2fs')
-rw-r--r--   fs/f2fs/f2fs.h      7
-rw-r--r--   fs/f2fs/gc.c        5
-rw-r--r--   fs/f2fs/node.c     41
-rw-r--r--   fs/f2fs/segment.c   5
4 files changed, 20 insertions, 38 deletions
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index c87314099d26..c787fe302918 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1021,6 +1021,13 @@ retry:
         return entry;
 }
 
+static inline void f2fs_radix_tree_insert(struct radix_tree_root *root,
+                                unsigned long index, void *item)
+{
+        while (radix_tree_insert(root, index, item))
+                cond_resched();
+}
+
 #define RAW_IS_INODE(p)  ((p)->footer.nid == (p)->footer.ino)
 
 static inline bool IS_INODE(struct page *page)
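The helper added here loops until radix_tree_insert() succeeds, calling cond_resched() between attempts; it is only suitable when the caller has already ruled out a duplicate key, so the failure it is expected to ride out is the transient kind (typically -ENOMEM while allocating radix tree nodes). A sketch of a call site, mirroring the node.c hunks below:

        head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);
        INIT_LIST_HEAD(&head->set_list);
        head->set = set;
        head->entry_cnt = 0;
        /* loops internally (with cond_resched) until the insert succeeds */
        f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);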
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 2c58c587a3c6..eec0933a4819 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -356,12 +356,11 @@ static void add_gc_inode(struct gc_inode_list *gc_list, struct inode *inode)
                 iput(inode);
                 return;
         }
-retry:
         new_ie = f2fs_kmem_cache_alloc(winode_slab, GFP_NOFS);
         new_ie->inode = inode;
-
+retry:
         if (radix_tree_insert(&gc_list->iroot, inode->i_ino, new_ie)) {
-                kmem_cache_free(winode_slab, new_ie);
+                cond_resched();
                 goto retry;
         }
         list_add_tail(&new_ie->list, &gc_list->ilist);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 8de4f555d530..f83326ca32ef 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -147,7 +147,7 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 
         if (get_nat_flag(ne, IS_DIRTY))
                 return;
-retry:
+
         head = radix_tree_lookup(&nm_i->nat_set_root, set);
         if (!head) {
                 head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);
@@ -156,11 +156,7 @@ retry:
                 INIT_LIST_HEAD(&head->set_list);
                 head->set = set;
                 head->entry_cnt = 0;
-
-                if (radix_tree_insert(&nm_i->nat_set_root, set, head)) {
-                        kmem_cache_free(nat_entry_set_slab, head);
-                        goto retry;
-                }
+                f2fs_radix_tree_insert(&nm_i->nat_set_root, set, head);
         }
         list_move_tail(&ne->list, &head->entry_list);
         nm_i->dirty_nat_cnt++;
@@ -238,13 +234,8 @@ static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
 {
         struct nat_entry *new;
 
-        new = kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
-        if (!new)
-                return NULL;
-        if (radix_tree_insert(&nm_i->nat_root, nid, new)) {
-                kmem_cache_free(nat_entry_slab, new);
-                return NULL;
-        }
+        new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
+        f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
         memset(new, 0, sizeof(struct nat_entry));
         nat_set_nid(new, nid);
         nat_reset_flag(new);
@@ -257,15 +248,11 @@ static void cache_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid,
                                                 struct f2fs_nat_entry *ne)
 {
         struct nat_entry *e;
-retry:
+
         down_write(&nm_i->nat_tree_lock);
         e = __lookup_nat_cache(nm_i, nid);
         if (!e) {
                 e = grab_nat_entry(nm_i, nid);
-                if (!e) {
-                        up_write(&nm_i->nat_tree_lock);
-                        goto retry;
-                }
                 node_info_from_raw_nat(&e->ni, ne);
         }
         up_write(&nm_i->nat_tree_lock);
@@ -276,15 +263,11 @@ static void set_node_addr(struct f2fs_sb_info *sbi, struct node_info *ni,
276{ 263{
277 struct f2fs_nm_info *nm_i = NM_I(sbi); 264 struct f2fs_nm_info *nm_i = NM_I(sbi);
278 struct nat_entry *e; 265 struct nat_entry *e;
279retry: 266
280 down_write(&nm_i->nat_tree_lock); 267 down_write(&nm_i->nat_tree_lock);
281 e = __lookup_nat_cache(nm_i, ni->nid); 268 e = __lookup_nat_cache(nm_i, ni->nid);
282 if (!e) { 269 if (!e) {
283 e = grab_nat_entry(nm_i, ni->nid); 270 e = grab_nat_entry(nm_i, ni->nid);
284 if (!e) {
285 up_write(&nm_i->nat_tree_lock);
286 goto retry;
287 }
288 e->ni = *ni; 271 e->ni = *ni;
289 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR); 272 f2fs_bug_on(sbi, ni->blk_addr == NEW_ADDR);
290 } else if (new_blkaddr == NEW_ADDR) { 273 } else if (new_blkaddr == NEW_ADDR) {
@@ -1833,19 +1816,13 @@ static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
                 nid_t nid = le32_to_cpu(nid_in_journal(sum, i));
 
                 raw_ne = nat_in_journal(sum, i);
-retry:
+
                 down_write(&nm_i->nat_tree_lock);
                 ne = __lookup_nat_cache(nm_i, nid);
-                if (ne)
-                        goto found;
-
-                ne = grab_nat_entry(nm_i, nid);
-                if (!ne) {
-                        up_write(&nm_i->nat_tree_lock);
-                        goto retry;
+                if (!ne) {
+                        ne = grab_nat_entry(nm_i, nid);
+                        node_info_from_raw_nat(&ne->ni, &raw_ne);
                 }
-                node_info_from_raw_nat(&ne->ni, &raw_ne);
-found:
                 __set_nat_cache_dirty(nm_i, ne);
                 up_write(&nm_i->nat_tree_lock);
         }
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 9a33e34d26ce..c79d67e5045f 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -179,13 +179,13 @@ void register_inmem_page(struct inode *inode, struct page *page)
         struct f2fs_inode_info *fi = F2FS_I(inode);
         struct inmem_pages *new;
         int err;
-retry:
+
         new = f2fs_kmem_cache_alloc(inmem_entry_slab, GFP_NOFS);
 
         /* add atomic page indices to the list */
         new->page = page;
         INIT_LIST_HEAD(&new->list);
-
+retry:
         /* increase reference count with clean state */
         mutex_lock(&fi->inmem_lock);
         err = radix_tree_insert(&fi->inmem_root, page->index, new);
@@ -195,7 +195,6 @@ retry:
                 return;
         } else if (err) {
                 mutex_unlock(&fi->inmem_lock);
-                kmem_cache_free(inmem_entry_slab, new);
                 goto retry;
         }
         get_page(page);