Diffstat (limited to 'fs/inode.c')
-rw-r--r--  fs/inode.c | 53
 1 file changed, 34 insertions(+), 19 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 4bedac32154f..09e2d7a5f1d2 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -71,7 +71,7 @@ static unsigned int i_hash_shift __read_mostly;
  * allowing for low-overhead inode sync() operations.
  */
 
-static LIST_HEAD(inode_unused);
+static LIST_HEAD(inode_lru);
 static struct hlist_head *inode_hashtable __read_mostly;
 
 /*
@@ -271,6 +271,7 @@ EXPORT_SYMBOL(__destroy_inode);
 
 static void destroy_inode(struct inode *inode)
 {
+	BUG_ON(!list_empty(&inode->i_lru));
 	__destroy_inode(inode);
 	if (inode->i_sb->s_op->destroy_inode)
 		inode->i_sb->s_op->destroy_inode(inode);
@@ -289,7 +290,8 @@ void inode_init_once(struct inode *inode)
 	INIT_HLIST_NODE(&inode->i_hash);
 	INIT_LIST_HEAD(&inode->i_dentry);
 	INIT_LIST_HEAD(&inode->i_devices);
-	INIT_LIST_HEAD(&inode->i_list);
+	INIT_LIST_HEAD(&inode->i_wb_list);
+	INIT_LIST_HEAD(&inode->i_lru);
 	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC);
 	spin_lock_init(&inode->i_data.tree_lock);
 	spin_lock_init(&inode->i_data.i_mmap_lock);
@@ -330,16 +332,16 @@ EXPORT_SYMBOL(ihold);
 
 static void inode_lru_list_add(struct inode *inode)
 {
-	if (list_empty(&inode->i_list)) {
-		list_add(&inode->i_list, &inode_unused);
+	if (list_empty(&inode->i_lru)) {
+		list_add(&inode->i_lru, &inode_lru);
 		percpu_counter_inc(&nr_inodes_unused);
 	}
 }
 
 static void inode_lru_list_del(struct inode *inode)
 {
-	if (!list_empty(&inode->i_list)) {
-		list_del_init(&inode->i_list);
+	if (!list_empty(&inode->i_lru)) {
+		list_del_init(&inode->i_lru);
 		percpu_counter_dec(&nr_inodes_unused);
 	}
 }
@@ -460,8 +462,8 @@ static void dispose_list(struct list_head *head)
 	while (!list_empty(head)) {
 		struct inode *inode;
 
-		inode = list_first_entry(head, struct inode, i_list);
-		list_del_init(&inode->i_list);
+		inode = list_first_entry(head, struct inode, i_lru);
+		list_del_init(&inode->i_lru);
 
 		evict(inode);
 
@@ -507,8 +509,14 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
 			continue;
 		}
 
-		list_move(&inode->i_list, dispose);
 		inode->i_state |= I_FREEING;
+
+		/*
+		 * Move the inode off the IO lists and LRU once I_FREEING is
+		 * set so that it won't get moved back on there if it is dirty.
+		 */
+		list_move(&inode->i_lru, dispose);
+		list_del_init(&inode->i_wb_list);
 		if (!(inode->i_state & (I_DIRTY | I_SYNC)))
 			percpu_counter_dec(&nr_inodes_unused);
 	}
@@ -580,10 +588,10 @@ static void prune_icache(int nr_to_scan)
 	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
 		struct inode *inode;
 
-		if (list_empty(&inode_unused))
+		if (list_empty(&inode_lru))
 			break;
 
-		inode = list_entry(inode_unused.prev, struct inode, i_list);
+		inode = list_entry(inode_lru.prev, struct inode, i_lru);
 
 		/*
 		 * Referenced or dirty inodes are still in use. Give them
@@ -591,14 +599,14 @@
 		 */
 		if (atomic_read(&inode->i_count) ||
 		    (inode->i_state & ~I_REFERENCED)) {
-			list_del_init(&inode->i_list);
+			list_del_init(&inode->i_lru);
 			percpu_counter_dec(&nr_inodes_unused);
 			continue;
 		}
 
 		/* recently referenced inodes get one more pass */
 		if (inode->i_state & I_REFERENCED) {
-			list_move(&inode->i_list, &inode_unused);
+			list_move(&inode->i_lru, &inode_lru);
 			inode->i_state &= ~I_REFERENCED;
 			continue;
 		}
@@ -611,15 +619,21 @@
 			iput(inode);
 			spin_lock(&inode_lock);
 
-			if (inode != list_entry(inode_unused.next,
-					struct inode, i_list))
+			if (inode != list_entry(inode_lru.next,
+					struct inode, i_lru))
 				continue;	/* wrong inode or list_empty */
 			if (!can_unuse(inode))
 				continue;
 		}
-		list_move(&inode->i_list, &freeable);
 		WARN_ON(inode->i_state & I_NEW);
 		inode->i_state |= I_FREEING;
+
+		/*
+		 * Move the inode off the IO lists and LRU once I_FREEING is
+		 * set so that it won't get moved back on there if it is dirty.
+		 */
+		list_move(&inode->i_lru, &freeable);
+		list_del_init(&inode->i_wb_list);
 		percpu_counter_dec(&nr_inodes_unused);
 	}
 	if (current_is_kswapd())
@@ -1340,15 +1354,16 @@ static void iput_final(struct inode *inode)
 		inode->i_state &= ~I_WILL_FREE;
 		__remove_inode_hash(inode);
 	}
+
 	WARN_ON(inode->i_state & I_NEW);
 	inode->i_state |= I_FREEING;
 
 	/*
-	 * After we delete the inode from the LRU here, we avoid moving dirty
-	 * inodes back onto the LRU now because I_FREEING is set and hence
-	 * writeback_single_inode() won't move the inode around.
+	 * Move the inode off the IO lists and LRU once I_FREEING is
+	 * set so that it won't get moved back on there if it is dirty.
 	 */
 	inode_lru_list_del(inode);
+	list_del_init(&inode->i_wb_list);
 
 	__inode_sb_list_del(inode);
 	spin_unlock(&inode_lock);
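For readers skimming the diff: the LRU helpers touched above reduce to one small pattern, namely that an inode sits on the global inode_lru list exactly when its i_lru list head is non-empty, and nr_inodes_unused mirrors the list length. The userspace sketch below restates that pattern outside the kernel; the minimal list implementation, the toy_inode type, and the plain long counter are simplifications invented for illustration only (the real code uses <linux/list.h>, runs under inode_lock, and keeps a percpu counter).

/*
 * Simplified, userspace-only sketch of the i_lru handling pattern in the
 * patch above.  Not kernel code: names such as toy_inode and the plain
 * counter are stand-ins for illustration.
 */
#include <assert.h>
#include <stdio.h>

struct list_head {
	struct list_head *prev, *next;
};

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

/* Insert new right after head. */
static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

/* Unlink entry and re-initialise it so list_empty() is true afterwards. */
static void list_del_init(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
	INIT_LIST_HEAD(entry);
}

struct toy_inode {
	struct list_head i_lru;		/* analogue of inode->i_lru */
};

static LIST_HEAD(inode_lru);		/* analogue of the global inode_lru list */
static long nr_inodes_unused;		/* analogue of nr_inodes_unused */

/* Mirrors inode_lru_list_add(): add at most once, keep the counter in sync. */
static void toy_lru_list_add(struct toy_inode *inode)
{
	if (list_empty(&inode->i_lru)) {
		list_add(&inode->i_lru, &inode_lru);
		nr_inodes_unused++;
	}
}

/* Mirrors inode_lru_list_del(): idempotent removal thanks to list_del_init(). */
static void toy_lru_list_del(struct toy_inode *inode)
{
	if (!list_empty(&inode->i_lru)) {
		list_del_init(&inode->i_lru);
		nr_inodes_unused--;
	}
}

int main(void)
{
	struct toy_inode a;

	INIT_LIST_HEAD(&a.i_lru);
	toy_lru_list_add(&a);
	toy_lru_list_add(&a);		/* second add is a no-op */
	assert(nr_inodes_unused == 1);
	toy_lru_list_del(&a);
	toy_lru_list_del(&a);		/* second del is a no-op */
	assert(nr_inodes_unused == 0 && list_empty(&inode_lru));
	printf("LRU invariant holds\n");
	return 0;
}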