 fs/buffer.c                 |   2
 fs/drop_caches.c            |   4
 fs/fs-writeback.c           |  35
 fs/hugetlbfs/inode.c        |   2
 fs/inode.c                  |  65
 fs/notify/inode_mark.c      |  14
 fs/notify/inotify/inotify.c |  16
 fs/quota/dquot.c            |   6
 include/linux/writeback.h   |   1
 mm/backing-dev.c            |   4
 10 files changed, 30 insertions(+), 119 deletions(-)
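
This series removes the global inode_lock and leaves each data structure under the finer-grained lock that already guards it. As an orientation aid, a sketch of the replacement locks follows; the nesting order is inferred from the call sites in the hunks below rather than stated anywhere in the patch:

    /*
     * Sketch (inferred, outermost first): locks taking over inode_lock's
     * duties in this series.
     *
     *   sb_inode_list_lock  - membership of the per-sb s_inodes list
     *   inode->i_lock       - i_state, i_count and other per-inode fields
     *   inode_hash_lock     - the inode hash table chains (nests inside
     *                         i_lock, see __insert_inode_hash below)
     *   wb_inode_list_lock  - the bdi b_dirty/b_io/b_more_io writeback
     *                         lists (nests inside i_lock, see sync_inode)
     */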
diff --git a/fs/buffer.c b/fs/buffer.c
index b34323cfe2da..416a2686ec66 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -1145,7 +1145,7 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
  * inode list.
  *
  * mark_buffer_dirty() is atomic. It takes bh->b_page->mapping->private_lock,
- * mapping->tree_lock and the global inode_lock.
+ * and mapping->tree_lock.
  */
 void mark_buffer_dirty(struct buffer_head *bh)
 {
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 16efb486c693..6947cb2ef50c 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -16,7 +16,6 @@ static void drop_pagecache_sb(struct super_block *sb)
 {
         struct inode *inode, *toput_inode = NULL;
 
-        spin_lock(&inode_lock);
         spin_lock(&sb_inode_list_lock);
         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                 spin_lock(&inode->i_lock);
@@ -28,15 +27,12 @@ static void drop_pagecache_sb(struct super_block *sb)
                 __iget(inode);
                 spin_unlock(&inode->i_lock);
                 spin_unlock(&sb_inode_list_lock);
-                spin_unlock(&inode_lock);
                 invalidate_mapping_pages(inode->i_mapping, 0, -1);
                 iput(toput_inode);
                 toput_inode = inode;
-                spin_lock(&inode_lock);
                 spin_lock(&sb_inode_list_lock);
         }
         spin_unlock(&sb_inode_list_lock);
-        spin_unlock(&inode_lock);
         iput(toput_inode);
 }
 
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 2c9481e7b01d..07d70704078e 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -311,7 +311,7 @@ static void requeue_io(struct inode *inode)
 static void inode_sync_complete(struct inode *inode)
 {
         /*
-         * Prevent speculative execution through spin_unlock(&inode_lock);
+         * Prevent speculative execution through spin_unlock(&inode->i_lock);
         */
         smp_mb();
         wake_up_bit(&inode->i_state, __I_SYNC);
@@ -403,9 +403,7 @@ static void inode_wait_for_writeback(struct inode *inode)
         do {
                 spin_unlock(&wb_inode_list_lock);
                 spin_unlock(&inode->i_lock);
-                spin_unlock(&inode_lock);
                 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
-                spin_lock(&inode_lock);
                 spin_lock(&inode->i_lock);
                 spin_lock(&wb_inode_list_lock);
         } while (inode->i_state & I_SYNC);
@@ -466,7 +464,6 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 
         spin_unlock(&wb_inode_list_lock);
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
 
         ret = do_writepages(mapping, wbc);
 
@@ -483,7 +480,6 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
                         ret = err;
         }
 
-        spin_lock(&inode_lock);
         spin_lock(&inode->i_lock);
         spin_lock(&wb_inode_list_lock);
         inode->i_state &= ~I_SYNC;
@@ -628,7 +624,6 @@ static void writeback_inodes_wb(struct bdi_writeback *wb,
         struct super_block *sb = wbc->sb, *pin_sb = NULL;
         const unsigned long start = jiffies; /* livelock avoidance */
 
-        spin_lock(&inode_lock);
again:
         spin_lock(&wb_inode_list_lock);
 
@@ -688,10 +683,8 @@ again:
                 }
                 spin_unlock(&wb_inode_list_lock);
                 spin_unlock(&inode->i_lock);
-                spin_unlock(&inode_lock);
                 iput(inode);
                 cond_resched();
-                spin_lock(&inode_lock);
                 spin_lock(&wb_inode_list_lock);
                 if (wbc->nr_to_write <= 0) {
                         wbc->more_io = 1;
@@ -703,8 +696,6 @@ again:
         spin_unlock(&wb_inode_list_lock);
 
         unpin_sb_for_writeback(&pin_sb);
-
-        spin_unlock(&inode_lock);
         /* Leave any unwritten inodes on b_io */
 }
 
@@ -817,20 +808,17 @@ static long wb_writeback(struct bdi_writeback *wb,
                 * we'll just busyloop.
                 */
retry:
-                spin_lock(&inode_lock);
                 spin_lock(&wb_inode_list_lock);
                 if (!list_empty(&wb->b_more_io)) {
                         inode = list_entry(wb->b_more_io.prev,
                                                 struct inode, i_list);
                         if (!spin_trylock(&inode->i_lock)) {
                                 spin_unlock(&wb_inode_list_lock);
-                                spin_unlock(&inode_lock);
                                 goto retry;
                         }
                         inode_wait_for_writeback(inode);
                 }
                 spin_unlock(&wb_inode_list_lock);
-                spin_unlock(&inode_lock);
         }
 
         return wrote;
@@ -1085,7 +1073,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
         if (unlikely(block_dump))
                 block_dump___mark_inode_dirty(inode);
 
-        spin_lock(&inode_lock);
         spin_lock(&inode->i_lock);
         if ((inode->i_state & flags) != flags) {
                 const int was_dirty = inode->i_state & I_DIRTY;
@@ -1134,7 +1121,6 @@ void __mark_inode_dirty(struct inode *inode, int flags)
         }
out:
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
 }
 EXPORT_SYMBOL(__mark_inode_dirty);
 
@@ -1165,7 +1151,6 @@ static void wait_sb_inodes(struct super_block *sb)
         */
         WARN_ON(!rwsem_is_locked(&sb->s_umount));
 
-        spin_lock(&inode_lock);
         spin_lock(&sb_inode_list_lock);
 
         /*
@@ -1190,14 +1175,12 @@ static void wait_sb_inodes(struct super_block *sb)
                 __iget(inode);
                 spin_unlock(&inode->i_lock);
                 spin_unlock(&sb_inode_list_lock);
-                spin_unlock(&inode_lock);
                 /*
-                 * We hold a reference to 'inode' so it couldn't have
-                 * been removed from s_inodes list while we dropped the
-                 * inode_lock. We cannot iput the inode now as we can
-                 * be holding the last reference and we cannot iput it
-                 * under inode_lock. So we keep the reference and iput
-                 * it later.
+                 * We hold a reference to 'inode' so it couldn't have been
+                 * removed from s_inodes list while we dropped the
+                 * sb_inode_list_lock. We cannot iput the inode now as we can
+                 * be holding the last reference and we cannot iput it under
+                 * spinlock. So we keep the reference and iput it later.
                 */
                 iput(old_inode);
                 old_inode = inode;
@@ -1206,11 +1189,9 @@ static void wait_sb_inodes(struct super_block *sb)
 
                 cond_resched();
 
-                spin_lock(&inode_lock);
                 spin_lock(&sb_inode_list_lock);
         }
         spin_unlock(&sb_inode_list_lock);
-        spin_unlock(&inode_lock);
         iput(old_inode);
 }
 
@@ -1292,13 +1273,11 @@ int write_inode_now(struct inode *inode, int sync)
                 wbc.nr_to_write = 0;
 
         might_sleep();
-        spin_lock(&inode_lock);
         spin_lock(&inode->i_lock);
         spin_lock(&wb_inode_list_lock);
         ret = writeback_single_inode(inode, &wbc);
         spin_unlock(&wb_inode_list_lock);
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
         if (sync)
                 inode_sync_wait(inode);
         return ret;
@@ -1320,13 +1299,11 @@ int sync_inode(struct inode *inode, struct writeback_control *wbc)
 {
         int ret;
 
-        spin_lock(&inode_lock);
         spin_lock(&inode->i_lock);
         spin_lock(&wb_inode_list_lock);
         ret = writeback_single_inode(inode, wbc);
         spin_unlock(&wb_inode_list_lock);
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
         return ret;
 }
 EXPORT_SYMBOL(sync_inode);
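
The comment rewritten in wait_sb_inodes() above spells out a pattern the whole series leans on: an inode pinned with __iget() while spinlocks are held must not be iput() until the locks are dropped, so each loop iteration carries the previous inode's reference and releases it outside the locks. A minimal sketch of that shape (an illustration, not code from the patch):

    struct inode *inode, *old_inode = NULL;

    spin_lock(&sb_inode_list_lock);
    list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
            spin_lock(&inode->i_lock);
            /* ... skip inodes that must not be pinned (I_FREEING etc.) ... */
            __iget(inode);
            spin_unlock(&inode->i_lock);
            spin_unlock(&sb_inode_list_lock);
            iput(old_inode);        /* safe: no spinlocks held here */
            old_inode = inode;      /* carry the reference one iteration */
            spin_lock(&sb_inode_list_lock);
    }
    spin_unlock(&sb_inode_list_lock);
    iput(old_inode);                /* drop the last carried reference */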
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index ab1e261738bf..5b50a7a2225e 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -377,7 +377,7 @@ static void hugetlbfs_delete_inode(struct inode *inode)
         clear_inode(inode);
 }
 
-static void hugetlbfs_forget_inode(struct inode *inode) __releases(inode_lock)
+static void hugetlbfs_forget_inode(struct inode *inode)
 {
         if (generic_detach_inode(inode)) {
                 truncate_hugepages(inode, 0);
diff --git a/fs/inode.c b/fs/inode.c
index ab43db313517..77dacd47b492 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -84,7 +84,6 @@ static struct hlist_head *inode_hashtable __read_mostly;
  * NOTE! You also have to own the lock if you change
  * the i_state of an inode while it is in use..
  */
-DEFINE_SPINLOCK(inode_lock);
 DEFINE_SPINLOCK(sb_inode_list_lock);
 DEFINE_SPINLOCK(wb_inode_list_lock);
 DEFINE_SPINLOCK(inode_hash_lock);
@@ -355,16 +354,14 @@ static void dispose_list(struct list_head *head)
                 truncate_inode_pages(&inode->i_data, 0);
                 clear_inode(inode);
 
-                spin_lock(&inode_lock);
                 spin_lock(&sb_inode_list_lock);
                 spin_lock(&inode->i_lock);
                 spin_lock(&inode_hash_lock);
                 hlist_del_init(&inode->i_hash);
                 spin_unlock(&inode_hash_lock);
                 list_del_init(&inode->i_sb_list);
-                spin_unlock(&sb_inode_list_lock);
                 spin_unlock(&inode->i_lock);
-                spin_unlock(&inode_lock);
+                spin_unlock(&sb_inode_list_lock);
 
                 wake_up_inode(inode);
                 destroy_inode(inode);
@@ -392,7 +389,6 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
                 * change during umount anymore, and because iprune_sem keeps
                 * shrink_icache_memory() away.
                 */
-                cond_resched_lock(&inode_lock);
                 cond_resched_lock(&sb_inode_list_lock);
 
                 next = next->next;
@@ -437,13 +433,11 @@ int invalidate_inodes(struct super_block *sb)
         LIST_HEAD(throw_away);
 
         down_write(&iprune_sem);
-        spin_lock(&inode_lock);
         spin_lock(&sb_inode_list_lock);
         inotify_unmount_inodes(&sb->s_inodes);
         fsnotify_unmount_inodes(&sb->s_inodes);
         busy = invalidate_list(&sb->s_inodes, &throw_away);
         spin_unlock(&sb_inode_list_lock);
-        spin_unlock(&inode_lock);
 
         dispose_list(&throw_away);
         up_write(&iprune_sem);
@@ -486,7 +480,6 @@ static void prune_icache(int nr_to_scan)
         unsigned long reap = 0;
 
         down_read(&iprune_sem);
-        spin_lock(&inode_lock);
again:
         spin_lock(&wb_inode_list_lock);
         for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
@@ -510,12 +503,10 @@ again:
                         spin_unlock(&wb_inode_list_lock);
                         __iget(inode);
                         spin_unlock(&inode->i_lock);
-                        spin_unlock(&inode_lock);
                         if (remove_inode_buffers(inode))
                                 reap += invalidate_mapping_pages(&inode->i_data,
                                                                 0, -1);
                         iput(inode);
-                        spin_lock(&inode_lock);
again2:
                         spin_lock(&wb_inode_list_lock);
 
@@ -542,7 +533,6 @@ again2:
                 __count_vm_events(KSWAPD_INODESTEAL, reap);
         else
                 __count_vm_events(PGINODESTEAL, reap);
-        spin_unlock(&inode_lock);
         spin_unlock(&wb_inode_list_lock);
 
         dispose_list(&freeable);
@@ -693,12 +683,10 @@ void inode_add_to_lists(struct super_block *sb, struct inode *inode)
 {
         struct hlist_head *head = inode_hashtable + hash(sb, inode->i_ino);
 
-        spin_lock(&inode_lock);
         spin_lock(&sb_inode_list_lock);
         spin_lock(&inode->i_lock);
         __inode_add_to_lists(sb, head, inode);
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
 }
 EXPORT_SYMBOL_GPL(inode_add_to_lists);
 
@@ -724,18 +712,14 @@ struct inode *new_inode(struct super_block *sb)
         static atomic_t last_ino = ATOMIC_INIT(0);
         struct inode *inode;
 
-        spin_lock_prefetch(&inode_lock);
-
         inode = alloc_inode(sb);
         if (inode) {
-                spin_lock(&inode_lock);
                 spin_lock(&sb_inode_list_lock);
                 spin_lock(&inode->i_lock);
                 inode->i_ino = atomic_inc_return(&last_ino);
                 inode->i_state = 0;
                 __inode_add_to_lists(sb, NULL, inode);
                 spin_unlock(&inode->i_lock);
-                spin_unlock(&inode_lock);
         }
         return inode;
 }
@@ -794,7 +778,6 @@ static struct inode *get_new_inode(struct super_block *sb,
         if (inode) {
                 struct inode *old;
 
-                spin_lock(&inode_lock);
                 /* We released the lock, so.. */
                 old = find_inode(sb, head, test, data);
                 if (!old) {
@@ -806,7 +789,6 @@ static struct inode *get_new_inode(struct super_block *sb,
                         inode->i_state = I_NEW;
                         __inode_add_to_lists(sb, head, inode);
                         spin_unlock(&inode->i_lock);
-                        spin_unlock(&inode_lock);
 
                         /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
@@ -821,7 +803,6 @@ static struct inode *get_new_inode(struct super_block *sb,
                 */
                 __iget(old);
                 spin_unlock(&old->i_lock);
-                spin_unlock(&inode_lock);
                 destroy_inode(inode);
                 inode = old;
                 wait_on_inode(inode);
@@ -831,7 +812,6 @@ static struct inode *get_new_inode(struct super_block *sb,
set_failed:
         spin_unlock(&inode->i_lock);
         spin_unlock(&sb_inode_list_lock);
-        spin_unlock(&inode_lock);
         destroy_inode(inode);
         return NULL;
 }
@@ -849,7 +829,6 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
         if (inode) {
                 struct inode *old;
 
-                spin_lock(&inode_lock);
                 /* We released the lock, so.. */
                 old = find_inode_fast(sb, head, ino);
                 if (!old) {
@@ -859,7 +838,6 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
                         inode->i_state = I_NEW;
                         __inode_add_to_lists(sb, head, inode);
                         spin_unlock(&inode->i_lock);
-                        spin_unlock(&inode_lock);
 
                         /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
@@ -874,7 +852,6 @@ static struct inode *get_new_inode_fast(struct super_block *sb,
                 */
                 __iget(old);
                 spin_unlock(&old->i_lock);
-                spin_unlock(&inode_lock);
                 destroy_inode(inode);
                 inode = old;
                 wait_on_inode(inode);
@@ -924,7 +901,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
         struct hlist_head *head;
         ino_t res;
 
-        spin_lock(&inode_lock);
         spin_lock(&unique_lock);
         do {
                 if (counter <= max_reserved)
@@ -933,7 +909,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)
                 head = inode_hashtable + hash(sb, res);
         } while (!test_inode_iunique(sb, head, res));
         spin_unlock(&unique_lock);
-        spin_unlock(&inode_lock);
 
         return res;
 }
@@ -943,7 +918,6 @@ struct inode *igrab(struct inode *inode)
 {
         struct inode *ret = inode;
 
-        spin_lock(&inode_lock);
         spin_lock(&inode->i_lock);
         if (!(inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)))
                 __iget(inode);
@@ -955,7 +929,6 @@ struct inode *igrab(struct inode *inode)
                 */
                 ret = NULL;
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
 
         return ret;
 }
@@ -986,17 +959,14 @@ static struct inode *ifind(struct super_block *sb,
 {
         struct inode *inode;
 
-        spin_lock(&inode_lock);
         inode = find_inode(sb, head, test, data);
         if (inode) {
                 __iget(inode);
                 spin_unlock(&inode->i_lock);
-                spin_unlock(&inode_lock);
                 if (likely(wait))
                         wait_on_inode(inode);
                 return inode;
         }
-        spin_unlock(&inode_lock);
         return NULL;
 }
 
@@ -1020,16 +990,13 @@ static struct inode *ifind_fast(struct super_block *sb,
 {
         struct inode *inode;
 
-        spin_lock(&inode_lock);
         inode = find_inode_fast(sb, head, ino);
         if (inode) {
                 __iget(inode);
                 spin_unlock(&inode->i_lock);
-                spin_unlock(&inode_lock);
                 wait_on_inode(inode);
                 return inode;
         }
-        spin_unlock(&inode_lock);
         return NULL;
 }
 
@@ -1193,7 +1160,6 @@ int insert_inode_locked(struct inode *inode)
         struct hlist_node *node;
         struct inode *old = NULL;
 
-        spin_lock(&inode_lock);
repeat:
         spin_lock(&inode_hash_lock);
         hlist_for_each_entry(old, node, head, i_hash) {
@@ -1213,13 +1179,11 @@ repeat:
                 /* XXX: initialize inode->i_lock to locked? */
                 hlist_add_head(&inode->i_hash, head);
                 spin_unlock(&inode_hash_lock);
-                spin_unlock(&inode_lock);
                 return 0;
         }
         spin_unlock(&inode_hash_lock);
         __iget(old);
         spin_unlock(&old->i_lock);
-        spin_unlock(&inode_lock);
         wait_on_inode(old);
         if (unlikely(!hlist_unhashed(&old->i_hash))) {
                 iput(old);
@@ -1242,7 +1206,6 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,
         struct hlist_node *node;
         struct inode *old = NULL;
 
-        spin_lock(&inode_lock);
repeat:
         spin_lock(&inode_hash_lock);
         hlist_for_each_entry(old, node, head, i_hash) {
@@ -1262,13 +1225,11 @@ repeat:
                 /* XXX: initialize inode->i_lock to locked? */
                 hlist_add_head(&inode->i_hash, head);
                 spin_unlock(&inode_hash_lock);
-                spin_unlock(&inode_lock);
                 return 0;
         }
         spin_unlock(&inode_hash_lock);
         __iget(old);
         spin_unlock(&old->i_lock);
-        spin_unlock(&inode_lock);
         wait_on_inode(old);
         if (unlikely(!hlist_unhashed(&old->i_hash))) {
                 iput(old);
@@ -1291,13 +1252,11 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)
 {
         struct hlist_head *head = inode_hashtable + hash(inode->i_sb, hashval);
 
-        spin_lock(&inode_lock);
         spin_lock(&inode->i_lock);
         spin_lock(&inode_hash_lock);
         hlist_add_head(&inode->i_hash, head);
         spin_unlock(&inode_hash_lock);
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
 }
 EXPORT_SYMBOL(__insert_inode_hash);
 
@@ -1309,13 +1268,11 @@ EXPORT_SYMBOL(__insert_inode_hash);
  */
 void remove_inode_hash(struct inode *inode)
 {
-        spin_lock(&inode_lock);
         spin_lock(&inode->i_lock);
         spin_lock(&inode_hash_lock);
         hlist_del_init(&inode->i_hash);
         spin_unlock(&inode_hash_lock);
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
 }
 EXPORT_SYMBOL(remove_inode_hash);
 
@@ -1343,7 +1300,6 @@ void generic_delete_inode(struct inode *inode)
         WARN_ON(inode->i_state & I_NEW);
         inode->i_state |= I_FREEING;
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
         atomic_dec(&inodes_stat.nr_inodes);
 
         security_inode_delete(inode);
@@ -1361,13 +1317,11 @@ void generic_delete_inode(struct inode *inode)
                 truncate_inode_pages(&inode->i_data, 0);
                 clear_inode(inode);
         }
-        spin_lock(&inode_lock);
         spin_lock(&inode->i_lock);
         spin_lock(&inode_hash_lock);
         hlist_del_init(&inode->i_hash);
         spin_unlock(&inode_hash_lock);
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
         wake_up_inode(inode);
         BUG_ON(inode->i_state != I_CLEAR);
         destroy_inode(inode);
@@ -1397,16 +1351,13 @@ int generic_detach_inode(struct inode *inode)
                 if (sb->s_flags & MS_ACTIVE) {
                         spin_unlock(&inode->i_lock);
                         spin_unlock(&sb_inode_list_lock);
-                        spin_unlock(&inode_lock);
                         return 0;
                 }
                 WARN_ON(inode->i_state & I_NEW);
                 inode->i_state |= I_WILL_FREE;
                 spin_unlock(&inode->i_lock);
                 spin_unlock(&sb_inode_list_lock);
-                spin_unlock(&inode_lock);
                 write_inode_now(inode, 1);
-                spin_lock(&inode_lock);
                 spin_lock(&sb_inode_list_lock);
                 spin_lock(&inode->i_lock);
                 WARN_ON(inode->i_state & I_NEW);
@@ -1424,7 +1375,6 @@ int generic_detach_inode(struct inode *inode)
         WARN_ON(inode->i_state & I_NEW);
         inode->i_state |= I_FREEING;
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
         atomic_dec(&inodes_stat.nr_inodes);
         return 1;
 }
@@ -1490,17 +1440,12 @@ void iput(struct inode *inode)
         if (inode) {
                 BUG_ON(inode->i_state == I_CLEAR);
 
-retry1:
+retry:
                 spin_lock(&inode->i_lock);
                 if (inode->i_count == 1) {
-                        if (!spin_trylock(&inode_lock)) {
-retry2:
-                                spin_unlock(&inode->i_lock);
-                                goto retry1;
-                        }
                         if (!spin_trylock(&sb_inode_list_lock)) {
-                                spin_unlock(&inode_lock);
-                                goto retry2;
+                                spin_unlock(&inode->i_lock);
+                                goto retry;
                         }
                         inode->i_count--;
                         iput_final(inode);
@@ -1698,10 +1643,8 @@ static void __wait_on_freeing_inode(struct inode *inode)
         wq = bit_waitqueue(&inode->i_state, __I_NEW);
         prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
         spin_unlock(&inode->i_lock);
-        spin_unlock(&inode_lock);
         schedule();
         finish_wait(wq, &wait.wait);
-        spin_lock(&inode_lock);
 }
 
 static __initdata unsigned long ihash_entries;
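
The iput() hunk above is the one place the new order is taken backwards: elsewhere sb_inode_list_lock is acquired before inode->i_lock, but iput() has to examine i_count under i_lock first, so it trylocks the outer lock and backs off on contention instead of blocking into an ABBA deadlock. A generic sketch of that backoff idiom, with illustrative lock names:

    static DEFINE_SPINLOCK(outer);  /* documented order: outer, then inner */
    static DEFINE_SPINLOCK(inner);

    static void lock_both_from_inner_side(void)
    {
    retry:
            spin_lock(&inner);
            if (!spin_trylock(&outer)) {
                    /*
                     * The holder of outer may be spinning to get inner:
                     * drop inner and retry rather than deadlock.
                     */
                    spin_unlock(&inner);
                    cpu_relax();
                    goto retry;
            }
            /* both locks held; do the work, then release in reverse */
            spin_unlock(&outer);
            spin_unlock(&inner);
    }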
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 869f94bb040f..81b5bbb3a7ee 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -369,13 +369,16 @@ void fsnotify_unmount_inodes(struct list_head *list)
         list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
                 struct inode *need_iput_tmp;
 
+                spin_lock(&inode->i_lock);
                 /*
                 * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
                 * I_WILL_FREE, or I_NEW which is fine because by that point
                 * the inode cannot have any associated watches.
                 */
-                if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
+                if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW)) {
+                        spin_unlock(&inode->i_lock);
                         continue;
+                }
 
                 /*
                 * If i_count is zero, the inode cannot have any watches and
@@ -383,19 +386,20 @@ void fsnotify_unmount_inodes(struct list_head *list)
                 * evict all inodes with zero i_count from icache which is
                 * unnecessarily violent and may in fact be illegal to do.
                 */
-                if (!inode->i_count)
+                if (!inode->i_count) {
+                        spin_unlock(&inode->i_lock);
                         continue;
+                }
 
                 need_iput_tmp = need_iput;
                 need_iput = NULL;
 
                 /* In case fsnotify_inode_delete() drops a reference. */
                 if (inode != need_iput_tmp) {
-                        spin_lock(&inode->i_lock);
                         __iget(inode);
-                        spin_unlock(&inode->i_lock);
                 } else
                         need_iput_tmp = NULL;
+                spin_unlock(&inode->i_lock);
 
                 /* In case the dropping of a reference would nuke next_i. */
                 if (&next_i->i_sb_list != list) {
@@ -416,7 +420,6 @@ void fsnotify_unmount_inodes(struct list_head *list)
                 * iprune_mutex keeps shrink_icache_memory() away.
                 */
                 spin_unlock(&sb_inode_list_lock);
-                spin_unlock(&inode_lock);
 
                 if (need_iput_tmp)
                         iput(need_iput_tmp);
@@ -428,7 +431,6 @@ void fsnotify_unmount_inodes(struct list_head *list)
 
                 iput(inode);
 
-                spin_lock(&inode_lock);
                 spin_lock(&sb_inode_list_lock);
         }
 }
diff --git a/fs/notify/inotify/inotify.c b/fs/notify/inotify/inotify.c
index 44e90ef0b0a3..7846758db7da 100644
--- a/fs/notify/inotify/inotify.c
+++ b/fs/notify/inotify/inotify.c
@@ -394,13 +394,16 @@ void inotify_unmount_inodes(struct list_head *list)
                 struct inode *need_iput_tmp;
                 struct list_head *watches;
 
+                spin_lock(&inode->i_lock);
                 /*
                 * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
                 * I_WILL_FREE, or I_NEW which is fine because by that point
                 * the inode cannot have any associated watches.
                 */
-                if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
+                if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW)) {
+                        spin_unlock(&inode->i_lock);
                         continue;
+                }
 
                 /*
                 * If i_count is zero, the inode cannot have any watches and
@@ -408,18 +411,21 @@ void inotify_unmount_inodes(struct list_head *list)
                 * evict all inodes with zero i_count from icache which is
                 * unnecessarily violent and may in fact be illegal to do.
                 */
-                if (!inode->i_count)
+                if (!inode->i_count) {
+                        spin_unlock(&inode->i_lock);
                         continue;
+                }
 
                 need_iput_tmp = need_iput;
                 need_iput = NULL;
                 /* In case inotify_remove_watch_locked() drops a reference. */
                 if (inode != need_iput_tmp) {
-                        spin_lock(&inode->i_lock);
                         __iget(inode);
-                        spin_unlock(&inode->i_lock);
                 } else
                         need_iput_tmp = NULL;
+
+                spin_unlock(&inode->i_lock);
+
                 /* In case the dropping of a reference would nuke next_i. */
                 if (&next_i->i_sb_list != list) {
                         spin_lock(&next_i->i_lock);
@@ -439,7 +445,6 @@ void inotify_unmount_inodes(struct list_head *list)
                 * iprune_mutex keeps shrink_icache_memory() away.
                 */
                 spin_unlock(&sb_inode_list_lock);
-                spin_unlock(&inode_lock);
 
                 if (need_iput_tmp)
                         iput(need_iput_tmp);
@@ -459,7 +464,6 @@ void inotify_unmount_inodes(struct list_head *list)
                 mutex_unlock(&inode->inotify_mutex);
                 iput(inode);
 
-                spin_lock(&inode_lock);
                 spin_lock(&sb_inode_list_lock);
         }
 }
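
Both unmount walkers above now take inode->i_lock once at the top of the loop instead of only around the __iget() call, which is why every early continue grows an explicit unlock. Condensed to its shape, the loop body becomes (a sketch, not the full code):

    list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
            spin_lock(&inode->i_lock);
            if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW)) {
                    spin_unlock(&inode->i_lock);    /* early exit unlocks */
                    continue;
            }
            if (!inode->i_count) {
                    spin_unlock(&inode->i_lock);
                    continue;
            }
            __iget(inode);  /* i_count checked and raised under i_lock */
            spin_unlock(&inode->i_lock);
            /* ... deliver events, drop references with the lock released ... */
    }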
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 3cbb87192d7a..5305c71ccea5 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -844,7 +844,6 @@ static void add_dquot_ref(struct super_block *sb, int type)
         struct inode *inode, *old_inode = NULL;
         int reserved = 0;
 
-        spin_lock(&inode_lock);
         spin_lock(&sb_inode_list_lock);
         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                 spin_lock(&inode->i_lock);
@@ -869,7 +868,6 @@ static void add_dquot_ref(struct super_block *sb, int type)
                 __iget(inode);
                 spin_unlock(&inode->i_lock);
                 spin_unlock(&sb_inode_list_lock);
-                spin_unlock(&inode_lock);
 
                 iput(old_inode);
                 sb->dq_op->initialize(inode, type);
@@ -879,11 +877,9 @@ static void add_dquot_ref(struct super_block *sb, int type)
                 * reference and we cannot iput it under inode_lock. So we
                 * keep the reference and iput it later. */
                 old_inode = inode;
-                spin_lock(&inode_lock);
                 spin_lock(&sb_inode_list_lock);
         }
         spin_unlock(&sb_inode_list_lock);
-        spin_unlock(&inode_lock);
         iput(old_inode);
 
         if (reserved) {
@@ -959,7 +955,6 @@ static void remove_dquot_ref(struct super_block *sb, int type,
 {
         struct inode *inode;
 
-        spin_lock(&inode_lock);
         spin_lock(&sb_inode_list_lock);
         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
                 /*
@@ -972,7 +967,6 @@ static void remove_dquot_ref(struct super_block *sb, int type,
                 remove_inode_dquot_ref(inode, type, tofree_head);
         }
         spin_unlock(&sb_inode_list_lock);
-        spin_unlock(&inode_lock);
 }
 
 /* Gather all references from inodes and drop them */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 90ad0abb935f..3e504f49753c 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -9,7 +9,6 @@
 
 struct backing_dev_info;
 
-extern spinlock_t inode_lock;
 extern spinlock_t sb_inode_list_lock;
 extern spinlock_t wb_inode_list_lock;
 extern spinlock_t inode_hash_lock;
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index ea2a4e42cab1..8a0d9aa7b207 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -71,7 +71,6 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
         * RCU on the reader side
         */
        nr_wb = nr_dirty = nr_io = nr_more_io = 0;
-       spin_lock(&inode_lock);
        spin_lock(&wb_inode_list_lock);
        list_for_each_entry(wb, &bdi->wb_list, list) {
                nr_wb++;
@@ -83,7 +82,6 @@
                        nr_more_io++;
        }
        spin_unlock(&wb_inode_list_lock);
-       spin_unlock(&inode_lock);
 
        get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
 
@@ -698,13 +696,11 @@ void bdi_destroy(struct backing_dev_info *bdi)
        if (bdi_has_dirty_io(bdi)) {
                struct bdi_writeback *dst = &default_backing_dev_info.wb;
 
-               spin_lock(&inode_lock);
                spin_lock(&wb_inode_list_lock);
                list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
                list_splice(&bdi->wb.b_io, &dst->b_io);
                list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
                spin_unlock(&wb_inode_list_lock);
-               spin_unlock(&inode_lock);
        }
 
        bdi_unregister(bdi);