Diffstat (limited to 'fs/inode.c')
-rw-r--r--  fs/inode.c | 43
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/fs/inode.c b/fs/inode.c
index 6d695037a0a3..96364fae0844 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -21,6 +21,7 @@
 #include <linux/pagemap.h>
 #include <linux/cdev.h>
 #include <linux/bootmem.h>
+#include <linux/inotify.h>
 
 /*
  * This is needed for the following functions:
@@ -202,6 +203,10 @@ void inode_init_once(struct inode *inode)
        INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear);
        spin_lock_init(&inode->i_lock);
        i_size_ordered_init(inode);
+#ifdef CONFIG_INOTIFY
+       INIT_LIST_HEAD(&inode->inotify_watches);
+       sema_init(&inode->inotify_sem, 1);
+#endif
 }
 
 EXPORT_SYMBOL(inode_init_once);
@@ -282,6 +287,13 @@ static void dispose_list(struct list_head *head)
                if (inode->i_data.nrpages)
                        truncate_inode_pages(&inode->i_data, 0);
                clear_inode(inode);
+
+               spin_lock(&inode_lock);
+               hlist_del_init(&inode->i_hash);
+               list_del_init(&inode->i_sb_list);
+               spin_unlock(&inode_lock);
+
+               wake_up_inode(inode);
                destroy_inode(inode);
                nr_disposed++;
        }
@@ -317,8 +329,6 @@ static int invalidate_list(struct list_head *head, struct list_head *dispose)
                inode = list_entry(tmp, struct inode, i_sb_list);
                invalidate_inode_buffers(inode);
                if (!atomic_read(&inode->i_count)) {
-                       hlist_del_init(&inode->i_hash);
-                       list_del(&inode->i_sb_list);
                        list_move(&inode->i_list, dispose);
                        inode->i_state |= I_FREEING;
                        count++;
@@ -346,6 +356,7 @@ int invalidate_inodes(struct super_block * sb)
 
        down(&iprune_sem);
        spin_lock(&inode_lock);
+       inotify_unmount_inodes(&sb->s_inodes);
        busy = invalidate_list(&sb->s_inodes, &throw_away);
        spin_unlock(&inode_lock);
 
@@ -439,8 +450,6 @@ static void prune_icache(int nr_to_scan)
                        if (!can_unuse(inode))
                                continue;
                }
-               hlist_del_init(&inode->i_hash);
-               list_del_init(&inode->i_sb_list);
                list_move(&inode->i_list, &freeable);
                inode->i_state |= I_FREEING;
                nr_pruned++;
@@ -1244,29 +1253,21 @@ int inode_wait(void *word)
 }
 
 /*
- * If we try to find an inode in the inode hash while it is being deleted, we
- * have to wait until the filesystem completes its deletion before reporting
- * that it isn't found.  This is because iget will immediately call
- * ->read_inode, and we want to be sure that evidence of the deletion is found
- * by ->read_inode.
+ * If we try to find an inode in the inode hash while it is being
+ * deleted, we have to wait until the filesystem completes its
+ * deletion before reporting that it isn't found.  This function waits
+ * until the deletion _might_ have completed.  Callers are responsible
+ * to recheck inode state.
+ *
+ * It doesn't matter if I_LOCK is not set initially, a call to
+ * wake_up_inode() after removing from the hash list will DTRT.
+ *
  * This is called with inode_lock held.
  */
 static void __wait_on_freeing_inode(struct inode *inode)
 {
        wait_queue_head_t *wq;
        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_LOCK);
-
-       /*
-        * I_FREEING and I_CLEAR are cleared in process context under
-        * inode_lock, so we have to give the tasks who would clear them
-        * a chance to run and acquire inode_lock.
-        */
-       if (!(inode->i_state & I_LOCK)) {
-               spin_unlock(&inode_lock);
-               yield();
-               spin_lock(&inode_lock);
-               return;
-       }
        wq = bit_waitqueue(&inode->i_state, __I_LOCK);
        prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode_lock);
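
For context, the CONFIG_INOTIFY hunk in inode_init_once() initializes two new per-inode fields. As a rough sketch only (the field types are inferred from the INIT_LIST_HEAD() and sema_init() calls in this diff; the actual declaration lives in include/linux/fs.h and is not part of the hunks shown here), the corresponding additions to struct inode would look something like this:

/*
 * Sketch, not part of this diff: per-inode inotify state implied by
 * the inode_init_once() hunk above.  Types are inferred from the
 * INIT_LIST_HEAD()/sema_init() calls in the patch.
 */
struct inode {
        /* ... existing members ... */
#ifdef CONFIG_INOTIFY
        struct list_head        inotify_watches; /* watches on this inode */
        struct semaphore        inotify_sem;     /* protects inotify_watches */
#endif
        /* ... */
};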