Diffstat (limited to 'fs/file_table.c')
-rw-r--r--	fs/file_table.c	33 +++++++++++++++++----------------
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/fs/file_table.c b/fs/file_table.c
index 485dc0eddd67..b44e4c559786 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -227,7 +227,7 @@ static void __fput(struct file *file)
 {
 	struct dentry *dentry = file->f_path.dentry;
 	struct vfsmount *mnt = file->f_path.mnt;
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = file->f_inode;
 
 	might_sleep();
 
@@ -265,18 +265,15 @@ static void __fput(struct file *file)
 	mntput(mnt);
 }
 
-static DEFINE_SPINLOCK(delayed_fput_lock);
-static LIST_HEAD(delayed_fput_list);
+static LLIST_HEAD(delayed_fput_list);
 static void delayed_fput(struct work_struct *unused)
 {
-	LIST_HEAD(head);
-	spin_lock_irq(&delayed_fput_lock);
-	list_splice_init(&delayed_fput_list, &head);
-	spin_unlock_irq(&delayed_fput_lock);
-	while (!list_empty(&head)) {
-		struct file *f = list_first_entry(&head, struct file, f_u.fu_list);
-		list_del_init(&f->f_u.fu_list);
-		__fput(f);
+	struct llist_node *node = llist_del_all(&delayed_fput_list);
+	struct llist_node *next;
+
+	for (; node; node = next) {
+		next = llist_next(node);
+		__fput(llist_entry(node, struct file, f_u.fu_llist));
 	}
 }
 
@@ -306,18 +303,22 @@ void fput(struct file *file)
 {
 	if (atomic_long_dec_and_test(&file->f_count)) {
 		struct task_struct *task = current;
-		unsigned long flags;
 
 		file_sb_list_del(file);
 		if (likely(!in_interrupt() && !(task->flags & PF_KTHREAD))) {
 			init_task_work(&file->f_u.fu_rcuhead, ____fput);
 			if (!task_work_add(task, &file->f_u.fu_rcuhead, true))
 				return;
+			/*
+			 * After this task has run exit_task_work(),
+			 * task_work_add() will fail. free_ipc_ns()->
+			 * shm_destroy() can do this. Fall through to delayed
+			 * fput to avoid leaking *file.
+			 */
 		}
-		spin_lock_irqsave(&delayed_fput_lock, flags);
-		list_add(&file->f_u.fu_list, &delayed_fput_list);
-		schedule_work(&delayed_fput_work);
-		spin_unlock_irqrestore(&delayed_fput_lock, flags);
+
+		if (llist_add(&file->f_u.fu_llist, &delayed_fput_list))
+			schedule_work(&delayed_fput_work);
 	}
 }
 
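The change above swaps a spinlock-protected list_head for the kernel's lock-free llist: fput() pushes with llist_add(), which returns true only when the list was previously empty (so only the first producer needs to schedule_work()), and delayed_fput() detaches the whole chain in one shot with llist_del_all(). The sketch below is a minimal userspace analogue of that pattern using C11 atomics in place of the kernel primitives; the names (node, pending, push, drain) are invented for illustration and are not the kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	int value;
};

/* Shared list head; NULL means empty, like an llist_head. */
static _Atomic(struct node *) pending;

/* Analogue of llist_add(): lock-free push, true if list was empty. */
static bool push(struct node *n)
{
	struct node *first = atomic_load(&pending);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&pending, &first, n));

	return first == NULL;
}

/* Analogue of llist_del_all(): detach the entire chain atomically. */
static struct node *drain(void)
{
	return atomic_exchange(&pending, NULL);
}

int main(void)
{
	struct node a = { .value = 1 }, b = { .value = 2 };

	if (push(&a))	/* first producer: the kernel schedules the worker here */
		puts("list was empty, scheduling worker");
	push(&b);	/* later producers just link themselves in */

	/* Consumer walks the detached chain (most recently added first). */
	for (struct node *n = drain(); n; n = n->next)
		printf("processing node %d\n", n->value);

	return 0;
}

Because llist_add() reports whether the list was empty, the patch can drop both the irqsave spinlock and the unconditional schedule_work(): concurrent fput() callers contend only on a single compare-and-swap, and the work item is queued exactly once per batch.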