99 files changed, 784 insertions, 553 deletions
diff --git a/Documentation/filesystems/caching/backend-api.txt b/Documentation/filesystems/caching/backend-api.txt
index 277d1e810670..c0bd5677271b 100644
--- a/Documentation/filesystems/caching/backend-api.txt
+++ b/Documentation/filesystems/caching/backend-api.txt
@@ -676,6 +676,29 @@ FS-Cache provides some utilities that a cache backend may make use of: | |||
676 | as possible. | 676 | as possible. |
677 | 677 | ||
678 | 678 | ||
679 | (*) Indicate that a stale object was found and discarded: | ||
680 | |||
681 | void fscache_object_retrying_stale(struct fscache_object *object); | ||
682 | |||
683 | This is called to indicate that the lookup procedure found an object in | ||
684 | the cache that the netfs decided was stale. The object has been | ||
685 | discarded from the cache and the lookup will be performed again. | ||
686 | |||
687 | |||
688 | (*) Indicate that the caching backend killed an object: | ||
689 | |||
690 | void fscache_object_mark_killed(struct fscache_object *object, | ||
691 | enum fscache_why_object_killed why); | ||
692 | |||
693 | This is called to indicate that the cache backend preemptively killed an | ||
694 | object. The why parameter should be set to indicate the reason: | ||
695 | |||
696 | FSCACHE_OBJECT_IS_STALE - the object was stale and needs discarding. | ||
697 | FSCACHE_OBJECT_NO_SPACE - there was insufficient cache space | ||
698 | FSCACHE_OBJECT_WAS_RETIRED - the object was retired when relinquished. | ||
699 | FSCACHE_OBJECT_WAS_CULLED - the object was culled to make space. | ||
700 | |||
701 | |||
679 | (*) Get and release references on a retrieval record: | 702 | (*) Get and release references on a retrieval record: |
680 | 703 | ||
681 | void fscache_get_retrieval(struct fscache_retrieval *op); | 704 | void fscache_get_retrieval(struct fscache_retrieval *op); |
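A minimal sketch of how a cache backend might report these two events, mirroring the cachefiles usage later in this series. my_cache_object_killed() and its caller are illustrative only, not part of the documented API:

	#include <linux/fscache-cache.h>

	/* Illustrative backend helper (not from this series): report why an
	 * object on disk was killed before the netfs relinquished it. */
	static void my_cache_object_killed(struct fscache_object *object,
					   enum fscache_why_object_killed why)
	{
		if (why == FSCACHE_OBJECT_IS_STALE) {
			/* Stale copy deleted; FS-Cache will redo the lookup. */
			fscache_object_retrying_stale(object);
			return;
		}

		/* Retired on relinquish, culled, or rejected for lack of space. */
		fscache_object_mark_killed(object, why);
	}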
diff --git a/Documentation/filesystems/caching/fscache.txt b/Documentation/filesystems/caching/fscache.txt
index 770267af5b3e..50f0a5757f48 100644
--- a/Documentation/filesystems/caching/fscache.txt
+++ b/Documentation/filesystems/caching/fscache.txt
@@ -284,8 +284,9 @@ proc files. | |||
284 | enq=N Number of times async ops queued for processing | 284 | enq=N Number of times async ops queued for processing |
285 | can=N Number of async ops cancelled | 285 | can=N Number of async ops cancelled |
286 | rej=N Number of async ops rejected due to object lookup/create failure | 286 | rej=N Number of async ops rejected due to object lookup/create failure |
287 | ini=N Number of async ops initialised | ||
287 | dfr=N Number of async ops queued for deferred release | 288 | dfr=N Number of async ops queued for deferred release |
288 | rel=N Number of async ops released | 289 | rel=N Number of async ops released (should equal ini=N when idle) |
289 | gc=N Number of deferred-release async ops garbage collected | 290 | gc=N Number of deferred-release async ops garbage collected |
290 | CacheOp alo=N Number of in-progress alloc_object() cache ops | 291 | CacheOp alo=N Number of in-progress alloc_object() cache ops |
291 | luo=N Number of in-progress lookup_object() cache ops | 292 | luo=N Number of in-progress lookup_object() cache ops |
@@ -303,6 +304,10 @@ proc files. | |||
303 | wrp=N Number of in-progress write_page() cache ops | 304 | wrp=N Number of in-progress write_page() cache ops |
304 | ucp=N Number of in-progress uncache_page() cache ops | 305 | ucp=N Number of in-progress uncache_page() cache ops |
305 | dsp=N Number of in-progress dissociate_pages() cache ops | 306 | dsp=N Number of in-progress dissociate_pages() cache ops |
307 | CacheEv nsp=N Number of object lookups/creations rejected due to lack of space | ||
308 | stl=N Number of stale objects deleted | ||
309 | rtr=N Number of objects retired when relinquished | ||
310 | cul=N Number of objects culled | ||
306 | 311 | ||
307 | 312 | ||
308 | (*) /proc/fs/fscache/histogram | 313 | (*) /proc/fs/fscache/histogram |
diff --git a/Documentation/filesystems/dax.txt b/Documentation/filesystems/dax.txt
index baf41118660d..7af2851d667c 100644
--- a/Documentation/filesystems/dax.txt
+++ b/Documentation/filesystems/dax.txt
@@ -18,8 +18,10 @@ Usage | |||
18 | ----- | 18 | ----- |
19 | 19 | ||
20 | If you have a block device which supports DAX, you can make a filesystem | 20 | If you have a block device which supports DAX, you can make a filesystem |
21 | on it as usual. When mounting it, use the -o dax option manually | 21 | on it as usual. The DAX code currently only supports files with a block |
22 | or add 'dax' to the options in /etc/fstab. | 22 | size equal to your kernel's PAGE_SIZE, so you may need to specify a block |
23 | size when creating the filesystem. When mounting it, use the "-o dax" | ||
24 | option on the command line or add 'dax' to the options in /etc/fstab. | ||
23 | 25 | ||
24 | 26 | ||
25 | Implementation Tips for Block Driver Writers | 27 | Implementation Tips for Block Driver Writers |
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 68f1c9106573..f24d1b833957 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -500,3 +500,7 @@ in your dentry operations instead. | |||
500 | dentry, it does not get nameidata at all and it gets called only when cookie | 500 | dentry, it does not get nameidata at all and it gets called only when cookie |
501 | is non-NULL. Note that link body isn't available anymore, so if you need it, | 501 | is non-NULL. Note that link body isn't available anymore, so if you need it, |
502 | store it as cookie. | 502 | store it as cookie. |
503 | -- | ||
504 | [mandatory] | ||
505 | __fd_install() & fd_install() can now sleep. Callers should not | ||
506 | hold a spinlock or other resources that do not allow a schedule. | ||
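A sketch of the ordering this note asks for, using hypothetical names (publish_file() and the lock are not from the kernel): finish any work that needs a spinlock before calling fd_install(), since it may now sleep waiting on a table resize.

	#include <linux/file.h>
	#include <linux/fs.h>
	#include <linux/spinlock.h>

	/* Hypothetical example: publish an already-set-up struct file as an fd. */
	static int publish_file(struct file *filp, spinlock_t *lock)
	{
		int fd = get_unused_fd_flags(O_CLOEXEC);

		if (fd < 0)
			return fd;

		spin_lock(lock);
		/* ... bookkeeping that genuinely needs the spinlock ... */
		spin_unlock(lock);

		/* May sleep from here on, so no spinlocks are held. */
		fd_install(fd, filp);
		return fd;
	}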
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index e0cf99893212..807f7d61d7a7 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -71,15 +71,12 @@ static void print_task_path_n_nm(struct task_struct *tsk, char *buf) | |||
71 | mmput(mm); | 71 | mmput(mm); |
72 | 72 | ||
73 | if (exe_file) { | 73 | if (exe_file) { |
74 | path = exe_file->f_path; | 74 | path_nm = file_path(exe_file, buf, 255); |
75 | path_get(&exe_file->f_path); | ||
76 | fput(exe_file); | 75 | fput(exe_file); |
77 | path_nm = d_path(&path, buf, 255); | ||
78 | path_put(&path); | ||
79 | } | 76 | } |
80 | 77 | ||
81 | done: | 78 | done: |
82 | pr_info("Path: %s\n", path_nm); | 79 | pr_info("Path: %s\n", !IS_ERR(path_nm) ? path_nm : "?"); |
83 | } | 80 | } |
84 | 81 | ||
85 | static void show_faulting_vma(unsigned long address, char *buf) | 82 | static void show_faulting_vma(unsigned long address, char *buf) |
@@ -103,8 +100,7 @@ static void show_faulting_vma(unsigned long address, char *buf) | |||
103 | if (vma && (vma->vm_start <= address)) { | 100 | if (vma && (vma->vm_start <= address)) { |
104 | struct file *file = vma->vm_file; | 101 | struct file *file = vma->vm_file; |
105 | if (file) { | 102 | if (file) { |
106 | struct path *path = &file->f_path; | 103 | nm = file_path(file, buf, PAGE_SIZE - 1); |
107 | nm = d_path(path, buf, PAGE_SIZE - 1); | ||
108 | inode = file_inode(vma->vm_file); | 104 | inode = file_inode(vma->vm_file); |
109 | dev = inode->i_sb->s_dev; | 105 | dev = inode->i_sb->s_dev; |
110 | ino = inode->i_ino; | 106 | ino = inode->i_ino; |
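Every d_path(&file->f_path, ...) caller in this series is converted the same way, so the new helper is presumably a thin wrapper; its real definition is not shown in this section. A sketch under that assumption:

	/* Sketch only: assumes file_path() simply resolves the file's own
	 * f_path, which is pinned for the lifetime of the struct file, so
	 * callers no longer need the path_get()/path_put() dance removed
	 * above. */
	char *file_path(struct file *filp, char *buf, int buflen)
	{
		return d_path(&filp->f_path, buf, buflen);
	}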
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
index c36efa0c7163..719dd796c12c 100644
--- a/arch/blackfin/kernel/trace.c
+++ b/arch/blackfin/kernel/trace.c
@@ -136,7 +136,7 @@ void decode_address(char *buf, unsigned long address) | |||
136 | struct file *file = vma->vm_file; | 136 | struct file *file = vma->vm_file; |
137 | 137 | ||
138 | if (file) { | 138 | if (file) { |
139 | char *d_name = d_path(&file->f_path, _tmpbuf, | 139 | char *d_name = file_path(file, _tmpbuf, |
140 | sizeof(_tmpbuf)); | 140 | sizeof(_tmpbuf)); |
141 | if (!IS_ERR(d_name)) | 141 | if (!IS_ERR(d_name)) |
142 | name = d_name; | 142 | name = d_name; |
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 1ba6307be4db..11634fa7ab3c 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -166,7 +166,7 @@ static void spufs_prune_dir(struct dentry *dir) | |||
166 | mutex_lock(&d_inode(dir)->i_mutex); | 166 | mutex_lock(&d_inode(dir)->i_mutex); |
167 | list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) { | 167 | list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) { |
168 | spin_lock(&dentry->d_lock); | 168 | spin_lock(&dentry->d_lock); |
169 | if (!(d_unhashed(dentry)) && d_really_is_positive(dentry)) { | 169 | if (simple_positive(dentry)) { |
170 | dget_dlock(dentry); | 170 | dget_dlock(dentry); |
171 | __d_drop(dentry); | 171 | __d_drop(dentry); |
172 | spin_unlock(&dentry->d_lock); | 172 | spin_unlock(&dentry->d_lock); |
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 2eeb0a0f506d..b2e5902bd8f4 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -62,18 +62,13 @@ static void hypfs_add_dentry(struct dentry *dentry) | |||
62 | hypfs_last_dentry = dentry; | 62 | hypfs_last_dentry = dentry; |
63 | } | 63 | } |
64 | 64 | ||
65 | static inline int hypfs_positive(struct dentry *dentry) | ||
66 | { | ||
67 | return d_really_is_positive(dentry) && !d_unhashed(dentry); | ||
68 | } | ||
69 | |||
70 | static void hypfs_remove(struct dentry *dentry) | 65 | static void hypfs_remove(struct dentry *dentry) |
71 | { | 66 | { |
72 | struct dentry *parent; | 67 | struct dentry *parent; |
73 | 68 | ||
74 | parent = dentry->d_parent; | 69 | parent = dentry->d_parent; |
75 | mutex_lock(&d_inode(parent)->i_mutex); | 70 | mutex_lock(&d_inode(parent)->i_mutex); |
76 | if (hypfs_positive(dentry)) { | 71 | if (simple_positive(dentry)) { |
77 | if (d_is_dir(dentry)) | 72 | if (d_is_dir(dentry)) |
78 | simple_rmdir(d_inode(parent), dentry); | 73 | simple_rmdir(d_inode(parent), dentry); |
79 | else | 74 | else |
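The per-subsystem helpers deleted in this series (hypfs_positive() above, plus the debugfs, drbd and autofs copies below) all reduce to the same test; a sketch of the shared helper they now call, assuming the common definition matches the removed copies and lives in a generic dcache header:

	/* Sketch: a dentry is "positive" if it has an inode and has not been
	 * unhashed (i.e. it is still a live, visible directory entry). */
	static inline bool simple_positive(struct dentry *dentry)
	{
		return d_really_is_positive(dentry) && !d_unhashed(dentry);
	}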
diff --git a/arch/tile/kernel/stack.c b/arch/tile/kernel/stack.c
index 35d34635e4f1..402b9c85a894 100644
--- a/arch/tile/kernel/stack.c
+++ b/arch/tile/kernel/stack.c
@@ -332,7 +332,7 @@ static void describe_addr(struct KBacktraceIterator *kbt, | |||
332 | } | 332 | } |
333 | 333 | ||
334 | if (vma->vm_file) { | 334 | if (vma->vm_file) { |
335 | p = d_path(&vma->vm_file->f_path, buf, bufsize); | 335 | p = file_path(vma->vm_file, buf, bufsize); |
336 | if (IS_ERR(p)) | 336 | if (IS_ERR(p)) |
337 | p = "?"; | 337 | p = "?"; |
338 | name = kbasename(p); | 338 | name = kbasename(p); |
diff --git a/arch/tile/mm/elf.c b/arch/tile/mm/elf.c
index f7ddae3725a4..6225cc998db1 100644
--- a/arch/tile/mm/elf.c
+++ b/arch/tile/mm/elf.c
@@ -56,7 +56,7 @@ static int notify_exec(struct mm_struct *mm) | |||
56 | if (exe_file == NULL) | 56 | if (exe_file == NULL) |
57 | goto done_free; | 57 | goto done_free; |
58 | 58 | ||
59 | path = d_path(&exe_file->f_path, buf, PAGE_SIZE); | 59 | path = file_path(exe_file, buf, PAGE_SIZE); |
60 | if (IS_ERR(path)) | 60 | if (IS_ERR(path)) |
61 | goto done_put; | 61 | goto done_put; |
62 | 62 | ||
diff --git a/drivers/block/drbd/drbd_debugfs.c b/drivers/block/drbd/drbd_debugfs.c
index a6ee3d750c30..6b88a35fb048 100644
--- a/drivers/block/drbd/drbd_debugfs.c
+++ b/drivers/block/drbd/drbd_debugfs.c
@@ -419,14 +419,6 @@ static int in_flight_summary_show(struct seq_file *m, void *pos) | |||
419 | return 0; | 419 | return 0; |
420 | } | 420 | } |
421 | 421 | ||
422 | /* simple_positive(file->f_path.dentry) respectively debugfs_positive(), | ||
423 | * but neither is "reachable" from here. | ||
424 | * So we have our own inline version of it above. :-( */ | ||
425 | static inline int debugfs_positive(struct dentry *dentry) | ||
426 | { | ||
427 | return d_really_is_positive(dentry) && !d_unhashed(dentry); | ||
428 | } | ||
429 | |||
430 | /* make sure at *open* time that the respective object won't go away. */ | 422 | /* make sure at *open* time that the respective object won't go away. */ |
431 | static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *), | 423 | static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, void *), |
432 | void *data, struct kref *kref, | 424 | void *data, struct kref *kref, |
@@ -444,7 +436,7 @@ static int drbd_single_open(struct file *file, int (*show)(struct seq_file *, vo | |||
444 | /* serialize with d_delete() */ | 436 | /* serialize with d_delete() */ |
445 | mutex_lock(&d_inode(parent)->i_mutex); | 437 | mutex_lock(&d_inode(parent)->i_mutex); |
446 | /* Make sure the object is still alive */ | 438 | /* Make sure the object is still alive */ |
447 | if (debugfs_positive(file->f_path.dentry) | 439 | if (simple_positive(file->f_path.dentry) |
448 | && kref_get_unless_zero(kref)) | 440 | && kref_get_unless_zero(kref)) |
449 | ret = 0; | 441 | ret = 0; |
450 | mutex_unlock(&d_inode(parent)->i_mutex); | 442 | mutex_unlock(&d_inode(parent)->i_mutex); |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 40580dc7f41c..f7a4c9d7f721 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -588,7 +588,7 @@ static ssize_t loop_attr_backing_file_show(struct loop_device *lo, char *buf) | |||
588 | 588 | ||
589 | spin_lock_irq(&lo->lo_lock); | 589 | spin_lock_irq(&lo->lo_lock); |
590 | if (lo->lo_backing_file) | 590 | if (lo->lo_backing_file) |
591 | p = d_path(&lo->lo_backing_file->f_path, buf, PAGE_SIZE - 1); | 591 | p = file_path(lo->lo_backing_file, buf, PAGE_SIZE - 1); |
592 | spin_unlock_irq(&lo->lo_lock); | 592 | spin_unlock_irq(&lo->lo_lock); |
593 | 593 | ||
594 | if (IS_ERR_OR_NULL(p)) | 594 | if (IS_ERR_OR_NULL(p)) |
diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
index 1ca8e32a9592..25422a3a7238 100644
--- a/drivers/infiniband/hw/ipath/ipath_fs.c
+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
@@ -277,7 +277,7 @@ static int remove_file(struct dentry *parent, char *name) | |||
277 | } | 277 | } |
278 | 278 | ||
279 | spin_lock(&tmp->d_lock); | 279 | spin_lock(&tmp->d_lock); |
280 | if (!d_unhashed(tmp) && d_really_is_positive(tmp)) { | 280 | if (simple_positive(tmp)) { |
281 | dget_dlock(tmp); | 281 | dget_dlock(tmp); |
282 | __d_drop(tmp); | 282 | __d_drop(tmp); |
283 | spin_unlock(&tmp->d_lock); | 283 | spin_unlock(&tmp->d_lock); |
diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c
index bdd5d3857203..13ef22bd9459 100644
--- a/drivers/infiniband/hw/qib/qib_fs.c
+++ b/drivers/infiniband/hw/qib/qib_fs.c
@@ -455,7 +455,7 @@ static int remove_file(struct dentry *parent, char *name) | |||
455 | } | 455 | } |
456 | 456 | ||
457 | spin_lock(&tmp->d_lock); | 457 | spin_lock(&tmp->d_lock); |
458 | if (!d_unhashed(tmp) && d_really_is_positive(tmp)) { | 458 | if (simple_positive(tmp)) { |
459 | __d_drop(tmp); | 459 | __d_drop(tmp); |
460 | spin_unlock(&tmp->d_lock); | 460 | spin_unlock(&tmp->d_lock); |
461 | simple_unlink(d_inode(parent), tmp); | 461 | simple_unlink(d_inode(parent), tmp); |
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 135a0907e9de..ed2346ddf4c9 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -839,7 +839,7 @@ static void bitmap_file_kick(struct bitmap *bitmap) | |||
839 | if (bitmap->storage.file) { | 839 | if (bitmap->storage.file) { |
840 | path = kmalloc(PAGE_SIZE, GFP_KERNEL); | 840 | path = kmalloc(PAGE_SIZE, GFP_KERNEL); |
841 | if (path) | 841 | if (path) |
842 | ptr = d_path(&bitmap->storage.file->f_path, | 842 | ptr = file_path(bitmap->storage.file, |
843 | path, PAGE_SIZE); | 843 | path, PAGE_SIZE); |
844 | 844 | ||
845 | printk(KERN_ALERT | 845 | printk(KERN_ALERT |
@@ -1927,7 +1927,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap) | |||
1927 | chunk_kb ? "KB" : "B"); | 1927 | chunk_kb ? "KB" : "B"); |
1928 | if (bitmap->storage.file) { | 1928 | if (bitmap->storage.file) { |
1929 | seq_printf(seq, ", file: "); | 1929 | seq_printf(seq, ", file: "); |
1930 | seq_path(seq, &bitmap->storage.file->f_path, " \t\n"); | 1930 | seq_file_path(seq, bitmap->storage.file, " \t\n"); |
1931 | } | 1931 | } |
1932 | 1932 | ||
1933 | seq_printf(seq, "\n"); | 1933 | seq_printf(seq, "\n"); |
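bitmap_status() switches from seq_path() on &file->f_path to a new seq_file_path(); a sketch of that helper under the assumption that it parallels file_path() (the actual definition is not shown in this section):

	/* Sketch only: assumed convenience wrapper, not copied from the
	 * series. 'esc' is the set of characters to escape in the output. */
	int seq_file_path(struct seq_file *m, struct file *file, const char *esc)
	{
		return seq_path(m, &file->f_path, esc);
	}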
diff --git a/drivers/md/md.c b/drivers/md/md.c
index df92d30ca054..d429c30cd514 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5766,7 +5766,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg) | |||
5766 | /* bitmap disabled, zero the first byte and copy out */ | 5766 | /* bitmap disabled, zero the first byte and copy out */ |
5767 | if (!mddev->bitmap_info.file) | 5767 | if (!mddev->bitmap_info.file) |
5768 | file->pathname[0] = '\0'; | 5768 | file->pathname[0] = '\0'; |
5769 | else if ((ptr = d_path(&mddev->bitmap_info.file->f_path, | 5769 | else if ((ptr = file_path(mddev->bitmap_info.file, |
5770 | file->pathname, sizeof(file->pathname))), | 5770 | file->pathname, sizeof(file->pathname))), |
5771 | IS_ERR(ptr)) | 5771 | IS_ERR(ptr)) |
5772 | err = PTR_ERR(ptr); | 5772 | err = PTR_ERR(ptr); |
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 3cc109f3c9c8..d2259c663996 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2936,7 +2936,7 @@ int fsg_common_create_lun(struct fsg_common *common, struct fsg_lun_config *cfg, | |||
2936 | if (fsg_lun_is_open(lun)) { | 2936 | if (fsg_lun_is_open(lun)) { |
2937 | p = "(error)"; | 2937 | p = "(error)"; |
2938 | if (pathbuf) { | 2938 | if (pathbuf) { |
2939 | p = d_path(&lun->filp->f_path, pathbuf, PATH_MAX); | 2939 | p = file_path(lun->filp, pathbuf, PATH_MAX); |
2940 | if (IS_ERR(p)) | 2940 | if (IS_ERR(p)) |
2941 | p = "(error)"; | 2941 | p = "(error)"; |
2942 | } | 2942 | } |
diff --git a/drivers/usb/gadget/function/storage_common.c b/drivers/usb/gadget/function/storage_common.c
index 648f9e489b39..d62683017cf3 100644
--- a/drivers/usb/gadget/function/storage_common.c
+++ b/drivers/usb/gadget/function/storage_common.c
@@ -341,7 +341,7 @@ ssize_t fsg_show_file(struct fsg_lun *curlun, struct rw_semaphore *filesem, | |||
341 | 341 | ||
342 | down_read(filesem); | 342 | down_read(filesem); |
343 | if (fsg_lun_is_open(curlun)) { /* Get the complete pathname */ | 343 | if (fsg_lun_is_open(curlun)) { /* Get the complete pathname */ |
344 | p = d_path(&curlun->filp->f_path, buf, PAGE_SIZE - 1); | 344 | p = file_path(curlun->filp, buf, PAGE_SIZE - 1); |
345 | if (IS_ERR(p)) | 345 | if (IS_ERR(p)) |
346 | rc = PTR_ERR(p); | 346 | rc = PTR_ERR(p); |
347 | else { | 347 | else { |
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index cffe8370fb44..c69a87eaf57d 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -64,7 +64,7 @@ struct affs_inode_info { | |||
64 | /* short cut to get to the affs specific inode data */ | 64 | /* short cut to get to the affs specific inode data */ |
65 | static inline struct affs_inode_info *AFFS_I(struct inode *inode) | 65 | static inline struct affs_inode_info *AFFS_I(struct inode *inode) |
66 | { | 66 | { |
67 | return list_entry(inode, struct affs_inode_info, vfs_inode); | 67 | return container_of(inode, struct affs_inode_info, vfs_inode); |
68 | } | 68 | } |
69 | 69 | ||
70 | /* | 70 | /* |
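The AFFS_I() conversion above and the matching BEFS_I() and coda ITOC() changes below are behaviour-neutral: list_entry() has always been container_of() under another name, so spelling it container_of() simply drops the misleading suggestion that the inode is a list node. For reference:

	/* From <linux/list.h>: the two calls generate identical code. */
	#define list_entry(ptr, type, member) \
		container_of(ptr, type, member)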
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 5b700ef1e59d..c37149b929be 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -238,11 +238,6 @@ static inline u64 autofs4_get_ino(struct autofs_sb_info *sbi) | |||
238 | return d_inode(sbi->sb->s_root)->i_ino; | 238 | return d_inode(sbi->sb->s_root)->i_ino; |
239 | } | 239 | } |
240 | 240 | ||
241 | static inline int simple_positive(struct dentry *dentry) | ||
242 | { | ||
243 | return d_really_is_positive(dentry) && !d_unhashed(dentry); | ||
244 | } | ||
245 | |||
246 | static inline void __autofs4_add_expiring(struct dentry *dentry) | 241 | static inline void __autofs4_add_expiring(struct dentry *dentry) |
247 | { | 242 | { |
248 | struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); | 243 | struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); |
diff --git a/fs/befs/befs.h b/fs/befs/befs.h
index 1fead8d56a98..35d19e8731e3 100644
--- a/fs/befs/befs.h
+++ b/fs/befs/befs.h
@@ -112,7 +112,7 @@ BEFS_SB(const struct super_block *super) | |||
112 | static inline struct befs_inode_info * | 112 | static inline struct befs_inode_info * |
113 | BEFS_I(const struct inode *inode) | 113 | BEFS_I(const struct inode *inode) |
114 | { | 114 | { |
115 | return list_entry(inode, struct befs_inode_info, vfs_inode); | 115 | return container_of(inode, struct befs_inode_info, vfs_inode); |
116 | } | 116 | } |
117 | 117 | ||
118 | static inline befs_blocknr_t | 118 | static inline befs_blocknr_t |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index cd46e4158830..6b659967898e 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1530,7 +1530,7 @@ static int fill_files_note(struct memelfnote *note) | |||
1530 | file = vma->vm_file; | 1530 | file = vma->vm_file; |
1531 | if (!file) | 1531 | if (!file) |
1532 | continue; | 1532 | continue; |
1533 | filename = d_path(&file->f_path, name_curpos, remaining); | 1533 | filename = file_path(file, name_curpos, remaining); |
1534 | if (IS_ERR(filename)) { | 1534 | if (IS_ERR(filename)) { |
1535 | if (PTR_ERR(filename) == -ENAMETOOLONG) { | 1535 | if (PTR_ERR(filename) == -ENAMETOOLONG) { |
1536 | vfree(data); | 1536 | vfree(data); |
@@ -1540,7 +1540,7 @@ static int fill_files_note(struct memelfnote *note) | |||
1540 | continue; | 1540 | continue; |
1541 | } | 1541 | } |
1542 | 1542 | ||
1543 | /* d_path() fills at the end, move name down */ | 1543 | /* file_path() fills at the end, move name down */ |
1544 | /* n = strlen(filename) + 1: */ | 1544 | /* n = strlen(filename) + 1: */ |
1545 | n = (name_curpos + remaining) - filename; | 1545 | n = (name_curpos + remaining) - filename; |
1546 | remaining = filename - name_curpos; | 1546 | remaining = filename - name_curpos; |
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 4fe10f93db8a..198243717da5 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -152,6 +152,9 @@ blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) | |||
152 | struct file *file = iocb->ki_filp; | 152 | struct file *file = iocb->ki_filp; |
153 | struct inode *inode = file->f_mapping->host; | 153 | struct inode *inode = file->f_mapping->host; |
154 | 154 | ||
155 | if (IS_DAX(inode)) | ||
156 | return dax_do_io(iocb, inode, iter, offset, blkdev_get_block, | ||
157 | NULL, DIO_SKIP_DIO_COUNT); | ||
155 | return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset, | 158 | return __blockdev_direct_IO(iocb, inode, I_BDEV(inode), iter, offset, |
156 | blkdev_get_block, NULL, NULL, | 159 | blkdev_get_block, NULL, NULL, |
157 | DIO_SKIP_DIO_COUNT); | 160 | DIO_SKIP_DIO_COUNT); |
@@ -443,6 +446,12 @@ long bdev_direct_access(struct block_device *bdev, sector_t sector, | |||
443 | long avail; | 446 | long avail; |
444 | const struct block_device_operations *ops = bdev->bd_disk->fops; | 447 | const struct block_device_operations *ops = bdev->bd_disk->fops; |
445 | 448 | ||
449 | /* | ||
450 | * The device driver is allowed to sleep, in order to make the | ||
451 | * memory directly accessible. | ||
452 | */ | ||
453 | might_sleep(); | ||
454 | |||
446 | if (size < 0) | 455 | if (size < 0) |
447 | return size; | 456 | return size; |
448 | if (!ops->direct_access) | 457 | if (!ops->direct_access) |
@@ -1170,6 +1179,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1170 | bdev->bd_disk = disk; | 1179 | bdev->bd_disk = disk; |
1171 | bdev->bd_queue = disk->queue; | 1180 | bdev->bd_queue = disk->queue; |
1172 | bdev->bd_contains = bdev; | 1181 | bdev->bd_contains = bdev; |
1182 | bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0; | ||
1173 | if (!partno) { | 1183 | if (!partno) { |
1174 | ret = -ENXIO; | 1184 | ret = -ENXIO; |
1175 | bdev->bd_part = disk_get_part(disk, partno); | 1185 | bdev->bd_part = disk_get_part(disk, partno); |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 795d754327a7..b823fac91c92 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1748,7 +1748,7 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, | |||
1748 | } | 1748 | } |
1749 | 1749 | ||
1750 | current->backing_dev_info = inode_to_bdi(inode); | 1750 | current->backing_dev_info = inode_to_bdi(inode); |
1751 | err = file_remove_suid(file); | 1751 | err = file_remove_privs(file); |
1752 | if (err) { | 1752 | if (err) { |
1753 | mutex_unlock(&inode->i_mutex); | 1753 | mutex_unlock(&inode->i_mutex); |
1754 | goto out; | 1754 | goto out; |
diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
index 8c52472d2efa..aecd0859eacb 100644
--- a/fs/cachefiles/internal.h
+++ b/fs/cachefiles/internal.h
@@ -43,7 +43,6 @@ struct cachefiles_object { | |||
43 | loff_t i_size; /* object size */ | 43 | loff_t i_size; /* object size */ |
44 | unsigned long flags; | 44 | unsigned long flags; |
45 | #define CACHEFILES_OBJECT_ACTIVE 0 /* T if marked active */ | 45 | #define CACHEFILES_OBJECT_ACTIVE 0 /* T if marked active */ |
46 | #define CACHEFILES_OBJECT_BURIED 1 /* T if preemptively buried */ | ||
47 | atomic_t usage; /* object usage count */ | 46 | atomic_t usage; /* object usage count */ |
48 | uint8_t type; /* object type */ | 47 | uint8_t type; /* object type */ |
49 | uint8_t new; /* T if object new */ | 48 | uint8_t new; /* T if object new */ |
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index ab857ab9f40d..fc1056f5c96a 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -97,7 +97,8 @@ static noinline void cachefiles_printk_object(struct cachefiles_object *object, | |||
97 | * call vfs_unlink(), vfs_rmdir() or vfs_rename() | 97 | * call vfs_unlink(), vfs_rmdir() or vfs_rename() |
98 | */ | 98 | */ |
99 | static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, | 99 | static void cachefiles_mark_object_buried(struct cachefiles_cache *cache, |
100 | struct dentry *dentry) | 100 | struct dentry *dentry, |
101 | enum fscache_why_object_killed why) | ||
101 | { | 102 | { |
102 | struct cachefiles_object *object; | 103 | struct cachefiles_object *object; |
103 | struct rb_node *p; | 104 | struct rb_node *p; |
@@ -132,8 +133,9 @@ found_dentry: | |||
132 | pr_err("\n"); | 133 | pr_err("\n"); |
133 | pr_err("Error: Can't preemptively bury live object\n"); | 134 | pr_err("Error: Can't preemptively bury live object\n"); |
134 | cachefiles_printk_object(object, NULL); | 135 | cachefiles_printk_object(object, NULL); |
135 | } else if (test_and_set_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) { | 136 | } else { |
136 | pr_err("Error: Object already preemptively buried\n"); | 137 | if (why != FSCACHE_OBJECT_IS_STALE) |
138 | fscache_object_mark_killed(&object->fscache, why); | ||
137 | } | 139 | } |
138 | 140 | ||
139 | write_unlock(&cache->active_lock); | 141 | write_unlock(&cache->active_lock); |
@@ -265,7 +267,8 @@ requeue: | |||
265 | static int cachefiles_bury_object(struct cachefiles_cache *cache, | 267 | static int cachefiles_bury_object(struct cachefiles_cache *cache, |
266 | struct dentry *dir, | 268 | struct dentry *dir, |
267 | struct dentry *rep, | 269 | struct dentry *rep, |
268 | bool preemptive) | 270 | bool preemptive, |
271 | enum fscache_why_object_killed why) | ||
269 | { | 272 | { |
270 | struct dentry *grave, *trap; | 273 | struct dentry *grave, *trap; |
271 | struct path path, path_to_graveyard; | 274 | struct path path, path_to_graveyard; |
@@ -289,7 +292,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache, | |||
289 | ret = vfs_unlink(d_inode(dir), rep, NULL); | 292 | ret = vfs_unlink(d_inode(dir), rep, NULL); |
290 | 293 | ||
291 | if (preemptive) | 294 | if (preemptive) |
292 | cachefiles_mark_object_buried(cache, rep); | 295 | cachefiles_mark_object_buried(cache, rep, why); |
293 | } | 296 | } |
294 | 297 | ||
295 | mutex_unlock(&d_inode(dir)->i_mutex); | 298 | mutex_unlock(&d_inode(dir)->i_mutex); |
@@ -394,7 +397,7 @@ try_again: | |||
394 | "Rename failed with error %d", ret); | 397 | "Rename failed with error %d", ret); |
395 | 398 | ||
396 | if (preemptive) | 399 | if (preemptive) |
397 | cachefiles_mark_object_buried(cache, rep); | 400 | cachefiles_mark_object_buried(cache, rep, why); |
398 | } | 401 | } |
399 | 402 | ||
400 | unlock_rename(cache->graveyard, dir); | 403 | unlock_rename(cache->graveyard, dir); |
@@ -422,7 +425,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache, | |||
422 | 425 | ||
423 | mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT); | 426 | mutex_lock_nested(&d_inode(dir)->i_mutex, I_MUTEX_PARENT); |
424 | 427 | ||
425 | if (test_bit(CACHEFILES_OBJECT_BURIED, &object->flags)) { | 428 | if (test_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->fscache.flags)) { |
426 | /* object allocation for the same key preemptively deleted this | 429 | /* object allocation for the same key preemptively deleted this |
427 | * object's file so that it could create its own file */ | 430 | * object's file so that it could create its own file */ |
428 | _debug("object preemptively buried"); | 431 | _debug("object preemptively buried"); |
@@ -433,7 +436,8 @@ int cachefiles_delete_object(struct cachefiles_cache *cache, | |||
433 | * may have been renamed */ | 436 | * may have been renamed */ |
434 | if (dir == object->dentry->d_parent) { | 437 | if (dir == object->dentry->d_parent) { |
435 | ret = cachefiles_bury_object(cache, dir, | 438 | ret = cachefiles_bury_object(cache, dir, |
436 | object->dentry, false); | 439 | object->dentry, false, |
440 | FSCACHE_OBJECT_WAS_RETIRED); | ||
437 | } else { | 441 | } else { |
438 | /* it got moved, presumably by cachefilesd culling it, | 442 | /* it got moved, presumably by cachefilesd culling it, |
439 | * so it's no longer in the key path and we can ignore | 443 | * so it's no longer in the key path and we can ignore |
@@ -522,7 +526,7 @@ lookup_again: | |||
522 | if (d_is_negative(next)) { | 526 | if (d_is_negative(next)) { |
523 | ret = cachefiles_has_space(cache, 1, 0); | 527 | ret = cachefiles_has_space(cache, 1, 0); |
524 | if (ret < 0) | 528 | if (ret < 0) |
525 | goto create_error; | 529 | goto no_space_error; |
526 | 530 | ||
527 | path.dentry = dir; | 531 | path.dentry = dir; |
528 | ret = security_path_mkdir(&path, next, 0); | 532 | ret = security_path_mkdir(&path, next, 0); |
@@ -551,7 +555,7 @@ lookup_again: | |||
551 | if (d_is_negative(next)) { | 555 | if (d_is_negative(next)) { |
552 | ret = cachefiles_has_space(cache, 1, 0); | 556 | ret = cachefiles_has_space(cache, 1, 0); |
553 | if (ret < 0) | 557 | if (ret < 0) |
554 | goto create_error; | 558 | goto no_space_error; |
555 | 559 | ||
556 | path.dentry = dir; | 560 | path.dentry = dir; |
557 | ret = security_path_mknod(&path, next, S_IFREG, 0); | 561 | ret = security_path_mknod(&path, next, S_IFREG, 0); |
@@ -602,7 +606,8 @@ lookup_again: | |||
602 | * mutex) */ | 606 | * mutex) */ |
603 | object->dentry = NULL; | 607 | object->dentry = NULL; |
604 | 608 | ||
605 | ret = cachefiles_bury_object(cache, dir, next, true); | 609 | ret = cachefiles_bury_object(cache, dir, next, true, |
610 | FSCACHE_OBJECT_IS_STALE); | ||
606 | dput(next); | 611 | dput(next); |
607 | next = NULL; | 612 | next = NULL; |
608 | 613 | ||
@@ -610,6 +615,7 @@ lookup_again: | |||
610 | goto delete_error; | 615 | goto delete_error; |
611 | 616 | ||
612 | _debug("redo lookup"); | 617 | _debug("redo lookup"); |
618 | fscache_object_retrying_stale(&object->fscache); | ||
613 | goto lookup_again; | 619 | goto lookup_again; |
614 | } | 620 | } |
615 | } | 621 | } |
@@ -662,6 +668,8 @@ lookup_again: | |||
662 | _leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino); | 668 | _leave(" = 0 [%lu]", d_backing_inode(object->dentry)->i_ino); |
663 | return 0; | 669 | return 0; |
664 | 670 | ||
671 | no_space_error: | ||
672 | fscache_object_mark_killed(&object->fscache, FSCACHE_OBJECT_NO_SPACE); | ||
665 | create_error: | 673 | create_error: |
666 | _debug("create error %d", ret); | 674 | _debug("create error %d", ret); |
667 | if (ret == -EIO) | 675 | if (ret == -EIO) |
@@ -927,7 +935,8 @@ int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir, | |||
927 | /* actually remove the victim (drops the dir mutex) */ | 935 | /* actually remove the victim (drops the dir mutex) */ |
928 | _debug("bury"); | 936 | _debug("bury"); |
929 | 937 | ||
930 | ret = cachefiles_bury_object(cache, dir, victim, false); | 938 | ret = cachefiles_bury_object(cache, dir, victim, false, |
939 | FSCACHE_OBJECT_WAS_CULLED); | ||
931 | if (ret < 0) | 940 | if (ret < 0) |
932 | goto error; | 941 | goto error; |
933 | 942 | ||
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index faf92095e105..8b79d87eaf46 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -962,7 +962,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
962 | 962 | ||
963 | pos = iocb->ki_pos; | 963 | pos = iocb->ki_pos; |
964 | count = iov_iter_count(from); | 964 | count = iov_iter_count(from); |
965 | err = file_remove_suid(file); | 965 | err = file_remove_privs(file); |
966 | if (err) | 966 | if (err) |
967 | goto out; | 967 | goto out; |
968 | 968 | ||
diff --git a/fs/coda/coda_linux.h b/fs/coda/coda_linux.h
index d6f7a76a1f5b..f829fe963f5b 100644
--- a/fs/coda/coda_linux.h
+++ b/fs/coda/coda_linux.h
@@ -79,7 +79,7 @@ void coda_sysctl_clean(void); | |||
79 | 79 | ||
80 | static inline struct coda_inode_info *ITOC(struct inode *inode) | 80 | static inline struct coda_inode_info *ITOC(struct inode *inode) |
81 | { | 81 | { |
82 | return list_entry(inode, struct coda_inode_info, vfs_inode); | 82 | return container_of(inode, struct coda_inode_info, vfs_inode); |
83 | } | 83 | } |
84 | 84 | ||
85 | static __inline__ struct CodaFid *coda_i2f(struct inode *inode) | 85 | static __inline__ struct CodaFid *coda_i2f(struct inode *inode) |
diff --git a/fs/configfs/inode.c b/fs/configfs/inode.c
index 8d89f5fd0331..eae87575e681 100644
--- a/fs/configfs/inode.c
+++ b/fs/configfs/inode.c
@@ -236,7 +236,7 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent) | |||
236 | 236 | ||
237 | if (dentry) { | 237 | if (dentry) { |
238 | spin_lock(&dentry->d_lock); | 238 | spin_lock(&dentry->d_lock); |
239 | if (!d_unhashed(dentry) && d_really_is_positive(dentry)) { | 239 | if (simple_positive(dentry)) { |
240 | dget_dlock(dentry); | 240 | dget_dlock(dentry); |
241 | __d_drop(dentry); | 241 | __d_drop(dentry); |
242 | spin_unlock(&dentry->d_lock); | 242 | spin_unlock(&dentry->d_lock); |
diff --git a/fs/coredump.c b/fs/coredump.c
index e52e0064feac..c5ecde6f3eed 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -140,7 +140,7 @@ static int cn_print_exe_file(struct core_name *cn) | |||
140 | goto put_exe_file; | 140 | goto put_exe_file; |
141 | } | 141 | } |
142 | 142 | ||
143 | path = d_path(&exe_file->f_path, pathbuf, PATH_MAX); | 143 | path = file_path(exe_file, pathbuf, PATH_MAX); |
144 | if (IS_ERR(path)) { | 144 | if (IS_ERR(path)) { |
145 | ret = PTR_ERR(path); | 145 | ret = PTR_ERR(path); |
146 | goto free_buf; | 146 | goto free_buf; |
diff --git a/fs/dax.c b/fs/dax.c
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -155,7 +155,7 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
155 | } | 155 | } |
156 | 156 | ||
157 | if (iov_iter_rw(iter) == WRITE) | 157 | if (iov_iter_rw(iter) == WRITE) |
158 | len = copy_from_iter(addr, max - pos, iter); | 158 | len = copy_from_iter_nocache(addr, max - pos, iter); |
159 | else if (!hole) | 159 | else if (!hole) |
160 | len = copy_to_iter(addr, max - pos, iter); | 160 | len = copy_to_iter(addr, max - pos, iter); |
161 | else | 161 | else |
@@ -209,7 +209,8 @@ ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode, | |||
209 | } | 209 | } |
210 | 210 | ||
211 | /* Protects against truncate */ | 211 | /* Protects against truncate */ |
212 | inode_dio_begin(inode); | 212 | if (!(flags & DIO_SKIP_DIO_COUNT)) |
213 | inode_dio_begin(inode); | ||
213 | 214 | ||
214 | retval = dax_io(inode, iter, pos, end, get_block, &bh); | 215 | retval = dax_io(inode, iter, pos, end, get_block, &bh); |
215 | 216 | ||
@@ -219,7 +220,8 @@ ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode, | |||
219 | if ((retval > 0) && end_io) | 220 | if ((retval > 0) && end_io) |
220 | end_io(iocb, pos, retval, bh.b_private); | 221 | end_io(iocb, pos, retval, bh.b_private); |
221 | 222 | ||
222 | inode_dio_end(inode); | 223 | if (!(flags & DIO_SKIP_DIO_COUNT)) |
224 | inode_dio_end(inode); | ||
223 | out: | 225 | out: |
224 | return retval; | 226 | return retval; |
225 | } | 227 | } |
diff --git a/fs/dcache.c b/fs/dcache.c
index 910968b4b6bf..7a3f3e5f9cea 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1673,7 +1673,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) | |||
1673 | DCACHE_OP_COMPARE | | 1673 | DCACHE_OP_COMPARE | |
1674 | DCACHE_OP_REVALIDATE | | 1674 | DCACHE_OP_REVALIDATE | |
1675 | DCACHE_OP_WEAK_REVALIDATE | | 1675 | DCACHE_OP_WEAK_REVALIDATE | |
1676 | DCACHE_OP_DELETE )); | 1676 | DCACHE_OP_DELETE | |
1677 | DCACHE_OP_SELECT_INODE)); | ||
1677 | dentry->d_op = op; | 1678 | dentry->d_op = op; |
1678 | if (!op) | 1679 | if (!op) |
1679 | return; | 1680 | return; |
@@ -1689,6 +1690,8 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op) | |||
1689 | dentry->d_flags |= DCACHE_OP_DELETE; | 1690 | dentry->d_flags |= DCACHE_OP_DELETE; |
1690 | if (op->d_prune) | 1691 | if (op->d_prune) |
1691 | dentry->d_flags |= DCACHE_OP_PRUNE; | 1692 | dentry->d_flags |= DCACHE_OP_PRUNE; |
1693 | if (op->d_select_inode) | ||
1694 | dentry->d_flags |= DCACHE_OP_SELECT_INODE; | ||
1692 | 1695 | ||
1693 | } | 1696 | } |
1694 | EXPORT_SYMBOL(d_set_d_op); | 1697 | EXPORT_SYMBOL(d_set_d_op); |
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index d6d1cf004123..c711be8d6a3c 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -44,11 +44,6 @@ static struct inode *debugfs_get_inode(struct super_block *sb) | |||
44 | return inode; | 44 | return inode; |
45 | } | 45 | } |
46 | 46 | ||
47 | static inline int debugfs_positive(struct dentry *dentry) | ||
48 | { | ||
49 | return d_really_is_positive(dentry) && !d_unhashed(dentry); | ||
50 | } | ||
51 | |||
52 | struct debugfs_mount_opts { | 47 | struct debugfs_mount_opts { |
53 | kuid_t uid; | 48 | kuid_t uid; |
54 | kgid_t gid; | 49 | kgid_t gid; |
@@ -522,7 +517,7 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent) | |||
522 | { | 517 | { |
523 | int ret = 0; | 518 | int ret = 0; |
524 | 519 | ||
525 | if (debugfs_positive(dentry)) { | 520 | if (simple_positive(dentry)) { |
526 | dget(dentry); | 521 | dget(dentry); |
527 | if (d_is_dir(dentry)) | 522 | if (d_is_dir(dentry)) |
528 | ret = simple_rmdir(d_inode(parent), dentry); | 523 | ret = simple_rmdir(d_inode(parent), dentry); |
@@ -602,7 +597,7 @@ void debugfs_remove_recursive(struct dentry *dentry) | |||
602 | */ | 597 | */ |
603 | spin_lock(&parent->d_lock); | 598 | spin_lock(&parent->d_lock); |
604 | list_for_each_entry(child, &parent->d_subdirs, d_child) { | 599 | list_for_each_entry(child, &parent->d_subdirs, d_child) { |
605 | if (!debugfs_positive(child)) | 600 | if (!simple_positive(child)) |
606 | continue; | 601 | continue; |
607 | 602 | ||
608 | /* perhaps simple_empty(child) makes more sense */ | 603 | /* perhaps simple_empty(child) makes more sense */ |
@@ -623,7 +618,7 @@ void debugfs_remove_recursive(struct dentry *dentry) | |||
623 | * from d_subdirs. When releasing the parent->d_lock we can | 618 | * from d_subdirs. When releasing the parent->d_lock we can |
624 | * no longer trust that the next pointer is valid. | 619 | * no longer trust that the next pointer is valid. |
625 | * Restart the loop. We'll skip this one with the | 620 | * Restart the loop. We'll skip this one with the |
626 | * debugfs_positive() check. | 621 | * simple_positive() check. |
627 | */ | 622 | */ |
628 | goto loop; | 623 | goto loop; |
629 | } | 624 | } |
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 4deb0b05b011..e5bb2abf77f9 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -44,12 +44,6 @@ static inline void exofs_put_page(struct page *page) | |||
44 | page_cache_release(page); | 44 | page_cache_release(page); |
45 | } | 45 | } |
46 | 46 | ||
47 | /* Accesses dir's inode->i_size must be called under inode lock */ | ||
48 | static inline unsigned long dir_pages(struct inode *inode) | ||
49 | { | ||
50 | return (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | ||
51 | } | ||
52 | |||
53 | static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr) | 47 | static unsigned exofs_last_byte(struct inode *inode, unsigned long page_nr) |
54 | { | 48 | { |
55 | loff_t last_byte = inode->i_size; | 49 | loff_t last_byte = inode->i_size; |
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index 796b491e6978..0c6638b40f21 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -70,11 +70,6 @@ static inline void ext2_put_page(struct page *page) | |||
70 | page_cache_release(page); | 70 | page_cache_release(page); |
71 | } | 71 | } |
72 | 72 | ||
73 | static inline unsigned long dir_pages(struct inode *inode) | ||
74 | { | ||
75 | return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; | ||
76 | } | ||
77 | |||
78 | /* | 73 | /* |
79 | * Return the offset into page `page_nr' of the last valid | 74 | * Return the offset into page `page_nr' of the last valid |
80 | * byte in that page, plus one. | 75 | * byte in that page, plus one. |
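exofs, ext2 and (below) freevxfs each carried a private dir_pages(); the copies removed in this series compute the same value, so a single shared definition replaces them. A sketch of that helper, matching the removed bodies, with its new location assumed rather than shown here:

	/* Number of page-cache pages needed to cover a directory's i_size. */
	static inline unsigned long dir_pages(struct inode *inode)
	{
		return (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	}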
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 5c787647afe2..58987b5c514b 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -452,7 +452,7 @@ void __ext4_error_file(struct file *file, const char *function, | |||
452 | es = EXT4_SB(inode->i_sb)->s_es; | 452 | es = EXT4_SB(inode->i_sb)->s_es; |
453 | es->s_last_error_ino = cpu_to_le32(inode->i_ino); | 453 | es->s_last_error_ino = cpu_to_le32(inode->i_ino); |
454 | if (ext4_error_ratelimit(inode->i_sb)) { | 454 | if (ext4_error_ratelimit(inode->i_sb)) { |
455 | path = d_path(&(file->f_path), pathname, sizeof(pathname)); | 455 | path = file_path(file, pathname, sizeof(pathname)); |
456 | if (IS_ERR(path)) | 456 | if (IS_ERR(path)) |
457 | path = "(unknown)"; | 457 | path = "(unknown)"; |
458 | va_start(args, fmt); | 458 | va_start(args, fmt); |
diff --git a/fs/file.c b/fs/file.c
--- a/fs/file.c
+++ b/fs/file.c
@@ -147,6 +147,13 @@ static int expand_fdtable(struct files_struct *files, int nr)
147 | 147 | ||
148 | spin_unlock(&files->file_lock); | 148 | spin_unlock(&files->file_lock); |
149 | new_fdt = alloc_fdtable(nr); | 149 | new_fdt = alloc_fdtable(nr); |
150 | |||
151 | /* make sure all __fd_install() have seen resize_in_progress | ||
152 | * or have finished their rcu_read_lock_sched() section. | ||
153 | */ | ||
154 | if (atomic_read(&files->count) > 1) | ||
155 | synchronize_sched(); | ||
156 | |||
150 | spin_lock(&files->file_lock); | 157 | spin_lock(&files->file_lock); |
151 | if (!new_fdt) | 158 | if (!new_fdt) |
152 | return -ENOMEM; | 159 | return -ENOMEM; |
@@ -158,21 +165,14 @@ static int expand_fdtable(struct files_struct *files, int nr) | |||
158 | __free_fdtable(new_fdt); | 165 | __free_fdtable(new_fdt); |
159 | return -EMFILE; | 166 | return -EMFILE; |
160 | } | 167 | } |
161 | /* | ||
162 | * Check again since another task may have expanded the fd table while | ||
163 | * we dropped the lock | ||
164 | */ | ||
165 | cur_fdt = files_fdtable(files); | 168 | cur_fdt = files_fdtable(files); |
166 | if (nr >= cur_fdt->max_fds) { | 169 | BUG_ON(nr < cur_fdt->max_fds); |
167 | /* Continue as planned */ | 170 | copy_fdtable(new_fdt, cur_fdt); |
168 | copy_fdtable(new_fdt, cur_fdt); | 171 | rcu_assign_pointer(files->fdt, new_fdt); |
169 | rcu_assign_pointer(files->fdt, new_fdt); | 172 | if (cur_fdt != &files->fdtab) |
170 | if (cur_fdt != &files->fdtab) | 173 | call_rcu(&cur_fdt->rcu, free_fdtable_rcu); |
171 | call_rcu(&cur_fdt->rcu, free_fdtable_rcu); | 174 | /* coupled with smp_rmb() in __fd_install() */ |
172 | } else { | 175 | smp_wmb(); |
173 | /* Somebody else expanded, so undo our attempt */ | ||
174 | __free_fdtable(new_fdt); | ||
175 | } | ||
176 | return 1; | 176 | return 1; |
177 | } | 177 | } |
178 | 178 | ||
@@ -185,21 +185,38 @@ static int expand_fdtable(struct files_struct *files, int nr) | |||
185 | * The files->file_lock should be held on entry, and will be held on exit. | 185 | * The files->file_lock should be held on entry, and will be held on exit. |
186 | */ | 186 | */ |
187 | static int expand_files(struct files_struct *files, int nr) | 187 | static int expand_files(struct files_struct *files, int nr) |
188 | __releases(files->file_lock) | ||
189 | __acquires(files->file_lock) | ||
188 | { | 190 | { |
189 | struct fdtable *fdt; | 191 | struct fdtable *fdt; |
192 | int expanded = 0; | ||
190 | 193 | ||
194 | repeat: | ||
191 | fdt = files_fdtable(files); | 195 | fdt = files_fdtable(files); |
192 | 196 | ||
193 | /* Do we need to expand? */ | 197 | /* Do we need to expand? */ |
194 | if (nr < fdt->max_fds) | 198 | if (nr < fdt->max_fds) |
195 | return 0; | 199 | return expanded; |
196 | 200 | ||
197 | /* Can we expand? */ | 201 | /* Can we expand? */ |
198 | if (nr >= sysctl_nr_open) | 202 | if (nr >= sysctl_nr_open) |
199 | return -EMFILE; | 203 | return -EMFILE; |
200 | 204 | ||
205 | if (unlikely(files->resize_in_progress)) { | ||
206 | spin_unlock(&files->file_lock); | ||
207 | expanded = 1; | ||
208 | wait_event(files->resize_wait, !files->resize_in_progress); | ||
209 | spin_lock(&files->file_lock); | ||
210 | goto repeat; | ||
211 | } | ||
212 | |||
201 | /* All good, so we try */ | 213 | /* All good, so we try */ |
202 | return expand_fdtable(files, nr); | 214 | files->resize_in_progress = true; |
215 | expanded = expand_fdtable(files, nr); | ||
216 | files->resize_in_progress = false; | ||
217 | |||
218 | wake_up_all(&files->resize_wait); | ||
219 | return expanded; | ||
203 | } | 220 | } |
204 | 221 | ||
205 | static inline void __set_close_on_exec(int fd, struct fdtable *fdt) | 222 | static inline void __set_close_on_exec(int fd, struct fdtable *fdt) |
@@ -256,6 +273,8 @@ struct files_struct *dup_fd(struct files_struct *oldf, int *errorp) | |||
256 | atomic_set(&newf->count, 1); | 273 | atomic_set(&newf->count, 1); |
257 | 274 | ||
258 | spin_lock_init(&newf->file_lock); | 275 | spin_lock_init(&newf->file_lock); |
276 | newf->resize_in_progress = false; | ||
277 | init_waitqueue_head(&newf->resize_wait); | ||
259 | newf->next_fd = 0; | 278 | newf->next_fd = 0; |
260 | new_fdt = &newf->fdtab; | 279 | new_fdt = &newf->fdtab; |
261 | new_fdt->max_fds = NR_OPEN_DEFAULT; | 280 | new_fdt->max_fds = NR_OPEN_DEFAULT; |
@@ -553,11 +572,21 @@ void __fd_install(struct files_struct *files, unsigned int fd, | |||
553 | struct file *file) | 572 | struct file *file) |
554 | { | 573 | { |
555 | struct fdtable *fdt; | 574 | struct fdtable *fdt; |
556 | spin_lock(&files->file_lock); | 575 | |
557 | fdt = files_fdtable(files); | 576 | might_sleep(); |
577 | rcu_read_lock_sched(); | ||
578 | |||
579 | while (unlikely(files->resize_in_progress)) { | ||
580 | rcu_read_unlock_sched(); | ||
581 | wait_event(files->resize_wait, !files->resize_in_progress); | ||
582 | rcu_read_lock_sched(); | ||
583 | } | ||
584 | /* coupled with smp_wmb() in expand_fdtable() */ | ||
585 | smp_rmb(); | ||
586 | fdt = rcu_dereference_sched(files->fdt); | ||
558 | BUG_ON(fdt->fd[fd] != NULL); | 587 | BUG_ON(fdt->fd[fd] != NULL); |
559 | rcu_assign_pointer(fdt->fd[fd], file); | 588 | rcu_assign_pointer(fdt->fd[fd], file); |
560 | spin_unlock(&files->file_lock); | 589 | rcu_read_unlock_sched(); |
561 | } | 590 | } |
562 | 591 | ||
563 | void fd_install(unsigned int fd, struct file *file) | 592 | void fd_install(unsigned int fd, struct file *file) |
@@ -635,11 +664,17 @@ static struct file *__fget(unsigned int fd, fmode_t mask) | |||
635 | struct file *file; | 664 | struct file *file; |
636 | 665 | ||
637 | rcu_read_lock(); | 666 | rcu_read_lock(); |
667 | loop: | ||
638 | file = fcheck_files(files, fd); | 668 | file = fcheck_files(files, fd); |
639 | if (file) { | 669 | if (file) { |
640 | /* File object ref couldn't be taken */ | 670 | /* File object ref couldn't be taken. |
641 | if ((file->f_mode & mask) || !get_file_rcu(file)) | 671 | * dup2() atomicity guarantee is the reason |
672 | * we loop to catch the new file (or NULL pointer) | ||
673 | */ | ||
674 | if (file->f_mode & mask) | ||
642 | file = NULL; | 675 | file = NULL; |
676 | else if (!get_file_rcu(file)) | ||
677 | goto loop; | ||
643 | } | 678 | } |
644 | rcu_read_unlock(); | 679 | rcu_read_unlock(); |
645 | 680 | ||
diff --git a/fs/file_table.c b/fs/file_table.c
index 294174dcc226..7f9d407c7595 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/cdev.h> | 20 | #include <linux/cdev.h> |
21 | #include <linux/fsnotify.h> | 21 | #include <linux/fsnotify.h> |
22 | #include <linux/sysctl.h> | 22 | #include <linux/sysctl.h> |
23 | #include <linux/lglock.h> | ||
24 | #include <linux/percpu_counter.h> | 23 | #include <linux/percpu_counter.h> |
25 | #include <linux/percpu.h> | 24 | #include <linux/percpu.h> |
26 | #include <linux/hardirq.h> | 25 | #include <linux/hardirq.h> |
diff --git a/fs/freevxfs/vxfs_lookup.c b/fs/freevxfs/vxfs_lookup.c
index 99c7f0a37af4..484b32d3234a 100644
--- a/fs/freevxfs/vxfs_lookup.c
+++ b/fs/freevxfs/vxfs_lookup.c
@@ -61,13 +61,6 @@ const struct file_operations vxfs_dir_operations = { | |||
61 | .iterate = vxfs_readdir, | 61 | .iterate = vxfs_readdir, |
62 | }; | 62 | }; |
63 | 63 | ||
64 | |||
65 | static inline u_long | ||
66 | dir_pages(struct inode *inode) | ||
67 | { | ||
68 | return (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | ||
69 | } | ||
70 | |||
71 | static inline u_long | 64 | static inline u_long |
72 | dir_blocks(struct inode *ip) | 65 | dir_blocks(struct inode *ip) |
73 | { | 66 | { |
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 89acec742e0b..d403c69bee08 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -327,7 +327,8 @@ static int fscache_alloc_object(struct fscache_cache *cache, | |||
327 | 327 | ||
328 | object_already_extant: | 328 | object_already_extant: |
329 | ret = -ENOBUFS; | 329 | ret = -ENOBUFS; |
330 | if (fscache_object_is_dead(object)) { | 330 | if (fscache_object_is_dying(object) || |
331 | fscache_cache_is_broken(object)) { | ||
331 | spin_unlock(&cookie->lock); | 332 | spin_unlock(&cookie->lock); |
332 | goto error; | 333 | goto error; |
333 | } | 334 | } |
@@ -671,7 +672,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie) | |||
671 | if (!op) | 672 | if (!op) |
672 | return -ENOMEM; | 673 | return -ENOMEM; |
673 | 674 | ||
674 | fscache_operation_init(op, NULL, NULL); | 675 | fscache_operation_init(op, NULL, NULL, NULL); |
675 | op->flags = FSCACHE_OP_MYTHREAD | | 676 | op->flags = FSCACHE_OP_MYTHREAD | |
676 | (1 << FSCACHE_OP_WAITING) | | 677 | (1 << FSCACHE_OP_WAITING) | |
677 | (1 << FSCACHE_OP_UNUSE_COOKIE); | 678 | (1 << FSCACHE_OP_UNUSE_COOKIE); |
@@ -695,8 +696,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie) | |||
695 | /* the work queue now carries its own ref on the object */ | 696 | /* the work queue now carries its own ref on the object */ |
696 | spin_unlock(&cookie->lock); | 697 | spin_unlock(&cookie->lock); |
697 | 698 | ||
698 | ret = fscache_wait_for_operation_activation(object, op, | 699 | ret = fscache_wait_for_operation_activation(object, op, NULL, NULL); |
699 | NULL, NULL, NULL); | ||
700 | if (ret == 0) { | 700 | if (ret == 0) { |
701 | /* ask the cache to honour the operation */ | 701 | /* ask the cache to honour the operation */ |
702 | ret = object->cache->ops->check_consistency(op); | 702 | ret = object->cache->ops->check_consistency(op); |
diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h index 7872a62ef30c..97ec45110957 100644 --- a/fs/fscache/internal.h +++ b/fs/fscache/internal.h | |||
@@ -124,8 +124,7 @@ extern int fscache_submit_exclusive_op(struct fscache_object *, | |||
124 | struct fscache_operation *); | 124 | struct fscache_operation *); |
125 | extern int fscache_submit_op(struct fscache_object *, | 125 | extern int fscache_submit_op(struct fscache_object *, |
126 | struct fscache_operation *); | 126 | struct fscache_operation *); |
127 | extern int fscache_cancel_op(struct fscache_operation *, | 127 | extern int fscache_cancel_op(struct fscache_operation *, bool); |
128 | void (*)(struct fscache_operation *)); | ||
129 | extern void fscache_cancel_all_ops(struct fscache_object *); | 128 | extern void fscache_cancel_all_ops(struct fscache_object *); |
130 | extern void fscache_abort_object(struct fscache_object *); | 129 | extern void fscache_abort_object(struct fscache_object *); |
131 | extern void fscache_start_operations(struct fscache_object *); | 130 | extern void fscache_start_operations(struct fscache_object *); |
@@ -138,8 +137,7 @@ extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *); | |||
138 | extern int fscache_wait_for_operation_activation(struct fscache_object *, | 137 | extern int fscache_wait_for_operation_activation(struct fscache_object *, |
139 | struct fscache_operation *, | 138 | struct fscache_operation *, |
140 | atomic_t *, | 139 | atomic_t *, |
141 | atomic_t *, | 140 | atomic_t *); |
142 | void (*)(struct fscache_operation *)); | ||
143 | extern void fscache_invalidate_writes(struct fscache_cookie *); | 141 | extern void fscache_invalidate_writes(struct fscache_cookie *); |
144 | 142 | ||
145 | /* | 143 | /* |
@@ -164,6 +162,7 @@ extern atomic_t fscache_n_op_pend; | |||
164 | extern atomic_t fscache_n_op_run; | 162 | extern atomic_t fscache_n_op_run; |
165 | extern atomic_t fscache_n_op_enqueue; | 163 | extern atomic_t fscache_n_op_enqueue; |
166 | extern atomic_t fscache_n_op_deferred_release; | 164 | extern atomic_t fscache_n_op_deferred_release; |
165 | extern atomic_t fscache_n_op_initialised; | ||
167 | extern atomic_t fscache_n_op_release; | 166 | extern atomic_t fscache_n_op_release; |
168 | extern atomic_t fscache_n_op_gc; | 167 | extern atomic_t fscache_n_op_gc; |
169 | extern atomic_t fscache_n_op_cancelled; | 168 | extern atomic_t fscache_n_op_cancelled; |
@@ -271,6 +270,11 @@ extern atomic_t fscache_n_cop_write_page; | |||
271 | extern atomic_t fscache_n_cop_uncache_page; | 270 | extern atomic_t fscache_n_cop_uncache_page; |
272 | extern atomic_t fscache_n_cop_dissociate_pages; | 271 | extern atomic_t fscache_n_cop_dissociate_pages; |
273 | 272 | ||
273 | extern atomic_t fscache_n_cache_no_space_reject; | ||
274 | extern atomic_t fscache_n_cache_stale_objects; | ||
275 | extern atomic_t fscache_n_cache_retired_objects; | ||
276 | extern atomic_t fscache_n_cache_culled_objects; | ||
277 | |||
274 | static inline void fscache_stat(atomic_t *stat) | 278 | static inline void fscache_stat(atomic_t *stat) |
275 | { | 279 | { |
276 | atomic_inc(stat); | 280 | atomic_inc(stat); |
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index da032daf0e0d..9e792e30f4db 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -328,6 +328,17 @@ void fscache_object_init(struct fscache_object *object, | |||
328 | EXPORT_SYMBOL(fscache_object_init); | 328 | EXPORT_SYMBOL(fscache_object_init); |
329 | 329 | ||
330 | /* | 330 | /* |
331 | * Mark the object as no longer being live, making sure that we synchronise | ||
332 | * against op submission. | ||
333 | */ | ||
334 | static inline void fscache_mark_object_dead(struct fscache_object *object) | ||
335 | { | ||
336 | spin_lock(&object->lock); | ||
337 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); | ||
338 | spin_unlock(&object->lock); | ||
339 | } | ||
340 | |||
341 | /* | ||
331 | * Abort object initialisation before we start it. | 342 | * Abort object initialisation before we start it. |
332 | */ | 343 | */ |
333 | static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object, | 344 | static const struct fscache_state *fscache_abort_initialisation(struct fscache_object *object, |
@@ -610,6 +621,8 @@ static const struct fscache_state *fscache_lookup_failure(struct fscache_object | |||
610 | object->cache->ops->lookup_complete(object); | 621 | object->cache->ops->lookup_complete(object); |
611 | fscache_stat_d(&fscache_n_cop_lookup_complete); | 622 | fscache_stat_d(&fscache_n_cop_lookup_complete); |
612 | 623 | ||
624 | set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags); | ||
625 | |||
613 | cookie = object->cookie; | 626 | cookie = object->cookie; |
614 | set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); | 627 | set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags); |
615 | if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) | 628 | if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) |
@@ -629,7 +642,7 @@ static const struct fscache_state *fscache_kill_object(struct fscache_object *ob | |||
629 | _enter("{OBJ%x,%d,%d},%d", | 642 | _enter("{OBJ%x,%d,%d},%d", |
630 | object->debug_id, object->n_ops, object->n_children, event); | 643 | object->debug_id, object->n_ops, object->n_children, event); |
631 | 644 | ||
632 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); | 645 | fscache_mark_object_dead(object); |
633 | object->oob_event_mask = 0; | 646 | object->oob_event_mask = 0; |
634 | 647 | ||
635 | if (list_empty(&object->dependents) && | 648 | if (list_empty(&object->dependents) && |
@@ -948,7 +961,8 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj | |||
948 | if (!op) | 961 | if (!op) |
949 | goto nomem; | 962 | goto nomem; |
950 | 963 | ||
951 | fscache_operation_init(op, object->cache->ops->invalidate_object, NULL); | 964 | fscache_operation_init(op, object->cache->ops->invalidate_object, |
965 | NULL, NULL); | ||
952 | op->flags = FSCACHE_OP_ASYNC | | 966 | op->flags = FSCACHE_OP_ASYNC | |
953 | (1 << FSCACHE_OP_EXCLUSIVE) | | 967 | (1 << FSCACHE_OP_EXCLUSIVE) | |
954 | (1 << FSCACHE_OP_UNUSE_COOKIE); | 968 | (1 << FSCACHE_OP_UNUSE_COOKIE); |
@@ -974,13 +988,13 @@ static const struct fscache_state *_fscache_invalidate_object(struct fscache_obj | |||
974 | return transit_to(UPDATE_OBJECT); | 988 | return transit_to(UPDATE_OBJECT); |
975 | 989 | ||
976 | nomem: | 990 | nomem: |
977 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); | 991 | fscache_mark_object_dead(object); |
978 | fscache_unuse_cookie(object); | 992 | fscache_unuse_cookie(object); |
979 | _leave(" [ENOMEM]"); | 993 | _leave(" [ENOMEM]"); |
980 | return transit_to(KILL_OBJECT); | 994 | return transit_to(KILL_OBJECT); |
981 | 995 | ||
982 | submit_op_failed: | 996 | submit_op_failed: |
983 | clear_bit(FSCACHE_OBJECT_IS_LIVE, &object->flags); | 997 | fscache_mark_object_dead(object); |
984 | spin_unlock(&cookie->lock); | 998 | spin_unlock(&cookie->lock); |
985 | fscache_unuse_cookie(object); | 999 | fscache_unuse_cookie(object); |
986 | kfree(op); | 1000 | kfree(op); |
@@ -1016,3 +1030,50 @@ static const struct fscache_state *fscache_update_object(struct fscache_object * | |||
1016 | _leave(""); | 1030 | _leave(""); |
1017 | return transit_to(WAIT_FOR_CMD); | 1031 | return transit_to(WAIT_FOR_CMD); |
1018 | } | 1032 | } |
1033 | |||
1034 | /** | ||
1035 | * fscache_object_retrying_stale - Note retrying stale object | ||
1036 | * @object: The object that will be retried | ||
1037 | * | ||
1038 | * Note that an object lookup found an on-disk object that was adjudged to be | ||
1039 | * stale and has been deleted. The lookup will be retried. | ||
1040 | */ | ||
1041 | void fscache_object_retrying_stale(struct fscache_object *object) | ||
1042 | { | ||
1043 | fscache_stat(&fscache_n_cache_no_space_reject); | ||
1044 | } | ||
1045 | EXPORT_SYMBOL(fscache_object_retrying_stale); | ||
1046 | |||
1047 | /** | ||
1048 | * fscache_object_mark_killed - Note that an object was killed | ||
1049 | * @object: The object that was killed | ||
1050 | * @why: The reason the object was killed. | ||
1051 | * | ||
1052 | * Note that an object was killed by the cache; the reason given in @why | ||
1053 | * selects which event counter accounts for it. | ||
1054 | */ | ||
1055 | void fscache_object_mark_killed(struct fscache_object *object, | ||
1056 | enum fscache_why_object_killed why) | ||
1057 | { | ||
1058 | if (test_and_set_bit(FSCACHE_OBJECT_KILLED_BY_CACHE, &object->flags)) { | ||
1059 | pr_err("Error: Object already killed by cache [%s]\n", | ||
1060 | object->cache->identifier); | ||
1061 | return; | ||
1062 | } | ||
1063 | |||
1064 | switch (why) { | ||
1065 | case FSCACHE_OBJECT_NO_SPACE: | ||
1066 | fscache_stat(&fscache_n_cache_no_space_reject); | ||
1067 | break; | ||
1068 | case FSCACHE_OBJECT_IS_STALE: | ||
1069 | fscache_stat(&fscache_n_cache_stale_objects); | ||
1070 | break; | ||
1071 | case FSCACHE_OBJECT_WAS_RETIRED: | ||
1072 | fscache_stat(&fscache_n_cache_retired_objects); | ||
1073 | break; | ||
1074 | case FSCACHE_OBJECT_WAS_CULLED: | ||
1075 | fscache_stat(&fscache_n_cache_culled_objects); | ||
1076 | break; | ||
1077 | } | ||
1078 | } | ||
1079 | EXPORT_SYMBOL(fscache_object_mark_killed); | ||
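For orientation, the sketch below shows how a cache backend might report these
events through the two helpers added above.  It is only an illustration:
example_backend_reject() and its arguments are invented, and only the
fscache_object_mark_killed(), fscache_object_retrying_stale() and
FSCACHE_OBJECT_NO_SPACE names come from this patch.

	/* Hypothetical backend helper, not part of this patch. */
	static void example_backend_reject(struct fscache_object *object,
					   bool out_of_space)
	{
		if (out_of_space) {
			/* Accounted under nsp= in /proc/fs/fscache/stats. */
			fscache_object_mark_killed(object, FSCACHE_OBJECT_NO_SPACE);
		} else {
			/* Stale on-disk copy was deleted; the lookup is retried. */
			fscache_object_retrying_stale(object);
		}
	}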
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c index e7b87a0e5185..de67745e1cd7 100644 --- a/fs/fscache/operation.c +++ b/fs/fscache/operation.c | |||
@@ -20,6 +20,35 @@ | |||
20 | atomic_t fscache_op_debug_id; | 20 | atomic_t fscache_op_debug_id; |
21 | EXPORT_SYMBOL(fscache_op_debug_id); | 21 | EXPORT_SYMBOL(fscache_op_debug_id); |
22 | 22 | ||
23 | static void fscache_operation_dummy_cancel(struct fscache_operation *op) | ||
24 | { | ||
25 | } | ||
26 | |||
27 | /** | ||
28 | * fscache_operation_init - Do basic initialisation of an operation | ||
29 | * @op: The operation to initialise | ||
30 | * @release: The release function to assign | ||
31 | * | ||
32 | * Do basic initialisation of an operation. The caller must still set flags, | ||
33 | * object and processor if needed. | ||
34 | */ | ||
35 | void fscache_operation_init(struct fscache_operation *op, | ||
36 | fscache_operation_processor_t processor, | ||
37 | fscache_operation_cancel_t cancel, | ||
38 | fscache_operation_release_t release) | ||
39 | { | ||
40 | INIT_WORK(&op->work, fscache_op_work_func); | ||
41 | atomic_set(&op->usage, 1); | ||
42 | op->state = FSCACHE_OP_ST_INITIALISED; | ||
43 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); | ||
44 | op->processor = processor; | ||
45 | op->cancel = cancel ?: fscache_operation_dummy_cancel; | ||
46 | op->release = release; | ||
47 | INIT_LIST_HEAD(&op->pend_link); | ||
48 | fscache_stat(&fscache_n_op_initialised); | ||
49 | } | ||
50 | EXPORT_SYMBOL(fscache_operation_init); | ||
51 | |||
23 | /** | 52 | /** |
24 | * fscache_enqueue_operation - Enqueue an operation for processing | 53 | * fscache_enqueue_operation - Enqueue an operation for processing |
25 | * @op: The operation to enqueue | 54 | * @op: The operation to enqueue |
@@ -76,6 +105,43 @@ static void fscache_run_op(struct fscache_object *object, | |||
76 | } | 105 | } |
77 | 106 | ||
78 | /* | 107 | /* |
108 | * report an unexpected submission | ||
109 | */ | ||
110 | static void fscache_report_unexpected_submission(struct fscache_object *object, | ||
111 | struct fscache_operation *op, | ||
112 | const struct fscache_state *ostate) | ||
113 | { | ||
114 | static bool once_only; | ||
115 | struct fscache_operation *p; | ||
116 | unsigned n; | ||
117 | |||
118 | if (once_only) | ||
119 | return; | ||
120 | once_only = true; | ||
121 | |||
122 | kdebug("unexpected submission OP%x [OBJ%x %s]", | ||
123 | op->debug_id, object->debug_id, object->state->name); | ||
124 | kdebug("objstate=%s [%s]", object->state->name, ostate->name); | ||
125 | kdebug("objflags=%lx", object->flags); | ||
126 | kdebug("objevent=%lx [%lx]", object->events, object->event_mask); | ||
127 | kdebug("ops=%u inp=%u exc=%u", | ||
128 | object->n_ops, object->n_in_progress, object->n_exclusive); | ||
129 | |||
130 | if (!list_empty(&object->pending_ops)) { | ||
131 | n = 0; | ||
132 | list_for_each_entry(p, &object->pending_ops, pend_link) { | ||
133 | ASSERTCMP(p->object, ==, object); | ||
134 | kdebug("%p %p", op->processor, op->release); | ||
135 | n++; | ||
136 | } | ||
137 | |||
138 | kdebug("n=%u", n); | ||
139 | } | ||
140 | |||
141 | dump_stack(); | ||
142 | } | ||
143 | |||
144 | /* | ||
79 | * submit an exclusive operation for an object | 145 | * submit an exclusive operation for an object |
80 | * - other ops are excluded from running simultaneously with this one | 146 | * - other ops are excluded from running simultaneously with this one |
81 | * - this gets any extra refs it needs on an op | 147 | * - this gets any extra refs it needs on an op |
@@ -83,6 +149,8 @@ static void fscache_run_op(struct fscache_object *object, | |||
83 | int fscache_submit_exclusive_op(struct fscache_object *object, | 149 | int fscache_submit_exclusive_op(struct fscache_object *object, |
84 | struct fscache_operation *op) | 150 | struct fscache_operation *op) |
85 | { | 151 | { |
152 | const struct fscache_state *ostate; | ||
153 | unsigned long flags; | ||
86 | int ret; | 154 | int ret; |
87 | 155 | ||
88 | _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); | 156 | _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id); |
@@ -95,8 +163,21 @@ int fscache_submit_exclusive_op(struct fscache_object *object, | |||
95 | ASSERTCMP(object->n_ops, >=, object->n_exclusive); | 163 | ASSERTCMP(object->n_ops, >=, object->n_exclusive); |
96 | ASSERT(list_empty(&op->pend_link)); | 164 | ASSERT(list_empty(&op->pend_link)); |
97 | 165 | ||
166 | ostate = object->state; | ||
167 | smp_rmb(); | ||
168 | |||
98 | op->state = FSCACHE_OP_ST_PENDING; | 169 | op->state = FSCACHE_OP_ST_PENDING; |
99 | if (fscache_object_is_active(object)) { | 170 | flags = READ_ONCE(object->flags); |
171 | if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) { | ||
172 | fscache_stat(&fscache_n_op_rejected); | ||
173 | op->cancel(op); | ||
174 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
175 | ret = -ENOBUFS; | ||
176 | } else if (unlikely(fscache_cache_is_broken(object))) { | ||
177 | op->cancel(op); | ||
178 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
179 | ret = -EIO; | ||
180 | } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) { | ||
100 | op->object = object; | 181 | op->object = object; |
101 | object->n_ops++; | 182 | object->n_ops++; |
102 | object->n_exclusive++; /* reads and writes must wait */ | 183 | object->n_exclusive++; /* reads and writes must wait */ |
@@ -118,7 +199,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object, | |||
118 | /* need to issue a new write op after this */ | 199 | /* need to issue a new write op after this */ |
119 | clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); | 200 | clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags); |
120 | ret = 0; | 201 | ret = 0; |
121 | } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { | 202 | } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) { |
122 | op->object = object; | 203 | op->object = object; |
123 | object->n_ops++; | 204 | object->n_ops++; |
124 | object->n_exclusive++; /* reads and writes must wait */ | 205 | object->n_exclusive++; /* reads and writes must wait */ |
@@ -126,12 +207,15 @@ int fscache_submit_exclusive_op(struct fscache_object *object, | |||
126 | list_add_tail(&op->pend_link, &object->pending_ops); | 207 | list_add_tail(&op->pend_link, &object->pending_ops); |
127 | fscache_stat(&fscache_n_op_pend); | 208 | fscache_stat(&fscache_n_op_pend); |
128 | ret = 0; | 209 | ret = 0; |
210 | } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) { | ||
211 | op->cancel(op); | ||
212 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
213 | ret = -ENOBUFS; | ||
129 | } else { | 214 | } else { |
130 | /* If we're in any other state, there must have been an I/O | 215 | fscache_report_unexpected_submission(object, op, ostate); |
131 | * error of some nature. | 216 | op->cancel(op); |
132 | */ | 217 | op->state = FSCACHE_OP_ST_CANCELLED; |
133 | ASSERT(test_bit(FSCACHE_IOERROR, &object->cache->flags)); | 218 | ret = -ENOBUFS; |
134 | ret = -EIO; | ||
135 | } | 219 | } |
136 | 220 | ||
137 | spin_unlock(&object->lock); | 221 | spin_unlock(&object->lock); |
@@ -139,43 +223,6 @@ int fscache_submit_exclusive_op(struct fscache_object *object, | |||
139 | } | 223 | } |
140 | 224 | ||
141 | /* | 225 | /* |
142 | * report an unexpected submission | ||
143 | */ | ||
144 | static void fscache_report_unexpected_submission(struct fscache_object *object, | ||
145 | struct fscache_operation *op, | ||
146 | const struct fscache_state *ostate) | ||
147 | { | ||
148 | static bool once_only; | ||
149 | struct fscache_operation *p; | ||
150 | unsigned n; | ||
151 | |||
152 | if (once_only) | ||
153 | return; | ||
154 | once_only = true; | ||
155 | |||
156 | kdebug("unexpected submission OP%x [OBJ%x %s]", | ||
157 | op->debug_id, object->debug_id, object->state->name); | ||
158 | kdebug("objstate=%s [%s]", object->state->name, ostate->name); | ||
159 | kdebug("objflags=%lx", object->flags); | ||
160 | kdebug("objevent=%lx [%lx]", object->events, object->event_mask); | ||
161 | kdebug("ops=%u inp=%u exc=%u", | ||
162 | object->n_ops, object->n_in_progress, object->n_exclusive); | ||
163 | |||
164 | if (!list_empty(&object->pending_ops)) { | ||
165 | n = 0; | ||
166 | list_for_each_entry(p, &object->pending_ops, pend_link) { | ||
167 | ASSERTCMP(p->object, ==, object); | ||
168 | kdebug("%p %p", op->processor, op->release); | ||
169 | n++; | ||
170 | } | ||
171 | |||
172 | kdebug("n=%u", n); | ||
173 | } | ||
174 | |||
175 | dump_stack(); | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * submit an operation for an object | 226 | * submit an operation for an object |
180 | * - objects may be submitted only in the following states: | 227 | * - objects may be submitted only in the following states: |
181 | * - during object creation (write ops may be submitted) | 228 | * - during object creation (write ops may be submitted) |
@@ -187,6 +234,7 @@ int fscache_submit_op(struct fscache_object *object, | |||
187 | struct fscache_operation *op) | 234 | struct fscache_operation *op) |
188 | { | 235 | { |
189 | const struct fscache_state *ostate; | 236 | const struct fscache_state *ostate; |
237 | unsigned long flags; | ||
190 | int ret; | 238 | int ret; |
191 | 239 | ||
192 | _enter("{OBJ%x OP%x},{%u}", | 240 | _enter("{OBJ%x OP%x},{%u}", |
@@ -204,7 +252,17 @@ int fscache_submit_op(struct fscache_object *object, | |||
204 | smp_rmb(); | 252 | smp_rmb(); |
205 | 253 | ||
206 | op->state = FSCACHE_OP_ST_PENDING; | 254 | op->state = FSCACHE_OP_ST_PENDING; |
207 | if (fscache_object_is_active(object)) { | 255 | flags = READ_ONCE(object->flags); |
256 | if (unlikely(!(flags & BIT(FSCACHE_OBJECT_IS_LIVE)))) { | ||
257 | fscache_stat(&fscache_n_op_rejected); | ||
258 | op->cancel(op); | ||
259 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
260 | ret = -ENOBUFS; | ||
261 | } else if (unlikely(fscache_cache_is_broken(object))) { | ||
262 | op->cancel(op); | ||
263 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
264 | ret = -EIO; | ||
265 | } else if (flags & BIT(FSCACHE_OBJECT_IS_AVAILABLE)) { | ||
208 | op->object = object; | 266 | op->object = object; |
209 | object->n_ops++; | 267 | object->n_ops++; |
210 | 268 | ||
@@ -222,23 +280,21 @@ int fscache_submit_op(struct fscache_object *object, | |||
222 | fscache_run_op(object, op); | 280 | fscache_run_op(object, op); |
223 | } | 281 | } |
224 | ret = 0; | 282 | ret = 0; |
225 | } else if (test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) { | 283 | } else if (flags & BIT(FSCACHE_OBJECT_IS_LOOKED_UP)) { |
226 | op->object = object; | 284 | op->object = object; |
227 | object->n_ops++; | 285 | object->n_ops++; |
228 | atomic_inc(&op->usage); | 286 | atomic_inc(&op->usage); |
229 | list_add_tail(&op->pend_link, &object->pending_ops); | 287 | list_add_tail(&op->pend_link, &object->pending_ops); |
230 | fscache_stat(&fscache_n_op_pend); | 288 | fscache_stat(&fscache_n_op_pend); |
231 | ret = 0; | 289 | ret = 0; |
232 | } else if (fscache_object_is_dying(object)) { | 290 | } else if (flags & BIT(FSCACHE_OBJECT_KILLED_BY_CACHE)) { |
233 | fscache_stat(&fscache_n_op_rejected); | 291 | op->cancel(op); |
234 | op->state = FSCACHE_OP_ST_CANCELLED; | 292 | op->state = FSCACHE_OP_ST_CANCELLED; |
235 | ret = -ENOBUFS; | 293 | ret = -ENOBUFS; |
236 | } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) { | 294 | } else { |
237 | fscache_report_unexpected_submission(object, op, ostate); | 295 | fscache_report_unexpected_submission(object, op, ostate); |
238 | ASSERT(!fscache_object_is_active(object)); | 296 | ASSERT(!fscache_object_is_active(object)); |
239 | op->state = FSCACHE_OP_ST_CANCELLED; | 297 | op->cancel(op); |
240 | ret = -ENOBUFS; | ||
241 | } else { | ||
242 | op->state = FSCACHE_OP_ST_CANCELLED; | 298 | op->state = FSCACHE_OP_ST_CANCELLED; |
243 | ret = -ENOBUFS; | 299 | ret = -ENOBUFS; |
244 | } | 300 | } |
@@ -293,9 +349,10 @@ void fscache_start_operations(struct fscache_object *object) | |||
293 | * cancel an operation that's pending on an object | 349 | * cancel an operation that's pending on an object |
294 | */ | 350 | */ |
295 | int fscache_cancel_op(struct fscache_operation *op, | 351 | int fscache_cancel_op(struct fscache_operation *op, |
296 | void (*do_cancel)(struct fscache_operation *)) | 352 | bool cancel_in_progress_op) |
297 | { | 353 | { |
298 | struct fscache_object *object = op->object; | 354 | struct fscache_object *object = op->object; |
355 | bool put = false; | ||
299 | int ret; | 356 | int ret; |
300 | 357 | ||
301 | _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id); | 358 | _enter("OBJ%x OP%x}", op->object->debug_id, op->debug_id); |
@@ -309,19 +366,37 @@ int fscache_cancel_op(struct fscache_operation *op, | |||
309 | ret = -EBUSY; | 366 | ret = -EBUSY; |
310 | if (op->state == FSCACHE_OP_ST_PENDING) { | 367 | if (op->state == FSCACHE_OP_ST_PENDING) { |
311 | ASSERT(!list_empty(&op->pend_link)); | 368 | ASSERT(!list_empty(&op->pend_link)); |
312 | fscache_stat(&fscache_n_op_cancelled); | ||
313 | list_del_init(&op->pend_link); | 369 | list_del_init(&op->pend_link); |
314 | if (do_cancel) | 370 | put = true; |
315 | do_cancel(op); | 371 | |
372 | fscache_stat(&fscache_n_op_cancelled); | ||
373 | op->cancel(op); | ||
374 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
375 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) | ||
376 | object->n_exclusive--; | ||
377 | if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) | ||
378 | wake_up_bit(&op->flags, FSCACHE_OP_WAITING); | ||
379 | ret = 0; | ||
380 | } else if (op->state == FSCACHE_OP_ST_IN_PROGRESS && cancel_in_progress_op) { | ||
381 | ASSERTCMP(object->n_in_progress, >, 0); | ||
382 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) | ||
383 | object->n_exclusive--; | ||
384 | object->n_in_progress--; | ||
385 | if (object->n_in_progress == 0) | ||
386 | fscache_start_operations(object); | ||
387 | |||
388 | fscache_stat(&fscache_n_op_cancelled); | ||
389 | op->cancel(op); | ||
316 | op->state = FSCACHE_OP_ST_CANCELLED; | 390 | op->state = FSCACHE_OP_ST_CANCELLED; |
317 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) | 391 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) |
318 | object->n_exclusive--; | 392 | object->n_exclusive--; |
319 | if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) | 393 | if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags)) |
320 | wake_up_bit(&op->flags, FSCACHE_OP_WAITING); | 394 | wake_up_bit(&op->flags, FSCACHE_OP_WAITING); |
321 | fscache_put_operation(op); | ||
322 | ret = 0; | 395 | ret = 0; |
323 | } | 396 | } |
324 | 397 | ||
398 | if (put) | ||
399 | fscache_put_operation(op); | ||
325 | spin_unlock(&object->lock); | 400 | spin_unlock(&object->lock); |
326 | _leave(" = %d", ret); | 401 | _leave(" = %d", ret); |
327 | return ret; | 402 | return ret; |
@@ -345,6 +420,7 @@ void fscache_cancel_all_ops(struct fscache_object *object) | |||
345 | list_del_init(&op->pend_link); | 420 | list_del_init(&op->pend_link); |
346 | 421 | ||
347 | ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING); | 422 | ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING); |
423 | op->cancel(op); | ||
348 | op->state = FSCACHE_OP_ST_CANCELLED; | 424 | op->state = FSCACHE_OP_ST_CANCELLED; |
349 | 425 | ||
350 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) | 426 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) |
@@ -377,8 +453,12 @@ void fscache_op_complete(struct fscache_operation *op, bool cancelled) | |||
377 | 453 | ||
378 | spin_lock(&object->lock); | 454 | spin_lock(&object->lock); |
379 | 455 | ||
380 | op->state = cancelled ? | 456 | if (!cancelled) { |
381 | FSCACHE_OP_ST_CANCELLED : FSCACHE_OP_ST_COMPLETE; | 457 | op->state = FSCACHE_OP_ST_COMPLETE; |
458 | } else { | ||
459 | op->cancel(op); | ||
460 | op->state = FSCACHE_OP_ST_CANCELLED; | ||
461 | } | ||
382 | 462 | ||
383 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) | 463 | if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) |
384 | object->n_exclusive--; | 464 | object->n_exclusive--; |
@@ -409,9 +489,9 @@ void fscache_put_operation(struct fscache_operation *op) | |||
409 | return; | 489 | return; |
410 | 490 | ||
411 | _debug("PUT OP"); | 491 | _debug("PUT OP"); |
412 | ASSERTIFCMP(op->state != FSCACHE_OP_ST_COMPLETE, | 492 | ASSERTIFCMP(op->state != FSCACHE_OP_ST_INITIALISED && |
493 | op->state != FSCACHE_OP_ST_COMPLETE, | ||
413 | op->state, ==, FSCACHE_OP_ST_CANCELLED); | 494 | op->state, ==, FSCACHE_OP_ST_CANCELLED); |
414 | op->state = FSCACHE_OP_ST_DEAD; | ||
415 | 495 | ||
416 | fscache_stat(&fscache_n_op_release); | 496 | fscache_stat(&fscache_n_op_release); |
417 | 497 | ||
@@ -419,37 +499,39 @@ void fscache_put_operation(struct fscache_operation *op) | |||
419 | op->release(op); | 499 | op->release(op); |
420 | op->release = NULL; | 500 | op->release = NULL; |
421 | } | 501 | } |
502 | op->state = FSCACHE_OP_ST_DEAD; | ||
422 | 503 | ||
423 | object = op->object; | 504 | object = op->object; |
505 | if (likely(object)) { | ||
506 | if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) | ||
507 | atomic_dec(&object->n_reads); | ||
508 | if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags)) | ||
509 | fscache_unuse_cookie(object); | ||
510 | |||
511 | /* now... we may get called with the object spinlock held, so we | ||
512 | * complete the cleanup here only if we can immediately acquire the | ||
513 | * lock, and defer it otherwise */ | ||
514 | if (!spin_trylock(&object->lock)) { | ||
515 | _debug("defer put"); | ||
516 | fscache_stat(&fscache_n_op_deferred_release); | ||
517 | |||
518 | cache = object->cache; | ||
519 | spin_lock(&cache->op_gc_list_lock); | ||
520 | list_add_tail(&op->pend_link, &cache->op_gc_list); | ||
521 | spin_unlock(&cache->op_gc_list_lock); | ||
522 | schedule_work(&cache->op_gc); | ||
523 | _leave(" [defer]"); | ||
524 | return; | ||
525 | } | ||
424 | 526 | ||
425 | if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags)) | 527 | ASSERTCMP(object->n_ops, >, 0); |
426 | atomic_dec(&object->n_reads); | 528 | object->n_ops--; |
427 | if (test_bit(FSCACHE_OP_UNUSE_COOKIE, &op->flags)) | 529 | if (object->n_ops == 0) |
428 | fscache_unuse_cookie(object); | 530 | fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); |
429 | |||
430 | /* now... we may get called with the object spinlock held, so we | ||
431 | * complete the cleanup here only if we can immediately acquire the | ||
432 | * lock, and defer it otherwise */ | ||
433 | if (!spin_trylock(&object->lock)) { | ||
434 | _debug("defer put"); | ||
435 | fscache_stat(&fscache_n_op_deferred_release); | ||
436 | 531 | ||
437 | cache = object->cache; | 532 | spin_unlock(&object->lock); |
438 | spin_lock(&cache->op_gc_list_lock); | ||
439 | list_add_tail(&op->pend_link, &cache->op_gc_list); | ||
440 | spin_unlock(&cache->op_gc_list_lock); | ||
441 | schedule_work(&cache->op_gc); | ||
442 | _leave(" [defer]"); | ||
443 | return; | ||
444 | } | 533 | } |
445 | 534 | ||
446 | ASSERTCMP(object->n_ops, >, 0); | ||
447 | object->n_ops--; | ||
448 | if (object->n_ops == 0) | ||
449 | fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED); | ||
450 | |||
451 | spin_unlock(&object->lock); | ||
452 | |||
453 | kfree(op); | 535 | kfree(op); |
454 | _leave(" [done]"); | 536 | _leave(" [done]"); |
455 | } | 537 | } |
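To make the reworked operation API above concrete, here is a hedged sketch of
how a core caller inside fs/fscache might initialise and submit an operation
with a cancellation hook.  example_cancel() and example_issue_op() are invented
for illustration; fscache_operation_init(), fscache_submit_op() and
fscache_put_operation() behave as changed in this patch.

	/* Hypothetical caller, for illustration only. */
	static void example_cancel(struct fscache_operation *op)
	{
		/* Undo any per-op bookkeeping; runs whenever the op is cancelled. */
	}

	static int example_issue_op(struct fscache_object *object)
	{
		struct fscache_operation *op;

		op = kzalloc(sizeof(*op), GFP_NOIO);
		if (!op)
			return -ENOMEM;

		/* A NULL cancel hook would be replaced by the internal dummy. */
		fscache_operation_init(op, NULL, example_cancel, NULL);
		op->flags = FSCACHE_OP_MYTHREAD | (1UL << FSCACHE_OP_WAITING);

		/* Real callers pick the object and submit under the cookie lock. */
		if (fscache_submit_op(object, op) < 0) {
			/* Rejected submissions are now cancelled internally (the
			 * cancel hook has already run), so the caller drops its
			 * reference rather than kfree()ing the op directly. */
			fscache_put_operation(op);
			return -ENOBUFS;
		}
		return 0;
	}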
diff --git a/fs/fscache/page.c b/fs/fscache/page.c index de33b3fccca6..483bbc613bf0 100644 --- a/fs/fscache/page.c +++ b/fs/fscache/page.c | |||
@@ -213,7 +213,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie) | |||
213 | return -ENOMEM; | 213 | return -ENOMEM; |
214 | } | 214 | } |
215 | 215 | ||
216 | fscache_operation_init(op, fscache_attr_changed_op, NULL); | 216 | fscache_operation_init(op, fscache_attr_changed_op, NULL, NULL); |
217 | op->flags = FSCACHE_OP_ASYNC | | 217 | op->flags = FSCACHE_OP_ASYNC | |
218 | (1 << FSCACHE_OP_EXCLUSIVE) | | 218 | (1 << FSCACHE_OP_EXCLUSIVE) | |
219 | (1 << FSCACHE_OP_UNUSE_COOKIE); | 219 | (1 << FSCACHE_OP_UNUSE_COOKIE); |
@@ -239,7 +239,7 @@ nobufs_dec: | |||
239 | wake_cookie = __fscache_unuse_cookie(cookie); | 239 | wake_cookie = __fscache_unuse_cookie(cookie); |
240 | nobufs: | 240 | nobufs: |
241 | spin_unlock(&cookie->lock); | 241 | spin_unlock(&cookie->lock); |
242 | kfree(op); | 242 | fscache_put_operation(op); |
243 | if (wake_cookie) | 243 | if (wake_cookie) |
244 | __fscache_wake_unused_cookie(cookie); | 244 | __fscache_wake_unused_cookie(cookie); |
245 | fscache_stat(&fscache_n_attr_changed_nobufs); | 245 | fscache_stat(&fscache_n_attr_changed_nobufs); |
@@ -249,6 +249,17 @@ nobufs: | |||
249 | EXPORT_SYMBOL(__fscache_attr_changed); | 249 | EXPORT_SYMBOL(__fscache_attr_changed); |
250 | 250 | ||
251 | /* | 251 | /* |
252 | * Handle cancellation of a pending retrieval op | ||
253 | */ | ||
254 | static void fscache_do_cancel_retrieval(struct fscache_operation *_op) | ||
255 | { | ||
256 | struct fscache_retrieval *op = | ||
257 | container_of(_op, struct fscache_retrieval, op); | ||
258 | |||
259 | atomic_set(&op->n_pages, 0); | ||
260 | } | ||
261 | |||
262 | /* | ||
252 | * release a retrieval op reference | 263 | * release a retrieval op reference |
253 | */ | 264 | */ |
254 | static void fscache_release_retrieval_op(struct fscache_operation *_op) | 265 | static void fscache_release_retrieval_op(struct fscache_operation *_op) |
@@ -258,11 +269,12 @@ static void fscache_release_retrieval_op(struct fscache_operation *_op) | |||
258 | 269 | ||
259 | _enter("{OP%x}", op->op.debug_id); | 270 | _enter("{OP%x}", op->op.debug_id); |
260 | 271 | ||
261 | ASSERTCMP(atomic_read(&op->n_pages), ==, 0); | 272 | ASSERTIFCMP(op->op.state != FSCACHE_OP_ST_INITIALISED, |
273 | atomic_read(&op->n_pages), ==, 0); | ||
262 | 274 | ||
263 | fscache_hist(fscache_retrieval_histogram, op->start_time); | 275 | fscache_hist(fscache_retrieval_histogram, op->start_time); |
264 | if (op->context) | 276 | if (op->context) |
265 | fscache_put_context(op->op.object->cookie, op->context); | 277 | fscache_put_context(op->cookie, op->context); |
266 | 278 | ||
267 | _leave(""); | 279 | _leave(""); |
268 | } | 280 | } |
@@ -285,15 +297,24 @@ static struct fscache_retrieval *fscache_alloc_retrieval( | |||
285 | return NULL; | 297 | return NULL; |
286 | } | 298 | } |
287 | 299 | ||
288 | fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op); | 300 | fscache_operation_init(&op->op, NULL, |
301 | fscache_do_cancel_retrieval, | ||
302 | fscache_release_retrieval_op); | ||
289 | op->op.flags = FSCACHE_OP_MYTHREAD | | 303 | op->op.flags = FSCACHE_OP_MYTHREAD | |
290 | (1UL << FSCACHE_OP_WAITING) | | 304 | (1UL << FSCACHE_OP_WAITING) | |
291 | (1UL << FSCACHE_OP_UNUSE_COOKIE); | 305 | (1UL << FSCACHE_OP_UNUSE_COOKIE); |
306 | op->cookie = cookie; | ||
292 | op->mapping = mapping; | 307 | op->mapping = mapping; |
293 | op->end_io_func = end_io_func; | 308 | op->end_io_func = end_io_func; |
294 | op->context = context; | 309 | op->context = context; |
295 | op->start_time = jiffies; | 310 | op->start_time = jiffies; |
296 | INIT_LIST_HEAD(&op->to_do); | 311 | INIT_LIST_HEAD(&op->to_do); |
312 | |||
313 | /* Pin the netfs read context in case we need to do the actual netfs | ||
314 | * read because we've encountered a cache read failure. | ||
315 | */ | ||
316 | if (context) | ||
317 | fscache_get_context(op->cookie, context); | ||
297 | return op; | 318 | return op; |
298 | } | 319 | } |
299 | 320 | ||
@@ -330,24 +351,12 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie) | |||
330 | } | 351 | } |
331 | 352 | ||
332 | /* | 353 | /* |
333 | * Handle cancellation of a pending retrieval op | ||
334 | */ | ||
335 | static void fscache_do_cancel_retrieval(struct fscache_operation *_op) | ||
336 | { | ||
337 | struct fscache_retrieval *op = | ||
338 | container_of(_op, struct fscache_retrieval, op); | ||
339 | |||
340 | atomic_set(&op->n_pages, 0); | ||
341 | } | ||
342 | |||
343 | /* | ||
344 | * wait for an object to become active (or dead) | 354 | * wait for an object to become active (or dead) |
345 | */ | 355 | */ |
346 | int fscache_wait_for_operation_activation(struct fscache_object *object, | 356 | int fscache_wait_for_operation_activation(struct fscache_object *object, |
347 | struct fscache_operation *op, | 357 | struct fscache_operation *op, |
348 | atomic_t *stat_op_waits, | 358 | atomic_t *stat_op_waits, |
349 | atomic_t *stat_object_dead, | 359 | atomic_t *stat_object_dead) |
350 | void (*do_cancel)(struct fscache_operation *)) | ||
351 | { | 360 | { |
352 | int ret; | 361 | int ret; |
353 | 362 | ||
@@ -359,7 +368,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object, | |||
359 | fscache_stat(stat_op_waits); | 368 | fscache_stat(stat_op_waits); |
360 | if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING, | 369 | if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING, |
361 | TASK_INTERRUPTIBLE) != 0) { | 370 | TASK_INTERRUPTIBLE) != 0) { |
362 | ret = fscache_cancel_op(op, do_cancel); | 371 | ret = fscache_cancel_op(op, false); |
363 | if (ret == 0) | 372 | if (ret == 0) |
364 | return -ERESTARTSYS; | 373 | return -ERESTARTSYS; |
365 | 374 | ||
@@ -377,11 +386,13 @@ check_if_dead: | |||
377 | _leave(" = -ENOBUFS [cancelled]"); | 386 | _leave(" = -ENOBUFS [cancelled]"); |
378 | return -ENOBUFS; | 387 | return -ENOBUFS; |
379 | } | 388 | } |
380 | if (unlikely(fscache_object_is_dead(object))) { | 389 | if (unlikely(fscache_object_is_dying(object) || |
381 | pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state); | 390 | fscache_cache_is_broken(object))) { |
382 | fscache_cancel_op(op, do_cancel); | 391 | enum fscache_operation_state state = op->state; |
392 | fscache_cancel_op(op, true); | ||
383 | if (stat_object_dead) | 393 | if (stat_object_dead) |
384 | fscache_stat(stat_object_dead); | 394 | fscache_stat(stat_object_dead); |
395 | _leave(" = -ENOBUFS [obj dead %d]", state); | ||
385 | return -ENOBUFS; | 396 | return -ENOBUFS; |
386 | } | 397 | } |
387 | return 0; | 398 | return 0; |
@@ -453,17 +464,12 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie, | |||
453 | 464 | ||
454 | fscache_stat(&fscache_n_retrieval_ops); | 465 | fscache_stat(&fscache_n_retrieval_ops); |
455 | 466 | ||
456 | /* pin the netfs read context in case we need to do the actual netfs | ||
457 | * read because we've encountered a cache read failure */ | ||
458 | fscache_get_context(object->cookie, op->context); | ||
459 | |||
460 | /* we wait for the operation to become active, and then process it | 467 | /* we wait for the operation to become active, and then process it |
461 | * *here*, in this thread, and not in the thread pool */ | 468 | * *here*, in this thread, and not in the thread pool */ |
462 | ret = fscache_wait_for_operation_activation( | 469 | ret = fscache_wait_for_operation_activation( |
463 | object, &op->op, | 470 | object, &op->op, |
464 | __fscache_stat(&fscache_n_retrieval_op_waits), | 471 | __fscache_stat(&fscache_n_retrieval_op_waits), |
465 | __fscache_stat(&fscache_n_retrievals_object_dead), | 472 | __fscache_stat(&fscache_n_retrievals_object_dead)); |
466 | fscache_do_cancel_retrieval); | ||
467 | if (ret < 0) | 473 | if (ret < 0) |
468 | goto error; | 474 | goto error; |
469 | 475 | ||
@@ -503,7 +509,7 @@ nobufs_unlock: | |||
503 | spin_unlock(&cookie->lock); | 509 | spin_unlock(&cookie->lock); |
504 | if (wake_cookie) | 510 | if (wake_cookie) |
505 | __fscache_wake_unused_cookie(cookie); | 511 | __fscache_wake_unused_cookie(cookie); |
506 | kfree(op); | 512 | fscache_put_retrieval(op); |
507 | nobufs: | 513 | nobufs: |
508 | fscache_stat(&fscache_n_retrievals_nobufs); | 514 | fscache_stat(&fscache_n_retrievals_nobufs); |
509 | _leave(" = -ENOBUFS"); | 515 | _leave(" = -ENOBUFS"); |
@@ -584,17 +590,12 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie, | |||
584 | 590 | ||
585 | fscache_stat(&fscache_n_retrieval_ops); | 591 | fscache_stat(&fscache_n_retrieval_ops); |
586 | 592 | ||
587 | /* pin the netfs read context in case we need to do the actual netfs | ||
588 | * read because we've encountered a cache read failure */ | ||
589 | fscache_get_context(object->cookie, op->context); | ||
590 | |||
591 | /* we wait for the operation to become active, and then process it | 593 | /* we wait for the operation to become active, and then process it |
592 | * *here*, in this thread, and not in the thread pool */ | 594 | * *here*, in this thread, and not in the thread pool */ |
593 | ret = fscache_wait_for_operation_activation( | 595 | ret = fscache_wait_for_operation_activation( |
594 | object, &op->op, | 596 | object, &op->op, |
595 | __fscache_stat(&fscache_n_retrieval_op_waits), | 597 | __fscache_stat(&fscache_n_retrieval_op_waits), |
596 | __fscache_stat(&fscache_n_retrievals_object_dead), | 598 | __fscache_stat(&fscache_n_retrievals_object_dead)); |
597 | fscache_do_cancel_retrieval); | ||
598 | if (ret < 0) | 599 | if (ret < 0) |
599 | goto error; | 600 | goto error; |
600 | 601 | ||
@@ -632,7 +633,7 @@ nobufs_unlock_dec: | |||
632 | wake_cookie = __fscache_unuse_cookie(cookie); | 633 | wake_cookie = __fscache_unuse_cookie(cookie); |
633 | nobufs_unlock: | 634 | nobufs_unlock: |
634 | spin_unlock(&cookie->lock); | 635 | spin_unlock(&cookie->lock); |
635 | kfree(op); | 636 | fscache_put_retrieval(op); |
636 | if (wake_cookie) | 637 | if (wake_cookie) |
637 | __fscache_wake_unused_cookie(cookie); | 638 | __fscache_wake_unused_cookie(cookie); |
638 | nobufs: | 639 | nobufs: |
@@ -700,8 +701,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie, | |||
700 | ret = fscache_wait_for_operation_activation( | 701 | ret = fscache_wait_for_operation_activation( |
701 | object, &op->op, | 702 | object, &op->op, |
702 | __fscache_stat(&fscache_n_alloc_op_waits), | 703 | __fscache_stat(&fscache_n_alloc_op_waits), |
703 | __fscache_stat(&fscache_n_allocs_object_dead), | 704 | __fscache_stat(&fscache_n_allocs_object_dead)); |
704 | fscache_do_cancel_retrieval); | ||
705 | if (ret < 0) | 705 | if (ret < 0) |
706 | goto error; | 706 | goto error; |
707 | 707 | ||
@@ -726,7 +726,7 @@ nobufs_unlock_dec: | |||
726 | wake_cookie = __fscache_unuse_cookie(cookie); | 726 | wake_cookie = __fscache_unuse_cookie(cookie); |
727 | nobufs_unlock: | 727 | nobufs_unlock: |
728 | spin_unlock(&cookie->lock); | 728 | spin_unlock(&cookie->lock); |
729 | kfree(op); | 729 | fscache_put_retrieval(op); |
730 | if (wake_cookie) | 730 | if (wake_cookie) |
731 | __fscache_wake_unused_cookie(cookie); | 731 | __fscache_wake_unused_cookie(cookie); |
732 | nobufs: | 732 | nobufs: |
@@ -944,7 +944,7 @@ int __fscache_write_page(struct fscache_cookie *cookie, | |||
944 | if (!op) | 944 | if (!op) |
945 | goto nomem; | 945 | goto nomem; |
946 | 946 | ||
947 | fscache_operation_init(&op->op, fscache_write_op, | 947 | fscache_operation_init(&op->op, fscache_write_op, NULL, |
948 | fscache_release_write_op); | 948 | fscache_release_write_op); |
949 | op->op.flags = FSCACHE_OP_ASYNC | | 949 | op->op.flags = FSCACHE_OP_ASYNC | |
950 | (1 << FSCACHE_OP_WAITING) | | 950 | (1 << FSCACHE_OP_WAITING) | |
@@ -1016,7 +1016,7 @@ already_pending: | |||
1016 | spin_unlock(&object->lock); | 1016 | spin_unlock(&object->lock); |
1017 | spin_unlock(&cookie->lock); | 1017 | spin_unlock(&cookie->lock); |
1018 | radix_tree_preload_end(); | 1018 | radix_tree_preload_end(); |
1019 | kfree(op); | 1019 | fscache_put_operation(&op->op); |
1020 | fscache_stat(&fscache_n_stores_ok); | 1020 | fscache_stat(&fscache_n_stores_ok); |
1021 | _leave(" = 0"); | 1021 | _leave(" = 0"); |
1022 | return 0; | 1022 | return 0; |
@@ -1036,7 +1036,7 @@ nobufs_unlock_obj: | |||
1036 | nobufs: | 1036 | nobufs: |
1037 | spin_unlock(&cookie->lock); | 1037 | spin_unlock(&cookie->lock); |
1038 | radix_tree_preload_end(); | 1038 | radix_tree_preload_end(); |
1039 | kfree(op); | 1039 | fscache_put_operation(&op->op); |
1040 | if (wake_cookie) | 1040 | if (wake_cookie) |
1041 | __fscache_wake_unused_cookie(cookie); | 1041 | __fscache_wake_unused_cookie(cookie); |
1042 | fscache_stat(&fscache_n_stores_nobufs); | 1042 | fscache_stat(&fscache_n_stores_nobufs); |
@@ -1044,7 +1044,7 @@ nobufs: | |||
1044 | return -ENOBUFS; | 1044 | return -ENOBUFS; |
1045 | 1045 | ||
1046 | nomem_free: | 1046 | nomem_free: |
1047 | kfree(op); | 1047 | fscache_put_operation(&op->op); |
1048 | nomem: | 1048 | nomem: |
1049 | fscache_stat(&fscache_n_stores_oom); | 1049 | fscache_stat(&fscache_n_stores_oom); |
1050 | _leave(" = -ENOMEM"); | 1050 | _leave(" = -ENOMEM"); |
diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index 40d13c70ef51..7cfa0aacdf6d 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c | |||
@@ -23,6 +23,7 @@ atomic_t fscache_n_op_run; | |||
23 | atomic_t fscache_n_op_enqueue; | 23 | atomic_t fscache_n_op_enqueue; |
24 | atomic_t fscache_n_op_requeue; | 24 | atomic_t fscache_n_op_requeue; |
25 | atomic_t fscache_n_op_deferred_release; | 25 | atomic_t fscache_n_op_deferred_release; |
26 | atomic_t fscache_n_op_initialised; | ||
26 | atomic_t fscache_n_op_release; | 27 | atomic_t fscache_n_op_release; |
27 | atomic_t fscache_n_op_gc; | 28 | atomic_t fscache_n_op_gc; |
28 | atomic_t fscache_n_op_cancelled; | 29 | atomic_t fscache_n_op_cancelled; |
@@ -130,6 +131,11 @@ atomic_t fscache_n_cop_write_page; | |||
130 | atomic_t fscache_n_cop_uncache_page; | 131 | atomic_t fscache_n_cop_uncache_page; |
131 | atomic_t fscache_n_cop_dissociate_pages; | 132 | atomic_t fscache_n_cop_dissociate_pages; |
132 | 133 | ||
134 | atomic_t fscache_n_cache_no_space_reject; | ||
135 | atomic_t fscache_n_cache_stale_objects; | ||
136 | atomic_t fscache_n_cache_retired_objects; | ||
137 | atomic_t fscache_n_cache_culled_objects; | ||
138 | |||
133 | /* | 139 | /* |
134 | * display the general statistics | 140 | * display the general statistics |
135 | */ | 141 | */ |
@@ -246,7 +252,8 @@ static int fscache_stats_show(struct seq_file *m, void *v) | |||
246 | atomic_read(&fscache_n_op_enqueue), | 252 | atomic_read(&fscache_n_op_enqueue), |
247 | atomic_read(&fscache_n_op_cancelled), | 253 | atomic_read(&fscache_n_op_cancelled), |
248 | atomic_read(&fscache_n_op_rejected)); | 254 | atomic_read(&fscache_n_op_rejected)); |
249 | seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n", | 255 | seq_printf(m, "Ops : ini=%u dfr=%u rel=%u gc=%u\n", |
256 | atomic_read(&fscache_n_op_initialised), | ||
250 | atomic_read(&fscache_n_op_deferred_release), | 257 | atomic_read(&fscache_n_op_deferred_release), |
251 | atomic_read(&fscache_n_op_release), | 258 | atomic_read(&fscache_n_op_release), |
252 | atomic_read(&fscache_n_op_gc)); | 259 | atomic_read(&fscache_n_op_gc)); |
@@ -271,6 +278,11 @@ static int fscache_stats_show(struct seq_file *m, void *v) | |||
271 | atomic_read(&fscache_n_cop_write_page), | 278 | atomic_read(&fscache_n_cop_write_page), |
272 | atomic_read(&fscache_n_cop_uncache_page), | 279 | atomic_read(&fscache_n_cop_uncache_page), |
273 | atomic_read(&fscache_n_cop_dissociate_pages)); | 280 | atomic_read(&fscache_n_cop_dissociate_pages)); |
281 | seq_printf(m, "CacheEv: nsp=%d stl=%d rtr=%d cul=%d\n", | ||
282 | atomic_read(&fscache_n_cache_no_space_reject), | ||
283 | atomic_read(&fscache_n_cache_stale_objects), | ||
284 | atomic_read(&fscache_n_cache_retired_objects), | ||
285 | atomic_read(&fscache_n_cache_culled_objects)); | ||
274 | return 0; | 286 | return 0; |
275 | } | 287 | } |
276 | 288 | ||
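With these seq_printf() changes, the affected lines of /proc/fs/fscache/stats
would read roughly as follows (the counts here are made up for illustration):

	Ops : ini=12 dfr=0 rel=12 gc=0
	CacheEv: nsp=0 stl=1 rtr=0 cul=2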
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 014fa8ba2b51..f523f2f04c19 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -1169,7 +1169,7 @@ static ssize_t fuse_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
1169 | if (err <= 0) | 1169 | if (err <= 0) |
1170 | goto out; | 1170 | goto out; |
1171 | 1171 | ||
1172 | err = file_remove_suid(file); | 1172 | err = file_remove_privs(file); |
1173 | if (err) | 1173 | if (err) |
1174 | goto out; | 1174 | goto out; |
1175 | 1175 | ||
diff --git a/fs/hfs/hfs_fs.h b/fs/hfs/hfs_fs.h index 95d255219b1e..1f1c7dcbcc2f 100644 --- a/fs/hfs/hfs_fs.h +++ b/fs/hfs/hfs_fs.h | |||
@@ -252,7 +252,7 @@ extern void hfs_mark_mdb_dirty(struct super_block *sb); | |||
252 | #define __hfs_u_to_mtime(sec) cpu_to_be32(sec + 2082844800U - sys_tz.tz_minuteswest * 60) | 252 | #define __hfs_u_to_mtime(sec) cpu_to_be32(sec + 2082844800U - sys_tz.tz_minuteswest * 60) |
253 | #define __hfs_m_to_utime(sec) (be32_to_cpu(sec) - 2082844800U + sys_tz.tz_minuteswest * 60) | 253 | #define __hfs_m_to_utime(sec) (be32_to_cpu(sec) - 2082844800U + sys_tz.tz_minuteswest * 60) |
254 | 254 | ||
255 | #define HFS_I(inode) (list_entry(inode, struct hfs_inode_info, vfs_inode)) | 255 | #define HFS_I(inode) (container_of(inode, struct hfs_inode_info, vfs_inode)) |
256 | #define HFS_SB(sb) ((struct hfs_sb_info *)(sb)->s_fs_info) | 256 | #define HFS_SB(sb) ((struct hfs_sb_info *)(sb)->s_fs_info) |
257 | 257 | ||
258 | #define hfs_m_to_utime(time) (struct timespec){ .tv_sec = __hfs_m_to_utime(time) } | 258 | #define hfs_m_to_utime(time) (struct timespec){ .tv_sec = __hfs_m_to_utime(time) } |
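The same list_entry()-to-container_of() conversion recurs for several
filesystems below (hfsplus, hpfs, jffs2, jfs, minix, ntfs).  The idiom recovers
a filesystem's private inode-info structure from the embedded struct inode; the
struct and helper names in this standalone sketch are hypothetical:

	struct example_inode_info {
		unsigned long	i_flags;
		struct inode	vfs_inode;	/* embedded VFS inode */
	};

	static inline struct example_inode_info *EXAMPLE_I(struct inode *inode)
	{
		/* container_of() states the intent directly; list_entry() is
		 * meant for struct list_head members, even though it expands
		 * to the same pointer arithmetic. */
		return container_of(inode, struct example_inode_info, vfs_inode);
	}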
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index b0441d65fa54..f91a1faf819e 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h | |||
@@ -263,7 +263,7 @@ struct hfsplus_inode_info { | |||
263 | 263 | ||
264 | static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode) | 264 | static inline struct hfsplus_inode_info *HFSPLUS_I(struct inode *inode) |
265 | { | 265 | { |
266 | return list_entry(inode, struct hfsplus_inode_info, vfs_inode); | 266 | return container_of(inode, struct hfsplus_inode_info, vfs_inode); |
267 | } | 267 | } |
268 | 268 | ||
269 | /* | 269 | /* |
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h index b63b75fa00e7..bb04b58d1d69 100644 --- a/fs/hpfs/hpfs_fn.h +++ b/fs/hpfs/hpfs_fn.h | |||
@@ -304,7 +304,7 @@ extern const struct address_space_operations hpfs_symlink_aops; | |||
304 | 304 | ||
305 | static inline struct hpfs_inode_info *hpfs_i(struct inode *inode) | 305 | static inline struct hpfs_inode_info *hpfs_i(struct inode *inode) |
306 | { | 306 | { |
307 | return list_entry(inode, struct hpfs_inode_info, vfs_inode); | 307 | return container_of(inode, struct hpfs_inode_info, vfs_inode); |
308 | } | 308 | } |
309 | 309 | ||
310 | static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb) | 310 | static inline struct hpfs_sb_info *hpfs_sb(struct super_block *sb) |
diff --git a/fs/inode.c b/fs/inode.c index 069721f0cc0e..d30640f7a193 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -841,7 +841,11 @@ unsigned int get_next_ino(void) | |||
841 | } | 841 | } |
842 | #endif | 842 | #endif |
843 | 843 | ||
844 | *p = ++res; | 844 | res++; |
845 | /* get_next_ino should not provide a 0 inode number */ | ||
846 | if (unlikely(!res)) | ||
847 | res++; | ||
848 | *p = res; | ||
845 | put_cpu_var(last_ino); | 849 | put_cpu_var(last_ino); |
846 | return res; | 850 | return res; |
847 | } | 851 | } |
@@ -1674,7 +1678,31 @@ int should_remove_suid(struct dentry *dentry) | |||
1674 | } | 1678 | } |
1675 | EXPORT_SYMBOL(should_remove_suid); | 1679 | EXPORT_SYMBOL(should_remove_suid); |
1676 | 1680 | ||
1677 | static int __remove_suid(struct dentry *dentry, int kill) | 1681 | /* |
1682 | * Return mask of changes for notify_change() that need to be done as a | ||
1683 | * response to write or truncate. Return 0 if nothing has to be changed. | ||
1684 | * Negative value on error (change should be denied). | ||
1685 | */ | ||
1686 | int dentry_needs_remove_privs(struct dentry *dentry) | ||
1687 | { | ||
1688 | struct inode *inode = d_inode(dentry); | ||
1689 | int mask = 0; | ||
1690 | int ret; | ||
1691 | |||
1692 | if (IS_NOSEC(inode)) | ||
1693 | return 0; | ||
1694 | |||
1695 | mask = should_remove_suid(dentry); | ||
1696 | ret = security_inode_need_killpriv(dentry); | ||
1697 | if (ret < 0) | ||
1698 | return ret; | ||
1699 | if (ret) | ||
1700 | mask |= ATTR_KILL_PRIV; | ||
1701 | return mask; | ||
1702 | } | ||
1703 | EXPORT_SYMBOL(dentry_needs_remove_privs); | ||
1704 | |||
1705 | static int __remove_privs(struct dentry *dentry, int kill) | ||
1678 | { | 1706 | { |
1679 | struct iattr newattrs; | 1707 | struct iattr newattrs; |
1680 | 1708 | ||
@@ -1686,33 +1714,32 @@ static int __remove_suid(struct dentry *dentry, int kill) | |||
1686 | return notify_change(dentry, &newattrs, NULL); | 1714 | return notify_change(dentry, &newattrs, NULL); |
1687 | } | 1715 | } |
1688 | 1716 | ||
1689 | int file_remove_suid(struct file *file) | 1717 | /* |
1718 | * Remove special file privileges (suid, capabilities) when the file is written | ||
1719 | * to or truncated. | ||
1720 | */ | ||
1721 | int file_remove_privs(struct file *file) | ||
1690 | { | 1722 | { |
1691 | struct dentry *dentry = file->f_path.dentry; | 1723 | struct dentry *dentry = file->f_path.dentry; |
1692 | struct inode *inode = d_inode(dentry); | 1724 | struct inode *inode = d_inode(dentry); |
1693 | int killsuid; | 1725 | int kill; |
1694 | int killpriv; | ||
1695 | int error = 0; | 1726 | int error = 0; |
1696 | 1727 | ||
1697 | /* Fast path for nothing security related */ | 1728 | /* Fast path for nothing security related */ |
1698 | if (IS_NOSEC(inode)) | 1729 | if (IS_NOSEC(inode)) |
1699 | return 0; | 1730 | return 0; |
1700 | 1731 | ||
1701 | killsuid = should_remove_suid(dentry); | 1732 | kill = file_needs_remove_privs(file); |
1702 | killpriv = security_inode_need_killpriv(dentry); | 1733 | if (kill < 0) |
1703 | 1734 | return kill; | |
1704 | if (killpriv < 0) | 1735 | if (kill) |
1705 | return killpriv; | 1736 | error = __remove_privs(dentry, kill); |
1706 | if (killpriv) | 1737 | if (!error) |
1707 | error = security_inode_killpriv(dentry); | 1738 | inode_has_no_xattr(inode); |
1708 | if (!error && killsuid) | ||
1709 | error = __remove_suid(dentry, killsuid); | ||
1710 | if (!error && (inode->i_sb->s_flags & MS_NOSEC)) | ||
1711 | inode->i_flags |= S_NOSEC; | ||
1712 | 1739 | ||
1713 | return error; | 1740 | return error; |
1714 | } | 1741 | } |
1715 | EXPORT_SYMBOL(file_remove_suid); | 1742 | EXPORT_SYMBOL(file_remove_privs); |
1716 | 1743 | ||
1717 | /** | 1744 | /** |
1718 | * file_update_time - update mtime and ctime time | 1745 | * file_update_time - update mtime and ctime time |
@@ -1967,9 +1994,8 @@ EXPORT_SYMBOL(inode_dio_wait); | |||
1967 | * inode is being instantiated). The reason for the cmpxchg() loop | 1994 | * inode is being instantiated). The reason for the cmpxchg() loop |
1968 | * --- which wouldn't be necessary if all code paths which modify | 1995 | * --- which wouldn't be necessary if all code paths which modify |
1969 | * i_flags actually followed this rule, is that there is at least one | 1996 | * i_flags actually followed this rule, is that there is at least one |
1970 | * code path which doesn't today --- for example, | 1997 | * code path which doesn't today, so we use cmpxchg() out of an abundance |
1971 | * __generic_file_aio_write() calls file_remove_suid() without holding | 1998 | * of caution. |
1972 | * i_mutex --- so we use cmpxchg() out of an abundance of caution. | ||
1973 | * | 1999 | * |
1974 | * In the long run, i_mutex is overkill, and we should probably look | 2000 | * In the long run, i_mutex is overkill, and we should probably look |
1975 | * at using the i_lock spinlock to protect i_flags, and then make sure | 2001 | * at using the i_lock spinlock to protect i_flags, and then make sure |
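Write paths touched by this patch (fuse_file_write_iter() earlier and the ntfs
write path later in the diff) call the renamed helper before copying data.  A
hedged sketch of the pattern follows; example_write_checks() is invented, while
file_remove_privs() and file_update_time() are the real fs/inode.c helpers:

	/* Hypothetical write-path excerpt showing the renamed helper. */
	static int example_write_checks(struct kiocb *iocb)
	{
		struct file *file = iocb->ki_filp;
		int err;

		/* Drop setuid/setgid bits and file capabilities before writing. */
		err = file_remove_privs(file);
		if (err)
			return err;

		return file_update_time(file);
	}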
diff --git a/fs/internal.h b/fs/internal.h index 01dce1d1476b..4d5af583ab03 100644 --- a/fs/internal.h +++ b/fs/internal.h | |||
@@ -107,6 +107,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *, | |||
107 | extern long do_handle_open(int mountdirfd, | 107 | extern long do_handle_open(int mountdirfd, |
108 | struct file_handle __user *ufh, int open_flag); | 108 | struct file_handle __user *ufh, int open_flag); |
109 | extern int open_check_o_direct(struct file *f); | 109 | extern int open_check_o_direct(struct file *f); |
110 | extern int vfs_open(const struct path *, struct file *, const struct cred *); | ||
110 | 111 | ||
111 | /* | 112 | /* |
112 | * inode.c | 113 | * inode.c |
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h index d200a9b8fd5e..824e61ede465 100644 --- a/fs/jffs2/os-linux.h +++ b/fs/jffs2/os-linux.h | |||
@@ -19,7 +19,7 @@ | |||
19 | struct kstatfs; | 19 | struct kstatfs; |
20 | struct kvec; | 20 | struct kvec; |
21 | 21 | ||
22 | #define JFFS2_INODE_INFO(i) (list_entry(i, struct jffs2_inode_info, vfs_inode)) | 22 | #define JFFS2_INODE_INFO(i) (container_of(i, struct jffs2_inode_info, vfs_inode)) |
23 | #define OFNI_EDONI_2SFFJ(f) (&(f)->vfs_inode) | 23 | #define OFNI_EDONI_2SFFJ(f) (&(f)->vfs_inode) |
24 | #define JFFS2_SB_INFO(sb) (sb->s_fs_info) | 24 | #define JFFS2_SB_INFO(sb) (sb->s_fs_info) |
25 | #define OFNI_BS_2SFFJ(c) ((struct super_block *)c->os_priv) | 25 | #define OFNI_BS_2SFFJ(c) ((struct super_block *)c->os_priv) |
diff --git a/fs/jfs/jfs_incore.h b/fs/jfs/jfs_incore.h index fa7e795bd8ae..1f26d1910409 100644 --- a/fs/jfs/jfs_incore.h +++ b/fs/jfs/jfs_incore.h | |||
@@ -206,7 +206,7 @@ struct jfs_sb_info { | |||
206 | 206 | ||
207 | static inline struct jfs_inode_info *JFS_IP(struct inode *inode) | 207 | static inline struct jfs_inode_info *JFS_IP(struct inode *inode) |
208 | { | 208 | { |
209 | return list_entry(inode, struct jfs_inode_info, vfs_inode); | 209 | return container_of(inode, struct jfs_inode_info, vfs_inode); |
210 | } | 210 | } |
211 | 211 | ||
212 | static inline int jfs_dirtable_inline(struct inode *inode) | 212 | static inline int jfs_dirtable_inline(struct inode *inode) |
diff --git a/fs/libfs.c b/fs/libfs.c index 88a4cb418756..102edfd39000 100644 --- a/fs/libfs.c +++ b/fs/libfs.c | |||
@@ -20,11 +20,6 @@ | |||
20 | 20 | ||
21 | #include "internal.h" | 21 | #include "internal.h" |
22 | 22 | ||
23 | static inline int simple_positive(struct dentry *dentry) | ||
24 | { | ||
25 | return d_really_is_positive(dentry) && !d_unhashed(dentry); | ||
26 | } | ||
27 | |||
28 | int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, | 23 | int simple_getattr(struct vfsmount *mnt, struct dentry *dentry, |
29 | struct kstat *stat) | 24 | struct kstat *stat) |
30 | { | 25 | { |
diff --git a/fs/minix/dir.c b/fs/minix/dir.c index 118e4e7bc935..d19ac258105a 100644 --- a/fs/minix/dir.c +++ b/fs/minix/dir.c | |||
@@ -45,11 +45,6 @@ minix_last_byte(struct inode *inode, unsigned long page_nr) | |||
45 | return last_byte; | 45 | return last_byte; |
46 | } | 46 | } |
47 | 47 | ||
48 | static inline unsigned long dir_pages(struct inode *inode) | ||
49 | { | ||
50 | return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; | ||
51 | } | ||
52 | |||
53 | static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) | 48 | static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) |
54 | { | 49 | { |
55 | struct address_space *mapping = page->mapping; | 50 | struct address_space *mapping = page->mapping; |
diff --git a/fs/minix/minix.h b/fs/minix/minix.h index 1ebd11854622..01ad81dcacc5 100644 --- a/fs/minix/minix.h +++ b/fs/minix/minix.h | |||
@@ -84,7 +84,7 @@ static inline struct minix_sb_info *minix_sb(struct super_block *sb) | |||
84 | 84 | ||
85 | static inline struct minix_inode_info *minix_i(struct inode *inode) | 85 | static inline struct minix_inode_info *minix_i(struct inode *inode) |
86 | { | 86 | { |
87 | return list_entry(inode, struct minix_inode_info, vfs_inode); | 87 | return container_of(inode, struct minix_inode_info, vfs_inode); |
88 | } | 88 | } |
89 | 89 | ||
90 | static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize) | 90 | static inline unsigned minix_blocks_needed(unsigned bits, unsigned blocksize) |
diff --git a/fs/namei.c b/fs/namei.c index 2dad0eaf91d3..ae4e4c18b2ac 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -792,7 +792,7 @@ static void set_root(struct nameidata *nd) | |||
792 | get_fs_root(current->fs, &nd->root); | 792 | get_fs_root(current->fs, &nd->root); |
793 | } | 793 | } |
794 | 794 | ||
795 | static unsigned set_root_rcu(struct nameidata *nd) | 795 | static void set_root_rcu(struct nameidata *nd) |
796 | { | 796 | { |
797 | struct fs_struct *fs = current->fs; | 797 | struct fs_struct *fs = current->fs; |
798 | unsigned seq; | 798 | unsigned seq; |
@@ -802,7 +802,6 @@ static unsigned set_root_rcu(struct nameidata *nd) | |||
802 | nd->root = fs->root; | 802 | nd->root = fs->root; |
803 | nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq); | 803 | nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq); |
804 | } while (read_seqcount_retry(&fs->seq, seq)); | 804 | } while (read_seqcount_retry(&fs->seq, seq)); |
805 | return nd->root_seq; | ||
806 | } | 805 | } |
807 | 806 | ||
808 | static void path_put_conditional(struct path *path, struct nameidata *nd) | 807 | static void path_put_conditional(struct path *path, struct nameidata *nd) |
@@ -1998,7 +1997,8 @@ static const char *path_init(struct nameidata *nd, unsigned flags) | |||
1998 | if (*s == '/') { | 1997 | if (*s == '/') { |
1999 | if (flags & LOOKUP_RCU) { | 1998 | if (flags & LOOKUP_RCU) { |
2000 | rcu_read_lock(); | 1999 | rcu_read_lock(); |
2001 | nd->seq = set_root_rcu(nd); | 2000 | set_root_rcu(nd); |
2001 | nd->seq = nd->root_seq; | ||
2002 | } else { | 2002 | } else { |
2003 | set_root(nd); | 2003 | set_root(nd); |
2004 | path_get(&nd->root); | 2004 | path_get(&nd->root); |
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index 80021c709af9..93575e91a7aa 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c | |||
@@ -1145,6 +1145,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1145 | case 0x00: | 1145 | case 0x00: |
1146 | ncp_dbg(1, "renamed %pd -> %pd\n", | 1146 | ncp_dbg(1, "renamed %pd -> %pd\n", |
1147 | old_dentry, new_dentry); | 1147 | old_dentry, new_dentry); |
1148 | ncp_d_prune(old_dentry); | ||
1149 | ncp_d_prune(new_dentry); | ||
1148 | break; | 1150 | break; |
1149 | case 0x9E: | 1151 | case 0x9E: |
1150 | error = -ENAMETOOLONG; | 1152 | error = -ENAMETOOLONG; |
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 21457bb0edd6..547308a5ec6f 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c | |||
@@ -1768,7 +1768,7 @@ EXPORT_SYMBOL_GPL(nfs_mkdir); | |||
1768 | 1768 | ||
1769 | static void nfs_dentry_handle_enoent(struct dentry *dentry) | 1769 | static void nfs_dentry_handle_enoent(struct dentry *dentry) |
1770 | { | 1770 | { |
1771 | if (d_really_is_positive(dentry) && !d_unhashed(dentry)) | 1771 | if (simple_positive(dentry)) |
1772 | d_delete(dentry); | 1772 | d_delete(dentry); |
1773 | } | 1773 | } |
1774 | 1774 | ||
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c index 0ee0bed3649b..6b8b92b19cec 100644 --- a/fs/nilfs2/dir.c +++ b/fs/nilfs2/dir.c | |||
@@ -61,11 +61,6 @@ static inline void nilfs_put_page(struct page *page) | |||
61 | page_cache_release(page); | 61 | page_cache_release(page); |
62 | } | 62 | } |
63 | 63 | ||
64 | static inline unsigned long dir_pages(struct inode *inode) | ||
65 | { | ||
66 | return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; | ||
67 | } | ||
68 | |||
69 | /* | 64 | /* |
70 | * Return the offset into page `page_nr' of the last valid | 65 | * Return the offset into page `page_nr' of the last valid |
71 | * byte in that page, plus one. | 66 | * byte in that page, plus one. |
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index 258d9fe2521a..4a73d6dffabf 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c | |||
@@ -307,31 +307,13 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping, | |||
307 | static ssize_t | 307 | static ssize_t |
308 | nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) | 308 | nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, loff_t offset) |
309 | { | 309 | { |
310 | struct file *file = iocb->ki_filp; | 310 | struct inode *inode = file_inode(iocb->ki_filp); |
311 | struct address_space *mapping = file->f_mapping; | ||
312 | struct inode *inode = file->f_mapping->host; | ||
313 | size_t count = iov_iter_count(iter); | ||
314 | ssize_t size; | ||
315 | 311 | ||
316 | if (iov_iter_rw(iter) == WRITE) | 312 | if (iov_iter_rw(iter) == WRITE) |
317 | return 0; | 313 | return 0; |
318 | 314 | ||
319 | /* Needs synchronization with the cleaner */ | 315 | /* Needs synchronization with the cleaner */ |
320 | size = blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block); | 316 | return blockdev_direct_IO(iocb, inode, iter, offset, nilfs_get_block); |
321 | |||
322 | /* | ||
323 | * In case of error extending write may have instantiated a few | ||
324 | * blocks outside i_size. Trim these off again. | ||
325 | */ | ||
326 | if (unlikely(iov_iter_rw(iter) == WRITE && size < 0)) { | ||
327 | loff_t isize = i_size_read(inode); | ||
328 | loff_t end = offset + count; | ||
329 | |||
330 | if (end > isize) | ||
331 | nilfs_write_failed(mapping, end); | ||
332 | } | ||
333 | |||
334 | return size; | ||
335 | } | 317 | } |
336 | 318 | ||
337 | const struct address_space_operations nilfs_aops = { | 319 | const struct address_space_operations nilfs_aops = { |
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index 2cd653670764..262561fea923 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c | |||
@@ -382,7 +382,7 @@ static ssize_t ntfs_prepare_file_for_write(struct kiocb *iocb, | |||
382 | base_ni = ni; | 382 | base_ni = ni; |
383 | if (NInoAttr(ni)) | 383 | if (NInoAttr(ni)) |
384 | base_ni = ni->ext.base_ntfs_ino; | 384 | base_ni = ni->ext.base_ntfs_ino; |
385 | err = file_remove_suid(file); | 385 | err = file_remove_privs(file); |
386 | if (unlikely(err)) | 386 | if (unlikely(err)) |
387 | goto out; | 387 | goto out; |
388 | /* | 388 | /* |
diff --git a/fs/ntfs/inode.h b/fs/ntfs/inode.h index 76b6cfb579d7..b3c3469de6cb 100644 --- a/fs/ntfs/inode.h +++ b/fs/ntfs/inode.h | |||
@@ -239,7 +239,7 @@ typedef struct { | |||
239 | */ | 239 | */ |
240 | static inline ntfs_inode *NTFS_I(struct inode *inode) | 240 | static inline ntfs_inode *NTFS_I(struct inode *inode) |
241 | { | 241 | { |
242 | return (ntfs_inode *)list_entry(inode, big_ntfs_inode, vfs_inode); | 242 | return (ntfs_inode *)container_of(inode, big_ntfs_inode, vfs_inode); |
243 | } | 243 | } |
244 | 244 | ||
245 | static inline struct inode *VFS_I(ntfs_inode *ni) | 245 | static inline struct inode *VFS_I(ntfs_inode *ni) |
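The list_entry() to container_of() conversions here and in squashfs, sysv and udf below are behaviour-neutral: list_entry() is container_of() under another name, so spelling it container_of() is clearer when the member is an embedded inode rather than a list head. A hypothetical filesystem-private inode lookup written the same way:

    /* illustrative only; foo_inode_info is a made-up example structure */
    struct foo_inode_info {
            unsigned long   i_flags;
            struct inode    vfs_inode;      /* embedded VFS inode */
    };

    static inline struct foo_inode_info *FOO_I(struct inode *inode)
    {
            return container_of(inode, struct foo_inode_info, vfs_inode);
    }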
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
@@ -51,8 +51,10 @@ int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs, | |||
51 | newattrs.ia_valid |= ATTR_FILE; | 51 | newattrs.ia_valid |= ATTR_FILE; |
52 | } | 52 | } |
53 | 53 | ||
54 | /* Remove suid/sgid on truncate too */ | 54 | /* Remove suid, sgid, and file capabilities on truncate too */ |
55 | ret = should_remove_suid(dentry); | 55 | ret = dentry_needs_remove_privs(dentry); |
56 | if (ret < 0) | ||
57 | return ret; | ||
56 | if (ret) | 58 | if (ret) |
57 | newattrs.ia_valid |= ret | ATTR_FORCE; | 59 | newattrs.ia_valid |= ret | ATTR_FORCE; |
58 | 60 | ||
@@ -678,18 +680,18 @@ int open_check_o_direct(struct file *f) | |||
678 | } | 680 | } |
679 | 681 | ||
680 | static int do_dentry_open(struct file *f, | 682 | static int do_dentry_open(struct file *f, |
683 | struct inode *inode, | ||
681 | int (*open)(struct inode *, struct file *), | 684 | int (*open)(struct inode *, struct file *), |
682 | const struct cred *cred) | 685 | const struct cred *cred) |
683 | { | 686 | { |
684 | static const struct file_operations empty_fops = {}; | 687 | static const struct file_operations empty_fops = {}; |
685 | struct inode *inode; | ||
686 | int error; | 688 | int error; |
687 | 689 | ||
688 | f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK | | 690 | f->f_mode = OPEN_FMODE(f->f_flags) | FMODE_LSEEK | |
689 | FMODE_PREAD | FMODE_PWRITE; | 691 | FMODE_PREAD | FMODE_PWRITE; |
690 | 692 | ||
691 | path_get(&f->f_path); | 693 | path_get(&f->f_path); |
692 | inode = f->f_inode = f->f_path.dentry->d_inode; | 694 | f->f_inode = inode; |
693 | f->f_mapping = inode->i_mapping; | 695 | f->f_mapping = inode->i_mapping; |
694 | 696 | ||
695 | if (unlikely(f->f_flags & O_PATH)) { | 697 | if (unlikely(f->f_flags & O_PATH)) { |
@@ -793,7 +795,8 @@ int finish_open(struct file *file, struct dentry *dentry, | |||
793 | BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ | 795 | BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */ |
794 | 796 | ||
795 | file->f_path.dentry = dentry; | 797 | file->f_path.dentry = dentry; |
796 | error = do_dentry_open(file, open, current_cred()); | 798 | error = do_dentry_open(file, d_backing_inode(dentry), open, |
799 | current_cred()); | ||
797 | if (!error) | 800 | if (!error) |
798 | *opened |= FILE_OPENED; | 801 | *opened |= FILE_OPENED; |
799 | 802 | ||
@@ -822,6 +825,34 @@ int finish_no_open(struct file *file, struct dentry *dentry) | |||
822 | } | 825 | } |
823 | EXPORT_SYMBOL(finish_no_open); | 826 | EXPORT_SYMBOL(finish_no_open); |
824 | 827 | ||
828 | char *file_path(struct file *filp, char *buf, int buflen) | ||
829 | { | ||
830 | return d_path(&filp->f_path, buf, buflen); | ||
831 | } | ||
832 | EXPORT_SYMBOL(file_path); | ||
833 | |||
834 | /** | ||
835 | * vfs_open - open the file at the given path | ||
836 | * @path: path to open | ||
837 | * @file: newly allocated file with f_flag initialized | ||
838 | * @cred: credentials to use | ||
839 | */ | ||
840 | int vfs_open(const struct path *path, struct file *file, | ||
841 | const struct cred *cred) | ||
842 | { | ||
843 | struct dentry *dentry = path->dentry; | ||
844 | struct inode *inode = dentry->d_inode; | ||
845 | |||
846 | file->f_path = *path; | ||
847 | if (dentry->d_flags & DCACHE_OP_SELECT_INODE) { | ||
848 | inode = dentry->d_op->d_select_inode(dentry, file->f_flags); | ||
849 | if (IS_ERR(inode)) | ||
850 | return PTR_ERR(inode); | ||
851 | } | ||
852 | |||
853 | return do_dentry_open(file, inode, NULL, cred); | ||
854 | } | ||
855 | |||
825 | struct file *dentry_open(const struct path *path, int flags, | 856 | struct file *dentry_open(const struct path *path, int flags, |
826 | const struct cred *cred) | 857 | const struct cred *cred) |
827 | { | 858 | { |
@@ -853,26 +884,6 @@ struct file *dentry_open(const struct path *path, int flags, | |||
853 | } | 884 | } |
854 | EXPORT_SYMBOL(dentry_open); | 885 | EXPORT_SYMBOL(dentry_open); |
855 | 886 | ||
856 | /** | ||
857 | * vfs_open - open the file at the given path | ||
858 | * @path: path to open | ||
859 | * @filp: newly allocated file with f_flag initialized | ||
860 | * @cred: credentials to use | ||
861 | */ | ||
862 | int vfs_open(const struct path *path, struct file *filp, | ||
863 | const struct cred *cred) | ||
864 | { | ||
865 | struct inode *inode = path->dentry->d_inode; | ||
866 | |||
867 | if (inode->i_op->dentry_open) | ||
868 | return inode->i_op->dentry_open(path->dentry, filp, cred); | ||
869 | else { | ||
870 | filp->f_path = *path; | ||
871 | return do_dentry_open(filp, NULL, cred); | ||
872 | } | ||
873 | } | ||
874 | EXPORT_SYMBOL(vfs_open); | ||
875 | |||
876 | static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op) | 887 | static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op) |
877 | { | 888 | { |
878 | int lookup_flags = 0; | 889 | int lookup_flags = 0; |
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c index 308379b2d0b2..f140e3dbfb7b 100644 --- a/fs/overlayfs/inode.c +++ b/fs/overlayfs/inode.c | |||
@@ -337,37 +337,30 @@ static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type, | |||
337 | return true; | 337 | return true; |
338 | } | 338 | } |
339 | 339 | ||
340 | static int ovl_dentry_open(struct dentry *dentry, struct file *file, | 340 | struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags) |
341 | const struct cred *cred) | ||
342 | { | 341 | { |
343 | int err; | 342 | int err; |
344 | struct path realpath; | 343 | struct path realpath; |
345 | enum ovl_path_type type; | 344 | enum ovl_path_type type; |
346 | bool want_write = false; | ||
347 | 345 | ||
348 | type = ovl_path_real(dentry, &realpath); | 346 | type = ovl_path_real(dentry, &realpath); |
349 | if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) { | 347 | if (ovl_open_need_copy_up(file_flags, type, realpath.dentry)) { |
350 | want_write = true; | ||
351 | err = ovl_want_write(dentry); | 348 | err = ovl_want_write(dentry); |
352 | if (err) | 349 | if (err) |
353 | goto out; | 350 | return ERR_PTR(err); |
354 | 351 | ||
355 | if (file->f_flags & O_TRUNC) | 352 | if (file_flags & O_TRUNC) |
356 | err = ovl_copy_up_last(dentry, NULL, true); | 353 | err = ovl_copy_up_last(dentry, NULL, true); |
357 | else | 354 | else |
358 | err = ovl_copy_up(dentry); | 355 | err = ovl_copy_up(dentry); |
356 | ovl_drop_write(dentry); | ||
359 | if (err) | 357 | if (err) |
360 | goto out_drop_write; | 358 | return ERR_PTR(err); |
361 | 359 | ||
362 | ovl_path_upper(dentry, &realpath); | 360 | ovl_path_upper(dentry, &realpath); |
363 | } | 361 | } |
364 | 362 | ||
365 | err = vfs_open(&realpath, file, cred); | 363 | return d_backing_inode(realpath.dentry); |
366 | out_drop_write: | ||
367 | if (want_write) | ||
368 | ovl_drop_write(dentry); | ||
369 | out: | ||
370 | return err; | ||
371 | } | 364 | } |
372 | 365 | ||
373 | static const struct inode_operations ovl_file_inode_operations = { | 366 | static const struct inode_operations ovl_file_inode_operations = { |
@@ -378,7 +371,6 @@ static const struct inode_operations ovl_file_inode_operations = { | |||
378 | .getxattr = ovl_getxattr, | 371 | .getxattr = ovl_getxattr, |
379 | .listxattr = ovl_listxattr, | 372 | .listxattr = ovl_listxattr, |
380 | .removexattr = ovl_removexattr, | 373 | .removexattr = ovl_removexattr, |
381 | .dentry_open = ovl_dentry_open, | ||
382 | }; | 374 | }; |
383 | 375 | ||
384 | static const struct inode_operations ovl_symlink_inode_operations = { | 376 | static const struct inode_operations ovl_symlink_inode_operations = { |
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h index 17ac5afc9ffb..ea5a40b06e3a 100644 --- a/fs/overlayfs/overlayfs.h +++ b/fs/overlayfs/overlayfs.h | |||
@@ -173,6 +173,7 @@ ssize_t ovl_getxattr(struct dentry *dentry, const char *name, | |||
173 | void *value, size_t size); | 173 | void *value, size_t size); |
174 | ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size); | 174 | ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size); |
175 | int ovl_removexattr(struct dentry *dentry, const char *name); | 175 | int ovl_removexattr(struct dentry *dentry, const char *name); |
176 | struct inode *ovl_d_select_inode(struct dentry *dentry, unsigned file_flags); | ||
176 | 177 | ||
177 | struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, | 178 | struct inode *ovl_new_inode(struct super_block *sb, umode_t mode, |
178 | struct ovl_entry *oe); | 179 | struct ovl_entry *oe); |
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c index 8a08c582bc22..7466ff339c66 100644 --- a/fs/overlayfs/super.c +++ b/fs/overlayfs/super.c | |||
@@ -316,6 +316,7 @@ static int ovl_dentry_weak_revalidate(struct dentry *dentry, unsigned int flags) | |||
316 | 316 | ||
317 | static const struct dentry_operations ovl_dentry_operations = { | 317 | static const struct dentry_operations ovl_dentry_operations = { |
318 | .d_release = ovl_dentry_release, | 318 | .d_release = ovl_dentry_release, |
319 | .d_select_inode = ovl_d_select_inode, | ||
319 | }; | 320 | }; |
320 | 321 | ||
321 | static const struct dentry_operations ovl_reval_dentry_operations = { | 322 | static const struct dentry_operations ovl_reval_dentry_operations = { |
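Taken together, the open.c, overlayfs and dcache.h changes replace the ->dentry_open() inode operation with a ->d_select_inode() dentry operation that vfs_open() consults before calling do_dentry_open(). A filesystem wanting the same hook would wire it up roughly as below (a sketch, not taken from the patch; the foofs_* names are placeholders):

    /* pick the backing inode at open time, e.g. after a copy-up */
    static struct inode *foofs_select_inode(struct dentry *dentry, unsigned file_flags)
    {
            return d_backing_inode(dentry);         /* or ERR_PTR(-errno) on failure */
    }

    static const struct dentry_operations foofs_dentry_operations = {
            .d_select_inode = foofs_select_inode,
    };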
diff --git a/fs/posix_acl.c b/fs/posix_acl.c index 84bb65b83570..4fb17ded7d47 100644 --- a/fs/posix_acl.c +++ b/fs/posix_acl.c | |||
@@ -547,51 +547,45 @@ posix_acl_create(struct inode *dir, umode_t *mode, | |||
547 | struct posix_acl **default_acl, struct posix_acl **acl) | 547 | struct posix_acl **default_acl, struct posix_acl **acl) |
548 | { | 548 | { |
549 | struct posix_acl *p; | 549 | struct posix_acl *p; |
550 | struct posix_acl *clone; | ||
550 | int ret; | 551 | int ret; |
551 | 552 | ||
553 | *acl = NULL; | ||
554 | *default_acl = NULL; | ||
555 | |||
552 | if (S_ISLNK(*mode) || !IS_POSIXACL(dir)) | 556 | if (S_ISLNK(*mode) || !IS_POSIXACL(dir)) |
553 | goto no_acl; | 557 | return 0; |
554 | 558 | ||
555 | p = get_acl(dir, ACL_TYPE_DEFAULT); | 559 | p = get_acl(dir, ACL_TYPE_DEFAULT); |
556 | if (IS_ERR(p)) { | 560 | if (!p || p == ERR_PTR(-EOPNOTSUPP)) { |
557 | if (p == ERR_PTR(-EOPNOTSUPP)) | 561 | *mode &= ~current_umask(); |
558 | goto apply_umask; | 562 | return 0; |
559 | return PTR_ERR(p); | ||
560 | } | 563 | } |
564 | if (IS_ERR(p)) | ||
565 | return PTR_ERR(p); | ||
561 | 566 | ||
562 | if (!p) | 567 | clone = posix_acl_clone(p, GFP_NOFS); |
563 | goto apply_umask; | 568 | if (!clone) |
564 | |||
565 | *acl = posix_acl_clone(p, GFP_NOFS); | ||
566 | if (!*acl) | ||
567 | goto no_mem; | 569 | goto no_mem; |
568 | 570 | ||
569 | ret = posix_acl_create_masq(*acl, mode); | 571 | ret = posix_acl_create_masq(clone, mode); |
570 | if (ret < 0) | 572 | if (ret < 0) |
571 | goto no_mem_clone; | 573 | goto no_mem_clone; |
572 | 574 | ||
573 | if (ret == 0) { | 575 | if (ret == 0) |
574 | posix_acl_release(*acl); | 576 | posix_acl_release(clone); |
575 | *acl = NULL; | 577 | else |
576 | } | 578 | *acl = clone; |
577 | 579 | ||
578 | if (!S_ISDIR(*mode)) { | 580 | if (!S_ISDIR(*mode)) |
579 | posix_acl_release(p); | 581 | posix_acl_release(p); |
580 | *default_acl = NULL; | 582 | else |
581 | } else { | ||
582 | *default_acl = p; | 583 | *default_acl = p; |
583 | } | ||
584 | return 0; | ||
585 | 584 | ||
586 | apply_umask: | ||
587 | *mode &= ~current_umask(); | ||
588 | no_acl: | ||
589 | *default_acl = NULL; | ||
590 | *acl = NULL; | ||
591 | return 0; | 585 | return 0; |
592 | 586 | ||
593 | no_mem_clone: | 587 | no_mem_clone: |
594 | posix_acl_release(*acl); | 588 | posix_acl_release(clone); |
595 | no_mem: | 589 | no_mem: |
596 | posix_acl_release(p); | 590 | posix_acl_release(p); |
597 | return -ENOMEM; | 591 | return -ENOMEM; |
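The posix_acl_create() rework clears both ACL pointers up front and keeps the calling convention unchanged, so existing callers in filesystem create paths are unaffected; a typical caller still looks roughly like this (sketch with abbreviated error handling; foofs_set_acl() stands in for the filesystem's own setter):

    struct posix_acl *default_acl, *acl;
    int err;

    err = posix_acl_create(dir, &inode->i_mode, &default_acl, &acl);
    if (err)
            return err;

    if (default_acl) {
            err = foofs_set_acl(inode, default_acl, ACL_TYPE_DEFAULT);
            posix_acl_release(default_acl);
    }
    if (!err && acl) {
            err = foofs_set_acl(inode, acl, ACL_TYPE_ACCESS);
            posix_acl_release(acl);
    }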
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c index d4a35746cab9..f8595e8b5cd0 100644 --- a/fs/proc/nommu.c +++ b/fs/proc/nommu.c | |||
@@ -64,7 +64,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region) | |||
64 | 64 | ||
65 | if (file) { | 65 | if (file) { |
66 | seq_pad(m, ' '); | 66 | seq_pad(m, ' '); |
67 | seq_path(m, &file->f_path, ""); | 67 | seq_file_path(m, file, ""); |
68 | } | 68 | } |
69 | 69 | ||
70 | seq_putc(m, '\n'); | 70 | seq_putc(m, '\n'); |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 6dee68d013ff..ca1e091881d4 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -310,7 +310,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) | |||
310 | */ | 310 | */ |
311 | if (file) { | 311 | if (file) { |
312 | seq_pad(m, ' '); | 312 | seq_pad(m, ' '); |
313 | seq_path(m, &file->f_path, "\n"); | 313 | seq_file_path(m, file, "\n"); |
314 | goto done; | 314 | goto done; |
315 | } | 315 | } |
316 | 316 | ||
@@ -1509,7 +1509,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) | |||
1509 | 1509 | ||
1510 | if (file) { | 1510 | if (file) { |
1511 | seq_puts(m, " file="); | 1511 | seq_puts(m, " file="); |
1512 | seq_path(m, &file->f_path, "\n\t= "); | 1512 | seq_file_path(m, file, "\n\t= "); |
1513 | } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { | 1513 | } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { |
1514 | seq_puts(m, " heap"); | 1514 | seq_puts(m, " heap"); |
1515 | } else { | 1515 | } else { |
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 599ec2e20104..e0d64c92e4f6 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c | |||
@@ -180,7 +180,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, | |||
180 | 180 | ||
181 | if (file) { | 181 | if (file) { |
182 | seq_pad(m, ' '); | 182 | seq_pad(m, ' '); |
183 | seq_path(m, &file->f_path, ""); | 183 | seq_file_path(m, file, ""); |
184 | } else if (mm) { | 184 | } else if (mm) { |
185 | pid_t tid = pid_of_stack(priv, vma, is_pid); | 185 | pid_t tid = pid_of_stack(priv, vma, is_pid); |
186 | 186 | ||
diff --git a/fs/qnx6/dir.c b/fs/qnx6/dir.c index 8d64bb5366bf..e1f37278cf97 100644 --- a/fs/qnx6/dir.c +++ b/fs/qnx6/dir.c | |||
@@ -32,11 +32,6 @@ static struct page *qnx6_get_page(struct inode *dir, unsigned long n) | |||
32 | return page; | 32 | return page; |
33 | } | 33 | } |
34 | 34 | ||
35 | static inline unsigned long dir_pages(struct inode *inode) | ||
36 | { | ||
37 | return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; | ||
38 | } | ||
39 | |||
40 | static unsigned last_entry(struct inode *inode, unsigned long page_nr) | 35 | static unsigned last_entry(struct inode *inode, unsigned long page_nr) |
41 | { | 36 | { |
42 | unsigned long last_byte = inode->i_size; | 37 | unsigned long last_byte = inode->i_size; |
diff --git a/fs/seq_file.c b/fs/seq_file.c index 1d9c1cbd4d0b..ce9e39fd5daf 100644 --- a/fs/seq_file.c +++ b/fs/seq_file.c | |||
@@ -490,6 +490,20 @@ int seq_path(struct seq_file *m, const struct path *path, const char *esc) | |||
490 | } | 490 | } |
491 | EXPORT_SYMBOL(seq_path); | 491 | EXPORT_SYMBOL(seq_path); |
492 | 492 | ||
493 | /** | ||
494 | * seq_file_path - seq_file interface to print a pathname of a file | ||
495 | * @m: the seq_file handle | ||
496 | * @file: the struct file to print | ||
497 | * @esc: set of characters to escape in the output | ||
498 | * | ||
499 | * return the absolute path to the file. | ||
500 | */ | ||
501 | int seq_file_path(struct seq_file *m, struct file *file, const char *esc) | ||
502 | { | ||
503 | return seq_path(m, &file->f_path, esc); | ||
504 | } | ||
505 | EXPORT_SYMBOL(seq_file_path); | ||
506 | |||
493 | /* | 507 | /* |
494 | * Same as seq_path, but relative to supplied root. | 508 | * Same as seq_path, but relative to supplied root. |
495 | */ | 509 | */ |
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h index 73588e7700ed..d09fcd6fb85d 100644 --- a/fs/squashfs/squashfs_fs_i.h +++ b/fs/squashfs/squashfs_fs_i.h | |||
@@ -49,6 +49,6 @@ struct squashfs_inode_info { | |||
49 | 49 | ||
50 | static inline struct squashfs_inode_info *squashfs_i(struct inode *inode) | 50 | static inline struct squashfs_inode_info *squashfs_i(struct inode *inode) |
51 | { | 51 | { |
52 | return list_entry(inode, struct squashfs_inode_info, vfs_inode); | 52 | return container_of(inode, struct squashfs_inode_info, vfs_inode); |
53 | } | 53 | } |
54 | #endif | 54 | #endif |
diff --git a/fs/super.c b/fs/super.c index 928c20f47af9..b61372354f2b 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -842,7 +842,7 @@ int get_anon_bdev(dev_t *p) | |||
842 | else if (error) | 842 | else if (error) |
843 | return -EAGAIN; | 843 | return -EAGAIN; |
844 | 844 | ||
845 | if (dev == (1 << MINORBITS)) { | 845 | if (dev >= (1 << MINORBITS)) { |
846 | spin_lock(&unnamed_dev_lock); | 846 | spin_lock(&unnamed_dev_lock); |
847 | ida_remove(&unnamed_dev_ida, dev); | 847 | ida_remove(&unnamed_dev_ida, dev); |
848 | if (unnamed_dev_start > dev) | 848 | if (unnamed_dev_start > dev) |
diff --git a/fs/sysv/dir.c b/fs/sysv/dir.c index 8f3555f00c54..63c1bcb224ee 100644 --- a/fs/sysv/dir.c +++ b/fs/sysv/dir.c | |||
@@ -33,11 +33,6 @@ static inline void dir_put_page(struct page *page) | |||
33 | page_cache_release(page); | 33 | page_cache_release(page); |
34 | } | 34 | } |
35 | 35 | ||
36 | static inline unsigned long dir_pages(struct inode *inode) | ||
37 | { | ||
38 | return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; | ||
39 | } | ||
40 | |||
41 | static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) | 36 | static int dir_commit_chunk(struct page *page, loff_t pos, unsigned len) |
42 | { | 37 | { |
43 | struct address_space *mapping = page->mapping; | 38 | struct address_space *mapping = page->mapping; |
diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h index 2c13525131cd..6c212288adcb 100644 --- a/fs/sysv/sysv.h +++ b/fs/sysv/sysv.h | |||
@@ -73,7 +73,7 @@ struct sysv_inode_info { | |||
73 | 73 | ||
74 | static inline struct sysv_inode_info *SYSV_I(struct inode *inode) | 74 | static inline struct sysv_inode_info *SYSV_I(struct inode *inode) |
75 | { | 75 | { |
76 | return list_entry(inode, struct sysv_inode_info, vfs_inode); | 76 | return container_of(inode, struct sysv_inode_info, vfs_inode); |
77 | } | 77 | } |
78 | 78 | ||
79 | static inline struct sysv_sb_info *SYSV_SB(struct super_block *sb) | 79 | static inline struct sysv_sb_info *SYSV_SB(struct super_block *sb) |
diff --git a/fs/tracefs/inode.c b/fs/tracefs/inode.c index a43df11a163f..cbc8d5d2755a 100644 --- a/fs/tracefs/inode.c +++ b/fs/tracefs/inode.c | |||
@@ -496,16 +496,11 @@ struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *pare | |||
496 | return dentry; | 496 | return dentry; |
497 | } | 497 | } |
498 | 498 | ||
499 | static inline int tracefs_positive(struct dentry *dentry) | ||
500 | { | ||
501 | return dentry->d_inode && !d_unhashed(dentry); | ||
502 | } | ||
503 | |||
504 | static int __tracefs_remove(struct dentry *dentry, struct dentry *parent) | 499 | static int __tracefs_remove(struct dentry *dentry, struct dentry *parent) |
505 | { | 500 | { |
506 | int ret = 0; | 501 | int ret = 0; |
507 | 502 | ||
508 | if (tracefs_positive(dentry)) { | 503 | if (simple_positive(dentry)) { |
509 | if (dentry->d_inode) { | 504 | if (dentry->d_inode) { |
510 | dget(dentry); | 505 | dget(dentry); |
511 | switch (dentry->d_inode->i_mode & S_IFMT) { | 506 | switch (dentry->d_inode->i_mode & S_IFMT) { |
@@ -582,7 +577,7 @@ void tracefs_remove_recursive(struct dentry *dentry) | |||
582 | */ | 577 | */ |
583 | spin_lock(&parent->d_lock); | 578 | spin_lock(&parent->d_lock); |
584 | list_for_each_entry(child, &parent->d_subdirs, d_child) { | 579 | list_for_each_entry(child, &parent->d_subdirs, d_child) { |
585 | if (!tracefs_positive(child)) | 580 | if (!simple_positive(child)) |
586 | continue; | 581 | continue; |
587 | 582 | ||
588 | /* perhaps simple_empty(child) makes more sense */ | 583 | /* perhaps simple_empty(child) makes more sense */ |
@@ -603,7 +598,7 @@ void tracefs_remove_recursive(struct dentry *dentry) | |||
603 | * from d_subdirs. When releasing the parent->d_lock we can | 598 | * from d_subdirs. When releasing the parent->d_lock we can |
604 | * no longer trust that the next pointer is valid. | 599 | * no longer trust that the next pointer is valid. |
605 | * Restart the loop. We'll skip this one with the | 600 | * Restart the loop. We'll skip this one with the |
606 | * tracefs_positive() check. | 601 | * simple_positive() check. |
607 | */ | 602 | */ |
608 | goto loop; | 603 | goto loop; |
609 | } | 604 | } |
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h index b5cd8ed2aa12..b1b9a63d8cf3 100644 --- a/fs/udf/udf_i.h +++ b/fs/udf/udf_i.h | |||
@@ -56,7 +56,7 @@ struct udf_inode_info { | |||
56 | 56 | ||
57 | static inline struct udf_inode_info *UDF_I(struct inode *inode) | 57 | static inline struct udf_inode_info *UDF_I(struct inode *inode) |
58 | { | 58 | { |
59 | return list_entry(inode, struct udf_inode_info, vfs_inode); | 59 | return container_of(inode, struct udf_inode_info, vfs_inode); |
60 | } | 60 | } |
61 | 61 | ||
62 | #endif /* _UDF_I_H) */ | 62 | #endif /* _UDF_I_H) */ |
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 2c1036080d52..a7106eda5024 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c | |||
@@ -51,8 +51,8 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) | |||
51 | 51 | ||
52 | if (ufs_fragnum(fragment) + count > uspi->s_fpg) | 52 | if (ufs_fragnum(fragment) + count > uspi->s_fpg) |
53 | ufs_error (sb, "ufs_free_fragments", "internal error"); | 53 | ufs_error (sb, "ufs_free_fragments", "internal error"); |
54 | 54 | ||
55 | lock_ufs(sb); | 55 | mutex_lock(&UFS_SB(sb)->s_lock); |
56 | 56 | ||
57 | cgno = ufs_dtog(uspi, fragment); | 57 | cgno = ufs_dtog(uspi, fragment); |
58 | bit = ufs_dtogd(uspi, fragment); | 58 | bit = ufs_dtogd(uspi, fragment); |
@@ -115,13 +115,13 @@ void ufs_free_fragments(struct inode *inode, u64 fragment, unsigned count) | |||
115 | if (sb->s_flags & MS_SYNCHRONOUS) | 115 | if (sb->s_flags & MS_SYNCHRONOUS) |
116 | ubh_sync_block(UCPI_UBH(ucpi)); | 116 | ubh_sync_block(UCPI_UBH(ucpi)); |
117 | ufs_mark_sb_dirty(sb); | 117 | ufs_mark_sb_dirty(sb); |
118 | 118 | ||
119 | unlock_ufs(sb); | 119 | mutex_unlock(&UFS_SB(sb)->s_lock); |
120 | UFSD("EXIT\n"); | 120 | UFSD("EXIT\n"); |
121 | return; | 121 | return; |
122 | 122 | ||
123 | failed: | 123 | failed: |
124 | unlock_ufs(sb); | 124 | mutex_unlock(&UFS_SB(sb)->s_lock); |
125 | UFSD("EXIT (FAILED)\n"); | 125 | UFSD("EXIT (FAILED)\n"); |
126 | return; | 126 | return; |
127 | } | 127 | } |
@@ -151,7 +151,7 @@ void ufs_free_blocks(struct inode *inode, u64 fragment, unsigned count) | |||
151 | goto failed; | 151 | goto failed; |
152 | } | 152 | } |
153 | 153 | ||
154 | lock_ufs(sb); | 154 | mutex_lock(&UFS_SB(sb)->s_lock); |
155 | 155 | ||
156 | do_more: | 156 | do_more: |
157 | overflow = 0; | 157 | overflow = 0; |
@@ -211,12 +211,12 @@ do_more: | |||
211 | } | 211 | } |
212 | 212 | ||
213 | ufs_mark_sb_dirty(sb); | 213 | ufs_mark_sb_dirty(sb); |
214 | unlock_ufs(sb); | 214 | mutex_unlock(&UFS_SB(sb)->s_lock); |
215 | UFSD("EXIT\n"); | 215 | UFSD("EXIT\n"); |
216 | return; | 216 | return; |
217 | 217 | ||
218 | failed_unlock: | 218 | failed_unlock: |
219 | unlock_ufs(sb); | 219 | mutex_unlock(&UFS_SB(sb)->s_lock); |
220 | failed: | 220 | failed: |
221 | UFSD("EXIT (FAILED)\n"); | 221 | UFSD("EXIT (FAILED)\n"); |
222 | return; | 222 | return; |
@@ -357,7 +357,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
357 | usb1 = ubh_get_usb_first(uspi); | 357 | usb1 = ubh_get_usb_first(uspi); |
358 | *err = -ENOSPC; | 358 | *err = -ENOSPC; |
359 | 359 | ||
360 | lock_ufs(sb); | 360 | mutex_lock(&UFS_SB(sb)->s_lock); |
361 | tmp = ufs_data_ptr_to_cpu(sb, p); | 361 | tmp = ufs_data_ptr_to_cpu(sb, p); |
362 | 362 | ||
363 | if (count + ufs_fragnum(fragment) > uspi->s_fpb) { | 363 | if (count + ufs_fragnum(fragment) > uspi->s_fpb) { |
@@ -378,19 +378,19 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
378 | "fragment %llu, tmp %llu\n", | 378 | "fragment %llu, tmp %llu\n", |
379 | (unsigned long long)fragment, | 379 | (unsigned long long)fragment, |
380 | (unsigned long long)tmp); | 380 | (unsigned long long)tmp); |
381 | unlock_ufs(sb); | 381 | mutex_unlock(&UFS_SB(sb)->s_lock); |
382 | return INVBLOCK; | 382 | return INVBLOCK; |
383 | } | 383 | } |
384 | if (fragment < UFS_I(inode)->i_lastfrag) { | 384 | if (fragment < UFS_I(inode)->i_lastfrag) { |
385 | UFSD("EXIT (ALREADY ALLOCATED)\n"); | 385 | UFSD("EXIT (ALREADY ALLOCATED)\n"); |
386 | unlock_ufs(sb); | 386 | mutex_unlock(&UFS_SB(sb)->s_lock); |
387 | return 0; | 387 | return 0; |
388 | } | 388 | } |
389 | } | 389 | } |
390 | else { | 390 | else { |
391 | if (tmp) { | 391 | if (tmp) { |
392 | UFSD("EXIT (ALREADY ALLOCATED)\n"); | 392 | UFSD("EXIT (ALREADY ALLOCATED)\n"); |
393 | unlock_ufs(sb); | 393 | mutex_unlock(&UFS_SB(sb)->s_lock); |
394 | return 0; | 394 | return 0; |
395 | } | 395 | } |
396 | } | 396 | } |
@@ -399,7 +399,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
399 | * There is not enough space for user on the device | 399 | * There is not enough space for user on the device |
400 | */ | 400 | */ |
401 | if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) { | 401 | if (!capable(CAP_SYS_RESOURCE) && ufs_freespace(uspi, UFS_MINFREE) <= 0) { |
402 | unlock_ufs(sb); | 402 | mutex_unlock(&UFS_SB(sb)->s_lock); |
403 | UFSD("EXIT (FAILED)\n"); | 403 | UFSD("EXIT (FAILED)\n"); |
404 | return 0; | 404 | return 0; |
405 | } | 405 | } |
@@ -424,7 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
424 | ufs_clear_frags(inode, result + oldcount, | 424 | ufs_clear_frags(inode, result + oldcount, |
425 | newcount - oldcount, locked_page != NULL); | 425 | newcount - oldcount, locked_page != NULL); |
426 | } | 426 | } |
427 | unlock_ufs(sb); | 427 | mutex_unlock(&UFS_SB(sb)->s_lock); |
428 | UFSD("EXIT, result %llu\n", (unsigned long long)result); | 428 | UFSD("EXIT, result %llu\n", (unsigned long long)result); |
429 | return result; | 429 | return result; |
430 | } | 430 | } |
@@ -439,7 +439,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
439 | fragment + count); | 439 | fragment + count); |
440 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, | 440 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, |
441 | locked_page != NULL); | 441 | locked_page != NULL); |
442 | unlock_ufs(sb); | 442 | mutex_unlock(&UFS_SB(sb)->s_lock); |
443 | UFSD("EXIT, result %llu\n", (unsigned long long)result); | 443 | UFSD("EXIT, result %llu\n", (unsigned long long)result); |
444 | return result; | 444 | return result; |
445 | } | 445 | } |
@@ -477,7 +477,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
477 | *err = 0; | 477 | *err = 0; |
478 | UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, | 478 | UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, |
479 | fragment + count); | 479 | fragment + count); |
480 | unlock_ufs(sb); | 480 | mutex_unlock(&UFS_SB(sb)->s_lock); |
481 | if (newcount < request) | 481 | if (newcount < request) |
482 | ufs_free_fragments (inode, result + newcount, request - newcount); | 482 | ufs_free_fragments (inode, result + newcount, request - newcount); |
483 | ufs_free_fragments (inode, tmp, oldcount); | 483 | ufs_free_fragments (inode, tmp, oldcount); |
@@ -485,7 +485,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
485 | return result; | 485 | return result; |
486 | } | 486 | } |
487 | 487 | ||
488 | unlock_ufs(sb); | 488 | mutex_unlock(&UFS_SB(sb)->s_lock); |
489 | UFSD("EXIT (FAILED)\n"); | 489 | UFSD("EXIT (FAILED)\n"); |
490 | return 0; | 490 | return 0; |
491 | } | 491 | } |
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c index 1bfe8cabff0f..74f2e80288bf 100644 --- a/fs/ufs/dir.c +++ b/fs/ufs/dir.c | |||
@@ -65,11 +65,6 @@ static inline void ufs_put_page(struct page *page) | |||
65 | page_cache_release(page); | 65 | page_cache_release(page); |
66 | } | 66 | } |
67 | 67 | ||
68 | static inline unsigned long ufs_dir_pages(struct inode *inode) | ||
69 | { | ||
70 | return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; | ||
71 | } | ||
72 | |||
73 | ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr) | 68 | ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr) |
74 | { | 69 | { |
75 | ino_t res = 0; | 70 | ino_t res = 0; |
@@ -87,7 +82,8 @@ ino_t ufs_inode_by_name(struct inode *dir, const struct qstr *qstr) | |||
87 | 82 | ||
88 | /* Releases the page */ | 83 | /* Releases the page */ |
89 | void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, | 84 | void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, |
90 | struct page *page, struct inode *inode) | 85 | struct page *page, struct inode *inode, |
86 | bool update_times) | ||
91 | { | 87 | { |
92 | loff_t pos = page_offset(page) + | 88 | loff_t pos = page_offset(page) + |
93 | (char *) de - (char *) page_address(page); | 89 | (char *) de - (char *) page_address(page); |
@@ -103,7 +99,8 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, | |||
103 | 99 | ||
104 | err = ufs_commit_chunk(page, pos, len); | 100 | err = ufs_commit_chunk(page, pos, len); |
105 | ufs_put_page(page); | 101 | ufs_put_page(page); |
106 | dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; | 102 | if (update_times) |
103 | dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; | ||
107 | mark_inode_dirty(dir); | 104 | mark_inode_dirty(dir); |
108 | } | 105 | } |
109 | 106 | ||
@@ -256,7 +253,7 @@ struct ufs_dir_entry *ufs_find_entry(struct inode *dir, const struct qstr *qstr, | |||
256 | int namelen = qstr->len; | 253 | int namelen = qstr->len; |
257 | unsigned reclen = UFS_DIR_REC_LEN(namelen); | 254 | unsigned reclen = UFS_DIR_REC_LEN(namelen); |
258 | unsigned long start, n; | 255 | unsigned long start, n; |
259 | unsigned long npages = ufs_dir_pages(dir); | 256 | unsigned long npages = dir_pages(dir); |
260 | struct page *page = NULL; | 257 | struct page *page = NULL; |
261 | struct ufs_inode_info *ui = UFS_I(dir); | 258 | struct ufs_inode_info *ui = UFS_I(dir); |
262 | struct ufs_dir_entry *de; | 259 | struct ufs_dir_entry *de; |
@@ -320,7 +317,7 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode) | |||
320 | unsigned short rec_len, name_len; | 317 | unsigned short rec_len, name_len; |
321 | struct page *page = NULL; | 318 | struct page *page = NULL; |
322 | struct ufs_dir_entry *de; | 319 | struct ufs_dir_entry *de; |
323 | unsigned long npages = ufs_dir_pages(dir); | 320 | unsigned long npages = dir_pages(dir); |
324 | unsigned long n; | 321 | unsigned long n; |
325 | char *kaddr; | 322 | char *kaddr; |
326 | loff_t pos; | 323 | loff_t pos; |
@@ -437,7 +434,7 @@ ufs_readdir(struct file *file, struct dir_context *ctx) | |||
437 | struct super_block *sb = inode->i_sb; | 434 | struct super_block *sb = inode->i_sb; |
438 | unsigned int offset = pos & ~PAGE_CACHE_MASK; | 435 | unsigned int offset = pos & ~PAGE_CACHE_MASK; |
439 | unsigned long n = pos >> PAGE_CACHE_SHIFT; | 436 | unsigned long n = pos >> PAGE_CACHE_SHIFT; |
440 | unsigned long npages = ufs_dir_pages(inode); | 437 | unsigned long npages = dir_pages(inode); |
441 | unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1); | 438 | unsigned chunk_mask = ~(UFS_SB(sb)->s_uspi->s_dirblksize - 1); |
442 | int need_revalidate = file->f_version != inode->i_version; | 439 | int need_revalidate = file->f_version != inode->i_version; |
443 | unsigned flags = UFS_SB(sb)->s_flags; | 440 | unsigned flags = UFS_SB(sb)->s_flags; |
@@ -608,7 +605,7 @@ int ufs_empty_dir(struct inode * inode) | |||
608 | { | 605 | { |
609 | struct super_block *sb = inode->i_sb; | 606 | struct super_block *sb = inode->i_sb; |
610 | struct page *page = NULL; | 607 | struct page *page = NULL; |
611 | unsigned long i, npages = ufs_dir_pages(inode); | 608 | unsigned long i, npages = dir_pages(inode); |
612 | 609 | ||
613 | for (i = 0; i < npages; i++) { | 610 | for (i = 0; i < npages; i++) { |
614 | char *kaddr; | 611 | char *kaddr; |
diff --git a/fs/ufs/ialloc.c b/fs/ufs/ialloc.c index 7caa01652888..fd0203ce1f7f 100644 --- a/fs/ufs/ialloc.c +++ b/fs/ufs/ialloc.c | |||
@@ -69,11 +69,11 @@ void ufs_free_inode (struct inode * inode) | |||
69 | 69 | ||
70 | ino = inode->i_ino; | 70 | ino = inode->i_ino; |
71 | 71 | ||
72 | lock_ufs(sb); | 72 | mutex_lock(&UFS_SB(sb)->s_lock); |
73 | 73 | ||
74 | if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) { | 74 | if (!((ino > 1) && (ino < (uspi->s_ncg * uspi->s_ipg )))) { |
75 | ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino); | 75 | ufs_warning(sb, "ufs_free_inode", "reserved inode or nonexistent inode %u\n", ino); |
76 | unlock_ufs(sb); | 76 | mutex_unlock(&UFS_SB(sb)->s_lock); |
77 | return; | 77 | return; |
78 | } | 78 | } |
79 | 79 | ||
@@ -81,7 +81,7 @@ void ufs_free_inode (struct inode * inode) | |||
81 | bit = ufs_inotocgoff (ino); | 81 | bit = ufs_inotocgoff (ino); |
82 | ucpi = ufs_load_cylinder (sb, cg); | 82 | ucpi = ufs_load_cylinder (sb, cg); |
83 | if (!ucpi) { | 83 | if (!ucpi) { |
84 | unlock_ufs(sb); | 84 | mutex_unlock(&UFS_SB(sb)->s_lock); |
85 | return; | 85 | return; |
86 | } | 86 | } |
87 | ucg = ubh_get_ucg(UCPI_UBH(ucpi)); | 87 | ucg = ubh_get_ucg(UCPI_UBH(ucpi)); |
@@ -115,7 +115,7 @@ void ufs_free_inode (struct inode * inode) | |||
115 | ubh_sync_block(UCPI_UBH(ucpi)); | 115 | ubh_sync_block(UCPI_UBH(ucpi)); |
116 | 116 | ||
117 | ufs_mark_sb_dirty(sb); | 117 | ufs_mark_sb_dirty(sb); |
118 | unlock_ufs(sb); | 118 | mutex_unlock(&UFS_SB(sb)->s_lock); |
119 | UFSD("EXIT\n"); | 119 | UFSD("EXIT\n"); |
120 | } | 120 | } |
121 | 121 | ||
@@ -193,7 +193,7 @@ struct inode *ufs_new_inode(struct inode *dir, umode_t mode) | |||
193 | sbi = UFS_SB(sb); | 193 | sbi = UFS_SB(sb); |
194 | uspi = sbi->s_uspi; | 194 | uspi = sbi->s_uspi; |
195 | 195 | ||
196 | lock_ufs(sb); | 196 | mutex_lock(&sbi->s_lock); |
197 | 197 | ||
198 | /* | 198 | /* |
199 | * Try to place the inode in its parent directory | 199 | * Try to place the inode in its parent directory |
@@ -331,21 +331,21 @@ cg_found: | |||
331 | sync_dirty_buffer(bh); | 331 | sync_dirty_buffer(bh); |
332 | brelse(bh); | 332 | brelse(bh); |
333 | } | 333 | } |
334 | unlock_ufs(sb); | 334 | mutex_unlock(&sbi->s_lock); |
335 | 335 | ||
336 | UFSD("allocating inode %lu\n", inode->i_ino); | 336 | UFSD("allocating inode %lu\n", inode->i_ino); |
337 | UFSD("EXIT\n"); | 337 | UFSD("EXIT\n"); |
338 | return inode; | 338 | return inode; |
339 | 339 | ||
340 | fail_remove_inode: | 340 | fail_remove_inode: |
341 | unlock_ufs(sb); | 341 | mutex_unlock(&sbi->s_lock); |
342 | clear_nlink(inode); | 342 | clear_nlink(inode); |
343 | unlock_new_inode(inode); | 343 | unlock_new_inode(inode); |
344 | iput(inode); | 344 | iput(inode); |
345 | UFSD("EXIT (FAILED): err %d\n", err); | 345 | UFSD("EXIT (FAILED): err %d\n", err); |
346 | return ERR_PTR(err); | 346 | return ERR_PTR(err); |
347 | failed: | 347 | failed: |
348 | unlock_ufs(sb); | 348 | mutex_unlock(&sbi->s_lock); |
349 | make_bad_inode(inode); | 349 | make_bad_inode(inode); |
350 | iput (inode); | 350 | iput (inode); |
351 | UFSD("EXIT (FAILED): err %d\n", err); | 351 | UFSD("EXIT (FAILED): err %d\n", err); |
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c index 99aaf5c9bf4d..f913a6924b23 100644 --- a/fs/ufs/inode.c +++ b/fs/ufs/inode.c | |||
@@ -903,6 +903,9 @@ void ufs_evict_inode(struct inode * inode) | |||
903 | invalidate_inode_buffers(inode); | 903 | invalidate_inode_buffers(inode); |
904 | clear_inode(inode); | 904 | clear_inode(inode); |
905 | 905 | ||
906 | if (want_delete) | 906 | if (want_delete) { |
907 | lock_ufs(inode->i_sb); | ||
907 | ufs_free_inode(inode); | 908 | ufs_free_inode(inode); |
909 | unlock_ufs(inode->i_sb); | ||
910 | } | ||
908 | } | 911 | } |
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index f773deb1d2e3..47966554317c 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
@@ -56,11 +56,9 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, unsi | |||
56 | if (dentry->d_name.len > UFS_MAXNAMLEN) | 56 | if (dentry->d_name.len > UFS_MAXNAMLEN) |
57 | return ERR_PTR(-ENAMETOOLONG); | 57 | return ERR_PTR(-ENAMETOOLONG); |
58 | 58 | ||
59 | lock_ufs(dir->i_sb); | ||
60 | ino = ufs_inode_by_name(dir, &dentry->d_name); | 59 | ino = ufs_inode_by_name(dir, &dentry->d_name); |
61 | if (ino) | 60 | if (ino) |
62 | inode = ufs_iget(dir->i_sb, ino); | 61 | inode = ufs_iget(dir->i_sb, ino); |
63 | unlock_ufs(dir->i_sb); | ||
64 | return d_splice_alias(inode, dentry); | 62 | return d_splice_alias(inode, dentry); |
65 | } | 63 | } |
66 | 64 | ||
@@ -76,24 +74,16 @@ static int ufs_create (struct inode * dir, struct dentry * dentry, umode_t mode, | |||
76 | bool excl) | 74 | bool excl) |
77 | { | 75 | { |
78 | struct inode *inode; | 76 | struct inode *inode; |
79 | int err; | ||
80 | |||
81 | UFSD("BEGIN\n"); | ||
82 | 77 | ||
83 | inode = ufs_new_inode(dir, mode); | 78 | inode = ufs_new_inode(dir, mode); |
84 | err = PTR_ERR(inode); | 79 | if (IS_ERR(inode)) |
80 | return PTR_ERR(inode); | ||
85 | 81 | ||
86 | if (!IS_ERR(inode)) { | 82 | inode->i_op = &ufs_file_inode_operations; |
87 | inode->i_op = &ufs_file_inode_operations; | 83 | inode->i_fop = &ufs_file_operations; |
88 | inode->i_fop = &ufs_file_operations; | 84 | inode->i_mapping->a_ops = &ufs_aops; |
89 | inode->i_mapping->a_ops = &ufs_aops; | 85 | mark_inode_dirty(inode); |
90 | mark_inode_dirty(inode); | 86 | return ufs_add_nondir(dentry, inode); |
91 | lock_ufs(dir->i_sb); | ||
92 | err = ufs_add_nondir(dentry, inode); | ||
93 | unlock_ufs(dir->i_sb); | ||
94 | } | ||
95 | UFSD("END: err=%d\n", err); | ||
96 | return err; | ||
97 | } | 87 | } |
98 | 88 | ||
99 | static int ufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) | 89 | static int ufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) |
@@ -110,9 +100,7 @@ static int ufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev | |||
110 | init_special_inode(inode, mode, rdev); | 100 | init_special_inode(inode, mode, rdev); |
111 | ufs_set_inode_dev(inode->i_sb, UFS_I(inode), rdev); | 101 | ufs_set_inode_dev(inode->i_sb, UFS_I(inode), rdev); |
112 | mark_inode_dirty(inode); | 102 | mark_inode_dirty(inode); |
113 | lock_ufs(dir->i_sb); | ||
114 | err = ufs_add_nondir(dentry, inode); | 103 | err = ufs_add_nondir(dentry, inode); |
115 | unlock_ufs(dir->i_sb); | ||
116 | } | 104 | } |
117 | return err; | 105 | return err; |
118 | } | 106 | } |
@@ -121,19 +109,18 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, | |||
121 | const char * symname) | 109 | const char * symname) |
122 | { | 110 | { |
123 | struct super_block * sb = dir->i_sb; | 111 | struct super_block * sb = dir->i_sb; |
124 | int err = -ENAMETOOLONG; | 112 | int err; |
125 | unsigned l = strlen(symname)+1; | 113 | unsigned l = strlen(symname)+1; |
126 | struct inode * inode; | 114 | struct inode * inode; |
127 | 115 | ||
128 | if (l > sb->s_blocksize) | 116 | if (l > sb->s_blocksize) |
129 | goto out_notlocked; | 117 | return -ENAMETOOLONG; |
130 | 118 | ||
131 | inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); | 119 | inode = ufs_new_inode(dir, S_IFLNK | S_IRWXUGO); |
132 | err = PTR_ERR(inode); | 120 | err = PTR_ERR(inode); |
133 | if (IS_ERR(inode)) | 121 | if (IS_ERR(inode)) |
134 | goto out_notlocked; | 122 | return err; |
135 | 123 | ||
136 | lock_ufs(dir->i_sb); | ||
137 | if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) { | 124 | if (l > UFS_SB(sb)->s_uspi->s_maxsymlinklen) { |
138 | /* slow symlink */ | 125 | /* slow symlink */ |
139 | inode->i_op = &ufs_symlink_inode_operations; | 126 | inode->i_op = &ufs_symlink_inode_operations; |
@@ -150,17 +137,13 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry, | |||
150 | } | 137 | } |
151 | mark_inode_dirty(inode); | 138 | mark_inode_dirty(inode); |
152 | 139 | ||
153 | err = ufs_add_nondir(dentry, inode); | 140 | return ufs_add_nondir(dentry, inode); |
154 | out: | ||
155 | unlock_ufs(dir->i_sb); | ||
156 | out_notlocked: | ||
157 | return err; | ||
158 | 141 | ||
159 | out_fail: | 142 | out_fail: |
160 | inode_dec_link_count(inode); | 143 | inode_dec_link_count(inode); |
161 | unlock_new_inode(inode); | 144 | unlock_new_inode(inode); |
162 | iput(inode); | 145 | iput(inode); |
163 | goto out; | 146 | return err; |
164 | } | 147 | } |
165 | 148 | ||
166 | static int ufs_link (struct dentry * old_dentry, struct inode * dir, | 149 | static int ufs_link (struct dentry * old_dentry, struct inode * dir, |
@@ -169,14 +152,16 @@ static int ufs_link (struct dentry * old_dentry, struct inode * dir, | |||
169 | struct inode *inode = d_inode(old_dentry); | 152 | struct inode *inode = d_inode(old_dentry); |
170 | int error; | 153 | int error; |
171 | 154 | ||
172 | lock_ufs(dir->i_sb); | ||
173 | |||
174 | inode->i_ctime = CURRENT_TIME_SEC; | 155 | inode->i_ctime = CURRENT_TIME_SEC; |
175 | inode_inc_link_count(inode); | 156 | inode_inc_link_count(inode); |
176 | ihold(inode); | 157 | ihold(inode); |
177 | 158 | ||
178 | error = ufs_add_nondir(dentry, inode); | 159 | error = ufs_add_link(dentry, inode); |
179 | unlock_ufs(dir->i_sb); | 160 | if (error) { |
161 | inode_dec_link_count(inode); | ||
162 | iput(inode); | ||
163 | } else | ||
164 | d_instantiate(dentry, inode); | ||
180 | return error; | 165 | return error; |
181 | } | 166 | } |
182 | 167 | ||
@@ -185,9 +170,12 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) | |||
185 | struct inode * inode; | 170 | struct inode * inode; |
186 | int err; | 171 | int err; |
187 | 172 | ||
173 | inode_inc_link_count(dir); | ||
174 | |||
188 | inode = ufs_new_inode(dir, S_IFDIR|mode); | 175 | inode = ufs_new_inode(dir, S_IFDIR|mode); |
176 | err = PTR_ERR(inode); | ||
189 | if (IS_ERR(inode)) | 177 | if (IS_ERR(inode)) |
190 | return PTR_ERR(inode); | 178 | goto out_dir; |
191 | 179 | ||
192 | inode->i_op = &ufs_dir_inode_operations; | 180 | inode->i_op = &ufs_dir_inode_operations; |
193 | inode->i_fop = &ufs_dir_operations; | 181 | inode->i_fop = &ufs_dir_operations; |
@@ -195,9 +183,6 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) | |||
195 | 183 | ||
196 | inode_inc_link_count(inode); | 184 | inode_inc_link_count(inode); |
197 | 185 | ||
198 | lock_ufs(dir->i_sb); | ||
199 | inode_inc_link_count(dir); | ||
200 | |||
201 | err = ufs_make_empty(inode, dir); | 186 | err = ufs_make_empty(inode, dir); |
202 | if (err) | 187 | if (err) |
203 | goto out_fail; | 188 | goto out_fail; |
@@ -205,20 +190,19 @@ static int ufs_mkdir(struct inode * dir, struct dentry * dentry, umode_t mode) | |||
205 | err = ufs_add_link(dentry, inode); | 190 | err = ufs_add_link(dentry, inode); |
206 | if (err) | 191 | if (err) |
207 | goto out_fail; | 192 | goto out_fail; |
208 | unlock_ufs(dir->i_sb); | ||
209 | 193 | ||
194 | unlock_new_inode(inode); | ||
210 | d_instantiate(dentry, inode); | 195 | d_instantiate(dentry, inode); |
211 | out: | 196 | return 0; |
212 | return err; | ||
213 | 197 | ||
214 | out_fail: | 198 | out_fail: |
215 | inode_dec_link_count(inode); | 199 | inode_dec_link_count(inode); |
216 | inode_dec_link_count(inode); | 200 | inode_dec_link_count(inode); |
217 | unlock_new_inode(inode); | 201 | unlock_new_inode(inode); |
218 | iput (inode); | 202 | iput (inode); |
203 | out_dir: | ||
219 | inode_dec_link_count(dir); | 204 | inode_dec_link_count(dir); |
220 | unlock_ufs(dir->i_sb); | 205 | return err; |
221 | goto out; | ||
222 | } | 206 | } |
223 | 207 | ||
224 | static int ufs_unlink(struct inode *dir, struct dentry *dentry) | 208 | static int ufs_unlink(struct inode *dir, struct dentry *dentry) |
@@ -248,7 +232,6 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry) | |||
248 | struct inode * inode = d_inode(dentry); | 232 | struct inode * inode = d_inode(dentry); |
249 | int err= -ENOTEMPTY; | 233 | int err= -ENOTEMPTY; |
250 | 234 | ||
251 | lock_ufs(dir->i_sb); | ||
252 | if (ufs_empty_dir (inode)) { | 235 | if (ufs_empty_dir (inode)) { |
253 | err = ufs_unlink(dir, dentry); | 236 | err = ufs_unlink(dir, dentry); |
254 | if (!err) { | 237 | if (!err) { |
@@ -257,7 +240,6 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry) | |||
257 | inode_dec_link_count(dir); | 240 | inode_dec_link_count(dir); |
258 | } | 241 | } |
259 | } | 242 | } |
260 | unlock_ufs(dir->i_sb); | ||
261 | return err; | 243 | return err; |
262 | } | 244 | } |
263 | 245 | ||
@@ -295,7 +277,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
295 | new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); | 277 | new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); |
296 | if (!new_de) | 278 | if (!new_de) |
297 | goto out_dir; | 279 | goto out_dir; |
298 | ufs_set_link(new_dir, new_de, new_page, old_inode); | 280 | ufs_set_link(new_dir, new_de, new_page, old_inode, 1); |
299 | new_inode->i_ctime = CURRENT_TIME_SEC; | 281 | new_inode->i_ctime = CURRENT_TIME_SEC; |
300 | if (dir_de) | 282 | if (dir_de) |
301 | drop_nlink(new_inode); | 283 | drop_nlink(new_inode); |
@@ -318,7 +300,12 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
318 | mark_inode_dirty(old_inode); | 300 | mark_inode_dirty(old_inode); |
319 | 301 | ||
320 | if (dir_de) { | 302 | if (dir_de) { |
321 | ufs_set_link(old_inode, dir_de, dir_page, new_dir); | 303 | if (old_dir != new_dir) |
304 | ufs_set_link(old_inode, dir_de, dir_page, new_dir, 0); | ||
305 | else { | ||
306 | kunmap(dir_page); | ||
307 | page_cache_release(dir_page); | ||
308 | } | ||
322 | inode_dec_link_count(old_dir); | 309 | inode_dec_link_count(old_dir); |
323 | } | 310 | } |
324 | return 0; | 311 | return 0; |
diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 098508a93c7b..250579a80d90 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c | |||
@@ -695,6 +695,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait) | |||
695 | unsigned flags; | 695 | unsigned flags; |
696 | 696 | ||
697 | lock_ufs(sb); | 697 | lock_ufs(sb); |
698 | mutex_lock(&UFS_SB(sb)->s_lock); | ||
698 | 699 | ||
699 | UFSD("ENTER\n"); | 700 | UFSD("ENTER\n"); |
700 | 701 | ||
@@ -712,6 +713,7 @@ static int ufs_sync_fs(struct super_block *sb, int wait) | |||
712 | ufs_put_cstotal(sb); | 713 | ufs_put_cstotal(sb); |
713 | 714 | ||
714 | UFSD("EXIT\n"); | 715 | UFSD("EXIT\n"); |
716 | mutex_unlock(&UFS_SB(sb)->s_lock); | ||
715 | unlock_ufs(sb); | 717 | unlock_ufs(sb); |
716 | 718 | ||
717 | return 0; | 719 | return 0; |
@@ -800,6 +802,7 @@ static int ufs_fill_super(struct super_block *sb, void *data, int silent) | |||
800 | UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY)); | 802 | UFSD("flag %u\n", (int)(sb->s_flags & MS_RDONLY)); |
801 | 803 | ||
802 | mutex_init(&sbi->mutex); | 804 | mutex_init(&sbi->mutex); |
805 | mutex_init(&sbi->s_lock); | ||
803 | spin_lock_init(&sbi->work_lock); | 806 | spin_lock_init(&sbi->work_lock); |
804 | INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs); | 807 | INIT_DELAYED_WORK(&sbi->sync_work, delayed_sync_fs); |
805 | /* | 808 | /* |
@@ -1278,6 +1281,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) | |||
1278 | 1281 | ||
1279 | sync_filesystem(sb); | 1282 | sync_filesystem(sb); |
1280 | lock_ufs(sb); | 1283 | lock_ufs(sb); |
1284 | mutex_lock(&UFS_SB(sb)->s_lock); | ||
1281 | uspi = UFS_SB(sb)->s_uspi; | 1285 | uspi = UFS_SB(sb)->s_uspi; |
1282 | flags = UFS_SB(sb)->s_flags; | 1286 | flags = UFS_SB(sb)->s_flags; |
1283 | usb1 = ubh_get_usb_first(uspi); | 1287 | usb1 = ubh_get_usb_first(uspi); |
@@ -1291,6 +1295,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) | |||
1291 | new_mount_opt = 0; | 1295 | new_mount_opt = 0; |
1292 | ufs_set_opt (new_mount_opt, ONERROR_LOCK); | 1296 | ufs_set_opt (new_mount_opt, ONERROR_LOCK); |
1293 | if (!ufs_parse_options (data, &new_mount_opt)) { | 1297 | if (!ufs_parse_options (data, &new_mount_opt)) { |
1298 | mutex_unlock(&UFS_SB(sb)->s_lock); | ||
1294 | unlock_ufs(sb); | 1299 | unlock_ufs(sb); |
1295 | return -EINVAL; | 1300 | return -EINVAL; |
1296 | } | 1301 | } |
@@ -1298,12 +1303,14 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) | |||
1298 | new_mount_opt |= ufstype; | 1303 | new_mount_opt |= ufstype; |
1299 | } else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) { | 1304 | } else if ((new_mount_opt & UFS_MOUNT_UFSTYPE) != ufstype) { |
1300 | pr_err("ufstype can't be changed during remount\n"); | 1305 | pr_err("ufstype can't be changed during remount\n"); |
1306 | mutex_unlock(&UFS_SB(sb)->s_lock); | ||
1301 | unlock_ufs(sb); | 1307 | unlock_ufs(sb); |
1302 | return -EINVAL; | 1308 | return -EINVAL; |
1303 | } | 1309 | } |
1304 | 1310 | ||
1305 | if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { | 1311 | if ((*mount_flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) { |
1306 | UFS_SB(sb)->s_mount_opt = new_mount_opt; | 1312 | UFS_SB(sb)->s_mount_opt = new_mount_opt; |
1313 | mutex_unlock(&UFS_SB(sb)->s_lock); | ||
1307 | unlock_ufs(sb); | 1314 | unlock_ufs(sb); |
1308 | return 0; | 1315 | return 0; |
1309 | } | 1316 | } |
@@ -1327,6 +1334,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) | |||
1327 | */ | 1334 | */ |
1328 | #ifndef CONFIG_UFS_FS_WRITE | 1335 | #ifndef CONFIG_UFS_FS_WRITE |
1329 | pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n"); | 1336 | pr_err("ufs was compiled with read-only support, can't be mounted as read-write\n"); |
1337 | mutex_unlock(&UFS_SB(sb)->s_lock); | ||
1330 | unlock_ufs(sb); | 1338 | unlock_ufs(sb); |
1331 | return -EINVAL; | 1339 | return -EINVAL; |
1332 | #else | 1340 | #else |
@@ -1336,11 +1344,13 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) | |||
1336 | ufstype != UFS_MOUNT_UFSTYPE_SUNx86 && | 1344 | ufstype != UFS_MOUNT_UFSTYPE_SUNx86 && |
1337 | ufstype != UFS_MOUNT_UFSTYPE_UFS2) { | 1345 | ufstype != UFS_MOUNT_UFSTYPE_UFS2) { |
1338 | pr_err("this ufstype is read-only supported\n"); | 1346 | pr_err("this ufstype is read-only supported\n"); |
1347 | mutex_unlock(&UFS_SB(sb)->s_lock); | ||
1339 | unlock_ufs(sb); | 1348 | unlock_ufs(sb); |
1340 | return -EINVAL; | 1349 | return -EINVAL; |
1341 | } | 1350 | } |
1342 | if (!ufs_read_cylinder_structures(sb)) { | 1351 | if (!ufs_read_cylinder_structures(sb)) { |
1343 | pr_err("failed during remounting\n"); | 1352 | pr_err("failed during remounting\n"); |
1353 | mutex_unlock(&UFS_SB(sb)->s_lock); | ||
1344 | unlock_ufs(sb); | 1354 | unlock_ufs(sb); |
1345 | return -EPERM; | 1355 | return -EPERM; |
1346 | } | 1356 | } |
@@ -1348,6 +1358,7 @@ static int ufs_remount (struct super_block *sb, int *mount_flags, char *data) | |||
1348 | #endif | 1358 | #endif |
1349 | } | 1359 | } |
1350 | UFS_SB(sb)->s_mount_opt = new_mount_opt; | 1360 | UFS_SB(sb)->s_mount_opt = new_mount_opt; |
1361 | mutex_unlock(&UFS_SB(sb)->s_lock); | ||
1351 | unlock_ufs(sb); | 1362 | unlock_ufs(sb); |
1352 | return 0; | 1363 | return 0; |
1353 | } | 1364 | } |
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h index 2a07396d5f9e..2e31ea2e35a3 100644 --- a/fs/ufs/ufs.h +++ b/fs/ufs/ufs.h | |||
@@ -30,6 +30,7 @@ struct ufs_sb_info { | |||
30 | int work_queued; /* non-zero if the delayed work is queued */ | 30 | int work_queued; /* non-zero if the delayed work is queued */ |
31 | struct delayed_work sync_work; /* FS sync delayed work */ | 31 | struct delayed_work sync_work; /* FS sync delayed work */ |
32 | spinlock_t work_lock; /* protects sync_work and work_queued */ | 32 | spinlock_t work_lock; /* protects sync_work and work_queued */ |
33 | struct mutex s_lock; | ||
33 | }; | 34 | }; |
34 | 35 | ||
35 | struct ufs_inode_info { | 36 | struct ufs_inode_info { |
@@ -105,7 +106,7 @@ extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page | |||
105 | extern int ufs_empty_dir (struct inode *); | 106 | extern int ufs_empty_dir (struct inode *); |
106 | extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **); | 107 | extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **); |
107 | extern void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, | 108 | extern void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de, |
108 | struct page *page, struct inode *inode); | 109 | struct page *page, struct inode *inode, bool update_times); |
109 | 110 | ||
110 | /* file.c */ | 111 | /* file.c */ |
111 | extern const struct inode_operations ufs_file_inode_operations; | 112 | extern const struct inode_operations ufs_file_inode_operations; |
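The ufs changes above move per-superblock serialisation of block and inode allocation from the old lock_ufs()/unlock_ufs() pair to the new s_lock mutex in ufs_sb_info, initialised in ufs_fill_super(); the resulting pattern, repeated throughout balloc.c and ialloc.c, is simply:

    /* sketch of the new locking pattern around allocator state */
    struct ufs_sb_info *sbi = UFS_SB(sb);

    mutex_lock(&sbi->s_lock);
    /* ... manipulate cylinder-group and bitmap state ... */
    mutex_unlock(&sbi->s_lock);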
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 874507de3485..f0e8249722d4 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
@@ -577,6 +577,13 @@ restart: | |||
577 | if (error) | 577 | if (error) |
578 | return error; | 578 | return error; |
579 | 579 | ||
580 | /* For changing security info in file_remove_privs() we need i_mutex */ | ||
581 | if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) { | ||
582 | xfs_rw_iunlock(ip, *iolock); | ||
583 | *iolock = XFS_IOLOCK_EXCL; | ||
584 | xfs_rw_ilock(ip, *iolock); | ||
585 | goto restart; | ||
586 | } | ||
580 | /* | 587 | /* |
581 | * If the offset is beyond the size of the file, we need to zero any | 588 | * If the offset is beyond the size of the file, we need to zero any |
582 | * blocks that fall between the existing EOF and the start of this | 589 | * blocks that fall between the existing EOF and the start of this |
@@ -637,7 +644,9 @@ restart: | |||
637 | * setgid bits if the process is not being run by root. This keeps | 644 | * setgid bits if the process is not being run by root. This keeps |
638 | * people from modifying setuid and setgid binaries. | 645 | * people from modifying setuid and setgid binaries. |
639 | */ | 646 | */ |
640 | return file_remove_suid(file); | 647 | if (!IS_NOSEC(inode)) |
648 | return file_remove_privs(file); | ||
649 | return 0; | ||
641 | } | 650 | } |
642 | 651 | ||
643 | /* | 652 | /* |
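file_remove_suid() becomes file_remove_privs(), reflecting that it strips file capabilities as well as setuid/setgid bits, and the xfs hunk additionally skips the call for IS_NOSEC() inodes. A generic write path would use it roughly like this (a sketch; foofs_prepare_write() is a made-up name and the i_mutex requirement is assumed from the comment above):

    static int foofs_prepare_write(struct file *file)
    {
            int err;

            if (IS_NOSEC(file_inode(file)))
                    return 0;               /* nothing to strip */

            err = file_remove_privs(file);  /* drops setuid/setgid and file capabilities */
            if (err)
                    return err;
            return file_update_time(file);
    }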
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index df334cbacc6d..d2d50249b7b2 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -160,6 +160,7 @@ struct dentry_operations { | |||
160 | char *(*d_dname)(struct dentry *, char *, int); | 160 | char *(*d_dname)(struct dentry *, char *, int); |
161 | struct vfsmount *(*d_automount)(struct path *); | 161 | struct vfsmount *(*d_automount)(struct path *); |
162 | int (*d_manage)(struct dentry *, bool); | 162 | int (*d_manage)(struct dentry *, bool); |
163 | struct inode *(*d_select_inode)(struct dentry *, unsigned); | ||
163 | } ____cacheline_aligned; | 164 | } ____cacheline_aligned; |
164 | 165 | ||
165 | /* | 166 | /* |
@@ -225,6 +226,7 @@ struct dentry_operations { | |||
225 | 226 | ||
226 | #define DCACHE_MAY_FREE 0x00800000 | 227 | #define DCACHE_MAY_FREE 0x00800000 |
227 | #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ | 228 | #define DCACHE_FALLTHRU 0x01000000 /* Fall through to lower layer */ |
229 | #define DCACHE_OP_SELECT_INODE 0x02000000 /* Unioned entry: dcache op selects inode */ | ||
228 | 230 | ||
229 | extern seqlock_t rename_lock; | 231 | extern seqlock_t rename_lock; |
230 | 232 | ||
@@ -505,6 +507,11 @@ static inline bool d_really_is_positive(const struct dentry *dentry) | |||
505 | return dentry->d_inode != NULL; | 507 | return dentry->d_inode != NULL; |
506 | } | 508 | } |
507 | 509 | ||
510 | static inline int simple_positive(struct dentry *dentry) | ||
511 | { | ||
512 | return d_really_is_positive(dentry) && !d_unhashed(dentry); | ||
513 | } | ||
514 | |||
508 | extern void d_set_fallthru(struct dentry *dentry); | 515 | extern void d_set_fallthru(struct dentry *dentry); |
509 | 516 | ||
510 | static inline bool d_is_fallthru(const struct dentry *dentry) | 517 | static inline bool d_is_fallthru(const struct dentry *dentry) |
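
The dcache.h hunks add a d_select_inode dentry operation (with its DCACHE_OP_SELECT_INODE flag) and promote the "positive and still hashed" test to a shared simple_positive() helper. A sketch of the helper in the kind of pseudo-filesystem cleanup it is meant for, mirroring the security/inode.c conversion at the end of this section; example_remove() is an illustrative name:

    static void example_remove(struct dentry *parent, struct dentry *dentry)
    {
            mutex_lock(&d_inode(parent)->i_mutex);
            if (simple_positive(dentry)) {
                    if (d_is_dir(dentry))
                            simple_rmdir(d_inode(parent), dentry);
                    else
                            simple_unlink(d_inode(parent), dentry);
                    dput(dentry);
            }
            mutex_unlock(&d_inode(parent)->i_mutex);
    }
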
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 230f87bdf5ad..fbb88740634a 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h | |||
@@ -47,6 +47,9 @@ struct files_struct { | |||
47 | * read mostly part | 47 | * read mostly part |
48 | */ | 48 | */ |
49 | atomic_t count; | 49 | atomic_t count; |
50 | bool resize_in_progress; | ||
51 | wait_queue_head_t resize_wait; | ||
52 | |||
50 | struct fdtable __rcu *fdt; | 53 | struct fdtable __rcu *fdt; |
51 | struct fdtable fdtab; | 54 | struct fdtable fdtab; |
52 | /* | 55 | /* |
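
files_struct gains a resize_in_progress flag and a resize_wait queue. The sketch below shows only the generic flag-plus-waitqueue handshake these two fields enable; it is not the locking actually used by fs/file.c, and the example_* names are placeholders:

    /* Resizer: announce the resize, publish the new table, then wake waiters. */
    static void example_begin_resize(struct files_struct *files)
    {
            files->resize_in_progress = true;
            /* ... allocate and install the larger fdtable ... */
            files->resize_in_progress = false;
            wake_up_all(&files->resize_wait);
    }

    /* Anyone who must not race with a resize sleeps until the flag clears. */
    static void example_wait_for_resize(struct files_struct *files)
    {
            wait_event(files->resize_wait, !files->resize_in_progress);
    }

The waitqueue itself would be set up once with init_waitqueue_head(&files->resize_wait) when the files_struct is created.
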
diff --git a/include/linux/fs.h b/include/linux/fs.h index 8a81fcbb0074..a0653e560c26 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -1654,7 +1654,6 @@ struct inode_operations { | |||
1654 | int (*set_acl)(struct inode *, struct posix_acl *, int); | 1654 | int (*set_acl)(struct inode *, struct posix_acl *, int); |
1655 | 1655 | ||
1656 | /* WARNING: probably going away soon, do not use! */ | 1656 | /* WARNING: probably going away soon, do not use! */ |
1657 | int (*dentry_open)(struct dentry *, struct file *, const struct cred *); | ||
1658 | } ____cacheline_aligned; | 1657 | } ____cacheline_aligned; |
1659 | 1658 | ||
1660 | ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, | 1659 | ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, |
@@ -2213,7 +2212,6 @@ extern struct file *file_open_name(struct filename *, int, umode_t); | |||
2213 | extern struct file *filp_open(const char *, int, umode_t); | 2212 | extern struct file *filp_open(const char *, int, umode_t); |
2214 | extern struct file *file_open_root(struct dentry *, struct vfsmount *, | 2213 | extern struct file *file_open_root(struct dentry *, struct vfsmount *, |
2215 | const char *, int); | 2214 | const char *, int); |
2216 | extern int vfs_open(const struct path *, struct file *, const struct cred *); | ||
2217 | extern struct file * dentry_open(const struct path *, int, const struct cred *); | 2215 | extern struct file * dentry_open(const struct path *, int, const struct cred *); |
2218 | extern int filp_close(struct file *, fl_owner_t id); | 2216 | extern int filp_close(struct file *, fl_owner_t id); |
2219 | 2217 | ||
@@ -2530,6 +2528,8 @@ extern struct file * open_exec(const char *); | |||
2530 | extern int is_subdir(struct dentry *, struct dentry *); | 2528 | extern int is_subdir(struct dentry *, struct dentry *); |
2531 | extern int path_is_under(struct path *, struct path *); | 2529 | extern int path_is_under(struct path *, struct path *); |
2532 | 2530 | ||
2531 | extern char *file_path(struct file *, char *, int); | ||
2532 | |||
2533 | #include <linux/err.h> | 2533 | #include <linux/err.h> |
2534 | 2534 | ||
2535 | /* needed for stackable file system support */ | 2535 | /* needed for stackable file system support */ |
@@ -2581,7 +2581,12 @@ extern struct inode *new_inode_pseudo(struct super_block *sb); | |||
2581 | extern struct inode *new_inode(struct super_block *sb); | 2581 | extern struct inode *new_inode(struct super_block *sb); |
2582 | extern void free_inode_nonrcu(struct inode *inode); | 2582 | extern void free_inode_nonrcu(struct inode *inode); |
2583 | extern int should_remove_suid(struct dentry *); | 2583 | extern int should_remove_suid(struct dentry *); |
2584 | extern int file_remove_suid(struct file *); | 2584 | extern int file_remove_privs(struct file *); |
2585 | extern int dentry_needs_remove_privs(struct dentry *dentry); | ||
2586 | static inline int file_needs_remove_privs(struct file *file) | ||
2587 | { | ||
2588 | return dentry_needs_remove_privs(file->f_path.dentry); | ||
2589 | } | ||
2585 | 2590 | ||
2586 | extern void __insert_inode_hash(struct inode *, unsigned long hashval); | 2591 | extern void __insert_inode_hash(struct inode *, unsigned long hashval); |
2587 | static inline void insert_inode_hash(struct inode *inode) | 2592 | static inline void insert_inode_hash(struct inode *inode) |
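
file_remove_suid() is renamed to file_remove_privs(), and a separate dentry_needs_remove_privs()/file_needs_remove_privs() query is exported so callers can find out whether privilege stripping is needed before committing to it, which is what lets the xfs hunk above upgrade its lock first. A hedged sketch of the intended call pattern in a write path (not taken from any particular filesystem):

    static int example_prepare_write(struct file *file)
    {
            if (!file_needs_remove_privs(file))
                    return 0;       /* nothing to strip, stay on the fast path */
            /* ... take whatever exclusive locks the fs needs ... */
            return file_remove_privs(file);
    }
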
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h index 771484993ca7..604e1526cd00 100644 --- a/include/linux/fscache-cache.h +++ b/include/linux/fscache-cache.h | |||
@@ -74,6 +74,7 @@ extern wait_queue_head_t fscache_cache_cleared_wq; | |||
74 | */ | 74 | */ |
75 | typedef void (*fscache_operation_release_t)(struct fscache_operation *op); | 75 | typedef void (*fscache_operation_release_t)(struct fscache_operation *op); |
76 | typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); | 76 | typedef void (*fscache_operation_processor_t)(struct fscache_operation *op); |
77 | typedef void (*fscache_operation_cancel_t)(struct fscache_operation *op); | ||
77 | 78 | ||
78 | enum fscache_operation_state { | 79 | enum fscache_operation_state { |
79 | FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */ | 80 | FSCACHE_OP_ST_BLANK, /* Op is not yet submitted */ |
@@ -109,6 +110,9 @@ struct fscache_operation { | |||
109 | * the op in a non-pool thread */ | 110 | * the op in a non-pool thread */ |
110 | fscache_operation_processor_t processor; | 111 | fscache_operation_processor_t processor; |
111 | 112 | ||
113 | /* Operation cancellation cleanup (optional) */ | ||
114 | fscache_operation_cancel_t cancel; | ||
115 | |||
112 | /* operation releaser */ | 116 | /* operation releaser */ |
113 | fscache_operation_release_t release; | 117 | fscache_operation_release_t release; |
114 | }; | 118 | }; |
@@ -119,33 +123,17 @@ extern void fscache_op_work_func(struct work_struct *work); | |||
119 | extern void fscache_enqueue_operation(struct fscache_operation *); | 123 | extern void fscache_enqueue_operation(struct fscache_operation *); |
120 | extern void fscache_op_complete(struct fscache_operation *, bool); | 124 | extern void fscache_op_complete(struct fscache_operation *, bool); |
121 | extern void fscache_put_operation(struct fscache_operation *); | 125 | extern void fscache_put_operation(struct fscache_operation *); |
122 | 126 | extern void fscache_operation_init(struct fscache_operation *, | |
123 | /** | 127 | fscache_operation_processor_t, |
124 | * fscache_operation_init - Do basic initialisation of an operation | 128 | fscache_operation_cancel_t, |
125 | * @op: The operation to initialise | 129 | fscache_operation_release_t); |
126 | * @release: The release function to assign | ||
127 | * | ||
128 | * Do basic initialisation of an operation. The caller must still set flags, | ||
129 | * object and processor if needed. | ||
130 | */ | ||
131 | static inline void fscache_operation_init(struct fscache_operation *op, | ||
132 | fscache_operation_processor_t processor, | ||
133 | fscache_operation_release_t release) | ||
134 | { | ||
135 | INIT_WORK(&op->work, fscache_op_work_func); | ||
136 | atomic_set(&op->usage, 1); | ||
137 | op->state = FSCACHE_OP_ST_INITIALISED; | ||
138 | op->debug_id = atomic_inc_return(&fscache_op_debug_id); | ||
139 | op->processor = processor; | ||
140 | op->release = release; | ||
141 | INIT_LIST_HEAD(&op->pend_link); | ||
142 | } | ||
143 | 130 | ||
144 | /* | 131 | /* |
145 | * data read operation | 132 | * data read operation |
146 | */ | 133 | */ |
147 | struct fscache_retrieval { | 134 | struct fscache_retrieval { |
148 | struct fscache_operation op; | 135 | struct fscache_operation op; |
136 | struct fscache_cookie *cookie; /* The netfs cookie */ | ||
149 | struct address_space *mapping; /* netfs pages */ | 137 | struct address_space *mapping; /* netfs pages */ |
150 | fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ | 138 | fscache_rw_complete_t end_io_func; /* function to call on I/O completion */ |
151 | void *context; /* netfs read context (pinned) */ | 139 | void *context; /* netfs read context (pinned) */ |
@@ -371,6 +359,7 @@ struct fscache_object { | |||
371 | #define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */ | 359 | #define FSCACHE_OBJECT_IS_LOOKED_UP 4 /* T if object has been looked up */ |
372 | #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ | 360 | #define FSCACHE_OBJECT_IS_AVAILABLE 5 /* T if object has become active */ |
373 | #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ | 361 | #define FSCACHE_OBJECT_RETIRED 6 /* T if object was retired on relinquishment */ |
362 | #define FSCACHE_OBJECT_KILLED_BY_CACHE 7 /* T if object was killed by the cache */ | ||
374 | 363 | ||
375 | struct list_head cache_link; /* link in cache->object_list */ | 364 | struct list_head cache_link; /* link in cache->object_list */ |
376 | struct hlist_node cookie_link; /* link in cookie->backing_objects */ | 365 | struct hlist_node cookie_link; /* link in cookie->backing_objects */ |
@@ -410,17 +399,16 @@ static inline bool fscache_object_is_available(struct fscache_object *object) | |||
410 | return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags); | 399 | return test_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags); |
411 | } | 400 | } |
412 | 401 | ||
413 | static inline bool fscache_object_is_active(struct fscache_object *object) | 402 | static inline bool fscache_cache_is_broken(struct fscache_object *object) |
414 | { | 403 | { |
415 | return fscache_object_is_available(object) && | 404 | return test_bit(FSCACHE_IOERROR, &object->cache->flags); |
416 | fscache_object_is_live(object) && | ||
417 | !test_bit(FSCACHE_IOERROR, &object->cache->flags); | ||
418 | } | 405 | } |
419 | 406 | ||
420 | static inline bool fscache_object_is_dead(struct fscache_object *object) | 407 | static inline bool fscache_object_is_active(struct fscache_object *object) |
421 | { | 408 | { |
422 | return fscache_object_is_dying(object) && | 409 | return fscache_object_is_available(object) && |
423 | test_bit(FSCACHE_IOERROR, &object->cache->flags); | 410 | fscache_object_is_live(object) && |
411 | !fscache_cache_is_broken(object); | ||
424 | } | 412 | } |
425 | 413 | ||
426 | /** | 414 | /** |
@@ -551,4 +539,15 @@ extern enum fscache_checkaux fscache_check_aux(struct fscache_object *object, | |||
551 | const void *data, | 539 | const void *data, |
552 | uint16_t datalen); | 540 | uint16_t datalen); |
553 | 541 | ||
542 | extern void fscache_object_retrying_stale(struct fscache_object *object); | ||
543 | |||
544 | enum fscache_why_object_killed { | ||
545 | FSCACHE_OBJECT_IS_STALE, | ||
546 | FSCACHE_OBJECT_NO_SPACE, | ||
547 | FSCACHE_OBJECT_WAS_RETIRED, | ||
548 | FSCACHE_OBJECT_WAS_CULLED, | ||
549 | }; | ||
550 | extern void fscache_object_mark_killed(struct fscache_object *object, | ||
551 | enum fscache_why_object_killed why); | ||
552 | |||
554 | #endif /* _LINUX_FSCACHE_CACHE_H */ | 553 | #endif /* _LINUX_FSCACHE_CACHE_H */ |
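
fscache_operation_init() moves out of line and gains a cancellation callback, matching the new cancel field in struct fscache_operation. A minimal sketch of a cache backend filling in the new argument; my_processor/my_cancel/my_release are placeholder functions, and since the field comment marks cancellation cleanup as optional, passing NULL there is presumably allowed:

    static void my_processor(struct fscache_operation *op)
    {
            /* do the deferred work */
    }

    static void my_cancel(struct fscache_operation *op)
    {
            /* undo bookkeeping if the op is cancelled before it runs */
    }

    static void my_release(struct fscache_operation *op)
    {
            /* free resources when the last reference is put */
    }

    static void example_setup(struct fscache_operation *op)
    {
            fscache_operation_init(op, my_processor, my_cancel, my_release);
    }
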
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index fb0814ca65c7..a6c78e00ea96 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
@@ -671,4 +671,10 @@ static inline int add_to_page_cache(struct page *page, | |||
671 | return error; | 671 | return error; |
672 | } | 672 | } |
673 | 673 | ||
674 | static inline unsigned long dir_pages(struct inode *inode) | ||
675 | { | ||
676 | return (unsigned long)(inode->i_size + PAGE_CACHE_SIZE - 1) >> | ||
677 | PAGE_CACHE_SHIFT; | ||
678 | } | ||
679 | |||
674 | #endif /* _LINUX_PAGEMAP_H */ | 680 | #endif /* _LINUX_PAGEMAP_H */ |
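
dir_pages() centralises the "how many page-cache pages back this directory" rounding that several filesystems open-code. A sketch of the typical consumer loop (example_walk_dir is illustrative):

    static void example_walk_dir(struct inode *dir)
    {
            unsigned long n, npages = dir_pages(dir);

            for (n = 0; n < npages; n++) {
                    /* e.g. read_mapping_page(dir->i_mapping, n, NULL) and
                     * scan the directory entries on that page */
            }
    }
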
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index afbb1fd77c77..912a7c482649 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h | |||
@@ -123,6 +123,7 @@ __printf(2, 3) int seq_printf(struct seq_file *, const char *, ...); | |||
123 | __printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args); | 123 | __printf(2, 0) int seq_vprintf(struct seq_file *, const char *, va_list args); |
124 | 124 | ||
125 | int seq_path(struct seq_file *, const struct path *, const char *); | 125 | int seq_path(struct seq_file *, const struct path *, const char *); |
126 | int seq_file_path(struct seq_file *, struct file *, const char *); | ||
126 | int seq_dentry(struct seq_file *, struct dentry *, const char *); | 127 | int seq_dentry(struct seq_file *, struct dentry *, const char *); |
127 | int seq_path_root(struct seq_file *m, const struct path *path, | 128 | int seq_path_root(struct seq_file *m, const struct path *path, |
128 | const struct path *root, const char *esc); | 129 | const struct path *root, const char *esc); |
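
seq_file_path() is the struct file counterpart of seq_path(), used by the mm/swapfile.c conversion below. A sketch of a show() method using it; that m->private holds a struct file is an assumption of the example:

    static int example_show(struct seq_file *m, void *v)
    {
            struct file *file = m->private;

            seq_file_path(m, file, " \t\n\\");   /* escape whitespace and backslash */
            seq_putc(m, '\n');
            return 0;
    }
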
diff --git a/kernel/events/core.c b/kernel/events/core.c index d1f37ddd1960..e965cfae4207 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -5794,7 +5794,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event) | |||
5794 | * need to add enough zero bytes after the string to handle | 5794 | * need to add enough zero bytes after the string to handle |
5795 | * the 64bit alignment we do later. | 5795 | * the 64bit alignment we do later. |
5796 | */ | 5796 | */ |
5797 | name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64)); | 5797 | name = file_path(file, buf, PATH_MAX - sizeof(u64)); |
5798 | if (IS_ERR(name)) { | 5798 | if (IS_ERR(name)) { |
5799 | name = "//toolong"; | 5799 | name = "//toolong"; |
5800 | goto cpy_name; | 5800 | goto cpy_name; |
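
This and the mm/memory.c hunk below are one-line conversions from d_path(&file->f_path, ...) to the new file_path() helper declared in fs.h above. The helper's body is not part of this excerpt, but from the conversions it is expected to be equivalent to:

    /* Inferred equivalence, not the actual definition from this patch. */
    static inline char *file_path_equiv(struct file *f, char *buf, int buflen)
    {
            return d_path(&f->f_path, buf, buflen);
    }
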
diff --git a/mm/filemap.c b/mm/filemap.c index 11f10efd637c..1283fc825458 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -2563,7 +2563,7 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) | |||
2563 | 2563 | ||
2564 | /* We can write back this queue in page reclaim */ | 2564 | /* We can write back this queue in page reclaim */ |
2565 | current->backing_dev_info = inode_to_bdi(inode); | 2565 | current->backing_dev_info = inode_to_bdi(inode); |
2566 | err = file_remove_suid(file); | 2566 | err = file_remove_privs(file); |
2567 | if (err) | 2567 | if (err) |
2568 | goto out; | 2568 | goto out; |
2569 | 2569 | ||
diff --git a/mm/memory.c b/mm/memory.c index 11b9ca176740..a84fbb772034 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -3726,7 +3726,7 @@ void print_vma_addr(char *prefix, unsigned long ip) | |||
3726 | if (buf) { | 3726 | if (buf) { |
3727 | char *p; | 3727 | char *p; |
3728 | 3728 | ||
3729 | p = d_path(&f->f_path, buf, PAGE_SIZE); | 3729 | p = file_path(f, buf, PAGE_SIZE); |
3730 | if (IS_ERR(p)) | 3730 | if (IS_ERR(p)) |
3731 | p = "?"; | 3731 | p = "?"; |
3732 | printk("%s%s[%lx+%lx]", prefix, kbasename(p), | 3732 | printk("%s%s[%lx+%lx]", prefix, kbasename(p), |
diff --git a/mm/swapfile.c b/mm/swapfile.c index a7e72103f23b..41e4581af7c5 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -2032,7 +2032,7 @@ static int swap_show(struct seq_file *swap, void *v) | |||
2032 | } | 2032 | } |
2033 | 2033 | ||
2034 | file = si->swap_file; | 2034 | file = si->swap_file; |
2035 | len = seq_path(swap, &file->f_path, " \t\n\\"); | 2035 | len = seq_file_path(swap, file, " \t\n\\"); |
2036 | seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", | 2036 | seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", |
2037 | len < 40 ? 40 - len : 1, " ", | 2037 | len < 40 ? 40 - len : 1, " ", |
2038 | S_ISBLK(file_inode(file)->i_mode) ? | 2038 | S_ISBLK(file_inode(file)->i_mode) ? |
diff --git a/net/9p/client.c b/net/9p/client.c index 6f4c4c88db84..498454b3c06c 100644 --- a/net/9p/client.c +++ b/net/9p/client.c | |||
@@ -843,7 +843,8 @@ static struct p9_req_t *p9_client_zc_rpc(struct p9_client *c, int8_t type, | |||
843 | if (err < 0) { | 843 | if (err < 0) { |
844 | if (err == -EIO) | 844 | if (err == -EIO) |
845 | c->status = Disconnected; | 845 | c->status = Disconnected; |
846 | goto reterr; | 846 | if (err != -ERESTARTSYS) |
847 | goto reterr; | ||
847 | } | 848 | } |
848 | if (req->status == REQ_STATUS_ERROR) { | 849 | if (req->status == REQ_STATUS_ERROR) { |
849 | p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err); | 850 | p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err); |
@@ -1582,6 +1583,10 @@ p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err) | |||
1582 | p9_free_req(clnt, req); | 1583 | p9_free_req(clnt, req); |
1583 | break; | 1584 | break; |
1584 | } | 1585 | } |
1586 | if (rsize < count) { | ||
1587 | pr_err("bogus RREAD count (%d > %d)\n", count, rsize); | ||
1588 | count = rsize; | ||
1589 | } | ||
1585 | 1590 | ||
1586 | p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count); | 1591 | p9_debug(P9_DEBUG_9P, "<<< RREAD count %d\n", count); |
1587 | if (!count) { | 1592 | if (!count) { |
@@ -1647,6 +1652,11 @@ p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err) | |||
1647 | if (*err) { | 1652 | if (*err) { |
1648 | trace_9p_protocol_dump(clnt, req->rc); | 1653 | trace_9p_protocol_dump(clnt, req->rc); |
1649 | p9_free_req(clnt, req); | 1654 | p9_free_req(clnt, req); |
1655 | break; | ||
1656 | } | ||
1657 | if (rsize < count) { | ||
1658 | pr_err("bogus RWRITE count (%d > %d)\n", count, rsize); | ||
1659 | count = rsize; | ||
1650 | } | 1660 | } |
1651 | 1661 | ||
1652 | p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count); | 1662 | p9_debug(P9_DEBUG_9P, "<<< RWRITE count %d\n", count); |
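
Both the RREAD and RWRITE reply paths now refuse to trust a count larger than the size actually negotiated for the request. The check has the same shape in both places; factored out here as a sketch (example_clamp_count is illustrative, the real code clamps inline as shown above):

    static int example_clamp_count(int count, int rsize)
    {
            if (rsize < count) {
                    pr_err("bogus reply count (%d > %d)\n", count, rsize);
                    count = rsize;
            }
            return count;
    }
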
diff --git a/security/inode.c b/security/inode.c index 0e37e4fba8fa..16622aef9bde 100644 --- a/security/inode.c +++ b/security/inode.c | |||
@@ -25,11 +25,6 @@ | |||
25 | static struct vfsmount *mount; | 25 | static struct vfsmount *mount; |
26 | static int mount_count; | 26 | static int mount_count; |
27 | 27 | ||
28 | static inline int positive(struct dentry *dentry) | ||
29 | { | ||
30 | return d_really_is_positive(dentry) && !d_unhashed(dentry); | ||
31 | } | ||
32 | |||
33 | static int fill_super(struct super_block *sb, void *data, int silent) | 28 | static int fill_super(struct super_block *sb, void *data, int silent) |
34 | { | 29 | { |
35 | static struct tree_descr files[] = {{""}}; | 30 | static struct tree_descr files[] = {{""}}; |
@@ -201,14 +196,12 @@ void securityfs_remove(struct dentry *dentry) | |||
201 | return; | 196 | return; |
202 | 197 | ||
203 | mutex_lock(&d_inode(parent)->i_mutex); | 198 | mutex_lock(&d_inode(parent)->i_mutex); |
204 | if (positive(dentry)) { | 199 | if (simple_positive(dentry)) { |
205 | if (d_really_is_positive(dentry)) { | 200 | if (d_is_dir(dentry)) |
206 | if (d_is_dir(dentry)) | 201 | simple_rmdir(d_inode(parent), dentry); |
207 | simple_rmdir(d_inode(parent), dentry); | 202 | else |
208 | else | 203 | simple_unlink(d_inode(parent), dentry); |
209 | simple_unlink(d_inode(parent), dentry); | 204 | dput(dentry); |
210 | dput(dentry); | ||
211 | } | ||
212 | } | 205 | } |
213 | mutex_unlock(&d_inode(parent)->i_mutex); | 206 | mutex_unlock(&d_inode(parent)->i_mutex); |
214 | simple_release_fs(&mount, &mount_count); | 207 | simple_release_fs(&mount, &mount_count); |