Diffstat (limited to 'fs')
-rw-r--r--  fs/aio.c                      |  40
-rw-r--r--  fs/autofs4/autofs_i.h         |  38
-rw-r--r--  fs/autofs4/expire.c           |   8
-rw-r--r--  fs/autofs4/inode.c            |   2
-rw-r--r--  fs/autofs4/root.c             | 616
-rw-r--r--  fs/binfmt_elf.c               |  11
-rw-r--r--  fs/binfmt_elf_fdpic.c         |   8
-rw-r--r--  fs/direct-io.c                | 165
-rw-r--r--  fs/ext2/dir.c                 |   2
-rw-r--r--  fs/ext2/ext2.h                |   1
-rw-r--r--  fs/ext2/file.c                |  21
-rw-r--r--  fs/ext2/super.c               |  22
-rw-r--r--  fs/fat/fat.h                  |   3
-rw-r--r--  fs/fat/fatent.c               |  25
-rw-r--r--  fs/fat/inode.c                |   8
-rw-r--r--  fs/fat/misc.c                 |  57
-rw-r--r--  fs/fscache/object-list.c      |   2
-rw-r--r--  fs/hpfs/super.c               |  17
-rw-r--r--  fs/jffs2/gc.c                 |   3
-rw-r--r--  fs/jffs2/readinode.c          |   2
-rw-r--r--  fs/jffs2/summary.c            |   2
-rw-r--r--  fs/ocfs2/aops.c               |  34
-rw-r--r--  fs/proc/base.c                |   4
-rw-r--r--  fs/proc/generic.c             |  21
-rw-r--r--  fs/proc/inode.c               |  31
-rw-r--r--  fs/proc/internal.h            |  10
-rw-r--r--  fs/qnx4/bitmap.c              |  24
-rw-r--r--  fs/qnx4/inode.c               |  22
-rw-r--r--  fs/reiserfs/Makefile          |   6
-rw-r--r--  fs/reiserfs/procfs.c          |  65
-rw-r--r--  fs/reiserfs/super.c           |   4
-rw-r--r--  fs/ufs/dir.c                  |  10
-rw-r--r--  fs/ufs/namei.c                |   8
-rw-r--r--  fs/ufs/super.c                |  52
-rw-r--r--  fs/ufs/ufs.h                  |   4
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c   |  20
36 files changed, 735 insertions, 633 deletions
diff --git a/fs/aio.c b/fs/aio.c
index c30dfc006108..1cf12b3dd83a 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -711,10 +711,8 @@ static ssize_t aio_run_iocb(struct kiocb *iocb)
711 */ 711 */
712 ret = retry(iocb); 712 ret = retry(iocb);
713 713
714 if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED) { 714 if (ret != -EIOCBRETRY && ret != -EIOCBQUEUED)
715 BUG_ON(!list_empty(&iocb->ki_wait.task_list));
716 aio_complete(iocb, ret, 0); 715 aio_complete(iocb, ret, 0);
717 }
718out: 716out:
719 spin_lock_irq(&ctx->ctx_lock); 717 spin_lock_irq(&ctx->ctx_lock);
720 718
@@ -866,13 +864,6 @@ static void try_queue_kicked_iocb(struct kiocb *iocb)
866 unsigned long flags; 864 unsigned long flags;
867 int run = 0; 865 int run = 0;
868 866
869 /* We're supposed to be the only path putting the iocb back on the run
870 * list. If we find that the iocb is *back* on a wait queue already
871 * than retry has happened before we could queue the iocb. This also
872 * means that the retry could have completed and freed our iocb, no
873 * good. */
874 BUG_ON((!list_empty(&iocb->ki_wait.task_list)));
875
876 spin_lock_irqsave(&ctx->ctx_lock, flags); 867 spin_lock_irqsave(&ctx->ctx_lock, flags);
877 /* set this inside the lock so that we can't race with aio_run_iocb() 868 /* set this inside the lock so that we can't race with aio_run_iocb()
878 * testing it and putting the iocb on the run list under the lock */ 869 * testing it and putting the iocb on the run list under the lock */
@@ -886,7 +877,7 @@ static void try_queue_kicked_iocb(struct kiocb *iocb)
886/* 877/*
887 * kick_iocb: 878 * kick_iocb:
888 * Called typically from a wait queue callback context 879 * Called typically from a wait queue callback context
889 * (aio_wake_function) to trigger a retry of the iocb. 880 * to trigger a retry of the iocb.
890 * The retry is usually executed by aio workqueue 881 * The retry is usually executed by aio workqueue
891 * threads (See aio_kick_handler). 882 * threads (See aio_kick_handler).
892 */ 883 */
@@ -1520,31 +1511,6 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb)
1520 return 0; 1511 return 0;
1521} 1512}
1522 1513
1523/*
1524 * aio_wake_function:
1525 * wait queue callback function for aio notification,
1526 * Simply triggers a retry of the operation via kick_iocb.
1527 *
1528 * This callback is specified in the wait queue entry in
1529 * a kiocb.
1530 *
1531 * Note:
1532 * This routine is executed with the wait queue lock held.
1533 * Since kick_iocb acquires iocb->ctx->ctx_lock, it nests
1534 * the ioctx lock inside the wait queue lock. This is safe
1535 * because this callback isn't used for wait queues which
1536 * are nested inside ioctx lock (i.e. ctx->wait)
1537 */
1538static int aio_wake_function(wait_queue_t *wait, unsigned mode,
1539 int sync, void *key)
1540{
1541 struct kiocb *iocb = container_of(wait, struct kiocb, ki_wait);
1542
1543 list_del_init(&wait->task_list);
1544 kick_iocb(iocb);
1545 return 1;
1546}
1547
1548static void aio_batch_add(struct address_space *mapping, 1514static void aio_batch_add(struct address_space *mapping,
1549 struct hlist_head *batch_hash) 1515 struct hlist_head *batch_hash)
1550{ 1516{
@@ -1642,8 +1608,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
1642 req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf; 1608 req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf;
1643 req->ki_left = req->ki_nbytes = iocb->aio_nbytes; 1609 req->ki_left = req->ki_nbytes = iocb->aio_nbytes;
1644 req->ki_opcode = iocb->aio_lio_opcode; 1610 req->ki_opcode = iocb->aio_lio_opcode;
1645 init_waitqueue_func_entry(&req->ki_wait, aio_wake_function);
1646 INIT_LIST_HEAD(&req->ki_wait.task_list);
1647 1611
1648 ret = aio_setup_iocb(req); 1612 ret = aio_setup_iocb(req);
1649 1613
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h
index 8f7cdde41733..0118d67221b2 100644
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
@@ -60,6 +60,11 @@ do { \
60 current->pid, __func__, ##args); \ 60 current->pid, __func__, ##args); \
61} while (0) 61} while (0)
62 62
63struct rehash_entry {
64 struct task_struct *task;
65 struct list_head list;
66};
67
63/* Unified info structure. This is pointed to by both the dentry and 68/* Unified info structure. This is pointed to by both the dentry and
64 inode structures. Each file in the filesystem has an instance of this 69 inode structures. Each file in the filesystem has an instance of this
65 structure. It holds a reference to the dentry, so dentries are never 70 structure. It holds a reference to the dentry, so dentries are never
@@ -75,6 +80,9 @@ struct autofs_info {
75 struct completion expire_complete; 80 struct completion expire_complete;
76 81
77 struct list_head active; 82 struct list_head active;
83 int active_count;
84 struct list_head rehash_list;
85
78 struct list_head expiring; 86 struct list_head expiring;
79 87
80 struct autofs_sb_info *sbi; 88 struct autofs_sb_info *sbi;
@@ -95,6 +103,8 @@ struct autofs_info {
95 103
96#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */ 104#define AUTOFS_INF_EXPIRING (1<<0) /* dentry is in the process of expiring */
97#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */ 105#define AUTOFS_INF_MOUNTPOINT (1<<1) /* mountpoint status for direct expire */
106#define AUTOFS_INF_PENDING (1<<2) /* dentry pending mount */
107#define AUTOFS_INF_REHASH (1<<3) /* dentry in transit to ->lookup() */
98 108
99struct autofs_wait_queue { 109struct autofs_wait_queue {
100 wait_queue_head_t queue; 110 wait_queue_head_t queue;
@@ -161,7 +171,7 @@ static inline int autofs4_ispending(struct dentry *dentry)
161{ 171{
162 struct autofs_info *inf = autofs4_dentry_ino(dentry); 172 struct autofs_info *inf = autofs4_dentry_ino(dentry);
163 173
164 if (dentry->d_flags & DCACHE_AUTOFS_PENDING) 174 if (inf->flags & AUTOFS_INF_PENDING)
165 return 1; 175 return 1;
166 176
167 if (inf->flags & AUTOFS_INF_EXPIRING) 177 if (inf->flags & AUTOFS_INF_EXPIRING)
@@ -264,5 +274,31 @@ out:
264 return ret; 274 return ret;
265} 275}
266 276
277static inline void autofs4_add_expiring(struct dentry *dentry)
278{
279 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
280 struct autofs_info *ino = autofs4_dentry_ino(dentry);
281 if (ino) {
282 spin_lock(&sbi->lookup_lock);
283 if (list_empty(&ino->expiring))
284 list_add(&ino->expiring, &sbi->expiring_list);
285 spin_unlock(&sbi->lookup_lock);
286 }
287 return;
288}
289
290static inline void autofs4_del_expiring(struct dentry *dentry)
291{
292 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
293 struct autofs_info *ino = autofs4_dentry_ino(dentry);
294 if (ino) {
295 spin_lock(&sbi->lookup_lock);
296 if (!list_empty(&ino->expiring))
297 list_del_init(&ino->expiring);
298 spin_unlock(&sbi->lookup_lock);
299 }
300 return;
301}
302
267void autofs4_dentry_release(struct dentry *); 303void autofs4_dentry_release(struct dentry *);
268extern void autofs4_kill_sb(struct super_block *); 304extern void autofs4_kill_sb(struct super_block *);
diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c
index 3da18d453488..74bc9aa6df31 100644
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -27,7 +27,7 @@ static inline int autofs4_can_expire(struct dentry *dentry,
27 return 0; 27 return 0;
28 28
29 /* No point expiring a pending mount */ 29 /* No point expiring a pending mount */
30 if (dentry->d_flags & DCACHE_AUTOFS_PENDING) 30 if (ino->flags & AUTOFS_INF_PENDING)
31 return 0; 31 return 0;
32 32
33 if (!do_now) { 33 if (!do_now) {
@@ -279,6 +279,7 @@ struct dentry *autofs4_expire_direct(struct super_block *sb,
279 root->d_mounted--; 279 root->d_mounted--;
280 } 280 }
281 ino->flags |= AUTOFS_INF_EXPIRING; 281 ino->flags |= AUTOFS_INF_EXPIRING;
282 autofs4_add_expiring(root);
282 init_completion(&ino->expire_complete); 283 init_completion(&ino->expire_complete);
283 spin_unlock(&sbi->fs_lock); 284 spin_unlock(&sbi->fs_lock);
284 return root; 285 return root;
@@ -406,6 +407,7 @@ found:
406 expired, (int)expired->d_name.len, expired->d_name.name); 407 expired, (int)expired->d_name.len, expired->d_name.name);
407 ino = autofs4_dentry_ino(expired); 408 ino = autofs4_dentry_ino(expired);
408 ino->flags |= AUTOFS_INF_EXPIRING; 409 ino->flags |= AUTOFS_INF_EXPIRING;
410 autofs4_add_expiring(expired);
409 init_completion(&ino->expire_complete); 411 init_completion(&ino->expire_complete);
410 spin_unlock(&sbi->fs_lock); 412 spin_unlock(&sbi->fs_lock);
411 spin_lock(&dcache_lock); 413 spin_lock(&dcache_lock);
@@ -433,7 +435,7 @@ int autofs4_expire_wait(struct dentry *dentry)
433 435
434 DPRINTK("expire done status=%d", status); 436 DPRINTK("expire done status=%d", status);
435 437
436 if (d_unhashed(dentry)) 438 if (d_unhashed(dentry) && IS_DEADDIR(dentry->d_inode))
437 return -EAGAIN; 439 return -EAGAIN;
438 440
439 return status; 441 return status;
@@ -473,6 +475,7 @@ int autofs4_expire_run(struct super_block *sb,
473 spin_lock(&sbi->fs_lock); 475 spin_lock(&sbi->fs_lock);
474 ino = autofs4_dentry_ino(dentry); 476 ino = autofs4_dentry_ino(dentry);
475 ino->flags &= ~AUTOFS_INF_EXPIRING; 477 ino->flags &= ~AUTOFS_INF_EXPIRING;
478 autofs4_del_expiring(dentry);
476 complete_all(&ino->expire_complete); 479 complete_all(&ino->expire_complete);
477 spin_unlock(&sbi->fs_lock); 480 spin_unlock(&sbi->fs_lock);
478 481
@@ -503,6 +506,7 @@ int autofs4_do_expire_multi(struct super_block *sb, struct vfsmount *mnt,
503 ino->flags &= ~AUTOFS_INF_MOUNTPOINT; 506 ino->flags &= ~AUTOFS_INF_MOUNTPOINT;
504 } 507 }
505 ino->flags &= ~AUTOFS_INF_EXPIRING; 508 ino->flags &= ~AUTOFS_INF_EXPIRING;
509 autofs4_del_expiring(dentry);
506 complete_all(&ino->expire_complete); 510 complete_all(&ino->expire_complete);
507 spin_unlock(&sbi->fs_lock); 511 spin_unlock(&sbi->fs_lock);
508 dput(dentry); 512 dput(dentry);
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 69c8142da838..d0a3de247458 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -49,6 +49,8 @@ struct autofs_info *autofs4_init_ino(struct autofs_info *ino,
49 ino->dentry = NULL; 49 ino->dentry = NULL;
50 ino->size = 0; 50 ino->size = 0;
51 INIT_LIST_HEAD(&ino->active); 51 INIT_LIST_HEAD(&ino->active);
52 INIT_LIST_HEAD(&ino->rehash_list);
53 ino->active_count = 0;
52 INIT_LIST_HEAD(&ino->expiring); 54 INIT_LIST_HEAD(&ino->expiring);
53 atomic_set(&ino->count, 0); 55 atomic_set(&ino->count, 0);
54 } 56 }
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
index b96a3c57359d..30cc9ddf4b70 100644
--- a/fs/autofs4/root.c
+++ b/fs/autofs4/root.c
@@ -72,6 +72,139 @@ const struct inode_operations autofs4_dir_inode_operations = {
72 .rmdir = autofs4_dir_rmdir, 72 .rmdir = autofs4_dir_rmdir,
73}; 73};
74 74
75static void autofs4_add_active(struct dentry *dentry)
76{
77 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
78 struct autofs_info *ino = autofs4_dentry_ino(dentry);
79 if (ino) {
80 spin_lock(&sbi->lookup_lock);
81 if (!ino->active_count) {
82 if (list_empty(&ino->active))
83 list_add(&ino->active, &sbi->active_list);
84 }
85 ino->active_count++;
86 spin_unlock(&sbi->lookup_lock);
87 }
88 return;
89}
90
91static void autofs4_del_active(struct dentry *dentry)
92{
93 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
94 struct autofs_info *ino = autofs4_dentry_ino(dentry);
95 if (ino) {
96 spin_lock(&sbi->lookup_lock);
97 ino->active_count--;
98 if (!ino->active_count) {
99 if (!list_empty(&ino->active))
100 list_del_init(&ino->active);
101 }
102 spin_unlock(&sbi->lookup_lock);
103 }
104 return;
105}
106
107static void autofs4_add_rehash_entry(struct autofs_info *ino,
108 struct rehash_entry *entry)
109{
110 entry->task = current;
111 INIT_LIST_HEAD(&entry->list);
112 list_add(&entry->list, &ino->rehash_list);
113 return;
114}
115
116static void autofs4_remove_rehash_entry(struct autofs_info *ino)
117{
118 struct list_head *head = &ino->rehash_list;
119 struct rehash_entry *entry;
120 list_for_each_entry(entry, head, list) {
121 if (entry->task == current) {
122 list_del(&entry->list);
123 kfree(entry);
124 break;
125 }
126 }
127 return;
128}
129
130static void autofs4_remove_rehash_entrys(struct autofs_info *ino)
131{
132 struct autofs_sb_info *sbi = ino->sbi;
133 struct rehash_entry *entry, *next;
134 struct list_head *head;
135
136 spin_lock(&sbi->fs_lock);
137 spin_lock(&sbi->lookup_lock);
138 if (!(ino->flags & AUTOFS_INF_REHASH)) {
139 spin_unlock(&sbi->lookup_lock);
140 spin_unlock(&sbi->fs_lock);
141 return;
142 }
143 ino->flags &= ~AUTOFS_INF_REHASH;
144 head = &ino->rehash_list;
145 list_for_each_entry_safe(entry, next, head, list) {
146 list_del(&entry->list);
147 kfree(entry);
148 }
149 spin_unlock(&sbi->lookup_lock);
150 spin_unlock(&sbi->fs_lock);
151 dput(ino->dentry);
152
153 return;
154}
155
156static void autofs4_revalidate_drop(struct dentry *dentry,
157 struct rehash_entry *entry)
158{
159 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
160 struct autofs_info *ino = autofs4_dentry_ino(dentry);
161 /*
162 * Add to the active list so we can pick this up in
163 * ->lookup(). Also add an entry to a rehash list so
164 * we know when there are no dentrys in flight so we
165 * know when we can rehash the dentry.
166 */
167 spin_lock(&sbi->lookup_lock);
168 if (list_empty(&ino->active))
169 list_add(&ino->active, &sbi->active_list);
170 autofs4_add_rehash_entry(ino, entry);
171 spin_unlock(&sbi->lookup_lock);
172 if (!(ino->flags & AUTOFS_INF_REHASH)) {
173 ino->flags |= AUTOFS_INF_REHASH;
174 dget(dentry);
175 spin_lock(&dentry->d_lock);
176 __d_drop(dentry);
177 spin_unlock(&dentry->d_lock);
178 }
179 return;
180}
181
182static void autofs4_revalidate_rehash(struct dentry *dentry)
183{
184 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
185 struct autofs_info *ino = autofs4_dentry_ino(dentry);
186 if (ino->flags & AUTOFS_INF_REHASH) {
187 spin_lock(&sbi->lookup_lock);
188 autofs4_remove_rehash_entry(ino);
189 if (list_empty(&ino->rehash_list)) {
190 spin_unlock(&sbi->lookup_lock);
191 ino->flags &= ~AUTOFS_INF_REHASH;
192 d_rehash(dentry);
193 dput(ino->dentry);
194 } else
195 spin_unlock(&sbi->lookup_lock);
196 }
197 return;
198}
199
200static unsigned int autofs4_need_mount(unsigned int flags)
201{
202 unsigned int res = 0;
203 if (flags & (TRIGGER_FLAGS | TRIGGER_INTENTS))
204 res = 1;
205 return res;
206}
207
75static int autofs4_dir_open(struct inode *inode, struct file *file) 208static int autofs4_dir_open(struct inode *inode, struct file *file)
76{ 209{
77 struct dentry *dentry = file->f_path.dentry; 210 struct dentry *dentry = file->f_path.dentry;
@@ -93,7 +226,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
93 * it. 226 * it.
94 */ 227 */
95 spin_lock(&dcache_lock); 228 spin_lock(&dcache_lock);
96 if (!d_mountpoint(dentry) && __simple_empty(dentry)) { 229 if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
97 spin_unlock(&dcache_lock); 230 spin_unlock(&dcache_lock);
98 return -ENOENT; 231 return -ENOENT;
99 } 232 }
@@ -103,7 +236,7 @@ out:
103 return dcache_dir_open(inode, file); 236 return dcache_dir_open(inode, file);
104} 237}
105 238
106static int try_to_fill_dentry(struct dentry *dentry, int flags) 239static int try_to_fill_dentry(struct dentry *dentry)
107{ 240{
108 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); 241 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
109 struct autofs_info *ino = autofs4_dentry_ino(dentry); 242 struct autofs_info *ino = autofs4_dentry_ino(dentry);
@@ -116,55 +249,17 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags)
116 * Wait for a pending mount, triggering one if there 249 * Wait for a pending mount, triggering one if there
117 * isn't one already 250 * isn't one already
118 */ 251 */
119 if (dentry->d_inode == NULL) { 252 DPRINTK("waiting for mount name=%.*s",
120 DPRINTK("waiting for mount name=%.*s", 253 dentry->d_name.len, dentry->d_name.name);
121 dentry->d_name.len, dentry->d_name.name);
122
123 status = autofs4_wait(sbi, dentry, NFY_MOUNT);
124
125 DPRINTK("mount done status=%d", status);
126
127 /* Turn this into a real negative dentry? */
128 if (status == -ENOENT) {
129 spin_lock(&dentry->d_lock);
130 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
131 spin_unlock(&dentry->d_lock);
132 return status;
133 } else if (status) {
134 /* Return a negative dentry, but leave it "pending" */
135 return status;
136 }
137 /* Trigger mount for path component or follow link */
138 } else if (dentry->d_flags & DCACHE_AUTOFS_PENDING ||
139 flags & (TRIGGER_FLAGS | TRIGGER_INTENTS) ||
140 current->link_count) {
141 DPRINTK("waiting for mount name=%.*s",
142 dentry->d_name.len, dentry->d_name.name);
143
144 spin_lock(&dentry->d_lock);
145 dentry->d_flags |= DCACHE_AUTOFS_PENDING;
146 spin_unlock(&dentry->d_lock);
147 status = autofs4_wait(sbi, dentry, NFY_MOUNT);
148 254
149 DPRINTK("mount done status=%d", status); 255 status = autofs4_wait(sbi, dentry, NFY_MOUNT);
150 256
151 if (status) { 257 DPRINTK("mount done status=%d", status);
152 spin_lock(&dentry->d_lock);
153 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING;
154 spin_unlock(&dentry->d_lock);
155 return status;
156 }
157 }
158
159 /* Initialize expiry counter after successful mount */
160 if (ino)
161 ino->last_used = jiffies;
162 258
163 spin_lock(&dentry->d_lock); 259 /* Update expiry counter */
164 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; 260 ino->last_used = jiffies;
165 spin_unlock(&dentry->d_lock);
166 261
167 return 0; 262 return status;
168} 263}
169 264
170/* For autofs direct mounts the follow link triggers the mount */ 265/* For autofs direct mounts the follow link triggers the mount */
@@ -202,27 +297,39 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
202 autofs4_expire_wait(dentry); 297 autofs4_expire_wait(dentry);
203 298
204 /* We trigger a mount for almost all flags */ 299 /* We trigger a mount for almost all flags */
205 lookup_type = nd->flags & (TRIGGER_FLAGS | TRIGGER_INTENTS); 300 lookup_type = autofs4_need_mount(nd->flags);
206 if (!(lookup_type || dentry->d_flags & DCACHE_AUTOFS_PENDING)) 301 spin_lock(&sbi->fs_lock);
302 spin_lock(&dcache_lock);
303 if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) {
304 spin_unlock(&dcache_lock);
305 spin_unlock(&sbi->fs_lock);
207 goto follow; 306 goto follow;
307 }
208 308
209 /* 309 /*
210 * If the dentry contains directories then it is an autofs 310 * If the dentry contains directories then it is an autofs
211 * multi-mount with no root mount offset. So don't try to 311 * multi-mount with no root mount offset. So don't try to
212 * mount it again. 312 * mount it again.
213 */ 313 */
214 spin_lock(&dcache_lock); 314 if (ino->flags & AUTOFS_INF_PENDING ||
215 if (dentry->d_flags & DCACHE_AUTOFS_PENDING || 315 (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
216 (!d_mountpoint(dentry) && __simple_empty(dentry))) { 316 ino->flags |= AUTOFS_INF_PENDING;
217 spin_unlock(&dcache_lock); 317 spin_unlock(&dcache_lock);
318 spin_unlock(&sbi->fs_lock);
319
320 status = try_to_fill_dentry(dentry);
321
322 spin_lock(&sbi->fs_lock);
323 ino->flags &= ~AUTOFS_INF_PENDING;
324 spin_unlock(&sbi->fs_lock);
218 325
219 status = try_to_fill_dentry(dentry, 0);
220 if (status) 326 if (status)
221 goto out_error; 327 goto out_error;
222 328
223 goto follow; 329 goto follow;
224 } 330 }
225 spin_unlock(&dcache_lock); 331 spin_unlock(&dcache_lock);
332 spin_unlock(&sbi->fs_lock);
226follow: 333follow:
227 /* 334 /*
228 * If there is no root mount it must be an autofs 335 * If there is no root mount it must be an autofs
@@ -254,18 +361,47 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
254{ 361{
255 struct inode *dir = dentry->d_parent->d_inode; 362 struct inode *dir = dentry->d_parent->d_inode;
256 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb); 363 struct autofs_sb_info *sbi = autofs4_sbi(dir->i_sb);
257 int oz_mode = autofs4_oz_mode(sbi); 364 struct autofs_info *ino = autofs4_dentry_ino(dentry);
365 struct rehash_entry *entry;
258 int flags = nd ? nd->flags : 0; 366 int flags = nd ? nd->flags : 0;
259 int status = 1; 367 unsigned int mutex_aquired;
368
369 DPRINTK("name = %.*s oz_mode = %d",
370 dentry->d_name.len, dentry->d_name.name, oz_mode);
371
372 /* Daemon never causes a mount to trigger */
373 if (autofs4_oz_mode(sbi))
374 return 1;
375
376 entry = kmalloc(sizeof(struct rehash_entry), GFP_KERNEL);
377 if (!entry)
378 return -ENOMEM;
379
380 mutex_aquired = mutex_trylock(&dir->i_mutex);
260 381
261 /* Pending dentry */
262 spin_lock(&sbi->fs_lock); 382 spin_lock(&sbi->fs_lock);
383 spin_lock(&dcache_lock);
384 /* Pending dentry */
263 if (autofs4_ispending(dentry)) { 385 if (autofs4_ispending(dentry)) {
264 /* The daemon never causes a mount to trigger */ 386 int status;
265 spin_unlock(&sbi->fs_lock);
266 387
267 if (oz_mode) 388 /*
268 return 1; 389 * We can only unhash and send this to ->lookup() if
390 * the directory mutex is held over d_revalidate() and
391 * ->lookup(). This prevents the VFS from incorrectly
392 * seeing the dentry as non-existent.
393 */
394 ino->flags |= AUTOFS_INF_PENDING;
395 if (!mutex_aquired) {
396 autofs4_revalidate_drop(dentry, entry);
397 spin_unlock(&dcache_lock);
398 spin_unlock(&sbi->fs_lock);
399 return 0;
400 }
401 spin_unlock(&dcache_lock);
402 spin_unlock(&sbi->fs_lock);
403 mutex_unlock(&dir->i_mutex);
404 kfree(entry);
269 405
270 /* 406 /*
271 * If the directory has gone away due to an expire 407 * If the directory has gone away due to an expire
@@ -279,46 +415,82 @@ static int autofs4_revalidate(struct dentry *dentry, struct nameidata *nd)
279 * A zero status is success otherwise we have a 415 * A zero status is success otherwise we have a
280 * negative error code. 416 * negative error code.
281 */ 417 */
282 status = try_to_fill_dentry(dentry, flags); 418 status = try_to_fill_dentry(dentry);
419
420 spin_lock(&sbi->fs_lock);
421 ino->flags &= ~AUTOFS_INF_PENDING;
422 spin_unlock(&sbi->fs_lock);
423
283 if (status == 0) 424 if (status == 0)
284 return 1; 425 return 1;
285 426
286 return status; 427 return status;
287 } 428 }
288 spin_unlock(&sbi->fs_lock);
289
290 /* Negative dentry.. invalidate if "old" */
291 if (dentry->d_inode == NULL)
292 return 0;
293 429
294 /* Check for a non-mountpoint directory with no contents */ 430 /* Check for a non-mountpoint directory with no contents */
295 spin_lock(&dcache_lock);
296 if (S_ISDIR(dentry->d_inode->i_mode) && 431 if (S_ISDIR(dentry->d_inode->i_mode) &&
297 !d_mountpoint(dentry) && 432 !d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
298 __simple_empty(dentry)) {
299 DPRINTK("dentry=%p %.*s, emptydir", 433 DPRINTK("dentry=%p %.*s, emptydir",
300 dentry, dentry->d_name.len, dentry->d_name.name); 434 dentry, dentry->d_name.len, dentry->d_name.name);
301 spin_unlock(&dcache_lock);
302 435
303 /* The daemon never causes a mount to trigger */ 436 if (autofs4_need_mount(flags) || current->link_count) {
304 if (oz_mode) 437 int status;
305 return 1;
306 438
307 /* 439 /*
308 * A zero status is success otherwise we have a 440 * We can only unhash and send this to ->lookup() if
309 * negative error code. 441 * the directory mutex is held over d_revalidate() and
310 */ 442 * ->lookup(). This prevents the VFS from incorrectly
311 status = try_to_fill_dentry(dentry, flags); 443 * seeing the dentry as non-existent.
312 if (status == 0) 444 */
313 return 1; 445 ino->flags |= AUTOFS_INF_PENDING;
446 if (!mutex_aquired) {
447 autofs4_revalidate_drop(dentry, entry);
448 spin_unlock(&dcache_lock);
449 spin_unlock(&sbi->fs_lock);
450 return 0;
451 }
452 spin_unlock(&dcache_lock);
453 spin_unlock(&sbi->fs_lock);
454 mutex_unlock(&dir->i_mutex);
455 kfree(entry);
314 456
315 return status; 457 /*
458 * A zero status is success otherwise we have a
459 * negative error code.
460 */
461 status = try_to_fill_dentry(dentry);
462
463 spin_lock(&sbi->fs_lock);
464 ino->flags &= ~AUTOFS_INF_PENDING;
465 spin_unlock(&sbi->fs_lock);
466
467 if (status == 0)
468 return 1;
469
470 return status;
471 }
316 } 472 }
317 spin_unlock(&dcache_lock); 473 spin_unlock(&dcache_lock);
474 spin_unlock(&sbi->fs_lock);
475
476 if (mutex_aquired)
477 mutex_unlock(&dir->i_mutex);
478
479 kfree(entry);
318 480
319 return 1; 481 return 1;
320} 482}
321 483
484static void autofs4_free_rehash_entrys(struct autofs_info *inf)
485{
486 struct list_head *head = &inf->rehash_list;
487 struct rehash_entry *entry, *next;
488 list_for_each_entry_safe(entry, next, head, list) {
489 list_del(&entry->list);
490 kfree(entry);
491 }
492}
493
322void autofs4_dentry_release(struct dentry *de) 494void autofs4_dentry_release(struct dentry *de)
323{ 495{
324 struct autofs_info *inf; 496 struct autofs_info *inf;
@@ -337,6 +509,8 @@ void autofs4_dentry_release(struct dentry *de)
337 list_del(&inf->active); 509 list_del(&inf->active);
338 if (!list_empty(&inf->expiring)) 510 if (!list_empty(&inf->expiring))
339 list_del(&inf->expiring); 511 list_del(&inf->expiring);
512 if (!list_empty(&inf->rehash_list))
513 autofs4_free_rehash_entrys(inf);
340 spin_unlock(&sbi->lookup_lock); 514 spin_unlock(&sbi->lookup_lock);
341 } 515 }
342 516
@@ -359,35 +533,52 @@ static const struct dentry_operations autofs4_dentry_operations = {
359 .d_release = autofs4_dentry_release, 533 .d_release = autofs4_dentry_release,
360}; 534};
361 535
362static struct dentry *autofs4_lookup_active(struct autofs_sb_info *sbi, struct dentry *parent, struct qstr *name) 536static struct dentry *autofs4_lookup_active(struct dentry *dentry)
363{ 537{
538 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
539 struct dentry *parent = dentry->d_parent;
540 struct qstr *name = &dentry->d_name;
364 unsigned int len = name->len; 541 unsigned int len = name->len;
365 unsigned int hash = name->hash; 542 unsigned int hash = name->hash;
366 const unsigned char *str = name->name; 543 const unsigned char *str = name->name;
367 struct list_head *p, *head; 544 struct list_head *p, *head;
368 545
546restart:
369 spin_lock(&dcache_lock); 547 spin_lock(&dcache_lock);
370 spin_lock(&sbi->lookup_lock); 548 spin_lock(&sbi->lookup_lock);
371 head = &sbi->active_list; 549 head = &sbi->active_list;
372 list_for_each(p, head) { 550 list_for_each(p, head) {
373 struct autofs_info *ino; 551 struct autofs_info *ino;
374 struct dentry *dentry; 552 struct dentry *active;
375 struct qstr *qstr; 553 struct qstr *qstr;
376 554
377 ino = list_entry(p, struct autofs_info, active); 555 ino = list_entry(p, struct autofs_info, active);
378 dentry = ino->dentry; 556 active = ino->dentry;
379 557
380 spin_lock(&dentry->d_lock); 558 spin_lock(&active->d_lock);
381 559
382 /* Already gone? */ 560 /* Already gone? */
383 if (atomic_read(&dentry->d_count) == 0) 561 if (atomic_read(&active->d_count) == 0)
384 goto next; 562 goto next;
385 563
386 qstr = &dentry->d_name; 564 if (active->d_inode && IS_DEADDIR(active->d_inode)) {
565 if (!list_empty(&ino->rehash_list)) {
566 dget(active);
567 spin_unlock(&active->d_lock);
568 spin_unlock(&sbi->lookup_lock);
569 spin_unlock(&dcache_lock);
570 autofs4_remove_rehash_entrys(ino);
571 dput(active);
572 goto restart;
573 }
574 goto next;
575 }
576
577 qstr = &active->d_name;
387 578
388 if (dentry->d_name.hash != hash) 579 if (active->d_name.hash != hash)
389 goto next; 580 goto next;
390 if (dentry->d_parent != parent) 581 if (active->d_parent != parent)
391 goto next; 582 goto next;
392 583
393 if (qstr->len != len) 584 if (qstr->len != len)
@@ -395,15 +586,13 @@ static struct dentry *autofs4_lookup_active(struct autofs_sb_info *sbi, struct d
395 if (memcmp(qstr->name, str, len)) 586 if (memcmp(qstr->name, str, len))
396 goto next; 587 goto next;
397 588
398 if (d_unhashed(dentry)) { 589 dget(active);
399 dget(dentry); 590 spin_unlock(&active->d_lock);
400 spin_unlock(&dentry->d_lock); 591 spin_unlock(&sbi->lookup_lock);
401 spin_unlock(&sbi->lookup_lock); 592 spin_unlock(&dcache_lock);
402 spin_unlock(&dcache_lock); 593 return active;
403 return dentry;
404 }
405next: 594next:
406 spin_unlock(&dentry->d_lock); 595 spin_unlock(&active->d_lock);
407 } 596 }
408 spin_unlock(&sbi->lookup_lock); 597 spin_unlock(&sbi->lookup_lock);
409 spin_unlock(&dcache_lock); 598 spin_unlock(&dcache_lock);
@@ -411,8 +600,11 @@ next:
411 return NULL; 600 return NULL;
412} 601}
413 602
414static struct dentry *autofs4_lookup_expiring(struct autofs_sb_info *sbi, struct dentry *parent, struct qstr *name) 603static struct dentry *autofs4_lookup_expiring(struct dentry *dentry)
415{ 604{
605 struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
606 struct dentry *parent = dentry->d_parent;
607 struct qstr *name = &dentry->d_name;
416 unsigned int len = name->len; 608 unsigned int len = name->len;
417 unsigned int hash = name->hash; 609 unsigned int hash = name->hash;
418 const unsigned char *str = name->name; 610 const unsigned char *str = name->name;
@@ -423,23 +615,23 @@ static struct dentry *autofs4_lookup_expiring(struct autofs_sb_info *sbi, struct
423 head = &sbi->expiring_list; 615 head = &sbi->expiring_list;
424 list_for_each(p, head) { 616 list_for_each(p, head) {
425 struct autofs_info *ino; 617 struct autofs_info *ino;
426 struct dentry *dentry; 618 struct dentry *expiring;
427 struct qstr *qstr; 619 struct qstr *qstr;
428 620
429 ino = list_entry(p, struct autofs_info, expiring); 621 ino = list_entry(p, struct autofs_info, expiring);
430 dentry = ino->dentry; 622 expiring = ino->dentry;
431 623
432 spin_lock(&dentry->d_lock); 624 spin_lock(&expiring->d_lock);
433 625
434 /* Bad luck, we've already been dentry_iput */ 626 /* Bad luck, we've already been dentry_iput */
435 if (!dentry->d_inode) 627 if (!expiring->d_inode)
436 goto next; 628 goto next;
437 629
438 qstr = &dentry->d_name; 630 qstr = &expiring->d_name;
439 631
440 if (dentry->d_name.hash != hash) 632 if (expiring->d_name.hash != hash)
441 goto next; 633 goto next;
442 if (dentry->d_parent != parent) 634 if (expiring->d_parent != parent)
443 goto next; 635 goto next;
444 636
445 if (qstr->len != len) 637 if (qstr->len != len)
@@ -447,15 +639,13 @@ static struct dentry *autofs4_lookup_expiring(struct autofs_sb_info *sbi, struct
447 if (memcmp(qstr->name, str, len)) 639 if (memcmp(qstr->name, str, len))
448 goto next; 640 goto next;
449 641
450 if (d_unhashed(dentry)) { 642 dget(expiring);
451 dget(dentry); 643 spin_unlock(&expiring->d_lock);
452 spin_unlock(&dentry->d_lock); 644 spin_unlock(&sbi->lookup_lock);
453 spin_unlock(&sbi->lookup_lock); 645 spin_unlock(&dcache_lock);
454 spin_unlock(&dcache_lock); 646 return expiring;
455 return dentry;
456 }
457next: 647next:
458 spin_unlock(&dentry->d_lock); 648 spin_unlock(&expiring->d_lock);
459 } 649 }
460 spin_unlock(&sbi->lookup_lock); 650 spin_unlock(&sbi->lookup_lock);
461 spin_unlock(&dcache_lock); 651 spin_unlock(&dcache_lock);
@@ -463,13 +653,56 @@ next:
463 return NULL; 653 return NULL;
464} 654}
465 655
656static struct autofs_info *init_new_dentry(struct autofs_sb_info *sbi,
657 struct dentry *dentry, int oz_mode)
658{
659 struct autofs_info *ino;
660
661 /*
662 * Mark the dentry incomplete but don't hash it. We do this
663 * to serialize our inode creation operations (symlink and
664 * mkdir) which prevents deadlock during the callback to
665 * the daemon. Subsequent user space lookups for the same
666 * dentry are placed on the wait queue while the daemon
667 * itself is allowed passage unresticted so the create
668 * operation itself can then hash the dentry. Finally,
669 * we check for the hashed dentry and return the newly
670 * hashed dentry.
671 */
672 dentry->d_op = &autofs4_root_dentry_operations;
673
674 /*
675 * And we need to ensure that the same dentry is used for
676 * all following lookup calls until it is hashed so that
677 * the dentry flags are persistent throughout the request.
678 */
679 ino = autofs4_init_ino(NULL, sbi, 0555);
680 if (!ino)
681 return ERR_PTR(-ENOMEM);
682
683 dentry->d_fsdata = ino;
684 ino->dentry = dentry;
685
686 /*
687 * Only set the mount pending flag for new dentrys not created
688 * by the daemon.
689 */
690 if (!oz_mode)
691 ino->flags |= AUTOFS_INF_PENDING;
692
693 d_instantiate(dentry, NULL);
694
695 return ino;
696}
697
466/* Lookups in the root directory */ 698/* Lookups in the root directory */
467static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) 699static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
468{ 700{
469 struct autofs_sb_info *sbi; 701 struct autofs_sb_info *sbi;
470 struct autofs_info *ino; 702 struct autofs_info *ino;
471 struct dentry *expiring, *unhashed; 703 struct dentry *expiring, *active;
472 int oz_mode; 704 int oz_mode;
705 int status = 0;
473 706
474 DPRINTK("name = %.*s", 707 DPRINTK("name = %.*s",
475 dentry->d_name.len, dentry->d_name.name); 708 dentry->d_name.len, dentry->d_name.name);
@@ -484,123 +717,100 @@ static struct dentry *autofs4_lookup(struct inode *dir, struct dentry *dentry, s
484 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d", 717 DPRINTK("pid = %u, pgrp = %u, catatonic = %d, oz_mode = %d",
485 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode); 718 current->pid, task_pgrp_nr(current), sbi->catatonic, oz_mode);
486 719
487 unhashed = autofs4_lookup_active(sbi, dentry->d_parent, &dentry->d_name); 720 spin_lock(&sbi->fs_lock);
488 if (unhashed) 721 active = autofs4_lookup_active(dentry);
489 dentry = unhashed; 722 if (active) {
490 else { 723 dentry = active;
491 /* 724 ino = autofs4_dentry_ino(dentry);
492 * Mark the dentry incomplete but don't hash it. We do this 725 /* If this came from revalidate, rehash it */
493 * to serialize our inode creation operations (symlink and 726 autofs4_revalidate_rehash(dentry);
494 * mkdir) which prevents deadlock during the callback to 727 spin_unlock(&sbi->fs_lock);
495 * the daemon. Subsequent user space lookups for the same 728 } else {
496 * dentry are placed on the wait queue while the daemon 729 spin_unlock(&sbi->fs_lock);
497 * itself is allowed passage unresticted so the create 730 ino = init_new_dentry(sbi, dentry, oz_mode);
498 * operation itself can then hash the dentry. Finally, 731 if (IS_ERR(ino))
499 * we check for the hashed dentry and return the newly 732 return (struct dentry *) ino;
500 * hashed dentry.
501 */
502 dentry->d_op = &autofs4_root_dentry_operations;
503
504 /*
505 * And we need to ensure that the same dentry is used for
506 * all following lookup calls until it is hashed so that
507 * the dentry flags are persistent throughout the request.
508 */
509 ino = autofs4_init_ino(NULL, sbi, 0555);
510 if (!ino)
511 return ERR_PTR(-ENOMEM);
512
513 dentry->d_fsdata = ino;
514 ino->dentry = dentry;
515
516 spin_lock(&sbi->lookup_lock);
517 list_add(&ino->active, &sbi->active_list);
518 spin_unlock(&sbi->lookup_lock);
519
520 d_instantiate(dentry, NULL);
521 } 733 }
522 734
735 autofs4_add_active(dentry);
736
523 if (!oz_mode) { 737 if (!oz_mode) {
738 expiring = autofs4_lookup_expiring(dentry);
524 mutex_unlock(&dir->i_mutex); 739 mutex_unlock(&dir->i_mutex);
525 expiring = autofs4_lookup_expiring(sbi,
526 dentry->d_parent,
527 &dentry->d_name);
528 if (expiring) { 740 if (expiring) {
529 /* 741 /*
530 * If we are racing with expire the request might not 742 * If we are racing with expire the request might not
531 * be quite complete but the directory has been removed 743 * be quite complete but the directory has been removed
532 * so it must have been successful, so just wait for it. 744 * so it must have been successful, so just wait for it.
533 */ 745 */
534 ino = autofs4_dentry_ino(expiring);
535 autofs4_expire_wait(expiring); 746 autofs4_expire_wait(expiring);
536 spin_lock(&sbi->lookup_lock);
537 if (!list_empty(&ino->expiring))
538 list_del_init(&ino->expiring);
539 spin_unlock(&sbi->lookup_lock);
540 dput(expiring); 747 dput(expiring);
541 } 748 }
542 749 status = try_to_fill_dentry(dentry);
543 spin_lock(&dentry->d_lock);
544 dentry->d_flags |= DCACHE_AUTOFS_PENDING;
545 spin_unlock(&dentry->d_lock);
546 if (dentry->d_op && dentry->d_op->d_revalidate)
547 (dentry->d_op->d_revalidate)(dentry, nd);
548 mutex_lock(&dir->i_mutex); 750 mutex_lock(&dir->i_mutex);
751 spin_lock(&sbi->fs_lock);
752 ino->flags &= ~AUTOFS_INF_PENDING;
753 spin_unlock(&sbi->fs_lock);
549 } 754 }
550 755
756 autofs4_del_active(dentry);
757
551 /* 758 /*
552 * If we are still pending, check if we had to handle 759 * If we had a mount fail, check if we had to handle
553 * a signal. If so we can force a restart.. 760 * a signal. If so we can force a restart..
554 */ 761 */
555 if (dentry->d_flags & DCACHE_AUTOFS_PENDING) { 762 if (status) {
556 /* See if we were interrupted */ 763 /* See if we were interrupted */
557 if (signal_pending(current)) { 764 if (signal_pending(current)) {
558 sigset_t *sigset = &current->pending.signal; 765 sigset_t *sigset = &current->pending.signal;
559 if (sigismember (sigset, SIGKILL) || 766 if (sigismember (sigset, SIGKILL) ||
560 sigismember (sigset, SIGQUIT) || 767 sigismember (sigset, SIGQUIT) ||
561 sigismember (sigset, SIGINT)) { 768 sigismember (sigset, SIGINT)) {
562 if (unhashed) 769 if (active)
563 dput(unhashed); 770 dput(active);
564 return ERR_PTR(-ERESTARTNOINTR); 771 return ERR_PTR(-ERESTARTNOINTR);
565 } 772 }
566 } 773 }
567 if (!oz_mode) { 774 }
568 spin_lock(&dentry->d_lock); 775
569 dentry->d_flags &= ~DCACHE_AUTOFS_PENDING; 776 /*
570 spin_unlock(&dentry->d_lock); 777 * User space can (and has done in the past) remove and re-create
778 * this directory during the callback. This can leave us with an
779 * unhashed dentry, but a successful mount! So we need to
780 * perform another cached lookup in case the dentry now exists.
781 */
782 if (!oz_mode && !have_submounts(dentry)) {
783 struct dentry *new;
784 new = d_lookup(dentry->d_parent, &dentry->d_name);
785 if (new) {
786 if (active)
787 dput(active);
788 return new;
789 } else {
790 if (!status)
791 status = -ENOENT;
571 } 792 }
572 } 793 }
573 794
574 /* 795 /*
575 * If this dentry is unhashed, then we shouldn't honour this 796 * If we had a mount failure, return status to user space.
576 * lookup. Returning ENOENT here doesn't do the right thing 797 * If the mount succeeded and we used a dentry from the active queue
577 * for all system calls, but it should be OK for the operations 798 * return it.
578 * we permit from an autofs.
579 */ 799 */
580 if (!oz_mode && d_unhashed(dentry)) { 800 if (status) {
801 dentry = ERR_PTR(status);
802 if (active)
803 dput(active);
804 return dentry;
805 } else {
581 /* 806 /*
582 * A user space application can (and has done in the past) 807 * Valid successful mount, return active dentry or NULL
583 * remove and re-create this directory during the callback. 808 * for a new dentry.
584 * This can leave us with an unhashed dentry, but a
585 * successful mount! So we need to perform another
586 * cached lookup in case the dentry now exists.
587 */ 809 */
588 struct dentry *parent = dentry->d_parent; 810 if (active)
589 struct dentry *new = d_lookup(parent, &dentry->d_name); 811 return active;
590 if (new != NULL)
591 dentry = new;
592 else
593 dentry = ERR_PTR(-ENOENT);
594
595 if (unhashed)
596 dput(unhashed);
597
598 return dentry;
599 } 812 }
600 813
601 if (unhashed)
602 return unhashed;
603
604 return NULL; 814 return NULL;
605} 815}
606 816
@@ -624,11 +834,6 @@ static int autofs4_dir_symlink(struct inode *dir,
624 if (!ino) 834 if (!ino)
625 return -ENOMEM; 835 return -ENOMEM;
626 836
627 spin_lock(&sbi->lookup_lock);
628 if (!list_empty(&ino->active))
629 list_del_init(&ino->active);
630 spin_unlock(&sbi->lookup_lock);
631
632 ino->size = strlen(symname); 837 ino->size = strlen(symname);
633 cp = kmalloc(ino->size + 1, GFP_KERNEL); 838 cp = kmalloc(ino->size + 1, GFP_KERNEL);
634 if (!cp) { 839 if (!cp) {
@@ -705,10 +910,6 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry)
705 dir->i_mtime = CURRENT_TIME; 910 dir->i_mtime = CURRENT_TIME;
706 911
707 spin_lock(&dcache_lock); 912 spin_lock(&dcache_lock);
708 spin_lock(&sbi->lookup_lock);
709 if (list_empty(&ino->expiring))
710 list_add(&ino->expiring, &sbi->expiring_list);
711 spin_unlock(&sbi->lookup_lock);
712 spin_lock(&dentry->d_lock); 913 spin_lock(&dentry->d_lock);
713 __d_drop(dentry); 914 __d_drop(dentry);
714 spin_unlock(&dentry->d_lock); 915 spin_unlock(&dentry->d_lock);
@@ -734,10 +935,6 @@ static int autofs4_dir_rmdir(struct inode *dir, struct dentry *dentry)
734 spin_unlock(&dcache_lock); 935 spin_unlock(&dcache_lock);
735 return -ENOTEMPTY; 936 return -ENOTEMPTY;
736 } 937 }
737 spin_lock(&sbi->lookup_lock);
738 if (list_empty(&ino->expiring))
739 list_add(&ino->expiring, &sbi->expiring_list);
740 spin_unlock(&sbi->lookup_lock);
741 spin_lock(&dentry->d_lock); 938 spin_lock(&dentry->d_lock);
742 __d_drop(dentry); 939 __d_drop(dentry);
743 spin_unlock(&dentry->d_lock); 940 spin_unlock(&dentry->d_lock);
@@ -775,11 +972,6 @@ static int autofs4_dir_mkdir(struct inode *dir, struct dentry *dentry, int mode)
775 if (!ino) 972 if (!ino)
776 return -ENOMEM; 973 return -ENOMEM;
777 974
778 spin_lock(&sbi->lookup_lock);
779 if (!list_empty(&ino->active))
780 list_del_init(&ino->active);
781 spin_unlock(&sbi->lookup_lock);
782
783 inode = autofs4_get_inode(dir->i_sb, ino); 975 inode = autofs4_get_inode(dir->i_sb, ino);
784 if (!inode) { 976 if (!inode) {
785 if (!dentry->d_fsdata) 977 if (!dentry->d_fsdata)
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index d15ea1790bfb..97b6e9efeb7f 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -44,7 +44,7 @@ static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
44 * If we don't support core dumping, then supply a NULL so we 44 * If we don't support core dumping, then supply a NULL so we
45 * don't even try. 45 * don't even try.
46 */ 46 */
47#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 47#ifdef CONFIG_ELF_CORE
48static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit); 48static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
49#else 49#else
50#define elf_core_dump NULL 50#define elf_core_dump NULL
@@ -1101,12 +1101,7 @@ out:
1101 return error; 1101 return error;
1102} 1102}
1103 1103
1104/* 1104#ifdef CONFIG_ELF_CORE
1105 * Note that some platforms still use traditional core dumps and not
1106 * the ELF core dump. Each platform can select it as appropriate.
1107 */
1108#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE)
1109
1110/* 1105/*
1111 * ELF core dumper 1106 * ELF core dumper
1112 * 1107 *
@@ -2063,7 +2058,7 @@ out:
2063 return has_dumped; 2058 return has_dumped;
2064} 2059}
2065 2060
2066#endif /* USE_ELF_CORE_DUMP */ 2061#endif /* CONFIG_ELF_CORE */
2067 2062
2068static int __init init_elf_binfmt(void) 2063static int __init init_elf_binfmt(void)
2069{ 2064{
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 79d2b1aa389f..7b055385db8e 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -75,14 +75,14 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(struct elf_fdpic_params *,
75static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *, 75static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *,
76 struct file *, struct mm_struct *); 76 struct file *, struct mm_struct *);
77 77
78#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 78#ifdef CONFIG_ELF_CORE
79static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *, unsigned long limit); 79static int elf_fdpic_core_dump(long, struct pt_regs *, struct file *, unsigned long limit);
80#endif 80#endif
81 81
82static struct linux_binfmt elf_fdpic_format = { 82static struct linux_binfmt elf_fdpic_format = {
83 .module = THIS_MODULE, 83 .module = THIS_MODULE,
84 .load_binary = load_elf_fdpic_binary, 84 .load_binary = load_elf_fdpic_binary,
85#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 85#ifdef CONFIG_ELF_CORE
86 .core_dump = elf_fdpic_core_dump, 86 .core_dump = elf_fdpic_core_dump,
87#endif 87#endif
88 .min_coredump = ELF_EXEC_PAGESIZE, 88 .min_coredump = ELF_EXEC_PAGESIZE,
@@ -1201,7 +1201,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
1201 * 1201 *
1202 * Modelled on fs/binfmt_elf.c core dumper 1202 * Modelled on fs/binfmt_elf.c core dumper
1203 */ 1203 */
1204#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 1204#ifdef CONFIG_ELF_CORE
1205 1205
1206/* 1206/*
1207 * These are the only things you should do on a core-file: use only these 1207 * These are the only things you should do on a core-file: use only these
@@ -1826,4 +1826,4 @@ cleanup:
1826#undef NUM_NOTES 1826#undef NUM_NOTES
1827} 1827}
1828 1828
1829#endif /* USE_ELF_CORE_DUMP */ 1829#endif /* CONFIG_ELF_CORE */
diff --git a/fs/direct-io.c b/fs/direct-io.c
index b912270942fa..4012885d027f 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -53,13 +53,6 @@
53 * 53 *
54 * If blkfactor is zero then the user's request was aligned to the filesystem's 54 * If blkfactor is zero then the user's request was aligned to the filesystem's
55 * blocksize. 55 * blocksize.
56 *
57 * lock_type is DIO_LOCKING for regular files on direct-IO-naive filesystems.
58 * This determines whether we need to do the fancy locking which prevents
59 * direct-IO from being able to read uninitialised disk blocks. If its zero
60 * (blockdev) this locking is not done, and if it is DIO_OWN_LOCKING i_mutex is
61 * not held for the entire direct write (taken briefly, initially, during a
62 * direct read though, but its never held for the duration of a direct-IO).
63 */ 56 */
64 57
65struct dio { 58struct dio {
@@ -68,7 +61,7 @@ struct dio {
68 struct inode *inode; 61 struct inode *inode;
69 int rw; 62 int rw;
70 loff_t i_size; /* i_size when submitted */ 63 loff_t i_size; /* i_size when submitted */
71 int lock_type; /* doesn't change */ 64 int flags; /* doesn't change */
72 unsigned blkbits; /* doesn't change */ 65 unsigned blkbits; /* doesn't change */
73 unsigned blkfactor; /* When we're using an alignment which 66 unsigned blkfactor; /* When we're using an alignment which
74 is finer than the filesystem's soft 67 is finer than the filesystem's soft
@@ -104,6 +97,18 @@ struct dio {
104 unsigned cur_page_len; /* Nr of bytes at cur_page_offset */ 97 unsigned cur_page_len; /* Nr of bytes at cur_page_offset */
105 sector_t cur_page_block; /* Where it starts */ 98 sector_t cur_page_block; /* Where it starts */
106 99
100 /* BIO completion state */
101 spinlock_t bio_lock; /* protects BIO fields below */
102 unsigned long refcount; /* direct_io_worker() and bios */
103 struct bio *bio_list; /* singly linked via bi_private */
104 struct task_struct *waiter; /* waiting task (NULL if none) */
105
106 /* AIO related stuff */
107 struct kiocb *iocb; /* kiocb */
108 int is_async; /* is IO async ? */
109 int io_error; /* IO error in completion path */
110 ssize_t result; /* IO result */
111
107 /* 112 /*
108 * Page fetching state. These variables belong to dio_refill_pages(). 113 * Page fetching state. These variables belong to dio_refill_pages().
109 */ 114 */
@@ -115,22 +120,16 @@ struct dio {
115 * Page queue. These variables belong to dio_refill_pages() and 120 * Page queue. These variables belong to dio_refill_pages() and
116 * dio_get_page(). 121 * dio_get_page().
117 */ 122 */
118 struct page *pages[DIO_PAGES]; /* page buffer */
119 unsigned head; /* next page to process */ 123 unsigned head; /* next page to process */
120 unsigned tail; /* last valid page + 1 */ 124 unsigned tail; /* last valid page + 1 */
121 int page_errors; /* errno from get_user_pages() */ 125 int page_errors; /* errno from get_user_pages() */
122 126
123 /* BIO completion state */ 127 /*
124 spinlock_t bio_lock; /* protects BIO fields below */ 128 * pages[] (and any fields placed after it) are not zeroed out at
125 unsigned long refcount; /* direct_io_worker() and bios */ 129 * allocation time. Don't add new fields after pages[] unless you
126 struct bio *bio_list; /* singly linked via bi_private */ 130 * wish that they not be zeroed.
127 struct task_struct *waiter; /* waiting task (NULL if none) */ 131 */
128 132 struct page *pages[DIO_PAGES]; /* page buffer */
129 /* AIO related stuff */
130 struct kiocb *iocb; /* kiocb */
131 int is_async; /* is IO async ? */
132 int io_error; /* IO error in completion path */
133 ssize_t result; /* IO result */
134}; 133};
135 134
136/* 135/*
@@ -240,7 +239,8 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
240 if (dio->end_io && dio->result) 239 if (dio->end_io && dio->result)
241 dio->end_io(dio->iocb, offset, transferred, 240 dio->end_io(dio->iocb, offset, transferred,
242 dio->map_bh.b_private); 241 dio->map_bh.b_private);
243 if (dio->lock_type == DIO_LOCKING) 242
243 if (dio->flags & DIO_LOCKING)
244 /* lockdep: non-owner release */ 244 /* lockdep: non-owner release */
245 up_read_non_owner(&dio->inode->i_alloc_sem); 245 up_read_non_owner(&dio->inode->i_alloc_sem);
246 246
@@ -515,21 +515,24 @@ static int get_more_blocks(struct dio *dio)
515 map_bh->b_state = 0; 515 map_bh->b_state = 0;
516 map_bh->b_size = fs_count << dio->inode->i_blkbits; 516 map_bh->b_size = fs_count << dio->inode->i_blkbits;
517 517
518 /*
519 * For writes inside i_size on a DIO_SKIP_HOLES filesystem we
520 * forbid block creations: only overwrites are permitted.
521 * We will return early to the caller once we see an
522 * unmapped buffer head returned, and the caller will fall
523 * back to buffered I/O.
524 *
525 * Otherwise the decision is left to the get_blocks method,
526 * which may decide to handle it or also return an unmapped
527 * buffer head.
528 */
518 create = dio->rw & WRITE; 529 create = dio->rw & WRITE;
519 if (dio->lock_type == DIO_LOCKING) { 530 if (dio->flags & DIO_SKIP_HOLES) {
520 if (dio->block_in_file < (i_size_read(dio->inode) >> 531 if (dio->block_in_file < (i_size_read(dio->inode) >>
521 dio->blkbits)) 532 dio->blkbits))
522 create = 0; 533 create = 0;
523 } else if (dio->lock_type == DIO_NO_LOCKING) {
524 create = 0;
525 } 534 }
526 535
527 /*
528 * For writes inside i_size we forbid block creations: only
529 * overwrites are permitted. We fall back to buffered writes
530 * at a higher level for inside-i_size block-instantiating
531 * writes.
532 */
533 ret = (*dio->get_block)(dio->inode, fs_startblk, 536 ret = (*dio->get_block)(dio->inode, fs_startblk,
534 map_bh, create); 537 map_bh, create);
535 } 538 }
@@ -1039,7 +1042,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1039 * we can let i_mutex go now that its achieved its purpose 1042 * we can let i_mutex go now that its achieved its purpose
1040 * of protecting us from looking up uninitialized blocks. 1043 * of protecting us from looking up uninitialized blocks.
1041 */ 1044 */
1042 if ((rw == READ) && (dio->lock_type == DIO_LOCKING)) 1045 if (rw == READ && (dio->flags & DIO_LOCKING))
1043 mutex_unlock(&dio->inode->i_mutex); 1046 mutex_unlock(&dio->inode->i_mutex);
1044 1047
1045 /* 1048 /*
@@ -1086,30 +1089,28 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
1086 1089
1087/* 1090/*
1088 * This is a library function for use by filesystem drivers. 1091 * This is a library function for use by filesystem drivers.
1089 * The locking rules are governed by the dio_lock_type parameter.
1090 * 1092 *
1091 * DIO_NO_LOCKING (no locking, for raw block device access) 1093 * The locking rules are governed by the flags parameter:
1092 * For writes, i_mutex is not held on entry; it is never taken. 1094 * - if the flags value contains DIO_LOCKING we use a fancy locking
1095 * scheme for dumb filesystems.
1096 * For writes this function is called under i_mutex and returns with
1097 * i_mutex held, for reads, i_mutex is not held on entry, but it is
1098 * taken and dropped again before returning.
1099 * For reads and writes i_alloc_sem is taken in shared mode and released
1100 * on I/O completion (which may happen asynchronously after returning to
1101 * the caller).
1093 * 1102 *
1094 * DIO_LOCKING (simple locking for regular files) 1103 * - if the flags value does NOT contain DIO_LOCKING we don't use any
1095 * For writes we are called under i_mutex and return with i_mutex held, even 1104 * internal locking but rather rely on the filesystem to synchronize
1096 * though it is internally dropped. 1105 * direct I/O reads/writes versus each other and truncate.
1097 * For reads, i_mutex is not held on entry, but it is taken and dropped before 1106 * For reads and writes both i_mutex and i_alloc_sem are not held on
1098 * returning. 1107 * entry and are never taken.
1099 *
1100 * DIO_OWN_LOCKING (filesystem provides synchronisation and handling of
1101 * uninitialised data, allowing parallel direct readers and writers)
1102 * For writes we are called without i_mutex, return without it, never touch it.
1103 * For reads we are called under i_mutex and return with i_mutex held, even
1104 * though it may be internally dropped.
1105 *
1106 * Additional i_alloc_sem locking requirements described inline below.
1107 */ 1108 */
1108ssize_t 1109ssize_t
1109__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, 1110__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1110 struct block_device *bdev, const struct iovec *iov, loff_t offset, 1111 struct block_device *bdev, const struct iovec *iov, loff_t offset,
1111 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, 1112 unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
1112 int dio_lock_type) 1113 int flags)
1113{ 1114{
1114 int seg; 1115 int seg;
1115 size_t size; 1116 size_t size;
@@ -1120,8 +1121,6 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1120 ssize_t retval = -EINVAL; 1121 ssize_t retval = -EINVAL;
1121 loff_t end = offset; 1122 loff_t end = offset;
1122 struct dio *dio; 1123 struct dio *dio;
1123 int release_i_mutex = 0;
1124 int acquire_i_mutex = 0;
1125 1124
1126 if (rw & WRITE) 1125 if (rw & WRITE)
1127 rw = WRITE_ODIRECT_PLUG; 1126 rw = WRITE_ODIRECT_PLUG;
@@ -1151,48 +1150,41 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1151 } 1150 }
1152 } 1151 }
1153 1152
1154 dio = kzalloc(sizeof(*dio), GFP_KERNEL); 1153 dio = kmalloc(sizeof(*dio), GFP_KERNEL);
1155 retval = -ENOMEM; 1154 retval = -ENOMEM;
1156 if (!dio) 1155 if (!dio)
1157 goto out; 1156 goto out;
1158
1159 /* 1157 /*
1160 * For block device access DIO_NO_LOCKING is used, 1158 * Believe it or not, zeroing out the page array caused a .5%
1161 * neither readers nor writers do any locking at all 1159 * performance regression in a database benchmark. So, we take
1162 * For regular files using DIO_LOCKING, 1160 * care to only zero out what's needed.
1163 * readers need to grab i_mutex and i_alloc_sem
1164 * writers need to grab i_alloc_sem only (i_mutex is already held)
1165 * For regular files using DIO_OWN_LOCKING,
1166 * neither readers nor writers take any locks here
1167 */ 1161 */
1168 dio->lock_type = dio_lock_type; 1162 memset(dio, 0, offsetof(struct dio, pages));
1169 if (dio_lock_type != DIO_NO_LOCKING) { 1163
1164 dio->flags = flags;
1165 if (dio->flags & DIO_LOCKING) {
1170 /* watch out for a 0 len io from a tricksy fs */ 1166 /* watch out for a 0 len io from a tricksy fs */
1171 if (rw == READ && end > offset) { 1167 if (rw == READ && end > offset) {
1172 struct address_space *mapping; 1168 struct address_space *mapping =
1169 iocb->ki_filp->f_mapping;
1173 1170
1174 mapping = iocb->ki_filp->f_mapping; 1171 /* will be released by direct_io_worker */
1175 if (dio_lock_type != DIO_OWN_LOCKING) { 1172 mutex_lock(&inode->i_mutex);
1176 mutex_lock(&inode->i_mutex);
1177 release_i_mutex = 1;
1178 }
1179 1173
1180 retval = filemap_write_and_wait_range(mapping, offset, 1174 retval = filemap_write_and_wait_range(mapping, offset,
1181 end - 1); 1175 end - 1);
1182 if (retval) { 1176 if (retval) {
1177 mutex_unlock(&inode->i_mutex);
1183 kfree(dio); 1178 kfree(dio);
1184 goto out; 1179 goto out;
1185 } 1180 }
1186
1187 if (dio_lock_type == DIO_OWN_LOCKING) {
1188 mutex_unlock(&inode->i_mutex);
1189 acquire_i_mutex = 1;
1190 }
1191 } 1181 }
1192 1182
1193 if (dio_lock_type == DIO_LOCKING) 1183 /*
1194 /* lockdep: not the owner will release it */ 1184 * Will be released at I/O completion, possibly in a
1195 down_read_non_owner(&inode->i_alloc_sem); 1185 * different thread.
1186 */
1187 down_read_non_owner(&inode->i_alloc_sem);
1196 } 1188 }
1197 1189
1198 /* 1190 /*
@@ -1210,24 +1202,19 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
1210 /* 1202 /*
1211 * In case of error extending write may have instantiated a few 1203 * In case of error extending write may have instantiated a few
1212 * blocks outside i_size. Trim these off again for DIO_LOCKING. 1204 * blocks outside i_size. Trim these off again for DIO_LOCKING.
1213 * NOTE: DIO_NO_LOCK/DIO_OWN_LOCK callers have to handle this by 1205 *
1214 * it's own meaner. 1206 * NOTE: filesystems with their own locking have to handle this
1207 * on their own.
1215 */ 1208 */
1216 if (unlikely(retval < 0 && (rw & WRITE))) { 1209 if (dio->flags & DIO_LOCKING) {
1217 loff_t isize = i_size_read(inode); 1210 if (unlikely((rw & WRITE) && retval < 0)) {
1218 1211 loff_t isize = i_size_read(inode);
1219 if (end > isize && dio_lock_type == DIO_LOCKING) 1212 if (end > isize)
1220 vmtruncate(inode, isize); 1213 vmtruncate(inode, isize);
1214 }
1221 } 1215 }
1222 1216
1223 if (rw == READ && dio_lock_type == DIO_LOCKING)
1224 release_i_mutex = 0;
1225
1226out: 1217out:
1227 if (release_i_mutex)
1228 mutex_unlock(&inode->i_mutex);
1229 else if (acquire_i_mutex)
1230 mutex_lock(&inode->i_mutex);
1231 return retval; 1218 return retval;
1232} 1219}
1233EXPORT_SYMBOL(__blockdev_direct_IO); 1220EXPORT_SYMBOL(__blockdev_direct_IO);
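
The hunk above collapses the old three-way dio_lock_type (DIO_NO_LOCKING, DIO_LOCKING, DIO_OWN_LOCKING) into a flags word: pass DIO_LOCKING and the core takes i_mutex and i_alloc_sem for reads and releases them at I/O completion; pass 0 and the filesystem is expected to serialise direct I/O against itself and against truncate. A minimal sketch of a ->direct_IO method using the reworked call, assuming the usual address_space ->direct_IO prototype of this kernel and a hypothetical example_get_block() helper; the __blockdev_direct_IO arguments are exactly those shown in the hunk:

#include <linux/fs.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/buffer_head.h>

/* hypothetical block-mapping callback, not part of the patch */
static int example_get_block(struct inode *inode, sector_t iblock,
                             struct buffer_head *bh_result, int create);

static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
                                 const struct iovec *iov, loff_t offset,
                                 unsigned long nr_segs)
{
        struct inode *inode = iocb->ki_filp->f_mapping->host;

        /*
         * A conventional filesystem asks the core to lock for it; a
         * filesystem that synchronises direct I/O itself would pass 0
         * instead of DIO_LOCKING.
         */
        return __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
                                    iov, offset, nr_segs,
                                    example_get_block, NULL, DIO_LOCKING);
}
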
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index fc2bd05d3559..7516957273ed 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -721,5 +721,5 @@ const struct file_operations ext2_dir_operations = {
721#ifdef CONFIG_COMPAT 721#ifdef CONFIG_COMPAT
722 .compat_ioctl = ext2_compat_ioctl, 722 .compat_ioctl = ext2_compat_ioctl,
723#endif 723#endif
724 .fsync = simple_fsync, 724 .fsync = ext2_fsync,
725}; 725};
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index da318b0fa637..061914add3cf 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -155,6 +155,7 @@ extern void ext2_write_super (struct super_block *);
155extern const struct file_operations ext2_dir_operations; 155extern const struct file_operations ext2_dir_operations;
156 156
157/* file.c */ 157/* file.c */
158extern int ext2_fsync(struct file *file, struct dentry *dentry, int datasync);
158extern const struct inode_operations ext2_file_inode_operations; 159extern const struct inode_operations ext2_file_inode_operations;
159extern const struct file_operations ext2_file_operations; 160extern const struct file_operations ext2_file_operations;
160extern const struct file_operations ext2_xip_file_operations; 161extern const struct file_operations ext2_xip_file_operations;
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index a2f3afd1a1c1..586e3589d4c2 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -19,6 +19,7 @@
19 */ 19 */
20 20
21#include <linux/time.h> 21#include <linux/time.h>
22#include <linux/pagemap.h>
22#include "ext2.h" 23#include "ext2.h"
23#include "xattr.h" 24#include "xattr.h"
24#include "acl.h" 25#include "acl.h"
@@ -38,6 +39,22 @@ static int ext2_release_file (struct inode * inode, struct file * filp)
38 return 0; 39 return 0;
39} 40}
40 41
42int ext2_fsync(struct file *file, struct dentry *dentry, int datasync)
43{
44 int ret;
45 struct super_block *sb = dentry->d_inode->i_sb;
46 struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
47
48 ret = simple_fsync(file, dentry, datasync);
49 if (ret == -EIO || test_and_clear_bit(AS_EIO, &mapping->flags)) {
50 /* We don't really know where the IO error happened... */
51 ext2_error(sb, __func__,
52 "detected IO error when writing metadata buffers");
53 ret = -EIO;
54 }
55 return ret;
56}
57
41/* 58/*
42 * We have mostly NULL's here: the current defaults are ok for 59 * We have mostly NULL's here: the current defaults are ok for
43 * the ext2 filesystem. 60 * the ext2 filesystem.
@@ -55,7 +72,7 @@ const struct file_operations ext2_file_operations = {
55 .mmap = generic_file_mmap, 72 .mmap = generic_file_mmap,
56 .open = generic_file_open, 73 .open = generic_file_open,
57 .release = ext2_release_file, 74 .release = ext2_release_file,
58 .fsync = simple_fsync, 75 .fsync = ext2_fsync,
59 .splice_read = generic_file_splice_read, 76 .splice_read = generic_file_splice_read,
60 .splice_write = generic_file_splice_write, 77 .splice_write = generic_file_splice_write,
61}; 78};
@@ -72,7 +89,7 @@ const struct file_operations ext2_xip_file_operations = {
72 .mmap = xip_file_mmap, 89 .mmap = xip_file_mmap,
73 .open = generic_file_open, 90 .open = generic_file_open,
74 .release = ext2_release_file, 91 .release = ext2_release_file,
75 .fsync = simple_fsync, 92 .fsync = ext2_fsync,
76}; 93};
77#endif 94#endif
78 95
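
The new ext2_fsync() wraps simple_fsync() and then checks the block device mapping for AS_EIO, because ext2 writes its metadata (bitmaps, group descriptors, indirect blocks) through sb->s_bdev's page cache, where simple_fsync() alone would never see a failure. From userspace the visible effect is only that fsync() can now return EIO for such errors; a small illustration (nothing ext2-specific in the code, the file name is whatever the caller passes):

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        int fd;

        if (argc < 2)
                return 1;
        fd = open(argv[1], O_WRONLY);
        if (fd < 0)
                return 1;
        if (write(fd, "x", 1) != 1)
                perror("write");
        if (fsync(fd) < 0)      /* a failed metadata write now shows up here as EIO */
                fprintf(stderr, "fsync: %s\n", strerror(errno));
        close(fd);
        return 0;
}
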
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 1388802b7803..f9cb54a585ce 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1105,9 +1105,30 @@ failed_sbi:
1105 return ret; 1105 return ret;
1106} 1106}
1107 1107
1108static void ext2_clear_super_error(struct super_block *sb)
1109{
1110 struct buffer_head *sbh = EXT2_SB(sb)->s_sbh;
1111
1112 if (buffer_write_io_error(sbh)) {
1113 /*
1114 * Oh, dear. A previous attempt to write the
1115 * superblock failed. This could happen because the
1116 * USB device was yanked out. Or it could happen to
1117 * be a transient write error and maybe the block will
1118 * be remapped. Nothing we can do but to retry the
1119 * write and hope for the best.
1120 */
1121 printk(KERN_ERR "EXT2-fs: %s previous I/O error to "
1122 "superblock detected", sb->s_id);
1123 clear_buffer_write_io_error(sbh);
1124 set_buffer_uptodate(sbh);
1125 }
1126}
1127
1108static void ext2_commit_super (struct super_block * sb, 1128static void ext2_commit_super (struct super_block * sb,
1109 struct ext2_super_block * es) 1129 struct ext2_super_block * es)
1110{ 1130{
1131 ext2_clear_super_error(sb);
1111 es->s_wtime = cpu_to_le32(get_seconds()); 1132 es->s_wtime = cpu_to_le32(get_seconds());
1112 mark_buffer_dirty(EXT2_SB(sb)->s_sbh); 1133 mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
1113 sb->s_dirt = 0; 1134 sb->s_dirt = 0;
@@ -1115,6 +1136,7 @@ static void ext2_commit_super (struct super_block * sb,
1115 1136
1116static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es) 1137static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
1117{ 1138{
1139 ext2_clear_super_error(sb);
1118 es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb)); 1140 es->s_free_blocks_count = cpu_to_le32(ext2_count_free_blocks(sb));
1119 es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb)); 1141 es->s_free_inodes_count = cpu_to_le32(ext2_count_free_inodes(sb));
1120 es->s_wtime = cpu_to_le32(get_seconds()); 1142 es->s_wtime = cpu_to_le32(get_seconds());
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 7db0979c6b72..e6efdfa0f6db 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -44,7 +44,8 @@ struct fat_mount_options {
44 nocase:1, /* Does this need case conversion? 0=need case conversion*/ 44 nocase:1, /* Does this need case conversion? 0=need case conversion*/
45 usefree:1, /* Use free_clusters for FAT32 */ 45 usefree:1, /* Use free_clusters for FAT32 */
46 tz_utc:1, /* Filesystem timestamps are in UTC */ 46 tz_utc:1, /* Filesystem timestamps are in UTC */
47 rodir:1; /* allow ATTR_RO for directory */ 47 rodir:1, /* allow ATTR_RO for directory */
48 discard:1; /* Issue discard requests on deletions */
48}; 49};
49 50
50#define FAT_HASH_BITS 8 51#define FAT_HASH_BITS 8
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index a81037721a6f..81184d3b75a3 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -566,16 +566,21 @@ int fat_free_clusters(struct inode *inode, int cluster)
566 goto error; 566 goto error;
567 } 567 }
568 568
569 /* 569 if (sbi->options.discard) {
570 * Issue discard for the sectors we no longer care about, 570 /*
571 * batching contiguous clusters into one request 571 * Issue discard for the sectors we no longer
572 */ 572 * care about, batching contiguous clusters
573 if (cluster != fatent.entry + 1) { 573 * into one request
574 int nr_clus = fatent.entry - first_cl + 1; 574 */
575 575 if (cluster != fatent.entry + 1) {
576 sb_issue_discard(sb, fat_clus_to_blknr(sbi, first_cl), 576 int nr_clus = fatent.entry - first_cl + 1;
577 nr_clus * sbi->sec_per_clus); 577
578 first_cl = cluster; 578 sb_issue_discard(sb,
579 fat_clus_to_blknr(sbi, first_cl),
580 nr_clus * sbi->sec_per_clus);
581
582 first_cl = cluster;
583 }
579 } 584 }
580 585
581 ops->ent_put(&fatent, FAT_ENT_FREE); 586 ops->ent_put(&fatent, FAT_ENT_FREE);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 76b7961ab663..14da530b05ca 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -858,6 +858,8 @@ static int fat_show_options(struct seq_file *m, struct vfsmount *mnt)
858 seq_puts(m, ",errors=panic"); 858 seq_puts(m, ",errors=panic");
859 else 859 else
860 seq_puts(m, ",errors=remount-ro"); 860 seq_puts(m, ",errors=remount-ro");
861 if (opts->discard)
862 seq_puts(m, ",discard");
861 863
862 return 0; 864 return 0;
863} 865}
@@ -871,7 +873,7 @@ enum {
871 Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes, 873 Opt_shortname_winnt, Opt_shortname_mixed, Opt_utf8_no, Opt_utf8_yes,
872 Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes, 874 Opt_uni_xl_no, Opt_uni_xl_yes, Opt_nonumtail_no, Opt_nonumtail_yes,
873 Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont, 875 Opt_obsolate, Opt_flush, Opt_tz_utc, Opt_rodir, Opt_err_cont,
874 Opt_err_panic, Opt_err_ro, Opt_err, 876 Opt_err_panic, Opt_err_ro, Opt_discard, Opt_err,
875}; 877};
876 878
877static const match_table_t fat_tokens = { 879static const match_table_t fat_tokens = {
@@ -899,6 +901,7 @@ static const match_table_t fat_tokens = {
899 {Opt_err_cont, "errors=continue"}, 901 {Opt_err_cont, "errors=continue"},
900 {Opt_err_panic, "errors=panic"}, 902 {Opt_err_panic, "errors=panic"},
901 {Opt_err_ro, "errors=remount-ro"}, 903 {Opt_err_ro, "errors=remount-ro"},
904 {Opt_discard, "discard"},
902 {Opt_obsolate, "conv=binary"}, 905 {Opt_obsolate, "conv=binary"},
903 {Opt_obsolate, "conv=text"}, 906 {Opt_obsolate, "conv=text"},
904 {Opt_obsolate, "conv=auto"}, 907 {Opt_obsolate, "conv=auto"},
@@ -1136,6 +1139,9 @@ static int parse_options(char *options, int is_vfat, int silent, int *debug,
1136 case Opt_rodir: 1139 case Opt_rodir:
1137 opts->rodir = 1; 1140 opts->rodir = 1;
1138 break; 1141 break;
1142 case Opt_discard:
1143 opts->discard = 1;
1144 break;
1139 1145
1140 /* obsolete mount options */ 1146 /* obsolete mount options */
1141 case Opt_obsolate: 1147 case Opt_obsolate:
diff --git a/fs/fat/misc.c b/fs/fat/misc.c
index 0f55f5cb732f..d3da05f26465 100644
--- a/fs/fat/misc.c
+++ b/fs/fat/misc.c
@@ -9,6 +9,7 @@
9#include <linux/module.h> 9#include <linux/module.h>
10#include <linux/fs.h> 10#include <linux/fs.h>
11#include <linux/buffer_head.h> 11#include <linux/buffer_head.h>
12#include <linux/time.h>
12#include "fat.h" 13#include "fat.h"
13 14
14/* 15/*
@@ -157,10 +158,6 @@ extern struct timezone sys_tz;
157#define SECS_PER_MIN 60 158#define SECS_PER_MIN 60
158#define SECS_PER_HOUR (60 * 60) 159#define SECS_PER_HOUR (60 * 60)
159#define SECS_PER_DAY (SECS_PER_HOUR * 24) 160#define SECS_PER_DAY (SECS_PER_HOUR * 24)
160#define UNIX_SECS_1980 315532800L
161#if BITS_PER_LONG == 64
162#define UNIX_SECS_2108 4354819200L
163#endif
164/* days between 1.1.70 and 1.1.80 (2 leap days) */ 161/* days between 1.1.70 and 1.1.80 (2 leap days) */
165#define DAYS_DELTA (365 * 10 + 2) 162#define DAYS_DELTA (365 * 10 + 2)
166/* 120 (2100 - 1980) isn't leap year */ 163/* 120 (2100 - 1980) isn't leap year */
@@ -213,58 +210,35 @@ void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
213void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts, 210void fat_time_unix2fat(struct msdos_sb_info *sbi, struct timespec *ts,
214 __le16 *time, __le16 *date, u8 *time_cs) 211 __le16 *time, __le16 *date, u8 *time_cs)
215{ 212{
216 time_t second = ts->tv_sec; 213 struct tm tm;
217 time_t day, leap_day, month, year; 214 time_to_tm(ts->tv_sec, sbi->options.tz_utc ? 0 :
215 -sys_tz.tz_minuteswest * 60, &tm);
218 216
219 if (!sbi->options.tz_utc) 217 /* FAT can only support year between 1980 to 2107 */
220 second -= sys_tz.tz_minuteswest * SECS_PER_MIN; 218 if (tm.tm_year < 1980 - 1900) {
221
222 /* Jan 1 GMT 00:00:00 1980. But what about another time zone? */
223 if (second < UNIX_SECS_1980) {
224 *time = 0; 219 *time = 0;
225 *date = cpu_to_le16((0 << 9) | (1 << 5) | 1); 220 *date = cpu_to_le16((0 << 9) | (1 << 5) | 1);
226 if (time_cs) 221 if (time_cs)
227 *time_cs = 0; 222 *time_cs = 0;
228 return; 223 return;
229 } 224 }
230#if BITS_PER_LONG == 64 225 if (tm.tm_year > 2107 - 1900) {
231 if (second >= UNIX_SECS_2108) {
232 *time = cpu_to_le16((23 << 11) | (59 << 5) | 29); 226 *time = cpu_to_le16((23 << 11) | (59 << 5) | 29);
233 *date = cpu_to_le16((127 << 9) | (12 << 5) | 31); 227 *date = cpu_to_le16((127 << 9) | (12 << 5) | 31);
234 if (time_cs) 228 if (time_cs)
235 *time_cs = 199; 229 *time_cs = 199;
236 return; 230 return;
237 } 231 }
238#endif
239 232
240 day = second / SECS_PER_DAY - DAYS_DELTA; 233 /* from 1900 -> from 1980 */
241 year = day / 365; 234 tm.tm_year -= 80;
242 leap_day = (year + 3) / 4; 235 /* 0~11 -> 1~12 */
243 if (year > YEAR_2100) /* 2100 isn't leap year */ 236 tm.tm_mon++;
244 leap_day--; 237 /* 0~59 -> 0~29(2sec counts) */
245 if (year * 365 + leap_day > day) 238 tm.tm_sec >>= 1;
246 year--;
247 leap_day = (year + 3) / 4;
248 if (year > YEAR_2100) /* 2100 isn't leap year */
249 leap_day--;
250 day -= year * 365 + leap_day;
251
252 if (IS_LEAP_YEAR(year) && day == days_in_year[3]) {
253 month = 2;
254 } else {
255 if (IS_LEAP_YEAR(year) && day > days_in_year[3])
256 day--;
257 for (month = 1; month < 12; month++) {
258 if (days_in_year[month + 1] > day)
259 break;
260 }
261 }
262 day -= days_in_year[month];
263 239
264 *time = cpu_to_le16(((second / SECS_PER_HOUR) % 24) << 11 240 *time = cpu_to_le16(tm.tm_hour << 11 | tm.tm_min << 5 | tm.tm_sec);
265 | ((second / SECS_PER_MIN) % 60) << 5 241 *date = cpu_to_le16(tm.tm_year << 9 | tm.tm_mon << 5 | tm.tm_mday);
266 | (second % SECS_PER_MIN) >> 1);
267 *date = cpu_to_le16((year << 9) | (month << 5) | (day + 1));
268 if (time_cs) 242 if (time_cs)
269 *time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000; 243 *time_cs = (ts->tv_sec & 1) * 100 + ts->tv_nsec / 10000000;
270} 244}
@@ -285,4 +259,3 @@ int fat_sync_bhs(struct buffer_head **bhs, int nr_bhs)
285 } 259 }
286 return err; 260 return err;
287} 261}
288
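
The rewritten fat_time_unix2fat() lets time_to_tm() do the calendar arithmetic and only packs the FAT on-disk fields: the date word holds years-since-1980 << 9 | month << 5 | day, the time word holds hours << 11 | minutes << 5 | seconds/2, with out-of-range years clamped to the 1980..2107 window as shown above. A userspace sketch of the same packing, using gmtime_r() in place of the kernel helper and ignoring the clamping:

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
        time_t now = time(NULL);
        struct tm tm;
        uint16_t fat_date, fat_time;

        gmtime_r(&now, &tm);
        /* FAT years count from 1980; struct tm years count from 1900 */
        fat_date = (tm.tm_year - 80) << 9 | (tm.tm_mon + 1) << 5 | tm.tm_mday;
        /* seconds are stored with 2-second granularity */
        fat_time = tm.tm_hour << 11 | tm.tm_min << 5 | (tm.tm_sec >> 1);

        printf("date=0x%04x time=0x%04x\n", fat_date, fat_time);
        return 0;
}
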
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index e590242fa41a..3221a0c7944e 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -91,7 +91,7 @@ EXPORT_SYMBOL(fscache_object_destroy);
91 */ 91 */
92static struct fscache_object *fscache_objlist_lookup(loff_t *_pos) 92static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
93{ 93{
94 struct fscache_object *pobj, *obj, *minobj = NULL; 94 struct fscache_object *pobj, *obj = NULL, *minobj = NULL;
95 struct rb_node *p; 95 struct rb_node *p;
96 unsigned long pos; 96 unsigned long pos;
97 97
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index f2feaa06bf26..cadc4ce48656 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -14,6 +14,7 @@
14#include <linux/magic.h> 14#include <linux/magic.h>
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/smp_lock.h> 16#include <linux/smp_lock.h>
17#include <linux/bitmap.h>
17 18
18/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */ 19/* Mark the filesystem dirty, so that chkdsk checks it when os/2 booted */
19 20
@@ -115,15 +116,13 @@ static void hpfs_put_super(struct super_block *s)
115unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno) 116unsigned hpfs_count_one_bitmap(struct super_block *s, secno secno)
116{ 117{
117 struct quad_buffer_head qbh; 118 struct quad_buffer_head qbh;
118 unsigned *bits; 119 unsigned long *bits;
119 unsigned i, count; 120 unsigned count;
120 if (!(bits = hpfs_map_4sectors(s, secno, &qbh, 4))) return 0; 121
121 count = 0; 122 bits = hpfs_map_4sectors(s, secno, &qbh, 4);
122 for (i = 0; i < 2048 / sizeof(unsigned); i++) { 123 if (!bits)
123 unsigned b; 124 return 0;
124 if (!bits[i]) continue; 125 count = bitmap_weight(bits, 2048 * BITS_PER_BYTE);
125 for (b = bits[i]; b; b>>=1) count += b & 1;
126 }
127 hpfs_brelse4(&qbh); 126 hpfs_brelse4(&qbh);
128 return count; 127 return count;
129} 128}
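
hpfs_count_one_bitmap() now hands the whole 4-sector (2048-byte) map to bitmap_weight(); note that its second argument is a bit count, hence 2048 * BITS_PER_BYTE. A userspace sketch of the same word-by-word popcount, purely illustrative:

#include <stdio.h>
#include <string.h>

static unsigned count_set_bits(const unsigned long *bits, size_t nbytes)
{
        unsigned count = 0;
        size_t i;

        for (i = 0; i < nbytes / sizeof(unsigned long); i++)
                count += __builtin_popcountl(bits[i]);
        return count;
}

int main(void)
{
        unsigned long map[2048 / sizeof(unsigned long)];

        memset(map, 0xff, sizeof(map));
        printf("%u bits set\n", count_set_bits(map, sizeof(map)));  /* 16384 */
        return 0;
}
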
diff --git a/fs/jffs2/gc.c b/fs/jffs2/gc.c
index 090c556ffed2..3b6f2fa12cff 100644
--- a/fs/jffs2/gc.c
+++ b/fs/jffs2/gc.c
@@ -700,7 +700,8 @@ static int jffs2_garbage_collect_metadata(struct jffs2_sb_info *c, struct jffs2_
700 struct jffs2_raw_inode ri; 700 struct jffs2_raw_inode ri;
701 struct jffs2_node_frag *last_frag; 701 struct jffs2_node_frag *last_frag;
702 union jffs2_device_node dev; 702 union jffs2_device_node dev;
703 char *mdata = NULL, mdatalen = 0; 703 char *mdata = NULL;
704 int mdatalen = 0;
704 uint32_t alloclen, ilen; 705 uint32_t alloclen, ilen;
705 int ret; 706 int ret;
706 707
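
The old declaration char *mdata = NULL, mdatalen = 0; binds the * to mdata only, so mdatalen was a plain char and any length above 127 would have been truncated; the fix gives it its own int declaration. A two-line illustration of the pitfall:

#include <stdio.h>

int main(void)
{
        char *mdata = NULL, mdatalen = 0;       /* mdatalen is a char, not a char * */

        (void)mdata;
        printf("sizeof(mdatalen) = %zu\n", sizeof(mdatalen));   /* prints 1 */
        return 0;
}
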
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index 378991cfe40f..e22de8397b74 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1284,7 +1284,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
1284 f->target = NULL; 1284 f->target = NULL;
1285 mutex_unlock(&f->sem); 1285 mutex_unlock(&f->sem);
1286 jffs2_do_clear_inode(c, f); 1286 jffs2_do_clear_inode(c, f);
1287 return -ret; 1287 return ret;
1288 } 1288 }
1289 1289
1290 f->target[je32_to_cpu(latest_node->csize)] = '\0'; 1290 f->target[je32_to_cpu(latest_node->csize)] = '\0';
diff --git a/fs/jffs2/summary.c b/fs/jffs2/summary.c
index 6caf1e1ee26d..800171dca53b 100644
--- a/fs/jffs2/summary.c
+++ b/fs/jffs2/summary.c
@@ -23,7 +23,7 @@
23 23
24int jffs2_sum_init(struct jffs2_sb_info *c) 24int jffs2_sum_init(struct jffs2_sb_info *c)
25{ 25{
26 uint32_t sum_size = max_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE); 26 uint32_t sum_size = min_t(uint32_t, c->sector_size, MAX_SUMMARY_SIZE);
27 27
28 c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL); 28 c->summary = kzalloc(sizeof(struct jffs2_summary), GFP_KERNEL);
29 29
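
jffs2_sum_init() is meant to cap the summary collection buffer at MAX_SUMMARY_SIZE, so the clamp has to be min_t(); with max_t() an erase block larger than the cap made sum_size the full sector size. A trivial illustration with made-up numbers (MAX_SUMMARY_SIZE's real value is not shown in the hunk):

#include <stdio.h>

#define MAX_SUMMARY_SIZE (32 * 1024)            /* illustrative value only */

static unsigned int clamp_min(unsigned int a, unsigned int b) { return a < b ? a : b; }
static unsigned int clamp_max(unsigned int a, unsigned int b) { return a > b ? a : b; }

int main(void)
{
        unsigned int sector_size = 128 * 1024;  /* a large erase block */

        printf("max_t: %u, min_t: %u\n",
               clamp_max(sector_size, MAX_SUMMARY_SIZE),   /* cap ignored */
               clamp_min(sector_size, MAX_SUMMARY_SIZE));  /* capped */
        return 0;
}
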
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index deb2b132ae5e..3dae4a13f6e4 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -547,6 +547,9 @@ bail:
547 * 547 *
548 * called like this: dio->get_blocks(dio->inode, fs_startblk, 548 * called like this: dio->get_blocks(dio->inode, fs_startblk,
549 * fs_count, map_bh, dio->rw == WRITE); 549 * fs_count, map_bh, dio->rw == WRITE);
550 *
551 * Note that we never bother to allocate blocks here, and thus ignore the
552 * create argument.
550 */ 553 */
551static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, 554static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
552 struct buffer_head *bh_result, int create) 555 struct buffer_head *bh_result, int create)
@@ -563,14 +566,6 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
563 566
564 inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode)); 567 inode_blocks = ocfs2_blocks_for_bytes(inode->i_sb, i_size_read(inode));
565 568
566 /*
567 * Any write past EOF is not allowed because we'd be extending.
568 */
569 if (create && (iblock + max_blocks) > inode_blocks) {
570 ret = -EIO;
571 goto bail;
572 }
573
574 /* This figures out the size of the next contiguous block, and 569 /* This figures out the size of the next contiguous block, and
575 * our logical offset */ 570 * our logical offset */
576 ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno, 571 ret = ocfs2_extent_map_get_blocks(inode, iblock, &p_blkno,
@@ -582,15 +577,6 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
582 goto bail; 577 goto bail;
583 } 578 }
584 579
585 if (!ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)) && !p_blkno && create) {
586 ocfs2_error(inode->i_sb,
587 "Inode %llu has a hole at block %llu\n",
588 (unsigned long long)OCFS2_I(inode)->ip_blkno,
589 (unsigned long long)iblock);
590 ret = -EROFS;
591 goto bail;
592 }
593
594 /* We should already CoW the refcounted extent. */ 580 /* We should already CoW the refcounted extent. */
595 BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED); 581 BUG_ON(ext_flags & OCFS2_EXT_REFCOUNTED);
596 /* 582 /*
@@ -601,20 +587,8 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock,
601 */ 587 */
602 if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN)) 588 if (p_blkno && !(ext_flags & OCFS2_EXT_UNWRITTEN))
603 map_bh(bh_result, inode->i_sb, p_blkno); 589 map_bh(bh_result, inode->i_sb, p_blkno);
604 else { 590 else
605 /*
606 * ocfs2_prepare_inode_for_write() should have caught
607 * the case where we'd be filling a hole and triggered
608 * a buffered write instead.
609 */
610 if (create) {
611 ret = -EIO;
612 mlog_errno(ret);
613 goto bail;
614 }
615
616 clear_buffer_mapped(bh_result); 591 clear_buffer_mapped(bh_result);
617 }
618 592
619 /* make sure we don't map more than max_blocks blocks here as 593 /* make sure we don't map more than max_blocks blocks here as
620 that's all the kernel will handle at this point. */ 594 that's all the kernel will handle at this point. */
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 4df4a464a919..18d5cc62d8ed 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2266,7 +2266,7 @@ static const struct inode_operations proc_attr_dir_inode_operations = {
2266 2266
2267#endif 2267#endif
2268 2268
2269#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 2269#ifdef CONFIG_ELF_CORE
2270static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf, 2270static ssize_t proc_coredump_filter_read(struct file *file, char __user *buf,
2271 size_t count, loff_t *ppos) 2271 size_t count, loff_t *ppos)
2272{ 2272{
@@ -2623,7 +2623,7 @@ static const struct pid_entry tgid_base_stuff[] = {
2623#ifdef CONFIG_FAULT_INJECTION 2623#ifdef CONFIG_FAULT_INJECTION
2624 REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations), 2624 REG("make-it-fail", S_IRUGO|S_IWUSR, proc_fault_inject_operations),
2625#endif 2625#endif
2626#if defined(USE_ELF_CORE_DUMP) && defined(CONFIG_ELF_CORE) 2626#ifdef CONFIG_ELF_CORE
2627 REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations), 2627 REG("coredump_filter", S_IRUGO|S_IWUSR, proc_coredump_filter_operations),
2628#endif 2628#endif
2629#ifdef CONFIG_TASK_IO_ACCOUNTING 2629#ifdef CONFIG_TASK_IO_ACCOUNTING
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index fa678abc9db1..480cb1065eec 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -429,7 +429,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
429 unsigned int ino; 429 unsigned int ino;
430 430
431 ino = de->low_ino; 431 ino = de->low_ino;
432 de_get(de); 432 pde_get(de);
433 spin_unlock(&proc_subdir_lock); 433 spin_unlock(&proc_subdir_lock);
434 error = -EINVAL; 434 error = -EINVAL;
435 inode = proc_get_inode(dir->i_sb, ino, de); 435 inode = proc_get_inode(dir->i_sb, ino, de);
@@ -445,7 +445,7 @@ out_unlock:
445 return NULL; 445 return NULL;
446 } 446 }
447 if (de) 447 if (de)
448 de_put(de); 448 pde_put(de);
449 return ERR_PTR(error); 449 return ERR_PTR(error);
450} 450}
451 451
@@ -509,17 +509,17 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
509 struct proc_dir_entry *next; 509 struct proc_dir_entry *next;
510 510
511 /* filldir passes info to user space */ 511 /* filldir passes info to user space */
512 de_get(de); 512 pde_get(de);
513 spin_unlock(&proc_subdir_lock); 513 spin_unlock(&proc_subdir_lock);
514 if (filldir(dirent, de->name, de->namelen, filp->f_pos, 514 if (filldir(dirent, de->name, de->namelen, filp->f_pos,
515 de->low_ino, de->mode >> 12) < 0) { 515 de->low_ino, de->mode >> 12) < 0) {
516 de_put(de); 516 pde_put(de);
517 goto out; 517 goto out;
518 } 518 }
519 spin_lock(&proc_subdir_lock); 519 spin_lock(&proc_subdir_lock);
520 filp->f_pos++; 520 filp->f_pos++;
521 next = de->next; 521 next = de->next;
522 de_put(de); 522 pde_put(de);
523 de = next; 523 de = next;
524 } while (de); 524 } while (de);
525 spin_unlock(&proc_subdir_lock); 525 spin_unlock(&proc_subdir_lock);
@@ -763,7 +763,7 @@ out:
763 return NULL; 763 return NULL;
764} 764}
765 765
766void free_proc_entry(struct proc_dir_entry *de) 766static void free_proc_entry(struct proc_dir_entry *de)
767{ 767{
768 unsigned int ino = de->low_ino; 768 unsigned int ino = de->low_ino;
769 769
@@ -777,6 +777,12 @@ void free_proc_entry(struct proc_dir_entry *de)
777 kfree(de); 777 kfree(de);
778} 778}
779 779
780void pde_put(struct proc_dir_entry *pde)
781{
782 if (atomic_dec_and_test(&pde->count))
783 free_proc_entry(pde);
784}
785
780/* 786/*
781 * Remove a /proc entry and free it if it's not currently in use. 787 * Remove a /proc entry and free it if it's not currently in use.
782 */ 788 */
@@ -845,6 +851,5 @@ continue_removing:
845 WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory " 851 WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
846 "'%s/%s', leaking at least '%s'\n", __func__, 852 "'%s/%s', leaking at least '%s'\n", __func__,
847 de->parent->name, de->name, de->subdir->name); 853 de->parent->name, de->name, de->subdir->name);
848 if (atomic_dec_and_test(&de->count)) 854 pde_put(de);
849 free_proc_entry(de);
850} 855}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index d78ade305541..445a02bcaab3 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -24,29 +24,6 @@
24 24
25#include "internal.h" 25#include "internal.h"
26 26
27struct proc_dir_entry *de_get(struct proc_dir_entry *de)
28{
29 atomic_inc(&de->count);
30 return de;
31}
32
33/*
34 * Decrements the use count and checks for deferred deletion.
35 */
36void de_put(struct proc_dir_entry *de)
37{
38 if (!atomic_read(&de->count)) {
39 printk("de_put: entry %s already free!\n", de->name);
40 return;
41 }
42
43 if (atomic_dec_and_test(&de->count))
44 free_proc_entry(de);
45}
46
47/*
48 * Decrement the use count of the proc_dir_entry.
49 */
50static void proc_delete_inode(struct inode *inode) 27static void proc_delete_inode(struct inode *inode)
51{ 28{
52 struct proc_dir_entry *de; 29 struct proc_dir_entry *de;
@@ -59,7 +36,7 @@ static void proc_delete_inode(struct inode *inode)
59 /* Let go of any associated proc directory entry */ 36 /* Let go of any associated proc directory entry */
60 de = PROC_I(inode)->pde; 37 de = PROC_I(inode)->pde;
61 if (de) 38 if (de)
62 de_put(de); 39 pde_put(de);
63 if (PROC_I(inode)->sysctl) 40 if (PROC_I(inode)->sysctl)
64 sysctl_head_put(PROC_I(inode)->sysctl); 41 sysctl_head_put(PROC_I(inode)->sysctl);
65 clear_inode(inode); 42 clear_inode(inode);
@@ -480,7 +457,7 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
480 } 457 }
481 unlock_new_inode(inode); 458 unlock_new_inode(inode);
482 } else 459 } else
483 de_put(de); 460 pde_put(de);
484 return inode; 461 return inode;
485} 462}
486 463
@@ -495,7 +472,7 @@ int proc_fill_super(struct super_block *s)
495 s->s_op = &proc_sops; 472 s->s_op = &proc_sops;
496 s->s_time_gran = 1; 473 s->s_time_gran = 1;
497 474
498 de_get(&proc_root); 475 pde_get(&proc_root);
499 root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root); 476 root_inode = proc_get_inode(s, PROC_ROOT_INO, &proc_root);
500 if (!root_inode) 477 if (!root_inode)
501 goto out_no_root; 478 goto out_no_root;
@@ -509,6 +486,6 @@ int proc_fill_super(struct super_block *s)
509out_no_root: 486out_no_root:
510 printk("proc_read_super: get root inode failed\n"); 487 printk("proc_read_super: get root inode failed\n");
511 iput(root_inode); 488 iput(root_inode);
512 de_put(&proc_root); 489 pde_put(&proc_root);
513 return -ENOMEM; 490 return -ENOMEM;
514} 491}
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 753ca37002c8..1f24a3eddd12 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -61,8 +61,6 @@ extern const struct file_operations proc_pagemap_operations;
61extern const struct file_operations proc_net_operations; 61extern const struct file_operations proc_net_operations;
62extern const struct inode_operations proc_net_inode_operations; 62extern const struct inode_operations proc_net_inode_operations;
63 63
64void free_proc_entry(struct proc_dir_entry *de);
65
66void proc_init_inodecache(void); 64void proc_init_inodecache(void);
67 65
68static inline struct pid *proc_pid(struct inode *inode) 66static inline struct pid *proc_pid(struct inode *inode)
@@ -101,8 +99,12 @@ unsigned long task_vsize(struct mm_struct *);
101int task_statm(struct mm_struct *, int *, int *, int *, int *); 99int task_statm(struct mm_struct *, int *, int *, int *, int *);
102void task_mem(struct seq_file *, struct mm_struct *); 100void task_mem(struct seq_file *, struct mm_struct *);
103 101
104struct proc_dir_entry *de_get(struct proc_dir_entry *de); 102static inline struct proc_dir_entry *pde_get(struct proc_dir_entry *pde)
105void de_put(struct proc_dir_entry *de); 103{
104 atomic_inc(&pde->count);
105 return pde;
106}
107void pde_put(struct proc_dir_entry *pde);
106 108
107extern struct vfsmount *proc_mnt; 109extern struct vfsmount *proc_mnt;
108int proc_fill_super(struct super_block *); 110int proc_fill_super(struct super_block *);
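
de_get()/de_put() become pde_get(), a trivial inline atomic_inc(), and pde_put(), which frees the entry on the last reference; the callers in generic.c follow the usual pattern of taking a reference before dropping proc_subdir_lock and putting it once the entry has been used. A generic userspace sketch of that last-put-frees pattern (a plain counter standing in for the kernel atomic_t, names are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
        int count;
        char name[16];
};

static struct entry *entry_get(struct entry *e)
{
        e->count++;
        return e;
}

static void entry_put(struct entry *e)
{
        if (--e->count == 0) {          /* last reference drops: free */
                printf("freeing %s\n", e->name);
                free(e);
        }
}

int main(void)
{
        struct entry *e = calloc(1, sizeof(*e));

        if (!e)
                return 1;
        strcpy(e->name, "demo");
        e->count = 1;                   /* creator's reference */
        entry_get(e);                   /* e.g. reference held by an inode */
        entry_put(e);                   /* inode lets go */
        entry_put(e);                   /* creator lets go: freed here */
        return 0;
}
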
diff --git a/fs/qnx4/bitmap.c b/fs/qnx4/bitmap.c
index 32f5d131a644..22e0d60e53ef 100644
--- a/fs/qnx4/bitmap.c
+++ b/fs/qnx4/bitmap.c
@@ -17,13 +17,6 @@
17#include <linux/bitops.h> 17#include <linux/bitops.h>
18#include "qnx4.h" 18#include "qnx4.h"
19 19
20#if 0
21int qnx4_new_block(struct super_block *sb)
22{
23 return 0;
24}
25#endif /* 0 */
26
27static void count_bits(register const char *bmPart, register int size, 20static void count_bits(register const char *bmPart, register int size,
28 int *const tf) 21 int *const tf)
29{ 22{
@@ -35,22 +28,7 @@ static void count_bits(register const char *bmPart, register int size,
35 } 28 }
36 do { 29 do {
37 b = *bmPart++; 30 b = *bmPart++;
38 if ((b & 1) == 0) 31 tot += 8 - hweight8(b);
39 tot++;
40 if ((b & 2) == 0)
41 tot++;
42 if ((b & 4) == 0)
43 tot++;
44 if ((b & 8) == 0)
45 tot++;
46 if ((b & 16) == 0)
47 tot++;
48 if ((b & 32) == 0)
49 tot++;
50 if ((b & 64) == 0)
51 tot++;
52 if ((b & 128) == 0)
53 tot++;
54 size--; 32 size--;
55 } while (size != 0); 33 } while (size != 0);
56 *tf = tot; 34 *tf = tot;
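
count_bits() tallies free blocks, i.e. zero bits, so the eight unrolled per-bit tests reduce to 8 - hweight8(b) for each byte. A one-byte illustration in userspace C, with __builtin_popcount standing in for hweight8:

#include <stdio.h>

static int free_bits(unsigned char b)
{
        return 8 - __builtin_popcount(b);       /* kernel: 8 - hweight8(b) */
}

int main(void)
{
        printf("%d\n", free_bits(0xF0));        /* prints 4 */
        return 0;
}
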
diff --git a/fs/qnx4/inode.c b/fs/qnx4/inode.c
index 449f5a66dd34..ebf3440d28ca 100644
--- a/fs/qnx4/inode.c
+++ b/fs/qnx4/inode.c
@@ -64,25 +64,7 @@ static struct buffer_head *qnx4_getblk(struct inode *inode, int nr,
64 result = sb_getblk(inode->i_sb, nr); 64 result = sb_getblk(inode->i_sb, nr);
65 return result; 65 return result;
66 } 66 }
67 if (!create) { 67 return NULL;
68 return NULL;
69 }
70#if 0
71 tmp = qnx4_new_block(inode->i_sb);
72 if (!tmp) {
73 return NULL;
74 }
75 result = sb_getblk(inode->i_sb, tmp);
76 if (tst) {
77 qnx4_free_block(inode->i_sb, tmp);
78 brelse(result);
79 goto repeat;
80 }
81 tst = tmp;
82#endif
83 inode->i_ctime = CURRENT_TIME_SEC;
84 mark_inode_dirty(inode);
85 return result;
86} 68}
87 69
88struct buffer_head *qnx4_bread(struct inode *inode, int block, int create) 70struct buffer_head *qnx4_bread(struct inode *inode, int block, int create)
@@ -113,8 +95,6 @@ static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_h
113 if ( phys ) { 95 if ( phys ) {
114 // logical block is before EOF 96 // logical block is before EOF
115 map_bh(bh, inode->i_sb, phys); 97 map_bh(bh, inode->i_sb, phys);
116 } else if ( create ) {
117 // to be done.
118 } 98 }
119 return 0; 99 return 0;
120} 100}
diff --git a/fs/reiserfs/Makefile b/fs/reiserfs/Makefile
index 6a9e30c041dd..792b3cb2cd18 100644
--- a/fs/reiserfs/Makefile
+++ b/fs/reiserfs/Makefile
@@ -7,7 +7,11 @@ obj-$(CONFIG_REISERFS_FS) += reiserfs.o
7reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \ 7reiserfs-objs := bitmap.o do_balan.o namei.o inode.o file.o dir.o fix_node.o \
8 super.o prints.o objectid.o lbalance.o ibalance.o stree.o \ 8 super.o prints.o objectid.o lbalance.o ibalance.o stree.o \
9 hashes.o tail_conversion.o journal.o resize.o \ 9 hashes.o tail_conversion.o journal.o resize.o \
10 item_ops.o ioctl.o procfs.o xattr.o lock.o 10 item_ops.o ioctl.o xattr.o lock.o
11
12ifeq ($(CONFIG_REISERFS_PROC_INFO),y)
13reiserfs-objs += procfs.o
14endif
11 15
12ifeq ($(CONFIG_REISERFS_FS_XATTR),y) 16ifeq ($(CONFIG_REISERFS_FS_XATTR),y)
13reiserfs-objs += xattr_user.o xattr_trusted.o 17reiserfs-objs += xattr_user.o xattr_trusted.o
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 9229e5514a4e..7a9981196c1c 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -17,8 +17,6 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
19 19
20#ifdef CONFIG_REISERFS_PROC_INFO
21
22/* 20/*
23 * LOCKING: 21 * LOCKING:
24 * 22 *
@@ -48,14 +46,6 @@ static int show_version(struct seq_file *m, struct super_block *sb)
48 return 0; 46 return 0;
49} 47}
50 48
51int reiserfs_global_version_in_proc(char *buffer, char **start, off_t offset,
52 int count, int *eof, void *data)
53{
54 *start = buffer;
55 *eof = 1;
56 return 0;
57}
58
59#define SF( x ) ( r -> x ) 49#define SF( x ) ( r -> x )
60#define SFP( x ) SF( s_proc_info_data.x ) 50#define SFP( x ) SF( s_proc_info_data.x )
61#define SFPL( x ) SFP( x[ level ] ) 51#define SFPL( x ) SFP( x[ level ] )
@@ -538,19 +528,6 @@ int reiserfs_proc_info_done(struct super_block *sb)
538 return 0; 528 return 0;
539} 529}
540 530
541struct proc_dir_entry *reiserfs_proc_register_global(char *name,
542 read_proc_t * func)
543{
544 return (proc_info_root) ? create_proc_read_entry(name, 0,
545 proc_info_root,
546 func, NULL) : NULL;
547}
548
549void reiserfs_proc_unregister_global(const char *name)
550{
551 remove_proc_entry(name, proc_info_root);
552}
553
554int reiserfs_proc_info_global_init(void) 531int reiserfs_proc_info_global_init(void)
555{ 532{
556 if (proc_info_root == NULL) { 533 if (proc_info_root == NULL) {
@@ -572,48 +549,6 @@ int reiserfs_proc_info_global_done(void)
572 } 549 }
573 return 0; 550 return 0;
574} 551}
575
576/* REISERFS_PROC_INFO */
577#else
578
579int reiserfs_proc_info_init(struct super_block *sb)
580{
581 return 0;
582}
583int reiserfs_proc_info_done(struct super_block *sb)
584{
585 return 0;
586}
587
588struct proc_dir_entry *reiserfs_proc_register_global(char *name,
589 read_proc_t * func)
590{
591 return NULL;
592}
593
594void reiserfs_proc_unregister_global(const char *name)
595{;
596}
597
598int reiserfs_proc_info_global_init(void)
599{
600 return 0;
601}
602int reiserfs_proc_info_global_done(void)
603{
604 return 0;
605}
606
607int reiserfs_global_version_in_proc(char *buffer, char **start,
608 off_t offset,
609 int count, int *eof, void *data)
610{
611 return 0;
612}
613
614/* REISERFS_PROC_INFO */
615#endif
616
617/* 552/*
618 * Revision 1.1.8.2 2001/07/15 17:08:42 god 553 * Revision 1.1.8.2 2001/07/15 17:08:42 god
619 * . use get_super() in procfs.c 554 * . use get_super() in procfs.c
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 339b0baf2af6..b4a7dd03bdb9 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2222,8 +2222,6 @@ static int __init init_reiserfs_fs(void)
2222 } 2222 }
2223 2223
2224 reiserfs_proc_info_global_init(); 2224 reiserfs_proc_info_global_init();
2225 reiserfs_proc_register_global("version",
2226 reiserfs_global_version_in_proc);
2227 2225
2228 ret = register_filesystem(&reiserfs_fs_type); 2226 ret = register_filesystem(&reiserfs_fs_type);
2229 2227
@@ -2231,7 +2229,6 @@ static int __init init_reiserfs_fs(void)
2231 return 0; 2229 return 0;
2232 } 2230 }
2233 2231
2234 reiserfs_proc_unregister_global("version");
2235 reiserfs_proc_info_global_done(); 2232 reiserfs_proc_info_global_done();
2236 destroy_inodecache(); 2233 destroy_inodecache();
2237 2234
@@ -2240,7 +2237,6 @@ static int __init init_reiserfs_fs(void)
2240 2237
2241static void __exit exit_reiserfs_fs(void) 2238static void __exit exit_reiserfs_fs(void)
2242{ 2239{
2243 reiserfs_proc_unregister_global("version");
2244 reiserfs_proc_info_global_done(); 2240 reiserfs_proc_info_global_done();
2245 unregister_filesystem(&reiserfs_fs_type); 2241 unregister_filesystem(&reiserfs_fs_type);
2246 destroy_inodecache(); 2242 destroy_inodecache();
diff --git a/fs/ufs/dir.c b/fs/ufs/dir.c
index 6f671f1ac271..22af68f8b682 100644
--- a/fs/ufs/dir.c
+++ b/fs/ufs/dir.c
@@ -70,13 +70,13 @@ static inline unsigned long ufs_dir_pages(struct inode *inode)
70 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT; 70 return (inode->i_size+PAGE_CACHE_SIZE-1)>>PAGE_CACHE_SHIFT;
71} 71}
72 72
73ino_t ufs_inode_by_name(struct inode *dir, struct dentry *dentry) 73ino_t ufs_inode_by_name(struct inode *dir, struct qstr *qstr)
74{ 74{
75 ino_t res = 0; 75 ino_t res = 0;
76 struct ufs_dir_entry *de; 76 struct ufs_dir_entry *de;
77 struct page *page; 77 struct page *page;
78 78
79 de = ufs_find_entry(dir, dentry, &page); 79 de = ufs_find_entry(dir, qstr, &page);
80 if (de) { 80 if (de) {
81 res = fs32_to_cpu(dir->i_sb, de->d_ino); 81 res = fs32_to_cpu(dir->i_sb, de->d_ino);
82 ufs_put_page(page); 82 ufs_put_page(page);
@@ -249,12 +249,12 @@ struct ufs_dir_entry *ufs_dotdot(struct inode *dir, struct page **p)
249 * (as a parameter - res_dir). Page is returned mapped and unlocked. 249 * (as a parameter - res_dir). Page is returned mapped and unlocked.
250 * Entry is guaranteed to be valid. 250 * Entry is guaranteed to be valid.
251 */ 251 */
252struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct dentry *dentry, 252struct ufs_dir_entry *ufs_find_entry(struct inode *dir, struct qstr *qstr,
253 struct page **res_page) 253 struct page **res_page)
254{ 254{
255 struct super_block *sb = dir->i_sb; 255 struct super_block *sb = dir->i_sb;
256 const char *name = dentry->d_name.name; 256 const char *name = qstr->name;
257 int namelen = dentry->d_name.len; 257 int namelen = qstr->len;
258 unsigned reclen = UFS_DIR_REC_LEN(namelen); 258 unsigned reclen = UFS_DIR_REC_LEN(namelen);
259 unsigned long start, n; 259 unsigned long start, n;
260 unsigned long npages = ufs_dir_pages(dir); 260 unsigned long npages = ufs_dir_pages(dir);
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c
index 23119fe7ad62..4c26d9e8bc94 100644
--- a/fs/ufs/namei.c
+++ b/fs/ufs/namei.c
@@ -56,7 +56,7 @@ static struct dentry *ufs_lookup(struct inode * dir, struct dentry *dentry, stru
56 return ERR_PTR(-ENAMETOOLONG); 56 return ERR_PTR(-ENAMETOOLONG);
57 57
58 lock_kernel(); 58 lock_kernel();
59 ino = ufs_inode_by_name(dir, dentry); 59 ino = ufs_inode_by_name(dir, &dentry->d_name);
60 if (ino) { 60 if (ino) {
61 inode = ufs_iget(dir->i_sb, ino); 61 inode = ufs_iget(dir->i_sb, ino);
62 if (IS_ERR(inode)) { 62 if (IS_ERR(inode)) {
@@ -237,7 +237,7 @@ static int ufs_unlink(struct inode *dir, struct dentry *dentry)
237 struct page *page; 237 struct page *page;
238 int err = -ENOENT; 238 int err = -ENOENT;
239 239
240 de = ufs_find_entry(dir, dentry, &page); 240 de = ufs_find_entry(dir, &dentry->d_name, &page);
241 if (!de) 241 if (!de)
242 goto out; 242 goto out;
243 243
@@ -281,7 +281,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
281 struct ufs_dir_entry *old_de; 281 struct ufs_dir_entry *old_de;
282 int err = -ENOENT; 282 int err = -ENOENT;
283 283
284 old_de = ufs_find_entry(old_dir, old_dentry, &old_page); 284 old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page);
285 if (!old_de) 285 if (!old_de)
286 goto out; 286 goto out;
287 287
@@ -301,7 +301,7 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry,
301 goto out_dir; 301 goto out_dir;
302 302
303 err = -ENOENT; 303 err = -ENOENT;
304 new_de = ufs_find_entry(new_dir, new_dentry, &new_page); 304 new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page);
305 if (!new_de) 305 if (!new_de)
306 goto out_dir; 306 goto out_dir;
307 inode_inc_link_count(old_inode); 307 inode_inc_link_count(old_inode);
diff --git a/fs/ufs/super.c b/fs/ufs/super.c
index 5faed7954d0a..143c20bfb04b 100644
--- a/fs/ufs/super.c
+++ b/fs/ufs/super.c
@@ -66,6 +66,7 @@
66 */ 66 */
67 67
68 68
69#include <linux/exportfs.h>
69#include <linux/module.h> 70#include <linux/module.h>
70#include <linux/bitops.h> 71#include <linux/bitops.h>
71 72
@@ -96,6 +97,56 @@
96#include "swab.h" 97#include "swab.h"
97#include "util.h" 98#include "util.h"
98 99
100static struct inode *ufs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation)
101{
102 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi;
103 struct inode *inode;
104
105 if (ino < UFS_ROOTINO || ino > uspi->s_ncg * uspi->s_ipg)
106 return ERR_PTR(-ESTALE);
107
108 inode = ufs_iget(sb, ino);
109 if (IS_ERR(inode))
110 return ERR_CAST(inode);
111 if (generation && inode->i_generation != generation) {
112 iput(inode);
113 return ERR_PTR(-ESTALE);
114 }
115 return inode;
116}
117
118static struct dentry *ufs_fh_to_dentry(struct super_block *sb, struct fid *fid,
119 int fh_len, int fh_type)
120{
121 return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ufs_nfs_get_inode);
122}
123
124static struct dentry *ufs_fh_to_parent(struct super_block *sb, struct fid *fid,
125 int fh_len, int fh_type)
126{
127 return generic_fh_to_parent(sb, fid, fh_len, fh_type, ufs_nfs_get_inode);
128}
129
130static struct dentry *ufs_get_parent(struct dentry *child)
131{
132 struct qstr dot_dot = {
133 .name = "..",
134 .len = 2,
135 };
136 ino_t ino;
137
138 ino = ufs_inode_by_name(child->d_inode, &dot_dot);
139 if (!ino)
140 return ERR_PTR(-ENOENT);
141 return d_obtain_alias(ufs_iget(child->d_inode->i_sb, ino));
142}
143
144static const struct export_operations ufs_export_ops = {
145 .fh_to_dentry = ufs_fh_to_dentry,
146 .fh_to_parent = ufs_fh_to_parent,
147 .get_parent = ufs_get_parent,
148};
149
99#ifdef CONFIG_UFS_DEBUG 150#ifdef CONFIG_UFS_DEBUG
100/* 151/*
101 * Print contents of ufs_super_block, useful for debugging 152 * Print contents of ufs_super_block, useful for debugging
@@ -990,6 +1041,7 @@ magic_found:
990 * Read ufs_super_block into internal data structures 1041 * Read ufs_super_block into internal data structures
991 */ 1042 */
992 sb->s_op = &ufs_super_ops; 1043 sb->s_op = &ufs_super_ops;
1044 sb->s_export_op = &ufs_export_ops;
993 sb->dq_op = NULL; /***/ 1045 sb->dq_op = NULL; /***/
994 sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic); 1046 sb->s_magic = fs32_to_cpu(sb, usb3->fs_magic);
995 1047
diff --git a/fs/ufs/ufs.h b/fs/ufs/ufs.h
index 644e77e13599..0b4c39bc0d9e 100644
--- a/fs/ufs/ufs.h
+++ b/fs/ufs/ufs.h
@@ -86,9 +86,9 @@ extern void ufs_put_cylinder (struct super_block *, unsigned);
86/* dir.c */ 86/* dir.c */
87extern const struct inode_operations ufs_dir_inode_operations; 87extern const struct inode_operations ufs_dir_inode_operations;
88extern int ufs_add_link (struct dentry *, struct inode *); 88extern int ufs_add_link (struct dentry *, struct inode *);
89extern ino_t ufs_inode_by_name(struct inode *, struct dentry *); 89extern ino_t ufs_inode_by_name(struct inode *, struct qstr *);
90extern int ufs_make_empty(struct inode *, struct inode *); 90extern int ufs_make_empty(struct inode *, struct inode *);
91extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct dentry *, struct page **); 91extern struct ufs_dir_entry *ufs_find_entry(struct inode *, struct qstr *, struct page **);
92extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *); 92extern int ufs_delete_entry(struct inode *, struct ufs_dir_entry *, struct page *);
93extern int ufs_empty_dir (struct inode *); 93extern int ufs_empty_dir (struct inode *);
94extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **); 94extern struct ufs_dir_entry *ufs_dotdot(struct inode *, struct page **);
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index d798c54296eb..66abe36c1213 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1474,19 +1474,13 @@ xfs_vm_direct_IO(
1474 1474
1475 bdev = xfs_find_bdev_for_inode(XFS_I(inode)); 1475 bdev = xfs_find_bdev_for_inode(XFS_I(inode));
1476 1476
1477 if (rw == WRITE) { 1477 iocb->private = xfs_alloc_ioend(inode, rw == WRITE ?
1478 iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN); 1478 IOMAP_UNWRITTEN : IOMAP_READ);
1479 ret = blockdev_direct_IO_own_locking(rw, iocb, inode, 1479
1480 bdev, iov, offset, nr_segs, 1480 ret = blockdev_direct_IO_no_locking(rw, iocb, inode, bdev, iov,
1481 xfs_get_blocks_direct, 1481 offset, nr_segs,
1482 xfs_end_io_direct); 1482 xfs_get_blocks_direct,
1483 } else { 1483 xfs_end_io_direct);
1484 iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
1485 ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
1486 bdev, iov, offset, nr_segs,
1487 xfs_get_blocks_direct,
1488 xfs_end_io_direct);
1489 }
1490 1484
1491 if (unlikely(ret != -EIOCBQUEUED && iocb->private)) 1485 if (unlikely(ret != -EIOCBQUEUED && iocb->private))
1492 xfs_destroy_ioend(iocb->private); 1486 xfs_destroy_ioend(iocb->private);