author     Tejun Heo <tj@kernel.org>           2009-04-13 21:54:49 -0400
committer  Miklos Szeredi <mszeredi@suse.cz>   2009-04-28 10:56:35 -0400
commit     6b2db28a7a2da1064df9e179d9b6d07b0bfe156a
tree       9e24b6876a7b91de466e8a87523a41de694d2148 /fs/fuse
parent     fd9db7297749c05fcf5721ce5393a5a8b8772f2a
fuse: misc cleanups
* fuse_file_alloc() was structured in a weird way: the success path was
  split between the else block and the code following that block.
  Restructure the code so that it is easier to read and modify (a
  minimal sketch of the resulting early-return style follows the tags
  below).
* Unindent the success path of fuse_release_common() to ease future
  changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
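
Both cleanups move the functions to a guard-clause, early-return shape:
each failure check bails out immediately, so the success path runs
straight down at a single indentation level. The fragment below is a
minimal, self-contained sketch of that shape in plain userspace C; the
widget type and widget_alloc() helper are invented for illustration and
are not part of the FUSE code.

#include <stdlib.h>

struct widget {
	int *buf;
};

/*
 * Hypothetical allocator written in the same early-return style as the
 * reworked fuse_file_alloc(): every failure check returns at once, so
 * the success path is never nested inside an if/else ladder.
 */
static struct widget *widget_alloc(size_t n)
{
	struct widget *w;

	w = malloc(sizeof(*w));
	if (!w)
		return NULL;

	w->buf = calloc(n, sizeof(*w->buf));
	if (!w->buf) {
		free(w);	/* undo the partial allocation */
		return NULL;
	}

	return w;
}

int main(void)
{
	struct widget *w = widget_alloc(16);

	if (!w)
		return 1;
	free(w->buf);
	free(w);
	return 0;
}

Keeping the success path unindented is what makes later changes to it
show up as small, low-churn diffs, which is the stated point of both
cleanups.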
Diffstat (limited to 'fs/fuse')
-rw-r--r--  fs/fuse/file.c  81
1 file changed, 44 insertions, 37 deletions
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 06f30e965676..028e17decf2f 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -49,22 +49,26 @@ static int fuse_send_open(struct inode *inode, struct file *file, int isdir,
 struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
 {
 	struct fuse_file *ff;
+
 	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
-	if (ff) {
-		ff->reserved_req = fuse_request_alloc();
-		if (!ff->reserved_req) {
-			kfree(ff);
-			return NULL;
-		} else {
-			INIT_LIST_HEAD(&ff->write_entry);
-			atomic_set(&ff->count, 0);
-			spin_lock(&fc->lock);
-			ff->kh = ++fc->khctr;
-			spin_unlock(&fc->lock);
-		}
-		RB_CLEAR_NODE(&ff->polled_node);
-		init_waitqueue_head(&ff->poll_wait);
+	if (unlikely(!ff))
+		return NULL;
+
+	ff->reserved_req = fuse_request_alloc();
+	if (unlikely(!ff->reserved_req)) {
+		kfree(ff);
+		return NULL;
 	}
+
+	INIT_LIST_HEAD(&ff->write_entry);
+	atomic_set(&ff->count, 0);
+	RB_CLEAR_NODE(&ff->polled_node);
+	init_waitqueue_head(&ff->poll_wait);
+
+	spin_lock(&fc->lock);
+	ff->kh = ++fc->khctr;
+	spin_unlock(&fc->lock);
+
 	return ff;
 }
 
@@ -158,34 +162,37 @@ void fuse_release_fill(struct fuse_file *ff, u64 nodeid, int flags, int opcode)
 
 int fuse_release_common(struct inode *inode, struct file *file, int isdir)
 {
-	struct fuse_file *ff = file->private_data;
-	if (ff) {
-		struct fuse_conn *fc = get_fuse_conn(inode);
-		struct fuse_req *req = ff->reserved_req;
+	struct fuse_conn *fc;
+	struct fuse_file *ff;
+	struct fuse_req *req;
 
-		fuse_release_fill(ff, get_node_id(inode), file->f_flags,
-				  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);
+	ff = file->private_data;
+	if (unlikely(!ff))
+		return 0;	/* return value is ignored by VFS */
 
-		/* Hold vfsmount and dentry until release is finished */
-		req->misc.release.vfsmount = mntget(file->f_path.mnt);
-		req->misc.release.dentry = dget(file->f_path.dentry);
+	fc = get_fuse_conn(inode);
+	req = ff->reserved_req;
 
-		spin_lock(&fc->lock);
-		list_del(&ff->write_entry);
-		if (!RB_EMPTY_NODE(&ff->polled_node))
-			rb_erase(&ff->polled_node, &fc->polled_files);
-		spin_unlock(&fc->lock);
+	fuse_release_fill(ff, get_node_id(inode), file->f_flags,
+			  isdir ? FUSE_RELEASEDIR : FUSE_RELEASE);
 
-		wake_up_interruptible_sync(&ff->poll_wait);
-		/*
-		 * Normally this will send the RELEASE request,
-		 * however if some asynchronous READ or WRITE requests
-		 * are outstanding, the sending will be delayed
-		 */
-		fuse_file_put(ff);
-	}
+	/* Hold vfsmount and dentry until release is finished */
+	req->misc.release.vfsmount = mntget(file->f_path.mnt);
+	req->misc.release.dentry = dget(file->f_path.dentry);
+
+	spin_lock(&fc->lock);
+	list_del(&ff->write_entry);
+	if (!RB_EMPTY_NODE(&ff->polled_node))
+		rb_erase(&ff->polled_node, &fc->polled_files);
+	spin_unlock(&fc->lock);
 
-	/* Return value is ignored by VFS */
+	wake_up_interruptible_sync(&ff->poll_wait);
+	/*
+	 * Normally this will send the RELEASE request, however if
+	 * some asynchronous READ or WRITE requests are outstanding,
+	 * the sending will be delayed.
+	 */
+	fuse_file_put(ff);
 	return 0;
 }
 