author     Linus Torvalds <torvalds@linux-foundation.org>  2012-03-21 16:36:41 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-03-21 16:36:41 -0400
commit     e2a0883e4071237d09b604a342c28b96b44a04b3 (patch)
tree       aa56f4d376b5eb1c32358c19c2669c2a94e0e1fd /fs/aio.c
parent     3a990a52f9f25f45469e272017a31e7a3fda60ed (diff)
parent     07c0c5d8b8c122b2f2df9ee574ac3083daefc981 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull vfs pile 1 from Al Viro:
"This is _not_ all; in particular, Miklos' and Jan's stuff is not there
yet."
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (64 commits)
ext4: initialization of ext4_li_mtx needs to be done earlier
debugfs-related mode_t whack-a-mole
hfsplus: add an ioctl to bless files
hfsplus: change finder_info to u32
hfsplus: initialise userflags
qnx4: new helper - try_extent()
qnx4: get rid of qnx4_bread/qnx4_getblk
take removal of PF_FORKNOEXEC to flush_old_exec()
trim includes in inode.c
um: uml_dup_mmap() relies on ->mmap_sem being held, but activate_mm() doesn't hold it
um: embed ->stub_pages[] into mmu_context
gadgetfs: list_for_each_safe() misuse
ocfs2: fix leaks on failure exits in module_init
ecryptfs: make register_filesystem() the last potential failure exit
ntfs: forgets to unregister sysctls on register_filesystem() failure
logfs: missing cleanup on register_filesystem() failure
jfs: missing cleanup on register_filesystem() failure
make configfs_pin_fs() return root dentry on success
configfs: configfs_create_dir() has parent dentry in dentry->d_parent
configfs: sanitize configfs_create()
...
Diffstat (limited to 'fs/aio.c')
-rw-r--r--  fs/aio.c  65
1 file changed, 22 insertions(+), 43 deletions(-)
@@ -199,16 +199,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 static void ctx_rcu_free(struct rcu_head *head)
 {
 	struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
-	unsigned nr_events = ctx->max_reqs;
-
 	kmem_cache_free(kioctx_cachep, ctx);
-
-	if (nr_events) {
-		spin_lock(&aio_nr_lock);
-		BUG_ON(aio_nr - nr_events > aio_nr);
-		aio_nr -= nr_events;
-		spin_unlock(&aio_nr_lock);
-	}
 }
 
 /* __put_ioctx
@@ -217,13 +208,19 @@ static void ctx_rcu_free(struct rcu_head *head)
  */
 static void __put_ioctx(struct kioctx *ctx)
 {
+	unsigned nr_events = ctx->max_reqs;
 	BUG_ON(ctx->reqs_active);
 
-	cancel_delayed_work(&ctx->wq);
-	cancel_work_sync(&ctx->wq.work);
+	cancel_delayed_work_sync(&ctx->wq);
 	aio_free_ring(ctx);
 	mmdrop(ctx->mm);
 	ctx->mm = NULL;
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
 	pr_debug("__put_ioctx: freeing %p\n", ctx);
 	call_rcu(&ctx->rcu_head, ctx_rcu_free);
 }
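The two hunks above move the system-wide aio_nr accounting from the RCU callback into __put_ioctx(), so a destroyed context returns its event reservation before the RCU grace period rather than after it; previously ioctx_alloc() had to call synchronize_rcu() and retry before freed capacity became visible (that retry loop is removed below). A minimal userspace sketch of the affected path, assuming libaio is installed; it simply cycles contexts against the fs.aio-max-nr limit:

/* Hypothetical stress test for the aio_nr reservation path; assumes the
 * libaio wrappers (io_setup/io_destroy), which return negative errno.
 * Build with: cc -o aio-cycle aio-cycle.c -laio */
#include <libaio.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	for (int i = 0; i < 100000; i++) {
		io_context_t ctx = 0;		/* must be zeroed before io_setup */
		int ret = io_setup(128, &ctx);	/* reserve room for 128 events */
		if (ret < 0) {
			/* EAGAIN means the system-wide aio_nr cap is exhausted */
			fprintf(stderr, "io_setup: %s\n", strerror(-ret));
			return 1;
		}
		io_destroy(ctx);		/* give the reservation back */
	}
	return 0;
}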
@@ -247,7 +244,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 {
 	struct mm_struct *mm;
 	struct kioctx *ctx;
-	int did_sync = 0;
+	int err = -ENOMEM;
 
 	/* Prevent overflows */
 	if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
@@ -256,7 +253,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		return ERR_PTR(-EINVAL);
 	}
 
-	if ((unsigned long)nr_events > aio_max_nr)
+	if (!nr_events || (unsigned long)nr_events > aio_max_nr)
 		return ERR_PTR(-EAGAIN);
 
 	ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL);
@@ -280,25 +277,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		goto out_freectx;
 
 	/* limit the number of system wide aios */
-	do {
-		spin_lock_bh(&aio_nr_lock);
-		if (aio_nr + nr_events > aio_max_nr ||
-		    aio_nr + nr_events < aio_nr)
-			ctx->max_reqs = 0;
-		else
-			aio_nr += ctx->max_reqs;
-		spin_unlock_bh(&aio_nr_lock);
-		if (ctx->max_reqs || did_sync)
-			break;
-
-		/* wait for rcu callbacks to have completed before giving up */
-		synchronize_rcu();
-		did_sync = 1;
-		ctx->max_reqs = nr_events;
-	} while (1);
-
-	if (ctx->max_reqs == 0)
+	spin_lock(&aio_nr_lock);
+	if (aio_nr + nr_events > aio_max_nr ||
+	    aio_nr + nr_events < aio_nr) {
+		spin_unlock(&aio_nr_lock);
 		goto out_cleanup;
+	}
+	aio_nr += ctx->max_reqs;
+	spin_unlock(&aio_nr_lock);
 
 	/* now link into global list. */
 	spin_lock(&mm->ioctx_lock);
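The guard above does double duty: it caps the system-wide total at aio_max_nr, and because aio_nr and nr_events are unsigned, `aio_nr + nr_events < aio_nr` is true exactly when the addition wrapped around. A standalone sketch of the same idiom (would_exceed() is a hypothetical name, not kernel code):

/* For unsigned a and b, a + b is computed modulo 2^N, so the sum is
 * smaller than either operand precisely when the addition overflowed. */
#include <assert.h>
#include <limits.h>

static int would_exceed(unsigned long cur, unsigned long add, unsigned long max)
{
	return cur + add > max || cur + add < cur;	/* second clause = wraparound */
}

int main(void)
{
	assert(!would_exceed(10, 20, 100));			/* fits under the cap */
	assert(would_exceed(90, 20, 100));			/* exceeds the cap */
	assert(would_exceed(ULONG_MAX - 5, 10, ULONG_MAX));	/* wraps around */
	return 0;
}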
@@ -310,16 +296,13 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	return ctx;
 
 out_cleanup:
-	__put_ioctx(ctx);
-	return ERR_PTR(-EAGAIN);
-
+	err = -EAGAIN;
+	aio_free_ring(ctx);
 out_freectx:
 	mmdrop(mm);
 	kmem_cache_free(kioctx_cachep, ctx);
-	ctx = ERR_PTR(-ENOMEM);
-
-	dprintk("aio: error allocating ioctx %p\n", ctx);
-	return ctx;
+	dprintk("aio: error allocating ioctx %d\n", err);
+	return ERR_PTR(err);
 }
 
 /* aio_cancel_all
@@ -407,10 +390,6 @@ void exit_aio(struct mm_struct *mm)
 		aio_cancel_all(ctx);
 
 		wait_for_all_aios(ctx);
-		/*
-		 * Ensure we don't leave the ctx on the aio_wq
-		 */
-		cancel_work_sync(&ctx->wq.work);
 
 		if (1 != atomic_read(&ctx->users))
 			printk(KERN_DEBUG
@@ -920,7 +899,7 @@ static void aio_kick_handler(struct work_struct *work)
 	unuse_mm(mm);
 	set_fs(oldfs);
 	/*
-	 * we're in a worker thread already, don't use queue_delayed_work,
+	 * we're in a worker thread already; no point using non-zero delay
 	 */
 	if (requeue)
 		queue_delayed_work(aio_wq, &ctx->wq, 0);
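Two of the hunks above are workqueue cleanups: __put_ioctx() now uses cancel_delayed_work_sync(), which cancels a pending timer and waits out a running handler in one call (replacing the cancel_delayed_work() + cancel_work_sync() pair and making the extra cancel_work_sync() in exit_aio() redundant), and the reworded comment in aio_kick_handler() notes that a handler requeueing itself uses zero delay because it already runs in a worker thread. A minimal sketch of that pattern with hypothetical names (my_ctx, my_handler; system_wq is the generic kernel workqueue); this is not the actual fs/aio.c code:

/* Sketch of the delayed-work lifecycle used by the aio code. */
#include <linux/workqueue.h>

struct my_ctx {
	struct delayed_work wq;
};

static void my_handler(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, wq.work);
	bool requeue = false;

	/* ... do a bounded slice of work; set requeue if more remains ... */

	if (requeue)	/* already in a worker thread: requeue with zero delay */
		queue_delayed_work(system_wq, &ctx->wq, 0);
}

static void my_ctx_init(struct my_ctx *ctx)
{
	INIT_DELAYED_WORK(&ctx->wq, my_handler);
}

static void my_ctx_teardown(struct my_ctx *ctx)
{
	/* cancels a pending timer and waits for a running handler */
	cancel_delayed_work_sync(&ctx->wq);
}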