Diffstat (limited to 'fs/aio.c')
 fs/aio.c | 31 +++++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/fs/aio.c b/fs/aio.c
index edfca5b75535..20bb919eb195 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -42,8 +42,9 @@
 #endif
 
 /*------ sysctl variables----*/
-atomic_t aio_nr = ATOMIC_INIT(0);	/* current system wide number of aio requests */
-unsigned aio_max_nr = 0x10000;	/* system wide maximum number of aio requests */
+static DEFINE_SPINLOCK(aio_nr_lock);
+unsigned long aio_nr;		/* current system wide number of aio requests */
+unsigned long aio_max_nr = 0x10000;	/* system wide maximum number of aio requests */
 /*----end sysctl variables---*/
 
 static kmem_cache_t	*kiocb_cachep;
@@ -208,7 +209,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		return ERR_PTR(-EINVAL);
 	}
 
-	if (nr_events > aio_max_nr)
+	if ((unsigned long)nr_events > aio_max_nr)
 		return ERR_PTR(-EAGAIN);
 
 	ctx = kmem_cache_alloc(kioctx_cachep, GFP_KERNEL);
@@ -233,8 +234,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 		goto out_freectx;
 
 	/* limit the number of system wide aios */
-	atomic_add(ctx->max_reqs, &aio_nr);	/* undone by __put_ioctx */
-	if (unlikely(atomic_read(&aio_nr) > aio_max_nr))
+	spin_lock(&aio_nr_lock);
+	if (aio_nr + ctx->max_reqs > aio_max_nr ||
+	    aio_nr + ctx->max_reqs < aio_nr)
+		ctx->max_reqs = 0;
+	else
+		aio_nr += ctx->max_reqs;
+	spin_unlock(&aio_nr_lock);
+	if (ctx->max_reqs == 0)
 		goto out_cleanup;
 
 	/* now link into global list. kludge. FIXME */
@@ -248,8 +255,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
 	return ctx;
 
 out_cleanup:
-	atomic_sub(ctx->max_reqs, &aio_nr);
-	ctx->max_reqs = 0;	/* prevent __put_ioctx from sub'ing aio_nr */
 	__put_ioctx(ctx);
 	return ERR_PTR(-EAGAIN);
 
@@ -374,7 +379,12 @@ void fastcall __put_ioctx(struct kioctx *ctx)
 	pr_debug("__put_ioctx: freeing %p\n", ctx);
 	kmem_cache_free(kioctx_cachep, ctx);
 
-	atomic_sub(nr_events, &aio_nr);
+	if (nr_events) {
+		spin_lock(&aio_nr_lock);
+		BUG_ON(aio_nr - nr_events > aio_nr);
+		aio_nr -= nr_events;
+		spin_unlock(&aio_nr_lock);
+	}
 }
 
 /* aio_get_req
@@ -1258,8 +1268,9 @@ asmlinkage long sys_io_setup(unsigned nr_events, aio_context_t __user *ctxp)
 		goto out;
 
 	ret = -EINVAL;
-	if (unlikely(ctx || (int)nr_events <= 0)) {
-		pr_debug("EINVAL: io_setup: ctx or nr_events > max\n");
+	if (unlikely(ctx || nr_events == 0)) {
+		pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+		         ctx, nr_events);
 		goto out;
 	}
 
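
The hunks above replace the old atomic_add()/atomic_sub() accounting, in which aio_nr was bumped first and only then compared against aio_max_nr (so the counter could transiently exceed the limit and had to be un-done on failure), with a check-then-add sequence performed entirely under aio_nr_lock, plus an explicit overflow test. Below is a minimal userspace sketch of that pattern, not kernel code: a pthread mutex stands in for the spinlock, and the hypothetical reserve_reqs()/release_reqs() helpers mirror the logic added to ioctx_alloc() and __put_ioctx().

/*
 * Illustrative userspace sketch of the locked counter accounting this
 * patch introduces.  Names (nr, max_nr, reserve_reqs, release_reqs)
 * are made up for the example; a pthread mutex replaces aio_nr_lock.
 */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t nr_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long nr;                /* current reservations (cf. aio_nr) */
static unsigned long max_nr = 0x10000;  /* system-wide limit (cf. aio_max_nr) */

/* Check-then-add under the lock: the counter never transiently exceeds
 * max_nr, and the second comparison rejects unsigned wrap-around. */
static bool reserve_reqs(unsigned long want)
{
	bool ok;

	pthread_mutex_lock(&nr_lock);
	if (nr + want > max_nr || nr + want < nr) {
		ok = false;             /* over the limit or overflowed */
	} else {
		nr += want;
		ok = true;
	}
	pthread_mutex_unlock(&nr_lock);
	return ok;
}

/* Undo a successful reservation; a count of 0 means "nothing to give
 * back", matching the ctx->max_reqs == 0 convention in the patch. */
static void release_reqs(unsigned long want)
{
	if (!want)
		return;
	pthread_mutex_lock(&nr_lock);
	assert(nr - want <= nr);        /* underflow check, like the BUG_ON() */
	nr -= want;
	pthread_mutex_unlock(&nr_lock);
}

int main(void)
{
	if (reserve_reqs(128)) {
		printf("reserved 128 slots, nr = %lu\n", nr);
		release_reqs(128);
	}
	printf("after release, nr = %lu\n", nr);
	return 0;
}

The point of the check-then-add ordering is that failure leaves the counter untouched, which is why the patch can delete the atomic_sub() rollback from the out_cleanup path and only subtract in __put_ioctx() when a non-zero reservation was actually recorded.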