diff options
author | Al Viro <viro@zeniv.linux.org.uk> | 2012-03-10 23:10:35 -0500 |
---|---|---|
committer | Al Viro <viro@zeniv.linux.org.uk> | 2012-03-20 21:29:38 -0400 |
commit | 2dd542b7aeb1c222273cf0593a718d9b44998d9f (patch) | |
tree | aea852e444475a669e1cb21a328b980ba19b9ecb /fs/aio.c | |
parent | e23754f880f10124f0a2848f9d17e361a295378e (diff) |
aio: aio_nr decrements don't need to be delayed
We can do that right in __put_ioctx(); as a result, the loop
in ioctx_alloc() can be killed.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs/aio.c')
-rw-r--r-- | fs/aio.c | 42 |
1 file changed, 14 insertions, 28 deletions
@@ -199,16 +199,7 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
199 | static void ctx_rcu_free(struct rcu_head *head) | 199 | static void ctx_rcu_free(struct rcu_head *head) |
200 | { | 200 | { |
201 | struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); | 201 | struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); |
202 | unsigned nr_events = ctx->max_reqs; | ||
203 | |||
204 | kmem_cache_free(kioctx_cachep, ctx); | 202 | kmem_cache_free(kioctx_cachep, ctx); |
205 | |||
206 | if (nr_events) { | ||
207 | spin_lock(&aio_nr_lock); | ||
208 | BUG_ON(aio_nr - nr_events > aio_nr); | ||
209 | aio_nr -= nr_events; | ||
210 | spin_unlock(&aio_nr_lock); | ||
211 | } | ||
212 | } | 203 | } |
213 | 204 | ||
214 | /* __put_ioctx | 205 | /* __put_ioctx |
@@ -217,6 +208,7 @@ static void ctx_rcu_free(struct rcu_head *head) | |||
217 | */ | 208 | */ |
218 | static void __put_ioctx(struct kioctx *ctx) | 209 | static void __put_ioctx(struct kioctx *ctx) |
219 | { | 210 | { |
211 | unsigned nr_events = ctx->max_reqs; | ||
220 | BUG_ON(ctx->reqs_active); | 212 | BUG_ON(ctx->reqs_active); |
221 | 213 | ||
222 | cancel_delayed_work(&ctx->wq); | 214 | cancel_delayed_work(&ctx->wq); |
@@ -224,6 +216,12 @@ static void __put_ioctx(struct kioctx *ctx) | |||
224 | aio_free_ring(ctx); | 216 | aio_free_ring(ctx); |
225 | mmdrop(ctx->mm); | 217 | mmdrop(ctx->mm); |
226 | ctx->mm = NULL; | 218 | ctx->mm = NULL; |
219 | if (nr_events) { | ||
220 | spin_lock(&aio_nr_lock); | ||
221 | BUG_ON(aio_nr - nr_events > aio_nr); | ||
222 | aio_nr -= nr_events; | ||
223 | spin_unlock(&aio_nr_lock); | ||
224 | } | ||
227 | pr_debug("__put_ioctx: freeing %p\n", ctx); | 225 | pr_debug("__put_ioctx: freeing %p\n", ctx); |
228 | call_rcu(&ctx->rcu_head, ctx_rcu_free); | 226 | call_rcu(&ctx->rcu_head, ctx_rcu_free); |
229 | } | 227 | } |
@@ -247,7 +245,6 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
247 | { | 245 | { |
248 | struct mm_struct *mm; | 246 | struct mm_struct *mm; |
249 | struct kioctx *ctx; | 247 | struct kioctx *ctx; |
250 | int did_sync = 0; | ||
251 | int err = -ENOMEM; | 248 | int err = -ENOMEM; |
252 | 249 | ||
253 | /* Prevent overflows */ | 250 | /* Prevent overflows */ |
@@ -257,7 +254,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
257 | return ERR_PTR(-EINVAL); | 254 | return ERR_PTR(-EINVAL); |
258 | } | 255 | } |
259 | 256 | ||
260 | if ((unsigned long)nr_events > aio_max_nr) | 257 | if (!nr_events || (unsigned long)nr_events > aio_max_nr) |
261 | return ERR_PTR(-EAGAIN); | 258 | return ERR_PTR(-EAGAIN); |
262 | 259 | ||
263 | ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); | 260 | ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); |
@@ -281,25 +278,14 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
281 | goto out_freectx; | 278 | goto out_freectx; |
282 | 279 | ||
283 | /* limit the number of system wide aios */ | 280 | /* limit the number of system wide aios */ |
284 | do { | 281 | spin_lock_bh(&aio_nr_lock); |
285 | spin_lock_bh(&aio_nr_lock); | 282 | if (aio_nr + nr_events > aio_max_nr || |
286 | if (aio_nr + nr_events > aio_max_nr || | 283 | aio_nr + nr_events < aio_nr) { |
287 | aio_nr + nr_events < aio_nr) | ||
288 | ctx->max_reqs = 0; | ||
289 | else | ||
290 | aio_nr += ctx->max_reqs; | ||
291 | spin_unlock_bh(&aio_nr_lock); | 284 | spin_unlock_bh(&aio_nr_lock); |
292 | if (ctx->max_reqs || did_sync) | ||
293 | break; | ||
294 | |||
295 | /* wait for rcu callbacks to have completed before giving up */ | ||
296 | synchronize_rcu(); | ||
297 | did_sync = 1; | ||
298 | ctx->max_reqs = nr_events; | ||
299 | } while (1); | ||
300 | |||
301 | if (ctx->max_reqs == 0) | ||
302 | goto out_cleanup; | 285 | goto out_cleanup; |
286 | } | ||
287 | aio_nr += ctx->max_reqs; | ||
288 | spin_unlock_bh(&aio_nr_lock); | ||
303 | 289 | ||
304 | /* now link into global list. */ | 290 | /* now link into global list. */ |
305 | spin_lock(&mm->ioctx_lock); | 291 | spin_lock(&mm->ioctx_lock); |