diff options
| author | Al Viro <viro@zeniv.linux.org.uk> | 2015-04-06 17:48:54 -0400 |
|---|---|---|
| committer | Al Viro <viro@zeniv.linux.org.uk> | 2015-04-06 17:50:59 -0400 |
| commit | b2edffdd912b4205899a8efa0974dfbbc3216109 (patch) | |
| tree | 64f8f993ab48978b5a3a13912bba685d48b1a04e /fs | |
| parent | 8f778bbc542ddf8f6243b21d6aca087e709cabdc (diff) | |
fix mremap() vs. ioctx_kill() race
teach ->mremap() method to return an error and have it fail for
aio mappings in the process of being killed
Note that in case of ->mremap() failure we need to undo move_page_tables()
we'd already done; we could call ->mremap() first, but then the failure of
move_page_tables() would require undoing whatever _successful_ ->mremap()
has done, which would be a lot more headache in general.
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
Diffstat (limited to 'fs')
| -rw-r--r-- | fs/aio.c | 17 |
1 file changed, 11 insertions, 6 deletions
| @@ -278,11 +278,11 @@ static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 278 | return 0; | 278 | return 0; |
| 279 | } | 279 | } |
| 280 | 280 | ||
| 281 | static void aio_ring_remap(struct file *file, struct vm_area_struct *vma) | 281 | static int aio_ring_remap(struct file *file, struct vm_area_struct *vma) |
| 282 | { | 282 | { |
| 283 | struct mm_struct *mm = vma->vm_mm; | 283 | struct mm_struct *mm = vma->vm_mm; |
| 284 | struct kioctx_table *table; | 284 | struct kioctx_table *table; |
| 285 | int i; | 285 | int i, res = -EINVAL; |
| 286 | 286 | ||
| 287 | spin_lock(&mm->ioctx_lock); | 287 | spin_lock(&mm->ioctx_lock); |
| 288 | rcu_read_lock(); | 288 | rcu_read_lock(); |
| @@ -292,13 +292,17 @@ static void aio_ring_remap(struct file *file, struct vm_area_struct *vma) | |||
| 292 | 292 | ||
| 293 | ctx = table->table[i]; | 293 | ctx = table->table[i]; |
| 294 | if (ctx && ctx->aio_ring_file == file) { | 294 | if (ctx && ctx->aio_ring_file == file) { |
| 295 | ctx->user_id = ctx->mmap_base = vma->vm_start; | 295 | if (!atomic_read(&ctx->dead)) { |
| 296 | ctx->user_id = ctx->mmap_base = vma->vm_start; | ||
| 297 | res = 0; | ||
| 298 | } | ||
| 296 | break; | 299 | break; |
| 297 | } | 300 | } |
| 298 | } | 301 | } |
| 299 | 302 | ||
| 300 | rcu_read_unlock(); | 303 | rcu_read_unlock(); |
| 301 | spin_unlock(&mm->ioctx_lock); | 304 | spin_unlock(&mm->ioctx_lock); |
| 305 | return res; | ||
| 302 | } | 306 | } |
| 303 | 307 | ||
| 304 | static const struct file_operations aio_ring_fops = { | 308 | static const struct file_operations aio_ring_fops = { |
| @@ -748,11 +752,12 @@ static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, | |||
| 748 | { | 752 | { |
| 749 | struct kioctx_table *table; | 753 | struct kioctx_table *table; |
| 750 | 754 | ||
| 751 | if (atomic_xchg(&ctx->dead, 1)) | 755 | spin_lock(&mm->ioctx_lock); |
| 756 | if (atomic_xchg(&ctx->dead, 1)) { | ||
| 757 | spin_unlock(&mm->ioctx_lock); | ||
| 752 | return -EINVAL; | 758 | return -EINVAL; |
| 759 | } | ||
| 753 | 760 | ||
| 754 | |||
| 755 | spin_lock(&mm->ioctx_lock); | ||
| 756 | table = rcu_dereference_raw(mm->ioctx_table); | 761 | table = rcu_dereference_raw(mm->ioctx_table); |
| 757 | WARN_ON(ctx != table->table[ctx->id]); | 762 | WARN_ON(ctx != table->table[ctx->id]); |
| 758 | table->table[ctx->id] = NULL; | 763 | table->table[ctx->id] = NULL; |
